Support invoke-interface in optimizing.

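Interface calls are dispatched through the IMT embedded in the
receiver's class: the builder now creates an HInvokeInterface node
carrying the interface method's dex method index and IMT index, and
the ARM, x86 and x86-64 back ends load the receiver's class, index
the embedded IMT with imt_index % kImtSize, pass the dex method index
as a hidden argument (R12 on ARM, XMM0 on x86, RAX on x86-64) and
call through the quick entry point. arm64 keeps HInvokeInterface in
its unsupported-instruction list for now, and temporaries may now
also be FPU registers, which the x86 hidden argument requires.

As a rough sketch of the emitted dispatch, assuming simplified
stand-in types (the struct layouts and the kImtSize value below are
illustrative placeholders, not the real runtime classes):

    #include <cstdint>

    static constexpr uint32_t kImtSize = 64;  // Stand-in for mirror::Class::kImtSize.

    struct Method {
      void (*entry_point)(uint32_t hidden_dex_method_index);  // Quick entry point.
    };

    struct Class {
      Method* embedded_imt[kImtSize];  // Embedded interface method table.
    };

    struct Object {
      Class* klass;
    };

    void DispatchInterfaceCall(Object* receiver, uint32_t imt_index,
                               uint32_t dex_method_index) {
      Class* klass = receiver->klass;                              // temp = object->GetClass()
      Method* method = klass->embedded_imt[imt_index % kImtSize];  // temp->GetImtEntryAt(...)
      method->entry_point(dex_method_index);                       // hidden argument, then call
    }
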
Change-Id: Ic18d7c3d2810557231caf0571956e0c431f5d384
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 64fb764..8baf3c5 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -381,20 +381,28 @@
   const size_t number_of_arguments = strlen(descriptor) - (is_instance_call ? 0 : 1);
 
   HInvoke* invoke = nullptr;
-  if (invoke_type == kVirtual) {
+  if (invoke_type == kVirtual || invoke_type == kInterface) {
     MethodReference target_method(dex_file_, method_idx);
     uintptr_t direct_code;
     uintptr_t direct_method;
-    int vtable_index;
+    int table_index;
+    InvokeType optimized_invoke_type = invoke_type;
     // TODO: Add devirtualization support.
     compiler_driver_->ComputeInvokeInfo(dex_compilation_unit_, dex_offset, true, true,
-                                        &invoke_type, &target_method, &vtable_index,
+                                        &optimized_invoke_type, &target_method, &table_index,
                                         &direct_code, &direct_method);
-    if (vtable_index == -1) {
+    if (table_index == -1) {
       return false;
     }
-    invoke = new (arena_) HInvokeVirtual(
-        arena_, number_of_arguments, return_type, dex_offset, vtable_index);
+
+    if (invoke_type == kVirtual) {
+      invoke = new (arena_) HInvokeVirtual(
+          arena_, number_of_arguments, return_type, dex_offset, table_index);
+    } else {
+      DCHECK_EQ(invoke_type, kInterface);
+      invoke = new (arena_) HInvokeInterface(
+          arena_, number_of_arguments, return_type, dex_offset, method_idx, table_index);
+    }
   } else {
     // Treat invoke-direct like static calls for now.
     invoke = new (arena_) HInvokeStatic(
@@ -870,7 +878,8 @@
 
     case Instruction::INVOKE_STATIC:
     case Instruction::INVOKE_DIRECT:
-    case Instruction::INVOKE_VIRTUAL: {
+    case Instruction::INVOKE_VIRTUAL:
+    case Instruction::INVOKE_INTERFACE: {
       uint32_t method_idx = instruction.VRegB_35c();
       uint32_t number_of_vreg_arguments = instruction.VRegA_35c();
       uint32_t args[5];
@@ -883,7 +892,8 @@
 
     case Instruction::INVOKE_STATIC_RANGE:
     case Instruction::INVOKE_DIRECT_RANGE:
-    case Instruction::INVOKE_VIRTUAL_RANGE: {
+    case Instruction::INVOKE_VIRTUAL_RANGE:
+    case Instruction::INVOKE_INTERFACE_RANGE: {
       uint32_t method_idx = instruction.VRegB_3rc();
       uint32_t number_of_vreg_arguments = instruction.VRegA_3rc();
       uint32_t register_index = instruction.VRegC();
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index c75980d..c965489 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -216,10 +216,14 @@
 
   for (size_t i = 0, e = locations->GetTempCount(); i < e; ++i) {
     Location loc = locations->GetTemp(i);
+    // The DCHECKs below check that a register is not specified twice in
+    // the summary.
     if (loc.IsRegister()) {
-      // Check that a register is not specified twice in the summary.
       DCHECK(!blocked_core_registers_[loc.reg()]);
       blocked_core_registers_[loc.reg()] = true;
+    } else if (loc.IsFpuRegister()) {
+      DCHECK(!blocked_fpu_registers_[loc.reg()]);
+      blocked_fpu_registers_[loc.reg()] = true;
     } else {
       DCHECK_EQ(loc.GetPolicy(), Location::kRequiresRegister);
     }
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index fef7f0e..8c54cdf 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -1131,10 +1131,6 @@
   DCHECK(!codegen_->IsLeafMethod());
 }
 
-void LocationsBuilderARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
-  HandleInvoke(invoke);
-}
-
 void LocationsBuilderARM::HandleInvoke(HInvoke* invoke) {
   LocationSummary* locations =
       new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall);
@@ -1149,6 +1145,9 @@
   locations->SetOut(calling_convention_visitor.GetReturnLocation(invoke->GetType()));
 }
 
+void LocationsBuilderARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
+  HandleInvoke(invoke);
+}
 
 void InstructionCodeGeneratorARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
   Register temp = invoke->GetLocations()->GetTemp(0).As<Register>();
@@ -1175,6 +1174,42 @@
   codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
 }
 
+void LocationsBuilderARM::VisitInvokeInterface(HInvokeInterface* invoke) {
+  HandleInvoke(invoke);
+  // Add the hidden argument.
+  invoke->GetLocations()->AddTemp(Location::RegisterLocation(R12));
+}
+
+void InstructionCodeGeneratorARM::VisitInvokeInterface(HInvokeInterface* invoke) {
+  // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
+  Register temp = invoke->GetLocations()->GetTemp(0).As<Register>();
+  uint32_t method_offset = mirror::Class::EmbeddedImTableOffset().Uint32Value() +
+          (invoke->GetImtIndex() % mirror::Class::kImtSize) * sizeof(mirror::Class::ImTableEntry);
+  LocationSummary* locations = invoke->GetLocations();
+  Location receiver = locations->InAt(0);
+  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+
+  // Set the hidden argument.
+  __ LoadImmediate(invoke->GetLocations()->GetTemp(1).As<Register>(), invoke->GetDexMethodIndex());
+
+  // temp = object->GetClass();
+  if (receiver.IsStackSlot()) {
+    __ LoadFromOffset(kLoadWord, temp, SP, receiver.GetStackIndex());
+    __ LoadFromOffset(kLoadWord, temp, temp, class_offset);
+  } else {
+    __ LoadFromOffset(kLoadWord, temp, receiver.As<Register>(), class_offset);
+  }
+  // temp = temp->GetImtEntryAt(method_offset);
+  uint32_t entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value();
+  __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
+  // LR = temp->GetEntryPoint();
+  __ LoadFromOffset(kLoadWord, LR, temp, entry_point);
+  // LR();
+  __ blx(LR);
+  DCHECK(!codegen_->IsLeafMethod());
+  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+}
+
 void LocationsBuilderARM::VisitNeg(HNeg* neg) {
   LocationSummary* locations =
       new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 1be5717..667b075 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -539,6 +539,7 @@
   M(Div)                                                   \
   M(DivZeroCheck)                                          \
   M(FloatConstant)                                         \
+  M(InvokeInterface)                                       \
   M(LoadClass)                                             \
   M(LoadException)                                         \
   M(LoadString)                                            \
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 127ddbe..91aa269 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1104,6 +1104,41 @@
   codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
 }
 
+void LocationsBuilderX86::VisitInvokeInterface(HInvokeInterface* invoke) {
+  HandleInvoke(invoke);
+  // Add the hidden argument.
+  invoke->GetLocations()->AddTemp(Location::FpuRegisterLocation(XMM0));
+}
+
+void InstructionCodeGeneratorX86::VisitInvokeInterface(HInvokeInterface* invoke) {
+  // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
+  Register temp = invoke->GetLocations()->GetTemp(0).As<Register>();
+  uint32_t method_offset = mirror::Class::EmbeddedImTableOffset().Uint32Value() +
+          (invoke->GetImtIndex() % mirror::Class::kImtSize) * sizeof(mirror::Class::ImTableEntry);
+  LocationSummary* locations = invoke->GetLocations();
+  Location receiver = locations->InAt(0);
+  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+
+  // Set the hidden argument.
+  __ movl(temp, Immediate(invoke->GetDexMethodIndex()));
+  __ movd(invoke->GetLocations()->GetTemp(1).As<XmmRegister>(), temp);
+
+  // temp = object->GetClass();
+  if (receiver.IsStackSlot()) {
+    __ movl(temp, Address(ESP, receiver.GetStackIndex()));
+    __ movl(temp, Address(temp, class_offset));
+  } else {
+    __ movl(temp, Address(receiver.As<Register>(), class_offset));
+  }
+  // temp = temp->GetImtEntryAt(method_offset);
+  __ movl(temp, Address(temp, method_offset));
+  // call temp->GetEntryPoint();
+  __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value()));
+
+  DCHECK(!codegen_->IsLeafMethod());
+  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+}
+
 void LocationsBuilderX86::VisitNeg(HNeg* neg) {
   LocationSummary* locations =
       new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 8c0842c..1130b4c 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1031,10 +1031,6 @@
   codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
 }
 
-void LocationsBuilderX86_64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
-  HandleInvoke(invoke);
-}
-
 void LocationsBuilderX86_64::HandleInvoke(HInvoke* invoke) {
   LocationSummary* locations =
       new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall);
@@ -1067,6 +1063,10 @@
   }
 }
 
+void LocationsBuilderX86_64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
+  HandleInvoke(invoke);
+}
+
 void InstructionCodeGeneratorX86_64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
   CpuRegister temp = invoke->GetLocations()->GetTemp(0).As<CpuRegister>();
   size_t method_offset = mirror::Class::EmbeddedVTableOffset().SizeValue() +
@@ -1090,6 +1090,41 @@
   codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
 }
 
+void LocationsBuilderX86_64::VisitInvokeInterface(HInvokeInterface* invoke) {
+  HandleInvoke(invoke);
+  // Add the hidden argument.
+  invoke->GetLocations()->AddTemp(Location::RegisterLocation(RAX));
+}
+
+void InstructionCodeGeneratorX86_64::VisitInvokeInterface(HInvokeInterface* invoke) {
+  // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
+  CpuRegister temp = invoke->GetLocations()->GetTemp(0).As<CpuRegister>();
+  uint32_t method_offset = mirror::Class::EmbeddedImTableOffset().Uint32Value() +
+          (invoke->GetImtIndex() % mirror::Class::kImtSize) * sizeof(mirror::Class::ImTableEntry);
+  LocationSummary* locations = invoke->GetLocations();
+  Location receiver = locations->InAt(0);
+  size_t class_offset = mirror::Object::ClassOffset().SizeValue();
+
+  // Set the hidden argument.
+  __ movq(invoke->GetLocations()->GetTemp(1).As<CpuRegister>(),
+          Immediate(invoke->GetDexMethodIndex()));
+
+  // temp = object->GetClass();
+  if (receiver.IsStackSlot()) {
+    __ movl(temp, Address(CpuRegister(RSP), receiver.GetStackIndex()));
+    __ movl(temp, Address(temp, class_offset));
+  } else {
+    __ movl(temp, Address(receiver.As<CpuRegister>(), class_offset));
+  }
+  // temp = temp->GetImtEntryAt(method_offset);
+  __ movl(temp, Address(temp, method_offset));
+  // call temp->GetEntryPoint();
+  __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().SizeValue()));
+
+  DCHECK(!codegen_->IsLeafMethod());
+  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+}
+
 void LocationsBuilderX86_64::VisitNeg(HNeg* neg) {
   LocationSummary* locations =
       new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 37e5e6b..9253f0b 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -495,6 +495,7 @@
   M(InstanceFieldGet, Instruction)                                      \
   M(InstanceFieldSet, Instruction)                                      \
   M(IntConstant, Constant)                                              \
+  M(InvokeInterface, Invoke)                                            \
   M(InvokeStatic, Invoke)                                               \
   M(InvokeVirtual, Invoke)                                              \
   M(LessThan, Condition)                                                \
@@ -1603,6 +1604,30 @@
   DISALLOW_COPY_AND_ASSIGN(HInvokeVirtual);
 };
 
+class HInvokeInterface : public HInvoke {
+ public:
+  HInvokeInterface(ArenaAllocator* arena,
+                   uint32_t number_of_arguments,
+                   Primitive::Type return_type,
+                   uint32_t dex_pc,
+                   uint32_t dex_method_index,
+                   uint32_t imt_index)
+      : HInvoke(arena, number_of_arguments, return_type, dex_pc),
+        dex_method_index_(dex_method_index),
+        imt_index_(imt_index) {}
+
+  uint32_t GetImtIndex() const { return imt_index_; }
+  uint32_t GetDexMethodIndex() const { return dex_method_index_; }
+
+  DECLARE_INSTRUCTION(InvokeInterface);
+
+ private:
+  const uint32_t dex_method_index_;
+  const uint32_t imt_index_;
+
+  DISALLOW_COPY_AND_ASSIGN(HInvokeInterface);
+};
+
 class HNewInstance : public HExpression<0> {
  public:
   HNewInstance(uint32_t dex_pc, uint16_t type_index)
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index 0745f9c..bef4af4 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -185,10 +185,11 @@
   // Create synthesized intervals for temporaries.
   for (size_t i = 0; i < locations->GetTempCount(); ++i) {
     Location temp = locations->GetTemp(i);
-    if (temp.IsRegister()) {
+    if (temp.IsRegister() || temp.IsFpuRegister()) {
       BlockRegister(temp, position, position + 1);
     } else {
       DCHECK(temp.IsUnallocated());
+      DCHECK(temp.GetPolicy() == Location::kRequiresRegister);
       LiveInterval* interval = LiveInterval::MakeTempInterval(allocator_, Primitive::kPrimInt);
       temp_intervals_.Add(interval);
       interval->AddRange(position, position + 1);