Fix invokeinterface sharpened with kRuntimeCall.

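When the hidden argument load kind is kRuntimeCall, do not emit a
separate load for the hidden argument; instead pass the method fetched
from the IMT entry itself: if the slot holds the resolved method this
is a no-op, and if it holds the conflict stub it makes the trampoline
go into the runtime to resolve the actual method. Also fix the arm64
kRecursive path, which loaded the receiver's stack slot instead of the
interface method's.

For illustration, a minimal, hypothetical Java sketch of the kind of
dispatch this affects (whether foo() and bar() actually share an IMT
slot depends on the method hashing, so this is a sketch only):

  interface A { default int foo() { return 1; } }
  interface B { default int bar() { return 2; } }

  public class ImtConflictSketch implements A, B {
    public static void main(String[] args) {
      A a = new ImtConflictSketch();
      B b = new ImtConflictSketch();
      // Each invokeinterface dispatches through the receiver's IMT. On a
      // slot collision the entry is a conflict stub, and the hidden
      // argument must let the trampoline find the real target.
      System.out.println(a.foo() + b.bar());
    }
  }
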
Bug: 174260111
Bug: 173677667

Test: 728-imt-conflict-zygote
Test: atest com.android.bootimageprofile.BootImageProfileTest#testSystemServerProfile
Test: adb install com.google.android.art.apex
Change-Id: Ie600a0c8c8eb38d9084b796bac9184c06ea0a2f4
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index f90c092..efa65fc 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -4436,11 +4436,15 @@
   if (invoke->GetHiddenArgumentLoadKind() == MethodLoadKind::kRecursive) {
     Location interface_method = locations->InAt(invoke->GetNumberOfArguments() - 1);
     if (interface_method.IsStackSlot()) {
-      __ Ldr(ip1, StackOperandFrom(receiver));
+      __ Ldr(ip1, StackOperandFrom(interface_method));
     } else {
       __ Mov(ip1, XRegisterFrom(interface_method));
     }
-  } else {
+  // If the load kind is through a runtime call, we pass the method we
+  // fetch from the IMT, which either is a no-op if we don't hit the
+  // conflict stub, or makes us always go through the trampoline when
+  // there is a conflict.
+  } else if (invoke->GetHiddenArgumentLoadKind() != MethodLoadKind::kRuntimeCall) {
     codegen_->LoadMethod(
         invoke->GetHiddenArgumentLoadKind(), Location::RegisterLocation(ip1.GetCode()), invoke);
   }
@@ -4451,6 +4455,11 @@
       invoke->GetImtIndex(), kArm64PointerSize));
   // temp = temp->GetImtEntryAt(method_offset);
   __ Ldr(temp, MemOperand(temp, method_offset));
+  if (invoke->GetHiddenArgumentLoadKind() == MethodLoadKind::kRuntimeCall) {
+    // We pass the method fetched from the IMT; in case of a conflict, this
+    // ensures we go into the runtime to resolve the actual method.
+    __ Mov(ip1, temp);
+  }
   // lr = temp->GetEntryPoint();
   __ Ldr(lr, MemOperand(temp, entry_point.Int32Value()));
 
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index fcc4e06..cdd5d22 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -3540,6 +3540,11 @@
       } else {
         __ Mov(RegisterFrom(hidden_reg), RegisterFrom(current_method));
       }
+    } else if (invoke->GetHiddenArgumentLoadKind() == MethodLoadKind::kRuntimeCall) {
+      // We pass the method fetched from the IMT; in case of a conflict, this
+      // ensures we go into the runtime to resolve the actual method.
+      CHECK_NE(temp.GetCode(), lr.GetCode());
+      __ Mov(RegisterFrom(hidden_reg), temp);
     } else {
       codegen_->LoadMethod(invoke->GetHiddenArgumentLoadKind(), hidden_reg, invoke);
     }
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 7075d38..f6c0270 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -2621,7 +2621,7 @@
   DCHECK_EQ(XMM7, hidden_reg);
   if (invoke->GetHiddenArgumentLoadKind() == MethodLoadKind::kRecursive) {
     __ movd(hidden_reg, locations->InAt(invoke->GetNumberOfArguments() - 1).AsRegister<Register>());
-  } else {
+  } else if (invoke->GetHiddenArgumentLoadKind() != MethodLoadKind::kRuntimeCall) {
     codegen_->LoadMethod(invoke->GetHiddenArgumentLoadKind(), locations->GetTemp(0), invoke);
     __ movd(hidden_reg, temp);
   }
@@ -2653,6 +2653,11 @@
   uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
       invoke->GetImtIndex(), kX86PointerSize));
   __ movl(temp, Address(temp, method_offset));
+  if (invoke->GetHiddenArgumentLoadKind() == MethodLoadKind::kRuntimeCall) {
+    // We pass the method fetched from the IMT; in case of a conflict, this
+    // ensures we go into the runtime to resolve the actual method.
+    __ movd(hidden_reg, temp);
+  }
   // call temp->GetEntryPoint();
   __ call(Address(temp,
                   ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86PointerSize).Int32Value()));
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 1fc4c18..d79c2e4 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -2808,11 +2808,12 @@
 
   codegen_->MaybeGenerateInlineCacheCheck(invoke, temp);
 
-  if (invoke->GetHiddenArgumentLoadKind() != MethodLoadKind::kRecursive) {
+  if (invoke->GetHiddenArgumentLoadKind() != MethodLoadKind::kRecursive &&
+      invoke->GetHiddenArgumentLoadKind() != MethodLoadKind::kRuntimeCall) {
     Location hidden_reg = locations->GetTemp(1);
     // Set the hidden argument. This is safe to do this here, as RAX
     // won't be modified thereafter, before the `call` instruction.
-    // We also di it after MaybeGenerateInlineCache that may use RAX.
+    // We also do it after MaybeGenerateInlineCache that may use RAX.
     DCHECK_EQ(RAX, hidden_reg.AsRegister<Register>());
     codegen_->LoadMethod(invoke->GetHiddenArgumentLoadKind(), hidden_reg, invoke);
   }
@@ -2825,6 +2826,12 @@
       invoke->GetImtIndex(), kX86_64PointerSize));
   // temp = temp->GetImtEntryAt(method_offset);
   __ movq(temp, Address(temp, method_offset));
+  if (invoke->GetHiddenArgumentLoadKind() == MethodLoadKind::kRuntimeCall) {
+    // We pass the method fetched from the IMT; in case of a conflict, this
+    // ensures we go into the runtime to resolve the actual method.
+    Location hidden_reg = locations->GetTemp(1);
+    __ movq(hidden_reg.AsRegister<CpuRegister>(), temp);
+  }
   // call temp->GetEntryPoint();
   __ call(Address(
       temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86_64PointerSize).SizeValue()));