-rw-r--r--  compiler/optimizing/code_generator.cc | 3
-rw-r--r--  compiler/optimizing/code_generator.h | 2
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc | 70
-rw-r--r--  compiler/optimizing/code_generator_arm64.h | 1
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc | 79
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.h | 1
-rw-r--r--  compiler/optimizing/code_generator_x86.cc | 107
-rw-r--r--  compiler/optimizing/code_generator_x86.h | 6
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc | 78
-rw-r--r--  compiler/optimizing/code_generator_x86_64.h | 5
-rw-r--r--  compiler/optimizing/instruction_builder.cc | 22
-rw-r--r--  compiler/optimizing/instruction_simplifier.cc | 2
-rw-r--r--  compiler/optimizing/nodes.h | 34
-rw-r--r--  compiler/optimizing/pc_relative_fixups_x86.cc | 9
-rw-r--r--  compiler/optimizing/sharpening.cc | 2
-rw-r--r--  compiler/optimizing/sharpening.h | 2
-rw-r--r--  runtime/arch/arm/quick_entrypoints_arm.S | 97
-rw-r--r--  runtime/arch/arm64/quick_entrypoints_arm64.S | 78
-rw-r--r--  runtime/arch/x86/quick_entrypoints_x86.S | 85
-rw-r--r--  runtime/arch/x86_64/quick_entrypoints_x86_64.S | 84
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints.cc | 3
-rw-r--r--  runtime/interpreter/mterp/arm64ng/main.S | 12
-rw-r--r--  runtime/interpreter/mterp/nterp.cc | 2
-rw-r--r--  runtime/interpreter/mterp/x86_64ng/main.S | 10
-rw-r--r--  runtime/oat.h | 4
-rw-r--r--  test/812-recursive-default/expected.txt | 0
-rw-r--r--  test/812-recursive-default/info.txt | 2
-rw-r--r--  test/812-recursive-default/src/Main.java | 36
28 files changed, 394 insertions, 442 deletions
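
The bulk of this change replaces the dex-method-index hidden argument of invokeinterface with the target interface ArtMethod* itself, so the IMT conflict stubs no longer need the DexCache fast path or the artLookupResolvedMethod slow path. As orientation for the four rewritten art_quick_imt_conflict_trampoline stubs below, here is a minimal standalone C++ model of the lookup they now perform (simplified stand-in types, not the real art::ArtMethod or ImtConflictTable):

#include <cassert>
#include <cstddef>

struct ArtMethod {};  // Empty stand-in for art::ArtMethod; only its address matters here.

// Each ImtConflictTable entry is an {interface method, implementation} pointer pair;
// a null interface method terminates the table.
struct ImtConflictEntry {
  ArtMethod* interface_method;
  ArtMethod* implementation;
};

// The loop the rewritten stubs implement: compare the hidden ArtMethod* against each
// entry, tail-call the implementation on a hit, and fall back to
// artInvokeInterfaceTrampoline (which populates the table) on a miss.
ArtMethod* LookupImtConflictTable(const ImtConflictEntry* table, ArtMethod* interface_method) {
  for (const ImtConflictEntry* entry = table; entry->interface_method != nullptr; ++entry) {
    if (entry->interface_method == interface_method) {
      return entry->implementation;  // .Limt_table_found
    }
  }
  return nullptr;  // .Lconflict_trampoline
}

int main() {
  ArtMethod iface_a, impl_a, iface_b;
  ImtConflictEntry table[] = {{&iface_a, &impl_a}, {nullptr, nullptr}};
  assert(LookupImtConflictTable(table, &iface_a) == &impl_a);  // Hit: jump to the target's quick code.
  assert(LookupImtConflictTable(table, &iface_b) == nullptr);  // Miss: go to the runtime.
  return 0;
}
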
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 9ff1f73de4..c2ae0e0632 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -923,8 +923,7 @@ uint32_t CodeGenerator::GetBootImageOffset(HLoadString* load_string) NO_THREAD_S
return GetBootImageOffsetImpl(string.Ptr(), ImageHeader::kSectionObjects);
}
-uint32_t CodeGenerator::GetBootImageOffset(HInvokeStaticOrDirect* invoke) {
- DCHECK_EQ(invoke->GetMethodLoadKind(), MethodLoadKind::kBootImageRelRo);
+uint32_t CodeGenerator::GetBootImageOffset(HInvoke* invoke) {
ArtMethod* method = invoke->GetResolvedMethod();
DCHECK(method != nullptr);
return GetBootImageOffsetImpl(method, ImageHeader::kSectionArtMethods);
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 1a01be9708..22804a992f 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -632,7 +632,7 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
uint32_t GetBootImageOffset(HLoadClass* load_class);
uint32_t GetBootImageOffset(HLoadString* load_string);
- uint32_t GetBootImageOffset(HInvokeStaticOrDirect* invoke);
+ uint32_t GetBootImageOffset(HInvoke* invoke);
static void CreateSystemArrayCopyLocationSummary(HInvoke* invoke);
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 36040ca690..007aa43363 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -4351,6 +4351,10 @@ void LocationsBuilderARM64::HandleInvoke(HInvoke* invoke) {
void LocationsBuilderARM64::VisitInvokeInterface(HInvokeInterface* invoke) {
HandleInvoke(invoke);
+ if (invoke->GetHiddenArgumentLoadKind() == MethodLoadKind::kRecursive) {
+ // We cannot request ip1 as it's blocked by the register allocator.
+ invoke->GetLocations()->SetInAt(invoke->GetNumberOfArguments() - 1, Location::Any());
+ }
}
void CodeGeneratorARM64::MaybeGenerateInlineCacheCheck(HInstruction* instruction,
@@ -4421,7 +4425,17 @@ void InstructionCodeGeneratorARM64::VisitInvokeInterface(HInvokeInterface* invok
MacroAssembler* masm = GetVIXLAssembler();
UseScratchRegisterScope scratch_scope(masm);
scratch_scope.Exclude(ip1);
- __ Mov(ip1, invoke->GetMethodReference().index);
+ if (invoke->GetHiddenArgumentLoadKind() == MethodLoadKind::kRecursive) {
+ Location interface_method = locations->InAt(invoke->GetNumberOfArguments() - 1);
+ if (interface_method.IsStackSlot()) {
+      __ Ldr(ip1, StackOperandFrom(interface_method));
+ } else {
+ __ Mov(ip1, XRegisterFrom(interface_method));
+ }
+ } else {
+ codegen_->LoadMethod(
+ invoke->GetHiddenArgumentLoadKind(), Location::RegisterLocation(ip1.GetCode()), invoke);
+ }
__ Ldr(temp,
MemOperand(temp, mirror::Class::ImtPtrOffset(kArm64PointerSize).Uint32Value()));
@@ -4489,21 +4503,8 @@ HInvokeStaticOrDirect::DispatchInfo CodeGeneratorARM64::GetSupportedInvokeStatic
return desired_dispatch_info;
}
-void CodeGeneratorARM64::GenerateStaticOrDirectCall(
- HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path) {
- // Make sure that ArtMethod* is passed in kArtMethodRegister as per the calling convention.
- Location callee_method = temp; // For all kinds except kRecursive, callee will be in temp.
- switch (invoke->GetMethodLoadKind()) {
- case MethodLoadKind::kStringInit: {
- uint32_t offset =
- GetThreadOffset<kArm64PointerSize>(invoke->GetStringInitEntryPoint()).Int32Value();
- // temp = thread->string_init_entrypoint
- __ Ldr(XRegisterFrom(temp), MemOperand(tr, offset));
- break;
- }
- case MethodLoadKind::kRecursive:
- callee_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodIndex());
- break;
+void CodeGeneratorARM64::LoadMethod(MethodLoadKind load_kind, Location temp, HInvoke* invoke) {
+ switch (load_kind) {
case MethodLoadKind::kBootImageLinkTimePcRelative: {
DCHECK(GetCompilerOptions().IsBootImage() || GetCompilerOptions().IsBootImageExtension());
// Add ADRP with its PC-relative method patch.
@@ -4538,14 +4539,47 @@ void CodeGeneratorARM64::GenerateStaticOrDirectCall(
EmitLdrOffsetPlaceholder(ldr_label, XRegisterFrom(temp), XRegisterFrom(temp));
break;
}
- case MethodLoadKind::kJitDirectAddress:
+ case MethodLoadKind::kJitDirectAddress: {
// Load method address from literal pool.
- __ Ldr(XRegisterFrom(temp), DeduplicateUint64Literal(invoke->GetMethodAddress()));
+ __ Ldr(XRegisterFrom(temp),
+ DeduplicateUint64Literal(reinterpret_cast<uint64_t>(invoke->GetResolvedMethod())));
+ break;
+ }
+ case MethodLoadKind::kRuntimeCall: {
+ // Test situation, don't do anything.
break;
+ }
+ default: {
+ LOG(FATAL) << "Load kind should have already been handled " << load_kind;
+ UNREACHABLE();
+ }
+ }
+}
+
+void CodeGeneratorARM64::GenerateStaticOrDirectCall(
+ HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path) {
+ // Make sure that ArtMethod* is passed in kArtMethodRegister as per the calling convention.
+ Location callee_method = temp; // For all kinds except kRecursive, callee will be in temp.
+ switch (invoke->GetMethodLoadKind()) {
+ case MethodLoadKind::kStringInit: {
+ uint32_t offset =
+ GetThreadOffset<kArm64PointerSize>(invoke->GetStringInitEntryPoint()).Int32Value();
+ // temp = thread->string_init_entrypoint
+ __ Ldr(XRegisterFrom(temp), MemOperand(tr, offset));
+ break;
+ }
+ case MethodLoadKind::kRecursive: {
+ callee_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodIndex());
+ break;
+ }
case MethodLoadKind::kRuntimeCall: {
GenerateInvokeStaticOrDirectRuntimeCall(invoke, temp, slow_path);
return; // No code pointer retrieval; the runtime performs the call directly.
}
+ default: {
+ LoadMethod(invoke->GetMethodLoadKind(), temp, invoke);
+ break;
+ }
}
auto call_code_pointer_member = [&](MemberOffset offset) {
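
The same refactoring is repeated for ARM, x86, and x86-64 below: GenerateStaticOrDirectCall keeps only the kinds that depend on call-site state (kStringInit, kRecursive, kRuntimeCall) and defers the rest to the new LoadMethod helper, which VisitInvokeInterface reuses to materialize the hidden argument. A minimal sketch of that split, with the emitted code reduced to descriptive strings (not the real backend API):

#include <iostream>
#include <string>

enum class MethodLoadKind { kStringInit, kRecursive, kBootImageLinkTimePcRelative,
                            kBootImageRelRo, kBssEntry, kJitDirectAddress, kRuntimeCall };

// Shared helper: only "load an ArtMethod* into temp" kinds belong here.
std::string LoadMethod(MethodLoadKind kind) {
  switch (kind) {
    case MethodLoadKind::kBootImageLinkTimePcRelative: return "emit a boot-image method patch";
    case MethodLoadKind::kBootImageRelRo:              return "load the .data.bimg.rel.ro entry";
    case MethodLoadKind::kBssEntry:                    return "load the .bss entry";
    case MethodLoadKind::kJitDirectAddress:            return "load the resolved ArtMethod* address";
    case MethodLoadKind::kRuntimeCall:                 return "nothing (test situation)";
    default:                                           return "unreachable: handled by the caller";
  }
}

std::string GenerateStaticOrDirectCall(MethodLoadKind kind) {
  switch (kind) {
    case MethodLoadKind::kStringInit:  return "load the entrypoint from a Thread-local slot";
    case MethodLoadKind::kRecursive:   return "reuse the current-method input";
    case MethodLoadKind::kRuntimeCall: return "hand the call to the runtime invoke stub";
    default:                           return LoadMethod(kind);  // Shared with the interface hidden argument.
  }
}

int main() {
  std::cout << GenerateStaticOrDirectCall(MethodLoadKind::kBssEntry) << "\n";
  std::cout << LoadMethod(MethodLoadKind::kBootImageRelRo) << "\n";
}
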
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 447c0b501f..7ae46d77e8 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -689,6 +689,7 @@ class CodeGeneratorARM64 : public CodeGenerator {
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
ArtMethod* method) override;
+ void LoadMethod(MethodLoadKind load_kind, Location temp, HInvoke* invoke);
void GenerateStaticOrDirectCall(
HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
void GenerateVirtualCall(
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 0940f4289c..85337ed767 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -3432,7 +3432,10 @@ void InstructionCodeGeneratorARMVIXL::VisitInvokeVirtual(HInvokeVirtual* invoke)
void LocationsBuilderARMVIXL::VisitInvokeInterface(HInvokeInterface* invoke) {
HandleInvoke(invoke);
// Add the hidden argument.
- invoke->GetLocations()->AddTemp(LocationFrom(r12));
+ if (invoke->GetHiddenArgumentLoadKind() == MethodLoadKind::kRecursive) {
+ // We cannot request r12 as it's blocked by the register allocator.
+ invoke->GetLocations()->SetInAt(invoke->GetNumberOfArguments() - 1, Location::Any());
+ }
}
void CodeGeneratorARMVIXL::MaybeGenerateInlineCacheCheck(HInstruction* instruction,
@@ -3468,7 +3471,6 @@ void InstructionCodeGeneratorARMVIXL::VisitInvokeInterface(HInvokeInterface* inv
// TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
LocationSummary* locations = invoke->GetLocations();
vixl32::Register temp = RegisterFrom(locations->GetTemp(0));
- vixl32::Register hidden_reg = RegisterFrom(locations->GetTemp(1));
Location receiver = locations->InAt(0);
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
@@ -3509,11 +3511,10 @@ void InstructionCodeGeneratorARMVIXL::VisitInvokeInterface(HInvokeInterface* inv
// LR = temp->GetEntryPoint();
GetAssembler()->LoadFromOffset(kLoadWord, lr, temp, entry_point);
- // Set the hidden (in r12) argument. It is done here, right before a BLX to prevent other
- // instruction from clobbering it as they might use r12 as a scratch register.
- DCHECK(hidden_reg.Is(r12));
-
{
+    // Set the hidden (in r12) argument. It is done here, right before a BLX to prevent other
+    // instructions from clobbering it as they might use r12 as a scratch register.
+ Location hidden_reg = Location::RegisterLocation(r12.GetCode());
// The VIXL macro assembler may clobber any of the scratch registers that are available to it,
// so it checks if the application is using them (by passing them to the macro assembler
// methods). The following application of UseScratchRegisterScope corrects VIXL's notion of
@@ -3523,8 +3524,18 @@ void InstructionCodeGeneratorARMVIXL::VisitInvokeInterface(HInvokeInterface* inv
// (to materialize the constant), since the destination register becomes available for such use
// internally for the duration of the macro instruction.
UseScratchRegisterScope temps(GetVIXLAssembler());
- temps.Exclude(hidden_reg);
- __ Mov(hidden_reg, invoke->GetMethodReference().index);
+ temps.Exclude(RegisterFrom(hidden_reg));
+ if (invoke->GetHiddenArgumentLoadKind() == MethodLoadKind::kRecursive) {
+ Location current_method = locations->InAt(invoke->GetNumberOfArguments() - 1);
+ if (current_method.IsStackSlot()) {
+ GetAssembler()->LoadFromOffset(
+ kLoadWord, RegisterFrom(hidden_reg), sp, current_method.GetStackIndex());
+ } else {
+ __ Mov(RegisterFrom(hidden_reg), RegisterFrom(current_method));
+ }
+ } else {
+ codegen_->LoadMethod(invoke->GetHiddenArgumentLoadKind(), hidden_reg, invoke);
+ }
}
{
// Ensure the pc position is recorded immediately after the `blx` instruction.
@@ -9069,20 +9080,9 @@ HInvokeStaticOrDirect::DispatchInfo CodeGeneratorARMVIXL::GetSupportedInvokeStat
return desired_dispatch_info;
}
-void CodeGeneratorARMVIXL::GenerateStaticOrDirectCall(
- HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path) {
- Location callee_method = temp; // For all kinds except kRecursive, callee will be in temp.
- switch (invoke->GetMethodLoadKind()) {
- case MethodLoadKind::kStringInit: {
- uint32_t offset =
- GetThreadOffset<kArmPointerSize>(invoke->GetStringInitEntryPoint()).Int32Value();
- // temp = thread->string_init_entrypoint
- GetAssembler()->LoadFromOffset(kLoadWord, RegisterFrom(temp), tr, offset);
- break;
- }
- case MethodLoadKind::kRecursive:
- callee_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodIndex());
- break;
+
+void CodeGeneratorARMVIXL::LoadMethod(MethodLoadKind load_kind, Location temp, HInvoke* invoke) {
+ switch (load_kind) {
case MethodLoadKind::kBootImageLinkTimePcRelative: {
DCHECK(GetCompilerOptions().IsBootImage() || GetCompilerOptions().IsBootImageExtension());
PcRelativePatchInfo* labels = NewBootImageMethodPatch(invoke->GetResolvedMethodReference());
@@ -9106,13 +9106,44 @@ void CodeGeneratorARMVIXL::GenerateStaticOrDirectCall(
GetAssembler()->LoadFromOffset(kLoadWord, temp_reg, temp_reg, /* offset*/ 0);
break;
}
- case MethodLoadKind::kJitDirectAddress:
- __ Mov(RegisterFrom(temp), Operand::From(invoke->GetMethodAddress()));
+ case MethodLoadKind::kJitDirectAddress: {
+ __ Mov(RegisterFrom(temp), Operand::From(invoke->GetResolvedMethod()));
+ break;
+ }
+ case MethodLoadKind::kRuntimeCall: {
+ // Test situation, don't do anything.
break;
+ }
+ default: {
+ LOG(FATAL) << "Load kind should have already been handled " << load_kind;
+ UNREACHABLE();
+ }
+ }
+}
+
+void CodeGeneratorARMVIXL::GenerateStaticOrDirectCall(
+ HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path) {
+ Location callee_method = temp; // For all kinds except kRecursive, callee will be in temp.
+ switch (invoke->GetMethodLoadKind()) {
+ case MethodLoadKind::kStringInit: {
+ uint32_t offset =
+ GetThreadOffset<kArmPointerSize>(invoke->GetStringInitEntryPoint()).Int32Value();
+ // temp = thread->string_init_entrypoint
+ GetAssembler()->LoadFromOffset(kLoadWord, RegisterFrom(temp), tr, offset);
+ break;
+ }
+ case MethodLoadKind::kRecursive: {
+ callee_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodIndex());
+ break;
+ }
case MethodLoadKind::kRuntimeCall: {
GenerateInvokeStaticOrDirectRuntimeCall(invoke, temp, slow_path);
return; // No code pointer retrieval; the runtime performs the call directly.
}
+ default: {
+ LoadMethod(invoke->GetMethodLoadKind(), temp, invoke);
+ break;
+ }
}
auto call_code_pointer_member = [&](MemberOffset offset) {
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index 0453d20cc6..12594ed7c0 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -581,6 +581,7 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
ArtMethod* method) override;
+ void LoadMethod(MethodLoadKind load_kind, Location temp, HInvoke* invoke);
void GenerateStaticOrDirectCall(
HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
void GenerateVirtualCall(
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 86e6b959ed..d05c2d95cd 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -2564,6 +2564,16 @@ void LocationsBuilderX86::VisitInvokeInterface(HInvokeInterface* invoke) {
// Add one temporary for inline cache update.
invoke->GetLocations()->AddTemp(Location::RegisterLocation(EBP));
}
+
+ // For PC-relative load kinds the invoke has an extra input, the PC-relative address base.
+ if (IsPcRelativeMethodLoadKind(invoke->GetHiddenArgumentLoadKind())) {
+ invoke->GetLocations()->SetInAt(invoke->GetSpecialInputIndex(), Location::RequiresRegister());
+ }
+
+ if (invoke->GetHiddenArgumentLoadKind() == MethodLoadKind::kRecursive) {
+ invoke->GetLocations()->SetInAt(invoke->GetNumberOfArguments() - 1,
+ Location::RequiresRegister());
+ }
}
void CodeGeneratorX86::MaybeGenerateInlineCacheCheck(HInstruction* instruction, Register klass) {
@@ -2608,8 +2618,12 @@ void InstructionCodeGeneratorX86::VisitInvokeInterface(HInvokeInterface* invoke)
// Set the hidden argument. This is safe to do this here, as XMM7
// won't be modified thereafter, before the `call` instruction.
DCHECK_EQ(XMM7, hidden_reg);
- __ movl(temp, Immediate(invoke->GetMethodReference().index));
- __ movd(hidden_reg, temp);
+ if (invoke->GetHiddenArgumentLoadKind() == MethodLoadKind::kRecursive) {
+ __ movd(hidden_reg, locations->InAt(invoke->GetNumberOfArguments() - 1).AsRegister<Register>());
+ } else {
+ codegen_->LoadMethod(invoke->GetHiddenArgumentLoadKind(), locations->GetTemp(0), invoke);
+ __ movd(hidden_reg, temp);
+ }
if (receiver.IsStackSlot()) {
__ movl(temp, Address(ESP, receiver.GetStackIndex()));
@@ -5147,6 +5161,16 @@ HInvokeStaticOrDirect::DispatchInfo CodeGeneratorX86::GetSupportedInvokeStaticOr
return desired_dispatch_info;
}
+Register CodeGeneratorX86::GetInvokeExtraParameter(HInvoke* invoke, Register temp) {
+ if (invoke->IsInvokeStaticOrDirect()) {
+ return GetInvokeStaticOrDirectExtraParameter(invoke->AsInvokeStaticOrDirect(), temp);
+ }
+ DCHECK(invoke->IsInvokeInterface());
+ Location location =
+ invoke->GetLocations()->InAt(invoke->AsInvokeInterface()->GetSpecialInputIndex());
+ return location.AsRegister<Register>();
+}
+
Register CodeGeneratorX86::GetInvokeStaticOrDirectExtraParameter(HInvokeStaticOrDirect* invoke,
Register temp) {
Location location = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex());
@@ -5172,53 +5196,72 @@ Register CodeGeneratorX86::GetInvokeStaticOrDirectExtraParameter(HInvokeStaticOr
return location.AsRegister<Register>();
}
-void CodeGeneratorX86::GenerateStaticOrDirectCall(
- HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path) {
- Location callee_method = temp; // For all kinds except kRecursive, callee will be in temp.
- switch (invoke->GetMethodLoadKind()) {
- case MethodLoadKind::kStringInit: {
- // temp = thread->string_init_entrypoint
- uint32_t offset =
- GetThreadOffset<kX86PointerSize>(invoke->GetStringInitEntryPoint()).Int32Value();
- __ fs()->movl(temp.AsRegister<Register>(), Address::Absolute(offset));
- break;
- }
- case MethodLoadKind::kRecursive:
- callee_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodIndex());
- break;
+void CodeGeneratorX86::LoadMethod(MethodLoadKind load_kind, Location temp, HInvoke* invoke) {
+ switch (load_kind) {
case MethodLoadKind::kBootImageLinkTimePcRelative: {
DCHECK(GetCompilerOptions().IsBootImage() || GetCompilerOptions().IsBootImageExtension());
- Register base_reg = GetInvokeStaticOrDirectExtraParameter(invoke,
- temp.AsRegister<Register>());
+ Register base_reg = GetInvokeExtraParameter(invoke, temp.AsRegister<Register>());
__ leal(temp.AsRegister<Register>(),
Address(base_reg, CodeGeneratorX86::kPlaceholder32BitOffset));
RecordBootImageMethodPatch(invoke);
break;
}
case MethodLoadKind::kBootImageRelRo: {
- Register base_reg = GetInvokeStaticOrDirectExtraParameter(invoke,
- temp.AsRegister<Register>());
+ size_t index = invoke->IsInvokeInterface()
+ ? invoke->AsInvokeInterface()->GetSpecialInputIndex()
+ : invoke->AsInvokeStaticOrDirect()->GetSpecialInputIndex();
+ Register base_reg = GetInvokeExtraParameter(invoke, temp.AsRegister<Register>());
__ movl(temp.AsRegister<Register>(), Address(base_reg, kPlaceholder32BitOffset));
RecordBootImageRelRoPatch(
- invoke->InputAt(invoke->GetSpecialInputIndex())->AsX86ComputeBaseMethodAddress(),
+ invoke->InputAt(index)->AsX86ComputeBaseMethodAddress(),
GetBootImageOffset(invoke));
break;
}
case MethodLoadKind::kBssEntry: {
- Register base_reg = GetInvokeStaticOrDirectExtraParameter(invoke,
- temp.AsRegister<Register>());
+ Register base_reg = GetInvokeExtraParameter(invoke, temp.AsRegister<Register>());
__ movl(temp.AsRegister<Register>(), Address(base_reg, kPlaceholder32BitOffset));
RecordMethodBssEntryPatch(invoke);
// No need for memory fence, thanks to the x86 memory model.
break;
}
- case MethodLoadKind::kJitDirectAddress:
- __ movl(temp.AsRegister<Register>(), Immediate(invoke->GetMethodAddress()));
+ case MethodLoadKind::kJitDirectAddress: {
+ __ movl(temp.AsRegister<Register>(),
+ Immediate(reinterpret_cast32<uint32_t>(invoke->GetResolvedMethod())));
break;
+ }
+ case MethodLoadKind::kRuntimeCall: {
+ // Test situation, don't do anything.
+ break;
+ }
+ default: {
+ LOG(FATAL) << "Load kind should have already been handled " << load_kind;
+ UNREACHABLE();
+ }
+ }
+}
+
+void CodeGeneratorX86::GenerateStaticOrDirectCall(
+ HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path) {
+ Location callee_method = temp; // For all kinds except kRecursive, callee will be in temp.
+ switch (invoke->GetMethodLoadKind()) {
+ case MethodLoadKind::kStringInit: {
+ // temp = thread->string_init_entrypoint
+ uint32_t offset =
+ GetThreadOffset<kX86PointerSize>(invoke->GetStringInitEntryPoint()).Int32Value();
+ __ fs()->movl(temp.AsRegister<Register>(), Address::Absolute(offset));
+ break;
+ }
+ case MethodLoadKind::kRecursive: {
+ callee_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodIndex());
+ break;
+ }
case MethodLoadKind::kRuntimeCall: {
GenerateInvokeStaticOrDirectRuntimeCall(invoke, temp, slow_path);
return; // No code pointer retrieval; the runtime performs the call directly.
}
+ default: {
+ LoadMethod(invoke->GetMethodLoadKind(), callee_method, invoke);
+ }
}
switch (invoke->GetCodePtrLocation()) {
@@ -5336,9 +5379,12 @@ void CodeGeneratorX86::RecordBootImageRelRoPatch(HX86ComputeBaseMethodAddress* m
__ Bind(&boot_image_other_patches_.back().label);
}
-void CodeGeneratorX86::RecordBootImageMethodPatch(HInvokeStaticOrDirect* invoke) {
+void CodeGeneratorX86::RecordBootImageMethodPatch(HInvoke* invoke) {
+ size_t index = invoke->IsInvokeInterface()
+ ? invoke->AsInvokeInterface()->GetSpecialInputIndex()
+ : invoke->AsInvokeStaticOrDirect()->GetSpecialInputIndex();
HX86ComputeBaseMethodAddress* method_address =
- invoke->InputAt(invoke->GetSpecialInputIndex())->AsX86ComputeBaseMethodAddress();
+ invoke->InputAt(index)->AsX86ComputeBaseMethodAddress();
boot_image_method_patches_.emplace_back(
method_address,
invoke->GetResolvedMethodReference().dex_file,
@@ -5346,10 +5392,13 @@ void CodeGeneratorX86::RecordBootImageMethodPatch(HInvokeStaticOrDirect* invoke)
__ Bind(&boot_image_method_patches_.back().label);
}
-void CodeGeneratorX86::RecordMethodBssEntryPatch(HInvokeStaticOrDirect* invoke) {
+void CodeGeneratorX86::RecordMethodBssEntryPatch(HInvoke* invoke) {
+ size_t index = invoke->IsInvokeInterface()
+ ? invoke->AsInvokeInterface()->GetSpecialInputIndex()
+ : invoke->AsInvokeStaticOrDirect()->GetSpecialInputIndex();
DCHECK(IsSameDexFile(GetGraph()->GetDexFile(), *invoke->GetMethodReference().dex_file));
HX86ComputeBaseMethodAddress* method_address =
- invoke->InputAt(invoke->GetSpecialInputIndex())->AsX86ComputeBaseMethodAddress();
+ invoke->InputAt(index)->AsX86ComputeBaseMethodAddress();
// Add the patch entry and bind its label at the end of the instruction.
method_bss_entry_patches_.emplace_back(
method_address,
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index b0575ba969..0368de5343 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -472,6 +472,7 @@ class CodeGeneratorX86 : public CodeGenerator {
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
ArtMethod* method) override;
+ void LoadMethod(MethodLoadKind load_kind, Location temp, HInvoke* invoke);
// Generate a call to a static or direct method.
void GenerateStaticOrDirectCall(
HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
@@ -483,8 +484,8 @@ class CodeGeneratorX86 : public CodeGenerator {
uint32_t intrinsic_data);
void RecordBootImageRelRoPatch(HX86ComputeBaseMethodAddress* method_address,
uint32_t boot_image_offset);
- void RecordBootImageMethodPatch(HInvokeStaticOrDirect* invoke);
- void RecordMethodBssEntryPatch(HInvokeStaticOrDirect* invoke);
+ void RecordBootImageMethodPatch(HInvoke* invoke);
+ void RecordMethodBssEntryPatch(HInvoke* invoke);
void RecordBootImageTypePatch(HLoadClass* load_class);
Label* NewTypeBssEntryPatch(HLoadClass* load_class);
void RecordBootImageStringPatch(HLoadString* load_string);
@@ -697,6 +698,7 @@ class CodeGeneratorX86 : public CodeGenerator {
void EmitPcRelativeLinkerPatches(const ArenaDeque<X86PcRelativePatchInfo>& infos,
ArenaVector<linker::LinkerPatch>* linker_patches);
+ Register GetInvokeExtraParameter(HInvoke* invoke, Register temp);
Register GetInvokeStaticOrDirectExtraParameter(HInvokeStaticOrDirect* invoke, Register temp);
// Labels for each block that will be compiled.
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 202b58b9ee..3a39ee82b2 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1001,22 +1001,8 @@ HInvokeStaticOrDirect::DispatchInfo CodeGeneratorX86_64::GetSupportedInvokeStati
return desired_dispatch_info;
}
-void CodeGeneratorX86_64::GenerateStaticOrDirectCall(
- HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path) {
- // All registers are assumed to be correctly set up.
-
- Location callee_method = temp; // For all kinds except kRecursive, callee will be in temp.
- switch (invoke->GetMethodLoadKind()) {
- case MethodLoadKind::kStringInit: {
- // temp = thread->string_init_entrypoint
- uint32_t offset =
- GetThreadOffset<kX86_64PointerSize>(invoke->GetStringInitEntryPoint()).Int32Value();
- __ gs()->movq(temp.AsRegister<CpuRegister>(), Address::Absolute(offset, /* no_rip= */ true));
- break;
- }
- case MethodLoadKind::kRecursive:
- callee_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodIndex());
- break;
+void CodeGeneratorX86_64::LoadMethod(MethodLoadKind load_kind, Location temp, HInvoke* invoke) {
+ switch (load_kind) {
case MethodLoadKind::kBootImageLinkTimePcRelative:
DCHECK(GetCompilerOptions().IsBootImage() || GetCompilerOptions().IsBootImageExtension());
__ leal(temp.AsRegister<CpuRegister>(),
@@ -1037,13 +1023,47 @@ void CodeGeneratorX86_64::GenerateStaticOrDirectCall(
// No need for memory fence, thanks to the x86-64 memory model.
break;
}
- case MethodLoadKind::kJitDirectAddress:
- Load64BitValue(temp.AsRegister<CpuRegister>(), invoke->GetMethodAddress());
+ case MethodLoadKind::kJitDirectAddress: {
+ Load64BitValue(temp.AsRegister<CpuRegister>(),
+ reinterpret_cast<int64_t>(invoke->GetResolvedMethod()));
break;
+ }
+ case MethodLoadKind::kRuntimeCall: {
+ // Test situation, don't do anything.
+ break;
+ }
+ default: {
+ LOG(FATAL) << "Load kind should have already been handled " << load_kind;
+ UNREACHABLE();
+ }
+ }
+}
+
+void CodeGeneratorX86_64::GenerateStaticOrDirectCall(
+ HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path) {
+ // All registers are assumed to be correctly set up.
+
+ Location callee_method = temp; // For all kinds except kRecursive, callee will be in temp.
+ switch (invoke->GetMethodLoadKind()) {
+ case MethodLoadKind::kStringInit: {
+ // temp = thread->string_init_entrypoint
+ uint32_t offset =
+ GetThreadOffset<kX86_64PointerSize>(invoke->GetStringInitEntryPoint()).Int32Value();
+ __ gs()->movq(temp.AsRegister<CpuRegister>(), Address::Absolute(offset, /* no_rip= */ true));
+ break;
+ }
+ case MethodLoadKind::kRecursive: {
+ callee_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodIndex());
+ break;
+ }
case MethodLoadKind::kRuntimeCall: {
GenerateInvokeStaticOrDirectRuntimeCall(invoke, temp, slow_path);
return; // No code pointer retrieval; the runtime performs the call directly.
}
+ default: {
+ LoadMethod(invoke->GetMethodLoadKind(), temp, invoke);
+ break;
+ }
}
switch (invoke->GetCodePtrLocation()) {
@@ -1147,13 +1167,13 @@ void CodeGeneratorX86_64::RecordBootImageRelRoPatch(uint32_t boot_image_offset)
__ Bind(&boot_image_other_patches_.back().label);
}
-void CodeGeneratorX86_64::RecordBootImageMethodPatch(HInvokeStaticOrDirect* invoke) {
+void CodeGeneratorX86_64::RecordBootImageMethodPatch(HInvoke* invoke) {
boot_image_method_patches_.emplace_back(invoke->GetResolvedMethodReference().dex_file,
invoke->GetResolvedMethodReference().index);
__ Bind(&boot_image_method_patches_.back().label);
}
-void CodeGeneratorX86_64::RecordMethodBssEntryPatch(HInvokeStaticOrDirect* invoke) {
+void CodeGeneratorX86_64::RecordMethodBssEntryPatch(HInvoke* invoke) {
DCHECK(IsSameDexFile(GetGraph()->GetDexFile(), *invoke->GetMethodReference().dex_file));
method_bss_entry_patches_.emplace_back(invoke->GetMethodReference().dex_file,
invoke->GetMethodReference().index);
@@ -2711,6 +2731,10 @@ void InstructionCodeGeneratorX86_64::VisitInvokeVirtual(HInvokeVirtual* invoke)
void LocationsBuilderX86_64::VisitInvokeInterface(HInvokeInterface* invoke) {
HandleInvoke(invoke);
// Add the hidden argument.
+ if (invoke->GetHiddenArgumentLoadKind() == MethodLoadKind::kRecursive) {
+ invoke->GetLocations()->SetInAt(invoke->GetNumberOfArguments() - 1,
+ Location::RegisterLocation(RAX));
+ }
invoke->GetLocations()->AddTemp(Location::RegisterLocation(RAX));
}
@@ -2744,7 +2768,6 @@ void InstructionCodeGeneratorX86_64::VisitInvokeInterface(HInvokeInterface* invo
// TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
LocationSummary* locations = invoke->GetLocations();
CpuRegister temp = locations->GetTemp(0).AsRegister<CpuRegister>();
- CpuRegister hidden_reg = locations->GetTemp(1).AsRegister<CpuRegister>();
Location receiver = locations->InAt(0);
size_t class_offset = mirror::Object::ClassOffset().SizeValue();
@@ -2768,11 +2791,14 @@ void InstructionCodeGeneratorX86_64::VisitInvokeInterface(HInvokeInterface* invo
codegen_->MaybeGenerateInlineCacheCheck(invoke, temp);
- // Set the hidden argument. This is safe to do this here, as RAX
- // won't be modified thereafter, before the `call` instruction.
- // We also di it after MaybeGenerateInlineCache that may use RAX.
- DCHECK_EQ(RAX, hidden_reg.AsRegister());
- codegen_->Load64BitValue(hidden_reg, invoke->GetMethodReference().index);
+ if (invoke->GetHiddenArgumentLoadKind() != MethodLoadKind::kRecursive) {
+ Location hidden_reg = locations->GetTemp(1);
+    // Set the hidden argument. This is safe to do here, as RAX
+    // won't be modified thereafter, before the `call` instruction.
+    // We also do it after MaybeGenerateInlineCacheCheck, which may use RAX.
+ DCHECK_EQ(RAX, hidden_reg.AsRegister<Register>());
+ codegen_->LoadMethod(invoke->GetHiddenArgumentLoadKind(), hidden_reg, invoke);
+ }
// temp = temp->GetAddressOfIMT()
__ movq(temp,
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 81988b4386..c69c80aaf5 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -452,6 +452,7 @@ class CodeGeneratorX86_64 : public CodeGenerator {
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
ArtMethod* method) override;
+ void LoadMethod(MethodLoadKind load_kind, Location temp, HInvoke* invoke);
void GenerateStaticOrDirectCall(
HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
void GenerateVirtualCall(
@@ -459,8 +460,8 @@ class CodeGeneratorX86_64 : public CodeGenerator {
void RecordBootImageIntrinsicPatch(uint32_t intrinsic_data);
void RecordBootImageRelRoPatch(uint32_t boot_image_offset);
- void RecordBootImageMethodPatch(HInvokeStaticOrDirect* invoke);
- void RecordMethodBssEntryPatch(HInvokeStaticOrDirect* invoke);
+ void RecordBootImageMethodPatch(HInvoke* invoke);
+ void RecordMethodBssEntryPatch(HInvoke* invoke);
void RecordBootImageTypePatch(HLoadClass* load_class);
Label* NewTypeBssEntryPatch(HLoadClass* load_class);
void RecordBootImageStringPatch(HLoadString* load_string);
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index f917500dac..0531d725e7 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -1104,9 +1104,9 @@ bool HInstructionBuilder::BuildInvoke(const Instruction& instruction,
}
}
HInvokeStaticOrDirect::DispatchInfo dispatch_info =
- HSharpening::SharpenInvokeStaticOrDirect(resolved_method,
- has_method_id,
- code_generator_);
+ HSharpening::SharpenLoadMethod(resolved_method,
+ has_method_id,
+ code_generator_);
if (dispatch_info.code_ptr_location == CodePtrLocation::kCallCriticalNative) {
graph_->SetHasDirectCriticalNativeCall(true);
}
@@ -1138,6 +1138,13 @@ bool HInstructionBuilder::BuildInvoke(const Instruction& instruction,
/*vtable_index=*/ imt_or_vtable_index);
} else {
DCHECK_EQ(invoke_type, kInterface);
+ if (kIsDebugBuild) {
+ ScopedObjectAccess soa(Thread::Current());
+ DCHECK(resolved_method->GetDeclaringClass()->IsInterface());
+ }
+ MethodLoadKind load_kind =
+ HSharpening::SharpenLoadMethod(resolved_method, /* has_method_id= */ true, code_generator_)
+ .method_load_kind;
invoke = new (allocator_) HInvokeInterface(allocator_,
number_of_arguments,
return_type,
@@ -1145,7 +1152,8 @@ bool HInstructionBuilder::BuildInvoke(const Instruction& instruction,
method_reference,
resolved_method,
resolved_method_reference,
- /*imt_index=*/ imt_or_vtable_index);
+ /*imt_index=*/ imt_or_vtable_index,
+ load_kind);
}
return HandleInvoke(invoke, operands, shorty, /* is_unresolved= */ false);
}
@@ -1669,6 +1677,12 @@ bool HInstructionBuilder::SetupInvokeArguments(HInstruction* invoke,
invoke->SetRawInputAt(argument_index, graph_->GetCurrentMethod());
}
+ if (invoke->IsInvokeInterface() &&
+ (invoke->AsInvokeInterface()->GetHiddenArgumentLoadKind() == MethodLoadKind::kRecursive)) {
+ invoke->SetRawInputAt(invoke->AsInvokeInterface()->GetNumberOfArguments() - 1,
+ graph_->GetCurrentMethod());
+ }
+
return true;
}
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index d616912b9f..d7d5b597c0 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -2311,7 +2311,7 @@ void InstructionSimplifierVisitor::SimplifySystemArrayCopy(HInvoke* instruction)
// is unlikely that it exists. The most usual situation for such typed
// arraycopy methods is a direct pointer to the boot image.
invoke->SetDispatchInfo(
- HSharpening::SharpenInvokeStaticOrDirect(method, /* has_method_id= */ true, codegen_));
+ HSharpening::SharpenLoadMethod(method, /* has_method_id= */ true, codegen_));
}
}
}
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index b674937ee3..9200689f27 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -4417,6 +4417,12 @@ enum class CodePtrLocation {
kCallArtMethod,
};
+static inline bool IsPcRelativeMethodLoadKind(MethodLoadKind load_kind) {
+ return load_kind == MethodLoadKind::kBootImageLinkTimePcRelative ||
+ load_kind == MethodLoadKind::kBootImageRelRo ||
+ load_kind == MethodLoadKind::kBssEntry;
+}
+
class HInvoke : public HVariableInputSizeInstruction {
public:
bool NeedsEnvironment() const override;
@@ -4738,9 +4744,7 @@ class HInvokeStaticOrDirect final : public HInvoke {
bool IsStringInit() const { return GetMethodLoadKind() == MethodLoadKind::kStringInit; }
bool HasMethodAddress() const { return GetMethodLoadKind() == MethodLoadKind::kJitDirectAddress; }
bool HasPcRelativeMethodLoadKind() const {
- return GetMethodLoadKind() == MethodLoadKind::kBootImageLinkTimePcRelative ||
- GetMethodLoadKind() == MethodLoadKind::kBootImageRelRo ||
- GetMethodLoadKind() == MethodLoadKind::kBssEntry;
+ return IsPcRelativeMethodLoadKind(GetMethodLoadKind());
}
QuickEntrypointEnum GetStringInitEntryPoint() const {
@@ -4941,10 +4945,11 @@ class HInvokeInterface final : public HInvoke {
MethodReference method_reference,
ArtMethod* resolved_method,
MethodReference resolved_method_reference,
- uint32_t imt_index)
+ uint32_t imt_index,
+ MethodLoadKind load_kind)
: HInvoke(kInvokeInterface,
allocator,
- number_of_arguments,
+ number_of_arguments + (NeedsCurrentMethod(load_kind) ? 1 : 0),
0u,
return_type,
dex_pc,
@@ -4952,7 +4957,12 @@ class HInvokeInterface final : public HInvoke {
resolved_method,
resolved_method_reference,
kInterface),
- imt_index_(imt_index) {
+ imt_index_(imt_index),
+ hidden_argument_load_kind_(load_kind) {
+ }
+
+ static bool NeedsCurrentMethod(MethodLoadKind load_kind) {
+ return load_kind == MethodLoadKind::kRecursive;
}
bool IsClonable() const override { return true; }
@@ -4967,7 +4977,16 @@ class HInvokeInterface final : public HInvoke {
return true;
}
+ size_t GetSpecialInputIndex() const {
+ return GetNumberOfArguments();
+ }
+
+ void AddSpecialInput(HInstruction* input) {
+ InsertInputAt(GetSpecialInputIndex(), input);
+ }
+
uint32_t GetImtIndex() const { return imt_index_; }
+ MethodLoadKind GetHiddenArgumentLoadKind() const { return hidden_argument_load_kind_; }
DECLARE_INSTRUCTION(InvokeInterface);
@@ -4977,6 +4996,9 @@ class HInvokeInterface final : public HInvoke {
private:
// Cached value of the resolved method, to avoid needing the mutator lock.
const uint32_t imt_index_;
+
+ // How the hidden argument (the interface method) is being loaded.
+ const MethodLoadKind hidden_argument_load_kind_;
};
class HNeg final : public HUnaryOperation {
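
The index arithmetic that the backends rely on (InAt(GetNumberOfArguments() - 1) for kRecursive, GetSpecialInputIndex() for the x86 PC-relative base) follows from the constructor change above. A small standalone sketch of that layout, using simplified stand-in types rather than the real HInvokeInterface:

#include <cassert>
#include <cstddef>

enum class MethodLoadKind { kStringInit, kRecursive, kBootImageLinkTimePcRelative,
                            kBootImageRelRo, kBssEntry, kJitDirectAddress, kRuntimeCall };

constexpr bool NeedsCurrentMethod(MethodLoadKind kind) {
  return kind == MethodLoadKind::kRecursive;
}

struct InterfaceInvokeLayout {
  size_t dex_arguments;                      // Arguments from the dex invoke (receiver included).
  MethodLoadKind hidden_argument_load_kind;  // How the hidden ArtMethod* argument is loaded.

  // Mirrors the constructor change: kRecursive appends the current method as one more input.
  size_t GetNumberOfArguments() const {
    return dex_arguments + (NeedsCurrentMethod(hidden_argument_load_kind) ? 1 : 0);
  }
  // The index the backends read with InAt(GetNumberOfArguments() - 1) for kRecursive.
  size_t HiddenArgumentIndex() const { return GetNumberOfArguments() - 1; }
  // Where AddSpecialInput() inserts the x86 method-address base.
  size_t GetSpecialInputIndex() const { return GetNumberOfArguments(); }
};

int main() {
  InterfaceInvokeLayout recursive{3, MethodLoadKind::kRecursive};
  assert(recursive.GetNumberOfArguments() == 4);
  assert(recursive.HiddenArgumentIndex() == 3);   // The appended current method.

  InterfaceInvokeLayout bss{3, MethodLoadKind::kBssEntry};
  assert(bss.GetSpecialInputIndex() == 3);        // The PC-relative base goes after the arguments.
  return 0;
}
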
diff --git a/compiler/optimizing/pc_relative_fixups_x86.cc b/compiler/optimizing/pc_relative_fixups_x86.cc
index 3ea19183ba..17f37f05c5 100644
--- a/compiler/optimizing/pc_relative_fixups_x86.cc
+++ b/compiler/optimizing/pc_relative_fixups_x86.cc
@@ -207,6 +207,15 @@ class PCRelativeHandlerVisitor : public HGraphVisitor {
base_added = true;
}
+ HInvokeInterface* invoke_interface = invoke->AsInvokeInterface();
+ if (invoke_interface != nullptr &&
+ IsPcRelativeMethodLoadKind(invoke_interface->GetHiddenArgumentLoadKind())) {
+ HX86ComputeBaseMethodAddress* method_address = GetPCRelativeBasePointer(invoke);
+ // Add the extra parameter.
+ invoke_interface->AddSpecialInput(method_address);
+ base_added = true;
+ }
+
// Ensure that we can load FP arguments from the constant area.
HInputsRef inputs = invoke->GetInputs();
for (size_t i = 0; i < inputs.size(); i++) {
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index f570c60843..3ffb24b852 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -57,7 +57,7 @@ static bool BootImageAOTCanEmbedMethod(ArtMethod* method, const CompilerOptions&
return compiler_options.IsImageClass(dex_file.StringByTypeIdx(klass->GetDexTypeIndex()));
}
-HInvokeStaticOrDirect::DispatchInfo HSharpening::SharpenInvokeStaticOrDirect(
+HInvokeStaticOrDirect::DispatchInfo HSharpening::SharpenLoadMethod(
ArtMethod* callee, bool has_method_id, CodeGenerator* codegen) {
if (kIsDebugBuild) {
ScopedObjectAccess soa(Thread::Current()); // Required for GetDeclaringClass below.
diff --git a/compiler/optimizing/sharpening.h b/compiler/optimizing/sharpening.h
index b48cd4b9b3..f71d9b5056 100644
--- a/compiler/optimizing/sharpening.h
+++ b/compiler/optimizing/sharpening.h
@@ -30,7 +30,7 @@ class DexCompilationUnit;
class HSharpening {
public:
// Used by the builder and InstructionSimplifier.
- static HInvokeStaticOrDirect::DispatchInfo SharpenInvokeStaticOrDirect(
+ static HInvokeStaticOrDirect::DispatchInfo SharpenLoadMethod(
ArtMethod* callee, bool has_method_id, CodeGenerator* codegen);
// Used by the builder and the inliner.
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index f94694d393..986f88d096 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -1446,113 +1446,32 @@ END art_quick_proxy_invoke_handler
/*
* Called to resolve an imt conflict.
* r0 is the conflict ArtMethod.
- * r12 is a hidden argument that holds the target interface method's dex method index.
+ * r12 is a hidden argument that holds the target interface method.
*
* Note that this stub writes to r0, r4, and r12.
*/
.extern artLookupResolvedMethod
ENTRY art_quick_imt_conflict_trampoline
- push {r1-r2}
- .cfi_adjust_cfa_offset (2 * 4)
- .cfi_rel_offset r1, 0
- .cfi_rel_offset r2, 4
- ldr r4, [sp, #(2 * 4)] // Load referrer.
- ldr r2, [r0, #ART_METHOD_JNI_OFFSET_32] // Load ImtConflictTable
- // Load the declaring class (without read barrier) and access flags (for obsolete method check).
- // The obsolete flag is set with suspended threads, so we do not need an acquire operation here.
-#if ART_METHOD_ACCESS_FLAGS_OFFSET != ART_METHOD_DECLARING_CLASS_OFFSET + 4
-#error "Expecting declaring class and access flags to be consecutive for LDRD."
-#endif
- ldrd r0, r1, [r4, #ART_METHOD_DECLARING_CLASS_OFFSET]
- // If the method is obsolete, just go through the dex cache miss slow path.
- lsrs r1, #(ACC_OBSOLETE_METHOD_SHIFT + 1)
- bcs .Limt_conflict_trampoline_dex_cache_miss
- ldr r4, [r0, #MIRROR_CLASS_DEX_CACHE_OFFSET] // Load the DexCache (without read barrier).
- UNPOISON_HEAP_REF r4
- ubfx r1, r12, #0, #METHOD_DEX_CACHE_HASH_BITS // Calculate DexCache method slot index.
- ldr r4, [r4, #MIRROR_DEX_CACHE_RESOLVED_METHODS_OFFSET] // Load the resolved methods.
- add r4, r4, r1, lsl #(POINTER_SIZE_SHIFT + 1) // Load DexCache method slot address.
-
-// FIXME: Configure the build to use the faster code when appropriate.
-// Currently we fall back to the slower version.
-#if HAS_ATOMIC_LDRD
- ldrd r0, r1, [r4]
-#else
- push {r3}
- .cfi_adjust_cfa_offset 4
- .cfi_rel_offset r3, 0
-.Limt_conflict_trampoline_retry_load:
- ldrexd r0, r1, [r4]
- strexd r3, r0, r1, [r4]
- cmp r3, #0
- bne .Limt_conflict_trampoline_retry_load
- pop {r3}
- .cfi_adjust_cfa_offset -4
- .cfi_restore r3
-#endif
-
- ldr r4, [r2] // Load first entry in ImtConflictTable.
- cmp r1, r12 // Compare method index to see if we had a DexCache method hit.
- bne .Limt_conflict_trampoline_dex_cache_miss
+ ldr r0, [r0, #ART_METHOD_JNI_OFFSET_32] // Load ImtConflictTable
+ ldr r4, [r0] // Load first entry in ImtConflictTable.
.Limt_table_iterate:
- cmp r4, r0
+ cmp r4, r12
// Branch if found. Benchmarks have shown doing a branch here is better.
beq .Limt_table_found
// If the entry is null, the interface method is not in the ImtConflictTable.
cbz r4, .Lconflict_trampoline
// Iterate over the entries of the ImtConflictTable.
- ldr r4, [r2, #(2 * __SIZEOF_POINTER__)]!
+ ldr r4, [r0, #(2 * __SIZEOF_POINTER__)]!
b .Limt_table_iterate
.Limt_table_found:
// We successfully hit an entry in the table. Load the target method
// and jump to it.
- ldr r0, [r2, #__SIZEOF_POINTER__]
- .cfi_remember_state
- pop {r1-r2}
- .cfi_adjust_cfa_offset -(2 * 4)
- .cfi_restore r1
- .cfi_restore r2
+ ldr r0, [r0, #__SIZEOF_POINTER__]
ldr pc, [r0, #ART_METHOD_QUICK_CODE_OFFSET_32]
- .cfi_restore_state
.Lconflict_trampoline:
- // Call the runtime stub to populate the ImtConflictTable and jump to the
- // resolved method.
- .cfi_remember_state
- pop {r1-r2}
- .cfi_adjust_cfa_offset -(2 * 4)
- .cfi_restore r1
- .cfi_restore r2
+ // Pass interface method to the trampoline.
+ mov r0, r12
INVOKE_TRAMPOLINE_BODY artInvokeInterfaceTrampoline
- .cfi_restore_state
-.Limt_conflict_trampoline_dex_cache_miss:
- // We're not creating a proper runtime method frame here,
- // artLookupResolvedMethod() is not allowed to walk the stack.
-
- // Save ImtConflictTable (r2), remaining arg (r3), first entry (r4), return address (lr).
- push {r2-r4, lr}
- .cfi_adjust_cfa_offset (4 * 4)
- .cfi_rel_offset r3, 4
- .cfi_rel_offset lr, 12
- // Save FPR args.
- vpush {d0-d7}
- .cfi_adjust_cfa_offset (8 * 8)
-
- mov r0, ip // Pass method index.
- ldr r1, [sp, #(8 * 8 + 6 * 4)] // Pass referrer.
- bl artLookupResolvedMethod // (uint32_t method_index, ArtMethod* referrer)
-
- // Restore FPR args.
- vpop {d0-d7}
- .cfi_adjust_cfa_offset -(8 * 8)
- // Restore ImtConflictTable (r2), remaining arg (r3), first entry (r4), return address (lr).
- pop {r2-r4, lr}
- .cfi_adjust_cfa_offset -(4 * 4)
- .cfi_restore r3
- .cfi_restore lr
-
- cmp r0, #0 // If the method wasn't resolved,
- beq .Lconflict_trampoline // skip the lookup and go to artInvokeInterfaceTrampoline().
- b .Limt_table_iterate
END art_quick_imt_conflict_trampoline
.extern artQuickResolutionTrampoline
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 634c762040..9a9a565b37 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -1698,96 +1698,34 @@ END art_quick_proxy_invoke_handler
/*
* Called to resolve an imt conflict.
* x0 is the conflict ArtMethod.
- * xIP1 is a hidden argument that holds the target interface method's dex method index.
+ * xIP1 is a hidden argument that holds the target interface method.
*
- * Note that this stub writes to xIP0, xIP1, x13-x15, and x0.
+ * Note that this stub writes to xIP0, xIP1, and x0.
*/
.extern artLookupResolvedMethod
ENTRY art_quick_imt_conflict_trampoline
- ldr xIP0, [sp, #0] // Load referrer
- // Load the declaring class (without read barrier) and access flags (for obsolete method check).
- // The obsolete flag is set with suspended threads, so we do not need an acquire operation here.
-#if ART_METHOD_ACCESS_FLAGS_OFFSET != ART_METHOD_DECLARING_CLASS_OFFSET + 4
-#error "Expecting declaring class and access flags to be consecutive for LDP."
-#endif
- ldp wIP0, w15, [xIP0, #ART_METHOD_DECLARING_CLASS_OFFSET]
- // If the method is obsolete, just go through the dex cache miss slow path.
- tbnz x15, #ACC_OBSOLETE_METHOD_SHIFT, .Limt_conflict_trampoline_dex_cache_miss
- ldr wIP0, [xIP0, #MIRROR_CLASS_DEX_CACHE_OFFSET] // Load the DexCache (without read barrier).
- UNPOISON_HEAP_REF wIP0
- ubfx x15, xIP1, #0, #METHOD_DEX_CACHE_HASH_BITS // Calculate DexCache method slot index.
- ldr xIP0, [xIP0, #MIRROR_DEX_CACHE_RESOLVED_METHODS_OFFSET] // Load the resolved methods.
- add xIP0, xIP0, x15, lsl #(POINTER_SIZE_SHIFT + 1) // Load DexCache method slot address.
-
- // Relaxed atomic load x14:x15 from the dex cache slot.
-.Limt_conflict_trampoline_retry_load:
- ldxp x14, x15, [xIP0]
- stxp w13, x14, x15, [xIP0]
- cbnz w13, .Limt_conflict_trampoline_retry_load
-
- cmp x15, xIP1 // Compare method index to see if we had a DexCache method hit.
- bne .Limt_conflict_trampoline_dex_cache_miss
-.Limt_conflict_trampoline_have_interface_method:
- ldr xIP1, [x0, #ART_METHOD_JNI_OFFSET_64] // Load ImtConflictTable
- ldr x0, [xIP1] // Load first entry in ImtConflictTable.
+ ldr xIP0, [x0, #ART_METHOD_JNI_OFFSET_64] // Load ImtConflictTable
+ ldr x0, [xIP0] // Load first entry in ImtConflictTable.
.Limt_table_iterate:
- cmp x0, x14
+ cmp x0, xIP1
// Branch if found. Benchmarks have shown doing a branch here is better.
beq .Limt_table_found
// If the entry is null, the interface method is not in the ImtConflictTable.
cbz x0, .Lconflict_trampoline
// Iterate over the entries of the ImtConflictTable.
- ldr x0, [xIP1, #(2 * __SIZEOF_POINTER__)]!
+ ldr x0, [xIP0, #(2 * __SIZEOF_POINTER__)]!
b .Limt_table_iterate
.Limt_table_found:
// We successfully hit an entry in the table. Load the target method
// and jump to it.
- ldr x0, [xIP1, #__SIZEOF_POINTER__]
+ ldr x0, [xIP0, #__SIZEOF_POINTER__]
ldr xIP0, [x0, #ART_METHOD_QUICK_CODE_OFFSET_64]
br xIP0
.Lconflict_trampoline:
// Call the runtime stub to populate the ImtConflictTable and jump to the
// resolved method.
- mov x0, x14 // Load interface method
+ mov x0, xIP1 // Load interface method
INVOKE_TRAMPOLINE_BODY artInvokeInterfaceTrampoline
-.Limt_conflict_trampoline_dex_cache_miss:
- // We're not creating a proper runtime method frame here,
- // artLookupResolvedMethod() is not allowed to walk the stack.
-
- // Save GPR args and return address, allocate space for FPR args, align stack.
- SAVE_TWO_REGS_INCREASE_FRAME x0, x1, (8 * 8 + 8 * 8 + 8 + 8)
- SAVE_TWO_REGS x2, x3, 16
- SAVE_TWO_REGS x4, x5, 32
- SAVE_TWO_REGS x6, x7, 48
- SAVE_REG xLR, (8 * 8 + 8 * 8 + 8)
-
- // Save FPR args.
- stp d0, d1, [sp, #64]
- stp d2, d3, [sp, #80]
- stp d4, d5, [sp, #96]
- stp d6, d7, [sp, #112]
-
- mov x0, xIP1 // Pass method index.
- ldr x1, [sp, #(8 * 8 + 8 * 8 + 8 + 8)] // Pass referrer.
- bl artLookupResolvedMethod // (uint32_t method_index, ArtMethod* referrer)
- mov x14, x0 // Move the interface method to x14 where the loop above expects it.
-
- // Restore FPR args.
- ldp d0, d1, [sp, #64]
- ldp d2, d3, [sp, #80]
- ldp d4, d5, [sp, #96]
- ldp d6, d7, [sp, #112]
-
- // Restore GPR args and return address.
- RESTORE_REG xLR, (8 * 8 + 8 * 8 + 8)
- RESTORE_TWO_REGS x2, x3, 16
- RESTORE_TWO_REGS x4, x5, 32
- RESTORE_TWO_REGS x6, x7, 48
- RESTORE_TWO_REGS_DECREASE_FRAME x0, x1, (8 * 8 + 8 * 8 + 8 + 8)
-
- // If the method wasn't resolved, skip the lookup and go to artInvokeInterfaceTrampoline().
- cbz x14, .Lconflict_trampoline
- b .Limt_conflict_trampoline_have_interface_method
END art_quick_imt_conflict_trampoline
ENTRY art_quick_resolution_trampoline
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index b4155e0d8c..bba283a59f 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -1712,104 +1712,39 @@ END_FUNCTION art_quick_proxy_invoke_handler
/*
* Called to resolve an imt conflict.
* eax is the conflict ArtMethod.
- * xmm7 is a hidden argument that holds the target interface method's dex method index.
+ * xmm7 is a hidden argument that holds the target interface method.
*
* Note that this stub writes to eax.
- * Because of lack of free registers, it also saves and restores edi.
+ * Because of lack of free registers, it also saves and restores esi.
*/
DEFINE_FUNCTION art_quick_imt_conflict_trampoline
- PUSH EDI
PUSH ESI
- PUSH EDX
- movl 16(%esp), %edi // Load referrer.
movd %xmm7, %esi // Get target method index stored in xmm7, remember it in ESI.
- // If the method is obsolete, just go through the dex cache miss slow path.
- // The obsolete flag is set with suspended threads, so we do not need an acquire operation here.
- testl LITERAL(ACC_OBSOLETE_METHOD), ART_METHOD_ACCESS_FLAGS_OFFSET(%edi)
- jnz .Limt_conflict_trampoline_dex_cache_miss
- movl ART_METHOD_DECLARING_CLASS_OFFSET(%edi), %edi // Load declaring class (no read barrier).
- movl MIRROR_CLASS_DEX_CACHE_OFFSET(%edi), %edi // Load the DexCache (without read barrier).
- UNPOISON_HEAP_REF edi
- movl MIRROR_DEX_CACHE_RESOLVED_METHODS_OFFSET(%edi), %edi // Load the resolved methods.
- pushl ART_METHOD_JNI_OFFSET_32(%eax) // Push ImtConflictTable.
- CFI_ADJUST_CFA_OFFSET(4)
- movl %esi, %eax // Copy the method index from ESI.
- andl LITERAL(METHOD_DEX_CACHE_SIZE_MINUS_ONE), %eax // Calculate DexCache method slot index.
- leal 0(%edi, %eax, 2 * __SIZEOF_POINTER__), %edi // Load DexCache method slot address.
- mov %ecx, %edx // Make EDX:EAX == ECX:EBX so that LOCK CMPXCHG8B makes no changes.
- mov %ebx, %eax // (The actual value does not matter.)
- lock cmpxchg8b (%edi) // Relaxed atomic load EDX:EAX from the dex cache slot.
- popl %edi // Pop ImtConflictTable.
- CFI_ADJUST_CFA_OFFSET(-4)
- cmp %edx, %esi // Compare method index to see if we had a DexCache method hit.
- jne .Limt_conflict_trampoline_dex_cache_miss
+ movl ART_METHOD_JNI_OFFSET_32(%eax), %eax // Load ImtConflictTable.
.Limt_table_iterate:
- cmpl %eax, 0(%edi)
+ cmpl %esi, 0(%eax)
jne .Limt_table_next_entry
// We successfully hit an entry in the table. Load the target method
// and jump to it.
- movl __SIZEOF_POINTER__(%edi), %eax
+ movl __SIZEOF_POINTER__(%eax), %eax
CFI_REMEMBER_STATE
- POP EDX
POP ESI
- POP EDI
jmp *ART_METHOD_QUICK_CODE_OFFSET_32(%eax)
- CFI_RESTORE_STATE_AND_DEF_CFA(esp, 16)
+ CFI_RESTORE_STATE_AND_DEF_CFA(esp, 8)
.Limt_table_next_entry:
// If the entry is null, the interface method is not in the ImtConflictTable.
- cmpl LITERAL(0), 0(%edi)
+ cmpl LITERAL(0), 0(%eax)
jz .Lconflict_trampoline
// Iterate over the entries of the ImtConflictTable.
- addl LITERAL(2 * __SIZEOF_POINTER__), %edi
+ addl LITERAL(2 * __SIZEOF_POINTER__), %eax
jmp .Limt_table_iterate
.Lconflict_trampoline:
// Call the runtime stub to populate the ImtConflictTable and jump to the
// resolved method.
- CFI_REMEMBER_STATE
- POP EDX
+ // Pass the interface method in first argument.
+ movl %esi, %eax
POP ESI
- POP EDI
INVOKE_TRAMPOLINE_BODY artInvokeInterfaceTrampoline
- CFI_RESTORE_STATE_AND_DEF_CFA(esp, 16)
-.Limt_conflict_trampoline_dex_cache_miss:
- // We're not creating a proper runtime method frame here,
- // artLookupResolvedMethod() is not allowed to walk the stack.
-
- // Save core register args; EDX is already saved.
- PUSH ebx
- PUSH ecx
-
- // Save FPR args.
- subl MACRO_LITERAL(32), %esp
- CFI_ADJUST_CFA_OFFSET(32)
- movsd %xmm0, 0(%esp)
- movsd %xmm1, 8(%esp)
- movsd %xmm2, 16(%esp)
- movsd %xmm3, 24(%esp)
-
- pushl 32+8+16(%esp) // Pass referrer.
- CFI_ADJUST_CFA_OFFSET(4)
- pushl %esi // Pass method index.
- CFI_ADJUST_CFA_OFFSET(4)
- call SYMBOL(artLookupResolvedMethod) // (uint32_t method_index, ArtMethod* referrer)
- addl LITERAL(8), %esp // Pop arguments.
- CFI_ADJUST_CFA_OFFSET(-8)
-
- // Restore FPR args.
- movsd 0(%esp), %xmm0
- movsd 8(%esp), %xmm1
- movsd 16(%esp), %xmm2
- movsd 24(%esp), %xmm3
- addl MACRO_LITERAL(32), %esp
- CFI_ADJUST_CFA_OFFSET(-32)
-
- // Restore core register args.
- POP ecx
- POP ebx
-
- cmp LITERAL(0), %eax // If the method wasn't resolved,
- je .Lconflict_trampoline // skip the lookup and go to artInvokeInterfaceTrampoline().
- jmp .Limt_table_iterate
END_FUNCTION art_quick_imt_conflict_trampoline
DEFINE_FUNCTION art_quick_resolution_trampoline
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index e25045d1bf..f5324c51ad 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -1411,45 +1411,23 @@ END_FUNCTION art_quick_proxy_invoke_handler
/*
* Called to resolve an imt conflict.
* rdi is the conflict ArtMethod.
- * rax is a hidden argument that holds the target interface method's dex method index.
+ * rax is a hidden argument that holds the target interface method.
*
- * Note that this stub writes to r10, r11, rax and rdi.
+ * Note that this stub writes to rdi.
*/
DEFINE_FUNCTION art_quick_imt_conflict_trampoline
#if defined(__APPLE__)
int3
int3
#else
- movq __SIZEOF_POINTER__(%rsp), %r10 // Load referrer.
- mov %eax, %r11d // Remember method index in R11.
- PUSH rdx // Preserve RDX as we need to clobber it by LOCK CMPXCHG16B.
- // If the method is obsolete, just go through the dex cache miss slow path.
- // The obsolete flag is set with suspended threads, so we do not need an acquire operation here.
- testl LITERAL(ACC_OBSOLETE_METHOD), ART_METHOD_ACCESS_FLAGS_OFFSET(%r10)
- jnz .Limt_conflict_trampoline_dex_cache_miss
- movl ART_METHOD_DECLARING_CLASS_OFFSET(%r10), %r10d // Load declaring class (no read barrier).
- movl MIRROR_CLASS_DEX_CACHE_OFFSET(%r10), %r10d // Load the DexCache (without read barrier).
- UNPOISON_HEAP_REF r10d
- movq MIRROR_DEX_CACHE_RESOLVED_METHODS_OFFSET(%r10), %r10 // Load the resolved methods.
- andl LITERAL(METHOD_DEX_CACHE_SIZE_MINUS_ONE), %eax // Calculate DexCache method slot index.
- shll LITERAL(1), %eax // Multiply by 2 as entries have size 2 * __SIZEOF_POINTER__.
- leaq 0(%r10, %rax, __SIZEOF_POINTER__), %r10 // Load DexCache method slot address.
- mov %rcx, %rdx // Make RDX:RAX == RCX:RBX so that LOCK CMPXCHG16B makes no changes.
- mov %rbx, %rax // (The actual value does not matter.)
- lock cmpxchg16b (%r10) // Relaxed atomic load RDX:RAX from the dex cache slot.
movq ART_METHOD_JNI_OFFSET_64(%rdi), %rdi // Load ImtConflictTable
- cmp %rdx, %r11 // Compare method index to see if we had a DexCache method hit.
- jne .Limt_conflict_trampoline_dex_cache_miss
.Limt_table_iterate:
cmpq %rax, 0(%rdi)
jne .Limt_table_next_entry
// We successfully hit an entry in the table. Load the target method
// and jump to it.
movq __SIZEOF_POINTER__(%rdi), %rdi
- CFI_REMEMBER_STATE
- POP rdx
jmp *ART_METHOD_QUICK_CODE_OFFSET_64(%rdi)
- CFI_RESTORE_STATE_AND_DEF_CFA(rsp, 16)
.Limt_table_next_entry:
// If the entry is null, the interface method is not in the ImtConflictTable.
cmpq LITERAL(0), 0(%rdi)
@@ -1460,66 +1438,8 @@ DEFINE_FUNCTION art_quick_imt_conflict_trampoline
.Lconflict_trampoline:
// Call the runtime stub to populate the ImtConflictTable and jump to the
// resolved method.
- CFI_REMEMBER_STATE
- POP rdx
movq %rax, %rdi // Load interface method
INVOKE_TRAMPOLINE_BODY artInvokeInterfaceTrampoline
- CFI_RESTORE_STATE_AND_DEF_CFA(rsp, 16)
-.Limt_conflict_trampoline_dex_cache_miss:
- // We're not creating a proper runtime method frame here,
- // artLookupResolvedMethod() is not allowed to walk the stack.
-
- // Save GPR args and ImtConflictTable; RDX is already saved.
- PUSH r9 // Quick arg 5.
- PUSH r8 // Quick arg 4.
- PUSH rsi // Quick arg 1.
- PUSH rcx // Quick arg 3.
- PUSH rdi // ImtConflictTable
- // Save FPR args and callee-saves, align stack to 16B.
- subq MACRO_LITERAL(12 * 8 + 8), %rsp
- CFI_ADJUST_CFA_OFFSET(12 * 8 + 8)
- movq %xmm0, 0(%rsp)
- movq %xmm1, 8(%rsp)
- movq %xmm2, 16(%rsp)
- movq %xmm3, 24(%rsp)
- movq %xmm4, 32(%rsp)
- movq %xmm5, 40(%rsp)
- movq %xmm6, 48(%rsp)
- movq %xmm7, 56(%rsp)
- movq %xmm12, 64(%rsp) // XMM12-15 are callee-save in ART compiled code ABI
- movq %xmm13, 72(%rsp) // but caller-save in native ABI.
- movq %xmm14, 80(%rsp)
- movq %xmm15, 88(%rsp)
-
- movq %r11, %rdi // Pass method index.
- movq 12 * 8 + 8 + 6 * 8 + 8(%rsp), %rsi // Pass referrer.
- call SYMBOL(artLookupResolvedMethod) // (uint32_t method_index, ArtMethod* referrer)
-
- // Restore FPRs.
- movq 0(%rsp), %xmm0
- movq 8(%rsp), %xmm1
- movq 16(%rsp), %xmm2
- movq 24(%rsp), %xmm3
- movq 32(%rsp), %xmm4
- movq 40(%rsp), %xmm5
- movq 48(%rsp), %xmm6
- movq 56(%rsp), %xmm7
- movq 64(%rsp), %xmm12
- movq 72(%rsp), %xmm13
- movq 80(%rsp), %xmm14
- movq 88(%rsp), %xmm15
- addq MACRO_LITERAL(12 * 8 + 8), %rsp
- CFI_ADJUST_CFA_OFFSET(-(12 * 8 + 8))
- // Restore ImtConflictTable and GPR args.
- POP rdi
- POP rcx
- POP rsi
- POP r8
- POP r9
-
- cmp LITERAL(0), %rax // If the method wasn't resolved,
- je .Lconflict_trampoline // skip the lookup and go to artInvokeInterfaceTrampoline().
- jmp .Limt_table_iterate
#endif // __APPLE__
END_FUNCTION art_quick_imt_conflict_trampoline
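Note: with the interface ArtMethod now passed directly in the hidden argument, the dex-cache probe and the artLookupResolvedMethod slow path above are gone and only the ImtConflictTable walk remains. A minimal self-contained C++ sketch of that walk, using stand-in types rather than ART's real ImtConflictTable, for illustration only:

    #include <cstddef>

    struct Method {};  // stand-in for ArtMethod

    // Pairs of (interface method, implementation), terminated by a null
    // interface method -- the same layout the assembly above steps through
    // in strides of 2 * __SIZEOF_POINTER__.
    struct ConflictEntry {
      const Method* interface_method;  // nullptr terminates the table
      const Method* implementation;
    };

    const Method* LookupImplementation(const ConflictEntry* table,
                                       const Method* interface_method) {
      for (size_t i = 0; table[i].interface_method != nullptr; ++i) {
        if (table[i].interface_method == interface_method) {
          return table[i].implementation;  // caller jumps to its quick code
        }
      }
      return nullptr;  // miss: fall back to artInvokeInterfaceTrampoline
    }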
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 39987c1bad..3f89fe1d99 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -2402,7 +2402,7 @@ extern "C" TwoWordReturn artInvokeInterfaceTrampoline(ArtMethod* interface_metho
ArtMethod* method = nullptr;
ImTable* imt = cls->GetImt(kRuntimePointerSize);
- if (UNLIKELY(interface_method == nullptr)) {
+ if (UNLIKELY(interface_method == nullptr) || interface_method->IsRuntimeMethod()) {
// The interface method is unresolved, so resolve it in the dex file of the caller.
// Fetch the dex_method_idx of the target interface method from the caller.
uint32_t dex_method_idx;
@@ -2438,6 +2438,7 @@ extern "C" TwoWordReturn artInvokeInterfaceTrampoline(ArtMethod* interface_metho
CHECK(self->IsExceptionPending());
return GetTwoWordFailureValue(); // Failure.
}
+ MaybeUpdateBssMethodEntry(interface_method, MethodReference(&dex_file, dex_method_idx));
}
// The compiler and interpreter make sure the conflict trampoline is never
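Note: a condensed, hypothetical C++ sketch of the gate this hunk widens (the real logic lives in artInvokeInterfaceTrampoline above); a runtime method in the hidden argument is now treated like a missing interface method, and after a successful re-resolution the method is also recorded in the oat file's .bss slot via MaybeUpdateBssMethodEntry:

    struct MethodStub {  // stand-in for ArtMethod
      bool is_runtime_method = false;
      bool IsRuntimeMethod() const { return is_runtime_method; }
    };

    // True when the trampoline must re-resolve the interface method from the
    // caller's dex file: either compiled code had no resolved target (null),
    // or nterp forwarded the conflict trampoline, which is a runtime method.
    bool NeedsResolutionFromCaller(const MethodStub* interface_method) {
      return interface_method == nullptr || interface_method->IsRuntimeMethod();
    }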
diff --git a/runtime/interpreter/mterp/arm64ng/main.S b/runtime/interpreter/mterp/arm64ng/main.S
index 4949441fb3..2dbfe58322 100644
--- a/runtime/interpreter/mterp/arm64ng/main.S
+++ b/runtime/interpreter/mterp/arm64ng/main.S
@@ -903,8 +903,10 @@ END \name
bl art_quick_invoke_custom
.else
.if \is_interface
- // Setup hidden argument
- FETCH wip2, 1
+      // Set up the hidden argument. As we don't have access to the interface method,
+      // just pass the method from the IMT. If that method is the conflict trampoline,
+      // this will make the stub go to the runtime; otherwise the hidden argument is unused.
+ mov ip2, x0
.endif
ldr lr, [x0, #ART_METHOD_QUICK_CODE_OFFSET_64]
blr lr
@@ -1152,8 +1154,10 @@ END \name
bl art_quick_invoke_custom
.else
.if \is_interface
- // Setup hidden argument
- FETCH wip2, 1
+      // Set up the hidden argument. As we don't have access to the interface method,
+      // just pass the method from the IMT. If that method is the conflict trampoline,
+      // this will make the stub go to the runtime; otherwise the hidden argument is unused.
+ mov ip2, x0
.endif
ldr lr, [x0, #ART_METHOD_QUICK_CODE_OFFSET_64]
blr lr
diff --git a/runtime/interpreter/mterp/nterp.cc b/runtime/interpreter/mterp/nterp.cc
index 84d3de72d5..da2b3e2479 100644
--- a/runtime/interpreter/mterp/nterp.cc
+++ b/runtime/interpreter/mterp/nterp.cc
@@ -286,6 +286,8 @@ extern "C" size_t NterpGetMethod(Thread* self, ArtMethod* caller, uint16_t* dex_
} else {
DCHECK(resolved_method->GetDeclaringClass()->IsInterface());
UpdateCache(self, dex_pc_ptr, resolved_method->GetImtIndex());
+ // TODO: We should pass the resolved method, and have nterp fetch the IMT
+ // index. Unfortunately, this doesn't work for default methods.
return resolved_method->GetImtIndex();
}
} else if (resolved_method->GetDeclaringClass()->IsStringClass()
diff --git a/runtime/interpreter/mterp/x86_64ng/main.S b/runtime/interpreter/mterp/x86_64ng/main.S
index 3ed5fff08f..202569621b 100644
--- a/runtime/interpreter/mterp/x86_64ng/main.S
+++ b/runtime/interpreter/mterp/x86_64ng/main.S
@@ -1130,7 +1130,10 @@ END_FUNCTION \name
call SYMBOL(art_quick_invoke_custom)
.else
.if \is_interface
- movzwl 2(rPC), %eax
+      // Set up the hidden argument. As we don't have access to the interface method,
+      // just pass the method from the IMT. If that method is the conflict trampoline,
+      // this will make the stub go to the runtime; otherwise the hidden argument is unused.
+ movq %rdi, %rax
.endif
call *ART_METHOD_QUICK_CODE_OFFSET_64(%rdi) // Call the method.
.endif
@@ -1229,7 +1232,10 @@ END_FUNCTION \name
call SYMBOL(art_quick_invoke_custom)
.else
.if \is_interface
- movzwl 2(rPC), %eax
+      // Set up the hidden argument. As we don't have access to the interface method,
+      // just pass the method from the IMT. If that method is the conflict trampoline,
+      // this will make the stub go to the runtime; otherwise the hidden argument is unused.
+ movq %rdi, %rax
.endif
call *ART_METHOD_QUICK_CODE_OFFSET_64(%rdi) // Call the method.
.endif
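Note: both interpreter changes above (arm64ng and x86_64ng) rely on the same contract: nterp no longer fetches a dex method index for the hidden argument; it simply forwards the method it loaded from the IMT. A tiny illustrative sketch with hypothetical names, not nterp code:

    struct Method {
      bool is_runtime_method;  // true for the conflict trampoline
    };

    enum class Dispatch { kDirect, kRuntimeResolution };

    // A resolved implementation in the IMT ignores the hidden argument; the
    // conflict trampoline (a runtime method) misses in its table and hands the
    // runtime method on to artInvokeInterfaceTrampoline, which re-resolves.
    Dispatch ClassifyImtEntry(const Method& imt_entry) {
      return imt_entry.is_runtime_method ? Dispatch::kRuntimeResolution
                                         : Dispatch::kDirect;
    }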
diff --git a/runtime/oat.h b/runtime/oat.h
index 558c1e597b..0fbb09a652 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,8 +32,8 @@ class InstructionSetFeatures;
class PACKED(4) OatHeader {
public:
static constexpr std::array<uint8_t, 4> kOatMagic { { 'o', 'a', 't', '\n' } };
- // Last oat version changed reason: Use .bss for ResoveTypeAndVerifyAccess.
- static constexpr std::array<uint8_t, 4> kOatVersion { { '1', '8', '6', '\0' } };
+  // Last oat version changed reason: ABI change for the conflict trampoline.
+ static constexpr std::array<uint8_t, 4> kOatVersion { { '1', '8', '7', '\0' } };
static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
static constexpr const char* kDebuggableKey = "debuggable";
diff --git a/test/812-recursive-default/expected.txt b/test/812-recursive-default/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/812-recursive-default/expected.txt
diff --git a/test/812-recursive-default/info.txt b/test/812-recursive-default/info.txt
new file mode 100644
index 0000000000..0fe9de7700
--- /dev/null
+++ b/test/812-recursive-default/info.txt
@@ -0,0 +1,2 @@
+Test that the compiler can handle recursive calls in a
+default interface method.
diff --git a/test/812-recursive-default/src/Main.java b/test/812-recursive-default/src/Main.java
new file mode 100644
index 0000000000..4a6b886c8b
--- /dev/null
+++ b/test/812-recursive-default/src/Main.java
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+interface Itf {
+ public default int fib(int a) {
+ if (a <= 1) return a;
+ return fib(a - 1) + fib(a - 2);
+ }
+}
+
+public class Main implements Itf {
+
+ public static void main(String[] args) {
+ int result = new Main().fib(2);
+ if (result != 1) {
+ throw new Error("Expected 1, got " + result);
+ }
+ }
+
+ public int fib(int a) {
+ return Itf.super.fib(a);
+ }
+}