path: root/compiler/optimizing
Diffstat (limited to 'compiler/optimizing')
-rw-r--r--  compiler/optimizing/code_generator.cc            3
-rw-r--r--  compiler/optimizing/code_generator.h             2
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc     70
-rw-r--r--  compiler/optimizing/code_generator_arm64.h       1
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc  79
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.h    1
-rw-r--r--  compiler/optimizing/code_generator_x86.cc      107
-rw-r--r--  compiler/optimizing/code_generator_x86.h         6
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc    78
-rw-r--r--  compiler/optimizing/code_generator_x86_64.h      5
-rw-r--r--  compiler/optimizing/instruction_builder.cc      22
-rw-r--r--  compiler/optimizing/instruction_simplifier.cc    2
-rw-r--r--  compiler/optimizing/nodes.h                     34
-rw-r--r--  compiler/optimizing/pc_relative_fixups_x86.cc    9
-rw-r--r--  compiler/optimizing/sharpening.cc                2
-rw-r--r--  compiler/optimizing/sharpening.h                 2
16 files changed, 306 insertions, 117 deletions
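
Editor's note: the common thread across the four backend diffs below is that the method-load switch formerly embedded in GenerateStaticOrDirectCall is hoisted into a shared LoadMethod(load_kind, temp, invoke) helper, which VisitInvokeInterface then reuses to materialize the hidden ArtMethod* argument. A minimal, self-contained sketch of that split (simplified stand-in types, not the actual ART classes):

// Standalone sketch of the refactoring pattern in this change; every
// type below is a simplified stand-in for its ART counterpart.
#include <cstdio>

enum class MethodLoadKind {
  kStringInit, kRecursive, kBootImageLinkTimePcRelative,
  kBootImageRelRo, kBssEntry, kJitDirectAddress, kRuntimeCall,
};

// Extracted helper: emits only the kinds that materialize a method
// pointer into a temp register. Static/direct calls and the interface
// hidden argument both funnel through it.
void LoadMethod(MethodLoadKind kind) {
  switch (kind) {
    case MethodLoadKind::kBootImageLinkTimePcRelative:
      std::puts("emit PC-relative boot image address");
      break;
    case MethodLoadKind::kBootImageRelRo:
      std::puts("emit boot image .data.bimg.rel.ro load");
      break;
    case MethodLoadKind::kBssEntry:
      std::puts("emit .bss entry load");
      break;
    case MethodLoadKind::kJitDirectAddress:
      std::puts("emit direct method address");
      break;
    case MethodLoadKind::kRuntimeCall:
      break;  // Test situation, nothing to emit.
    default:
      std::puts("unreachable: the caller handles this kind");
      break;
  }
}

void GenerateStaticOrDirectCall(MethodLoadKind kind) {
  switch (kind) {
    case MethodLoadKind::kStringInit:   // thread-local entrypoint
    case MethodLoadKind::kRecursive:    // method already in a register
    case MethodLoadKind::kRuntimeCall:  // the runtime performs the call
      std::puts("handled inline by the call generator");
      break;
    default:
      LoadMethod(kind);  // shared with VisitInvokeInterface
      break;
  }
}

int main() { GenerateStaticOrDirectCall(MethodLoadKind::kBssEntry); }

The real helpers additionally thread a Location temp and the HInvoke* through so the per-kind cases can emit relocation patches; the sketch keeps only the dispatch shape.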
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 9ff1f73de4..c2ae0e0632 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -923,8 +923,7 @@ uint32_t CodeGenerator::GetBootImageOffset(HLoadString* load_string) NO_THREAD_S
return GetBootImageOffsetImpl(string.Ptr(), ImageHeader::kSectionObjects);
}
-uint32_t CodeGenerator::GetBootImageOffset(HInvokeStaticOrDirect* invoke) {
- DCHECK_EQ(invoke->GetMethodLoadKind(), MethodLoadKind::kBootImageRelRo);
+uint32_t CodeGenerator::GetBootImageOffset(HInvoke* invoke) {
ArtMethod* method = invoke->GetResolvedMethod();
DCHECK(method != nullptr);
return GetBootImageOffsetImpl(method, ImageHeader::kSectionArtMethods);
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 1a01be9708..22804a992f 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -632,7 +632,7 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
uint32_t GetBootImageOffset(HLoadClass* load_class);
uint32_t GetBootImageOffset(HLoadString* load_string);
- uint32_t GetBootImageOffset(HInvokeStaticOrDirect* invoke);
+ uint32_t GetBootImageOffset(HInvoke* invoke);
static void CreateSystemArrayCopyLocationSummary(HInvoke* invoke);
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 36040ca690..007aa43363 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -4351,6 +4351,10 @@ void LocationsBuilderARM64::HandleInvoke(HInvoke* invoke) {
void LocationsBuilderARM64::VisitInvokeInterface(HInvokeInterface* invoke) {
HandleInvoke(invoke);
+ if (invoke->GetHiddenArgumentLoadKind() == MethodLoadKind::kRecursive) {
+ // We cannot request ip1 as it's blocked by the register allocator.
+ invoke->GetLocations()->SetInAt(invoke->GetNumberOfArguments() - 1, Location::Any());
+ }
}
void CodeGeneratorARM64::MaybeGenerateInlineCacheCheck(HInstruction* instruction,
@@ -4421,7 +4425,17 @@ void InstructionCodeGeneratorARM64::VisitInvokeInterface(HInvokeInterface* invok
MacroAssembler* masm = GetVIXLAssembler();
UseScratchRegisterScope scratch_scope(masm);
scratch_scope.Exclude(ip1);
- __ Mov(ip1, invoke->GetMethodReference().index);
+ if (invoke->GetHiddenArgumentLoadKind() == MethodLoadKind::kRecursive) {
+ Location interface_method = locations->InAt(invoke->GetNumberOfArguments() - 1);
+ if (interface_method.IsStackSlot()) {
+ __ Ldr(ip1, StackOperandFrom(interface_method));
+ } else {
+ __ Mov(ip1, XRegisterFrom(interface_method));
+ }
+ } else {
+ codegen_->LoadMethod(
+ invoke->GetHiddenArgumentLoadKind(), Location::RegisterLocation(ip1.GetCode()), invoke);
+ }
__ Ldr(temp,
MemOperand(temp, mirror::Class::ImtPtrOffset(kArm64PointerSize).Uint32Value()));
@@ -4489,21 +4503,8 @@ HInvokeStaticOrDirect::DispatchInfo CodeGeneratorARM64::GetSupportedInvokeStatic
return desired_dispatch_info;
}
-void CodeGeneratorARM64::GenerateStaticOrDirectCall(
- HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path) {
- // Make sure that ArtMethod* is passed in kArtMethodRegister as per the calling convention.
- Location callee_method = temp; // For all kinds except kRecursive, callee will be in temp.
- switch (invoke->GetMethodLoadKind()) {
- case MethodLoadKind::kStringInit: {
- uint32_t offset =
- GetThreadOffset<kArm64PointerSize>(invoke->GetStringInitEntryPoint()).Int32Value();
- // temp = thread->string_init_entrypoint
- __ Ldr(XRegisterFrom(temp), MemOperand(tr, offset));
- break;
- }
- case MethodLoadKind::kRecursive:
- callee_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodIndex());
- break;
+void CodeGeneratorARM64::LoadMethod(MethodLoadKind load_kind, Location temp, HInvoke* invoke) {
+ switch (load_kind) {
case MethodLoadKind::kBootImageLinkTimePcRelative: {
DCHECK(GetCompilerOptions().IsBootImage() || GetCompilerOptions().IsBootImageExtension());
// Add ADRP with its PC-relative method patch.
@@ -4538,14 +4539,47 @@ void CodeGeneratorARM64::GenerateStaticOrDirectCall(
EmitLdrOffsetPlaceholder(ldr_label, XRegisterFrom(temp), XRegisterFrom(temp));
break;
}
- case MethodLoadKind::kJitDirectAddress:
+ case MethodLoadKind::kJitDirectAddress: {
// Load method address from literal pool.
- __ Ldr(XRegisterFrom(temp), DeduplicateUint64Literal(invoke->GetMethodAddress()));
+ __ Ldr(XRegisterFrom(temp),
+ DeduplicateUint64Literal(reinterpret_cast<uint64_t>(invoke->GetResolvedMethod())));
+ break;
+ }
+ case MethodLoadKind::kRuntimeCall: {
+ // Test situation, don't do anything.
break;
+ }
+ default: {
+ LOG(FATAL) << "Load kind should have already been handled " << load_kind;
+ UNREACHABLE();
+ }
+ }
+}
+
+void CodeGeneratorARM64::GenerateStaticOrDirectCall(
+ HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path) {
+ // Make sure that ArtMethod* is passed in kArtMethodRegister as per the calling convention.
+ Location callee_method = temp; // For all kinds except kRecursive, callee will be in temp.
+ switch (invoke->GetMethodLoadKind()) {
+ case MethodLoadKind::kStringInit: {
+ uint32_t offset =
+ GetThreadOffset<kArm64PointerSize>(invoke->GetStringInitEntryPoint()).Int32Value();
+ // temp = thread->string_init_entrypoint
+ __ Ldr(XRegisterFrom(temp), MemOperand(tr, offset));
+ break;
+ }
+ case MethodLoadKind::kRecursive: {
+ callee_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodIndex());
+ break;
+ }
case MethodLoadKind::kRuntimeCall: {
GenerateInvokeStaticOrDirectRuntimeCall(invoke, temp, slow_path);
return; // No code pointer retrieval; the runtime performs the call directly.
}
+ default: {
+ LoadMethod(invoke->GetMethodLoadKind(), temp, invoke);
+ break;
+ }
}
auto call_code_pointer_member = [&](MemberOffset offset) {
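
A note on the kRecursive interface case above: the hidden argument must end up in ip1, which the register allocator never hands out, so the input is constrained only to Location::Any() and moved at the last moment, right before the call. A small sketch of that emit-time branch, using a hypothetical Location type rather than ART's:

#include <cstdio>

// Hypothetical stand-in for ART's Location: a value constrained to
// Any() ends up either in a core register or in a stack slot.
struct Location {
  bool is_stack_slot;
  int index;  // register number, or byte offset from sp
};

// Mirrors the branch added to VisitInvokeInterface above: ip1 cannot
// be requested from the allocator, so the hidden argument is moved
// into it only here, right before the call, from wherever the input
// actually lives.
void MoveHiddenArgToIp1(const Location& interface_method) {
  if (interface_method.is_stack_slot) {
    std::printf("ldr ip1, [sp, #%d]\n", interface_method.index);
  } else {
    std::printf("mov ip1, x%d\n", interface_method.index);
  }
}

int main() {
  MoveHiddenArgToIp1(Location{/*is_stack_slot=*/true, 16});
  MoveHiddenArgToIp1(Location{/*is_stack_slot=*/false, 2});
}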
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 447c0b501f..7ae46d77e8 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -689,6 +689,7 @@ class CodeGeneratorARM64 : public CodeGenerator {
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
ArtMethod* method) override;
+ void LoadMethod(MethodLoadKind load_kind, Location temp, HInvoke* invoke);
void GenerateStaticOrDirectCall(
HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
void GenerateVirtualCall(
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 0940f4289c..85337ed767 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -3432,7 +3432,10 @@ void InstructionCodeGeneratorARMVIXL::VisitInvokeVirtual(HInvokeVirtual* invoke)
void LocationsBuilderARMVIXL::VisitInvokeInterface(HInvokeInterface* invoke) {
HandleInvoke(invoke);
// Add the hidden argument.
- invoke->GetLocations()->AddTemp(LocationFrom(r12));
+ if (invoke->GetHiddenArgumentLoadKind() == MethodLoadKind::kRecursive) {
+ // We cannot request r12 as it's blocked by the register allocator.
+ invoke->GetLocations()->SetInAt(invoke->GetNumberOfArguments() - 1, Location::Any());
+ }
}
void CodeGeneratorARMVIXL::MaybeGenerateInlineCacheCheck(HInstruction* instruction,
@@ -3468,7 +3471,6 @@ void InstructionCodeGeneratorARMVIXL::VisitInvokeInterface(HInvokeInterface* inv
// TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
LocationSummary* locations = invoke->GetLocations();
vixl32::Register temp = RegisterFrom(locations->GetTemp(0));
- vixl32::Register hidden_reg = RegisterFrom(locations->GetTemp(1));
Location receiver = locations->InAt(0);
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
@@ -3509,11 +3511,10 @@ void InstructionCodeGeneratorARMVIXL::VisitInvokeInterface(HInvokeInterface* inv
// LR = temp->GetEntryPoint();
GetAssembler()->LoadFromOffset(kLoadWord, lr, temp, entry_point);
- // Set the hidden (in r12) argument. It is done here, right before a BLX to prevent other
- // instruction from clobbering it as they might use r12 as a scratch register.
- DCHECK(hidden_reg.Is(r12));
-
{
+ // Set the hidden (in r12) argument. It is done here, right before a BLX, to prevent other
+ // instructions from clobbering it, as they might use r12 as a scratch register.
+ Location hidden_reg = Location::RegisterLocation(r12.GetCode());
// The VIXL macro assembler may clobber any of the scratch registers that are available to it,
// so it checks if the application is using them (by passing them to the macro assembler
// methods). The following application of UseScratchRegisterScope corrects VIXL's notion of
@@ -3523,8 +3524,18 @@ void InstructionCodeGeneratorARMVIXL::VisitInvokeInterface(HInvokeInterface* inv
// (to materialize the constant), since the destination register becomes available for such use
// internally for the duration of the macro instruction.
UseScratchRegisterScope temps(GetVIXLAssembler());
- temps.Exclude(hidden_reg);
- __ Mov(hidden_reg, invoke->GetMethodReference().index);
+ temps.Exclude(RegisterFrom(hidden_reg));
+ if (invoke->GetHiddenArgumentLoadKind() == MethodLoadKind::kRecursive) {
+ Location current_method = locations->InAt(invoke->GetNumberOfArguments() - 1);
+ if (current_method.IsStackSlot()) {
+ GetAssembler()->LoadFromOffset(
+ kLoadWord, RegisterFrom(hidden_reg), sp, current_method.GetStackIndex());
+ } else {
+ __ Mov(RegisterFrom(hidden_reg), RegisterFrom(current_method));
+ }
+ } else {
+ codegen_->LoadMethod(invoke->GetHiddenArgumentLoadKind(), hidden_reg, invoke);
+ }
}
{
// Ensure the pc position is recorded immediately after the `blx` instruction.
@@ -9069,20 +9080,9 @@ HInvokeStaticOrDirect::DispatchInfo CodeGeneratorARMVIXL::GetSupportedInvokeStat
return desired_dispatch_info;
}
-void CodeGeneratorARMVIXL::GenerateStaticOrDirectCall(
- HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path) {
- Location callee_method = temp; // For all kinds except kRecursive, callee will be in temp.
- switch (invoke->GetMethodLoadKind()) {
- case MethodLoadKind::kStringInit: {
- uint32_t offset =
- GetThreadOffset<kArmPointerSize>(invoke->GetStringInitEntryPoint()).Int32Value();
- // temp = thread->string_init_entrypoint
- GetAssembler()->LoadFromOffset(kLoadWord, RegisterFrom(temp), tr, offset);
- break;
- }
- case MethodLoadKind::kRecursive:
- callee_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodIndex());
- break;
+
+void CodeGeneratorARMVIXL::LoadMethod(MethodLoadKind load_kind, Location temp, HInvoke* invoke) {
+ switch (load_kind) {
case MethodLoadKind::kBootImageLinkTimePcRelative: {
DCHECK(GetCompilerOptions().IsBootImage() || GetCompilerOptions().IsBootImageExtension());
PcRelativePatchInfo* labels = NewBootImageMethodPatch(invoke->GetResolvedMethodReference());
@@ -9106,13 +9106,44 @@ void CodeGeneratorARMVIXL::GenerateStaticOrDirectCall(
GetAssembler()->LoadFromOffset(kLoadWord, temp_reg, temp_reg, /* offset*/ 0);
break;
}
- case MethodLoadKind::kJitDirectAddress:
- __ Mov(RegisterFrom(temp), Operand::From(invoke->GetMethodAddress()));
+ case MethodLoadKind::kJitDirectAddress: {
+ __ Mov(RegisterFrom(temp), Operand::From(invoke->GetResolvedMethod()));
+ break;
+ }
+ case MethodLoadKind::kRuntimeCall: {
+ // Test situation, don't do anything.
break;
+ }
+ default: {
+ LOG(FATAL) << "Load kind should have already been handled " << load_kind;
+ UNREACHABLE();
+ }
+ }
+}
+
+void CodeGeneratorARMVIXL::GenerateStaticOrDirectCall(
+ HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path) {
+ Location callee_method = temp; // For all kinds except kRecursive, callee will be in temp.
+ switch (invoke->GetMethodLoadKind()) {
+ case MethodLoadKind::kStringInit: {
+ uint32_t offset =
+ GetThreadOffset<kArmPointerSize>(invoke->GetStringInitEntryPoint()).Int32Value();
+ // temp = thread->string_init_entrypoint
+ GetAssembler()->LoadFromOffset(kLoadWord, RegisterFrom(temp), tr, offset);
+ break;
+ }
+ case MethodLoadKind::kRecursive: {
+ callee_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodIndex());
+ break;
+ }
case MethodLoadKind::kRuntimeCall: {
GenerateInvokeStaticOrDirectRuntimeCall(invoke, temp, slow_path);
return; // No code pointer retrieval; the runtime performs the call directly.
}
+ default: {
+ LoadMethod(invoke->GetMethodLoadKind(), temp, invoke);
+ break;
+ }
}
auto call_code_pointer_member = [&](MemberOffset offset) {
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index 0453d20cc6..12594ed7c0 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -581,6 +581,7 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
ArtMethod* method) override;
+ void LoadMethod(MethodLoadKind load_kind, Location temp, HInvoke* invoke);
void GenerateStaticOrDirectCall(
HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
void GenerateVirtualCall(
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 86e6b959ed..d05c2d95cd 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -2564,6 +2564,16 @@ void LocationsBuilderX86::VisitInvokeInterface(HInvokeInterface* invoke) {
// Add one temporary for inline cache update.
invoke->GetLocations()->AddTemp(Location::RegisterLocation(EBP));
}
+
+ // For PC-relative load kinds the invoke has an extra input, the PC-relative address base.
+ if (IsPcRelativeMethodLoadKind(invoke->GetHiddenArgumentLoadKind())) {
+ invoke->GetLocations()->SetInAt(invoke->GetSpecialInputIndex(), Location::RequiresRegister());
+ }
+
+ if (invoke->GetHiddenArgumentLoadKind() == MethodLoadKind::kRecursive) {
+ invoke->GetLocations()->SetInAt(invoke->GetNumberOfArguments() - 1,
+ Location::RequiresRegister());
+ }
}
void CodeGeneratorX86::MaybeGenerateInlineCacheCheck(HInstruction* instruction, Register klass) {
@@ -2608,8 +2618,12 @@ void InstructionCodeGeneratorX86::VisitInvokeInterface(HInvokeInterface* invoke)
// Set the hidden argument. It is safe to do this here, as XMM7
// won't be modified thereafter, before the `call` instruction.
DCHECK_EQ(XMM7, hidden_reg);
- __ movl(temp, Immediate(invoke->GetMethodReference().index));
- __ movd(hidden_reg, temp);
+ if (invoke->GetHiddenArgumentLoadKind() == MethodLoadKind::kRecursive) {
+ __ movd(hidden_reg, locations->InAt(invoke->GetNumberOfArguments() - 1).AsRegister<Register>());
+ } else {
+ codegen_->LoadMethod(invoke->GetHiddenArgumentLoadKind(), locations->GetTemp(0), invoke);
+ __ movd(hidden_reg, temp);
+ }
if (receiver.IsStackSlot()) {
__ movl(temp, Address(ESP, receiver.GetStackIndex()));
@@ -5147,6 +5161,16 @@ HInvokeStaticOrDirect::DispatchInfo CodeGeneratorX86::GetSupportedInvokeStaticOr
return desired_dispatch_info;
}
+Register CodeGeneratorX86::GetInvokeExtraParameter(HInvoke* invoke, Register temp) {
+ if (invoke->IsInvokeStaticOrDirect()) {
+ return GetInvokeStaticOrDirectExtraParameter(invoke->AsInvokeStaticOrDirect(), temp);
+ }
+ DCHECK(invoke->IsInvokeInterface());
+ Location location =
+ invoke->GetLocations()->InAt(invoke->AsInvokeInterface()->GetSpecialInputIndex());
+ return location.AsRegister<Register>();
+}
+
Register CodeGeneratorX86::GetInvokeStaticOrDirectExtraParameter(HInvokeStaticOrDirect* invoke,
Register temp) {
Location location = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex());
@@ -5172,53 +5196,72 @@ Register CodeGeneratorX86::GetInvokeStaticOrDirectExtraParameter(HInvokeStaticOr
return location.AsRegister<Register>();
}
-void CodeGeneratorX86::GenerateStaticOrDirectCall(
- HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path) {
- Location callee_method = temp; // For all kinds except kRecursive, callee will be in temp.
- switch (invoke->GetMethodLoadKind()) {
- case MethodLoadKind::kStringInit: {
- // temp = thread->string_init_entrypoint
- uint32_t offset =
- GetThreadOffset<kX86PointerSize>(invoke->GetStringInitEntryPoint()).Int32Value();
- __ fs()->movl(temp.AsRegister<Register>(), Address::Absolute(offset));
- break;
- }
- case MethodLoadKind::kRecursive:
- callee_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodIndex());
- break;
+void CodeGeneratorX86::LoadMethod(MethodLoadKind load_kind, Location temp, HInvoke* invoke) {
+ switch (load_kind) {
case MethodLoadKind::kBootImageLinkTimePcRelative: {
DCHECK(GetCompilerOptions().IsBootImage() || GetCompilerOptions().IsBootImageExtension());
- Register base_reg = GetInvokeStaticOrDirectExtraParameter(invoke,
- temp.AsRegister<Register>());
+ Register base_reg = GetInvokeExtraParameter(invoke, temp.AsRegister<Register>());
__ leal(temp.AsRegister<Register>(),
Address(base_reg, CodeGeneratorX86::kPlaceholder32BitOffset));
RecordBootImageMethodPatch(invoke);
break;
}
case MethodLoadKind::kBootImageRelRo: {
- Register base_reg = GetInvokeStaticOrDirectExtraParameter(invoke,
- temp.AsRegister<Register>());
+ size_t index = invoke->IsInvokeInterface()
+ ? invoke->AsInvokeInterface()->GetSpecialInputIndex()
+ : invoke->AsInvokeStaticOrDirect()->GetSpecialInputIndex();
+ Register base_reg = GetInvokeExtraParameter(invoke, temp.AsRegister<Register>());
__ movl(temp.AsRegister<Register>(), Address(base_reg, kPlaceholder32BitOffset));
RecordBootImageRelRoPatch(
- invoke->InputAt(invoke->GetSpecialInputIndex())->AsX86ComputeBaseMethodAddress(),
+ invoke->InputAt(index)->AsX86ComputeBaseMethodAddress(),
GetBootImageOffset(invoke));
break;
}
case MethodLoadKind::kBssEntry: {
- Register base_reg = GetInvokeStaticOrDirectExtraParameter(invoke,
- temp.AsRegister<Register>());
+ Register base_reg = GetInvokeExtraParameter(invoke, temp.AsRegister<Register>());
__ movl(temp.AsRegister<Register>(), Address(base_reg, kPlaceholder32BitOffset));
RecordMethodBssEntryPatch(invoke);
// No need for memory fence, thanks to the x86 memory model.
break;
}
- case MethodLoadKind::kJitDirectAddress:
- __ movl(temp.AsRegister<Register>(), Immediate(invoke->GetMethodAddress()));
+ case MethodLoadKind::kJitDirectAddress: {
+ __ movl(temp.AsRegister<Register>(),
+ Immediate(reinterpret_cast32<uint32_t>(invoke->GetResolvedMethod())));
break;
+ }
+ case MethodLoadKind::kRuntimeCall: {
+ // Test situation, don't do anything.
+ break;
+ }
+ default: {
+ LOG(FATAL) << "Load kind should have already been handled " << load_kind;
+ UNREACHABLE();
+ }
+ }
+}
+
+void CodeGeneratorX86::GenerateStaticOrDirectCall(
+ HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path) {
+ Location callee_method = temp; // For all kinds except kRecursive, callee will be in temp.
+ switch (invoke->GetMethodLoadKind()) {
+ case MethodLoadKind::kStringInit: {
+ // temp = thread->string_init_entrypoint
+ uint32_t offset =
+ GetThreadOffset<kX86PointerSize>(invoke->GetStringInitEntryPoint()).Int32Value();
+ __ fs()->movl(temp.AsRegister<Register>(), Address::Absolute(offset));
+ break;
+ }
+ case MethodLoadKind::kRecursive: {
+ callee_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodIndex());
+ break;
+ }
case MethodLoadKind::kRuntimeCall: {
GenerateInvokeStaticOrDirectRuntimeCall(invoke, temp, slow_path);
return; // No code pointer retrieval; the runtime performs the call directly.
}
+ default: {
+ LoadMethod(invoke->GetMethodLoadKind(), callee_method, invoke);
+ }
}
switch (invoke->GetCodePtrLocation()) {
@@ -5336,9 +5379,12 @@ void CodeGeneratorX86::RecordBootImageRelRoPatch(HX86ComputeBaseMethodAddress* m
__ Bind(&boot_image_other_patches_.back().label);
}
-void CodeGeneratorX86::RecordBootImageMethodPatch(HInvokeStaticOrDirect* invoke) {
+void CodeGeneratorX86::RecordBootImageMethodPatch(HInvoke* invoke) {
+ size_t index = invoke->IsInvokeInterface()
+ ? invoke->AsInvokeInterface()->GetSpecialInputIndex()
+ : invoke->AsInvokeStaticOrDirect()->GetSpecialInputIndex();
HX86ComputeBaseMethodAddress* method_address =
- invoke->InputAt(invoke->GetSpecialInputIndex())->AsX86ComputeBaseMethodAddress();
+ invoke->InputAt(index)->AsX86ComputeBaseMethodAddress();
boot_image_method_patches_.emplace_back(
method_address,
invoke->GetResolvedMethodReference().dex_file,
@@ -5346,10 +5392,13 @@ void CodeGeneratorX86::RecordBootImageMethodPatch(HInvokeStaticOrDirect* invoke)
__ Bind(&boot_image_method_patches_.back().label);
}
-void CodeGeneratorX86::RecordMethodBssEntryPatch(HInvokeStaticOrDirect* invoke) {
+void CodeGeneratorX86::RecordMethodBssEntryPatch(HInvoke* invoke) {
+ size_t index = invoke->IsInvokeInterface()
+ ? invoke->AsInvokeInterface()->GetSpecialInputIndex()
+ : invoke->AsInvokeStaticOrDirect()->GetSpecialInputIndex();
DCHECK(IsSameDexFile(GetGraph()->GetDexFile(), *invoke->GetMethodReference().dex_file));
HX86ComputeBaseMethodAddress* method_address =
- invoke->InputAt(invoke->GetSpecialInputIndex())->AsX86ComputeBaseMethodAddress();
+ invoke->InputAt(index)->AsX86ComputeBaseMethodAddress();
// Add the patch entry and bind its label at the end of the instruction.
method_bss_entry_patches_.emplace_back(
method_address,
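
The x86-specific wrinkle above: every PC-relative load kind needs the extra X86ComputeBaseMethodAddress input, and its index differs between the two invoke shapes, which is why RecordBootImageMethodPatch and RecordMethodBssEntryPatch now select it conditionally. A sketch of just that index selection, with hypothetical stand-in types:

#include <cassert>
#include <cstddef>

// Hypothetical flattened invoke: in ART these are the separate
// HInvokeStaticOrDirect and HInvokeInterface classes.
struct Invoke {
  bool is_interface;
  size_t number_of_arguments;   // interface: base input follows the args
  size_t static_special_index;  // static/direct: precomputed index
};

// Mirrors the index selection now repeated in RecordBootImageMethodPatch
// and RecordMethodBssEntryPatch.
size_t GetSpecialInputIndex(const Invoke& invoke) {
  return invoke.is_interface ? invoke.number_of_arguments
                             : invoke.static_special_index;
}

int main() {
  Invoke interface_invoke{/*is_interface=*/true, 3, 0};
  assert(GetSpecialInputIndex(interface_invoke) == 3);
  return 0;
}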
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index b0575ba969..0368de5343 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -472,6 +472,7 @@ class CodeGeneratorX86 : public CodeGenerator {
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
ArtMethod* method) override;
+ void LoadMethod(MethodLoadKind load_kind, Location temp, HInvoke* invoke);
// Generate a call to a static or direct method.
void GenerateStaticOrDirectCall(
HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
@@ -483,8 +484,8 @@ class CodeGeneratorX86 : public CodeGenerator {
uint32_t intrinsic_data);
void RecordBootImageRelRoPatch(HX86ComputeBaseMethodAddress* method_address,
uint32_t boot_image_offset);
- void RecordBootImageMethodPatch(HInvokeStaticOrDirect* invoke);
- void RecordMethodBssEntryPatch(HInvokeStaticOrDirect* invoke);
+ void RecordBootImageMethodPatch(HInvoke* invoke);
+ void RecordMethodBssEntryPatch(HInvoke* invoke);
void RecordBootImageTypePatch(HLoadClass* load_class);
Label* NewTypeBssEntryPatch(HLoadClass* load_class);
void RecordBootImageStringPatch(HLoadString* load_string);
@@ -697,6 +698,7 @@ class CodeGeneratorX86 : public CodeGenerator {
void EmitPcRelativeLinkerPatches(const ArenaDeque<X86PcRelativePatchInfo>& infos,
ArenaVector<linker::LinkerPatch>* linker_patches);
+ Register GetInvokeExtraParameter(HInvoke* invoke, Register temp);
Register GetInvokeStaticOrDirectExtraParameter(HInvokeStaticOrDirect* invoke, Register temp);
// Labels for each block that will be compiled.
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 202b58b9ee..3a39ee82b2 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1001,22 +1001,8 @@ HInvokeStaticOrDirect::DispatchInfo CodeGeneratorX86_64::GetSupportedInvokeStati
return desired_dispatch_info;
}
-void CodeGeneratorX86_64::GenerateStaticOrDirectCall(
- HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path) {
- // All registers are assumed to be correctly set up.
-
- Location callee_method = temp; // For all kinds except kRecursive, callee will be in temp.
- switch (invoke->GetMethodLoadKind()) {
- case MethodLoadKind::kStringInit: {
- // temp = thread->string_init_entrypoint
- uint32_t offset =
- GetThreadOffset<kX86_64PointerSize>(invoke->GetStringInitEntryPoint()).Int32Value();
- __ gs()->movq(temp.AsRegister<CpuRegister>(), Address::Absolute(offset, /* no_rip= */ true));
- break;
- }
- case MethodLoadKind::kRecursive:
- callee_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodIndex());
- break;
+void CodeGeneratorX86_64::LoadMethod(MethodLoadKind load_kind, Location temp, HInvoke* invoke) {
+ switch (load_kind) {
case MethodLoadKind::kBootImageLinkTimePcRelative:
DCHECK(GetCompilerOptions().IsBootImage() || GetCompilerOptions().IsBootImageExtension());
__ leal(temp.AsRegister<CpuRegister>(),
@@ -1037,13 +1023,47 @@ void CodeGeneratorX86_64::GenerateStaticOrDirectCall(
// No need for memory fence, thanks to the x86-64 memory model.
break;
}
- case MethodLoadKind::kJitDirectAddress:
- Load64BitValue(temp.AsRegister<CpuRegister>(), invoke->GetMethodAddress());
+ case MethodLoadKind::kJitDirectAddress: {
+ Load64BitValue(temp.AsRegister<CpuRegister>(),
+ reinterpret_cast<int64_t>(invoke->GetResolvedMethod()));
break;
+ }
+ case MethodLoadKind::kRuntimeCall: {
+ // Test situation, don't do anything.
+ break;
+ }
+ default: {
+ LOG(FATAL) << "Load kind should have already been handled " << load_kind;
+ UNREACHABLE();
+ }
+ }
+}
+
+void CodeGeneratorX86_64::GenerateStaticOrDirectCall(
+ HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path) {
+ // All registers are assumed to be correctly set up.
+
+ Location callee_method = temp; // For all kinds except kRecursive, callee will be in temp.
+ switch (invoke->GetMethodLoadKind()) {
+ case MethodLoadKind::kStringInit: {
+ // temp = thread->string_init_entrypoint
+ uint32_t offset =
+ GetThreadOffset<kX86_64PointerSize>(invoke->GetStringInitEntryPoint()).Int32Value();
+ __ gs()->movq(temp.AsRegister<CpuRegister>(), Address::Absolute(offset, /* no_rip= */ true));
+ break;
+ }
+ case MethodLoadKind::kRecursive: {
+ callee_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodIndex());
+ break;
+ }
case MethodLoadKind::kRuntimeCall: {
GenerateInvokeStaticOrDirectRuntimeCall(invoke, temp, slow_path);
return; // No code pointer retrieval; the runtime performs the call directly.
}
+ default: {
+ LoadMethod(invoke->GetMethodLoadKind(), temp, invoke);
+ break;
+ }
}
switch (invoke->GetCodePtrLocation()) {
@@ -1147,13 +1167,13 @@ void CodeGeneratorX86_64::RecordBootImageRelRoPatch(uint32_t boot_image_offset)
__ Bind(&boot_image_other_patches_.back().label);
}
-void CodeGeneratorX86_64::RecordBootImageMethodPatch(HInvokeStaticOrDirect* invoke) {
+void CodeGeneratorX86_64::RecordBootImageMethodPatch(HInvoke* invoke) {
boot_image_method_patches_.emplace_back(invoke->GetResolvedMethodReference().dex_file,
invoke->GetResolvedMethodReference().index);
__ Bind(&boot_image_method_patches_.back().label);
}
-void CodeGeneratorX86_64::RecordMethodBssEntryPatch(HInvokeStaticOrDirect* invoke) {
+void CodeGeneratorX86_64::RecordMethodBssEntryPatch(HInvoke* invoke) {
DCHECK(IsSameDexFile(GetGraph()->GetDexFile(), *invoke->GetMethodReference().dex_file));
method_bss_entry_patches_.emplace_back(invoke->GetMethodReference().dex_file,
invoke->GetMethodReference().index);
@@ -2711,6 +2731,10 @@ void InstructionCodeGeneratorX86_64::VisitInvokeVirtual(HInvokeVirtual* invoke)
void LocationsBuilderX86_64::VisitInvokeInterface(HInvokeInterface* invoke) {
HandleInvoke(invoke);
// Add the hidden argument.
+ if (invoke->GetHiddenArgumentLoadKind() == MethodLoadKind::kRecursive) {
+ invoke->GetLocations()->SetInAt(invoke->GetNumberOfArguments() - 1,
+ Location::RegisterLocation(RAX));
+ }
invoke->GetLocations()->AddTemp(Location::RegisterLocation(RAX));
}
@@ -2744,7 +2768,6 @@ void InstructionCodeGeneratorX86_64::VisitInvokeInterface(HInvokeInterface* invo
// TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
LocationSummary* locations = invoke->GetLocations();
CpuRegister temp = locations->GetTemp(0).AsRegister<CpuRegister>();
- CpuRegister hidden_reg = locations->GetTemp(1).AsRegister<CpuRegister>();
Location receiver = locations->InAt(0);
size_t class_offset = mirror::Object::ClassOffset().SizeValue();
@@ -2768,11 +2791,14 @@ void InstructionCodeGeneratorX86_64::VisitInvokeInterface(HInvokeInterface* invo
codegen_->MaybeGenerateInlineCacheCheck(invoke, temp);
- // Set the hidden argument. This is safe to do this here, as RAX
- // won't be modified thereafter, before the `call` instruction.
- // We also di it after MaybeGenerateInlineCache that may use RAX.
- DCHECK_EQ(RAX, hidden_reg.AsRegister());
- codegen_->Load64BitValue(hidden_reg, invoke->GetMethodReference().index);
+ if (invoke->GetHiddenArgumentLoadKind() != MethodLoadKind::kRecursive) {
+ Location hidden_reg = locations->GetTemp(1);
+ // Set the hidden argument. It is safe to do this here, as RAX
+ // won't be modified thereafter, before the `call` instruction.
+ // We also do it after MaybeGenerateInlineCacheCheck, which may use RAX.
+ DCHECK_EQ(RAX, hidden_reg.AsRegister<Register>());
+ codegen_->LoadMethod(invoke->GetHiddenArgumentLoadKind(), hidden_reg, invoke);
+ }
// temp = temp->GetAddressOfIMT()
__ movq(temp,
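
On x86-64 the hidden argument convention is RAX. For kRecursive the input itself is pinned to RAX at register-allocation time, so nothing needs to move at emit time; for every other kind a RAX temp is filled by LoadMethod after the inline-cache check, which may itself clobber RAX. A sketch of that decision (hypothetical types, not ART's):

#include <cstdio>

enum class MethodLoadKind { kRecursive, kBssEntry /* and others */ };

// Sketch of the x86-64 hidden-argument setup: pin the input for
// kRecursive, otherwise defer to LoadMethod late enough that nothing
// can clobber RAX before the call.
void SetUpHiddenArgument(MethodLoadKind kind) {
  if (kind == MethodLoadKind::kRecursive) {
    std::puts("pin last input to RAX; no move at emit time");
  } else {
    std::puts("reserve RAX temp; LoadMethod fills it before the call");
  }
}

int main() { SetUpHiddenArgument(MethodLoadKind::kBssEntry); }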
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 81988b4386..c69c80aaf5 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -452,6 +452,7 @@ class CodeGeneratorX86_64 : public CodeGenerator {
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
ArtMethod* method) override;
+ void LoadMethod(MethodLoadKind load_kind, Location temp, HInvoke* invoke);
void GenerateStaticOrDirectCall(
HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
void GenerateVirtualCall(
@@ -459,8 +460,8 @@ class CodeGeneratorX86_64 : public CodeGenerator {
void RecordBootImageIntrinsicPatch(uint32_t intrinsic_data);
void RecordBootImageRelRoPatch(uint32_t boot_image_offset);
- void RecordBootImageMethodPatch(HInvokeStaticOrDirect* invoke);
- void RecordMethodBssEntryPatch(HInvokeStaticOrDirect* invoke);
+ void RecordBootImageMethodPatch(HInvoke* invoke);
+ void RecordMethodBssEntryPatch(HInvoke* invoke);
void RecordBootImageTypePatch(HLoadClass* load_class);
Label* NewTypeBssEntryPatch(HLoadClass* load_class);
void RecordBootImageStringPatch(HLoadString* load_string);
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index f917500dac..0531d725e7 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -1104,9 +1104,9 @@ bool HInstructionBuilder::BuildInvoke(const Instruction& instruction,
}
}
HInvokeStaticOrDirect::DispatchInfo dispatch_info =
- HSharpening::SharpenInvokeStaticOrDirect(resolved_method,
- has_method_id,
- code_generator_);
+ HSharpening::SharpenLoadMethod(resolved_method,
+ has_method_id,
+ code_generator_);
if (dispatch_info.code_ptr_location == CodePtrLocation::kCallCriticalNative) {
graph_->SetHasDirectCriticalNativeCall(true);
}
@@ -1138,6 +1138,13 @@ bool HInstructionBuilder::BuildInvoke(const Instruction& instruction,
/*vtable_index=*/ imt_or_vtable_index);
} else {
DCHECK_EQ(invoke_type, kInterface);
+ if (kIsDebugBuild) {
+ ScopedObjectAccess soa(Thread::Current());
+ DCHECK(resolved_method->GetDeclaringClass()->IsInterface());
+ }
+ MethodLoadKind load_kind =
+ HSharpening::SharpenLoadMethod(resolved_method, /* has_method_id= */ true, code_generator_)
+ .method_load_kind;
invoke = new (allocator_) HInvokeInterface(allocator_,
number_of_arguments,
return_type,
@@ -1145,7 +1152,8 @@ bool HInstructionBuilder::BuildInvoke(const Instruction& instruction,
method_reference,
resolved_method,
resolved_method_reference,
- /*imt_index=*/ imt_or_vtable_index);
+ /*imt_index=*/ imt_or_vtable_index,
+ load_kind);
}
return HandleInvoke(invoke, operands, shorty, /* is_unresolved= */ false);
}
@@ -1669,6 +1677,12 @@ bool HInstructionBuilder::SetupInvokeArguments(HInstruction* invoke,
invoke->SetRawInputAt(argument_index, graph_->GetCurrentMethod());
}
+ if (invoke->IsInvokeInterface() &&
+ (invoke->AsInvokeInterface()->GetHiddenArgumentLoadKind() == MethodLoadKind::kRecursive)) {
+ invoke->SetRawInputAt(invoke->AsInvokeInterface()->GetNumberOfArguments() - 1,
+ graph_->GetCurrentMethod());
+ }
+
return true;
}
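
The builder change above has two halves: the interface invoke is created with a hidden-argument load kind sharpened by HSharpening::SharpenLoadMethod, and SetupInvokeArguments fills the extra slot with the current method only for kRecursive. A hypothetical mini-model of that wiring:

#include <cstddef>
#include <cstdio>

enum class MethodLoadKind { kRecursive, kBssEntry /* and others */ };

// Hypothetical mini-model of the builder wiring: HInvokeInterface is
// created with a sharpened load kind, and only kRecursive reserves one
// extra input slot, which SetupInvokeArguments fills with the current
// method.
struct InvokeInterface {
  MethodLoadKind hidden_arg_load_kind;
  size_t num_inputs;  // declared arguments, plus one when kRecursive
};

void SetupInvokeArguments(const InvokeInterface& invoke,
                          const char* current_method) {
  if (invoke.hidden_arg_load_kind == MethodLoadKind::kRecursive) {
    std::printf("input[%zu] = %s\n", invoke.num_inputs - 1, current_method);
  }
}

int main() {
  // Three declared arguments plus the reserved slot.
  SetupInvokeArguments({MethodLoadKind::kRecursive, 4}, "current_method");
}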
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index d616912b9f..d7d5b597c0 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -2311,7 +2311,7 @@ void InstructionSimplifierVisitor::SimplifySystemArrayCopy(HInvoke* instruction)
// is unlikely that it exists. The most usual situation for such typed
// arraycopy methods is a direct pointer to the boot image.
invoke->SetDispatchInfo(
- HSharpening::SharpenInvokeStaticOrDirect(method, /* has_method_id= */ true, codegen_));
+ HSharpening::SharpenLoadMethod(method, /* has_method_id= */ true, codegen_));
}
}
}
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index b674937ee3..9200689f27 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -4417,6 +4417,12 @@ enum class CodePtrLocation {
kCallArtMethod,
};
+static inline bool IsPcRelativeMethodLoadKind(MethodLoadKind load_kind) {
+ return load_kind == MethodLoadKind::kBootImageLinkTimePcRelative ||
+ load_kind == MethodLoadKind::kBootImageRelRo ||
+ load_kind == MethodLoadKind::kBssEntry;
+}
+
class HInvoke : public HVariableInputSizeInstruction {
public:
bool NeedsEnvironment() const override;
@@ -4738,9 +4744,7 @@ class HInvokeStaticOrDirect final : public HInvoke {
bool IsStringInit() const { return GetMethodLoadKind() == MethodLoadKind::kStringInit; }
bool HasMethodAddress() const { return GetMethodLoadKind() == MethodLoadKind::kJitDirectAddress; }
bool HasPcRelativeMethodLoadKind() const {
- return GetMethodLoadKind() == MethodLoadKind::kBootImageLinkTimePcRelative ||
- GetMethodLoadKind() == MethodLoadKind::kBootImageRelRo ||
- GetMethodLoadKind() == MethodLoadKind::kBssEntry;
+ return IsPcRelativeMethodLoadKind(GetMethodLoadKind());
}
QuickEntrypointEnum GetStringInitEntryPoint() const {
@@ -4941,10 +4945,11 @@ class HInvokeInterface final : public HInvoke {
MethodReference method_reference,
ArtMethod* resolved_method,
MethodReference resolved_method_reference,
- uint32_t imt_index)
+ uint32_t imt_index,
+ MethodLoadKind load_kind)
: HInvoke(kInvokeInterface,
allocator,
- number_of_arguments,
+ number_of_arguments + (NeedsCurrentMethod(load_kind) ? 1 : 0),
0u,
return_type,
dex_pc,
@@ -4952,7 +4957,12 @@ class HInvokeInterface final : public HInvoke {
resolved_method,
resolved_method_reference,
kInterface),
- imt_index_(imt_index) {
+ imt_index_(imt_index),
+ hidden_argument_load_kind_(load_kind) {
+ }
+
+ static bool NeedsCurrentMethod(MethodLoadKind load_kind) {
+ return load_kind == MethodLoadKind::kRecursive;
}
bool IsClonable() const override { return true; }
@@ -4967,7 +4977,16 @@ class HInvokeInterface final : public HInvoke {
return true;
}
+ size_t GetSpecialInputIndex() const {
+ return GetNumberOfArguments();
+ }
+
+ void AddSpecialInput(HInstruction* input) {
+ InsertInputAt(GetSpecialInputIndex(), input);
+ }
+
uint32_t GetImtIndex() const { return imt_index_; }
+ MethodLoadKind GetHiddenArgumentLoadKind() const { return hidden_argument_load_kind_; }
DECLARE_INSTRUCTION(InvokeInterface);
@@ -4977,6 +4996,9 @@ class HInvokeInterface final : public HInvoke {
private:
// Cached value of the resolved method, to avoid needing the mutator lock.
const uint32_t imt_index_;
+
+ // How the hidden argument (the interface method) is being loaded.
+ const MethodLoadKind hidden_argument_load_kind_;
};
class HNeg final : public HUnaryOperation {
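
The nodes.h changes establish the input-layout invariant the backends rely on: kRecursive reserves one extra argument slot for the current method, and the special input (the x86 PC-relative base) is always appended after GetNumberOfArguments(). A sketch of that accounting with stand-in container types:

#include <cassert>
#include <cstddef>
#include <string>
#include <vector>

enum class MethodLoadKind {
  kRecursive, kBootImageLinkTimePcRelative, kBootImageRelRo, kBssEntry,
};

static bool NeedsCurrentMethod(MethodLoadKind kind) {
  return kind == MethodLoadKind::kRecursive;
}

// Hypothetical mini HInvokeInterface mirroring the accounting above:
// kRecursive grows the input list by one slot for the current method,
// and a special input is always appended after all arguments.
struct InvokeInterface {
  size_t number_of_arguments;  // includes the kRecursive extra slot
  std::vector<std::string> inputs;

  InvokeInterface(size_t declared_args, MethodLoadKind kind)
      : number_of_arguments(declared_args +
                            (NeedsCurrentMethod(kind) ? 1u : 0u)),
        inputs(number_of_arguments) {}

  size_t GetSpecialInputIndex() const { return number_of_arguments; }
  void AddSpecialInput(const std::string& input) {
    inputs.insert(inputs.begin() + GetSpecialInputIndex(), input);
  }
};

int main() {
  InvokeInterface invoke(2, MethodLoadKind::kBssEntry);
  invoke.AddSpecialInput("method_address_base");
  assert(invoke.inputs[invoke.GetSpecialInputIndex()] == "method_address_base");
  return 0;
}

pc_relative_fixups_x86.cc below uses exactly this AddSpecialInput hook to attach the computed method-address base when the hidden-argument load kind is PC-relative.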
diff --git a/compiler/optimizing/pc_relative_fixups_x86.cc b/compiler/optimizing/pc_relative_fixups_x86.cc
index 3ea19183ba..17f37f05c5 100644
--- a/compiler/optimizing/pc_relative_fixups_x86.cc
+++ b/compiler/optimizing/pc_relative_fixups_x86.cc
@@ -207,6 +207,15 @@ class PCRelativeHandlerVisitor : public HGraphVisitor {
base_added = true;
}
+ HInvokeInterface* invoke_interface = invoke->AsInvokeInterface();
+ if (invoke_interface != nullptr &&
+ IsPcRelativeMethodLoadKind(invoke_interface->GetHiddenArgumentLoadKind())) {
+ HX86ComputeBaseMethodAddress* method_address = GetPCRelativeBasePointer(invoke);
+ // Add the extra parameter.
+ invoke_interface->AddSpecialInput(method_address);
+ base_added = true;
+ }
+
// Ensure that we can load FP arguments from the constant area.
HInputsRef inputs = invoke->GetInputs();
for (size_t i = 0; i < inputs.size(); i++) {
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index f570c60843..3ffb24b852 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -57,7 +57,7 @@ static bool BootImageAOTCanEmbedMethod(ArtMethod* method, const CompilerOptions&
return compiler_options.IsImageClass(dex_file.StringByTypeIdx(klass->GetDexTypeIndex()));
}
-HInvokeStaticOrDirect::DispatchInfo HSharpening::SharpenInvokeStaticOrDirect(
+HInvokeStaticOrDirect::DispatchInfo HSharpening::SharpenLoadMethod(
ArtMethod* callee, bool has_method_id, CodeGenerator* codegen) {
if (kIsDebugBuild) {
ScopedObjectAccess soa(Thread::Current()); // Required for GetDeclaringClass below.
diff --git a/compiler/optimizing/sharpening.h b/compiler/optimizing/sharpening.h
index b48cd4b9b3..f71d9b5056 100644
--- a/compiler/optimizing/sharpening.h
+++ b/compiler/optimizing/sharpening.h
@@ -30,7 +30,7 @@ class DexCompilationUnit;
class HSharpening {
public:
// Used by the builder and InstructionSimplifier.
- static HInvokeStaticOrDirect::DispatchInfo SharpenInvokeStaticOrDirect(
+ static HInvokeStaticOrDirect::DispatchInfo SharpenLoadMethod(
ArtMethod* callee, bool has_method_id, CodeGenerator* codegen);
// Used by the builder and the inliner.