author Almaz Mingaleev <mingaleev@google.com> 2024-08-29 09:52:58 +0000
committer Treehugger Robot <android-test-infra-autosubmit@system.gserviceaccount.com> 2024-09-02 11:14:57 +0000
commit f2c43572c8bfd97c2d8bd7581d599d1a9fb2eb32 (patch)
tree bfd4f80a3825bf1b3def119e635aad03d27248bc /compiler/optimizing
parent ab13b431d44add08b68d96d99bbb0d0b3f2ce86d (diff)
Address follow-up comments from aosp/2721077.
Bug: 297147201
Test: ./art/test/testrunner/testrunner.py --host --64 -b --optimizing
Change-Id: Ie89d310633339b785de5d9f3daf653abfbff875c
Diffstat (limited to 'compiler/optimizing')
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc   1
-rw-r--r--  compiler/optimizing/code_generator_x86_64.h    2
-rw-r--r--  compiler/optimizing/instruction_builder.cc    10
-rw-r--r--  compiler/optimizing/intrinsics_x86_64.cc      27
-rw-r--r--  compiler/optimizing/nodes.h                     3
5 files changed, 27 insertions, 16 deletions
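As background for the comments added in the diff below (checking the call-site MethodType against the MethodHandle's type, and throwing WrongMethodTypeException on mismatch), here is a minimal Java sketch of the invokeExact behavior the intrinsic fast-paths. It is illustrative only and not part of the commit; the choice of StringBuilder.append and the argument values are assumptions.

    import java.lang.invoke.MethodHandle;
    import java.lang.invoke.MethodHandles;
    import java.lang.invoke.MethodType;
    import java.lang.invoke.WrongMethodTypeException;

    public class InvokeExactDemo {
        public static void main(String[] args) throws Throwable {
            // findVirtual yields a handle of type (StringBuilder, int) -> StringBuilder:
            // the receiver type is prepended to the target method's own type.
            MethodHandle mh = MethodHandles.lookup().findVirtual(
                    StringBuilder.class,
                    "append",
                    MethodType.methodType(StringBuilder.class, int.class));

            StringBuilder sb = new StringBuilder();

            // Call-site type matches the handle's type exactly, so the target runs directly.
            StringBuilder ok = (StringBuilder) mh.invokeExact(sb, 42);

            // Call-site type differs (Object return instead of StringBuilder):
            // invokeExact throws WrongMethodTypeException instead of adapting the call.
            try {
                Object bad = mh.invokeExact(sb, 42);
            } catch (WrongMethodTypeException expected) {
                System.out.println("mismatch: " + expected.getMessage());
            }
        }
    }

The exact-match requirement is why the HInvokePolymorphic node gains the call-site MethodType as an extra input: the generated fast path compares it against the handle's MethodType and falls back to the slow path on mismatch.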
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index e1c3c9f426..6a7f9b1264 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -57,7 +57,6 @@ class GcRoot;
namespace x86_64 {
static constexpr int kCurrentMethodStackOffset = 0;
-static constexpr Register kMethodRegisterArgument = RDI;
// The compare/jump sequence will generate about (1.5 * num_entries) instructions. A jump
// table version generates 7 instructions and num_entries literals. Compare/jump sequence will
// generates less code/data with a small num_entries.
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index ad4a60e091..38758148f1 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -28,6 +28,8 @@
namespace art HIDDEN {
namespace x86_64 {
+static constexpr Register kMethodRegisterArgument = RDI;
+
// Use a local definition to prevent copying mistakes.
static constexpr size_t kX86_64WordSize = static_cast<size_t>(kX86_64PointerSize);
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index d7553dd14f..356322e85b 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -1391,9 +1391,13 @@ bool HInstructionBuilder::BuildInvokePolymorphic(uint32_t dex_pc,
MethodReference method_reference(&graph_->GetDexFile(), method_idx);
+ // MethodHandle.invokeExact intrinsic needs to check whether call-site matches with MethodHandle's
+ // type. To do that, MethodType corresponding to the call-site is passed as an extra input.
+ // Other invoke-polymorphic calls do not need it.
bool is_invoke_exact =
static_cast<Intrinsics>(resolved_method->GetIntrinsic()) ==
Intrinsics::kMethodHandleInvokeExact;
+ // Currently intrinsic works for MethodHandle targeting invoke-virtual calls only.
bool can_be_virtual = number_of_arguments >= 2 &&
DataType::FromShorty(shorty[1]) == DataType::Type::kReference;
@@ -1414,7 +1418,7 @@ bool HInstructionBuilder::BuildInvokePolymorphic(uint32_t dex_pc,
return false;
}
- DCHECK_EQ(invoke->AsInvokePolymorphic()->CanTargetInvokeVirtual(), can_be_intrinsified);
+ DCHECK_EQ(invoke->AsInvokePolymorphic()->CanHaveFastPath(), can_be_intrinsified);
if (invoke->GetIntrinsic() != Intrinsics::kNone &&
invoke->GetIntrinsic() != Intrinsics::kMethodHandleInvoke &&
@@ -1896,7 +1900,9 @@ bool HInstructionBuilder::SetupInvokeArguments(HInstruction* invoke,
if (invoke->IsInvokePolymorphic()) {
HInvokePolymorphic* invoke_polymorphic = invoke->AsInvokePolymorphic();
- if (invoke_polymorphic->CanTargetInvokeVirtual()) {
+ // MethodHandle.invokeExact intrinsic expects MethodType corresponding to the call-site as an
+ // extra input to determine whether to throw WrongMethodTypeException or execute target method.
+ if (invoke_polymorphic->CanHaveFastPath()) {
HLoadMethodType* load_method_type =
new (allocator_) HLoadMethodType(graph_->GetCurrentMethod(),
invoke_polymorphic->GetProtoIndex(),
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 2c9272d403..d085d2c469 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -145,10 +145,12 @@ class ReadBarrierSystemArrayCopySlowPathX86_64 : public SlowPathCode {
DISALLOW_COPY_AND_ASSIGN(ReadBarrierSystemArrayCopySlowPathX86_64);
};
-// invoke-polymorphic's slow-path which does not move arguments.
+// The MethodHandle.invokeExact intrinsic sets up arguments to match the target method call. If we
+// need to go to the slow path, we call art_quick_invoke_polymorphic_with_hidden_receiver, which
+// expects the MethodHandle object in RDI (in place of the actual ArtMethod).
class InvokePolymorphicSlowPathX86_64 : public SlowPathCode {
public:
- explicit InvokePolymorphicSlowPathX86_64(HInstruction* instruction, CpuRegister method_handle)
+ InvokePolymorphicSlowPathX86_64(HInstruction* instruction, CpuRegister method_handle)
: SlowPathCode(instruction), method_handle_(method_handle) {
DCHECK(instruction->IsInvokePolymorphic());
}
@@ -159,6 +161,7 @@ class InvokePolymorphicSlowPathX86_64 : public SlowPathCode {
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, instruction_->GetLocations());
+ // Passing `MethodHandle` object as hidden argument.
__ movq(CpuRegister(RDI), method_handle_);
x86_64_codegen->InvokeRuntime(QuickEntrypointEnum::kQuickInvokePolymorphicWithHiddenReceiver,
instruction_,
@@ -4099,7 +4102,7 @@ void IntrinsicLocationsBuilderX86_64::VisitMethodHandleInvokeExact(HInvoke* invo
// Don't emit intrinsic code for MethodHandle.invokeExact when it certainly does not target
// invoke-virtual: if invokeExact is called w/o arguments or if the first argument in that
// call is not a reference.
- if (!invoke->AsInvokePolymorphic()->CanTargetInvokeVirtual()) {
+ if (!invoke->AsInvokePolymorphic()->CanHaveFastPath()) {
return;
}
ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
@@ -4120,13 +4123,11 @@ void IntrinsicLocationsBuilderX86_64::VisitMethodHandleInvokeExact(HInvoke* invo
// The last input is MethodType object corresponding to the call-site.
locations->SetInAt(number_of_args, Location::RequiresRegister());
- // We use a fixed-register temporary to pass the target method.
- locations->AddTemp(calling_convention.GetMethodLocation());
locations->AddTemp(Location::RequiresRegister());
}
void IntrinsicCodeGeneratorX86_64::VisitMethodHandleInvokeExact(HInvoke* invoke) {
- DCHECK(invoke->AsInvokePolymorphic()->CanTargetInvokeVirtual());
+ DCHECK(invoke->AsInvokePolymorphic()->CanHaveFastPath());
LocationSummary* locations = invoke->GetLocations();
CpuRegister method_handle = locations->InAt(0).AsRegister<CpuRegister>();
@@ -4139,9 +4140,10 @@ void IntrinsicCodeGeneratorX86_64::VisitMethodHandleInvokeExact(HInvoke* invoke)
Address method_handle_kind = Address(method_handle, mirror::MethodHandle::HandleKindOffset());
// If it is not InvokeVirtual then go to slow path.
- // Even if MethodHandle's kind is kInvokeVirtual underlying method still can be an interface or
- // direct method (that's what current `MethodHandles$Lookup.findVirtual` is doing). We don't check
- // whether `method` is an interface method explicitly: in that case the subtype check will fail.
+ // Even if MethodHandle's kind is kInvokeVirtual, the underlying method can still be an interface
+ // or a direct method (that's what current `MethodHandles$Lookup.findVirtual` is doing). We don't
+ // check whether `method` is an interface method explicitly: in that case the subtype check below
+ // will fail.
// TODO(b/297147201): check whether it can be more precise and what d8/r8 can produce.
__ cmpl(method_handle_kind, Immediate(mirror::MethodHandle::Kind::kInvokeVirtual));
__ j(kNotEqual, slow_path->GetEntryLabel());
@@ -4153,16 +4155,17 @@ void IntrinsicCodeGeneratorX86_64::VisitMethodHandleInvokeExact(HInvoke* invoke)
__ cmpl(call_site_type, Address(method_handle, mirror::MethodHandle::MethodTypeOffset()));
__ j(kNotEqual, slow_path->GetEntryLabel());
- CpuRegister method = locations->GetTemp(0).AsRegister<CpuRegister>();
+ CpuRegister method = CpuRegister(kMethodRegisterArgument);
- // Find method to call.
+ // Get method to call.
__ movq(method, Address(method_handle, mirror::MethodHandle::ArtFieldOrMethodOffset()));
CpuRegister receiver = locations->InAt(1).AsRegister<CpuRegister>();
// Using vtable_index register as temporary in subtype check. It will be overridden later.
// If `method` is an interface method this check will fail.
- CpuRegister vtable_index = locations->GetTemp(1).AsRegister<CpuRegister>();
+ CpuRegister vtable_index = locations->GetTemp(0).AsRegister<CpuRegister>();
+ // We deliberately avoid the read barrier, letting the slow path handle the false negatives.
GenerateSubTypeObjectCheckNoReadBarrier(codegen_,
slow_path,
receiver,
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index ffddd25843..eb6d9ecad4 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -4939,7 +4939,8 @@ class HInvokePolymorphic final : public HInvoke {
dex::ProtoIndex GetProtoIndex() { return proto_idx_; }
- bool CanTargetInvokeVirtual() const {
+ // Whether we can do direct invocation of the method handle.
+ bool CanHaveFastPath() const {
return GetIntrinsic() == Intrinsics::kMethodHandleInvokeExact &&
GetNumberOfArguments() >= 2 &&
InputAt(1)->GetType() == DataType::Type::kReference;