author    Almaz Mingaleev <mingaleev@google.com>  2023-08-22 15:00:44 +0100
committer Almaz Mingaleev <mingaleev@google.com>  2024-08-28 13:31:56 +0000
commit    93163edd92664c79da56ffcf5de32b7b872136ce
tree      dad21273d417cf020aa527171dc00465076b6434
parent    46a77ad7d269ba087c4075029d92fe00b04fb6bb
x86_64: Add intrinsic for MethodHandle::invokeExact...

... which targets invoke-virtual methods. New entrypoint changes
deliverException's offset, hence the arm test change.

Bug: 297147201
Test: ./art/test/testrunner/testrunner.py --host --64 -b --optimizing
Test: ./art/test.py --host -g
Change-Id: I636fc60c088bfdf9b695c92de47f1c539e3956f1
Diffstat (limited to 'compiler')
-rw-r--r--  compiler/optimizing/code_generator_x86_64.h             1
-rw-r--r--  compiler/optimizing/instruction_builder.cc             32
-rw-r--r--  compiler/optimizing/intrinsics_x86_64.cc              134
-rw-r--r--  compiler/optimizing/nodes.h                             9
-rw-r--r--  compiler/utils/assembler_thumb_test_expected.cc.inc     2
5 files changed, 174 insertions, 4 deletions
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index cbb4b17fe5..ad4a60e091 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -93,7 +93,6 @@ static constexpr FloatRegister non_volatile_xmm_regs[] = { XMM12, XMM13, XMM14,
V(StringBuilderLength) \
V(StringBuilderToString) \
/* 1.8 */ \
- V(MethodHandleInvokeExact) \
V(MethodHandleInvoke)
class InvokeRuntimeCallingConvention : public CallingConvention<Register, FloatRegister> {
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index c97c78ca17..d7553dd14f 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -1390,8 +1390,20 @@ bool HInstructionBuilder::BuildInvokePolymorphic(uint32_t dex_pc,
&is_string_constructor);
MethodReference method_reference(&graph_->GetDexFile(), method_idx);
+
+ bool is_invoke_exact =
+ static_cast<Intrinsics>(resolved_method->GetIntrinsic()) ==
+ Intrinsics::kMethodHandleInvokeExact;
+ bool can_be_virtual = number_of_arguments >= 2 &&
+ DataType::FromShorty(shorty[1]) == DataType::Type::kReference;
+
+ bool can_be_intrinsified = is_invoke_exact && can_be_virtual;
+
+ uint32_t number_of_other_inputs = can_be_intrinsified ? 1u : 0u;
+
HInvoke* invoke = new (allocator_) HInvokePolymorphic(allocator_,
number_of_arguments,
+ number_of_other_inputs,
return_type,
dex_pc,
method_reference,
@@ -1402,6 +1414,8 @@ bool HInstructionBuilder::BuildInvokePolymorphic(uint32_t dex_pc,
return false;
}
+ DCHECK_EQ(invoke->AsInvokePolymorphic()->CanTargetInvokeVirtual(), can_be_intrinsified);
+
if (invoke->GetIntrinsic() != Intrinsics::kNone &&
invoke->GetIntrinsic() != Intrinsics::kMethodHandleInvoke &&
invoke->GetIntrinsic() != Intrinsics::kMethodHandleInvokeExact &&
@@ -1879,6 +1893,24 @@ bool HInstructionBuilder::SetupInvokeArguments(HInstruction* invoke,
graph_->GetCurrentMethod());
}
+ if (invoke->IsInvokePolymorphic()) {
+ HInvokePolymorphic* invoke_polymorphic = invoke->AsInvokePolymorphic();
+
+ if (invoke_polymorphic->CanTargetInvokeVirtual()) {
+ HLoadMethodType* load_method_type =
+ new (allocator_) HLoadMethodType(graph_->GetCurrentMethod(),
+ invoke_polymorphic->GetProtoIndex(),
+ graph_->GetDexFile(),
+ invoke_polymorphic->GetDexPc());
+ HSharpening::ProcessLoadMethodType(load_method_type,
+ code_generator_,
+ *dex_compilation_unit_,
+ graph_->GetHandleCache()->GetHandles());
+ invoke->SetRawInputAt(invoke_polymorphic->GetNumberOfArguments(), load_method_type);
+ AppendInstruction(load_method_type);
+ }
+ }
+
return true;
}
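
Note on the graph shape after this change: for qualifying call sites the builder reserves one extra input slot after the regular arguments and fills it with the HLoadMethodType node, so the inputs are laid out as [method handle, receiver, remaining args..., call-site MethodType]. Below is a minimal sketch of a helper that reads that slot back; the helper itself is hypothetical and not part of this patch, but the index it uses matches the SetRawInputAt call above:

// Hypothetical helper (not in this patch): returns the call-site MethodType
// input that BuildInvokePolymorphic appends after the regular arguments.
HInstruction* GetCallSiteMethodType(HInvokePolymorphic* invoke) {
  DCHECK(invoke->CanTargetInvokeVirtual());
  // Inputs: [0] method handle, [1] receiver, [2..n-1] remaining arguments,
  // [n] the HLoadMethodType in the single "other input" slot.
  return invoke->InputAt(invoke->GetNumberOfArguments());
}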
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 0a60e9c642..c32595c486 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -19,10 +19,13 @@
#include <limits>
#include "arch/x86_64/instruction_set_features_x86_64.h"
+#include "arch/x86_64/registers_x86_64.h"
#include "art_method.h"
#include "base/bit_utils.h"
#include "code_generator_x86_64.h"
+#include "dex/modifiers.h"
#include "entrypoints/quick/quick_entrypoints.h"
+#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "heap_poisoning.h"
#include "intrinsics.h"
#include "intrinsic_objects.h"
@@ -32,6 +35,7 @@
#include "mirror/object_array-inl.h"
#include "mirror/reference.h"
#include "mirror/string.h"
+#include "optimizing/code_generator.h"
#include "scoped_thread_state_change-inl.h"
#include "thread-current-inl.h"
#include "utils/x86_64/assembler_x86_64.h"
@@ -141,6 +145,36 @@ class ReadBarrierSystemArrayCopySlowPathX86_64 : public SlowPathCode {
DISALLOW_COPY_AND_ASSIGN(ReadBarrierSystemArrayCopySlowPathX86_64);
};
+// Slow path for invoke-polymorphic that does not move arguments.
+class InvokePolymorphicSlowPathX86_64 : public SlowPathCode {
+ public:
+ explicit InvokePolymorphicSlowPathX86_64(HInstruction* instruction, CpuRegister method_handle)
+ : SlowPathCode(instruction), method_handle_(method_handle) {
+ DCHECK(instruction->IsInvokePolymorphic());
+ }
+
+ void EmitNativeCode(CodeGenerator* codegen) override {
+ CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
+ X86_64Assembler* assembler = x86_64_codegen->GetAssembler();
+ __ Bind(GetEntryLabel());
+ SaveLiveRegisters(codegen, instruction_->GetLocations());
+
+ __ movq(CpuRegister(RDI), method_handle_);
+ x86_64_codegen->InvokeRuntime(QuickEntrypointEnum::kQuickInvokePolymorphicWithHiddenReceiver,
+ instruction_,
+ instruction_->GetDexPc());
+
+ RestoreLiveRegisters(codegen, instruction_->GetLocations());
+ __ jmp(GetExitLabel());
+ }
+
+ const char* GetDescription() const override { return "InvokePolymorphicSlowPathX86_64"; }
+
+ private:
+ const CpuRegister method_handle_;
+ DISALLOW_COPY_AND_ASSIGN(InvokePolymorphicSlowPathX86_64);
+};
+
static void CreateFPToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
LocationSummary* locations =
new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
@@ -3606,7 +3640,7 @@ void IntrinsicLocationsBuilderX86_64::VisitMathFmaFloat(HInvoke* invoke) {
// Generate subtype check without read barriers.
static void GenerateSubTypeObjectCheckNoReadBarrier(CodeGeneratorX86_64* codegen,
- VarHandleSlowPathX86_64* slow_path,
+ SlowPathCode* slow_path,
CpuRegister object,
CpuRegister temp,
Address type_address,
@@ -4062,6 +4096,104 @@ static void GenerateVarHandleGet(HInvoke* invoke,
}
}
+void IntrinsicLocationsBuilderX86_64::VisitMethodHandleInvokeExact(HInvoke* invoke) {
+ // Don't emit intrinsic code for MethodHandle.invokeExact when it certainly cannot target
+ // invoke-virtual: when invokeExact is called without arguments, or when the first argument
+ // of that call is not a reference.
+ if (!invoke->AsInvokePolymorphic()->CanTargetInvokeVirtual()) {
+ return;
+ }
+ ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
+ LocationSummary* locations = new (allocator)
+ LocationSummary(invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
+
+ InvokeDexCallingConventionVisitorX86_64 calling_convention;
+ locations->SetOut(calling_convention.GetReturnLocation(invoke->GetType()));
+
+ locations->SetInAt(0, Location::RequiresRegister());
+
+ // Accommodate the LocationSummary for the underlying invoke-* call.
+ uint32_t number_of_args = invoke->GetNumberOfArguments();
+ for (uint32_t i = 1; i < number_of_args; ++i) {
+ locations->SetInAt(i, calling_convention.GetNextLocation(invoke->InputAt(i)->GetType()));
+ }
+
+ // The last input is the MethodType object corresponding to the call site.
+ locations->SetInAt(number_of_args, Location::RequiresRegister());
+
+ // We use a fixed-register temporary to pass the target method.
+ locations->AddTemp(calling_convention.GetMethodLocation());
+ locations->AddTemp(Location::RequiresRegister());
+}
+
+void IntrinsicCodeGeneratorX86_64::VisitMethodHandleInvokeExact(HInvoke* invoke) {
+ DCHECK(invoke->AsInvokePolymorphic()->CanTargetInvokeVirtual());
+ LocationSummary* locations = invoke->GetLocations();
+
+ CpuRegister method_handle = locations->InAt(0).AsRegister<CpuRegister>();
+
+ SlowPathCode* slow_path =
+ new (codegen_->GetScopedAllocator()) InvokePolymorphicSlowPathX86_64(invoke, method_handle);
+ codegen_->AddSlowPath(slow_path);
+ X86_64Assembler* assembler = codegen_->GetAssembler();
+
+ Address method_handle_kind = Address(method_handle, mirror::MethodHandle::HandleKindOffset());
+
+ // If the kind is not InvokeVirtual, go to the slow path.
+ // Even if the MethodHandle's kind is kInvokeVirtual, the underlying method can still be an
+ // interface or direct method (that is what the current `MethodHandles$Lookup.findVirtual`
+ // produces). We don't check explicitly whether `method` is an interface method: in that case
+ // the subtype check below will fail.
+ // TODO(b/297147201): check whether this can be more precise and what d8/r8 can produce.
+ __ cmpl(method_handle_kind, Immediate(mirror::MethodHandle::Kind::kInvokeVirtual));
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+
+ CpuRegister call_site_type =
+ locations->InAt(invoke->GetNumberOfArguments()).AsRegister<CpuRegister>();
+
+ // Call site should match with MethodHandle's type.
+ __ cmpl(call_site_type, Address(method_handle, mirror::MethodHandle::MethodTypeOffset()));
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+
+ CpuRegister method = locations->GetTemp(0).AsRegister<CpuRegister>();
+
+ // Find method to call.
+ __ movq(method, Address(method_handle, mirror::MethodHandle::ArtFieldOrMethodOffset()));
+
+ CpuRegister receiver = locations->InAt(1).AsRegister<CpuRegister>();
+
+ // Use the vtable_index register as a temporary for the subtype check; it is overwritten later.
+ // If `method` is an interface method this check will fail.
+ CpuRegister vtable_index = locations->GetTemp(1).AsRegister<CpuRegister>();
+ GenerateSubTypeObjectCheckNoReadBarrier(codegen_,
+ slow_path,
+ receiver,
+ vtable_index,
+ Address(method, ArtMethod::DeclaringClassOffset()));
+
+ NearLabel execute_target_method;
+ // Skip virtual dispatch if `method` is private.
+ __ testl(Address(method, ArtMethod::AccessFlagsOffset()), Immediate(kAccPrivate));
+ __ j(kNotZero, &execute_target_method);
+
+ // MethodIndex is uint16_t.
+ __ movzxw(vtable_index, Address(method, ArtMethod::MethodIndexOffset()));
+
+ constexpr uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ // Reuse the method register to hold the receiver's class.
+ __ movl(method, Address(receiver, class_offset));
+
+ constexpr uint32_t vtable_offset =
+ mirror::Class::EmbeddedVTableOffset(art::PointerSize::k64).Int32Value();
+ __ movq(method, Address(method, vtable_index, TIMES_8, vtable_offset));
+
+ __ Bind(&execute_target_method);
+ __ call(Address(
+ method,
+ ArtMethod::EntryPointFromQuickCompiledCodeOffset(art::PointerSize::k64).SizeValue()));
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc(), slow_path);
+ __ Bind(slow_path->GetExitLabel());
+}
+
void IntrinsicLocationsBuilderX86_64::VisitVarHandleGet(HInvoke* invoke) {
CreateVarHandleGetLocations(invoke, codegen_);
}
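
Taken together, the intrinsic emits four guards before dispatching. The following is a simplified C++-level sketch of that control flow; SlowPath and CallQuickEntryPoint are illustrative names (the real code emits this directly as x86-64 assembly and also handles read barriers and register moves):

// Sketch only: mirrors the emitted assembly above. SlowPath and
// CallQuickEntryPoint are assumed names; the mirror::/ArtMethod accessors
// stand in for the raw offset loads the assembly performs.
void InvokeExactFastPath(mirror::MethodHandle* handle,
                         mirror::Object* receiver,
                         mirror::MethodType* call_site_type) {
  // (1) Only kInvokeVirtual handles are handled inline; and
  // (2) the call-site MethodType must match the handle's type exactly.
  if (handle->GetHandleKind() != mirror::MethodHandle::Kind::kInvokeVirtual ||
      handle->GetMethodType() != call_site_type) {
    return SlowPath(handle);  // kQuickInvokePolymorphicWithHiddenReceiver
  }
  ArtMethod* method = handle->GetTargetMethod();
  // (3) Receiver subtype check. It fails when the declaring class is an
  // interface, which is how interface targets reach the slow path without
  // an explicit access-flags test.
  if (!receiver->GetClass()->IsSubClass(method->GetDeclaringClass())) {
    return SlowPath(handle);
  }
  // (4) Private methods are called directly; otherwise the target is
  // re-loaded from the receiver's embedded vtable via the method index.
  if (!method->IsPrivate()) {
    method = receiver->GetClass()->GetEmbeddedVTableEntry(
        method->GetMethodIndex(), PointerSize::k64);
  }
  CallQuickEntryPoint(method);  // entry_point_from_quick_compiled_code_
}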
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 23cc3704e9..ffddd25843 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -4912,6 +4912,7 @@ class HInvokePolymorphic final : public HInvoke {
public:
HInvokePolymorphic(ArenaAllocator* allocator,
uint32_t number_of_arguments,
+ uint32_t number_of_other_inputs,
DataType::Type return_type,
uint32_t dex_pc,
MethodReference method_reference,
@@ -4924,7 +4925,7 @@ class HInvokePolymorphic final : public HInvoke {
: HInvoke(kInvokePolymorphic,
allocator,
number_of_arguments,
- /* number_of_other_inputs= */ 0u,
+ number_of_other_inputs,
return_type,
dex_pc,
method_reference,
@@ -4938,6 +4939,12 @@ class HInvokePolymorphic final : public HInvoke {
dex::ProtoIndex GetProtoIndex() { return proto_idx_; }
+ bool CanTargetInvokeVirtual() const {
+ return GetIntrinsic() == Intrinsics::kMethodHandleInvokeExact &&
+ GetNumberOfArguments() >= 2 &&
+ InputAt(1)->GetType() == DataType::Type::kReference;
+ }
+
DECLARE_INSTRUCTION(InvokePolymorphic);
protected:
diff --git a/compiler/utils/assembler_thumb_test_expected.cc.inc b/compiler/utils/assembler_thumb_test_expected.cc.inc
index f96a9bc154..5184b2c897 100644
--- a/compiler/utils/assembler_thumb_test_expected.cc.inc
+++ b/compiler/utils/assembler_thumb_test_expected.cc.inc
@@ -154,7 +154,7 @@ const char* const VixlJniHelpersResults = {
" 210: f8d9 8020 ldr.w r8, [r9, #32]\n"
" 214: 4770 bx lr\n"
" 216: f8d9 0094 ldr.w r0, [r9, #148]\n"
- " 21a: f8d9 e2bc ldr.w lr, [r9, #700]\n"
+ " 21a: f8d9 e2c0 ldr.w lr, [r9, #704]\n"
" 21e: 47f0 blx lr\n"
};
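
The arm expectation changes because the Thread entrypoint table gains one pointer-sized slot ahead of pDeliverException (per the commit message, the new entrypoint shifts deliverException's offset), which is 4 bytes on a 32-bit target. A sanity check of that arithmetic, with the slot position assumed from the commit message:

// Illustrative arithmetic only: one extra pointer-sized entrypoint slot
// before pDeliverException moves it from offset 700 to 704 on 32-bit arm,
// matching the updated expected disassembly above.
constexpr size_t kArm32PointerSize = 4;
constexpr size_t kOldDeliverExceptionOffset = 700;
constexpr size_t kNewDeliverExceptionOffset =
    kOldDeliverExceptionOffset + kArm32PointerSize;
static_assert(kNewDeliverExceptionOffset == 704, "matches the test change");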