author     2025-03-11 03:24:14 -0700
committer  2025-03-20 03:50:21 -0700
commit     59aaec4d51436995ace22c4bd6f35ff7f6f8aaf4 (patch)
tree       4241d68db637844d86522d0445813d711ff5e8a4 /compiler
parent     a23f36390858d148e7ab2cc8ff159b138946595e (diff)
Revert^2 "Call target method in accessor MHs when it is set."
Previous LUCI crashes were due to b/404465902, which was
fixed in aosp/3551420.
Bug: 297147201
Test: ART_HEAP_POISONING=true art/test/testrunner/testrunner.py -b \
--host --baseline --debug --gcstress --64
Test: art/test/testrunner/testrunner.py -b \
--host --baseline --debug --gcstress --64
Change-Id: If93b9c6559ac8ddbc3211df1cbc4dec192cb054a
Diffstat (limited to 'compiler')
-rw-r--r--  compiler/optimizing/intrinsics_arm64.cc   | 35
-rw-r--r--  compiler/optimizing/intrinsics_x86_64.cc  | 35
2 files changed, 61 insertions(+), 9 deletions(-)
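Before reading the diff, it may help to see the shape of the new fast path. For accessor-kind handles, the emitted code now compares the handle's kind against kFirstAccessorKind and, when it is at or above that value, loads the method from the handle's target field and jumps straight to the common execute_target_method path, skipping the per-kind dispatch. The C++ below is a minimal schematic model of that ordering, not ART source: the struct layout, the Kind values, and the SelectMethod helper are illustrative assumptions.

    // Schematic model of the new dispatch order (illustrative types only; the
    // real code is the generated arm64/x86-64 assembly in the diff below).
    #include <cstdint>
    #include <cstdio>

    struct ArtMethod { const char* name; };

    // Assumed kind numbering: invoke kinds first, accessor kinds after them,
    // mirroring the kFirstAccessorKind comparison in the generated code.
    enum Kind : int32_t {
      kInvokeStatic,
      kInvokeInstance,
      kInstanceGet,  // first accessor kind
      kInstancePut,
      kStaticGet,
      kStaticPut,
    };
    constexpr Kind kFirstAccessorKind = kInstanceGet;

    struct MethodHandle {
      Kind kind;                       // MethodHandle::HandleKindOffset()
      ArtMethod* art_field_or_method;  // MethodHandle::ArtFieldOrMethodOffset()
      ArtMethod* target_method;        // MethodHandleImpl::TargetOffset()
    };

    // Picks the ArtMethod the intrinsic will execute for an exact invoke.
    ArtMethod* SelectMethod(const MethodHandle& mh) {
      if (mh.kind >= kFirstAccessorKind) {
        // New fast path: an accessor handle with its target set calls the
        // target method directly (unset-target handling is outside the
        // hunks quoted here).
        return mh.target_method;
      }
      // Pre-existing path: dispatch on the handle kind (invoke-static, ...).
      return mh.art_field_or_method;
    }

    int main() {
      ArtMethod getter{"Point.getX"};
      MethodHandle mh{kInstanceGet, /*art_field_or_method=*/nullptr, &getter};
      std::printf("dispatch to: %s\n", SelectMethod(mh)->name);
    }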
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 75607b77e0..c4f8681631 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -16,6 +16,8 @@
 #include "intrinsics_arm64.h"
 
+#include "aarch64/assembler-aarch64.h"
+#include "aarch64/operands-aarch64.h"
 #include "arch/arm64/callee_save_frame_arm64.h"
 #include "arch/arm64/instruction_set_features_arm64.h"
 #include "art_method.h"
@@ -33,6 +35,7 @@
 #include "mirror/array-inl.h"
 #include "mirror/class.h"
 #include "mirror/method_handle_impl.h"
+#include "mirror/method_type.h"
 #include "mirror/object.h"
 #include "mirror/object_array-inl.h"
 #include "mirror/reference.h"
@@ -5981,30 +5984,46 @@ void IntrinsicLocationsBuilderARM64::VisitMethodHandleInvokeExact(HInvoke* invok
   InvokeDexCallingConventionVisitorARM64 calling_convention;
   locations->SetOut(calling_convention.GetReturnLocation(invoke->GetType()));
 
-  locations->SetInAt(0, Location::RequiresRegister());
-
   // Accomodating LocationSummary for underlying invoke-* call.
   uint32_t number_of_args = invoke->GetNumberOfArguments();
+
   for (uint32_t i = 1; i < number_of_args; ++i) {
     locations->SetInAt(i, calling_convention.GetNextLocation(invoke->InputAt(i)->GetType()));
   }
 
+  // Passing MethodHandle object as the last parameter: accessors implementation rely on it.
+  DCHECK_EQ(invoke->InputAt(0)->GetType(), DataType::Type::kReference);
+  Location receiver_mh_loc = calling_convention.GetNextLocation(DataType::Type::kReference);
+  locations->SetInAt(0, receiver_mh_loc);
+
   // The last input is MethodType object corresponding to the call-site.
   locations->SetInAt(number_of_args, Location::RequiresRegister());
 
   locations->AddTemp(calling_convention.GetMethodLocation());
   locations->AddRegisterTemps(4);
+
+  if (!receiver_mh_loc.IsRegister()) {
+    locations->AddTemp(Location::RequiresRegister());
+  }
 }
 
 void IntrinsicCodeGeneratorARM64::VisitMethodHandleInvokeExact(HInvoke* invoke) {
   LocationSummary* locations = invoke->GetLocations();
+  MacroAssembler* masm = codegen_->GetVIXLAssembler();
+
+  Location receiver_mh_loc = locations->InAt(0);
+  Register method_handle = receiver_mh_loc.IsRegister()
+      ? InputRegisterAt(invoke, 0)
+      : WRegisterFrom(locations->GetTemp(5));
 
-  Register method_handle = InputRegisterAt(invoke, 0);
+  if (!receiver_mh_loc.IsRegister()) {
+    DCHECK(receiver_mh_loc.IsStackSlot());
+    __ Ldr(method_handle.W(), MemOperand(sp, receiver_mh_loc.GetStackIndex()));
+  }
 
   SlowPathCodeARM64* slow_path =
       new (codegen_->GetScopedAllocator()) InvokePolymorphicSlowPathARM64(invoke, method_handle);
   codegen_->AddSlowPath(slow_path);
 
-  MacroAssembler* masm = codegen_->GetVIXLAssembler();
-
   Register call_site_type = InputRegisterAt(invoke, invoke->GetNumberOfArguments());
@@ -6019,10 +6038,18 @@ void IntrinsicCodeGeneratorARM64::VisitMethodHandleInvokeExact(HInvoke* invoke)
   __ Ldr(method, HeapOperand(method_handle.W(), mirror::MethodHandle::ArtFieldOrMethodOffset()));
 
   vixl::aarch64::Label execute_target_method;
+  vixl::aarch64::Label method_dispatch;
 
   Register method_handle_kind = WRegisterFrom(locations->GetTemp(2));
   __ Ldr(method_handle_kind,
          HeapOperand(method_handle.W(), mirror::MethodHandle::HandleKindOffset()));
+
+  __ Cmp(method_handle_kind, Operand(mirror::MethodHandle::Kind::kFirstAccessorKind));
+  __ B(lt, &method_dispatch);
+  __ Ldr(method, HeapOperand(method_handle.W(), mirror::MethodHandleImpl::TargetOffset()));
+  __ B(&execute_target_method);
+
+  __ Bind(&method_dispatch);
   __ Cmp(method_handle_kind, Operand(mirror::MethodHandle::Kind::kInvokeStatic));
   __ B(eq, &execute_target_method);
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index f891447848..d3ee1759e0 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -32,6 +32,7 @@
 #include "intrinsics_utils.h"
 #include "lock_word.h"
 #include "mirror/array-inl.h"
+#include "mirror/method_handle_impl.h"
 #include "mirror/object_array-inl.h"
 #include "mirror/reference.h"
 #include "mirror/string.h"
@@ -4241,31 +4242,47 @@ void IntrinsicLocationsBuilderX86_64::VisitMethodHandleInvokeExact(HInvoke* invo
   InvokeDexCallingConventionVisitorX86_64 calling_convention;
   locations->SetOut(calling_convention.GetReturnLocation(invoke->GetType()));
 
-  locations->SetInAt(0, Location::RequiresRegister());
+  uint32_t number_of_args = invoke->GetNumberOfArguments();
 
   // Accomodating LocationSummary for underlying invoke-* call.
-  uint32_t number_of_args = invoke->GetNumberOfArguments();
   for (uint32_t i = 1; i < number_of_args; ++i) {
     locations->SetInAt(i, calling_convention.GetNextLocation(invoke->InputAt(i)->GetType()));
   }
 
+  // Passing MethodHandle object as the last parameter: accessors implementation rely on it.
+  DCHECK_EQ(invoke->InputAt(0)->GetType(), DataType::Type::kReference);
+  Location receiver_mh_loc = calling_convention.GetNextLocation(DataType::Type::kReference);
+  locations->SetInAt(0, receiver_mh_loc);
+
   // The last input is MethodType object corresponding to the call-site.
   locations->SetInAt(number_of_args, Location::RequiresRegister());
 
   locations->AddTemp(Location::RequiresRegister());  // Hidden arg for invoke-interface.
   locations->AddTemp(Location::RegisterLocation(RAX));
+
+  if (!receiver_mh_loc.IsRegister()) {
+    locations->AddTemp(Location::RequiresRegister());
+  }
 }
 
 void IntrinsicCodeGeneratorX86_64::VisitMethodHandleInvokeExact(HInvoke* invoke) {
   LocationSummary* locations = invoke->GetLocations();
+  X86_64Assembler* assembler = codegen_->GetAssembler();
 
-  CpuRegister method_handle = locations->InAt(0).AsRegister<CpuRegister>();
+  Location receiver_mh_loc = locations->InAt(0);
+  CpuRegister method_handle = receiver_mh_loc.IsRegister()
+      ? receiver_mh_loc.AsRegister<CpuRegister>()
+      : locations->GetTemp(2).AsRegister<CpuRegister>();
+
+  if (!receiver_mh_loc.IsRegister()) {
+    DCHECK(receiver_mh_loc.IsStackSlot());
+    __ movl(method_handle, Address(CpuRegister(RSP), receiver_mh_loc.GetStackIndex()));
+  }
 
   SlowPathCode* slow_path =
       new (codegen_->GetScopedAllocator()) InvokePolymorphicSlowPathX86_64(invoke, method_handle);
   codegen_->AddSlowPath(slow_path);
 
-  X86_64Assembler* assembler = codegen_->GetAssembler();
-
   CpuRegister call_site_type =
       locations->InAt(invoke->GetNumberOfArguments()).AsRegister<CpuRegister>();
@@ -4288,10 +4305,18 @@ void IntrinsicCodeGeneratorX86_64::VisitMethodHandleInvokeExact(HInvoke* invoke)
   CpuRegister method = CpuRegister(kMethodRegisterArgument);
   __ movq(method, Address(method_handle, mirror::MethodHandle::ArtFieldOrMethodOffset()));
 
-  Label static_dispatch;
   Label execute_target_method;
+  Label method_dispatch;
+  Label static_dispatch;
 
   Address method_handle_kind = Address(method_handle, mirror::MethodHandle::HandleKindOffset());
+
+  __ cmpl(method_handle_kind, Immediate(mirror::MethodHandle::kFirstAccessorKind));
+  __ j(kLess, &method_dispatch);
+  __ movq(method, Address(method_handle, mirror::MethodHandleImpl::TargetOffset()));
+  __ Jump(&execute_target_method);
+
+  __ Bind(&method_dispatch);
   if (invoke->AsInvokePolymorphic()->CanTargetInstanceMethod()) {
     CpuRegister receiver = locations->InAt(1).AsRegister<CpuRegister>();
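One design note both backends share: the MethodHandle receiver (input 0) is now assigned its calling-convention position after the other arguments, so on register-starved signatures it can land in a stack slot rather than a register. The builders therefore reserve an extra temp register only in that case, and the generated code reloads the reference from its stack slot before the fast path uses it. Below is a minimal sketch of that pattern with mocked-up types; the Location struct and the temp index are stand-ins, not ART's real LocationSummary API.

    // Mock of the "register or stack slot" materialization pattern used by
    // both VisitMethodHandleInvokeExact implementations above.
    #include <cstdio>

    struct Location {
      bool is_register;
      int value;  // register number, or byte offset of the stack slot
      bool IsRegister() const { return is_register; }
      int GetStackIndex() const { return value; }
    };

    int main() {
      // Assume the calling convention spilled the MethodHandle to [sp + 8].
      Location receiver_mh_loc{/*is_register=*/false, /*value=*/8};
      const int temp_reg = 5;  // reserved at location-build time only when needed

      int method_handle = receiver_mh_loc.IsRegister() ? receiver_mh_loc.value
                                                       : temp_reg;
      if (!receiver_mh_loc.IsRegister()) {
        // What the backend emits here: a load of the reference from its slot.
        std::printf("ldr w%d, [sp, #%d]\n", method_handle,
                    receiver_mh_loc.GetStackIndex());
      }
      return 0;
    }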