author Andra Danciu <andradanciu@google.com> 2020-07-30 12:19:31 +0000
committer Vladimir Marko <vmarko@google.com> 2020-08-03 10:03:16 +0000
commit e3e187f29fa4025e30c5a43decb2b6f6c584d59c (patch)
tree b38e434558cc2c6d7a8153c709a9884313cd4df1 /compiler
parent 1a277a6e5d5152b4fe4dd5717432ecf8941ec820 (diff)
Check if VarHandle access mode is supported.
This commit checks whether a VarHandle access mode is supported. If it is not, an UnsupportedOperationException is raised by calling into the runtime to handle it. I added the polymorphic intrinsics case to the IntrinsicSlowPath code generation so that any exceptions thrown there are handled. For now, none of the operations are actually compiled: even when the slow path is not taken, the runtime handles the operation.

Bug: b/65872996
Test: art/test.py --host -r -t 712-varhandle-invocations --32
Test: art/test.py --host --all-compiler -r
Change-Id: I5a637561549b3fdd64fa53e2d7dbf835d3ae0d64
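For context, a minimal Java sketch (not part of this change; the class and field names are made up for illustration) of the behavior the generated check preserves: a VarHandle over a final field reports the write access modes as unsupported, and invoking one of them throws UnsupportedOperationException, which is what the runtime raises when the AccessModesBitMask test in the intrinsic fails.

import java.lang.invoke.MethodHandles;
import java.lang.invoke.VarHandle;

public class VarHandleModeDemo {
    // A final field only supports the read access modes (GET, GET_ACQUIRE, ...).
    static final int CONST = 42;

    public static void main(String[] args) throws Exception {
        VarHandle vh = MethodHandles.lookup()
                .findStaticVarHandle(VarHandleModeDemo.class, "CONST", int.class);

        System.out.println((int) vh.get());                                      // GET is supported: prints 42
        System.out.println(vh.isAccessModeSupported(VarHandle.AccessMode.SET));  // prints false

        try {
            vh.set(7);  // SET is not a supported access mode for a final field
        } catch (UnsupportedOperationException expected) {
            System.out.println("unsupported access mode handled by the runtime");
        }
    }
}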
Diffstat (limited to 'compiler')
-rw-r--r--  compiler/optimizing/code_generator.cc    5
-rw-r--r--  compiler/optimizing/code_generator.h     2
-rw-r--r--  compiler/optimizing/intrinsics_utils.h   9
-rw-r--r--  compiler/optimizing/intrinsics_x86.cc   52
-rw-r--r--  compiler/optimizing/locations.h          17
5 files changed, 78 insertions, 7 deletions
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 8e64e1819e..6bfdacfb7f 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -630,12 +630,13 @@ void CodeGenerator::GenerateInvokeUnresolvedRuntimeCall(HInvokeUnresolved* invok
InvokeRuntime(entrypoint, invoke, invoke->GetDexPc(), nullptr);
}
-void CodeGenerator::GenerateInvokePolymorphicCall(HInvokePolymorphic* invoke) {
+void CodeGenerator::GenerateInvokePolymorphicCall(HInvokePolymorphic* invoke,
+ SlowPathCode* slow_path) {
// invoke-polymorphic does not use a temporary to convey any additional information (e.g. a
// method index) since it requires multiple info from the instruction (registers A, B, H). Not
// using the reservation has no effect on the registers used in the runtime call.
QuickEntrypointEnum entrypoint = kQuickInvokePolymorphic;
- InvokeRuntime(entrypoint, invoke, invoke->GetDexPc(), nullptr);
+ InvokeRuntime(entrypoint, invoke, invoke->GetDexPc(), slow_path);
}
void CodeGenerator::GenerateInvokeCustomCall(HInvokeCustom* invoke) {
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 12e2e9745e..1a01be9708 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -597,7 +597,7 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
void GenerateInvokeUnresolvedRuntimeCall(HInvokeUnresolved* invoke);
- void GenerateInvokePolymorphicCall(HInvokePolymorphic* invoke);
+ void GenerateInvokePolymorphicCall(HInvokePolymorphic* invoke, SlowPathCode* slow_path = nullptr);
void GenerateInvokeCustomCall(HInvokeCustom* invoke);
diff --git a/compiler/optimizing/intrinsics_utils.h b/compiler/optimizing/intrinsics_utils.h
index 29f815c1be..4b8a8e743d 100644
--- a/compiler/optimizing/intrinsics_utils.h
+++ b/compiler/optimizing/intrinsics_utils.h
@@ -65,15 +65,18 @@ class IntrinsicSlowPath : public TSlowPathCode {
DCHECK_NE(invoke_static_or_direct->GetCodePtrLocation(),
HInvokeStaticOrDirect::CodePtrLocation::kCallCriticalNative);
codegen->GenerateStaticOrDirectCall(invoke_static_or_direct, method_loc, this);
- } else {
+ } else if (invoke_->IsInvokeVirtual()) {
codegen->GenerateVirtualCall(invoke_->AsInvokeVirtual(), method_loc, this);
+ } else {
+ DCHECK(invoke_->IsInvokePolymorphic());
+ codegen->GenerateInvokePolymorphicCall(invoke_->AsInvokePolymorphic(), this);
}
// Copy the result back to the expected output.
Location out = invoke_->GetLocations()->Out();
if (out.IsValid()) {
- DCHECK(out.IsRegister()); // TODO: Replace this when we support output in memory.
- DCHECK(!invoke_->GetLocations()->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
+ DCHECK(out.IsRegisterKind()); // TODO: Replace this when we support output in memory.
+ DCHECK(!invoke_->GetLocations()->GetLiveRegisters()->OverlapsRegisters(out));
codegen->MoveFromReturnRegister(out, invoke_->GetType());
}
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index e13a965dae..7bd6b04ba5 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -31,6 +31,7 @@
#include "mirror/object_array-inl.h"
#include "mirror/reference.h"
#include "mirror/string.h"
+#include "mirror/var_handle.h"
#include "scoped_thread_state_change-inl.h"
#include "thread-current-inl.h"
#include "utils/x86/assembler_x86.h"
@@ -3064,6 +3065,56 @@ void IntrinsicCodeGeneratorX86::VisitIntegerDivideUnsigned(HInvoke* invoke) {
__ Bind(slow_path->GetExitLabel());
}
+static void CreateVarHandleLocationSummary(HInvoke* invoke, ArenaAllocator* allocator) {
+ InvokeDexCallingConventionVisitorX86 visitor;
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
+
+ for (size_t i = 0; i < invoke->GetNumberOfArguments(); i++) {
+ HInstruction* input = invoke->InputAt(i);
+ locations->SetInAt(i, visitor.GetNextLocation(input->GetType()));
+ }
+
+ locations->SetOut(visitor.GetReturnLocation(invoke->GetType()));
+}
+
+#define INTRINSIC_VARHANDLE_LOCATIONS_BUILDER(Name) \
+void IntrinsicLocationsBuilderX86::Visit ## Name(HInvoke* invoke) { \
+ CreateVarHandleLocationSummary(invoke, allocator_); \
+}
+
+INTRINSIC_VARHANDLE_LOCATIONS_BUILDER(VarHandleGet)
+
+static void GenerateVarHandleCode(HInvoke* invoke, CodeGeneratorX86* codegen) {
+ X86Assembler* assembler = codegen->GetAssembler();
+ Register varhandle_object = invoke->GetLocations()->InAt(0).AsRegister<Register>();
+ const uint32_t access_modes_bitmask_offset =
+ mirror::VarHandle::AccessModesBitMaskOffset().Uint32Value();
+ mirror::VarHandle::AccessMode access_mode =
+ mirror::VarHandle::GetAccessModeByIntrinsic(invoke->GetIntrinsic());
+ const uint32_t access_mode_bit = 1u << static_cast<uint32_t>(access_mode);
+
+ // If the access mode is not supported, bail out to the runtime implementation to handle it.
+ __ testl(Address(varhandle_object, access_modes_bitmask_offset), Immediate(access_mode_bit));
+ SlowPathCode* slow_path = new (codegen->GetScopedAllocator()) IntrinsicSlowPathX86(invoke);
+ codegen->AddSlowPath(slow_path);
+ __ j(kZero, slow_path->GetEntryLabel());
+
+ // For now, none of the access modes are compiled. The runtime handles them on
+ // both slow path and main path.
+ // TODO: replace calling the runtime with actual assembly code
+ codegen->GenerateInvokePolymorphicCall(invoke->AsInvokePolymorphic());
+
+ __ Bind(slow_path->GetExitLabel());
+}
+
+#define INTRINSIC_VARHANDLE_CODE_GENERATOR(Name) \
+void IntrinsicCodeGeneratorX86::Visit ## Name(HInvoke* invoke) { \
+ GenerateVarHandleCode(invoke, codegen_); \
+}
+
+INTRINSIC_VARHANDLE_CODE_GENERATOR(VarHandleGet)
+
UNIMPLEMENTED_INTRINSIC(X86, MathRoundDouble)
UNIMPLEMENTED_INTRINSIC(X86, ReferenceGetReferent)
UNIMPLEMENTED_INTRINSIC(X86, FloatIsInfinite)
@@ -3119,7 +3170,6 @@ UNIMPLEMENTED_INTRINSIC(X86, VarHandleCompareAndExchange)
UNIMPLEMENTED_INTRINSIC(X86, VarHandleCompareAndExchangeAcquire)
UNIMPLEMENTED_INTRINSIC(X86, VarHandleCompareAndExchangeRelease)
UNIMPLEMENTED_INTRINSIC(X86, VarHandleCompareAndSet)
-UNIMPLEMENTED_INTRINSIC(X86, VarHandleGet)
UNIMPLEMENTED_INTRINSIC(X86, VarHandleGetAcquire)
UNIMPLEMENTED_INTRINSIC(X86, VarHandleGetAndAdd)
UNIMPLEMENTED_INTRINSIC(X86, VarHandleGetAndAddAcquire)
diff --git a/compiler/optimizing/locations.h b/compiler/optimizing/locations.h
index 8f5eed7afd..2a09921ba4 100644
--- a/compiler/optimizing/locations.h
+++ b/compiler/optimizing/locations.h
@@ -478,6 +478,23 @@ class RegisterSet : public ValueObject {
return (register_set & (1 << reg)) != 0;
}
+ bool OverlapsRegisters(Location out) {
+ DCHECK(out.IsRegisterKind());
+ switch (out.GetKind()) {
+ case Location::Kind::kRegister:
+ return ContainsCoreRegister(out.reg());
+ case Location::Kind::kFpuRegister:
+ return ContainsFloatingPointRegister(out.reg());
+ case Location::Kind::kRegisterPair:
+ return ContainsCoreRegister(out.low()) || ContainsCoreRegister(out.high());
+ case Location::Kind::kFpuRegisterPair:
+ return ContainsFloatingPointRegister(out.low()) ||
+ ContainsFloatingPointRegister(out.high());
+ default:
+ return false;
+ }
+ }
+
size_t GetNumberOfRegisters() const {
return POPCOUNT(core_registers_) + POPCOUNT(floating_point_registers_);
}