Check if VarHandle access mode is supported.

This commit adds a check of whether a VarHandle access mode is supported.
If it is not, the generated code calls into the runtime, which raises an
UnsupportedOperationException.
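
Conceptually, the generated code tests one bit per access mode in the
VarHandle's access-modes bitmask. A minimal C++ sketch of the intended
semantics (the helper name is illustrative, not part of this change):

    static bool IsAccessModeSupported(uint32_t access_modes_bitmask,
                                      mirror::VarHandle::AccessMode mode) {
      // Each access mode owns one bit in the bitmask; a clear bit means
      // the mode is unsupported and the runtime must throw.
      return (access_modes_bitmask & (1u << static_cast<uint32_t>(mode))) != 0u;
    }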

I added an invoke-polymorphic case to the IntrinsicSlowPath code
generation so that any exceptions raised there are handled. For now, none
of the access modes are actually compiled: even when the slow path is not
taken, the main path calls into the runtime to perform the operation.
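
The slow-path change amounts to dispatching on the invoke kind. A
simplified sketch of the idea (the surrounding slow-path code is not part
of this diff, and the exact structure may differ):

    // Inside IntrinsicSlowPathX86::EmitNativeCode(), after saving live
    // registers (sketch):
    if (invoke_->IsInvokePolymorphic()) {
      codegen->GenerateInvokePolymorphicCall(invoke_->AsInvokePolymorphic());
    } else {
      // Existing handling for static/direct and virtual invokes.
    }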

Bug: b/65872996
Test: art/test.py --host -r -t 712-varhandle-invocations --32
Test: art/test.py --host --all-compiler -r
Change-Id: I5a637561549b3fdd64fa53e2d7dbf835d3ae0d64
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index e13a965..7bd6b04 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -31,6 +31,7 @@
 #include "mirror/object_array-inl.h"
 #include "mirror/reference.h"
 #include "mirror/string.h"
+#include "mirror/var_handle.h"
 #include "scoped_thread_state_change-inl.h"
 #include "thread-current-inl.h"
 #include "utils/x86/assembler_x86.h"
@@ -3064,6 +3065,56 @@
   __ Bind(slow_path->GetExitLabel());
 }
 
+static void CreateVarHandleLocationSummary(HInvoke* invoke, ArenaAllocator* allocator) {
+  InvokeDexCallingConventionVisitorX86 visitor;
+  LocationSummary* locations = new (allocator) LocationSummary(
+      invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
+
+  for (size_t i = 0; i < invoke->GetNumberOfArguments(); i++) {
+    HInstruction* input = invoke->InputAt(i);
+    locations->SetInAt(i, visitor.GetNextLocation(input->GetType()));
+  }
+
+  locations->SetOut(visitor.GetReturnLocation(invoke->GetType()));
+}
+
+#define INTRINSIC_VARHANDLE_LOCATIONS_BUILDER(Name)                   \
+void IntrinsicLocationsBuilderX86::Visit ## Name(HInvoke* invoke) {   \
+  CreateVarHandleLocationSummary(invoke, allocator_);                 \
+}
+
+INTRINSIC_VARHANDLE_LOCATIONS_BUILDER(VarHandleGet)
+
+static void GenerateVarHandleCode(HInvoke* invoke, CodeGeneratorX86* codegen) {
+  X86Assembler* assembler = codegen->GetAssembler();
+  Register varhandle_object = invoke->GetLocations()->InAt(0).AsRegister<Register>();
+  const uint32_t access_modes_bitmask_offset =
+      mirror::VarHandle::AccessModesBitMaskOffset().Uint32Value();
+  mirror::VarHandle::AccessMode access_mode =
+      mirror::VarHandle::GetAccessModeByIntrinsic(invoke->GetIntrinsic());
+  const uint32_t access_mode_bit = 1u << static_cast<uint32_t>(access_mode);
+
+  // If the access mode is not supported, bail out to the runtime implementation.
+  __ testl(Address(varhandle_object, access_modes_bitmask_offset), Immediate(access_mode_bit));
+  SlowPathCode* slow_path = new (codegen->GetScopedAllocator()) IntrinsicSlowPathX86(invoke);
+  codegen->AddSlowPath(slow_path);
+  __ j(kZero, slow_path->GetEntryLabel());
+
+  // For now, none of the access modes are compiled; the runtime handles them
+  // on both the slow path and the main path.
+  // TODO: Replace the runtime call with generated assembly code.
+  codegen->GenerateInvokePolymorphicCall(invoke->AsInvokePolymorphic());
+
+  __ Bind(slow_path->GetExitLabel());
+}
+
+#define INTRINSIC_VARHANDLE_CODE_GENERATOR(Name)                   \
+void IntrinsicCodeGeneratorX86::Visit ## Name(HInvoke* invoke) {   \
+  GenerateVarHandleCode(invoke, codegen_);                         \
+}
+
+INTRINSIC_VARHANDLE_CODE_GENERATOR(VarHandleGet)
+
 UNIMPLEMENTED_INTRINSIC(X86, MathRoundDouble)
 UNIMPLEMENTED_INTRINSIC(X86, ReferenceGetReferent)
 UNIMPLEMENTED_INTRINSIC(X86, FloatIsInfinite)
@@ -3119,7 +3170,6 @@
 UNIMPLEMENTED_INTRINSIC(X86, VarHandleCompareAndExchangeAcquire)
 UNIMPLEMENTED_INTRINSIC(X86, VarHandleCompareAndExchangeRelease)
 UNIMPLEMENTED_INTRINSIC(X86, VarHandleCompareAndSet)
-UNIMPLEMENTED_INTRINSIC(X86, VarHandleGet)
 UNIMPLEMENTED_INTRINSIC(X86, VarHandleGetAcquire)
 UNIMPLEMENTED_INTRINSIC(X86, VarHandleGetAndAdd)
 UNIMPLEMENTED_INTRINSIC(X86, VarHandleGetAndAddAcquire)