Revert "Revert "ART: Reference.getReferent intrinsic for x86 and x86_64""
This reverts commit 0997d24e67d78f2146ebae2888eda0d7d254789a.
ART_HEAP_POISONING=true mode is now handled correctly: the intrinsic's fast path unpoisons the loaded referent via MaybeUnpoisonHeapReference.
Change-Id: I83f6d5c101ea6a86802753f81b3e4348a263fb21
Signed-off-by: Serguei Katkov <serguei.i.katkov@intel.com>
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 4ee2368..654c393 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -2719,7 +2719,65 @@
GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long */ true);
}
-UNIMPLEMENTED_INTRINSIC(X86_64, ReferenceGetReferent)
+void IntrinsicLocationsBuilderX86_64::VisitReferenceGetReferent(HInvoke* invoke) {
+ if (kEmitCompilerReadBarrier) {
+ // Do not intrinsify this call with the read barrier configuration.
+ return;
+ }
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCallOnSlowPath,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::SameAsFirstInput());
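+  // A temp register is needed to hold the callee ArtMethod* and, later, its declaring class.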
+ locations->AddTemp(Location::RequiresRegister());
+}
+
+void IntrinsicCodeGeneratorX86_64::VisitReferenceGetReferent(HInvoke* invoke) {
+ DCHECK(!kEmitCompilerReadBarrier);
+ LocationSummary* locations = invoke->GetLocations();
+ X86_64Assembler* assembler = GetAssembler();
+
+ CpuRegister obj = locations->InAt(0).AsRegister<CpuRegister>();
+ CpuRegister out = locations->Out().AsRegister<CpuRegister>();
+
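+  // The slow path falls back to the original Reference.getReferent() call.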
+ SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathX86_64(invoke);
+ codegen_->AddSlowPath(slow_path);
+
+ // Load ArtMethod first.
+ HInvokeStaticOrDirect* invoke_direct = invoke->AsInvokeStaticOrDirect();
+ DCHECK(invoke_direct != nullptr);
+ Location temp_loc = codegen_->GenerateCalleeMethodStaticOrDirectCall(
+ invoke_direct, locations->GetTemp(0));
+ DCHECK(temp_loc.Equals(locations->GetTemp(0)));
+ CpuRegister temp = temp_loc.AsRegister<CpuRegister>();
+
+ // Now get declaring class.
+ __ movl(temp, Address(temp, ArtMethod::DeclaringClassOffset().Int32Value()));
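+  // temp now holds the declaring class (java.lang.ref.Reference), whose static flag fields are checked below.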
+
+ uint32_t slow_path_flag_offset = codegen_->GetReferenceSlowFlagOffset();
+ uint32_t disable_flag_offset = codegen_->GetReferenceDisableFlagOffset();
+ DCHECK_NE(slow_path_flag_offset, 0u);
+ DCHECK_NE(disable_flag_offset, 0u);
+ DCHECK_NE(slow_path_flag_offset, disable_flag_offset);
+
+  // Check the static flags that prevent us from using the intrinsic.
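+  // The flags are byte-sized; when they are adjacent in memory, a single 16-bit compare tests both at once.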
+ if (slow_path_flag_offset == disable_flag_offset + 1) {
+ __ cmpw(Address(temp, disable_flag_offset), Immediate(0));
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ } else {
+ __ cmpb(Address(temp, disable_flag_offset), Immediate(0));
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ __ cmpb(Address(temp, slow_path_flag_offset), Immediate(0));
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ }
+
+ // Fast path.
+ __ movl(out, Address(obj, mirror::Reference::ReferentOffset().Int32Value()));
+ codegen_->MaybeRecordImplicitNullCheck(invoke);
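+  // With ART_HEAP_POISONING enabled heap references are stored poisoned, so unpoison the loaded referent (no-op otherwise).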
+ __ MaybeUnpoisonHeapReference(out);
+ __ Bind(slow_path->GetExitLabel());
+}
+
UNIMPLEMENTED_INTRINSIC(X86_64, FloatIsInfinite)
UNIMPLEMENTED_INTRINSIC(X86_64, DoubleIsInfinite)