Diffstat (limited to 'compiler/optimizing/intrinsics_x86_64.cc')
-rw-r--r--   compiler/optimizing/intrinsics_x86_64.cc | 60
1 file changed, 59 insertions(+), 1 deletion(-)
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 06e9cc2b28..2a867697e5 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -2719,7 +2719,65 @@ void IntrinsicCodeGeneratorX86_64::VisitLongNumberOfTrailingZeros(HInvoke* invok
   GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long */ true);
 }
 
-UNIMPLEMENTED_INTRINSIC(X86_64, ReferenceGetReferent)
+void IntrinsicLocationsBuilderX86_64::VisitReferenceGetReferent(HInvoke* invoke) {
+  if (kEmitCompilerReadBarrier) {
+    // Do not intrinsify this call with the read barrier configuration.
+    return;
+  }
+  LocationSummary* locations = new (arena_) LocationSummary(invoke,
+                                                            LocationSummary::kCallOnSlowPath,
+                                                            kIntrinsified);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetOut(Location::SameAsFirstInput());
+  locations->AddTemp(Location::RequiresRegister());
+}
+
+void IntrinsicCodeGeneratorX86_64::VisitReferenceGetReferent(HInvoke* invoke) {
+  DCHECK(!kEmitCompilerReadBarrier);
+  LocationSummary* locations = invoke->GetLocations();
+  X86_64Assembler* assembler = GetAssembler();
+
+  CpuRegister obj = locations->InAt(0).AsRegister<CpuRegister>();
+  CpuRegister out = locations->Out().AsRegister<CpuRegister>();
+
+  SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathX86_64(invoke);
+  codegen_->AddSlowPath(slow_path);
+
+  // Load ArtMethod first.
+  HInvokeStaticOrDirect* invoke_direct = invoke->AsInvokeStaticOrDirect();
+  DCHECK(invoke_direct != nullptr);
+  Location temp_loc = codegen_->GenerateCalleeMethodStaticOrDirectCall(
+      invoke_direct, locations->GetTemp(0));
+  DCHECK(temp_loc.Equals(locations->GetTemp(0)));
+  CpuRegister temp = temp_loc.AsRegister<CpuRegister>();
+
+  // Now get declaring class.
+  __ movl(temp, Address(temp, ArtMethod::DeclaringClassOffset().Int32Value()));
+
+  uint32_t slow_path_flag_offset = codegen_->GetReferenceSlowFlagOffset();
+  uint32_t disable_flag_offset = codegen_->GetReferenceDisableFlagOffset();
+  DCHECK_NE(slow_path_flag_offset, 0u);
+  DCHECK_NE(disable_flag_offset, 0u);
+  DCHECK_NE(slow_path_flag_offset, disable_flag_offset);
+
+  // Check the static flags preventing us from using the intrinsic.
+  if (slow_path_flag_offset == disable_flag_offset + 1) {
+    __ cmpw(Address(temp, disable_flag_offset), Immediate(0));
+    __ j(kNotEqual, slow_path->GetEntryLabel());
+  } else {
+    __ cmpb(Address(temp, disable_flag_offset), Immediate(0));
+    __ j(kNotEqual, slow_path->GetEntryLabel());
+    __ cmpb(Address(temp, slow_path_flag_offset), Immediate(0));
+    __ j(kNotEqual, slow_path->GetEntryLabel());
+  }
+
+  // Fast path.
+  __ movl(out, Address(obj, mirror::Reference::ReferentOffset().Int32Value()));
+  codegen_->MaybeRecordImplicitNullCheck(invoke);
+  __ MaybeUnpoisonHeapReference(out);
+  __ Bind(slow_path->GetExitLabel());
+}
+
 UNIMPLEMENTED_INTRINSIC(X86_64, FloatIsInfinite)
 UNIMPLEMENTED_INTRINSIC(X86_64, DoubleIsInfinite)
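
The notable detail in the generated code above is the flag check before the fast path: two per-class byte flags (the reference-processing slow-path flag and the intrinsic-disable flag) are tested, and the code falls back to the slow path if either is set. When the two flags happen to sit at adjacent offsets, a single 16-bit cmpw covers both bytes at once; otherwise each byte is tested separately with cmpb. The snippet below is a minimal standalone C++ sketch of that idea only, not ART code; the ClassFlags layout and the CanUseFastPath helper are names invented for illustration.

#include <cstdint>
#include <cstring>

// Hypothetical stand-in for the two per-class flag bytes checked above,
// laid out at adjacent offsets (disable flag first, slow-path flag next).
struct ClassFlags {
  uint8_t disable_intrinsic;   // analogous to the byte at disable_flag_offset
  uint8_t slow_path_required;  // analogous to the byte at slow_path_flag_offset
};

// Returns true when the fast path may be taken, i.e. both flags are zero.
// Because the two flags are adjacent bytes, one 16-bit load and compare
// against zero checks both at once -- the same trick as the cmpw emitted
// in the adjacent-offset branch of the generated code.
bool CanUseFastPath(const ClassFlags& flags) {
  static_assert(sizeof(ClassFlags) == sizeof(uint16_t), "flags must be two adjacent bytes");
  uint16_t both;
  std::memcpy(&both, &flags, sizeof(both));  // plays the role of the 16-bit cmpw
  return both == 0;
}

If the offsets were not adjacent, the equivalent of the else branch applies: each byte is loaded and compared on its own, costing one extra compare-and-branch pair, which is why the generated code special-cases the adjacent layout.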