| | |
|---|---|
| author | 2017-06-05 14:40:23 +0000 |
| committer | 2017-06-05 14:40:25 +0000 |
| commit | 0a50965275df2da590c49a7a955e6ff5a7c7d2ae (patch) |
| tree | 7c4b3f2ebab0abb1c13239878450dc2bf7aaca08 /compiler/optimizing |
| parent | ab6393400f0dd213d335092c6e83f6a8743f00c2 (diff) |
| parent | 4ee8e291a7d5b7b98f35f495eb97705836910871 (diff) |
Merge "Revert^3 "ART: Reference.getReferent intrinsic for x86 and x86_64""
Diffstat (limited to 'compiler/optimizing')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | compiler/optimizing/code_generator.cc | 14 |
| -rw-r--r-- | compiler/optimizing/code_generator.h | 3 |
| -rw-r--r-- | compiler/optimizing/code_generator_x86.cc | 19 |
| -rw-r--r-- | compiler/optimizing/code_generator_x86.h | 1 |
| -rw-r--r-- | compiler/optimizing/code_generator_x86_64.cc | 12 |
| -rw-r--r-- | compiler/optimizing/code_generator_x86_64.h | 1 |
| -rw-r--r-- | compiler/optimizing/intrinsics_x86.cc | 60 |
| -rw-r--r-- | compiler/optimizing/intrinsics_x86_64.cc | 60 |
8 files changed, 11 insertions, 159 deletions
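
In short, this merge deletes the x86/x86_64 fast-path implementations of `Reference.getReferent` and re-registers the intrinsic as unimplemented, so the call falls back to a regular invoke. For readers unfamiliar with the `UNIMPLEMENTED_INTRINSIC` macro that the last two hunks add entries to, here is a rough sketch of its expansion (paraphrased from ART's intrinsics helpers of this era, not part of this commit):

```cpp
// Rough sketch: UNIMPLEMENTED_INTRINSIC(Arch, Name) expands to a pair of
// empty visitor methods. With no LocationSummary created, the recognizer
// leaves the HInvoke untouched and it is compiled as an ordinary call.
#define UNIMPLEMENTED_INTRINSIC(Arch, Name)                               \
void IntrinsicLocationsBuilder ## Arch::Visit ## Name(HInvoke* invoke) {  \
}                                                                         \
void IntrinsicCodeGenerator ## Arch::Visit ## Name(HInvoke* invoke) {     \
}
```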
```diff
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index c117777508..c918ee6687 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -1403,20 +1403,6 @@ void CodeGenerator::CreateSystemArrayCopyLocationSummary(HInvoke* invoke) {
   locations->AddTemp(Location::RequiresRegister());
 }
 
-uint32_t CodeGenerator::GetReferenceSlowFlagOffset() const {
-  ScopedObjectAccess soa(Thread::Current());
-  mirror::Class* klass = mirror::Reference::GetJavaLangRefReference();
-  DCHECK(klass->IsInitialized());
-  return klass->GetSlowPathFlagOffset().Uint32Value();
-}
-
-uint32_t CodeGenerator::GetReferenceDisableFlagOffset() const {
-  ScopedObjectAccess soa(Thread::Current());
-  mirror::Class* klass = mirror::Reference::GetJavaLangRefReference();
-  DCHECK(klass->IsInitialized());
-  return klass->GetDisableIntrinsicFlagOffset().Uint32Value();
-}
-
 void CodeGenerator::EmitJitRoots(uint8_t* code,
                                  Handle<mirror::ObjectArray<mirror::Object>> roots,
                                  const uint8_t* roots_data) {
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index fc2f4c1a6f..c9ba5c3357 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -573,9 +573,6 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
 
   virtual void GenerateNop() = 0;
 
-  uint32_t GetReferenceSlowFlagOffset() const;
-  uint32_t GetReferenceDisableFlagOffset() const;
-
   static QuickEntrypointEnum GetArrayAllocationEntrypoint(Handle<mirror::Class> array_klass);
 
  protected:
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 658049a07a..b8465cd9d5 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -4521,18 +4521,16 @@ Register CodeGeneratorX86::GetInvokeStaticOrDirectExtraParameter(HInvokeStaticOr
   // save one load. However, since this is just an intrinsic slow path we prefer this
   // simple and more robust approach rather that trying to determine if that's the case.
   SlowPathCode* slow_path = GetCurrentSlowPath();
-  if (slow_path != nullptr) {
-    if (slow_path->IsCoreRegisterSaved(location.AsRegister<Register>())) {
-      int stack_offset = slow_path->GetStackOffsetOfCoreRegister(location.AsRegister<Register>());
-      __ movl(temp, Address(ESP, stack_offset));
-      return temp;
-    }
+  DCHECK(slow_path != nullptr);  // For intrinsified invokes the call is emitted on the slow path.
+  if (slow_path->IsCoreRegisterSaved(location.AsRegister<Register>())) {
+    int stack_offset = slow_path->GetStackOffsetOfCoreRegister(location.AsRegister<Register>());
+    __ movl(temp, Address(ESP, stack_offset));
+    return temp;
   }
   return location.AsRegister<Register>();
 }
 
-Location CodeGeneratorX86::GenerateCalleeMethodStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
-                                                                  Location temp) {
+void CodeGeneratorX86::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
   Location callee_method = temp;  // For all kinds except kRecursive, callee will be in temp.
   switch (invoke->GetMethodLoadKind()) {
     case HInvokeStaticOrDirect::MethodLoadKind::kStringInit: {
@@ -4590,11 +4588,6 @@ Location CodeGeneratorX86::GenerateCalleeMethodStaticOrDirectCall(HInvokeStaticO
       break;
     }
   }
-  return callee_method;
-}
-
-void CodeGeneratorX86::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
-  Location callee_method = GenerateCalleeMethodStaticOrDirectCall(invoke, temp);
 
   switch (invoke->GetCodePtrLocation()) {
     case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf:
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 21c527e8b0..8130bd9d25 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -408,7 +408,6 @@ class CodeGeneratorX86 : public CodeGenerator {
       HInvokeStaticOrDirect* invoke) OVERRIDE;
 
   // Generate a call to a static or direct method.
-  Location GenerateCalleeMethodStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp);
   void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) OVERRIDE;
   // Generate a call to a virtual method.
   void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp) OVERRIDE;
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 1d49afca6a..8dde298267 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -977,9 +977,10 @@ HInvokeStaticOrDirect::DispatchInfo CodeGeneratorX86_64::GetSupportedInvokeStati
   return desired_dispatch_info;
 }
 
-Location CodeGeneratorX86_64::GenerateCalleeMethodStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
-                                                                     Location temp) {
+void CodeGeneratorX86_64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
+                                                     Location temp) {
   // All registers are assumed to be correctly set up.
+  Location callee_method = temp;  // For all kinds except kRecursive, callee will be in temp.
   switch (invoke->GetMethodLoadKind()) {
     case HInvokeStaticOrDirect::MethodLoadKind::kStringInit: {
@@ -1032,13 +1033,6 @@ Location CodeGeneratorX86_64::GenerateCalleeMethodStaticOrDirectCall(HInvokeStat
       break;
     }
   }
-  return callee_method;
-}
-
-void CodeGeneratorX86_64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
-                                                     Location temp) {
-  // All registers are assumed to be correctly set up.
-  Location callee_method = GenerateCalleeMethodStaticOrDirectCall(invoke, temp);
 
   switch (invoke->GetCodePtrLocation()) {
     case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf:
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 3039e0519c..25479814d0 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -404,7 +404,6 @@ class CodeGeneratorX86_64 : public CodeGenerator {
       const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
       HInvokeStaticOrDirect* invoke) OVERRIDE;
 
-  Location GenerateCalleeMethodStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp);
   void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) OVERRIDE;
   void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp) OVERRIDE;
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index fa843a6200..a9da15d2ce 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -2819,65 +2819,6 @@ void IntrinsicCodeGeneratorX86::VisitLongNumberOfTrailingZeros(HInvoke* invoke)
   GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long */ true);
 }
 
-void IntrinsicLocationsBuilderX86::VisitReferenceGetReferent(HInvoke* invoke) {
-  if (kEmitCompilerReadBarrier) {
-    // Do not intrinsify this call with the read barrier configuration.
-    return;
-  }
-  LocationSummary* locations = new (arena_) LocationSummary(invoke,
-                                                            LocationSummary::kCallOnSlowPath,
-                                                            kIntrinsified);
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetOut(Location::SameAsFirstInput());
-  locations->AddTemp(Location::RequiresRegister());
-}
-
-void IntrinsicCodeGeneratorX86::VisitReferenceGetReferent(HInvoke* invoke) {
-  DCHECK(!kEmitCompilerReadBarrier);
-  LocationSummary* locations = invoke->GetLocations();
-  X86Assembler* assembler = GetAssembler();
-
-  Register obj = locations->InAt(0).AsRegister<Register>();
-  Register out = locations->Out().AsRegister<Register>();
-
-  SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathX86(invoke);
-  codegen_->AddSlowPath(slow_path);
-
-  // Load ArtMethod first.
-  HInvokeStaticOrDirect* invoke_direct = invoke->AsInvokeStaticOrDirect();
-  DCHECK(invoke_direct != nullptr);
-  Location temp_loc = codegen_->GenerateCalleeMethodStaticOrDirectCall(
-      invoke_direct, locations->GetTemp(0));
-  DCHECK(temp_loc.Equals(locations->GetTemp(0)));
-  Register temp = temp_loc.AsRegister<Register>();
-
-  // Now get declaring class.
-  __ movl(temp, Address(temp, ArtMethod::DeclaringClassOffset().Int32Value()));
-
-  uint32_t slow_path_flag_offset = codegen_->GetReferenceSlowFlagOffset();
-  uint32_t disable_flag_offset = codegen_->GetReferenceDisableFlagOffset();
-  DCHECK_NE(slow_path_flag_offset, 0u);
-  DCHECK_NE(disable_flag_offset, 0u);
-  DCHECK_NE(slow_path_flag_offset, disable_flag_offset);
-
-  // Check static flags preventing us for using intrinsic.
-  if (slow_path_flag_offset == disable_flag_offset + 1) {
-    __ cmpw(Address(temp, disable_flag_offset), Immediate(0));
-    __ j(kNotEqual, slow_path->GetEntryLabel());
-  } else {
-    __ cmpb(Address(temp, disable_flag_offset), Immediate(0));
-    __ j(kNotEqual, slow_path->GetEntryLabel());
-    __ cmpb(Address(temp, slow_path_flag_offset), Immediate(0));
-    __ j(kNotEqual, slow_path->GetEntryLabel());
-  }
-
-  // Fast path.
-  __ movl(out, Address(obj, mirror::Reference::ReferentOffset().Int32Value()));
-  codegen_->MaybeRecordImplicitNullCheck(invoke);
-  __ MaybeUnpoisonHeapReference(out);
-  __ Bind(slow_path->GetExitLabel());
-}
-
 static bool IsSameInput(HInstruction* instruction, size_t input0, size_t input1) {
   return instruction->InputAt(input0) == instruction->InputAt(input1);
 }
@@ -3429,6 +3370,7 @@ void IntrinsicCodeGeneratorX86::VisitThreadInterrupted(HInvoke* invoke) {
 
 UNIMPLEMENTED_INTRINSIC(X86, MathRoundDouble)
+UNIMPLEMENTED_INTRINSIC(X86, ReferenceGetReferent)
 UNIMPLEMENTED_INTRINSIC(X86, FloatIsInfinite)
 UNIMPLEMENTED_INTRINSIC(X86, DoubleIsInfinite)
 UNIMPLEMENTED_INTRINSIC(X86, IntegerHighestOneBit)
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 4f4592b3df..8100645e54 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -2959,65 +2959,6 @@ void IntrinsicCodeGeneratorX86_64::VisitLongNumberOfTrailingZeros(HInvoke* invok
   GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long */ true);
 }
 
-void IntrinsicLocationsBuilderX86_64::VisitReferenceGetReferent(HInvoke* invoke) {
-  if (kEmitCompilerReadBarrier) {
-    // Do not intrinsify this call with the read barrier configuration.
-    return;
-  }
-  LocationSummary* locations = new (arena_) LocationSummary(invoke,
-                                                            LocationSummary::kCallOnSlowPath,
-                                                            kIntrinsified);
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetOut(Location::SameAsFirstInput());
-  locations->AddTemp(Location::RequiresRegister());
-}
-
-void IntrinsicCodeGeneratorX86_64::VisitReferenceGetReferent(HInvoke* invoke) {
-  DCHECK(!kEmitCompilerReadBarrier);
-  LocationSummary* locations = invoke->GetLocations();
-  X86_64Assembler* assembler = GetAssembler();
-
-  CpuRegister obj = locations->InAt(0).AsRegister<CpuRegister>();
-  CpuRegister out = locations->Out().AsRegister<CpuRegister>();
-
-  SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathX86_64(invoke);
-  codegen_->AddSlowPath(slow_path);
-
-  // Load ArtMethod first.
-  HInvokeStaticOrDirect* invoke_direct = invoke->AsInvokeStaticOrDirect();
-  DCHECK(invoke_direct != nullptr);
-  Location temp_loc = codegen_->GenerateCalleeMethodStaticOrDirectCall(
-      invoke_direct, locations->GetTemp(0));
-  DCHECK(temp_loc.Equals(locations->GetTemp(0)));
-  CpuRegister temp = temp_loc.AsRegister<CpuRegister>();
-
-  // Now get declaring class.
-  __ movl(temp, Address(temp, ArtMethod::DeclaringClassOffset().Int32Value()));
-
-  uint32_t slow_path_flag_offset = codegen_->GetReferenceSlowFlagOffset();
-  uint32_t disable_flag_offset = codegen_->GetReferenceDisableFlagOffset();
-  DCHECK_NE(slow_path_flag_offset, 0u);
-  DCHECK_NE(disable_flag_offset, 0u);
-  DCHECK_NE(slow_path_flag_offset, disable_flag_offset);
-
-  // Check static flags preventing us for using intrinsic.
-  if (slow_path_flag_offset == disable_flag_offset + 1) {
-    __ cmpw(Address(temp, disable_flag_offset), Immediate(0));
-    __ j(kNotEqual, slow_path->GetEntryLabel());
-  } else {
-    __ cmpb(Address(temp, disable_flag_offset), Immediate(0));
-    __ j(kNotEqual, slow_path->GetEntryLabel());
-    __ cmpb(Address(temp, slow_path_flag_offset), Immediate(0));
-    __ j(kNotEqual, slow_path->GetEntryLabel());
-  }
-
-  // Fast path.
-  __ movl(out, Address(obj, mirror::Reference::ReferentOffset().Int32Value()));
-  codegen_->MaybeRecordImplicitNullCheck(invoke);
-  __ MaybeUnpoisonHeapReference(out);
-  __ Bind(slow_path->GetExitLabel());
-}
-
 void IntrinsicLocationsBuilderX86_64::VisitIntegerValueOf(HInvoke* invoke) {
   InvokeRuntimeCallingConvention calling_convention;
   IntrinsicVisitor::ComputeIntegerValueOfLocations(
@@ -3106,6 +3047,7 @@ void IntrinsicCodeGeneratorX86_64::VisitThreadInterrupted(HInvoke* invoke) {
   __ Bind(&done);
 }
 
+UNIMPLEMENTED_INTRINSIC(X86_64, ReferenceGetReferent)
 UNIMPLEMENTED_INTRINSIC(X86_64, FloatIsInfinite)
 UNIMPLEMENTED_INTRINSIC(X86_64, DoubleIsInfinite)
```
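
For context on what is being reverted: the deleted `VisitReferenceGetReferent` visitors emitted, directly as machine code, a guarded field load of `Reference.referent` — load the callee `ArtMethod*`, fetch its declaring class (`java.lang.ref.Reference`), test two class flags, and either jump to the runtime slow path or perform a single `movl` from the referent offset. A C++-level paraphrase of that emitted sequence (illustrative only; the helper names below are invented stand-ins for the assembler code above):

```cpp
// Illustrative paraphrase of the deleted intrinsic's emitted code.
// All helpers here are hypothetical; the real implementation is the
// removed assembler sequence in intrinsics_x86(_64).cc.
mirror::Object* ReferenceGetReferentFastPath(mirror::Reference* obj,
                                             ArtMethod* callee) {
  // The callee is Reference.getReferent, so its declaring class is
  // java.lang.ref.Reference; both guard flags live on that class.
  mirror::Class* klass = callee->GetDeclaringClass();
  // Guard 1: the intrinsic is statically disabled for this runtime.
  // Guard 2: the GC currently forces Reference.get() through the runtime.
  if (LoadDisableIntrinsicFlag(klass) != 0 || LoadSlowPathFlag(klass) != 0) {
    return CallRuntimeGetReferent(obj);  // hypothetical slow-path call
  }
  // Fast path: one field load (unpoisoned if heap poisoning is enabled).
  return obj->GetReferent();
}
```

Note the small trick visible in the deleted code: when the two byte-sized flags happen to be adjacent in memory (`slow_path_flag_offset == disable_flag_offset + 1`), a single 16-bit `cmpw` tests both guards in one compare instead of two `cmpb`/branch pairs.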