author     2017-02-28 17:04:50 +0000
committer  2017-02-28 17:04:50 +0000
commit     db7b44ac3ea80a722aaed12e913ebc1661a57998 (patch)
tree       ef5e6236203e04b59151b2e1b1529f9b389957b4 /compiler
parent     cd0b27287843cfd904dd163056322579ab4bbf27 (diff)
Revert "Intrinsify Integer.valueOf."
Heap poisoning support is missing, and jit-gcstress is not optimizing it.
bug:30933338
This reverts commit cd0b27287843cfd904dd163056322579ab4bbf27.
Change-Id: I5ece1818afbca5214babb6803f62614a649aedeb
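For context on what is being backed out: Integer.valueOf(int) returns a cached, shared java.lang.Integer for values in the IntegerCache range (-128 to 127 by default) and a fresh allocation otherwise, and the reverted intrinsic emitted exactly that cache-or-allocate shape inline. Below is a minimal, self-contained C++ sketch of the logic; BoxedInt, kLow, kHigh, MakeCache, AllocateBoxed, and ValueOf are illustrative names, not ART or libcore APIs.

    #include <array>
    #include <cstdint>
    #include <memory>

    // Illustrative stand-in for java.lang.Integer; not an ART or libcore type.
    struct BoxedInt {
      int32_t value;
    };

    namespace {

    // Default IntegerCache bounds; ART reads the real values from the
    // initialized IntegerCache class in the boot image rather than
    // hard-coding them.
    constexpr int32_t kLow = -128;
    constexpr int32_t kHigh = 127;

    // Analogue of IntegerCache.cache: one shared object per cached value.
    std::array<BoxedInt, kHigh - kLow + 1> MakeCache() {
      std::array<BoxedInt, kHigh - kLow + 1> cache{};
      for (int32_t v = kLow; v <= kHigh; ++v) {
        cache[v - kLow].value = v;
      }
      return cache;
    }
    const std::array<BoxedInt, kHigh - kLow + 1> kCache = MakeCache();

    // Slow path: the emitted code called the kQuickAllocObjectInitialized
    // entrypoint here, then stored `value` into the fresh object.
    std::unique_ptr<BoxedInt> AllocateBoxed(int32_t value) {
      return std::make_unique<BoxedInt>(BoxedInt{value});
    }

    }  // namespace

    // Shape of the code the intrinsic emitted: one unsigned bounds check,
    // then either an array load or an allocation.
    const BoxedInt* ValueOf(int32_t value, std::unique_ptr<BoxedInt>* owned) {
      uint32_t index = static_cast<uint32_t>(value) - static_cast<uint32_t>(kLow);
      if (index < kCache.size()) {
        return &kCache[index];  // Cached: a shared, boot-image-resident object.
      }
      *owned = AllocateBoxed(value);  // Uncached: allocate a new boxed value.
      return owned->get();
    }

Subtracting low and comparing unsigned against the cache length is the same trick as the deleted x86 sequence (leal, cmpl, j(kAboveEqual)): a single branch covers both below-range and above-range inputs.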
Diffstat (limited to 'compiler')

 compiler/intrinsics_list.h                  |   7
 compiler/optimizing/code_generator_arm64.cc |   4
 compiler/optimizing/intrinsics.cc           | 107
 compiler/optimizing/intrinsics.h            |  33
 compiler/optimizing/intrinsics_arm.cc       |  69
 compiler/optimizing/intrinsics_arm.h        |   1
 compiler/optimizing/intrinsics_arm64.cc     |  72
 compiler/optimizing/intrinsics_arm64.h      |   4
 compiler/optimizing/intrinsics_arm_vixl.cc  |  71
 compiler/optimizing/intrinsics_arm_vixl.h   |   1
 compiler/optimizing/intrinsics_mips.cc      |   2
 compiler/optimizing/intrinsics_mips64.cc    |   2
 compiler/optimizing/intrinsics_x86.cc       |  58
 compiler/optimizing/intrinsics_x86_64.cc    |  58
 compiler/optimizing/nodes.h                 |   3
15 files changed, 4 insertions, 488 deletions
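A note on the first revert reason: in heap-poisoning builds (kPoisonHeapReferences), compiled code stores compressed heap references in a transformed form, and every raw reference the compiler embeds or loads must go through a poison/unpoison step so that a missed transform faults loudly instead of silently reading through a stale pointer. The deleted codegen below embeds raw boot-image addresses and loads cache entries directly, which appears to be the gap the commit message points at. A rough sketch of the transform, assuming ART's negation scheme and with invented helper names (PoisonReference/UnpoisonReference):

    #include <cstdint>

    // Compressed heap reference, as stored in 32-bit heap slots.
    using HeapReference = uint32_t;

    // With poisoning enabled, every reference written by compiled code is
    // stored transformed, and every reference read must be transformed back.
    // ART uses arithmetic negation, which is its own inverse.
    inline HeapReference PoisonReference(HeapReference ref) {
      return -ref;  // Unsigned negation is well-defined modular arithmetic.
    }

    inline HeapReference UnpoisonReference(HeapReference poisoned) {
      return -poisoned;  // Self-inverse: unpoison(poison(x)) == x.
    }

Under such a build, a fast path that loads a reference without the unpoison step yields what looks like a wild pointer, so the omission shows up immediately rather than as silent heap corruption.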
diff --git a/compiler/intrinsics_list.h b/compiler/intrinsics_list.h
index 63c23cb074..9bd25d8484 100644
--- a/compiler/intrinsics_list.h
+++ b/compiler/intrinsics_list.h
@@ -24,10 +24,6 @@
 // Note: adding a new intrinsic requires an art image version change,
 // as the modifiers flag for some ArtMethods will need to be changed.
 
-// Note: j.l.Integer.valueOf says kNoThrow even though it could throw an OOME.
-// The kNoThrow should be renamed to kNoVisibleThrow, as it is ok to GVN Integer.valueOf
-// (kNoSideEffects), and it is also OK to remove it if it's unused.
-
 #define INTRINSICS_LIST(V) \
   V(DoubleDoubleToRawLongBits, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Double;", "doubleToRawLongBits", "(D)J") \
   V(DoubleDoubleToLongBits, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Double;", "doubleToLongBits", "(D)J") \
@@ -153,8 +149,7 @@
   V(UnsafeLoadFence, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Lsun/misc/Unsafe;", "loadFence", "()V") \
   V(UnsafeStoreFence, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Lsun/misc/Unsafe;", "storeFence", "()V") \
   V(UnsafeFullFence, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Lsun/misc/Unsafe;", "fullFence", "()V") \
-  V(ReferenceGetReferent, kDirect, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/ref/Reference;", "getReferent", "()Ljava/lang/Object;") \
-  V(IntegerValueOf, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Integer;", "valueOf", "(I)Ljava/lang/Integer;")
+  V(ReferenceGetReferent, kDirect, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/ref/Reference;", "getReferent", "()Ljava/lang/Object;")
 
 #endif  // ART_COMPILER_INTRINSICS_LIST_H_
 #undef ART_COMPILER_INTRINSICS_LIST_H_   // #define is only for lint.
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 18c95b3c41..edccbd4904 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -4094,7 +4094,7 @@ void InstructionCodeGeneratorARM64::VisitInvokeInterface(HInvokeInterface* invok
 }
 
 void LocationsBuilderARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
-  IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetArena(), codegen_);
+  IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetArena());
   if (intrinsic.TryDispatch(invoke)) {
     return;
   }
@@ -4107,7 +4107,7 @@ void LocationsBuilderARM64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* inv
   // art::PrepareForRegisterAllocation.
   DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
 
-  IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetArena(), codegen_);
+  IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetArena());
   if (intrinsic.TryDispatch(invoke)) {
     return;
   }
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index a85dd84a6b..17d683f357 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -19,7 +19,6 @@
 #include "art_method.h"
 #include "class_linker.h"
 #include "driver/compiler_driver.h"
-#include "driver/compiler_options.h"
 #include "invoke_type.h"
 #include "mirror/dex_cache-inl.h"
 #include "nodes.h"
@@ -179,110 +178,4 @@ INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
   return os;
 }
 
-void IntrinsicVisitor::ComputeIntegerValueOfLocations(HInvoke* invoke,
-                                                      CodeGenerator* codegen,
-                                                      Location return_location,
-                                                      Location first_argument_location) {
-  if (Runtime::Current()->IsAotCompiler()) {
-    if (codegen->GetCompilerOptions().IsBootImage() ||
-        codegen->GetCompilerOptions().GetCompilePic()) {
-      // TODO(ngeoffray): Support boot image compilation.
-      return;
-    }
-  }
-
-  IntegerValueOfInfo info = ComputeIntegerValueOfInfo();
-
-  if (info.integer_cache == nullptr ||
-      info.integer == nullptr ||
-      info.cache == nullptr ||
-      info.value_offset == 0 ||
-      // low and high cannot be 0, per the spec.
-      info.low == 0 ||
-      info.high == 0) {
-    LOG(ERROR) << "Integer.valueOf will not be optimized";
-    return;
-  }
-
-  // The intrinsic will call if it needs to allocate a j.l.Integer.
-  LocationSummary* locations = new (invoke->GetBlock()->GetGraph()->GetArena()) LocationSummary(
-      invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
-  if (!invoke->InputAt(0)->IsConstant()) {
-    locations->SetInAt(0, Location::RequiresRegister());
-  }
-  locations->AddTemp(first_argument_location);
-  locations->SetOut(return_location);
-}
-
-IntrinsicVisitor::IntegerValueOfInfo IntrinsicVisitor::ComputeIntegerValueOfInfo() {
-  // Note that we could cache all of the data looked up here. but there's no good
-  // location for it. We don't want to add it to WellKnownClasses, to avoid creating global
-  // jni values. Adding it as state to the compiler singleton seems like wrong
-  // separation of concerns.
-  // The need for this data should be pretty rare though.
-
-  // The most common case is that the classes are in the boot image and initialized,
-  // which is easy to generate code for. We bail if not.
-  Thread* self = Thread::Current();
-  ScopedObjectAccess soa(self);
-  Runtime* runtime = Runtime::Current();
-  ClassLinker* class_linker = runtime->GetClassLinker();
-  gc::Heap* heap = runtime->GetHeap();
-  IntegerValueOfInfo info;
-  info.integer_cache = class_linker->FindSystemClass(self, "Ljava/lang/Integer$IntegerCache;");
-  if (info.integer_cache == nullptr) {
-    self->ClearException();
-    return info;
-  }
-  if (!heap->ObjectIsInBootImageSpace(info.integer_cache) || !info.integer_cache->IsInitialized()) {
-    // Optimization only works if the class is initialized and in the boot image.
-    return info;
-  }
-  info.integer = class_linker->FindSystemClass(self, "Ljava/lang/Integer;");
-  if (info.integer == nullptr) {
-    self->ClearException();
-    return info;
-  }
-  if (!heap->ObjectIsInBootImageSpace(info.integer) || !info.integer->IsInitialized()) {
-    // Optimization only works if the class is initialized and in the boot image.
-    return info;
-  }
-
-  ArtField* field = info.integer_cache->FindDeclaredStaticField("cache", "[Ljava/lang/Integer;");
-  if (field == nullptr) {
-    return info;
-  }
-  info.cache = static_cast<mirror::ObjectArray<mirror::Object>*>(
-      field->GetObject(info.integer_cache).Ptr());
-  if (info.cache == nullptr) {
-    return info;
-  }
-
-  if (!heap->ObjectIsInBootImageSpace(info.cache)) {
-    // Optimization only works if the object is in the boot image.
-    return info;
-  }
-
-  field = info.integer->FindDeclaredInstanceField("value", "I");
-  if (field == nullptr) {
-    return info;
-  }
-  info.value_offset = field->GetOffset().Int32Value();
-
-  field = info.integer_cache->FindDeclaredStaticField("low", "I");
-  if (field == nullptr) {
-    return info;
-  }
-  info.low = field->GetInt(info.integer_cache);
-
-  field = info.integer_cache->FindDeclaredStaticField("high", "I");
-  if (field == nullptr) {
-    return info;
-  }
-  info.high = field->GetInt(info.integer_cache);
-
-  DCHECK_EQ(info.cache->GetLength(), info.high - info.low + 1);
-  return info;
-}
-
 }  // namespace art
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index 9da5a7fa3b..6425e1313f 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -113,39 +113,6 @@ INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
     codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
   }
 
-  static void ComputeIntegerValueOfLocations(HInvoke* invoke,
-                                             CodeGenerator* codegen,
-                                             Location return_location,
-                                             Location first_argument_location);
-
-  // Temporary data structure for holding Integer.valueOf useful data. We only
-  // use it if the mirror::Class* are in the boot image, so it is fine to keep raw
-  // mirror::Class pointers in this structure.
-  struct IntegerValueOfInfo {
-    IntegerValueOfInfo()
-        : integer_cache(nullptr),
-          integer(nullptr),
-          cache(nullptr),
-          low(0),
-          high(0),
-          value_offset(0) {}
-
-    // The java.lang.IntegerCache class.
-    mirror::Class* integer_cache;
-    // The java.lang.Integer class.
-    mirror::Class* integer;
-    // Value of java.lang.IntegerCache#cache.
-    mirror::ObjectArray<mirror::Object>* cache;
-    // Value of java.lang.IntegerCache#low.
-    int32_t low;
-    // Value of java.lang.IntegerCache#high.
-    int32_t high;
-    // The offset of java.lang.Integer.value.
-    int32_t value_offset;
-  };
-
-  static IntegerValueOfInfo ComputeIntegerValueOfInfo();
-
  protected:
   IntrinsicVisitor() {}
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 1808a99590..c262cf983d 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -129,7 +129,6 @@ class ReadBarrierSystemArrayCopySlowPathARM : public SlowPathCode {
 
 IntrinsicLocationsBuilderARM::IntrinsicLocationsBuilderARM(CodeGeneratorARM* codegen)
     : arena_(codegen->GetGraph()->GetArena()),
-      codegen_(codegen),
       assembler_(codegen->GetAssembler()),
       features_(codegen->GetInstructionSetFeatures()) {}
 
@@ -2645,74 +2644,6 @@ void IntrinsicCodeGeneratorARM::VisitReferenceGetReferent(HInvoke* invoke) {
   __ Bind(slow_path->GetExitLabel());
 }
 
-void IntrinsicLocationsBuilderARM::VisitIntegerValueOf(HInvoke* invoke) {
-  InvokeRuntimeCallingConvention calling_convention;
-  IntrinsicVisitor::ComputeIntegerValueOfLocations(
-      invoke,
-      codegen_,
-      Location::RegisterLocation(R0),
-      Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-}
-
-void IntrinsicCodeGeneratorARM::VisitIntegerValueOf(HInvoke* invoke) {
-  IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo();
-  LocationSummary* locations = invoke->GetLocations();
-  ArmAssembler* const assembler = GetAssembler();
-
-  Register out = locations->Out().AsRegister<Register>();
-  InvokeRuntimeCallingConvention calling_convention;
-  Register argument = calling_convention.GetRegisterAt(0);
-  if (invoke->InputAt(0)->IsConstant()) {
-    int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
-    if (value >= info.low && value <= info.high) {
-      // Just embed the j.l.Integer in the code.
-      ScopedObjectAccess soa(Thread::Current());
-      mirror::Object* boxed = info.cache->Get(value + (-info.low));
-      DCHECK(boxed != nullptr && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed));
-      uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(boxed));
-      __ LoadLiteral(out, codegen_->DeduplicateBootImageAddressLiteral(address));
-    } else {
-      // Allocate and initialize a new j.l.Integer.
-      // TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the
-      // JIT object table.
-      uint32_t address =
-          dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
-      __ LoadLiteral(argument, codegen_->DeduplicateBootImageAddressLiteral(address));
-      codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
-      CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
-      __ LoadImmediate(IP, value);
-      __ StoreToOffset(kStoreWord, IP, out, info.value_offset);
-      // `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation
-      // one.
-      codegen_->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
-    }
-  } else {
-    Register in = locations->InAt(0).AsRegister<Register>();
-    // Check bounds of our cache.
-    __ AddConstant(out, in, -info.low);
-    __ CmpConstant(out, info.high - info.low + 1);
-    Label allocate, done;
-    __ b(&allocate, HS);
-    // If the value is within the bounds, load the j.l.Integer directly from the array.
-    uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
-    uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache));
-    __ LoadLiteral(IP, codegen_->DeduplicateBootImageAddressLiteral(data_offset + address));
-    codegen_->LoadFromShiftedRegOffset(Primitive::kPrimNot, locations->Out(), IP, out);
-    __ b(&done);
-    __ Bind(&allocate);
-    // Otherwise allocate and initialize a new j.l.Integer.
-    address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
-    __ LoadLiteral(argument, codegen_->DeduplicateBootImageAddressLiteral(address));
-    codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
-    CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
-    __ StoreToOffset(kStoreWord, in, out, info.value_offset);
-    // `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation
-    // one.
-    codegen_->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
-    __ Bind(&done);
-  }
-}
-
 UNIMPLEMENTED_INTRINSIC(ARM, MathMinDoubleDouble)
 UNIMPLEMENTED_INTRINSIC(ARM, MathMinFloatFloat)
 UNIMPLEMENTED_INTRINSIC(ARM, MathMaxDoubleDouble)
diff --git a/compiler/optimizing/intrinsics_arm.h b/compiler/optimizing/intrinsics_arm.h
index 2840863632..7f20ea4b1f 100644
--- a/compiler/optimizing/intrinsics_arm.h
+++ b/compiler/optimizing/intrinsics_arm.h
@@ -51,7 +51,6 @@ INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
 
  private:
   ArenaAllocator* arena_;
-  CodeGenerator* codegen_;
   ArmAssembler* assembler_;
 
   const ArmInstructionSetFeatures& features_;
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index abb5c0ad1c..86e54294ae 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -2924,78 +2924,6 @@ void IntrinsicCodeGeneratorARM64::VisitReferenceGetReferent(HInvoke* invoke) {
   __ Bind(slow_path->GetExitLabel());
 }
 
-void IntrinsicLocationsBuilderARM64::VisitIntegerValueOf(HInvoke* invoke) {
-  InvokeRuntimeCallingConvention calling_convention;
-  IntrinsicVisitor::ComputeIntegerValueOfLocations(
-      invoke,
-      codegen_,
-      calling_convention.GetReturnLocation(Primitive::kPrimNot),
-      Location::RegisterLocation(calling_convention.GetRegisterAt(0).GetCode()));
-}
-
-void IntrinsicCodeGeneratorARM64::VisitIntegerValueOf(HInvoke* invoke) {
-  IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo();
-  LocationSummary* locations = invoke->GetLocations();
-  MacroAssembler* masm = GetVIXLAssembler();
-
-  Register out = RegisterFrom(locations->Out(), Primitive::kPrimNot);
-  UseScratchRegisterScope temps(masm);
-  Register temp = temps.AcquireW();
-  InvokeRuntimeCallingConvention calling_convention;
-  Register argument = calling_convention.GetRegisterAt(0);
-  if (invoke->InputAt(0)->IsConstant()) {
-    int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
-    if (value >= info.low && value <= info.high) {
-      // Just embed the j.l.Integer in the code.
-      ScopedObjectAccess soa(Thread::Current());
-      mirror::Object* boxed = info.cache->Get(value + (-info.low));
-      DCHECK(boxed != nullptr && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed));
-      uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(boxed));
-      __ Ldr(out.W(), codegen_->DeduplicateBootImageAddressLiteral(address));
-    } else {
-      // Allocate and initialize a new j.l.Integer.
-      // TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the
-      // JIT object table.
-      uint32_t address =
-          dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
-      __ Ldr(argument.W(), codegen_->DeduplicateBootImageAddressLiteral(address));
-      codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
-      CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
-      __ Mov(temp.W(), value);
-      __ Str(temp.W(), HeapOperand(out.W(), info.value_offset));
-      // `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation
-      // one.
-      codegen_->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
-    }
-  } else {
-    Register in = RegisterFrom(locations->InAt(0), Primitive::kPrimInt);
-    // Check bounds of our cache.
-    __ Add(out.W(), in.W(), -info.low);
-    __ Cmp(out.W(), info.high - info.low + 1);
-    vixl::aarch64::Label allocate, done;
-    __ B(&allocate, hs);
-    // If the value is within the bounds, load the j.l.Integer directly from the array.
-    uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
-    uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache));
-    __ Ldr(temp.W(), codegen_->DeduplicateBootImageAddressLiteral(data_offset + address));
-    MemOperand source = HeapOperand(
-        temp, out.X(), LSL, Primitive::ComponentSizeShift(Primitive::kPrimNot));
-    codegen_->Load(Primitive::kPrimNot, out, source);
-    __ B(&done);
-    __ Bind(&allocate);
-    // Otherwise allocate and initialize a new j.l.Integer.
-    address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
-    __ Ldr(argument.W(), codegen_->DeduplicateBootImageAddressLiteral(address));
-    codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
-    CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
-    __ Str(in.W(), HeapOperand(out.W(), info.value_offset));
-    // `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation
-    // one.
-    codegen_->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
-    __ Bind(&done);
-  }
-}
-
 UNIMPLEMENTED_INTRINSIC(ARM64, IntegerHighestOneBit)
 UNIMPLEMENTED_INTRINSIC(ARM64, LongHighestOneBit)
 UNIMPLEMENTED_INTRINSIC(ARM64, IntegerLowestOneBit)
diff --git a/compiler/optimizing/intrinsics_arm64.h b/compiler/optimizing/intrinsics_arm64.h
index 3c53517b28..28e41cb086 100644
--- a/compiler/optimizing/intrinsics_arm64.h
+++ b/compiler/optimizing/intrinsics_arm64.h
@@ -38,8 +38,7 @@ class CodeGeneratorARM64;
 
 class IntrinsicLocationsBuilderARM64 FINAL : public IntrinsicVisitor {
  public:
-  explicit IntrinsicLocationsBuilderARM64(ArenaAllocator* arena, CodeGeneratorARM64* codegen)
-      : arena_(arena), codegen_(codegen) {}
+  explicit IntrinsicLocationsBuilderARM64(ArenaAllocator* arena) : arena_(arena) {}
 
   // Define visitor methods.
@@ -57,7 +56,6 @@ INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
 
  private:
   ArenaAllocator* arena_;
-  CodeGeneratorARM64* codegen_;
 
   DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderARM64);
 };
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index fe05edd858..70a3d38c13 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -203,7 +203,6 @@ class ReadBarrierSystemArrayCopySlowPathARMVIXL : public SlowPathCodeARMVIXL {
 
 IntrinsicLocationsBuilderARMVIXL::IntrinsicLocationsBuilderARMVIXL(CodeGeneratorARMVIXL* codegen)
     : arena_(codegen->GetGraph()->GetArena()),
-      codegen_(codegen),
       assembler_(codegen->GetAssembler()),
       features_(codegen->GetInstructionSetFeatures()) {}
 
@@ -2989,76 +2988,6 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathFloor(HInvoke* invoke) {
   __ Vrintm(F64, F64, OutputDRegister(invoke), InputDRegisterAt(invoke, 0));
 }
 
-void IntrinsicLocationsBuilderARMVIXL::VisitIntegerValueOf(HInvoke* invoke) {
-  InvokeRuntimeCallingConventionARMVIXL calling_convention;
-  IntrinsicVisitor::ComputeIntegerValueOfLocations(
-      invoke,
-      codegen_,
-      LocationFrom(r0),
-      LocationFrom(calling_convention.GetRegisterAt(0)));
-}
-
-void IntrinsicCodeGeneratorARMVIXL::VisitIntegerValueOf(HInvoke* invoke) {
-  IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo();
-  LocationSummary* locations = invoke->GetLocations();
-  ArmVIXLAssembler* const assembler = GetAssembler();
-
-  vixl32::Register out = RegisterFrom(locations->Out());
-  UseScratchRegisterScope temps(assembler->GetVIXLAssembler());
-  vixl32::Register temp = temps.Acquire();
-  InvokeRuntimeCallingConventionARMVIXL calling_convention;
-  vixl32::Register argument = calling_convention.GetRegisterAt(0);
-  if (invoke->InputAt(0)->IsConstant()) {
-    int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
-    if (value >= info.low && value <= info.high) {
-      // Just embed the j.l.Integer in the code.
-      ScopedObjectAccess soa(Thread::Current());
-      mirror::Object* boxed = info.cache->Get(value + (-info.low));
-      DCHECK(boxed != nullptr && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed));
-      uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(boxed));
-      __ Ldr(out, codegen_->DeduplicateBootImageAddressLiteral(address));
-    } else {
-      // Allocate and initialize a new j.l.Integer.
-      // TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the
-      // JIT object table.
-      uint32_t address =
-          dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
-      __ Ldr(argument, codegen_->DeduplicateBootImageAddressLiteral(address));
-      codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
-      CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
-      __ Mov(temp, value);
-      assembler->StoreToOffset(kStoreWord, temp, out, info.value_offset);
-      // `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation
-      // one.
-      codegen_->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
-    }
-  } else {
-    vixl32::Register in = RegisterFrom(locations->InAt(0));
-    // Check bounds of our cache.
-    __ Add(out, in, -info.low);
-    __ Cmp(out, info.high - info.low + 1);
-    vixl32::Label allocate, done;
-    __ B(hs, &allocate);
-    // If the value is within the bounds, load the j.l.Integer directly from the array.
-    uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
-    uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache));
-    __ Ldr(temp, codegen_->DeduplicateBootImageAddressLiteral(data_offset + address));
-    codegen_->LoadFromShiftedRegOffset(Primitive::kPrimNot, locations->Out(), temp, out);
-    __ B(&done);
-    __ Bind(&allocate);
-    // Otherwise allocate and initialize a new j.l.Integer.
-    address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
-    __ Ldr(argument, codegen_->DeduplicateBootImageAddressLiteral(address));
-    codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
-    CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
-    assembler->StoreToOffset(kStoreWord, in, out, info.value_offset);
-    // `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation
-    // one.
-    codegen_->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
-    __ Bind(&done);
-  }
-}
-
 UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathRoundDouble)   // Could be done by changing rounding mode, maybe?
 UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathRoundFloat)    // Could be done by changing rounding mode, maybe?
 UNIMPLEMENTED_INTRINSIC(ARMVIXL, UnsafeCASLong)     // High register pressure.
diff --git a/compiler/optimizing/intrinsics_arm_vixl.h b/compiler/optimizing/intrinsics_arm_vixl.h
index 023cba1349..6e79cb76a1 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.h
+++ b/compiler/optimizing/intrinsics_arm_vixl.h
@@ -47,7 +47,6 @@ INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
 
  private:
   ArenaAllocator* arena_;
-  CodeGenerator* codegen_;
   ArmVIXLAssembler* assembler_;
 
   const ArmInstructionSetFeatures& features_;
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index 536b7f6a17..64a68403e9 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -2682,8 +2682,6 @@ UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetAndSetInt)
 UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetAndSetLong)
 UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetAndSetObject)
 
-UNIMPLEMENTED_INTRINSIC(MIPS, IntegerValueOf)
-
 UNREACHABLE_INTRINSICS(MIPS)
 
 #undef __
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 1112eed112..3888828722 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -2075,8 +2075,6 @@ UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeGetAndSetInt)
 UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeGetAndSetLong)
 UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeGetAndSetObject)
 
-UNIMPLEMENTED_INTRINSIC(MIPS64, IntegerValueOf)
-
 UNREACHABLE_INTRINSICS(MIPS64)
 
 #undef __
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 432984e85f..e1b7ea53b4 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -3335,64 +3335,6 @@ void IntrinsicCodeGeneratorX86::VisitSystemArrayCopy(HInvoke* invoke) {
   __ Bind(intrinsic_slow_path->GetExitLabel());
 }
 
-void IntrinsicLocationsBuilderX86::VisitIntegerValueOf(HInvoke* invoke) {
-  InvokeRuntimeCallingConvention calling_convention;
-  IntrinsicVisitor::ComputeIntegerValueOfLocations(
-      invoke,
-      codegen_,
-      Location::RegisterLocation(EAX),
-      Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-}
-
-void IntrinsicCodeGeneratorX86::VisitIntegerValueOf(HInvoke* invoke) {
-  IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo();
-  LocationSummary* locations = invoke->GetLocations();
-  X86Assembler* assembler = GetAssembler();
-
-  Register out = locations->Out().AsRegister<Register>();
-  InvokeRuntimeCallingConvention calling_convention;
-  if (invoke->InputAt(0)->IsConstant()) {
-    int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
-    if (value >= info.low && value <= info.high) {
-      // Just embed the j.l.Integer in the code.
-      ScopedObjectAccess soa(Thread::Current());
-      mirror::Object* boxed = info.cache->Get(value + (-info.low));
-      DCHECK(boxed != nullptr && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed));
-      uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(boxed));
-      __ movl(out, Immediate(address));
-    } else {
-      // Allocate and initialize a new j.l.Integer.
-      // TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the
-      // JIT object table.
-      uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
-      __ movl(calling_convention.GetRegisterAt(0), Immediate(address));
-      codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
-      CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
-      __ movl(Address(out, info.value_offset), Immediate(value));
-    }
-  } else {
-    Register in = locations->InAt(0).AsRegister<Register>();
-    // Check bounds of our cache.
-    __ leal(out, Address(in, -info.low));
-    __ cmpl(out, Immediate(info.high - info.low + 1));
-    NearLabel allocate, done;
-    __ j(kAboveEqual, &allocate);
-    // If the value is within the bounds, load the j.l.Integer directly from the array.
-    uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
-    uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache));
-    __ movl(out, Address(out, TIMES_4, data_offset + address));
-    __ jmp(&done);
-    __ Bind(&allocate);
-    // Otherwise allocate and initialize a new j.l.Integer.
-    address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
-    __ movl(calling_convention.GetRegisterAt(0), Immediate(address));
-    codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
-    CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
-    __ movl(Address(out, info.value_offset), in);
-    __ Bind(&done);
-  }
-}
-
 UNIMPLEMENTED_INTRINSIC(X86, MathRoundDouble)
 UNIMPLEMENTED_INTRINSIC(X86, FloatIsInfinite)
 UNIMPLEMENTED_INTRINSIC(X86, DoubleIsInfinite)
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 57992a9041..05d270a4e6 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -2995,64 +2995,6 @@ void IntrinsicCodeGeneratorX86_64::VisitReferenceGetReferent(HInvoke* invoke) {
   __ Bind(slow_path->GetExitLabel());
 }
 
-void IntrinsicLocationsBuilderX86_64::VisitIntegerValueOf(HInvoke* invoke) {
-  InvokeRuntimeCallingConvention calling_convention;
-  IntrinsicVisitor::ComputeIntegerValueOfLocations(
-      invoke,
-      codegen_,
-      Location::RegisterLocation(RAX),
-      Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-}
-
-void IntrinsicCodeGeneratorX86_64::VisitIntegerValueOf(HInvoke* invoke) {
-  IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo();
-  LocationSummary* locations = invoke->GetLocations();
-  X86_64Assembler* assembler = GetAssembler();
-
-  CpuRegister out = locations->Out().AsRegister<CpuRegister>();
-  InvokeRuntimeCallingConvention calling_convention;
-  if (invoke->InputAt(0)->IsConstant()) {
-    int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
-    if (value >= info.low && value <= info.high) {
-      // Just embed the j.l.Integer in the code.
-      ScopedObjectAccess soa(Thread::Current());
-      mirror::Object* boxed = info.cache->Get(value + (-info.low));
-      DCHECK(boxed != nullptr && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed));
-      uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(boxed));
-      __ movl(out, Immediate(address));
-    } else {
-      // Allocate and initialize a new j.l.Integer.
-      // TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the
-      // JIT object table.
-      uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
-      __ movl(CpuRegister(calling_convention.GetRegisterAt(0)), Immediate(address));
-      codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
-      CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
-      __ movl(Address(out, info.value_offset), Immediate(value));
-    }
-  } else {
-    CpuRegister in = locations->InAt(0).AsRegister<CpuRegister>();
-    // Check bounds of our cache.
-    __ leal(out, Address(in, -info.low));
-    __ cmpl(out, Immediate(info.high - info.low + 1));
-    NearLabel allocate, done;
-    __ j(kAboveEqual, &allocate);
-    // If the value is within the bounds, load the j.l.Integer directly from the array.
-    uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
-    uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache));
-    __ movl(out, Address(out, TIMES_4, data_offset + address));
-    __ jmp(&done);
-    __ Bind(&allocate);
-    // Otherwise allocate and initialize a new j.l.Integer.
-    address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
-    __ movl(CpuRegister(calling_convention.GetRegisterAt(0)), Immediate(address));
-    codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
-    CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
-    __ movl(Address(out, info.value_offset), in);
-    __ Bind(&done);
-  }
-}
-
 UNIMPLEMENTED_INTRINSIC(X86_64, FloatIsInfinite)
 UNIMPLEMENTED_INTRINSIC(X86_64, DoubleIsInfinite)
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index c39aed2c6a..8a9e61875a 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1914,9 +1914,6 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> {
 
   virtual bool IsControlFlow() const { return false; }
 
-  // Can the instruction throw?
-  // TODO: We should rename to CanVisiblyThrow, as some instructions (like HNewInstance),
-  // could throw OOME, but it is still OK to remove them if they are unused.
   virtual bool CanThrow() const { return false; }
 
   bool CanThrowIntoCatchBlock() const { return CanThrow() && block_->IsTryBlock(); }
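A closing note on the repeated "`value` is a final field" comments in the deleted code: a newly allocated Integer is published with its final value field already set, and final-field semantics require that the field store not be reordered past the store that makes the object visible, which is why every slow path above ends with GenerateMemoryBarrier(MemBarrierKind::kStoreStore). A minimal C++ analogue of that publication pattern (Box, g_shared, and Publish are illustrative names):

    #include <atomic>
    #include <cstdint>

    // Stand-in for a freshly allocated java.lang.Integer.
    struct Box {
      int32_t value;  // plays the role of the final Integer.value field
    };

    std::atomic<Box*> g_shared{nullptr};

    void Publish(int32_t v) {
      Box* b = new Box;
      b->value = v;
      // The field store above must not be reordered past the publishing store
      // below; release ordering provides (at least) the store-store guarantee
      // that the deleted code established explicitly.
      g_shared.store(b, std::memory_order_release);
    }

Any thread that loads g_shared and sees a non-null pointer is then guaranteed to observe the initialized field, matching what the explicit kStoreStore barrier bought the generated code.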