summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--compiler/optimizing/code_generator_arm_vixl.cc25
-rw-r--r--compiler/optimizing/code_generator_arm_vixl.h25
-rw-r--r--compiler/optimizing/code_generator_riscv64.h7
-rw-r--r--compiler/optimizing/intrinsic_objects.h21
-rw-r--r--compiler/optimizing/intrinsics.cc222
-rw-r--r--compiler/optimizing/intrinsics.h30
-rw-r--r--compiler/optimizing/intrinsics_arm64.cc56
-rw-r--r--compiler/optimizing/intrinsics_arm64.h4
-rw-r--r--compiler/optimizing/intrinsics_arm_vixl.cc57
-rw-r--r--compiler/optimizing/intrinsics_arm_vixl.h4
-rw-r--r--compiler/optimizing/intrinsics_x86.cc69
-rw-r--r--compiler/optimizing/intrinsics_x86.h4
-rw-r--r--compiler/optimizing/intrinsics_x86_64.cc78
-rw-r--r--compiler/optimizing/intrinsics_x86_64.h4
-rw-r--r--compiler/optimizing/pc_relative_fixups_x86.cc3
-rw-r--r--dex2oat/linker/image_writer.cc9
-rw-r--r--runtime/intrinsics_list.h5
17 files changed, 304 insertions, 319 deletions
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index f7fa54b78e..e0764ab85b 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -297,31 +297,6 @@ static LoadOperandType GetLoadOperandType(DataType::Type type) {
}
}
-static StoreOperandType GetStoreOperandType(DataType::Type type) {
- switch (type) {
- case DataType::Type::kReference:
- return kStoreWord;
- case DataType::Type::kBool:
- case DataType::Type::kUint8:
- case DataType::Type::kInt8:
- return kStoreByte;
- case DataType::Type::kUint16:
- case DataType::Type::kInt16:
- return kStoreHalfword;
- case DataType::Type::kInt32:
- return kStoreWord;
- case DataType::Type::kInt64:
- return kStoreWordPair;
- case DataType::Type::kFloat32:
- return kStoreSWord;
- case DataType::Type::kFloat64:
- return kStoreDWord;
- default:
- LOG(FATAL) << "Unreachable type " << type;
- UNREACHABLE();
- }
-}
-
void SlowPathCodeARMVIXL::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
size_t orig_offset = stack_offset;
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index 0175448fde..3e449305b5 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -178,6 +178,31 @@ using VIXLUInt32Literal = vixl::aarch32::Literal<uint32_t>;
V(JdkUnsafeGetAndSetObject) \
V(JdkUnsafeCompareAndSetLong)
+ALWAYS_INLINE inline StoreOperandType GetStoreOperandType(DataType::Type type) {
+ switch (type) {
+ case DataType::Type::kReference:
+ return kStoreWord;
+ case DataType::Type::kBool:
+ case DataType::Type::kUint8:
+ case DataType::Type::kInt8:
+ return kStoreByte;
+ case DataType::Type::kUint16:
+ case DataType::Type::kInt16:
+ return kStoreHalfword;
+ case DataType::Type::kInt32:
+ return kStoreWord;
+ case DataType::Type::kInt64:
+ return kStoreWordPair;
+ case DataType::Type::kFloat32:
+ return kStoreSWord;
+ case DataType::Type::kFloat64:
+ return kStoreDWord;
+ default:
+ LOG(FATAL) << "Unreachable type " << type;
+ UNREACHABLE();
+ }
+}
+
class JumpTableARMVIXL : public DeletableArenaObject<kArenaAllocSwitchTable> {
public:
explicit JumpTableARMVIXL(HPackedSwitch* switch_instr)
diff --git a/compiler/optimizing/code_generator_riscv64.h b/compiler/optimizing/code_generator_riscv64.h
index 1727d01299..69f646c6ae 100644
--- a/compiler/optimizing/code_generator_riscv64.h
+++ b/compiler/optimizing/code_generator_riscv64.h
@@ -147,7 +147,6 @@ static constexpr size_t kRuntimeParameterFpuRegistersLength =
V(JdkUnsafeGetAndSetObject) \
V(ReferenceGetReferent) \
V(ReferenceRefersTo) \
- V(IntegerValueOf) \
V(ThreadInterrupted) \
V(CRC32Update) \
V(CRC32UpdateBytes) \
@@ -168,7 +167,11 @@ static constexpr size_t kRuntimeParameterFpuRegistersLength =
V(VarHandleGetAndBitwiseXorRelease) \
V(VarHandleGetAndSet) \
V(VarHandleGetAndSetAcquire) \
- V(VarHandleGetAndSetRelease)
+ V(VarHandleGetAndSetRelease) \
+ V(ByteValueOf) \
+ V(ShortValueOf) \
+ V(CharacterValueOf) \
+ V(IntegerValueOf) \
// Method register on invoke.
static const XRegister kArtMethodRegister = A0;
diff --git a/compiler/optimizing/intrinsic_objects.h b/compiler/optimizing/intrinsic_objects.h
index c9ae449467..52a6b81f0e 100644
--- a/compiler/optimizing/intrinsic_objects.h
+++ b/compiler/optimizing/intrinsic_objects.h
@@ -52,14 +52,13 @@ template <class T> class ObjectArray;
class IntrinsicObjects {
public:
enum class PatchType {
- kIntegerValueOfObject,
- kIntegerValueOfArray,
+ kValueOfObject,
+ kValueOfArray,
- kLast = kIntegerValueOfArray
+ kLast = kValueOfArray
};
static uint32_t EncodePatch(PatchType patch_type, uint32_t index = 0u) {
- DCHECK(patch_type == PatchType::kIntegerValueOfObject || index == 0u);
return PatchTypeField::Encode(static_cast<uint32_t>(patch_type)) | IndexField::Encode(index);
}
@@ -94,13 +93,6 @@ class IntrinsicObjects {
return kNumberOfBoxedCaches;
}
- private:
- static constexpr size_t kPatchTypeBits =
- MinimumBitsToStore(static_cast<uint32_t>(PatchType::kLast));
- static constexpr size_t kIndexBits = BitSizeOf<uint32_t>() - kPatchTypeBits;
- using PatchTypeField = BitField<uint32_t, 0u, kPatchTypeBits>;
- using IndexField = BitField<uint32_t, kPatchTypeBits, kIndexBits>;
-
EXPORT static ObjPtr<mirror::Object> GetValueOfObject(
ObjPtr<mirror::ObjectArray<mirror::Object>> boot_image_live_objects,
size_t start_index,
@@ -109,6 +101,13 @@ class IntrinsicObjects {
EXPORT static MemberOffset GetValueOfArrayDataOffset(
ObjPtr<mirror::ObjectArray<mirror::Object>> boot_image_live_objects,
size_t start_index) REQUIRES_SHARED(Locks::mutator_lock_);
+
+ private:
+ static constexpr size_t kPatchTypeBits =
+ MinimumBitsToStore(static_cast<uint32_t>(PatchType::kLast));
+ static constexpr size_t kIndexBits = BitSizeOf<uint32_t>() - kPatchTypeBits;
+ using PatchTypeField = BitField<uint32_t, 0u, kPatchTypeBits>;
+ using IndexField = BitField<uint32_t, kPatchTypeBits, kIndexBits>;
};
} // namespace art
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index d9623c0abb..d960d5f212 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -51,17 +51,6 @@ std::ostream& operator<<(std::ostream& os, const Intrinsics& intrinsic) {
return os;
}
-static const char kIntegerCacheDescriptor[] = "Ljava/lang/Integer$IntegerCache;";
-static const char kIntegerDescriptor[] = "Ljava/lang/Integer;";
-static const char kLowFieldName[] = "low";
-static const char kHighFieldName[] = "high";
-static const char kValueFieldName[] = "value";
-
-static constexpr int32_t kIntegerCacheLow = -128;
-static constexpr int32_t kIntegerCacheHigh = 127;
-static constexpr int32_t kIntegerCacheLength = kIntegerCacheHigh - kIntegerCacheLow + 1;
-
-
static ObjPtr<mirror::ObjectArray<mirror::Object>> GetBootImageLiveObjects()
REQUIRES_SHARED(Locks::mutator_lock_) {
gc::Heap* heap = Runtime::Current()->GetHeap();
@@ -76,80 +65,6 @@ static ObjPtr<mirror::ObjectArray<mirror::Object>> GetBootImageLiveObjects()
return boot_image_live_objects;
}
-static ObjPtr<mirror::ObjectArray<mirror::Object>> GetIntegerCacheArray(
- ObjPtr<mirror::Class> cache_class) REQUIRES_SHARED(Locks::mutator_lock_) {
- ArtField* cache_field = WellKnownClasses::java_lang_Integer_IntegerCache_cache;
- return ObjPtr<mirror::ObjectArray<mirror::Object>>::DownCast(cache_field->GetObject(cache_class));
-}
-
-static int32_t GetIntegerCacheField(ObjPtr<mirror::Class> cache_class, const char* field_name)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ArtField* field = cache_class->FindDeclaredStaticField(field_name, "I");
- DCHECK(field != nullptr);
- return field->GetInt(cache_class);
-}
-
-bool IntrinsicVisitor::CheckIntegerCacheFields(ObjPtr<mirror::ObjectArray<mirror::Object>> cache) {
- ObjPtr<mirror::Class> cache_class = WellKnownClasses::java_lang_Integer_IntegerCache.Get();
- // Check that the range matches the boot image cache length.
- int32_t low = GetIntegerCacheField(cache_class, kLowFieldName);
- int32_t high = GetIntegerCacheField(cache_class, kHighFieldName);
- if (low != kIntegerCacheLow || high != kIntegerCacheHigh) {
- return false;
- }
- if (cache->GetLength() != high - low + 1) {
- return false;
- }
-
- // Check that the elements match the values we expect.
- ObjPtr<mirror::Class> integer_class = WellKnownClasses::java_lang_Integer.Get();
- DCHECK(integer_class->IsInitialized());
- ArtField* value_field = integer_class->FindDeclaredInstanceField(kValueFieldName, "I");
- DCHECK(value_field != nullptr);
- for (int32_t i = 0, len = cache->GetLength(); i != len; ++i) {
- ObjPtr<mirror::Object> current_object = cache->Get(i);
- if (value_field->GetInt(current_object) != low + i) {
- return false;
- }
- }
- return true;
-}
-
-static bool CheckIntegerCache(ObjPtr<mirror::ObjectArray<mirror::Object>> boot_image_live_objects)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- // Since we have a cache in the boot image, both java.lang.Integer and
- // java.lang.Integer$IntegerCache must be initialized in the boot image.
- ObjPtr<mirror::Class> cache_class = WellKnownClasses::java_lang_Integer_IntegerCache.Get();
- DCHECK(cache_class->IsInitialized());
- ObjPtr<mirror::Class> integer_class = WellKnownClasses::java_lang_Integer.Get();
- DCHECK(integer_class->IsInitialized());
-
- ObjPtr<mirror::ObjectArray<mirror::Object>> current_cache = GetIntegerCacheArray(cache_class);
- if (!IntrinsicVisitor::CheckIntegerCacheFields(current_cache)) {
- return false;
- }
-
- // Check that the elements match the boot image intrinsic objects and check their values as well.
- ArtField* value_field = integer_class->FindDeclaredInstanceField(kValueFieldName, "I");
- DCHECK(value_field != nullptr);
- for (int32_t i = 0, len = current_cache->GetLength(); i != len; ++i) {
- ObjPtr<mirror::Object> boot_image_object =
- IntrinsicObjects::GetIntegerValueOfObject(boot_image_live_objects, i);
- DCHECK(Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boot_image_object));
- // No need for read barrier for comparison with a boot image object.
- ObjPtr<mirror::Object> current_object =
- current_cache->GetWithoutChecks<kVerifyNone, kWithoutReadBarrier>(i);
- if (boot_image_object != current_object) {
- return false; // Messed up IntegerCache.cache[i]
- }
- if (value_field->GetInt(boot_image_object) != kIntegerCacheLow + i) {
- return false; // Messed up IntegerCache.cache[i].value.
- }
- }
-
- return true;
-}
-
static bool CanReferenceBootImageObjects(HInvoke* invoke, const CompilerOptions& compiler_options) {
// Piggyback on the method load kind to determine whether we can use PC-relative addressing
// for AOT. This should cover both the testing config (non-PIC boot image) and codegens that
@@ -165,73 +80,24 @@ static bool CanReferenceBootImageObjects(HInvoke* invoke, const CompilerOptions&
return true;
}
-void IntrinsicVisitor::ComputeIntegerValueOfLocations(HInvoke* invoke,
- CodeGenerator* codegen,
- Location return_location,
- Location first_argument_location) {
- // The intrinsic will call if it needs to allocate a j.l.Integer.
+void IntrinsicVisitor::ComputeValueOfLocations(HInvoke* invoke,
+ CodeGenerator* codegen,
+ int32_t low,
+ int32_t length,
+ Location return_location,
+ Location first_argument_location) {
+ // The intrinsic will call if it needs to allocate a boxed object.
LocationSummary::CallKind call_kind = LocationSummary::kCallOnMainOnly;
const CompilerOptions& compiler_options = codegen->GetCompilerOptions();
if (!CanReferenceBootImageObjects(invoke, compiler_options)) {
return;
}
HInstruction* const input = invoke->InputAt(0);
- if (compiler_options.IsBootImage()) {
- if (!compiler_options.IsImageClass(kIntegerCacheDescriptor) ||
- !compiler_options.IsImageClass(kIntegerDescriptor)) {
- return;
- }
- ScopedObjectAccess soa(Thread::Current());
- ObjPtr<mirror::Class> cache_class = WellKnownClasses::java_lang_Integer_IntegerCache.Get();
- DCHECK(cache_class->IsInitialized());
- ObjPtr<mirror::Class> integer_class = WellKnownClasses::java_lang_Integer.Get();
- DCHECK(integer_class->IsInitialized());
- int32_t low = kIntegerCacheLow;
- int32_t high = kIntegerCacheHigh;
- if (kIsDebugBuild) {
- CHECK_EQ(low, GetIntegerCacheField(cache_class, kLowFieldName));
- CHECK_EQ(high, GetIntegerCacheField(cache_class, kHighFieldName));
- ObjPtr<mirror::ObjectArray<mirror::Object>> current_cache = GetIntegerCacheArray(cache_class);
- CHECK(current_cache != nullptr);
- CHECK_EQ(current_cache->GetLength(), high - low + 1);
- ArtField* value_field = integer_class->FindDeclaredInstanceField(kValueFieldName, "I");
- CHECK(value_field != nullptr);
- for (int32_t i = 0, len = current_cache->GetLength(); i != len; ++i) {
- ObjPtr<mirror::Object> current_object = current_cache->GetWithoutChecks(i);
- CHECK(current_object != nullptr);
- CHECK_EQ(value_field->GetInt(current_object), low + i);
- }
- }
- if (input->IsIntConstant()) {
- int32_t value = input->AsIntConstant()->GetValue();
- if (static_cast<uint32_t>(value) - static_cast<uint32_t>(low) <
- static_cast<uint32_t>(high - low + 1)) {
- // No call, we shall use direct pointer to the Integer object.
- call_kind = LocationSummary::kNoCall;
- }
- }
- } else {
- ScopedObjectAccess soa(Thread::Current());
- ObjPtr<mirror::ObjectArray<mirror::Object>> boot_image_live_objects = GetBootImageLiveObjects();
- DCHECK_IMPLIES(compiler_options.IsAotCompiler(), CheckIntegerCache(boot_image_live_objects));
-
- if (input->IsIntConstant()) {
- if (kIsDebugBuild) {
- // Check the `value` from the lowest cached Integer.
- ObjPtr<mirror::Object> low_integer =
- IntrinsicObjects::GetIntegerValueOfObject(boot_image_live_objects, 0u);
- ObjPtr<mirror::Class> integer_class =
- low_integer->GetClass<kVerifyNone, kWithoutReadBarrier>();
- ArtField* value_field = integer_class->FindDeclaredInstanceField(kValueFieldName, "I");
- DCHECK(value_field != nullptr);
- DCHECK_EQ(kIntegerCacheLow, value_field->GetInt(low_integer));
- }
- int32_t value = input->AsIntConstant()->GetValue();
- if (static_cast<uint32_t>(value) - static_cast<uint32_t>(kIntegerCacheLow) <
- static_cast<uint32_t>(kIntegerCacheLength)) {
- // No call, we shall use direct pointer to the Integer object.
- call_kind = LocationSummary::kNoCall;
- }
+ if (input->IsConstant()) {
+ int32_t value = input->AsIntConstant()->GetValue();
+ if (static_cast<uint32_t>(value) - static_cast<uint32_t>(low) < static_cast<uint32_t>(length)) {
+ // No call, we shall use direct pointer to the boxed object.
+ call_kind = LocationSummary::kNoCall;
}
}
@@ -247,76 +113,58 @@ void IntrinsicVisitor::ComputeIntegerValueOfLocations(HInvoke* invoke,
}
}
-inline IntrinsicVisitor::IntegerValueOfInfo::IntegerValueOfInfo()
+inline IntrinsicVisitor::ValueOfInfo::ValueOfInfo()
: value_offset(0),
low(0),
length(0u),
value_boot_image_reference(kInvalidReference) {}
-IntrinsicVisitor::IntegerValueOfInfo IntrinsicVisitor::ComputeIntegerValueOfInfo(
- HInvoke* invoke, const CompilerOptions& compiler_options) {
- // Note that we could cache all of the data looked up here. but there's no good
- // location for it. We don't want to add it to WellKnownClasses, to avoid creating global
- // jni values. Adding it as state to the compiler singleton seems like wrong
- // separation of concerns.
- // The need for this data should be pretty rare though.
-
- // Note that at this point we can no longer abort the code generation. Therefore,
- // we need to provide data that shall not lead to a crash even if the fields were
- // modified through reflection since ComputeIntegerValueOfLocations() when JITting.
-
- ScopedObjectAccess soa(Thread::Current());
- IntegerValueOfInfo info;
- info.low = kIntegerCacheLow;
- info.length = kIntegerCacheLength;
+IntrinsicVisitor::ValueOfInfo IntrinsicVisitor::ComputeValueOfInfo(
+ HInvoke* invoke,
+ const CompilerOptions& compiler_options,
+ ArtField* value_field,
+ int32_t low,
+ int32_t length,
+ size_t base) {
+ ValueOfInfo info;
+ info.low = low;
+ info.length = length;
+ info.value_offset = value_field->GetOffset().Uint32Value();
if (compiler_options.IsBootImage()) {
- ObjPtr<mirror::Class> integer_class = invoke->GetResolvedMethod()->GetDeclaringClass();
- DCHECK(integer_class->DescriptorEquals(kIntegerDescriptor));
- ArtField* value_field = integer_class->FindDeclaredInstanceField(kValueFieldName, "I");
- DCHECK(value_field != nullptr);
- info.value_offset = value_field->GetOffset().Uint32Value();
- ObjPtr<mirror::Class> cache_class = WellKnownClasses::java_lang_Integer_IntegerCache.Get();
- DCHECK_EQ(info.low, GetIntegerCacheField(cache_class, kLowFieldName));
- DCHECK_EQ(kIntegerCacheHigh, GetIntegerCacheField(cache_class, kHighFieldName));
-
- if (invoke->InputAt(0)->IsIntConstant()) {
+ if (invoke->InputAt(0)->IsConstant()) {
int32_t input_value = invoke->InputAt(0)->AsIntConstant()->GetValue();
uint32_t index = static_cast<uint32_t>(input_value) - static_cast<uint32_t>(info.low);
if (index < static_cast<uint32_t>(info.length)) {
info.value_boot_image_reference = IntrinsicObjects::EncodePatch(
- IntrinsicObjects::PatchType::kIntegerValueOfObject, index);
+ IntrinsicObjects::PatchType::kValueOfObject, index + base);
} else {
// Not in the cache.
- info.value_boot_image_reference = IntegerValueOfInfo::kInvalidReference;
+ info.value_boot_image_reference = ValueOfInfo::kInvalidReference;
}
} else {
info.array_data_boot_image_reference =
- IntrinsicObjects::EncodePatch(IntrinsicObjects::PatchType::kIntegerValueOfArray);
+ IntrinsicObjects::EncodePatch(IntrinsicObjects::PatchType::kValueOfArray, base);
}
} else {
+ ScopedObjectAccess soa(Thread::Current());
ObjPtr<mirror::ObjectArray<mirror::Object>> boot_image_live_objects = GetBootImageLiveObjects();
- ObjPtr<mirror::Object> low_integer =
- IntrinsicObjects::GetIntegerValueOfObject(boot_image_live_objects, 0u);
- ObjPtr<mirror::Class> integer_class = low_integer->GetClass<kVerifyNone, kWithoutReadBarrier>();
- ArtField* value_field = integer_class->FindDeclaredInstanceField(kValueFieldName, "I");
- DCHECK(value_field != nullptr);
- info.value_offset = value_field->GetOffset().Uint32Value();
if (invoke->InputAt(0)->IsIntConstant()) {
int32_t input_value = invoke->InputAt(0)->AsIntConstant()->GetValue();
uint32_t index = static_cast<uint32_t>(input_value) - static_cast<uint32_t>(info.low);
if (index < static_cast<uint32_t>(info.length)) {
- ObjPtr<mirror::Object> integer =
- IntrinsicObjects::GetIntegerValueOfObject(boot_image_live_objects, index);
- info.value_boot_image_reference = CodeGenerator::GetBootImageOffset(integer);
+ ObjPtr<mirror::Object> object =
+ IntrinsicObjects::GetValueOfObject(boot_image_live_objects, base, index);
+ info.value_boot_image_reference = CodeGenerator::GetBootImageOffset(object);
} else {
// Not in the cache.
- info.value_boot_image_reference = IntegerValueOfInfo::kInvalidReference;
+ info.value_boot_image_reference = ValueOfInfo::kInvalidReference;
}
} else {
info.array_data_boot_image_reference =
CodeGenerator::GetBootImageOffset(boot_image_live_objects) +
- IntrinsicObjects::GetIntegerValueOfArrayDataOffset(boot_image_live_objects).Uint32Value();
+ IntrinsicObjects::GetValueOfArrayDataOffset(
+ boot_image_live_objects, base).Uint32Value();
}
}
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index fbcc613e8c..d901f7e10c 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -95,17 +95,18 @@ class IntrinsicVisitor : public ValueObject {
codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
}
- static void ComputeIntegerValueOfLocations(HInvoke* invoke,
- CodeGenerator* codegen,
- Location return_location,
- Location first_argument_location);
-
- // Temporary data structure for holding Integer.valueOf data for generating code.
- // We only use it if the boot image contains the IntegerCache objects.
- struct IntegerValueOfInfo {
+ static void ComputeValueOfLocations(HInvoke* invoke,
+ CodeGenerator* codegen,
+ int32_t low,
+ int32_t length,
+ Location return_location,
+ Location first_argument_location);
+
+ // Temporary data structure for holding BoxedType.valueOf data for generating code.
+ struct ValueOfInfo {
static constexpr uint32_t kInvalidReference = static_cast<uint32_t>(-1);
- IntegerValueOfInfo();
+ ValueOfInfo();
// Offset of the Integer.value field for initializing a newly allocated instance.
uint32_t value_offset;
@@ -130,10 +131,13 @@ class IntrinsicVisitor : public ValueObject {
};
};
- static IntegerValueOfInfo ComputeIntegerValueOfInfo(
- HInvoke* invoke, const CompilerOptions& compiler_options);
- static bool CheckIntegerCacheFields(ObjPtr<mirror::ObjectArray<mirror::Object>> cache)
- REQUIRES_SHARED(Locks::mutator_lock_);
+ static ValueOfInfo ComputeValueOfInfo(
+ HInvoke* invoke,
+ const CompilerOptions& compiler_options,
+ ArtField* value_field,
+ int32_t low,
+ int32_t length,
+ size_t base);
static MemberOffset GetReferenceDisableIntrinsicOffset();
static MemberOffset GetReferenceSlowPathEnabledOffset();
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 70b7dcaa02..4087afa1c2 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -25,6 +25,7 @@
#include "data_type-inl.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "heap_poisoning.h"
+#include "intrinsic_objects.h"
#include "intrinsics.h"
#include "intrinsics_utils.h"
#include "lock_word.h"
@@ -36,6 +37,7 @@
#include "scoped_thread_state_change-inl.h"
#include "thread-current-inl.h"
#include "utils/arm64/assembler_arm64.h"
+#include "well_known_classes.h"
using namespace vixl::aarch64; // NOLINT(build/namespaces)
@@ -3469,18 +3471,34 @@ void IntrinsicCodeGeneratorARM64::VisitDoubleIsInfinite(HInvoke* invoke) {
GenIsInfinite(invoke->GetLocations(), /* is64bit= */ true, GetVIXLAssembler());
}
-void IntrinsicLocationsBuilderARM64::VisitIntegerValueOf(HInvoke* invoke) {
- InvokeRuntimeCallingConvention calling_convention;
- IntrinsicVisitor::ComputeIntegerValueOfLocations(
- invoke,
- codegen_,
- calling_convention.GetReturnLocation(DataType::Type::kReference),
- Location::RegisterLocation(calling_convention.GetRegisterAt(0).GetCode()));
-}
-
-void IntrinsicCodeGeneratorARM64::VisitIntegerValueOf(HInvoke* invoke) {
- IntrinsicVisitor::IntegerValueOfInfo info =
- IntrinsicVisitor::ComputeIntegerValueOfInfo(invoke, codegen_->GetCompilerOptions());
+#define VISIT_INTRINSIC(name, low, high, type, start_index) \
+ void IntrinsicLocationsBuilderARM64::Visit ##name ##ValueOf(HInvoke* invoke) { \
+ InvokeRuntimeCallingConvention calling_convention; \
+ IntrinsicVisitor::ComputeValueOfLocations( \
+ invoke, \
+ codegen_, \
+ low, \
+ high - low + 1, \
+ calling_convention.GetReturnLocation(DataType::Type::kReference), \
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0).GetCode())); \
+ } \
+ void IntrinsicCodeGeneratorARM64::Visit ##name ##ValueOf(HInvoke* invoke) { \
+ IntrinsicVisitor::ValueOfInfo info = \
+ IntrinsicVisitor::ComputeValueOfInfo( \
+ invoke, \
+ codegen_->GetCompilerOptions(), \
+ WellKnownClasses::java_lang_ ##name ##_value, \
+ low, \
+ high - low + 1, \
+ start_index); \
+ HandleValueOf(invoke, info, type); \
+ }
+ BOXED_TYPES(VISIT_INTRINSIC)
+#undef VISIT_INTRINSIC
+
+void IntrinsicCodeGeneratorARM64::HandleValueOf(HInvoke* invoke,
+ const IntrinsicVisitor::ValueOfInfo& info,
+ DataType::Type primitive_type) {
LocationSummary* locations = invoke->GetLocations();
MacroAssembler* masm = GetVIXLAssembler();
@@ -3497,16 +3515,16 @@ void IntrinsicCodeGeneratorARM64::VisitIntegerValueOf(HInvoke* invoke) {
int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
if (static_cast<uint32_t>(value - info.low) < info.length) {
// Just embed the j.l.Integer in the code.
- DCHECK_NE(info.value_boot_image_reference, IntegerValueOfInfo::kInvalidReference);
+ DCHECK_NE(info.value_boot_image_reference, ValueOfInfo::kInvalidReference);
codegen_->LoadBootImageAddress(out, info.value_boot_image_reference);
} else {
DCHECK(locations->CanCall());
- // Allocate and initialize a new j.l.Integer.
- // TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the
+ // Allocate and initialize a new object.
+ // TODO: If we JIT, we could allocate the object now, and store it in the
// JIT object table.
allocate_instance();
__ Mov(temp.W(), value);
- __ Str(temp.W(), HeapOperand(out.W(), info.value_offset));
+ codegen_->Store(primitive_type, temp.W(), HeapOperand(out.W(), info.value_offset));
// Class pointer and `value` final field stores require a barrier before publication.
codegen_->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
}
@@ -3518,7 +3536,7 @@ void IntrinsicCodeGeneratorARM64::VisitIntegerValueOf(HInvoke* invoke) {
__ Cmp(out.W(), info.length);
vixl::aarch64::Label allocate, done;
__ B(&allocate, hs);
- // If the value is within the bounds, load the j.l.Integer directly from the array.
+ // If the value is within the bounds, load the object directly from the array.
codegen_->LoadBootImageAddress(temp, info.array_data_boot_image_reference);
MemOperand source = HeapOperand(
temp, out.X(), LSL, DataType::SizeShift(DataType::Type::kReference));
@@ -3526,9 +3544,9 @@ void IntrinsicCodeGeneratorARM64::VisitIntegerValueOf(HInvoke* invoke) {
codegen_->GetAssembler()->MaybeUnpoisonHeapReference(out);
__ B(&done);
__ Bind(&allocate);
- // Otherwise allocate and initialize a new j.l.Integer.
+ // Otherwise allocate and initialize a new object.
allocate_instance();
- __ Str(in.W(), HeapOperand(out.W(), info.value_offset));
+ codegen_->Store(primitive_type, in.W(), HeapOperand(out.W(), info.value_offset));
// Class pointer and `value` final field stores require a barrier before publication.
codegen_->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
__ Bind(&done);
diff --git a/compiler/optimizing/intrinsics_arm64.h b/compiler/optimizing/intrinsics_arm64.h
index b20cea65f4..d198cea8f6 100644
--- a/compiler/optimizing/intrinsics_arm64.h
+++ b/compiler/optimizing/intrinsics_arm64.h
@@ -79,6 +79,10 @@ class IntrinsicCodeGeneratorARM64 final : public IntrinsicVisitor {
ArenaAllocator* GetAllocator();
+ void HandleValueOf(HInvoke* invoke,
+ const IntrinsicVisitor::ValueOfInfo& info,
+ DataType::Type primitive_type);
+
CodeGeneratorARM64* const codegen_;
DISALLOW_COPY_AND_ASSIGN(IntrinsicCodeGeneratorARM64);
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index 0cec278f06..cfa72e3d70 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -22,6 +22,7 @@
#include "code_generator_arm_vixl.h"
#include "common_arm.h"
#include "heap_poisoning.h"
+#include "intrinsic_objects.h"
#include "intrinsics.h"
#include "intrinsics_utils.h"
#include "lock_word.h"
@@ -31,6 +32,7 @@
#include "mirror/string-inl.h"
#include "scoped_thread_state_change-inl.h"
#include "thread-current-inl.h"
+#include "well_known_classes.h"
#include "aarch32/constants-aarch32.h"
@@ -2432,18 +2434,35 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathFloor(HInvoke* invoke) {
__ Vrintm(F64, OutputDRegister(invoke), InputDRegisterAt(invoke, 0));
}
-void IntrinsicLocationsBuilderARMVIXL::VisitIntegerValueOf(HInvoke* invoke) {
- InvokeRuntimeCallingConventionARMVIXL calling_convention;
- IntrinsicVisitor::ComputeIntegerValueOfLocations(
- invoke,
- codegen_,
- LocationFrom(r0),
- LocationFrom(calling_convention.GetRegisterAt(0)));
-}
-
-void IntrinsicCodeGeneratorARMVIXL::VisitIntegerValueOf(HInvoke* invoke) {
- IntrinsicVisitor::IntegerValueOfInfo info =
- IntrinsicVisitor::ComputeIntegerValueOfInfo(invoke, codegen_->GetCompilerOptions());
+#define VISIT_INTRINSIC(name, low, high, type, start_index) \
+ void IntrinsicLocationsBuilderARMVIXL::Visit ##name ##ValueOf(HInvoke* invoke) { \
+ InvokeRuntimeCallingConventionARMVIXL calling_convention; \
+ IntrinsicVisitor::ComputeValueOfLocations( \
+ invoke, \
+ codegen_, \
+ low, \
+ high - low + 1, \
+ LocationFrom(r0), \
+ LocationFrom(calling_convention.GetRegisterAt(0))); \
+ } \
+ void IntrinsicCodeGeneratorARMVIXL::Visit ##name ##ValueOf(HInvoke* invoke) { \
+ IntrinsicVisitor::ValueOfInfo info = \
+ IntrinsicVisitor::ComputeValueOfInfo( \
+ invoke, \
+ codegen_->GetCompilerOptions(), \
+ WellKnownClasses::java_lang_ ##name ##_value, \
+ low, \
+ high - low + 1, \
+ start_index); \
+ HandleValueOf(invoke, info, type); \
+ }
+ BOXED_TYPES(VISIT_INTRINSIC)
+#undef VISIT_INTRINSIC
+
+
+void IntrinsicCodeGeneratorARMVIXL::HandleValueOf(HInvoke* invoke,
+ const IntrinsicVisitor::ValueOfInfo& info,
+ DataType::Type primitive_type) {
LocationSummary* locations = invoke->GetLocations();
ArmVIXLAssembler* const assembler = GetAssembler();
@@ -2459,17 +2478,17 @@ void IntrinsicCodeGeneratorARMVIXL::VisitIntegerValueOf(HInvoke* invoke) {
if (invoke->InputAt(0)->IsConstant()) {
int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
if (static_cast<uint32_t>(value - info.low) < info.length) {
- // Just embed the j.l.Integer in the code.
- DCHECK_NE(info.value_boot_image_reference, IntegerValueOfInfo::kInvalidReference);
+ // Just embed the object in the code.
+ DCHECK_NE(info.value_boot_image_reference, ValueOfInfo::kInvalidReference);
codegen_->LoadBootImageAddress(out, info.value_boot_image_reference);
} else {
DCHECK(locations->CanCall());
// Allocate and initialize a new j.l.Integer.
- // TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the
+ // TODO: If we JIT, we could allocate the object now, and store it in the
// JIT object table.
allocate_instance();
__ Mov(temp, value);
- assembler->StoreToOffset(kStoreWord, temp, out, info.value_offset);
+ assembler->StoreToOffset(GetStoreOperandType(primitive_type), temp, out, info.value_offset);
// Class pointer and `value` final field stores require a barrier before publication.
codegen_->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
}
@@ -2481,15 +2500,15 @@ void IntrinsicCodeGeneratorARMVIXL::VisitIntegerValueOf(HInvoke* invoke) {
__ Cmp(out, info.length);
vixl32::Label allocate, done;
__ B(hs, &allocate, /* is_far_target= */ false);
- // If the value is within the bounds, load the j.l.Integer directly from the array.
+ // If the value is within the bounds, load the object directly from the array.
codegen_->LoadBootImageAddress(temp, info.array_data_boot_image_reference);
codegen_->LoadFromShiftedRegOffset(DataType::Type::kReference, locations->Out(), temp, out);
assembler->MaybeUnpoisonHeapReference(out);
__ B(&done);
__ Bind(&allocate);
- // Otherwise allocate and initialize a new j.l.Integer.
+ // Otherwise allocate and initialize a new object.
allocate_instance();
- assembler->StoreToOffset(kStoreWord, in, out, info.value_offset);
+ assembler->StoreToOffset(GetStoreOperandType(primitive_type), in, out, info.value_offset);
// Class pointer and `value` final field stores require a barrier before publication.
codegen_->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
__ Bind(&done);
diff --git a/compiler/optimizing/intrinsics_arm_vixl.h b/compiler/optimizing/intrinsics_arm_vixl.h
index f517d21c9d..7757fb56a3 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.h
+++ b/compiler/optimizing/intrinsics_arm_vixl.h
@@ -69,6 +69,10 @@ class IntrinsicCodeGeneratorARMVIXL final : public IntrinsicVisitor {
ArenaAllocator* GetAllocator();
ArmVIXLAssembler* GetAssembler();
+ void HandleValueOf(HInvoke* invoke,
+ const IntrinsicVisitor::ValueOfInfo& info,
+ DataType::Type primitive_type);
+
CodeGeneratorARMVIXL* const codegen_;
DISALLOW_COPY_AND_ASSIGN(IntrinsicCodeGeneratorARMVIXL);
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 0e32315335..398e4109dc 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -25,6 +25,7 @@
#include "data_type-inl.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "heap_poisoning.h"
+#include "intrinsic_objects.h"
#include "intrinsics.h"
#include "intrinsics_utils.h"
#include "lock_word.h"
@@ -37,6 +38,7 @@
#include "thread-current-inl.h"
#include "utils/x86/assembler_x86.h"
#include "utils/x86/constants_x86.h"
+#include "well_known_classes.h"
namespace art HIDDEN {
@@ -3282,21 +3284,36 @@ static void RequestBaseMethodAddressInRegister(HInvoke* invoke) {
}
}
-void IntrinsicLocationsBuilderX86::VisitIntegerValueOf(HInvoke* invoke) {
+#define VISIT_INTRINSIC(name, low, high, type, start_index) \
+ void IntrinsicLocationsBuilderX86::Visit ##name ##ValueOf(HInvoke* invoke) { \
+ InvokeRuntimeCallingConvention calling_convention; \
+ IntrinsicVisitor::ComputeValueOfLocations( \
+ invoke, \
+ codegen_, \
+ low, \
+ high - low + 1, \
+ Location::RegisterLocation(EAX), \
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0))); \
+ RequestBaseMethodAddressInRegister(invoke); \
+ } \
+ void IntrinsicCodeGeneratorX86::Visit ##name ##ValueOf(HInvoke* invoke) { \
+ IntrinsicVisitor::ValueOfInfo info = \
+ IntrinsicVisitor::ComputeValueOfInfo( \
+ invoke, \
+ codegen_->GetCompilerOptions(), \
+ WellKnownClasses::java_lang_ ##name ##_value, \
+ low, \
+ high - low + 1, \
+ start_index); \
+ HandleValueOf(invoke, info, type); \
+ }
+ BOXED_TYPES(VISIT_INTRINSIC)
+#undef VISIT_INTRINSIC
+
+void IntrinsicCodeGeneratorX86::HandleValueOf(HInvoke* invoke,
+ const IntrinsicVisitor::ValueOfInfo& info,
+ DataType::Type primitive_type) {
DCHECK(invoke->IsInvokeStaticOrDirect());
- InvokeRuntimeCallingConvention calling_convention;
- IntrinsicVisitor::ComputeIntegerValueOfLocations(
- invoke,
- codegen_,
- Location::RegisterLocation(EAX),
- Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
- RequestBaseMethodAddressInRegister(invoke);
-}
-
-void IntrinsicCodeGeneratorX86::VisitIntegerValueOf(HInvoke* invoke) {
- DCHECK(invoke->IsInvokeStaticOrDirect());
- IntrinsicVisitor::IntegerValueOfInfo info =
- IntrinsicVisitor::ComputeIntegerValueOfInfo(invoke, codegen_->GetCompilerOptions());
LocationSummary* locations = invoke->GetLocations();
X86Assembler* assembler = GetAssembler();
@@ -3310,17 +3327,22 @@ void IntrinsicCodeGeneratorX86::VisitIntegerValueOf(HInvoke* invoke) {
if (invoke->InputAt(0)->IsConstant()) {
int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
if (static_cast<uint32_t>(value - info.low) < info.length) {
- // Just embed the j.l.Integer in the code.
- DCHECK_NE(info.value_boot_image_reference, IntegerValueOfInfo::kInvalidReference);
+ // Just embed the object in the code.
+ DCHECK_NE(info.value_boot_image_reference, ValueOfInfo::kInvalidReference);
codegen_->LoadBootImageAddress(
out, info.value_boot_image_reference, invoke->AsInvokeStaticOrDirect());
} else {
DCHECK(locations->CanCall());
// Allocate and initialize a new j.l.Integer.
- // TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the
+ // TODO: If we JIT, we could allocate the object now, and store it in the
// JIT object table.
allocate_instance();
- __ movl(Address(out, info.value_offset), Immediate(value));
+ codegen_->MoveToMemory(primitive_type,
+ Location::ConstantLocation(invoke->InputAt(0)->AsIntConstant()),
+ out,
+ /* dst_index= */ Register::kNoRegister,
+ /* dst_scale= */ TIMES_1,
+ /* dst_disp= */ info.value_offset);
}
} else {
DCHECK(locations->CanCall());
@@ -3330,7 +3352,7 @@ void IntrinsicCodeGeneratorX86::VisitIntegerValueOf(HInvoke* invoke) {
__ cmpl(out, Immediate(info.length));
NearLabel allocate, done;
__ j(kAboveEqual, &allocate);
- // If the value is within the bounds, load the j.l.Integer directly from the array.
+ // If the value is within the bounds, load the object directly from the array.
constexpr size_t kElementSize = sizeof(mirror::HeapReference<mirror::Object>);
static_assert((1u << TIMES_4) == sizeof(mirror::HeapReference<mirror::Object>),
"Check heap reference size.");
@@ -3358,9 +3380,14 @@ void IntrinsicCodeGeneratorX86::VisitIntegerValueOf(HInvoke* invoke) {
__ MaybeUnpoisonHeapReference(out);
__ jmp(&done);
__ Bind(&allocate);
- // Otherwise allocate and initialize a new j.l.Integer.
+ // Otherwise allocate and initialize a new object.
allocate_instance();
- __ movl(Address(out, info.value_offset), in);
+ codegen_->MoveToMemory(primitive_type,
+ Location::RegisterLocation(in),
+ out,
+ /* dst_index= */ Register::kNoRegister,
+ /* dst_scale= */ TIMES_1,
+ /* dst_disp= */ info.value_offset);
__ Bind(&done);
}
}
diff --git a/compiler/optimizing/intrinsics_x86.h b/compiler/optimizing/intrinsics_x86.h
index fc2f0e3fbd..b4f07e90d1 100644
--- a/compiler/optimizing/intrinsics_x86.h
+++ b/compiler/optimizing/intrinsics_x86.h
@@ -71,6 +71,10 @@ class IntrinsicCodeGeneratorX86 final : public IntrinsicVisitor {
ArenaAllocator* GetAllocator();
+ void HandleValueOf(HInvoke* invoke,
+ const IntrinsicVisitor::ValueOfInfo& info,
+ DataType::Type primitive_type);
+
CodeGeneratorX86* const codegen_;
DISALLOW_COPY_AND_ASSIGN(IntrinsicCodeGeneratorX86);
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 404fd3bccc..4eb89811cd 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -25,6 +25,7 @@
#include "entrypoints/quick/quick_entrypoints.h"
#include "heap_poisoning.h"
+#include "intrinsic_objects.h"
#include "intrinsics.h"
#include "intrinsics_utils.h"
#include "lock_word.h"
#include "mirror/array-inl.h"
@@ -35,6 +36,7 @@
#include "thread-current-inl.h"
#include "utils/x86_64/assembler_x86_64.h"
#include "utils/x86_64/constants_x86_64.h"
+#include "well_known_classes.h"
namespace art HIDDEN {
@@ -3054,18 +3056,60 @@ void IntrinsicCodeGeneratorX86_64::VisitLongNumberOfTrailingZeros(HInvoke* invok
GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long= */ true);
}
-void IntrinsicLocationsBuilderX86_64::VisitIntegerValueOf(HInvoke* invoke) {
- InvokeRuntimeCallingConvention calling_convention;
- IntrinsicVisitor::ComputeIntegerValueOfLocations(
- invoke,
- codegen_,
- Location::RegisterLocation(RAX),
- Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+#define VISIT_INTRINSIC(name, low, high, type, start_index) \
+ void IntrinsicLocationsBuilderX86_64::Visit ##name ##ValueOf(HInvoke* invoke) { \
+ InvokeRuntimeCallingConvention calling_convention; \
+ IntrinsicVisitor::ComputeValueOfLocations( \
+ invoke, \
+ codegen_, \
+ low, \
+ high - low + 1, \
+ Location::RegisterLocation(RAX), \
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0))); \
+ } \
+ void IntrinsicCodeGeneratorX86_64::Visit ##name ##ValueOf(HInvoke* invoke) { \
+ IntrinsicVisitor::ValueOfInfo info = \
+ IntrinsicVisitor::ComputeValueOfInfo( \
+ invoke, \
+ codegen_->GetCompilerOptions(), \
+ WellKnownClasses::java_lang_ ##name ##_value, \
+ low, \
+ high - low + 1, \
+ start_index); \
+ HandleValueOf(invoke, info, type); \
+ }
+ BOXED_TYPES(VISIT_INTRINSIC)
+#undef VISIT_INTRINSIC
+
+template <typename T>
+static void Store(X86_64Assembler* assembler,
+ DataType::Type primitive_type,
+ const Address& address,
+ const T& operand) {
+ switch (primitive_type) {
+ case DataType::Type::kInt8:
+ case DataType::Type::kUint8: {
+ __ movb(address, operand);
+ break;
+ }
+ case DataType::Type::kInt16:
+ case DataType::Type::kUint16: {
+ __ movw(address, operand);
+ break;
+ }
+ case DataType::Type::kInt32: {
+ __ movl(address, operand);
+ break;
+ }
+ default: {
+ LOG(FATAL) << "Unrecognized ValueOf type " << primitive_type;
+ }
+ }
}
-void IntrinsicCodeGeneratorX86_64::VisitIntegerValueOf(HInvoke* invoke) {
- IntrinsicVisitor::IntegerValueOfInfo info =
- IntrinsicVisitor::ComputeIntegerValueOfInfo(invoke, codegen_->GetCompilerOptions());
+void IntrinsicCodeGeneratorX86_64::HandleValueOf(HInvoke* invoke,
+ const IntrinsicVisitor::ValueOfInfo& info,
+ DataType::Type primitive_type) {
LocationSummary* locations = invoke->GetLocations();
X86_64Assembler* assembler = GetAssembler();
@@ -3077,19 +3121,19 @@ void IntrinsicCodeGeneratorX86_64::VisitIntegerValueOf(HInvoke* invoke) {
codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
};
- if (invoke->InputAt(0)->IsIntConstant()) {
+ if (invoke->InputAt(0)->IsConstant()) {
int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
if (static_cast<uint32_t>(value - info.low) < info.length) {
// Just embed the j.l.Integer in the code.
- DCHECK_NE(info.value_boot_image_reference, IntegerValueOfInfo::kInvalidReference);
+ DCHECK_NE(info.value_boot_image_reference, ValueOfInfo::kInvalidReference);
codegen_->LoadBootImageAddress(out, info.value_boot_image_reference);
} else {
DCHECK(locations->CanCall());
// Allocate and initialize a new j.l.Integer.
- // TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the
-      // TODO: If we JIT, we could allocate the object now, and store it in the
// JIT object table.
allocate_instance();
- __ movl(Address(out, info.value_offset), Immediate(value));
+ Store(assembler, primitive_type, Address(out, info.value_offset), Immediate(value));
}
} else {
DCHECK(locations->CanCall());
@@ -3099,7 +3143,7 @@ void IntrinsicCodeGeneratorX86_64::VisitIntegerValueOf(HInvoke* invoke) {
__ cmpl(out, Immediate(info.length));
NearLabel allocate, done;
__ j(kAboveEqual, &allocate);
- // If the value is within the bounds, load the j.l.Integer directly from the array.
-    // If the value is within the bounds, load the object directly from the array.
DCHECK_NE(out.AsRegister(), argument.AsRegister());
codegen_->LoadBootImageAddress(argument, info.array_data_boot_image_reference);
static_assert((1u << TIMES_4) == sizeof(mirror::HeapReference<mirror::Object>),
@@ -3108,9 +3152,9 @@ void IntrinsicCodeGeneratorX86_64::VisitIntegerValueOf(HInvoke* invoke) {
__ MaybeUnpoisonHeapReference(out);
__ jmp(&done);
__ Bind(&allocate);
- // Otherwise allocate and initialize a new j.l.Integer.
+ // Otherwise allocate and initialize a new object.
allocate_instance();
- __ movl(Address(out, info.value_offset), in);
+ Store(assembler, primitive_type, Address(out, info.value_offset), in);
__ Bind(&done);
}
}
diff --git a/compiler/optimizing/intrinsics_x86_64.h b/compiler/optimizing/intrinsics_x86_64.h
index d0ee6f622d..652a7d1d5c 100644
--- a/compiler/optimizing/intrinsics_x86_64.h
+++ b/compiler/optimizing/intrinsics_x86_64.h
@@ -71,6 +71,10 @@ class IntrinsicCodeGeneratorX86_64 final : public IntrinsicVisitor {
ArenaAllocator* GetAllocator();
+ void HandleValueOf(HInvoke* invoke,
+ const IntrinsicVisitor::ValueOfInfo& info,
+ DataType::Type primitive_type);
+
CodeGeneratorX86_64* const codegen_;
DISALLOW_COPY_AND_ASSIGN(IntrinsicCodeGeneratorX86_64);
diff --git a/compiler/optimizing/pc_relative_fixups_x86.cc b/compiler/optimizing/pc_relative_fixups_x86.cc
index 56341f106f..c2d5ec7b60 100644
--- a/compiler/optimizing/pc_relative_fixups_x86.cc
+++ b/compiler/optimizing/pc_relative_fixups_x86.cc
@@ -235,6 +235,9 @@ class PCRelativeHandlerVisitor final : public HGraphVisitor {
LOG(FATAL) << "Unreachable min/max/abs: intrinsics should have been lowered "
"to IR nodes by instruction simplifier";
UNREACHABLE();
+ case Intrinsics::kByteValueOf:
+ case Intrinsics::kShortValueOf:
+ case Intrinsics::kCharacterValueOf:
case Intrinsics::kIntegerValueOf:
// This intrinsic can be call free if it loads the address of the boot image object.
// If we're compiling PIC, we need the address base for loading from .data.bimg.rel.ro.
diff --git a/dex2oat/linker/image_writer.cc b/dex2oat/linker/image_writer.cc
index fb881b95b4..d937adc5c8 100644
--- a/dex2oat/linker/image_writer.cc
+++ b/dex2oat/linker/image_writer.cc
@@ -2810,17 +2810,18 @@ ArtMethod* ImageWriter::GetImageMethodAddress(ArtMethod* method) {
const void* ImageWriter::GetIntrinsicReferenceAddress(uint32_t intrinsic_data) {
DCHECK(compiler_options_.IsBootImage());
switch (IntrinsicObjects::DecodePatchType(intrinsic_data)) {
- case IntrinsicObjects::PatchType::kIntegerValueOfArray: {
+ case IntrinsicObjects::PatchType::kValueOfArray: {
+ uint32_t index = IntrinsicObjects::DecodePatchIndex(intrinsic_data);
const uint8_t* base_address =
reinterpret_cast<const uint8_t*>(GetImageAddress(boot_image_live_objects_));
MemberOffset data_offset =
- IntrinsicObjects::GetIntegerValueOfArrayDataOffset(boot_image_live_objects_);
+ IntrinsicObjects::GetValueOfArrayDataOffset(boot_image_live_objects_, index);
return base_address + data_offset.Uint32Value();
}
- case IntrinsicObjects::PatchType::kIntegerValueOfObject: {
+ case IntrinsicObjects::PatchType::kValueOfObject: {
uint32_t index = IntrinsicObjects::DecodePatchIndex(intrinsic_data);
ObjPtr<mirror::Object> value =
- IntrinsicObjects::GetIntegerValueOfObject(boot_image_live_objects_, index);
+ IntrinsicObjects::GetValueOfObject(boot_image_live_objects_, /* start_index= */ 0u, index);
return GetImageAddress(value.Ptr());
}
}
diff --git a/runtime/intrinsics_list.h b/runtime/intrinsics_list.h
index 5450a3b3f7..9e9bfbc825 100644
--- a/runtime/intrinsics_list.h
+++ b/runtime/intrinsics_list.h
@@ -275,7 +275,6 @@
V(JdkUnsafeFullFence, kVirtual, kNeedsEnvironment, kAllSideEffects, kCanThrow, "Ljdk/internal/misc/Unsafe;", "fullFence", "()V") \
V(ReferenceGetReferent, kDirect, kNeedsEnvironment, kAllSideEffects, kCanThrow, "Ljava/lang/ref/Reference;", "getReferent", "()Ljava/lang/Object;") \
V(ReferenceRefersTo, kVirtual, kNeedsEnvironment, kAllSideEffects, kCanThrow, "Ljava/lang/ref/Reference;", "refersTo", "(Ljava/lang/Object;)Z") \
- V(IntegerValueOf, kStatic, kNeedsEnvironment, kNoSideEffects, kNoThrow, "Ljava/lang/Integer;", "valueOf", "(I)Ljava/lang/Integer;") \
V(ThreadInterrupted, kStatic, kNeedsEnvironment, kAllSideEffects, kNoThrow, "Ljava/lang/Thread;", "interrupted", "()Z") \
V(VarHandleFullFence, kStatic, kNeedsEnvironment, kWriteSideEffects, kNoThrow, "Ljava/lang/invoke/VarHandle;", "fullFence", "()V") \
V(VarHandleAcquireFence, kStatic, kNeedsEnvironment, kWriteSideEffects, kNoThrow, "Ljava/lang/invoke/VarHandle;", "acquireFence", "()V") \
@@ -286,6 +285,10 @@
V(CRC32Update, kStatic, kNeedsEnvironment, kNoSideEffects, kNoThrow, "Ljava/util/zip/CRC32;", "update", "(II)I") \
V(CRC32UpdateBytes, kStatic, kNeedsEnvironment, kReadSideEffects, kCanThrow, "Ljava/util/zip/CRC32;", "updateBytes", "(I[BII)I") \
V(CRC32UpdateByteBuffer, kStatic, kNeedsEnvironment, kReadSideEffects, kNoThrow, "Ljava/util/zip/CRC32;", "updateByteBuffer", "(IJII)I") \
+ V(ByteValueOf, kStatic, kNeedsEnvironment, kNoSideEffects, kNoThrow, "Ljava/lang/Byte;", "valueOf", "(B)Ljava/lang/Byte;") \
+ V(ShortValueOf, kStatic, kNeedsEnvironment, kNoSideEffects, kNoThrow, "Ljava/lang/Short;", "valueOf", "(S)Ljava/lang/Short;") \
+ V(CharacterValueOf, kStatic, kNeedsEnvironment, kNoSideEffects, kNoThrow, "Ljava/lang/Character;", "valueOf", "(C)Ljava/lang/Character;") \
+ V(IntegerValueOf, kStatic, kNeedsEnvironment, kNoSideEffects, kNoThrow, "Ljava/lang/Integer;", "valueOf", "(I)Ljava/lang/Integer;") \
ART_SIGNATURE_POLYMORPHIC_INTRINSICS_LIST(V)
#endif // ART_RUNTIME_INTRINSICS_LIST_H_