Implement Integer.valueOf() intrinsic for PIC.
Also fix the intrinsic for JIT to handle the case where
someone messes up the IntegerCache using reflection. Two
such cases are exercised by a regression test (one that
previously failed randomly and one that failed 100% of the
time), but other crashes were possible; for example, array
reads would need a read barrier when the elements are not
guaranteed to be in the boot image.
The new approach loads references only from the boot image
live objects array, which cannot be touched by reflection.
The referenced objects and IntegerCache.cache are still
exposed to reflection and can lead to weird behavior, but
not to crashes.
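
The kind of tampering this guards against can be sketched
roughly as follows (a hypothetical illustration, not the
actual test code; the real checks live in the regression
test 717-integer-value-of):

    // Hypothetical sketch; requires: import java.lang.reflect.Field;
    // and a ReflectiveOperationException handler.
    // Corrupt one element of IntegerCache.cache through reflection.
    Field cacheField = Class.forName("java.lang.Integer$IntegerCache")
        .getDeclaredField("cache");
    cacheField.setAccessible(true);
    Integer[] cache = (Integer[]) cacheField.get(null);
    cache[0] = new Integer(42);  // No longer boxes IntegerCache.low.
    // Compiled code that reads only the boot image live objects
    // array still sees the original boxed values, so results may
    // look weird but the runtime cannot crash.
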
On x86, the pc_relative_fixups_x86 pass actually checks the
cache an additional time, but discrepancies between this
check and the location building at the beginning of codegen
should be OK, as the HX86ComputeBaseMethodAddress should be
added for PIC regardless of whether pc_relative_fixups_x86
thinks the method is intrinsified or not.
Test: 717-integer-value-of
Test: Pixel 2 XL boots.
Test: m test-art-host-gtest
Test: testrunner.py --host --optimizing --pictest --npictest
Test: testrunner.py --host --jit
Test: testrunner.py --target --optimizing --pictest --npictest
Test: testrunner.py --target --jit
Bug: 71526895
Change-Id: I89b3245a62aba22980c86a99e2af480bfa250af1
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 8aa790db..85c5659 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -27,6 +27,7 @@
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "gc/accounting/card_table.h"
+#include "gc/space/image_space.h"
#include "heap_poisoning.h"
#include "intrinsics.h"
#include "intrinsics_arm64.h"
@@ -4828,6 +4829,25 @@
__ ldr(out, MemOperand(base, /* offset placeholder */ 0));
}
+void CodeGeneratorARM64::LoadBootImageAddress(vixl::aarch64::Register reg,
+ uint32_t boot_image_offset) {
+ DCHECK(!GetCompilerOptions().IsBootImage());
+ if (GetCompilerOptions().GetCompilePic()) {
+ DCHECK(Runtime::Current()->IsAotCompiler());
+ // Add ADRP with its PC-relative .data.bimg.rel.ro patch.
+ vixl::aarch64::Label* adrp_label = NewBootImageRelRoPatch(boot_image_offset);
+ EmitAdrpPlaceholder(adrp_label, reg.X());
+ // Add LDR with its PC-relative .data.bimg.rel.ro patch.
+ vixl::aarch64::Label* ldr_label = NewBootImageRelRoPatch(boot_image_offset, adrp_label);
+ EmitLdrOffsetPlaceholder(ldr_label, reg.W(), reg.X());
+ } else {
+ gc::Heap* heap = Runtime::Current()->GetHeap();
+ DCHECK(!heap->GetBootImageSpaces().empty());
+ const uint8_t* address = heap->GetBootImageSpaces()[0]->Begin() + boot_image_offset;
+ __ Ldr(reg.W(), DeduplicateBootImageAddressLiteral(reinterpret_cast<uintptr_t>(address)));
+ }
+}
+
template <linker::LinkerPatch (*Factory)(size_t, const DexFile*, uint32_t, uint32_t)>
inline void CodeGeneratorARM64::EmitPcRelativeLinkerPatches(
const ArenaDeque<PcRelativePatchInfo>& infos,
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 5afb712..11ff78b 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -630,6 +630,8 @@
vixl::aarch64::Register out,
vixl::aarch64::Register base);
+ void LoadBootImageAddress(vixl::aarch64::Register reg, uint32_t boot_image_offset);
+
void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) OVERRIDE;
bool NeedsThunkCode(const linker::LinkerPatch& patch) const OVERRIDE;
void EmitThunkCode(const linker::LinkerPatch& patch,
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 91c1315..6804340 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -27,6 +27,7 @@
#include "compiled_method.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "gc/accounting/card_table.h"
+#include "gc/space/image_space.h"
#include "heap_poisoning.h"
#include "intrinsics_arm_vixl.h"
#include "linker/linker_patch.h"
@@ -9536,6 +9537,22 @@
});
}
+void CodeGeneratorARMVIXL::LoadBootImageAddress(vixl32::Register reg, uint32_t boot_image_offset) {
+ DCHECK(!GetCompilerOptions().IsBootImage());
+ if (GetCompilerOptions().GetCompilePic()) {
+ DCHECK(Runtime::Current()->IsAotCompiler());
+ CodeGeneratorARMVIXL::PcRelativePatchInfo* labels = NewBootImageRelRoPatch(boot_image_offset);
+ EmitMovwMovtPlaceholder(labels, reg);
+ __ Ldr(reg, MemOperand(reg, /* offset */ 0));
+ } else {
+ gc::Heap* heap = Runtime::Current()->GetHeap();
+ DCHECK(!heap->GetBootImageSpaces().empty());
+ uintptr_t address =
+ reinterpret_cast<uintptr_t>(heap->GetBootImageSpaces()[0]->Begin() + boot_image_offset);
+ __ Ldr(reg, DeduplicateBootImageAddressLiteral(dchecked_integral_cast<uint32_t>(address)));
+ }
+}
+
template <linker::LinkerPatch (*Factory)(size_t, const DexFile*, uint32_t, uint32_t)>
inline void CodeGeneratorARMVIXL::EmitPcRelativeLinkerPatches(
const ArenaDeque<PcRelativePatchInfo>& infos,
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index d5b739b..4893d3c 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -600,6 +600,8 @@
dex::TypeIndex type_index,
Handle<mirror::Class> handle);
+ void LoadBootImageAddress(vixl::aarch32::Register reg, uint32_t boot_image_offset);
+
void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) OVERRIDE;
bool NeedsThunkCode(const linker::LinkerPatch& patch) const OVERRIDE;
void EmitThunkCode(const linker::LinkerPatch& patch,
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 507db36..112eb51 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -26,6 +26,7 @@
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "gc/accounting/card_table.h"
+#include "gc/space/image_space.h"
#include "heap_poisoning.h"
#include "intrinsics.h"
#include "intrinsics_mips.h"
@@ -1739,6 +1740,22 @@
// offset to `out` (e.g. lw, jialc, addiu).
}
+void CodeGeneratorMIPS::LoadBootImageAddress(Register reg, uint32_t boot_image_offset) {
+ DCHECK(!GetCompilerOptions().IsBootImage());
+ if (GetCompilerOptions().GetCompilePic()) {
+ DCHECK(Runtime::Current()->IsAotCompiler());
+ PcRelativePatchInfo* info_high = NewBootImageRelRoPatch(boot_image_offset);
+ PcRelativePatchInfo* info_low = NewBootImageRelRoPatch(boot_image_offset, info_high);
+ EmitPcRelativeAddressPlaceholderHigh(info_high, reg, /* base */ ZERO);
+ __ Lw(reg, reg, /* placeholder */ 0x5678, &info_low->label);
+ } else {
+ gc::Heap* heap = Runtime::Current()->GetHeap();
+ DCHECK(!heap->GetBootImageSpaces().empty());
+ const uint8_t* address = heap->GetBootImageSpaces()[0]->Begin() + boot_image_offset;
+ __ LoadConst32(reg, dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(address)));
+ }
+}
+
CodeGeneratorMIPS::JitPatchInfo* CodeGeneratorMIPS::NewJitRootStringPatch(
const DexFile& dex_file,
dex::StringIndex string_index,
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index 2e7c736..9fdb385 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -645,6 +645,8 @@
Register out,
Register base);
+ void LoadBootImageAddress(Register reg, uint32_t boot_image_offset);
+
// The JitPatchInfo is used for JIT string and class loads.
struct JitPatchInfo {
JitPatchInfo(const DexFile& dex_file, uint64_t idx)
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 08a6512..9f86364 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -24,6 +24,7 @@
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "gc/accounting/card_table.h"
+#include "gc/space/image_space.h"
#include "heap_poisoning.h"
#include "intrinsics.h"
#include "intrinsics_mips64.h"
@@ -1638,6 +1639,24 @@
}
}
+void CodeGeneratorMIPS64::LoadBootImageAddress(GpuRegister reg, uint32_t boot_image_offset) {
+ DCHECK(!GetCompilerOptions().IsBootImage());
+ if (GetCompilerOptions().GetCompilePic()) {
+ DCHECK(Runtime::Current()->IsAotCompiler());
+ PcRelativePatchInfo* info_high = NewBootImageRelRoPatch(boot_image_offset);
+ PcRelativePatchInfo* info_low = NewBootImageRelRoPatch(boot_image_offset, info_high);
+ EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
+ // Note: Boot image is in the low 4GiB and the entry is 32-bit, so emit a 32-bit load.
+ __ Lwu(reg, AT, /* placeholder */ 0x5678);
+ } else {
+ gc::Heap* heap = Runtime::Current()->GetHeap();
+ DCHECK(!heap->GetBootImageSpaces().empty());
+ uintptr_t address =
+ reinterpret_cast<uintptr_t>(heap->GetBootImageSpaces()[0]->Begin() + boot_image_offset);
+ __ LoadLiteral(reg, kLoadDoubleword, DeduplicateBootImageAddressLiteral(address));
+ }
+}
+
Literal* CodeGeneratorMIPS64::DeduplicateJitStringLiteral(const DexFile& dex_file,
dex::StringIndex string_index,
Handle<mirror::String> handle) {
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index 6e69e46..25c886f 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -615,6 +615,8 @@
GpuRegister out,
PcRelativePatchInfo* info_low = nullptr);
+ void LoadBootImageAddress(GpuRegister reg, uint32_t boot_image_offset);
+
void PatchJitRootUse(uint8_t* code,
const uint8_t* roots_data,
const Literal* literal,
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 9f42ac7..12872ed 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -23,6 +23,7 @@
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "gc/accounting/card_table.h"
+#include "gc/space/image_space.h"
#include "heap_poisoning.h"
#include "intrinsics.h"
#include "intrinsics_x86.h"
@@ -2188,7 +2189,9 @@
IntrinsicLocationsBuilderX86 intrinsic(codegen_);
if (intrinsic.TryDispatch(invoke)) {
- if (invoke->GetLocations()->CanCall() && invoke->HasPcRelativeMethodLoadKind()) {
+ if (invoke->GetLocations()->CanCall() &&
+ invoke->HasPcRelativeMethodLoadKind() &&
+ invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex()).IsInvalid()) {
invoke->GetLocations()->SetInAt(invoke->GetSpecialInputIndex(), Location::Any());
}
return;
@@ -4969,6 +4972,28 @@
return &string_bss_entry_patches_.back().label;
}
+void CodeGeneratorX86::LoadBootImageAddress(Register reg,
+ uint32_t boot_image_offset,
+ HInvokeStaticOrDirect* invoke) {
+ DCHECK(!GetCompilerOptions().IsBootImage());
+ if (GetCompilerOptions().GetCompilePic()) {
+ DCHECK(Runtime::Current()->IsAotCompiler());
+ DCHECK_EQ(invoke->InputCount(), invoke->GetNumberOfArguments() + 1u);
+ HX86ComputeBaseMethodAddress* method_address =
+ invoke->InputAt(invoke->GetSpecialInputIndex())->AsX86ComputeBaseMethodAddress();
+ DCHECK(method_address != nullptr);
+ Register method_address_reg =
+ invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex()).AsRegister<Register>();
+ __ movl(reg, Address(method_address_reg, CodeGeneratorX86::kDummy32BitOffset));
+ RecordBootImageRelRoPatch(method_address, boot_image_offset);
+ } else {
+ gc::Heap* heap = Runtime::Current()->GetHeap();
+ DCHECK(!heap->GetBootImageSpaces().empty());
+ const uint8_t* address = heap->GetBootImageSpaces()[0]->Begin() + boot_image_offset;
+ __ movl(reg, Immediate(dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(address))));
+ }
+}
+
// The label points to the end of the "movl" or another instruction but the literal offset
// for method patch needs to point to the embedded constant which occupies the last 4 bytes.
constexpr uint32_t kLabelPositionToLiteralOffsetAdjustment = 4u;
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 6c76e27..7d18e2b 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -426,6 +426,11 @@
Label* NewTypeBssEntryPatch(HLoadClass* load_class);
void RecordBootImageStringPatch(HLoadString* load_string);
Label* NewStringBssEntryPatch(HLoadString* load_string);
+
+ void LoadBootImageAddress(Register reg,
+ uint32_t boot_image_offset,
+ HInvokeStaticOrDirect* invoke);
+
Label* NewJitRootStringPatch(const DexFile& dex_file,
dex::StringIndex string_index,
Handle<mirror::String> handle);
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 05194b1..9631c15 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -22,6 +22,7 @@
#include "compiled_method.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "gc/accounting/card_table.h"
+#include "gc/space/image_space.h"
#include "heap_poisoning.h"
#include "intrinsics.h"
#include "intrinsics_x86_64.h"
@@ -1107,6 +1108,20 @@
return &string_bss_entry_patches_.back().label;
}
+void CodeGeneratorX86_64::LoadBootImageAddress(CpuRegister reg, uint32_t boot_image_offset) {
+ DCHECK(!GetCompilerOptions().IsBootImage());
+ if (GetCompilerOptions().GetCompilePic()) {
+ DCHECK(Runtime::Current()->IsAotCompiler());
+ __ movl(reg, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
+ RecordBootImageRelRoPatch(boot_image_offset);
+ } else {
+ gc::Heap* heap = Runtime::Current()->GetHeap();
+ DCHECK(!heap->GetBootImageSpaces().empty());
+ const uint8_t* address = heap->GetBootImageSpaces()[0]->Begin() + boot_image_offset;
+ __ movl(reg, Immediate(dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(address))));
+ }
+}
+
// The label points to the end of the "movl" or another instruction but the literal offset
// for method patch needs to point to the embedded constant which occupies the last 4 bytes.
constexpr uint32_t kLabelPositionToLiteralOffsetAdjustment = 4u;
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 9a4c53b..cf862d3 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -429,7 +429,7 @@
dex::TypeIndex type_index,
Handle<mirror::Class> handle);
- void MoveFromReturnRegister(Location trg, DataType::Type type) OVERRIDE;
+ void LoadBootImageAddress(CpuRegister reg, uint32_t boot_image_offset);
void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) OVERRIDE;
@@ -566,6 +566,8 @@
// Store a 64 bit value into a DoubleStackSlot in the most efficient manner.
void Store64BitValueToStack(Location dest, int64_t value);
+ void MoveFromReturnRegister(Location trg, DataType::Type type) OVERRIDE;
+
// Assign a 64 bit constant to an address.
void MoveInt64ToAddress(const Address& addr_low,
const Address& addr_high,
diff --git a/compiler/optimizing/intrinsic_objects.cc b/compiler/optimizing/intrinsic_objects.cc
new file mode 100644
index 0000000..3c20ad6
--- /dev/null
+++ b/compiler/optimizing/intrinsic_objects.cc
@@ -0,0 +1,120 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "intrinsic_objects.h"
+
+#include "art_field-inl.h"
+#include "base/logging.h"
+#include "class_root.h"
+#include "handle.h"
+#include "obj_ptr-inl.h"
+#include "mirror/object_array-inl.h"
+
+namespace art {
+
+static ObjPtr<mirror::ObjectArray<mirror::Object>> LookupIntegerCache(Thread* self,
+ ClassLinker* class_linker)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ ObjPtr<mirror::Class> integer_cache_class = class_linker->LookupClass(
+ self, "Ljava/lang/Integer$IntegerCache;", /* class_linker */ nullptr);
+ if (integer_cache_class == nullptr || !integer_cache_class->IsInitialized()) {
+ return nullptr;
+ }
+ ArtField* cache_field =
+ integer_cache_class->FindDeclaredStaticField("cache", "[Ljava/lang/Integer;");
+ CHECK(cache_field != nullptr);
+ ObjPtr<mirror::ObjectArray<mirror::Object>> integer_cache =
+ ObjPtr<mirror::ObjectArray<mirror::Object>>::DownCast(
+ cache_field->GetObject(integer_cache_class));
+ CHECK(integer_cache != nullptr);
+ return integer_cache;
+}
+
+ObjPtr<mirror::ObjectArray<mirror::Object>> IntrinsicObjects::AllocateBootImageLiveObjects(
+ Thread* self,
+ ClassLinker* class_linker) REQUIRES_SHARED(Locks::mutator_lock_) {
+ // The objects used for the Integer.valueOf() intrinsic must remain live even if references
+ // to them are removed using reflection. Image roots are not accessible through reflection,
+ // so the array we construct here shall keep them alive.
+ StackHandleScope<1> hs(self);
+ Handle<mirror::ObjectArray<mirror::Object>> integer_cache =
+ hs.NewHandle(LookupIntegerCache(self, class_linker));
+ size_t live_objects_size =
+ (integer_cache != nullptr) ? (/* cache */ 1u + integer_cache->GetLength()) : 0u;
+ ObjPtr<mirror::ObjectArray<mirror::Object>> live_objects =
+ mirror::ObjectArray<mirror::Object>::Alloc(
+ self, GetClassRoot<mirror::ObjectArray<mirror::Object>>(class_linker), live_objects_size);
+ int32_t index = 0;
+ if (integer_cache != nullptr) {
+ live_objects->Set(index++, integer_cache.Get());
+ for (int32_t i = 0, length = integer_cache->GetLength(); i != length; ++i) {
+ live_objects->Set(index++, integer_cache->Get(i));
+ }
+ }
+ CHECK_EQ(index, live_objects->GetLength());
+
+ if (kIsDebugBuild && integer_cache != nullptr) {
+ CHECK_EQ(integer_cache.Get(), GetIntegerValueOfCache(live_objects));
+ for (int32_t i = 0, len = integer_cache->GetLength(); i != len; ++i) {
+ CHECK_EQ(integer_cache->GetWithoutChecks(i), GetIntegerValueOfObject(live_objects, i));
+ }
+ }
+ return live_objects;
+}
+
+ObjPtr<mirror::ObjectArray<mirror::Object>> IntrinsicObjects::GetIntegerValueOfCache(
+ ObjPtr<mirror::ObjectArray<mirror::Object>> boot_image_live_objects) {
+ DCHECK(boot_image_live_objects != nullptr);
+ if (boot_image_live_objects->GetLength() == 0u) {
+ return nullptr; // No intrinsic objects.
+ }
+ // No need for read barrier for boot image object or for verifying the value that was just stored.
+ ObjPtr<mirror::Object> result =
+ boot_image_live_objects->GetWithoutChecks<kVerifyNone, kWithoutReadBarrier>(0);
+ DCHECK(result != nullptr);
+ DCHECK(result->IsObjectArray());
+ DCHECK(result->GetClass()->DescriptorEquals("[Ljava/lang/Integer;"));
+ return ObjPtr<mirror::ObjectArray<mirror::Object>>::DownCast(result);
+}
+
+ObjPtr<mirror::Object> IntrinsicObjects::GetIntegerValueOfObject(
+ ObjPtr<mirror::ObjectArray<mirror::Object>> boot_image_live_objects,
+ uint32_t index) {
+ DCHECK(boot_image_live_objects != nullptr);
+ DCHECK_NE(boot_image_live_objects->GetLength(), 0);
+ DCHECK_LT(index,
+ static_cast<uint32_t>(GetIntegerValueOfCache(boot_image_live_objects)->GetLength()));
+
+ // No need for read barrier for boot image object or for verifying the value that was just stored.
+ ObjPtr<mirror::Object> result =
+ boot_image_live_objects->GetWithoutChecks<kVerifyNone, kWithoutReadBarrier>(
+ /* skip the IntegerCache.cache */ 1u + index);
+ DCHECK(result != nullptr);
+ DCHECK(result->GetClass()->DescriptorEquals("Ljava/lang/Integer;"));
+ return result;
+}
+
+MemberOffset IntrinsicObjects::GetIntegerValueOfArrayDataOffset(
+ ObjPtr<mirror::ObjectArray<mirror::Object>> boot_image_live_objects) {
+ DCHECK_NE(boot_image_live_objects->GetLength(), 0);
+ MemberOffset result = mirror::ObjectArray<mirror::Object>::OffsetOfElement(1u);
+ DCHECK_EQ(GetIntegerValueOfObject(boot_image_live_objects, 0u),
+ (boot_image_live_objects
+ ->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(result)));
+ return result;
+}
+
+} // namespace art
diff --git a/compiler/optimizing/intrinsic_objects.h b/compiler/optimizing/intrinsic_objects.h
new file mode 100644
index 0000000..ffadd03
--- /dev/null
+++ b/compiler/optimizing/intrinsic_objects.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_INTRINSIC_OBJECTS_H_
+#define ART_COMPILER_OPTIMIZING_INTRINSIC_OBJECTS_H_
+
+#include "base/mutex.h"
+
+namespace art {
+
+class ClassLinker;
+template <class MirrorType> class ObjPtr;
+class MemberOffset;
+class Thread;
+
+namespace mirror {
+class Object;
+template <class T> class ObjectArray;
+} // namespace mirror
+
+class IntrinsicObjects {
+ public:
+ static ObjPtr<mirror::ObjectArray<mirror::Object>> AllocateBootImageLiveObjects(
+ Thread* self,
+ ClassLinker* class_linker) REQUIRES_SHARED(Locks::mutator_lock_);
+
+ // Functions for retrieving data for Integer.valueOf().
+ static ObjPtr<mirror::ObjectArray<mirror::Object>> GetIntegerValueOfCache(
+ ObjPtr<mirror::ObjectArray<mirror::Object>> boot_image_live_objects)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ static ObjPtr<mirror::Object> GetIntegerValueOfObject(
+ ObjPtr<mirror::ObjectArray<mirror::Object>> boot_image_live_objects,
+ uint32_t index) REQUIRES_SHARED(Locks::mutator_lock_);
+ static MemberOffset GetIntegerValueOfArrayDataOffset(
+ ObjPtr<mirror::ObjectArray<mirror::Object>> boot_image_live_objects)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_OPTIMIZING_INTRINSIC_OBJECTS_H_
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index f0c91f3..81b2b7b 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -21,10 +21,12 @@
#include "base/utils.h"
#include "class_linker.h"
#include "dex/invoke_type.h"
-#include "driver/compiler_driver.h"
#include "driver/compiler_options.h"
-#include "mirror/dex_cache-inl.h"
+#include "gc/space/image_space.h"
+#include "image-inl.h"
+#include "intrinsic_objects.h"
#include "nodes.h"
+#include "obj_ptr-inl.h"
#include "scoped_thread_state_change-inl.h"
#include "thread-current-inl.h"
@@ -221,105 +223,223 @@
return os;
}
+static ObjPtr<mirror::ObjectArray<mirror::Object>> GetBootImageLiveObjects()
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ gc::Heap* heap = Runtime::Current()->GetHeap();
+ const std::vector<gc::space::ImageSpace*>& boot_image_spaces = heap->GetBootImageSpaces();
+ DCHECK(!boot_image_spaces.empty());
+ const ImageHeader& main_header = boot_image_spaces[0]->GetImageHeader();
+ ObjPtr<mirror::ObjectArray<mirror::Object>> boot_image_live_objects =
+ ObjPtr<mirror::ObjectArray<mirror::Object>>::DownCast(
+ main_header.GetImageRoot<kWithoutReadBarrier>(ImageHeader::kBootImageLiveObjects));
+ DCHECK(boot_image_live_objects != nullptr);
+ DCHECK(heap->ObjectIsInBootImageSpace(boot_image_live_objects));
+ return boot_image_live_objects;
+}
+
+static bool CheckIntegerCache(Thread* self,
+ ClassLinker* class_linker,
+ ObjPtr<mirror::ObjectArray<mirror::Object>> boot_image_live_objects,
+ ObjPtr<mirror::ObjectArray<mirror::Object>> boot_image_cache)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ DCHECK(boot_image_cache != nullptr);
+
+ // Since we have a cache in the boot image, both java.lang.Integer and
+ // java.lang.Integer$IntegerCache must be initialized in the boot image.
+ ObjPtr<mirror::Class> cache_class = class_linker->LookupClass(
+ self, "Ljava/lang/Integer$IntegerCache;", /* class_loader */ nullptr);
+ DCHECK(cache_class != nullptr);
+ DCHECK(cache_class->IsInitialized());
+ ObjPtr<mirror::Class> integer_class =
+ class_linker->LookupClass(self, "Ljava/lang/Integer;", /* class_loader */ nullptr);
+ DCHECK(integer_class != nullptr);
+ DCHECK(integer_class->IsInitialized());
+
+ // Check that the current cache is the same as the `boot_image_cache`.
+ ArtField* cache_field = cache_class->FindDeclaredStaticField("cache", "[Ljava/lang/Integer;");
+ DCHECK(cache_field != nullptr);
+ ObjPtr<mirror::ObjectArray<mirror::Object>> current_cache =
+ ObjPtr<mirror::ObjectArray<mirror::Object>>::DownCast(cache_field->GetObject(cache_class));
+ if (current_cache != boot_image_cache) {
+ return false; // Messed up IntegerCache.cache.
+ }
+
+ // Check that the range matches the boot image cache length.
+ ArtField* low_field = cache_class->FindDeclaredStaticField("low", "I");
+ DCHECK(low_field != nullptr);
+ int32_t low = low_field->GetInt(cache_class);
+ ArtField* high_field = cache_class->FindDeclaredStaticField("high", "I");
+ DCHECK(high_field != nullptr);
+ int32_t high = high_field->GetInt(cache_class);
+ if (boot_image_cache->GetLength() != high - low + 1) {
+ return false; // Messed up IntegerCache.low or IntegerCache.high.
+ }
+
+ // Check that the elements match the boot image intrinsic objects and check their values as well.
+ ArtField* value_field = integer_class->FindDeclaredInstanceField("value", "I");
+ DCHECK(value_field != nullptr);
+ for (int32_t i = 0, len = boot_image_cache->GetLength(); i != len; ++i) {
+ ObjPtr<mirror::Object> boot_image_object =
+ IntrinsicObjects::GetIntegerValueOfObject(boot_image_live_objects, i);
+ DCHECK(Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boot_image_object));
+ // No need for read barrier for comparison with a boot image object.
+ ObjPtr<mirror::Object> current_object =
+ boot_image_cache->GetWithoutChecks<kVerifyNone, kWithoutReadBarrier>(i);
+ if (boot_image_object != current_object) {
+ return false; // Messed up IntegerCache.cache[i].
+ }
+ if (value_field->GetInt(boot_image_object) != low + i) {
+ return false; // Messed up IntegerCache.cache[i].value.
+ }
+ }
+
+ return true;
+}
+
void IntrinsicVisitor::ComputeIntegerValueOfLocations(HInvoke* invoke,
CodeGenerator* codegen,
Location return_location,
Location first_argument_location) {
- if (Runtime::Current()->IsAotCompiler()) {
- if (codegen->GetCompilerOptions().IsBootImage() ||
- codegen->GetCompilerOptions().GetCompilePic()) {
- // TODO(ngeoffray): Support boot image compilation.
- return;
- }
- }
-
- IntegerValueOfInfo info = ComputeIntegerValueOfInfo();
-
- // Most common case is that we have found all we needed (classes are initialized
- // and in the boot image). Bail if not.
- if (info.integer_cache == nullptr ||
- info.integer == nullptr ||
- info.cache == nullptr ||
- info.value_offset == 0 ||
- // low and high cannot be 0, per the spec.
- info.low == 0 ||
- info.high == 0) {
- LOG(INFO) << "Integer.valueOf will not be optimized";
+ if (codegen->GetCompilerOptions().IsBootImage()) {
+ // TODO: Implement for boot image. We need access to CompilerDriver::IsImageClass()
+ // to verify that the IntegerCache shall be in the image.
return;
}
+ Runtime* runtime = Runtime::Current();
+ gc::Heap* heap = runtime->GetHeap();
+ if (heap->GetBootImageSpaces().empty()) {
+ return; // Running without boot image, cannot use required boot image objects.
+ }
// The intrinsic will call the runtime if it needs to allocate a j.l.Integer.
- LocationSummary* locations = new (invoke->GetBlock()->GetGraph()->GetAllocator()) LocationSummary(
- invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
- if (!invoke->InputAt(0)->IsConstant()) {
- locations->SetInAt(0, Location::RequiresRegister());
+ LocationSummary::CallKind call_kind = LocationSummary::kCallOnMainOnly;
+ {
+ Thread* self = Thread::Current();
+ ScopedObjectAccess soa(self);
+ ObjPtr<mirror::ObjectArray<mirror::Object>> boot_image_live_objects = GetBootImageLiveObjects();
+ ObjPtr<mirror::ObjectArray<mirror::Object>> cache =
+ IntrinsicObjects::GetIntegerValueOfCache(boot_image_live_objects);
+ if (cache == nullptr) {
+ return; // No cache in the boot image.
+ }
+ if (runtime->UseJitCompilation()) {
+ if (!CheckIntegerCache(self, runtime->GetClassLinker(), boot_image_live_objects, cache)) {
+ return; // The cache was somehow messed up, probably by using reflection.
+ }
+ } else {
+ DCHECK(runtime->IsAotCompiler());
+ DCHECK(CheckIntegerCache(self, runtime->GetClassLinker(), boot_image_live_objects, cache));
+ if (invoke->InputAt(0)->IsIntConstant()) {
+ int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
+ // Retrieve the `value` from the lowest cached Integer.
+ ObjPtr<mirror::Object> low_integer =
+ IntrinsicObjects::GetIntegerValueOfObject(boot_image_live_objects, 0u);
+ ObjPtr<mirror::Class> integer_class =
+ low_integer->GetClass<kVerifyNone, kWithoutReadBarrier>();
+ ArtField* value_field = integer_class->FindDeclaredInstanceField("value", "I");
+ DCHECK(value_field != nullptr);
+ int32_t low = value_field->GetInt(low_integer);
+ if (static_cast<uint32_t>(value) - static_cast<uint32_t>(low) <
+ static_cast<uint32_t>(cache->GetLength())) {
+ // No call, we shall use a direct pointer to the Integer object. Note that we cannot
+ // do this for JIT as the "low" can change through reflection before emitting the code.
+ call_kind = LocationSummary::kNoCall;
+ }
+ }
+ }
}
- locations->AddTemp(first_argument_location);
- locations->SetOut(return_location);
+
+ ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
+ LocationSummary* locations = new (allocator) LocationSummary(invoke, call_kind, kIntrinsified);
+ if (call_kind == LocationSummary::kCallOnMainOnly) {
+ locations->SetInAt(0, Location::RegisterOrConstant(invoke->InputAt(0)));
+ locations->AddTemp(first_argument_location);
+ locations->SetOut(return_location);
+ } else {
+ locations->SetInAt(0, Location::ConstantLocation(invoke->InputAt(0)->AsConstant()));
+ locations->SetOut(Location::RequiresRegister());
+ }
}
-IntrinsicVisitor::IntegerValueOfInfo IntrinsicVisitor::ComputeIntegerValueOfInfo() {
+static int32_t GetIntegerCacheLowFromIntegerCache(Thread* self)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ ObjPtr<mirror::Class> cache_class = Runtime::Current()->GetClassLinker()->LookupClass(
+ self, "Ljava/lang/Integer$IntegerCache;", /* class_loader */ nullptr);
+ DCHECK(cache_class != nullptr);
+ DCHECK(cache_class->IsInitialized());
+ ArtField* low_field = cache_class->FindDeclaredStaticField("low", "I");
+ DCHECK(low_field != nullptr);
+ return low_field->GetInt(cache_class);
+}
+
+static uint32_t CalculateBootImageOffset(ObjPtr<mirror::Object> object)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ gc::Heap* heap = Runtime::Current()->GetHeap();
+ DCHECK(heap->ObjectIsInBootImageSpace(object));
+ return reinterpret_cast<const uint8_t*>(object.Ptr()) - heap->GetBootImageSpaces()[0]->Begin();
+}
+
+inline IntrinsicVisitor::IntegerValueOfInfo::IntegerValueOfInfo()
+ : integer_boot_image_offset(0u),
+ value_offset(0),
+ low(0),
+ length(0u),
+ value_boot_image_offset(0u) {}
+
+IntrinsicVisitor::IntegerValueOfInfo IntrinsicVisitor::ComputeIntegerValueOfInfo(HInvoke* invoke) {
// Note that we could cache all of the data looked up here, but there's no good
// location for it. We don't want to add it to WellKnownClasses, to avoid creating global
// JNI values. Adding it as state to the compiler singleton seems like wrong
// separation of concerns.
// The need for this data should be pretty rare though.
- // The most common case is that the classes are in the boot image and initialized,
- // which is easy to generate code for. We bail if not.
+ // Note that at this point we can no longer abort the code generation. Therefore,
+ // we need to provide data that shall not lead to a crash even if the fields were
+ // modified through reflection since ComputeIntegerValueOfLocations() when JITting.
+
+ Runtime* runtime = Runtime::Current();
Thread* self = Thread::Current();
ScopedObjectAccess soa(self);
- Runtime* runtime = Runtime::Current();
- ClassLinker* class_linker = runtime->GetClassLinker();
- gc::Heap* heap = runtime->GetHeap();
+ ObjPtr<mirror::ObjectArray<mirror::Object>> boot_image_live_objects = GetBootImageLiveObjects();
+ ObjPtr<mirror::Object> low_integer =
+ IntrinsicObjects::GetIntegerValueOfObject(boot_image_live_objects, 0u);
+ ObjPtr<mirror::Class> integer_class = low_integer->GetClass<kVerifyNone, kWithoutReadBarrier>();
+ ArtField* value_field = integer_class->FindDeclaredInstanceField("value", "I");
+ DCHECK(value_field != nullptr);
+
IntegerValueOfInfo info;
- info.integer_cache = class_linker->LookupClass(self,
- "Ljava/lang/Integer$IntegerCache;",
- /* class_loader */ nullptr).Ptr();
- if (info.integer_cache == nullptr || !info.integer_cache->IsInitialized()) {
- // Optimization only works if the class is initialized.
- return info;
+ info.integer_boot_image_offset = CalculateBootImageOffset(integer_class);
+ info.value_offset = value_field->GetOffset().Uint32Value();
+ if (runtime->UseJitCompilation()) {
+ // Use the current `IntegerCache.low` for JIT to avoid truly surprising behavior if the
+ // code messes up the `value` field in the lowest cached Integer using reflection.
+ info.low = GetIntegerCacheLowFromIntegerCache(self);
+ } else {
+ // For AOT, the `low_integer->value` should be the same as `IntegerCache.low`.
+ info.low = value_field->GetInt(low_integer);
+ DCHECK_EQ(info.low, GetIntegerCacheLowFromIntegerCache(self));
}
- if (!heap->ObjectIsInBootImageSpace(info.integer_cache)) {
- // Optimization only works if the class is in the boot image.
- // TODO: Implement the intrinsic for boot image compilation.
- return info;
- }
- info.integer =
- class_linker->LookupClass(self, "Ljava/lang/Integer;", /* class_loader */ nullptr).Ptr();
- DCHECK(info.integer != nullptr);
- DCHECK(info.integer->IsInitialized()); // Must be initialized since IntegerCache is initialized.
- if (!heap->ObjectIsInBootImageSpace(info.integer)) {
- // Optimization only works if the class is in the boot image.
- return info;
+ // Do not look at `IntegerCache.high`, use the immutable length of the cache array instead.
+ info.length = dchecked_integral_cast<uint32_t>(
+ IntrinsicObjects::GetIntegerValueOfCache(boot_image_live_objects)->GetLength());
+
+ if (invoke->InputAt(0)->IsIntConstant()) {
+ int32_t input_value = invoke->InputAt(0)->AsIntConstant()->GetValue();
+ uint32_t index = static_cast<uint32_t>(input_value) - static_cast<uint32_t>(info.low);
+ if (index < static_cast<uint32_t>(info.length)) {
+ ObjPtr<mirror::Object> integer =
+ IntrinsicObjects::GetIntegerValueOfObject(boot_image_live_objects, index);
+ DCHECK(runtime->GetHeap()->ObjectIsInBootImageSpace(integer));
+ info.value_boot_image_offset = CalculateBootImageOffset(integer);
+ } else {
+ info.value_boot_image_offset = 0u; // Not in the cache.
+ }
+ } else {
+ info.array_data_boot_image_offset =
+ CalculateBootImageOffset(boot_image_live_objects) +
+ IntrinsicObjects::GetIntegerValueOfArrayDataOffset(boot_image_live_objects).Uint32Value();
}
- ArtField* field = info.integer_cache->FindDeclaredStaticField("cache", "[Ljava/lang/Integer;");
- CHECK(field != nullptr);
- info.cache = static_cast<mirror::ObjectArray<mirror::Object>*>(
- field->GetObject(info.integer_cache).Ptr());
- if (info.cache == nullptr) {
- return info; // Did someone mess up the IntegerCache using reflection?
- }
-
- if (!heap->ObjectIsInBootImageSpace(info.cache)) {
- // Optimization only works if the object is in the boot image.
- return info;
- }
-
- field = info.integer->FindDeclaredInstanceField("value", "I");
- CHECK(field != nullptr);
- info.value_offset = field->GetOffset().Int32Value();
-
- field = info.integer_cache->FindDeclaredStaticField("low", "I");
- CHECK(field != nullptr);
- info.low = field->GetInt(info.integer_cache);
-
- field = info.integer_cache->FindDeclaredStaticField("high", "I");
- CHECK(field != nullptr);
- info.high = field->GetInt(info.integer_cache);
-
- DCHECK_EQ(info.cache->GetLength(), info.high - info.low + 1);
return info;
}
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index 30cffac..f2b7823 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -126,33 +126,32 @@
Location return_location,
Location first_argument_location);
- // Temporary data structure for holding Integer.valueOf useful data. We only
- // use it if the mirror::Class* are in the boot image, so it is fine to keep raw
- // mirror::Class pointers in this structure.
+ // Temporary data structure for holding Integer.valueOf data for generating code.
+ // We only use it if the boot image contains the IntegerCache objects.
struct IntegerValueOfInfo {
- IntegerValueOfInfo()
- : integer_cache(nullptr),
- integer(nullptr),
- cache(nullptr),
- low(0),
- high(0),
- value_offset(0) {}
+ IntegerValueOfInfo();
- // The java.lang.IntegerCache class.
- mirror::Class* integer_cache;
- // The java.lang.Integer class.
- mirror::Class* integer;
- // Value of java.lang.IntegerCache#cache.
- mirror::ObjectArray<mirror::Object>* cache;
- // Value of java.lang.IntegerCache#low.
+ // Boot image offset of java.lang.Integer for allocating an instance.
+ uint32_t integer_boot_image_offset;
+ // Offset of the Integer.value field for initializing a newly allocated instance.
+ uint32_t value_offset;
+ // The low value in the cache.
int32_t low;
- // Value of java.lang.IntegerCache#high.
- int32_t high;
- // The offset of java.lang.Integer.value.
- int32_t value_offset;
+ // The length of the cache array.
+ uint32_t length;
+
+ union {
+ // Boot image offset of the target Integer object for constant input in the cache range.
+ // If the input is out of range, this is set to 0u and the code must allocate a new Integer.
+ uint32_t value_boot_image_offset;
+
+ // Boot image offset of the cache array data used for non-constant input in the cache range.
+ // If the input is out of range, the code must allocate a new Integer.
+ uint32_t array_data_boot_image_offset;
+ };
};
- static IntegerValueOfInfo ComputeIntegerValueOfInfo();
+ static IntegerValueOfInfo ComputeIntegerValueOfInfo(HInvoke* invoke);
protected:
IntrinsicVisitor() {}
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index c3d643a..b4890e4 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -2791,7 +2791,7 @@
}
void IntrinsicCodeGeneratorARM64::VisitIntegerValueOf(HInvoke* invoke) {
- IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo();
+ IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo(invoke);
LocationSummary* locations = invoke->GetLocations();
MacroAssembler* masm = GetVIXLAssembler();
@@ -2802,20 +2802,15 @@
Register argument = calling_convention.GetRegisterAt(0);
if (invoke->InputAt(0)->IsConstant()) {
int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
- if (value >= info.low && value <= info.high) {
+ if (info.value_boot_image_offset != 0u) {
// Just embed the j.l.Integer in the code.
- ScopedObjectAccess soa(Thread::Current());
- mirror::Object* boxed = info.cache->Get(value + (-info.low));
- DCHECK(boxed != nullptr && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed));
- uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(boxed));
- __ Ldr(out.W(), codegen_->DeduplicateBootImageAddressLiteral(address));
+ codegen_->LoadBootImageAddress(out, info.value_boot_image_offset);
} else {
+ DCHECK(locations->CanCall());
// Allocate and initialize a new j.l.Integer.
// TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the
// JIT object table.
- uint32_t address =
- dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
- __ Ldr(argument.W(), codegen_->DeduplicateBootImageAddressLiteral(address));
+ codegen_->LoadBootImageAddress(argument, info.integer_boot_image_offset);
codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
__ Mov(temp.W(), value);
@@ -2825,16 +2820,15 @@
codegen_->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
}
} else {
+ DCHECK(locations->CanCall());
Register in = RegisterFrom(locations->InAt(0), DataType::Type::kInt32);
// Check bounds of our cache.
__ Add(out.W(), in.W(), -info.low);
- __ Cmp(out.W(), info.high - info.low + 1);
+ __ Cmp(out.W(), info.length);
vixl::aarch64::Label allocate, done;
__ B(&allocate, hs);
// If the value is within the bounds, load the j.l.Integer directly from the array.
- uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
- uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache));
- __ Ldr(temp.W(), codegen_->DeduplicateBootImageAddressLiteral(data_offset + address));
+ codegen_->LoadBootImageAddress(temp, info.array_data_boot_image_offset);
MemOperand source = HeapOperand(
temp, out.X(), LSL, DataType::SizeShift(DataType::Type::kReference));
codegen_->Load(DataType::Type::kReference, out, source);
@@ -2842,8 +2836,7 @@
__ B(&done);
__ Bind(&allocate);
// Otherwise allocate and initialize a new j.l.Integer.
- address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
- __ Ldr(argument.W(), codegen_->DeduplicateBootImageAddressLiteral(address));
+ codegen_->LoadBootImageAddress(argument, info.integer_boot_image_offset);
codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
__ Str(in.W(), HeapOperand(out.W(), info.value_offset));
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index fecf1cc..0835060 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -2940,7 +2940,7 @@
}
void IntrinsicCodeGeneratorARMVIXL::VisitIntegerValueOf(HInvoke* invoke) {
- IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo();
+ IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo(invoke);
LocationSummary* locations = invoke->GetLocations();
ArmVIXLAssembler* const assembler = GetAssembler();
@@ -2951,20 +2951,15 @@
vixl32::Register argument = calling_convention.GetRegisterAt(0);
if (invoke->InputAt(0)->IsConstant()) {
int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
- if (value >= info.low && value <= info.high) {
+ if (info.value_boot_image_offset != 0u) {
// Just embed the j.l.Integer in the code.
- ScopedObjectAccess soa(Thread::Current());
- mirror::Object* boxed = info.cache->Get(value + (-info.low));
- DCHECK(boxed != nullptr && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed));
- uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(boxed));
- __ Ldr(out, codegen_->DeduplicateBootImageAddressLiteral(address));
+ codegen_->LoadBootImageAddress(out, info.value_boot_image_offset);
} else {
+ DCHECK(locations->CanCall());
// Allocate and initialize a new j.l.Integer.
// TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the
// JIT object table.
- uint32_t address =
- dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
- __ Ldr(argument, codegen_->DeduplicateBootImageAddressLiteral(address));
+ codegen_->LoadBootImageAddress(argument, info.integer_boot_image_offset);
codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
__ Mov(temp, value);
@@ -2974,23 +2969,21 @@
codegen_->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
}
} else {
+ DCHECK(locations->CanCall());
vixl32::Register in = RegisterFrom(locations->InAt(0));
// Check bounds of our cache.
__ Add(out, in, -info.low);
- __ Cmp(out, info.high - info.low + 1);
+ __ Cmp(out, info.length);
vixl32::Label allocate, done;
__ B(hs, &allocate, /* is_far_target */ false);
// If the value is within the bounds, load the j.l.Integer directly from the array.
- uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
- uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache));
- __ Ldr(temp, codegen_->DeduplicateBootImageAddressLiteral(data_offset + address));
+ codegen_->LoadBootImageAddress(temp, info.array_data_boot_image_offset);
codegen_->LoadFromShiftedRegOffset(DataType::Type::kReference, locations->Out(), temp, out);
assembler->MaybeUnpoisonHeapReference(out);
__ B(&done);
__ Bind(&allocate);
// Otherwise allocate and initialize a new j.l.Integer.
- address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
- __ Ldr(argument, codegen_->DeduplicateBootImageAddressLiteral(address));
+ codegen_->LoadBootImageAddress(argument, info.integer_boot_image_offset);
codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
assembler->StoreToOffset(kStoreWord, in, out, info.value_offset);
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index ae248a3..a3eb42b 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -2601,7 +2601,7 @@
}
void IntrinsicCodeGeneratorMIPS::VisitIntegerValueOf(HInvoke* invoke) {
- IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo();
+ IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo(invoke);
LocationSummary* locations = invoke->GetLocations();
MipsAssembler* assembler = GetAssembler();
InstructionCodeGeneratorMIPS* icodegen =
@@ -2609,22 +2609,18 @@
Register out = locations->Out().AsRegister<Register>();
InvokeRuntimeCallingConvention calling_convention;
+ Register argument = calling_convention.GetRegisterAt(0);
if (invoke->InputAt(0)->IsConstant()) {
int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
- if (value >= info.low && value <= info.high) {
+ if (info.value_boot_image_offset != 0u) {
// Just embed the j.l.Integer in the code.
- ScopedObjectAccess soa(Thread::Current());
- mirror::Object* boxed = info.cache->Get(value + (-info.low));
- DCHECK(boxed != nullptr && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed));
- uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(boxed));
- __ LoadConst32(out, address);
+ codegen_->LoadBootImageAddress(out, info.value_boot_image_offset);
} else {
+ DCHECK(locations->CanCall());
// Allocate and initialize a new j.l.Integer.
// TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the
// JIT object table.
- uint32_t address =
- dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
- __ LoadConst32(calling_convention.GetRegisterAt(0), address);
+ codegen_->LoadBootImageAddress(argument, info.integer_boot_image_offset);
codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
__ StoreConstToOffset(kStoreWord, value, out, info.value_offset, TMP);
@@ -2633,27 +2629,23 @@
icodegen->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
}
} else {
+ DCHECK(locations->CanCall());
Register in = locations->InAt(0).AsRegister<Register>();
MipsLabel allocate, done;
- int32_t count = static_cast<uint32_t>(info.high) - info.low + 1;
- // Is (info.low <= in) && (in <= info.high)?
__ Addiu32(out, in, -info.low);
- // As unsigned quantities is out < (info.high - info.low + 1)?
- if (IsInt<16>(count)) {
- __ Sltiu(AT, out, count);
+ // As unsigned quantities, is out < info.length?
+ if (IsUint<15>(info.length)) {
+ __ Sltiu(AT, out, info.length);
} else {
- __ LoadConst32(AT, count);
+ __ LoadConst32(AT, info.length);
__ Sltu(AT, out, AT);
}
- // Branch if out >= (info.high - info.low + 1).
- // This means that "in" is outside of the range [info.low, info.high].
+ // Branch if out >= info.length. This means that "in" is outside of the valid range.
__ Beqz(AT, &allocate);
// If the value is within the bounds, load the j.l.Integer directly from the array.
- uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
- uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache));
- __ LoadConst32(TMP, data_offset + address);
+ codegen_->LoadBootImageAddress(TMP, info.array_data_boot_image_offset);
__ ShiftAndAdd(out, out, TMP, TIMES_4);
__ Lw(out, out, 0);
__ MaybeUnpoisonHeapReference(out);
@@ -2661,8 +2653,7 @@
__ Bind(&allocate);
// Otherwise allocate and initialize a new j.l.Integer.
- address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
- __ LoadConst32(calling_convention.GetRegisterAt(0), address);
+ codegen_->LoadBootImageAddress(argument, info.integer_boot_image_offset);
codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
__ StoreToOffset(kStoreWord, in, out, info.value_offset);
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 9a9ae71..510040b 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -2267,7 +2267,7 @@
}
void IntrinsicCodeGeneratorMIPS64::VisitIntegerValueOf(HInvoke* invoke) {
- IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo();
+ IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo(invoke);
LocationSummary* locations = invoke->GetLocations();
Mips64Assembler* assembler = GetAssembler();
InstructionCodeGeneratorMIPS64* icodegen =
@@ -2275,22 +2275,18 @@
GpuRegister out = locations->Out().AsRegister<GpuRegister>();
InvokeRuntimeCallingConvention calling_convention;
+ GpuRegister argument = calling_convention.GetRegisterAt(0);
if (invoke->InputAt(0)->IsConstant()) {
int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
- if (value >= info.low && value <= info.high) {
+ if (info.value_boot_image_offset != 0u) {
// Just embed the j.l.Integer in the code.
- ScopedObjectAccess soa(Thread::Current());
- mirror::Object* boxed = info.cache->Get(value + (-info.low));
- DCHECK(boxed != nullptr && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed));
- uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(boxed));
- __ LoadConst64(out, address);
+ codegen_->LoadBootImageAddress(out, info.value_boot_image_offset);
} else {
+ DCHECK(locations->CanCall());
// Allocate and initialize a new j.l.Integer.
// TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the
// JIT object table.
- uint32_t address =
- dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
- __ LoadConst64(calling_convention.GetRegisterAt(0), address);
+ codegen_->LoadBootImageAddress(argument, info.integer_boot_image_offset);
codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
__ StoreConstToOffset(kStoreWord, value, out, info.value_offset, TMP);
@@ -2299,22 +2295,18 @@
icodegen->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
}
} else {
+ DCHECK(locations->CanCall());
GpuRegister in = locations->InAt(0).AsRegister<GpuRegister>();
Mips64Label allocate, done;
- int32_t count = static_cast<uint32_t>(info.high) - info.low + 1;
- // Is (info.low <= in) && (in <= info.high)?
__ Addiu32(out, in, -info.low);
- // As unsigned quantities is out < (info.high - info.low + 1)?
- __ LoadConst32(AT, count);
- // Branch if out >= (info.high - info.low + 1).
- // This means that "in" is outside of the range [info.low, info.high].
+ // As unsigned quantities, is out < info.length?
+ __ LoadConst32(AT, info.length);
+ // Branch if out >= info.length. This means that "in" is outside of the valid range.
__ Bgeuc(out, AT, &allocate);
// If the value is within the bounds, load the j.l.Integer directly from the array.
- uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
- uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache));
- __ LoadConst64(TMP, data_offset + address);
+ codegen_->LoadBootImageAddress(TMP, info.array_data_boot_image_offset);
__ Dlsa(out, out, TMP, TIMES_4);
__ Lwu(out, out, 0);
__ MaybeUnpoisonHeapReference(out);
@@ -2322,8 +2314,7 @@
__ Bind(&allocate);
// Otherwise allocate and initialize a new j.l.Integer.
- address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
- __ LoadConst64(calling_convention.GetRegisterAt(0), address);
+ codegen_->LoadBootImageAddress(argument, info.integer_boot_image_offset);
codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
__ StoreToOffset(kStoreWord, in, out, info.value_offset);
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index f84a33b..645ca49 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -2851,57 +2851,76 @@
}
void IntrinsicLocationsBuilderX86::VisitIntegerValueOf(HInvoke* invoke) {
+ DCHECK(invoke->IsInvokeStaticOrDirect());
InvokeRuntimeCallingConvention calling_convention;
IntrinsicVisitor::ComputeIntegerValueOfLocations(
invoke,
codegen_,
Location::RegisterLocation(EAX),
Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+
+ LocationSummary* locations = invoke->GetLocations();
+ if (locations != nullptr) {
+ HInvokeStaticOrDirect* invoke_static_or_direct = invoke->AsInvokeStaticOrDirect();
+ if (invoke_static_or_direct->HasSpecialInput() &&
+ invoke->InputAt(invoke_static_or_direct->GetSpecialInputIndex())
+ ->IsX86ComputeBaseMethodAddress()) {
+ locations->SetInAt(invoke_static_or_direct->GetSpecialInputIndex(),
+ Location::RequiresRegister());
+ }
+ }
}
void IntrinsicCodeGeneratorX86::VisitIntegerValueOf(HInvoke* invoke) {
- IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo();
+ DCHECK(invoke->IsInvokeStaticOrDirect());
+ IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo(invoke);
LocationSummary* locations = invoke->GetLocations();
X86Assembler* assembler = GetAssembler();
Register out = locations->Out().AsRegister<Register>();
InvokeRuntimeCallingConvention calling_convention;
+ Register argument = calling_convention.GetRegisterAt(0);
if (invoke->InputAt(0)->IsConstant()) {
int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
- if (value >= info.low && value <= info.high) {
+ if (info.value_boot_image_offset != 0u) {
// Just embed the j.l.Integer in the code.
- ScopedObjectAccess soa(Thread::Current());
- mirror::Object* boxed = info.cache->Get(value + (-info.low));
- DCHECK(boxed != nullptr && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed));
- uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(boxed));
- __ movl(out, Immediate(address));
+ codegen_->LoadBootImageAddress(
+ out, info.value_boot_image_offset, invoke->AsInvokeStaticOrDirect());
} else {
+ DCHECK(locations->CanCall());
// Allocate and initialize a new j.l.Integer.
// TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the
// JIT object table.
- uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
- __ movl(calling_convention.GetRegisterAt(0), Immediate(address));
+ codegen_->LoadBootImageAddress(
+ argument, info.integer_boot_image_offset, invoke->AsInvokeStaticOrDirect());
codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
__ movl(Address(out, info.value_offset), Immediate(value));
}
} else {
+ DCHECK(locations->CanCall());
Register in = locations->InAt(0).AsRegister<Register>();
// Check bounds of our cache.
__ leal(out, Address(in, -info.low));
- __ cmpl(out, Immediate(info.high - info.low + 1));
+ __ cmpl(out, Immediate(info.length));
NearLabel allocate, done;
__ j(kAboveEqual, &allocate);
// If the value is within the bounds, load the j.l.Integer directly from the array.
- uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
- uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache));
- __ movl(out, Address(out, TIMES_4, data_offset + address));
+ constexpr size_t kElementSize = sizeof(mirror::HeapReference<mirror::Object>);
+ uint32_t mid_array_boot_image_offset =
+ info.array_data_boot_image_offset - info.low * kElementSize;
+ codegen_->LoadBootImageAddress(
+ out, mid_array_boot_image_offset, invoke->AsInvokeStaticOrDirect());
+ DCHECK_NE(out, in);
+ static_assert((1u << TIMES_4) == sizeof(mirror::HeapReference<mirror::Object>),
+ "Check heap reference size.");
+ __ movl(out, Address(out, in, TIMES_4, 0));
__ MaybeUnpoisonHeapReference(out);
__ jmp(&done);
__ Bind(&allocate);
// Otherwise allocate and initialize a new j.l.Integer.
- address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
- __ movl(calling_convention.GetRegisterAt(0), Immediate(address));
+ codegen_->LoadBootImageAddress(
+ argument, info.integer_boot_image_offset, invoke->AsInvokeStaticOrDirect());
codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
__ movl(Address(out, info.value_offset), in);
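The x86 path above avoids a runtime subtraction by biasing the array address it
loads: it requests array_data - low * kElementSize from the boot image, so
indexing with the unbiased input lands on element `in - low`. A minimal sketch
of the trick, with illustrative names and the range check assumed already done:

    #include <cstddef>
    #include <cstdint>

    // Pre-bias the base once: biased[in] == array_data[in - low] for every
    // `in` that passed the unsigned range check, so the indexed load needs no
    // subtraction. With IntegerCache's low == -128 the bias is +128 elements.
    inline const uint32_t* BiasedCacheBase(const uint32_t* array_data,
                                           int32_t low) {
      return array_data - static_cast<std::ptrdiff_t>(low);
    }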
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 7627dc9..6d85f3a 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -2660,56 +2660,47 @@
}
void IntrinsicCodeGeneratorX86_64::VisitIntegerValueOf(HInvoke* invoke) {
- IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo();
+ IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo(invoke);
LocationSummary* locations = invoke->GetLocations();
X86_64Assembler* assembler = GetAssembler();
CpuRegister out = locations->Out().AsRegister<CpuRegister>();
InvokeRuntimeCallingConvention calling_convention;
- if (invoke->InputAt(0)->IsConstant()) {
+ CpuRegister argument = CpuRegister(calling_convention.GetRegisterAt(0));
+ if (invoke->InputAt(0)->IsIntConstant()) {
int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
- if (value >= info.low && value <= info.high) {
+ if (info.value_boot_image_offset != 0u) {
// Just embed the j.l.Integer in the code.
- ScopedObjectAccess soa(Thread::Current());
- mirror::Object* boxed = info.cache->Get(value + (-info.low));
- DCHECK(boxed != nullptr && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed));
- uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(boxed));
- __ movl(out, Immediate(static_cast<int32_t>(address)));
+ codegen_->LoadBootImageAddress(out, info.value_boot_image_offset);
} else {
+ DCHECK(locations->CanCall());
// Allocate and initialize a new j.l.Integer.
// TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the
// JIT object table.
- CpuRegister argument = CpuRegister(calling_convention.GetRegisterAt(0));
- uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
- __ movl(argument, Immediate(static_cast<int32_t>(address)));
+ codegen_->LoadBootImageAddress(argument, info.integer_boot_image_offset);
codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
__ movl(Address(out, info.value_offset), Immediate(value));
}
} else {
+ DCHECK(locations->CanCall());
CpuRegister in = locations->InAt(0).AsRegister<CpuRegister>();
// Check bounds of our cache.
__ leal(out, Address(in, -info.low));
- __ cmpl(out, Immediate(info.high - info.low + 1));
+ __ cmpl(out, Immediate(info.length));
NearLabel allocate, done;
__ j(kAboveEqual, &allocate);
// If the value is within the bounds, load the j.l.Integer directly from the array.
- uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
- uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache));
- if (data_offset + address <= std::numeric_limits<int32_t>::max()) {
- __ movl(out, Address(out, TIMES_4, data_offset + address));
- } else {
- CpuRegister temp = CpuRegister(calling_convention.GetRegisterAt(0));
- __ movl(temp, Immediate(static_cast<int32_t>(data_offset + address)));
- __ movl(out, Address(temp, out, TIMES_4, 0));
- }
+ DCHECK_NE(out.AsRegister(), argument.AsRegister());
+ codegen_->LoadBootImageAddress(argument, info.array_data_boot_image_offset);
+ static_assert((1u << TIMES_4) == sizeof(mirror::HeapReference<mirror::Object>),
+ "Check heap reference size.");
+ __ movl(out, Address(argument, out, TIMES_4, 0));
__ MaybeUnpoisonHeapReference(out);
__ jmp(&done);
__ Bind(&allocate);
// Otherwise allocate and initialize a new j.l.Integer.
- CpuRegister argument = CpuRegister(calling_convention.GetRegisterAt(0));
- address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
- __ movl(argument, Immediate(static_cast<int32_t>(address)));
+ codegen_->LoadBootImageAddress(argument, info.integer_boot_image_offset);
codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
__ movl(Address(out, info.value_offset), in);
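Both x86 back-ends fold the two-sided cache bounds test into the single
unsigned comparison emitted by leal/cmpl/j(kAboveEqual). A sketch of the
underlying identity (the function name is illustrative):

    #include <cstdint>

    // low <= in && in < low + length  <=>  uint32_t(in - low) < length:
    // the subtraction wraps around for in < low, and the wrapped value is
    // then >= length. LEA + CMP + JAE compute exactly this.
    inline bool InCacheRange(int32_t in, int32_t low, uint32_t length) {
      // Do the subtraction in modular 32-bit arithmetic, as LEA does.
      return static_cast<uint32_t>(in) - static_cast<uint32_t>(low) < length;
    }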
diff --git a/compiler/optimizing/pc_relative_fixups_x86.cc b/compiler/optimizing/pc_relative_fixups_x86.cc
index 9049457..05ec765 100644
--- a/compiler/optimizing/pc_relative_fixups_x86.cc
+++ b/compiler/optimizing/pc_relative_fixups_x86.cc
@@ -193,18 +193,19 @@
}
void HandleInvoke(HInvoke* invoke) {
- // If this is an invoke-static/-direct with PC-relative dex cache array
- // addressing, we need the PC-relative address base.
HInvokeStaticOrDirect* invoke_static_or_direct = invoke->AsInvokeStaticOrDirect();
- // We can't add a pointer to the constant area if we already have a current
- // method pointer. This may arise when sharpening doesn't remove the current
- // method pointer from the invoke.
- if (invoke_static_or_direct != nullptr &&
- invoke_static_or_direct->HasCurrentMethodInput()) {
+
+ // We can't add the method address if we already have a current method pointer.
+ // This may arise when sharpening doesn't remove the current method pointer from the invoke.
+ if (invoke_static_or_direct != nullptr && invoke_static_or_direct->HasCurrentMethodInput()) {
+ // Note: This happens only for recursive calls (including the case of compiling
+ // an intrinsic by faking a call to itself; we use kRuntimeCall for that case).
DCHECK(!invoke_static_or_direct->HasPcRelativeMethodLoadKind());
return;
}
+ // If this is an invoke-static/-direct with PC-relative addressing (within boot image
+ // or using .bss or .data.bimg.rel.ro), we need the PC-relative address base.
bool base_added = false;
if (invoke_static_or_direct != nullptr &&
invoke_static_or_direct->HasPcRelativeMethodLoadKind() &&
@@ -224,7 +225,6 @@
}
}
- // These intrinsics need the constant area.
switch (invoke->GetIntrinsic()) {
case Intrinsics::kMathAbsDouble:
case Intrinsics::kMathAbsFloat:
@@ -235,7 +235,15 @@
LOG(FATAL) << "Unreachable min/max/abs: intrinsics should have been lowered "
"to IR nodes by instruction simplifier";
UNREACHABLE();
+ case Intrinsics::kIntegerValueOf:
+ // This intrinsic can be call-free if it loads the address of the boot image object.
+ // If we're compiling PIC, we need the address base for loading from .data.bimg.rel.ro.
+ if (!codegen_->GetCompilerOptions().GetCompilePic()) {
+ break;
+ }
+ FALLTHROUGH_INTENDED;
case Intrinsics::kMathRoundFloat:
+ // This intrinsic needs the constant area.
if (!base_added) {
DCHECK(invoke_static_or_direct != nullptr);
DCHECK(!invoke_static_or_direct->HasCurrentMethodInput());
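The switch above makes kIntegerValueOf conditional where kMathRoundFloat is
unconditional; the intent reduces to the following decision sketch (the enum
and helper are assumptions for illustration, not ART declarations):

    enum class Intrinsic { kIntegerValueOf, kMathRoundFloat, kOther };

    // Does this intrinsic need the HX86ComputeBaseMethodAddress input?
    inline bool NeedsAddressBase(Intrinsic intrinsic, bool compile_pic) {
      switch (intrinsic) {
        case Intrinsic::kIntegerValueOf:
          return compile_pic;  // Only the .data.bimg.rel.ro load needs it.
        case Intrinsic::kMathRoundFloat:
          return true;         // The constant area always needs it.
        default:
          return false;
      }
    }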