ART: Move DexCache arrays to native.
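
The dex cache arrays for strings, resolved types, resolved methods and
resolved fields move from mirror::ObjectArray<>/PointerArray objects on
the managed heap to native arrays (GcRoot<mirror::String>[],
GcRoot<mirror::Class>[], ArtMethod*[], ArtField*[]). The fields that
reference them now hold native pointers, so compiled code loads them
with pointer-sized loads (k32/k64) instead of reference loads, the
ArtMethod accessors take the pointer size explicitly, and element
offsets lose the mirror::Array data header:

  old: mirror::Array::DataOffset(ptr_size).Uint32Value() + index * ptr_size
  new: index * ptr_size

The image writer places these arrays in the new kBinDexCacheArray
native section and fixes up their contents in FixupDexCache().
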
This CL has a companion CL in libcore/
https://android-review.googlesource.com/162985
Change-Id: Icbc9e20ad1b565e603195b12714762bb446515fa
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index 981ab2c..eb8730c 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -677,10 +677,11 @@
FALLTHROUGH_INTENDED;
case 1: // Get method->dex_cache_resolved_methods_
if (!use_pc_rel) {
- cg->LoadRefDisp(arg0_ref,
- ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(),
- arg0_ref,
- kNotVolatile);
+ cg->LoadBaseDisp(arg0_ref,
+ ArtMethod::DexCacheResolvedMethodsOffset(kArmPointerSize).Int32Value(),
+ arg0_ref,
+ k32,
+ kNotVolatile);
}
// Set up direct code if known.
if (direct_code != 0) {
@@ -702,8 +703,8 @@
CHECK_EQ(cu->dex_file, target_method.dex_file);
if (!use_pc_rel) {
cg->LoadRefDisp(arg0_ref,
- mirror::ObjectArray<mirror::Object>::OffsetOfElement(
- target_method.dex_method_index).Int32Value(),
+ cg->GetCachePointerOffset(target_method.dex_method_index,
+ kArmPointerSize),
arg0_ref,
kNotVolatile);
} else {
diff --git a/compiler/dex/quick/arm64/call_arm64.cc b/compiler/dex/quick/arm64/call_arm64.cc
index 83a6aff..036da2e 100644
--- a/compiler/dex/quick/arm64/call_arm64.cc
+++ b/compiler/dex/quick/arm64/call_arm64.cc
@@ -511,10 +511,11 @@
FALLTHROUGH_INTENDED;
case 1: // Get method->dex_cache_resolved_methods_
if (!use_pc_rel) {
- cg->LoadRefDisp(arg0_ref,
- ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(),
- arg0_ref,
- kNotVolatile);
+ cg->LoadBaseDisp(arg0_ref,
+ ArtMethod::DexCacheResolvedMethodsOffset(kArm64PointerSize).Int32Value(),
+ arg0_ref,
+ k64,
+ kNotVolatile);
}
// Set up direct code if known.
if (direct_code != 0) {
@@ -536,8 +537,9 @@
CHECK_EQ(cu->dex_file, target_method.dex_file);
if (!use_pc_rel) {
cg->LoadWordDisp(arg0_ref,
- mirror::Array::DataOffset(kArm64PointerSize).Uint32Value() +
- target_method.dex_method_index * kArm64PointerSize, arg0_ref);
+ cg->GetCachePointerOffset(target_method.dex_method_index,
+ kArm64PointerSize),
+ arg0_ref);
} else {
size_t offset = cg->dex_cache_arrays_layout_.MethodOffset(target_method.dex_method_index);
cg->OpPcRelDexCacheArrayLoad(cu->dex_file, offset, arg0_ref, true);
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index af10817..2a1d644 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -88,24 +88,30 @@
r_result));
}
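+// Loads the resolved class for |type_index| into |class_reg|: either a
+// PC-relative load from the dex cache arrays, or a load of the current
+// method's native resolved-types array followed by an indexed load.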
+void Mir2Lir::LoadTypeFromCache(uint32_t type_index, RegStorage class_reg) {
+ if (CanUseOpPcRelDexCacheArrayLoad()) {
+ uint32_t offset = dex_cache_arrays_layout_.TypeOffset(type_index);
+ OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, class_reg, false);
+ } else {
+ RegStorage r_method = LoadCurrMethodWithHint(class_reg);
+ MemberOffset resolved_types_offset = ArtMethod::DexCacheResolvedTypesOffset(
+ GetInstructionSetPointerSize(cu_->instruction_set));
+ LoadBaseDisp(r_method, resolved_types_offset.Int32Value(), class_reg,
+ cu_->target64 ? k64 : k32, kNotVolatile);
+ int32_t offset_of_type = GetCacheOffset(type_index);
+ LoadRefDisp(class_reg, offset_of_type, class_reg, kNotVolatile);
+ }
+}
+
RegStorage Mir2Lir::GenGetOtherTypeForSgetSput(const MirSFieldLoweringInfo& field_info,
int opt_flags) {
DCHECK_NE(field_info.StorageIndex(), DexFile::kDexNoIndex);
// May do runtime call so everything to home locations.
FlushAllRegs();
+ // Using fixed register to sync with possible call to runtime support.
RegStorage r_base = TargetReg(kArg0, kRef);
LockTemp(r_base);
- if (CanUseOpPcRelDexCacheArrayLoad()) {
- uint32_t offset = dex_cache_arrays_layout_.TypeOffset(field_info.StorageIndex());
- OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, r_base, false);
- } else {
- // Using fixed register to sync with possible call to runtime support.
- RegStorage r_method = LoadCurrMethodWithHint(r_base);
- LoadRefDisp(r_method, ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base,
- kNotVolatile);
- int32_t offset_of_field = ObjArray::OffsetOfElement(field_info.StorageIndex()).Int32Value();
- LoadRefDisp(r_base, offset_of_field, r_base, kNotVolatile);
- }
+ LoadTypeFromCache(field_info.StorageIndex(), r_base);
// r_base now points at static storage (Class*) or null if the type is not yet resolved.
LIR* unresolved_branch = nullptr;
if (!field_info.IsClassInDexCache() && (opt_flags & MIR_CLASS_IS_IN_DEX_CACHE) == 0) {
@@ -1029,19 +1035,7 @@
} else {
rl_result = EvalLoc(rl_dest, kRefReg, true);
// We don't need access checks, load type from dex cache
- if (CanUseOpPcRelDexCacheArrayLoad()) {
- size_t offset = dex_cache_arrays_layout_.TypeOffset(type_idx);
- OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, rl_result.reg, false);
- } else {
- int32_t dex_cache_offset =
- ArtMethod::DexCacheResolvedTypesOffset().Int32Value();
- RegStorage res_reg = AllocTempRef();
- RegStorage r_method = LoadCurrMethodWithHint(res_reg);
- LoadRefDisp(r_method, dex_cache_offset, res_reg, kNotVolatile);
- int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value();
- LoadRefDisp(res_reg, offset_of_type, rl_result.reg, kNotVolatile);
- FreeTemp(res_reg);
- }
+ LoadTypeFromCache(type_idx, rl_result.reg);
if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file,
type_idx) || ForceSlowTypePath(cu_)) {
// Slow path, at runtime test if type is null and if so initialize
@@ -1054,8 +1048,7 @@
void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) {
/* NOTE: Most strings should be available at compile time */
- int32_t offset_of_string = mirror::ObjectArray<mirror::String>::OffsetOfElement(string_idx).
- Int32Value();
+ int32_t offset_of_string = GetCacheOffset(string_idx);
if (!cu_->compiler_driver->CanAssumeStringIsPresentInDexCache(
*cu_->dex_file, string_idx) || ForceSlowStringPath(cu_)) {
// slow path, resolve string if not in dex cache
@@ -1073,7 +1066,8 @@
RegStorage r_method = LoadCurrMethodWithHint(arg0);
LoadRefDisp(r_method, ArtMethod::DeclaringClassOffset().Int32Value(), arg0, kNotVolatile);
// Declaring class to dex cache strings.
- LoadRefDisp(arg0, mirror::Class::DexCacheStringsOffset().Int32Value(), arg0, kNotVolatile);
+ LoadBaseDisp(arg0, mirror::Class::DexCacheStringsOffset().Int32Value(), arg0,
+ cu_->target64 ? k64 : k32, kNotVolatile);
LoadRefDisp(arg0, offset_of_string, ret0, kNotVolatile);
}
@@ -1091,8 +1085,8 @@
RegStorage res_reg = AllocTempRef();
LoadRefDisp(rl_method.reg, ArtMethod::DeclaringClassOffset().Int32Value(), res_reg,
kNotVolatile);
- LoadRefDisp(res_reg, mirror::Class::DexCacheStringsOffset().Int32Value(), res_reg,
- kNotVolatile);
+ LoadBaseDisp(res_reg, mirror::Class::DexCacheStringsOffset().Int32Value(), res_reg,
+ cu_->target64 ? k64 : k32, kNotVolatile);
LoadRefDisp(res_reg, offset_of_string, rl_result.reg, kNotVolatile);
FreeTemp(res_reg);
}
@@ -1176,19 +1170,10 @@
kNotVolatile);
LoadRefDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class,
kNotVolatile);
- } else if (CanUseOpPcRelDexCacheArrayLoad()) {
- size_t offset = dex_cache_arrays_layout_.TypeOffset(type_idx);
- OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, check_class, false);
- LoadRefDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class,
- kNotVolatile);
} else {
- RegStorage r_method = LoadCurrMethodWithHint(check_class);
- LoadRefDisp(r_method, ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
- check_class, kNotVolatile);
+ LoadTypeFromCache(type_idx, check_class);
LoadRefDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class,
kNotVolatile);
- int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value();
- LoadRefDisp(check_class, offset_of_type, check_class, kNotVolatile);
}
// FIXME: what should we be comparing here? compressed or decompressed references?
@@ -1239,17 +1224,8 @@
LoadValueDirectFixed(rl_src, ref_reg); // kArg0 <= ref
}
- if (CanUseOpPcRelDexCacheArrayLoad()) {
- size_t offset = dex_cache_arrays_layout_.TypeOffset(type_idx);
- OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, class_reg, false);
- } else {
- RegStorage r_method = LoadCurrMethodWithHint(class_reg);
- // Load dex cache entry into class_reg (kArg2)
- LoadRefDisp(r_method, ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
- class_reg, kNotVolatile);
- int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value();
- LoadRefDisp(class_reg, offset_of_type, class_reg, kNotVolatile);
- }
+ // Load dex cache entry into class_reg (kArg2)
+ LoadTypeFromCache(type_idx, class_reg);
if (!can_assume_type_is_in_dex_cache) {
GenIfNullUseHelperImm(class_reg, kQuickInitializeType, type_idx);
@@ -1370,17 +1346,7 @@
class_reg, kNotVolatile);
} else {
// Load dex cache entry into class_reg (kArg2)
- if (CanUseOpPcRelDexCacheArrayLoad()) {
- size_t offset = dex_cache_arrays_layout_.TypeOffset(type_idx);
- OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, class_reg, false);
- } else {
- RegStorage r_method = LoadCurrMethodWithHint(class_reg);
-
- LoadRefDisp(r_method, ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
- class_reg, kNotVolatile);
- int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value();
- LoadRefDisp(class_reg, offset_of_type, class_reg, kNotVolatile);
- }
+ LoadTypeFromCache(type_idx, class_reg);
if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx)) {
// Need to test presence of type in dex cache at runtime
GenIfNullUseHelperImm(class_reg, kQuickInitializeType, type_idx);
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index 853980d..8863c05 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -415,10 +415,11 @@
* Bit of a hack here - in the absence of a real scheduling pass,
* emit the next instruction in static & direct invoke sequences.
*/
-static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info, int state,
- const MethodReference& target_method, uint32_t, uintptr_t direct_code,
- uintptr_t direct_method, InvokeType type) {
- Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
+int MipsMir2Lir::MipsNextSDCallInsn(CompilationUnit* cu, CallInfo* info, int state,
+ const MethodReference& target_method, uint32_t,
+ uintptr_t direct_code, uintptr_t direct_method,
+ InvokeType type) {
+ MipsMir2Lir* cg = static_cast<MipsMir2Lir*>(cu->cg.get());
if (info->string_init_offset != 0) {
RegStorage arg0_ref = cg->TargetReg(kArg0, kRef);
switch (state) {
@@ -469,10 +470,12 @@
cg->LoadCurrMethodDirect(arg0_ref);
break;
case 1: // Get method->dex_cache_resolved_methods_
- cg->LoadRefDisp(arg0_ref,
- ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(),
- arg0_ref,
- kNotVolatile);
+ cg->LoadBaseDisp(arg0_ref,
+ ArtMethod::DexCacheResolvedMethodsOffset(
+ cu->target64 ? kMips64PointerSize : kMipsPointerSize).Int32Value(),
+ arg0_ref,
+ cu->target64 ? k64 : k32,
+ kNotVolatile);
// Set up direct code if known.
if (direct_code != 0) {
if (direct_code != static_cast<uintptr_t>(-1)) {
@@ -492,8 +495,9 @@
CHECK_EQ(cu->dex_file, target_method.dex_file);
const size_t pointer_size = GetInstructionSetPointerSize(cu->instruction_set);
cg->LoadWordDisp(arg0_ref,
- mirror::Array::DataOffset(pointer_size).Uint32Value() +
- target_method.dex_method_index * pointer_size, arg0_ref);
+ cg->GetCachePointerOffset(target_method.dex_method_index,
+ pointer_size),
+ arg0_ref);
break;
}
case 3: // Grab the code from the method*
@@ -512,7 +516,7 @@
}
NextCallInsn MipsMir2Lir::GetNextSDCallInsn() {
- return NextSDCallInsn;
+ return MipsNextSDCallInsn;
}
LIR* MipsMir2Lir::GenCallInsn(const MirMethodLoweringInfo& method_info ATTRIBUTE_UNUSED) {
diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h
index 2173253..378b9a0 100644
--- a/compiler/dex/quick/mips/codegen_mips.h
+++ b/compiler/dex/quick/mips/codegen_mips.h
@@ -269,6 +269,11 @@
const bool fpuIs32Bit_;
private:
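+  // Emits the next instruction of a static & direct invoke sequence
+  // (returned by GetNextSDCallInsn()); handles both MIPS32 and MIPS64
+  // pointer sizes for the dex cache loads.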
+ static int MipsNextSDCallInsn(CompilationUnit* cu, CallInfo* info, int state,
+ const MethodReference& target_method, uint32_t,
+ uintptr_t direct_code, uintptr_t direct_method,
+ InvokeType type);
+
void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
void GenAddLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
void GenSubLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
diff --git a/compiler/dex/quick/mir_to_lir-inl.h b/compiler/dex/quick/mir_to_lir-inl.h
index 767fe25..f96816c 100644
--- a/compiler/dex/quick/mir_to_lir-inl.h
+++ b/compiler/dex/quick/mir_to_lir-inl.h
@@ -21,6 +21,7 @@
#include "base/logging.h"
#include "dex/compiler_ir.h"
+#include "gc_root.h"
#include "utils.h"
namespace art {
@@ -278,6 +279,14 @@
}
}
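+// Offset of the GcRoot<> entry at |index| in a native dex cache array
+// (resolved types or strings); these arrays carry no mirror::Array header.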
+inline size_t Mir2Lir::GetCacheOffset(uint32_t index) {
+ return sizeof(GcRoot<mirror::Object>) * index;
+}
+
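+// Offset of the pointer-sized entry at |index| in a native dex cache array
+// (resolved methods or fields).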
+inline size_t Mir2Lir::GetCachePointerOffset(uint32_t index, size_t pointer_size) {
+ return pointer_size * index;
+}
+
inline Mir2Lir::ShortyIterator::ShortyIterator(const char* shorty, bool is_static)
: cur_(shorty + 1), pending_this_(!is_static), initialized_(false) {
DCHECK(shorty != nullptr);
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 73787e9..4e3aab2 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -1771,6 +1771,11 @@
return (core_spill_mask_ & (1u << reg)) != 0;
}
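+  // Offset helpers for the native dex cache arrays: GcRoot<> entries for
+  // types and strings, pointer-sized entries for methods and fields.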
+ size_t GetCacheOffset(uint32_t index);
+ size_t GetCachePointerOffset(uint32_t index, size_t pointer_size);
+
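+  // Loads the resolved class for |type_index| from the dex cache into |class_reg|.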
+ void LoadTypeFromCache(uint32_t type_index, RegStorage class_reg);
+
public:
// TODO: add accessors for these.
LIR* literal_list_; // Constants.
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index 43167a1..9cb45a4 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -394,18 +394,19 @@
cg->LoadCurrMethodDirect(arg0_ref);
break;
case 1: // Get method->dex_cache_resolved_methods_
- cg->LoadRefDisp(arg0_ref,
- ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(),
- arg0_ref,
- kNotVolatile);
+ cg->LoadBaseDisp(arg0_ref,
+ ArtMethod::DexCacheResolvedMethodsOffset(
+ cu->target64 ? kX86_64PointerSize : kX86PointerSize).Int32Value(),
+ arg0_ref,
+ cu->target64 ? k64 : k32,
+ kNotVolatile);
break;
case 2: {
// Grab target method*
CHECK_EQ(cu->dex_file, target_method.dex_file);
const size_t pointer_size = GetInstructionSetPointerSize(cu->instruction_set);
cg->LoadWordDisp(arg0_ref,
- mirror::Array::DataOffset(pointer_size).Uint32Value() +
- target_method.dex_method_index * pointer_size,
+ cg->GetCachePointerOffset(target_method.dex_method_index, pointer_size),
arg0_ref);
break;
}
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index d1fe167..ecd23e9 100755
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -3031,31 +3031,12 @@
// The LoadRefDisp(s) below will work normally, even in 64 bit mode.
RegStorage check_class = AllocTemp();
- // If Method* is already in a register, we can save a copy.
- RegLocation rl_method = mir_graph_->GetMethodLoc();
- int32_t offset_of_type = mirror::Array::DataOffset(
- sizeof(mirror::HeapReference<mirror::Class*>)).Int32Value() +
- (sizeof(mirror::HeapReference<mirror::Class*>) * type_idx);
-
- if (rl_method.location == kLocPhysReg) {
- if (use_declaring_class) {
- LoadRefDisp(rl_method.reg, ArtMethod::DeclaringClassOffset().Int32Value(),
- check_class, kNotVolatile);
- } else {
- LoadRefDisp(rl_method.reg, ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
- check_class, kNotVolatile);
- LoadRefDisp(check_class, offset_of_type, check_class, kNotVolatile);
- }
+ if (use_declaring_class) {
+ RegStorage r_method = LoadCurrMethodWithHint(check_class);
+ LoadRefDisp(r_method, ArtMethod::DeclaringClassOffset().Int32Value(),
+ check_class, kNotVolatile);
} else {
- LoadCurrMethodDirect(check_class);
- if (use_declaring_class) {
- LoadRefDisp(check_class, ArtMethod::DeclaringClassOffset().Int32Value(),
- check_class, kNotVolatile);
- } else {
- LoadRefDisp(check_class, ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
- check_class, kNotVolatile);
- LoadRefDisp(check_class, offset_of_type, check_class, kNotVolatile);
- }
+ LoadTypeFromCache(type_idx, check_class);
}
// Compare the computed class to the class in the object.
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 6d3a960..950f824 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -836,16 +836,17 @@
virtual bool Visit(mirror::Class* c) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
const auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
for (auto& m : c->GetVirtualMethods(pointer_size)) {
- ResolveExceptionsForMethod(&m);
+ ResolveExceptionsForMethod(&m, pointer_size);
}
for (auto& m : c->GetDirectMethods(pointer_size)) {
- ResolveExceptionsForMethod(&m);
+ ResolveExceptionsForMethod(&m, pointer_size);
}
return true;
}
private:
- void ResolveExceptionsForMethod(ArtMethod* method_handle) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void ResolveExceptionsForMethod(ArtMethod* method_handle, size_t pointer_size)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
const DexFile::CodeItem* code_item = method_handle->GetCodeItem();
if (code_item == nullptr) {
return; // native or abstract method
@@ -866,7 +867,8 @@
uint16_t encoded_catch_handler_handlers_type_idx =
DecodeUnsignedLeb128(&encoded_catch_handler_list);
// Add to set of types to resolve if not already in the dex cache resolved types
- if (!method_handle->IsResolvedTypeIdx(encoded_catch_handler_handlers_type_idx)) {
+ if (!method_handle->IsResolvedTypeIdx(encoded_catch_handler_handlers_type_idx,
+ pointer_size)) {
exceptions_to_resolve_.emplace(encoded_catch_handler_handlers_type_idx,
method_handle->GetDexFile());
}
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index dbd3366..9172c83 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -309,32 +309,29 @@
dex_cache_array_starts_.Put(dex_file, size);
DexCacheArraysLayout layout(target_ptr_size_, dex_file);
DCHECK(layout.Valid());
- auto types_size = layout.TypesSize(dex_file->NumTypeIds());
- auto methods_size = layout.MethodsSize(dex_file->NumMethodIds());
- auto fields_size = layout.FieldsSize(dex_file->NumFieldIds());
- auto strings_size = layout.StringsSize(dex_file->NumStringIds());
- dex_cache_array_indexes_.Put(
- dex_cache->GetResolvedTypes(),
- DexCacheArrayLocation {size + layout.TypesOffset(), types_size, kBinRegular});
- dex_cache_array_indexes_.Put(
- dex_cache->GetResolvedMethods(),
- DexCacheArrayLocation {size + layout.MethodsOffset(), methods_size, kBinArtMethodClean});
- AddMethodPointerArray(dex_cache->GetResolvedMethods());
- dex_cache_array_indexes_.Put(
- dex_cache->GetResolvedFields(),
- DexCacheArrayLocation {size + layout.FieldsOffset(), fields_size, kBinArtField});
- pointer_arrays_.emplace(dex_cache->GetResolvedFields(), kBinArtField);
- dex_cache_array_indexes_.Put(
- dex_cache->GetStrings(),
- DexCacheArrayLocation {size + layout.StringsOffset(), strings_size, kBinRegular});
+ DCHECK_EQ(dex_file->NumTypeIds() != 0u, dex_cache->GetResolvedTypes() != nullptr);
+ AddDexCacheArrayRelocation(dex_cache->GetResolvedTypes(), size + layout.TypesOffset());
+ DCHECK_EQ(dex_file->NumMethodIds() != 0u, dex_cache->GetResolvedMethods() != nullptr);
+ AddDexCacheArrayRelocation(dex_cache->GetResolvedMethods(), size + layout.MethodsOffset());
+ DCHECK_EQ(dex_file->NumFieldIds() != 0u, dex_cache->GetResolvedFields() != nullptr);
+ AddDexCacheArrayRelocation(dex_cache->GetResolvedFields(), size + layout.FieldsOffset());
+ DCHECK_EQ(dex_file->NumStringIds() != 0u, dex_cache->GetStrings() != nullptr);
+ AddDexCacheArrayRelocation(dex_cache->GetStrings(), size + layout.StringsOffset());
size += layout.Size();
- CHECK_EQ(layout.Size(), types_size + methods_size + fields_size + strings_size);
}
// Set the slot size early to avoid DCHECK() failures in IsImageBinSlotAssigned()
// when AssignImageBinSlot() assigns their indexes out or order.
bin_slot_sizes_[kBinDexCacheArray] = size;
}
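+// Records a native object relocation for a dex cache array. A dex file with
+// no IDs of the given kind has a null array, which needs no relocation.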
+void ImageWriter::AddDexCacheArrayRelocation(void* array, size_t offset) {
+ if (array != nullptr) {
+ native_object_relocations_.emplace(
+ array,
+ NativeObjectRelocation { offset, kNativeObjectRelocationTypeDexCacheArray });
+ }
+}
+
void ImageWriter::AddMethodPointerArray(mirror::PointerArray* arr) {
DCHECK(arr != nullptr);
if (kIsDebugBuild) {
@@ -381,7 +378,7 @@
// so we pre-calculate their offsets separately in PrepareDexCacheArraySlots().
// Since these arrays are huge, most pages do not overlap other objects and it's not
// really important where they are for the clean/dirty separation. Due to their
- // special PC-relative addressing, we arbitrarily keep them at the beginning.
+ // special PC-relative addressing, we arbitrarily keep them at the end.
// * Class'es which are verified [their clinit runs only at runtime]
// - classes in general [because their static fields get overwritten]
// - initialized classes with all-final statics are unlikely to be ever dirty,
@@ -443,28 +440,13 @@
}
} else if (object->GetClass<kVerifyNone>()->IsStringClass()) {
bin = kBinString; // Strings are almost always immutable (except for object header).
- } else if (object->IsArrayInstance()) {
- mirror::Class* klass = object->GetClass<kVerifyNone>();
- if (klass->IsObjectArrayClass() || klass->IsIntArrayClass() || klass->IsLongArrayClass()) {
- auto it = dex_cache_array_indexes_.find(object);
- if (it != dex_cache_array_indexes_.end()) {
- bin = kBinDexCacheArray;
- // Use prepared offset defined by the DexCacheLayout.
- current_offset = it->second.offset_;
- // Override incase of cross compilation.
- object_size = it->second.length_;
- } // else bin = kBinRegular
- }
} // else bin = kBinRegular
}
size_t offset_delta = RoundUp(object_size, kObjectAlignment); // 64-bit alignment
- if (bin != kBinDexCacheArray) {
- DCHECK(dex_cache_array_indexes_.find(object) == dex_cache_array_indexes_.end()) << object;
- current_offset = bin_slot_sizes_[bin]; // How many bytes the current bin is at (aligned).
- // Move the current bin size up to accomodate the object we just assigned a bin slot.
- bin_slot_sizes_[bin] += offset_delta;
- }
+ current_offset = bin_slot_sizes_[bin]; // How many bytes the current bin is at (aligned).
+ // Move the current bin size up to accommodate the object we just assigned a bin slot.
+ bin_slot_sizes_[bin] += offset_delta;
BinSlot new_bin_slot(bin, current_offset);
SetImageBinSlot(object, new_bin_slot);
@@ -595,7 +577,7 @@
}
// Clear references to removed classes from the DexCaches.
- const ArtMethod* resolution_method = runtime->GetResolutionMethod();
+ ArtMethod* resolution_method = runtime->GetResolutionMethod();
ScopedAssertNoThreadSuspension sa(self, __FUNCTION__);
ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_); // For ClassInClassTable
@@ -611,16 +593,20 @@
dex_cache->SetResolvedType(i, nullptr);
}
}
- auto* resolved_methods = down_cast<mirror::PointerArray*>(dex_cache->GetResolvedMethods());
- for (size_t i = 0, len = resolved_methods->GetLength(); i < len; i++) {
- auto* method = resolved_methods->GetElementPtrSize<ArtMethod*>(i, target_ptr_size_);
+ ArtMethod** resolved_methods = dex_cache->GetResolvedMethods();
+ for (size_t i = 0, num = dex_cache->NumResolvedMethods(); i != num; ++i) {
+ ArtMethod* method =
+ mirror::DexCache::GetElementPtrSize(resolved_methods, i, target_ptr_size_);
if (method != nullptr) {
auto* declaring_class = method->GetDeclaringClass();
// Miranda methods may be held live by a class which was not an image class but have a
// declaring class which is an image class. Set it to the resolution method to be safe and
// prevent dangling pointers.
if (method->IsMiranda() || !IsImageClass(declaring_class)) {
- resolved_methods->SetElementPtrSize(i, resolution_method, target_ptr_size_);
+ mirror::DexCache::SetElementPtrSize(resolved_methods,
+ i,
+ resolution_method,
+ target_ptr_size_);
} else {
// Check that the class is still in the classes table.
DCHECK(class_linker->ClassInClassTable(declaring_class)) << "Class "
@@ -922,8 +908,6 @@
image_end_ += RoundUp(sizeof(ImageHeader), kObjectAlignment); // 64-bit-alignment
image_objects_offset_begin_ = image_end_;
- // Prepare bin slots for dex cache arrays.
- PrepareDexCacheArraySlots();
// Clear any pre-existing monitors which may have been in the monitor words, assign bin slots.
heap->VisitObjects(WalkFieldsCallback, this);
// Write the image runtime methods.
@@ -953,6 +937,8 @@
CHECK(m->IsRuntimeMethod());
AssignMethodOffset(m, kNativeObjectRelocationTypeArtMethodClean);
}
+ // Calculate size of the dex cache arrays slot and prepare offsets.
+ PrepareDexCacheArraySlots();
// Calculate bin slot offsets.
size_t bin_offset = image_objects_offset_begin_;
@@ -1019,6 +1005,11 @@
bin_slot_sizes_[kBinArtMethodDirty]);
CHECK_EQ(bin_slot_offsets_[kBinArtMethodClean], methods_section->Offset());
cur_pos = methods_section->End();
+ // Add dex cache arrays section.
+ auto* dex_cache_arrays_section = &sections[ImageHeader::kSectionDexCacheArrays];
+ *dex_cache_arrays_section = ImageSection(cur_pos, bin_slot_sizes_[kBinDexCacheArray]);
+ CHECK_EQ(bin_slot_offsets_[kBinDexCacheArray], dex_cache_arrays_section->Offset());
+ cur_pos = dex_cache_arrays_section->End();
// Round up to the alignment the string table expects. See HashSet::WriteToMemory.
cur_pos = RoundUp(cur_pos, sizeof(uint64_t));
// Calculate the size of the interned strings.
@@ -1120,6 +1111,9 @@
ArtMethod::Size(target_ptr_size_),
ArtMethod::Alignment(target_ptr_size_)));
break;
+ case kNativeObjectRelocationTypeDexCacheArray:
+ // Nothing to copy here, everything is done in FixupDexCache().
+ break;
}
}
}
@@ -1187,7 +1181,7 @@
auto* elem = arr->GetElementPtrSize<void*>(i, target_ptr_size_);
if (elem != nullptr) {
auto it = native_object_relocations_.find(elem);
- if (it == native_object_relocations_.end()) {
+ if (UNLIKELY(it == native_object_relocations_.end())) {
if (it->second.IsArtMethodRelocation()) {
auto* method = reinterpret_cast<ArtMethod*>(elem);
LOG(FATAL) << "No relocation entry for ArtMethod " << PrettyMethod(method) << " @ "
@@ -1200,6 +1194,7 @@
<< field << " idx=" << i << "/" << num_elements << " with declaring class "
<< PrettyClass(field->GetDeclaringClass());
}
+ UNREACHABLE();
} else {
elem = image_begin_ + it->second.offset;
}
@@ -1280,27 +1275,31 @@
}
};
-void* ImageWriter::NativeLocationInImage(void* obj) {
- if (obj == nullptr) {
- return nullptr;
- }
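+// Returns the offset of a relocated native object (ArtField, ArtMethod or
+// dex cache array) within the image.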
+uintptr_t ImageWriter::NativeOffsetInImage(void* obj) {
+ DCHECK(obj != nullptr);
auto it = native_object_relocations_.find(obj);
CHECK(it != native_object_relocations_.end()) << obj;
const NativeObjectRelocation& relocation = it->second;
- return reinterpret_cast<void*>(image_begin_ + relocation.offset);
+ return relocation.offset;
+}
+
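+// Typed variant: translates a native pointer into its final address in the
+// image, preserving null.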
+template <typename T>
+T* ImageWriter::NativeLocationInImage(T* obj) {
+ if (obj == nullptr) {
+ return nullptr;
+ }
+ return reinterpret_cast<T*>(image_begin_ + NativeOffsetInImage(obj));
}
void ImageWriter::FixupClass(mirror::Class* orig, mirror::Class* copy) {
// Update the field arrays.
- copy->SetSFieldsPtrUnchecked(reinterpret_cast<LengthPrefixedArray<ArtField>*>(
- NativeLocationInImage(orig->GetSFieldsPtr())));
- copy->SetIFieldsPtrUnchecked(reinterpret_cast<LengthPrefixedArray<ArtField>*>(
- NativeLocationInImage(orig->GetIFieldsPtr())));
+ copy->SetSFieldsPtrUnchecked(NativeLocationInImage(orig->GetSFieldsPtr()));
+ copy->SetIFieldsPtrUnchecked(NativeLocationInImage(orig->GetIFieldsPtr()));
// Update direct and virtual method arrays.
- copy->SetDirectMethodsPtrUnchecked(reinterpret_cast<LengthPrefixedArray<ArtMethod>*>(
- NativeLocationInImage(orig->GetDirectMethodsPtr())));
- copy->SetVirtualMethodsPtr(reinterpret_cast<LengthPrefixedArray<ArtMethod>*>(
- NativeLocationInImage(orig->GetVirtualMethodsPtr())));
+ copy->SetDirectMethodsPtrUnchecked(NativeLocationInImage(orig->GetDirectMethodsPtr()));
+ copy->SetVirtualMethodsPtr(NativeLocationInImage(orig->GetVirtualMethodsPtr()));
+ // Update dex cache strings.
+ copy->SetDexCacheStrings(NativeLocationInImage(orig->GetDexCacheStrings()));
// Fix up embedded tables.
if (orig->ShouldHaveEmbeddedImtAndVTable()) {
for (int32_t i = 0; i < orig->GetEmbeddedVTableLength(); ++i) {
@@ -1333,7 +1332,7 @@
}
auto* klass = orig->GetClass();
if (klass->IsIntArrayClass() || klass->IsLongArrayClass()) {
- // Is this a native dex cache array?
+ // Is this a native pointer array?
auto it = pointer_arrays_.find(down_cast<mirror::PointerArray*>(orig));
if (it != pointer_arrays_.end()) {
// Should only need to fixup every pointer array exactly once.
@@ -1341,8 +1340,6 @@
pointer_arrays_.erase(it);
return;
}
- CHECK(dex_cache_array_indexes_.find(orig) == dex_cache_array_indexes_.end())
- << "Should have been pointer array.";
}
if (orig->IsClass()) {
FixupClass(orig->AsClass<kVerifyNone>(), down_cast<mirror::Class*>(copy));
@@ -1357,17 +1354,81 @@
<< "Missing relocation for AbstractMethod.artMethod " << PrettyMethod(src_method);
dest->SetArtMethod(
reinterpret_cast<ArtMethod*>(image_begin_ + it->second.offset));
- } else if (!klass->IsArrayClass() && klass->IsSubClass(down_cast<mirror::Class*>(
- Thread::Current()->DecodeJObject(WellKnownClasses::java_lang_ClassLoader)))) {
- // If src is a ClassLoader, set the class table to null so that it gets recreated by the
- // ClassLoader.
- down_cast<mirror::ClassLoader*>(copy)->SetClassTable(nullptr);
+ } else if (!klass->IsArrayClass()) {
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ if (klass == class_linker->GetClassRoot(ClassLinker::kJavaLangDexCache)) {
+ FixupDexCache(down_cast<mirror::DexCache*>(orig), down_cast<mirror::DexCache*>(copy));
+ } else if (klass->IsSubClass(down_cast<mirror::Class*>(
+ class_linker->GetClassRoot(ClassLinker::kJavaLangClassLoader)))) {
+ // If src is a ClassLoader, set the class table to null so that it gets recreated by the
+ // ClassLoader.
+ down_cast<mirror::ClassLoader*>(copy)->SetClassTable(nullptr);
+ }
}
FixupVisitor visitor(this, copy);
orig->VisitReferences(visitor, visitor);
}
}
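+// Relocates the native dex cache arrays of |orig_dex_cache|: stores their
+// image addresses in |copy_dex_cache| and fixes up the copied elements.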
+void ImageWriter::FixupDexCache(mirror::DexCache* orig_dex_cache,
+ mirror::DexCache* copy_dex_cache) {
+ // Though the DexCache array fields are usually treated as native pointers, we set the full
+ // 64-bit values here, clearing the top 32 bits for 32-bit targets. The zero-extension is
+ // done by casting to the unsigned type uintptr_t before casting to int64_t, i.e.
+ // static_cast<int64_t>(reinterpret_cast<uintptr_t>(image_begin_ + offset)).
+ GcRoot<mirror::String>* orig_strings = orig_dex_cache->GetStrings();
+ if (orig_strings != nullptr) {
+ uintptr_t copy_strings_offset = NativeOffsetInImage(orig_strings);
+ copy_dex_cache->SetField64<false>(
+ mirror::DexCache::StringsOffset(),
+ static_cast<int64_t>(reinterpret_cast<uintptr_t>(image_begin_ + copy_strings_offset)));
+ GcRoot<mirror::String>* copy_strings =
+ reinterpret_cast<GcRoot<mirror::String>*>(image_->Begin() + copy_strings_offset);
+ for (size_t i = 0, num = orig_dex_cache->NumStrings(); i != num; ++i) {
+ copy_strings[i] = GcRoot<mirror::String>(GetImageAddress(orig_strings[i].Read()));
+ }
+ }
+ GcRoot<mirror::Class>* orig_types = orig_dex_cache->GetResolvedTypes();
+ if (orig_types != nullptr) {
+ uintptr_t copy_types_offset = NativeOffsetInImage(orig_types);
+ copy_dex_cache->SetField64<false>(
+ mirror::DexCache::ResolvedTypesOffset(),
+ static_cast<int64_t>(reinterpret_cast<uintptr_t>(image_begin_ + copy_types_offset)));
+ GcRoot<mirror::Class>* copy_types =
+ reinterpret_cast<GcRoot<mirror::Class>*>(image_->Begin() + copy_types_offset);
+ for (size_t i = 0, num = orig_dex_cache->NumResolvedTypes(); i != num; ++i) {
+ copy_types[i] = GcRoot<mirror::Class>(GetImageAddress(orig_types[i].Read()));
+ }
+ }
+ ArtMethod** orig_methods = orig_dex_cache->GetResolvedMethods();
+ if (orig_methods != nullptr) {
+ uintptr_t copy_methods_offset = NativeOffsetInImage(orig_methods);
+ copy_dex_cache->SetField64<false>(
+ mirror::DexCache::ResolvedMethodsOffset(),
+ static_cast<int64_t>(reinterpret_cast<uintptr_t>(image_begin_ + copy_methods_offset)));
+ ArtMethod** copy_methods =
+ reinterpret_cast<ArtMethod**>(image_->Begin() + copy_methods_offset);
+ for (size_t i = 0, num = orig_dex_cache->NumResolvedMethods(); i != num; ++i) {
+ ArtMethod* orig = mirror::DexCache::GetElementPtrSize(orig_methods, i, target_ptr_size_);
+ ArtMethod* copy = NativeLocationInImage(orig);
+ mirror::DexCache::SetElementPtrSize(copy_methods, i, copy, target_ptr_size_);
+ }
+ }
+ ArtField** orig_fields = orig_dex_cache->GetResolvedFields();
+ if (orig_fields != nullptr) {
+ uintptr_t copy_fields_offset = NativeOffsetInImage(orig_fields);
+ copy_dex_cache->SetField64<false>(
+ mirror::DexCache::ResolvedFieldsOffset(),
+ static_cast<int64_t>(reinterpret_cast<uintptr_t>(image_begin_ + copy_fields_offset)));
+ ArtField** copy_fields = reinterpret_cast<ArtField**>(image_->Begin() + copy_fields_offset);
+ for (size_t i = 0, num = orig_dex_cache->NumResolvedFields(); i != num; ++i) {
+ ArtField* orig = mirror::DexCache::GetElementPtrSize(orig_fields, i, target_ptr_size_);
+ ArtField* copy = NativeLocationInImage(orig);
+ mirror::DexCache::SetElementPtrSize(copy_fields, i, copy, target_ptr_size_);
+ }
+ }
+}
+
const uint8_t* ImageWriter::GetQuickCode(ArtMethod* method, bool* quick_is_interpreted) {
DCHECK(!method->IsResolutionMethod() && !method->IsImtConflictMethod() &&
!method->IsImtUnimplementedMethod() && !method->IsAbstract()) << PrettyMethod(method);
@@ -1430,8 +1491,11 @@
memcpy(copy, orig, ArtMethod::Size(target_ptr_size_));
copy->SetDeclaringClass(GetImageAddress(orig->GetDeclaringClassUnchecked()));
- copy->SetDexCacheResolvedMethods(GetImageAddress(orig->GetDexCacheResolvedMethods()));
- copy->SetDexCacheResolvedTypes(GetImageAddress(orig->GetDexCacheResolvedTypes()));
+
+ ArtMethod** orig_resolved_methods = orig->GetDexCacheResolvedMethods(target_ptr_size_);
+ copy->SetDexCacheResolvedMethods(NativeLocationInImage(orig_resolved_methods), target_ptr_size_);
+ GcRoot<mirror::Class>* orig_resolved_types = orig->GetDexCacheResolvedTypes(target_ptr_size_);
+ copy->SetDexCacheResolvedTypes(NativeLocationInImage(orig_resolved_types), target_ptr_size_);
// OatWriter replaces the code_ with an offset value. Here we re-adjust to a pointer relative to
// oat_begin_
@@ -1534,9 +1598,11 @@
uint8_t* ImageWriter::GetOatFileBegin() const {
DCHECK_GT(intern_table_bytes_, 0u);
- return image_begin_ + RoundUp(
- image_end_ + bin_slot_sizes_[kBinArtField] + bin_slot_sizes_[kBinArtMethodDirty] +
- bin_slot_sizes_[kBinArtMethodClean] + intern_table_bytes_, kPageSize);
+ size_t native_sections_size =
+ bin_slot_sizes_[kBinArtField] + bin_slot_sizes_[kBinArtMethodDirty] +
+ bin_slot_sizes_[kBinArtMethodClean] + bin_slot_sizes_[kBinDexCacheArray] +
+ intern_table_bytes_;
+ return image_begin_ + RoundUp(image_end_ + native_sections_size, kPageSize);
}
ImageWriter::Bin ImageWriter::BinTypeForNativeRelocationType(NativeObjectRelocationType type) {
@@ -1550,6 +1616,8 @@
case kNativeObjectRelocationTypeArtMethodDirty:
case kNativeObjectRelocationTypeArtMethodArrayDirty:
return kBinArtMethodDirty;
+ case kNativeObjectRelocationTypeDexCacheArray:
+ return kBinDexCacheArray;
}
UNREACHABLE();
}
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index 778521c..e235bc4 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -78,12 +78,13 @@
ArtMethod* GetImageMethodAddress(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
- mirror::HeapReference<mirror::Object>* GetDexCacheArrayElementImageAddress(
- const DexFile* dex_file, uint32_t offset) const SHARED_REQUIRES(Locks::mutator_lock_) {
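+  // Returns the image address of the dex cache array element at |offset|
+  // within |dex_file|'s dex cache arrays; PtrType is the target pointer type.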
+ template <typename PtrType>
+ PtrType GetDexCacheArrayElementImageAddress(const DexFile* dex_file, uint32_t offset)
+ const SHARED_REQUIRES(Locks::mutator_lock_) {
auto it = dex_cache_array_starts_.find(dex_file);
DCHECK(it != dex_cache_array_starts_.end());
- return reinterpret_cast<mirror::HeapReference<mirror::Object>*>(
- image_begin_ + RoundUp(sizeof(ImageHeader), kObjectAlignment) + it->second + offset);
+ return reinterpret_cast<PtrType>(
+ image_begin_ + bin_slot_offsets_[kBinDexCacheArray] + it->second + offset);
}
uint8_t* GetOatFileBegin() const;
@@ -104,13 +105,8 @@
// Classify different kinds of bins that objects end up getting packed into during image writing.
enum Bin {
- // Dex cache arrays have a special slot for PC-relative addressing. Since they are
- // huge, and as such their dirtiness is not important for the clean/dirty separation,
- // we arbitrarily keep them at the beginning.
- kBinDexCacheArray, // Object arrays belonging to dex cache.
// Likely-clean:
kBinString, // [String] Almost always immutable (except for obj header).
- kBinArtMethodsManagedInitialized, // [ArtMethod] Not-native, and initialized. Unlikely to dirty
// Unknown mix of clean/dirty:
kBinRegular,
// Likely-dirty:
@@ -127,6 +123,10 @@
// ArtMethods may be dirty if the class has native methods or a declaring class that isn't
// initialized.
kBinArtMethodDirty,
+ // Dex cache arrays have a special slot for PC-relative addressing. Since they are
+ // huge, and as such their dirtiness is not important for the clean/dirty separation,
+ // we arbitrarily keep them at the end of the native data.
+ kBinDexCacheArray, // Arrays belonging to dex cache.
kBinSize,
// Number of bins which are for mirror objects.
kBinMirrorCount = kBinArtField,
@@ -140,6 +140,7 @@
kNativeObjectRelocationTypeArtMethodArrayClean,
kNativeObjectRelocationTypeArtMethodDirty,
kNativeObjectRelocationTypeArtMethodArrayDirty,
+ kNativeObjectRelocationTypeDexCacheArray,
};
friend std::ostream& operator<<(std::ostream& stream, const NativeObjectRelocationType& type);
@@ -193,6 +194,7 @@
SHARED_REQUIRES(Locks::mutator_lock_);
BinSlot GetImageBinSlot(mirror::Object* object) const SHARED_REQUIRES(Locks::mutator_lock_);
+ void AddDexCacheArrayRelocation(void* array, size_t offset) SHARED_REQUIRES(Locks::mutator_lock_);
void AddMethodPointerArray(mirror::PointerArray* arr) SHARED_REQUIRES(Locks::mutator_lock_);
static void* GetImageAddressCallback(void* writer, mirror::Object* obj)
@@ -266,6 +268,8 @@
SHARED_REQUIRES(Locks::mutator_lock_);
void FixupObject(mirror::Object* orig, mirror::Object* copy)
SHARED_REQUIRES(Locks::mutator_lock_);
+ void FixupDexCache(mirror::DexCache* orig_dex_cache, mirror::DexCache* copy_dex_cache)
+ SHARED_REQUIRES(Locks::mutator_lock_);
void FixupPointerArray(mirror::Object* dst, mirror::PointerArray* arr, mirror::Class* klass,
Bin array_type) SHARED_REQUIRES(Locks::mutator_lock_);
@@ -291,7 +295,10 @@
static Bin BinTypeForNativeRelocationType(NativeObjectRelocationType type);
- void* NativeLocationInImage(void* obj);
+ uintptr_t NativeOffsetInImage(void* obj);
+
+ template <typename T>
+ T* NativeLocationInImage(T* obj);
const CompilerDriver& compiler_driver_;
@@ -313,15 +320,6 @@
// Memory mapped for generating the image.
std::unique_ptr<MemMap> image_;
- // Indexes, lengths for dex cache arrays (objects are inside of the image so that they don't
- // move).
- struct DexCacheArrayLocation {
- size_t offset_;
- size_t length_;
- Bin bin_type_;
- };
- SafeMap<mirror::Object*, DexCacheArrayLocation> dex_cache_array_indexes_;
-
// Pointer arrays that need to be updated. Since these are only some int and long arrays, we need
// to keep track. These include vtable arrays, iftable arrays, and dex caches.
std::unordered_map<mirror::PointerArray*, Bin> pointer_arrays_;
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index fdf904d..4ddd457 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -842,10 +842,10 @@
uint32_t GetDexCacheOffset(const LinkerPatch& patch) SHARED_REQUIRES(Locks::mutator_lock_) {
if (writer_->image_writer_ != nullptr) {
- auto* element = writer_->image_writer_->GetDexCacheArrayElementImageAddress(
+ auto* element = writer_->image_writer_->GetDexCacheArrayElementImageAddress<const uint8_t*>(
patch.TargetDexCacheDexFile(), patch.TargetDexCacheElementOffset());
const uint8_t* oat_data = writer_->image_writer_->GetOatFileBegin() + file_offset_;
- return reinterpret_cast<const uint8_t*>(element) - oat_data;
+ return element - oat_data;
} else {
LOG(FATAL) << "Unimplemented.";
UNREACHABLE();
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 503187b..f4cf9b5 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -129,12 +129,12 @@
}
size_t CodeGenerator::GetCacheOffset(uint32_t index) {
- return mirror::ObjectArray<mirror::Object>::OffsetOfElement(index).SizeValue();
+ return sizeof(GcRoot<mirror::Object>) * index;
}
size_t CodeGenerator::GetCachePointerOffset(uint32_t index) {
auto pointer_size = InstructionSetPointerSize(GetInstructionSet());
- return mirror::Array::DataOffset(pointer_size).Uint32Value() + pointer_size * index;
+ return pointer_size * index;
}
void CodeGenerator::CompileBaseline(CodeAllocator* allocator, bool is_leaf) {
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 9de9abf..5b7eea6 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -4231,9 +4231,9 @@
__ LoadFromOffset(kLoadWord,
out,
current_method,
- ArtMethod::DexCacheResolvedTypesOffset().Int32Value());
+ ArtMethod::DexCacheResolvedTypesOffset(kArmPointerSize).Int32Value());
__ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex()));
- __ MaybeUnpoisonHeapReference(out);
+ // TODO: We will need a read barrier here.
SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM(
cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
@@ -4293,9 +4293,8 @@
__ LoadFromOffset(
kLoadWord, out, current_method, ArtMethod::DeclaringClassOffset().Int32Value());
__ LoadFromOffset(kLoadWord, out, out, mirror::Class::DexCacheStringsOffset().Int32Value());
- __ MaybeUnpoisonHeapReference(out);
__ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(load->GetStringIndex()));
- __ MaybeUnpoisonHeapReference(out);
+ // TODO: We will need a read barrier here.
__ CompareAndBranchIfZero(out, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
}
@@ -4570,7 +4569,8 @@
}
// temp = current_method->dex_cache_resolved_methods_;
__ LoadFromOffset(
- kLoadWord, reg, method_reg, ArtMethod::DexCacheResolvedMethodsOffset().Int32Value());
+ kLoadWord, reg, method_reg, ArtMethod::DexCacheResolvedMethodsOffset(
+ kArmPointerSize).Int32Value());
// temp = temp[index_in_cache]
uint32_t index_in_cache = invoke->GetTargetMethod().dex_method_index;
__ LoadFromOffset(kLoadWord, reg, reg, CodeGenerator::GetCachePointerOffset(index_in_cache));
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 25b3ea2..b18fb6e 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -2446,8 +2446,9 @@
}
// temp = current_method->dex_cache_resolved_methods_;
- __ Ldr(reg.W(), MemOperand(method_reg.X(),
- ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
+ __ Ldr(reg.X(),
+ MemOperand(method_reg.X(),
+ ArtMethod::DexCacheResolvedMethodsOffset(kArm64WordSize).Int32Value()));
// temp = temp[index_in_cache];
uint32_t index_in_cache = invoke->GetTargetMethod().dex_method_index;
__ Ldr(reg.X(), MemOperand(reg.X(), GetCachePointerOffset(index_in_cache)));
@@ -2620,9 +2621,10 @@
__ Ldr(out, MemOperand(current_method, ArtMethod::DeclaringClassOffset().Int32Value()));
} else {
DCHECK(cls->CanCallRuntime());
- __ Ldr(out, MemOperand(current_method, ArtMethod::DexCacheResolvedTypesOffset().Int32Value()));
- __ Ldr(out, HeapOperand(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex())));
- GetAssembler()->MaybeUnpoisonHeapReference(out.W());
+ MemberOffset resolved_types_offset = ArtMethod::DexCacheResolvedTypesOffset(kArm64PointerSize);
+ __ Ldr(out.X(), MemOperand(current_method, resolved_types_offset.Int32Value()));
+ __ Ldr(out, MemOperand(out.X(), CodeGenerator::GetCacheOffset(cls->GetTypeIndex())));
+ // TODO: We will need a read barrier here.
SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM64(
cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
@@ -2681,10 +2683,9 @@
Register out = OutputRegister(load);
Register current_method = InputRegisterAt(load, 0);
__ Ldr(out, MemOperand(current_method, ArtMethod::DeclaringClassOffset().Int32Value()));
- __ Ldr(out, HeapOperand(out, mirror::Class::DexCacheStringsOffset()));
- GetAssembler()->MaybeUnpoisonHeapReference(out.W());
- __ Ldr(out, HeapOperand(out, CodeGenerator::GetCacheOffset(load->GetStringIndex())));
- GetAssembler()->MaybeUnpoisonHeapReference(out.W());
+ __ Ldr(out.X(), HeapOperand(out, mirror::Class::DexCacheStringsOffset()));
+ __ Ldr(out, MemOperand(out.X(), CodeGenerator::GetCacheOffset(load->GetStringIndex())));
+ // TODO: We will need a read barrier here.
__ Cbz(out, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
}
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 093d786..1528d09 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -2445,10 +2445,10 @@
}
// temp = temp->dex_cache_resolved_methods_;
- __ LoadFromOffset(kLoadUnsignedWord,
+ __ LoadFromOffset(kLoadDoubleword,
reg,
method_reg,
- ArtMethod::DexCacheResolvedMethodsOffset().Int32Value());
+ ArtMethod::DexCacheResolvedMethodsOffset(kMips64PointerSize).Int32Value());
// temp = temp[index_in_cache]
uint32_t index_in_cache = invoke->GetTargetMethod().dex_method_index;
__ LoadFromOffset(kLoadDoubleword,
@@ -2549,9 +2549,10 @@
ArtMethod::DeclaringClassOffset().Int32Value());
} else {
DCHECK(cls->CanCallRuntime());
- __ LoadFromOffset(kLoadUnsignedWord, out, current_method,
- ArtMethod::DexCacheResolvedTypesOffset().Int32Value());
+ __ LoadFromOffset(kLoadDoubleword, out, current_method,
+ ArtMethod::DexCacheResolvedTypesOffset(kMips64PointerSize).Int32Value());
__ LoadFromOffset(kLoadUnsignedWord, out, out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex()));
+ // TODO: We will need a read barrier here.
SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathMIPS64(
cls,
cls,
@@ -2614,8 +2615,9 @@
GpuRegister current_method = locations->InAt(0).AsRegister<GpuRegister>();
__ LoadFromOffset(kLoadUnsignedWord, out, current_method,
ArtMethod::DeclaringClassOffset().Int32Value());
- __ LoadFromOffset(kLoadUnsignedWord, out, out, mirror::Class::DexCacheStringsOffset().Int32Value());
+ __ LoadFromOffset(kLoadDoubleword, out, out, mirror::Class::DexCacheStringsOffset().Int32Value());
__ LoadFromOffset(kLoadUnsignedWord, out, out, CodeGenerator::GetCacheOffset(load->GetStringIndex()));
+ // TODO: We will need a read barrier here.
__ Beqzc(out, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
}
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 72c690d..4fa7b28 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -3545,7 +3545,8 @@
__ movl(reg, Address(ESP, kCurrentMethodStackOffset));
}
// temp = temp->dex_cache_resolved_methods_;
- __ movl(reg, Address(method_reg, ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
+ __ movl(reg, Address(method_reg,
+ ArtMethod::DexCacheResolvedMethodsOffset(kX86PointerSize).Int32Value()));
// temp = temp[index_in_cache]
uint32_t index_in_cache = invoke->GetTargetMethod().dex_method_index;
__ movl(reg, Address(reg, CodeGenerator::GetCachePointerOffset(index_in_cache)));
@@ -4719,9 +4720,9 @@
} else {
DCHECK(cls->CanCallRuntime());
__ movl(out, Address(
- current_method, ArtMethod::DexCacheResolvedTypesOffset().Int32Value()));
+ current_method, ArtMethod::DexCacheResolvedTypesOffset(kX86PointerSize).Int32Value()));
__ movl(out, Address(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex())));
- __ MaybeUnpoisonHeapReference(out);
+ // TODO: We will need a read barrier here.
SlowPathCodeX86* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathX86(
cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
@@ -4779,9 +4780,8 @@
Register current_method = locations->InAt(0).AsRegister<Register>();
__ movl(out, Address(current_method, ArtMethod::DeclaringClassOffset().Int32Value()));
__ movl(out, Address(out, mirror::Class::DexCacheStringsOffset().Int32Value()));
- __ MaybeUnpoisonHeapReference(out);
__ movl(out, Address(out, CodeGenerator::GetCacheOffset(load->GetStringIndex())));
- __ MaybeUnpoisonHeapReference(out);
+ // TODO: We will need a read barrier here.
__ testl(out, out);
__ j(kEqual, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 820ec78..29bad12 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -450,8 +450,9 @@
__ movq(reg, Address(CpuRegister(RSP), kCurrentMethodStackOffset));
}
// temp = temp->dex_cache_resolved_methods_;
- __ movl(reg, Address(CpuRegister(method_reg),
- ArtMethod::DexCacheResolvedMethodsOffset().SizeValue()));
+ __ movq(reg,
+ Address(CpuRegister(method_reg),
+ ArtMethod::DexCacheResolvedMethodsOffset(kX86_64PointerSize).SizeValue()));
// temp = temp[index_in_cache]
uint32_t index_in_cache = invoke->GetTargetMethod().dex_method_index;
__ movq(reg, Address(reg, CodeGenerator::GetCachePointerOffset(index_in_cache)));
@@ -4550,10 +4551,10 @@
__ movl(out, Address(current_method, ArtMethod::DeclaringClassOffset().Int32Value()));
} else {
DCHECK(cls->CanCallRuntime());
- __ movl(out, Address(
- current_method, ArtMethod::DexCacheResolvedTypesOffset().Int32Value()));
+ __ movq(out, Address(
+ current_method, ArtMethod::DexCacheResolvedTypesOffset(kX86_64PointerSize).Int32Value()));
__ movl(out, Address(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex())));
- __ MaybeUnpoisonHeapReference(out);
+ // TODO: We will need a read barrier here.
SlowPathCodeX86_64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathX86_64(
cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
@@ -4601,10 +4602,9 @@
CpuRegister out = locations->Out().AsRegister<CpuRegister>();
CpuRegister current_method = locations->InAt(0).AsRegister<CpuRegister>();
__ movl(out, Address(current_method, ArtMethod::DeclaringClassOffset().Int32Value()));
- __ movl(out, Address(out, mirror::Class::DexCacheStringsOffset().Int32Value()));
- __ MaybeUnpoisonHeapReference(out);
+ __ movq(out, Address(out, mirror::Class::DexCacheStringsOffset().Int32Value()));
__ movl(out, Address(out, CodeGenerator::GetCacheOffset(load->GetStringIndex())));
- __ MaybeUnpoisonHeapReference(out);
+ // TODO: We will need a read barrier here.
__ testl(out, out);
__ j(kEqual, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 112d42e..3f90676 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -496,8 +496,9 @@
// TODO: we could be more precise by merging the phi inputs but that requires
// some functionality from the reference type propagation.
DCHECK(return_replacement->IsPhi());
+ size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
ReferenceTypeInfo::TypeHandle return_handle =
- handles_->NewHandle(resolved_method->GetReturnType());
+ handles_->NewHandle(resolved_method->GetReturnType(true /* resolve */, pointer_size));
return_replacement->SetReferenceTypeInfo(ReferenceTypeInfo::Create(
return_handle, return_handle->IsFinal() /* is_exact */));
}
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index 516638b..ef753ed 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -637,9 +637,9 @@
ScopedObjectAccess soa(Thread::Current());
ClassLinker* cl = Runtime::Current()->GetClassLinker();
mirror::DexCache* dex_cache = cl->FindDexCache(soa.Self(), instr->GetDexFile());
- ArtMethod* method = dex_cache->GetResolvedMethod(
- instr->GetDexMethodIndex(), cl->GetImagePointerSize());
- mirror::Class* klass = (method == nullptr) ? nullptr : method->GetReturnType(false);
+ size_t pointer_size = cl->GetImagePointerSize();
+ ArtMethod* method = dex_cache->GetResolvedMethod(instr->GetDexMethodIndex(), pointer_size);
+ mirror::Class* klass = (method == nullptr) ? nullptr : method->GetReturnType(false, pointer_size);
SetClassAsTypeInfo(instr, klass, /* is_exact */ false);
}
diff --git a/compiler/utils/dex_cache_arrays_layout-inl.h b/compiler/utils/dex_cache_arrays_layout-inl.h
deleted file mode 100644
index fec981a..0000000
--- a/compiler/utils/dex_cache_arrays_layout-inl.h
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_UTILS_DEX_CACHE_ARRAYS_LAYOUT_INL_H_
-#define ART_COMPILER_UTILS_DEX_CACHE_ARRAYS_LAYOUT_INL_H_
-
-#include "dex_cache_arrays_layout.h"
-
-#include "base/bit_utils.h"
-#include "base/logging.h"
-#include "globals.h"
-#include "mirror/array-inl.h"
-#include "primitive.h"
-
-namespace art {
-
-inline DexCacheArraysLayout::DexCacheArraysLayout(size_t pointer_size, const DexFile* dex_file)
- : /* types_offset_ is always 0u */
- pointer_size_(pointer_size),
- methods_offset_(types_offset_ + TypesSize(dex_file->NumTypeIds())),
- strings_offset_(methods_offset_ + MethodsSize(dex_file->NumMethodIds())),
- fields_offset_(strings_offset_ + StringsSize(dex_file->NumStringIds())),
- size_(fields_offset_ + FieldsSize(dex_file->NumFieldIds())) {
- DCHECK(ValidPointerSize(pointer_size)) << pointer_size;
-}
-
-inline size_t DexCacheArraysLayout::TypeOffset(uint32_t type_idx) const {
- return types_offset_ + ElementOffset(sizeof(mirror::HeapReference<mirror::Class>), type_idx);
-}
-
-inline size_t DexCacheArraysLayout::TypesSize(size_t num_elements) const {
- return ArraySize(sizeof(mirror::HeapReference<mirror::Class>), num_elements);
-}
-
-inline size_t DexCacheArraysLayout::MethodOffset(uint32_t method_idx) const {
- return methods_offset_ + ElementOffset(pointer_size_, method_idx);
-}
-
-inline size_t DexCacheArraysLayout::MethodsSize(size_t num_elements) const {
- return ArraySize(pointer_size_, num_elements);
-}
-
-inline size_t DexCacheArraysLayout::StringOffset(uint32_t string_idx) const {
- return strings_offset_ + ElementOffset(sizeof(mirror::HeapReference<mirror::String>), string_idx);
-}
-
-inline size_t DexCacheArraysLayout::StringsSize(size_t num_elements) const {
- return ArraySize(sizeof(mirror::HeapReference<mirror::String>), num_elements);
-}
-
-inline size_t DexCacheArraysLayout::FieldOffset(uint32_t field_idx) const {
- return fields_offset_ + ElementOffset(pointer_size_, field_idx);
-}
-
-inline size_t DexCacheArraysLayout::FieldsSize(size_t num_elements) const {
- return ArraySize(pointer_size_, num_elements);
-}
-
-inline size_t DexCacheArraysLayout::ElementOffset(size_t element_size, uint32_t idx) {
- return mirror::Array::DataOffset(element_size).Uint32Value() + element_size * idx;
-}
-
-inline size_t DexCacheArraysLayout::ArraySize(size_t element_size, uint32_t num_elements) {
- size_t array_size = mirror::ComputeArraySize(num_elements, ComponentSizeShiftWidth(element_size));
- DCHECK_NE(array_size, 0u); // No overflow expected for dex cache arrays.
- return RoundUp(array_size, kObjectAlignment);
-}
-
-} // namespace art
-
-#endif // ART_COMPILER_UTILS_DEX_CACHE_ARRAYS_LAYOUT_INL_H_
diff --git a/compiler/utils/dex_cache_arrays_layout.h b/compiler/utils/dex_cache_arrays_layout.h
deleted file mode 100644
index 2a109bd..0000000
--- a/compiler/utils/dex_cache_arrays_layout.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_UTILS_DEX_CACHE_ARRAYS_LAYOUT_H_
-#define ART_COMPILER_UTILS_DEX_CACHE_ARRAYS_LAYOUT_H_
-
-namespace art {
-
-/**
- * @class DexCacheArraysLayout
- * @details This class provides the layout information for the type, method, field and
- * string arrays for a DexCache with a fixed arrays' layout (such as in the boot image),
- */
-class DexCacheArraysLayout {
- public:
- // Construct an invalid layout.
- DexCacheArraysLayout()
- : /* types_offset_ is always 0u */
- pointer_size_(0u),
- methods_offset_(0u),
- strings_offset_(0u),
- fields_offset_(0u),
- size_(0u) {
- }
-
- // Construct a layout for a particular dex file.
- DexCacheArraysLayout(size_t pointer_size, const DexFile* dex_file);
-
- bool Valid() const {
- return Size() != 0u;
- }
-
- size_t Size() const {
- return size_;
- }
-
- size_t TypesOffset() const {
- return types_offset_;
- }
-
- size_t TypeOffset(uint32_t type_idx) const;
-
- size_t TypesSize(size_t num_elements) const;
-
- size_t MethodsOffset() const {
- return methods_offset_;
- }
-
- size_t MethodOffset(uint32_t method_idx) const;
-
- size_t MethodsSize(size_t num_elements) const;
-
- size_t StringsOffset() const {
- return strings_offset_;
- }
-
- size_t StringOffset(uint32_t string_idx) const;
-
- size_t StringsSize(size_t num_elements) const;
-
- size_t FieldsOffset() const {
- return fields_offset_;
- }
-
- size_t FieldOffset(uint32_t field_idx) const;
-
- size_t FieldsSize(size_t num_elements) const;
-
- private:
- static constexpr size_t types_offset_ = 0u;
- const size_t pointer_size_; // Must be first for construction initialization order.
- const size_t methods_offset_;
- const size_t strings_offset_;
- const size_t fields_offset_;
- const size_t size_;
-
- static size_t ElementOffset(size_t element_size, uint32_t idx);
-
- static size_t ArraySize(size_t element_size, uint32_t num_elements);
-};
-
-} // namespace art
-
-#endif // ART_COMPILER_UTILS_DEX_CACHE_ARRAYS_LAYOUT_H_