Merge "A first implementation of a loop optimization framework."
diff --git a/build/Android.common_test.mk b/build/Android.common_test.mk
index 449502c..06485ca 100644
--- a/build/Android.common_test.mk
+++ b/build/Android.common_test.mk
@@ -21,9 +21,9 @@
# Directory used for temporary test files on the host.
ifneq ($(TMPDIR),)
-ART_HOST_TEST_DIR := $(TMPDIR)/test-art-$(shell echo $$PPID)
+ART_HOST_TEST_DIR := $(TMPDIR)/test-art-$$(echo $$PPID)
else
-ART_HOST_TEST_DIR := /tmp/$(USER)/test-art-$(shell echo $$PPID)
+ART_HOST_TEST_DIR := /tmp/$(USER)/test-art-$$(echo $$PPID)
endif
# List of known broken tests that we won't attempt to execute. The test name must be the full
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index b726649..bc8facd 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -174,13 +174,12 @@
void CommonCompilerTest::CreateCompilerDriver(Compiler::Kind kind,
InstructionSet isa,
size_t number_of_threads) {
+ compiler_options_->boot_image_ = true;
compiler_driver_.reset(new CompilerDriver(compiler_options_.get(),
verification_results_.get(),
kind,
isa,
instruction_set_features_.get(),
- /* boot_image */ true,
- /* app_image */ false,
GetImageClasses(),
GetCompiledClasses(),
GetCompiledMethods(),
diff --git a/compiler/compiled_method.h b/compiler/compiled_method.h
index 1a87448..99b0ac1 100644
--- a/compiler/compiled_method.h
+++ b/compiler/compiled_method.h
@@ -177,6 +177,7 @@
kTypeRelative, // NOTE: Actual patching is instruction_set-dependent.
kString,
kStringRelative, // NOTE: Actual patching is instruction_set-dependent.
+ kStringBssEntry, // NOTE: Actual patching is instruction_set-dependent.
kDexCacheArray, // NOTE: Actual patching is instruction_set-dependent.
};
@@ -244,10 +245,20 @@
return patch;
}
+ static LinkerPatch StringBssEntryPatch(size_t literal_offset,
+ const DexFile* target_dex_file,
+ uint32_t pc_insn_offset,
+ uint32_t target_string_idx) {
+ LinkerPatch patch(literal_offset, Type::kStringBssEntry, target_dex_file);
+ patch.string_idx_ = target_string_idx;
+ patch.pc_insn_offset_ = pc_insn_offset;
+ return patch;
+ }
+
static LinkerPatch DexCacheArrayPatch(size_t literal_offset,
const DexFile* target_dex_file,
uint32_t pc_insn_offset,
- size_t element_offset) {
+ uint32_t element_offset) {
DCHECK(IsUint<32>(element_offset));
LinkerPatch patch(literal_offset, Type::kDexCacheArray, target_dex_file);
patch.pc_insn_offset_ = pc_insn_offset;
@@ -271,6 +282,7 @@
case Type::kCallRelative:
case Type::kTypeRelative:
case Type::kStringRelative:
+ case Type::kStringBssEntry:
case Type::kDexCacheArray:
return true;
default:
@@ -296,12 +308,16 @@
}
const DexFile* TargetStringDexFile() const {
- DCHECK(patch_type_ == Type::kString || patch_type_ == Type::kStringRelative);
+ DCHECK(patch_type_ == Type::kString ||
+ patch_type_ == Type::kStringRelative ||
+ patch_type_ == Type::kStringBssEntry);
return target_dex_file_;
}
uint32_t TargetStringIndex() const {
- DCHECK(patch_type_ == Type::kString || patch_type_ == Type::kStringRelative);
+ DCHECK(patch_type_ == Type::kString ||
+ patch_type_ == Type::kStringRelative ||
+ patch_type_ == Type::kStringBssEntry);
return string_idx_;
}
@@ -318,6 +334,7 @@
uint32_t PcInsnOffset() const {
DCHECK(patch_type_ == Type::kTypeRelative ||
patch_type_ == Type::kStringRelative ||
+ patch_type_ == Type::kStringBssEntry ||
patch_type_ == Type::kDexCacheArray);
return pc_insn_offset_;
}
diff --git a/compiler/driver/compiled_method_storage_test.cc b/compiler/driver/compiled_method_storage_test.cc
index 5063d71..b72d0ac 100644
--- a/compiler/driver/compiled_method_storage_test.cc
+++ b/compiler/driver/compiled_method_storage_test.cc
@@ -32,8 +32,6 @@
Compiler::kOptimizing,
/* instruction_set_ */ kNone,
/* instruction_set_features */ nullptr,
- /* boot_image */ false,
- /* app_image */ false,
/* image_classes */ nullptr,
/* compiled_classes */ nullptr,
/* compiled_methods */ nullptr,
diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h
index 2d0dd3c..9efd636 100644
--- a/compiler/driver/compiler_driver-inl.h
+++ b/compiler/driver/compiler_driver-inl.h
@@ -31,10 +31,6 @@
namespace art {
-inline mirror::DexCache* CompilerDriver::GetDexCache(const DexCompilationUnit* mUnit) {
- return mUnit->GetClassLinker()->FindDexCache(Thread::Current(), *mUnit->GetDexFile(), false);
-}
-
inline mirror::ClassLoader* CompilerDriver::GetClassLoader(const ScopedObjectAccess& soa,
const DexCompilationUnit* mUnit) {
return soa.Decode<mirror::ClassLoader>(mUnit->GetClassLoader()).Decode();
@@ -87,10 +83,6 @@
return resolved_field;
}
-inline mirror::DexCache* CompilerDriver::FindDexCache(const DexFile* dex_file) {
- return Runtime::Current()->GetClassLinker()->FindDexCache(Thread::Current(), *dex_file, false);
-}
-
inline ArtField* CompilerDriver::ResolveField(
const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
@@ -100,31 +92,16 @@
is_static);
}
-inline void CompilerDriver::GetResolvedFieldDexFileLocation(
- ArtField* resolved_field, const DexFile** declaring_dex_file,
- uint16_t* declaring_class_idx, uint16_t* declaring_field_idx) {
- mirror::Class* declaring_class = resolved_field->GetDeclaringClass();
- *declaring_dex_file = declaring_class->GetDexCache()->GetDexFile();
- *declaring_class_idx = declaring_class->GetDexTypeIndex();
- *declaring_field_idx = resolved_field->GetDexFieldIndex();
-}
-
-inline bool CompilerDriver::IsFieldVolatile(ArtField* field) {
- return field->IsVolatile();
-}
-
-inline MemberOffset CompilerDriver::GetFieldOffset(ArtField* field) {
- return field->GetOffset();
-}
-
inline std::pair<bool, bool> CompilerDriver::IsFastInstanceField(
mirror::DexCache* dex_cache, mirror::Class* referrer_class,
ArtField* resolved_field, uint16_t field_idx) {
DCHECK(!resolved_field->IsStatic());
- mirror::Class* fields_class = resolved_field->GetDeclaringClass();
+ ObjPtr<mirror::Class> fields_class = resolved_field->GetDeclaringClass();
bool fast_get = referrer_class != nullptr &&
- referrer_class->CanAccessResolvedField(fields_class, resolved_field,
- dex_cache, field_idx);
+ referrer_class->CanAccessResolvedField(fields_class.Decode(),
+ resolved_field,
+ dex_cache,
+ field_idx);
bool fast_put = fast_get && (!resolved_field->IsFinal() || fields_class == referrer_class);
return std::make_pair(fast_get, fast_put);
}
@@ -167,13 +144,13 @@
uint32_t* storage_index) {
DCHECK(resolved_member->IsStatic());
if (LIKELY(referrer_class != nullptr)) {
- mirror::Class* members_class = resolved_member->GetDeclaringClass();
+ ObjPtr<mirror::Class> members_class = resolved_member->GetDeclaringClass();
if (members_class == referrer_class) {
*storage_index = members_class->GetDexTypeIndex();
return std::make_pair(true, true);
}
if (CanAccessResolvedMember<ArtMember>(
- referrer_class, members_class, resolved_member, dex_cache, member_idx)) {
+ referrer_class, members_class.Decode(), resolved_member, dex_cache, member_idx)) {
// We have the resolved member, we must make it into a index for the referrer
// in its static storage (which may fail if it doesn't have a slot for it)
// TODO: for images we can elide the static storage base null check
@@ -217,43 +194,6 @@
return result.first;
}
-inline bool CompilerDriver::IsStaticFieldInReferrerClass(mirror::Class* referrer_class,
- ArtField* resolved_field) {
- DCHECK(resolved_field->IsStatic());
- mirror::Class* fields_class = resolved_field->GetDeclaringClass();
- return referrer_class == fields_class;
-}
-
-inline bool CompilerDriver::CanAssumeClassIsInitialized(mirror::Class* klass) {
- // Being loaded is a pre-requisite for being initialized but let's do the cheap check first.
- //
- // NOTE: When AOT compiling an app, we eagerly initialize app classes (and potentially their
- // super classes in the boot image) but only those that have a trivial initialization, i.e.
- // without <clinit>() or static values in the dex file for that class or any of its super
- // classes. So while we could see the klass as initialized during AOT compilation and have
- // it only loaded at runtime, the needed initialization would have to be trivial and
- // unobservable from Java, so we may as well treat it as initialized.
- if (!klass->IsInitialized()) {
- return false;
- }
- return CanAssumeClassIsLoaded(klass);
-}
-
-inline bool CompilerDriver::CanReferrerAssumeClassIsInitialized(mirror::Class* referrer_class,
- mirror::Class* klass) {
- return (referrer_class != nullptr
- && !referrer_class->IsInterface()
- && referrer_class->IsSubClass(klass))
- || CanAssumeClassIsInitialized(klass);
-}
-
-inline bool CompilerDriver::IsStaticFieldsClassInitialized(mirror::Class* referrer_class,
- ArtField* resolved_field) {
- DCHECK(resolved_field->IsStatic());
- mirror::Class* fields_class = resolved_field->GetDeclaringClass();
- return CanReferrerAssumeClassIsInitialized(referrer_class, fields_class);
-}
-
inline ArtMethod* CompilerDriver::ResolveMethod(
ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
@@ -273,35 +213,6 @@
return resolved_method;
}
-inline void CompilerDriver::GetResolvedMethodDexFileLocation(
- ArtMethod* resolved_method, const DexFile** declaring_dex_file,
- uint16_t* declaring_class_idx, uint16_t* declaring_method_idx) {
- mirror::Class* declaring_class = resolved_method->GetDeclaringClass();
- *declaring_dex_file = declaring_class->GetDexCache()->GetDexFile();
- *declaring_class_idx = declaring_class->GetDexTypeIndex();
- *declaring_method_idx = resolved_method->GetDexMethodIndex();
-}
-
-inline uint16_t CompilerDriver::GetResolvedMethodVTableIndex(
- ArtMethod* resolved_method, InvokeType type) {
- if (type == kVirtual || type == kSuper) {
- return resolved_method->GetMethodIndex();
- } else if (type == kInterface) {
- return resolved_method->GetDexMethodIndex();
- } else {
- return DexFile::kDexNoIndex16;
- }
-}
-
-inline bool CompilerDriver::IsMethodsClassInitialized(mirror::Class* referrer_class,
- ArtMethod* resolved_method) {
- if (!resolved_method->IsStatic()) {
- return true;
- }
- mirror::Class* methods_class = resolved_method->GetDeclaringClass();
- return CanReferrerAssumeClassIsInitialized(referrer_class, methods_class);
-}
-
} // namespace art
#endif // ART_COMPILER_DRIVER_COMPILER_DRIVER_INL_H_
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 2ec3f16..6cca397 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -95,8 +95,6 @@
public:
AOTCompilationStats()
: stats_lock_("AOT compilation statistics lock"),
- types_in_dex_cache_(0), types_not_in_dex_cache_(0),
- strings_in_dex_cache_(0), strings_not_in_dex_cache_(0),
resolved_types_(0), unresolved_types_(0),
resolved_instance_fields_(0), unresolved_instance_fields_(0),
resolved_local_static_fields_(0), resolved_static_fields_(0), unresolved_static_fields_(0),
@@ -112,8 +110,6 @@
}
void Dump() {
- DumpStat(types_in_dex_cache_, types_not_in_dex_cache_, "types known to be in dex cache");
- DumpStat(strings_in_dex_cache_, strings_not_in_dex_cache_, "strings known to be in dex cache");
DumpStat(resolved_types_, unresolved_types_, "types resolved");
DumpStat(resolved_instance_fields_, unresolved_instance_fields_, "instance fields resolved");
DumpStat(resolved_local_static_fields_ + resolved_static_fields_, unresolved_static_fields_,
@@ -164,26 +160,6 @@
#define STATS_LOCK()
#endif
- void TypeInDexCache() REQUIRES(!stats_lock_) {
- STATS_LOCK();
- types_in_dex_cache_++;
- }
-
- void TypeNotInDexCache() REQUIRES(!stats_lock_) {
- STATS_LOCK();
- types_not_in_dex_cache_++;
- }
-
- void StringInDexCache() REQUIRES(!stats_lock_) {
- STATS_LOCK();
- strings_in_dex_cache_++;
- }
-
- void StringNotInDexCache() REQUIRES(!stats_lock_) {
- STATS_LOCK();
- strings_not_in_dex_cache_++;
- }
-
void TypeDoesntNeedAccessCheck() REQUIRES(!stats_lock_) {
STATS_LOCK();
resolved_types_++;
@@ -225,67 +201,6 @@
type_based_devirtualization_++;
}
- // Indicate that a method of the given type was resolved at compile time.
- void ResolvedMethod(InvokeType type) REQUIRES(!stats_lock_) {
- DCHECK_LE(type, kMaxInvokeType);
- STATS_LOCK();
- resolved_methods_[type]++;
- }
-
- // Indicate that a method of the given type was unresolved at compile time as it was in an
- // unknown dex file.
- void UnresolvedMethod(InvokeType type) REQUIRES(!stats_lock_) {
- DCHECK_LE(type, kMaxInvokeType);
- STATS_LOCK();
- unresolved_methods_[type]++;
- }
-
- // Indicate that a type of virtual method dispatch has been converted into a direct method
- // dispatch.
- void VirtualMadeDirect(InvokeType type) REQUIRES(!stats_lock_) {
- DCHECK(type == kVirtual || type == kInterface || type == kSuper);
- STATS_LOCK();
- virtual_made_direct_[type]++;
- }
-
- // Indicate that a method of the given type was able to call directly into boot.
- void DirectCallsToBoot(InvokeType type) REQUIRES(!stats_lock_) {
- DCHECK_LE(type, kMaxInvokeType);
- STATS_LOCK();
- direct_calls_to_boot_[type]++;
- }
-
- // Indicate that a method of the given type was able to be resolved directly from boot.
- void DirectMethodsToBoot(InvokeType type) REQUIRES(!stats_lock_) {
- DCHECK_LE(type, kMaxInvokeType);
- STATS_LOCK();
- direct_methods_to_boot_[type]++;
- }
-
- void ProcessedInvoke(InvokeType type, int flags) REQUIRES(!stats_lock_) {
- STATS_LOCK();
- if (flags == 0) {
- unresolved_methods_[type]++;
- } else {
- DCHECK_NE((flags & kFlagMethodResolved), 0);
- resolved_methods_[type]++;
- if ((flags & kFlagVirtualMadeDirect) != 0) {
- virtual_made_direct_[type]++;
- if ((flags & kFlagPreciseTypeDevirtualization) != 0) {
- type_based_devirtualization_++;
- }
- } else {
- DCHECK_EQ((flags & kFlagPreciseTypeDevirtualization), 0);
- }
- if ((flags & kFlagDirectCallToBoot) != 0) {
- direct_calls_to_boot_[type]++;
- }
- if ((flags & kFlagDirectMethodToBoot) != 0) {
- direct_methods_to_boot_[type]++;
- }
- }
- }
-
// A check-cast could be eliminated due to verifier type analysis.
void SafeCast() REQUIRES(!stats_lock_) {
STATS_LOCK();
@@ -301,12 +216,6 @@
private:
Mutex stats_lock_;
- size_t types_in_dex_cache_;
- size_t types_not_in_dex_cache_;
-
- size_t strings_in_dex_cache_;
- size_t strings_not_in_dex_cache_;
-
size_t resolved_types_;
size_t unresolved_types_;
@@ -355,8 +264,6 @@
Compiler::Kind compiler_kind,
InstructionSet instruction_set,
const InstructionSetFeatures* instruction_set_features,
- bool boot_image,
- bool app_image,
std::unordered_set<std::string>* image_classes,
std::unordered_set<std::string>* compiled_classes,
std::unordered_set<std::string>* compiled_methods,
@@ -377,8 +284,6 @@
compiled_methods_lock_("compiled method lock"),
compiled_methods_(MethodTable::key_compare()),
non_relative_linker_patch_count_(0u),
- boot_image_(boot_image),
- app_image_(app_image),
image_classes_(image_classes),
classes_to_compile_(compiled_classes),
methods_to_compile_(compiled_methods),
@@ -404,7 +309,7 @@
if (compiler_options->VerifyOnlyProfile()) {
CHECK(profile_compilation_info_ != nullptr) << "Requires profile";
}
- if (boot_image_) {
+ if (GetCompilerOptions().IsBootImage()) {
CHECK(image_classes_.get() != nullptr) << "Expected image classes for boot image";
}
}
@@ -496,7 +401,7 @@
// 3) Attempt to verify all classes
// 4) Attempt to initialize image classes, and trivially initialized classes
PreCompile(class_loader, dex_files, timings);
- if (IsBootImage()) {
+ if (GetCompilerOptions().IsBootImage()) {
// We don't need to setup the intrinsics for non boot image compilation, as
// those compilations will pick up a boot image that have the ArtMethod already
// set with the intrinsics flag.
@@ -849,9 +754,10 @@
// TODO: Collect the relevant string indices in parallel, then allocate them sequentially in a
// stable order.
-static void ResolveConstStrings(CompilerDriver* driver,
+static void ResolveConstStrings(Handle<mirror::DexCache> dex_cache,
const DexFile& dex_file,
- const DexFile::CodeItem* code_item) {
+ const DexFile::CodeItem* code_item)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (code_item == nullptr) {
// Abstract or native method.
return;
@@ -859,18 +765,19 @@
const uint16_t* code_ptr = code_item->insns_;
const uint16_t* code_end = code_item->insns_ + code_item->insns_size_in_code_units_;
+ ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
while (code_ptr < code_end) {
const Instruction* inst = Instruction::At(code_ptr);
switch (inst->Opcode()) {
case Instruction::CONST_STRING: {
uint32_t string_index = inst->VRegB_21c();
- driver->CanAssumeStringIsPresentInDexCache(dex_file, string_index);
+ class_linker->ResolveString(dex_file, string_index, dex_cache);
break;
}
case Instruction::CONST_STRING_JUMBO: {
uint32_t string_index = inst->VRegB_31c();
- driver->CanAssumeStringIsPresentInDexCache(dex_file, string_index);
+ class_linker->ResolveString(dex_file, string_index, dex_cache);
break;
}
@@ -885,7 +792,13 @@
static void ResolveConstStrings(CompilerDriver* driver,
const std::vector<const DexFile*>& dex_files,
TimingLogger* timings) {
+ ScopedObjectAccess soa(Thread::Current());
+ StackHandleScope<1> hs(soa.Self());
+ ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
+ MutableHandle<mirror::DexCache> dex_cache(hs.NewHandle<mirror::DexCache>(nullptr));
+
for (const DexFile* dex_file : dex_files) {
+ dex_cache.Assign(class_linker->FindDexCache(soa.Self(), *dex_file, false));
TimingLogger::ScopedTiming t("Resolve const-string Strings", timings);
size_t class_def_count = dex_file->NumClassDefs();
@@ -926,7 +839,7 @@
continue;
}
previous_direct_method_idx = method_idx;
- ResolveConstStrings(driver, *dex_file, it.GetMethodCodeItem());
+ ResolveConstStrings(dex_cache, *dex_file, it.GetMethodCodeItem());
it.Next();
}
// Virtual methods.
@@ -940,7 +853,7 @@
continue;
}
previous_virtual_method_idx = method_idx;
- ResolveConstStrings(driver, *dex_file, it.GetMethodCodeItem());
+ ResolveConstStrings(dex_cache, *dex_file, it.GetMethodCodeItem());
it.Next();
}
DCHECK(!it.HasNext());
@@ -982,7 +895,7 @@
return;
}
- if (GetCompilerOptions().IsForceDeterminism() && IsBootImage()) {
+ if (GetCompilerOptions().IsForceDeterminism() && GetCompilerOptions().IsBootImage()) {
// Resolve strings from const-string. Do this now to have a deterministic image.
ResolveConstStrings(this, dex_files, timings);
VLOG(compiler) << "Resolve const-strings: " << GetMemoryUsageString(false);
@@ -1010,7 +923,7 @@
}
// No set of image classes, assume we include all the classes.
// NOTE: Currently only reachable from InitImageMethodVisitor for the app image case.
- return !IsBootImage();
+ return !GetCompilerOptions().IsBootImage();
}
bool CompilerDriver::IsClassToCompile(const char* descriptor) const {
@@ -1134,7 +1047,7 @@
// Make a list of descriptors for classes to include in the image
void CompilerDriver::LoadImageClasses(TimingLogger* timings) {
CHECK(timings != nullptr);
- if (!IsBootImage()) {
+ if (!GetCompilerOptions().IsBootImage()) {
return;
}
@@ -1362,7 +1275,7 @@
};
void CompilerDriver::UpdateImageClasses(TimingLogger* timings) {
- if (IsBootImage()) {
+ if (GetCompilerOptions().IsBootImage()) {
TimingLogger::ScopedTiming t("UpdateImageClasses", timings);
Runtime* runtime = Runtime::Current();
@@ -1389,7 +1302,7 @@
// Having the klass reference here implies that the klass is already loaded.
return true;
}
- if (!IsBootImage()) {
+ if (!GetCompilerOptions().IsBootImage()) {
// Assume loaded only if klass is in the boot image. App classes cannot be assumed
// loaded because we don't even know what class loader will be used to load them.
bool class_in_image = runtime->GetHeap()->FindSpaceFromObject(klass, false)->IsImageSpace();
@@ -1411,54 +1324,6 @@
dex_to_dex_references_.back().GetMethodIndexes().SetBit(method_ref.dex_method_index);
}
-bool CompilerDriver::CanAssumeTypeIsPresentInDexCache(Handle<mirror::DexCache> dex_cache,
- uint32_t type_idx) {
- bool result = false;
- if ((IsBootImage() &&
- IsImageClass(dex_cache->GetDexFile()->StringDataByIdx(
- dex_cache->GetDexFile()->GetTypeId(type_idx).descriptor_idx_))) ||
- Runtime::Current()->UseJitCompilation()) {
- mirror::Class* resolved_class = dex_cache->GetResolvedType(type_idx);
- result = (resolved_class != nullptr);
- }
-
- if (result) {
- stats_->TypeInDexCache();
- } else {
- stats_->TypeNotInDexCache();
- }
- return result;
-}
-
-bool CompilerDriver::CanAssumeStringIsPresentInDexCache(const DexFile& dex_file,
- uint32_t string_idx) {
- // See also Compiler::ResolveDexFile
-
- bool result = false;
- if (IsBootImage() || Runtime::Current()->UseJitCompilation()) {
- ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<1> hs(soa.Self());
- ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(
- soa.Self(), dex_file, false)));
- if (IsBootImage()) {
- // We resolve all const-string strings when building for the image.
- class_linker->ResolveString(dex_file, string_idx, dex_cache);
- result = true;
- } else {
- // Just check whether the dex cache already has the string.
- DCHECK(Runtime::Current()->UseJitCompilation());
- result = (dex_cache->GetResolvedString(string_idx) != nullptr);
- }
- }
- if (result) {
- stats_->StringInDexCache();
- } else {
- stats_->StringNotInDexCache();
- }
- return result;
-}
-
bool CompilerDriver::CanAccessTypeWithoutChecks(uint32_t referrer_idx,
Handle<mirror::DexCache> dex_cache,
uint32_t type_idx) {
@@ -1522,108 +1387,6 @@
return result;
}
-bool CompilerDriver::CanEmbedTypeInCode(const DexFile& dex_file, uint32_t type_idx,
- bool* is_type_initialized, bool* use_direct_type_ptr,
- uintptr_t* direct_type_ptr, bool* out_is_finalizable) {
- ScopedObjectAccess soa(Thread::Current());
- Runtime* runtime = Runtime::Current();
- mirror::DexCache* dex_cache = runtime->GetClassLinker()->FindDexCache(
- soa.Self(), dex_file, false);
- mirror::Class* resolved_class = dex_cache->GetResolvedType(type_idx);
- if (resolved_class == nullptr) {
- return false;
- }
- if (GetCompilerOptions().GetCompilePic()) {
- // Do not allow a direct class pointer to be used when compiling for position-independent
- return false;
- }
- *out_is_finalizable = resolved_class->IsFinalizable();
- gc::Heap* heap = runtime->GetHeap();
- const bool compiling_boot = heap->IsCompilingBoot();
- const bool support_boot_image_fixup = GetSupportBootImageFixup();
- if (compiling_boot) {
- // boot -> boot class pointers.
- // True if the class is in the image at boot compiling time.
- const bool is_image_class = IsBootImage() && IsImageClass(
- dex_file.StringDataByIdx(dex_file.GetTypeId(type_idx).descriptor_idx_));
- // True if pc relative load works.
- if (is_image_class && support_boot_image_fixup) {
- *is_type_initialized = resolved_class->IsInitialized();
- *use_direct_type_ptr = false;
- *direct_type_ptr = 0;
- return true;
- } else {
- return false;
- }
- } else if (runtime->UseJitCompilation() && !heap->IsMovableObject(resolved_class)) {
- *is_type_initialized = resolved_class->IsInitialized();
- // If the class may move around, then don't embed it as a direct pointer.
- *use_direct_type_ptr = true;
- *direct_type_ptr = reinterpret_cast<uintptr_t>(resolved_class);
- return true;
- } else {
- // True if the class is in the image at app compiling time.
- const bool class_in_image = heap->FindSpaceFromObject(resolved_class, false)->IsImageSpace();
- if (class_in_image && support_boot_image_fixup) {
- // boot -> app class pointers.
- *is_type_initialized = resolved_class->IsInitialized();
- // TODO This is somewhat hacky. We should refactor all of this invoke codepath.
- *use_direct_type_ptr = !GetCompilerOptions().GetIncludePatchInformation();
- *direct_type_ptr = reinterpret_cast<uintptr_t>(resolved_class);
- return true;
- } else {
- // app -> app class pointers.
- // Give up because app does not have an image and class
- // isn't created at compile time. TODO: implement this
- // if/when each app gets an image.
- return false;
- }
- }
-}
-
-bool CompilerDriver::CanEmbedReferenceTypeInCode(ClassReference* ref,
- bool* use_direct_ptr,
- uintptr_t* direct_type_ptr) {
- CHECK(ref != nullptr);
- CHECK(use_direct_ptr != nullptr);
- CHECK(direct_type_ptr != nullptr);
-
- ScopedObjectAccess soa(Thread::Current());
- mirror::Class* reference_class = mirror::Reference::GetJavaLangRefReference();
- bool is_initialized = false;
- bool unused_finalizable;
- // Make sure we have a finished Reference class object before attempting to use it.
- if (!CanEmbedTypeInCode(*reference_class->GetDexCache()->GetDexFile(),
- reference_class->GetDexTypeIndex(), &is_initialized,
- use_direct_ptr, direct_type_ptr, &unused_finalizable) ||
- !is_initialized) {
- return false;
- }
- ref->first = &reference_class->GetDexFile();
- ref->second = reference_class->GetDexClassDefIndex();
- return true;
-}
-
-uint32_t CompilerDriver::GetReferenceSlowFlagOffset() const {
- ScopedObjectAccess soa(Thread::Current());
- mirror::Class* klass = mirror::Reference::GetJavaLangRefReference();
- DCHECK(klass->IsInitialized());
- return klass->GetSlowPathFlagOffset().Uint32Value();
-}
-
-uint32_t CompilerDriver::GetReferenceDisableFlagOffset() const {
- ScopedObjectAccess soa(Thread::Current());
- mirror::Class* klass = mirror::Reference::GetJavaLangRefReference();
- DCHECK(klass->IsInitialized());
- return klass->GetDisableIntrinsicFlagOffset().Uint32Value();
-}
-
-DexCacheArraysLayout CompilerDriver::GetDexCacheArraysLayout(const DexFile* dex_file) {
- return ContainsElement(GetDexFilesForOatFile(), dex_file)
- ? DexCacheArraysLayout(GetInstructionSetPointerSize(instruction_set_), dex_file)
- : DexCacheArraysLayout();
-}
-
void CompilerDriver::ProcessedInstanceField(bool resolved) {
if (!resolved) {
stats_->UnresolvedInstanceField();
@@ -1642,10 +1405,6 @@
}
}
-void CompilerDriver::ProcessedInvoke(InvokeType invoke_type, int flags) {
- stats_->ProcessedInvoke(invoke_type, flags);
-}
-
ArtField* CompilerDriver::ComputeInstanceFieldInfo(uint32_t field_idx,
const DexCompilationUnit* mUnit, bool is_put,
const ScopedObjectAccess& soa) {
@@ -1733,7 +1492,7 @@
if (!use_dex_cache && force_relocations) {
bool is_in_image;
- if (IsBootImage()) {
+ if (GetCompilerOptions().IsBootImage()) {
is_in_image = IsImageClass(method->GetDeclaringClassDescriptor());
} else {
is_in_image = instruction_set_ != kX86 && instruction_set_ != kX86_64 &&
@@ -2132,7 +1891,7 @@
ParallelCompilationManager context(class_linker, class_loader, this, &dex_file, dex_files,
thread_pool);
- if (IsBootImage()) {
+ if (GetCompilerOptions().IsBootImage()) {
// For images we resolve all types, such as array, whereas for applications just those with
// classdefs are resolved by ResolveClassFieldsAndMethods.
TimingLogger::ScopedTiming t("Resolve Types", timings);
@@ -2242,7 +2001,7 @@
// It is *very* problematic if there are verification errors in the boot classpath. For example,
// we rely on things working OK without verification when the decryption dialog is brought up.
// So abort in a debug build if we find this violated.
- DCHECK(!manager_->GetCompiler()->IsBootImage() || klass->IsVerified())
+ DCHECK(!manager_->GetCompiler()->GetCompilerOptions().IsBootImage() || klass->IsVerified())
<< "Boot classpath class " << PrettyClass(klass.Get()) << " failed to fully verify.";
}
soa.Self()->AssertNoPendingException();
@@ -2373,7 +2132,8 @@
if (!klass->IsInitialized()) {
// We need to initialize static fields, we only do this for image classes that aren't
// marked with the $NoPreloadHolder (which implies this should not be initialized early).
- bool can_init_static_fields = manager_->GetCompiler()->IsBootImage() &&
+ bool can_init_static_fields =
+ manager_->GetCompiler()->GetCompilerOptions().IsBootImage() &&
manager_->GetCompiler()->IsImageClass(descriptor) &&
!StringPiece(descriptor).ends_with("$NoPreloadHolder;");
if (can_init_static_fields) {
@@ -2445,7 +2205,7 @@
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
ParallelCompilationManager context(class_linker, jni_class_loader, this, &dex_file, dex_files,
init_thread_pool);
- if (IsBootImage()) {
+ if (GetCompilerOptions().IsBootImage()) {
// TODO: remove this when transactional mode supports multithreading.
init_thread_count = 1U;
}
@@ -2499,7 +2259,7 @@
CHECK(dex_file != nullptr);
InitializeClasses(class_loader, *dex_file, dex_files, timings);
}
- if (boot_image_ || app_image_) {
+ if (GetCompilerOptions().IsBootImage() || GetCompilerOptions().IsAppImage()) {
// Make sure that we call EnsureIntiailized on all the array classes to call
// SetVerificationAttempted so that the access flags are set. If we do not do this they get
// changed at runtime resulting in more dirty image pages.
@@ -2509,7 +2269,7 @@
InitializeArrayClassesAndCreateConflictTablesVisitor visitor;
Runtime::Current()->GetClassLinker()->VisitClassesWithoutClassesLock(&visitor);
}
- if (IsBootImage()) {
+ if (GetCompilerOptions().IsBootImage()) {
// Prune garbage objects created during aborted transactions.
Runtime::Current()->GetHeap()->CollectGarbage(true);
}
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 52a04cc..9a4dd85 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -90,8 +90,6 @@
Compiler::Kind compiler_kind,
InstructionSet instruction_set,
const InstructionSetFeatures* instruction_set_features,
- bool boot_image,
- bool app_image,
std::unordered_set<std::string>* image_classes,
std::unordered_set<std::string>* compiled_classes,
std::unordered_set<std::string>* compiled_methods,
@@ -147,11 +145,6 @@
return compiler_.get();
}
- // Are we compiling and creating an image file?
- bool IsBootImage() const {
- return boot_image_;
- }
-
const std::unordered_set<std::string>* GetImageClasses() const {
return image_classes_.get();
}
@@ -189,15 +182,6 @@
uint16_t class_def_index)
REQUIRES(!requires_constructor_barrier_lock_);
- // Callbacks from compiler to see what runtime checks must be generated.
-
- bool CanAssumeTypeIsPresentInDexCache(Handle<mirror::DexCache> dex_cache,
- uint32_t type_idx)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
- bool CanAssumeStringIsPresentInDexCache(const DexFile& dex_file, uint32_t string_idx)
- REQUIRES(!Locks::mutator_lock_);
-
// Are runtime access checks necessary in the compiled code?
bool CanAccessTypeWithoutChecks(uint32_t referrer_idx,
Handle<mirror::DexCache> dex_cache,
@@ -212,24 +196,6 @@
bool* out_is_finalizable)
REQUIRES_SHARED(Locks::mutator_lock_);
- bool CanEmbedTypeInCode(const DexFile& dex_file, uint32_t type_idx,
- bool* is_type_initialized, bool* use_direct_type_ptr,
- uintptr_t* direct_type_ptr, bool* out_is_finalizable);
-
- // Query methods for the java.lang.ref.Reference class.
- bool CanEmbedReferenceTypeInCode(ClassReference* ref,
- bool* use_direct_type_ptr, uintptr_t* direct_type_ptr);
- uint32_t GetReferenceSlowFlagOffset() const;
- uint32_t GetReferenceDisableFlagOffset() const;
-
- // Get the DexCache for the
- mirror::DexCache* GetDexCache(const DexCompilationUnit* mUnit)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
- mirror::ClassLoader* GetClassLoader(const ScopedObjectAccess& soa,
- const DexCompilationUnit* mUnit)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
// Resolve compiling method's class. Returns null on failure.
mirror::Class* ResolveCompilingMethodsClass(
const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
@@ -257,19 +223,6 @@
uint32_t field_idx, bool is_static)
REQUIRES_SHARED(Locks::mutator_lock_);
- // Get declaration location of a resolved field.
- void GetResolvedFieldDexFileLocation(
- ArtField* resolved_field, const DexFile** declaring_dex_file,
- uint16_t* declaring_class_idx, uint16_t* declaring_field_idx)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
- bool IsFieldVolatile(ArtField* field) REQUIRES_SHARED(Locks::mutator_lock_);
- MemberOffset GetFieldOffset(ArtField* field) REQUIRES_SHARED(Locks::mutator_lock_);
-
- // Find a dex cache for a dex file.
- inline mirror::DexCache* FindDexCache(const DexFile* dex_file)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
// Can we fast-path an IGET/IPUT access to an instance field? If yes, compute the field offset.
std::pair<bool, bool> IsFastInstanceField(
mirror::DexCache* dex_cache, mirror::Class* referrer_class,
@@ -295,15 +248,6 @@
uint32_t* storage_index)
REQUIRES_SHARED(Locks::mutator_lock_);
- // Is static field's in referrer's class?
- bool IsStaticFieldInReferrerClass(mirror::Class* referrer_class, ArtField* resolved_field)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
- // Is static field's class initialized?
- bool IsStaticFieldsClassInitialized(mirror::Class* referrer_class,
- ArtField* resolved_field)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
// Resolve a method. Returns null on failure, including incompatible class change.
ArtMethod* ResolveMethod(
ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
@@ -311,37 +255,8 @@
uint32_t method_idx, InvokeType invoke_type, bool check_incompatible_class_change = true)
REQUIRES_SHARED(Locks::mutator_lock_);
- // Get declaration location of a resolved field.
- void GetResolvedMethodDexFileLocation(
- ArtMethod* resolved_method, const DexFile** declaring_dex_file,
- uint16_t* declaring_class_idx, uint16_t* declaring_method_idx)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
- // Get the index in the vtable of the method.
- uint16_t GetResolvedMethodVTableIndex(
- ArtMethod* resolved_method, InvokeType type)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
- // Is method's class initialized for an invoke?
- // For static invokes to determine whether we need to consider potential call to <clinit>().
- // For non-static invokes, assuming a non-null reference, the class is always initialized.
- bool IsMethodsClassInitialized(mirror::Class* referrer_class, ArtMethod* resolved_method)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
- // Get the layout of dex cache arrays for a dex file. Returns invalid layout if the
- // dex cache arrays don't have a fixed layout.
- DexCacheArraysLayout GetDexCacheArraysLayout(const DexFile* dex_file);
-
void ProcessedInstanceField(bool resolved);
void ProcessedStaticField(bool resolved, bool local);
- void ProcessedInvoke(InvokeType invoke_type, int flags);
-
- void ComputeFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit,
- const ScopedObjectAccess& soa, bool is_static,
- ArtField** resolved_field,
- mirror::Class** referrer_class,
- mirror::DexCache** dex_cache)
- REQUIRES_SHARED(Locks::mutator_lock_);
// Can we fast path instance field access? Computes field's offset and volatility.
bool ComputeInstanceFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit, bool is_put,
@@ -393,6 +308,7 @@
void SetDedupeEnabled(bool dedupe_enabled) {
compiled_method_storage_.SetDedupeEnabled(dedupe_enabled);
}
+
bool DedupeEnabled() const {
return compiled_method_storage_.DedupeEnabled();
}
@@ -456,6 +372,13 @@
return current_dex_to_dex_methods_;
}
+ // Compute constant code and method pointers when possible.
+ void GetCodeAndMethodForDirectCall(const mirror::Class* referrer_class,
+ ArtMethod* method,
+ /* out */ uintptr_t* direct_code,
+ /* out */ uintptr_t* direct_method)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
private:
// Return whether the declaring class of `resolved_member` is
// available to `referrer_class` for read or write access using two
@@ -484,38 +407,9 @@
uint32_t field_idx)
REQUIRES_SHARED(Locks::mutator_lock_);
- // Can we assume that the klass is initialized?
- bool CanAssumeClassIsInitialized(mirror::Class* klass)
- REQUIRES_SHARED(Locks::mutator_lock_);
- bool CanReferrerAssumeClassIsInitialized(mirror::Class* referrer_class, mirror::Class* klass)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
- // These flags are internal to CompilerDriver for collecting INVOKE resolution statistics.
- // The only external contract is that unresolved method has flags 0 and resolved non-0.
- enum {
- kBitMethodResolved = 0,
- kBitVirtualMadeDirect,
- kBitPreciseTypeDevirtualization,
- kBitDirectCallToBoot,
- kBitDirectMethodToBoot
- };
- static constexpr int kFlagMethodResolved = 1 << kBitMethodResolved;
- static constexpr int kFlagVirtualMadeDirect = 1 << kBitVirtualMadeDirect;
- static constexpr int kFlagPreciseTypeDevirtualization = 1 << kBitPreciseTypeDevirtualization;
- static constexpr int kFlagDirectCallToBoot = 1 << kBitDirectCallToBoot;
- static constexpr int kFlagDirectMethodToBoot = 1 << kBitDirectMethodToBoot;
- static constexpr int kFlagsMethodResolvedVirtualMadeDirect =
- kFlagMethodResolved | kFlagVirtualMadeDirect;
- static constexpr int kFlagsMethodResolvedPreciseTypeDevirtualization =
- kFlagsMethodResolvedVirtualMadeDirect | kFlagPreciseTypeDevirtualization;
-
- public: // TODO make private or eliminate.
- // Compute constant code and method pointers when possible.
- void GetCodeAndMethodForDirectCall(const mirror::Class* referrer_class,
- ArtMethod* method,
- /* out */ uintptr_t* direct_code,
- /* out */ uintptr_t* direct_method)
- REQUIRES_SHARED(Locks::mutator_lock_);
+ mirror::ClassLoader* GetClassLoader(const ScopedObjectAccess& soa,
+ const DexCompilationUnit* mUnit)
+ REQUIRES_SHARED(Locks::mutator_lock_);
private:
void PreCompile(jobject class_loader,
@@ -573,8 +467,6 @@
REQUIRES(!Locks::mutator_lock_, !compiled_classes_lock_);
void UpdateImageClasses(TimingLogger* timings) REQUIRES(!Locks::mutator_lock_);
- static void FindClinitImageClassesCallback(mirror::Object* object, void* arg)
- REQUIRES_SHARED(Locks::mutator_lock_);
void Compile(jobject class_loader,
const std::vector<const DexFile*>& dex_files,
@@ -628,9 +520,6 @@
// in the .oat_patches ELF section if requested in the compiler options.
size_t non_relative_linker_patch_count_ GUARDED_BY(compiled_methods_lock_);
- const bool boot_image_;
- const bool app_image_;
-
// If image_ is true, specifies the classes that will be included in the image.
// Note if image_classes_ is null, all classes are included in the image.
std::unique_ptr<std::unordered_set<std::string>> image_classes_;
diff --git a/compiler/driver/compiler_options.cc b/compiler/driver/compiler_options.cc
index 30ba8c9..cbcc169 100644
--- a/compiler/driver/compiler_options.cc
+++ b/compiler/driver/compiler_options.cc
@@ -30,6 +30,8 @@
inline_depth_limit_(kUnsetInlineDepthLimit),
inline_max_code_units_(kUnsetInlineMaxCodeUnits),
no_inline_from_(nullptr),
+ boot_image_(false),
+ app_image_(false),
include_patch_information_(kDefaultIncludePatchInformation),
top_k_profile_threshold_(kDefaultTopKProfileThreshold),
debuggable_(false),
@@ -78,34 +80,35 @@
bool dump_cfg_append,
bool force_determinism,
RegisterAllocator::Strategy regalloc_strategy,
- const std::vector<std::string>* passes_to_run
- ) : // NOLINT(whitespace/parens)
- compiler_filter_(compiler_filter),
- huge_method_threshold_(huge_method_threshold),
- large_method_threshold_(large_method_threshold),
- small_method_threshold_(small_method_threshold),
- tiny_method_threshold_(tiny_method_threshold),
- num_dex_methods_threshold_(num_dex_methods_threshold),
- inline_depth_limit_(inline_depth_limit),
- inline_max_code_units_(inline_max_code_units),
- no_inline_from_(no_inline_from),
- include_patch_information_(include_patch_information),
- top_k_profile_threshold_(top_k_profile_threshold),
- debuggable_(debuggable),
- generate_debug_info_(generate_debug_info),
- generate_mini_debug_info_(kDefaultGenerateMiniDebugInfo),
- implicit_null_checks_(implicit_null_checks),
- implicit_so_checks_(implicit_so_checks),
- implicit_suspend_checks_(implicit_suspend_checks),
- compile_pic_(compile_pic),
- verbose_methods_(verbose_methods),
- abort_on_hard_verifier_failure_(abort_on_hard_verifier_failure),
- init_failure_output_(init_failure_output),
- dump_cfg_file_name_(dump_cfg_file_name),
- dump_cfg_append_(dump_cfg_append),
- force_determinism_(force_determinism),
- register_allocation_strategy_(regalloc_strategy),
- passes_to_run_(passes_to_run) {
+ const std::vector<std::string>* passes_to_run)
+ : compiler_filter_(compiler_filter),
+ huge_method_threshold_(huge_method_threshold),
+ large_method_threshold_(large_method_threshold),
+ small_method_threshold_(small_method_threshold),
+ tiny_method_threshold_(tiny_method_threshold),
+ num_dex_methods_threshold_(num_dex_methods_threshold),
+ inline_depth_limit_(inline_depth_limit),
+ inline_max_code_units_(inline_max_code_units),
+ no_inline_from_(no_inline_from),
+ boot_image_(false),
+ app_image_(false),
+ include_patch_information_(include_patch_information),
+ top_k_profile_threshold_(top_k_profile_threshold),
+ debuggable_(debuggable),
+ generate_debug_info_(generate_debug_info),
+ generate_mini_debug_info_(kDefaultGenerateMiniDebugInfo),
+ implicit_null_checks_(implicit_null_checks),
+ implicit_so_checks_(implicit_so_checks),
+ implicit_suspend_checks_(implicit_suspend_checks),
+ compile_pic_(compile_pic),
+ verbose_methods_(verbose_methods),
+ abort_on_hard_verifier_failure_(abort_on_hard_verifier_failure),
+ init_failure_output_(init_failure_output),
+ dump_cfg_file_name_(dump_cfg_file_name),
+ dump_cfg_append_(dump_cfg_append),
+ force_determinism_(force_determinism),
+ register_allocation_strategy_(regalloc_strategy),
+ passes_to_run_(passes_to_run) {
}
void CompilerOptions::ParseHugeMethodMax(const StringPiece& option, UsageFn Usage) {
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index abc58d7..8e4a775 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -203,6 +203,14 @@
return include_patch_information_;
}
+ bool IsBootImage() const {
+ return boot_image_;
+ }
+
+ bool IsAppImage() const {
+ return app_image_;
+ }
+
// Should the code be compiled as position independent?
bool GetCompilePic() const {
return compile_pic_;
@@ -281,6 +289,8 @@
// prefer vector<> over a lookup-oriented container, such as set<>.
const std::vector<const DexFile*>* no_inline_from_;
+ bool boot_image_;
+ bool app_image_;
bool include_patch_information_;
// When using a profile file only the top K% of the profiled samples will be compiled.
double top_k_profile_threshold_;
@@ -305,7 +315,7 @@
std::string dump_cfg_file_name_;
bool dump_cfg_append_;
- // Whether the compiler should trade performance for determinism to guarantee exactly reproducable
+ // Whether the compiler should trade performance for determinism to guarantee exactly reproducible
// outcomes.
bool force_determinism_;
@@ -320,6 +330,7 @@
const std::vector<std::string>* passes_to_run_;
friend class Dex2Oat;
+ friend class CommonCompilerTest;
DISALLOW_COPY_AND_ASSIGN(CompilerOptions);
};
diff --git a/compiler/elf_builder.h b/compiler/elf_builder.h
index 02831c9..73240be 100644
--- a/compiler/elf_builder.h
+++ b/compiler/elf_builder.h
@@ -619,7 +619,8 @@
void PrepareDynamicSection(const std::string& elf_file_path,
Elf_Word rodata_size,
Elf_Word text_size,
- Elf_Word bss_size) {
+ Elf_Word bss_size,
+ Elf_Word bss_roots_offset) {
std::string soname(elf_file_path);
size_t directory_separator_pos = soname.rfind('/');
if (directory_separator_pos != std::string::npos) {
@@ -659,10 +660,20 @@
Elf_Word oatlastword_address = rodata_address + rodata_size - 4;
dynsym_.Add(oatlastword, rodata_index, oatlastword_address, 4, STB_GLOBAL, STT_OBJECT);
}
+ DCHECK_LE(bss_roots_offset, bss_size);
if (bss_size != 0u) {
Elf_Word bss_index = rodata_index + 1u + (text_size != 0 ? 1u : 0u);
Elf_Word oatbss = dynstr_.Add("oatbss");
- dynsym_.Add(oatbss, bss_index, bss_address, bss_size, STB_GLOBAL, STT_OBJECT);
+ dynsym_.Add(oatbss, bss_index, bss_address, bss_roots_offset, STB_GLOBAL, STT_OBJECT);
+ // Add a symbol marking the start of the GC roots part of the .bss, if not empty.
+ if (bss_roots_offset != bss_size) {
+ DCHECK_LT(bss_roots_offset, bss_size);
+ Elf_Word bss_roots_address = bss_address + bss_roots_offset;
+ Elf_Word bss_roots_size = bss_size - bss_roots_offset;
+ Elf_Word oatbssroots = dynstr_.Add("oatbssroots");
+ dynsym_.Add(
+ oatbssroots, bss_index, bss_roots_address, bss_roots_size, STB_GLOBAL, STT_OBJECT);
+ }
Elf_Word oatbsslastword = dynstr_.Add("oatbsslastword");
Elf_Word bsslastword_address = bss_address + bss_size - 4;
dynsym_.Add(oatbsslastword, bss_index, bsslastword_address, 4, STB_GLOBAL, STT_OBJECT);
diff --git a/compiler/elf_writer.h b/compiler/elf_writer.h
index f8f9102..d55f745 100644
--- a/compiler/elf_writer.h
+++ b/compiler/elf_writer.h
@@ -52,7 +52,10 @@
virtual ~ElfWriter() {}
virtual void Start() = 0;
- virtual void SetLoadedSectionSizes(size_t rodata_size, size_t text_size, size_t bss_size) = 0;
+ virtual void PrepareDynamicSection(size_t rodata_size,
+ size_t text_size,
+ size_t bss_size,
+ size_t bss_roots_offset) = 0;
virtual void PrepareDebugInfo(const ArrayRef<const debug::MethodDebugInfo>& method_infos) = 0;
virtual OutputStream* StartRoData() = 0;
virtual void EndRoData(OutputStream* rodata) = 0;
diff --git a/compiler/elf_writer_quick.cc b/compiler/elf_writer_quick.cc
index bed864b..36cd232 100644
--- a/compiler/elf_writer_quick.cc
+++ b/compiler/elf_writer_quick.cc
@@ -93,7 +93,10 @@
~ElfWriterQuick();
void Start() OVERRIDE;
- void SetLoadedSectionSizes(size_t rodata_size, size_t text_size, size_t bss_size) OVERRIDE;
+ void PrepareDynamicSection(size_t rodata_size,
+ size_t text_size,
+ size_t bss_size,
+ size_t bss_roots_offset) OVERRIDE;
void PrepareDebugInfo(const ArrayRef<const debug::MethodDebugInfo>& method_infos) OVERRIDE;
OutputStream* StartRoData() OVERRIDE;
void EndRoData(OutputStream* rodata) OVERRIDE;
@@ -167,16 +170,21 @@
}
template <typename ElfTypes>
-void ElfWriterQuick<ElfTypes>::SetLoadedSectionSizes(size_t rodata_size,
+void ElfWriterQuick<ElfTypes>::PrepareDynamicSection(size_t rodata_size,
size_t text_size,
- size_t bss_size) {
+ size_t bss_size,
+ size_t bss_roots_offset) {
DCHECK_EQ(rodata_size_, 0u);
rodata_size_ = rodata_size;
DCHECK_EQ(text_size_, 0u);
text_size_ = text_size;
DCHECK_EQ(bss_size_, 0u);
bss_size_ = bss_size;
- builder_->PrepareDynamicSection(elf_file_->GetPath(), rodata_size_, text_size_, bss_size_);
+ builder_->PrepareDynamicSection(elf_file_->GetPath(),
+ rodata_size_,
+ text_size_,
+ bss_size_,
+ bss_roots_offset);
}
template <typename ElfTypes>
diff --git a/compiler/image_test.cc b/compiler/image_test.cc
index 4689c9d..9e94b9d 100644
--- a/compiler/image_test.cc
+++ b/compiler/image_test.cc
@@ -263,7 +263,10 @@
oat_writer->PrepareLayout(driver, writer.get(), cur_dex_files, &patcher);
size_t rodata_size = oat_writer->GetOatHeader().GetExecutableOffset();
size_t text_size = oat_writer->GetOatSize() - rodata_size;
- elf_writer->SetLoadedSectionSizes(rodata_size, text_size, oat_writer->GetBssSize());
+ elf_writer->PrepareDynamicSection(rodata_size,
+ text_size,
+ oat_writer->GetBssSize(),
+ oat_writer->GetBssRootsOffset());
writer->UpdateOatFileLayout(i,
elf_writer->GetLoadedSize(),
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 41bda60..210943c 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -908,7 +908,7 @@
ArtField** resolved_fields = dex_cache->GetResolvedFields();
for (size_t i = 0; i < dex_cache->NumResolvedFields(); i++) {
ArtField* field = mirror::DexCache::GetElementPtrSize(resolved_fields, i, target_ptr_size_);
- if (field != nullptr && !KeepClass(field->GetDeclaringClass())) {
+ if (field != nullptr && !KeepClass(field->GetDeclaringClass().Decode())) {
dex_cache->SetResolvedField(i, nullptr, target_ptr_size_);
}
}
@@ -1742,7 +1742,7 @@
case kNativeObjectRelocationTypeArtField: {
memcpy(dest, pair.first, sizeof(ArtField));
reinterpret_cast<ArtField*>(dest)->SetDeclaringClass(
- GetImageAddress(reinterpret_cast<ArtField*>(pair.first)->GetDeclaringClass()));
+ GetImageAddress(reinterpret_cast<ArtField*>(pair.first)->GetDeclaringClass().Decode()));
break;
}
case kNativeObjectRelocationTypeRuntimeMethod:
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 4f86905..4ef2db8 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -156,8 +156,6 @@
Compiler::kOptimizing,
instruction_set,
instruction_set_features_.get(),
- /* boot_image */ false,
- /* app_image */ false,
/* image_classes */ nullptr,
/* compiled_classes */ nullptr,
/* compiled_methods */ nullptr,
diff --git a/compiler/linker/arm64/relative_patcher_arm64.cc b/compiler/linker/arm64/relative_patcher_arm64.cc
index 4c8788e..3b77880 100644
--- a/compiler/linker/arm64/relative_patcher_arm64.cc
+++ b/compiler/linker/arm64/relative_patcher_arm64.cc
@@ -222,9 +222,10 @@
}
shift = 0u; // No shift for ADD.
} else {
- // LDR 32-bit or 64-bit with imm12 == 0 (unset).
- DCHECK(patch.GetType() == LinkerPatch::Type::kDexCacheArray) << patch.GetType();
- DCHECK_EQ(insn & 0xbffffc00, 0xb9400000) << std::hex << insn;
+ // LDR/STR 32-bit or 64-bit with imm12 == 0 (unset).
+ DCHECK(patch.GetType() == LinkerPatch::Type::kDexCacheArray ||
+ patch.GetType() == LinkerPatch::Type::kStringBssEntry) << patch.GetType();
+ DCHECK_EQ(insn & 0xbfbffc00, 0xb9000000) << std::hex << insn;
}
if (kIsDebugBuild) {
uint32_t adrp = GetInsn(code, pc_insn_offset);
diff --git a/compiler/linker/relative_patcher_test.h b/compiler/linker/relative_patcher_test.h
index 62b3a0a..0151789 100644
--- a/compiler/linker/relative_patcher_test.h
+++ b/compiler/linker/relative_patcher_test.h
@@ -47,8 +47,6 @@
Compiler::kQuick,
instruction_set,
/* instruction_set_features*/ nullptr,
- /* boot_image */ false,
- /* app_image */ false,
/* image_classes */ nullptr,
/* compiled_classes */ nullptr,
/* compiled_methods */ nullptr,
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index e8bc67d..2762804 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -108,8 +108,6 @@
compiler_kind,
insn_set,
insn_features_.get(),
- /* boot_image */ false,
- /* app_image */ false,
/* image_classes */ nullptr,
/* compiled_classes */ nullptr,
/* compiled_methods */ nullptr,
@@ -194,6 +192,7 @@
&opened_dex_files)) {
return false;
}
+
Runtime* runtime = Runtime::Current();
ClassLinker* const class_linker = runtime->GetClassLinker();
std::vector<const DexFile*> dex_files;
@@ -207,7 +206,10 @@
oat_writer.PrepareLayout(compiler_driver_.get(), nullptr, dex_files, &patcher);
size_t rodata_size = oat_writer.GetOatHeader().GetExecutableOffset();
size_t text_size = oat_writer.GetOatSize() - rodata_size;
- elf_writer->SetLoadedSectionSizes(rodata_size, text_size, oat_writer.GetBssSize());
+ elf_writer->PrepareDynamicSection(rodata_size,
+ text_size,
+ oat_writer.GetBssSize(),
+ oat_writer.GetBssRootsOffset());
if (!oat_writer.WriteRodata(oat_rodata)) {
return false;
@@ -228,7 +230,15 @@
elf_writer->WriteDebugInfo(oat_writer.GetMethodDebugInfo());
elf_writer->WritePatchLocations(oat_writer.GetAbsolutePatchLocations());
- return elf_writer->End();
+ if (!elf_writer->End()) {
+ return false;
+ }
+
+ opened_dex_files_maps_.emplace_back(std::move(opened_dex_files_map));
+ for (std::unique_ptr<const DexFile>& dex_file : opened_dex_files) {
+ opened_dex_files_.emplace_back(dex_file.release());
+ }
+ return true;
}
void TestDexFileInput(bool verify, bool low_4gb);
@@ -236,6 +246,9 @@
std::unique_ptr<const InstructionSetFeatures> insn_features_;
std::unique_ptr<QuickCompilerCallbacks> callbacks_;
+
+ std::vector<std::unique_ptr<MemMap>> opened_dex_files_maps_;
+ std::vector<std::unique_ptr<const DexFile>> opened_dex_files_;
};
class ZipBuilder {
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 54ec7c1..44c26ed 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -300,7 +300,10 @@
vdex_dex_files_offset_(0u),
vdex_verifier_deps_offset_(0u),
oat_size_(0u),
+ bss_start_(0u),
bss_size_(0u),
+ bss_roots_offset_(0u),
+ bss_string_entries_(),
oat_data_offset_(0u),
oat_header_(nullptr),
size_vdex_header_(0),
@@ -554,15 +557,8 @@
oat_size_ = offset;
if (!HasBootImage()) {
- // Allocate space for app dex cache arrays in the .bss section.
- size_t bss_start = RoundUp(oat_size_, kPageSize);
- PointerSize pointer_size = GetInstructionSetPointerSize(instruction_set);
- bss_size_ = 0u;
- for (const DexFile* dex_file : *dex_files_) {
- dex_cache_arrays_offsets_.Put(dex_file, bss_start + bss_size_);
- DexCacheArraysLayout layout(pointer_size, dex_file);
- bss_size_ += layout.Size();
- }
+ TimingLogger::ScopedTiming split("InitBssLayout", timings_);
+ InitBssLayout(instruction_set);
}
CHECK_EQ(dex_files_->size(), oat_dex_files_.size());
@@ -805,6 +801,10 @@
if (!patch.IsPcRelative()) {
writer_->absolute_patch_locations_.push_back(base_loc + patch.LiteralOffset());
}
+ if (patch.GetType() == LinkerPatch::Type::kStringBssEntry) {
+ StringReference ref(patch.TargetStringDexFile(), patch.TargetStringIndex());
+ writer_->bss_string_entries_.Overwrite(ref, /* placeholder */ 0u);
+ }
}
}
}
@@ -1115,6 +1115,15 @@
target_offset);
break;
}
+ case LinkerPatch::Type::kStringBssEntry: {
+ StringReference ref(patch.TargetStringDexFile(), patch.TargetStringIndex());
+ uint32_t target_offset = writer_->bss_string_entries_.Get(ref);
+ writer_->relative_patcher_->PatchPcRelativeReference(&patched_code_,
+ patch,
+ offset_ + literal_offset,
+ target_offset);
+ break;
+ }
case LinkerPatch::Type::kTypeRelative: {
uint32_t target_offset = GetTargetObjectOffset(GetTargetType(patch));
writer_->relative_patcher_->PatchPcRelativeReference(&patched_code_,
@@ -1500,7 +1509,7 @@
offset = RoundUp(offset, kPageSize);
oat_header_->SetExecutableOffset(offset);
size_executable_offset_alignment_ = offset - old_offset;
- if (compiler_driver_->IsBootImage()) {
+ if (compiler_driver_->GetCompilerOptions().IsBootImage()) {
InstructionSet instruction_set = compiler_driver_->GetInstructionSet();
#define DO_TRAMPOLINE(field, fn_name) \
@@ -1548,6 +1557,29 @@
return offset;
}
+void OatWriter::InitBssLayout(InstructionSet instruction_set) {
+ DCHECK(!HasBootImage());
+
+ // Allocate space for app dex cache arrays in the .bss section.
+ bss_start_ = RoundUp(oat_size_, kPageSize);
+ PointerSize pointer_size = GetInstructionSetPointerSize(instruction_set);
+ bss_size_ = 0u;
+ for (const DexFile* dex_file : *dex_files_) {
+ dex_cache_arrays_offsets_.Put(dex_file, bss_start_ + bss_size_);
+ DexCacheArraysLayout layout(pointer_size, dex_file);
+ bss_size_ += layout.Size();
+ }
+
+ bss_roots_offset_ = bss_size_;
+
+ // Prepare offsets for .bss String entries.
+ for (auto& entry : bss_string_entries_) {
+ DCHECK_EQ(entry.second, 0u);
+ entry.second = bss_start_ + bss_size_;
+ bss_size_ += sizeof(GcRoot<mirror::String>);
+ }
+}
+
bool OatWriter::WriteRodata(OutputStream* out) {
CHECK(write_state_ == WriteState::kWriteRoData);
@@ -1736,7 +1768,7 @@
oat_header_->SetImageFileLocationOatChecksum(image_file_location_oat_checksum);
oat_header_->SetImageFileLocationOatDataBegin(image_file_location_oat_begin);
- if (compiler_driver_->IsBootImage()) {
+ if (compiler_driver_->GetCompilerOptions().IsBootImage()) {
CHECK_EQ(image_patch_delta, 0);
CHECK_EQ(oat_header_->GetImagePatchDelta(), 0);
} else {
@@ -1826,7 +1858,7 @@
}
size_t OatWriter::WriteCode(OutputStream* out, const size_t file_offset, size_t relative_offset) {
- if (compiler_driver_->IsBootImage()) {
+ if (compiler_driver_->GetCompilerOptions().IsBootImage()) {
InstructionSet instruction_set = compiler_driver_->GetInstructionSet();
#define DO_TRAMPOLINE(field) \
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index 670accb..1cc193b 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -30,6 +30,7 @@
#include "oat.h"
#include "os.h"
#include "safe_map.h"
+#include "string_reference.h"
namespace art {
@@ -194,6 +195,10 @@
return bss_size_;
}
+ size_t GetBssRootsOffset() const {
+ return bss_roots_offset_;
+ }
+
size_t GetOatDataOffset() const {
return oat_data_offset_;
}
@@ -265,6 +270,7 @@
size_t InitOatMaps(size_t offset);
size_t InitOatCode(size_t offset);
size_t InitOatCodeDexFiles(size_t offset);
+ void InitBssLayout(InstructionSet instruction_set);
bool WriteClassOffsets(OutputStream* out);
bool WriteClasses(OutputStream* out);
@@ -322,9 +328,20 @@
// Size required for Oat data structures.
size_t oat_size_;
- // The size of the required .bss section holding the DexCache data.
+ // The start of the required .bss section.
+ size_t bss_start_;
+
+ // The size of the required .bss section holding the DexCache data and GC roots.
size_t bss_size_;
+ // The offset of the GC roots in .bss section.
+ size_t bss_roots_offset_;
+
+ // Map for allocating String entries in .bss. Indexed by StringReference for the source
+ // string in the dex file with the "string value comparator" for deduplication. The value
+ // is the target offset for patching, starting at `bss_start_ + bss_roots_offset_`.
+ SafeMap<StringReference, size_t, StringReferenceValueComparator> bss_string_entries_;
+
// Offsets of the dex cache arrays for each app dex file. For the
// boot image, this information is provided by the ImageWriter.
SafeMap<const DexFile*, size_t> dex_cache_arrays_offsets_; // DexFiles not owned.
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 8500204..49f4f18 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -531,40 +531,15 @@
uint32_t GetReferenceDisableFlagOffset() const;
protected:
- // Method patch info used for recording locations of required linker patches and
- // target methods. The target method can be used for various purposes, whether for
- // patching the address of the method or the code pointer or a PC-relative call.
+ // Patch info used for recording locations of required linker patches and their targets,
+ // i.e. target method, string, type or code identified by their dex file and index.
template <typename LabelType>
- struct MethodPatchInfo {
- explicit MethodPatchInfo(MethodReference m) : target_method(m), label() { }
-
- MethodReference target_method;
- LabelType label;
- };
-
- // String patch info used for recording locations of required linker patches and
- // target strings. The actual string address can be absolute or PC-relative.
- template <typename LabelType>
- struct StringPatchInfo {
- StringPatchInfo(const DexFile& df, uint32_t index)
- : dex_file(df), string_index(index), label() { }
+ struct PatchInfo {
+ PatchInfo(const DexFile& target_dex_file, uint32_t target_index)
+ : dex_file(target_dex_file), index(target_index) { }
const DexFile& dex_file;
- uint32_t string_index;
- LabelType label;
- };
-
- // Type patch info used for recording locations of required linker patches and
- // target types. The actual type address can be absolute or PC-relative.
- // TODO: Consider merging with MethodPatchInfo and StringPatchInfo - all these
- // classes contain the dex file, some index and the label.
- template <typename LabelType>
- struct TypePatchInfo {
- TypePatchInfo(const DexFile& df, uint32_t index)
- : dex_file(df), type_index(index), label() { }
-
- const DexFile& dex_file;
- uint32_t type_index;
+ uint32_t index;
LabelType label;
};
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 681988d..9870876 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -422,6 +422,50 @@
DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathARM);
};
+class LoadStringSlowPathARM : public SlowPathCodeARM {
+ public:
+ explicit LoadStringSlowPathARM(HLoadString* instruction) : SlowPathCodeARM(instruction) {}
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
+ DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+
+ CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
+ __ Bind(GetEntryLabel());
+ SaveLiveRegisters(codegen, locations);
+
+ InvokeRuntimeCallingConvention calling_convention;
+ HLoadString* load = instruction_->AsLoadString();
+ const uint32_t string_index = load->GetStringIndex();
+ __ LoadImmediate(calling_convention.GetRegisterAt(0), string_index);
+ arm_codegen->InvokeRuntime(kQuickResolveString, instruction_, instruction_->GetDexPc(), this);
+ CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
+ arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
+
+ RestoreLiveRegisters(codegen, locations);
+
+ // Store the resolved String to the BSS entry.
+ // TODO: Change art_quick_resolve_string to kSaveEverything and use a temporary for the
+ // .bss entry address in the fast path, so that we can avoid another calculation here.
+ CodeGeneratorARM::PcRelativePatchInfo* labels =
+ arm_codegen->NewPcRelativeStringPatch(load->GetDexFile(), string_index);
+ __ BindTrackedLabel(&labels->movw_label);
+ __ movw(IP, /* placeholder */ 0u);
+ __ BindTrackedLabel(&labels->movt_label);
+ __ movt(IP, /* placeholder */ 0u);
+ __ BindTrackedLabel(&labels->add_pc_label);
+ __ add(IP, IP, ShifterOperand(PC));
+ __ str(locations->Out().AsRegister<Register>(), Address(IP));
+
+ __ b(GetExitLabel());
+ }
+
+ const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathARM"; }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARM);
+};
+
class TypeCheckSlowPathARM : public SlowPathCodeARM {
public:
TypeCheckSlowPathARM(HInstruction* instruction, bool is_fatal)
@@ -5641,15 +5685,8 @@
case HLoadString::LoadKind::kDexCacheAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
break;
- case HLoadString::LoadKind::kDexCachePcRelative:
+ case HLoadString::LoadKind::kBssEntry:
DCHECK(!Runtime::Current()->UseJitCompilation());
- // We disable pc-relative load when there is an irreducible loop, as the optimization
- // is incompatible with it.
- // TODO: Create as many ArmDexCacheArraysBase instructions as needed for methods
- // with irreducible loops.
- if (GetGraph()->HasIrreducibleLoops()) {
- return HLoadString::LoadKind::kDexCacheViaMethod;
- }
break;
case HLoadString::LoadKind::kDexCacheViaMethod:
break;
@@ -5659,12 +5696,13 @@
void LocationsBuilderARM::VisitLoadString(HLoadString* load) {
LocationSummary::CallKind call_kind = load->NeedsEnvironment()
- ? LocationSummary::kCallOnMainOnly
+ ? ((load->GetLoadKind() == HLoadString::LoadKind::kDexCacheViaMethod)
+ ? LocationSummary::kCallOnMainOnly
+ : LocationSummary::kCallOnSlowPath)
: LocationSummary::kNoCall;
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(load, call_kind);
HLoadString::LoadKind load_kind = load->GetLoadKind();
- DCHECK(load_kind != HLoadString::LoadKind::kDexCachePcRelative) << "Not supported";
if (load_kind == HLoadString::LoadKind::kDexCacheViaMethod) {
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RegisterLocation(R0));
@@ -5686,6 +5724,7 @@
return; // No dex cache slow path.
}
case HLoadString::LoadKind::kBootImageLinkTimePcRelative: {
+ DCHECK(codegen_->GetCompilerOptions().IsBootImage());
CodeGeneratorARM::PcRelativePatchInfo* labels =
codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex());
__ BindTrackedLabel(&labels->movw_label);
@@ -5702,6 +5741,23 @@
__ LoadLiteral(out, codegen_->DeduplicateBootImageAddressLiteral(address));
return; // No dex cache slow path.
}
+ case HLoadString::LoadKind::kBssEntry: {
+ DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
+ CodeGeneratorARM::PcRelativePatchInfo* labels =
+ codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex());
+ __ BindTrackedLabel(&labels->movw_label);
+ __ movw(out, /* placeholder */ 0u);
+ __ BindTrackedLabel(&labels->movt_label);
+ __ movt(out, /* placeholder */ 0u);
+ __ BindTrackedLabel(&labels->add_pc_label);
+ __ add(out, out, ShifterOperand(PC));
+ GenerateGcRootFieldLoad(load, out_loc, out, 0);
+ SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathARM(load);
+ codegen_->AddSlowPath(slow_path);
+ __ CompareAndBranchIfZero(out, slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+ return;
+ }
default:
break;
}
@@ -6850,7 +6906,8 @@
__ bl(GetFrameEntryLabel());
break;
case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative:
- relative_call_patches_.emplace_back(invoke->GetTargetMethod());
+ relative_call_patches_.emplace_back(*invoke->GetTargetMethod().dex_file,
+ invoke->GetTargetMethod().dex_method_index);
__ BindTrackedLabel(&relative_call_patches_.back().label);
// Arbitrarily branch to the BL itself, override at link time.
__ bl(&relative_call_patches_.back().label);
@@ -6952,17 +7009,37 @@
return DeduplicateUint32Literal(address, &uint32_literals_);
}
+template <LinkerPatch (*Factory)(size_t, const DexFile*, uint32_t, uint32_t)>
+inline void CodeGeneratorARM::EmitPcRelativeLinkerPatches(
+ const ArenaDeque<PcRelativePatchInfo>& infos,
+ ArenaVector<LinkerPatch>* linker_patches) {
+ for (const PcRelativePatchInfo& info : infos) {
+ const DexFile& dex_file = info.target_dex_file;
+ size_t offset_or_index = info.offset_or_index;
+ DCHECK(info.add_pc_label.IsBound());
+ uint32_t add_pc_offset = dchecked_integral_cast<uint32_t>(info.add_pc_label.Position());
+ // Add MOVW patch.
+ DCHECK(info.movw_label.IsBound());
+ uint32_t movw_offset = dchecked_integral_cast<uint32_t>(info.movw_label.Position());
+ linker_patches->push_back(Factory(movw_offset, &dex_file, add_pc_offset, offset_or_index));
+ // Add MOVT patch.
+ DCHECK(info.movt_label.IsBound());
+ uint32_t movt_offset = dchecked_integral_cast<uint32_t>(info.movt_label.Position());
+ linker_patches->push_back(Factory(movt_offset, &dex_file, add_pc_offset, offset_or_index));
+ }
+}
+
void CodeGeneratorARM::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) {
DCHECK(linker_patches->empty());
size_t size =
method_patches_.size() +
call_patches_.size() +
relative_call_patches_.size() +
- /* MOVW+MOVT for each base */ 2u * pc_relative_dex_cache_patches_.size() +
+ /* MOVW+MOVT for each entry */ 2u * pc_relative_dex_cache_patches_.size() +
boot_image_string_patches_.size() +
- /* MOVW+MOVT for each base */ 2u * pc_relative_string_patches_.size() +
+ /* MOVW+MOVT for each entry */ 2u * pc_relative_string_patches_.size() +
boot_image_type_patches_.size() +
- /* MOVW+MOVT for each base */ 2u * pc_relative_type_patches_.size() +
+ /* MOVW+MOVT for each entry */ 2u * pc_relative_type_patches_.size() +
boot_image_address_patches_.size();
linker_patches->reserve(size);
for (const auto& entry : method_patches_) {
@@ -6983,32 +7060,13 @@
target_method.dex_file,
target_method.dex_method_index));
}
- for (const MethodPatchInfo<Label>& info : relative_call_patches_) {
+ for (const PatchInfo<Label>& info : relative_call_patches_) {
uint32_t literal_offset = info.label.Position();
- linker_patches->push_back(LinkerPatch::RelativeCodePatch(literal_offset,
- info.target_method.dex_file,
- info.target_method.dex_method_index));
+ linker_patches->push_back(
+ LinkerPatch::RelativeCodePatch(literal_offset, &info.dex_file, info.index));
}
- for (const PcRelativePatchInfo& info : pc_relative_dex_cache_patches_) {
- const DexFile& dex_file = info.target_dex_file;
- size_t base_element_offset = info.offset_or_index;
- DCHECK(info.add_pc_label.IsBound());
- uint32_t add_pc_offset = dchecked_integral_cast<uint32_t>(info.add_pc_label.Position());
- // Add MOVW patch.
- DCHECK(info.movw_label.IsBound());
- uint32_t movw_offset = dchecked_integral_cast<uint32_t>(info.movw_label.Position());
- linker_patches->push_back(LinkerPatch::DexCacheArrayPatch(movw_offset,
- &dex_file,
- add_pc_offset,
- base_element_offset));
- // Add MOVT patch.
- DCHECK(info.movt_label.IsBound());
- uint32_t movt_offset = dchecked_integral_cast<uint32_t>(info.movt_label.Position());
- linker_patches->push_back(LinkerPatch::DexCacheArrayPatch(movt_offset,
- &dex_file,
- add_pc_offset,
- base_element_offset));
- }
+ EmitPcRelativeLinkerPatches<LinkerPatch::DexCacheArrayPatch>(pc_relative_dex_cache_patches_,
+ linker_patches);
for (const auto& entry : boot_image_string_patches_) {
const StringReference& target_string = entry.first;
Literal* literal = entry.second;
@@ -7018,25 +7076,12 @@
target_string.dex_file,
target_string.string_index));
}
- for (const PcRelativePatchInfo& info : pc_relative_string_patches_) {
- const DexFile& dex_file = info.target_dex_file;
- uint32_t string_index = info.offset_or_index;
- DCHECK(info.add_pc_label.IsBound());
- uint32_t add_pc_offset = dchecked_integral_cast<uint32_t>(info.add_pc_label.Position());
- // Add MOVW patch.
- DCHECK(info.movw_label.IsBound());
- uint32_t movw_offset = dchecked_integral_cast<uint32_t>(info.movw_label.Position());
- linker_patches->push_back(LinkerPatch::RelativeStringPatch(movw_offset,
- &dex_file,
- add_pc_offset,
- string_index));
- // Add MOVT patch.
- DCHECK(info.movt_label.IsBound());
- uint32_t movt_offset = dchecked_integral_cast<uint32_t>(info.movt_label.Position());
- linker_patches->push_back(LinkerPatch::RelativeStringPatch(movt_offset,
- &dex_file,
- add_pc_offset,
- string_index));
+ if (!GetCompilerOptions().IsBootImage()) {
+ EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(pc_relative_string_patches_,
+ linker_patches);
+ } else {
+ EmitPcRelativeLinkerPatches<LinkerPatch::RelativeStringPatch>(pc_relative_string_patches_,
+ linker_patches);
}
for (const auto& entry : boot_image_type_patches_) {
const TypeReference& target_type = entry.first;
@@ -7047,26 +7092,8 @@
target_type.dex_file,
target_type.type_index));
}
- for (const PcRelativePatchInfo& info : pc_relative_type_patches_) {
- const DexFile& dex_file = info.target_dex_file;
- uint32_t type_index = info.offset_or_index;
- DCHECK(info.add_pc_label.IsBound());
- uint32_t add_pc_offset = dchecked_integral_cast<uint32_t>(info.add_pc_label.Position());
- // Add MOVW patch.
- DCHECK(info.movw_label.IsBound());
- uint32_t movw_offset = dchecked_integral_cast<uint32_t>(info.movw_label.Position());
- linker_patches->push_back(LinkerPatch::RelativeTypePatch(movw_offset,
- &dex_file,
- add_pc_offset,
- type_index));
- // Add MOVT patch.
- DCHECK(info.movt_label.IsBound());
- uint32_t movt_offset = dchecked_integral_cast<uint32_t>(info.movt_label.Position());
- linker_patches->push_back(LinkerPatch::RelativeTypePatch(movt_offset,
- &dex_file,
- add_pc_offset,
- type_index));
- }
+ EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(pc_relative_type_patches_,
+ linker_patches);
for (const auto& entry : boot_image_address_patches_) {
DCHECK(GetCompilerOptions().GetIncludePatchInformation());
Literal* literal = entry.second;
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 6416d40..ef2e23f 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -593,6 +593,10 @@
uint32_t offset_or_index,
ArenaDeque<PcRelativePatchInfo>* patches);
+ template <LinkerPatch (*Factory)(size_t, const DexFile*, uint32_t, uint32_t)>
+ static void EmitPcRelativeLinkerPatches(const ArenaDeque<PcRelativePatchInfo>& infos,
+ ArenaVector<LinkerPatch>* linker_patches);
+
// Labels for each block that will be compiled.
Label* block_labels_; // Indexed by block id.
Label frame_entry_label_;
@@ -609,12 +613,12 @@
MethodToLiteralMap call_patches_;
// Relative call patch info.
// Using ArenaDeque<> which retains element addresses on push/emplace_back().
- ArenaDeque<MethodPatchInfo<Label>> relative_call_patches_;
+ ArenaDeque<PatchInfo<Label>> relative_call_patches_;
// PC-relative patch info for each HArmDexCacheArraysBase.
ArenaDeque<PcRelativePatchInfo> pc_relative_dex_cache_patches_;
// Deduplication map for boot string literals for kBootImageLinkTimeAddress.
BootStringToLiteralMap boot_image_string_patches_;
- // PC-relative String patch info.
+ // PC-relative String patch info; type depends on configuration (app .bss or boot image PIC).
ArenaDeque<PcRelativePatchInfo> pc_relative_string_patches_;
// Deduplication map for boot type literals for kBootImageLinkTimeAddress.
BootTypeToLiteralMap boot_image_type_patches_;
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 4f7f36b..969d653 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -329,6 +329,55 @@
DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathARM64);
};
+class LoadStringSlowPathARM64 : public SlowPathCodeARM64 {
+ public:
+ explicit LoadStringSlowPathARM64(HLoadString* instruction) : SlowPathCodeARM64(instruction) {}
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
+ DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+ CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
+
+ __ Bind(GetEntryLabel());
+ SaveLiveRegisters(codegen, locations);
+
+ InvokeRuntimeCallingConvention calling_convention;
+ const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex();
+ __ Mov(calling_convention.GetRegisterAt(0).W(), string_index);
+ arm64_codegen->InvokeRuntime(kQuickResolveString, instruction_, instruction_->GetDexPc(), this);
+ CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
+ Primitive::Type type = instruction_->GetType();
+ arm64_codegen->MoveLocation(locations->Out(), calling_convention.GetReturnLocation(type), type);
+
+ RestoreLiveRegisters(codegen, locations);
+
+ // Store the resolved String to the BSS entry.
+ UseScratchRegisterScope temps(arm64_codegen->GetVIXLAssembler());
+ Register temp = temps.AcquireX();
+ const DexFile& dex_file = instruction_->AsLoadString()->GetDexFile();
+ // TODO: Change art_quick_resolve_string to kSaveEverything and use a temporary
+ // for the ADRP in the fast path, so that we can avoid the ADRP here.
+ vixl::aarch64::Label* adrp_label =
+ arm64_codegen->NewPcRelativeStringPatch(dex_file, string_index);
+ arm64_codegen->EmitAdrpPlaceholder(adrp_label, temp);
+ vixl::aarch64::Label* strp_label =
+ arm64_codegen->NewPcRelativeStringPatch(dex_file, string_index, adrp_label);
+ {
+ SingleEmissionCheckScope guard(arm64_codegen->GetVIXLAssembler());
+ __ Bind(strp_label);
+ __ str(RegisterFrom(locations->Out(), Primitive::kPrimNot),
+ MemOperand(temp, /* offset placeholder */ 0));
+ }
+
+ __ B(GetExitLabel());
+ }
+
+ const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathARM64"; }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARM64);
+};
+
class NullCheckSlowPathARM64 : public SlowPathCodeARM64 {
public:
explicit NullCheckSlowPathARM64(HNullCheck* instr) : SlowPathCodeARM64(instr) {}
@@ -3631,19 +3680,11 @@
const DexFile& dex_file = invoke->GetDexFile();
uint32_t element_offset = invoke->GetDexCacheArrayOffset();
vixl::aarch64::Label* adrp_label = NewPcRelativeDexCacheArrayPatch(dex_file, element_offset);
- {
- SingleEmissionCheckScope guard(GetVIXLAssembler());
- __ Bind(adrp_label);
- __ adrp(XRegisterFrom(temp), /* offset placeholder */ 0);
- }
+ EmitAdrpPlaceholder(adrp_label, XRegisterFrom(temp));
// Add LDR with its PC-relative DexCache access patch.
vixl::aarch64::Label* ldr_label =
NewPcRelativeDexCacheArrayPatch(dex_file, element_offset, adrp_label);
- {
- SingleEmissionCheckScope guard(GetVIXLAssembler());
- __ Bind(ldr_label);
- __ ldr(XRegisterFrom(temp), MemOperand(XRegisterFrom(temp), /* offset placeholder */ 0));
- }
+ EmitLdrOffsetPlaceholder(ldr_label, XRegisterFrom(temp), XRegisterFrom(temp));
break;
}
case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
@@ -3676,7 +3717,8 @@
__ Bl(&frame_entry_label_);
break;
case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative: {
- relative_call_patches_.emplace_back(invoke->GetTargetMethod());
+ relative_call_patches_.emplace_back(*invoke->GetTargetMethod().dex_file,
+ invoke->GetTargetMethod().dex_method_index);
vixl::aarch64::Label* label = &relative_call_patches_.back().label;
SingleEmissionCheckScope guard(GetVIXLAssembler());
__ Bind(label);
@@ -3798,6 +3840,45 @@
return DeduplicateUint64Literal(address);
}
+void CodeGeneratorARM64::EmitAdrpPlaceholder(vixl::aarch64::Label* fixup_label,
+ vixl::aarch64::Register reg) {
+ DCHECK(reg.IsX());
+ SingleEmissionCheckScope guard(GetVIXLAssembler());
+ __ Bind(fixup_label);
+ __ adrp(reg, /* offset placeholder */ 0);
+}
+
+void CodeGeneratorARM64::EmitAddPlaceholder(vixl::aarch64::Label* fixup_label,
+ vixl::aarch64::Register out,
+ vixl::aarch64::Register base) {
+ DCHECK(out.IsX());
+ DCHECK(base.IsX());
+ SingleEmissionCheckScope guard(GetVIXLAssembler());
+ __ Bind(fixup_label);
+ __ add(out, base, Operand(/* offset placeholder */ 0));
+}
+
+void CodeGeneratorARM64::EmitLdrOffsetPlaceholder(vixl::aarch64::Label* fixup_label,
+ vixl::aarch64::Register out,
+ vixl::aarch64::Register base) {
+ DCHECK(base.IsX());
+ SingleEmissionCheckScope guard(GetVIXLAssembler());
+ __ Bind(fixup_label);
+ __ ldr(out, MemOperand(base, /* offset placeholder */ 0));
+}
+
+template <LinkerPatch (*Factory)(size_t, const DexFile*, uint32_t, uint32_t)>
+inline void CodeGeneratorARM64::EmitPcRelativeLinkerPatches(
+ const ArenaDeque<PcRelativePatchInfo>& infos,
+ ArenaVector<LinkerPatch>* linker_patches) {
+ for (const PcRelativePatchInfo& info : infos) {
+ linker_patches->push_back(Factory(info.label.GetLocation(),
+ &info.target_dex_file,
+ info.pc_insn_label->GetLocation(),
+ info.offset_or_index));
+ }
+}
+
void CodeGeneratorARM64::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) {
DCHECK(linker_patches->empty());
size_t size =
@@ -3825,10 +3906,9 @@
target_method.dex_file,
target_method.dex_method_index));
}
- for (const MethodPatchInfo<vixl::aarch64::Label>& info : relative_call_patches_) {
- linker_patches->push_back(LinkerPatch::RelativeCodePatch(info.label.GetLocation(),
- info.target_method.dex_file,
- info.target_method.dex_method_index));
+ for (const PatchInfo<vixl::aarch64::Label>& info : relative_call_patches_) {
+ linker_patches->push_back(
+ LinkerPatch::RelativeCodePatch(info.label.GetLocation(), &info.dex_file, info.index));
}
for (const PcRelativePatchInfo& info : pc_relative_dex_cache_patches_) {
linker_patches->push_back(LinkerPatch::DexCacheArrayPatch(info.label.GetLocation(),
@@ -3843,11 +3923,12 @@
target_string.dex_file,
target_string.string_index));
}
- for (const PcRelativePatchInfo& info : pc_relative_string_patches_) {
- linker_patches->push_back(LinkerPatch::RelativeStringPatch(info.label.GetLocation(),
- &info.target_dex_file,
- info.pc_insn_label->GetLocation(),
- info.offset_or_index));
+ if (!GetCompilerOptions().IsBootImage()) {
+ EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(pc_relative_string_patches_,
+ linker_patches);
+ } else {
+ EmitPcRelativeLinkerPatches<LinkerPatch::RelativeStringPatch>(pc_relative_string_patches_,
+ linker_patches);
}
for (const auto& entry : boot_image_type_patches_) {
const TypeReference& target_type = entry.first;
@@ -3856,12 +3937,8 @@
target_type.dex_file,
target_type.type_index));
}
- for (const PcRelativePatchInfo& info : pc_relative_type_patches_) {
- linker_patches->push_back(LinkerPatch::RelativeTypePatch(info.label.GetLocation(),
- &info.target_dex_file,
- info.pc_insn_label->GetLocation(),
- info.offset_or_index));
- }
+ EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(pc_relative_type_patches_,
+ linker_patches);
for (const auto& entry : boot_image_address_patches_) {
DCHECK(GetCompilerOptions().GetIncludePatchInformation());
vixl::aarch64::Literal<uint32_t>* literal = entry.second;
@@ -4018,19 +4095,11 @@
const DexFile& dex_file = cls->GetDexFile();
uint32_t type_index = cls->GetTypeIndex();
vixl::aarch64::Label* adrp_label = codegen_->NewPcRelativeTypePatch(dex_file, type_index);
- {
- SingleEmissionCheckScope guard(GetVIXLAssembler());
- __ Bind(adrp_label);
- __ adrp(out.X(), /* offset placeholder */ 0);
- }
+ codegen_->EmitAdrpPlaceholder(adrp_label, out.X());
// Add ADD with its PC-relative type patch.
vixl::aarch64::Label* add_label =
codegen_->NewPcRelativeTypePatch(dex_file, type_index, adrp_label);
- {
- SingleEmissionCheckScope guard(GetVIXLAssembler());
- __ Bind(add_label);
- __ add(out.X(), out.X(), Operand(/* offset placeholder */ 0));
- }
+ codegen_->EmitAddPlaceholder(add_label, out.X(), out.X());
break;
}
case HLoadClass::LoadKind::kBootImageAddress: {
@@ -4067,11 +4136,7 @@
uint32_t element_offset = cls->GetDexCacheElementOffset();
vixl::aarch64::Label* adrp_label =
codegen_->NewPcRelativeDexCacheArrayPatch(dex_file, element_offset);
- {
- SingleEmissionCheckScope guard(GetVIXLAssembler());
- __ Bind(adrp_label);
- __ adrp(out.X(), /* offset placeholder */ 0);
- }
+ codegen_->EmitAdrpPlaceholder(adrp_label, out.X());
// Add LDR with its PC-relative DexCache access patch.
vixl::aarch64::Label* ldr_label =
codegen_->NewPcRelativeDexCacheArrayPatch(dex_file, element_offset, adrp_label);
@@ -4156,7 +4221,7 @@
case HLoadString::LoadKind::kDexCacheAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
break;
- case HLoadString::LoadKind::kDexCachePcRelative:
+ case HLoadString::LoadKind::kBssEntry:
DCHECK(!Runtime::Current()->UseJitCompilation());
break;
case HLoadString::LoadKind::kDexCacheViaMethod:
@@ -4167,7 +4232,9 @@
void LocationsBuilderARM64::VisitLoadString(HLoadString* load) {
LocationSummary::CallKind call_kind = load->NeedsEnvironment()
- ? LocationSummary::kCallOnMainOnly
+ ? ((load->GetLoadKind() == HLoadString::LoadKind::kDexCacheViaMethod)
+ ? LocationSummary::kCallOnMainOnly
+ : LocationSummary::kCallOnSlowPath)
: LocationSummary::kNoCall;
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(load, call_kind);
if (load->GetLoadKind() == HLoadString::LoadKind::kDexCacheViaMethod) {
@@ -4191,20 +4258,13 @@
// Add ADRP with its PC-relative String patch.
const DexFile& dex_file = load->GetDexFile();
uint32_t string_index = load->GetStringIndex();
+ DCHECK(codegen_->GetCompilerOptions().IsBootImage());
vixl::aarch64::Label* adrp_label = codegen_->NewPcRelativeStringPatch(dex_file, string_index);
- {
- SingleEmissionCheckScope guard(GetVIXLAssembler());
- __ Bind(adrp_label);
- __ adrp(out.X(), /* offset placeholder */ 0);
- }
+ codegen_->EmitAdrpPlaceholder(adrp_label, out.X());
// Add ADD with its PC-relative String patch.
vixl::aarch64::Label* add_label =
codegen_->NewPcRelativeStringPatch(dex_file, string_index, adrp_label);
- {
- SingleEmissionCheckScope guard(GetVIXLAssembler());
- __ Bind(add_label);
- __ add(out.X(), out.X(), Operand(/* offset placeholder */ 0));
- }
+ codegen_->EmitAddPlaceholder(add_label, out.X(), out.X());
return; // No dex cache slow path.
}
case HLoadString::LoadKind::kBootImageAddress: {
@@ -4212,6 +4272,28 @@
__ Ldr(out.W(), codegen_->DeduplicateBootImageAddressLiteral(load->GetAddress()));
return; // No dex cache slow path.
}
+ case HLoadString::LoadKind::kBssEntry: {
+ // Add ADRP with its PC-relative String .bss entry patch.
+ const DexFile& dex_file = load->GetDexFile();
+ uint32_t string_index = load->GetStringIndex();
+ DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
+ vixl::aarch64::Label* adrp_label = codegen_->NewPcRelativeStringPatch(dex_file, string_index);
+ codegen_->EmitAdrpPlaceholder(adrp_label, out.X());
+ // Add LDR with its PC-relative String patch.
+ vixl::aarch64::Label* ldr_label =
+ codegen_->NewPcRelativeStringPatch(dex_file, string_index, adrp_label);
+ // /* GcRoot<mirror::String> */ out = *(base_address + offset) /* PC-relative */
+ GenerateGcRootFieldLoad(load,
+ load->GetLocations()->Out(),
+ out.X(),
+ /* placeholder */ 0u,
+ ldr_label);
+ SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathARM64(load);
+ codegen_->AddSlowPath(slow_path);
+ __ Cbz(out.X(), slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+ return;
+ }
default:
break;
}
@@ -4981,6 +5063,7 @@
uint32_t offset,
vixl::aarch64::Label* fixup_label,
bool requires_read_barrier) {
+ DCHECK(fixup_label == nullptr || offset == 0u);
Register root_reg = RegisterFrom(root, Primitive::kPrimNot);
if (requires_read_barrier) {
DCHECK(kEmitCompilerReadBarrier);
@@ -4997,9 +5080,7 @@
if (fixup_label == nullptr) {
__ Ldr(root_reg, MemOperand(obj, offset));
} else {
- SingleEmissionCheckScope guard(GetVIXLAssembler());
- __ Bind(fixup_label);
- __ ldr(root_reg, MemOperand(obj, offset));
+ codegen_->EmitLdrOffsetPlaceholder(fixup_label, root_reg, obj);
}
static_assert(
sizeof(mirror::CompressedReference<mirror::Object>) == sizeof(GcRoot<mirror::Object>),
@@ -5028,9 +5109,7 @@
if (fixup_label == nullptr) {
__ Add(root_reg.X(), obj.X(), offset);
} else {
- SingleEmissionCheckScope guard(GetVIXLAssembler());
- __ Bind(fixup_label);
- __ add(root_reg.X(), obj.X(), offset);
+ codegen_->EmitAddPlaceholder(fixup_label, root_reg.X(), obj.X());
}
// /* mirror::Object* */ root = root->Read()
codegen_->GenerateReadBarrierForRootSlow(instruction, root, root);
@@ -5041,9 +5120,7 @@
if (fixup_label == nullptr) {
__ Ldr(root_reg, MemOperand(obj, offset));
} else {
- SingleEmissionCheckScope guard(GetVIXLAssembler());
- __ Bind(fixup_label);
- __ ldr(root_reg, MemOperand(obj, offset));
+ codegen_->EmitLdrOffsetPlaceholder(fixup_label, root_reg, obj.X());
}
// Note that GC roots are not affected by heap poisoning, thus we
// do not have to unpoison `root_reg` here.
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index a152245..eb28ecb 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -564,6 +564,14 @@
vixl::aarch64::Literal<uint32_t>* DeduplicateBootImageAddressLiteral(uint64_t address);
vixl::aarch64::Literal<uint64_t>* DeduplicateDexCacheAddressLiteral(uint64_t address);
+ void EmitAdrpPlaceholder(vixl::aarch64::Label* fixup_label, vixl::aarch64::Register reg);
+ void EmitAddPlaceholder(vixl::aarch64::Label* fixup_label,
+ vixl::aarch64::Register out,
+ vixl::aarch64::Register base);
+ void EmitLdrOffsetPlaceholder(vixl::aarch64::Label* fixup_label,
+ vixl::aarch64::Register out,
+ vixl::aarch64::Register base);
+
void EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) OVERRIDE;
// Fast path implementation of ReadBarrier::Barrier for a heap
@@ -691,6 +699,10 @@
void EmitJumpTables();
+ template <LinkerPatch (*Factory)(size_t, const DexFile*, uint32_t, uint32_t)>
+ static void EmitPcRelativeLinkerPatches(const ArenaDeque<PcRelativePatchInfo>& infos,
+ ArenaVector<LinkerPatch>* linker_patches);
+
// Labels for each block that will be compiled.
// We use a deque so that the `vixl::aarch64::Label` objects do not move in memory.
ArenaDeque<vixl::aarch64::Label> block_labels_; // Indexed by block id.
@@ -713,12 +725,12 @@
MethodToLiteralMap call_patches_;
// Relative call patch info.
// Using ArenaDeque<> which retains element addresses on push/emplace_back().
- ArenaDeque<MethodPatchInfo<vixl::aarch64::Label>> relative_call_patches_;
+ ArenaDeque<PatchInfo<vixl::aarch64::Label>> relative_call_patches_;
// PC-relative DexCache access info.
ArenaDeque<PcRelativePatchInfo> pc_relative_dex_cache_patches_;
// Deduplication map for boot string literals for kBootImageLinkTimeAddress.
BootStringToLiteralMap boot_image_string_patches_;
- // PC-relative String patch info.
+ // PC-relative String patch info; type depends on configuration (app .bss or boot image PIC).
ArenaDeque<PcRelativePatchInfo> pc_relative_string_patches_;
// Deduplication map for boot type literals for kBootImageLinkTimeAddress.
BootTypeToLiteralMap boot_image_type_patches_;
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 5c0ca85..1c540c2 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -279,7 +279,8 @@
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConvention calling_convention;
- const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex();
+ HLoadString* load = instruction_->AsLoadString();
+ const uint32_t string_index = load->GetStringIndex();
__ LoadConst32(calling_convention.GetRegisterAt(0), string_index);
mips_codegen->InvokeRuntime(kQuickResolveString, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
@@ -289,6 +290,19 @@
type);
RestoreLiveRegisters(codegen, locations);
+
+ // Store the resolved String to the BSS entry.
+ // TODO: Change art_quick_resolve_string to kSaveEverything and use a temporary for the
+ // .bss entry address in the fast path, so that we can avoid another calculation here.
+ bool isR6 = mips_codegen->GetInstructionSetFeatures().IsR6();
+ Register base = isR6 ? ZERO : locations->InAt(0).AsRegister<Register>();
+ Register out = locations->Out().AsRegister<Register>();
+ DCHECK_NE(out, AT);
+ CodeGeneratorMIPS::PcRelativePatchInfo* info =
+ mips_codegen->NewPcRelativeStringPatch(load->GetDexFile(), string_index);
+ mips_codegen->EmitPcRelativeAddressPlaceholder(info, TMP, base);
+ __ StoreToOffset(kStoreWord, out, TMP, 0);
+
__ B(GetExitLabel());
}
@@ -957,6 +971,24 @@
}
}
+template <LinkerPatch (*Factory)(size_t, const DexFile*, uint32_t, uint32_t)>
+inline void CodeGeneratorMIPS::EmitPcRelativeLinkerPatches(
+ const ArenaDeque<PcRelativePatchInfo>& infos,
+ ArenaVector<LinkerPatch>* linker_patches) {
+ for (const PcRelativePatchInfo& info : infos) {
+ const DexFile& dex_file = info.target_dex_file;
+ size_t offset_or_index = info.offset_or_index;
+ DCHECK(info.high_label.IsBound());
+ uint32_t high_offset = __ GetLabelLocation(&info.high_label);
+ // On R2 we use HMipsComputeBaseMethodAddress and patch relative to
+ // the assembler's base label used for PC-relative addressing.
+ uint32_t pc_rel_offset = info.pc_rel_label.IsBound()
+ ? __ GetLabelLocation(&info.pc_rel_label)
+ : __ GetPcRelBaseLabelLocation();
+ linker_patches->push_back(Factory(high_offset, &dex_file, pc_rel_offset, offset_or_index));
+ }
+}
+
void CodeGeneratorMIPS::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) {
DCHECK(linker_patches->empty());
size_t size =
@@ -987,48 +1019,17 @@
target_method.dex_file,
target_method.dex_method_index));
}
- for (const PcRelativePatchInfo& info : pc_relative_dex_cache_patches_) {
- const DexFile& dex_file = info.target_dex_file;
- size_t base_element_offset = info.offset_or_index;
- DCHECK(info.high_label.IsBound());
- uint32_t high_offset = __ GetLabelLocation(&info.high_label);
- DCHECK(info.pc_rel_label.IsBound());
- uint32_t pc_rel_offset = __ GetLabelLocation(&info.pc_rel_label);
- linker_patches->push_back(LinkerPatch::DexCacheArrayPatch(high_offset,
- &dex_file,
- pc_rel_offset,
- base_element_offset));
+ EmitPcRelativeLinkerPatches<LinkerPatch::DexCacheArrayPatch>(pc_relative_dex_cache_patches_,
+ linker_patches);
+ if (!GetCompilerOptions().IsBootImage()) {
+ EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(pc_relative_string_patches_,
+ linker_patches);
+ } else {
+ EmitPcRelativeLinkerPatches<LinkerPatch::RelativeStringPatch>(pc_relative_string_patches_,
+ linker_patches);
}
- for (const PcRelativePatchInfo& info : pc_relative_string_patches_) {
- const DexFile& dex_file = info.target_dex_file;
- size_t string_index = info.offset_or_index;
- DCHECK(info.high_label.IsBound());
- uint32_t high_offset = __ GetLabelLocation(&info.high_label);
- // On R2 we use HMipsComputeBaseMethodAddress and patch relative to
- // the assembler's base label used for PC-relative literals.
- uint32_t pc_rel_offset = info.pc_rel_label.IsBound()
- ? __ GetLabelLocation(&info.pc_rel_label)
- : __ GetPcRelBaseLabelLocation();
- linker_patches->push_back(LinkerPatch::RelativeStringPatch(high_offset,
- &dex_file,
- pc_rel_offset,
- string_index));
- }
- for (const PcRelativePatchInfo& info : pc_relative_type_patches_) {
- const DexFile& dex_file = info.target_dex_file;
- size_t type_index = info.offset_or_index;
- DCHECK(info.high_label.IsBound());
- uint32_t high_offset = __ GetLabelLocation(&info.high_label);
- // On R2 we use HMipsComputeBaseMethodAddress and patch relative to
- // the assembler's base label used for PC-relative literals.
- uint32_t pc_rel_offset = info.pc_rel_label.IsBound()
- ? __ GetLabelLocation(&info.pc_rel_label)
- : __ GetPcRelBaseLabelLocation();
- linker_patches->push_back(LinkerPatch::RelativeTypePatch(high_offset,
- &dex_file,
- pc_rel_offset,
- type_index));
- }
+ EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(pc_relative_type_patches_,
+ linker_patches);
for (const auto& entry : boot_image_string_patches_) {
const StringReference& target_string = entry.first;
Literal* literal = entry.second;
@@ -1118,6 +1119,36 @@
return DeduplicateUint32Literal(dchecked_integral_cast<uint32_t>(address), map);
}
+void CodeGeneratorMIPS::EmitPcRelativeAddressPlaceholder(
+ PcRelativePatchInfo* info, Register out, Register base) {
+ bool reordering = __ SetReorder(false);
+ if (GetInstructionSetFeatures().IsR6()) {
+ DCHECK_EQ(base, ZERO);
+ __ Bind(&info->high_label);
+ __ Bind(&info->pc_rel_label);
+ // Add a 32-bit offset to PC.
+ __ Auipc(out, /* placeholder */ 0x1234);
+ __ Addiu(out, out, /* placeholder */ 0x5678);
+ } else {
+ // If base is ZERO, emit NAL to obtain the actual base.
+ if (base == ZERO) {
+ // Generate a dummy PC-relative call to obtain PC.
+ __ Nal();
+ }
+ __ Bind(&info->high_label);
+ __ Lui(out, /* placeholder */ 0x1234);
+ // If we emitted the NAL, bind the pc_rel_label, otherwise base is a register holding
+ // the HMipsComputeBaseMethodAddress which has its own label stored in MipsAssembler.
+ if (base == ZERO) {
+ __ Bind(&info->pc_rel_label);
+ }
+ __ Ori(out, out, /* placeholder */ 0x5678);
+ // Add a 32-bit offset to PC.
+ __ Addu(out, out, (base == ZERO) ? RA : base);
+ }
+ __ SetReorder(reordering);
+}
+
void CodeGeneratorMIPS::MarkGCCard(Register object, Register value) {
MipsLabel done;
Register card = AT;
@@ -4229,6 +4260,8 @@
}
// We disable PC-relative load when there is an irreducible loop, as the optimization
// is incompatible with it.
+ // TODO: Create as many MipsDexCacheArraysBase instructions as needed for methods
+ // with irreducible loops.
bool has_irreducible_loops = GetGraph()->HasIrreducibleLoops();
bool fallback_load = has_irreducible_loops;
switch (desired_string_load_kind) {
@@ -4244,10 +4277,8 @@
DCHECK(Runtime::Current()->UseJitCompilation());
fallback_load = false;
break;
- case HLoadString::LoadKind::kDexCachePcRelative:
+ case HLoadString::LoadKind::kBssEntry:
DCHECK(!Runtime::Current()->UseJitCompilation());
- // TODO: Create as many MipsDexCacheArraysBase instructions as needed for methods
- // with irreducible loops.
break;
case HLoadString::LoadKind::kDexCacheViaMethod:
fallback_load = false;
@@ -4627,23 +4658,7 @@
DCHECK(!kEmitCompilerReadBarrier);
CodeGeneratorMIPS::PcRelativePatchInfo* info =
codegen_->NewPcRelativeTypePatch(cls->GetDexFile(), cls->GetTypeIndex());
- bool reordering = __ SetReorder(false);
- if (isR6) {
- __ Bind(&info->high_label);
- __ Bind(&info->pc_rel_label);
- // Add a 32-bit offset to PC.
- __ Auipc(out, /* placeholder */ 0x1234);
- __ Addiu(out, out, /* placeholder */ 0x5678);
- } else {
- __ Bind(&info->high_label);
- __ Lui(out, /* placeholder */ 0x1234);
- // We do not bind info->pc_rel_label here, we'll use the assembler's label
- // for PC-relative literals and the base from HMipsComputeBaseMethodAddress.
- __ Ori(out, out, /* placeholder */ 0x5678);
- // Add a 32-bit offset to PC.
- __ Addu(out, out, base_or_current_method_reg);
- }
- __ SetReorder(reordering);
+ codegen_->EmitPcRelativeAddressPlaceholder(info, out, base_or_current_method_reg);
break;
}
case HLoadClass::LoadKind::kBootImageAddress: {
@@ -4732,7 +4747,9 @@
void LocationsBuilderMIPS::VisitLoadString(HLoadString* load) {
LocationSummary::CallKind call_kind = (load->NeedsEnvironment() || kEmitCompilerReadBarrier)
- ? LocationSummary::kCallOnSlowPath
+ ? ((load->GetLoadKind() == HLoadString::LoadKind::kDexCacheViaMethod)
+ ? LocationSummary::kCallOnMainOnly
+ : LocationSummary::kCallOnSlowPath)
: LocationSummary::kNoCall;
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(load, call_kind);
HLoadString::LoadKind load_kind = load->GetLoadKind();
@@ -4741,12 +4758,12 @@
case HLoadString::LoadKind::kBootImageLinkTimeAddress:
case HLoadString::LoadKind::kBootImageAddress:
case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
+ case HLoadString::LoadKind::kBssEntry:
if (codegen_->GetInstructionSetFeatures().IsR6()) {
break;
}
FALLTHROUGH_INTENDED;
// We need an extra register for PC-relative dex cache accesses.
- case HLoadString::LoadKind::kDexCachePcRelative:
case HLoadString::LoadKind::kDexCacheViaMethod:
locations->SetInAt(0, Location::RequiresRegister());
break;
@@ -4768,6 +4785,7 @@
case HLoadString::LoadKind::kBootImageLinkTimeAddress:
case HLoadString::LoadKind::kBootImageAddress:
case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
+ case HLoadString::LoadKind::kBssEntry:
base_or_current_method_reg = isR6 ? ZERO : locations->InAt(0).AsRegister<Register>();
break;
default:
@@ -4785,25 +4803,10 @@
return; // No dex cache slow path.
case HLoadString::LoadKind::kBootImageLinkTimePcRelative: {
DCHECK(!kEmitCompilerReadBarrier);
+ DCHECK(codegen_->GetCompilerOptions().IsBootImage());
CodeGeneratorMIPS::PcRelativePatchInfo* info =
codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex());
- bool reordering = __ SetReorder(false);
- if (isR6) {
- __ Bind(&info->high_label);
- __ Bind(&info->pc_rel_label);
- // Add a 32-bit offset to PC.
- __ Auipc(out, /* placeholder */ 0x1234);
- __ Addiu(out, out, /* placeholder */ 0x5678);
- } else {
- __ Bind(&info->high_label);
- __ Lui(out, /* placeholder */ 0x1234);
- // We do not bind info->pc_rel_label here, we'll use the assembler's label
- // for PC-relative literals and the base from HMipsComputeBaseMethodAddress.
- __ Ori(out, out, /* placeholder */ 0x5678);
- // Add a 32-bit offset to PC.
- __ Addu(out, out, base_or_current_method_reg);
- }
- __ SetReorder(reordering);
+ codegen_->EmitPcRelativeAddressPlaceholder(info, out, base_or_current_method_reg);
return; // No dex cache slow path.
}
case HLoadString::LoadKind::kBootImageAddress: {
@@ -4815,15 +4818,28 @@
codegen_->DeduplicateBootImageAddressLiteral(address));
return; // No dex cache slow path.
}
+ case HLoadString::LoadKind::kBssEntry: {
+ DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
+ CodeGeneratorMIPS::PcRelativePatchInfo* info =
+ codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex());
+ codegen_->EmitPcRelativeAddressPlaceholder(info, out, base_or_current_method_reg);
+ __ LoadFromOffset(kLoadWord, out, out, 0);
+ SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathMIPS(load);
+ codegen_->AddSlowPath(slow_path);
+ __ Beqz(out, slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+ return;
+ }
default:
break;
}
// TODO: Re-add the compiler code to do string dex cache lookup again.
- SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathMIPS(load);
- codegen_->AddSlowPath(slow_path);
- __ B(slow_path->GetEntryLabel());
- __ Bind(slow_path->GetExitLabel());
+ DCHECK(load_kind == HLoadString::LoadKind::kDexCacheViaMethod);
+ InvokeRuntimeCallingConvention calling_convention;
+ __ LoadConst32(calling_convention.GetRegisterAt(0), load->GetStringIndex());
+ codegen_->InvokeRuntime(kQuickResolveString, load, load->GetDexPc());
+ CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
}
void LocationsBuilderMIPS::VisitLongConstant(HLongConstant* constant) {
@@ -6011,25 +6027,8 @@
Register reg = base->GetLocations()->Out().AsRegister<Register>();
CodeGeneratorMIPS::PcRelativePatchInfo* info =
codegen_->NewPcRelativeDexCacheArrayPatch(base->GetDexFile(), base->GetElementOffset());
- bool reordering = __ SetReorder(false);
- if (codegen_->GetInstructionSetFeatures().IsR6()) {
- __ Bind(&info->high_label);
- __ Bind(&info->pc_rel_label);
- // Add a 32-bit offset to PC.
- __ Auipc(reg, /* placeholder */ 0x1234);
- __ Addiu(reg, reg, /* placeholder */ 0x5678);
- } else {
- // Generate a dummy PC-relative call to obtain PC.
- __ Nal();
- __ Bind(&info->high_label);
- __ Lui(reg, /* placeholder */ 0x1234);
- __ Bind(&info->pc_rel_label);
- __ Ori(reg, reg, /* placeholder */ 0x5678);
- // Add a 32-bit offset to PC.
- __ Addu(reg, reg, RA);
- // TODO: Can we share this code with that of VisitMipsComputeBaseMethodAddress()?
- }
- __ SetReorder(reordering);
+ // TODO: Reuse MipsComputeBaseMethodAddress on R2 instead of passing ZERO to force emitting NAL.
+ codegen_->EmitPcRelativeAddressPlaceholder(info, reg, ZERO);
}
void LocationsBuilderMIPS::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index f943978..0e8d8d4 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -435,6 +435,8 @@
Literal* DeduplicateBootImageTypeLiteral(const DexFile& dex_file, uint32_t type_index);
Literal* DeduplicateBootImageAddressLiteral(uint32_t address);
+ void EmitPcRelativeAddressPlaceholder(PcRelativePatchInfo* info, Register out, Register base);
+
private:
Register GetInvokeStaticOrDirectExtraParameter(HInvokeStaticOrDirect* invoke, Register temp);
@@ -455,6 +457,10 @@
uint32_t offset_or_index,
ArenaDeque<PcRelativePatchInfo>* patches);
+ template <LinkerPatch (*Factory)(size_t, const DexFile*, uint32_t, uint32_t)>
+ void EmitPcRelativeLinkerPatches(const ArenaDeque<PcRelativePatchInfo>& infos,
+ ArenaVector<LinkerPatch>* linker_patches);
+
// Labels for each block that will be compiled.
MipsLabel* block_labels_;
MipsLabel frame_entry_label_;
@@ -473,7 +479,7 @@
ArenaDeque<PcRelativePatchInfo> pc_relative_dex_cache_patches_;
// Deduplication map for boot string literals for kBootImageLinkTimeAddress.
BootStringToLiteralMap boot_image_string_patches_;
- // PC-relative String patch info.
+ // PC-relative String patch info; type depends on configuration (app .bss or boot image PIC).
ArenaDeque<PcRelativePatchInfo> pc_relative_string_patches_;
// Deduplication map for boot type literals for kBootImageLinkTimeAddress.
BootTypeToLiteralMap boot_image_type_patches_;
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index a7051ae..0b23599 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -212,6 +212,42 @@
DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathX86);
};
+class LoadStringSlowPathX86 : public SlowPathCode {
+ public:
+ explicit LoadStringSlowPathX86(HLoadString* instruction): SlowPathCode(instruction) {}
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
+ DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+
+ CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
+ __ Bind(GetEntryLabel());
+ SaveLiveRegisters(codegen, locations);
+
+ InvokeRuntimeCallingConvention calling_convention;
+ const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex();
+ __ movl(calling_convention.GetRegisterAt(0), Immediate(string_index));
+ x86_codegen->InvokeRuntime(kQuickResolveString, instruction_, instruction_->GetDexPc(), this);
+ CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
+ x86_codegen->Move32(locations->Out(), Location::RegisterLocation(EAX));
+ RestoreLiveRegisters(codegen, locations);
+
+ // Store the resolved String to the BSS entry.
+ Register method_address = locations->InAt(0).AsRegister<Register>();
+ __ movl(Address(method_address, CodeGeneratorX86::kDummy32BitOffset),
+ locations->Out().AsRegister<Register>());
+ Label* fixup_label = x86_codegen->NewStringBssEntryPatch(instruction_->AsLoadString());
+ __ Bind(fixup_label);
+
+ __ jmp(GetExitLabel());
+ }
+
+ const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathX86"; }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathX86);
+};
+
class LoadClassSlowPathX86 : public SlowPathCode {
public:
LoadClassSlowPathX86(HLoadClass* cls,
@@ -4294,7 +4330,8 @@
break;
case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup:
__ movl(temp.AsRegister<Register>(), Immediate(/* placeholder */ 0));
- method_patches_.emplace_back(invoke->GetTargetMethod());
+ method_patches_.emplace_back(*invoke->GetTargetMethod().dex_file,
+ invoke->GetTargetMethod().dex_method_index);
__ Bind(&method_patches_.back().label); // Bind the label at the end of the "movl" insn.
break;
case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative: {
@@ -4339,7 +4376,8 @@
__ call(GetFrameEntryLabel());
break;
case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative: {
- relative_call_patches_.emplace_back(invoke->GetTargetMethod());
+ relative_call_patches_.emplace_back(*invoke->GetTargetMethod().dex_file,
+ invoke->GetTargetMethod().dex_method_index);
Label* label = &relative_call_patches_.back().label;
__ call(label); // Bind to the patch label, override at link time.
__ Bind(label); // Bind the label at the end of the "call" insn.
@@ -4398,7 +4436,8 @@
}
}
-void CodeGeneratorX86::RecordStringPatch(HLoadString* load_string) {
+void CodeGeneratorX86::RecordBootStringPatch(HLoadString* load_string) {
+ DCHECK(GetCompilerOptions().IsBootImage());
string_patches_.emplace_back(load_string->GetDexFile(), load_string->GetStringIndex());
__ Bind(&string_patches_.back().label);
}
@@ -4408,6 +4447,12 @@
__ Bind(&type_patches_.back().label);
}
+Label* CodeGeneratorX86::NewStringBssEntryPatch(HLoadString* load_string) {
+ DCHECK(!GetCompilerOptions().IsBootImage());
+ string_patches_.emplace_back(load_string->GetDexFile(), load_string->GetStringIndex());
+ return &string_patches_.back().label;
+}
+
Label* CodeGeneratorX86::NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file,
uint32_t element_offset) {
// Add the patch entry and bind its label at the end of the instruction.
@@ -4415,6 +4460,21 @@
return &pc_relative_dex_cache_patches_.back().label;
}
+// The label points to the end of the "movl" or another instruction but the literal offset
+// for method patch needs to point to the embedded constant which occupies the last 4 bytes.
+constexpr uint32_t kLabelPositionToLiteralOffsetAdjustment = 4u;
+
+template <LinkerPatch (*Factory)(size_t, const DexFile*, uint32_t, uint32_t)>
+inline void CodeGeneratorX86::EmitPcRelativeLinkerPatches(
+ const ArenaDeque<PatchInfo<Label>>& infos,
+ ArenaVector<LinkerPatch>* linker_patches) {
+ for (const PatchInfo<Label>& info : infos) {
+ uint32_t literal_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
+ linker_patches->push_back(
+ Factory(literal_offset, &info.dex_file, GetMethodAddressOffset(), info.index));
+ }
+}
+
void CodeGeneratorX86::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) {
DCHECK(linker_patches->empty());
size_t size =
@@ -4425,59 +4485,38 @@
string_patches_.size() +
type_patches_.size();
linker_patches->reserve(size);
- // The label points to the end of the "movl" insn but the literal offset for method
- // patch needs to point to the embedded constant which occupies the last 4 bytes.
- constexpr uint32_t kLabelPositionToLiteralOffsetAdjustment = 4u;
- for (const MethodPatchInfo<Label>& info : method_patches_) {
+ for (const PatchInfo<Label>& info : method_patches_) {
uint32_t literal_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
- linker_patches->push_back(LinkerPatch::MethodPatch(literal_offset,
- info.target_method.dex_file,
- info.target_method.dex_method_index));
+ linker_patches->push_back(LinkerPatch::MethodPatch(literal_offset, &info.dex_file, info.index));
}
- for (const MethodPatchInfo<Label>& info : relative_call_patches_) {
+ for (const PatchInfo<Label>& info : relative_call_patches_) {
uint32_t literal_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
- linker_patches->push_back(LinkerPatch::RelativeCodePatch(literal_offset,
- info.target_method.dex_file,
- info.target_method.dex_method_index));
+ linker_patches->push_back(
+ LinkerPatch::RelativeCodePatch(literal_offset, &info.dex_file, info.index));
}
- for (const PcRelativeDexCacheAccessInfo& info : pc_relative_dex_cache_patches_) {
- uint32_t literal_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
- linker_patches->push_back(LinkerPatch::DexCacheArrayPatch(literal_offset,
- &info.target_dex_file,
- GetMethodAddressOffset(),
- info.element_offset));
- }
+ EmitPcRelativeLinkerPatches<LinkerPatch::DexCacheArrayPatch>(pc_relative_dex_cache_patches_,
+ linker_patches);
for (const Label& label : simple_patches_) {
uint32_t literal_offset = label.Position() - kLabelPositionToLiteralOffsetAdjustment;
linker_patches->push_back(LinkerPatch::RecordPosition(literal_offset));
}
- if (GetCompilerOptions().GetCompilePic()) {
- for (const StringPatchInfo<Label>& info : string_patches_) {
- uint32_t literal_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
- linker_patches->push_back(LinkerPatch::RelativeStringPatch(literal_offset,
- &info.dex_file,
- GetMethodAddressOffset(),
- info.string_index));
- }
- for (const TypePatchInfo<Label>& info : type_patches_) {
- uint32_t literal_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
- linker_patches->push_back(LinkerPatch::RelativeTypePatch(literal_offset,
- &info.dex_file,
- GetMethodAddressOffset(),
- info.type_index));
- }
+ if (!GetCompilerOptions().IsBootImage()) {
+ EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(string_patches_, linker_patches);
+ } else if (GetCompilerOptions().GetCompilePic()) {
+ EmitPcRelativeLinkerPatches<LinkerPatch::RelativeStringPatch>(string_patches_, linker_patches);
} else {
- for (const StringPatchInfo<Label>& info : string_patches_) {
+ for (const PatchInfo<Label>& info : string_patches_) {
uint32_t literal_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
- linker_patches->push_back(LinkerPatch::StringPatch(literal_offset,
- &info.dex_file,
- info.string_index));
+ linker_patches->push_back(
+ LinkerPatch::StringPatch(literal_offset, &info.dex_file, info.index));
}
- for (const TypePatchInfo<Label>& info : type_patches_) {
+ }
+ if (GetCompilerOptions().GetCompilePic()) {
+ EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(type_patches_, linker_patches);
+ } else {
+ for (const PatchInfo<Label>& info : type_patches_) {
uint32_t literal_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
- linker_patches->push_back(LinkerPatch::TypePatch(literal_offset,
- &info.dex_file,
- info.type_index));
+ linker_patches->push_back(LinkerPatch::TypePatch(literal_offset, &info.dex_file, info.index));
}
}
}
@@ -5991,7 +6030,7 @@
case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
DCHECK(GetCompilerOptions().GetCompilePic());
FALLTHROUGH_INTENDED;
- case HLoadString::LoadKind::kDexCachePcRelative:
+ case HLoadString::LoadKind::kBssEntry:
DCHECK(!Runtime::Current()->UseJitCompilation()); // Note: boot image is also non-JIT.
// We disable pc-relative load when there is an irreducible loop, as the optimization
// is incompatible with it.
@@ -6014,13 +6053,15 @@
void LocationsBuilderX86::VisitLoadString(HLoadString* load) {
LocationSummary::CallKind call_kind = (load->NeedsEnvironment() || kEmitCompilerReadBarrier)
- ? LocationSummary::kCallOnMainOnly
+ ? ((load->GetLoadKind() == HLoadString::LoadKind::kDexCacheViaMethod)
+ ? LocationSummary::kCallOnMainOnly
+ : LocationSummary::kCallOnSlowPath)
: LocationSummary::kNoCall;
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(load, call_kind);
HLoadString::LoadKind load_kind = load->GetLoadKind();
if (load_kind == HLoadString::LoadKind::kDexCacheViaMethod ||
load_kind == HLoadString::LoadKind::kBootImageLinkTimePcRelative ||
- load_kind == HLoadString::LoadKind::kDexCachePcRelative) {
+ load_kind == HLoadString::LoadKind::kBssEntry) {
locations->SetInAt(0, Location::RequiresRegister());
}
if (load_kind == HLoadString::LoadKind::kDexCacheViaMethod) {
@@ -6038,13 +6079,13 @@
switch (load->GetLoadKind()) {
case HLoadString::LoadKind::kBootImageLinkTimeAddress: {
__ movl(out, Immediate(/* placeholder */ 0));
- codegen_->RecordStringPatch(load);
+ codegen_->RecordBootStringPatch(load);
return; // No dex cache slow path.
}
case HLoadString::LoadKind::kBootImageLinkTimePcRelative: {
Register method_address = locations->InAt(0).AsRegister<Register>();
__ leal(out, Address(method_address, CodeGeneratorX86::kDummy32BitOffset));
- codegen_->RecordStringPatch(load);
+ codegen_->RecordBootStringPatch(load);
return; // No dex cache slow path.
}
case HLoadString::LoadKind::kBootImageAddress: {
@@ -6054,6 +6095,19 @@
codegen_->RecordSimplePatch();
return; // No dex cache slow path.
}
+ case HLoadString::LoadKind::kBssEntry: {
+ Register method_address = locations->InAt(0).AsRegister<Register>();
+ Address address = Address(method_address, CodeGeneratorX86::kDummy32BitOffset);
+ Label* fixup_label = codegen_->NewStringBssEntryPatch(load);
+ // /* GcRoot<mirror::Class> */ out = *address /* PC-relative */
+ GenerateGcRootFieldLoad(load, out_loc, address, fixup_label);
+ SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathX86(load);
+ codegen_->AddSlowPath(slow_path);
+ __ testl(out, out);
+ __ j(kEqual, slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+ return;
+ }
default:
break;
}
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 1bd28da..25f5c2a 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -411,8 +411,9 @@
void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp) OVERRIDE;
void RecordSimplePatch();
- void RecordStringPatch(HLoadString* load_string);
+ void RecordBootStringPatch(HLoadString* load_string);
void RecordTypePatch(HLoadClass* load_class);
+ Label* NewStringBssEntryPatch(HLoadString* load_string);
Label* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file, uint32_t element_offset);
void MoveFromReturnRegister(Location trg, Primitive::Type type) OVERRIDE;
@@ -580,15 +581,9 @@
private:
Register GetInvokeStaticOrDirectExtraParameter(HInvokeStaticOrDirect* invoke, Register temp);
- struct PcRelativeDexCacheAccessInfo {
- PcRelativeDexCacheAccessInfo(const DexFile& dex_file, uint32_t element_off)
- : target_dex_file(dex_file), element_offset(element_off), label() { }
-
- const DexFile& target_dex_file;
- uint32_t element_offset;
- // NOTE: Label is bound to the end of the instruction that has an embedded 32-bit offset.
- Label label;
- };
+ template <LinkerPatch (*Factory)(size_t, const DexFile*, uint32_t, uint32_t)>
+ void EmitPcRelativeLinkerPatches(const ArenaDeque<PatchInfo<Label>>& infos,
+ ArenaVector<LinkerPatch>* linker_patches);
// Labels for each block that will be compiled.
Label* block_labels_; // Indexed by block id.
@@ -600,16 +595,16 @@
const X86InstructionSetFeatures& isa_features_;
// Method patch info. Using ArenaDeque<> which retains element addresses on push/emplace_back().
- ArenaDeque<MethodPatchInfo<Label>> method_patches_;
- ArenaDeque<MethodPatchInfo<Label>> relative_call_patches_;
+ ArenaDeque<PatchInfo<Label>> method_patches_;
+ ArenaDeque<PatchInfo<Label>> relative_call_patches_;
// PC-relative DexCache access info.
- ArenaDeque<PcRelativeDexCacheAccessInfo> pc_relative_dex_cache_patches_;
+ ArenaDeque<PatchInfo<Label>> pc_relative_dex_cache_patches_;
// Patch locations for patchoat where the linker doesn't do any other work.
ArenaDeque<Label> simple_patches_;
- // String patch locations.
- ArenaDeque<StringPatchInfo<Label>> string_patches_;
+ // String patch locations; type depends on configuration (app .bss or boot image PIC/non-PIC).
+ ArenaDeque<PatchInfo<Label>> string_patches_;
// Type patch locations.
- ArenaDeque<TypePatchInfo<Label>> type_patches_;
+ ArenaDeque<PatchInfo<Label>> type_patches_;
// Offset to the start of the constant area in the assembled code.
// Used for fixups to the constant area.
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index b243ee0..28638d7 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -287,6 +287,44 @@
DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathX86_64);
};
+class LoadStringSlowPathX86_64 : public SlowPathCode {
+ public:
+ explicit LoadStringSlowPathX86_64(HLoadString* instruction) : SlowPathCode(instruction) {}
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
+ DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+
+ CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
+ __ Bind(GetEntryLabel());
+ SaveLiveRegisters(codegen, locations);
+
+ InvokeRuntimeCallingConvention calling_convention;
+ const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex();
+ __ movl(CpuRegister(calling_convention.GetRegisterAt(0)), Immediate(string_index));
+ x86_64_codegen->InvokeRuntime(kQuickResolveString,
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
+ CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
+ x86_64_codegen->Move(locations->Out(), Location::RegisterLocation(RAX));
+ RestoreLiveRegisters(codegen, locations);
+
+ // Store the resolved String to the BSS entry.
+ __ movl(Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false),
+ locations->Out().AsRegister<CpuRegister>());
+ Label* fixup_label = x86_64_codegen->NewStringBssEntryPatch(instruction_->AsLoadString());
+ __ Bind(fixup_label);
+
+ __ jmp(GetExitLabel());
+ }
+
+ const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathX86_64"; }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathX86_64);
+};
+
class TypeCheckSlowPathX86_64 : public SlowPathCode {
public:
TypeCheckSlowPathX86_64(HInstruction* instruction, bool is_fatal)
@@ -772,7 +810,8 @@
break;
case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup:
__ movl(temp.AsRegister<CpuRegister>(), Immediate(0)); // Placeholder.
- method_patches_.emplace_back(invoke->GetTargetMethod());
+ method_patches_.emplace_back(*invoke->GetTargetMethod().dex_file,
+ invoke->GetTargetMethod().dex_method_index);
__ Bind(&method_patches_.back().label); // Bind the label at the end of the "movl" insn.
break;
case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative: {
@@ -819,7 +858,8 @@
__ call(&frame_entry_label_);
break;
case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative: {
- relative_call_patches_.emplace_back(invoke->GetTargetMethod());
+ relative_call_patches_.emplace_back(*invoke->GetTargetMethod().dex_file,
+ invoke->GetTargetMethod().dex_method_index);
Label* label = &relative_call_patches_.back().label;
__ call(label); // Bind to the patch label, override at link time.
__ Bind(label); // Bind the label at the end of the "call" insn.
@@ -879,7 +919,8 @@
}
}
-void CodeGeneratorX86_64::RecordStringPatch(HLoadString* load_string) {
+void CodeGeneratorX86_64::RecordBootStringPatch(HLoadString* load_string) {
+ DCHECK(GetCompilerOptions().IsBootImage());
string_patches_.emplace_back(load_string->GetDexFile(), load_string->GetStringIndex());
__ Bind(&string_patches_.back().label);
}
@@ -889,6 +930,12 @@
__ Bind(&type_patches_.back().label);
}
+Label* CodeGeneratorX86_64::NewStringBssEntryPatch(HLoadString* load_string) {
+ DCHECK(!GetCompilerOptions().IsBootImage());
+ string_patches_.emplace_back(load_string->GetDexFile(), load_string->GetStringIndex());
+ return &string_patches_.back().label;
+}
+
Label* CodeGeneratorX86_64::NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file,
uint32_t element_offset) {
// Add a patch entry and return the label.
@@ -896,6 +943,21 @@
return &pc_relative_dex_cache_patches_.back().label;
}
+// The label points to the end of the "movl" or another instruction but the literal offset
+// for method patch needs to point to the embedded constant which occupies the last 4 bytes.
+constexpr uint32_t kLabelPositionToLiteralOffsetAdjustment = 4u;
+
+template <LinkerPatch (*Factory)(size_t, const DexFile*, uint32_t, uint32_t)>
+inline void CodeGeneratorX86_64::EmitPcRelativeLinkerPatches(
+ const ArenaDeque<PatchInfo<Label>>& infos,
+ ArenaVector<LinkerPatch>* linker_patches) {
+ for (const PatchInfo<Label>& info : infos) {
+ uint32_t literal_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
+ linker_patches->push_back(
+ Factory(literal_offset, &info.dex_file, info.label.Position(), info.index));
+ }
+}
+
void CodeGeneratorX86_64::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) {
DCHECK(linker_patches->empty());
size_t size =
@@ -906,48 +968,29 @@
string_patches_.size() +
type_patches_.size();
linker_patches->reserve(size);
- // The label points to the end of the "movl" insn but the literal offset for method
- // patch needs to point to the embedded constant which occupies the last 4 bytes.
- constexpr uint32_t kLabelPositionToLiteralOffsetAdjustment = 4u;
- for (const MethodPatchInfo<Label>& info : method_patches_) {
+ for (const PatchInfo<Label>& info : method_patches_) {
uint32_t literal_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
- linker_patches->push_back(LinkerPatch::MethodPatch(literal_offset,
- info.target_method.dex_file,
- info.target_method.dex_method_index));
+ linker_patches->push_back(LinkerPatch::MethodPatch(literal_offset, &info.dex_file, info.index));
}
- for (const MethodPatchInfo<Label>& info : relative_call_patches_) {
+ for (const PatchInfo<Label>& info : relative_call_patches_) {
uint32_t literal_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
- linker_patches->push_back(LinkerPatch::RelativeCodePatch(literal_offset,
- info.target_method.dex_file,
- info.target_method.dex_method_index));
+ linker_patches->push_back(
+ LinkerPatch::RelativeCodePatch(literal_offset, &info.dex_file, info.index));
}
- for (const PcRelativeDexCacheAccessInfo& info : pc_relative_dex_cache_patches_) {
- uint32_t literal_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
- linker_patches->push_back(LinkerPatch::DexCacheArrayPatch(literal_offset,
- &info.target_dex_file,
- info.label.Position(),
- info.element_offset));
- }
+ EmitPcRelativeLinkerPatches<LinkerPatch::DexCacheArrayPatch>(pc_relative_dex_cache_patches_,
+ linker_patches);
for (const Label& label : simple_patches_) {
uint32_t literal_offset = label.Position() - kLabelPositionToLiteralOffsetAdjustment;
linker_patches->push_back(LinkerPatch::RecordPosition(literal_offset));
}
- for (const StringPatchInfo<Label>& info : string_patches_) {
+ if (!GetCompilerOptions().IsBootImage()) {
+ EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(string_patches_, linker_patches);
+ } else {
// These are always PC-relative, see GetSupportedLoadStringKind().
- uint32_t literal_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
- linker_patches->push_back(LinkerPatch::RelativeStringPatch(literal_offset,
- &info.dex_file,
- info.label.Position(),
- info.string_index));
+ EmitPcRelativeLinkerPatches<LinkerPatch::RelativeStringPatch>(string_patches_, linker_patches);
}
- for (const TypePatchInfo<Label>& info : type_patches_) {
- // These are always PC-relative, see GetSupportedLoadClassKind().
- uint32_t literal_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
- linker_patches->push_back(LinkerPatch::RelativeTypePatch(literal_offset,
- &info.dex_file,
- info.label.Position(),
- info.type_index));
- }
+ // These are always PC-relative, see GetSupportedLoadClassKind().
+ EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(type_patches_, linker_patches);
}
void CodeGeneratorX86_64::DumpCoreRegister(std::ostream& stream, int reg) const {
@@ -5390,7 +5433,7 @@
case HLoadString::LoadKind::kDexCacheAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
break;
- case HLoadString::LoadKind::kDexCachePcRelative:
+ case HLoadString::LoadKind::kBssEntry:
DCHECK(!Runtime::Current()->UseJitCompilation());
break;
case HLoadString::LoadKind::kDexCacheViaMethod:
@@ -5401,7 +5444,9 @@
void LocationsBuilderX86_64::VisitLoadString(HLoadString* load) {
LocationSummary::CallKind call_kind = load->NeedsEnvironment()
- ? LocationSummary::kCallOnMainOnly
+ ? ((load->GetLoadKind() == HLoadString::LoadKind::kDexCacheViaMethod)
+ ? LocationSummary::kCallOnMainOnly
+ : LocationSummary::kCallOnSlowPath)
: LocationSummary::kNoCall;
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(load, call_kind);
if (load->GetLoadKind() == HLoadString::LoadKind::kDexCacheViaMethod) {
@@ -5420,7 +5465,7 @@
switch (load->GetLoadKind()) {
case HLoadString::LoadKind::kBootImageLinkTimePcRelative: {
__ leal(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
- codegen_->RecordStringPatch(load);
+ codegen_->RecordBootStringPatch(load);
return; // No dex cache slow path.
}
case HLoadString::LoadKind::kBootImageAddress: {
@@ -5430,6 +5475,19 @@
codegen_->RecordSimplePatch();
return; // No dex cache slow path.
}
+ case HLoadString::LoadKind::kBssEntry: {
+ Address address = Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset,
+ /* no_rip */ false);
+ Label* fixup_label = codegen_->NewStringBssEntryPatch(load);
+ // /* GcRoot<mirror::Class> */ out = *address /* PC-relative */
+ GenerateGcRootFieldLoad(load, out_loc, address, fixup_label);
+ SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathX86_64(load);
+ codegen_->AddSlowPath(slow_path);
+ __ testl(out, out);
+ __ j(kEqual, slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+ return;
+ }
default:
break;
}
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 8dec44e..57ef83f 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -406,8 +406,9 @@
void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp) OVERRIDE;
void RecordSimplePatch();
- void RecordStringPatch(HLoadString* load_string);
+ void RecordBootStringPatch(HLoadString* load_string);
void RecordTypePatch(HLoadClass* load_class);
+ Label* NewStringBssEntryPatch(HLoadString* load_string);
Label* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file, uint32_t element_offset);
void MoveFromReturnRegister(Location trg, Primitive::Type type) OVERRIDE;
@@ -555,14 +556,9 @@
static constexpr int32_t kDummy32BitOffset = 256;
private:
- struct PcRelativeDexCacheAccessInfo {
- PcRelativeDexCacheAccessInfo(const DexFile& dex_file, uint32_t element_off)
- : target_dex_file(dex_file), element_offset(element_off), label() { }
-
- const DexFile& target_dex_file;
- uint32_t element_offset;
- Label label;
- };
+ template <LinkerPatch (*Factory)(size_t, const DexFile*, uint32_t, uint32_t)>
+ static void EmitPcRelativeLinkerPatches(const ArenaDeque<PatchInfo<Label>>& infos,
+ ArenaVector<LinkerPatch>* linker_patches);
// Labels for each block that will be compiled.
Label* block_labels_; // Indexed by block id.
@@ -578,16 +574,16 @@
int constant_area_start_;
// Method patch info. Using ArenaDeque<> which retains element addresses on push/emplace_back().
- ArenaDeque<MethodPatchInfo<Label>> method_patches_;
- ArenaDeque<MethodPatchInfo<Label>> relative_call_patches_;
+ ArenaDeque<PatchInfo<Label>> method_patches_;
+ ArenaDeque<PatchInfo<Label>> relative_call_patches_;
// PC-relative DexCache access info.
- ArenaDeque<PcRelativeDexCacheAccessInfo> pc_relative_dex_cache_patches_;
+ ArenaDeque<PatchInfo<Label>> pc_relative_dex_cache_patches_;
// Patch locations for patchoat where the linker doesn't do any other work.
ArenaDeque<Label> simple_patches_;
- // String patch locations.
- ArenaDeque<StringPatchInfo<Label>> string_patches_;
+ // String patch locations; type depends on configuration (app .bss or boot image PIC).
+ ArenaDeque<PatchInfo<Label>> string_patches_;
// Type patch locations.
- ArenaDeque<TypePatchInfo<Label>> type_patches_;
+ ArenaDeque<PatchInfo<Label>> type_patches_;
// Fixups for jump tables need to be handled specially.
ArenaVector<JumpTableRIPFixup*> fixups_to_jump_tables_;
diff --git a/compiler/optimizing/dex_cache_array_fixups_arm.cc b/compiler/optimizing/dex_cache_array_fixups_arm.cc
index 7010171..82b8123 100644
--- a/compiler/optimizing/dex_cache_array_fixups_arm.cc
+++ b/compiler/optimizing/dex_cache_array_fixups_arm.cc
@@ -62,21 +62,6 @@
}
}
- void VisitLoadString(HLoadString* load_string) OVERRIDE {
- // If this is a load with PC-relative access to the dex cache strings array,
- // we need to add the dex cache arrays base as the special input.
- if (load_string->GetLoadKind() == HLoadString::LoadKind::kDexCachePcRelative) {
- // Initialize base for target dex file if needed.
- const DexFile& dex_file = load_string->GetDexFile();
- HArmDexCacheArraysBase* base = GetOrCreateDexCacheArrayBase(dex_file);
- // Update the element offset in base.
- DexCacheArraysLayout layout(kArmPointerSize, &dex_file);
- base->UpdateElementOffset(layout.StringOffset(load_string->GetStringIndex()));
- // Add the special argument base to the load.
- load_string->AddSpecialInput(base);
- }
- }
-
void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE {
// If this is an invoke with PC-relative access to the dex cache methods array,
// we need to add the dex cache arrays base as the special input.
diff --git a/compiler/optimizing/dex_cache_array_fixups_mips.cc b/compiler/optimizing/dex_cache_array_fixups_mips.cc
index 4456b49..31fff26 100644
--- a/compiler/optimizing/dex_cache_array_fixups_mips.cc
+++ b/compiler/optimizing/dex_cache_array_fixups_mips.cc
@@ -68,21 +68,6 @@
}
}
- void VisitLoadString(HLoadString* load_string) OVERRIDE {
- // If this is a load with PC-relative access to the dex cache strings array,
- // we need to add the dex cache arrays base as the special input.
- if (load_string->GetLoadKind() == HLoadString::LoadKind::kDexCachePcRelative) {
- // Initialize base for target dex file if needed.
- const DexFile& dex_file = load_string->GetDexFile();
- HMipsDexCacheArraysBase* base = GetOrCreateDexCacheArrayBase(dex_file);
- // Update the element offset in base.
- DexCacheArraysLayout layout(kMipsPointerSize, &dex_file);
- base->UpdateElementOffset(layout.StringOffset(load_string->GetStringIndex()));
- // Add the special argument base to the load.
- load_string->AddSpecialInput(base);
- }
- }
-
void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE {
// If this is an invoke with PC-relative access to the dex cache methods array,
// we need to add the dex cache arrays base as the special input.
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index f1ca928..1ff2252 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -2610,12 +2610,8 @@
LoadKind load_kind = GetLoadKind();
if (HasAddress(load_kind)) {
return GetAddress() == other_load_string->GetAddress();
- } else if (HasStringReference(load_kind)) {
- return IsSameDexFile(GetDexFile(), other_load_string->GetDexFile());
} else {
- DCHECK(HasDexCacheReference(load_kind)) << load_kind;
- // If the string indexes and dex files are the same, dex cache element offsets
- // must also be the same, so we don't need to compare them.
+ DCHECK(HasStringReference(load_kind)) << load_kind;
return IsSameDexFile(GetDexFile(), other_load_string->GetDexFile());
}
}
@@ -2645,8 +2641,8 @@
return os << "BootImageAddress";
case HLoadString::LoadKind::kDexCacheAddress:
return os << "DexCacheAddress";
- case HLoadString::LoadKind::kDexCachePcRelative:
- return os << "DexCachePcRelative";
+ case HLoadString::LoadKind::kBssEntry:
+ return os << "BssEntry";
case HLoadString::LoadKind::kDexCacheViaMethod:
return os << "DexCacheViaMethod";
default:
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index b0e61e6..5cfbf42 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -5651,10 +5651,9 @@
// Used for strings outside the boot image referenced by JIT-compiled code.
kDexCacheAddress,
- // Load from resolved strings array in the dex cache using a PC-relative load.
- // Used for strings outside boot image when we know that we can access
- // the dex cache arrays using a PC-relative load.
- kDexCachePcRelative,
+ // Load from an entry in the .bss section using a PC-relative load.
+ // Used for strings outside boot image when .bss is accessible with a PC-relative load.
+ kBssEntry,
// Load from resolved strings array accessed through the class loaded from
// the compiled method's own ArtMethod*. This is the default access type when
@@ -5673,7 +5672,7 @@
string_index_(string_index) {
SetPackedFlag<kFlagIsInDexCache>(false);
SetPackedField<LoadKindField>(LoadKind::kDexCacheViaMethod);
- load_data_.ref.dex_file = &dex_file;
+ load_data_.dex_file_ = &dex_file;
}
void SetLoadKindWithAddress(LoadKind load_kind, uint64_t address) {
@@ -5686,20 +5685,11 @@
const DexFile& dex_file,
uint32_t string_index) {
DCHECK(HasStringReference(load_kind));
- load_data_.ref.dex_file = &dex_file;
+ load_data_.dex_file_ = &dex_file;
string_index_ = string_index;
SetLoadKindInternal(load_kind);
}
- void SetLoadKindWithDexCacheReference(LoadKind load_kind,
- const DexFile& dex_file,
- uint32_t element_index) {
- DCHECK(HasDexCacheReference(load_kind));
- load_data_.ref.dex_file = &dex_file;
- load_data_.ref.dex_cache_element_index = element_index;
- SetLoadKindInternal(load_kind);
- }
-
LoadKind GetLoadKind() const {
return GetPackedField<LoadKindField>();
}
@@ -5711,8 +5701,6 @@
return string_index_;
}
- uint32_t GetDexCacheElementOffset() const;
-
uint64_t GetAddress() const {
DCHECK(HasAddress(GetLoadKind()));
return load_data_.address;
@@ -5782,6 +5770,7 @@
static bool HasStringReference(LoadKind load_kind) {
return load_kind == LoadKind::kBootImageLinkTimeAddress ||
load_kind == LoadKind::kBootImageLinkTimePcRelative ||
+ load_kind == LoadKind::kBssEntry ||
load_kind == LoadKind::kDexCacheViaMethod;
}
@@ -5789,10 +5778,6 @@
return load_kind == LoadKind::kBootImageAddress || load_kind == LoadKind::kDexCacheAddress;
}
- static bool HasDexCacheReference(LoadKind load_kind) {
- return load_kind == LoadKind::kDexCachePcRelative;
- }
-
void SetLoadKindInternal(LoadKind load_kind);
// The special input is the HCurrentMethod for kDexCacheViaMethod.
@@ -5805,10 +5790,7 @@
uint32_t string_index_;
union {
- struct {
- const DexFile* dex_file; // For string reference and dex cache reference.
- uint32_t dex_cache_element_index; // Only for dex cache reference.
- } ref;
+ const DexFile* dex_file_; // For string reference.
uint64_t address; // Up to 64-bit, needed for kDexCacheAddress on 64-bit targets.
} load_data_;
@@ -5818,15 +5800,8 @@
// Note: defined outside class to see operator<<(., HLoadString::LoadKind).
inline const DexFile& HLoadString::GetDexFile() const {
- DCHECK(HasStringReference(GetLoadKind()) || HasDexCacheReference(GetLoadKind()))
- << GetLoadKind();
- return *load_data_.ref.dex_file;
-}
-
-// Note: defined outside class to see operator<<(., HLoadString::LoadKind).
-inline uint32_t HLoadString::GetDexCacheElementOffset() const {
- DCHECK(HasDexCacheReference(GetLoadKind())) << GetLoadKind();
- return load_data_.ref.dex_cache_element_index;
+ DCHECK(HasStringReference(GetLoadKind())) << GetLoadKind();
+ return *load_data_.dex_file_;
}
// Note: defined outside class to see operator<<(., HLoadString::LoadKind).
@@ -5834,7 +5809,7 @@
// The special input is used for PC-relative loads on some architectures,
// including literal pool loads, which are PC-relative too.
DCHECK(GetLoadKind() == LoadKind::kBootImageLinkTimePcRelative ||
- GetLoadKind() == LoadKind::kDexCachePcRelative ||
+ GetLoadKind() == LoadKind::kBssEntry ||
GetLoadKind() == LoadKind::kBootImageLinkTimeAddress ||
GetLoadKind() == LoadKind::kBootImageAddress) << GetLoadKind();
// HLoadString::GetInputRecords() returns an empty array at this point,
diff --git a/compiler/optimizing/pc_relative_fixups_mips.cc b/compiler/optimizing/pc_relative_fixups_mips.cc
index 6006e6c..82feb95 100644
--- a/compiler/optimizing/pc_relative_fixups_mips.cc
+++ b/compiler/optimizing/pc_relative_fixups_mips.cc
@@ -83,6 +83,7 @@
case HLoadString::LoadKind::kBootImageLinkTimeAddress:
case HLoadString::LoadKind::kBootImageAddress:
case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
+ case HLoadString::LoadKind::kBssEntry:
// Add a base register for PC-relative literals on R2.
InitializePCRelativeBasePointer();
load_string->AddSpecialInput(base_);
diff --git a/compiler/optimizing/pc_relative_fixups_x86.cc b/compiler/optimizing/pc_relative_fixups_x86.cc
index 75587af..b1fdb17 100644
--- a/compiler/optimizing/pc_relative_fixups_x86.cc
+++ b/compiler/optimizing/pc_relative_fixups_x86.cc
@@ -92,7 +92,7 @@
void VisitLoadString(HLoadString* load_string) OVERRIDE {
HLoadString::LoadKind load_kind = load_string->GetLoadKind();
if (load_kind == HLoadString::LoadKind::kBootImageLinkTimePcRelative ||
- load_kind == HLoadString::LoadKind::kDexCachePcRelative) {
+ load_kind == HLoadString::LoadKind::kBssEntry) {
InitializePCRelativeBasePointer();
load_string->AddSpecialInput(base_);
}
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index 5a47df1..15cebfe 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -84,7 +84,7 @@
void VisitNewArray(HNewArray* instr) OVERRIDE;
void VisitParameterValue(HParameterValue* instr) OVERRIDE;
void UpdateFieldAccessTypeInfo(HInstruction* instr, const FieldInfo& info);
- void SetClassAsTypeInfo(HInstruction* instr, mirror::Class* klass, bool is_exact)
+ void SetClassAsTypeInfo(HInstruction* instr, ObjPtr<mirror::Class> klass, bool is_exact)
REQUIRES_SHARED(Locks::mutator_lock_);
void VisitInstanceFieldGet(HInstanceFieldGet* instr) OVERRIDE;
void VisitStaticFieldGet(HStaticFieldGet* instr) OVERRIDE;
@@ -427,7 +427,7 @@
}
void ReferenceTypePropagation::RTPVisitor::SetClassAsTypeInfo(HInstruction* instr,
- mirror::Class* klass,
+ ObjPtr<mirror::Class> klass,
bool is_exact) {
if (instr->IsInvokeStaticOrDirect() && instr->AsInvokeStaticOrDirect()->IsStringInit()) {
// Calls to String.<init> are replaced with a StringFactory.
@@ -454,7 +454,7 @@
}
instr->SetReferenceTypeInfo(
ReferenceTypeInfo::Create(handle_cache_->GetStringClassHandle(), /* is_exact */ true));
- } else if (IsAdmissible(klass)) {
+ } else if (IsAdmissible(klass.Decode())) {
ReferenceTypeInfo::TypeHandle handle = handle_cache_->NewHandle(klass);
is_exact = is_exact || handle->CannotBeAssignedFromOtherTypes();
instr->SetReferenceTypeInfo(ReferenceTypeInfo::Create(handle, is_exact));
@@ -512,7 +512,7 @@
}
ScopedObjectAccess soa(Thread::Current());
- mirror::Class* klass = nullptr;
+ ObjPtr<mirror::Class> klass;
// The field index is unknown only during tests.
if (info.GetFieldIndex() != kUnknownFieldIndex) {
diff --git a/compiler/optimizing/reference_type_propagation.h b/compiler/optimizing/reference_type_propagation.h
index 1fa6624..61428b2 100644
--- a/compiler/optimizing/reference_type_propagation.h
+++ b/compiler/optimizing/reference_type_propagation.h
@@ -21,6 +21,7 @@
#include "driver/dex_compilation_unit.h"
#include "handle_scope-inl.h"
#include "nodes.h"
+#include "obj_ptr.h"
#include "optimization.h"
#include "optimizing_compiler_stats.h"
@@ -62,6 +63,11 @@
return handles_->NewHandle(object);
}
+ template <typename T>
+ MutableHandle<T> NewHandle(ObjPtr<T> object) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return handles_->NewHandle(object);
+ }
+
ReferenceTypeInfo::TypeHandle GetObjectClassHandle();
ReferenceTypeInfo::TypeHandle GetClassClassHandle();
ReferenceTypeInfo::TypeHandle GetStringClassHandle();
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index a4a3e06..c1cfe8d 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -163,7 +163,7 @@
: hs.NewHandle(class_linker->FindDexCache(soa.Self(), dex_file));
mirror::Class* klass = dex_cache->GetResolvedType(type_index);
- if (compiler_driver_->IsBootImage()) {
+ if (codegen_->GetCompilerOptions().IsBootImage()) {
// Compiling boot image. Check if the class is a boot image class.
DCHECK(!runtime->UseJitCompilation());
if (!compiler_driver_->GetSupportBootImageFixup()) {
@@ -281,7 +281,7 @@
? compilation_unit_.GetDexCache()
: hs.NewHandle(class_linker->FindDexCache(soa.Self(), dex_file));
- if (compiler_driver_->IsBootImage()) {
+ if (codegen_->GetCompilerOptions().IsBootImage()) {
// Compiling boot image. Resolve the string and allocate it if needed.
DCHECK(!runtime->UseJitCompilation());
mirror::String* string = class_linker->ResolveString(dex_file, string_index, dex_cache);
@@ -311,6 +311,8 @@
!codegen_->GetCompilerOptions().GetCompilePic()) {
desired_load_kind = HLoadString::LoadKind::kBootImageAddress;
address = reinterpret_cast64<uint64_t>(string);
+ } else {
+ desired_load_kind = HLoadString::LoadKind::kBssEntry;
}
}
}
@@ -319,6 +321,7 @@
switch (load_kind) {
case HLoadString::LoadKind::kBootImageLinkTimeAddress:
case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
+ case HLoadString::LoadKind::kBssEntry:
case HLoadString::LoadKind::kDexCacheViaMethod:
load_string->SetLoadKindWithStringReference(load_kind, dex_file, string_index);
break;
@@ -327,13 +330,6 @@
DCHECK_NE(address, 0u);
load_string->SetLoadKindWithAddress(load_kind, address);
break;
- case HLoadString::LoadKind::kDexCachePcRelative: {
- PointerSize pointer_size = InstructionSetPointerSize(codegen_->GetInstructionSet());
- DexCacheArraysLayout layout(pointer_size, &dex_file);
- size_t element_index = layout.StringOffset(string_index);
- load_string->SetLoadKindWithDexCacheReference(load_kind, dex_file, element_index);
- break;
- }
}
}
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
index ee5811c..0ed8a35 100644
--- a/compiler/utils/arm/assembler_arm.h
+++ b/compiler/utils/arm/assembler_arm.h
@@ -262,12 +262,6 @@
CHECK_NE(rm, PC);
}
- // LDR(literal) - pc relative load.
- explicit Address(int32_t offset) :
- rn_(PC), rm_(R0), offset_(offset),
- am_(Offset), is_immed_offset_(false), shift_(LSL) {
- }
-
static bool CanHoldLoadOffsetArm(LoadOperandType type, int offset);
static bool CanHoldStoreOffsetArm(StoreOperandType type, int offset);
diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc
index 2269ba2..61b7f08 100644
--- a/compiler/utils/arm/assembler_thumb2.cc
+++ b/compiler/utils/arm/assembler_thumb2.cc
@@ -2461,58 +2461,36 @@
}
} else {
// Register shift.
- if (ad.GetRegister() == PC) {
- // PC relative literal encoding.
- int32_t offset = ad.GetOffset();
- if (must_be_32bit || offset < 0 || offset >= (1 << 10) || !load) {
- int32_t up = B23;
- if (offset < 0) {
- offset = -offset;
- up = 0;
- }
- CHECK_LT(offset, (1 << 12));
- int32_t encoding = 0x1f << 27 | 0xf << 16 | B22 | (load ? B20 : 0) |
- offset | up |
- static_cast<uint32_t>(rd) << 12;
- Emit32(encoding);
- } else {
- // 16 bit literal load.
- CHECK_GE(offset, 0);
- CHECK_LT(offset, (1 << 10));
- int32_t encoding = B14 | (load ? B11 : 0) | static_cast<uint32_t>(rd) << 8 | offset >> 2;
- Emit16(encoding);
- }
- } else {
- if (ad.GetShiftCount() != 0) {
- // If there is a shift count this must be 32 bit.
- must_be_32bit = true;
- } else if (IsHighRegister(ad.GetRegisterOffset())) {
- must_be_32bit = true;
- }
+ CHECK_NE(ad.GetRegister(), PC);
+ if (ad.GetShiftCount() != 0) {
+ // If there is a shift count this must be 32 bit.
+ must_be_32bit = true;
+ } else if (IsHighRegister(ad.GetRegisterOffset())) {
+ must_be_32bit = true;
+ }
- if (must_be_32bit) {
- int32_t encoding = 0x1f << 27 | (load ? B20 : 0) | static_cast<uint32_t>(rd) << 12 |
- ad.encodingThumb(true);
- if (half) {
- encoding |= B21;
- } else if (!byte) {
- encoding |= B22;
- }
- if (load && is_signed && (byte || half)) {
- encoding |= B24;
- }
- Emit32(encoding);
- } else {
- // 16 bit register offset.
- int32_t encoding = B14 | B12 | (load ? B11 : 0) | static_cast<uint32_t>(rd) |
- ad.encodingThumb(false);
- if (byte) {
- encoding |= B10;
- } else if (half) {
- encoding |= B9;
- }
- Emit16(encoding);
+ if (must_be_32bit) {
+ int32_t encoding = 0x1f << 27 | (load ? B20 : 0) | static_cast<uint32_t>(rd) << 12 |
+ ad.encodingThumb(true);
+ if (half) {
+ encoding |= B21;
+ } else if (!byte) {
+ encoding |= B22;
}
+ if (load && is_signed && (byte || half)) {
+ encoding |= B24;
+ }
+ Emit32(encoding);
+ } else {
+ // 16 bit register offset.
+ int32_t encoding = B14 | B12 | (load ? B11 : 0) | static_cast<uint32_t>(rd) |
+ ad.encodingThumb(false);
+ if (byte) {
+ encoding |= B10;
+ } else if (half) {
+ encoding |= B9;
+ }
+ Emit16(encoding);
}
}
}
diff --git a/compiler/utils/assembler_thumb_test.cc b/compiler/utils/assembler_thumb_test.cc
index 3b05173..86a4aa2 100644
--- a/compiler/utils/assembler_thumb_test.cc
+++ b/compiler/utils/assembler_thumb_test.cc
@@ -1245,22 +1245,6 @@
EmitAndCheck(&assembler, "LoadStoreRegOffset");
}
-TEST_F(Thumb2AssemblerTest, LoadStoreLiteral) {
- __ ldr(R0, Address(4));
- __ str(R0, Address(4));
-
- __ ldr(R0, Address(-8));
- __ str(R0, Address(-8));
-
- // Limits.
- __ ldr(R0, Address(0x3ff)); // 10 bits (16 bit).
- __ ldr(R0, Address(0x7ff)); // 11 bits (32 bit).
- __ str(R0, Address(0x3ff)); // 32 bit (no 16 bit str(literal)).
- __ str(R0, Address(0x7ff)); // 11 bits (32 bit).
-
- EmitAndCheck(&assembler, "LoadStoreLiteral");
-}
-
TEST_F(Thumb2AssemblerTest, LoadStoreLimits) {
__ ldr(R0, Address(R4, 124)); // 16 bit.
__ ldr(R0, Address(R4, 128)); // 32 bit.
diff --git a/compiler/utils/assembler_thumb_test_expected.cc.inc b/compiler/utils/assembler_thumb_test_expected.cc.inc
index 81c6ec5..91f3970 100644
--- a/compiler/utils/assembler_thumb_test_expected.cc.inc
+++ b/compiler/utils/assembler_thumb_test_expected.cc.inc
@@ -5012,17 +5012,6 @@
" 28: f841 0008 str.w r0, [r1, r8]\n",
nullptr
};
-const char* const LoadStoreLiteralResults[] = {
- " 0: 4801 ldr r0, [pc, #4] ; (8 <LoadStoreLiteral+0x8>)\n",
- " 2: f8cf 0004 str.w r0, [pc, #4] ; 8 <LoadStoreLiteral+0x8>\n",
- " 6: f85f 0008 ldr.w r0, [pc, #-8] ; 0 <LoadStoreLiteral>\n",
- " a: f84f 0008 str.w r0, [pc, #-8] ; 4 <LoadStoreLiteral+0x4>\n",
- " e: 48ff ldr r0, [pc, #1020] ; (40c <LoadStoreLiteral+0x40c>)\n",
- " 10: f8df 07ff ldr.w r0, [pc, #2047] ; 813 <LoadStoreLiteral+0x813>\n",
- " 14: f8cf 03ff str.w r0, [pc, #1023] ; 417 <LoadStoreLiteral+0x417>\n",
- " 18: f8cf 07ff str.w r0, [pc, #2047] ; 81b <LoadStoreLiteral+0x81b>\n",
- nullptr
-};
const char* const LoadStoreLimitsResults[] = {
" 0: 6fe0 ldr r0, [r4, #124] ; 0x7c\n",
" 2: f8d4 0080 ldr.w r0, [r4, #128] ; 0x80\n",
@@ -5708,7 +5697,6 @@
test_results["MixedBranch32"] = MixedBranch32Results;
test_results["Shifts"] = ShiftsResults;
test_results["LoadStoreRegOffset"] = LoadStoreRegOffsetResults;
- test_results["LoadStoreLiteral"] = LoadStoreLiteralResults;
test_results["LoadStoreLimits"] = LoadStoreLimitsResults;
test_results["CompareAndBranch"] = CompareAndBranchResults;
test_results["AddConstant"] = AddConstantResults;
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 1ddf961..abdbbb5 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -523,8 +523,6 @@
compiled_methods_zip_filename_(nullptr),
compiled_methods_filename_(nullptr),
passes_to_run_filename_(nullptr),
- app_image_(false),
- boot_image_(false),
multi_image_(false),
is_host_(false),
class_loader_(nullptr),
@@ -691,8 +689,8 @@
}
void ProcessOptions(ParserOptions* parser_options) {
- boot_image_ = !image_filenames_.empty();
- app_image_ = app_image_fd_ != -1 || !app_image_file_name_.empty();
+ compiler_options_->boot_image_ = !image_filenames_.empty();
+ compiler_options_->app_image_ = app_image_fd_ != -1 || !app_image_file_name_.empty();
if (IsAppImage() && IsBootImage()) {
Usage("Can't have both --image and (--app-image-fd or --app-image-file)");
@@ -744,7 +742,7 @@
android_root_ += android_root_env_var;
}
- if (!boot_image_ && parser_options->boot_image_filename.empty()) {
+ if (!IsBootImage() && parser_options->boot_image_filename.empty()) {
parser_options->boot_image_filename += android_root_;
parser_options->boot_image_filename += "/framework/boot.art";
}
@@ -1327,7 +1325,7 @@
}
void LoadClassProfileDescriptors() {
- if (profile_compilation_info_ != nullptr && app_image_) {
+ if (profile_compilation_info_ != nullptr && IsAppImage()) {
Runtime* runtime = Runtime::Current();
CHECK(runtime != nullptr);
std::set<DexCacheResolvedClasses> resolved_classes(
@@ -1634,8 +1632,6 @@
compiler_kind_,
instruction_set_,
instruction_set_features_.get(),
- IsBootImage(),
- IsAppImage(),
image_classes_.release(),
compiled_classes_.release(),
compiled_methods_.release(),
@@ -1726,7 +1722,7 @@
}
if (IsImage()) {
- if (app_image_ && image_base_ == 0) {
+ if (IsAppImage() && image_base_ == 0) {
gc::Heap* const heap = Runtime::Current()->GetHeap();
for (gc::space::ImageSpace* image_space : heap->GetBootImageSpaces()) {
image_base_ = std::max(image_base_, RoundUp(
@@ -1794,7 +1790,10 @@
size_t rodata_size = oat_writer->GetOatHeader().GetExecutableOffset();
size_t text_size = oat_writer->GetOatSize() - rodata_size;
- elf_writer->SetLoadedSectionSizes(rodata_size, text_size, oat_writer->GetBssSize());
+ elf_writer->PrepareDynamicSection(rodata_size,
+ text_size,
+ oat_writer->GetBssSize(),
+ oat_writer->GetBssRootsOffset());
if (IsImage()) {
// Update oat layout.
@@ -1977,11 +1976,11 @@
}
bool IsAppImage() const {
- return app_image_;
+ return compiler_options_->IsAppImage();
}
bool IsBootImage() const {
- return boot_image_;
+ return compiler_options_->IsBootImage();
}
bool IsHost() const {
@@ -2575,8 +2574,6 @@
std::unique_ptr<std::unordered_set<std::string>> compiled_classes_;
std::unique_ptr<std::unordered_set<std::string>> compiled_methods_;
std::unique_ptr<std::vector<std::string>> passes_to_run_;
- bool app_image_;
- bool boot_image_;
bool multi_image_;
bool is_host_;
std::string android_root_;
diff --git a/dexlayout/Android.bp b/dexlayout/Android.bp
index 296cdb6..0987df7 100644
--- a/dexlayout/Android.bp
+++ b/dexlayout/Android.bp
@@ -20,6 +20,7 @@
"dexlayout.cc",
"dex_ir.cc",
"dex_ir_builder.cc",
+ "dex_visualize.cc",
],
cflags: ["-Wall"],
shared_libs: [
diff --git a/dexlayout/dex_ir.h b/dexlayout/dex_ir.h
index 6ae9f1c..f3d2c90 100644
--- a/dexlayout/dex_ir.h
+++ b/dexlayout/dex_ir.h
@@ -217,6 +217,17 @@
uint32_t MethodIdsSize() const { return method_ids_.Size(); }
uint32_t ClassDefsSize() const { return class_defs_.Size(); }
+ uint32_t StringDatasSize() const { return string_datas_.Size(); }
+ uint32_t TypeListsSize() const { return type_lists_.Size(); }
+ uint32_t EncodedArraySize() const { return encoded_array_items_.Size(); }
+ uint32_t AnnotationSize() const { return annotation_items_.Size(); }
+ uint32_t AnnotationSetSize() const { return annotation_set_items_.Size(); }
+ uint32_t AnnotationSetRefListsSize() const { return annotation_set_ref_lists_.Size(); }
+ uint32_t AnnotationsDirectorySize() const { return annotations_directory_items_.Size(); }
+ uint32_t DebugInfoSize() const { return debug_info_items_.Size(); }
+ uint32_t CodeItemsSize() const { return code_items_.Size(); }
+ uint32_t ClassDatasSize() const { return class_datas_.Size(); }
+
private:
EncodedValue* ReadEncodedValue(const uint8_t** data);
EncodedValue* ReadEncodedValue(const uint8_t** data, uint8_t type, uint8_t length);
diff --git a/dexlayout/dex_ir_builder.cc b/dexlayout/dex_ir_builder.cc
index e6868d7..599f48b 100644
--- a/dexlayout/dex_ir_builder.cc
+++ b/dexlayout/dex_ir_builder.cc
@@ -24,6 +24,8 @@
namespace art {
namespace dex_ir {
+static void CheckAndSetRemainingOffsets(const DexFile& dex_file, Collections* collections);
+
Header* DexIrBuilder(const DexFile& dex_file) {
const DexFile::Header& disk_header = dex_file.GetHeader();
Header* header = new Header(disk_header.magic_,
@@ -69,8 +71,87 @@
collections.CreateClassDef(dex_file, i);
}
+ CheckAndSetRemainingOffsets(dex_file, &collections);
+
return header;
}
+static void CheckAndSetRemainingOffsets(const DexFile& dex_file, Collections* collections) {
+ const DexFile::Header& disk_header = dex_file.GetHeader();
+ // Read MapItems and validate/set remaining offsets.
+ const DexFile::MapList* map =
+ reinterpret_cast<const DexFile::MapList*>(dex_file.Begin() + disk_header.map_off_);
+ const uint32_t count = map->size_;
+ for (uint32_t i = 0; i < count; ++i) {
+ const DexFile::MapItem* item = map->list_ + i;
+ switch (item->type_) {
+ case DexFile::kDexTypeHeaderItem:
+ CHECK_EQ(item->size_, 1u);
+ CHECK_EQ(item->offset_, 0u);
+ break;
+ case DexFile::kDexTypeStringIdItem:
+ CHECK_EQ(item->size_, collections->StringIdsSize());
+ CHECK_EQ(item->offset_, collections->StringIdsOffset());
+ break;
+ case DexFile::kDexTypeTypeIdItem:
+ CHECK_EQ(item->size_, collections->TypeIdsSize());
+ CHECK_EQ(item->offset_, collections->TypeIdsOffset());
+ break;
+ case DexFile::kDexTypeProtoIdItem:
+ CHECK_EQ(item->size_, collections->ProtoIdsSize());
+ CHECK_EQ(item->offset_, collections->ProtoIdsOffset());
+ break;
+ case DexFile::kDexTypeFieldIdItem:
+ CHECK_EQ(item->size_, collections->FieldIdsSize());
+ CHECK_EQ(item->offset_, collections->FieldIdsOffset());
+ break;
+ case DexFile::kDexTypeMethodIdItem:
+ CHECK_EQ(item->size_, collections->MethodIdsSize());
+ CHECK_EQ(item->offset_, collections->MethodIdsOffset());
+ break;
+ case DexFile::kDexTypeClassDefItem:
+ CHECK_EQ(item->size_, collections->ClassDefsSize());
+ CHECK_EQ(item->offset_, collections->ClassDefsOffset());
+ break;
+ case DexFile::kDexTypeMapList:
+ CHECK_EQ(item->size_, 1u);
+ CHECK_EQ(item->offset_, disk_header.map_off_);
+ break;
+ case DexFile::kDexTypeTypeList:
+ collections->SetTypeListsOffset(item->offset_);
+ break;
+ case DexFile::kDexTypeAnnotationSetRefList:
+ collections->SetAnnotationSetRefListsOffset(item->offset_);
+ break;
+ case DexFile::kDexTypeAnnotationSetItem:
+ collections->SetAnnotationSetOffset(item->offset_);
+ break;
+ case DexFile::kDexTypeClassDataItem:
+ collections->SetClassDatasOffset(item->offset_);
+ break;
+ case DexFile::kDexTypeCodeItem:
+ collections->SetCodeItemsOffset(item->offset_);
+ break;
+ case DexFile::kDexTypeStringDataItem:
+ collections->SetStringDatasOffset(item->offset_);
+ break;
+ case DexFile::kDexTypeDebugInfoItem:
+ collections->SetDebugInfoOffset(item->offset_);
+ break;
+ case DexFile::kDexTypeAnnotationItem:
+ collections->SetAnnotationOffset(item->offset_);
+ break;
+ case DexFile::kDexTypeEncodedArrayItem:
+ collections->SetEncodedArrayOffset(item->offset_);
+ break;
+ case DexFile::kDexTypeAnnotationsDirectoryItem:
+ collections->SetAnnotationsDirectoryOffset(item->offset_);
+ break;
+ default:
+ LOG(ERROR) << "Unknown map list item type.";
+ }
+ }
+}
+
} // namespace dex_ir
} // namespace art
diff --git a/dexlayout/dex_visualize.cc b/dexlayout/dex_visualize.cc
new file mode 100644
index 0000000..be7bade
--- /dev/null
+++ b/dexlayout/dex_visualize.cc
@@ -0,0 +1,377 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Implementation file of the dex layout visualization.
+ *
+ * This is a tool to read dex files into an internal representation,
+ * reorganize the representation, and emit dex files with a better
+ * file layout.
+ */
+
+#include "dex_visualize.h"
+
+#include <inttypes.h>
+#include <stdio.h>
+
+#include <functional>
+#include <memory>
+#include <vector>
+
+#include "dex_ir.h"
+#include "dexlayout.h"
+#include "jit/offline_profiling_info.h"
+
+namespace art {
+
+struct FileSection {
+ public:
+ std::string name_;
+ uint16_t type_;
+ std::function<uint32_t(const dex_ir::Collections&)> size_fn_;
+ std::function<uint32_t(const dex_ir::Collections&)> offset_fn_;
+};
+
+static const std::vector<FileSection> kFileSections = {
+ {
+ "StringId",
+ DexFile::kDexTypeStringIdItem,
+ &dex_ir::Collections::StringIdsSize,
+ &dex_ir::Collections::StringIdsOffset
+ }, {
+ "TypeId",
+ DexFile::kDexTypeTypeIdItem,
+ &dex_ir::Collections::TypeIdsSize,
+ &dex_ir::Collections::TypeIdsOffset
+ }, {
+ "ProtoId",
+ DexFile::kDexTypeProtoIdItem,
+ &dex_ir::Collections::ProtoIdsSize,
+ &dex_ir::Collections::ProtoIdsOffset
+ }, {
+ "FieldId",
+ DexFile::kDexTypeFieldIdItem,
+ &dex_ir::Collections::FieldIdsSize,
+ &dex_ir::Collections::FieldIdsOffset
+ }, {
+ "MethodId",
+ DexFile::kDexTypeMethodIdItem,
+ &dex_ir::Collections::MethodIdsSize,
+ &dex_ir::Collections::MethodIdsOffset
+ }, {
+ "ClassDef",
+ DexFile::kDexTypeClassDefItem,
+ &dex_ir::Collections::ClassDefsSize,
+ &dex_ir::Collections::ClassDefsOffset
+ }, {
+ "StringData",
+ DexFile::kDexTypeStringDataItem,
+ &dex_ir::Collections::StringDatasSize,
+ &dex_ir::Collections::StringDatasOffset
+ }, {
+ "TypeList",
+ DexFile::kDexTypeTypeList,
+ &dex_ir::Collections::TypeListsSize,
+ &dex_ir::Collections::TypeListsOffset
+ }, {
+ "EncArr",
+ DexFile::kDexTypeEncodedArrayItem,
+ &dex_ir::Collections::EncodedArraySize,
+ &dex_ir::Collections::EncodedArrayOffset
+ }, {
+ "Annotation",
+ DexFile::kDexTypeAnnotationItem,
+ &dex_ir::Collections::AnnotationSize,
+ &dex_ir::Collections::AnnotationOffset
+ }, {
+ "AnnoSet",
+ DexFile::kDexTypeAnnotationSetItem,
+ &dex_ir::Collections::AnnotationSetSize,
+ &dex_ir::Collections::AnnotationSetOffset
+ }, {
+ "AnnoSetRL",
+ DexFile::kDexTypeAnnotationSetRefList,
+ &dex_ir::Collections::AnnotationSetRefListsSize,
+ &dex_ir::Collections::AnnotationSetRefListsOffset
+ }, {
+ "AnnoDir",
+ DexFile::kDexTypeAnnotationsDirectoryItem,
+ &dex_ir::Collections::AnnotationsDirectorySize,
+ &dex_ir::Collections::AnnotationsDirectoryOffset
+ }, {
+ "DebugInfo",
+ DexFile::kDexTypeDebugInfoItem,
+ &dex_ir::Collections::DebugInfoSize,
+ &dex_ir::Collections::DebugInfoOffset
+ }, {
+ "CodeItem",
+ DexFile::kDexTypeCodeItem,
+ &dex_ir::Collections::CodeItemsSize,
+ &dex_ir::Collections::CodeItemsOffset
+ }, {
+ "ClassData",
+ DexFile::kDexTypeClassDataItem,
+ &dex_ir::Collections::ClassDatasSize,
+ &dex_ir::Collections::ClassDatasOffset
+ }
+};
+
+class Dumper {
+ public:
+ // Colors are based on the type of the section in MapList.
+ Dumper(const dex_ir::Collections& collections, size_t dex_file_index) {
+ // Build the table that will map from offset to color
+ table_.emplace_back(DexFile::kDexTypeHeaderItem, 0u);
+ for (const FileSection& s : kFileSections) {
+ table_.emplace_back(s.type_, s.offset_fn_(collections));
+ }
+ // Sort into descending order by offset.
+ std::sort(table_.begin(),
+ table_.end(),
+ [](const SectionColor& a, const SectionColor& b) { return a.offset_ > b.offset_; });
+ // Open the file and emit the gnuplot prologue.
+ std::string dex_file_name("classes");
+ std::string out_file_base_name("layout");
+ if (dex_file_index > 0) {
+ out_file_base_name += std::to_string(dex_file_index + 1);
+ dex_file_name += std::to_string(dex_file_index + 1);
+ }
+ dex_file_name += ".dex";
+ std::string out_file_name(out_file_base_name + ".gnuplot");
+ std::string png_file_name(out_file_base_name + ".png");
+ out_file_ = fopen(out_file_name.c_str(), "w");
+ fprintf(out_file_, "set terminal png size 1920,1080\n");
+ fprintf(out_file_, "set output \"%s\"\n", png_file_name.c_str());
+ fprintf(out_file_, "set title \"%s\"\n", dex_file_name.c_str());
+ fprintf(out_file_, "set xlabel \"Page offset into dex\"\n");
+ fprintf(out_file_, "set ylabel \"ClassDef index\"\n");
+ fprintf(out_file_, "set xtics rotate out (");
+ fprintf(out_file_, "\"Header\" %d, ", 0);
+ bool printed_one = false;
+ for (const FileSection& s : kFileSections) {
+ if (s.size_fn_(collections) > 0) {
+ if (printed_one) {
+ fprintf(out_file_, ", ");
+ }
+ fprintf(out_file_, "\"%s\" %d", s.name_.c_str(), s.offset_fn_(collections) / kPageSize);
+ printed_one = true;
+ }
+ }
+ fprintf(out_file_, ")\n");
+ fprintf(out_file_,
+ "plot \"-\" using 1:2:3:4:5 with vector nohead linewidth 1 lc variable notitle\n");
+ }
+
+ int GetColor(uint32_t offset) const {
+ // The dread linear search to find the right section for the reference.
+ uint16_t section = 0;
+ for (uint16_t i = 0; i < table_.size(); ++i) {
+      if (table_[i].offset_ <= offset) {
+ section = table_[i].type_;
+ break;
+ }
+ }
+ // And a lookup table from type to color.
+ ColorMapType::const_iterator iter = kColorMap.find(section);
+ if (iter != kColorMap.end()) {
+ return iter->second;
+ }
+ return 0;
+ }
+
+ void DumpAddressRange(uint32_t from, uint32_t size, int class_index) {
+ const uint32_t low_page = from / kPageSize;
+ const uint32_t high_page = (size > 0) ? (from + size - 1) / kPageSize : low_page;
+ const uint32_t size_delta = high_page - low_page;
+ fprintf(out_file_, "%d %d %d 0 %d\n", low_page, class_index, size_delta, GetColor(from));
+ }
+
+ void DumpAddressRange(const dex_ir::Item* item, int class_index) {
+ if (item != nullptr) {
+ DumpAddressRange(item->GetOffset(), item->GetSize(), class_index);
+ }
+ }
+
+ void DumpStringData(const dex_ir::StringData* string_data, int class_index) {
+ DumpAddressRange(string_data, class_index);
+ }
+
+ void DumpStringId(const dex_ir::StringId* string_id, int class_index) {
+ DumpAddressRange(string_id, class_index);
+ if (string_id == nullptr) {
+ return;
+ }
+ DumpStringData(string_id->DataItem(), class_index);
+ }
+
+  void DumpTypeId(const dex_ir::TypeId* type_id, int class_index) {
+    DumpAddressRange(type_id, class_index);
+    if (type_id != nullptr) { DumpStringId(type_id->GetStringId(), class_index); }
+  }
+
+ void DumpFieldId(const dex_ir::FieldId* field_id, int class_index) {
+ DumpAddressRange(field_id, class_index);
+ if (field_id == nullptr) {
+ return;
+ }
+ DumpTypeId(field_id->Class(), class_index);
+ DumpTypeId(field_id->Type(), class_index);
+ DumpStringId(field_id->Name(), class_index);
+ }
+
+ void DumpFieldItem(const dex_ir::FieldItem* field, int class_index) {
+ DumpAddressRange(field, class_index);
+ if (field == nullptr) {
+ return;
+ }
+ DumpFieldId(field->GetFieldId(), class_index);
+ }
+
+ void DumpProtoId(const dex_ir::ProtoId* proto_id, int class_index) {
+ DumpAddressRange(proto_id, class_index);
+ if (proto_id == nullptr) {
+ return;
+ }
+ DumpStringId(proto_id->Shorty(), class_index);
+ const dex_ir::TypeIdVector& parameters = proto_id->Parameters();
+ for (const dex_ir::TypeId* t : parameters) {
+ DumpTypeId(t, class_index);
+ }
+ DumpTypeId(proto_id->ReturnType(), class_index);
+ }
+
+ void DumpMethodId(const dex_ir::MethodId* method_id, int class_index) {
+ DumpAddressRange(method_id, class_index);
+ if (method_id == nullptr) {
+ return;
+ }
+ DumpTypeId(method_id->Class(), class_index);
+ DumpProtoId(method_id->Proto(), class_index);
+ DumpStringId(method_id->Name(), class_index);
+ }
+
+ void DumpMethodItem(const dex_ir::MethodItem* method, const DexFile* dex_file, int class_index) {
+    if (method == nullptr) {
+      return;
+    }
+    if (profile_info_ != nullptr) {
+      uint32_t method_idx = method->GetMethodId()->GetIndex();
+      MethodReference mr(dex_file, method_idx);
+      if (!profile_info_->ContainsMethod(mr)) {
+        return;
+      }
+    }
+    DumpAddressRange(method, class_index);
+ DumpMethodId(method->GetMethodId(), class_index);
+ const dex_ir::CodeItem* code_item = method->GetCodeItem();
+ if (code_item != nullptr) {
+ DumpAddressRange(code_item, class_index);
+ }
+ }
+
+ ~Dumper() {
+ fclose(out_file_);
+ }
+
+ private:
+ struct SectionColor {
+ public:
+ SectionColor(uint16_t type, uint32_t offset) : type_(type), offset_(offset) { }
+ uint16_t type_;
+ uint32_t offset_;
+ };
+
+ using ColorMapType = std::map<uint16_t, int>;
+ const ColorMapType kColorMap = {
+ { DexFile::kDexTypeHeaderItem, 1 },
+ { DexFile::kDexTypeStringIdItem, 2 },
+ { DexFile::kDexTypeTypeIdItem, 3 },
+ { DexFile::kDexTypeProtoIdItem, 4 },
+ { DexFile::kDexTypeFieldIdItem, 5 },
+ { DexFile::kDexTypeMethodIdItem, 6 },
+ { DexFile::kDexTypeClassDefItem, 7 },
+ { DexFile::kDexTypeTypeList, 8 },
+ { DexFile::kDexTypeAnnotationSetRefList, 9 },
+ { DexFile::kDexTypeAnnotationSetItem, 10 },
+ { DexFile::kDexTypeClassDataItem, 11 },
+ { DexFile::kDexTypeCodeItem, 12 },
+ { DexFile::kDexTypeStringDataItem, 13 },
+ { DexFile::kDexTypeDebugInfoItem, 14 },
+ { DexFile::kDexTypeAnnotationItem, 15 },
+ { DexFile::kDexTypeEncodedArrayItem, 16 },
+    { DexFile::kDexTypeAnnotationsDirectoryItem, 17 }
+ };
+
+ std::vector<SectionColor> table_;
+ FILE* out_file_;
+
+ DISALLOW_COPY_AND_ASSIGN(Dumper);
+};
+
+/*
+ * Dumps a gnuplot data file showing the parts of the dex_file that belong to each class.
+ * If profiling information is present, it dumps only those classes that are marked as hot.
+ */
+void VisualizeDexLayout(dex_ir::Header* header, const DexFile* dex_file, size_t dex_file_index) {
+ std::unique_ptr<Dumper> dumper(new Dumper(header->GetCollections(), dex_file_index));
+
+ const uint32_t class_defs_size = header->GetCollections().ClassDefsSize();
+ for (uint32_t class_index = 0; class_index < class_defs_size; class_index++) {
+ dex_ir::ClassDef* class_def = header->GetCollections().GetClassDef(class_index);
+ if (profile_info_ != nullptr && !profile_info_->ContainsClass(*dex_file, class_index)) {
+ continue;
+ }
+ dumper->DumpAddressRange(class_def, class_index);
+ // Type id.
+ dumper->DumpTypeId(class_def->ClassType(), class_index);
+ // Superclass type id.
+ dumper->DumpTypeId(class_def->Superclass(), class_index);
+ // Interfaces.
+ // TODO(jeffhao): get TypeList from class_def to use Item interface.
+ static constexpr uint32_t kInterfaceSizeKludge = 8;
+ dumper->DumpAddressRange(class_def->InterfacesOffset(), kInterfaceSizeKludge, class_index);
+ // Source file info.
+ dumper->DumpStringId(class_def->SourceFile(), class_index);
+ // Annotations.
+ dumper->DumpAddressRange(class_def->Annotations(), class_index);
+ // TODO(sehr): walk the annotations and dump them.
+ // Class data.
+ dex_ir::ClassData* class_data = class_def->GetClassData();
+ if (class_data != nullptr) {
+ dumper->DumpAddressRange(class_data, class_index);
+ if (class_data->StaticFields()) {
+ for (auto& field_item : *class_data->StaticFields()) {
+ dumper->DumpFieldItem(field_item.get(), class_index);
+ }
+ }
+ if (class_data->InstanceFields()) {
+ for (auto& field_item : *class_data->InstanceFields()) {
+ dumper->DumpFieldItem(field_item.get(), class_index);
+ }
+ }
+ if (class_data->DirectMethods()) {
+ for (auto& method_item : *class_data->DirectMethods()) {
+ dumper->DumpMethodItem(method_item.get(), dex_file, class_index);
+ }
+ }
+ if (class_data->VirtualMethods()) {
+ for (auto& method_item : *class_data->VirtualMethods()) {
+ dumper->DumpMethodItem(method_item.get(), dex_file, class_index);
+ }
+ }
+ }
+ } // for
+}
+
+} // namespace art
diff --git a/dexlayout/dex_visualize.h b/dexlayout/dex_visualize.h
new file mode 100644
index 0000000..b1d2ed7
--- /dev/null
+++ b/dexlayout/dex_visualize.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Header file of the dexlayout utility.
+ *
+ * This is a tool to read dex files into an internal representation,
+ * reorganize the representation, and emit dex files with a better
+ * file layout.
+ */
+
+#ifndef ART_DEXLAYOUT_DEX_VISUALIZE_H_
+#define ART_DEXLAYOUT_DEX_VISUALIZE_H_
+
+#include <stddef.h>
+
+namespace art {
+
+class DexFile;
+namespace dex_ir {
+class Header;
+} // namespace dex_ir
+
+void VisualizeDexLayout(dex_ir::Header* header, const DexFile* dex_file, size_t dex_file_index);
+
+} // namespace art
+
+#endif // ART_DEXLAYOUT_DEX_VISUALIZE_H_
diff --git a/dexlayout/dexlayout.cc b/dexlayout/dexlayout.cc
index 6f34a33..e614137 100644
--- a/dexlayout/dexlayout.cc
+++ b/dexlayout/dexlayout.cc
@@ -34,6 +34,8 @@
#include "dex_ir_builder.h"
#include "dex_file-inl.h"
#include "dex_instruction-inl.h"
+#include "dex_visualize.h"
+#include "jit/offline_profiling_info.h"
#include "os.h"
#include "utils.h"
@@ -50,6 +52,11 @@
FILE* out_file_ = stdout;
/*
+ * Profile information file.
+ */
+ProfileCompilationInfo* profile_info_ = nullptr;
+
+/*
* Flags for use with createAccessFlagStr().
*/
enum AccessFor {
@@ -1587,13 +1594,18 @@
/*
* Dumps the requested sections of the file.
*/
-static void ProcessDexFile(const char* file_name, const DexFile* dex_file) {
+static void ProcessDexFile(const char* file_name, const DexFile* dex_file, size_t dex_file_index) {
if (options_.verbose_) {
fprintf(out_file_, "Opened '%s', DEX version '%.3s'\n",
file_name, dex_file->GetHeader().magic_ + 4);
}
std::unique_ptr<dex_ir::Header> header(dex_ir::DexIrBuilder(*dex_file));
+ if (options_.visualize_pattern_) {
+ VisualizeDexLayout(header.get(), dex_file, dex_file_index);
+ return;
+ }
+
// Headers.
if (options_.show_file_headers_) {
DumpFileHeader(header.get());
@@ -1658,7 +1670,7 @@
fprintf(out_file_, "Checksum verified\n");
} else {
for (size_t i = 0; i < dex_files.size(); i++) {
- ProcessDexFile(file_name, dex_files[i].get());
+ ProcessDexFile(file_name, dex_files[i].get(), i);
}
}
return 0;
diff --git a/dexlayout/dexlayout.h b/dexlayout/dexlayout.h
index 736d230..c4892d2 100644
--- a/dexlayout/dexlayout.h
+++ b/dexlayout/dexlayout.h
@@ -28,6 +28,8 @@
namespace art {
+class ProfileCompilationInfo;
+
/* Supported output formats. */
enum OutputFormat {
kOutputPlain = 0, // default
@@ -47,13 +49,16 @@
bool show_file_headers_;
bool show_section_headers_;
bool verbose_;
+ bool visualize_pattern_;
OutputFormat output_format_;
const char* output_file_name_;
+ const char* profile_file_name_;
};
/* Prototypes. */
extern struct Options options_;
extern FILE* out_file_;
+extern ProfileCompilationInfo* profile_info_;
int ProcessFile(const char* file_name);
} // namespace art
diff --git a/dexlayout/dexlayout_main.cc b/dexlayout/dexlayout_main.cc
index ec5edf4..f385b09 100644
--- a/dexlayout/dexlayout_main.cc
+++ b/dexlayout/dexlayout_main.cc
@@ -25,8 +25,12 @@
#include <stdio.h>
#include <string.h>
#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
#include "base/logging.h"
+#include "jit/offline_profiling_info.h"
#include "mem_map.h"
namespace art {
@@ -37,9 +41,9 @@
* Shows usage.
*/
static void Usage(void) {
- fprintf(stderr, "Copyright (C) 2007 The Android Open Source Project\n\n");
- fprintf(stderr, "%s: [-a] [-c] [-d] [-e] [-f] [-h] [-i] [-l layout] [-o outfile] [-w]"
- " dexfile...\n\n", kProgramName);
+ fprintf(stderr, "Copyright (C) 2016 The Android Open Source Project\n\n");
+ fprintf(stderr, "%s: [-a] [-c] [-d] [-e] [-f] [-h] [-i] [-l layout] [-o outfile] [-p profile]"
+ " [-s] [-w] dexfile...\n\n", kProgramName);
fprintf(stderr, " -a : display annotations\n");
fprintf(stderr, " -b : build dex_ir\n");
fprintf(stderr, " -c : verify checksum and exit\n");
@@ -51,6 +55,8 @@
fprintf(stderr, " -i : ignore checksum failures\n");
fprintf(stderr, " -l : output layout, either 'plain' or 'xml'\n");
fprintf(stderr, " -o : output file name (defaults to stdout)\n");
+ fprintf(stderr, " -p : profile file name (defaults to no profile)\n");
+ fprintf(stderr, " -s : visualize reference pattern\n");
fprintf(stderr, " -w : output dex files\n");
}
@@ -69,7 +75,7 @@
// Parse all arguments.
while (1) {
- const int ic = getopt(argc, argv, "abcdefghil:o:w");
+ const int ic = getopt(argc, argv, "abcdefghil:o:p:sw");
if (ic < 0) {
break; // done
}
@@ -114,6 +120,13 @@
case 'o': // output file
options_.output_file_name_ = optarg;
break;
+ case 'p': // profile file
+ options_.profile_file_name_ = optarg;
+ break;
+ case 's': // visualize access pattern
+ options_.visualize_pattern_ = true;
+ options_.verbose_ = false;
+ break;
case 'w': // output dex files
options_.output_dex_files_ = true;
break;
@@ -146,6 +159,20 @@
}
}
+ // Open profile file.
+ if (options_.profile_file_name_) {
+ int profile_fd = open(options_.profile_file_name_, O_RDONLY);
+ if (profile_fd < 0) {
+ fprintf(stderr, "Can't open %s\n", options_.profile_file_name_);
+ return 1;
+ }
+ profile_info_ = new ProfileCompilationInfo();
+ if (!profile_info_->Load(profile_fd)) {
+ fprintf(stderr, "Can't read profile info from %s\n", options_.profile_file_name_);
+ return 1;
+ }
+ }
+
// Process all files supplied on command line.
int result = 0;
while (optind < argc) {
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index be5224b..fe31176 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -155,8 +155,11 @@
if (isa == kMips || isa == kMips64) {
builder_->WriteMIPSabiflagsSection();
}
- builder_->PrepareDynamicSection(
- elf_file->GetPath(), rodata_size, text_size, oat_file_->BssSize());
+ builder_->PrepareDynamicSection(elf_file->GetPath(),
+ rodata_size,
+ text_size,
+ oat_file_->BssSize(),
+ oat_file_->BssRootsOffset());
builder_->WriteDynamicSection();
Walk();
@@ -1687,7 +1690,9 @@
ImageDumper* const image_dumper_;
};
- static void PrettyObjectValue(std::ostream& os, mirror::Class* type, mirror::Object* value)
+ static void PrettyObjectValue(std::ostream& os,
+ ObjPtr<mirror::Class> type,
+ ObjPtr<mirror::Object> value)
REQUIRES_SHARED(Locks::mutator_lock_) {
CHECK(type != nullptr);
if (value == nullptr) {
@@ -1700,11 +1705,11 @@
mirror::Class* klass = value->AsClass();
os << StringPrintf("%p Class: %s\n", klass, PrettyDescriptor(klass).c_str());
} else {
- os << StringPrintf("%p %s\n", value, PrettyDescriptor(type).c_str());
+ os << StringPrintf("%p %s\n", value.Decode(), PrettyDescriptor(type).c_str());
}
}
- static void PrintField(std::ostream& os, ArtField* field, mirror::Object* obj)
+ static void PrintField(std::ostream& os, ArtField* field, ObjPtr<mirror::Object> obj)
REQUIRES_SHARED(Locks::mutator_lock_) {
os << StringPrintf("%s: ", field->GetName());
switch (field->GetTypeAsPrimitiveType()) {
@@ -1736,16 +1741,17 @@
case Primitive::kPrimNot: {
// Get the value, don't compute the type unless it is non-null as we don't want
// to cause class loading.
- mirror::Object* value = field->GetObj(obj);
+ ObjPtr<mirror::Object> value = field->GetObj(obj);
if (value == nullptr) {
os << StringPrintf("null %s\n", PrettyDescriptor(field->GetTypeDescriptor()).c_str());
} else {
// Grab the field type without causing resolution.
- mirror::Class* field_type = field->GetType<false>();
+ ObjPtr<mirror::Class> field_type = field->GetType<false>();
if (field_type != nullptr) {
PrettyObjectValue(os, field_type, value);
} else {
- os << StringPrintf("%p %s\n", value,
+ os << StringPrintf("%p %s\n",
+ value.Decode(),
PrettyDescriptor(field->GetTypeDescriptor()).c_str());
}
}
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index d58f38c..b259f64 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -504,7 +504,8 @@
void Visit(ArtField* field) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
ArtField* const dest = patch_oat_->RelocatedCopyOf(field);
- dest->SetDeclaringClass(patch_oat_->RelocatedAddressOfPointer(field->GetDeclaringClass()));
+ dest->SetDeclaringClass(
+ patch_oat_->RelocatedAddressOfPointer(field->GetDeclaringClass().Decode()));
}
private:
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index 5e39f42..432ba36 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -1688,7 +1688,7 @@
EXPECT_EQ(res, reinterpret_cast<size_t>(val)) << "Value " << val;
- EXPECT_EQ(val, f->GetObj(trg));
+ EXPECT_OBJ_PTR_EQ(val, f->GetObj(trg));
}
#endif
diff --git a/runtime/art_field-inl.h b/runtime/art_field-inl.h
index ca96169..a52c714 100644
--- a/runtime/art_field-inl.h
+++ b/runtime/art_field-inl.h
@@ -34,15 +34,15 @@
namespace art {
template<ReadBarrierOption kReadBarrierOption>
-inline mirror::Class* ArtField::GetDeclaringClass() {
+inline ObjPtr<mirror::Class> ArtField::GetDeclaringClass() {
GcRootSource gc_root_source(this);
- mirror::Class* result = declaring_class_.Read<kReadBarrierOption>(&gc_root_source);
+ ObjPtr<mirror::Class> result = declaring_class_.Read<kReadBarrierOption>(&gc_root_source);
DCHECK(result != nullptr);
DCHECK(result->IsLoaded() || result->IsErroneous()) << result->GetStatus();
return result;
}
-inline void ArtField::SetDeclaringClass(mirror::Class* new_declaring_class) {
+inline void ArtField::SetDeclaringClass(ObjPtr<mirror::Class> new_declaring_class) {
declaring_class_ = GcRoot<mirror::Class>(new_declaring_class);
}
@@ -61,7 +61,7 @@
return MemberOffset(offset_);
}
-inline uint32_t ArtField::Get32(mirror::Object* object) {
+inline uint32_t ArtField::Get32(ObjPtr<mirror::Object> object) {
DCHECK(object != nullptr) << PrettyField(this);
DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted());
if (UNLIKELY(IsVolatile())) {
@@ -71,7 +71,7 @@
}
template<bool kTransactionActive>
-inline void ArtField::Set32(mirror::Object* object, uint32_t new_value) {
+inline void ArtField::Set32(ObjPtr<mirror::Object> object, uint32_t new_value) {
DCHECK(object != nullptr) << PrettyField(this);
DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted());
if (UNLIKELY(IsVolatile())) {
@@ -81,7 +81,7 @@
}
}
-inline uint64_t ArtField::Get64(mirror::Object* object) {
+inline uint64_t ArtField::Get64(ObjPtr<mirror::Object> object) {
DCHECK(object != nullptr) << PrettyField(this);
DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted());
if (UNLIKELY(IsVolatile())) {
@@ -91,7 +91,7 @@
}
template<bool kTransactionActive>
-inline void ArtField::Set64(mirror::Object* object, uint64_t new_value) {
+inline void ArtField::Set64(ObjPtr<mirror::Object> object, uint64_t new_value) {
DCHECK(object != nullptr) << PrettyField(this);
DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted());
if (UNLIKELY(IsVolatile())) {
@@ -101,23 +101,24 @@
}
}
-inline mirror::Object* ArtField::GetObj(mirror::Object* object) {
+template<class MirrorType>
+inline ObjPtr<MirrorType> ArtField::GetObj(ObjPtr<mirror::Object> object) {
DCHECK(object != nullptr) << PrettyField(this);
DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted());
if (UNLIKELY(IsVolatile())) {
- return object->GetFieldObjectVolatile<mirror::Object>(GetOffset());
+ return object->GetFieldObjectVolatile<MirrorType>(GetOffset());
}
- return object->GetFieldObject<mirror::Object>(GetOffset());
+ return object->GetFieldObject<MirrorType>(GetOffset());
}
template<bool kTransactionActive>
-inline void ArtField::SetObj(mirror::Object* object, mirror::Object* new_value) {
+inline void ArtField::SetObj(ObjPtr<mirror::Object> object, ObjPtr<mirror::Object> new_value) {
DCHECK(object != nullptr) << PrettyField(this);
DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted());
if (UNLIKELY(IsVolatile())) {
- object->SetFieldObjectVolatile<kTransactionActive>(GetOffset(), new_value);
+ object->SetFieldObjectVolatile<kTransactionActive>(GetOffset(), new_value.Decode());
} else {
- object->SetFieldObject<kTransactionActive>(GetOffset(), new_value);
+ object->SetFieldObject<kTransactionActive>(GetOffset(), new_value.Decode());
}
}
@@ -140,46 +141,46 @@
(object)->SetField ## type<kTransactionActive>(GetOffset(), value); \
}
-inline uint8_t ArtField::GetBoolean(mirror::Object* object) {
+inline uint8_t ArtField::GetBoolean(ObjPtr<mirror::Object> object) {
FIELD_GET(object, Boolean);
}
template<bool kTransactionActive>
-inline void ArtField::SetBoolean(mirror::Object* object, uint8_t z) {
+inline void ArtField::SetBoolean(ObjPtr<mirror::Object> object, uint8_t z) {
FIELD_SET(object, Boolean, z);
}
-inline int8_t ArtField::GetByte(mirror::Object* object) {
+inline int8_t ArtField::GetByte(ObjPtr<mirror::Object> object) {
FIELD_GET(object, Byte);
}
template<bool kTransactionActive>
-inline void ArtField::SetByte(mirror::Object* object, int8_t b) {
+inline void ArtField::SetByte(ObjPtr<mirror::Object> object, int8_t b) {
FIELD_SET(object, Byte, b);
}
-inline uint16_t ArtField::GetChar(mirror::Object* object) {
+inline uint16_t ArtField::GetChar(ObjPtr<mirror::Object> object) {
FIELD_GET(object, Char);
}
template<bool kTransactionActive>
-inline void ArtField::SetChar(mirror::Object* object, uint16_t c) {
+inline void ArtField::SetChar(ObjPtr<mirror::Object> object, uint16_t c) {
FIELD_SET(object, Char, c);
}
-inline int16_t ArtField::GetShort(mirror::Object* object) {
+inline int16_t ArtField::GetShort(ObjPtr<mirror::Object> object) {
FIELD_GET(object, Short);
}
template<bool kTransactionActive>
-inline void ArtField::SetShort(mirror::Object* object, int16_t s) {
+inline void ArtField::SetShort(ObjPtr<mirror::Object> object, int16_t s) {
FIELD_SET(object, Short, s);
}
#undef FIELD_GET
#undef FIELD_SET
-inline int32_t ArtField::GetInt(mirror::Object* object) {
+inline int32_t ArtField::GetInt(ObjPtr<mirror::Object> object) {
if (kIsDebugBuild) {
Primitive::Type type = GetTypeAsPrimitiveType();
CHECK(type == Primitive::kPrimInt || type == Primitive::kPrimFloat) << PrettyField(this);
@@ -188,7 +189,7 @@
}
template<bool kTransactionActive>
-inline void ArtField::SetInt(mirror::Object* object, int32_t i) {
+inline void ArtField::SetInt(ObjPtr<mirror::Object> object, int32_t i) {
if (kIsDebugBuild) {
Primitive::Type type = GetTypeAsPrimitiveType();
CHECK(type == Primitive::kPrimInt || type == Primitive::kPrimFloat) << PrettyField(this);
@@ -196,7 +197,7 @@
Set32<kTransactionActive>(object, i);
}
-inline int64_t ArtField::GetLong(mirror::Object* object) {
+inline int64_t ArtField::GetLong(ObjPtr<mirror::Object> object) {
if (kIsDebugBuild) {
Primitive::Type type = GetTypeAsPrimitiveType();
CHECK(type == Primitive::kPrimLong || type == Primitive::kPrimDouble) << PrettyField(this);
@@ -205,7 +206,7 @@
}
template<bool kTransactionActive>
-inline void ArtField::SetLong(mirror::Object* object, int64_t j) {
+inline void ArtField::SetLong(ObjPtr<mirror::Object> object, int64_t j) {
if (kIsDebugBuild) {
Primitive::Type type = GetTypeAsPrimitiveType();
CHECK(type == Primitive::kPrimLong || type == Primitive::kPrimDouble) << PrettyField(this);
@@ -213,7 +214,7 @@
Set64<kTransactionActive>(object, j);
}
-inline float ArtField::GetFloat(mirror::Object* object) {
+inline float ArtField::GetFloat(ObjPtr<mirror::Object> object) {
DCHECK_EQ(Primitive::kPrimFloat, GetTypeAsPrimitiveType()) << PrettyField(this);
JValue bits;
bits.SetI(Get32(object));
@@ -221,14 +222,14 @@
}
template<bool kTransactionActive>
-inline void ArtField::SetFloat(mirror::Object* object, float f) {
+inline void ArtField::SetFloat(ObjPtr<mirror::Object> object, float f) {
DCHECK_EQ(Primitive::kPrimFloat, GetTypeAsPrimitiveType()) << PrettyField(this);
JValue bits;
bits.SetF(f);
Set32<kTransactionActive>(object, bits.GetI());
}
-inline double ArtField::GetDouble(mirror::Object* object) {
+inline double ArtField::GetDouble(ObjPtr<mirror::Object> object) {
DCHECK_EQ(Primitive::kPrimDouble, GetTypeAsPrimitiveType()) << PrettyField(this);
JValue bits;
bits.SetJ(Get64(object));
@@ -236,20 +237,20 @@
}
template<bool kTransactionActive>
-inline void ArtField::SetDouble(mirror::Object* object, double d) {
+inline void ArtField::SetDouble(ObjPtr<mirror::Object> object, double d) {
DCHECK_EQ(Primitive::kPrimDouble, GetTypeAsPrimitiveType()) << PrettyField(this);
JValue bits;
bits.SetD(d);
Set64<kTransactionActive>(object, bits.GetJ());
}
-inline mirror::Object* ArtField::GetObject(mirror::Object* object) {
+inline ObjPtr<mirror::Object> ArtField::GetObject(ObjPtr<mirror::Object> object) {
DCHECK_EQ(Primitive::kPrimNot, GetTypeAsPrimitiveType()) << PrettyField(this);
return GetObj(object);
}
template<bool kTransactionActive>
-inline void ArtField::SetObject(mirror::Object* object, mirror::Object* l) {
+inline void ArtField::SetObject(ObjPtr<mirror::Object> object, ObjPtr<mirror::Object> l) {
DCHECK_EQ(Primitive::kPrimNot, GetTypeAsPrimitiveType()) << PrettyField(this);
SetObj<kTransactionActive>(object, l);
}
@@ -288,16 +289,16 @@
}
template <bool kResolve>
-inline mirror::Class* ArtField::GetType() {
+inline ObjPtr<mirror::Class> ArtField::GetType() {
const uint32_t field_index = GetDexFieldIndex();
- auto* declaring_class = GetDeclaringClass();
+ ObjPtr<mirror::Class> declaring_class = GetDeclaringClass();
if (UNLIKELY(declaring_class->IsProxyClass())) {
return ProxyFindSystemClass(GetTypeDescriptor());
}
auto* dex_cache = declaring_class->GetDexCache();
const DexFile* const dex_file = dex_cache->GetDexFile();
const DexFile::FieldId& field_id = dex_file->GetFieldId(field_index);
- mirror::Class* type = dex_cache->GetResolvedType(field_id.type_idx_);
+ ObjPtr<mirror::Class> type = dex_cache->GetResolvedType(field_id.type_idx_);
if (kResolve && UNLIKELY(type == nullptr)) {
type = ResolveGetType(field_id.type_idx_);
CHECK(type != nullptr || Thread::Current()->IsExceptionPending());
@@ -309,7 +310,7 @@
return Primitive::ComponentSize(GetTypeAsPrimitiveType());
}
-inline mirror::DexCache* ArtField::GetDexCache() REQUIRES_SHARED(Locks::mutator_lock_) {
+inline ObjPtr<mirror::DexCache> ArtField::GetDexCache() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetDeclaringClass()->GetDexCache();
}
@@ -317,13 +318,13 @@
return GetDexCache()->GetDexFile();
}
-inline mirror::String* ArtField::GetStringName(Thread* self, bool resolve) {
+inline ObjPtr<mirror::String> ArtField::GetStringName(Thread* self, bool resolve) {
auto dex_field_index = GetDexFieldIndex();
CHECK_NE(dex_field_index, DexFile::kDexNoIndex);
- auto* dex_cache = GetDexCache();
+ ObjPtr<mirror::DexCache> dex_cache = GetDexCache();
const auto* dex_file = dex_cache->GetDexFile();
const auto& field_id = dex_file->GetFieldId(dex_field_index);
- auto* name = dex_cache->GetResolvedString(field_id.name_idx_);
+ ObjPtr<mirror::String> name = dex_cache->GetResolvedString(field_id.name_idx_);
if (resolve && name == nullptr) {
name = ResolveGetStringName(self, *dex_file, field_id.name_idx_, dex_cache);
}
@@ -337,8 +338,8 @@
template <typename Visitor>
inline void ArtField::UpdateObjects(const Visitor& visitor) {
- mirror::Class* old_class = DeclaringClassRoot().Read<kWithoutReadBarrier>();
- mirror::Class* new_class = visitor(old_class);
+ ObjPtr<mirror::Class> old_class = DeclaringClassRoot().Read<kWithoutReadBarrier>();
+ ObjPtr<mirror::Class> new_class = visitor(old_class.Decode());
if (old_class != new_class) {
SetDeclaringClass(new_class);
}
@@ -369,7 +370,7 @@
}
template <bool kExactOffset>
-inline ArtField* ArtField::FindInstanceFieldWithOffset(mirror::Class* klass,
+inline ArtField* ArtField::FindInstanceFieldWithOffset(ObjPtr<mirror::Class> klass,
uint32_t field_offset) {
DCHECK(klass != nullptr);
ArtField* field = FindFieldWithOffset<kExactOffset>(klass->GetIFields(), field_offset);
@@ -382,7 +383,8 @@
}
template <bool kExactOffset>
-inline ArtField* ArtField::FindStaticFieldWithOffset(mirror::Class* klass, uint32_t field_offset) {
+inline ArtField* ArtField::FindStaticFieldWithOffset(ObjPtr<mirror::Class> klass,
+ uint32_t field_offset) {
DCHECK(klass != nullptr);
return FindFieldWithOffset<kExactOffset>(klass->GetSFields(), field_offset);
}
diff --git a/runtime/art_field.cc b/runtime/art_field.cc
index 3b4db0b..78c62d6 100644
--- a/runtime/art_field.cc
+++ b/runtime/art_field.cc
@@ -30,10 +30,6 @@
namespace art {
-ArtField::ArtField() : access_flags_(0), field_dex_idx_(0), offset_(0) {
- declaring_class_ = GcRoot<mirror::Class>(nullptr);
-}
-
void ArtField::SetOffset(MemberOffset num_bytes) {
DCHECK(GetDeclaringClass()->IsLoaded() || GetDeclaringClass()->IsErroneous());
if (kIsDebugBuild && Runtime::Current()->IsAotCompiler() &&
@@ -47,20 +43,23 @@
offset_ = num_bytes.Uint32Value();
}
-mirror::Class* ArtField::ProxyFindSystemClass(const char* descriptor) {
+ObjPtr<mirror::Class> ArtField::ProxyFindSystemClass(const char* descriptor) {
DCHECK(GetDeclaringClass()->IsProxyClass());
return Runtime::Current()->GetClassLinker()->FindSystemClass(Thread::Current(), descriptor);
}
-mirror::Class* ArtField::ResolveGetType(uint32_t type_idx) {
+ObjPtr<mirror::Class> ArtField::ResolveGetType(uint32_t type_idx) {
return Runtime::Current()->GetClassLinker()->ResolveType(type_idx, this);
}
-mirror::String* ArtField::ResolveGetStringName(Thread* self, const DexFile& dex_file,
- uint32_t string_idx, mirror::DexCache* dex_cache) {
+ObjPtr<mirror::String> ArtField::ResolveGetStringName(Thread* self,
+ const DexFile& dex_file,
+ uint32_t string_idx,
+ ObjPtr<mirror::DexCache> dex_cache) {
StackHandleScope<1> hs(self);
- return Runtime::Current()->GetClassLinker()->ResolveString(
- dex_file, string_idx, hs.NewHandle(dex_cache));
+ return Runtime::Current()->GetClassLinker()->ResolveString(dex_file,
+ string_idx,
+ hs.NewHandle(dex_cache));
}
} // namespace art
diff --git a/runtime/art_field.h b/runtime/art_field.h
index 16e6c75..8ba383c 100644
--- a/runtime/art_field.h
+++ b/runtime/art_field.h
@@ -21,6 +21,7 @@
#include "gc_root.h"
#include "modifiers.h"
+#include "obj_ptr.h"
#include "offsets.h"
#include "primitive.h"
#include "read_barrier_option.h"
@@ -39,12 +40,10 @@
class ArtField FINAL {
public:
- ArtField();
-
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- mirror::Class* GetDeclaringClass() REQUIRES_SHARED(Locks::mutator_lock_);
+ ObjPtr<mirror::Class> GetDeclaringClass() REQUIRES_SHARED(Locks::mutator_lock_);
- void SetDeclaringClass(mirror::Class *new_declaring_class)
+ void SetDeclaringClass(ObjPtr<mirror::Class> new_declaring_class)
REQUIRES_SHARED(Locks::mutator_lock_);
uint32_t GetAccessFlags() REQUIRES_SHARED(Locks::mutator_lock_);
@@ -87,68 +86,72 @@
void SetOffset(MemberOffset num_bytes) REQUIRES_SHARED(Locks::mutator_lock_);
// field access, null object for static fields
- uint8_t GetBoolean(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_);
+ uint8_t GetBoolean(ObjPtr<mirror::Object> object) REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive>
- void SetBoolean(mirror::Object* object, uint8_t z) REQUIRES_SHARED(Locks::mutator_lock_);
+ void SetBoolean(ObjPtr<mirror::Object> object, uint8_t z) REQUIRES_SHARED(Locks::mutator_lock_);
- int8_t GetByte(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_);
+ int8_t GetByte(ObjPtr<mirror::Object> object) REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive>
- void SetByte(mirror::Object* object, int8_t b) REQUIRES_SHARED(Locks::mutator_lock_);
+ void SetByte(ObjPtr<mirror::Object> object, int8_t b) REQUIRES_SHARED(Locks::mutator_lock_);
- uint16_t GetChar(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_);
+ uint16_t GetChar(ObjPtr<mirror::Object> object) REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive>
- void SetChar(mirror::Object* object, uint16_t c) REQUIRES_SHARED(Locks::mutator_lock_);
+ void SetChar(ObjPtr<mirror::Object> object, uint16_t c) REQUIRES_SHARED(Locks::mutator_lock_);
- int16_t GetShort(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_);
+ int16_t GetShort(ObjPtr<mirror::Object> object) REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive>
- void SetShort(mirror::Object* object, int16_t s) REQUIRES_SHARED(Locks::mutator_lock_);
+ void SetShort(ObjPtr<mirror::Object> object, int16_t s) REQUIRES_SHARED(Locks::mutator_lock_);
- int32_t GetInt(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_);
+ int32_t GetInt(ObjPtr<mirror::Object> object) REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive>
- void SetInt(mirror::Object* object, int32_t i) REQUIRES_SHARED(Locks::mutator_lock_);
+ void SetInt(ObjPtr<mirror::Object> object, int32_t i) REQUIRES_SHARED(Locks::mutator_lock_);
- int64_t GetLong(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_);
+ int64_t GetLong(ObjPtr<mirror::Object> object) REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive>
- void SetLong(mirror::Object* object, int64_t j) REQUIRES_SHARED(Locks::mutator_lock_);
+ void SetLong(ObjPtr<mirror::Object> object, int64_t j) REQUIRES_SHARED(Locks::mutator_lock_);
- float GetFloat(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_);
+ float GetFloat(ObjPtr<mirror::Object> object) REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive>
- void SetFloat(mirror::Object* object, float f) REQUIRES_SHARED(Locks::mutator_lock_);
+ void SetFloat(ObjPtr<mirror::Object> object, float f) REQUIRES_SHARED(Locks::mutator_lock_);
- double GetDouble(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_);
+ double GetDouble(ObjPtr<mirror::Object> object) REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive>
- void SetDouble(mirror::Object* object, double d) REQUIRES_SHARED(Locks::mutator_lock_);
+ void SetDouble(ObjPtr<mirror::Object> object, double d) REQUIRES_SHARED(Locks::mutator_lock_);
- mirror::Object* GetObject(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_);
+ ObjPtr<mirror::Object> GetObject(ObjPtr<mirror::Object> object)
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive>
- void SetObject(mirror::Object* object, mirror::Object* l)
+ void SetObject(ObjPtr<mirror::Object> object, ObjPtr<mirror::Object> l)
REQUIRES_SHARED(Locks::mutator_lock_);
// Raw field accesses.
- uint32_t Get32(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_);
+ uint32_t Get32(ObjPtr<mirror::Object> object) REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive>
- void Set32(mirror::Object* object, uint32_t new_value)
+ void Set32(ObjPtr<mirror::Object> object, uint32_t new_value)
REQUIRES_SHARED(Locks::mutator_lock_);
- uint64_t Get64(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_);
+ uint64_t Get64(ObjPtr<mirror::Object> object) REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive>
- void Set64(mirror::Object* object, uint64_t new_value) REQUIRES_SHARED(Locks::mutator_lock_);
+ void Set64(ObjPtr<mirror::Object> object, uint64_t new_value)
+ REQUIRES_SHARED(Locks::mutator_lock_);
- mirror::Object* GetObj(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_);
+ template<class MirrorType = mirror::Object>
+ ObjPtr<MirrorType> GetObj(ObjPtr<mirror::Object> object)
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive>
- void SetObj(mirror::Object* object, mirror::Object* new_value)
+ void SetObj(ObjPtr<mirror::Object> object, ObjPtr<mirror::Object> new_value)
REQUIRES_SHARED(Locks::mutator_lock_);
// NO_THREAD_SAFETY_ANALYSIS since we don't know what the callback requires.
@@ -163,20 +166,20 @@
// If kExactOffset is true then we only find the matching offset, not the field containing the
// offset.
template <bool kExactOffset = true>
- static ArtField* FindInstanceFieldWithOffset(mirror::Class* klass, uint32_t field_offset)
+ static ArtField* FindInstanceFieldWithOffset(ObjPtr<mirror::Class> klass, uint32_t field_offset)
REQUIRES_SHARED(Locks::mutator_lock_);
// Returns a static field with this offset in the given class or null if not found.
// If kExactOffset is true then we only find the matching offset, not the field containing the
// offset.
template <bool kExactOffset = true>
- static ArtField* FindStaticFieldWithOffset(mirror::Class* klass, uint32_t field_offset)
+ static ArtField* FindStaticFieldWithOffset(ObjPtr<mirror::Class> klass, uint32_t field_offset)
REQUIRES_SHARED(Locks::mutator_lock_);
const char* GetName() REQUIRES_SHARED(Locks::mutator_lock_);
// Resolves / returns the name from the dex cache.
- mirror::String* GetStringName(Thread* self, bool resolve)
+ ObjPtr<mirror::String> GetStringName(Thread* self, bool resolve)
REQUIRES_SHARED(Locks::mutator_lock_);
const char* GetTypeDescriptor() REQUIRES_SHARED(Locks::mutator_lock_);
@@ -186,11 +189,11 @@
bool IsPrimitiveType() REQUIRES_SHARED(Locks::mutator_lock_);
template <bool kResolve>
- mirror::Class* GetType() REQUIRES_SHARED(Locks::mutator_lock_);
+ ObjPtr<mirror::Class> GetType() REQUIRES_SHARED(Locks::mutator_lock_);
size_t FieldSize() REQUIRES_SHARED(Locks::mutator_lock_);
- mirror::DexCache* GetDexCache() REQUIRES_SHARED(Locks::mutator_lock_);
+ ObjPtr<mirror::DexCache> GetDexCache() REQUIRES_SHARED(Locks::mutator_lock_);
const DexFile* GetDexFile() REQUIRES_SHARED(Locks::mutator_lock_);
@@ -204,22 +207,24 @@
REQUIRES_SHARED(Locks::mutator_lock_);
private:
- mirror::Class* ProxyFindSystemClass(const char* descriptor)
+ ObjPtr<mirror::Class> ProxyFindSystemClass(const char* descriptor)
REQUIRES_SHARED(Locks::mutator_lock_);
- mirror::Class* ResolveGetType(uint32_t type_idx) REQUIRES_SHARED(Locks::mutator_lock_);
- mirror::String* ResolveGetStringName(Thread* self, const DexFile& dex_file, uint32_t string_idx,
- mirror::DexCache* dex_cache)
+ ObjPtr<mirror::Class> ResolveGetType(uint32_t type_idx) REQUIRES_SHARED(Locks::mutator_lock_);
+ ObjPtr<mirror::String> ResolveGetStringName(Thread* self,
+ const DexFile& dex_file,
+ uint32_t string_idx,
+ ObjPtr<mirror::DexCache> dex_cache)
REQUIRES_SHARED(Locks::mutator_lock_);
GcRoot<mirror::Class> declaring_class_;
- uint32_t access_flags_;
+ uint32_t access_flags_ = 0;
// Dex cache index of field id
- uint32_t field_dex_idx_;
+ uint32_t field_dex_idx_ = 0;
// Offset of field within an instance or in the Class' static fields
- uint32_t offset_;
+ uint32_t offset_ = 0;
};
} // namespace art
diff --git a/runtime/base/logging.cc b/runtime/base/logging.cc
index 17873b5..08c036e 100644
--- a/runtime/base/logging.cc
+++ b/runtime/base/logging.cc
@@ -27,7 +27,7 @@
// Headers for LogMessage::LogLine.
#ifdef ART_TARGET_ANDROID
-#include "cutils/log.h"
+#include <android/log.h>
#include <android/set_abort_message.h>
#else
#include <sys/types.h>
@@ -64,7 +64,7 @@
#else
UNUSED(abort_message);
#endif
- Runtime::Abort();
+ Runtime::Abort(abort_message);
}
void InitLogging(char* argv[]) {
diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc
index 9f07702..c671b81 100644
--- a/runtime/check_jni.cc
+++ b/runtime/check_jni.cc
@@ -274,7 +274,7 @@
AbortF("field operation on NULL object: %p", java_object);
return false;
}
- if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(o.Decode())) {
+ if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(o)) {
Runtime::Current()->GetHeap()->DumpSpaces(LOG_STREAM(ERROR));
AbortF("field operation on invalid %s: %p",
ToStr<IndirectRefKind>(GetIndirectRefKind(java_object)).c_str(),
@@ -334,7 +334,7 @@
}
if (invoke != kVirtual) {
ObjPtr<mirror::Class> c = soa.Decode<mirror::Class>(jc);
- if (!m->GetDeclaringClass()->IsAssignableFrom(c.Decode())) {
+ if (!m->GetDeclaringClass()->IsAssignableFrom(c)) {
AbortF("can't call %s %s with class %s", invoke == kStatic ? "static" : "nonvirtual",
PrettyMethod(m).c_str(), PrettyClass(c).c_str());
return false;
@@ -388,7 +388,7 @@
return false;
}
ObjPtr<mirror::Class> c = soa.Decode<mirror::Class>(java_class);
- if (!m->GetDeclaringClass()->IsAssignableFrom(c.Decode())) {
+ if (!m->GetDeclaringClass()->IsAssignableFrom(c)) {
AbortF("can't call static %s on class %s", PrettyMethod(m).c_str(), PrettyClass(c).c_str());
return false;
}
@@ -939,7 +939,7 @@
ObjPtr<mirror::Class> c = soa.Decode<mirror::Class>(jc);
if (c == nullptr) {
*msg += "NULL";
- } else if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(c.Decode())) {
+ } else if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(c)) {
StringAppendF(msg, "INVALID POINTER:%p", jc);
} else if (!c->IsClass()) {
*msg += "INVALID NON-CLASS OBJECT OF TYPE:" + PrettyTypeOf(c);
@@ -1108,7 +1108,7 @@
}
ObjPtr<mirror::Array> a = soa.Decode<mirror::Array>(java_array);
- if (UNLIKELY(!Runtime::Current()->GetHeap()->IsValidObjectAddress(a.Decode()))) {
+ if (UNLIKELY(!Runtime::Current()->GetHeap()->IsValidObjectAddress(a))) {
Runtime::Current()->GetHeap()->DumpSpaces(LOG_STREAM(ERROR));
AbortF("jarray is an invalid %s: %p (%p)",
ToStr<IndirectRefKind>(GetIndirectRefKind(java_array)).c_str(),
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index 51e5aae..378da57 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -63,6 +63,7 @@
}
inline mirror::String* ClassLinker::ResolveString(uint32_t string_idx, ArtMethod* referrer) {
+ Thread::PoisonObjectPointersIfDebug();
mirror::Class* declaring_class = referrer->GetDeclaringClass();
// MethodVerifier refuses methods with string_idx out of bounds.
DCHECK_LT(string_idx, declaring_class->GetDexFile().NumStringIds());;
@@ -70,7 +71,6 @@
mirror::StringDexCachePair::Lookup(declaring_class->GetDexCacheStrings(),
string_idx,
mirror::DexCache::kDexCacheStringCacheSize).Read();
- Thread::PoisonObjectPointersIfDebug();
if (UNLIKELY(string == nullptr)) {
StackHandleScope<1> hs(Thread::Current());
Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
@@ -84,8 +84,8 @@
}
inline mirror::Class* ClassLinker::ResolveType(uint16_t type_idx, ArtMethod* referrer) {
- mirror::Class* resolved_type = referrer->GetDexCacheResolvedType(type_idx, image_pointer_size_);
Thread::PoisonObjectPointersIfDebug();
+ mirror::Class* resolved_type = referrer->GetDexCacheResolvedType(type_idx, image_pointer_size_);
if (UNLIKELY(resolved_type == nullptr)) {
mirror::Class* declaring_class = referrer->GetDeclaringClass();
StackHandleScope<2> hs(Thread::Current());
@@ -100,10 +100,10 @@
}
inline mirror::Class* ClassLinker::ResolveType(uint16_t type_idx, ArtField* referrer) {
- mirror::Class* declaring_class = referrer->GetDeclaringClass();
+ Thread::PoisonObjectPointersIfDebug();
+ ObjPtr<mirror::Class> declaring_class = referrer->GetDeclaringClass();
mirror::DexCache* dex_cache_ptr = declaring_class->GetDexCache();
mirror::Class* resolved_type = dex_cache_ptr->GetResolvedType(type_idx);
- Thread::PoisonObjectPointersIfDebug();
if (UNLIKELY(resolved_type == nullptr)) {
StackHandleScope<2> hs(Thread::Current());
Handle<mirror::DexCache> dex_cache(hs.NewHandle(dex_cache_ptr));
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 7dea614..0d3c012 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1090,7 +1090,7 @@
}
static mirror::String* GetDexPathListElementName(ScopedObjectAccessUnchecked& soa,
- mirror::Object* element)
+ ObjPtr<mirror::Object> element)
REQUIRES_SHARED(Locks::mutator_lock_) {
ArtField* const dex_file_field =
soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile);
@@ -1100,11 +1100,11 @@
DCHECK(dex_file_name_field != nullptr);
DCHECK(element != nullptr);
CHECK_EQ(dex_file_field->GetDeclaringClass(), element->GetClass()) << PrettyTypeOf(element);
- mirror::Object* dex_file = dex_file_field->GetObject(element);
+ ObjPtr<mirror::Object> dex_file = dex_file_field->GetObject(element);
if (dex_file == nullptr) {
return nullptr;
}
- mirror::Object* const name_object = dex_file_name_field->GetObject(dex_file);
+ ObjPtr<mirror::Object> name_object = dex_file_name_field->GetObject(dex_file);
if (name_object != nullptr) {
return name_object->AsString();
}
@@ -1131,28 +1131,28 @@
// Unsupported class loader.
return false;
}
- mirror::Object* dex_path_list = dex_path_list_field->GetObject(class_loader);
+ ObjPtr<mirror::Object> dex_path_list = dex_path_list_field->GetObject(class_loader);
if (dex_path_list != nullptr) {
// DexPathList has an array dexElements of Elements[] which each contain a dex file.
- mirror::Object* dex_elements_obj = dex_elements_field->GetObject(dex_path_list);
+ ObjPtr<mirror::Object> dex_elements_obj = dex_elements_field->GetObject(dex_path_list);
// Loop through each dalvik.system.DexPathList$Element's dalvik.system.DexFile and look
// at the mCookie which is a DexFile vector.
if (dex_elements_obj != nullptr) {
- mirror::ObjectArray<mirror::Object>* dex_elements =
+ ObjPtr<mirror::ObjectArray<mirror::Object>> dex_elements =
dex_elements_obj->AsObjectArray<mirror::Object>();
// Reverse order since we insert the parent at the front.
for (int32_t i = dex_elements->GetLength() - 1; i >= 0; --i) {
- mirror::Object* const element = dex_elements->GetWithoutChecks(i);
+ ObjPtr<mirror::Object> element = dex_elements->GetWithoutChecks(i);
if (element == nullptr) {
*error_msg = StringPrintf("Null dex element at index %d", i);
return false;
}
- mirror::String* const name = GetDexPathListElementName(soa, element);
+ ObjPtr<mirror::String> const name = GetDexPathListElementName(soa, element);
if (name == nullptr) {
*error_msg = StringPrintf("Null name for dex element at index %d", i);
return false;
}
- out_dex_file_names->push_front(name);
+ out_dex_file_names->push_front(name.Decode());
}
}
}
@@ -2395,12 +2395,12 @@
ArtField* const cookie_field = soa.DecodeField(WellKnownClasses::dalvik_system_DexFile_cookie);
ArtField* const dex_file_field =
soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile);
- mirror::Object* dex_path_list =
+ ObjPtr<mirror::Object> dex_path_list =
soa.DecodeField(WellKnownClasses::dalvik_system_PathClassLoader_pathList)->
GetObject(class_loader.Get());
if (dex_path_list != nullptr && dex_file_field != nullptr && cookie_field != nullptr) {
// DexPathList has an array dexElements of Elements[] which each contain a dex file.
- mirror::Object* dex_elements_obj =
+ ObjPtr<mirror::Object> dex_elements_obj =
soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList_dexElements)->
GetObject(dex_path_list);
// Loop through each dalvik.system.DexPathList$Element's dalvik.system.DexFile and look
@@ -2409,14 +2409,14 @@
Handle<mirror::ObjectArray<mirror::Object>> dex_elements =
hs.NewHandle(dex_elements_obj->AsObjectArray<mirror::Object>());
for (int32_t i = 0; i < dex_elements->GetLength(); ++i) {
- mirror::Object* element = dex_elements->GetWithoutChecks(i);
+ ObjPtr<mirror::Object> element = dex_elements->GetWithoutChecks(i);
if (element == nullptr) {
// Should never happen, fall back to java code to throw a NPE.
break;
}
- mirror::Object* dex_file = dex_file_field->GetObject(element);
+ ObjPtr<mirror::Object> dex_file = dex_file_field->GetObject(element);
if (dex_file != nullptr) {
- mirror::LongArray* long_array = cookie_field->GetObject(dex_file)->AsLongArray();
+ ObjPtr<mirror::LongArray> long_array = cookie_field->GetObject(dex_file)->AsLongArray();
if (long_array == nullptr) {
// This should never happen so log a warning.
LOG(WARNING) << "Null DexFile::mCookie for " << descriptor;
@@ -7638,6 +7638,32 @@
return string;
}
+ObjPtr<mirror::Class> ClassLinker::LookupResolvedType(const DexFile& dex_file,
+ uint16_t type_idx,
+ ObjPtr<mirror::DexCache> dex_cache,
+ ObjPtr<mirror::ClassLoader> class_loader) {
+ ObjPtr<mirror::Class> type = dex_cache->GetResolvedType(type_idx);
+ if (type == nullptr) {
+ const char* descriptor = dex_file.StringByTypeIdx(type_idx);
+ DCHECK_NE(*descriptor, '\0') << "descriptor is empty string";
+ if (descriptor[1] == '\0') {
+ // only the descriptors of primitive types should be 1 character long, also avoid class lookup
+ // for primitive classes that aren't backed by dex files.
+ type = FindPrimitiveClass(descriptor[0]);
+ } else {
+ Thread* const self = Thread::Current();
+ DCHECK(self != nullptr);
+ const size_t hash = ComputeModifiedUtf8Hash(descriptor);
+ // Find the class in the loaded classes table.
+ type = LookupClass(self, descriptor, hash, class_loader.Decode());
+ }
+ }
+  if (type != nullptr && type->IsResolved()) {
+ return type;
+ }
+ return nullptr;
+}
+
mirror::Class* ClassLinker::ResolveType(const DexFile& dex_file,
uint16_t type_idx,
mirror::Class* referrer) {
@@ -8256,16 +8282,18 @@
ScopedObjectAccessUnchecked soa(self);
// For now, create a libcore-level DexFile for each ART DexFile. This "explodes" multidex.
- StackHandleScope<10> hs(self);
+ StackHandleScope<11> hs(self);
ArtField* dex_elements_field =
soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList_dexElements);
- mirror::Class* dex_elements_class = dex_elements_field->GetType<true>();
- DCHECK(dex_elements_class != nullptr);
+ Handle<mirror::Class> dex_elements_class(hs.NewHandle(dex_elements_field->GetType<true>()));
+ DCHECK(dex_elements_class.Get() != nullptr);
DCHECK(dex_elements_class->IsArrayClass());
Handle<mirror::ObjectArray<mirror::Object>> h_dex_elements(hs.NewHandle(
- mirror::ObjectArray<mirror::Object>::Alloc(self, dex_elements_class, dex_files.size())));
+ mirror::ObjectArray<mirror::Object>::Alloc(self,
+ dex_elements_class.Get(),
+ dex_files.size())));
Handle<mirror::Class> h_dex_element_class =
hs.NewHandle(dex_elements_class->GetComponentType());
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index df7fb61..f69a576 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -278,6 +278,14 @@
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_, !Roles::uninterruptible_);
+ // Look up a resolved type with the given ID from the DexFile. The ClassLoader is used to search
+ // for the type, since it may be referenced from but not contained within the given DexFile.
+ ObjPtr<mirror::Class> LookupResolvedType(const DexFile& dex_file,
+ uint16_t type_idx,
+ ObjPtr<mirror::DexCache> dex_cache,
+ ObjPtr<mirror::ClassLoader> class_loader)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
// Resolve a type with the given ID from the DexFile, storing the
// result in DexCache. The ClassLoader is used to search for the
// type, since it may be referenced from but not contained within
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 451b752..4a926e7 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -243,10 +243,10 @@
kRuntimePointerSize));
}
- void AssertField(mirror::Class* klass, ArtField* field)
+ void AssertField(ObjPtr<mirror::Class> klass, ArtField* field)
REQUIRES_SHARED(Locks::mutator_lock_) {
EXPECT_TRUE(field != nullptr);
- EXPECT_EQ(klass, field->GetDeclaringClass());
+ EXPECT_OBJ_PTR_EQ(klass, field->GetDeclaringClass());
EXPECT_TRUE(field->GetName() != nullptr);
EXPECT_TRUE(field->GetType<true>() != nullptr);
}
@@ -358,7 +358,7 @@
MemberOffset current_ref_offset = start_ref_offset;
for (size_t i = 0; i < klass->NumInstanceFields(); i++) {
ArtField* field = klass->GetInstanceField(i);
- mirror::Class* field_type = field->GetType<true>();
+ ObjPtr<mirror::Class> field_type = field->GetType<true>();
ASSERT_TRUE(field_type != nullptr);
if (!field->IsPrimitiveType()) {
ASSERT_TRUE(!field_type->IsPrimitive());
@@ -865,6 +865,28 @@
AssertNonExistentClass("[[[[LNonExistentClass;");
}
+TEST_F(ClassLinkerTest, LookupResolvedType) {
+ ScopedObjectAccess soa(Thread::Current());
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader>(LoadDex("MyClass"))));
+ AssertNonExistentClass("LMyClass;");
+ ObjPtr<mirror::Class> klass = class_linker_->FindClass(soa.Self(), "LMyClass;", class_loader);
+ uint32_t type_idx = klass->GetClassDef()->class_idx_;
+ ObjPtr<mirror::DexCache> dex_cache = klass->GetDexCache();
+ const DexFile& dex_file = klass->GetDexFile();
+ EXPECT_EQ(dex_cache->GetResolvedType(type_idx), klass.Decode());
+ EXPECT_OBJ_PTR_EQ(
+ class_linker_->LookupResolvedType(dex_file, type_idx, dex_cache, class_loader.Get()),
+ klass);
+ // Zero out the resolved type and make sure LookupResolvedType still finds it.
+ dex_cache->SetResolvedType(type_idx, nullptr);
+ EXPECT_TRUE(dex_cache->GetResolvedType(type_idx) == nullptr);
+ EXPECT_OBJ_PTR_EQ(
+ class_linker_->LookupResolvedType(dex_file, type_idx, dex_cache, class_loader.Get()),
+ klass);
+}
+
TEST_F(ClassLinkerTest, LibCore) {
ScopedObjectAccess soa(Thread::Current());
ASSERT_TRUE(java_lang_dex_file_ != nullptr);
@@ -1018,8 +1040,8 @@
"Ljava/lang/String;");
EXPECT_EQ(s8->GetTypeAsPrimitiveType(), Primitive::kPrimNot);
EXPECT_TRUE(s8->GetObject(statics.Get())->AsString()->Equals("android"));
- s8->SetObject<false>(s8->GetDeclaringClass(),
- mirror::String::AllocFromModifiedUtf8(soa.Self(), "robot"));
+ mirror::String* str_value = mirror::String::AllocFromModifiedUtf8(soa.Self(), "robot");
+ s8->SetObject<false>(s8->GetDeclaringClass(), str_value);
// TODO: Remove EXPECT_FALSE when GCC can handle EXPECT_EQ
// http://code.google.com/p/googletest/issues/detail?id=322
diff --git a/runtime/class_table-inl.h b/runtime/class_table-inl.h
index d52365d..3e54a64 100644
--- a/runtime/class_table-inl.h
+++ b/runtime/class_table-inl.h
@@ -18,6 +18,7 @@
#define ART_RUNTIME_CLASS_TABLE_INL_H_
#include "class_table.h"
+#include "oat_file.h"
namespace art {
@@ -32,6 +33,11 @@
for (GcRoot<mirror::Object>& root : strong_roots_) {
visitor.VisitRoot(root.AddressWithoutBarrier());
}
+ for (const OatFile* oat_file : oat_files_) {
+ for (GcRoot<mirror::Object>& root : oat_file->GetBssGcRoots()) {
+ visitor.VisitRootIfNonNull(root.AddressWithoutBarrier());
+ }
+ }
}
template<class Visitor>
@@ -45,6 +51,11 @@
for (GcRoot<mirror::Object>& root : strong_roots_) {
visitor.VisitRoot(root.AddressWithoutBarrier());
}
+ for (const OatFile* oat_file : oat_files_) {
+ for (GcRoot<mirror::Object>& root : oat_file->GetBssGcRoots()) {
+ visitor.VisitRootIfNonNull(root.AddressWithoutBarrier());
+ }
+ }
}
template <typename Visitor>
diff --git a/runtime/class_table.cc b/runtime/class_table.cc
index 0600876..2ae7e8c 100644
--- a/runtime/class_table.cc
+++ b/runtime/class_table.cc
@@ -165,6 +165,16 @@
}
}
strong_roots_.push_back(GcRoot<mirror::Object>(obj));
+ // If `obj` is a dex cache associated with a new oat file with GC roots, add it to oat_files_.
+ if (obj->IsDexCache()) {
+ const DexFile* dex_file = down_cast<mirror::DexCache*>(obj)->GetDexFile();
+ if (dex_file != nullptr && dex_file->GetOatDexFile() != nullptr) {
+ const OatFile* oat_file = dex_file->GetOatDexFile()->GetOatFile();
+ if (!oat_file->GetBssGcRoots().empty() && !ContainsElement(oat_files_, oat_file)) {
+ oat_files_.push_back(oat_file);
+ }
+ }
+ }
return true;
}
@@ -201,6 +211,7 @@
void ClassTable::ClearStrongRoots() {
WriterMutexLock mu(Thread::Current(), lock_);
+ oat_files_.clear();
strong_roots_.clear();
}
} // namespace art
diff --git a/runtime/class_table.h b/runtime/class_table.h
index 8c91806..acb15c7 100644
--- a/runtime/class_table.h
+++ b/runtime/class_table.h
@@ -32,6 +32,8 @@
namespace art {
+class OatFile;
+
namespace mirror {
class ClassLoader;
} // namespace mirror
@@ -173,6 +175,8 @@
// loader which may not be owned by the class loader must be held strongly live. Also dex caches
// are held live to prevent them being unloading once they have classes in them.
std::vector<GcRoot<mirror::Object>> strong_roots_ GUARDED_BY(lock_);
+ // Keep track of oat files with GC roots associated with dex caches in `strong_roots_`.
+ std::vector<const OatFile*> oat_files_ GUARDED_BY(lock_);
friend class ImageWriter; // for InsertWithoutLocks.
};
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index eda1ddd..ea07195 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -523,12 +523,12 @@
ArtField* cookie_field = soa.DecodeField(WellKnownClasses::dalvik_system_DexFile_cookie);
ArtField* dex_file_field =
soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile);
- mirror::Object* dex_path_list =
+ ObjPtr<mirror::Object> dex_path_list =
soa.DecodeField(WellKnownClasses::dalvik_system_PathClassLoader_pathList)->
GetObject(class_loader.Get());
if (dex_path_list != nullptr && dex_file_field!= nullptr && cookie_field != nullptr) {
// DexPathList has an array dexElements of Elements[] which each contain a dex file.
- mirror::Object* dex_elements_obj =
+ ObjPtr<mirror::Object> dex_elements_obj =
soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList_dexElements)->
GetObject(dex_path_list);
// Loop through each dalvik.system.DexPathList$Element's dalvik.system.DexFile and look
@@ -537,14 +537,14 @@
Handle<mirror::ObjectArray<mirror::Object>> dex_elements =
hs.NewHandle(dex_elements_obj->AsObjectArray<mirror::Object>());
for (int32_t i = 0; i < dex_elements->GetLength(); ++i) {
- mirror::Object* element = dex_elements->GetWithoutChecks(i);
+ ObjPtr<mirror::Object> element = dex_elements->GetWithoutChecks(i);
if (element == nullptr) {
// Should never happen, fall back to java code to throw a NPE.
break;
}
- mirror::Object* dex_file = dex_file_field->GetObject(element);
+ ObjPtr<mirror::Object> dex_file = dex_file_field->GetObject(element);
if (dex_file != nullptr) {
- mirror::LongArray* long_array = cookie_field->GetObject(dex_file)->AsLongArray();
+ ObjPtr<mirror::LongArray> long_array = cookie_field->GetObject(dex_file)->AsLongArray();
DCHECK(long_array != nullptr);
int32_t long_array_size = long_array->GetLength();
for (int32_t j = kDexFileIndexStart; j < long_array_size; ++j) {
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index a7948e4..2158d81 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -25,10 +25,18 @@
#include "arch/instruction_set.h"
#include "base/mutex.h"
#include "globals.h"
+// TODO: Add inl file and avoid including inl.
+#include "obj_ptr-inl.h"
#include "os.h"
namespace art {
+// ObjPtr comparison helpers to avoid needing .Decode() everywhere in tests.
+// Note: no trailing semicolon in the macro bodies, so use sites supply their
+// own `;` and the macros remain safe inside braceless if/else statements.
+#define EXPECT_OBJ_PTR_EQ(a, b) EXPECT_EQ(MakeObjPtr(a).Decode(), MakeObjPtr(b).Decode())
+#define ASSERT_OBJ_PTR_EQ(a, b) ASSERT_EQ(MakeObjPtr(a).Decode(), MakeObjPtr(b).Decode())
+#define EXPECT_OBJ_PTR_NE(a, b) EXPECT_NE(MakeObjPtr(a).Decode(), MakeObjPtr(b).Decode())
+#define ASSERT_OBJ_PTR_NE(a, b) ASSERT_NE(MakeObjPtr(a).Decode(), MakeObjPtr(b).Decode())
+
class ClassLinker;
class CompilerCallbacks;
class DexFile;
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 0206cae..1bdb0fc 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -46,6 +46,7 @@
#include "mirror/object_array-inl.h"
#include "mirror/string-inl.h"
#include "mirror/throwable.h"
+#include "obj_ptr-inl.h"
#include "reflection.h"
#include "safe_map.h"
#include "scoped_thread_state_change-inl.h"
@@ -985,7 +986,7 @@
gRegistry->DisposeObject(object_id, reference_count);
}
-JDWP::JdwpTypeTag Dbg::GetTypeTag(mirror::Class* klass) {
+JDWP::JdwpTypeTag Dbg::GetTypeTag(ObjPtr<mirror::Class> klass) {
DCHECK(klass != nullptr);
if (klass->IsArrayClass()) {
return JDWP::TT_ARRAY;
@@ -1367,12 +1368,12 @@
return m == event_location.method;
}
-bool Dbg::MatchType(mirror::Class* event_class, JDWP::RefTypeId class_id) {
+bool Dbg::MatchType(ObjPtr<mirror::Class> event_class, JDWP::RefTypeId class_id) {
if (event_class == nullptr) {
return false;
}
JDWP::JdwpError error;
- mirror::Class* expected_class = DecodeClass(class_id, &error);
+ ObjPtr<mirror::Class> expected_class = DecodeClass(class_id, &error);
CHECK(expected_class != nullptr);
return expected_class->IsAssignableFrom(event_class);
}
@@ -1742,7 +1743,7 @@
return field_value;
case Primitive::kPrimNot:
- field_value.SetL(f->GetObject(o));
+ field_value.SetL(f->GetObject(o).Decode());
return field_value;
case Primitive::kPrimVoid:
@@ -1868,7 +1869,7 @@
return JDWP::ERR_INVALID_OBJECT;
}
if (v != nullptr) {
- mirror::Class* field_type;
+ ObjPtr<mirror::Class> field_type;
{
StackHandleScope<2> hs(Thread::Current());
HandleWrapper<mirror::Object> h_v(hs.NewHandleWrapper(&v));
@@ -1994,8 +1995,7 @@
CHECK(thread_object != nullptr) << error;
ArtField* java_lang_Thread_name_field =
soa.DecodeField(WellKnownClasses::java_lang_Thread_name);
- mirror::String* s =
- reinterpret_cast<mirror::String*>(java_lang_Thread_name_field->GetObject(thread_object));
+ ObjPtr<mirror::String> s(java_lang_Thread_name_field->GetObject(thread_object));
if (s != nullptr) {
*name = s->ToModifiedUtf8();
}
@@ -2021,7 +2021,7 @@
CHECK(c != nullptr);
ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_group);
CHECK(f != nullptr);
- mirror::Object* group = f->GetObject(thread_object);
+ ObjPtr<mirror::Object> group = f->GetObject(thread_object);
CHECK(group != nullptr);
JDWP::ObjectId thread_group_id = gRegistry->Add(group);
expandBufAddObjectId(pReply, thread_group_id);
@@ -2063,7 +2063,7 @@
ScopedAssertNoThreadSuspension ants("Debugger: GetThreadGroupName");
ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_name);
CHECK(f != nullptr);
- mirror::String* s = reinterpret_cast<mirror::String*>(f->GetObject(thread_group));
+ ObjPtr<mirror::String> s = f->GetObject(thread_group)->AsString();
std::string thread_group_name(s->ToModifiedUtf8());
expandBufAddUtf8String(pReply, thread_group_name);
@@ -2077,7 +2077,7 @@
if (error != JDWP::ERR_NONE) {
return error;
}
- mirror::Object* parent;
+ ObjPtr<mirror::Object> parent;
{
ScopedAssertNoThreadSuspension ants("Debugger: GetThreadGroupParent");
ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_parent);
@@ -2104,12 +2104,12 @@
// Get the ThreadGroup[] "groups" out of this thread group...
ArtField* groups_field = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_groups);
- mirror::Object* groups_array = groups_field->GetObject(thread_group);
+ ObjPtr<mirror::Object> groups_array = groups_field->GetObject(thread_group);
CHECK(groups_array != nullptr);
CHECK(groups_array->IsObjectArray());
- mirror::ObjectArray<mirror::Object>* groups_array_as_array =
+ ObjPtr<mirror::ObjectArray<mirror::Object>> groups_array_as_array =
groups_array->AsObjectArray<mirror::Object>();
// Copy the first 'size' elements out of the array into the result.
@@ -2154,7 +2154,7 @@
JDWP::ObjectId Dbg::GetSystemThreadGroupId() {
ScopedObjectAccessUnchecked soa(Thread::Current());
ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup);
- mirror::Object* group = f->GetObject(f->GetDeclaringClass());
+ ObjPtr<mirror::Object> group = f->GetObject(f->GetDeclaringClass());
return gRegistry->Add(group);
}
@@ -2252,7 +2252,7 @@
}
ArtField* thread_group_field = soa.DecodeField(WellKnownClasses::java_lang_Thread_group);
DCHECK(thread_group_field != nullptr);
- mirror::Object* group = thread_group_field->GetObject(peer);
+ ObjPtr<mirror::Object> group = thread_group_field->GetObject(peer);
return (group == desired_thread_group);
}
diff --git a/runtime/debugger.h b/runtime/debugger.h
index 7398c4e..5d0315e 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -31,6 +31,7 @@
#include "jdwp/jdwp.h"
#include "jni.h"
#include "jvalue.h"
+#include "obj_ptr.h"
#include "thread.h"
#include "thread_state.h"
@@ -317,7 +318,7 @@
const JDWP::EventLocation& event_location)
REQUIRES_SHARED(Locks::mutator_lock_);
- static bool MatchType(mirror::Class* event_class, JDWP::RefTypeId class_id)
+ static bool MatchType(ObjPtr<mirror::Class> event_class, JDWP::RefTypeId class_id)
REQUIRES_SHARED(Locks::mutator_lock_);
static bool MatchField(JDWP::RefTypeId expected_type_id, JDWP::FieldId expected_field_id,
@@ -689,7 +690,7 @@
static JDWP::JdwpTag TagFromObject(const ScopedObjectAccessUnchecked& soa, mirror::Object* o)
REQUIRES_SHARED(Locks::mutator_lock_);
- static JDWP::JdwpTypeTag GetTypeTag(mirror::Class* klass)
+ static JDWP::JdwpTypeTag GetTypeTag(ObjPtr<mirror::Class> klass)
REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::FieldId ToFieldId(const ArtField* f)
diff --git a/runtime/dex_file_annotations.cc b/runtime/dex_file_annotations.cc
index 5763479..feb75a8 100644
--- a/runtime/dex_file_annotations.cc
+++ b/runtime/dex_file_annotations.cc
@@ -54,7 +54,7 @@
const DexFile::AnnotationSetItem* FindAnnotationSetForField(ArtField* field)
REQUIRES_SHARED(Locks::mutator_lock_) {
const DexFile* dex_file = field->GetDexFile();
- mirror::Class* klass = field->GetDeclaringClass();
+ ObjPtr<mirror::Class> klass = field->GetDeclaringClass();
const DexFile::AnnotationsDirectoryItem* annotations_dir =
dex_file->GetAnnotationsDirectory(*klass->GetClassDef());
if (annotations_dir == nullptr) {
@@ -302,7 +302,7 @@
REQUIRES_SHARED(Locks::mutator_lock_) {
const DexFile& dex_file = klass->GetDexFile();
Thread* self = Thread::Current();
- mirror::Object* element_object = nullptr;
+ ObjPtr<mirror::Object> element_object = nullptr;
bool set_object = false;
Primitive::Type primitive_type = Primitive::kPrimVoid;
const uint8_t* annotation = *annotation_ptr;
@@ -577,7 +577,7 @@
}
if (set_object) {
- annotation_value->value_.SetL(element_object);
+ annotation_value->value_.SetL(element_object.Decode());
}
return true;
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 99b8805..e37db7d 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -410,14 +410,15 @@
DCHECK(self->IsExceptionPending()); // Throw exception and unwind.
return nullptr; // Failure.
}
- mirror::Class* fields_class = resolved_field->GetDeclaringClass();
+ ObjPtr<mirror::Class> fields_class = resolved_field->GetDeclaringClass();
if (access_check) {
if (UNLIKELY(resolved_field->IsStatic() != is_static)) {
ThrowIncompatibleClassChangeErrorField(resolved_field, is_static, referrer);
return nullptr;
}
mirror::Class* referring_class = referrer->GetDeclaringClass();
- if (UNLIKELY(!referring_class->CheckResolvedFieldAccess(fields_class, resolved_field,
+ if (UNLIKELY(!referring_class->CheckResolvedFieldAccess(fields_class,
+ resolved_field,
field_idx))) {
DCHECK(self->IsExceptionPending()); // Throw exception and unwind.
return nullptr; // Failure.
@@ -696,7 +697,7 @@
// Incompatible class change.
return nullptr;
}
- mirror::Class* fields_class = resolved_field->GetDeclaringClass();
+ ObjPtr<mirror::Class> fields_class = resolved_field->GetDeclaringClass();
if (is_static) {
// Check class is initialized else fail so that we can contend to initialize the class with
// other threads that may be racing to do this.
diff --git a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
index 2cd0331..4311d19 100644
--- a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
@@ -18,10 +18,15 @@
#include "callee_save_frame.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "class_linker-inl.h"
+#include "class_table-inl.h"
#include "dex_file-inl.h"
-#include "gc/accounting/card_table-inl.h"
+#include "gc/heap.h"
+#include "mirror/class-inl.h"
+#include "mirror/class_loader.h"
#include "mirror/object_array-inl.h"
#include "mirror/object-inl.h"
+#include "oat_file.h"
+#include "runtime.h"
namespace art {
@@ -56,7 +61,20 @@
REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
auto* caller = GetCalleeSaveMethodCaller(self, Runtime::kSaveRefsOnly);
- return ResolveStringFromCode(caller, string_idx);
+ mirror::String* result = ResolveStringFromCode(caller, string_idx);
+ if (LIKELY(result != nullptr)) {
+ // For AOT code, we need a write barrier for the dex cache that holds the GC roots in the .bss.
+ const DexFile* dex_file = caller->GetDexFile();
+ if (dex_file != nullptr &&
+ dex_file->GetOatDexFile() != nullptr &&
+ !dex_file->GetOatDexFile()->GetOatFile()->GetBssGcRoots().empty()) {
+ mirror::ClassLoader* class_loader = caller->GetDeclaringClass()->GetClassLoader();
+ // Note that we emit the barrier before the compiled code stores the string as GC root.
+ // This is OK as there is no suspend point in between.
+ Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader);
+ }
+ }
+ return result;
}
} // namespace art
diff --git a/runtime/entrypoints/quick/quick_field_entrypoints.cc b/runtime/entrypoints/quick/quick_field_entrypoints.cc
index 5b65029..70eb1de 100644
--- a/runtime/entrypoints/quick/quick_field_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_field_entrypoints.cc
@@ -151,14 +151,14 @@
StaticObjectRead,
sizeof(mirror::HeapReference<mirror::Object>));
if (LIKELY(field != nullptr)) {
- return field->GetObj(field->GetDeclaringClass());
+ return field->GetObj(field->GetDeclaringClass()).Decode();
}
field = FindFieldFromCode<StaticObjectRead, true>(field_idx,
referrer,
self,
sizeof(mirror::HeapReference<mirror::Object>));
if (LIKELY(field != nullptr)) {
- return field->GetObj(field->GetDeclaringClass());
+ return field->GetObj(field->GetDeclaringClass()).Decode();
}
return nullptr; // Will throw exception by checking with Thread::Current.
}
@@ -299,7 +299,7 @@
InstanceObjectRead,
sizeof(mirror::HeapReference<mirror::Object>));
if (LIKELY(field != nullptr && obj != nullptr)) {
- return field->GetObj(obj);
+ return field->GetObj(obj).Decode();
}
field = FindInstanceField<InstanceObjectRead, true>(field_idx,
referrer,
@@ -307,7 +307,7 @@
sizeof(mirror::HeapReference<mirror::Object>),
&obj);
if (LIKELY(field != nullptr)) {
- return field->GetObj(obj);
+ return field->GetObj(obj).Decode();
}
return nullptr; // Will throw exception by checking with Thread::Current.
}
diff --git a/runtime/gc_root-inl.h b/runtime/gc_root-inl.h
index ae8a38f..11ccd33 100644
--- a/runtime/gc_root-inl.h
+++ b/runtime/gc_root-inl.h
@@ -21,6 +21,7 @@
#include <sstream>
+#include "obj_ptr-inl.h"
#include "read_barrier-inl.h"
namespace art {
@@ -31,10 +32,15 @@
return down_cast<MirrorType*>(
ReadBarrier::BarrierForRoot<mirror::Object, kReadBarrierOption>(&root_, gc_root_source));
}
+
template<class MirrorType>
inline GcRoot<MirrorType>::GcRoot(MirrorType* ref)
: root_(mirror::CompressedReference<mirror::Object>::FromMirrorPtr(ref)) { }
+template<class MirrorType>
+inline GcRoot<MirrorType>::GcRoot(ObjPtr<MirrorType, kIsDebugBuild> ref)
+ : GcRoot(ref.Decode()) { }
+
inline std::string RootInfo::ToString() const {
std::ostringstream oss;
Describe(oss);
diff --git a/runtime/gc_root.h b/runtime/gc_root.h
index 0a98f55..85cd0a4 100644
--- a/runtime/gc_root.h
+++ b/runtime/gc_root.h
@@ -24,6 +24,7 @@
namespace art {
class ArtField;
class ArtMethod;
+template<class MirrorType, bool kPoison> class ObjPtr;
namespace mirror {
class Object;
@@ -196,7 +197,10 @@
}
ALWAYS_INLINE GcRoot() {}
- explicit ALWAYS_INLINE GcRoot(MirrorType* ref) REQUIRES_SHARED(Locks::mutator_lock_);
+ explicit ALWAYS_INLINE GcRoot(MirrorType* ref)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ explicit ALWAYS_INLINE GcRoot(ObjPtr<MirrorType, kIsDebugBuild> ref)
+ REQUIRES_SHARED(Locks::mutator_lock_);
private:
// Root visitors take pointers to root_ and place them in CompressedReference** arrays. We use a
diff --git a/runtime/handle_scope-inl.h b/runtime/handle_scope-inl.h
index 75a0391..1814746 100644
--- a/runtime/handle_scope-inl.h
+++ b/runtime/handle_scope-inl.h
@@ -135,6 +135,12 @@
GetReferences()[i].Assign(object);
}
+template<class MirrorType, bool kPoison>
+inline MutableHandle<MirrorType> StackHandleScopeCollection::NewHandle(
+ ObjPtr<MirrorType, kPoison> ptr) {
+ return NewHandle(ptr.Decode());
+}
+
} // namespace art
#endif // ART_RUNTIME_HANDLE_SCOPE_INL_H_
diff --git a/runtime/handle_scope.h b/runtime/handle_scope.h
index 2b283ae..fc729a5 100644
--- a/runtime/handle_scope.h
+++ b/runtime/handle_scope.h
@@ -252,6 +252,10 @@
return scopes_.top()->NewHandle(object);
}
+ template<class MirrorType, bool kPoison>
+ MutableHandle<MirrorType> NewHandle(ObjPtr<MirrorType, kPoison> ptr)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
private:
static constexpr size_t kNumReferencesPerScope = 4;
diff --git a/runtime/indirect_reference_table-inl.h b/runtime/indirect_reference_table-inl.h
index f70503d..5cc1de2 100644
--- a/runtime/indirect_reference_table-inl.h
+++ b/runtime/indirect_reference_table-inl.h
@@ -36,21 +36,27 @@
return false;
}
if (UNLIKELY(GetIndirectRefKind(iref) == kHandleScopeOrInvalid)) {
- LOG(ERROR) << "JNI ERROR (app bug): invalid " << kind_ << " " << iref;
- AbortIfNoCheckJNI();
+ AbortIfNoCheckJNI(StringPrintf("JNI ERROR (app bug): invalid %s %p",
+ GetIndirectRefKindString(kind_),
+ iref));
return false;
}
const int topIndex = segment_state_.parts.topIndex;
int idx = ExtractIndex(iref);
if (UNLIKELY(idx >= topIndex)) {
- LOG(ERROR) << "JNI ERROR (app bug): accessed stale " << kind_ << " "
- << iref << " (index " << idx << " in a table of size " << topIndex << ")";
- AbortIfNoCheckJNI();
+ std::string msg = StringPrintf(
+ "JNI ERROR (app bug): accessed stale %s %p (index %d in a table of size %d)",
+ GetIndirectRefKindString(kind_),
+ iref,
+ idx,
+ topIndex);
+ AbortIfNoCheckJNI(msg);
return false;
}
if (UNLIKELY(table_[idx].GetReference()->IsNull())) {
- LOG(ERROR) << "JNI ERROR (app bug): accessed deleted " << kind_ << " " << iref;
- AbortIfNoCheckJNI();
+ AbortIfNoCheckJNI(StringPrintf("JNI ERROR (app bug): accessed deleted %s %p",
+ GetIndirectRefKindString(kind_),
+ iref));
return false;
}
if (UNLIKELY(!CheckEntry("use", iref, idx))) {
@@ -63,10 +69,13 @@
inline bool IndirectReferenceTable::CheckEntry(const char* what, IndirectRef iref, int idx) const {
IndirectRef checkRef = ToIndirectRef(idx);
if (UNLIKELY(checkRef != iref)) {
- LOG(ERROR) << "JNI ERROR (app bug): attempt to " << what
- << " stale " << kind_ << " " << iref
- << " (should be " << checkRef << ")";
- AbortIfNoCheckJNI();
+ std::string msg = StringPrintf(
+ "JNI ERROR (app bug): attempt to %s stale %s %p (should be %p)",
+ what,
+ GetIndirectRefKindString(kind_),
+ iref,
+ checkRef);
+ AbortIfNoCheckJNI(msg);
return false;
}
return true;
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index 202e472..b742ccc 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -32,6 +32,20 @@
static constexpr bool kDumpStackOnNonLocalReference = false;
+const char* GetIndirectRefKindString(const IndirectRefKind& kind) {
+ switch (kind) {
+ case kHandleScopeOrInvalid:
+ return "HandleScopeOrInvalid";
+ case kLocal:
+ return "Local";
+ case kGlobal:
+ return "Global";
+ case kWeakGlobal:
+ return "WeakGlobal";
+ }
+ return "IndirectRefKind Error";
+}
+
template<typename T>
class MutatorLockedDumpable {
public:
@@ -58,12 +72,14 @@
return os;
}
-void IndirectReferenceTable::AbortIfNoCheckJNI() {
+void IndirectReferenceTable::AbortIfNoCheckJNI(const std::string& msg) {
// If -Xcheck:jni is on, it'll give a more detailed error before aborting.
JavaVMExt* vm = Runtime::Current()->GetJavaVM();
if (!vm->IsCheckJniEnabled()) {
// Otherwise, we want to abort rather than hand back a bad reference.
- LOG(FATAL) << "JNI ERROR (app bug): see above.";
+ LOG(FATAL) << msg;
+ } else {
+ LOG(ERROR) << msg;
}
}
diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h
index 13c6225..e194f79 100644
--- a/runtime/indirect_reference_table.h
+++ b/runtime/indirect_reference_table.h
@@ -119,6 +119,7 @@
kWeakGlobal = 3 // <<weak global reference>>
};
std::ostream& operator<<(std::ostream& os, const IndirectRefKind& rhs);
+const char* GetIndirectRefKindString(const IndirectRefKind& kind);
/*
* Determine what kind of indirect reference this is.
@@ -372,8 +373,8 @@
return reinterpret_cast<IndirectRef>(uref);
}
- // Abort if check_jni is not enabled.
- static void AbortIfNoCheckJNI();
+ // Abort if check_jni is not enabled. Otherwise, just log as an error.
+ static void AbortIfNoCheckJNI(const std::string& msg);
/* extra debugging checks */
bool GetChecked(IndirectRef) const;
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 5934f13..7a6162c 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -46,7 +46,7 @@
CHECK(self->IsExceptionPending());
return false;
}
- Object* obj;
+ ObjPtr<Object> obj;
if (is_static) {
obj = f->GetDeclaringClass();
} else {
@@ -60,9 +60,18 @@
// Report this field access to instrumentation if needed.
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
if (UNLIKELY(instrumentation->HasFieldReadListeners())) {
- Object* this_object = f->IsStatic() ? nullptr : obj;
- instrumentation->FieldReadEvent(self, this_object, shadow_frame.GetMethod(),
- shadow_frame.GetDexPC(), f);
+ StackHandleScope<1> hs(self);
+ // Wrap in handle wrapper in case the listener does thread suspension.
+ HandleWrapperObjPtr<mirror::Object> h(hs.NewHandleWrapper(&obj));
+ ObjPtr<Object> this_object;
+ if (!f->IsStatic()) {
+ this_object = obj;
+ }
+ instrumentation->FieldReadEvent(self,
+ this_object.Decode(),
+ shadow_frame.GetMethod(),
+ shadow_frame.GetDexPC(),
+ f);
}
uint32_t vregA = is_static ? inst->VRegA_21c(inst_data) : inst->VRegA_22c(inst_data);
switch (field_type) {
@@ -85,7 +94,7 @@
shadow_frame.SetVRegLong(vregA, f->GetLong(obj));
break;
case Primitive::kPrimNot:
- shadow_frame.SetVRegReference(vregA, f->GetObject(obj));
+ shadow_frame.SetVRegReference(vregA, f->GetObject(obj).Decode());
break;
default:
LOG(FATAL) << "Unreachable: " << field_type;
@@ -241,7 +250,7 @@
CHECK(self->IsExceptionPending());
return false;
}
- Object* obj;
+ ObjPtr<Object> obj;
if (is_static) {
obj = f->GetDeclaringClass();
} else {
@@ -257,10 +266,16 @@
// the field from the base of the object, we need to look for it first.
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
if (UNLIKELY(instrumentation->HasFieldWriteListeners())) {
+ StackHandleScope<1> hs(self);
+ // Wrap in handle wrapper in case the listener does thread suspension.
+ HandleWrapperObjPtr<mirror::Object> h(hs.NewHandleWrapper(&obj));
JValue field_value = GetFieldValue<field_type>(shadow_frame, vregA);
- Object* this_object = f->IsStatic() ? nullptr : obj;
- instrumentation->FieldWriteEvent(self, this_object, shadow_frame.GetMethod(),
- shadow_frame.GetDexPC(), f, field_value);
+ ObjPtr<Object> this_object = f->IsStatic() ? nullptr : obj;
+ instrumentation->FieldWriteEvent(self, this_object.Decode(),
+ shadow_frame.GetMethod(),
+ shadow_frame.GetDexPC(),
+ f,
+ field_value);
}
switch (field_type) {
case Primitive::kPrimBoolean:
@@ -286,14 +301,14 @@
if (do_assignability_check && reg != nullptr) {
// FieldHelper::GetType can resolve classes, use a handle wrapper which will restore the
// object in the destructor.
- Class* field_class;
+ ObjPtr<Class> field_class;
{
StackHandleScope<2> hs(self);
HandleWrapper<mirror::Object> h_reg(hs.NewHandleWrapper(®));
- HandleWrapper<mirror::Object> h_obj(hs.NewHandleWrapper(&obj));
+ HandleWrapperObjPtr<mirror::Object> h_obj(hs.NewHandleWrapper(&obj));
field_class = f->GetType<true>();
}
- if (!reg->VerifierInstanceOf(field_class)) {
+ if (!reg->VerifierInstanceOf(field_class.Decode())) {
// This should never happen.
std::string temp1, temp2, temp3;
self->ThrowNewExceptionF("Ljava/lang/VirtualMachineError;",
@@ -489,15 +504,14 @@
// Separate declaration is required solely for the attributes.
template <bool is_range,
- bool do_assignability_check,
- size_t kVarArgMax>
+ bool do_assignability_check>
REQUIRES_SHARED(Locks::mutator_lock_)
static inline bool DoCallCommon(ArtMethod* called_method,
Thread* self,
ShadowFrame& shadow_frame,
JValue* result,
uint16_t number_of_inputs,
- uint32_t (&arg)[kVarArgMax],
+ uint32_t (&arg)[Instruction::kMaxVarArgRegs],
uint32_t vregC) ALWAYS_INLINE;
void ArtInterpreterToCompiledCodeBridge(Thread* self,
@@ -563,14 +577,13 @@
}
template <bool is_range,
- bool do_assignability_check,
- size_t kVarArgMax>
+ bool do_assignability_check>
static inline bool DoCallCommon(ArtMethod* called_method,
Thread* self,
ShadowFrame& shadow_frame,
JValue* result,
uint16_t number_of_inputs,
- uint32_t (&arg)[kVarArgMax],
+ uint32_t (&arg)[Instruction::kMaxVarArgRegs],
uint32_t vregC) {
bool string_init = false;
// Replace calls to String.<init> with equivalent StringFactory call.
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 39846da..eb8cdbc 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -786,9 +786,9 @@
kAndroidHardcodedSystemPropertiesFieldName);
return;
}
- Handle<mirror::ObjectArray<mirror::ObjectArray<mirror::String>>> h_2string_array(
- hs.NewHandle(reinterpret_cast<mirror::ObjectArray<mirror::ObjectArray<mirror::String>>*>(
- static_properties->GetObject(h_props_class.Get()))));
+ ObjPtr<mirror::Object> props = static_properties->GetObject(h_props_class.Get());
+ Handle<mirror::ObjectArray<mirror::ObjectArray<mirror::String>>> h_2string_array(hs.NewHandle(
+ props->AsObjectArray<mirror::ObjectArray<mirror::String>>()));
if (h_2string_array.Get() == nullptr) {
AbortTransactionOrFail(self, "Field %s is null", kAndroidHardcodedSystemPropertiesFieldName);
return;
diff --git a/runtime/jdwp/object_registry.cc b/runtime/jdwp/object_registry.cc
index 9ba62c9..dc3bf16 100644
--- a/runtime/jdwp/object_registry.cc
+++ b/runtime/jdwp/object_registry.cc
@@ -19,6 +19,7 @@
#include "handle_scope-inl.h"
#include "jni_internal.h"
#include "mirror/class.h"
+#include "obj_ptr-inl.h"
#include "scoped_thread_state_change-inl.h"
namespace art {
@@ -35,7 +36,7 @@
: lock_("ObjectRegistry lock", kJdwpObjectRegistryLock), next_id_(1) {
}
-JDWP::RefTypeId ObjectRegistry::AddRefType(mirror::Class* c) {
+JDWP::RefTypeId ObjectRegistry::AddRefType(ObjPtr<mirror::Class> c) {
return Add(c);
}
@@ -43,7 +44,7 @@
return Add(c_h);
}
-JDWP::ObjectId ObjectRegistry::Add(mirror::Object* o) {
+JDWP::ObjectId ObjectRegistry::Add(ObjPtr<mirror::Object> o) {
if (o == nullptr) {
return 0;
}
@@ -118,7 +119,9 @@
return entry->id;
}
-bool ObjectRegistry::ContainsLocked(Thread* self, mirror::Object* o, int32_t identity_hash_code,
+bool ObjectRegistry::ContainsLocked(Thread* self,
+ ObjPtr<mirror::Object> o,
+ int32_t identity_hash_code,
ObjectRegistryEntry** out_entry) {
DCHECK(o != nullptr);
for (auto it = object_to_entry_.lower_bound(identity_hash_code), end = object_to_entry_.end();
diff --git a/runtime/jdwp/object_registry.h b/runtime/jdwp/object_registry.h
index 7fa57c6..9cacc66 100644
--- a/runtime/jdwp/object_registry.h
+++ b/runtime/jdwp/object_registry.h
@@ -25,6 +25,7 @@
#include "base/casts.h"
#include "handle.h"
#include "jdwp/jdwp.h"
+#include "obj_ptr.h"
#include "safe_map.h"
namespace art {
@@ -62,11 +63,11 @@
public:
ObjectRegistry();
- JDWP::ObjectId Add(mirror::Object* o)
+ JDWP::ObjectId Add(ObjPtr<mirror::Object> o)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_, !lock_);
- JDWP::RefTypeId AddRefType(mirror::Class* c)
+ JDWP::RefTypeId AddRefType(ObjPtr<mirror::Class> c)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_, !lock_);
@@ -121,7 +122,9 @@
void Promote(ObjectRegistryEntry& entry)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(lock_);
- bool ContainsLocked(Thread* self, mirror::Object* o, int32_t identity_hash_code,
+ bool ContainsLocked(Thread* self,
+ ObjPtr<mirror::Object> o,
+ int32_t identity_hash_code,
ObjectRegistryEntry** out_entry)
REQUIRES(lock_) REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index 7b27578..7977815 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -97,15 +97,20 @@
kind, c->GetDescriptor(&temp), name, sig);
}
-static void ReportInvalidJNINativeMethod(const ScopedObjectAccess& soa, mirror::Class* c,
- const char* kind, jint idx, bool return_errors)
+static void ReportInvalidJNINativeMethod(const ScopedObjectAccess& soa,
+ ObjPtr<mirror::Class> c,
+ const char* kind,
+ jint idx,
+ bool return_errors)
REQUIRES_SHARED(Locks::mutator_lock_) {
LOG(return_errors ? ::android::base::ERROR : ::android::base::FATAL)
<< "Failed to register native method in " << PrettyDescriptor(c)
<< " in " << c->GetDexCache()->GetLocation()->ToModifiedUtf8()
<< ": " << kind << " is null at index " << idx;
soa.Self()->ThrowNewExceptionF("Ljava/lang/NoSuchMethodError;",
- "%s is null at index %d", kind, idx);
+ "%s is null at index %d",
+ kind,
+ idx);
}
static ObjPtr<mirror::Class> EnsureInitialized(Thread* self, ObjPtr<mirror::Class> klass)
@@ -282,7 +287,7 @@
return JNI_ERR;
}
ScopedObjectAccess soa(env);
- soa.Self()->SetException(soa.Decode<mirror::Throwable>(exception.get()).Decode());
+ soa.Self()->SetException(soa.Decode<mirror::Throwable>(exception.get()));
return JNI_OK;
}
@@ -417,7 +422,7 @@
ScopedObjectAccess soa(env);
ObjPtr<mirror::Class> c1 = soa.Decode<mirror::Class>(java_class1);
ObjPtr<mirror::Class> c2 = soa.Decode<mirror::Class>(java_class2);
- return c2->IsAssignableFrom(c1.Decode()) ? JNI_TRUE : JNI_FALSE;
+ return c2->IsAssignableFrom(c1) ? JNI_TRUE : JNI_FALSE;
}
static jboolean IsInstanceOf(JNIEnv* env, jobject jobj, jclass java_class) {
@@ -439,7 +444,7 @@
if (exception == nullptr) {
return JNI_ERR;
}
- soa.Self()->SetException(exception.Decode());
+ soa.Self()->SetException(exception);
return JNI_OK;
}
@@ -1227,7 +1232,7 @@
ScopedObjectAccess soa(env);
ObjPtr<mirror::Object> o = soa.Decode<mirror::Object>(obj);
ArtField* f = soa.DecodeField(fid);
- return soa.AddLocalReference<jobject>(f->GetObject(o.Decode()));
+ return soa.AddLocalReference<jobject>(f->GetObject(o));
}
static jobject GetStaticObjectField(JNIEnv* env, jclass, jfieldID fid) {
@@ -1244,7 +1249,7 @@
ObjPtr<mirror::Object> o = soa.Decode<mirror::Object>(java_object);
ObjPtr<mirror::Object> v = soa.Decode<mirror::Object>(java_value);
ArtField* f = soa.DecodeField(fid);
- f->SetObject<false>(o.Decode(), v.Decode());
+ f->SetObject<false>(o, v);
}
static void SetStaticObjectField(JNIEnv* env, jclass, jfieldID fid, jobject java_value) {
@@ -1252,7 +1257,7 @@
ScopedObjectAccess soa(env);
ObjPtr<mirror::Object> v = soa.Decode<mirror::Object>(java_value);
ArtField* f = soa.DecodeField(fid);
- f->SetObject<false>(f->GetDeclaringClass(), v.Decode());
+ f->SetObject<false>(f->GetDeclaringClass(), v);
}
#define GET_PRIMITIVE_FIELD(fn, instance) \
@@ -1261,7 +1266,7 @@
ScopedObjectAccess soa(env); \
ObjPtr<mirror::Object> o = soa.Decode<mirror::Object>(instance); \
ArtField* f = soa.DecodeField(fid); \
- return f->Get ##fn (o.Decode())
+ return f->Get ##fn (o)
#define GET_STATIC_PRIMITIVE_FIELD(fn) \
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(fid); \
@@ -1275,7 +1280,7 @@
ScopedObjectAccess soa(env); \
ObjPtr<mirror::Object> o = soa.Decode<mirror::Object>(instance); \
ArtField* f = soa.DecodeField(fid); \
- f->Set ##fn <false>(o.Decode(), value)
+ f->Set ##fn <false>(o, value)
#define SET_STATIC_PRIMITIVE_FIELD(fn, value) \
CHECK_NON_NULL_ARGUMENT_RETURN_VOID(fid); \
@@ -2159,13 +2164,13 @@
const char* sig = methods[i].signature;
const void* fnPtr = methods[i].fnPtr;
if (UNLIKELY(name == nullptr)) {
- ReportInvalidJNINativeMethod(soa, c.Decode(), "method name", i, return_errors);
+ ReportInvalidJNINativeMethod(soa, c, "method name", i, return_errors);
return JNI_ERR;
} else if (UNLIKELY(sig == nullptr)) {
- ReportInvalidJNINativeMethod(soa, c.Decode(), "method signature", i, return_errors);
+ ReportInvalidJNINativeMethod(soa, c, "method signature", i, return_errors);
return JNI_ERR;
} else if (UNLIKELY(fnPtr == nullptr)) {
- ReportInvalidJNINativeMethod(soa, c.Decode(), "native function", i, return_errors);
+ ReportInvalidJNINativeMethod(soa, c, "native function", i, return_errors);
return JNI_ERR;
}
bool is_fast = false;
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 5a5f717..3cbd58b 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -341,13 +341,13 @@
// Don't forget about primitive types.
// Object[] = int[] --> false
//
-inline bool Class::IsArrayAssignableFromArray(Class* src) {
+inline bool Class::IsArrayAssignableFromArray(ObjPtr<Class> src) {
DCHECK(IsArrayClass()) << PrettyClass(this);
DCHECK(src->IsArrayClass()) << PrettyClass(src);
return GetComponentType()->IsAssignableFrom(src->GetComponentType());
}
-inline bool Class::IsAssignableFromArray(Class* src) {
+inline bool Class::IsAssignableFromArray(ObjPtr<Class> src) {
DCHECK(!IsInterface()) << PrettyClass(this); // handled first in IsAssignableFrom
DCHECK(src->IsArrayClass()) << PrettyClass(src);
if (!IsArrayClass()) {
@@ -362,34 +362,29 @@
}
template <bool throw_on_failure, bool use_referrers_cache>
-inline bool Class::ResolvedFieldAccessTest(Class* access_to, ArtField* field,
- uint32_t field_idx, DexCache* dex_cache) {
+inline bool Class::ResolvedFieldAccessTest(ObjPtr<Class> access_to,
+ ArtField* field,
+ uint32_t field_idx,
+ ObjPtr<DexCache> dex_cache) {
DCHECK_EQ(use_referrers_cache, dex_cache == nullptr);
if (UNLIKELY(!this->CanAccess(access_to))) {
// The referrer class can't access the field's declaring class but may still be able
// to access the field if the FieldId specifies an accessible subclass of the declaring
// class rather than the declaring class itself.
- DexCache* referrer_dex_cache = use_referrers_cache ? this->GetDexCache() : dex_cache;
+ ObjPtr<DexCache> referrer_dex_cache = use_referrers_cache ? this->GetDexCache() : dex_cache;
uint32_t class_idx = referrer_dex_cache->GetDexFile()->GetFieldId(field_idx).class_idx_;
// The referenced class has already been resolved with the field, but may not be in the dex
- // cache. Using ResolveType here without handles in the caller should be safe since there
+ // cache. Use LookupResolvedType here to search the class table if it is not in the dex cache.
// should be no thread suspension due to the class being resolved.
- // TODO: Clean this up to use handles in the caller.
- Class* dex_access_to;
- {
- StackHandleScope<2> hs(Thread::Current());
- Handle<mirror::DexCache> h_dex_cache(hs.NewHandle(referrer_dex_cache));
- Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(access_to->GetClassLoader()));
- dex_access_to = Runtime::Current()->GetClassLinker()->ResolveType(
- *referrer_dex_cache->GetDexFile(),
- class_idx,
- h_dex_cache,
- h_class_loader);
- }
+ ObjPtr<Class> dex_access_to = Runtime::Current()->GetClassLinker()->LookupResolvedType(
+ *referrer_dex_cache->GetDexFile(),
+ class_idx,
+ referrer_dex_cache,
+ access_to->GetClassLoader());
DCHECK(dex_access_to != nullptr);
if (UNLIKELY(!this->CanAccess(dex_access_to))) {
if (throw_on_failure) {
- ThrowIllegalAccessErrorClass(this, dex_access_to);
+ ThrowIllegalAccessErrorClass(this, dex_access_to.Decode());
}
return false;
}
@@ -404,36 +399,32 @@
}
template <bool throw_on_failure, bool use_referrers_cache, InvokeType throw_invoke_type>
-inline bool Class::ResolvedMethodAccessTest(Class* access_to, ArtMethod* method,
- uint32_t method_idx, DexCache* dex_cache) {
+inline bool Class::ResolvedMethodAccessTest(ObjPtr<Class> access_to,
+ ArtMethod* method,
+ uint32_t method_idx,
+ ObjPtr<DexCache> dex_cache) {
static_assert(throw_on_failure || throw_invoke_type == kStatic, "Non-default throw invoke type");
DCHECK_EQ(use_referrers_cache, dex_cache == nullptr);
if (UNLIKELY(!this->CanAccess(access_to))) {
// The referrer class can't access the method's declaring class but may still be able
// to access the method if the MethodId specifies an accessible subclass of the declaring
// class rather than the declaring class itself.
- DexCache* referrer_dex_cache = use_referrers_cache ? this->GetDexCache() : dex_cache;
+ ObjPtr<DexCache> referrer_dex_cache = use_referrers_cache ? this->GetDexCache() : dex_cache;
uint32_t class_idx = referrer_dex_cache->GetDexFile()->GetMethodId(method_idx).class_idx_;
// The referenced class has already been resolved with the method, but may not be in the dex
- // cache. Using ResolveType here without handles in the caller should be safe since there
- // should be no thread suspension due to the class being resolved.
- // TODO: Clean this up to use handles in the caller.
- Class* dex_access_to;
- {
- StackHandleScope<2> hs(Thread::Current());
- Handle<mirror::DexCache> h_dex_cache(hs.NewHandle(referrer_dex_cache));
- Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(access_to->GetClassLoader()));
- dex_access_to = Runtime::Current()->GetClassLinker()->ResolveType(
- *referrer_dex_cache->GetDexFile(),
- class_idx,
- h_dex_cache,
- h_class_loader);
- }
+ // cache. Use LookupResolvedType here to search the class table if it is not in the dex cache.
+ ObjPtr<Class> dex_access_to = Runtime::Current()->GetClassLinker()->LookupResolvedType(
+ *referrer_dex_cache->GetDexFile(),
+ class_idx,
+ referrer_dex_cache,
+ access_to->GetClassLoader());
DCHECK(dex_access_to != nullptr);
if (UNLIKELY(!this->CanAccess(dex_access_to))) {
if (throw_on_failure) {
- ThrowIllegalAccessErrorClassForMethodDispatch(this, dex_access_to,
- method, throw_invoke_type);
+ ThrowIllegalAccessErrorClassForMethodDispatch(this,
+ dex_access_to.Decode(),
+ method,
+ throw_invoke_type);
}
return false;
}
@@ -447,14 +438,17 @@
return false;
}
-inline bool Class::CanAccessResolvedField(Class* access_to, ArtField* field,
- DexCache* dex_cache, uint32_t field_idx) {
+inline bool Class::CanAccessResolvedField(ObjPtr<Class> access_to,
+ ArtField* field,
+ ObjPtr<DexCache> dex_cache,
+ uint32_t field_idx) {
return ResolvedFieldAccessTest<false, false>(access_to, field, field_idx, dex_cache);
}
-inline bool Class::CheckResolvedFieldAccess(Class* access_to, ArtField* field,
+inline bool Class::CheckResolvedFieldAccess(ObjPtr<Class> access_to,
+ ArtField* field,
uint32_t field_idx) {
- return ResolvedFieldAccessTest<true, true>(access_to, field, field_idx, nullptr);
+ return ResolvedFieldAccessTest<true, true>(access_to.Decode(), field, field_idx, nullptr);
}
inline bool Class::CanAccessResolvedMethod(Class* access_to, ArtMethod* method,
@@ -469,10 +463,10 @@
nullptr);
}
-inline bool Class::IsSubClass(Class* klass) {
+inline bool Class::IsSubClass(ObjPtr<Class> klass) {
DCHECK(!IsInterface()) << PrettyClass(this);
DCHECK(!IsArrayClass()) << PrettyClass(this);
- Class* current = this;
+ ObjPtr<Class> current = this;
do {
if (current == klass) {
return true;
@@ -1032,7 +1026,7 @@
return GetComponentType<kVerifyFlags, kReadBarrierOption>() != nullptr;
}
-inline bool Class::IsAssignableFrom(Class* src) {
+inline bool Class::IsAssignableFrom(ObjPtr<Class> src) {
DCHECK(src != nullptr);
if (this == src) {
// Can always assign to things of the same type.
@@ -1113,6 +1107,34 @@
}
}
+inline bool Class::CanAccess(ObjPtr<Class> that) {
+ return that->IsPublic() || this->IsInSamePackage(that);
+}
+
+
+inline bool Class::CanAccessMember(ObjPtr<Class> access_to, uint32_t member_flags) {
+ // Classes can access all of their own members
+ if (this == access_to) {
+ return true;
+ }
+ // Public members are trivially accessible
+ if (member_flags & kAccPublic) {
+ return true;
+ }
+ // Private members are trivially not accessible
+ if (member_flags & kAccPrivate) {
+ return false;
+ }
+ // Check for protected access from a sub-class, which may or may not be in the same package.
+ if (member_flags & kAccProtected) {
+ if (!this->IsInterface() && this->IsSubClass(access_to)) {
+ return true;
+ }
+ }
+ // Allow protected and package-private access from other classes in the same package.
+ return this->IsInSamePackage(access_to);
+}
+
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 2e5f532..40742d2 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -308,9 +308,9 @@
}
}
-bool Class::IsInSamePackage(Class* that) {
- Class* klass1 = this;
- Class* klass2 = that;
+bool Class::IsInSamePackage(ObjPtr<Class> that) {
+ ObjPtr<Class> klass1 = this;
+ ObjPtr<Class> klass2 = that;
if (klass1 == klass2) {
return true;
}
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 6c1259b..a0d6f37 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -611,50 +611,28 @@
}
// Returns true if this class is in the same packages as that class.
- bool IsInSamePackage(Class* that) REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsInSamePackage(ObjPtr<Class> that) REQUIRES_SHARED(Locks::mutator_lock_);
static bool IsInSamePackage(const StringPiece& descriptor1, const StringPiece& descriptor2);
// Returns true if this class can access that class.
- bool CanAccess(Class* that) REQUIRES_SHARED(Locks::mutator_lock_) {
- return that->IsPublic() || this->IsInSamePackage(that);
- }
+ bool CanAccess(ObjPtr<Class> that) REQUIRES_SHARED(Locks::mutator_lock_);
// Can this class access a member in the provided class with the provided member access flags?
// Note that access to the class isn't checked in case the declaring class is protected and the
// method has been exposed by a public sub-class
- bool CanAccessMember(Class* access_to, uint32_t member_flags)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- // Classes can access all of their own members
- if (this == access_to) {
- return true;
- }
- // Public members are trivially accessible
- if (member_flags & kAccPublic) {
- return true;
- }
- // Private members are trivially not accessible
- if (member_flags & kAccPrivate) {
- return false;
- }
- // Check for protected access from a sub-class, which may or may not be in the same package.
- if (member_flags & kAccProtected) {
- if (!this->IsInterface() && this->IsSubClass(access_to)) {
- return true;
- }
- }
- // Allow protected access from other classes in the same package.
- return this->IsInSamePackage(access_to);
- }
+ bool CanAccessMember(ObjPtr<Class> access_to, uint32_t member_flags)
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Can this class access a resolved field?
// Note that access to field's class is checked and this may require looking up the class
// referenced by the FieldId in the DexFile in case the declaring class is inaccessible.
- bool CanAccessResolvedField(Class* access_to, ArtField* field,
- DexCache* dex_cache, uint32_t field_idx)
+ bool CanAccessResolvedField(ObjPtr<Class> access_to,
+ ArtField* field,
+ ObjPtr<DexCache> dex_cache,
+ uint32_t field_idx)
REQUIRES_SHARED(Locks::mutator_lock_);
- bool CheckResolvedFieldAccess(Class* access_to, ArtField* field,
- uint32_t field_idx)
+ bool CheckResolvedFieldAccess(ObjPtr<Class> access_to, ArtField* field, uint32_t field_idx)
REQUIRES_SHARED(Locks::mutator_lock_);
// Can this class access a resolved method?
@@ -668,14 +646,14 @@
uint32_t method_idx)
REQUIRES_SHARED(Locks::mutator_lock_);
- bool IsSubClass(Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsSubClass(ObjPtr<Class> klass) REQUIRES_SHARED(Locks::mutator_lock_);
// Can src be assigned to this class? For example, String can be assigned to Object (by an
// upcast), however, an Object cannot be assigned to a String as a potentially exception throwing
// downcast would be necessary. Similarly for interfaces, a class that implements (or an interface
// that extends) another can be assigned to its parent, but not vice-versa. All Classes may assign
// to themselves. Classes for primitive types may not assign to each other.
- ALWAYS_INLINE bool IsAssignableFrom(Class* src) REQUIRES_SHARED(Locks::mutator_lock_);
+ ALWAYS_INLINE bool IsAssignableFrom(ObjPtr<Class> src) REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
@@ -1309,17 +1287,22 @@
void SetVerifyError(Object* klass) REQUIRES_SHARED(Locks::mutator_lock_);
template <bool throw_on_failure, bool use_referrers_cache>
- bool ResolvedFieldAccessTest(Class* access_to, ArtField* field,
- uint32_t field_idx, DexCache* dex_cache)
+ bool ResolvedFieldAccessTest(ObjPtr<Class> access_to,
+ ArtField* field,
+ uint32_t field_idx,
+ ObjPtr<DexCache> dex_cache)
REQUIRES_SHARED(Locks::mutator_lock_);
+
template <bool throw_on_failure, bool use_referrers_cache, InvokeType throw_invoke_type>
- bool ResolvedMethodAccessTest(Class* access_to, ArtMethod* resolved_method,
- uint32_t method_idx, DexCache* dex_cache)
+ bool ResolvedMethodAccessTest(ObjPtr<Class> access_to,
+ ArtMethod* resolved_method,
+ uint32_t method_idx,
+ ObjPtr<DexCache> dex_cache)
REQUIRES_SHARED(Locks::mutator_lock_);
bool Implements(Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
- bool IsArrayAssignableFromArray(Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
- bool IsAssignableFromArray(Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsArrayAssignableFromArray(ObjPtr<Class> klass) REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsAssignableFromArray(ObjPtr<Class> klass) REQUIRES_SHARED(Locks::mutator_lock_);
void CheckObjectAlloc() REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/mirror/field-inl.h b/runtime/mirror/field-inl.h
index 8b0f8ce..ec32cb6 100644
--- a/runtime/mirror/field-inl.h
+++ b/runtime/mirror/field-inl.h
@@ -78,6 +78,11 @@
return ret.Get();
}
+template<bool kTransactionActive>
+void Field::SetDeclaringClass(ObjPtr<mirror::Class> c) {
+ SetFieldObject<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, declaring_class_), c.Decode());
+}
+
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/field.h b/runtime/mirror/field.h
index f378568..c5357c9 100644
--- a/runtime/mirror/field.h
+++ b/runtime/mirror/field.h
@@ -20,6 +20,7 @@
#include "accessible_object.h"
#include "base/enums.h"
#include "gc_root.h"
+#include "obj_ptr.h"
#include "object.h"
#include "object_callbacks.h"
#include "read_barrier_option.h"
@@ -109,9 +110,7 @@
int32_t offset_;
template<bool kTransactionActive>
- void SetDeclaringClass(mirror::Class* c) REQUIRES_SHARED(Locks::mutator_lock_) {
- SetFieldObject<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, declaring_class_), c);
- }
+ void SetDeclaringClass(ObjPtr<mirror::Class> c) REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive>
void SetType(mirror::Class* type) REQUIRES_SHARED(Locks::mutator_lock_) {
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index c37deb5..90b97fd 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -213,7 +213,7 @@
if (field.GetOffset().Int32Value() == field_offset.Int32Value()) {
CHECK_NE(field.GetTypeAsPrimitiveType(), Primitive::kPrimNot);
// TODO: resolve the field type for moving GC.
- mirror::Class* field_type = field.GetType<!kMovingCollector>();
+ ObjPtr<mirror::Class> field_type = field.GetType<!kMovingCollector>();
if (field_type != nullptr) {
CHECK(field_type->IsAssignableFrom(new_value->GetClass()));
}
@@ -230,7 +230,7 @@
if (field.GetOffset().Int32Value() == field_offset.Int32Value()) {
CHECK_NE(field.GetTypeAsPrimitiveType(), Primitive::kPrimNot);
// TODO: resolve the field type for moving GC.
- mirror::Class* field_type = field.GetType<!kMovingCollector>();
+ ObjPtr<mirror::Class> field_type = field.GetType<!kMovingCollector>();
if (field_type != nullptr) {
CHECK(field_type->IsAssignableFrom(new_value->GetClass()));
}
diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc
index 40ee3a2..a573ae6 100644
--- a/runtime/mirror/object_test.cc
+++ b/runtime/mirror/object_test.cc
@@ -384,12 +384,12 @@
ArtField* field = FindFieldFromCode<StaticObjectRead, true>(field_idx, clinit, Thread::Current(),
sizeof(HeapReference<Object>));
- Object* s0 = field->GetObj(klass);
+ ObjPtr<Object> s0 = field->GetObj(klass);
EXPECT_TRUE(s0 != nullptr);
Handle<CharArray> char_array(hs.NewHandle(CharArray::Alloc(soa.Self(), 0)));
field->SetObj<false>(field->GetDeclaringClass(), char_array.Get());
- EXPECT_EQ(char_array.Get(), field->GetObj(klass));
+ EXPECT_OBJ_PTR_EQ(char_array.Get(), field->GetObj(klass));
field->SetObj<false>(field->GetDeclaringClass(), nullptr);
EXPECT_EQ(nullptr, field->GetObj(klass));
@@ -759,7 +759,7 @@
EXPECT_TRUE(!X.IsNull());
EXPECT_TRUE(X.IsValid());
EXPECT_TRUE(X.Decode() != nullptr);
- EXPECT_EQ(h_X.Get(), X.Decode());
+ EXPECT_OBJ_PTR_EQ(h_X.Get(), X);
// FindClass may cause thread suspension, it should invalidate X.
ObjPtr<Class, /*kPoison*/ true> Y(class_linker_->FindClass(soa.Self(), "LY;", class_loader));
EXPECT_TRUE(!Y.IsNull());
@@ -773,7 +773,7 @@
X.Assign(h_X.Get());
EXPECT_TRUE(!X.IsNull());
EXPECT_TRUE(X.IsValid());
- EXPECT_EQ(h_X.Get(), X.Decode());
+ EXPECT_OBJ_PTR_EQ(h_X.Get(), X);
// Allow thread suspension to invalidate Y.
soa.Self()->AllowThreadSuspension();
@@ -793,7 +793,7 @@
unpoisoned = h_X.Get();
EXPECT_FALSE(unpoisoned.IsNull());
EXPECT_TRUE(unpoisoned == h_X.Get());
- EXPECT_EQ(unpoisoned.Decode(), h_X.Get());
+ EXPECT_OBJ_PTR_EQ(unpoisoned, h_X.Get());
}
} // namespace mirror
diff --git a/runtime/monitor_android.cc b/runtime/monitor_android.cc
index 671cb60..0d1839b 100644
--- a/runtime/monitor_android.cc
+++ b/runtime/monitor_android.cc
@@ -21,7 +21,7 @@
#include <sys/stat.h>
#include <sys/types.h>
-#include "cutils/log.h"
+#include <android/log.h>
#define EVENT_LOG_TAG_dvm_lock_sample 20003
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index ea692cd..5f37b82 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -40,6 +40,7 @@
#include "base/unix_file/fd_file.h"
#include "elf_file.h"
#include "elf_utils.h"
+#include "gc_root.h"
#include "oat.h"
#include "mem_map.h"
#include "mirror/class.h"
@@ -239,6 +240,8 @@
}
// Readjust to be non-inclusive upper bound.
bss_end_ += sizeof(uint32_t);
+ // Find bss roots if present.
+ bss_roots_ = const_cast<uint8_t*>(FindDynamicSymbolAddress("oatbssroots", &symbol_error_msg));
}
return true;
@@ -291,8 +294,31 @@
return false;
}
+ if (!IsAligned<alignof(GcRoot<mirror::Object>)>(bss_begin_) ||
+ !IsAligned<alignof(GcRoot<mirror::Object>)>(bss_roots_) ||
+ !IsAligned<alignof(GcRoot<mirror::Object>)>(bss_end_)) {
+ *error_msg = StringPrintf("In oat file '%s' found unaligned bss symbol(s): "
+ "begin = %p, roots = %p, end = %p",
+ GetLocation().c_str(),
+ bss_begin_,
+ bss_roots_,
+ bss_end_);
+ return false;
+ }
+
+ if (bss_roots_ != nullptr && (bss_roots_ < bss_begin_ || bss_roots_ > bss_end_)) {
+ *error_msg = StringPrintf("In oat file '%s' found bss roots outside .bss: "
+ "%p is outside range [%p, %p]",
+ GetLocation().c_str(),
+ bss_roots_,
+ bss_begin_,
+ bss_end_);
+ return false;
+ }
+
PointerSize pointer_size = GetInstructionSetPointerSize(GetOatHeader().GetInstructionSet());
uint8_t* dex_cache_arrays = bss_begin_;
+ uint8_t* dex_cache_arrays_end = (bss_roots_ != nullptr) ? bss_roots_ : bss_end_;
uint32_t dex_file_count = GetOatHeader().GetDexFileCount();
oat_dex_files_storage_.reserve(dex_file_count);
for (size_t i = 0; i < dex_file_count; i++) {
@@ -469,13 +495,13 @@
if (dex_cache_arrays != nullptr) {
DexCacheArraysLayout layout(pointer_size, *header);
if (layout.Size() != 0u) {
- if (static_cast<size_t>(bss_end_ - dex_cache_arrays) < layout.Size()) {
+ if (static_cast<size_t>(dex_cache_arrays_end - dex_cache_arrays) < layout.Size()) {
*error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zu for '%s' with "
"truncated dex cache arrays, %zu < %zu.",
GetLocation().c_str(),
i,
dex_file_location.c_str(),
- static_cast<size_t>(bss_end_ - dex_cache_arrays),
+ static_cast<size_t>(dex_cache_arrays_end - dex_cache_arrays),
layout.Size());
return false;
}
@@ -506,9 +532,9 @@
}
}
- if (dex_cache_arrays != bss_end_) {
+ if (dex_cache_arrays != dex_cache_arrays_end) {
// We expect the bss section to be either empty (dex_cache_arrays and bss_end_
- // both null) or contain just the dex cache arrays and nothing else.
+ // both null) or contain just the dex cache arrays and optionally some GC roots.
*error_msg = StringPrintf("In oat file '%s' found unexpected bss size bigger by %zu bytes.",
GetLocation().c_str(),
static_cast<size_t>(bss_end_ - dex_cache_arrays));
@@ -1082,6 +1108,7 @@
end_(nullptr),
bss_begin_(nullptr),
bss_end_(nullptr),
+ bss_roots_(nullptr),
is_executable_(is_executable),
secondary_lookup_lock_("OatFile secondary lookup lock", kOatFileSecondaryLookupLock) {
CHECK(!location_.empty());
@@ -1121,6 +1148,16 @@
return kIsVdexEnabled ? vdex_->End() : End();
}
+ArrayRef<GcRoot<mirror::Object>> OatFile::GetBssGcRoots() const {
+ if (bss_roots_ != nullptr) {
+ auto* roots = reinterpret_cast<GcRoot<mirror::Object>*>(bss_roots_);
+ auto* roots_end = reinterpret_cast<GcRoot<mirror::Object>*>(bss_end_);
+ return ArrayRef<GcRoot<mirror::Object>>(roots, roots_end - roots);
+ } else {
+ return ArrayRef<GcRoot<mirror::Object>>();
+ }
+}
+
const OatFile::OatDexFile* OatFile::GetOatDexFile(const char* dex_location,
const uint32_t* dex_location_checksum,
std::string* error_msg) const {
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index a61b941..c3188cb 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -21,6 +21,7 @@
#include <string>
#include <vector>
+#include "base/array_ref.h"
#include "base/mutex.h"
#include "base/stringpiece.h"
#include "dex_file.h"
@@ -38,6 +39,7 @@
class BitVector;
class ElfFile;
+template <class MirrorType> class GcRoot;
class MemMap;
class OatMethodOffsets;
class OatHeader;
@@ -253,6 +255,10 @@
return BssEnd() - BssBegin();
}
+ size_t BssRootsOffset() const {
+ return bss_roots_ - BssBegin();
+ }
+
size_t DexSize() const {
return DexEnd() - DexBegin();
}
@@ -266,6 +272,8 @@
const uint8_t* DexBegin() const;
const uint8_t* DexEnd() const;
+ ArrayRef<GcRoot<mirror::Object>> GetBssGcRoots() const;
+
// Returns the absolute dex location for the encoded relative dex location.
//
// If not null, abs_dex_location is used to resolve the absolute dex
@@ -314,6 +322,9 @@
// Pointer to the end of the .bss section, if present, otherwise null.
uint8_t* bss_end_;
+ // Pointer to the beginning of the GC roots in .bss section, if present, otherwise null.
+ uint8_t* bss_roots_;
+
// Was this oat_file loaded executable?
const bool is_executable_;
diff --git a/runtime/oat_file_manager.cc b/runtime/oat_file_manager.cc
index acad2a9..64e5a63 100644
--- a/runtime/oat_file_manager.cc
+++ b/runtime/oat_file_manager.cc
@@ -30,6 +30,7 @@
#include "handle_scope-inl.h"
#include "mirror/class_loader.h"
#include "oat_file_assistant.h"
+#include "obj_ptr-inl.h"
#include "scoped_thread_state_change-inl.h"
#include "thread-inl.h"
#include "thread_list.h"
@@ -223,7 +224,7 @@
}
}
-static void IterateOverJavaDexFile(mirror::Object* dex_file,
+static void IterateOverJavaDexFile(ObjPtr<mirror::Object> dex_file,
ArtField* const cookie_field,
std::function<bool(const DexFile*)> fn)
REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -258,12 +259,12 @@
ArtField* const cookie_field = soa.DecodeField(WellKnownClasses::dalvik_system_DexFile_cookie);
ArtField* const dex_file_field =
soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile);
- mirror::Object* dex_path_list =
+ ObjPtr<mirror::Object> dex_path_list =
soa.DecodeField(WellKnownClasses::dalvik_system_PathClassLoader_pathList)->
GetObject(class_loader.Get());
if (dex_path_list != nullptr && dex_file_field != nullptr && cookie_field != nullptr) {
// DexPathList has an array dexElements of Elements[] which each contain a dex file.
- mirror::Object* dex_elements_obj =
+ ObjPtr<mirror::Object> dex_elements_obj =
soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList_dexElements)->
GetObject(dex_path_list);
// Loop through each dalvik.system.DexPathList$Element's dalvik.system.DexFile and look
@@ -276,7 +277,7 @@
// Should never happen, fall back to java code to throw a NPE.
break;
}
- mirror::Object* dex_file = dex_file_field->GetObject(element);
+ ObjPtr<mirror::Object> dex_file = dex_file_field->GetObject(element);
IterateOverJavaDexFile(dex_file, cookie_field, fn);
}
}
@@ -360,7 +361,7 @@
// We support this being dalvik.system.DexPathList$Element and dalvik.system.DexFile.
- mirror::Object* dex_file;
+ ObjPtr<mirror::Object> dex_file;
if (element_class == element->GetClass()) {
dex_file = dex_file_field->GetObject(element);
} else if (dexfile_class == element->GetClass()) {
diff --git a/runtime/obj_ptr.h b/runtime/obj_ptr.h
index d5ac33d..7c0c9df 100644
--- a/runtime/obj_ptr.h
+++ b/runtime/obj_ptr.h
@@ -88,11 +88,13 @@
return Decode() == ptr.Decode();
}
- ALWAYS_INLINE bool operator==(const MirrorType* ptr) const REQUIRES_SHARED(Locks::mutator_lock_) {
+ template <typename PointerType>
+ ALWAYS_INLINE bool operator==(const PointerType* ptr) const
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return Decode() == ptr;
}
- ALWAYS_INLINE bool operator==(std::nullptr_t) const REQUIRES_SHARED(Locks::mutator_lock_) {
+ ALWAYS_INLINE bool operator==(std::nullptr_t) const {
return IsNull();
}
@@ -100,16 +102,18 @@
return Decode() != ptr.Decode();
}
- ALWAYS_INLINE bool operator!=(const MirrorType* ptr) const REQUIRES_SHARED(Locks::mutator_lock_) {
+ template <typename PointerType>
+ ALWAYS_INLINE bool operator!=(const PointerType* ptr) const
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return Decode() != ptr;
}
- ALWAYS_INLINE bool operator!=(std::nullptr_t) const REQUIRES_SHARED(Locks::mutator_lock_) {
+ ALWAYS_INLINE bool operator!=(std::nullptr_t) const {
return !IsNull();
}
// Decode unchecked does not check that object pointer is valid. Do not use if you can avoid it.
- ALWAYS_INLINE MirrorType* DecodeUnchecked() const REQUIRES_SHARED(Locks::mutator_lock_) {
+ ALWAYS_INLINE MirrorType* DecodeUnchecked() const {
if (kPoison) {
return reinterpret_cast<MirrorType*>(
static_cast<uintptr_t>(static_cast<uint32_t>(reference_ << kObjectAlignmentShift)));
@@ -133,14 +137,40 @@
uintptr_t reference_;
};
+template<class MirrorType, bool kPoison, typename PointerType>
+ALWAYS_INLINE bool operator==(const PointerType* a, const ObjPtr<MirrorType, kPoison>& b)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return b == a;
+}
+
+template<class MirrorType, bool kPoison>
+ALWAYS_INLINE bool operator==(std::nullptr_t, const ObjPtr<MirrorType, kPoison>& b) {
+ return b == nullptr;
+}
+
+template<typename MirrorType, bool kPoison, typename PointerType>
+ALWAYS_INLINE bool operator!=(const PointerType* a, const ObjPtr<MirrorType, kPoison>& b)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return b != a;
+}
+
+template<class MirrorType, bool kPoison>
+ALWAYS_INLINE bool operator!=(std::nullptr_t, const ObjPtr<MirrorType, kPoison>& b) {
+ return b != nullptr;
+}
+
template<class MirrorType, bool kPoison = kIsDebugBuild>
static inline ObjPtr<MirrorType, kPoison> MakeObjPtr(MirrorType* ptr) {
return ObjPtr<MirrorType, kPoison>(ptr);
}
+template<class MirrorType, bool kPoison = kIsDebugBuild>
+static inline ObjPtr<MirrorType, kPoison> MakeObjPtr(ObjPtr<MirrorType, kPoison> ptr) {
+ return ObjPtr<MirrorType, kPoison>(ptr);
+}
+
template<class MirrorType, bool kPoison>
-ALWAYS_INLINE std::ostream& operator<<(std::ostream& os, ObjPtr<MirrorType, kPoison> ptr)
- REQUIRES_SHARED(Locks::mutator_lock_);
+ALWAYS_INLINE std::ostream& operator<<(std::ostream& os, ObjPtr<MirrorType, kPoison> ptr);
} // namespace art
diff --git a/runtime/openjdkjvmti/transform.cc b/runtime/openjdkjvmti/transform.cc
index ac348e7..9c42b2f 100644
--- a/runtime/openjdkjvmti/transform.cc
+++ b/runtime/openjdkjvmti/transform.cc
@@ -209,9 +209,9 @@
hs.NewHandle(path_list_field->GetObject(h_class_loader.Get())));
CHECK(path_list.Get() != nullptr);
CHECK(!self->IsExceptionPending());
- art::Handle<art::mirror::ObjectArray<art::mirror::Object>> dex_elements_list(
- hs.NewHandle(art::down_cast<art::mirror::ObjectArray<art::mirror::Object>*>(
- dex_path_list_element_field->GetObject(path_list.Get()))));
+ art::Handle<art::mirror::ObjectArray<art::mirror::Object>> dex_elements_list(hs.NewHandle(
+ dex_path_list_element_field->GetObject(path_list.Get())->
+ AsObjectArray<art::mirror::Object>()));
CHECK(!self->IsExceptionPending());
CHECK(dex_elements_list.Get() != nullptr);
size_t num_elements = dex_elements_list->GetLength();
diff --git a/runtime/proxy_test.cc b/runtime/proxy_test.cc
index 43b0b3d..1119ccf 100644
--- a/runtime/proxy_test.cc
+++ b/runtime/proxy_test.cc
@@ -180,7 +180,7 @@
ArtField* field = &static_fields->At(0);
EXPECT_STREQ("interfaces", field->GetName());
EXPECT_STREQ("[Ljava/lang/Class;", field->GetTypeDescriptor());
- EXPECT_EQ(interfacesFieldClass.Get(), field->GetType<true>());
+ EXPECT_OBJ_PTR_EQ(MakeObjPtr(interfacesFieldClass.Get()), field->GetType<true>());
std::string temp;
EXPECT_STREQ("L$Proxy1234;", field->GetDeclaringClass()->GetDescriptor(&temp));
EXPECT_FALSE(field->IsPrimitiveType());
@@ -189,7 +189,7 @@
field = &static_fields->At(1);
EXPECT_STREQ("throws", field->GetName());
EXPECT_STREQ("[[Ljava/lang/Class;", field->GetTypeDescriptor());
- EXPECT_EQ(throwsFieldClass.Get(), field->GetType<true>());
+ EXPECT_OBJ_PTR_EQ(MakeObjPtr(throwsFieldClass.Get()), field->GetType<true>());
EXPECT_STREQ("L$Proxy1234;", field->GetDeclaringClass()->GetDescriptor(&temp));
EXPECT_FALSE(field->IsPrimitiveType());
}
@@ -224,10 +224,10 @@
ASSERT_TRUE(static_fields1 != nullptr);
ASSERT_EQ(2u, static_fields1->size());
- EXPECT_EQ(static_fields0->At(0).GetDeclaringClass(), proxyClass0.Get());
- EXPECT_EQ(static_fields0->At(1).GetDeclaringClass(), proxyClass0.Get());
- EXPECT_EQ(static_fields1->At(0).GetDeclaringClass(), proxyClass1.Get());
- EXPECT_EQ(static_fields1->At(1).GetDeclaringClass(), proxyClass1.Get());
+ EXPECT_OBJ_PTR_EQ(static_fields0->At(0).GetDeclaringClass(), MakeObjPtr(proxyClass0.Get()));
+ EXPECT_OBJ_PTR_EQ(static_fields0->At(1).GetDeclaringClass(), MakeObjPtr(proxyClass0.Get()));
+ EXPECT_OBJ_PTR_EQ(static_fields1->At(0).GetDeclaringClass(), MakeObjPtr(proxyClass1.Get()));
+ EXPECT_OBJ_PTR_EQ(static_fields1->At(1).GetDeclaringClass(), MakeObjPtr(proxyClass1.Get()));
ASSERT_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
ASSERT_FALSE(Runtime::Current()->IsActiveTransaction());
diff --git a/runtime/quick/inline_method_analyser.cc b/runtime/quick/inline_method_analyser.cc
index dc6f4eb..b009b47 100644
--- a/runtime/quick/inline_method_analyser.cc
+++ b/runtime/quick/inline_method_analyser.cc
@@ -738,8 +738,8 @@
if (field == nullptr || field->IsStatic()) {
return false;
}
- mirror::Class* method_class = method->GetDeclaringClass();
- mirror::Class* field_class = field->GetDeclaringClass();
+ ObjPtr<mirror::Class> method_class = method->GetDeclaringClass();
+ ObjPtr<mirror::Class> field_class = field->GetDeclaringClass();
if (!method_class->CanAccessResolvedField(field_class, field, dex_cache, field_idx) ||
(is_put && field->IsFinal() && method_class != field_class)) {
return false;
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index b663b4c..066bc12 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -244,13 +244,13 @@
#define DO_FIRST_ARG(match_descriptor, get_fn, append) { \
if (LIKELY(arg != nullptr && arg->GetClass()->DescriptorEquals(match_descriptor))) { \
ArtField* primitive_field = arg->GetClass()->GetInstanceField(0); \
- append(primitive_field-> get_fn(arg.Decode()));
+ append(primitive_field-> get_fn(arg));
#define DO_ARG(match_descriptor, get_fn, append) \
} else if (LIKELY(arg != nullptr && \
arg->GetClass<>()->DescriptorEquals(match_descriptor))) { \
ArtField* primitive_field = arg->GetClass()->GetInstanceField(0); \
- append(primitive_field-> get_fn(arg.Decode()));
+ append(primitive_field-> get_fn(arg));
#define DO_FAIL(expected) \
} else { \
@@ -801,28 +801,28 @@
ArtField* primitive_field = &klass->GetIFieldsPtr()->At(0);
if (klass->DescriptorEquals("Ljava/lang/Boolean;")) {
src_class = class_linker->FindPrimitiveClass('Z');
- boxed_value.SetZ(primitive_field->GetBoolean(o.Decode()));
+ boxed_value.SetZ(primitive_field->GetBoolean(o));
} else if (klass->DescriptorEquals("Ljava/lang/Byte;")) {
src_class = class_linker->FindPrimitiveClass('B');
- boxed_value.SetB(primitive_field->GetByte(o.Decode()));
+ boxed_value.SetB(primitive_field->GetByte(o));
} else if (klass->DescriptorEquals("Ljava/lang/Character;")) {
src_class = class_linker->FindPrimitiveClass('C');
- boxed_value.SetC(primitive_field->GetChar(o.Decode()));
+ boxed_value.SetC(primitive_field->GetChar(o));
} else if (klass->DescriptorEquals("Ljava/lang/Float;")) {
src_class = class_linker->FindPrimitiveClass('F');
- boxed_value.SetF(primitive_field->GetFloat(o.Decode()));
+ boxed_value.SetF(primitive_field->GetFloat(o));
} else if (klass->DescriptorEquals("Ljava/lang/Double;")) {
src_class = class_linker->FindPrimitiveClass('D');
- boxed_value.SetD(primitive_field->GetDouble(o.Decode()));
+ boxed_value.SetD(primitive_field->GetDouble(o));
} else if (klass->DescriptorEquals("Ljava/lang/Integer;")) {
src_class = class_linker->FindPrimitiveClass('I');
- boxed_value.SetI(primitive_field->GetInt(o.Decode()));
+ boxed_value.SetI(primitive_field->GetInt(o));
} else if (klass->DescriptorEquals("Ljava/lang/Long;")) {
src_class = class_linker->FindPrimitiveClass('J');
- boxed_value.SetJ(primitive_field->GetLong(o.Decode()));
+ boxed_value.SetJ(primitive_field->GetLong(o));
} else if (klass->DescriptorEquals("Ljava/lang/Short;")) {
src_class = class_linker->FindPrimitiveClass('S');
- boxed_value.SetS(primitive_field->GetShort(o.Decode()));
+ boxed_value.SetS(primitive_field->GetShort(o));
} else {
std::string temp;
ThrowIllegalArgumentException(
@@ -888,13 +888,13 @@
}
if ((access_flags & kAccProtected) != 0) {
if (obj != nullptr && !obj->InstanceOf(calling_class) &&
- !declaring_class->IsInSamePackage(calling_class.Decode())) {
+ !declaring_class->IsInSamePackage(calling_class)) {
return false;
- } else if (declaring_class->IsAssignableFrom(calling_class.Decode())) {
+ } else if (declaring_class->IsAssignableFrom(calling_class)) {
return true;
}
}
- return declaring_class->IsInSamePackage(calling_class.Decode());
+ return declaring_class->IsInSamePackage(calling_class);
}
void InvalidReceiverError(ObjPtr<mirror::Object> o, ObjPtr<mirror::Class> c) {
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index df0dca0..f6a854c 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -422,7 +422,7 @@
}
};
-void Runtime::Abort() {
+void Runtime::Abort(const char* msg) {
gAborting++; // set before taking any locks
// Ensure that we don't have multiple threads trying to abort at once,
@@ -437,6 +437,12 @@
AbortState state;
LOG(FATAL_WITHOUT_ABORT) << Dumpable<AbortState>(state);
+ // Sometimes we dump long messages, and the Android abort message only retains the first line.
+ // In those cases, just log the message again, to avoid logcat limits.
+ if (msg != nullptr && strchr(msg, '\n') != nullptr) {
+ LOG(FATAL_WITHOUT_ABORT) << msg;
+ }
+
// Call the abort hook if we have one.
if (Runtime::Current() != nullptr && Runtime::Current()->abort_ != nullptr) {
LOG(FATAL_WITHOUT_ABORT) << "Calling abort hook...";
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 30f1b4a..84c6b6f 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -225,7 +225,7 @@
// Aborts semi-cleanly. Used in the implementation of LOG(FATAL), which most
// callers should prefer.
- NO_RETURN static void Abort() REQUIRES(!Locks::abort_lock_);
+ NO_RETURN static void Abort(const char* msg) REQUIRES(!Locks::abort_lock_);
// Returns the "main" ThreadGroup, used when attaching user threads.
jobject GetMainThreadGroup() const;
diff --git a/runtime/thread-inl.h b/runtime/thread-inl.h
index bb6eb79..d75a788 100644
--- a/runtime/thread-inl.h
+++ b/runtime/thread-inl.h
@@ -175,9 +175,7 @@
inline void Thread::TransitionFromRunnableToSuspended(ThreadState new_state) {
AssertThreadSuspensionIsAllowable();
- if (kIsDebugBuild) {
- PoisonObjectPointers();
- }
+ PoisonObjectPointersIfDebug();
DCHECK_EQ(this, Thread::Current());
// Change to non-runnable state, thereby appearing suspended to the system.
TransitionToSuspendedAndRunCheckpoints(new_state);
diff --git a/runtime/thread.cc b/runtime/thread.cc
index ec1bb3f..b8c7096 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -563,8 +563,8 @@
ScopedObjectAccess soa(env);
ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_name);
- mirror::String* java_name = reinterpret_cast<mirror::String*>(f->GetObject(
- soa.Decode<mirror::Object>(java_peer).Decode()));
+ ObjPtr<mirror::String> java_name =
+ f->GetObject(soa.Decode<mirror::Object>(java_peer))->AsString();
std::string thread_name;
if (java_name != nullptr) {
thread_name = java_name->ToModifiedUtf8();
@@ -845,11 +845,9 @@
soa.DecodeField(WellKnownClasses::java_lang_Thread_daemon)->
SetBoolean<kTransactionActive>(tlsPtr_.opeer, thread_is_daemon);
soa.DecodeField(WellKnownClasses::java_lang_Thread_group)->
- SetObject<kTransactionActive>(tlsPtr_.opeer,
- soa.Decode<mirror::Object>(thread_group).Decode());
+ SetObject<kTransactionActive>(tlsPtr_.opeer, soa.Decode<mirror::Object>(thread_group));
soa.DecodeField(WellKnownClasses::java_lang_Thread_name)->
- SetObject<kTransactionActive>(tlsPtr_.opeer,
- soa.Decode<mirror::Object>(thread_name).Decode());
+ SetObject<kTransactionActive>(tlsPtr_.opeer, soa.Decode<mirror::Object>(thread_name));
soa.DecodeField(WellKnownClasses::java_lang_Thread_priority)->
SetInt<kTransactionActive>(tlsPtr_.opeer, thread_priority);
}
@@ -948,8 +946,11 @@
mirror::String* Thread::GetThreadName(const ScopedObjectAccessAlreadyRunnable& soa) const {
ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_name);
- return (tlsPtr_.opeer != nullptr) ?
- reinterpret_cast<mirror::String*>(f->GetObject(tlsPtr_.opeer)) : nullptr;
+ if (tlsPtr_.opeer == nullptr) {
+ return nullptr;
+ }
+ ObjPtr<mirror::Object> name = f->GetObject(tlsPtr_.opeer);
+ return name == nullptr ? nullptr : name->AsString();
}
void Thread::GetThreadName(std::string& name) const {
@@ -1220,14 +1221,14 @@
is_daemon = soa.DecodeField(WellKnownClasses::java_lang_Thread_daemon)
->GetBoolean(thread->tlsPtr_.opeer);
- mirror::Object* thread_group =
+ ObjPtr<mirror::Object> thread_group =
soa.DecodeField(WellKnownClasses::java_lang_Thread_group)->GetObject(thread->tlsPtr_.opeer);
if (thread_group != nullptr) {
ArtField* group_name_field =
soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_name);
- mirror::String* group_name_string =
- reinterpret_cast<mirror::String*>(group_name_field->GetObject(thread_group));
+ ObjPtr<mirror::String> group_name_string =
+ group_name_field->GetObject(thread_group)->AsString();
group_name = (group_name_string != nullptr) ? group_name_string->ToModifiedUtf8() : "<null>";
}
} else {
@@ -1711,7 +1712,7 @@
// Thread.join() is implemented as an Object.wait() on the Thread.lock object. Signal anyone
// who is waiting.
- mirror::Object* lock =
+ ObjPtr<mirror::Object> lock =
soa.DecodeField(WellKnownClasses::java_lang_Thread_lock)->GetObject(tlsPtr_.opeer);
// (This conditional is only needed for tests, where Thread.lock won't have been set.)
if (lock != nullptr) {
@@ -1803,7 +1804,7 @@
void Thread::RemoveFromThreadGroup(ScopedObjectAccess& soa) {
// this.group.removeThread(this);
// group can be null if we're in the compiler or a test.
- mirror::Object* ogroup = soa.DecodeField(WellKnownClasses::java_lang_Thread_group)
+ ObjPtr<mirror::Object> ogroup = soa.DecodeField(WellKnownClasses::java_lang_Thread_group)
->GetObject(tlsPtr_.opeer);
if (ogroup != nullptr) {
ScopedLocalRef<jobject> group(soa.Env(), soa.AddLocalReference<jobject>(ogroup));
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 87b6dc3..50466ed 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -4563,9 +4563,11 @@
return nullptr;
} else {
std::string temp;
- mirror::Class* klass = field->GetDeclaringClass();
+ ObjPtr<mirror::Class> klass = field->GetDeclaringClass();
const RegType& field_klass =
- FromClass(klass->GetDescriptor(&temp), klass, klass->CannotBeAssignedFromOtherTypes());
+ FromClass(klass->GetDescriptor(&temp),
+ klass.Decode(),
+ klass->CannotBeAssignedFromOtherTypes());
if (obj_type.IsUninitializedTypes()) {
// Field accesses through uninitialized references are only allowable for constructors where
// the field is declared in this class.
@@ -4662,10 +4664,11 @@
}
}
- mirror::Class* field_type_class =
+ ObjPtr<mirror::Class> field_type_class =
can_load_classes_ ? field->GetType<true>() : field->GetType<false>();
if (field_type_class != nullptr) {
- field_type = &FromClass(field->GetTypeDescriptor(), field_type_class,
+ field_type = &FromClass(field->GetTypeDescriptor(),
+ field_type_class.Decode(),
field_type_class->CannotBeAssignedFromOtherTypes());
} else {
DCHECK(!can_load_classes_ || self_->IsExceptionPending());
@@ -4785,12 +4788,12 @@
// Get the field type.
const RegType* field_type;
{
- mirror::Class* field_type_class = can_load_classes_ ? field->GetType<true>() :
+ ObjPtr<mirror::Class> field_type_class = can_load_classes_ ? field->GetType<true>() :
field->GetType<false>();
if (field_type_class != nullptr) {
field_type = &FromClass(field->GetTypeDescriptor(),
- field_type_class,
+ field_type_class.Decode(),
field_type_class->CannotBeAssignedFromOtherTypes());
} else {
Thread* self = Thread::Current();
diff --git a/runtime/verifier/reg_type-inl.h b/runtime/verifier/reg_type-inl.h
index d93aaa1..10f1be5 100644
--- a/runtime/verifier/reg_type-inl.h
+++ b/runtime/verifier/reg_type-inl.h
@@ -44,7 +44,7 @@
}
}
-inline bool RegType::CanAccessMember(mirror::Class* klass, uint32_t access_flags) const {
+inline bool RegType::CanAccessMember(ObjPtr<mirror::Class> klass, uint32_t access_flags) const {
if ((access_flags & kAccPublic) != 0) {
return true;
}
diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h
index 9170bb1..472381d 100644
--- a/runtime/verifier/reg_type.h
+++ b/runtime/verifier/reg_type.h
@@ -29,6 +29,7 @@
#include "base/stringpiece.h"
#include "gc_root.h"
#include "handle_scope.h"
+#include "obj_ptr.h"
#include "object_callbacks.h"
#include "primitive.h"
@@ -205,7 +206,7 @@
REQUIRES_SHARED(Locks::mutator_lock_);
// Can this type access a member with the given properties?
- bool CanAccessMember(mirror::Class* klass, uint32_t access_flags) const
+ bool CanAccessMember(ObjPtr<mirror::Class> klass, uint32_t access_flags) const
REQUIRES_SHARED(Locks::mutator_lock_);
// Can this type be assigned by src?
diff --git a/runtime/verifier/verifier_deps.cc b/runtime/verifier/verifier_deps.cc
index 350c838..3e1958f 100644
--- a/runtime/verifier/verifier_deps.cc
+++ b/runtime/verifier/verifier_deps.cc
@@ -19,6 +19,7 @@
#include "compiler_callbacks.h"
#include "leb128.h"
#include "mirror/class-inl.h"
+#include "obj_ptr-inl.h"
#include "runtime.h"
namespace art {
@@ -107,10 +108,10 @@
}
}
-bool VerifierDeps::IsInClassPath(mirror::Class* klass) {
+bool VerifierDeps::IsInClassPath(ObjPtr<mirror::Class> klass) {
DCHECK(klass != nullptr);
- mirror::DexCache* dex_cache = klass->GetDexCache();
+ ObjPtr<mirror::DexCache> dex_cache = klass->GetDexCache();
if (dex_cache == nullptr) {
// This is a synthesized class, in this case always an array. They are not
// defined in the compiled DEX files and therefore are part of the classpath.
diff --git a/runtime/verifier/verifier_deps.h b/runtime/verifier/verifier_deps.h
index dc8dfaf..3223f6f 100644
--- a/runtime/verifier/verifier_deps.h
+++ b/runtime/verifier/verifier_deps.h
@@ -26,6 +26,7 @@
#include "base/array_ref.h"
#include "base/mutex.h"
#include "method_resolution_kind.h"
+#include "obj_ptr.h"
#include "os.h"
namespace art {
@@ -176,7 +177,7 @@
// Returns true if `klass` is null or not defined in any of dex files which
// were reported as being compiled.
- bool IsInClassPath(mirror::Class* klass)
+ bool IsInClassPath(ObjPtr<mirror::Class> klass)
REQUIRES_SHARED(Locks::mutator_lock_);
// Returns the index of `str`. If it is defined in `dex_file_`, this is the dex
diff --git a/test/552-checker-sharpening/src/Main.java b/test/552-checker-sharpening/src/Main.java
index 2232ff4..3c053cf 100644
--- a/test/552-checker-sharpening/src/Main.java
+++ b/test/552-checker-sharpening/src/Main.java
@@ -285,31 +285,27 @@
/// CHECK: LoadString load_kind:DexCacheViaMethod
/// CHECK-START-X86: java.lang.String Main.$noinline$getNonBootImageString() sharpening (after)
- /// CHECK: LoadString load_kind:DexCachePcRelative
+ /// CHECK: LoadString load_kind:BssEntry
/// CHECK-START-X86: java.lang.String Main.$noinline$getNonBootImageString() pc_relative_fixups_x86 (after)
/// CHECK-DAG: X86ComputeBaseMethodAddress
- /// CHECK-DAG: LoadString load_kind:DexCachePcRelative
+ /// CHECK-DAG: LoadString load_kind:BssEntry
/// CHECK-START-X86_64: java.lang.String Main.$noinline$getNonBootImageString() sharpening (after)
- /// CHECK: LoadString load_kind:DexCachePcRelative
+ /// CHECK: LoadString load_kind:BssEntry
/// CHECK-START-ARM: java.lang.String Main.$noinline$getNonBootImageString() sharpening (after)
- /// CHECK: LoadString load_kind:DexCachePcRelative
-
- /// CHECK-START-ARM: java.lang.String Main.$noinline$getNonBootImageString() dex_cache_array_fixups_arm (after)
- /// CHECK-DAG: ArmDexCacheArraysBase
- /// CHECK-DAG: LoadString load_kind:DexCachePcRelative
+ /// CHECK: LoadString load_kind:BssEntry
/// CHECK-START-ARM64: java.lang.String Main.$noinline$getNonBootImageString() sharpening (after)
- /// CHECK: LoadString load_kind:DexCachePcRelative
+ /// CHECK: LoadString load_kind:BssEntry
/// CHECK-START-MIPS: java.lang.String Main.$noinline$getNonBootImageString() sharpening (after)
- /// CHECK: LoadString load_kind:DexCachePcRelative
+ /// CHECK: LoadString load_kind:BssEntry
- /// CHECK-START-MIPS: java.lang.String Main.$noinline$getNonBootImageString() dex_cache_array_fixups_mips (after)
- /// CHECK-DAG: MipsDexCacheArraysBase
- /// CHECK-DAG: LoadString load_kind:DexCachePcRelative
+ /// CHECK-START-MIPS: java.lang.String Main.$noinline$getNonBootImageString() pc_relative_fixups_mips (after)
+ /// CHECK-DAG: MipsComputeBaseMethodAddress
+ /// CHECK-DAG: LoadString load_kind:BssEntry
public static String $noinline$getNonBootImageString() {
// Prevent inlining to avoid the string comparison being optimized away.
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 211a69f..6464321 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -233,11 +233,9 @@
# Disable 149-suspend-all-stress, its output is flaky (b/28988206).
# Disable 577-profile-foreign-dex (b/27454772).
-# Disable 552-checker-sharpening, until compiler component of new string dex cache is added (@cwadsworth, @vmarko)
TEST_ART_BROKEN_ALL_TARGET_TESTS := \
149-suspend-all-stress \
577-profile-foreign-dex \
- 552-checker-sharpening \
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
$(COMPILER_TYPES), $(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
@@ -1040,7 +1038,7 @@
endif
endif
run_test_rule_name := test-art-$(1)-run-test-$(2)-$(3)-$(4)-$(5)-$(6)-$(7)-$(8)-$(9)-$(10)-$(11)-$(12)$(13)
- run_test_options := --output-path $(ART_HOST_TEST_DIR)/run-test-output/$$(run_test_rule_name) \
+ run_test_options := --output-path $$(ART_HOST_TEST_DIR)/run-test-output/$$(run_test_rule_name) \
$$(run_test_options)
ifneq ($(ART_TEST_ANDROID_ROOT),)
run_test_options := --android-root $(ART_TEST_ANDROID_ROOT) $$(run_test_options)
diff --git a/tools/jfuzz/run_dex_fuzz_test.py b/tools/jfuzz/run_dex_fuzz_test.py
index 56cdf02..50c4f20 100755
--- a/tools/jfuzz/run_dex_fuzz_test.py
+++ b/tools/jfuzz/run_dex_fuzz_test.py
@@ -26,6 +26,7 @@
os.path.realpath(__file__))))
from common.common import FatalError
+from common.common import GetEnvVariableOrError
from common.common import GetJackClassPath
from common.common import RetCode
from common.common import RunCommand
@@ -54,6 +55,7 @@
self._results_dir = None
self._dexfuzz_dir = None
self._inputs_dir = None
+ self._dexfuzz_env = None
def __enter__(self):
"""On entry, enters new temp directory after saving current directory.
@@ -68,7 +70,14 @@
if self._results_dir is None or self._dexfuzz_dir is None or \
self._inputs_dir is None:
raise FatalError('Cannot obtain temp directory')
+ self._dexfuzz_env = os.environ.copy()
+ self._dexfuzz_env['ANDROID_DATA'] = self._dexfuzz_dir
+ top = GetEnvVariableOrError('ANDROID_BUILD_TOP')
+ self._dexfuzz_env['PATH'] = (top + '/art/tools/bisection_search:' +
+ self._dexfuzz_env['PATH'])
os.chdir(self._dexfuzz_dir)
+ os.mkdir('divergent_programs')
+ os.mkdir('bisection_outputs')
return self
def __exit__(self, etype, evalue, etraceback):
@@ -110,15 +119,15 @@
def RunDexFuzz(self):
"""Starts the DexFuzz testing."""
os.chdir(self._dexfuzz_dir)
- os.environ['ANDROID_DATA'] = self._dexfuzz_dir
dexfuzz_args = ['--inputs=' + self._inputs_dir, '--execute',
'--execute-class=Test', '--repeat=' + str(self._num_tests),
- '--dump-output', '--interpreter', '--optimizing']
+ '--dump-output', '--interpreter', '--optimizing',
+ '--bisection-search']
if self._device is not None:
dexfuzz_args += ['--device=' + self._device, '--allarm']
else:
dexfuzz_args += ['--host'] # Assume host otherwise.
- check_call(['dexfuzz'] + dexfuzz_args)
+ check_call(['dexfuzz'] + dexfuzz_args, env=self._dexfuzz_env)
# TODO: summarize findings.