Diffstat (limited to 'compiler')
-rw-r--r--  compiler/Android.bp                            |   1
-rw-r--r--  compiler/compiled_method.h                     |   4
-rw-r--r--  compiler/debug/elf_debug_info_writer.h         |   2
-rw-r--r--  compiler/dex/verification_results.cc           |  57
-rw-r--r--  compiler/dex/verification_results.h            |  17
-rw-r--r--  compiler/driver/compiler_driver.cc             |  65
-rw-r--r--  compiler/driver/compiler_driver.h              |  26
-rw-r--r--  compiler/driver/compiler_driver_test.cc        |   2
-rw-r--r--  compiler/image_writer.cc                       |   3
-rw-r--r--  compiler/intrinsics_list.h                     |   6
-rw-r--r--  compiler/linker/relative_patcher_test.h        |   3
-rw-r--r--  compiler/optimizing/code_generator_arm.cc      |  14
-rw-r--r--  compiler/optimizing/code_generator_arm.h       |   5
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc    |  14
-rw-r--r--  compiler/optimizing/code_generator_arm64.h     |   7
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc | 256
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.h  |   6
-rw-r--r--  compiler/optimizing/code_generator_mips.cc     |  12
-rw-r--r--  compiler/optimizing/code_generator_mips.h      |   4
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc   |   2
-rw-r--r--  compiler/optimizing/code_generator_x86.cc      |  16
-rw-r--r--  compiler/optimizing/code_generator_x86.h       |   3
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc   |  16
-rw-r--r--  compiler/optimizing/code_generator_x86_64.h    |   2
-rw-r--r--  compiler/optimizing/escape.cc                  |  49
-rw-r--r--  compiler/optimizing/escape.h                   |  23
-rw-r--r--  compiler/optimizing/instruction_builder.cc     |   4
-rw-r--r--  compiler/optimizing/instruction_simplifier.cc  |  63
-rw-r--r--  compiler/optimizing/intrinsics_arm.cc          |   6
-rw-r--r--  compiler/optimizing/intrinsics_arm64.cc        |   6
-rw-r--r--  compiler/optimizing/intrinsics_arm_vixl.cc     |  11
-rw-r--r--  compiler/optimizing/intrinsics_mips.cc         |   6
-rw-r--r--  compiler/optimizing/intrinsics_mips64.cc       |   6
-rw-r--r--  compiler/optimizing/intrinsics_x86.cc          |   6
-rw-r--r--  compiler/optimizing/intrinsics_x86_64.cc       |   6
-rw-r--r--  compiler/optimizing/load_store_elimination.cc  |  29
-rw-r--r--  compiler/optimizing/nodes.h                    |  27
-rw-r--r--  compiler/optimizing/sharpening.cc              |   2
-rw-r--r--  compiler/utils/atomic_method_ref_map-inl.h     |  83
-rw-r--r--  compiler/utils/atomic_method_ref_map.h         |  71
-rw-r--r--  compiler/utils/atomic_method_ref_map_test.cc   |  71
-rw-r--r--  compiler/utils/string_reference_test.cc        |  19
-rw-r--r--  compiler/utils/test_dex_file_builder_test.cc   |   3
-rw-r--r--  compiler/verifier_deps_test.cc                 |  26
44 files changed, 754 insertions, 306 deletions
diff --git a/compiler/Android.bp b/compiler/Android.bp
index b883e0881a..db55ea0ef7 100644
--- a/compiler/Android.bp
+++ b/compiler/Android.bp
@@ -349,6 +349,7 @@ art_cc_test {
"optimizing/ssa_test.cc",
"optimizing/stack_map_test.cc",
"optimizing/suspend_check_test.cc",
+ "utils/atomic_method_ref_map_test.cc",
"utils/dedupe_set_test.cc",
"utils/intrusive_forward_list_test.cc",
"utils/string_reference_test.cc",
diff --git a/compiler/compiled_method.h b/compiler/compiled_method.h
index 174e85e1bf..bbf9eee0e5 100644
--- a/compiler/compiled_method.h
+++ b/compiler/compiled_method.h
@@ -315,11 +315,11 @@ class LinkerPatch {
return target_dex_file_;
}
- uint32_t TargetStringIndex() const {
+ dex::StringIndex TargetStringIndex() const {
DCHECK(patch_type_ == Type::kString ||
patch_type_ == Type::kStringRelative ||
patch_type_ == Type::kStringBssEntry);
- return string_idx_;
+ return dex::StringIndex(string_idx_);
}
const DexFile* TargetDexCacheDexFile() const {
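Throughout this change, APIs that previously passed raw uint32_t string indices now take a strongly-typed dex::StringIndex, so a string index can no longer be silently confused with some other integer id; the explicit conversion in TargetStringIndex() above is the pattern repeated across the code generators below. The wrapper itself is defined in dex_file_types.h (outside this diff); judging from the dex::StringIndex(...) constructions and .index_ accesses in this patch, it is roughly the following sketch:

    // Minimal sketch of the strong-typedef pattern (illustrative, not the
    // exact ART class; the real definition lives outside this diff).
    #include <cstdint>
    #include <limits>

    namespace dex {

    class StringIndex {
     public:
      uint32_t index_;  // Raw value, still reachable for linker patches etc.

      constexpr StringIndex() : index_(std::numeric_limits<uint32_t>::max()) {}
      explicit constexpr StringIndex(uint32_t index) : index_(index) {}

      bool IsValid() const { return index_ != std::numeric_limits<uint32_t>::max(); }
      bool operator==(const StringIndex& other) const { return index_ == other.index_; }
      bool operator<(const StringIndex& other) const { return index_ < other.index_; }
    };

    }  // namespace dex

The explicit constructor is the point: every conversion from a plain integer has to be spelled out, turning accidental index mix-ups into compile errors.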
diff --git a/compiler/debug/elf_debug_info_writer.h b/compiler/debug/elf_debug_info_writer.h
index 0a4f094494..30d4b47c6a 100644
--- a/compiler/debug/elf_debug_info_writer.h
+++ b/compiler/debug/elf_debug_info_writer.h
@@ -53,7 +53,7 @@ static std::vector<const char*> GetParamNames(const MethodDebugInfo* mi) {
uint32_t parameters_size = DecodeUnsignedLeb128(&stream);
for (uint32_t i = 0; i < parameters_size; ++i) {
uint32_t id = DecodeUnsignedLeb128P1(&stream);
- names.push_back(mi->dex_file->StringDataByIdx(id));
+ names.push_back(mi->dex_file->StringDataByIdx(dex::StringIndex(id)));
}
}
}
diff --git a/compiler/dex/verification_results.cc b/compiler/dex/verification_results.cc
index 3fb10d89dd..669d8cd991 100644
--- a/compiler/dex/verification_results.cc
+++ b/compiler/dex/verification_results.cc
@@ -23,6 +23,7 @@
#include "driver/compiler_options.h"
#include "thread.h"
#include "thread-inl.h"
+#include "utils/atomic_method_ref_map-inl.h"
#include "verified_method.h"
#include "verifier/method_verifier-inl.h"
@@ -35,8 +36,11 @@ VerificationResults::VerificationResults(const CompilerOptions* compiler_options
VerificationResults::~VerificationResults() {
WriterMutexLock mu(Thread::Current(), verified_methods_lock_);
- DeleteResults(preregistered_dex_files_);
STLDeleteValues(&verified_methods_);
+ atomic_verified_methods_.Visit([](const MethodReference& ref ATTRIBUTE_UNUSED,
+ const VerifiedMethod* method) {
+ delete method;
+ });
}
void VerificationResults::ProcessVerifiedMethod(verifier::MethodVerifier* method_verifier) {
@@ -49,16 +53,17 @@ void VerificationResults::ProcessVerifiedMethod(verifier::MethodVerifier* method
// We'll punt this later.
return;
}
- bool inserted;
- DexFileMethodArray* const array = GetMethodArray(ref.dex_file);
+ AtomicMap::InsertResult result = atomic_verified_methods_.Insert(ref,
+ /*expected*/ nullptr,
+ verified_method.get());
const VerifiedMethod* existing = nullptr;
- if (array != nullptr) {
- DCHECK(array != nullptr);
- Atomic<const VerifiedMethod*>* slot = &(*array)[ref.dex_method_index];
- inserted = slot->CompareExchangeStrongSequentiallyConsistent(nullptr, verified_method.get());
+ bool inserted;
+ if (result != AtomicMap::kInsertResultInvalidDexFile) {
+ inserted = (result == AtomicMap::kInsertResultSuccess);
if (!inserted) {
- existing = slot->LoadSequentiallyConsistent();
- DCHECK_NE(verified_method.get(), existing);
+ // Rare case.
+ CHECK(atomic_verified_methods_.Get(ref, &existing));
+ CHECK_NE(verified_method.get(), existing);
}
} else {
WriterMutexLock mu(Thread::Current(), verified_methods_lock_);
@@ -89,9 +94,9 @@ void VerificationResults::ProcessVerifiedMethod(verifier::MethodVerifier* method
}
const VerifiedMethod* VerificationResults::GetVerifiedMethod(MethodReference ref) {
- DexFileMethodArray* array = GetMethodArray(ref.dex_file);
- if (array != nullptr) {
- return (*array)[ref.dex_method_index].LoadRelaxed();
+ const VerifiedMethod* ret = nullptr;
+ if (atomic_verified_methods_.Get(ref, &ret)) {
+ return ret;
}
ReaderMutexLock mu(Thread::Current(), verified_methods_lock_);
auto it = verified_methods_.find(ref);
@@ -124,10 +129,8 @@ bool VerificationResults::IsCandidateForCompilation(MethodReference&,
return true;
}
-void VerificationResults::PreRegisterDexFile(const DexFile* dex_file) {
- CHECK(preregistered_dex_files_.find(dex_file) == preregistered_dex_files_.end())
- << dex_file->GetLocation();
- DexFileMethodArray array(dex_file->NumMethodIds());
+void VerificationResults::AddDexFile(const DexFile* dex_file) {
+ atomic_verified_methods_.AddDexFile(dex_file);
WriterMutexLock mu(Thread::Current(), verified_methods_lock_);
// There can be some verified methods that are already registered for the dex_file since we set
// up well known classes earlier. Remove these and put them in the array so that we don't
@@ -135,31 +138,13 @@ void VerificationResults::PreRegisterDexFile(const DexFile* dex_file) {
for (auto it = verified_methods_.begin(); it != verified_methods_.end(); ) {
MethodReference ref = it->first;
if (ref.dex_file == dex_file) {
- array[ref.dex_method_index].StoreSequentiallyConsistent(it->second);
+ CHECK(atomic_verified_methods_.Insert(ref, nullptr, it->second) ==
+ AtomicMap::kInsertResultSuccess);
it = verified_methods_.erase(it);
} else {
++it;
}
}
- preregistered_dex_files_.emplace(dex_file, std::move(array));
-}
-
-void VerificationResults::DeleteResults(DexFileResults& array) {
- for (auto& pair : array) {
- for (Atomic<const VerifiedMethod*>& method : pair.second) {
- delete method.LoadSequentiallyConsistent();
- }
- }
- array.clear();
-}
-
-VerificationResults::DexFileMethodArray* VerificationResults::GetMethodArray(
- const DexFile* dex_file) {
- auto it = preregistered_dex_files_.find(dex_file);
- if (it != preregistered_dex_files_.end()) {
- return &it->second;
- }
- return nullptr;
}
} // namespace art
diff --git a/compiler/dex/verification_results.h b/compiler/dex/verification_results.h
index b3356e0e10..ea38f4d537 100644
--- a/compiler/dex/verification_results.h
+++ b/compiler/dex/verification_results.h
@@ -26,6 +26,7 @@
#include "class_reference.h"
#include "method_reference.h"
#include "safe_map.h"
+#include "utils/atomic_method_ref_map.h"
namespace art {
@@ -54,26 +55,22 @@ class VerificationResults {
bool IsCandidateForCompilation(MethodReference& method_ref, const uint32_t access_flags);
- // Add a dex file array to the preregistered_dex_files_ array. These dex files require no locks to
- // access. It is not safe to call if other callers are calling GetVerifiedMethod concurrently.
- void PreRegisterDexFile(const DexFile* dex_file) REQUIRES(!verified_methods_lock_);
+ // Add a dex file to enable using the atomic map.
+ void AddDexFile(const DexFile* dex_file) REQUIRES(!verified_methods_lock_);
private:
// Verified methods. The method array is fixed to avoid needing a lock to extend it.
- using DexFileMethodArray = dchecked_vector<Atomic<const VerifiedMethod*>>;
- using DexFileResults = std::map<const DexFile*, DexFileMethodArray>;
+ using AtomicMap = AtomicMethodRefMap<const VerifiedMethod*>;
using VerifiedMethodMap = SafeMap<MethodReference,
const VerifiedMethod*,
MethodReferenceComparator>;
- static void DeleteResults(DexFileResults& array);
-
- DexFileMethodArray* GetMethodArray(const DexFile* dex_file) REQUIRES(!verified_methods_lock_);
VerifiedMethodMap verified_methods_ GUARDED_BY(verified_methods_lock_);
const CompilerOptions* const compiler_options_;
- // Dex2oat can preregister dex files to avoid locking when calling GetVerifiedMethod.
- DexFileResults preregistered_dex_files_;
+ // Dex2oat can add dex files to atomic_verified_methods_ to avoid locking when calling
+ // GetVerifiedMethod.
+ AtomicMap atomic_verified_methods_;
ReaderWriterMutex verified_methods_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
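Both VerificationResults and CompilerDriver (the next file below) now keep their per-method results in the new AtomicMethodRefMap added by this change (utils/atomic_method_ref_map.h, shown only in the diffstat). Pieced together from the call sites in this patch, its use looks roughly like this; the CAS-failure enumerator name is an assumption, since only kInsertResultSuccess and kInsertResultInvalidDexFile appear above:

    using AtomicMap = AtomicMethodRefMap<const VerifiedMethod*>;
    AtomicMap map;
    // Pre-size one atomic slot per method id; afterwards no lock is needed.
    map.AddDexFile(dex_file);
    MethodReference ref(dex_file, method_idx);
    // Insert() compare-and-swaps the slot from `expected` to the new value.
    AtomicMap::InsertResult result = map.Insert(ref, /*expected*/ nullptr, verified);
    if (result == AtomicMap::kInsertResultSuccess) {
      // Won the race; the slot now holds `verified`.
    } else if (result == AtomicMap::kInsertResultInvalidDexFile) {
      // Dex file was never added; VerificationResults falls back to the
      // mutex-guarded SafeMap in this case.
    } else {
      // CAS failed: another thread filled the slot first (the "rare case").
      const VerifiedMethod* existing = nullptr;
      CHECK(map.Get(ref, &existing));
    }
    // Visit() iterates every slot, e.g. for deletion in the destructor.
    map.Visit([](const MethodReference& r, const VerifiedMethod* m) { delete m; });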
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index ad75ec4604..6b62110b91 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -71,6 +71,7 @@
#include "thread_pool.h"
#include "trampolines/trampoline_compiler.h"
#include "transaction.h"
+#include "utils/atomic_method_ref_map-inl.h"
#include "utils/dex_cache_arrays_layout-inl.h"
#include "utils/swap_space.h"
#include "vdex_file.h"
@@ -287,8 +288,6 @@ CompilerDriver::CompilerDriver(
instruction_set_features_(instruction_set_features),
requires_constructor_barrier_lock_("constructor barrier lock"),
compiled_classes_lock_("compiled classes lock"),
- compiled_methods_lock_("compiled method lock"),
- compiled_methods_(MethodTable::key_compare()),
non_relative_linker_patch_count_(0u),
image_classes_(image_classes),
classes_to_compile_(compiled_classes),
@@ -326,12 +325,12 @@ CompilerDriver::~CompilerDriver() {
MutexLock mu(self, compiled_classes_lock_);
STLDeleteValues(&compiled_classes_);
}
- {
- MutexLock mu(self, compiled_methods_lock_);
- for (auto& pair : compiled_methods_) {
- CompiledMethod::ReleaseSwapAllocatedCompiledMethod(this, pair.second);
+ compiled_methods_.Visit([this](const MethodReference& ref ATTRIBUTE_UNUSED,
+ CompiledMethod* method) {
+ if (method != nullptr) {
+ CompiledMethod::ReleaseSwapAllocatedCompiledMethod(this, method);
}
- }
+ });
compiler_->UnInit();
}
@@ -575,8 +574,7 @@ static void CompileMethod(Thread* self,
const DexFile& dex_file,
optimizer::DexToDexCompilationLevel dex_to_dex_compilation_level,
bool compilation_enabled,
- Handle<mirror::DexCache> dex_cache)
- REQUIRES(!driver->compiled_methods_lock_) {
+ Handle<mirror::DexCache> dex_cache) {
DCHECK(driver != nullptr);
CompiledMethod* compiled_method = nullptr;
uint64_t start_ns = kTimeCompileMethod ? NanoTime() : 0;
@@ -842,9 +840,9 @@ static void ResolveConstStrings(Handle<mirror::DexCache> dex_cache,
switch (inst->Opcode()) {
case Instruction::CONST_STRING:
case Instruction::CONST_STRING_JUMBO: {
- uint32_t string_index = (inst->Opcode() == Instruction::CONST_STRING)
+ dex::StringIndex string_index((inst->Opcode() == Instruction::CONST_STRING)
? inst->VRegB_21c()
- : inst->VRegB_31c();
+ : inst->VRegB_31c());
mirror::String* string = class_linker->ResolveString(dex_file, string_index, dex_cache);
CHECK(string != nullptr) << "Could not allocate a string when forcing determinism";
break;
@@ -940,6 +938,13 @@ void CompilerDriver::PreCompile(jobject class_loader,
TimingLogger* timings) {
CheckThreadPools();
+ for (const DexFile* dex_file : dex_files) {
+ // Can be already inserted if the caller is CompileOne. This happens for gtests.
+ if (!compiled_methods_.HaveDexFile(dex_file)) {
+ compiled_methods_.AddDexFile(dex_file);
+ }
+ }
+
LoadImageClasses(timings);
VLOG(compiler) << "LoadImageClasses: " << GetMemoryUsageString(false);
@@ -2616,30 +2621,15 @@ void CompilerDriver::AddCompiledMethod(const MethodReference& method_ref,
size_t non_relative_linker_patch_count) {
DCHECK(GetCompiledMethod(method_ref) == nullptr)
<< method_ref.dex_file->PrettyMethod(method_ref.dex_method_index);
- {
- MutexLock mu(Thread::Current(), compiled_methods_lock_);
- compiled_methods_.Put(method_ref, compiled_method);
- non_relative_linker_patch_count_ += non_relative_linker_patch_count;
- }
+ MethodTable::InsertResult result = compiled_methods_.Insert(method_ref,
+ /*expected*/ nullptr,
+ compiled_method);
+ CHECK(result == MethodTable::kInsertResultSuccess);
+ non_relative_linker_patch_count_.FetchAndAddRelaxed(non_relative_linker_patch_count);
DCHECK(GetCompiledMethod(method_ref) != nullptr)
<< method_ref.dex_file->PrettyMethod(method_ref.dex_method_index);
}
-void CompilerDriver::RemoveCompiledMethod(const MethodReference& method_ref) {
- CompiledMethod* compiled_method = nullptr;
- {
- MutexLock mu(Thread::Current(), compiled_methods_lock_);
- auto it = compiled_methods_.find(method_ref);
- if (it != compiled_methods_.end()) {
- compiled_method = it->second;
- compiled_methods_.erase(it);
- }
- }
- if (compiled_method != nullptr) {
- CompiledMethod::ReleaseSwapAllocatedCompiledMethod(this, compiled_method);
- }
-}
-
CompiledClass* CompilerDriver::GetCompiledClass(ClassReference ref) const {
MutexLock mu(Thread::Current(), compiled_classes_lock_);
ClassTable::const_iterator it = compiled_classes_.find(ref);
@@ -2678,13 +2668,9 @@ void CompilerDriver::RecordClassStatus(ClassReference ref, mirror::Class::Status
}
CompiledMethod* CompilerDriver::GetCompiledMethod(MethodReference ref) const {
- MutexLock mu(Thread::Current(), compiled_methods_lock_);
- MethodTable::const_iterator it = compiled_methods_.find(ref);
- if (it == compiled_methods_.end()) {
- return nullptr;
- }
- CHECK(it->second != nullptr);
- return it->second;
+ CompiledMethod* compiled_method = nullptr;
+ compiled_methods_.Get(ref, &compiled_method);
+ return compiled_method;
}
bool CompilerDriver::IsMethodVerifiedWithoutFailures(uint32_t method_idx,
@@ -2713,8 +2699,7 @@ bool CompilerDriver::IsMethodVerifiedWithoutFailures(uint32_t method_idx,
}
size_t CompilerDriver::GetNonRelativeLinkerPatchCount() const {
- MutexLock mu(Thread::Current(), compiled_methods_lock_);
- return non_relative_linker_patch_count_;
+ return non_relative_linker_patch_count_.LoadRelaxed();
}
void CompilerDriver::SetRequiresConstructorBarrier(Thread* self,
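With compiled_methods_lock_ gone, non_relative_linker_patch_count_ (declared as Atomic<size_t> in the header below) is updated with FetchAndAddRelaxed and read with LoadRelaxed. Relaxed ordering suffices because the value is a pure statistic: no other data is published through it, so only the count itself must be race-free. In standard C++ terms (a sketch; ART's Atomic<> is, as far as this diff shows, a thin wrapper over std::atomic):

    #include <atomic>
    #include <cstddef>

    std::atomic<size_t> patch_count{0};

    // AddCompiledMethod(): many compiler threads accumulate concurrently.
    void Add(size_t n) { patch_count.fetch_add(n, std::memory_order_relaxed); }

    // GetNonRelativeLinkerPatchCount(): read after compilation has finished.
    size_t Total() { return patch_count.load(std::memory_order_relaxed); }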
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 7418b006ef..cc50197140 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -41,6 +41,7 @@
#include "runtime.h"
#include "safe_map.h"
#include "thread_pool.h"
+#include "utils/atomic_method_ref_map.h"
#include "utils/dex_cache_arrays_layout.h"
namespace art {
@@ -131,7 +132,7 @@ class CompilerDriver {
// Compile a single Method.
void CompileOne(Thread* self, ArtMethod* method, TimingLogger* timings)
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!compiled_methods_lock_, !compiled_classes_lock_, !dex_to_dex_references_lock_);
+ REQUIRES(!compiled_classes_lock_, !dex_to_dex_references_lock_);
VerificationResults* GetVerificationResults() const {
DCHECK(Runtime::Current()->IsAotCompiler());
@@ -168,18 +169,12 @@ class CompilerDriver {
CompiledClass* GetCompiledClass(ClassReference ref) const
REQUIRES(!compiled_classes_lock_);
- CompiledMethod* GetCompiledMethod(MethodReference ref) const
- REQUIRES(!compiled_methods_lock_);
- size_t GetNonRelativeLinkerPatchCount() const
- REQUIRES(!compiled_methods_lock_);
-
+ CompiledMethod* GetCompiledMethod(MethodReference ref) const;
+ size_t GetNonRelativeLinkerPatchCount() const;
// Add a compiled method.
void AddCompiledMethod(const MethodReference& method_ref,
CompiledMethod* const compiled_method,
- size_t non_relative_linker_patch_count)
- REQUIRES(!compiled_methods_lock_);
- // Remove and delete a compiled method.
- void RemoveCompiledMethod(const MethodReference& method_ref) REQUIRES(!compiled_methods_lock_);
+ size_t non_relative_linker_patch_count);
void SetRequiresConstructorBarrier(Thread* self,
const DexFile* dex_file,
@@ -519,18 +514,15 @@ class CompilerDriver {
mutable Mutex compiled_classes_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
ClassTable compiled_classes_ GUARDED_BY(compiled_classes_lock_);
- typedef SafeMap<const MethodReference, CompiledMethod*, MethodReferenceComparator> MethodTable;
-
- public:
- // Lock is public so that non-members can have lock annotations.
- mutable Mutex compiled_methods_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ typedef AtomicMethodRefMap<CompiledMethod*> MethodTable;
private:
// All method references that this compiler has compiled.
- MethodTable compiled_methods_ GUARDED_BY(compiled_methods_lock_);
+ MethodTable compiled_methods_;
+
// Number of non-relative patches in all compiled methods. These patches need space
// in the .oat_patches ELF section if requested in the compiler options.
- size_t non_relative_linker_patch_count_ GUARDED_BY(compiled_methods_lock_);
+ Atomic<size_t> non_relative_linker_patch_count_;
// If image_ is true, specifies the classes that will be included in the image.
// Note if image_classes_ is null, all classes are included in the image.
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index f40c71283b..12684c09c0 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -111,7 +111,7 @@ TEST_F(CompilerDriverTest, DISABLED_LARGE_CompileDexLibCore) {
ObjPtr<mirror::DexCache> dex_cache = class_linker_->FindDexCache(soa.Self(), dex);
EXPECT_EQ(dex.NumStringIds(), dex_cache->NumStrings());
for (size_t i = 0; i < dex_cache->NumStrings(); i++) {
- const mirror::String* string = dex_cache->GetResolvedString(i);
+ const mirror::String* string = dex_cache->GetResolvedString(dex::StringIndex(i));
EXPECT_TRUE(string != nullptr) << "string_idx=" << i;
}
EXPECT_EQ(dex.NumTypeIds(), dex_cache->NumResolvedTypes());
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index a706697496..fb5560b124 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -1451,7 +1451,8 @@ void ImageWriter::CalculateNewObjectOffsets() {
InternTable* const intern_table = runtime->GetInternTable();
for (size_t i = 0, count = dex_file->NumStringIds(); i < count; ++i) {
uint32_t utf16_length;
- const char* utf8_data = dex_file->StringDataAndUtf16LengthByIdx(i, &utf16_length);
+ const char* utf8_data = dex_file->StringDataAndUtf16LengthByIdx(dex::StringIndex(i),
+ &utf16_length);
mirror::String* string = intern_table->LookupStrong(self, utf16_length, utf8_data).Ptr();
TryAssignBinSlot(work_stack, string, oat_index);
}
diff --git a/compiler/intrinsics_list.h b/compiler/intrinsics_list.h
index 555baf6de9..9bd25d8484 100644
--- a/compiler/intrinsics_list.h
+++ b/compiler/intrinsics_list.h
@@ -117,6 +117,12 @@
V(StringNewStringFromBytes, kStatic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/StringFactory;", "newStringFromBytes", "([BIII)Ljava/lang/String;") \
V(StringNewStringFromChars, kStatic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/StringFactory;", "newStringFromChars", "(II[C)Ljava/lang/String;") \
V(StringNewStringFromString, kStatic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/StringFactory;", "newStringFromString", "(Ljava/lang/String;)Ljava/lang/String;") \
+ V(StringBufferAppend, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/StringBuffer;", "append", "(Ljava/lang/String;)Ljava/lang/StringBuffer;") \
+ V(StringBufferLength, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kNoThrow, "Ljava/lang/StringBuffer;", "length", "()I") \
+ V(StringBufferToString, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/StringBuffer;", "toString", "()Ljava/lang/String;") \
+ V(StringBuilderAppend, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/StringBuilder;", "append", "(Ljava/lang/String;)Ljava/lang/StringBuilder;") \
+ V(StringBuilderLength, kVirtual, kNeedsEnvironmentOrCache, kReadSideEffects, kNoThrow, "Ljava/lang/StringBuilder;", "length", "()I") \
+ V(StringBuilderToString, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/StringBuilder;", "toString", "()Ljava/lang/String;") \
V(UnsafeCASInt, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Lsun/misc/Unsafe;", "compareAndSwapInt", "(Ljava/lang/Object;JII)Z") \
V(UnsafeCASLong, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Lsun/misc/Unsafe;", "compareAndSwapLong", "(Ljava/lang/Object;JJJ)Z") \
V(UnsafeCASObject, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Lsun/misc/Unsafe;", "compareAndSwapObject", "(Ljava/lang/Object;JLjava/lang/Object;Ljava/lang/Object;)Z") \
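The six new rows teach the compiler to treat StringBuffer/StringBuilder append(String), length(), and toString() as intrinsics. Note that only StringBuilder.length() is marked kReadSideEffects/kNoThrow; the StringBuffer variants keep kAllSideEffects, presumably because StringBuffer methods are synchronized. Lists like this are X-macros: each consumer defines the V(...) macro and expands the list, along the lines of this sketch (the actual consumers are outside this diff):

    // Illustrative consumer: derive one enumerator per intrinsic row.
    #define OPTIMIZING_INTRINSICS(Name, InvokeType, NeedsEnvOrCache, SideEffects, \
                                  Exceptions, ClassName, MethodName, Signature)   \
      k ## Name,
    enum class Intrinsics {
      kNone,
      INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
    };
    #undef OPTIMIZING_INTRINSICS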
diff --git a/compiler/linker/relative_patcher_test.h b/compiler/linker/relative_patcher_test.h
index 015178980c..233daf4a39 100644
--- a/compiler/linker/relative_patcher_test.h
+++ b/compiler/linker/relative_patcher_test.h
@@ -163,7 +163,8 @@ class RelativePatcherTest : public testing::Test {
offset + patch.LiteralOffset(),
target_offset);
} else if (patch.GetType() == LinkerPatch::Type::kStringRelative) {
- uint32_t target_offset = string_index_to_offset_map_.Get(patch.TargetStringIndex());
+ uint32_t target_offset =
+ string_index_to_offset_map_.Get(patch.TargetStringIndex().index_);
patcher_->PatchPcRelativeReference(&patched_code_,
patch,
offset + patch.LiteralOffset(),
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 8a6b94e0ea..1f5981682b 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -430,7 +430,7 @@ class LoadStringSlowPathARM : public SlowPathCodeARM {
LocationSummary* locations = instruction_->GetLocations();
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
HLoadString* load = instruction_->AsLoadString();
- const uint32_t string_index = load->GetStringIndex();
+ const uint32_t string_index = load->GetStringIndex().index_;
Register out = locations->Out().AsRegister<Register>();
Register temp = locations->GetTemp(0).AsRegister<Register>();
constexpr bool call_saves_everything_except_r0 = (!kUseReadBarrier || kUseBakerReadBarrier);
@@ -5946,7 +5946,7 @@ void InstructionCodeGeneratorARM::VisitLoadString(HLoadString* load) {
case HLoadString::LoadKind::kBootImageLinkTimePcRelative: {
DCHECK(codegen_->GetCompilerOptions().IsBootImage());
CodeGeneratorARM::PcRelativePatchInfo* labels =
- codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex());
+ codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex().index_);
__ BindTrackedLabel(&labels->movw_label);
__ movw(out, /* placeholder */ 0u);
__ BindTrackedLabel(&labels->movt_label);
@@ -5965,7 +5965,7 @@ void InstructionCodeGeneratorARM::VisitLoadString(HLoadString* load) {
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
Register temp = locations->GetTemp(0).AsRegister<Register>();
CodeGeneratorARM::PcRelativePatchInfo* labels =
- codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex());
+ codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex().index_);
__ BindTrackedLabel(&labels->movw_label);
__ movw(temp, /* placeholder */ 0u);
__ BindTrackedLabel(&labels->movt_label);
@@ -5994,7 +5994,7 @@ void InstructionCodeGeneratorARM::VisitLoadString(HLoadString* load) {
DCHECK(load_kind == HLoadString::LoadKind::kDexCacheViaMethod);
InvokeRuntimeCallingConvention calling_convention;
DCHECK_EQ(calling_convention.GetRegisterAt(0), out);
- __ LoadImmediate(calling_convention.GetRegisterAt(0), load->GetStringIndex());
+ __ LoadImmediate(calling_convention.GetRegisterAt(0), load->GetStringIndex().index_);
codegen_->InvokeRuntime(kQuickResolveString, load, load->GetDexPc());
CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
}
@@ -7340,7 +7340,7 @@ CodeGeneratorARM::PcRelativePatchInfo* CodeGeneratorARM::NewPcRelativePatch(
}
Literal* CodeGeneratorARM::DeduplicateBootImageStringLiteral(const DexFile& dex_file,
- uint32_t string_index) {
+ dex::StringIndex string_index) {
return boot_image_string_patches_.GetOrCreate(
StringReference(&dex_file, string_index),
[this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
@@ -7364,7 +7364,7 @@ Literal* CodeGeneratorARM::DeduplicateDexCacheAddressLiteral(uint32_t address) {
}
Literal* CodeGeneratorARM::DeduplicateJitStringLiteral(const DexFile& dex_file,
- uint32_t string_index) {
+ dex::StringIndex string_index) {
jit_string_roots_.Overwrite(StringReference(&dex_file, string_index), /* placeholder */ 0u);
return jit_string_patches_.GetOrCreate(
StringReference(&dex_file, string_index),
@@ -7436,7 +7436,7 @@ void CodeGeneratorARM::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patche
uint32_t literal_offset = literal->GetLabel()->Position();
linker_patches->push_back(LinkerPatch::StringPatch(literal_offset,
target_string.dex_file,
- target_string.string_index));
+ target_string.string_index.index_));
}
if (!GetCompilerOptions().IsBootImage()) {
EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(pc_relative_string_patches_,
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index a4ccb57c1f..8230512825 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -485,11 +485,12 @@ class CodeGeneratorARM : public CodeGenerator {
PcRelativePatchInfo* NewPcRelativeTypePatch(const DexFile& dex_file, dex::TypeIndex type_index);
PcRelativePatchInfo* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file,
uint32_t element_offset);
- Literal* DeduplicateBootImageStringLiteral(const DexFile& dex_file, uint32_t string_index);
+ Literal* DeduplicateBootImageStringLiteral(const DexFile& dex_file,
+ dex::StringIndex string_index);
Literal* DeduplicateBootImageTypeLiteral(const DexFile& dex_file, dex::TypeIndex type_index);
Literal* DeduplicateBootImageAddressLiteral(uint32_t address);
Literal* DeduplicateDexCacheAddressLiteral(uint32_t address);
- Literal* DeduplicateJitStringLiteral(const DexFile& dex_file, uint32_t string_index);
+ Literal* DeduplicateJitStringLiteral(const DexFile& dex_file, dex::StringIndex string_index);
void EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) OVERRIDE;
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index a78b3da455..ab6a33fbd9 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -349,7 +349,7 @@ class LoadStringSlowPathARM64 : public SlowPathCodeARM64 {
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConvention calling_convention;
- const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex();
+ const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex().index_;
__ Mov(calling_convention.GetRegisterAt(0).W(), string_index);
arm64_codegen->InvokeRuntime(kQuickResolveString, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
@@ -4132,7 +4132,7 @@ vixl::aarch64::Label* CodeGeneratorARM64::NewPcRelativePatch(
}
vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateBootImageStringLiteral(
- const DexFile& dex_file, uint32_t string_index) {
+ const DexFile& dex_file, dex::StringIndex string_index) {
return boot_image_string_patches_.GetOrCreate(
StringReference(&dex_file, string_index),
[this]() { return __ CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ 0u); });
@@ -4158,7 +4158,7 @@ vixl::aarch64::Literal<uint64_t>* CodeGeneratorARM64::DeduplicateDexCacheAddress
}
vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateJitStringLiteral(
- const DexFile& dex_file, uint32_t string_index) {
+ const DexFile& dex_file, dex::StringIndex string_index) {
jit_string_roots_.Overwrite(StringReference(&dex_file, string_index), /* placeholder */ 0u);
return jit_string_patches_.GetOrCreate(
StringReference(&dex_file, string_index),
@@ -4246,7 +4246,7 @@ void CodeGeneratorARM64::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patc
vixl::aarch64::Literal<uint32_t>* literal = entry.second;
linker_patches->push_back(LinkerPatch::StringPatch(literal->GetOffset(),
target_string.dex_file,
- target_string.string_index));
+ target_string.string_index.index_));
}
if (!GetCompilerOptions().IsBootImage()) {
EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(pc_relative_string_patches_,
@@ -4594,7 +4594,7 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) {
case HLoadString::LoadKind::kBootImageLinkTimePcRelative: {
// Add ADRP with its PC-relative String patch.
const DexFile& dex_file = load->GetDexFile();
- uint32_t string_index = load->GetStringIndex();
+ uint32_t string_index = load->GetStringIndex().index_;
DCHECK(codegen_->GetCompilerOptions().IsBootImage());
vixl::aarch64::Label* adrp_label = codegen_->NewPcRelativeStringPatch(dex_file, string_index);
codegen_->EmitAdrpPlaceholder(adrp_label, out.X());
@@ -4612,7 +4612,7 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) {
case HLoadString::LoadKind::kBssEntry: {
// Add ADRP with its PC-relative String .bss entry patch.
const DexFile& dex_file = load->GetDexFile();
- uint32_t string_index = load->GetStringIndex();
+ uint32_t string_index = load->GetStringIndex().index_;
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
UseScratchRegisterScope temps(codegen_->GetVIXLAssembler());
Register temp = temps.AcquireX();
@@ -4653,7 +4653,7 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) {
// TODO: Re-add the compiler code to do string dex cache lookup again.
InvokeRuntimeCallingConvention calling_convention;
DCHECK_EQ(calling_convention.GetRegisterAt(0).GetCode(), out.GetCode());
- __ Mov(calling_convention.GetRegisterAt(0).W(), load->GetStringIndex());
+ __ Mov(calling_convention.GetRegisterAt(0).W(), load->GetStringIndex().index_);
codegen_->InvokeRuntime(kQuickResolveString, load, load->GetDexPc());
CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
}
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 1545fd3860..868c8b07ed 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -560,14 +560,15 @@ class CodeGeneratorARM64 : public CodeGenerator {
uint32_t element_offset,
vixl::aarch64::Label* adrp_label = nullptr);
- vixl::aarch64::Literal<uint32_t>* DeduplicateBootImageStringLiteral(const DexFile& dex_file,
- uint32_t string_index);
+ vixl::aarch64::Literal<uint32_t>* DeduplicateBootImageStringLiteral(
+ const DexFile& dex_file,
+ dex::StringIndex string_index);
vixl::aarch64::Literal<uint32_t>* DeduplicateBootImageTypeLiteral(const DexFile& dex_file,
dex::TypeIndex type_index);
vixl::aarch64::Literal<uint32_t>* DeduplicateBootImageAddressLiteral(uint64_t address);
vixl::aarch64::Literal<uint64_t>* DeduplicateDexCacheAddressLiteral(uint64_t address);
vixl::aarch64::Literal<uint32_t>* DeduplicateJitStringLiteral(const DexFile& dex_file,
- uint32_t string_index);
+ dex::StringIndex string_index);
void EmitAdrpPlaceholder(vixl::aarch64::Label* fixup_label, vixl::aarch64::Register reg);
void EmitAddPlaceholder(vixl::aarch64::Label* fixup_label,
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 1b5138f6f2..1ca439e8cf 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -1322,11 +1322,10 @@ void LocationsBuilderARMVIXL::HandleCondition(HCondition* cond) {
}
break;
- // TODO(VIXL): https://android-review.googlesource.com/#/c/252265/
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
locations->SetInAt(0, Location::RequiresFpuRegister());
- locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetInAt(1, ArithmeticZeroOrFpuRegister(cond->InputAt(1)));
if (!cond->IsEmittedAtUseSite()) {
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
@@ -1346,13 +1345,20 @@ void InstructionCodeGeneratorARMVIXL::HandleCondition(HCondition* cond) {
return;
}
+ Location right = cond->GetLocations()->InAt(1);
vixl32::Register out = OutputRegister(cond);
vixl32::Label true_label, false_label;
switch (cond->InputAt(0)->GetType()) {
default: {
// Integer case.
- __ Cmp(InputRegisterAt(cond, 0), InputOperandAt(cond, 1));
+ if (right.IsRegister()) {
+ __ Cmp(InputRegisterAt(cond, 0), InputOperandAt(cond, 1));
+ } else {
+ DCHECK(right.IsConstant());
+ __ Cmp(InputRegisterAt(cond, 0),
+ CodeGenerator::GetInt32ValueOf(right.GetConstant()));
+ }
AssemblerAccurateScope aas(GetVIXLAssembler(),
kArmInstrMaxSizeInBytes * 3u,
CodeBufferCheckScope::kMaximumSize);
@@ -2776,15 +2782,8 @@ void InstructionCodeGeneratorARMVIXL::VisitRem(HRem* rem) {
void LocationsBuilderARMVIXL::VisitDivZeroCheck(HDivZeroCheck* instruction) {
- // TODO(VIXL): https://android-review.googlesource.com/#/c/275337/
- LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction);
locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
- if (instruction->HasUses()) {
- locations->SetOut(Location::SameAsFirstInput());
- }
}
void InstructionCodeGeneratorARMVIXL::VisitDivZeroCheck(HDivZeroCheck* instruction) {
@@ -3956,15 +3955,8 @@ void InstructionCodeGeneratorARMVIXL::VisitUnresolvedStaticFieldSet(
}
void LocationsBuilderARMVIXL::VisitNullCheck(HNullCheck* instruction) {
- // TODO(VIXL): https://android-review.googlesource.com/#/c/275337/
- LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction);
locations->SetInAt(0, Location::RequiresRegister());
- if (instruction->HasUses()) {
- locations->SetOut(Location::SameAsFirstInput());
- }
}
void CodeGeneratorARMVIXL::GenerateImplicitNullCheck(HNullCheck* instruction) {
@@ -4697,8 +4689,9 @@ void InstructionCodeGeneratorARMVIXL::VisitParallelMove(HParallelMove* instructi
}
void LocationsBuilderARMVIXL::VisitSuspendCheck(HSuspendCheck* instruction) {
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
- // TODO(VIXL): https://android-review.googlesource.com/#/c/275337/ and related.
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
void InstructionCodeGeneratorARMVIXL::VisitSuspendCheck(HSuspendCheck* instruction) {
@@ -5156,7 +5149,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) {
// TODO: Re-add the compiler code to do string dex cache lookup again.
DCHECK_EQ(load->GetLoadKind(), HLoadString::LoadKind::kDexCacheViaMethod);
InvokeRuntimeCallingConventionARMVIXL calling_convention;
- __ Mov(calling_convention.GetRegisterAt(0), load->GetStringIndex());
+ __ Mov(calling_convention.GetRegisterAt(0), load->GetStringIndex().index_);
codegen_->InvokeRuntime(kQuickResolveString, load, load->GetDexPc());
CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
}
@@ -5200,14 +5193,27 @@ void InstructionCodeGeneratorARMVIXL::VisitThrow(HThrow* instruction) {
CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
}
-static bool TypeCheckNeedsATemporary(TypeCheckKind type_check_kind) {
- return kEmitCompilerReadBarrier &&
- (kUseBakerReadBarrier ||
- type_check_kind == TypeCheckKind::kAbstractClassCheck ||
- type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
- type_check_kind == TypeCheckKind::kArrayObjectCheck);
+// Temp is used for read barrier.
+static size_t NumberOfInstanceOfTemps(TypeCheckKind type_check_kind) {
+ if (kEmitCompilerReadBarrier &&
+ (kUseBakerReadBarrier ||
+ type_check_kind == TypeCheckKind::kAbstractClassCheck ||
+ type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
+ type_check_kind == TypeCheckKind::kArrayObjectCheck)) {
+ return 1;
+ }
+ return 0;
}
+// Interface case has 3 temps, one for holding the number of interfaces, one for the current
+// interface pointer, one for loading the current interface.
+// The other checks have one temp for loading the object's class.
+static size_t NumberOfCheckCastTemps(TypeCheckKind type_check_kind) {
+ if (type_check_kind == TypeCheckKind::kInterfaceCheck) {
+ return 3;
+ }
+ return 1 + NumberOfInstanceOfTemps(type_check_kind);
+}
void LocationsBuilderARMVIXL::VisitInstanceOf(HInstanceOf* instruction) {
LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
@@ -5238,11 +5244,7 @@ void LocationsBuilderARMVIXL::VisitInstanceOf(HInstanceOf* instruction) {
// The "out" register is used as a temporary, so it overlaps with the inputs.
// Note that TypeCheckSlowPathARM uses this register too.
locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
- // When read barriers are enabled, we need a temporary register for
- // some cases.
- if (TypeCheckNeedsATemporary(type_check_kind)) {
- locations->AddTemp(Location::RequiresRegister());
- }
+ locations->AddRegisterTemps(NumberOfInstanceOfTemps(type_check_kind));
}
void InstructionCodeGeneratorARMVIXL::VisitInstanceOf(HInstanceOf* instruction) {
@@ -5253,9 +5255,9 @@ void InstructionCodeGeneratorARMVIXL::VisitInstanceOf(HInstanceOf* instruction)
vixl32::Register cls = InputRegisterAt(instruction, 1);
Location out_loc = locations->Out();
vixl32::Register out = OutputRegister(instruction);
- Location maybe_temp_loc = TypeCheckNeedsATemporary(type_check_kind) ?
- locations->GetTemp(0) :
- Location::NoLocation();
+ const size_t num_temps = NumberOfInstanceOfTemps(type_check_kind);
+ DCHECK_LE(num_temps, 1u);
+ Location maybe_temp_loc = (num_temps >= 1) ? locations->GetTemp(0) : Location::NoLocation();
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
@@ -5276,7 +5278,8 @@ void InstructionCodeGeneratorARMVIXL::VisitInstanceOf(HInstanceOf* instruction)
out_loc,
obj_loc,
class_offset,
- maybe_temp_loc);
+ maybe_temp_loc,
+ kCompilerReadBarrierOption);
__ Cmp(out, cls);
// Classes must be equal for the instanceof to succeed.
__ B(ne, &zero);
@@ -5291,13 +5294,18 @@ void InstructionCodeGeneratorARMVIXL::VisitInstanceOf(HInstanceOf* instruction)
out_loc,
obj_loc,
class_offset,
- maybe_temp_loc);
+ maybe_temp_loc,
+ kCompilerReadBarrierOption);
// If the class is abstract, we eagerly fetch the super class of the
// object to avoid doing a comparison we know will fail.
vixl32::Label loop;
__ Bind(&loop);
// /* HeapReference<Class> */ out = out->super_class_
- GenerateReferenceLoadOneRegister(instruction, out_loc, super_offset, maybe_temp_loc);
+ GenerateReferenceLoadOneRegister(instruction,
+ out_loc,
+ super_offset,
+ maybe_temp_loc,
+ kCompilerReadBarrierOption);
// If `out` is null, we use it for the result, and jump to `done`.
__ CompareAndBranchIfZero(out, &done, /* far_target */ false);
__ Cmp(out, cls);
@@ -5315,14 +5323,19 @@ void InstructionCodeGeneratorARMVIXL::VisitInstanceOf(HInstanceOf* instruction)
out_loc,
obj_loc,
class_offset,
- maybe_temp_loc);
+ maybe_temp_loc,
+ kCompilerReadBarrierOption);
// Walk over the class hierarchy to find a match.
vixl32::Label loop, success;
__ Bind(&loop);
__ Cmp(out, cls);
__ B(eq, &success);
// /* HeapReference<Class> */ out = out->super_class_
- GenerateReferenceLoadOneRegister(instruction, out_loc, super_offset, maybe_temp_loc);
+ GenerateReferenceLoadOneRegister(instruction,
+ out_loc,
+ super_offset,
+ maybe_temp_loc,
+ kCompilerReadBarrierOption);
__ CompareAndBranchIfNonZero(out, &loop);
// If `out` is null, we use it for the result, and jump to `done`.
__ B(&done);
@@ -5340,14 +5353,19 @@ void InstructionCodeGeneratorARMVIXL::VisitInstanceOf(HInstanceOf* instruction)
out_loc,
obj_loc,
class_offset,
- maybe_temp_loc);
+ maybe_temp_loc,
+ kCompilerReadBarrierOption);
// Do an exact check.
vixl32::Label exact_check;
__ Cmp(out, cls);
__ B(eq, &exact_check);
// Otherwise, we need to check that the object's class is a non-primitive array.
// /* HeapReference<Class> */ out = out->component_type_
- GenerateReferenceLoadOneRegister(instruction, out_loc, component_offset, maybe_temp_loc);
+ GenerateReferenceLoadOneRegister(instruction,
+ out_loc,
+ component_offset,
+ maybe_temp_loc,
+ kCompilerReadBarrierOption);
// If `out` is null, we use it for the result, and jump to `done`.
__ CompareAndBranchIfZero(out, &done, /* far_target */ false);
GetAssembler()->LoadFromOffset(kLoadUnsignedHalfword, out, out, primitive_offset);
@@ -5360,12 +5378,14 @@ void InstructionCodeGeneratorARMVIXL::VisitInstanceOf(HInstanceOf* instruction)
}
case TypeCheckKind::kArrayCheck: {
+ // No read barrier since the slow path will retry upon failure.
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(instruction,
out_loc,
obj_loc,
class_offset,
- maybe_temp_loc);
+ maybe_temp_loc,
+ kWithoutReadBarrier);
__ Cmp(out, cls);
DCHECK(locations->OnlyCallsOnSlowPath());
slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARMVIXL(instruction,
@@ -5449,13 +5469,7 @@ void LocationsBuilderARMVIXL::VisitCheckCast(HCheckCast* instruction) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
- // Note that TypeCheckSlowPathARM uses this "temp" register too.
- locations->AddTemp(Location::RequiresRegister());
- // When read barriers are enabled, we need an additional temporary
- // register for some cases.
- if (TypeCheckNeedsATemporary(type_check_kind)) {
- locations->AddTemp(Location::RequiresRegister());
- }
+ locations->AddRegisterTemps(NumberOfCheckCastTemps(type_check_kind));
}
void InstructionCodeGeneratorARMVIXL::VisitCheckCast(HCheckCast* instruction) {
@@ -5466,20 +5480,31 @@ void InstructionCodeGeneratorARMVIXL::VisitCheckCast(HCheckCast* instruction) {
vixl32::Register cls = InputRegisterAt(instruction, 1);
Location temp_loc = locations->GetTemp(0);
vixl32::Register temp = RegisterFrom(temp_loc);
- Location maybe_temp2_loc = TypeCheckNeedsATemporary(type_check_kind) ?
- locations->GetTemp(1) :
- Location::NoLocation();
- uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
- uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
- uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
- uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
-
- bool is_type_check_slow_path_fatal =
- (type_check_kind == TypeCheckKind::kExactCheck ||
- type_check_kind == TypeCheckKind::kAbstractClassCheck ||
- type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
- type_check_kind == TypeCheckKind::kArrayObjectCheck) &&
- !instruction->CanThrowIntoCatchBlock();
+ const size_t num_temps = NumberOfCheckCastTemps(type_check_kind);
+ DCHECK_LE(num_temps, 3u);
+ Location maybe_temp2_loc = (num_temps >= 2) ? locations->GetTemp(1) : Location::NoLocation();
+ Location maybe_temp3_loc = (num_temps >= 3) ? locations->GetTemp(2) : Location::NoLocation();
+ const uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ const uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
+ const uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
+ const uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
+ const uint32_t iftable_offset = mirror::Class::IfTableOffset().Uint32Value();
+ const uint32_t array_length_offset = mirror::Array::LengthOffset().Uint32Value();
+ const uint32_t object_array_data_offset =
+ mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
+
+ // Always false for read barriers since we may need to go to the entrypoint for non-fatal cases
+ // from false negatives. The false negatives may come from avoiding read barriers below. Avoiding
+ // read barriers is done for performance and code size reasons.
+ bool is_type_check_slow_path_fatal = false;
+ if (!kEmitCompilerReadBarrier) {
+ is_type_check_slow_path_fatal =
+ (type_check_kind == TypeCheckKind::kExactCheck ||
+ type_check_kind == TypeCheckKind::kAbstractClassCheck ||
+ type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
+ type_check_kind == TypeCheckKind::kArrayObjectCheck) &&
+ !instruction->CanThrowIntoCatchBlock();
+ }
SlowPathCodeARMVIXL* type_check_slow_path =
new (GetGraph()->GetArena()) TypeCheckSlowPathARMVIXL(instruction,
is_type_check_slow_path_fatal);
@@ -5491,12 +5516,17 @@ void InstructionCodeGeneratorARMVIXL::VisitCheckCast(HCheckCast* instruction) {
__ CompareAndBranchIfZero(obj, &done, /* far_target */ false);
}
- // /* HeapReference<Class> */ temp = obj->klass_
- GenerateReferenceLoadTwoRegisters(instruction, temp_loc, obj_loc, class_offset, maybe_temp2_loc);
-
switch (type_check_kind) {
case TypeCheckKind::kExactCheck:
case TypeCheckKind::kArrayCheck: {
+ // /* HeapReference<Class> */ temp = obj->klass_
+ GenerateReferenceLoadTwoRegisters(instruction,
+ temp_loc,
+ obj_loc,
+ class_offset,
+ maybe_temp2_loc,
+ kWithoutReadBarrier);
+
__ Cmp(temp, cls);
// Jump to slow path for throwing the exception or doing a
// more involved array check.
@@ -5505,12 +5535,24 @@ void InstructionCodeGeneratorARMVIXL::VisitCheckCast(HCheckCast* instruction) {
}
case TypeCheckKind::kAbstractClassCheck: {
+ // /* HeapReference<Class> */ temp = obj->klass_
+ GenerateReferenceLoadTwoRegisters(instruction,
+ temp_loc,
+ obj_loc,
+ class_offset,
+ maybe_temp2_loc,
+ kWithoutReadBarrier);
+
// If the class is abstract, we eagerly fetch the super class of the
// object to avoid doing a comparison we know will fail.
vixl32::Label loop;
__ Bind(&loop);
// /* HeapReference<Class> */ temp = temp->super_class_
- GenerateReferenceLoadOneRegister(instruction, temp_loc, super_offset, maybe_temp2_loc);
+ GenerateReferenceLoadOneRegister(instruction,
+ temp_loc,
+ super_offset,
+ maybe_temp2_loc,
+ kWithoutReadBarrier);
// If the class reference currently in `temp` is null, jump to the slow path to throw the
// exception.
@@ -5523,6 +5565,14 @@ void InstructionCodeGeneratorARMVIXL::VisitCheckCast(HCheckCast* instruction) {
}
case TypeCheckKind::kClassHierarchyCheck: {
+ // /* HeapReference<Class> */ temp = obj->klass_
+ GenerateReferenceLoadTwoRegisters(instruction,
+ temp_loc,
+ obj_loc,
+ class_offset,
+ maybe_temp2_loc,
+ kWithoutReadBarrier);
+
// Walk over the class hierarchy to find a match.
vixl32::Label loop;
__ Bind(&loop);
@@ -5530,7 +5580,11 @@ void InstructionCodeGeneratorARMVIXL::VisitCheckCast(HCheckCast* instruction) {
__ B(eq, &done);
// /* HeapReference<Class> */ temp = temp->super_class_
- GenerateReferenceLoadOneRegister(instruction, temp_loc, super_offset, maybe_temp2_loc);
+ GenerateReferenceLoadOneRegister(instruction,
+ temp_loc,
+ super_offset,
+ maybe_temp2_loc,
+ kWithoutReadBarrier);
// If the class reference currently in `temp` is null, jump to the slow path to throw the
// exception.
@@ -5541,13 +5595,25 @@ void InstructionCodeGeneratorARMVIXL::VisitCheckCast(HCheckCast* instruction) {
}
case TypeCheckKind::kArrayObjectCheck: {
+ // /* HeapReference<Class> */ temp = obj->klass_
+ GenerateReferenceLoadTwoRegisters(instruction,
+ temp_loc,
+ obj_loc,
+ class_offset,
+ maybe_temp2_loc,
+ kWithoutReadBarrier);
+
// Do an exact check.
__ Cmp(temp, cls);
__ B(eq, &done);
// Otherwise, we need to check that the object's class is a non-primitive array.
// /* HeapReference<Class> */ temp = temp->component_type_
- GenerateReferenceLoadOneRegister(instruction, temp_loc, component_offset, maybe_temp2_loc);
+ GenerateReferenceLoadOneRegister(instruction,
+ temp_loc,
+ component_offset,
+ maybe_temp2_loc,
+ kWithoutReadBarrier);
// If the component type is null, jump to the slow path to throw the exception.
__ CompareAndBranchIfZero(temp, type_check_slow_path->GetEntryLabel());
// Otherwise,the object is indeed an array, jump to label `check_non_primitive_component_type`
@@ -5559,10 +5625,7 @@ void InstructionCodeGeneratorARMVIXL::VisitCheckCast(HCheckCast* instruction) {
}
case TypeCheckKind::kUnresolvedCheck:
- case TypeCheckKind::kInterfaceCheck:
- // We always go into the type check slow path for the unresolved
- // and interface check cases.
- //
+ // We always go into the type check slow path for the unresolved check case.
// We cannot directly call the CheckCast runtime entry point
// without resorting to a type checking slow path here (i.e. by
// calling InvokeRuntime directly), as it would require to
@@ -5570,8 +5633,45 @@ void InstructionCodeGeneratorARMVIXL::VisitCheckCast(HCheckCast* instruction) {
// instruction (following the runtime calling convention), which
// might be cluttered by the potential first read barrier
// emission at the beginning of this method.
+
__ B(type_check_slow_path->GetEntryLabel());
break;
+
+ case TypeCheckKind::kInterfaceCheck: {
+ // Avoid read barriers to improve performance of the fast path. We can not get false
+ // positives by doing this.
+ // /* HeapReference<Class> */ temp = obj->klass_
+ GenerateReferenceLoadTwoRegisters(instruction,
+ temp_loc,
+ obj_loc,
+ class_offset,
+ maybe_temp2_loc,
+ kWithoutReadBarrier);
+
+ // /* HeapReference<Class> */ temp = temp->iftable_
+ GenerateReferenceLoadTwoRegisters(instruction,
+ temp_loc,
+ temp_loc,
+ iftable_offset,
+ maybe_temp2_loc,
+ kWithoutReadBarrier);
+ // Iftable is never null.
+ __ Ldr(RegisterFrom(maybe_temp2_loc), MemOperand(temp, array_length_offset));
+ // Loop through the iftable and check if any class matches.
+ vixl32::Label start_loop;
+ __ Bind(&start_loop);
+ __ CompareAndBranchIfZero(RegisterFrom(maybe_temp2_loc),
+ type_check_slow_path->GetEntryLabel());
+ __ Ldr(RegisterFrom(maybe_temp3_loc), MemOperand(temp, object_array_data_offset));
+ GetAssembler()->MaybeUnpoisonHeapReference(RegisterFrom(maybe_temp3_loc));
+ // Go to next interface.
+ __ Add(temp, temp, Operand::From(2 * kHeapReferenceSize));
+ __ Sub(RegisterFrom(maybe_temp2_loc), RegisterFrom(maybe_temp2_loc), 2);
+ // Compare the classes and continue the loop if they do not match.
+ __ Cmp(cls, RegisterFrom(maybe_temp3_loc));
+ __ B(ne, &start_loop);
+ break;
+ }
}
__ Bind(&done);
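The new kInterfaceCheck arm replaces what used to be an unconditional jump to the slow path with an inline linear scan of the object's iftable. In C++ terms, the emitted loop has roughly this shape (plain pointers stand in for ART's heap references; the iftable stores {interface class, method array} pairs back to back, which is why the cursor advances by two references and the count drops by 2 per iteration):

    #include <cstdint>

    // Sketch of the fast path emitted above. The class loads skip read
    // barriers on purpose: a stale reference can only cause a false
    // negative, and the slow path then redoes the check with full barriers.
    bool ScanIfTable(void** iftable_data, int32_t length, void* cls) {
      for (int32_t remaining = length; remaining != 0; remaining -= 2) {
        void* iface = *iftable_data;    // current {interface, methods} pair
        iftable_data += 2;              // step over the paired method array
        if (iface == cls) return true;  // match: code above falls through to `done`
      }
      return false;  // exhausted: branch to type_check_slow_path (may throw)
    }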
@@ -5862,7 +5962,8 @@ void InstructionCodeGeneratorARMVIXL::GenerateReferenceLoadOneRegister(
HInstruction* instruction ATTRIBUTE_UNUSED,
Location out,
uint32_t offset,
- Location maybe_temp ATTRIBUTE_UNUSED) {
+ Location maybe_temp ATTRIBUTE_UNUSED,
+ ReadBarrierOption read_barrier_option ATTRIBUTE_UNUSED) {
vixl32::Register out_reg = RegisterFrom(out);
if (kEmitCompilerReadBarrier) {
TODO_VIXL32(FATAL);
@@ -5879,7 +5980,8 @@ void InstructionCodeGeneratorARMVIXL::GenerateReferenceLoadTwoRegisters(
Location out,
Location obj,
uint32_t offset,
- Location maybe_temp ATTRIBUTE_UNUSED) {
+ Location maybe_temp ATTRIBUTE_UNUSED,
+ ReadBarrierOption read_barrier_option ATTRIBUTE_UNUSED) {
vixl32::Register out_reg = RegisterFrom(out);
vixl32::Register obj_reg = RegisterFrom(obj);
if (kEmitCompilerReadBarrier) {
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index 89fef43e46..bd91127121 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -422,7 +422,8 @@ class InstructionCodeGeneratorARMVIXL : public InstructionCodeGenerator {
void GenerateReferenceLoadOneRegister(HInstruction* instruction,
Location out,
uint32_t offset,
- Location maybe_temp);
+ Location maybe_temp,
+ ReadBarrierOption read_barrier_option);
// Generate a heap reference load using two different registers
// `out` and `obj`:
//
@@ -437,7 +438,8 @@ class InstructionCodeGeneratorARMVIXL : public InstructionCodeGenerator {
Location out,
Location obj,
uint32_t offset,
- Location maybe_temp);
+ Location maybe_temp,
+ ReadBarrierOption read_barrier_option);
// Generate a GC root reference load:
//
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 8f94834333..572d900909 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -280,7 +280,7 @@ class LoadStringSlowPathMIPS : public SlowPathCodeMIPS {
InvokeRuntimeCallingConvention calling_convention;
HLoadString* load = instruction_->AsLoadString();
- const uint32_t string_index = load->GetStringIndex();
+ const uint32_t string_index = load->GetStringIndex().index_;
__ LoadConst32(calling_convention.GetRegisterAt(0), string_index);
mips_codegen->InvokeRuntime(kQuickResolveString, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
@@ -1047,7 +1047,7 @@ void CodeGeneratorMIPS::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patch
uint32_t literal_offset = __ GetLabelLocation(literal->GetLabel());
linker_patches->push_back(LinkerPatch::StringPatch(literal_offset,
target_string.dex_file,
- target_string.string_index));
+ target_string.string_index.index_));
}
for (const auto& entry : boot_image_type_patches_) {
const TypeReference& target_type = entry.first;
@@ -1110,7 +1110,7 @@ Literal* CodeGeneratorMIPS::DeduplicateMethodCodeLiteral(MethodReference target_
}
Literal* CodeGeneratorMIPS::DeduplicateBootImageStringLiteral(const DexFile& dex_file,
- uint32_t string_index) {
+ dex::StringIndex string_index) {
return boot_image_string_patches_.GetOrCreate(
StringReference(&dex_file, string_index),
[this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
@@ -5743,7 +5743,7 @@ void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) {
DCHECK(!kEmitCompilerReadBarrier);
DCHECK(codegen_->GetCompilerOptions().IsBootImage());
CodeGeneratorMIPS::PcRelativePatchInfo* info =
- codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex());
+ codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex().index_);
codegen_->EmitPcRelativeAddressPlaceholder(info, out, base_or_current_method_reg);
return; // No dex cache slow path.
}
@@ -5759,7 +5759,7 @@ void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) {
case HLoadString::LoadKind::kBssEntry: {
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
CodeGeneratorMIPS::PcRelativePatchInfo* info =
- codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex());
+ codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex().index_);
codegen_->EmitPcRelativeAddressPlaceholder(info, out, base_or_current_method_reg);
__ LoadFromOffset(kLoadWord, out, out, 0);
SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathMIPS(load);
@@ -5775,7 +5775,7 @@ void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) {
// TODO: Re-add the compiler code to do string dex cache lookup again.
DCHECK(load_kind == HLoadString::LoadKind::kDexCacheViaMethod);
InvokeRuntimeCallingConvention calling_convention;
- __ LoadConst32(calling_convention.GetRegisterAt(0), load->GetStringIndex());
+ __ LoadConst32(calling_convention.GetRegisterAt(0), load->GetStringIndex().index_);
codegen_->InvokeRuntime(kQuickResolveString, load, load->GetDexPc());
CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
}
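The recurring .index_ unwrapping above is the cost of moving HLoadString from a raw uint32_t to the strongly typed dex::StringIndex: the wrapper no longer converts implicitly, so sites that feed the index to a runtime call or a linker patch must unwrap it explicitly. Below is a minimal standalone model of such a wrapper; it is illustrative only, the real type lives in dex_file_types.h and may differ in detail, and LoadConst32 here is a hypothetical raw-value consumer.

    #include <cstdint>

    // Minimal standalone model of a strongly typed index, in the spirit of
    // dex::StringIndex. Implicit mixing with other index kinds is rejected,
    // while the public index_ field still allows explicit unwrapping where a
    // raw uint32_t is required, e.g. for a runtime call or a linker patch.
    struct StringIndex {
      uint32_t index_;

      explicit constexpr StringIndex(uint32_t idx) : index_(idx) {}
      bool operator==(StringIndex other) const { return index_ == other.index_; }
      bool operator<(StringIndex other) const { return index_ < other.index_; }
    };

    void LoadConst32(uint32_t /* value */) {}  // hypothetical raw-value consumer

    void Example(StringIndex string_index) {
      // LoadConst32(string_index);      // would not compile: no implicit conversion
      LoadConst32(string_index.index_);  // explicit unwrap, as in the hunks above
    }

    int main() {
      Example(StringIndex(42));
      return 0;
    }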
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index e225d20094..2273e52b06 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -18,6 +18,7 @@
#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_MIPS_H_
#include "code_generator.h"
+#include "dex_file_types.h"
#include "driver/compiler_options.h"
#include "nodes.h"
#include "parallel_move_resolver.h"
@@ -452,7 +453,8 @@ class CodeGeneratorMIPS : public CodeGenerator {
PcRelativePatchInfo* NewPcRelativeTypePatch(const DexFile& dex_file, dex::TypeIndex type_index);
PcRelativePatchInfo* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file,
uint32_t element_offset);
- Literal* DeduplicateBootImageStringLiteral(const DexFile& dex_file, uint32_t string_index);
+ Literal* DeduplicateBootImageStringLiteral(const DexFile& dex_file,
+ dex::StringIndex string_index);
Literal* DeduplicateBootImageTypeLiteral(const DexFile& dex_file, dex::TypeIndex type_index);
Literal* DeduplicateBootImageAddressLiteral(uint32_t address);
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 02b01c85e5..b5e98714e6 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -234,7 +234,7 @@ class LoadStringSlowPathMIPS64 : public SlowPathCodeMIPS64 {
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConvention calling_convention;
- const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex();
+ const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex().index_;
__ LoadConst32(calling_convention.GetRegisterAt(0), string_index);
mips64_codegen->InvokeRuntime(kQuickResolveString,
instruction_,
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 51e902a824..12aa03c4af 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -225,7 +225,7 @@ class LoadStringSlowPathX86 : public SlowPathCode {
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConvention calling_convention;
- const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex();
+ const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex().index_;
__ movl(calling_convention.GetRegisterAt(0), Immediate(string_index));
x86_codegen->InvokeRuntime(kQuickResolveString, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
@@ -4607,7 +4607,7 @@ void CodeGeneratorX86::RecordSimplePatch() {
void CodeGeneratorX86::RecordBootStringPatch(HLoadString* load_string) {
DCHECK(GetCompilerOptions().IsBootImage());
- string_patches_.emplace_back(load_string->GetDexFile(), load_string->GetStringIndex());
+ string_patches_.emplace_back(load_string->GetDexFile(), load_string->GetStringIndex().index_);
__ Bind(&string_patches_.back().label);
}
@@ -4618,7 +4618,7 @@ void CodeGeneratorX86::RecordTypePatch(HLoadClass* load_class) {
Label* CodeGeneratorX86::NewStringBssEntryPatch(HLoadString* load_string) {
DCHECK(!GetCompilerOptions().IsBootImage());
- string_patches_.emplace_back(load_string->GetDexFile(), load_string->GetStringIndex());
+ string_patches_.emplace_back(load_string->GetDexFile(), load_string->GetStringIndex().index_);
return &string_patches_.back().label;
}
@@ -6253,10 +6253,11 @@ void LocationsBuilderX86::VisitLoadString(HLoadString* load) {
}
}
-Label* CodeGeneratorX86::NewJitRootStringPatch(const DexFile& dex_file, uint32_t dex_index) {
+Label* CodeGeneratorX86::NewJitRootStringPatch(const DexFile& dex_file,
+ dex::StringIndex dex_index) {
jit_string_roots_.Overwrite(StringReference(&dex_file, dex_index), /* placeholder */ 0u);
// Add a patch entry and return the label.
- jit_string_patches_.emplace_back(dex_file, dex_index);
+ jit_string_patches_.emplace_back(dex_file, dex_index.index_);
PatchInfo<Label>* info = &jit_string_patches_.back();
return &info->label;
}
@@ -6313,7 +6314,7 @@ void InstructionCodeGeneratorX86::VisitLoadString(HLoadString* load) {
// TODO: Re-add the compiler code to do string dex cache lookup again.
InvokeRuntimeCallingConvention calling_convention;
DCHECK_EQ(calling_convention.GetRegisterAt(0), out);
- __ movl(calling_convention.GetRegisterAt(0), Immediate(load->GetStringIndex()));
+ __ movl(calling_convention.GetRegisterAt(0), Immediate(load->GetStringIndex().index_));
codegen_->InvokeRuntime(kQuickResolveString, load, load->GetDexPc());
CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
}
@@ -7755,7 +7756,8 @@ void CodeGeneratorX86::MoveFromReturnRegister(Location target, Primitive::Type t
void CodeGeneratorX86::EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) {
for (const PatchInfo<Label>& info : jit_string_patches_) {
- const auto& it = jit_string_roots_.find(StringReference(&info.dex_file, info.index));
+ const auto& it = jit_string_roots_.find(StringReference(&info.dex_file,
+ dex::StringIndex(info.index)));
DCHECK(it != jit_string_roots_.end());
size_t index_in_table = it->second;
uint32_t code_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 16ea6b55d6..2ae3670bed 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -20,6 +20,7 @@
#include "arch/x86/instruction_set_features_x86.h"
#include "base/enums.h"
#include "code_generator.h"
+#include "dex_file_types.h"
#include "driver/compiler_options.h"
#include "nodes.h"
#include "parallel_move_resolver.h"
@@ -414,7 +415,7 @@ class CodeGeneratorX86 : public CodeGenerator {
void RecordTypePatch(HLoadClass* load_class);
Label* NewStringBssEntryPatch(HLoadString* load_string);
Label* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file, uint32_t element_offset);
- Label* NewJitRootStringPatch(const DexFile& dex_file, uint32_t dex_index);
+ Label* NewJitRootStringPatch(const DexFile& dex_file, dex::StringIndex dex_index);
void MoveFromReturnRegister(Location trg, Primitive::Type type) OVERRIDE;
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 34673138bf..22f7f6b52b 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -300,7 +300,7 @@ class LoadStringSlowPathX86_64 : public SlowPathCode {
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
- const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex();
+ const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex().index_;
// Custom calling convention: RAX serves as both input and output.
__ movl(CpuRegister(RAX), Immediate(string_index));
x86_64_codegen->InvokeRuntime(kQuickResolveString,
@@ -1106,7 +1106,7 @@ void CodeGeneratorX86_64::RecordSimplePatch() {
void CodeGeneratorX86_64::RecordBootStringPatch(HLoadString* load_string) {
DCHECK(GetCompilerOptions().IsBootImage());
- string_patches_.emplace_back(load_string->GetDexFile(), load_string->GetStringIndex());
+ string_patches_.emplace_back(load_string->GetDexFile(), load_string->GetStringIndex().index_);
__ Bind(&string_patches_.back().label);
}
@@ -1117,7 +1117,7 @@ void CodeGeneratorX86_64::RecordTypePatch(HLoadClass* load_class) {
Label* CodeGeneratorX86_64::NewStringBssEntryPatch(HLoadString* load_string) {
DCHECK(!GetCompilerOptions().IsBootImage());
- string_patches_.emplace_back(load_string->GetDexFile(), load_string->GetStringIndex());
+ string_patches_.emplace_back(load_string->GetDexFile(), load_string->GetStringIndex().index_);
return &string_patches_.back().label;
}
@@ -5660,10 +5660,11 @@ void LocationsBuilderX86_64::VisitLoadString(HLoadString* load) {
}
}
-Label* CodeGeneratorX86_64::NewJitRootStringPatch(const DexFile& dex_file, uint32_t dex_index) {
+Label* CodeGeneratorX86_64::NewJitRootStringPatch(const DexFile& dex_file,
+ dex::StringIndex dex_index) {
jit_string_roots_.Overwrite(StringReference(&dex_file, dex_index), /* placeholder */ 0u);
// Add a patch entry and return the label.
- jit_string_patches_.emplace_back(dex_file, dex_index);
+ jit_string_patches_.emplace_back(dex_file, dex_index.index_);
PatchInfo<Label>* info = &jit_string_patches_.back();
return &info->label;
}
@@ -5714,7 +5715,7 @@ void InstructionCodeGeneratorX86_64::VisitLoadString(HLoadString* load) {
// TODO: Re-add the compiler code to do string dex cache lookup again.
// Custom calling convention: RAX serves as both input and output.
- __ movl(CpuRegister(RAX), Immediate(load->GetStringIndex()));
+ __ movl(CpuRegister(RAX), Immediate(load->GetStringIndex().index_));
codegen_->InvokeRuntime(kQuickResolveString,
load,
load->GetDexPc());
@@ -7111,7 +7112,8 @@ void CodeGeneratorX86_64::MoveInt64ToAddress(const Address& addr_low,
void CodeGeneratorX86_64::EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) {
for (const PatchInfo<Label>& info : jit_string_patches_) {
- const auto& it = jit_string_roots_.find(StringReference(&info.dex_file, info.index));
+ const auto& it = jit_string_roots_.find(StringReference(&info.dex_file,
+ dex::StringIndex(info.index)));
DCHECK(it != jit_string_roots_.end());
size_t index_in_table = it->second;
uint32_t code_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 0f70b15787..2f41f73da6 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -412,7 +412,7 @@ class CodeGeneratorX86_64 : public CodeGenerator {
void RecordTypePatch(HLoadClass* load_class);
Label* NewStringBssEntryPatch(HLoadString* load_string);
Label* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file, uint32_t element_offset);
- Label* NewJitRootStringPatch(const DexFile& dex_file, uint32_t dex_index);
+ Label* NewJitRootStringPatch(const DexFile& dex_file, dex::StringIndex dex_index);
void MoveFromReturnRegister(Location trg, Primitive::Type type) OVERRIDE;
diff --git a/compiler/optimizing/escape.cc b/compiler/optimizing/escape.cc
index c80e19ef15..9df5bf1017 100644
--- a/compiler/optimizing/escape.cc
+++ b/compiler/optimizing/escape.cc
@@ -23,16 +23,19 @@ namespace art {
void CalculateEscape(HInstruction* reference,
bool (*no_escape)(HInstruction*, HInstruction*),
/*out*/ bool* is_singleton,
- /*out*/ bool* is_singleton_and_non_escaping) {
+ /*out*/ bool* is_singleton_and_not_returned,
+ /*out*/ bool* is_singleton_and_not_deopt_visible) {
// For references not allocated in the method, don't assume anything.
if (!reference->IsNewInstance() && !reference->IsNewArray()) {
*is_singleton = false;
- *is_singleton_and_non_escaping = false;
+ *is_singleton_and_not_returned = false;
+ *is_singleton_and_not_deopt_visible = false;
return;
}
// Assume the best until proven otherwise.
*is_singleton = true;
- *is_singleton_and_non_escaping = true;
+ *is_singleton_and_not_returned = true;
+ *is_singleton_and_not_deopt_visible = true;
// Visit all uses to determine if this reference can escape into the heap,
// a method call, an alias, etc.
for (const HUseListNode<HInstruction*>& use : reference->GetUses()) {
@@ -45,7 +48,8 @@ void CalculateEscape(HInstruction* reference,
// for the uncommon cases. Similarly, null checks are eventually eliminated for explicit
// allocations, but if we see one before it is simplified, assume an alias.
*is_singleton = false;
- *is_singleton_and_non_escaping = false;
+ *is_singleton_and_not_returned = false;
+ *is_singleton_and_not_deopt_visible = false;
return;
} else if (user->IsPhi() || user->IsSelect() || user->IsInvoke() ||
(user->IsInstanceFieldSet() && (reference == user->InputAt(1))) ||
@@ -56,7 +60,8 @@ void CalculateEscape(HInstruction* reference,
// The reference is merged to HPhi/HSelect, passed to a callee, or stored to heap.
// Hence, the reference is no longer the only name that can refer to its value.
*is_singleton = false;
- *is_singleton_and_non_escaping = false;
+ *is_singleton_and_not_returned = false;
+ *is_singleton_and_not_deopt_visible = false;
return;
} else if ((user->IsUnresolvedInstanceFieldGet() && (reference == user->InputAt(0))) ||
(user->IsUnresolvedInstanceFieldSet() && (reference == user->InputAt(0)))) {
@@ -64,37 +69,35 @@ void CalculateEscape(HInstruction* reference,
// Note that we could optimize this case and still perform some optimizations until
// we hit the unresolved access, but the conservative assumption is the simplest.
*is_singleton = false;
- *is_singleton_and_non_escaping = false;
+ *is_singleton_and_not_returned = false;
+ *is_singleton_and_not_deopt_visible = false;
return;
} else if (user->IsReturn()) {
- *is_singleton_and_non_escaping = false;
+ *is_singleton_and_not_returned = false;
}
}
- // Need for further analysis?
- if (!*is_singleton_and_non_escaping) {
- return;
- }
-
- // Look at the environment uses and if it's for HDeoptimize, it's treated the
- // same as a return which escapes at the end of executing the compiled code.
- // Other environment uses are fine, as long as all client optimizations that
- // rely on this informations are disabled for debuggable.
+  // Look at the environment uses to see whether any belongs to an HDeoptimize. Other
+  // environment uses are fine, as long as client optimizations that rely on this
+  // information are disabled for debuggable code.
for (const HUseListNode<HEnvironment*>& use : reference->GetEnvUses()) {
HEnvironment* user = use.GetUser();
if (user->GetHolder()->IsDeoptimize()) {
- *is_singleton_and_non_escaping = false;
+ *is_singleton_and_not_deopt_visible = false;
break;
}
}
}
-bool IsNonEscapingSingleton(HInstruction* reference,
- bool (*no_escape)(HInstruction*, HInstruction*)) {
- bool is_singleton = true;
- bool is_singleton_and_non_escaping = true;
- CalculateEscape(reference, no_escape, &is_singleton, &is_singleton_and_non_escaping);
- return is_singleton_and_non_escaping;
+bool DoesNotEscape(HInstruction* reference, bool (*no_escape)(HInstruction*, HInstruction*)) {
+ bool is_singleton = false;
+ bool is_singleton_and_not_returned = false;
+ bool is_singleton_and_not_deopt_visible = false; // not relevant for escape
+ CalculateEscape(reference,
+ no_escape,
+ &is_singleton,
+ &is_singleton_and_not_returned,
+ &is_singleton_and_not_deopt_visible);
+ return is_singleton_and_not_returned;
}
} // namespace art
diff --git a/compiler/optimizing/escape.h b/compiler/optimizing/escape.h
index 6514843247..75e37b0551 100644
--- a/compiler/optimizing/escape.h
+++ b/compiler/optimizing/escape.h
@@ -31,9 +31,18 @@ class HInstruction;
* allocation. The method assigns true to parameter 'is_singleton' if the reference
* is the only name that can refer to its value during the lifetime of the method,
* meaning that the reference is not aliased with something else, is not stored to
- * heap memory, and not passed to another method. The method assigns true to parameter
- * 'is_singleton_and_non_escaping' if the reference is a singleton and is not returned
- * to the caller or used as an environment local of an HDeoptimize instruction.
+ * heap memory, and not passed to another method. In addition, the method assigns
+ * true to parameter 'is_singleton_and_not_returned' if the reference is a singleton
+ * and is not returned to the caller, and to parameter 'is_singleton_and_not_deopt_visible'
+ * if the reference is a singleton and is not used as an environment local of an
+ * HDeoptimize instruction (clients of the latter value must run after BCE to ensure
+ * all such instructions have been introduced already).
+ *
+ * Note that being visible to an HDeoptimize instruction does not count for ordinary
+ * escape analysis, since switching between compiled code and interpreted code keeps
+ * non-escaping references restricted to the lifetime of the method and the thread
+ * executing it. This property only concerns optimizations that are interested in
+ * escape analysis with respect to the *compiled* code (such as LSE).
*
* When set, the no_escape function is applied to any use of the allocation instruction
* prior to any built-in escape analysis. This allows clients to define better escape
@@ -45,14 +54,14 @@ class HInstruction;
void CalculateEscape(HInstruction* reference,
bool (*no_escape)(HInstruction*, HInstruction*),
/*out*/ bool* is_singleton,
- /*out*/ bool* is_singleton_and_non_escaping);
+ /*out*/ bool* is_singleton_and_not_returned,
+ /*out*/ bool* is_singleton_and_not_deopt_visible);
/*
- * Convenience method for testing singleton and non-escaping property at once.
+ * Convenience method for testing the singleton and not returned properties at once.
* Callers should be aware that this method invokes the full analysis at each call.
*/
-bool IsNonEscapingSingleton(HInstruction* reference,
- bool (*no_escape)(HInstruction*, HInstruction*));
+bool DoesNotEscape(HInstruction* reference, bool (*no_escape)(HInstruction*, HInstruction*));
} // namespace art
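Splitting the single non-escaping flag in two lets each client combine exactly the properties it needs: load-store elimination treats a singleton as removable only when it is neither returned nor visible to an HDeoptimize environment, while DoesNotEscape ignores deopt visibility, since deoptimization keeps a non-escaping reference within the method's lifetime. A standalone sketch of the two client-side combinations, using a simplified, hypothetical stand-in for the entry point declared above:

    #include <cstdio>

    // Hypothetical, simplified stand-in for the analysis entry point; the real
    // CalculateEscape walks the uses of a reference. Here the flags are fixed
    // to model a singleton that is used by an HDeoptimize environment.
    void CalculateEscape(bool* is_singleton,
                         bool* is_singleton_and_not_returned,
                         bool* is_singleton_and_not_deopt_visible) {
      *is_singleton = true;
      *is_singleton_and_not_returned = true;
      *is_singleton_and_not_deopt_visible = false;
    }

    int main() {
      bool is_singleton = false;
      bool not_returned = false;
      bool not_deopt_visible = false;
      CalculateEscape(&is_singleton, &not_returned, &not_deopt_visible);

      // LSE-style client: the allocation and its stores may be removed only
      // if the reference is invisible to both returns and deoptimization.
      bool removable = not_returned && not_deopt_visible;

      // DoesNotEscape-style client: deopt visibility does not count as an
      // escape with respect to the compiled code's lifetime.
      bool does_not_escape = not_returned;

      std::printf("removable=%d does_not_escape=%d\n", removable, does_not_escape);
      return 0;
    }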
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index 40de5ce0cc..b97581beb3 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -2625,7 +2625,7 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction,
}
case Instruction::CONST_STRING: {
- uint32_t string_index = instruction.VRegB_21c();
+ dex::StringIndex string_index(instruction.VRegB_21c());
AppendInstruction(
new (arena_) HLoadString(graph_->GetCurrentMethod(), string_index, *dex_file_, dex_pc));
UpdateLocal(instruction.VRegA_21c(), current_block_->GetLastInstruction());
@@ -2633,7 +2633,7 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction,
}
case Instruction::CONST_STRING_JUMBO: {
- uint32_t string_index = instruction.VRegB_31c();
+ dex::StringIndex string_index(instruction.VRegB_31c());
AppendInstruction(
new (arena_) HLoadString(graph_->GetCurrentMethod(), string_index, *dex_file_, dex_pc));
UpdateLocal(instruction.VRegA_31c(), current_block_->GetLastInstruction());
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 85b461dcf6..658b80468e 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -16,6 +16,7 @@
#include "instruction_simplifier.h"
+#include "escape.h"
#include "intrinsics.h"
#include "mirror/class-inl.h"
#include "scoped_thread_state_change-inl.h"
@@ -107,6 +108,8 @@ class InstructionSimplifierVisitor : public HGraphDelegateVisitor {
void SimplifyStringCharAt(HInvoke* invoke);
void SimplifyStringIsEmptyOrLength(HInvoke* invoke);
void SimplifyNPEOnArgN(HInvoke* invoke, size_t);
+ void SimplifyReturnThis(HInvoke* invoke);
+ void SimplifyAllocationIntrinsic(HInvoke* invoke);
void SimplifyMemBarrier(HInvoke* invoke, MemBarrierKind barrier_kind);
OptimizingCompilerStats* stats_;
@@ -1864,11 +1867,61 @@ void InstructionSimplifierVisitor::SimplifyStringIsEmptyOrLength(HInvoke* invoke
// is provably non-null, we can clear the flag.
void InstructionSimplifierVisitor::SimplifyNPEOnArgN(HInvoke* invoke, size_t n) {
HInstruction* arg = invoke->InputAt(n);
- if (!arg->CanBeNull()) {
+ if (invoke->CanThrow() && !arg->CanBeNull()) {
invoke->SetCanThrow(false);
}
}
+// For methods that return "this", uses of the returned value can be replaced with the receiver.
+void InstructionSimplifierVisitor::SimplifyReturnThis(HInvoke* invoke) {
+ if (invoke->HasUses()) {
+ HInstruction* receiver = invoke->InputAt(0);
+ invoke->ReplaceWith(receiver);
+ RecordSimplification();
+ }
+}
+
+// Helper method for StringBuffer escape analysis.
+static bool NoEscapeForStringBufferReference(HInstruction* reference, HInstruction* user) {
+ if (user->IsInvokeStaticOrDirect()) {
+ // Any constructor on StringBuffer is okay.
+ return user->AsInvokeStaticOrDirect()->GetResolvedMethod()->IsConstructor() &&
+ user->InputAt(0) == reference;
+ } else if (user->IsInvokeVirtual()) {
+ switch (user->AsInvokeVirtual()->GetIntrinsic()) {
+ case Intrinsics::kStringBufferLength:
+ case Intrinsics::kStringBufferToString:
+ DCHECK_EQ(user->InputAt(0), reference);
+ return true;
+ case Intrinsics::kStringBufferAppend:
+ // Returns "this", so only okay if no further uses.
+ DCHECK_EQ(user->InputAt(0), reference);
+ DCHECK_NE(user->InputAt(1), reference);
+ return !user->HasUses();
+ default:
+ break;
+ }
+ }
+ return false;
+}
+
+// Certain allocation intrinsics are not removed by dead code elimination
+// because they may throw an OOM exception or have other side effects.
+// This method removes such intrinsics when special circumstances allow it.
+void InstructionSimplifierVisitor::SimplifyAllocationIntrinsic(HInvoke* invoke) {
+ if (!invoke->HasUses()) {
+ // Instruction has no uses. If unsynchronized, we can remove right away, safely ignoring
+ // the potential OOM of course. Otherwise, we must ensure the receiver object of this
+ // call does not escape since only thread-local synchronization may be removed.
+ bool is_synchronized = invoke->GetIntrinsic() == Intrinsics::kStringBufferToString;
+ HInstruction* receiver = invoke->InputAt(0);
+ if (!is_synchronized || DoesNotEscape(receiver, NoEscapeForStringBufferReference)) {
+ invoke->GetBlock()->RemoveInstruction(invoke);
+ RecordSimplification();
+ }
+ }
+}
+
void InstructionSimplifierVisitor::SimplifyMemBarrier(HInvoke* invoke, MemBarrierKind barrier_kind) {
uint32_t dex_pc = invoke->GetDexPc();
HMemoryBarrier* mem_barrier = new (GetGraph()->GetArena()) HMemoryBarrier(barrier_kind, dex_pc);
@@ -1926,6 +1979,14 @@ void InstructionSimplifierVisitor::VisitInvoke(HInvoke* instruction) {
case Intrinsics::kStringStringIndexOfAfter:
SimplifyNPEOnArgN(instruction, 1); // 0th has own NullCheck
break;
+ case Intrinsics::kStringBufferAppend:
+ case Intrinsics::kStringBuilderAppend:
+ SimplifyReturnThis(instruction);
+ break;
+ case Intrinsics::kStringBufferToString:
+ case Intrinsics::kStringBuilderToString:
+ SimplifyAllocationIntrinsic(instruction);
+ break;
case Intrinsics::kUnsafeLoadFence:
SimplifyMemBarrier(instruction, MemBarrierKind::kLoadAny);
break;
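The two new simplifications work as a pair: SimplifyReturnThis rewires uses of an append() result to the receiver (the call is known to return "this"), and once a toString() result is unused, SimplifyAllocationIntrinsic may drop the call entirely, provided the synchronized StringBuffer case does not escape. A toy standalone model of the rewiring step, mirroring ART's ReplaceWith only in spirit:

    #include <cstdio>
    #include <vector>

    // Standalone toy model of the "returns this" simplification; not ART's IR.
    struct Insn {
      const char* name;
      Insn* input;               // the single value this instruction consumes
      std::vector<Insn*> uses;   // instructions consuming this value
    };

    // A call known to return its receiver can have every use of its result
    // rewired to consume the receiver directly.
    void SimplifyReturnThis(Insn* invoke, Insn* receiver) {
      for (Insn* user : invoke->uses) {
        user->input = receiver;  // toy stand-in for HInstruction::ReplaceWith
      }
      invoke->uses.clear();      // the invoke's result is now unused
    }

    int main() {
      Insn buffer{"new StringBuffer", nullptr, {}};
      Insn append{"StringBuffer.append", &buffer, {}};
      Insn println{"println", &append, {}};
      append.uses.push_back(&println);

      SimplifyReturnThis(&append, &buffer);
      std::printf("println now consumes: %s\n", println.input->name);
      // With its result unused, append (and later a toString() on a
      // non-escaping buffer) becomes a candidate for removal.
      return 0;
    }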
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 8234b2467d..8f64faeac0 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -2613,6 +2613,12 @@ UNIMPLEMENTED_INTRINSIC(ARM, LongLowestOneBit)
UNIMPLEMENTED_INTRINSIC(ARM, StringStringIndexOf);
UNIMPLEMENTED_INTRINSIC(ARM, StringStringIndexOfAfter);
+UNIMPLEMENTED_INTRINSIC(ARM, StringBufferAppend);
+UNIMPLEMENTED_INTRINSIC(ARM, StringBufferLength);
+UNIMPLEMENTED_INTRINSIC(ARM, StringBufferToString);
+UNIMPLEMENTED_INTRINSIC(ARM, StringBuilderAppend);
+UNIMPLEMENTED_INTRINSIC(ARM, StringBuilderLength);
+UNIMPLEMENTED_INTRINSIC(ARM, StringBuilderToString);
// 1.8.
UNIMPLEMENTED_INTRINSIC(ARM, UnsafeGetAndAddInt)
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 17a97da6cc..d8a896e926 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -2781,6 +2781,12 @@ UNIMPLEMENTED_INTRINSIC(ARM64, LongLowestOneBit)
UNIMPLEMENTED_INTRINSIC(ARM64, StringStringIndexOf);
UNIMPLEMENTED_INTRINSIC(ARM64, StringStringIndexOfAfter);
+UNIMPLEMENTED_INTRINSIC(ARM64, StringBufferAppend);
+UNIMPLEMENTED_INTRINSIC(ARM64, StringBufferLength);
+UNIMPLEMENTED_INTRINSIC(ARM64, StringBufferToString);
+UNIMPLEMENTED_INTRINSIC(ARM64, StringBuilderAppend);
+UNIMPLEMENTED_INTRINSIC(ARM64, StringBuilderLength);
+UNIMPLEMENTED_INTRINSIC(ARM64, StringBuilderToString);
// 1.8.
UNIMPLEMENTED_INTRINSIC(ARM64, UnsafeGetAndAddInt)
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index c8e3534164..9e724474d0 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -677,7 +677,10 @@ static void GenUnsafeGet(HInvoke* invoke,
vixl32::Register trg_lo = LowRegisterFrom(trg_loc);
vixl32::Register trg_hi = HighRegisterFrom(trg_loc);
if (is_volatile && !codegen->GetInstructionSetFeatures().HasAtomicLdrdAndStrd()) {
- __ Ldrexd(trg_lo, trg_hi, MemOperand(base, offset));
+ UseScratchRegisterScope temps(assembler->GetVIXLAssembler());
+ const vixl32::Register temp_reg = temps.Acquire();
+ __ Add(temp_reg, base, offset);
+ __ Ldrexd(trg_lo, trg_hi, MemOperand(temp_reg));
} else {
__ Ldrd(trg_lo, trg_hi, MemOperand(base, offset));
}
@@ -2703,6 +2706,12 @@ UNIMPLEMENTED_INTRINSIC(ARMVIXL, LongLowestOneBit)
UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringStringIndexOf);
UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringStringIndexOfAfter);
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringBufferAppend);
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringBufferLength);
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringBufferToString);
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringBuilderAppend);
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringBuilderLength);
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringBuilderToString);
// 1.8.
UNIMPLEMENTED_INTRINSIC(ARMVIXL, UnsafeGetAndAddInt)
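The GenUnsafeGet change above works around an ARM addressing-mode restriction: unlike LDRD, the exclusive load LDREXD accepts only a plain base register with no offset, so the byte offset has to be folded into a scratch register first. Annotated, the fixed sequence added above reads:

    UseScratchRegisterScope temps(assembler->GetVIXLAssembler());
    const vixl32::Register temp_reg = temps.Acquire();
    __ Add(temp_reg, base, offset);                    // temp_reg = base + offset
    __ Ldrexd(trg_lo, trg_hi, MemOperand(temp_reg));   // register-only address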
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index 7c81588cda..9b5d7a02dd 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -2497,6 +2497,12 @@ UNIMPLEMENTED_INTRINSIC(MIPS, MathTanh)
UNIMPLEMENTED_INTRINSIC(MIPS, StringStringIndexOf);
UNIMPLEMENTED_INTRINSIC(MIPS, StringStringIndexOfAfter);
+UNIMPLEMENTED_INTRINSIC(MIPS, StringBufferAppend);
+UNIMPLEMENTED_INTRINSIC(MIPS, StringBufferLength);
+UNIMPLEMENTED_INTRINSIC(MIPS, StringBufferToString);
+UNIMPLEMENTED_INTRINSIC(MIPS, StringBuilderAppend);
+UNIMPLEMENTED_INTRINSIC(MIPS, StringBuilderLength);
+UNIMPLEMENTED_INTRINSIC(MIPS, StringBuilderToString);
// 1.8.
UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetAndAddInt)
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 2d4f417b14..5a998861eb 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -1949,6 +1949,12 @@ UNIMPLEMENTED_INTRINSIC(MIPS64, MathTanh)
UNIMPLEMENTED_INTRINSIC(MIPS64, StringStringIndexOf);
UNIMPLEMENTED_INTRINSIC(MIPS64, StringStringIndexOfAfter);
+UNIMPLEMENTED_INTRINSIC(MIPS64, StringBufferAppend);
+UNIMPLEMENTED_INTRINSIC(MIPS64, StringBufferLength);
+UNIMPLEMENTED_INTRINSIC(MIPS64, StringBufferToString);
+UNIMPLEMENTED_INTRINSIC(MIPS64, StringBuilderAppend);
+UNIMPLEMENTED_INTRINSIC(MIPS64, StringBuilderLength);
+UNIMPLEMENTED_INTRINSIC(MIPS64, StringBuilderToString);
// 1.8.
UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeGetAndAddInt)
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 06ab46f536..922c3bcac9 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -3331,6 +3331,12 @@ UNIMPLEMENTED_INTRINSIC(X86, LongLowestOneBit)
UNIMPLEMENTED_INTRINSIC(X86, StringStringIndexOf);
UNIMPLEMENTED_INTRINSIC(X86, StringStringIndexOfAfter);
+UNIMPLEMENTED_INTRINSIC(X86, StringBufferAppend);
+UNIMPLEMENTED_INTRINSIC(X86, StringBufferLength);
+UNIMPLEMENTED_INTRINSIC(X86, StringBufferToString);
+UNIMPLEMENTED_INTRINSIC(X86, StringBuilderAppend);
+UNIMPLEMENTED_INTRINSIC(X86, StringBuilderLength);
+UNIMPLEMENTED_INTRINSIC(X86, StringBuilderToString);
// 1.8.
UNIMPLEMENTED_INTRINSIC(X86, UnsafeGetAndAddInt)
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 2ea8670100..05d270a4e6 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -3000,6 +3000,12 @@ UNIMPLEMENTED_INTRINSIC(X86_64, DoubleIsInfinite)
UNIMPLEMENTED_INTRINSIC(X86_64, StringStringIndexOf);
UNIMPLEMENTED_INTRINSIC(X86_64, StringStringIndexOfAfter);
+UNIMPLEMENTED_INTRINSIC(X86_64, StringBufferAppend);
+UNIMPLEMENTED_INTRINSIC(X86_64, StringBufferLength);
+UNIMPLEMENTED_INTRINSIC(X86_64, StringBufferToString);
+UNIMPLEMENTED_INTRINSIC(X86_64, StringBuilderAppend);
+UNIMPLEMENTED_INTRINSIC(X86_64, StringBuilderLength);
+UNIMPLEMENTED_INTRINSIC(X86_64, StringBuilderToString);
// 1.8.
UNIMPLEMENTED_INTRINSIC(X86_64, UnsafeGetAndAddInt)
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index edecf17f33..2856c3ea11 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -37,8 +37,13 @@ class ReferenceInfo : public ArenaObject<kArenaAllocMisc> {
: reference_(reference),
position_(pos),
is_singleton_(true),
- is_singleton_and_non_escaping_(true) {
- CalculateEscape(reference_, nullptr, &is_singleton_, &is_singleton_and_non_escaping_);
+ is_singleton_and_not_returned_(true),
+ is_singleton_and_not_deopt_visible_(true) {
+ CalculateEscape(reference_,
+ nullptr,
+ &is_singleton_,
+ &is_singleton_and_not_returned_,
+ &is_singleton_and_not_deopt_visible_);
}
HInstruction* GetReference() const {
@@ -59,19 +64,17 @@ class ReferenceInfo : public ArenaObject<kArenaAllocMisc> {
// Returns true if reference_ is a singleton and not returned to the caller or
// used as an environment local of an HDeoptimize instruction.
// The allocation and stores into reference_ may be eliminated for such cases.
- bool IsSingletonAndNonEscaping() const {
- return is_singleton_and_non_escaping_;
+ bool IsSingletonAndRemovable() const {
+ return is_singleton_and_not_returned_ && is_singleton_and_not_deopt_visible_;
}
private:
HInstruction* const reference_;
- const size_t position_; // position in HeapLocationCollector's ref_info_array_.
- bool is_singleton_; // can only be referred to by a single name in the method.
+ const size_t position_; // position in HeapLocationCollector's ref_info_array_.
- // reference_ is singleton and does not escape in the end either by
- // returning to the caller, or being used as an environment local of an
- // HDeoptimize instruction.
- bool is_singleton_and_non_escaping_;
+ bool is_singleton_; // can only be referred to by a single name in the method,
+ bool is_singleton_and_not_returned_; // and not returned to caller,
+ bool is_singleton_and_not_deopt_visible_; // and not used as an environment local of HDeoptimize.
DISALLOW_COPY_AND_ASSIGN(ReferenceInfo);
};
@@ -623,7 +626,7 @@ class LSEVisitor : public HGraphVisitor {
bool from_all_predecessors = true;
ReferenceInfo* ref_info = heap_location_collector_.GetHeapLocation(i)->GetReferenceInfo();
HInstruction* singleton_ref = nullptr;
- if (ref_info->IsSingletonAndNonEscaping()) {
+ if (ref_info->IsSingletonAndRemovable()) {
// We do more analysis of liveness when merging heap values for such
// cases since stores into such references may potentially be eliminated.
singleton_ref = ref_info->GetReference();
@@ -796,7 +799,7 @@ class LSEVisitor : public HGraphVisitor {
} else if (index != nullptr) {
// For array element, don't eliminate stores since it can be easily aliased
// with non-constant index.
- } else if (ref_info->IsSingletonAndNonEscaping()) {
+ } else if (ref_info->IsSingletonAndRemovable()) {
// Store into a field of a singleton that's not returned. The value cannot be
// killed due to aliasing/invocation. It can be redundant since future loads can
// directly get the value set by this instruction. The value can still be killed due to
@@ -970,7 +973,7 @@ class LSEVisitor : public HGraphVisitor {
// new_instance isn't used for field accesses. No need to process it.
return;
}
- if (ref_info->IsSingletonAndNonEscaping() &&
+ if (ref_info->IsSingletonAndRemovable() &&
!new_instance->IsFinalizable() &&
!new_instance->NeedsAccessCheck()) {
singleton_new_instances_.push_back(new_instance);
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index eebc49c991..7ab04e15fc 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -2072,6 +2072,8 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> {
#undef INSTRUCTION_TYPE_CHECK
// Returns whether the instruction can be moved within the graph.
+  // TODO: This method is used by LICM and GVN with possibly different
+  // meanings; consider splitting and renaming it.
virtual bool CanBeMoved() const { return false; }
// Returns whether the two instructions are of the same kind.
@@ -3789,7 +3791,7 @@ class HInvoke : public HInstruction {
bool CanThrow() const OVERRIDE { return GetPackedFlag<kFlagCanThrow>(); }
- bool CanBeMoved() const OVERRIDE { return IsIntrinsic(); }
+ bool CanBeMoved() const OVERRIDE { return IsIntrinsic() && !DoesAnyWrite(); }
bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
return intrinsic_ != Intrinsics::kNone && intrinsic_ == other->AsInvoke()->intrinsic_;
@@ -4181,6 +4183,19 @@ class HInvokeVirtual FINAL : public HInvoke {
kVirtual),
vtable_index_(vtable_index) {}
+ bool CanBeNull() const OVERRIDE {
+ switch (GetIntrinsic()) {
+ case Intrinsics::kThreadCurrentThread:
+ case Intrinsics::kStringBufferAppend:
+ case Intrinsics::kStringBufferToString:
+ case Intrinsics::kStringBuilderAppend:
+ case Intrinsics::kStringBuilderToString:
+ return false;
+ default:
+ return HInvoke::CanBeNull();
+ }
+ }
+
bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
// TODO: Add implicit null checks in intrinsics.
return (obj == InputAt(0)) && !GetLocations()->Intrinsified();
@@ -5698,7 +5713,7 @@ class HLoadString FINAL : public HInstruction {
};
HLoadString(HCurrentMethod* current_method,
- uint32_t string_index,
+ dex::StringIndex string_index,
const DexFile& dex_file,
uint32_t dex_pc)
: HInstruction(SideEffectsForArchRuntimeCalls(), dex_pc),
@@ -5717,7 +5732,7 @@ class HLoadString FINAL : public HInstruction {
void SetLoadKindWithStringReference(LoadKind load_kind,
const DexFile& dex_file,
- uint32_t string_index) {
+ dex::StringIndex string_index) {
DCHECK(HasStringReference(load_kind));
load_data_.dex_file_ = &dex_file;
string_index_ = string_index;
@@ -5730,7 +5745,7 @@ class HLoadString FINAL : public HInstruction {
const DexFile& GetDexFile() const;
- uint32_t GetStringIndex() const {
+ dex::StringIndex GetStringIndex() const {
DCHECK(HasStringReference(GetLoadKind()) || /* For slow paths. */ !IsInDexCache());
return string_index_;
}
@@ -5744,7 +5759,7 @@ class HLoadString FINAL : public HInstruction {
bool InstructionDataEquals(const HInstruction* other) const OVERRIDE;
- size_t ComputeHashCode() const OVERRIDE { return string_index_; }
+ size_t ComputeHashCode() const OVERRIDE { return string_index_.index_; }
// Will call the runtime if we need to load the string through
// the dex cache and the string is not guaranteed to be there yet.
@@ -5823,7 +5838,7 @@ class HLoadString FINAL : public HInstruction {
// String index serves also as the hash code and it's also needed for slow-paths,
// so it must not be overwritten with other load data.
- uint32_t string_index_;
+ dex::StringIndex string_index_;
union {
const DexFile* dex_file_; // For string reference.
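The new CanBeNull override records that these intrinsics never produce null (Thread.currentThread(), and append()/toString() on StringBuffer/StringBuilder), which lets later passes drop dependent null checks. A standalone sketch of the mechanism, not ART's HInstruction hierarchy:

    #include <cstdio>

    // Standalone model: an instruction that can prove its result is never
    // null lets a dependent null check be elided.
    struct Insn {
      virtual ~Insn() {}
      virtual bool CanBeNull() const { return true; }  // conservative default
    };

    struct StringBuilderToString : Insn {
      // toString() on StringBuilder never returns null, so the override
      // tightens the default, mirroring the HInvokeVirtual change above.
      bool CanBeNull() const override { return false; }
    };

    bool NullCheckNeeded(const Insn& value) { return value.CanBeNull(); }

    int main() {
      StringBuilderToString call;
      std::printf("null check needed: %d\n", NullCheckNeeded(call));  // prints 0
      return 0;
    }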
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index a127708ab0..daf160a483 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -267,7 +267,7 @@ void HSharpening::ProcessLoadString(HLoadString* load_string) {
DCHECK(!load_string->IsInDexCache());
const DexFile& dex_file = load_string->GetDexFile();
- uint32_t string_index = load_string->GetStringIndex();
+ dex::StringIndex string_index = load_string->GetStringIndex();
HLoadString::LoadKind desired_load_kind = HLoadString::LoadKind::kDexCacheViaMethod;
uint64_t address = 0u; // String or dex cache element address.
diff --git a/compiler/utils/atomic_method_ref_map-inl.h b/compiler/utils/atomic_method_ref_map-inl.h
new file mode 100644
index 0000000000..70ea028b17
--- /dev/null
+++ b/compiler/utils/atomic_method_ref_map-inl.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_ATOMIC_METHOD_REF_MAP_INL_H_
+#define ART_COMPILER_UTILS_ATOMIC_METHOD_REF_MAP_INL_H_
+
+#include "atomic_method_ref_map.h"
+
+#include "dex_file-inl.h"
+
+namespace art {
+
+template <typename T>
+inline typename AtomicMethodRefMap<T>::InsertResult AtomicMethodRefMap<T>::Insert(
+ MethodReference ref,
+ const T& expected,
+ const T& desired) {
+ ElementArray* const array = GetArray(ref.dex_file);
+ if (array == nullptr) {
+ return kInsertResultInvalidDexFile;
+ }
+ return (*array)[ref.dex_method_index].CompareExchangeStrongSequentiallyConsistent(
+ expected, desired)
+ ? kInsertResultSuccess
+ : kInsertResultCASFailure;
+}
+
+template <typename T>
+inline bool AtomicMethodRefMap<T>::Get(MethodReference ref, T* out) const {
+ const ElementArray* const array = GetArray(ref.dex_file);
+ if (array == nullptr) {
+    return false;
+ }
+ *out = (*array)[ref.dex_method_index].LoadRelaxed();
+ return true;
+}
+
+template <typename T>
+inline void AtomicMethodRefMap<T>::AddDexFile(const DexFile* dex_file) {
+  arrays_.Put(dex_file, ElementArray(dex_file->NumMethodIds()));
+}
+
+template <typename T>
+inline typename AtomicMethodRefMap<T>::ElementArray* AtomicMethodRefMap<T>::GetArray(
+ const DexFile* dex_file) {
+ auto it = arrays_.find(dex_file);
+ return (it != arrays_.end()) ? &it->second : nullptr;
+}
+
+template <typename T>
+inline const typename AtomicMethodRefMap<T>::ElementArray* AtomicMethodRefMap<T>::GetArray(
+ const DexFile* dex_file) const {
+ auto it = arrays_.find(dex_file);
+ return (it != arrays_.end()) ? &it->second : nullptr;
+}
+
+template <typename T> template <typename Visitor>
+inline void AtomicMethodRefMap<T>::Visit(const Visitor& visitor) {
+ for (auto& pair : arrays_) {
+ const DexFile* dex_file = pair.first;
+ const ElementArray& elements = pair.second;
+ for (size_t i = 0; i < elements.size(); ++i) {
+ visitor(MethodReference(dex_file, i), elements[i].LoadRelaxed());
+ }
+ }
+}
+
+} // namespace art
+
+#endif // ART_COMPILER_UTILS_ATOMIC_METHOD_REF_MAP_INL_H_
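Insert is a compare-and-swap over a per-dex-file array that is pre-sized to NumMethodIds, which is what keeps the map lock-free for concurrent writers: a slot is updated only when it still holds the expected value. A standalone sketch of the same contract using std::atomic (the real map uses ART's Atomic<T> and dchecked_vector):

    #include <atomic>
    #include <cstdio>
    #include <vector>

    enum InsertResult { kInsertResultCASFailure, kInsertResultSuccess };

    // One fixed-size slot array stands in for a single dex file; sizing it up
    // front (to NumMethodIds in the real map) is what avoids any locking.
    std::vector<std::atomic<int>> slots(16);

    InsertResult Insert(size_t method_idx, int expected, int desired) {
      return slots[method_idx].compare_exchange_strong(expected, desired)
                 ? kInsertResultSuccess
                 : kInsertResultCASFailure;
    }

    int main() {
      std::printf("%d\n", Insert(1, 0, 44));   // success: slot was 0
      std::printf("%d\n", Insert(1, 0, 45));   // CAS failure: slot is 44 now
      std::printf("%d\n", Insert(1, 44, 45));  // success: expected value matches
      return 0;
    }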
diff --git a/compiler/utils/atomic_method_ref_map.h b/compiler/utils/atomic_method_ref_map.h
new file mode 100644
index 0000000000..11ab211817
--- /dev/null
+++ b/compiler/utils/atomic_method_ref_map.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_ATOMIC_METHOD_REF_MAP_H_
+#define ART_COMPILER_UTILS_ATOMIC_METHOD_REF_MAP_H_
+
+#include "base/dchecked_vector.h"
+#include "method_reference.h"
+#include "safe_map.h"
+
+namespace art {
+
+class DexFile;
+
+// Used by CompilerCallbacks to track verification information from the Runtime.
+template <typename T>
+class AtomicMethodRefMap {
+ public:
+ explicit AtomicMethodRefMap() {}
+ ~AtomicMethodRefMap() {}
+
+ // Atomically swap the element in if the existing value matches expected.
+ enum InsertResult {
+ kInsertResultInvalidDexFile,
+ kInsertResultCASFailure,
+ kInsertResultSuccess,
+ };
+ InsertResult Insert(MethodReference ref, const T& expected, const T& desired);
+
+  // Retrieve an item; returns false if the dex file has not been added.
+ bool Get(MethodReference ref, T* out) const;
+
+ // Dex files must be added before method references belonging to them can be used as keys. Not
+ // thread safe.
+ void AddDexFile(const DexFile* dex_file);
+
+ bool HaveDexFile(const DexFile* dex_file) const {
+ return arrays_.find(dex_file) != arrays_.end();
+ }
+
+ // Visit all of the dex files and elements.
+ template <typename Visitor>
+ void Visit(const Visitor& visitor);
+
+ private:
+  // Verified methods. Each per-dex-file array is allocated at a fixed size up front,
+  // so no lock is needed to extend it.
+ using ElementArray = dchecked_vector<Atomic<T>>;
+ using DexFileArrays = SafeMap<const DexFile*, ElementArray>;
+
+ const ElementArray* GetArray(const DexFile* dex_file) const;
+ ElementArray* GetArray(const DexFile* dex_file);
+
+ DexFileArrays arrays_;
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_UTILS_ATOMIC_METHOD_REF_MAP_H_
diff --git a/compiler/utils/atomic_method_ref_map_test.cc b/compiler/utils/atomic_method_ref_map_test.cc
new file mode 100644
index 0000000000..9e5bf4bbe1
--- /dev/null
+++ b/compiler/utils/atomic_method_ref_map_test.cc
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "atomic_method_ref_map-inl.h"
+
+#include <memory>
+
+#include "common_runtime_test.h"
+#include "dex_file-inl.h"
+#include "method_reference.h"
+#include "scoped_thread_state_change-inl.h"
+
+namespace art {
+
+class AtomicMethodRefMapTest : public CommonRuntimeTest {};
+
+TEST_F(AtomicMethodRefMapTest, RunTests) {
+ ScopedObjectAccess soa(Thread::Current());
+ std::unique_ptr<const DexFile> dex(OpenTestDexFile("Interfaces"));
+ ASSERT_TRUE(dex != nullptr);
+ using Map = AtomicMethodRefMap<int>;
+ Map map;
+ int value = 123;
+  // Error case: Dex file not yet added, so Get fails.
+ EXPECT_FALSE(map.Get(MethodReference(dex.get(), 1), &value));
+ EXPECT_FALSE(map.HaveDexFile(dex.get()));
+ // Error case: Dex file not registered.
+ EXPECT_TRUE(map.Insert(MethodReference(dex.get(), 1), 0, 1) == Map::kInsertResultInvalidDexFile);
+ map.AddDexFile(dex.get());
+ EXPECT_TRUE(map.HaveDexFile(dex.get()));
+ EXPECT_GT(dex->NumMethodIds(), 10u);
+  // After the dex file has been added, Get should succeed but return the default value.
+ EXPECT_TRUE(map.Get(MethodReference(dex.get(), 1), &value));
+ EXPECT_EQ(value, 0);
+  // Actually insert an item and make sure we can retrieve it.
+ static const int kInsertValue = 44;
+ EXPECT_TRUE(map.Insert(MethodReference(dex.get(), 1), 0, kInsertValue) ==
+ Map::kInsertResultSuccess);
+ EXPECT_TRUE(map.Get(MethodReference(dex.get(), 1), &value));
+ EXPECT_EQ(value, kInsertValue);
+ static const int kInsertValue2 = 123;
+ EXPECT_TRUE(map.Insert(MethodReference(dex.get(), 2), 0, kInsertValue2) ==
+ Map::kInsertResultSuccess);
+ EXPECT_TRUE(map.Get(MethodReference(dex.get(), 1), &value));
+ EXPECT_EQ(value, kInsertValue);
+ EXPECT_TRUE(map.Get(MethodReference(dex.get(), 2), &value));
+ EXPECT_EQ(value, kInsertValue2);
+ // Error case: Incorrect expected value for CAS.
+ EXPECT_TRUE(map.Insert(MethodReference(dex.get(), 1), 0, kInsertValue + 1) ==
+ Map::kInsertResultCASFailure);
+ // Correctly overwrite the value and verify.
+ EXPECT_TRUE(map.Insert(MethodReference(dex.get(), 1), kInsertValue, kInsertValue + 1) ==
+ Map::kInsertResultSuccess);
+ EXPECT_TRUE(map.Get(MethodReference(dex.get(), 1), &value));
+ EXPECT_EQ(value, kInsertValue + 1);
+}
+
+} // namespace art
diff --git a/compiler/utils/string_reference_test.cc b/compiler/utils/string_reference_test.cc
index 0fd9e5ba53..90335eb048 100644
--- a/compiler/utils/string_reference_test.cc
+++ b/compiler/utils/string_reference_test.cc
@@ -18,6 +18,7 @@
#include <memory>
+#include "dex_file_types.h"
#include "gtest/gtest.h"
#include "utils/test_dex_file_builder.h"
@@ -34,15 +35,15 @@ TEST(StringReference, ValueComparator) {
builder1.AddString("String1");
std::unique_ptr<const DexFile> dex_file1 = builder1.Build("dummy location 1");
ASSERT_EQ(1u, dex_file1->NumStringIds());
- ASSERT_STREQ("String1", dex_file1->GetStringData(dex_file1->GetStringId(0)));
- StringReference sr1(dex_file1.get(), 0);
+ ASSERT_STREQ("String1", dex_file1->GetStringData(dex_file1->GetStringId(dex::StringIndex(0))));
+ StringReference sr1(dex_file1.get(), dex::StringIndex(0));
TestDexFileBuilder builder2;
builder2.AddString("String2");
std::unique_ptr<const DexFile> dex_file2 = builder2.Build("dummy location 2");
ASSERT_EQ(1u, dex_file2->NumStringIds());
- ASSERT_STREQ("String2", dex_file2->GetStringData(dex_file2->GetStringId(0)));
- StringReference sr2(dex_file2.get(), 0);
+ ASSERT_STREQ("String2", dex_file2->GetStringData(dex_file2->GetStringId(dex::StringIndex(0))));
+ StringReference sr2(dex_file2.get(), dex::StringIndex(0));
StringReferenceValueComparator cmp;
EXPECT_TRUE(cmp(sr1, sr2)); // "String1" < "String2" is true.
@@ -80,7 +81,8 @@ TEST(StringReference, ValueComparator2) {
std::unique_ptr<const DexFile> dex_file1 = builder1.Build("dummy location 1");
ASSERT_EQ(arraysize(kDexFile1Strings), dex_file1->NumStringIds());
for (size_t index = 0; index != arraysize(kDexFile1Strings); ++index) {
- ASSERT_STREQ(kDexFile1Strings[index], dex_file1->GetStringData(dex_file1->GetStringId(index)));
+ ASSERT_STREQ(kDexFile1Strings[index],
+ dex_file1->GetStringData(dex_file1->GetStringId(dex::StringIndex(index))));
}
TestDexFileBuilder builder2;
@@ -90,14 +92,15 @@ TEST(StringReference, ValueComparator2) {
std::unique_ptr<const DexFile> dex_file2 = builder2.Build("dummy location 1");
ASSERT_EQ(arraysize(kDexFile2Strings), dex_file2->NumStringIds());
for (size_t index = 0; index != arraysize(kDexFile2Strings); ++index) {
- ASSERT_STREQ(kDexFile2Strings[index], dex_file2->GetStringData(dex_file2->GetStringId(index)));
+ ASSERT_STREQ(kDexFile2Strings[index],
+ dex_file2->GetStringData(dex_file2->GetStringId(dex::StringIndex(index))));
}
StringReferenceValueComparator cmp;
for (size_t index1 = 0; index1 != arraysize(kDexFile1Strings); ++index1) {
for (size_t index2 = 0; index2 != arraysize(kDexFile2Strings); ++index2) {
- StringReference sr1(dex_file1.get(), index1);
- StringReference sr2(dex_file2.get(), index2);
+ StringReference sr1(dex_file1.get(), dex::StringIndex(index1));
+ StringReference sr2(dex_file2.get(), dex::StringIndex(index2));
EXPECT_EQ(expectedCmp12[index1][index2], cmp(sr1, sr2)) << index1 << " " << index2;
EXPECT_EQ(expectedCmp21[index2][index1], cmp(sr2, sr1)) << index1 << " " << index2;
}
diff --git a/compiler/utils/test_dex_file_builder_test.cc b/compiler/utils/test_dex_file_builder_test.cc
index 922f8b1dfa..c76739b3b1 100644
--- a/compiler/utils/test_dex_file_builder_test.cc
+++ b/compiler/utils/test_dex_file_builder_test.cc
@@ -49,7 +49,8 @@ TEST(TestDexFileBuilderTest, SimpleTest) {
};
ASSERT_EQ(arraysize(expected_strings), dex_file->NumStringIds());
for (size_t i = 0; i != arraysize(expected_strings); ++i) {
- EXPECT_STREQ(expected_strings[i], dex_file->GetStringData(dex_file->GetStringId(i))) << i;
+ EXPECT_STREQ(expected_strings[i],
+ dex_file->GetStringData(dex_file->GetStringId(dex::StringIndex(i)))) << i;
}
static const char* const expected_types[] = {
diff --git a/compiler/verifier_deps_test.cc b/compiler/verifier_deps_test.cc
index 525a2ee293..90fe6da438 100644
--- a/compiler/verifier_deps_test.cc
+++ b/compiler/verifier_deps_test.cc
@@ -460,20 +460,20 @@ TEST_F(VerifierDepsTest, StringToId) {
ScopedObjectAccess soa(Thread::Current());
LoadDexFile(&soa);
- uint32_t id_Main1 = verifier_deps_->GetIdFromString(*primary_dex_file_, "LMain;");
- ASSERT_LT(id_Main1, primary_dex_file_->NumStringIds());
+ dex::StringIndex id_Main1 = verifier_deps_->GetIdFromString(*primary_dex_file_, "LMain;");
+ ASSERT_LT(id_Main1.index_, primary_dex_file_->NumStringIds());
ASSERT_EQ("LMain;", verifier_deps_->GetStringFromId(*primary_dex_file_, id_Main1));
- uint32_t id_Main2 = verifier_deps_->GetIdFromString(*primary_dex_file_, "LMain;");
- ASSERT_LT(id_Main2, primary_dex_file_->NumStringIds());
+ dex::StringIndex id_Main2 = verifier_deps_->GetIdFromString(*primary_dex_file_, "LMain;");
+ ASSERT_LT(id_Main2.index_, primary_dex_file_->NumStringIds());
ASSERT_EQ("LMain;", verifier_deps_->GetStringFromId(*primary_dex_file_, id_Main2));
- uint32_t id_Lorem1 = verifier_deps_->GetIdFromString(*primary_dex_file_, "Lorem ipsum");
- ASSERT_GE(id_Lorem1, primary_dex_file_->NumStringIds());
+ dex::StringIndex id_Lorem1 = verifier_deps_->GetIdFromString(*primary_dex_file_, "Lorem ipsum");
+ ASSERT_GE(id_Lorem1.index_, primary_dex_file_->NumStringIds());
ASSERT_EQ("Lorem ipsum", verifier_deps_->GetStringFromId(*primary_dex_file_, id_Lorem1));
- uint32_t id_Lorem2 = verifier_deps_->GetIdFromString(*primary_dex_file_, "Lorem ipsum");
- ASSERT_GE(id_Lorem2, primary_dex_file_->NumStringIds());
+ dex::StringIndex id_Lorem2 = verifier_deps_->GetIdFromString(*primary_dex_file_, "Lorem ipsum");
+ ASSERT_GE(id_Lorem2.index_, primary_dex_file_->NumStringIds());
ASSERT_EQ("Lorem ipsum", verifier_deps_->GetStringFromId(*primary_dex_file_, id_Lorem2));
ASSERT_EQ(id_Main1, id_Main2);
@@ -1306,9 +1306,10 @@ TEST_F(VerifierDepsTest, VerifyDeps) {
bool found = false;
for (const auto& entry : deps->fields_) {
if (!entry.IsResolved()) {
+ constexpr dex::StringIndex kStringIndexZero(0); // We know there is a class there.
deps->fields_.insert(VerifierDeps::FieldResolution(0 /* we know there is a field there */,
VerifierDeps::kUnresolvedMarker - 1,
- 0 /* we know there is a class there */));
+ kStringIndexZero));
found = true;
break;
}
@@ -1341,7 +1342,7 @@ TEST_F(VerifierDepsTest, VerifyDeps) {
VerifierDeps::DexFileDeps* deps = decoded_deps.GetDexFileDeps(*primary_dex_file_);
bool found = false;
for (const auto& entry : deps->fields_) {
- static constexpr uint32_t kNewTypeIndex = 0;
+ constexpr dex::StringIndex kNewTypeIndex(0);
if (entry.GetDeclaringClassIndex() != kNewTypeIndex) {
deps->fields_.insert(VerifierDeps::FieldResolution(entry.GetDexFieldIndex(),
entry.GetAccessFlags(),
@@ -1384,9 +1385,10 @@ TEST_F(VerifierDepsTest, VerifyDeps) {
std::set<VerifierDeps::MethodResolution>* methods = GetMethods(deps, resolution_kind);
for (const auto& entry : *methods) {
if (!entry.IsResolved()) {
+ constexpr dex::StringIndex kStringIndexZero(0); // We know there is a class there.
methods->insert(VerifierDeps::MethodResolution(0 /* we know there is a method there */,
VerifierDeps::kUnresolvedMarker - 1,
- 0 /* we know there is a class there */));
+ kStringIndexZero));
found = true;
break;
}
@@ -1421,7 +1423,7 @@ TEST_F(VerifierDepsTest, VerifyDeps) {
bool found = false;
std::set<VerifierDeps::MethodResolution>* methods = GetMethods(deps, resolution_kind);
for (const auto& entry : *methods) {
- static constexpr uint32_t kNewTypeIndex = 0;
+ constexpr dex::StringIndex kNewTypeIndex(0);
if (entry.IsResolved() && entry.GetDeclaringClassIndex() != kNewTypeIndex) {
methods->insert(VerifierDeps::MethodResolution(entry.GetDexMethodIndex(),
entry.GetAccessFlags(),