Diffstat (limited to 'compiler')
-rw-r--r--  compiler/Android.bp                                        |   3
-rw-r--r--  compiler/dex/quick_compiler_callbacks.cc                   |   8
-rw-r--r--  compiler/dex/quick_compiler_callbacks.h                    |   4
-rw-r--r--  compiler/dex/verification_results.cc                       |  31
-rw-r--r--  compiler/dex/verification_results.h                        |   4
-rw-r--r--  compiler/driver/compiler_driver.cc                         |  56
-rw-r--r--  compiler/driver/compiler_driver.h                          |   4
-rw-r--r--  compiler/oat_writer.cc                                     | 183
-rw-r--r--  compiler/oat_writer.h                                      |   6
-rw-r--r--  compiler/optimizing/code_generator_mips.cc                 |   5
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc               |   5
-rw-r--r--  compiler/optimizing/nodes_vector.h                         | 142
-rw-r--r--  compiler/optimizing/nodes_vector_test.cc                   | 335
-rw-r--r--  compiler/utils/atomic_dex_ref_map-inl.h (renamed from compiler/utils/atomic_method_ref_map-inl.h)     |  34
-rw-r--r--  compiler/utils/atomic_dex_ref_map.h (renamed from compiler/utils/atomic_method_ref_map.h)             |  20
-rw-r--r--  compiler/utils/atomic_dex_ref_map_test.cc (renamed from compiler/utils/atomic_method_ref_map_test.cc) |  32
-rw-r--r--  compiler/verifier_deps_test.cc                             |   6
17 files changed, 708 insertions, 170 deletions
diff --git a/compiler/Android.bp b/compiler/Android.bp
index 75086f797b..b721d210fe 100644
--- a/compiler/Android.bp
+++ b/compiler/Android.bp
@@ -351,6 +351,7 @@ art_cc_test {
"optimizing/live_interval_test.cc",
"optimizing/loop_optimization_test.cc",
"optimizing/nodes_test.cc",
+ "optimizing/nodes_vector_test.cc",
"optimizing/parallel_move_test.cc",
"optimizing/pretty_printer_test.cc",
"optimizing/reference_type_propagation_test.cc",
@@ -359,7 +360,7 @@ art_cc_test {
"optimizing/ssa_test.cc",
"optimizing/stack_map_test.cc",
"optimizing/suspend_check_test.cc",
- "utils/atomic_method_ref_map_test.cc",
+ "utils/atomic_dex_ref_map_test.cc",
"utils/dedupe_set_test.cc",
"utils/intrusive_forward_list_test.cc",
"utils/string_reference_test.cc",
diff --git a/compiler/dex/quick_compiler_callbacks.cc b/compiler/dex/quick_compiler_callbacks.cc
index 932eb51aee..b1006b2f0b 100644
--- a/compiler/dex/quick_compiler_callbacks.cc
+++ b/compiler/dex/quick_compiler_callbacks.cc
@@ -22,11 +22,15 @@
namespace art {
void QuickCompilerCallbacks::MethodVerified(verifier::MethodVerifier* verifier) {
- verification_results_->ProcessVerifiedMethod(verifier);
+ if (verification_results_ != nullptr) {
+ verification_results_->ProcessVerifiedMethod(verifier);
+ }
}
void QuickCompilerCallbacks::ClassRejected(ClassReference ref) {
- verification_results_->AddRejectedClass(ref);
+ if (verification_results_ != nullptr) {
+ verification_results_->AddRejectedClass(ref);
+ }
}
} // namespace art
diff --git a/compiler/dex/quick_compiler_callbacks.h b/compiler/dex/quick_compiler_callbacks.h
index db0fdaa72f..2100522f10 100644
--- a/compiler/dex/quick_compiler_callbacks.h
+++ b/compiler/dex/quick_compiler_callbacks.h
@@ -30,9 +30,7 @@ class QuickCompilerCallbacks FINAL : public CompilerCallbacks {
CompilerCallbacks::CallbackMode mode)
: CompilerCallbacks(mode),
verification_results_(verification_results),
- verifier_deps_(nullptr) {
- CHECK(verification_results != nullptr);
- }
+ verifier_deps_(nullptr) {}
~QuickCompilerCallbacks() { }
diff --git a/compiler/dex/verification_results.cc b/compiler/dex/verification_results.cc
index 04ceca0513..beb3439e62 100644
--- a/compiler/dex/verification_results.cc
+++ b/compiler/dex/verification_results.cc
@@ -24,7 +24,7 @@
#include "runtime.h"
#include "thread.h"
#include "thread-current-inl.h"
-#include "utils/atomic_method_ref_map-inl.h"
+#include "utils/atomic_dex_ref_map-inl.h"
#include "verified_method.h"
#include "verifier/method_verifier-inl.h"
@@ -38,7 +38,7 @@ VerificationResults::VerificationResults(const CompilerOptions* compiler_options
VerificationResults::~VerificationResults() {
WriterMutexLock mu(Thread::Current(), verified_methods_lock_);
STLDeleteValues(&verified_methods_);
- atomic_verified_methods_.Visit([](const MethodReference& ref ATTRIBUTE_UNUSED,
+ atomic_verified_methods_.Visit([](const DexFileReference& ref ATTRIBUTE_UNUSED,
const VerifiedMethod* method) {
delete method;
});
@@ -46,22 +46,28 @@ VerificationResults::~VerificationResults() {
void VerificationResults::ProcessVerifiedMethod(verifier::MethodVerifier* method_verifier) {
DCHECK(method_verifier != nullptr);
+ if (!compiler_options_->IsAnyCompilationEnabled()) {
+ // Verified methods are only required for quickening and compilation.
+ return;
+ }
MethodReference ref = method_verifier->GetMethodReference();
std::unique_ptr<const VerifiedMethod> verified_method(VerifiedMethod::Create(method_verifier));
if (verified_method == nullptr) {
// We'll punt this later.
return;
}
- AtomicMap::InsertResult result = atomic_verified_methods_.Insert(ref,
- /*expected*/ nullptr,
- verified_method.get());
+ AtomicMap::InsertResult result = atomic_verified_methods_.Insert(
+ DexFileReference(ref.dex_file, ref.dex_method_index),
+ /*expected*/ nullptr,
+ verified_method.get());
const VerifiedMethod* existing = nullptr;
bool inserted;
if (result != AtomicMap::kInsertResultInvalidDexFile) {
inserted = (result == AtomicMap::kInsertResultSuccess);
if (!inserted) {
// Rare case.
- CHECK(atomic_verified_methods_.Get(ref, &existing));
+ CHECK(atomic_verified_methods_.Get(DexFileReference(ref.dex_file, ref.dex_method_index),
+ &existing));
CHECK_NE(verified_method.get(), existing);
}
} else {
@@ -98,7 +104,8 @@ void VerificationResults::ProcessVerifiedMethod(verifier::MethodVerifier* method
const VerifiedMethod* VerificationResults::GetVerifiedMethod(MethodReference ref) {
const VerifiedMethod* ret = nullptr;
- if (atomic_verified_methods_.Get(ref, &ret)) {
+ DCHECK(compiler_options_->IsAnyCompilationEnabled());
+ if (atomic_verified_methods_.Get(DexFileReference(ref.dex_file, ref.dex_method_index), &ret)) {
return ret;
}
ReaderMutexLock mu(Thread::Current(), verified_methods_lock_);
@@ -112,7 +119,9 @@ void VerificationResults::CreateVerifiedMethodFor(MethodReference ref) {
// at runtime.
std::unique_ptr<VerifiedMethod> verified_method = std::make_unique<VerifiedMethod>(
/* encountered_error_types */ 0, /* has_runtime_throw */ false);
- if (atomic_verified_methods_.Insert(ref, /*expected*/ nullptr, verified_method.get()) ==
+ if (atomic_verified_methods_.Insert(DexFileReference(ref.dex_file, ref.dex_method_index),
+ /*expected*/ nullptr,
+ verified_method.get()) ==
AtomicMap::InsertResult::kInsertResultSuccess) {
verified_method.release();
}
@@ -145,7 +154,7 @@ bool VerificationResults::IsCandidateForCompilation(MethodReference&,
}
void VerificationResults::AddDexFile(const DexFile* dex_file) {
- atomic_verified_methods_.AddDexFile(dex_file);
+ atomic_verified_methods_.AddDexFile(dex_file, dex_file->NumMethodIds());
WriterMutexLock mu(Thread::Current(), verified_methods_lock_);
// There can be some verified methods that are already registered for the dex_file since we set
// up well known classes earlier. Remove these and put them in the array so that we don't
@@ -153,7 +162,9 @@ void VerificationResults::AddDexFile(const DexFile* dex_file) {
for (auto it = verified_methods_.begin(); it != verified_methods_.end(); ) {
MethodReference ref = it->first;
if (ref.dex_file == dex_file) {
- CHECK(atomic_verified_methods_.Insert(ref, nullptr, it->second) ==
+ CHECK(atomic_verified_methods_.Insert(DexFileReference(ref.dex_file, ref.dex_method_index),
+ nullptr,
+ it->second) ==
AtomicMap::kInsertResultSuccess);
it = verified_methods_.erase(it);
} else {
diff --git a/compiler/dex/verification_results.h b/compiler/dex/verification_results.h
index 22749fa621..5a03599de0 100644
--- a/compiler/dex/verification_results.h
+++ b/compiler/dex/verification_results.h
@@ -26,7 +26,7 @@
#include "class_reference.h"
#include "method_reference.h"
#include "safe_map.h"
-#include "utils/atomic_method_ref_map.h"
+#include "utils/atomic_dex_ref_map.h"
namespace art {
@@ -64,7 +64,7 @@ class VerificationResults {
private:
// Verified methods. The method array is fixed to avoid needing a lock to extend it.
- using AtomicMap = AtomicMethodRefMap<const VerifiedMethod*>;
+ using AtomicMap = AtomicDexRefMap<const VerifiedMethod*>;
using VerifiedMethodMap = SafeMap<MethodReference,
const VerifiedMethod*,
MethodReferenceComparator>;
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index bb64755c9e..83d7a3d4cc 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -71,7 +71,7 @@
#include "thread_pool.h"
#include "trampolines/trampoline_compiler.h"
#include "transaction.h"
-#include "utils/atomic_method_ref_map-inl.h"
+#include "utils/atomic_dex_ref_map-inl.h"
#include "utils/dex_cache_arrays_layout-inl.h"
#include "utils/swap_space.h"
#include "vdex_file.h"
@@ -321,7 +321,7 @@ CompilerDriver::CompilerDriver(
}
CompilerDriver::~CompilerDriver() {
- compiled_methods_.Visit([this](const MethodReference& ref ATTRIBUTE_UNUSED,
+ compiled_methods_.Visit([this](const DexFileReference& ref ATTRIBUTE_UNUSED,
CompiledMethod* method) {
if (method != nullptr) {
CompiledMethod::ReleaseSwapAllocatedCompiledMethod(this, method);
@@ -514,8 +514,9 @@ static void CompileMethod(Thread* self,
// TODO: Refactor the compilation to avoid having to distinguish the two passes
// here. That should be done on a higher level. http://b/29089975
if (driver->GetCurrentDexToDexMethods()->IsBitSet(method_idx)) {
- const VerifiedMethod* verified_method =
- driver->GetVerificationResults()->GetVerifiedMethod(method_ref);
+ VerificationResults* results = driver->GetVerificationResults();
+ DCHECK(results != nullptr);
+ const VerifiedMethod* verified_method = results->GetVerifiedMethod(method_ref);
// Do not optimize if a VerifiedMethod is missing. SafeCast elision,
// for example, relies on it.
compiled_method = optimizer::ArtCompileDEX(
@@ -576,12 +577,12 @@ static void CompileMethod(Thread* self,
} else if ((access_flags & kAccAbstract) != 0) {
// Abstract methods don't have code.
} else {
- const VerifiedMethod* verified_method =
- driver->GetVerificationResults()->GetVerifiedMethod(method_ref);
+ VerificationResults* results = driver->GetVerificationResults();
+ DCHECK(results != nullptr);
+ const VerifiedMethod* verified_method = results->GetVerifiedMethod(method_ref);
bool compile = compilation_enabled &&
// Basic checks, e.g., not <clinit>.
- driver->GetVerificationResults()
- ->IsCandidateForCompilation(method_ref, access_flags) &&
+ results->IsCandidateForCompilation(method_ref, access_flags) &&
// Did not fail to create VerifiedMethod metadata.
verified_method != nullptr &&
// Do not have failures that should punt to the interpreter.
@@ -890,17 +891,18 @@ void CompilerDriver::PreCompile(jobject class_loader,
TimingLogger* timings) {
CheckThreadPools();
- for (const DexFile* dex_file : dex_files) {
- // Can be already inserted if the caller is CompileOne. This happens for gtests.
- if (!compiled_methods_.HaveDexFile(dex_file)) {
- compiled_methods_.AddDexFile(dex_file);
- }
- }
-
LoadImageClasses(timings);
VLOG(compiler) << "LoadImageClasses: " << GetMemoryUsageString(false);
if (compiler_options_->IsAnyCompilationEnabled()) {
+ // Avoid adding the dex files in the case where we aren't going to add compiled methods.
+ // This reduces RAM usage for this case.
+ for (const DexFile* dex_file : dex_files) {
+ // Can be already inserted if the caller is CompileOne. This happens for gtests.
+ if (!compiled_methods_.HaveDexFile(dex_file)) {
+ compiled_methods_.AddDexFile(dex_file, dex_file->NumMethodIds());
+ }
+ }
// Resolve eagerly to prepare for compilation.
Resolve(class_loader, dex_files, timings);
VLOG(compiler) << "Resolve: " << GetMemoryUsageString(false);
@@ -2245,7 +2247,7 @@ class InitializeClassVisitor : public CompilationVisitor {
const bool is_boot_image = manager_->GetCompiler()->GetCompilerOptions().IsBootImage();
const bool is_app_image = manager_->GetCompiler()->GetCompilerOptions().IsAppImage();
- mirror::Class::Status old_status = klass->GetStatus();;
+ mirror::Class::Status old_status = klass->GetStatus();
// Only try to initialize classes that were successfully verified.
if (klass->IsVerified()) {
// Don't initialize classes in boot space when compiling app image
@@ -2363,6 +2365,14 @@ class InitializeClassVisitor : public CompilationVisitor {
}
}
}
+ // If the class still isn't initialized, at least try some checks that initialization
+ // would do so they can be skipped at runtime.
+ if (!klass->IsInitialized() &&
+ manager_->GetClassLinker()->ValidateSuperClassDescriptors(klass)) {
+ old_status = mirror::Class::kStatusSuperclassValidated;
+ } else {
+ soa.Self()->ClearException();
+ }
soa.Self()->AssertNoPendingException();
}
}
@@ -2838,9 +2848,10 @@ void CompilerDriver::AddCompiledMethod(const MethodReference& method_ref,
size_t non_relative_linker_patch_count) {
DCHECK(GetCompiledMethod(method_ref) == nullptr)
<< method_ref.dex_file->PrettyMethod(method_ref.dex_method_index);
- MethodTable::InsertResult result = compiled_methods_.Insert(method_ref,
- /*expected*/ nullptr,
- compiled_method);
+ MethodTable::InsertResult result = compiled_methods_.Insert(
+ DexFileReference(method_ref.dex_file, method_ref.dex_method_index),
+ /*expected*/ nullptr,
+ compiled_method);
CHECK(result == MethodTable::kInsertResultSuccess);
non_relative_linker_patch_count_.FetchAndAddRelaxed(non_relative_linker_patch_count);
DCHECK(GetCompiledMethod(method_ref) != nullptr)
@@ -2860,13 +2871,14 @@ bool CompilerDriver::GetCompiledClass(ClassReference ref, mirror::Class::Status*
void CompilerDriver::RecordClassStatus(ClassReference ref, mirror::Class::Status status) {
switch (status) {
- case mirror::Class::kStatusNotReady:
case mirror::Class::kStatusErrorResolved:
case mirror::Class::kStatusErrorUnresolved:
+ case mirror::Class::kStatusNotReady:
+ case mirror::Class::kStatusResolved:
case mirror::Class::kStatusRetryVerificationAtRuntime:
case mirror::Class::kStatusVerified:
+ case mirror::Class::kStatusSuperclassValidated:
case mirror::Class::kStatusInitialized:
- case mirror::Class::kStatusResolved:
break; // Expected states.
default:
LOG(FATAL) << "Unexpected class status for class "
@@ -2887,7 +2899,7 @@ void CompilerDriver::RecordClassStatus(ClassReference ref, mirror::Class::Status
CompiledMethod* CompilerDriver::GetCompiledMethod(MethodReference ref) const {
CompiledMethod* compiled_method = nullptr;
- compiled_methods_.Get(ref, &compiled_method);
+ compiled_methods_.Get(DexFileReference(ref.dex_file, ref.dex_method_index), &compiled_method);
return compiled_method;
}
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index e9e73787e4..a3272d331d 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -39,7 +39,7 @@
#include "os.h"
#include "safe_map.h"
#include "thread_pool.h"
-#include "utils/atomic_method_ref_map.h"
+#include "utils/atomic_dex_ref_map.h"
#include "utils/dex_cache_arrays_layout.h"
namespace art {
@@ -489,7 +489,7 @@ class CompilerDriver {
mutable Mutex compiled_classes_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
ClassStateTable compiled_classes_ GUARDED_BY(compiled_classes_lock_);
- typedef AtomicMethodRefMap<CompiledMethod*> MethodTable;
+ typedef AtomicDexRefMap<CompiledMethod*> MethodTable;
private:
// All method references that this compiler has compiled.
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 7cb3166cde..54b35202bd 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -158,26 +158,61 @@ class OatWriter::DexFileSource {
const void* source_;
};
+// OatClassHeader is the header only part of the oat class that is required even when compilation
+// is not enabled.
+class OatWriter::OatClassHeader {
+ public:
+ OatClassHeader(uint32_t offset,
+ uint32_t num_non_null_compiled_methods,
+ uint32_t num_methods,
+ mirror::Class::Status status)
+ : status_(status),
+ offset_(offset) {
+ // We just arbitrarily say that 0 methods means kOatClassNoneCompiled and that we won't use
+ // kOatClassAllCompiled unless there is at least one compiled method. This means in an
+ // interpreter only system, we can assert that all classes are kOatClassNoneCompiled.
+ if (num_non_null_compiled_methods == 0) {
+ type_ = kOatClassNoneCompiled;
+ } else if (num_non_null_compiled_methods == num_methods) {
+ type_ = kOatClassAllCompiled;
+ } else {
+ type_ = kOatClassSomeCompiled;
+ }
+ }
+
+ bool Write(OatWriter* oat_writer, OutputStream* out, const size_t file_offset) const;
+
+ static size_t SizeOf() {
+ return sizeof(status_) + sizeof(type_);
+ }
+
+ // Data to write.
+ static_assert(mirror::Class::Status::kStatusMax < (1 << 16), "class status won't fit in 16bits");
+ int16_t status_;
+
+ static_assert(OatClassType::kOatClassMax < (1 << 16), "oat_class type won't fit in 16bits");
+ uint16_t type_;
+
+ // Offset of start of OatClass from beginning of OatHeader. It is
+ // used to validate file position when writing.
+ uint32_t offset_;
+};
+
+// The actual oat class body contains the information about compiled methods. It is only required
+// for compiler filters that have any compilation.
class OatWriter::OatClass {
public:
- OatClass(size_t offset,
- const dchecked_vector<CompiledMethod*>& compiled_methods,
+ OatClass(const dchecked_vector<CompiledMethod*>& compiled_methods,
uint32_t num_non_null_compiled_methods,
- mirror::Class::Status status);
+ uint16_t oat_class_type);
OatClass(OatClass&& src) = default;
- size_t GetOatMethodOffsetsOffsetFromOatHeader(size_t class_def_method_index_) const;
- size_t GetOatMethodOffsetsOffsetFromOatClass(size_t class_def_method_index_) const;
size_t SizeOf() const;
- bool Write(OatWriter* oat_writer, OutputStream* out, const size_t file_offset) const;
+ bool Write(OatWriter* oat_writer, OutputStream* out) const;
CompiledMethod* GetCompiledMethod(size_t class_def_method_index) const {
return compiled_methods_[class_def_method_index];
}
- // Offset of start of OatClass from beginning of OatHeader. It is
- // used to validate file position when writing.
- size_t offset_;
-
// CompiledMethods for each class_def_method_index, or null if no method is available.
dchecked_vector<CompiledMethod*> compiled_methods_;
@@ -188,13 +223,6 @@ class OatWriter::OatClass {
dchecked_vector<uint32_t> oat_method_offsets_offsets_from_oat_class_;
// Data to write.
-
- static_assert(mirror::Class::Status::kStatusMax < (1 << 16), "class status won't fit in 16bits");
- int16_t status_;
-
- static_assert(OatClassType::kOatClassMax < (1 << 16), "oat_class type won't fit in 16bits");
- uint16_t type_;
-
uint32_t method_bitmap_size_;
// bit vector indexed by ClassDef method index. When
@@ -482,6 +510,11 @@ dchecked_vector<std::string> OatWriter::GetSourceLocations() const {
return locations;
}
+bool OatWriter::MayHaveCompiledMethods() const {
+ return CompilerFilter::IsAnyCompilationEnabled(
+ GetCompilerDriver()->GetCompilerOptions().GetCompilerFilter());
+}
+
bool OatWriter::WriteAndOpenDexFiles(
File* vdex_file,
OutputStream* oat_rodata,
@@ -663,7 +696,10 @@ class OatWriter::OatDexMethodVisitor : public DexMethodVisitor {
bool StartClass(const DexFile* dex_file, size_t class_def_index) OVERRIDE {
DexMethodVisitor::StartClass(dex_file, class_def_index);
- DCHECK_LT(oat_class_index_, writer_->oat_classes_.size());
+ if (kIsDebugBuild && writer_->MayHaveCompiledMethods()) {
+ // There are no oat classes if there aren't any compiled methods.
+ CHECK_LT(oat_class_index_, writer_->oat_classes_.size());
+ }
method_offsets_index_ = 0u;
return true;
}
@@ -726,7 +762,11 @@ class OatWriter::InitOatClassesMethodVisitor : public DexMethodVisitor {
for (const OatDexFile& oat_dex_file : writer_->oat_dex_files_) {
num_classes += oat_dex_file.class_offsets_.size();
}
- writer_->oat_classes_.reserve(num_classes);
+ // If we aren't compiling only reserve headers.
+ writer_->oat_class_headers_.reserve(num_classes);
+ if (writer->MayHaveCompiledMethods()) {
+ writer->oat_classes_.reserve(num_classes);
+ }
compiled_methods_.reserve(256u);
// If there are any classes, the class offsets allocation aligns the offset.
DCHECK(num_classes == 0u || IsAligned<4u>(offset));
@@ -760,7 +800,8 @@ class OatWriter::InitOatClassesMethodVisitor : public DexMethodVisitor {
mirror::Class::Status status;
bool found = writer_->compiler_driver_->GetCompiledClass(class_ref, &status);
if (!found) {
- if (writer_->compiler_driver_->GetVerificationResults()->IsClassRejected(class_ref)) {
+ VerificationResults* results = writer_->compiler_driver_->GetVerificationResults();
+ if (results != nullptr && results->IsClassRejected(class_ref)) {
// The oat class status is used only for verification of resolved classes,
// so use kStatusErrorResolved whether the class was resolved or unresolved
// during compile-time verification.
@@ -770,11 +811,19 @@ class OatWriter::InitOatClassesMethodVisitor : public DexMethodVisitor {
}
}
- writer_->oat_classes_.emplace_back(offset_,
- compiled_methods_,
- num_non_null_compiled_methods_,
- status);
- offset_ += writer_->oat_classes_.back().SizeOf();
+ writer_->oat_class_headers_.emplace_back(offset_,
+ num_non_null_compiled_methods_,
+ compiled_methods_.size(),
+ status);
+ OatClassHeader& header = writer_->oat_class_headers_.back();
+ offset_ += header.SizeOf();
+ if (writer_->MayHaveCompiledMethods()) {
+ writer_->oat_classes_.emplace_back(compiled_methods_,
+ num_non_null_compiled_methods_,
+ header.type_);
+ offset_ += writer_->oat_classes_.back().SizeOf();
+ }
+
return DexMethodVisitor::EndClass();
}
@@ -1671,7 +1720,7 @@ bool OatWriter::VisitDexMethods(DexMethodVisitor* visitor) {
if (UNLIKELY(!visitor->StartClass(dex_file, class_def_index))) {
return false;
}
- if (compiler_driver_->GetCompilerOptions().IsAnyCompilationEnabled()) {
+ if (MayHaveCompiledMethods()) {
const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_index);
const uint8_t* class_data = dex_file->GetClassData(class_def);
if (class_data != nullptr) { // ie not an empty class, such as a marker interface
@@ -1739,21 +1788,21 @@ size_t OatWriter::InitOatClasses(size_t offset) {
offset = visitor.GetOffset();
// Update oat_dex_files_.
- auto oat_class_it = oat_classes_.begin();
+ auto oat_class_it = oat_class_headers_.begin();
for (OatDexFile& oat_dex_file : oat_dex_files_) {
for (uint32_t& class_offset : oat_dex_file.class_offsets_) {
- DCHECK(oat_class_it != oat_classes_.end());
+ DCHECK(oat_class_it != oat_class_headers_.end());
class_offset = oat_class_it->offset_;
++oat_class_it;
}
}
- CHECK(oat_class_it == oat_classes_.end());
+ CHECK(oat_class_it == oat_class_headers_.end());
return offset;
}
size_t OatWriter::InitOatMaps(size_t offset) {
- if (!compiler_driver_->GetCompilerOptions().IsAnyCompilationEnabled()) {
+ if (!MayHaveCompiledMethods()) {
return offset;
}
{
@@ -2291,14 +2340,24 @@ size_t OatWriter::WriteClassOffsets(OutputStream* out, size_t file_offset, size_
}
size_t OatWriter::WriteClasses(OutputStream* out, size_t file_offset, size_t relative_offset) {
- for (OatClass& oat_class : oat_classes_) {
+ const bool may_have_compiled = MayHaveCompiledMethods();
+ if (may_have_compiled) {
+ CHECK_EQ(oat_class_headers_.size(), oat_classes_.size());
+ }
+ for (size_t i = 0; i < oat_class_headers_.size(); ++i) {
// If there are any classes, the class offsets allocation aligns the offset.
DCHECK_ALIGNED(relative_offset, 4u);
DCHECK_OFFSET();
- if (!oat_class.Write(this, out, oat_data_offset_)) {
+ if (!oat_class_headers_[i].Write(this, out, oat_data_offset_)) {
return 0u;
}
- relative_offset += oat_class.SizeOf();
+ relative_offset += oat_class_headers_[i].SizeOf();
+ if (may_have_compiled) {
+ if (!oat_classes_[i].Write(this, out)) {
+ return 0u;
+ }
+ relative_offset += oat_classes_[i].SizeOf();
+ }
}
return relative_offset;
}
@@ -3181,37 +3240,21 @@ bool OatWriter::OatDexFile::WriteClassOffsets(OatWriter* oat_writer, OutputStrea
return true;
}
-OatWriter::OatClass::OatClass(size_t offset,
- const dchecked_vector<CompiledMethod*>& compiled_methods,
+OatWriter::OatClass::OatClass(const dchecked_vector<CompiledMethod*>& compiled_methods,
uint32_t num_non_null_compiled_methods,
- mirror::Class::Status status)
+ uint16_t oat_class_type)
: compiled_methods_(compiled_methods) {
- uint32_t num_methods = compiled_methods.size();
+ const uint32_t num_methods = compiled_methods.size();
CHECK_LE(num_non_null_compiled_methods, num_methods);
- offset_ = offset;
oat_method_offsets_offsets_from_oat_class_.resize(num_methods);
- // Since both kOatClassNoneCompiled and kOatClassAllCompiled could
- // apply when there are 0 methods, we just arbitrarily say that 0
- // methods means kOatClassNoneCompiled and that we won't use
- // kOatClassAllCompiled unless there is at least one compiled
- // method. This means in an interpretter only system, we can assert
- // that all classes are kOatClassNoneCompiled.
- if (num_non_null_compiled_methods == 0) {
- type_ = kOatClassNoneCompiled;
- } else if (num_non_null_compiled_methods == num_methods) {
- type_ = kOatClassAllCompiled;
- } else {
- type_ = kOatClassSomeCompiled;
- }
-
- status_ = status;
method_offsets_.resize(num_non_null_compiled_methods);
method_headers_.resize(num_non_null_compiled_methods);
- uint32_t oat_method_offsets_offset_from_oat_class = sizeof(type_) + sizeof(status_);
- if (type_ == kOatClassSomeCompiled) {
+ uint32_t oat_method_offsets_offset_from_oat_class = OatClassHeader::SizeOf();
+ // We only create this instance if there are at least some compiled.
+ if (oat_class_type == kOatClassSomeCompiled) {
method_bitmap_.reset(new BitVector(num_methods, false, Allocator::GetMallocAllocator()));
method_bitmap_size_ = method_bitmap_->GetSizeOf();
oat_method_offsets_offset_from_oat_class += sizeof(method_bitmap_size_);
@@ -3228,38 +3271,22 @@ OatWriter::OatClass::OatClass(size_t offset,
} else {
oat_method_offsets_offsets_from_oat_class_[i] = oat_method_offsets_offset_from_oat_class;
oat_method_offsets_offset_from_oat_class += sizeof(OatMethodOffsets);
- if (type_ == kOatClassSomeCompiled) {
+ if (oat_class_type == kOatClassSomeCompiled) {
method_bitmap_->SetBit(i);
}
}
}
}
-size_t OatWriter::OatClass::GetOatMethodOffsetsOffsetFromOatHeader(
- size_t class_def_method_index_) const {
- uint32_t method_offset = GetOatMethodOffsetsOffsetFromOatClass(class_def_method_index_);
- if (method_offset == 0) {
- return 0;
- }
- return offset_ + method_offset;
-}
-
-size_t OatWriter::OatClass::GetOatMethodOffsetsOffsetFromOatClass(
- size_t class_def_method_index_) const {
- return oat_method_offsets_offsets_from_oat_class_[class_def_method_index_];
-}
-
size_t OatWriter::OatClass::SizeOf() const {
- return sizeof(status_)
- + sizeof(type_)
- + ((method_bitmap_size_ == 0) ? 0 : sizeof(method_bitmap_size_))
+ return ((method_bitmap_size_ == 0) ? 0 : sizeof(method_bitmap_size_))
+ method_bitmap_size_
+ (sizeof(method_offsets_[0]) * method_offsets_.size());
}
-bool OatWriter::OatClass::Write(OatWriter* oat_writer,
- OutputStream* out,
- const size_t file_offset) const {
+bool OatWriter::OatClassHeader::Write(OatWriter* oat_writer,
+ OutputStream* out,
+ const size_t file_offset) const {
DCHECK_OFFSET_();
if (!out->WriteFully(&status_, sizeof(status_))) {
PLOG(ERROR) << "Failed to write class status to " << out->GetLocation();
@@ -3272,9 +3299,11 @@ bool OatWriter::OatClass::Write(OatWriter* oat_writer,
return false;
}
oat_writer->size_oat_class_type_ += sizeof(type_);
+ return true;
+}
+bool OatWriter::OatClass::Write(OatWriter* oat_writer, OutputStream* out) const {
if (method_bitmap_size_ != 0) {
- CHECK_EQ(kOatClassSomeCompiled, type_);
if (!out->WriteFully(&method_bitmap_size_, sizeof(method_bitmap_size_))) {
PLOG(ERROR) << "Failed to write method bitmap size to " << out->GetLocation();
return false;
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index 024a3e80ca..7b3c31ca80 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -239,12 +239,13 @@ class OatWriter {
return ArrayRef<const debug::MethodDebugInfo>(method_info_);
}
- const CompilerDriver* GetCompilerDriver() {
+ const CompilerDriver* GetCompilerDriver() const {
return compiler_driver_;
}
private:
class DexFileSource;
+ class OatClassHeader;
class OatClass;
class OatDexFile;
@@ -327,6 +328,8 @@ class OatWriter {
void SetMultiOatRelativePatcherAdjustment();
void CloseSources();
+ bool MayHaveCompiledMethods() const;
+
enum class WriteState {
kAddingDexFileSources,
kPrepareLayout,
@@ -410,6 +413,7 @@ class OatWriter {
// data to write
std::unique_ptr<OatHeader> oat_header_;
dchecked_vector<OatDexFile> oat_dex_files_;
+ dchecked_vector<OatClassHeader> oat_class_headers_;
dchecked_vector<OatClass> oat_classes_;
std::unique_ptr<const std::vector<uint8_t>> jni_dlsym_lookup_;
std::unique_ptr<const std::vector<uint8_t>> quick_generic_jni_trampoline_;
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index be8f9e9cf8..23d188d630 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -7859,8 +7859,11 @@ void LocationsBuilderMIPS::VisitNewArray(HNewArray* instruction) {
void InstructionCodeGeneratorMIPS::VisitNewArray(HNewArray* instruction) {
// Note: if heap poisoning is enabled, the entry point takes care
// of poisoning the reference.
- codegen_->InvokeRuntime(kQuickAllocArrayResolved, instruction, instruction->GetDexPc());
+ QuickEntrypointEnum entrypoint =
+ CodeGenerator::GetArrayAllocationEntrypoint(instruction->GetLoadClass()->GetClass());
+ codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
+ DCHECK(!codegen_->IsLeafMethod());
}
void LocationsBuilderMIPS::VisitNewInstance(HNewInstance* instruction) {
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index cf6b3d5805..454a2ddc14 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -5578,8 +5578,11 @@ void LocationsBuilderMIPS64::VisitNewArray(HNewArray* instruction) {
void InstructionCodeGeneratorMIPS64::VisitNewArray(HNewArray* instruction) {
// Note: if heap poisoning is enabled, the entry point takes care
// of poisoning the reference.
- codegen_->InvokeRuntime(kQuickAllocArrayResolved, instruction, instruction->GetDexPc());
+ QuickEntrypointEnum entrypoint =
+ CodeGenerator::GetArrayAllocationEntrypoint(instruction->GetLoadClass()->GetClass());
+ codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
+ DCHECK(!codegen_->IsLeafMethod());
}
void LocationsBuilderMIPS64::VisitNewInstance(HNewInstance* instruction) {
diff --git a/compiler/optimizing/nodes_vector.h b/compiler/optimizing/nodes_vector.h
index 5dbe29b4fa..6261171a00 100644
--- a/compiler/optimizing/nodes_vector.h
+++ b/compiler/optimizing/nodes_vector.h
@@ -46,6 +46,10 @@ class Alignment {
return "ALIGN(" + std::to_string(base_) + "," + std::to_string(offset_) + ")";
}
+ bool operator==(const Alignment& other) const {
+ return base_ == other.base_ && offset_ == other.offset_;
+ }
+
private:
size_t base_;
size_t offset_;
@@ -96,6 +100,19 @@ class HVecOperation : public HVariableInputSizeInstruction {
return GetPackedField<TypeField>();
}
+ // Assumes vector nodes cannot be moved by default. Each concrete implementation
+ // that can be moved should override this method and return true.
+ bool CanBeMoved() const OVERRIDE { return false; }
+
+ // Tests if all data of a vector node (vector length and packed type) is equal.
+ // Each concrete implementation that adds more fields should test equality of
+ // those fields in its own method *and* call all super methods.
+ bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+ DCHECK(other->IsVecOperation());
+ const HVecOperation* o = other->AsVecOperation();
+ return GetVectorLength() == o->GetVectorLength() && GetPackedType() == o->GetPackedType();
+ }
+
DECLARE_ABSTRACT_INSTRUCTION(VecOperation);
protected:
@@ -189,6 +206,12 @@ class HVecMemoryOperation : public HVecOperation {
HInstruction* GetArray() const { return InputAt(0); }
HInstruction* GetIndex() const { return InputAt(1); }
+ bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+ DCHECK(other->IsVecMemoryOperation());
+ const HVecMemoryOperation* o = other->AsVecMemoryOperation();
+ return HVecOperation::InstructionDataEquals(o) && GetAlignment() == o->GetAlignment();
+ }
+
DECLARE_ABSTRACT_INSTRUCTION(VecMemoryOperation);
private:
@@ -231,7 +254,13 @@ class HVecReplicateScalar FINAL : public HVecUnaryOperation {
: HVecUnaryOperation(arena, scalar, packed_type, vector_length, dex_pc) {
DCHECK(!scalar->IsVecOperation());
}
+
+ // A replicate needs to stay in place, since SIMD registers are not
+ // kept alive across vector loop boundaries (yet).
+ bool CanBeMoved() const OVERRIDE { return false; }
+
DECLARE_INSTRUCTION(VecReplicateScalar);
+
private:
DISALLOW_COPY_AND_ASSIGN(HVecReplicateScalar);
};
@@ -251,7 +280,10 @@ class HVecSumReduce FINAL : public HVecUnaryOperation {
// TODO: probably integral promotion
Primitive::Type GetType() const OVERRIDE { return GetPackedType(); }
+ bool CanBeMoved() const OVERRIDE { return true; }
+
DECLARE_INSTRUCTION(VecSumReduce);
+
private:
DISALLOW_COPY_AND_ASSIGN(HVecSumReduce);
};
@@ -273,6 +305,8 @@ class HVecCnv FINAL : public HVecUnaryOperation {
Primitive::Type GetInputType() const { return InputAt(0)->AsVecOperation()->GetPackedType(); }
Primitive::Type GetResultType() const { return GetPackedType(); }
+ bool CanBeMoved() const OVERRIDE { return true; }
+
DECLARE_INSTRUCTION(VecCnv);
private:
@@ -291,7 +325,11 @@ class HVecNeg FINAL : public HVecUnaryOperation {
: HVecUnaryOperation(arena, input, packed_type, vector_length, dex_pc) {
DCHECK(HasConsistentPackedTypes(input, packed_type));
}
+
+ bool CanBeMoved() const OVERRIDE { return true; }
+
DECLARE_INSTRUCTION(VecNeg);
+
private:
DISALLOW_COPY_AND_ASSIGN(HVecNeg);
};
@@ -308,7 +346,11 @@ class HVecAbs FINAL : public HVecUnaryOperation {
: HVecUnaryOperation(arena, input, packed_type, vector_length, dex_pc) {
DCHECK(HasConsistentPackedTypes(input, packed_type));
}
+
+ bool CanBeMoved() const OVERRIDE { return true; }
+
DECLARE_INSTRUCTION(VecAbs);
+
private:
DISALLOW_COPY_AND_ASSIGN(HVecAbs);
};
@@ -326,7 +368,11 @@ class HVecNot FINAL : public HVecUnaryOperation {
: HVecUnaryOperation(arena, input, packed_type, vector_length, dex_pc) {
DCHECK(input->IsVecOperation());
}
+
+ bool CanBeMoved() const OVERRIDE { return true; }
+
DECLARE_INSTRUCTION(VecNot);
+
private:
DISALLOW_COPY_AND_ASSIGN(HVecNot);
};
@@ -349,7 +395,11 @@ class HVecAdd FINAL : public HVecBinaryOperation {
DCHECK(HasConsistentPackedTypes(left, packed_type));
DCHECK(HasConsistentPackedTypes(right, packed_type));
}
+
+ bool CanBeMoved() const OVERRIDE { return true; }
+
DECLARE_INSTRUCTION(VecAdd);
+
private:
DISALLOW_COPY_AND_ASSIGN(HVecAdd);
};
@@ -378,6 +428,16 @@ class HVecHalvingAdd FINAL : public HVecBinaryOperation {
bool IsUnsigned() const { return GetPackedFlag<kFieldHAddIsUnsigned>(); }
bool IsRounded() const { return GetPackedFlag<kFieldHAddIsRounded>(); }
+ bool CanBeMoved() const OVERRIDE { return true; }
+
+ bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+ DCHECK(other->IsVecHalvingAdd());
+ const HVecHalvingAdd* o = other->AsVecHalvingAdd();
+ return HVecOperation::InstructionDataEquals(o) &&
+ IsUnsigned() == o->IsUnsigned() &&
+ IsRounded() == o->IsRounded();
+ }
+
DECLARE_INSTRUCTION(VecHalvingAdd);
private:
@@ -404,7 +464,11 @@ class HVecSub FINAL : public HVecBinaryOperation {
DCHECK(HasConsistentPackedTypes(left, packed_type));
DCHECK(HasConsistentPackedTypes(right, packed_type));
}
+
+ bool CanBeMoved() const OVERRIDE { return true; }
+
DECLARE_INSTRUCTION(VecSub);
+
private:
DISALLOW_COPY_AND_ASSIGN(HVecSub);
};
@@ -423,7 +487,11 @@ class HVecMul FINAL : public HVecBinaryOperation {
DCHECK(HasConsistentPackedTypes(left, packed_type));
DCHECK(HasConsistentPackedTypes(right, packed_type));
}
+
+ bool CanBeMoved() const OVERRIDE { return true; }
+
DECLARE_INSTRUCTION(VecMul);
+
private:
DISALLOW_COPY_AND_ASSIGN(HVecMul);
};
@@ -442,7 +510,11 @@ class HVecDiv FINAL : public HVecBinaryOperation {
DCHECK(HasConsistentPackedTypes(left, packed_type));
DCHECK(HasConsistentPackedTypes(right, packed_type));
}
+
+ bool CanBeMoved() const OVERRIDE { return true; }
+
DECLARE_INSTRUCTION(VecDiv);
+
private:
DISALLOW_COPY_AND_ASSIGN(HVecDiv);
};
@@ -466,6 +538,14 @@ class HVecMin FINAL : public HVecBinaryOperation {
bool IsUnsigned() const { return GetPackedFlag<kFieldMinOpIsUnsigned>(); }
+ bool CanBeMoved() const OVERRIDE { return true; }
+
+ bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+ DCHECK(other->IsVecMin());
+ const HVecMin* o = other->AsVecMin();
+ return HVecOperation::InstructionDataEquals(o) && IsUnsigned() == o->IsUnsigned();
+ }
+
DECLARE_INSTRUCTION(VecMin);
private:
@@ -496,6 +576,14 @@ class HVecMax FINAL : public HVecBinaryOperation {
bool IsUnsigned() const { return GetPackedFlag<kFieldMaxOpIsUnsigned>(); }
+ bool CanBeMoved() const OVERRIDE { return true; }
+
+ bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+ DCHECK(other->IsVecMax());
+ const HVecMax* o = other->AsVecMax();
+ return HVecOperation::InstructionDataEquals(o) && IsUnsigned() == o->IsUnsigned();
+ }
+
DECLARE_INSTRUCTION(VecMax);
private:
@@ -520,7 +608,11 @@ class HVecAnd FINAL : public HVecBinaryOperation {
: HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
DCHECK(left->IsVecOperation() && right->IsVecOperation());
}
+
+ bool CanBeMoved() const OVERRIDE { return true; }
+
DECLARE_INSTRUCTION(VecAnd);
+
private:
DISALLOW_COPY_AND_ASSIGN(HVecAnd);
};
@@ -538,7 +630,11 @@ class HVecAndNot FINAL : public HVecBinaryOperation {
: HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
DCHECK(left->IsVecOperation() && right->IsVecOperation());
}
+
+ bool CanBeMoved() const OVERRIDE { return true; }
+
DECLARE_INSTRUCTION(VecAndNot);
+
private:
DISALLOW_COPY_AND_ASSIGN(HVecAndNot);
};
@@ -556,7 +652,11 @@ class HVecOr FINAL : public HVecBinaryOperation {
: HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
DCHECK(left->IsVecOperation() && right->IsVecOperation());
}
+
+ bool CanBeMoved() const OVERRIDE { return true; }
+
DECLARE_INSTRUCTION(VecOr);
+
private:
DISALLOW_COPY_AND_ASSIGN(HVecOr);
};
@@ -574,7 +674,11 @@ class HVecXor FINAL : public HVecBinaryOperation {
: HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
DCHECK(left->IsVecOperation() && right->IsVecOperation());
}
+
+ bool CanBeMoved() const OVERRIDE { return true; }
+
DECLARE_INSTRUCTION(VecXor);
+
private:
DISALLOW_COPY_AND_ASSIGN(HVecXor);
};
@@ -592,7 +696,11 @@ class HVecShl FINAL : public HVecBinaryOperation {
: HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
DCHECK(HasConsistentPackedTypes(left, packed_type));
}
+
+ bool CanBeMoved() const OVERRIDE { return true; }
+
DECLARE_INSTRUCTION(VecShl);
+
private:
DISALLOW_COPY_AND_ASSIGN(HVecShl);
};
@@ -610,7 +718,11 @@ class HVecShr FINAL : public HVecBinaryOperation {
: HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
DCHECK(HasConsistentPackedTypes(left, packed_type));
}
+
+ bool CanBeMoved() const OVERRIDE { return true; }
+
DECLARE_INSTRUCTION(VecShr);
+
private:
DISALLOW_COPY_AND_ASSIGN(HVecShr);
};
@@ -628,7 +740,11 @@ class HVecUShr FINAL : public HVecBinaryOperation {
: HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
DCHECK(HasConsistentPackedTypes(left, packed_type));
}
+
+ bool CanBeMoved() const OVERRIDE { return true; }
+
DECLARE_INSTRUCTION(VecUShr);
+
private:
DISALLOW_COPY_AND_ASSIGN(HVecUShr);
};
@@ -656,7 +772,13 @@ class HVecSetScalars FINAL : public HVecOperation {
SetRawInputAt(0, scalars[i]);
}
}
+
+ // Setting scalars needs to stay in place, since SIMD registers are not
+ // kept alive across vector loop boundaries (yet).
+ bool CanBeMoved() const OVERRIDE { return false; }
+
DECLARE_INSTRUCTION(VecSetScalars);
+
private:
DISALLOW_COPY_AND_ASSIGN(HVecSetScalars);
};
@@ -697,7 +819,9 @@ class HVecMultiplyAccumulate FINAL : public HVecOperation {
bool CanBeMoved() const OVERRIDE { return true; }
bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
- return op_kind_ == other->AsVecMultiplyAccumulate()->op_kind_;
+ DCHECK(other->IsVecMultiplyAccumulate());
+ const HVecMultiplyAccumulate* o = other->AsVecMultiplyAccumulate();
+ return HVecOperation::InstructionDataEquals(o) && GetOpKind() == o->GetOpKind();
}
InstructionKind GetOpKind() const { return op_kind_; }
@@ -732,10 +856,19 @@ class HVecLoad FINAL : public HVecMemoryOperation {
SetRawInputAt(1, index);
SetPackedFlag<kFieldIsStringCharAt>(is_string_char_at);
}
- DECLARE_INSTRUCTION(VecLoad);
bool IsStringCharAt() const { return GetPackedFlag<kFieldIsStringCharAt>(); }
+ bool CanBeMoved() const OVERRIDE { return true; }
+
+ bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+ DCHECK(other->IsVecLoad());
+ const HVecLoad* o = other->AsVecLoad();
+ return HVecMemoryOperation::InstructionDataEquals(o) && IsStringCharAt() == o->IsStringCharAt();
+ }
+
+ DECLARE_INSTRUCTION(VecLoad);
+
private:
// Additional packed bits.
static constexpr size_t kFieldIsStringCharAt = HVecOperation::kNumberOfVectorOpPackedBits;
@@ -767,7 +900,12 @@ class HVecStore FINAL : public HVecMemoryOperation {
SetRawInputAt(1, index);
SetRawInputAt(2, value);
}
+
+ // A store needs to stay in place.
+ bool CanBeMoved() const OVERRIDE { return false; }
+
DECLARE_INSTRUCTION(VecStore);
+
private:
DISALLOW_COPY_AND_ASSIGN(HVecStore);
};
diff --git a/compiler/optimizing/nodes_vector_test.cc b/compiler/optimizing/nodes_vector_test.cc
new file mode 100644
index 0000000000..0238ea4602
--- /dev/null
+++ b/compiler/optimizing/nodes_vector_test.cc
@@ -0,0 +1,335 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "base/arena_allocator.h"
+#include "nodes.h"
+#include "optimizing_unit_test.h"
+
+namespace art {
+
+/**
+ * Fixture class for testing vector nodes.
+ */
+class NodesVectorTest : public CommonCompilerTest {
+ public:
+ NodesVectorTest()
+ : pool_(),
+ allocator_(&pool_),
+ graph_(CreateGraph(&allocator_)) {
+ BuildGraph();
+ }
+
+ ~NodesVectorTest() { }
+
+ void BuildGraph() {
+ graph_->SetNumberOfVRegs(1);
+ entry_block_ = new (&allocator_) HBasicBlock(graph_);
+ exit_block_ = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(entry_block_);
+ graph_->AddBlock(exit_block_);
+ graph_->SetEntryBlock(entry_block_);
+ graph_->SetExitBlock(exit_block_);
+ parameter_ = new (&allocator_) HParameterValue(graph_->GetDexFile(),
+ dex::TypeIndex(0),
+ 0,
+ Primitive::kPrimInt);
+ entry_block_->AddInstruction(parameter_);
+ }
+
+ // General building fields.
+ ArenaPool pool_;
+ ArenaAllocator allocator_;
+ HGraph* graph_;
+
+ HBasicBlock* entry_block_;
+ HBasicBlock* exit_block_;
+
+ HInstruction* parameter_;
+};
+
+//
+// The actual vector nodes tests.
+//
+
+TEST(NodesVector, Alignment) {
+ EXPECT_TRUE(Alignment(1, 0).IsAlignedAt(1));
+ EXPECT_FALSE(Alignment(1, 0).IsAlignedAt(2));
+
+ EXPECT_TRUE(Alignment(2, 0).IsAlignedAt(1));
+ EXPECT_TRUE(Alignment(2, 1).IsAlignedAt(1));
+ EXPECT_TRUE(Alignment(2, 0).IsAlignedAt(2));
+ EXPECT_FALSE(Alignment(2, 1).IsAlignedAt(2));
+ EXPECT_FALSE(Alignment(2, 0).IsAlignedAt(4));
+ EXPECT_FALSE(Alignment(2, 1).IsAlignedAt(4));
+
+ EXPECT_TRUE(Alignment(4, 0).IsAlignedAt(1));
+ EXPECT_TRUE(Alignment(4, 2).IsAlignedAt(1));
+ EXPECT_TRUE(Alignment(4, 0).IsAlignedAt(2));
+ EXPECT_TRUE(Alignment(4, 2).IsAlignedAt(2));
+ EXPECT_TRUE(Alignment(4, 0).IsAlignedAt(4));
+ EXPECT_FALSE(Alignment(4, 2).IsAlignedAt(4));
+ EXPECT_FALSE(Alignment(4, 0).IsAlignedAt(8));
+ EXPECT_FALSE(Alignment(4, 2).IsAlignedAt(8));
+
+ EXPECT_TRUE(Alignment(16, 0).IsAlignedAt(1));
+ EXPECT_TRUE(Alignment(16, 0).IsAlignedAt(2));
+ EXPECT_TRUE(Alignment(16, 0).IsAlignedAt(4));
+ EXPECT_TRUE(Alignment(16, 8).IsAlignedAt(8));
+ EXPECT_TRUE(Alignment(16, 0).IsAlignedAt(16));
+ EXPECT_FALSE(Alignment(16, 1).IsAlignedAt(16));
+ EXPECT_FALSE(Alignment(16, 7).IsAlignedAt(16));
+ EXPECT_FALSE(Alignment(16, 0).IsAlignedAt(32));
+}
+
+TEST(NodesVector, AlignmentEQ) {
+ EXPECT_TRUE(Alignment(2, 0) == Alignment(2, 0));
+ EXPECT_TRUE(Alignment(2, 1) == Alignment(2, 1));
+ EXPECT_TRUE(Alignment(4, 0) == Alignment(4, 0));
+ EXPECT_TRUE(Alignment(4, 2) == Alignment(4, 2));
+
+ EXPECT_FALSE(Alignment(4, 0) == Alignment(2, 0));
+ EXPECT_FALSE(Alignment(4, 0) == Alignment(4, 1));
+ EXPECT_FALSE(Alignment(4, 0) == Alignment(8, 0));
+}
+
+TEST(NodesVector, AlignmentString) {
+ EXPECT_STREQ("ALIGN(1,0)", Alignment(1, 0).ToString().c_str());
+
+ EXPECT_STREQ("ALIGN(2,0)", Alignment(2, 0).ToString().c_str());
+ EXPECT_STREQ("ALIGN(2,1)", Alignment(2, 1).ToString().c_str());
+
+ EXPECT_STREQ("ALIGN(16,0)", Alignment(16, 0).ToString().c_str());
+ EXPECT_STREQ("ALIGN(16,1)", Alignment(16, 1).ToString().c_str());
+ EXPECT_STREQ("ALIGN(16,8)", Alignment(16, 8).ToString().c_str());
+ EXPECT_STREQ("ALIGN(16,9)", Alignment(16, 9).ToString().c_str());
+}
+
+TEST_F(NodesVectorTest, VectorOperationProperties) {
+ HVecOperation* v0 = new (&allocator_)
+ HVecReplicateScalar(&allocator_, parameter_, Primitive::kPrimInt, 4);
+ HVecOperation* v1 = new (&allocator_)
+ HVecReplicateScalar(&allocator_, parameter_, Primitive::kPrimInt, 4);
+ HVecOperation* v2 = new (&allocator_)
+ HVecReplicateScalar(&allocator_, parameter_, Primitive::kPrimInt, 2);
+ HVecOperation* v3 = new (&allocator_)
+ HVecReplicateScalar(&allocator_, parameter_, Primitive::kPrimShort, 4);
+ HVecOperation* v4 = new (&allocator_)
+ HVecStore(&allocator_, parameter_, parameter_, v0, Primitive::kPrimInt, 4);
+
+ EXPECT_TRUE(v0->Equals(v0));
+ EXPECT_TRUE(v1->Equals(v1));
+ EXPECT_TRUE(v2->Equals(v2));
+ EXPECT_TRUE(v3->Equals(v3));
+ EXPECT_TRUE(v4->Equals(v4));
+
+ EXPECT_TRUE(v0->Equals(v1));
+ EXPECT_FALSE(v0->Equals(v2)); // different vector lengths
+ EXPECT_FALSE(v0->Equals(v3)); // different packed types
+ EXPECT_FALSE(v0->Equals(v4)); // different kinds
+
+ EXPECT_TRUE(v1->Equals(v0)); // switch operands
+ EXPECT_FALSE(v4->Equals(v0));
+
+ EXPECT_EQ(4u, v0->GetVectorLength());
+ EXPECT_EQ(4u, v1->GetVectorLength());
+ EXPECT_EQ(2u, v2->GetVectorLength());
+ EXPECT_EQ(4u, v3->GetVectorLength());
+ EXPECT_EQ(4u, v4->GetVectorLength());
+
+ EXPECT_EQ(Primitive::kPrimDouble, v0->GetType());
+ EXPECT_EQ(Primitive::kPrimDouble, v1->GetType());
+ EXPECT_EQ(Primitive::kPrimDouble, v2->GetType());
+ EXPECT_EQ(Primitive::kPrimDouble, v3->GetType());
+ EXPECT_EQ(Primitive::kPrimDouble, v4->GetType());
+
+ EXPECT_EQ(Primitive::kPrimInt, v0->GetPackedType());
+ EXPECT_EQ(Primitive::kPrimInt, v1->GetPackedType());
+ EXPECT_EQ(Primitive::kPrimInt, v2->GetPackedType());
+ EXPECT_EQ(Primitive::kPrimShort, v3->GetPackedType());
+ EXPECT_EQ(Primitive::kPrimInt, v4->GetPackedType());
+
+ EXPECT_EQ(16u, v0->GetVectorNumberOfBytes());
+ EXPECT_EQ(16u, v1->GetVectorNumberOfBytes());
+ EXPECT_EQ(8u, v2->GetVectorNumberOfBytes());
+ EXPECT_EQ(8u, v3->GetVectorNumberOfBytes());
+ EXPECT_EQ(16u, v4->GetVectorNumberOfBytes());
+
+ EXPECT_FALSE(v0->CanBeMoved());
+ EXPECT_FALSE(v1->CanBeMoved());
+ EXPECT_FALSE(v2->CanBeMoved());
+ EXPECT_FALSE(v3->CanBeMoved());
+ EXPECT_FALSE(v4->CanBeMoved());
+}
+
+TEST_F(NodesVectorTest, VectorAlignmentAndStringCharAtMatterOnLoad) {
+ HVecLoad* v0 = new (&allocator_)
+ HVecLoad(&allocator_, parameter_, parameter_, Primitive::kPrimInt, 4, /*is_string_char_at*/ false);
+ HVecLoad* v1 = new (&allocator_)
+ HVecLoad(&allocator_, parameter_, parameter_, Primitive::kPrimInt, 4, /*is_string_char_at*/ false);
+ HVecLoad* v2 = new (&allocator_)
+ HVecLoad(&allocator_, parameter_, parameter_, Primitive::kPrimInt, 4, /*is_string_char_at*/ true);
+
+ EXPECT_TRUE(v0->CanBeMoved());
+ EXPECT_TRUE(v1->CanBeMoved());
+ EXPECT_TRUE(v2->CanBeMoved());
+
+ EXPECT_FALSE(v0->IsStringCharAt());
+ EXPECT_FALSE(v1->IsStringCharAt());
+ EXPECT_TRUE(v2->IsStringCharAt());
+
+ EXPECT_TRUE(v0->Equals(v0));
+ EXPECT_TRUE(v1->Equals(v1));
+ EXPECT_TRUE(v2->Equals(v2));
+
+ EXPECT_TRUE(v0->Equals(v1));
+ EXPECT_FALSE(v0->Equals(v2));
+
+ EXPECT_TRUE(v0->GetAlignment() == Alignment(4, 0));
+ EXPECT_TRUE(v1->GetAlignment() == Alignment(4, 0));
+ EXPECT_TRUE(v2->GetAlignment() == Alignment(4, 0));
+
+ v1->SetAlignment(Alignment(8, 0));
+
+ EXPECT_TRUE(v1->GetAlignment() == Alignment(8, 0));
+
+ EXPECT_FALSE(v0->Equals(v1)); // no longer equal
+}
+
+TEST_F(NodesVectorTest, VectorSignMattersOnMin) {
+ HVecOperation* v0 = new (&allocator_)
+ HVecReplicateScalar(&allocator_, parameter_, Primitive::kPrimInt, 4);
+
+ HVecMin* v1 = new (&allocator_)
+ HVecMin(&allocator_, v0, v0, Primitive::kPrimInt, 4, /*is_unsigned*/ true);
+ HVecMin* v2 = new (&allocator_)
+ HVecMin(&allocator_, v0, v0, Primitive::kPrimInt, 4, /*is_unsigned*/ false);
+ HVecMin* v3 = new (&allocator_)
+ HVecMin(&allocator_, v0, v0, Primitive::kPrimInt, 2, /*is_unsigned*/ true);
+
+ EXPECT_FALSE(v0->CanBeMoved());
+ EXPECT_TRUE(v1->CanBeMoved());
+ EXPECT_TRUE(v2->CanBeMoved());
+ EXPECT_TRUE(v3->CanBeMoved());
+
+ EXPECT_TRUE(v1->IsUnsigned());
+ EXPECT_FALSE(v2->IsUnsigned());
+ EXPECT_TRUE(v3->IsUnsigned());
+
+ EXPECT_TRUE(v1->Equals(v1));
+ EXPECT_TRUE(v2->Equals(v2));
+ EXPECT_TRUE(v3->Equals(v3));
+
+ EXPECT_FALSE(v1->Equals(v2)); // different signs
+ EXPECT_FALSE(v1->Equals(v3)); // different vector lengths
+}
+
+TEST_F(NodesVectorTest, VectorSignMattersOnMax) {
+ HVecOperation* v0 = new (&allocator_)
+ HVecReplicateScalar(&allocator_, parameter_, Primitive::kPrimInt, 4);
+
+ HVecMax* v1 = new (&allocator_)
+ HVecMax(&allocator_, v0, v0, Primitive::kPrimInt, 4, /*is_unsigned*/ true);
+ HVecMax* v2 = new (&allocator_)
+ HVecMax(&allocator_, v0, v0, Primitive::kPrimInt, 4, /*is_unsigned*/ false);
+ HVecMax* v3 = new (&allocator_)
+ HVecMax(&allocator_, v0, v0, Primitive::kPrimInt, 2, /*is_unsigned*/ true);
+
+ EXPECT_FALSE(v0->CanBeMoved());
+ EXPECT_TRUE(v1->CanBeMoved());
+ EXPECT_TRUE(v2->CanBeMoved());
+ EXPECT_TRUE(v3->CanBeMoved());
+
+ EXPECT_TRUE(v1->IsUnsigned());
+ EXPECT_FALSE(v2->IsUnsigned());
+ EXPECT_TRUE(v3->IsUnsigned());
+
+ EXPECT_TRUE(v1->Equals(v1));
+ EXPECT_TRUE(v2->Equals(v2));
+ EXPECT_TRUE(v3->Equals(v3));
+
+ EXPECT_FALSE(v1->Equals(v2)); // different signs
+ EXPECT_FALSE(v1->Equals(v3)); // different vector lengths
+}
+
+TEST_F(NodesVectorTest, VectorAttributesMatterOnHalvingAdd) {
+ HVecOperation* v0 = new (&allocator_)
+ HVecReplicateScalar(&allocator_, parameter_, Primitive::kPrimInt, 4);
+
+ HVecHalvingAdd* v1 = new (&allocator_) HVecHalvingAdd(
+ &allocator_, v0, v0, Primitive::kPrimInt, 4, /*is_unsigned*/ true, /*is_rounded*/ true);
+ HVecHalvingAdd* v2 = new (&allocator_) HVecHalvingAdd(
+ &allocator_, v0, v0, Primitive::kPrimInt, 4, /*is_unsigned*/ true, /*is_rounded*/ false);
+ HVecHalvingAdd* v3 = new (&allocator_) HVecHalvingAdd(
+ &allocator_, v0, v0, Primitive::kPrimInt, 4, /*is_unsigned*/ false, /*is_rounded*/ true);
+ HVecHalvingAdd* v4 = new (&allocator_) HVecHalvingAdd(
+ &allocator_, v0, v0, Primitive::kPrimInt, 4, /*is_unsigned*/ false, /*is_rounded*/ false);
+ HVecHalvingAdd* v5 = new (&allocator_) HVecHalvingAdd(
+ &allocator_, v0, v0, Primitive::kPrimInt, 2, /*is_unsigned*/ true, /*is_rounded*/ true);
+
+ EXPECT_FALSE(v0->CanBeMoved());
+ EXPECT_TRUE(v1->CanBeMoved());
+ EXPECT_TRUE(v2->CanBeMoved());
+ EXPECT_TRUE(v3->CanBeMoved());
+ EXPECT_TRUE(v4->CanBeMoved());
+ EXPECT_TRUE(v5->CanBeMoved());
+
+ EXPECT_TRUE(v1->Equals(v1));
+ EXPECT_TRUE(v2->Equals(v2));
+ EXPECT_TRUE(v3->Equals(v3));
+ EXPECT_TRUE(v4->Equals(v4));
+ EXPECT_TRUE(v5->Equals(v5));
+
+ EXPECT_TRUE(v1->IsUnsigned() && v1->IsRounded());
+ EXPECT_TRUE(v2->IsUnsigned() && !v2->IsRounded());
+ EXPECT_TRUE(!v3->IsUnsigned() && v3->IsRounded());
+ EXPECT_TRUE(!v4->IsUnsigned() && !v4->IsRounded());
+ EXPECT_TRUE(v5->IsUnsigned() && v5->IsRounded());
+
+ EXPECT_FALSE(v1->Equals(v2)); // different attributes
+ EXPECT_FALSE(v1->Equals(v3)); // different attributes
+ EXPECT_FALSE(v1->Equals(v4)); // different attributes
+ EXPECT_FALSE(v1->Equals(v5)); // different vector lengths
+}
+
+TEST_F(NodesVectorTest, VectorOperationMattersOnMultiplyAccumulate) {
+ HVecOperation* v0 = new (&allocator_)
+ HVecReplicateScalar(&allocator_, parameter_, Primitive::kPrimInt, 4);
+
+ HVecMultiplyAccumulate* v1 = new (&allocator_)
+ HVecMultiplyAccumulate(&allocator_, HInstruction::kAdd, v0, v0, v0, Primitive::kPrimInt, 4);
+ HVecMultiplyAccumulate* v2 = new (&allocator_)
+ HVecMultiplyAccumulate(&allocator_, HInstruction::kSub, v0, v0, v0, Primitive::kPrimInt, 4);
+ HVecMultiplyAccumulate* v3 = new (&allocator_)
+ HVecMultiplyAccumulate(&allocator_, HInstruction::kAdd, v0, v0, v0, Primitive::kPrimInt, 2);
+
+ EXPECT_FALSE(v0->CanBeMoved());
+ EXPECT_TRUE(v1->CanBeMoved());
+ EXPECT_TRUE(v2->CanBeMoved());
+ EXPECT_TRUE(v3->CanBeMoved());
+
+ EXPECT_EQ(HInstruction::kAdd, v1->GetOpKind());
+ EXPECT_EQ(HInstruction::kSub, v2->GetOpKind());
+ EXPECT_EQ(HInstruction::kAdd, v3->GetOpKind());
+
+ EXPECT_TRUE(v1->Equals(v1));
+ EXPECT_TRUE(v2->Equals(v2));
+ EXPECT_TRUE(v3->Equals(v3));
+
+ EXPECT_FALSE(v1->Equals(v2)); // different operators
+ EXPECT_FALSE(v1->Equals(v3)); // different vector lengths
+}
+
+} // namespace art
diff --git a/compiler/utils/atomic_method_ref_map-inl.h b/compiler/utils/atomic_dex_ref_map-inl.h
index ad3a099eb6..c41d8fc071 100644
--- a/compiler/utils/atomic_method_ref_map-inl.h
+++ b/compiler/utils/atomic_dex_ref_map-inl.h
@@ -14,72 +14,72 @@
* limitations under the License.
*/
-#ifndef ART_COMPILER_UTILS_ATOMIC_METHOD_REF_MAP_INL_H_
-#define ART_COMPILER_UTILS_ATOMIC_METHOD_REF_MAP_INL_H_
+#ifndef ART_COMPILER_UTILS_ATOMIC_DEX_REF_MAP_INL_H_
+#define ART_COMPILER_UTILS_ATOMIC_DEX_REF_MAP_INL_H_
-#include "atomic_method_ref_map.h"
+#include "atomic_dex_ref_map.h"
#include "dex_file-inl.h"
namespace art {
template <typename T>
-inline typename AtomicMethodRefMap<T>::InsertResult AtomicMethodRefMap<T>::Insert(
- MethodReference ref,
+inline typename AtomicDexRefMap<T>::InsertResult AtomicDexRefMap<T>::Insert(
+ DexFileReference ref,
const T& expected,
const T& desired) {
ElementArray* const array = GetArray(ref.dex_file);
if (array == nullptr) {
return kInsertResultInvalidDexFile;
}
- return (*array)[ref.dex_method_index].CompareExchangeStrongSequentiallyConsistent(
- expected, desired)
+ DCHECK_LT(ref.index, array->size());
+ return (*array)[ref.index].CompareExchangeStrongSequentiallyConsistent(expected, desired)
? kInsertResultSuccess
: kInsertResultCASFailure;
}
template <typename T>
-inline bool AtomicMethodRefMap<T>::Get(MethodReference ref, T* out) const {
+inline bool AtomicDexRefMap<T>::Get(DexFileReference ref, T* out) const {
const ElementArray* const array = GetArray(ref.dex_file);
if (array == nullptr) {
return false;
}
- *out = (*array)[ref.dex_method_index].LoadRelaxed();
+ *out = (*array)[ref.index].LoadRelaxed();
return true;
}
template <typename T>
-inline void AtomicMethodRefMap<T>::AddDexFile(const DexFile* dex_file) {
- arrays_.Put(dex_file, std::move(ElementArray(dex_file->NumMethodIds())));
+inline void AtomicDexRefMap<T>::AddDexFile(const DexFile* dex_file, size_t max_index) {
+ arrays_.Put(dex_file, std::move(ElementArray(max_index)));
}
template <typename T>
-inline typename AtomicMethodRefMap<T>::ElementArray* AtomicMethodRefMap<T>::GetArray(
+inline typename AtomicDexRefMap<T>::ElementArray* AtomicDexRefMap<T>::GetArray(
const DexFile* dex_file) {
auto it = arrays_.find(dex_file);
return (it != arrays_.end()) ? &it->second : nullptr;
}
template <typename T>
-inline const typename AtomicMethodRefMap<T>::ElementArray* AtomicMethodRefMap<T>::GetArray(
+inline const typename AtomicDexRefMap<T>::ElementArray* AtomicDexRefMap<T>::GetArray(
const DexFile* dex_file) const {
auto it = arrays_.find(dex_file);
return (it != arrays_.end()) ? &it->second : nullptr;
}
template <typename T> template <typename Visitor>
-inline void AtomicMethodRefMap<T>::Visit(const Visitor& visitor) {
+inline void AtomicDexRefMap<T>::Visit(const Visitor& visitor) {
for (auto& pair : arrays_) {
const DexFile* dex_file = pair.first;
const ElementArray& elements = pair.second;
for (size_t i = 0; i < elements.size(); ++i) {
- visitor(MethodReference(dex_file, i), elements[i].LoadRelaxed());
+ visitor(DexFileReference(dex_file, i), elements[i].LoadRelaxed());
}
}
}
template <typename T>
-inline void AtomicMethodRefMap<T>::ClearEntries() {
+inline void AtomicDexRefMap<T>::ClearEntries() {
for (auto& it : arrays_) {
for (auto& element : it.second) {
element.StoreRelaxed(nullptr);
@@ -89,4 +89,4 @@ inline void AtomicMethodRefMap<T>::ClearEntries() {
} // namespace art
-#endif // ART_COMPILER_UTILS_ATOMIC_METHOD_REF_MAP_INL_H_
+#endif // ART_COMPILER_UTILS_ATOMIC_DEX_REF_MAP_INL_H_
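The renamed Insert() above keeps the original lock-free contract: one atomic slot per index, claimed with a strong compare-and-swap, plus a sentinel result when the dex file (and hence its slot array) was never registered. Below is a minimal, self-contained sketch of that slot-claiming scheme under an illustrative name (SlotMapSketch); the real map keys slots by (dex file, index) pairs rather than a bare index.

#include <atomic>
#include <cstddef>
#include <vector>

enum InsertResult {
  kInsertResultInvalidDexFile,  // stands in for "dex file not registered"
  kInsertResultCASFailure,
  kInsertResultSuccess,
};

template <typename T>
class SlotMapSketch {
 public:
  // max_index plays the role of AddDexFile's new parameter: the caller sizes
  // the slot array instead of it being hard-wired to NumMethodIds().
  explicit SlotMapSketch(size_t max_index) : slots_(max_index) {}

  InsertResult Insert(size_t index, T expected, T desired) {
    if (index >= slots_.size()) {
      return kInsertResultInvalidDexFile;
    }
    // Strong CAS: only a writer that still observes `expected` wins the slot.
    return slots_[index].compare_exchange_strong(expected, desired)
        ? kInsertResultSuccess
        : kInsertResultCASFailure;
  }

  bool Get(size_t index, T* out) const {
    if (index >= slots_.size()) {
      return false;
    }
    *out = slots_[index].load(std::memory_order_relaxed);
    return true;
  }

 private:
  std::vector<std::atomic<T>> slots_;
};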
diff --git a/compiler/utils/atomic_method_ref_map.h b/compiler/utils/atomic_dex_ref_map.h
index fed848f563..2da4ffa27b 100644
--- a/compiler/utils/atomic_method_ref_map.h
+++ b/compiler/utils/atomic_dex_ref_map.h
@@ -14,11 +14,11 @@
* limitations under the License.
*/
-#ifndef ART_COMPILER_UTILS_ATOMIC_METHOD_REF_MAP_H_
-#define ART_COMPILER_UTILS_ATOMIC_METHOD_REF_MAP_H_
+#ifndef ART_COMPILER_UTILS_ATOMIC_DEX_REF_MAP_H_
+#define ART_COMPILER_UTILS_ATOMIC_DEX_REF_MAP_H_
#include "base/dchecked_vector.h"
-#include "method_reference.h"
+#include "dex_file.h"
#include "safe_map.h"
namespace art {
@@ -27,10 +27,10 @@ class DexFile;
// Used by CompilerCallbacks to track verification information from the Runtime.
template <typename T>
-class AtomicMethodRefMap {
+class AtomicDexRefMap {
public:
- explicit AtomicMethodRefMap() {}
- ~AtomicMethodRefMap() {}
+ explicit AtomicDexRefMap() {}
+ ~AtomicDexRefMap() {}
// Atomically swap the element in if the existing value matches expected.
enum InsertResult {
@@ -38,14 +38,14 @@ class AtomicMethodRefMap {
kInsertResultCASFailure,
kInsertResultSuccess,
};
- InsertResult Insert(MethodReference ref, const T& expected, const T& desired);
+ InsertResult Insert(DexFileReference ref, const T& expected, const T& desired);
// Retrieve an item; returns false if the dex file has not been added.
- bool Get(MethodReference ref, T* out) const;
+ bool Get(DexFileReference ref, T* out) const;
// Dex files must be added before method references belonging to them can be used as keys. Not
// thread safe.
- void AddDexFile(const DexFile* dex_file);
+ void AddDexFile(const DexFile* dex_file, size_t max_index);
bool HaveDexFile(const DexFile* dex_file) const {
return arrays_.find(dex_file) != arrays_.end();
@@ -70,4 +70,4 @@ class AtomicMethodRefMap {
} // namespace art
-#endif // ART_COMPILER_UTILS_ATOMIC_METHOD_REF_MAP_H_
+#endif // ART_COMPILER_UTILS_ATOMIC_DEX_REF_MAP_H_
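For orientation, a hedged usage sketch of the declarations above, assuming the ART headers are on the include path and that dex_file is an already-opened const DexFile* (neither appears in this hunk); it mirrors how the renamed test below drives the same API with int payloads.

#include "utils/atomic_dex_ref_map-inl.h"  // template definitions for Insert/Get/Visit

// Hypothetical caller: the index space is now chosen by the caller; here it is
// sized for method ids, but any per-dex-file index range would work.
void SketchUsage(const art::DexFile* dex_file) {
  art::AtomicDexRefMap<int> map;
  map.AddDexFile(dex_file, dex_file->NumMethodIds());

  art::DexFileReference ref(dex_file, /* index */ 0u);
  // Fresh slots hold the value-initialized default (0 for int), so a CAS
  // from 0 to 42 succeeds exactly once.
  if (map.Insert(ref, /* expected */ 0, /* desired */ 42) ==
      art::AtomicDexRefMap<int>::kInsertResultSuccess) {
    int out = 0;
    map.Get(ref, &out);  // out is now 42
  }
}

Taking max_index in AddDexFile is what lets the same container be keyed by an index space other than method ids, which is presumably the point of generalizing MethodReference to DexFileReference here.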
diff --git a/compiler/utils/atomic_method_ref_map_test.cc b/compiler/utils/atomic_dex_ref_map_test.cc
index 9e5bf4bbe1..ae19a9c6da 100644
--- a/compiler/utils/atomic_method_ref_map_test.cc
+++ b/compiler/utils/atomic_dex_ref_map_test.cc
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#include "atomic_method_ref_map-inl.h"
+#include "atomic_dex_ref_map-inl.h"
#include <memory>
@@ -25,46 +25,46 @@
namespace art {
-class AtomicMethodRefMapTest : public CommonRuntimeTest {};
+class AtomicDexRefMapTest : public CommonRuntimeTest {};
-TEST_F(AtomicMethodRefMapTest, RunTests) {
+TEST_F(AtomicDexRefMapTest, RunTests) {
ScopedObjectAccess soa(Thread::Current());
std::unique_ptr<const DexFile> dex(OpenTestDexFile("Interfaces"));
ASSERT_TRUE(dex != nullptr);
- using Map = AtomicMethodRefMap<int>;
+ using Map = AtomicDexRefMap<int>;
Map map;
int value = 123;
// Error case: Not already inserted.
- EXPECT_FALSE(map.Get(MethodReference(dex.get(), 1), &value));
+ EXPECT_FALSE(map.Get(DexFileReference(dex.get(), 1), &value));
EXPECT_FALSE(map.HaveDexFile(dex.get()));
// Error case: Dex file not registered.
- EXPECT_TRUE(map.Insert(MethodReference(dex.get(), 1), 0, 1) == Map::kInsertResultInvalidDexFile);
- map.AddDexFile(dex.get());
+ EXPECT_TRUE(map.Insert(DexFileReference(dex.get(), 1), 0, 1) == Map::kInsertResultInvalidDexFile);
+ map.AddDexFile(dex.get(), dex->NumMethodIds());
EXPECT_TRUE(map.HaveDexFile(dex.get()));
EXPECT_GT(dex->NumMethodIds(), 10u);
// After the dex file has been added, the get should succeed but return the default value.
- EXPECT_TRUE(map.Get(MethodReference(dex.get(), 1), &value));
+ EXPECT_TRUE(map.Get(DexFileReference(dex.get(), 1), &value));
EXPECT_EQ(value, 0);
// Actually insert an item and make sure we can retrieve it.
static const int kInsertValue = 44;
- EXPECT_TRUE(map.Insert(MethodReference(dex.get(), 1), 0, kInsertValue) ==
+ EXPECT_TRUE(map.Insert(DexFileReference(dex.get(), 1), 0, kInsertValue) ==
Map::kInsertResultSuccess);
- EXPECT_TRUE(map.Get(MethodReference(dex.get(), 1), &value));
+ EXPECT_TRUE(map.Get(DexFileReference(dex.get(), 1), &value));
EXPECT_EQ(value, kInsertValue);
static const int kInsertValue2 = 123;
- EXPECT_TRUE(map.Insert(MethodReference(dex.get(), 2), 0, kInsertValue2) ==
+ EXPECT_TRUE(map.Insert(DexFileReference(dex.get(), 2), 0, kInsertValue2) ==
Map::kInsertResultSuccess);
- EXPECT_TRUE(map.Get(MethodReference(dex.get(), 1), &value));
+ EXPECT_TRUE(map.Get(DexFileReference(dex.get(), 1), &value));
EXPECT_EQ(value, kInsertValue);
- EXPECT_TRUE(map.Get(MethodReference(dex.get(), 2), &value));
+ EXPECT_TRUE(map.Get(DexFileReference(dex.get(), 2), &value));
EXPECT_EQ(value, kInsertValue2);
// Error case: Incorrect expected value for CAS.
- EXPECT_TRUE(map.Insert(MethodReference(dex.get(), 1), 0, kInsertValue + 1) ==
+ EXPECT_TRUE(map.Insert(DexFileReference(dex.get(), 1), 0, kInsertValue + 1) ==
Map::kInsertResultCASFailure);
// Correctly overwrite the value and verify.
- EXPECT_TRUE(map.Insert(MethodReference(dex.get(), 1), kInsertValue, kInsertValue + 1) ==
+ EXPECT_TRUE(map.Insert(DexFileReference(dex.get(), 1), kInsertValue, kInsertValue + 1) ==
Map::kInsertResultSuccess);
- EXPECT_TRUE(map.Get(MethodReference(dex.get(), 1), &value));
+ EXPECT_TRUE(map.Get(DexFileReference(dex.get(), 1), &value));
EXPECT_EQ(value, kInsertValue + 1);
}
diff --git a/compiler/verifier_deps_test.cc b/compiler/verifier_deps_test.cc
index 7e616a7af0..686da2136f 100644
--- a/compiler/verifier_deps_test.cc
+++ b/compiler/verifier_deps_test.cc
@@ -33,7 +33,7 @@
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
#include "thread.h"
-#include "utils/atomic_method_ref_map-inl.h"
+#include "utils/atomic_dex_ref_map-inl.h"
#include "verifier/method_verifier-inl.h"
namespace art {
@@ -97,9 +97,9 @@ class VerifierDepsTest : public CommonCompilerTest {
callbacks_->SetVerifierDeps(nullptr);
// Clear entries in the verification results to avoid hitting a DCHECK that
// we always succeed inserting a new entry after verifying.
- AtomicMethodRefMap<const VerifiedMethod*>* map =
+ AtomicDexRefMap<const VerifiedMethod*>* map =
&compiler_driver_->GetVerificationResults()->atomic_verified_methods_;
- map->Visit([](const MethodReference& ref ATTRIBUTE_UNUSED, const VerifiedMethod* method) {
+ map->Visit([](const DexFileReference& ref ATTRIBUTE_UNUSED, const VerifiedMethod* method) {
delete method;
});
map->ClearEntries();
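For reference, the Visit/ClearEntries pair above generalizes into a small cleanup helper; a hedged sketch, assuming a map pointer obtained elsewhere (the friend access through VerificationResults is specific to this test).

// Deletes every stored VerifiedMethod and resets all slots to nullptr so a
// later verification pass can insert fresh entries without tripping the
// DCHECK mentioned in the comment above.
void ResetVerifiedMethods(art::AtomicDexRefMap<const art::VerifiedMethod*>* map) {
  map->Visit([](const art::DexFileReference& ref ATTRIBUTE_UNUSED,
                const art::VerifiedMethod* method) {
    delete method;
  });
  map->ClearEntries();
}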