Diffstat (limited to 'compiler')
-rw-r--r--  compiler/compiler.h                                      |   3
-rw-r--r--  compiler/dex/dex_to_dex_compiler.cc                      |   7
-rw-r--r--  compiler/dex/dex_to_dex_compiler.h                       |   7
-rw-r--r--  compiler/driver/compiler_driver-inl.h                    |  13
-rw-r--r--  compiler/driver/compiler_driver.cc                       | 123
-rw-r--r--  compiler/driver/compiler_driver.h                        |  14
-rw-r--r--  compiler/driver/compiler_driver_test.cc                  |   1
-rw-r--r--  compiler/driver/dex_compilation_unit.cc                  |   2
-rw-r--r--  compiler/driver/dex_compilation_unit.h                   |   8
-rw-r--r--  compiler/image_writer.cc                                 |  11
-rw-r--r--  compiler/image_writer.h                                  |  10
-rw-r--r--  compiler/oat_writer.cc                                   |   8
-rw-r--r--  compiler/optimizing/builder.h                            |  15
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc              | 393
-rw-r--r--  compiler/optimizing/code_generator_arm64.h               |  13
-rw-r--r--  compiler/optimizing/code_generator_mips.cc               |  58
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc             |  62
-rw-r--r--  compiler/optimizing/inliner.cc                           |  45
-rw-r--r--  compiler/optimizing/instruction_builder.cc               |  47
-rw-r--r--  compiler/optimizing/instruction_builder.h                |  11
-rw-r--r--  compiler/optimizing/intrinsics_arm64.cc                  |  50
-rw-r--r--  compiler/optimizing/intrinsics_mips.cc                   |  55
-rw-r--r--  compiler/optimizing/intrinsics_mips64.cc                 |  49
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc               |  26
-rw-r--r--  compiler/optimizing/reference_type_propagation.cc        |  44
-rw-r--r--  compiler/optimizing/reference_type_propagation.h         |   3
-rw-r--r--  compiler/optimizing/reference_type_propagation_test.cc   |   1
-rw-r--r--  compiler/optimizing/ssa_builder.cc                       |   6
-rw-r--r--  compiler/optimizing/ssa_builder.h                        |   3
-rw-r--r--  compiler/optimizing/stack_map_stream.cc                  | 225
-rw-r--r--  compiler/optimizing/stack_map_stream.h                   |  52
-rw-r--r--  compiler/optimizing/stack_map_test.cc                    |  94
32 files changed, 942 insertions(+), 517 deletions(-)
diff --git a/compiler/compiler.h b/compiler/compiler.h
index 2ca0b77a73..908d3669ed 100644
--- a/compiler/compiler.h
+++ b/compiler/compiler.h
@@ -27,6 +27,7 @@ namespace jit {
class JitCodeCache;
}
namespace mirror {
+ class ClassLoader;
class DexCache;
}
@@ -63,7 +64,7 @@ class Compiler {
InvokeType invoke_type,
uint16_t class_def_idx,
uint32_t method_idx,
- jobject class_loader,
+ Handle<mirror::ClassLoader> class_loader,
const DexFile& dex_file,
Handle<mirror::DexCache> dex_cache) const = 0;
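
The recurring change in this CL: class loaders cross the compiler interfaces as Handle<mirror::ClassLoader> instead of jobject, so callees no longer re-decode the reference into a fresh StackHandleScope. A minimal before/after sketch of the calling convention, using only names visible in the hunks here (UseLoader() is a hypothetical placeholder; this is a sketch, not compilable outside the ART tree):

    // Before: every callee decoded the jobject into its own rooted handle.
    ScopedObjectAccess soa(Thread::Current());
    StackHandleScope<1> hs(soa.Self());
    Handle<mirror::ClassLoader> class_loader(
        hs.NewHandle(soa.Decode<mirror::ClassLoader>(jclass_loader)));
    UseLoader(class_loader);

    // After: the outermost caller builds the handle once and passes it down.
    void Compile(/* ... */ Handle<mirror::ClassLoader> class_loader /* ... */) {
      UseLoader(class_loader);  // already GC-visible; no decode, no extra scope
    }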
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index d4f6545c59..76aeaa55d7 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -284,16 +284,13 @@ void DexCompiler::CompileInvokeVirtual(Instruction* inst, uint32_t dex_pc,
}
uint32_t method_idx = is_range ? inst->VRegB_3rc() : inst->VRegB_35c();
ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<1> hs(soa.Self());
- Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
- soa.Decode<mirror::ClassLoader>(unit_.GetClassLoader())));
ClassLinker* class_linker = unit_.GetClassLinker();
ArtMethod* resolved_method = class_linker->ResolveMethod<ClassLinker::kForceICCECheck>(
GetDexFile(),
method_idx,
unit_.GetDexCache(),
- class_loader,
+ unit_.GetClassLoader(),
/* referrer */ nullptr,
kVirtual);
@@ -330,7 +327,7 @@ CompiledMethod* ArtCompileDEX(
InvokeType invoke_type ATTRIBUTE_UNUSED,
uint16_t class_def_idx,
uint32_t method_idx,
- jobject class_loader,
+ Handle<mirror::ClassLoader> class_loader,
const DexFile& dex_file,
DexToDexCompilationLevel dex_to_dex_compilation_level) {
DCHECK(driver != nullptr);
diff --git a/compiler/dex/dex_to_dex_compiler.h b/compiler/dex/dex_to_dex_compiler.h
index 0a00d45297..00c596d60e 100644
--- a/compiler/dex/dex_to_dex_compiler.h
+++ b/compiler/dex/dex_to_dex_compiler.h
@@ -18,6 +18,7 @@
#define ART_COMPILER_DEX_DEX_TO_DEX_COMPILER_H_
#include "dex_file.h"
+#include "handle.h"
#include "invoke_type.h"
namespace art {
@@ -25,6 +26,10 @@ namespace art {
class CompiledMethod;
class CompilerDriver;
+namespace mirror {
+class ClassLoader;
+} // namespace mirror
+
namespace optimizer {
enum class DexToDexCompilationLevel {
@@ -40,7 +45,7 @@ CompiledMethod* ArtCompileDEX(CompilerDriver* driver,
InvokeType invoke_type,
uint16_t class_def_idx,
uint32_t method_idx,
- jobject class_loader,
+ Handle<mirror::ClassLoader> class_loader,
const DexFile& dex_file,
DexToDexCompilationLevel dex_to_dex_compilation_level);
diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h
index f296851ebf..582330611d 100644
--- a/compiler/driver/compiler_driver-inl.h
+++ b/compiler/driver/compiler_driver-inl.h
@@ -31,17 +31,12 @@
namespace art {
-inline mirror::ClassLoader* CompilerDriver::GetClassLoader(const ScopedObjectAccess& soa,
- const DexCompilationUnit* mUnit) {
- return soa.Decode<mirror::ClassLoader>(mUnit->GetClassLoader()).Ptr();
-}
-
inline mirror::Class* CompilerDriver::ResolveClass(
const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, dex::TypeIndex cls_index,
const DexCompilationUnit* mUnit) {
DCHECK_EQ(dex_cache->GetDexFile(), mUnit->GetDexFile());
- DCHECK_EQ(class_loader.Get(), GetClassLoader(soa, mUnit));
+ DCHECK_EQ(class_loader.Get(), mUnit->GetClassLoader().Get());
mirror::Class* cls = mUnit->GetClassLinker()->ResolveType(
*mUnit->GetDexFile(), cls_index, dex_cache, class_loader);
DCHECK_EQ(cls == nullptr, soa.Self()->IsExceptionPending());
@@ -56,7 +51,7 @@ inline mirror::Class* CompilerDriver::ResolveCompilingMethodsClass(
const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit) {
DCHECK_EQ(dex_cache->GetDexFile(), mUnit->GetDexFile());
- DCHECK_EQ(class_loader.Get(), GetClassLoader(soa, mUnit));
+ DCHECK_EQ(class_loader.Get(), mUnit->GetClassLoader().Get());
const DexFile::MethodId& referrer_method_id =
mUnit->GetDexFile()->GetMethodId(mUnit->GetDexMethodIndex());
return ResolveClass(soa, dex_cache, class_loader, referrer_method_id.class_idx_, mUnit);
@@ -87,7 +82,7 @@ inline ArtField* CompilerDriver::ResolveField(
const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
uint32_t field_idx, bool is_static) {
- DCHECK_EQ(class_loader.Get(), GetClassLoader(soa, mUnit));
+ DCHECK_EQ(class_loader.Get(), mUnit->GetClassLoader().Get());
return ResolveFieldWithDexFile(soa, dex_cache, class_loader, mUnit->GetDexFile(), field_idx,
is_static);
}
@@ -139,7 +134,7 @@ inline ArtMethod* CompilerDriver::ResolveMethod(
ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
uint32_t method_idx, InvokeType invoke_type, bool check_incompatible_class_change) {
- DCHECK_EQ(class_loader.Get(), GetClassLoader(soa, mUnit));
+ DCHECK_EQ(class_loader.Get(), mUnit->GetClassLoader().Get());
ArtMethod* resolved_method =
check_incompatible_class_change
? mUnit->GetClassLinker()->ResolveMethod<ClassLinker::kForceICCECheck>(
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 7af850a263..b738d5ce7e 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -583,7 +583,7 @@ static void CompileMethod(Thread* self,
InvokeType invoke_type,
uint16_t class_def_idx,
uint32_t method_idx,
- jobject class_loader,
+ Handle<mirror::ClassLoader> class_loader,
const DexFile& dex_file,
optimizer::DexToDexCompilationLevel dex_to_dex_compilation_level,
bool compilation_enabled,
@@ -624,9 +624,6 @@ static void CompileMethod(Thread* self,
// Look-up the ArtMethod associated with this code_item (if any)
// -- It is later used to lookup any [optimization] annotations for this method.
ScopedObjectAccess soa(self);
- StackHandleScope<1> hs(soa.Self());
- Handle<mirror::ClassLoader> class_loader_handle(hs.NewHandle(
- soa.Decode<mirror::ClassLoader>(class_loader)));
// TODO: Lookup annotation from DexFile directly without resolving method.
ArtMethod* method =
@@ -634,7 +631,7 @@ static void CompileMethod(Thread* self,
dex_file,
method_idx,
dex_cache,
- class_loader_handle,
+ class_loader,
/* referrer */ nullptr,
invoke_type);
@@ -681,9 +678,14 @@ static void CompileMethod(Thread* self,
if (compile) {
// NOTE: if compiler declines to compile this method, it will return null.
- compiled_method = driver->GetCompiler()->Compile(code_item, access_flags, invoke_type,
- class_def_idx, method_idx, class_loader,
- dex_file, dex_cache);
+ compiled_method = driver->GetCompiler()->Compile(code_item,
+ access_flags,
+ invoke_type,
+ class_def_idx,
+ method_idx,
+ class_loader,
+ dex_file,
+ dex_cache);
}
if (compiled_method == nullptr &&
dex_to_dex_compilation_level != optimizer::DexToDexCompilationLevel::kDontDexToDexCompile) {
@@ -730,12 +732,14 @@ void CompilerDriver::CompileOne(Thread* self, ArtMethod* method, TimingLogger* t
uint32_t method_idx = method->GetDexMethodIndex();
uint32_t access_flags = method->GetAccessFlags();
InvokeType invoke_type = method->GetInvokeType();
- StackHandleScope<1> hs(self);
+ StackHandleScope<2> hs(self);
Handle<mirror::DexCache> dex_cache(hs.NewHandle(method->GetDexCache()));
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(method->GetDeclaringClass()->GetClassLoader()));
{
ScopedObjectAccessUnchecked soa(self);
ScopedLocalRef<jobject> local_class_loader(
- soa.Env(), soa.AddLocalReference<jobject>(method->GetDeclaringClass()->GetClassLoader()));
+ soa.Env(), soa.AddLocalReference<jobject>(class_loader.Get()));
jclass_loader = soa.Env()->NewGlobalRef(local_class_loader.get());
// Find the dex_file
dex_file = method->GetDexFile();
@@ -769,7 +773,7 @@ void CompilerDriver::CompileOne(Thread* self, ArtMethod* method, TimingLogger* t
invoke_type,
class_def_idx,
method_idx,
- jclass_loader,
+ class_loader,
*dex_file,
dex_to_dex_compilation_level,
true,
@@ -795,7 +799,7 @@ void CompilerDriver::CompileOne(Thread* self, ArtMethod* method, TimingLogger* t
invoke_type,
class_def_idx,
method_idx,
- jclass_loader,
+ class_loader,
*dex_file,
dex_to_dex_compilation_level,
true,
@@ -1070,22 +1074,30 @@ bool CompilerDriver::ShouldCompileBasedOnProfile(const MethodReference& method_r
class ResolveCatchBlockExceptionsClassVisitor : public ClassVisitor {
public:
- explicit ResolveCatchBlockExceptionsClassVisitor(
- std::set<std::pair<dex::TypeIndex, const DexFile*>>& exceptions_to_resolve)
- : exceptions_to_resolve_(exceptions_to_resolve) {}
+ ResolveCatchBlockExceptionsClassVisitor() : classes_() {}
virtual bool operator()(ObjPtr<mirror::Class> c) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ classes_.push_back(c);
+ return true;
+ }
+
+ void FindExceptionTypesToResolve(
+ std::set<std::pair<dex::TypeIndex, const DexFile*>>* exceptions_to_resolve)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
- for (auto& m : c->GetMethods(pointer_size)) {
- ResolveExceptionsForMethod(&m);
+ for (ObjPtr<mirror::Class> klass : classes_) {
+ for (ArtMethod& method : klass->GetMethods(pointer_size)) {
+ FindExceptionTypesToResolveForMethod(&method, exceptions_to_resolve);
+ }
}
- return true;
}
private:
- void ResolveExceptionsForMethod(ArtMethod* method_handle)
+ void FindExceptionTypesToResolveForMethod(
+ ArtMethod* method,
+ std::set<std::pair<dex::TypeIndex, const DexFile*>>* exceptions_to_resolve)
REQUIRES_SHARED(Locks::mutator_lock_) {
- const DexFile::CodeItem* code_item = method_handle->GetCodeItem();
+ const DexFile::CodeItem* code_item = method->GetCodeItem();
if (code_item == nullptr) {
return; // native or abstract method
}
@@ -1105,9 +1117,9 @@ class ResolveCatchBlockExceptionsClassVisitor : public ClassVisitor {
dex::TypeIndex encoded_catch_handler_handlers_type_idx =
dex::TypeIndex(DecodeUnsignedLeb128(&encoded_catch_handler_list));
// Add to set of types to resolve if not already in the dex cache resolved types
- if (!method_handle->IsResolvedTypeIdx(encoded_catch_handler_handlers_type_idx)) {
- exceptions_to_resolve_.emplace(encoded_catch_handler_handlers_type_idx,
- method_handle->GetDexFile());
+ if (!method->IsResolvedTypeIdx(encoded_catch_handler_handlers_type_idx)) {
+ exceptions_to_resolve->emplace(encoded_catch_handler_handlers_type_idx,
+ method->GetDexFile());
}
// ignore address associated with catch handler
DecodeUnsignedLeb128(&encoded_catch_handler_list);
@@ -1119,7 +1131,7 @@ class ResolveCatchBlockExceptionsClassVisitor : public ClassVisitor {
}
}
- std::set<std::pair<dex::TypeIndex, const DexFile*>>& exceptions_to_resolve_;
+ std::vector<ObjPtr<mirror::Class>> classes_;
};
class RecordImageClassesVisitor : public ClassVisitor {
@@ -1173,8 +1185,14 @@ void CompilerDriver::LoadImageClasses(TimingLogger* timings) {
hs.NewHandle(class_linker->FindSystemClass(self, "Ljava/lang/Throwable;")));
do {
unresolved_exception_types.clear();
- ResolveCatchBlockExceptionsClassVisitor visitor(unresolved_exception_types);
- class_linker->VisitClasses(&visitor);
+ {
+ // Thread suspension is not allowed while ResolveCatchBlockExceptionsClassVisitor
+ // is using a std::vector<ObjPtr<mirror::Class>>.
+ ScopedAssertNoThreadSuspension ants(__FUNCTION__);
+ ResolveCatchBlockExceptionsClassVisitor visitor;
+ class_linker->VisitClasses(&visitor);
+ visitor.FindExceptionTypesToResolve(&unresolved_exception_types);
+ }
for (const auto& exception_type : unresolved_exception_types) {
dex::TypeIndex exception_type_idx = exception_type.first;
const DexFile* dex_file = exception_type.second;
@@ -1425,19 +1443,14 @@ void CompilerDriver::MarkForDexToDexCompilation(Thread* self, const MethodRefere
dex_to_dex_references_.back().GetMethodIndexes().SetBit(method_ref.dex_method_index);
}
-bool CompilerDriver::CanAccessTypeWithoutChecks(uint32_t referrer_idx,
- Handle<mirror::DexCache> dex_cache,
- dex::TypeIndex type_idx) {
- // Get type from dex cache assuming it was populated by the verifier
- mirror::Class* resolved_class = dex_cache->GetResolvedType(type_idx);
+bool CompilerDriver::CanAccessTypeWithoutChecks(ObjPtr<mirror::Class> referrer_class,
+ ObjPtr<mirror::Class> resolved_class) {
if (resolved_class == nullptr) {
stats_->TypeNeedsAccessCheck();
return false; // Unknown class needs access checks.
}
- const DexFile::MethodId& method_id = dex_cache->GetDexFile()->GetMethodId(referrer_idx);
bool is_accessible = resolved_class->IsPublic(); // Public classes are always accessible.
if (!is_accessible) {
- mirror::Class* referrer_class = dex_cache->GetResolvedType(method_id.class_idx_);
if (referrer_class == nullptr) {
stats_->TypeNeedsAccessCheck();
return false; // Incomplete referrer knowledge needs access check.
@@ -1454,12 +1467,9 @@ bool CompilerDriver::CanAccessTypeWithoutChecks(uint32_t referrer_idx,
return is_accessible;
}
-bool CompilerDriver::CanAccessInstantiableTypeWithoutChecks(uint32_t referrer_idx,
- Handle<mirror::DexCache> dex_cache,
- dex::TypeIndex type_idx,
+bool CompilerDriver::CanAccessInstantiableTypeWithoutChecks(ObjPtr<mirror::Class> referrer_class,
+ ObjPtr<mirror::Class> resolved_class,
bool* finalizable) {
- // Get type from dex cache assuming it was populated by the verifier.
- mirror::Class* resolved_class = dex_cache->GetResolvedType(type_idx);
if (resolved_class == nullptr) {
stats_->TypeNeedsAccessCheck();
// Be conservative.
@@ -1467,10 +1477,8 @@ bool CompilerDriver::CanAccessInstantiableTypeWithoutChecks(uint32_t referrer_id
return false; // Unknown class needs access checks.
}
*finalizable = resolved_class->IsFinalizable();
- const DexFile::MethodId& method_id = dex_cache->GetDexFile()->GetMethodId(referrer_idx);
bool is_accessible = resolved_class->IsPublic(); // Public classes are always accessible.
if (!is_accessible) {
- mirror::Class* referrer_class = dex_cache->GetResolvedType(method_id.class_idx_);
if (referrer_class == nullptr) {
stats_->TypeNeedsAccessCheck();
return false; // Incomplete referrer knowledge needs access check.
@@ -1514,9 +1522,7 @@ ArtField* CompilerDriver::ComputeInstanceFieldInfo(uint32_t field_idx,
mirror::Class* referrer_class;
Handle<mirror::DexCache> dex_cache(mUnit->GetDexCache());
{
- StackHandleScope<1> hs(soa.Self());
- Handle<mirror::ClassLoader> class_loader_handle(
- hs.NewHandle(soa.Decode<mirror::ClassLoader>(mUnit->GetClassLoader())));
+ Handle<mirror::ClassLoader> class_loader_handle = mUnit->GetClassLoader();
resolved_field = ResolveField(soa, dex_cache, class_loader_handle, mUnit, field_idx, false);
referrer_class = resolved_field != nullptr
? ResolveCompilingMethodsClass(soa, dex_cache, class_loader_handle, mUnit) : nullptr;
@@ -2588,10 +2594,18 @@ class CompileClassVisitor : public CompilationVisitor {
continue;
}
previous_direct_method_idx = method_idx;
- CompileMethod(soa.Self(), driver, it.GetMethodCodeItem(), it.GetMethodAccessFlags(),
- it.GetMethodInvokeType(class_def), class_def_index,
- method_idx, jclass_loader, dex_file, dex_to_dex_compilation_level,
- compilation_enabled, dex_cache);
+ CompileMethod(soa.Self(),
+ driver,
+ it.GetMethodCodeItem(),
+ it.GetMethodAccessFlags(),
+ it.GetMethodInvokeType(class_def),
+ class_def_index,
+ method_idx,
+ class_loader,
+ dex_file,
+ dex_to_dex_compilation_level,
+ compilation_enabled,
+ dex_cache);
it.Next();
}
// Compile virtual methods
@@ -2605,10 +2619,17 @@ class CompileClassVisitor : public CompilationVisitor {
continue;
}
previous_virtual_method_idx = method_idx;
- CompileMethod(soa.Self(), driver, it.GetMethodCodeItem(), it.GetMethodAccessFlags(),
- it.GetMethodInvokeType(class_def), class_def_index,
- method_idx, jclass_loader, dex_file, dex_to_dex_compilation_level,
- compilation_enabled, dex_cache);
+ CompileMethod(soa.Self(),
+ driver, it.GetMethodCodeItem(),
+ it.GetMethodAccessFlags(),
+ it.GetMethodInvokeType(class_def),
+ class_def_index,
+ method_idx,
+ class_loader,
+ dex_file,
+ dex_to_dex_compilation_level,
+ compilation_enabled,
+ dex_cache);
it.Next();
}
DCHECK(!it.HasNext());
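
The exception-resolution visitor above now works in two phases: operator() merely collects ObjPtr<mirror::Class> values, and the catch-handler scan runs afterwards, all inside a no-suspension region (an ObjPtr is an unrooted reference, valid only while the GC cannot move objects). A sketch of the resulting call pattern, assuming the interfaces shown in the hunk:

    std::set<std::pair<dex::TypeIndex, const DexFile*>> unresolved_exception_types;
    {
      // No suspension point may be reached while raw ObjPtr<> values live in
      // the visitor's std::vector.
      ScopedAssertNoThreadSuspension ants(__FUNCTION__);
      ResolveCatchBlockExceptionsClassVisitor visitor;
      class_linker->VisitClasses(&visitor);                              // phase 1: collect
      visitor.FindExceptionTypesToResolve(&unresolved_exception_types);  // phase 2: scan
    }
    // Type resolution, which may allocate and suspend, happens after the scope ends.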
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 5b4c751c4a..1e5c43d833 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -187,16 +187,14 @@ class CompilerDriver {
REQUIRES(!requires_constructor_barrier_lock_);
// Are runtime access checks necessary in the compiled code?
- bool CanAccessTypeWithoutChecks(uint32_t referrer_idx,
- Handle<mirror::DexCache> dex_cache,
- dex::TypeIndex type_idx)
+ bool CanAccessTypeWithoutChecks(ObjPtr<mirror::Class> referrer_class,
+ ObjPtr<mirror::Class> resolved_class)
REQUIRES_SHARED(Locks::mutator_lock_);
// Are runtime access and instantiable checks necessary in the code?
// out_is_finalizable is set to whether the type is finalizable.
- bool CanAccessInstantiableTypeWithoutChecks(uint32_t referrer_idx,
- Handle<mirror::DexCache> dex_cache,
- dex::TypeIndex type_idx,
+ bool CanAccessInstantiableTypeWithoutChecks(ObjPtr<mirror::Class> referrer_class,
+ ObjPtr<mirror::Class> resolved_class,
bool* out_is_finalizable)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -370,10 +368,6 @@ class CompilerDriver {
uint32_t field_idx)
REQUIRES_SHARED(Locks::mutator_lock_);
- mirror::ClassLoader* GetClassLoader(const ScopedObjectAccess& soa,
- const DexCompilationUnit* mUnit)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
private:
void PreCompile(jobject class_loader,
const std::vector<const DexFile*>& dex_files,
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index 1e4ca16844..e4b66ebc5a 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -101,6 +101,7 @@ class CompilerDriverTest : public CommonCompilerTest {
};
// Disabled due to 10 second runtime on host
+// TODO: Update the test for hash-based dex cache arrays. Bug: 30627598
TEST_F(CompilerDriverTest, DISABLED_LARGE_CompileDexLibCore) {
CompileAll(nullptr);
diff --git a/compiler/driver/dex_compilation_unit.cc b/compiler/driver/dex_compilation_unit.cc
index 47b19297e5..7e8e812c4a 100644
--- a/compiler/driver/dex_compilation_unit.cc
+++ b/compiler/driver/dex_compilation_unit.cc
@@ -21,7 +21,7 @@
namespace art {
-DexCompilationUnit::DexCompilationUnit(jobject class_loader,
+DexCompilationUnit::DexCompilationUnit(Handle<mirror::ClassLoader> class_loader,
ClassLinker* class_linker,
const DexFile& dex_file,
const DexFile::CodeItem* code_item,
diff --git a/compiler/driver/dex_compilation_unit.h b/compiler/driver/dex_compilation_unit.h
index 854927d747..24a9a5b653 100644
--- a/compiler/driver/dex_compilation_unit.h
+++ b/compiler/driver/dex_compilation_unit.h
@@ -34,7 +34,7 @@ class VerifiedMethod;
class DexCompilationUnit : public DeletableArenaObject<kArenaAllocMisc> {
public:
- DexCompilationUnit(jobject class_loader,
+ DexCompilationUnit(Handle<mirror::ClassLoader> class_loader,
ClassLinker* class_linker,
const DexFile& dex_file,
const DexFile::CodeItem* code_item,
@@ -44,7 +44,7 @@ class DexCompilationUnit : public DeletableArenaObject<kArenaAllocMisc> {
const VerifiedMethod* verified_method,
Handle<mirror::DexCache> dex_cache);
- jobject GetClassLoader() const {
+ Handle<mirror::ClassLoader> GetClassLoader() const {
return class_loader_;
}
@@ -113,7 +113,7 @@ class DexCompilationUnit : public DeletableArenaObject<kArenaAllocMisc> {
}
private:
- const jobject class_loader_;
+ const Handle<mirror::ClassLoader> class_loader_;
ClassLinker* const class_linker_;
@@ -125,7 +125,7 @@ class DexCompilationUnit : public DeletableArenaObject<kArenaAllocMisc> {
const uint32_t access_flags_;
const VerifiedMethod* verified_method_;
- Handle<mirror::DexCache> dex_cache_;
+ const Handle<mirror::DexCache> dex_cache_;
std::string symbol_;
};
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index c72edb18a3..3e9ae0834c 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -940,9 +940,11 @@ void ImageWriter::PruneNonImageClasses() {
}
ObjPtr<mirror::DexCache> dex_cache = self->DecodeJObject(data.weak_root)->AsDexCache();
for (size_t i = 0; i < dex_cache->NumResolvedTypes(); i++) {
- Class* klass = dex_cache->GetResolvedType(dex::TypeIndex(i));
+ mirror::TypeDexCachePair pair =
+ dex_cache->GetResolvedTypes()[i].load(std::memory_order_relaxed);
+ mirror::Class* klass = pair.object.Read();
if (klass != nullptr && !KeepClass(klass)) {
- dex_cache->SetResolvedType(dex::TypeIndex(i), nullptr);
+ dex_cache->ClearResolvedType(dex::TypeIndex(pair.index));
}
}
ArtMethod** resolved_methods = dex_cache->GetResolvedMethods();
@@ -1922,8 +1924,7 @@ void ImageWriter::CopyAndFixupNativeData(size_t oat_index) {
// above comment for intern tables.
ClassTable temp_class_table;
temp_class_table.ReadFromMemory(class_table_memory_ptr);
- CHECK_EQ(class_loaders_.size(), compile_app_image_ ? 1u : 0u);
- mirror::ClassLoader* class_loader = compile_app_image_ ? *class_loaders_.begin() : nullptr;
+ ObjPtr<mirror::ClassLoader> class_loader = GetClassLoader();
CHECK_EQ(temp_class_table.NumZygoteClasses(class_loader),
table->NumNonZygoteClasses(class_loader) + table->NumZygoteClasses(class_loader));
UnbufferedRootVisitor visitor(&root_visitor, RootInfo(kRootUnknown));
@@ -2213,7 +2214,7 @@ void ImageWriter::FixupDexCache(mirror::DexCache* orig_dex_cache,
orig_dex_cache->FixupStrings(NativeCopyLocation(orig_strings, orig_dex_cache),
ImageAddressVisitor(this));
}
- GcRoot<mirror::Class>* orig_types = orig_dex_cache->GetResolvedTypes();
+ mirror::TypeDexCacheType* orig_types = orig_dex_cache->GetResolvedTypes();
if (orig_types != nullptr) {
copy_dex_cache->SetFieldPtrWithSize<false>(mirror::DexCache::ResolvedTypesOffset(),
NativeLocationInImage(orig_types),
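
The resolved-types array changes element type from GcRoot<mirror::Class> to mirror::TypeDexCacheType, an atomic pair of (class, type index): with hash-based dex cache arrays several type indices can map to one slot, so a slot must remember which index it currently caches. A commented sketch of the read/clear pattern from PruneNonImageClasses above (slot selection is roughly type_idx modulo the array length):

    mirror::TypeDexCachePair pair =
        dex_cache->GetResolvedTypes()[i].load(std::memory_order_relaxed);
    mirror::Class* klass = pair.object.Read();
    if (klass != nullptr && !KeepClass(klass)) {
      // Clear by the index stored in the pair, not the loop index i: the slot
      // may currently cache a different type index that hashed to the same slot.
      dex_cache->ClearResolvedType(dex::TypeIndex(pair.index));
    }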
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index cc7df1ce21..bdc7146632 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -51,8 +51,13 @@ class ImageSpace;
} // namespace space
} // namespace gc
+namespace mirror {
+class ClassLoader;
+} // namespace mirror
+
class ClassLoaderVisitor;
class ClassTable;
+class ImtConflictTable;
static constexpr int kInvalidFd = -1;
@@ -79,6 +84,11 @@ class ImageWriter FINAL {
return true;
}
+ ObjPtr<mirror::ClassLoader> GetClassLoader() {
+ CHECK_EQ(class_loaders_.size(), compile_app_image_ ? 1u : 0u);
+ return compile_app_image_ ? *class_loaders_.begin() : nullptr;
+ }
+
template <typename T>
T* GetImageAddress(T* object) const REQUIRES_SHARED(Locks::mutator_lock_) {
if (object == nullptr || IsInBootImage(object)) {
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 7c0cdbf270..0ea11255a8 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -1060,6 +1060,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
WriteCodeMethodVisitor(OatWriter* writer, OutputStream* out, const size_t file_offset,
size_t relative_offset) SHARED_LOCK_FUNCTION(Locks::mutator_lock_)
: OatDexMethodVisitor(writer, relative_offset),
+ class_loader_(writer->HasImage() ? writer->image_writer_->GetClassLoader() : nullptr),
out_(out),
file_offset_(file_offset),
soa_(Thread::Current()),
@@ -1245,6 +1246,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
}
private:
+ ObjPtr<mirror::ClassLoader> class_loader_;
OutputStream* const out_;
const size_t file_offset_;
const ScopedObjectAccess soa_;
@@ -1303,10 +1305,12 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
}
mirror::Class* GetTargetType(const LinkerPatch& patch) REQUIRES_SHARED(Locks::mutator_lock_) {
+ DCHECK(writer_->HasImage());
ObjPtr<mirror::DexCache> dex_cache = GetDexCache(patch.TargetTypeDexFile());
- mirror::Class* type = dex_cache->GetResolvedType(patch.TargetTypeIndex());
+ ObjPtr<mirror::Class> type =
+ ClassLinker::LookupResolvedType(patch.TargetTypeIndex(), dex_cache, class_loader_);
CHECK(type != nullptr);
- return type;
+ return type.Ptr();
}
mirror::String* GetTargetString(const LinkerPatch& patch) REQUIRES_SHARED(Locks::mutator_lock_) {
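
Same motivation in the oat writer: an evictable hash-based cache slot can no longer be trusted to still hold patch.TargetTypeIndex(), so GetTargetType() re-resolves through ClassLinker::LookupResolvedType, which falls back to the class loader's class table. The class_loader_ member is available here because, per the new DCHECK, this path only runs when an image is being written.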
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index e4ad4222fb..3a4c9dbd16 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -54,7 +54,10 @@ class HGraphBuilder : public ValueObject {
compiler_driver_(driver),
compilation_stats_(compiler_stats),
block_builder_(graph, dex_file, code_item),
- ssa_builder_(graph, dex_compilation_unit->GetDexCache(), handles),
+ ssa_builder_(graph,
+ dex_compilation_unit->GetClassLoader(),
+ dex_compilation_unit->GetDexCache(),
+ handles),
instruction_builder_(graph,
&block_builder_,
&ssa_builder_,
@@ -80,10 +83,12 @@ class HGraphBuilder : public ValueObject {
code_item_(code_item),
dex_compilation_unit_(nullptr),
compiler_driver_(nullptr),
- null_dex_cache_(),
compilation_stats_(nullptr),
block_builder_(graph, nullptr, code_item),
- ssa_builder_(graph, null_dex_cache_, handles),
+ ssa_builder_(graph,
+ handles->NewHandle<mirror::ClassLoader>(nullptr),
+ handles->NewHandle<mirror::DexCache>(nullptr),
+ handles),
instruction_builder_(graph,
&block_builder_,
&ssa_builder_,
@@ -96,7 +101,7 @@ class HGraphBuilder : public ValueObject {
/* code_generator */ nullptr,
/* interpreter_metadata */ nullptr,
/* compiler_stats */ nullptr,
- null_dex_cache_,
+ handles->NewHandle<mirror::DexCache>(nullptr),
handles) {}
GraphAnalysisResult BuildGraph();
@@ -117,8 +122,6 @@ class HGraphBuilder : public ValueObject {
CompilerDriver* const compiler_driver_;
- ScopedNullHandle<mirror::DexCache> null_dex_cache_;
-
OptimizingCompilerStats* compilation_stats_;
HBasicBlockBuilder block_builder_;
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 27b42536ef..e6032d2381 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -34,6 +34,9 @@
#include "utils/stack_checks.h"
using namespace vixl::aarch64; // NOLINT(build/namespaces)
+using vixl::ExactAssemblyScope;
+using vixl::CodeBufferCheckScope;
+using vixl::EmissionCheckScope;
#ifdef __
#error "ARM64 Codegen VIXL macro-assembler macro already defined."
@@ -613,10 +616,9 @@ void JumpTableARM64::EmitTable(CodeGeneratorARM64* codegen) {
// We are about to use the assembler to place literals directly. Make sure we have enough
// underlying code buffer and we have generated the jump table with right size.
- vixl::CodeBufferCheckScope scope(codegen->GetVIXLAssembler(),
- num_entries * sizeof(int32_t),
- vixl::CodeBufferCheckScope::kReserveBufferSpace,
- vixl::CodeBufferCheckScope::kExactSize);
+ EmissionCheckScope scope(codegen->GetVIXLAssembler(),
+ num_entries * sizeof(int32_t),
+ CodeBufferCheckScope::kExactSize);
__ Bind(&table_start_);
const ArenaVector<HBasicBlock*>& successors = switch_instr_->GetBlock()->GetSuccessors();
@@ -1279,7 +1281,6 @@ void ParallelMoveResolverARM64::EmitMove(size_t index) {
void CodeGeneratorARM64::GenerateFrameEntry() {
MacroAssembler* masm = GetVIXLAssembler();
- BlockPoolsScope block_pools(masm);
__ Bind(&frame_entry_label_);
bool do_overflow_check = FrameNeedsStackCheck(GetFrameSize(), kArm64) || !IsLeafMethod();
@@ -1288,8 +1289,14 @@ void CodeGeneratorARM64::GenerateFrameEntry() {
Register temp = temps.AcquireX();
DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
__ Sub(temp, sp, static_cast<int32_t>(GetStackOverflowReservedBytes(kArm64)));
- __ Ldr(wzr, MemOperand(temp, 0));
- RecordPcInfo(nullptr, 0);
+ {
+ // Ensure that between load and RecordPcInfo there are no pools emitted.
+ ExactAssemblyScope eas(GetVIXLAssembler(),
+ kInstructionSize,
+ CodeBufferCheckScope::kExactSize);
+ __ ldr(wzr, MemOperand(temp, 0));
+ RecordPcInfo(nullptr, 0);
+ }
}
if (!HasEmptyFrame()) {
@@ -1324,7 +1331,6 @@ void CodeGeneratorARM64::GenerateFrameEntry() {
}
void CodeGeneratorARM64::GenerateFrameExit() {
- BlockPoolsScope block_pools(GetVIXLAssembler());
GetAssembler()->cfi().RememberState();
if (!HasEmptyFrame()) {
int frame_size = GetFrameSize();
@@ -1651,7 +1657,6 @@ void CodeGeneratorARM64::LoadAcquire(HInstruction* instruction,
const MemOperand& src,
bool needs_null_check) {
MacroAssembler* masm = GetVIXLAssembler();
- BlockPoolsScope block_pools(masm);
UseScratchRegisterScope temps(masm);
Register temp_base = temps.AcquireX();
Primitive::Type type = instruction->GetType();
@@ -1661,58 +1666,79 @@ void CodeGeneratorARM64::LoadAcquire(HInstruction* instruction,
// TODO(vixl): Let the MacroAssembler handle MemOperand.
__ Add(temp_base, src.GetBaseRegister(), OperandFromMemOperand(src));
- MemOperand base = MemOperand(temp_base);
- switch (type) {
- case Primitive::kPrimBoolean:
- __ Ldarb(Register(dst), base);
- if (needs_null_check) {
- MaybeRecordImplicitNullCheck(instruction);
- }
- break;
- case Primitive::kPrimByte:
- __ Ldarb(Register(dst), base);
- if (needs_null_check) {
- MaybeRecordImplicitNullCheck(instruction);
- }
- __ Sbfx(Register(dst), Register(dst), 0, Primitive::ComponentSize(type) * kBitsPerByte);
- break;
- case Primitive::kPrimChar:
- __ Ldarh(Register(dst), base);
- if (needs_null_check) {
- MaybeRecordImplicitNullCheck(instruction);
- }
- break;
- case Primitive::kPrimShort:
- __ Ldarh(Register(dst), base);
- if (needs_null_check) {
- MaybeRecordImplicitNullCheck(instruction);
- }
- __ Sbfx(Register(dst), Register(dst), 0, Primitive::ComponentSize(type) * kBitsPerByte);
- break;
- case Primitive::kPrimInt:
- case Primitive::kPrimNot:
- case Primitive::kPrimLong:
- DCHECK_EQ(dst.Is64Bits(), Primitive::Is64BitType(type));
- __ Ldar(Register(dst), base);
- if (needs_null_check) {
- MaybeRecordImplicitNullCheck(instruction);
- }
- break;
- case Primitive::kPrimFloat:
- case Primitive::kPrimDouble: {
- DCHECK(dst.IsFPRegister());
- DCHECK_EQ(dst.Is64Bits(), Primitive::Is64BitType(type));
-
- Register temp = dst.Is64Bits() ? temps.AcquireX() : temps.AcquireW();
- __ Ldar(temp, base);
- if (needs_null_check) {
- MaybeRecordImplicitNullCheck(instruction);
+ {
+ // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
+ MemOperand base = MemOperand(temp_base);
+ switch (type) {
+ case Primitive::kPrimBoolean:
+ {
+ ExactAssemblyScope eas(masm, kInstructionSize, CodeBufferCheckScope::kExactSize);
+ __ ldarb(Register(dst), base);
+ if (needs_null_check) {
+ MaybeRecordImplicitNullCheck(instruction);
+ }
+ }
+ break;
+ case Primitive::kPrimByte:
+ {
+ ExactAssemblyScope eas(masm, kInstructionSize, CodeBufferCheckScope::kExactSize);
+ __ ldarb(Register(dst), base);
+ if (needs_null_check) {
+ MaybeRecordImplicitNullCheck(instruction);
+ }
+ }
+ __ Sbfx(Register(dst), Register(dst), 0, Primitive::ComponentSize(type) * kBitsPerByte);
+ break;
+ case Primitive::kPrimChar:
+ {
+ ExactAssemblyScope eas(masm, kInstructionSize, CodeBufferCheckScope::kExactSize);
+ __ ldarh(Register(dst), base);
+ if (needs_null_check) {
+ MaybeRecordImplicitNullCheck(instruction);
+ }
+ }
+ break;
+ case Primitive::kPrimShort:
+ {
+ ExactAssemblyScope eas(masm, kInstructionSize, CodeBufferCheckScope::kExactSize);
+ __ ldarh(Register(dst), base);
+ if (needs_null_check) {
+ MaybeRecordImplicitNullCheck(instruction);
+ }
+ }
+ __ Sbfx(Register(dst), Register(dst), 0, Primitive::ComponentSize(type) * kBitsPerByte);
+ break;
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot:
+ case Primitive::kPrimLong:
+ DCHECK_EQ(dst.Is64Bits(), Primitive::Is64BitType(type));
+ {
+ ExactAssemblyScope eas(masm, kInstructionSize, CodeBufferCheckScope::kExactSize);
+ __ ldar(Register(dst), base);
+ if (needs_null_check) {
+ MaybeRecordImplicitNullCheck(instruction);
+ }
+ }
+ break;
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ DCHECK(dst.IsFPRegister());
+ DCHECK_EQ(dst.Is64Bits(), Primitive::Is64BitType(type));
+
+ Register temp = dst.Is64Bits() ? temps.AcquireX() : temps.AcquireW();
+ {
+ ExactAssemblyScope eas(masm, kInstructionSize, CodeBufferCheckScope::kExactSize);
+ __ ldar(temp, base);
+ if (needs_null_check) {
+ MaybeRecordImplicitNullCheck(instruction);
+ }
+ }
+ __ Fmov(FPRegister(dst), temp);
+ break;
}
- __ Fmov(FPRegister(dst), temp);
- break;
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unreachable type " << type;
}
- case Primitive::kPrimVoid:
- LOG(FATAL) << "Unreachable type " << type;
}
}
@@ -1741,9 +1767,12 @@ void CodeGeneratorARM64::Store(Primitive::Type type,
}
}
-void CodeGeneratorARM64::StoreRelease(Primitive::Type type,
+void CodeGeneratorARM64::StoreRelease(HInstruction* instruction,
+ Primitive::Type type,
CPURegister src,
- const MemOperand& dst) {
+ const MemOperand& dst,
+ bool needs_null_check) {
+ MacroAssembler* masm = GetVIXLAssembler();
UseScratchRegisterScope temps(GetVIXLAssembler());
Register temp_base = temps.AcquireX();
@@ -1754,20 +1783,39 @@ void CodeGeneratorARM64::StoreRelease(Primitive::Type type,
Operand op = OperandFromMemOperand(dst);
__ Add(temp_base, dst.GetBaseRegister(), op);
MemOperand base = MemOperand(temp_base);
+ // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted.
switch (type) {
case Primitive::kPrimBoolean:
case Primitive::kPrimByte:
- __ Stlrb(Register(src), base);
+ {
+ ExactAssemblyScope eas(masm, kInstructionSize, CodeBufferCheckScope::kExactSize);
+ __ stlrb(Register(src), base);
+ if (needs_null_check) {
+ MaybeRecordImplicitNullCheck(instruction);
+ }
+ }
break;
case Primitive::kPrimChar:
case Primitive::kPrimShort:
- __ Stlrh(Register(src), base);
+ {
+ ExactAssemblyScope eas(masm, kInstructionSize, CodeBufferCheckScope::kExactSize);
+ __ stlrh(Register(src), base);
+ if (needs_null_check) {
+ MaybeRecordImplicitNullCheck(instruction);
+ }
+ }
break;
case Primitive::kPrimInt:
case Primitive::kPrimNot:
case Primitive::kPrimLong:
DCHECK_EQ(src.Is64Bits(), Primitive::Is64BitType(type));
- __ Stlr(Register(src), base);
+ {
+ ExactAssemblyScope eas(masm, kInstructionSize, CodeBufferCheckScope::kExactSize);
+ __ stlr(Register(src), base);
+ if (needs_null_check) {
+ MaybeRecordImplicitNullCheck(instruction);
+ }
+ }
break;
case Primitive::kPrimFloat:
case Primitive::kPrimDouble: {
@@ -1781,8 +1829,13 @@ void CodeGeneratorARM64::StoreRelease(Primitive::Type type,
temp_src = src.Is64Bits() ? temps.AcquireX() : temps.AcquireW();
__ Fmov(temp_src, FPRegister(src));
}
-
- __ Stlr(temp_src, base);
+ {
+ ExactAssemblyScope eas(masm, kInstructionSize, CodeBufferCheckScope::kExactSize);
+ __ stlr(temp_src, base);
+ if (needs_null_check) {
+ MaybeRecordImplicitNullCheck(instruction);
+ }
+ }
break;
}
case Primitive::kPrimVoid:
@@ -1795,9 +1848,15 @@ void CodeGeneratorARM64::InvokeRuntime(QuickEntrypointEnum entrypoint,
uint32_t dex_pc,
SlowPathCode* slow_path) {
ValidateInvokeRuntime(entrypoint, instruction, slow_path);
- GenerateInvokeRuntime(GetThreadOffset<kArm64PointerSize>(entrypoint).Int32Value());
- if (EntrypointRequiresStackMap(entrypoint)) {
- RecordPcInfo(instruction, dex_pc, slow_path);
+
+ __ Ldr(lr, MemOperand(tr, GetThreadOffset<kArm64PointerSize>(entrypoint).Int32Value()));
+ {
+ // Ensure the pc position is recorded immediately after the `blr` instruction.
+ ExactAssemblyScope eas(GetVIXLAssembler(), kInstructionSize, CodeBufferCheckScope::kExactSize);
+ __ blr(lr);
+ if (EntrypointRequiresStackMap(entrypoint)) {
+ RecordPcInfo(instruction, dex_pc, slow_path);
+ }
}
}
@@ -1805,11 +1864,6 @@ void CodeGeneratorARM64::InvokeRuntimeWithoutRecordingPcInfo(int32_t entry_point
HInstruction* instruction,
SlowPathCode* slow_path) {
ValidateInvokeRuntimeWithoutRecordingPcInfo(instruction, slow_path);
- GenerateInvokeRuntime(entry_point_offset);
-}
-
-void CodeGeneratorARM64::GenerateInvokeRuntime(int32_t entry_point_offset) {
- BlockPoolsScope block_pools(GetVIXLAssembler());
__ Ldr(lr, MemOperand(tr, entry_point_offset));
__ Blr(lr);
}
@@ -1976,7 +2030,6 @@ void InstructionCodeGeneratorARM64::HandleFieldGet(HInstruction* instruction,
Location out = locations->Out();
uint32_t offset = field_info.GetFieldOffset().Uint32Value();
Primitive::Type field_type = field_info.GetFieldType();
- BlockPoolsScope block_pools(GetVIXLAssembler());
MemOperand field = HeapOperand(InputRegisterAt(instruction, 0), field_info.GetFieldOffset());
if (field_type == Primitive::kPrimNot && kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
@@ -2003,6 +2056,8 @@ void InstructionCodeGeneratorARM64::HandleFieldGet(HInstruction* instruction,
codegen_->LoadAcquire(
instruction, OutputCPURegister(instruction), field, /* needs_null_check */ true);
} else {
+ // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
+ EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
codegen_->Load(field_type, OutputCPURegister(instruction), field);
codegen_->MaybeRecordImplicitNullCheck(instruction);
}
@@ -2032,7 +2087,6 @@ void InstructionCodeGeneratorARM64::HandleFieldSet(HInstruction* instruction,
const FieldInfo& field_info,
bool value_can_be_null) {
DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());
- BlockPoolsScope block_pools(GetVIXLAssembler());
Register obj = InputRegisterAt(instruction, 0);
CPURegister value = InputCPURegisterOrZeroRegAt(instruction, 1);
@@ -2054,9 +2108,11 @@ void InstructionCodeGeneratorARM64::HandleFieldSet(HInstruction* instruction,
}
if (field_info.IsVolatile()) {
- codegen_->StoreRelease(field_type, source, HeapOperand(obj, offset));
- codegen_->MaybeRecordImplicitNullCheck(instruction);
+ codegen_->StoreRelease(
+ instruction, field_type, source, HeapOperand(obj, offset), /* needs_null_check */ true);
} else {
+ // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted.
+ EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
codegen_->Store(field_type, source, HeapOperand(obj, offset));
codegen_->MaybeRecordImplicitNullCheck(instruction);
}
@@ -2342,10 +2398,7 @@ void InstructionCodeGeneratorARM64::VisitMultiplyAccumulate(HMultiplyAccumulate*
masm->GetCursorAddress<vixl::aarch64::Instruction*>() - kInstructionSize;
if (prev->IsLoadOrStore()) {
// Make sure we emit only exactly one nop.
- vixl::CodeBufferCheckScope scope(masm,
- kInstructionSize,
- vixl::CodeBufferCheckScope::kReserveBufferSpace,
- vixl::CodeBufferCheckScope::kExactSize);
+ ExactAssemblyScope scope(masm, kInstructionSize, CodeBufferCheckScope::kExactSize);
__ nop();
}
}
@@ -2401,8 +2454,6 @@ void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) {
instruction->IsStringCharAt();
MacroAssembler* masm = GetVIXLAssembler();
UseScratchRegisterScope temps(masm);
- // Block pools between `Load` and `MaybeRecordImplicitNullCheck`.
- BlockPoolsScope block_pools(masm);
// The read barrier instrumentation of object ArrayGet instructions
// does not support the HIntermediateAddress instruction.
@@ -2424,15 +2475,21 @@ void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) {
if (maybe_compressed_char_at) {
uint32_t count_offset = mirror::String::CountOffset().Uint32Value();
length = temps.AcquireW();
- if (instruction->GetArray()->IsIntermediateAddress()) {
- DCHECK_LT(count_offset, offset);
- int64_t adjusted_offset = static_cast<int64_t>(count_offset) - static_cast<int64_t>(offset);
- // Note that `adjusted_offset` is negative, so this will be a LDUR.
- __ Ldr(length, MemOperand(obj.X(), adjusted_offset));
- } else {
- __ Ldr(length, HeapOperand(obj, count_offset));
+ {
+ // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
+ EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
+
+ if (instruction->GetArray()->IsIntermediateAddress()) {
+ DCHECK_LT(count_offset, offset);
+ int64_t adjusted_offset =
+ static_cast<int64_t>(count_offset) - static_cast<int64_t>(offset);
+ // Note that `adjusted_offset` is negative, so this will be a LDUR.
+ __ Ldr(length, MemOperand(obj.X(), adjusted_offset));
+ } else {
+ __ Ldr(length, HeapOperand(obj, count_offset));
+ }
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
}
- codegen_->MaybeRecordImplicitNullCheck(instruction);
}
if (index.IsConstant()) {
if (maybe_compressed_char_at) {
@@ -2482,6 +2539,8 @@ void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) {
}
}
if (!maybe_compressed_char_at) {
+ // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
+ EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
codegen_->Load(type, OutputCPURegister(instruction), source);
codegen_->MaybeRecordImplicitNullCheck(instruction);
}
@@ -2509,9 +2568,12 @@ void LocationsBuilderARM64::VisitArrayLength(HArrayLength* instruction) {
void InstructionCodeGeneratorARM64::VisitArrayLength(HArrayLength* instruction) {
uint32_t offset = CodeGenerator::GetArrayLengthOffset(instruction);
vixl::aarch64::Register out = OutputRegister(instruction);
- BlockPoolsScope block_pools(GetVIXLAssembler());
- __ Ldr(out, HeapOperand(InputRegisterAt(instruction, 0), offset));
- codegen_->MaybeRecordImplicitNullCheck(instruction);
+ {
+ // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
+ EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
+ __ Ldr(out, HeapOperand(InputRegisterAt(instruction, 0), offset));
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ }
// Mask out compression flag from String's array length.
if (mirror::kUseStringCompression && instruction->IsStringLength()) {
__ Lsr(out.W(), out.W(), 1u);
@@ -2552,7 +2614,6 @@ void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) {
size_t offset = mirror::Array::DataOffset(Primitive::ComponentSize(value_type)).Uint32Value();
MemOperand destination = HeapOperand(array);
MacroAssembler* masm = GetVIXLAssembler();
- BlockPoolsScope block_pools(masm);
if (!needs_write_barrier) {
DCHECK(!may_need_runtime_call_for_type_check);
@@ -2579,8 +2640,12 @@ void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) {
LSL,
Primitive::ComponentSizeShift(value_type));
}
- codegen_->Store(value_type, value, destination);
- codegen_->MaybeRecordImplicitNullCheck(instruction);
+ {
+ // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted.
+ EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
+ codegen_->Store(value_type, value, destination);
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ }
} else {
DCHECK(!instruction->GetArray()->IsIntermediateAddress());
vixl::aarch64::Label done;
@@ -2613,8 +2678,13 @@ void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) {
if (!index.IsConstant()) {
__ Add(temp, array, offset);
}
- __ Str(wzr, destination);
- codegen_->MaybeRecordImplicitNullCheck(instruction);
+ {
+ // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools
+ // emitted.
+ EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
+ __ Str(wzr, destination);
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ }
__ B(&done);
__ Bind(&non_zero);
}
@@ -2629,8 +2699,12 @@ void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) {
Register temp2 = temps.AcquireSameSizeAs(array);
// /* HeapReference<Class> */ temp = array->klass_
- __ Ldr(temp, HeapOperand(array, class_offset));
- codegen_->MaybeRecordImplicitNullCheck(instruction);
+ {
+ // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
+ EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
+ __ Ldr(temp, HeapOperand(array, class_offset));
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ }
GetAssembler()->MaybeUnpoisonHeapReference(temp);
// /* HeapReference<Class> */ temp = temp->component_type_
@@ -2671,10 +2745,14 @@ void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) {
if (!index.IsConstant()) {
__ Add(temp, array, offset);
}
- __ Str(source, destination);
+ {
+ // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted.
+ EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
+ __ Str(source, destination);
- if (!may_need_runtime_call_for_type_check) {
- codegen_->MaybeRecordImplicitNullCheck(instruction);
+ if (!may_need_runtime_call_for_type_check) {
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ }
}
}
@@ -3969,19 +4047,25 @@ void InstructionCodeGeneratorARM64::VisitInvokeInterface(HInvokeInterface* invok
// art_quick_imt_conflict_trampoline, so prevent VIXL from using it.
MacroAssembler* masm = GetVIXLAssembler();
UseScratchRegisterScope scratch_scope(masm);
- BlockPoolsScope block_pools(masm);
scratch_scope.Exclude(ip1);
__ Mov(ip1, invoke->GetDexMethodIndex());
+ // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
if (receiver.IsStackSlot()) {
__ Ldr(temp.W(), StackOperandFrom(receiver));
- // /* HeapReference<Class> */ temp = temp->klass_
- __ Ldr(temp.W(), HeapOperand(temp.W(), class_offset));
+ {
+ EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
+ // /* HeapReference<Class> */ temp = temp->klass_
+ __ Ldr(temp.W(), HeapOperand(temp.W(), class_offset));
+ codegen_->MaybeRecordImplicitNullCheck(invoke);
+ }
} else {
+ EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
// /* HeapReference<Class> */ temp = receiver->klass_
__ Ldr(temp.W(), HeapOperandFrom(receiver, class_offset));
+ codegen_->MaybeRecordImplicitNullCheck(invoke);
}
- codegen_->MaybeRecordImplicitNullCheck(invoke);
+
// Instead of simply (possibly) unpoisoning `temp` here, we should
// emit a read barrier for the previous class reference load.
// However this is not required in practice, as this is an
@@ -3998,10 +4082,16 @@ void InstructionCodeGeneratorARM64::VisitInvokeInterface(HInvokeInterface* invok
__ Ldr(temp, MemOperand(temp, method_offset));
// lr = temp->GetEntryPoint();
__ Ldr(lr, MemOperand(temp, entry_point.Int32Value()));
- // lr();
- __ Blr(lr);
- DCHECK(!codegen_->IsLeafMethod());
- codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+
+ {
+ // Ensure the pc position is recorded immediately after the `blr` instruction.
+ ExactAssemblyScope eas(GetVIXLAssembler(), kInstructionSize, CodeBufferCheckScope::kExactSize);
+
+ // lr();
+ __ blr(lr);
+ DCHECK(!codegen_->IsLeafMethod());
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+ }
}
void LocationsBuilderARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
@@ -4113,8 +4203,16 @@ void CodeGeneratorARM64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invok
__ Ldr(lr, MemOperand(
XRegisterFrom(callee_method),
ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64PointerSize).Int32Value()));
- // lr()
- __ Blr(lr);
+ {
+ // To ensure that the pc position is recorded immediately after the `blr` instruction
+ // BLR must be the last instruction emitted in this function.
+ // Recording the pc will occur right after returning from this function.
+ ExactAssemblyScope eas(GetVIXLAssembler(),
+ kInstructionSize,
+ CodeBufferCheckScope::kExactSize);
+ // lr()
+ __ blr(lr);
+ }
break;
}
@@ -4134,12 +4232,15 @@ void CodeGeneratorARM64::GenerateVirtualCall(HInvokeVirtual* invoke, Location te
Offset class_offset = mirror::Object::ClassOffset();
Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64PointerSize);
- BlockPoolsScope block_pools(GetVIXLAssembler());
-
DCHECK(receiver.IsRegister());
- // /* HeapReference<Class> */ temp = receiver->klass_
- __ Ldr(temp.W(), HeapOperandFrom(LocationFrom(receiver), class_offset));
- MaybeRecordImplicitNullCheck(invoke);
+
+ {
+ // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
+ EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
+ // /* HeapReference<Class> */ temp = receiver->klass_
+ __ Ldr(temp.W(), HeapOperandFrom(LocationFrom(receiver), class_offset));
+ MaybeRecordImplicitNullCheck(invoke);
+ }
// Instead of simply (possibly) unpoisoning `temp` here, we should
// emit a read barrier for the previous class reference load.
// intermediate/temporary reference and because the current
@@ -4151,8 +4252,14 @@ void CodeGeneratorARM64::GenerateVirtualCall(HInvokeVirtual* invoke, Location te
__ Ldr(temp, MemOperand(temp, method_offset));
// lr = temp->GetEntryPoint();
__ Ldr(lr, MemOperand(temp, entry_point.SizeValue()));
- // lr();
- __ Blr(lr);
+ {
+ // To ensure that the pc position is recorded immediately after the `blr` instruction
+ // BLR should be the last instruction emitted in this function.
+ // Recording the pc will occur right after returning from this function.
+ ExactAssemblyScope eas(GetVIXLAssembler(), kInstructionSize, CodeBufferCheckScope::kExactSize);
+ // lr();
+ __ blr(lr);
+ }
}
void LocationsBuilderARM64::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
@@ -4365,7 +4472,9 @@ void InstructionCodeGeneratorARM64::VisitInvokeStaticOrDirect(HInvokeStaticOrDir
return;
}
- BlockPoolsScope block_pools(GetVIXLAssembler());
+ // Ensure that between the BLR (emitted by GenerateStaticOrDirectCall) and RecordPcInfo there
+ // are no pools emitted.
+ EmissionCheckScope guard(GetVIXLAssembler(), kInvokeCodeMarginSizeInBytes);
LocationSummary* locations = invoke->GetLocations();
codegen_->GenerateStaticOrDirectCall(
invoke, locations->HasTemps() ? locations->GetTemp(0) : Location::NoLocation());
@@ -4377,6 +4486,9 @@ void InstructionCodeGeneratorARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
return;
}
+ // Ensure that between the BLR (emitted by GenerateVirtualCall) and RecordPcInfo there
+ // are no pools emitted.
+ EmissionCheckScope guard(GetVIXLAssembler(), kInvokeCodeMarginSizeInBytes);
codegen_->GenerateVirtualCall(invoke, invoke->GetLocations()->GetTemp(0));
DCHECK(!codegen_->IsLeafMethod());
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -4872,8 +4984,15 @@ void InstructionCodeGeneratorARM64::VisitNewInstance(HNewInstance* instruction)
MemberOffset code_offset = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64PointerSize);
__ Ldr(XRegisterFrom(temp), MemOperand(tr, QUICK_ENTRY_POINT(pNewEmptyString)));
__ Ldr(lr, MemOperand(XRegisterFrom(temp), code_offset.Int32Value()));
- __ Blr(lr);
- codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+
+ {
+ // Ensure the pc position is recorded immediately after the `blr` instruction.
+ ExactAssemblyScope eas(GetVIXLAssembler(),
+ kInstructionSize,
+ CodeBufferCheckScope::kExactSize);
+ __ blr(lr);
+ codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+ }
} else {
codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
@@ -4917,11 +5036,13 @@ void CodeGeneratorARM64::GenerateImplicitNullCheck(HNullCheck* instruction) {
if (CanMoveNullCheckToUser(instruction)) {
return;
}
-
- BlockPoolsScope block_pools(GetVIXLAssembler());
- Location obj = instruction->GetLocations()->InAt(0);
- __ Ldr(wzr, HeapOperandFrom(obj, Offset(0)));
- RecordPcInfo(instruction, instruction->GetDexPc());
+ {
+ // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
+ EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
+ Location obj = instruction->GetLocations()->InAt(0);
+ __ Ldr(wzr, HeapOperandFrom(obj, Offset(0)));
+ RecordPcInfo(instruction, instruction->GetDexPc());
+ }
}
void CodeGeneratorARM64::GenerateExplicitNullCheck(HNullCheck* instruction) {
@@ -5658,10 +5779,14 @@ void CodeGeneratorARM64::GenerateReferenceLoadWithBakerReadBarrier(HInstruction*
DCHECK(obj.IsW());
uint32_t monitor_offset = mirror::Object::MonitorOffset().Int32Value();
- // /* int32_t */ monitor = obj->monitor_
- __ Ldr(temp, HeapOperand(obj, monitor_offset));
- if (needs_null_check) {
- MaybeRecordImplicitNullCheck(instruction);
+ {
+ // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
+ EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
+ // /* int32_t */ monitor = obj->monitor_
+ __ Ldr(temp, HeapOperand(obj, monitor_offset));
+ if (needs_null_check) {
+ MaybeRecordImplicitNullCheck(instruction);
+ }
}
// /* LockWord */ lock_word = LockWord(monitor)
static_assert(sizeof(LockWord) == sizeof(int32_t),
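
Pool-blocking in this file now uses the two VIXL scope types imported at the top instead of BlockPoolsScope. The pattern comes in two shapes; a sketch with the VIXL AArch64 names used above (note the case convention: lower-case mnemonics like ldr are raw single instructions, upper-case ones like Ldr are MacroAssembler instructions that may expand):

    // Exact shape: exactly one raw instruction, pc recorded right after it.
    {
      ExactAssemblyScope eas(masm, kInstructionSize, CodeBufferCheckScope::kExactSize);
      __ ldr(wzr, MemOperand(temp, 0));  // no pools and no expansion inside the scope
      RecordPcInfo(nullptr, 0);
    }

    // Margin shape: a macro-instruction of bounded but unknown size.
    {
      EmissionCheckScope guard(masm, kMaxMacroInstructionSizeInBytes);
      __ Ldr(out, HeapOperand(obj, offset));  // may expand to several instructions
      codegen_->MaybeRecordImplicitNullCheck(instruction);
    }

Either way, no veneer or literal pool can be emitted between the faulting or calling instruction and the point where its pc is recorded.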
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index f6cb90a63a..5faf29a90f 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -43,6 +43,11 @@ class CodeGeneratorARM64;
// Use a local definition to prevent copying mistakes.
static constexpr size_t kArm64WordSize = static_cast<size_t>(kArm64PointerSize);
+// These constants are used as an approximate margin when emission of veneer and literal pools
+// must be blocked.
+static constexpr int kMaxMacroInstructionSizeInBytes = 15 * vixl::aarch64::kInstructionSize;
+static constexpr int kInvokeCodeMarginSizeInBytes = 6 * kMaxMacroInstructionSizeInBytes;
+
static const vixl::aarch64::Register kParameterCoreRegisters[] = {
vixl::aarch64::x1,
vixl::aarch64::x2,
@@ -486,9 +491,11 @@ class CodeGeneratorARM64 : public CodeGenerator {
vixl::aarch64::CPURegister dst,
const vixl::aarch64::MemOperand& src,
bool needs_null_check);
- void StoreRelease(Primitive::Type type,
+ void StoreRelease(HInstruction* instruction,
+ Primitive::Type type,
vixl::aarch64::CPURegister src,
- const vixl::aarch64::MemOperand& dst);
+ const vixl::aarch64::MemOperand& dst,
+ bool needs_null_check);
// Generate code to invoke a runtime entry point.
void InvokeRuntime(QuickEntrypointEnum entrypoint,
@@ -502,8 +509,6 @@ class CodeGeneratorARM64 : public CodeGenerator {
HInstruction* instruction,
SlowPathCode* slow_path);
- void GenerateInvokeRuntime(int32_t entry_point_offset);
-
ParallelMoveResolverARM64* GetMoveResolver() OVERRIDE { return &move_resolver_; }
bool NeedsTwoRegisters(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
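
Reading the new margin constants: kInstructionSize is 4 bytes on AArch64, so kMaxMacroInstructionSizeInBytes = 15 * 4 = 60 bytes (an assumed upper bound for one expanded macro-instruction), and kInvokeCodeMarginSizeInBytes = 6 * 60 = 360 bytes, sized so that a whole GenerateStaticOrDirectCall/GenerateVirtualCall sequence ending in blr fits in one pool-free region before RecordPcInfo runs.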
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 0677dad078..c9dde7cc55 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -1914,6 +1914,8 @@ void InstructionCodeGeneratorMIPS::VisitArrayGet(HArrayGet* instruction) {
auto null_checker = GetImplicitNullChecker(instruction);
Primitive::Type type = instruction->GetType();
+ const bool maybe_compressed_char_at = mirror::kUseStringCompression &&
+ instruction->IsStringCharAt();
switch (type) {
case Primitive::kPrimBoolean: {
Register out = locations->Out().AsRegister<Register>();
@@ -1957,14 +1959,54 @@ void InstructionCodeGeneratorMIPS::VisitArrayGet(HArrayGet* instruction) {
case Primitive::kPrimChar: {
Register out = locations->Out().AsRegister<Register>();
+ if (maybe_compressed_char_at) {
+ uint32_t count_offset = mirror::String::CountOffset().Uint32Value();
+ __ LoadFromOffset(kLoadWord, TMP, obj, count_offset, null_checker);
+ __ Sll(TMP, TMP, 31); // Extract compression flag into the most significant bit of TMP.
+ static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
+ "Expecting 0=compressed, 1=uncompressed");
+ }
if (index.IsConstant()) {
- size_t offset =
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
- __ LoadFromOffset(kLoadUnsignedHalfword, out, obj, offset, null_checker);
+ int32_t const_index = index.GetConstant()->AsIntConstant()->GetValue();
+ if (maybe_compressed_char_at) {
+ MipsLabel uncompressed_load, done;
+ __ Bnez(TMP, &uncompressed_load);
+ __ LoadFromOffset(kLoadUnsignedByte,
+ out,
+ obj,
+ data_offset + (const_index << TIMES_1));
+ __ B(&done);
+ __ Bind(&uncompressed_load);
+ __ LoadFromOffset(kLoadUnsignedHalfword,
+ out,
+ obj,
+ data_offset + (const_index << TIMES_2));
+ __ Bind(&done);
+ } else {
+ __ LoadFromOffset(kLoadUnsignedHalfword,
+ out,
+ obj,
+ data_offset + (const_index << TIMES_2),
+ null_checker);
+ }
} else {
- __ Sll(TMP, index.AsRegister<Register>(), TIMES_2);
- __ Addu(TMP, obj, TMP);
- __ LoadFromOffset(kLoadUnsignedHalfword, out, TMP, data_offset, null_checker);
+ Register index_reg = index.AsRegister<Register>();
+ if (maybe_compressed_char_at) {
+ MipsLabel uncompressed_load, done;
+ __ Bnez(TMP, &uncompressed_load);
+ __ Addu(TMP, obj, index_reg);
+ __ LoadFromOffset(kLoadUnsignedByte, out, TMP, data_offset);
+ __ B(&done);
+ __ Bind(&uncompressed_load);
+ __ Sll(TMP, index_reg, TIMES_2);
+ __ Addu(TMP, obj, TMP);
+ __ LoadFromOffset(kLoadUnsignedHalfword, out, TMP, data_offset);
+ __ Bind(&done);
+ } else {
+ __ Sll(TMP, index_reg, TIMES_2);
+ __ Addu(TMP, obj, TMP);
+ __ LoadFromOffset(kLoadUnsignedHalfword, out, TMP, data_offset, null_checker);
+ }
}
break;
}
@@ -2046,6 +2088,10 @@ void InstructionCodeGeneratorMIPS::VisitArrayLength(HArrayLength* instruction) {
Register out = locations->Out().AsRegister<Register>();
__ LoadFromOffset(kLoadWord, out, obj, offset);
codegen_->MaybeRecordImplicitNullCheck(instruction);
+ // Mask out compression flag from String's array length.
+ if (mirror::kUseStringCompression && instruction->IsStringLength()) {
+ __ Srl(out, out, 1u);
+ }
}
Location LocationsBuilderMIPS::RegisterOrZeroConstant(HInstruction* instruction) {
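
The compressed-string code above relies on how mirror::String packs its count field: bit 0 is the compression flag (0 = compressed, as the static_assert documents) and the upper 31 bits are the character count. In plain C++, the decoding that the Sll/Srl sequences implement is roughly (hypothetical helper names, a sketch only):

    #include <cstdint>

    constexpr bool IsCompressed(uint32_t count) { return (count & 1u) == 0u; }
    constexpr uint32_t Length(uint32_t count) { return count >> 1u; }

MIPS32 has no single-bit branch, so the flag is shifted into bit 31 (Sll by 31) and tested with Bnez: nonzero means uncompressed.
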
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 4c8dabfede..5be0da4011 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -1490,6 +1490,8 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
uint32_t data_offset = CodeGenerator::GetArrayDataOffset(instruction);
Primitive::Type type = instruction->GetType();
+ const bool maybe_compressed_char_at = mirror::kUseStringCompression &&
+ instruction->IsStringCharAt();
switch (type) {
case Primitive::kPrimBoolean: {
GpuRegister out = locations->Out().AsRegister<GpuRegister>();
@@ -1533,14 +1535,54 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
case Primitive::kPrimChar: {
GpuRegister out = locations->Out().AsRegister<GpuRegister>();
+ if (maybe_compressed_char_at) {
+ uint32_t count_offset = mirror::String::CountOffset().Uint32Value();
+ __ LoadFromOffset(kLoadWord, TMP, obj, count_offset);
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ __ Dext(TMP, TMP, 0, 1);
+ static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
+ "Expecting 0=compressed, 1=uncompressed");
+ }
if (index.IsConstant()) {
- size_t offset =
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
- __ LoadFromOffset(kLoadUnsignedHalfword, out, obj, offset);
+ int32_t const_index = index.GetConstant()->AsIntConstant()->GetValue();
+ if (maybe_compressed_char_at) {
+ Mips64Label uncompressed_load, done;
+ __ Bnezc(TMP, &uncompressed_load);
+ __ LoadFromOffset(kLoadUnsignedByte,
+ out,
+ obj,
+ data_offset + (const_index << TIMES_1));
+ __ Bc(&done);
+ __ Bind(&uncompressed_load);
+ __ LoadFromOffset(kLoadUnsignedHalfword,
+ out,
+ obj,
+ data_offset + (const_index << TIMES_2));
+ __ Bind(&done);
+ } else {
+ __ LoadFromOffset(kLoadUnsignedHalfword,
+ out,
+ obj,
+ data_offset + (const_index << TIMES_2));
+ }
} else {
- __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_2);
- __ Daddu(TMP, obj, TMP);
- __ LoadFromOffset(kLoadUnsignedHalfword, out, TMP, data_offset);
+ GpuRegister index_reg = index.AsRegister<GpuRegister>();
+ if (maybe_compressed_char_at) {
+ Mips64Label uncompressed_load, done;
+ __ Bnezc(TMP, &uncompressed_load);
+ __ Daddu(TMP, obj, index_reg);
+ __ LoadFromOffset(kLoadUnsignedByte, out, TMP, data_offset);
+ __ Bc(&done);
+ __ Bind(&uncompressed_load);
+ __ Dsll(TMP, index_reg, TIMES_2);
+ __ Daddu(TMP, obj, TMP);
+ __ LoadFromOffset(kLoadUnsignedHalfword, out, TMP, data_offset);
+ __ Bind(&done);
+ } else {
+ __ Dsll(TMP, index_reg, TIMES_2);
+ __ Daddu(TMP, obj, TMP);
+ __ LoadFromOffset(kLoadUnsignedHalfword, out, TMP, data_offset);
+ }
}
break;
}
@@ -1608,7 +1650,9 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
LOG(FATAL) << "Unreachable type " << instruction->GetType();
UNREACHABLE();
}
- codegen_->MaybeRecordImplicitNullCheck(instruction);
+ if (!maybe_compressed_char_at) {
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ }
}
void LocationsBuilderMIPS64::VisitArrayLength(HArrayLength* instruction) {
@@ -1624,6 +1668,10 @@ void InstructionCodeGeneratorMIPS64::VisitArrayLength(HArrayLength* instruction)
GpuRegister out = locations->Out().AsRegister<GpuRegister>();
__ LoadFromOffset(kLoadWord, out, obj, offset);
codegen_->MaybeRecordImplicitNullCheck(instruction);
+ // Mask out compression flag from String's array length.
+ if (mirror::kUseStringCompression && instruction->IsStringLength()) {
+ __ Srl(out, out, 1u);
+ }
}
void LocationsBuilderMIPS64::VisitArraySet(HArraySet* instruction) {
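
Note how the implicit null check migrates in the compressed path on both MIPS variants: the first memory access is now the load of the count field, so that load records the null check, and the final per-type recording is skipped when maybe_compressed_char_at is set so that the recorded PC still corresponds to the first access that can fault.
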
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index f0afccb782..b56ef0f866 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -198,9 +198,9 @@ static uint32_t FindMethodIndexIn(ArtMethod* method,
}
static dex::TypeIndex FindClassIndexIn(mirror::Class* cls,
- const DexFile& dex_file,
- Handle<mirror::DexCache> dex_cache)
+ const DexCompilationUnit& compilation_unit)
REQUIRES_SHARED(Locks::mutator_lock_) {
+ const DexFile& dex_file = *compilation_unit.GetDexFile();
dex::TypeIndex index;
if (cls->GetDexCache() == nullptr) {
DCHECK(cls->IsArrayClass()) << cls->PrettyClass();
@@ -209,22 +209,19 @@ static dex::TypeIndex FindClassIndexIn(mirror::Class* cls,
DCHECK(cls->IsProxyClass()) << cls->PrettyClass();
// TODO: deal with proxy classes.
} else if (IsSameDexFile(cls->GetDexFile(), dex_file)) {
- DCHECK_EQ(cls->GetDexCache(), dex_cache.Get());
+ DCHECK_EQ(cls->GetDexCache(), compilation_unit.GetDexCache().Get());
index = cls->GetDexTypeIndex();
- // Update the dex cache to ensure the class is in. The generated code will
- // consider it is. We make it safe by updating the dex cache, as other
- // dex files might also load the class, and there is no guarantee the dex
- // cache of the dex file of the class will be updated.
- if (dex_cache->GetResolvedType(index) == nullptr) {
- dex_cache->SetResolvedType(index, cls);
- }
} else {
index = cls->FindTypeIndexInOtherDexFile(dex_file);
- // We cannot guarantee the entry in the dex cache will resolve to the same class,
+ // We cannot guarantee the entry will resolve to the same class,
// as there may be different class loaders. So only return the index if it's
- // the right class in the dex cache already.
- if (index.IsValid() && dex_cache->GetResolvedType(index) != cls) {
- index = dex::TypeIndex::Invalid();
+ // the right class that is already resolved with the class loader.
+ if (index.IsValid()) {
+ ObjPtr<mirror::Class> resolved = ClassLinker::LookupResolvedType(
+ index, compilation_unit.GetDexCache().Get(), compilation_unit.GetClassLoader().Get());
+ if (resolved != cls) {
+ index = dex::TypeIndex::Invalid();
+ }
}
}
@@ -451,9 +448,8 @@ bool HInliner::TryInlineMonomorphicCall(HInvoke* invoke_instruction,
DCHECK(invoke_instruction->IsInvokeVirtual() || invoke_instruction->IsInvokeInterface())
<< invoke_instruction->DebugName();
- const DexFile& caller_dex_file = *caller_compilation_unit_.GetDexFile();
dex::TypeIndex class_index = FindClassIndexIn(
- GetMonomorphicType(classes), caller_dex_file, caller_compilation_unit_.GetDexCache());
+ GetMonomorphicType(classes), caller_compilation_unit_);
if (!class_index.IsValid()) {
VLOG(compiler) << "Call to " << ArtMethod::PrettyMethod(resolved_method)
<< " from inline cache is not inlined because its class is not"
@@ -496,6 +492,7 @@ bool HInliner::TryInlineMonomorphicCall(HInvoke* invoke_instruction,
// Run type propagation to get the guard typed, and eventually propagate the
// type of the receiver.
ReferenceTypePropagation rtp_fixup(graph_,
+ outer_compilation_unit_.GetClassLoader(),
outer_compilation_unit_.GetDexCache(),
handles_,
/* is_first_run */ false);
@@ -590,7 +587,6 @@ bool HInliner::TryInlinePolymorphicCall(HInvoke* invoke_instruction,
ClassLinker* class_linker = caller_compilation_unit_.GetClassLinker();
PointerSize pointer_size = class_linker->GetImagePointerSize();
- const DexFile& caller_dex_file = *caller_compilation_unit_.GetDexFile();
bool all_targets_inlined = true;
bool one_target_inlined = false;
@@ -612,8 +608,7 @@ bool HInliner::TryInlinePolymorphicCall(HInvoke* invoke_instruction,
HInstruction* cursor = invoke_instruction->GetPrevious();
HBasicBlock* bb_cursor = invoke_instruction->GetBlock();
- dex::TypeIndex class_index = FindClassIndexIn(
- handle.Get(), caller_dex_file, caller_compilation_unit_.GetDexCache());
+ dex::TypeIndex class_index = FindClassIndexIn(handle.Get(), caller_compilation_unit_);
HInstruction* return_replacement = nullptr;
if (!class_index.IsValid() ||
!TryBuildAndInline(invoke_instruction,
@@ -669,6 +664,7 @@ bool HInliner::TryInlinePolymorphicCall(HInvoke* invoke_instruction,
// Run type propagation to get the guards typed.
ReferenceTypePropagation rtp_fixup(graph_,
+ outer_compilation_unit_.GetClassLoader(),
outer_compilation_unit_.GetDexCache(),
handles_,
/* is_first_run */ false);
@@ -863,6 +859,7 @@ bool HInliner::TryInlinePolymorphicCallToSameTarget(
// Run type propagation to get the guard typed.
ReferenceTypePropagation rtp_fixup(graph_,
+ outer_compilation_unit_.GetClassLoader(),
outer_compilation_unit_.GetDexCache(),
handles_,
/* is_first_run */ false);
@@ -931,6 +928,7 @@ bool HInliner::TryInlineAndReplace(HInvoke* invoke_instruction,
// Actual return value has a more specific type than the method's declared
// return type. Run RTP again on the outer graph to propagate it.
ReferenceTypePropagation(graph_,
+ outer_compilation_unit_.GetClassLoader(),
outer_compilation_unit_.GetDexCache(),
handles_,
/* is_first_run */ false).Run();
@@ -1183,7 +1181,11 @@ HInstanceFieldGet* HInliner::CreateInstanceFieldGet(Handle<mirror::DexCache> dex
/* dex_pc */ 0);
if (iget->GetType() == Primitive::kPrimNot) {
// Use the same dex_cache that we used for field lookup as the hint_dex_cache.
- ReferenceTypePropagation rtp(graph_, dex_cache, handles_, /* is_first_run */ false);
+ ReferenceTypePropagation rtp(graph_,
+ outer_compilation_unit_.GetClassLoader(),
+ dex_cache,
+ handles_,
+ /* is_first_run */ false);
rtp.Visit(iget);
}
return iget;
@@ -1229,7 +1231,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
resolved_method->GetDeclaringClass()->GetClassLoader()));
DexCompilationUnit dex_compilation_unit(
- class_loader.ToJObject(),
+ class_loader,
class_linker,
callee_dex_file,
code_item,
@@ -1346,6 +1348,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
// are more specific than the declared ones, run RTP again on the inner graph.
if (run_rtp || ArgumentTypesMoreSpecific(invoke_instruction, resolved_method)) {
ReferenceTypePropagation(callee_graph,
+ outer_compilation_unit_.GetClassLoader(),
dex_compilation_unit.GetDexCache(),
handles_,
/* is_first_run */ false).Run();
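
The recurring simplification in the surrounding hunks is the same: now that DexCompilationUnit stores a Handle<mirror::ClassLoader>, each site that previously opened a StackHandleScope and re-decoded a jobject via soa.Decode can simply reuse the unit's handle, shrinking the scopes and dropping one decode per call.
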
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index a1c391f455..3aaf2ca102 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -669,11 +669,10 @@ static InvokeType GetInvokeTypeFromOpCode(Instruction::Code opcode) {
ArtMethod* HInstructionBuilder::ResolveMethod(uint16_t method_idx, InvokeType invoke_type) {
ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<3> hs(soa.Self());
+ StackHandleScope<2> hs(soa.Self());
ClassLinker* class_linker = dex_compilation_unit_->GetClassLinker();
- Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
- soa.Decode<mirror::ClassLoader>(dex_compilation_unit_->GetClassLoader())));
+ Handle<mirror::ClassLoader> class_loader = dex_compilation_unit_->GetClassLoader();
Handle<mirror::Class> compiling_class(hs.NewHandle(GetCompilingClass()));
  // We fetch the referenced class eagerly (that is, the class pointed to by the MethodId
  // at method_idx), as `CanAccessResolvedMethod` expects it to be in the dex cache.
@@ -1260,9 +1259,7 @@ bool HInstructionBuilder::BuildInstanceFieldAccess(const Instruction& instructio
static mirror::Class* GetClassFrom(CompilerDriver* driver,
const DexCompilationUnit& compilation_unit) {
ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<1> hs(soa.Self());
- Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
- soa.Decode<mirror::ClassLoader>(compilation_unit.GetClassLoader())));
+ Handle<mirror::ClassLoader> class_loader = compilation_unit.GetClassLoader();
Handle<mirror::DexCache> dex_cache = compilation_unit.GetDexCache();
return driver->ResolveCompilingMethodsClass(soa, dex_cache, class_loader, &compilation_unit);
@@ -1278,10 +1275,9 @@ mirror::Class* HInstructionBuilder::GetCompilingClass() const {
bool HInstructionBuilder::IsOutermostCompilingClass(dex::TypeIndex type_index) const {
ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<3> hs(soa.Self());
+ StackHandleScope<2> hs(soa.Self());
Handle<mirror::DexCache> dex_cache = dex_compilation_unit_->GetDexCache();
- Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
- soa.Decode<mirror::ClassLoader>(dex_compilation_unit_->GetClassLoader())));
+ Handle<mirror::ClassLoader> class_loader = dex_compilation_unit_->GetClassLoader();
Handle<mirror::Class> cls(hs.NewHandle(compiler_driver_->ResolveClass(
soa, dex_cache, class_loader, type_index, dex_compilation_unit_)));
Handle<mirror::Class> outer_class(hs.NewHandle(GetOutermostCompilingClass()));
@@ -1317,8 +1313,7 @@ ArtField* HInstructionBuilder::ResolveField(uint16_t field_idx, bool is_static,
StackHandleScope<2> hs(soa.Self());
ClassLinker* class_linker = dex_compilation_unit_->GetClassLinker();
- Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
- soa.Decode<mirror::ClassLoader>(dex_compilation_unit_->GetClassLoader())));
+ Handle<mirror::ClassLoader> class_loader = dex_compilation_unit_->GetClassLoader();
Handle<mirror::Class> compiling_class(hs.NewHandle(GetCompilingClass()));
ArtField* resolved_field = class_linker->ResolveField(*dex_compilation_unit_->GetDexFile(),
@@ -1635,10 +1630,8 @@ static TypeCheckKind ComputeTypeCheckKind(Handle<mirror::Class> cls)
HLoadClass* HInstructionBuilder::BuildLoadClass(dex::TypeIndex type_index, uint32_t dex_pc) {
ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<2> hs(soa.Self());
const DexFile& dex_file = *dex_compilation_unit_->GetDexFile();
- Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
- soa.Decode<mirror::ClassLoader>(dex_compilation_unit_->GetClassLoader())));
+ Handle<mirror::ClassLoader> class_loader = dex_compilation_unit_->GetClassLoader();
Handle<mirror::Class> klass = handles_->NewHandle(compiler_driver_->ResolveClass(
soa, dex_compilation_unit_->GetDexCache(), class_loader, type_index, dex_compilation_unit_));
@@ -1722,17 +1715,9 @@ void HInstructionBuilder::BuildTypeCheck(const Instruction& instruction,
}
}
-bool HInstructionBuilder::NeedsAccessCheck(dex::TypeIndex type_index,
- Handle<mirror::DexCache> dex_cache,
- bool* finalizable) const {
- return !compiler_driver_->CanAccessInstantiableTypeWithoutChecks(
- dex_compilation_unit_->GetDexMethodIndex(), dex_cache, type_index, finalizable);
-}
-
bool HInstructionBuilder::NeedsAccessCheck(dex::TypeIndex type_index, bool* finalizable) const {
- ScopedObjectAccess soa(Thread::Current());
- Handle<mirror::DexCache> dex_cache = dex_compilation_unit_->GetDexCache();
- return NeedsAccessCheck(type_index, dex_cache, finalizable);
+ return !compiler_driver_->CanAccessInstantiableTypeWithoutChecks(
+ LookupReferrerClass(), LookupResolvedType(type_index, *dex_compilation_unit_), finalizable);
}
bool HInstructionBuilder::CanDecodeQuickenedInfo() const {
@@ -2772,4 +2757,18 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction,
return true;
} // NOLINT(readability/fn_size)
+ObjPtr<mirror::Class> HInstructionBuilder::LookupResolvedType(
+ dex::TypeIndex type_index,
+ const DexCompilationUnit& compilation_unit) const {
+ return ClassLinker::LookupResolvedType(
+ type_index, compilation_unit.GetDexCache().Get(), compilation_unit.GetClassLoader().Get());
+}
+
+ObjPtr<mirror::Class> HInstructionBuilder::LookupReferrerClass() const {
+ // TODO: Cache the result in a Handle<mirror::Class>.
+ const DexFile::MethodId& method_id =
+ dex_compilation_unit_->GetDexFile()->GetMethodId(dex_compilation_unit_->GetDexMethodIndex());
+ return LookupResolvedType(method_id.class_idx_, *dex_compilation_unit_);
+}
+
} // namespace art
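
NeedsAccessCheck now answers its query with lookups instead of resolution: ClassLinker::LookupResolvedType returns the class only if it has already been resolved for the given dex cache and class loader, and null otherwise, so the builder no longer depends on the dex cache having been pre-populated. The distinction, in rough sketch form:

    // Lookup: read-only; returns null if the type was never resolved.
    ObjPtr<mirror::Class> klass =
        ClassLinker::LookupResolvedType(type_index, dex_cache, class_loader);
    // Resolution (ClassLinker::ResolveType) would instead load the class on
    // demand and record it in the dex cache as a side effect.
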
diff --git a/compiler/optimizing/instruction_builder.h b/compiler/optimizing/instruction_builder.h
index 3bb680ce44..e735a0c46d 100644
--- a/compiler/optimizing/instruction_builder.h
+++ b/compiler/optimizing/instruction_builder.h
@@ -106,11 +106,8 @@ class HInstructionBuilder : public ValueObject {
// Returns whether the current method needs access check for the type.
// Output parameter finalizable is set to whether the type is finalizable.
- bool NeedsAccessCheck(dex::TypeIndex type_index,
- Handle<mirror::DexCache> dex_cache,
- /*out*/bool* finalizable) const
+ bool NeedsAccessCheck(dex::TypeIndex type_index, /*out*/bool* finalizable) const
REQUIRES_SHARED(Locks::mutator_lock_);
- bool NeedsAccessCheck(dex::TypeIndex type_index, /*out*/bool* finalizable) const;
template<typename T>
void Unop_12x(const Instruction& instruction, Primitive::Type type, uint32_t dex_pc);
@@ -300,6 +297,12 @@ class HInstructionBuilder : public ValueObject {
// be found.
ArtField* ResolveField(uint16_t field_idx, bool is_static, bool is_put);
+ ObjPtr<mirror::Class> LookupResolvedType(dex::TypeIndex type_index,
+ const DexCompilationUnit& compilation_unit) const
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+ ObjPtr<mirror::Class> LookupReferrerClass() const REQUIRES_SHARED(Locks::mutator_lock_);
+
ArenaAllocator* const arena_;
HGraph* const graph_;
VariableSizedHandleScope* handles_;
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index bbf826ce7e..1047d3beb6 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -115,13 +115,18 @@ class IntrinsicSlowPathARM64 : public SlowPathCodeARM64 {
MoveArguments(invoke_, codegen);
- if (invoke_->IsInvokeStaticOrDirect()) {
- codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(),
- LocationFrom(kArtMethodRegister));
- } else {
- codegen->GenerateVirtualCall(invoke_->AsInvokeVirtual(), LocationFrom(kArtMethodRegister));
+ {
+ // Ensure that no pools are emitted between the BLR (emitted by Generate*Call)
+ // and RecordPcInfo.
+ vixl::EmissionCheckScope guard(codegen->GetVIXLAssembler(), kInvokeCodeMarginSizeInBytes);
+ if (invoke_->IsInvokeStaticOrDirect()) {
+ codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(),
+ LocationFrom(kArtMethodRegister));
+ } else {
+ codegen->GenerateVirtualCall(invoke_->AsInvokeVirtual(), LocationFrom(kArtMethodRegister));
+ }
+ codegen->RecordPcInfo(invoke_, invoke_->GetDexPc(), this);
}
- codegen->RecordPcInfo(invoke_, invoke_->GetDexPc(), this);
// Copy the result back to the expected output.
Location out = invoke_->GetLocations()->Out();
@@ -980,11 +985,12 @@ void IntrinsicLocationsBuilderARM64::VisitUnsafePutLongVolatile(HInvoke* invoke)
CreateIntIntIntIntToVoid(arena_, invoke);
}
-static void GenUnsafePut(LocationSummary* locations,
+static void GenUnsafePut(HInvoke* invoke,
Primitive::Type type,
bool is_volatile,
bool is_ordered,
CodeGeneratorARM64* codegen) {
+ LocationSummary* locations = invoke->GetLocations();
MacroAssembler* masm = codegen->GetVIXLAssembler();
Register base = WRegisterFrom(locations->InAt(1)); // Object pointer.
@@ -1007,7 +1013,7 @@ static void GenUnsafePut(LocationSummary* locations,
}
if (is_volatile || is_ordered) {
- codegen->StoreRelease(type, source, mem_op);
+ codegen->StoreRelease(invoke, type, source, mem_op, /* needs_null_check */ false);
} else {
codegen->Store(type, source, mem_op);
}
@@ -1020,63 +1026,63 @@ static void GenUnsafePut(LocationSummary* locations,
}
void IntrinsicCodeGeneratorARM64::VisitUnsafePut(HInvoke* invoke) {
- GenUnsafePut(invoke->GetLocations(),
+ GenUnsafePut(invoke,
Primitive::kPrimInt,
/* is_volatile */ false,
/* is_ordered */ false,
codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafePutOrdered(HInvoke* invoke) {
- GenUnsafePut(invoke->GetLocations(),
+ GenUnsafePut(invoke,
Primitive::kPrimInt,
/* is_volatile */ false,
/* is_ordered */ true,
codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafePutVolatile(HInvoke* invoke) {
- GenUnsafePut(invoke->GetLocations(),
+ GenUnsafePut(invoke,
Primitive::kPrimInt,
/* is_volatile */ true,
/* is_ordered */ false,
codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafePutObject(HInvoke* invoke) {
- GenUnsafePut(invoke->GetLocations(),
+ GenUnsafePut(invoke,
Primitive::kPrimNot,
/* is_volatile */ false,
/* is_ordered */ false,
codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
- GenUnsafePut(invoke->GetLocations(),
+ GenUnsafePut(invoke,
Primitive::kPrimNot,
/* is_volatile */ false,
/* is_ordered */ true,
codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
- GenUnsafePut(invoke->GetLocations(),
+ GenUnsafePut(invoke,
Primitive::kPrimNot,
/* is_volatile */ true,
/* is_ordered */ false,
codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafePutLong(HInvoke* invoke) {
- GenUnsafePut(invoke->GetLocations(),
+ GenUnsafePut(invoke,
Primitive::kPrimLong,
/* is_volatile */ false,
/* is_ordered */ false,
codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafePutLongOrdered(HInvoke* invoke) {
- GenUnsafePut(invoke->GetLocations(),
+ GenUnsafePut(invoke,
Primitive::kPrimLong,
/* is_volatile */ false,
/* is_ordered */ true,
codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafePutLongVolatile(HInvoke* invoke) {
- GenUnsafePut(invoke->GetLocations(),
+ GenUnsafePut(invoke,
Primitive::kPrimLong,
/* is_volatile */ true,
/* is_ordered */ false,
@@ -2825,9 +2831,13 @@ void IntrinsicCodeGeneratorARM64::VisitReferenceGetReferent(HInvoke* invoke) {
}
__ Cbnz(temp0, slow_path->GetEntryLabel());
- // Fast path.
- __ Ldr(out, HeapOperand(obj, mirror::Reference::ReferentOffset().Int32Value()));
- codegen_->MaybeRecordImplicitNullCheck(invoke);
+ {
+ // Ensure that no pools are emitted between the load and MaybeRecordImplicitNullCheck.
+ vixl::EmissionCheckScope guard(codegen_->GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
+ // Fast path.
+ __ Ldr(out, HeapOperand(obj, mirror::Reference::ReferentOffset().Int32Value()));
+ codegen_->MaybeRecordImplicitNullCheck(invoke);
+ }
codegen_->GetAssembler()->MaybeUnpoisonHeapReference(out);
__ Bind(slow_path->GetExitLabel());
}
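
The same pool-blocking discipline extends to calls: RecordPcInfo must capture the PC immediately after the BLR emitted by Generate*Call, which is why the scope here is sized kInvokeCodeMarginSizeInBytes to span the whole call sequence plus the recording. StoreRelease likewise now receives the instruction and a needs_null_check flag so that it can record any implicit null check itself, presumably under the same kind of emission scope, which is why GenUnsafePut passes the invoke through instead of just its LocationSummary.
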
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index 6cf9b83d44..64a68403e9 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -2004,31 +2004,48 @@ void IntrinsicCodeGeneratorMIPS::VisitStringEquals(HInvoke* invoke) {
__ Lw(temp2, arg, class_offset);
__ Bne(temp1, temp2, &return_false);
- // Load lengths of this and argument strings.
+ // Load `count` fields of this and argument strings.
__ Lw(temp1, str, count_offset);
__ Lw(temp2, arg, count_offset);
- // Check if lengths are equal, return false if they're not.
+ // Check if `count` fields are equal; return false if they're not.
+ // This also compares the compression style: if it differs, return false.
__ Bne(temp1, temp2, &return_false);
- // Return true if both strings are empty.
+ // Return true if both strings are empty. Even with string compression, `count == 0` means empty.
+ static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
+ "Expecting 0=compressed, 1=uncompressed");
__ Beqz(temp1, &return_true);
// Don't overwrite input registers
__ Move(TMP, str);
__ Move(temp3, arg);
- // Assertions that must hold in order to compare strings 2 characters at a time.
+ // Assertions that must hold in order to compare strings 4 bytes at a time.
DCHECK_ALIGNED(value_offset, 4);
static_assert(IsAligned<4>(kObjectAlignment), "String of odd length is not zero padded");
- // Loop to compare strings 2 characters at a time starting at the beginning of the string.
- // Ok to do this because strings are zero-padded.
+ // For string compression, calculate the number of bytes to compare (not chars).
+ if (mirror::kUseStringCompression) {
+ // Extract compression flag.
+ if (IsR2OrNewer()) {
+ __ Ext(temp2, temp1, 0, 1);
+ } else {
+ __ Sll(temp2, temp1, 31);
+ __ Srl(temp2, temp2, 31);
+ }
+ __ Srl(temp1, temp1, 1); // Extract length.
+ __ Sllv(temp1, temp1, temp2); // Double the byte count if uncompressed.
+ }
+
+ // Loop to compare strings 4 bytes at a time starting at the beginning of the string.
+ // Ok to do this because strings are zero-padded to kObjectAlignment.
__ Bind(&loop);
__ Lw(out, TMP, value_offset);
__ Lw(temp2, temp3, value_offset);
__ Bne(out, temp2, &return_false);
__ Addiu(TMP, TMP, 4);
__ Addiu(temp3, temp3, 4);
- __ Addiu(temp1, temp1, -2);
+ // With string compression, we have compared 4 bytes, otherwise 2 chars.
+ __ Addiu(temp1, temp1, mirror::kUseStringCompression ? -4 : -2);
__ Bgtz(temp1, &loop);
// Return true and exit the function.
@@ -2578,6 +2595,30 @@ void IntrinsicCodeGeneratorMIPS::VisitStringGetCharsNoCheck(HInvoke* invoke) {
__ Addu(dstPtr, dstPtr, AT);
}
+ if (mirror::kUseStringCompression) {
+ MipsLabel uncompressed_copy, compressed_loop;
+ const uint32_t count_offset = mirror::String::CountOffset().Uint32Value();
+ // Load count field and extract compression flag.
+ __ LoadFromOffset(kLoadWord, TMP, srcObj, count_offset);
+ __ Sll(TMP, TMP, 31);
+
+ // If string is uncompressed, use memcpy() path.
+ __ Bnez(TMP, &uncompressed_copy);
+
+ // Copy loop for compressed src, widening one 8-bit character to 16 bits at a time.
+ __ Addu(srcPtr, srcObj, srcBegin);
+ __ Bind(&compressed_loop);
+ __ LoadFromOffset(kLoadUnsignedByte, TMP, srcPtr, value_offset);
+ __ StoreToOffset(kStoreHalfword, TMP, dstPtr, 0);
+ __ Addiu(numChrs, numChrs, -1);
+ __ Addiu(srcPtr, srcPtr, 1);
+ __ Addiu(dstPtr, dstPtr, 2);
+ __ Bnez(numChrs, &compressed_loop);
+
+ __ B(&done);
+ __ Bind(&uncompressed_copy);
+ }
+
// Calculate source address.
__ Addiu(srcPtr, srcObj, value_offset);
if (IsR6()) {
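
The compressed branch of GetCharsNoCheck is a widening copy: one 8-bit unit is read and one 16-bit unit written per character, with the uncompressed case falling through to the original memcpy()-style path. As plain C++ (an illustrative shape only, not the generated code):

    // src: compressed 8-bit characters; dst: UTF-16 code units.
    for (uint32_t i = 0; i < num_chars; ++i) {
      dst[i] = static_cast<uint16_t>(src[i]);  // zero-extend 8 -> 16 bits
    }
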
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 00a1fa11bb..3888828722 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -1607,31 +1607,42 @@ void IntrinsicCodeGeneratorMIPS64::VisitStringEquals(HInvoke* invoke) {
__ Lw(temp2, arg, class_offset);
__ Bnec(temp1, temp2, &return_false);
- // Load lengths of this and argument strings.
+ // Load `count` fields of this and argument strings.
__ Lw(temp1, str, count_offset);
__ Lw(temp2, arg, count_offset);
- // Check if lengths are equal, return false if they're not.
+ // Check if `count` fields are equal; return false if they're not.
+ // This also compares the compression style: if it differs, return false.
__ Bnec(temp1, temp2, &return_false);
- // Return true if both strings are empty.
+ // Return true if both strings are empty. Even with string compression, `count == 0` means empty.
+ static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
+ "Expecting 0=compressed, 1=uncompressed");
__ Beqzc(temp1, &return_true);
// Don't overwrite input registers
__ Move(TMP, str);
__ Move(temp3, arg);
- // Assertions that must hold in order to compare strings 4 characters at a time.
+ // Assertions that must hold in order to compare strings 8 bytes at a time.
DCHECK_ALIGNED(value_offset, 8);
static_assert(IsAligned<8>(kObjectAlignment), "String of odd length is not zero padded");
- // Loop to compare strings 4 characters at a time starting at the beginning of the string.
- // Ok to do this because strings are zero-padded to be 8-byte aligned.
+ if (mirror::kUseStringCompression) {
+ // For string compression, calculate the number of bytes to compare (not chars).
+ __ Dext(temp2, temp1, 0, 1); // Extract compression flag.
+ __ Srl(temp1, temp1, 1); // Extract length.
+ __ Sllv(temp1, temp1, temp2); // Double the byte count if uncompressed.
+ }
+
+ // Loop to compare strings 8 bytes at a time starting at the beginning of the string.
+ // Ok to do this because strings are zero-padded to kObjectAlignment.
__ Bind(&loop);
__ Ld(out, TMP, value_offset);
__ Ld(temp2, temp3, value_offset);
__ Bnec(out, temp2, &return_false);
__ Daddiu(TMP, TMP, 8);
__ Daddiu(temp3, temp3, 8);
- __ Addiu(temp1, temp1, -4);
+ // With string compression, we have compared 8 bytes, otherwise 4 chars.
+ __ Addiu(temp1, temp1, mirror::kUseStringCompression ? -8 : -4);
__ Bgtzc(temp1, &loop);
// Return true and exit the function.
@@ -1912,6 +1923,30 @@ void IntrinsicCodeGeneratorMIPS64::VisitStringGetCharsNoCheck(HInvoke* invoke) {
__ Daddiu(dstPtr, dstObj, data_offset);
__ Dlsa(dstPtr, dstBegin, dstPtr, char_shift);
+ if (mirror::kUseStringCompression) {
+ Mips64Label uncompressed_copy, compressed_loop;
+ const uint32_t count_offset = mirror::String::CountOffset().Uint32Value();
+ // Load count field and extract compression flag.
+ __ LoadFromOffset(kLoadWord, TMP, srcObj, count_offset);
+ __ Dext(TMP, TMP, 0, 1);
+
+ // If string is uncompressed, use memcpy() path.
+ __ Bnezc(TMP, &uncompressed_copy);
+
+ // Copy loop for compressed src, widening one 8-bit character to 16 bits at a time.
+ __ Daddu(srcPtr, srcObj, srcBegin);
+ __ Bind(&compressed_loop);
+ __ LoadFromOffset(kLoadUnsignedByte, TMP, srcPtr, value_offset);
+ __ StoreToOffset(kStoreHalfword, TMP, dstPtr, 0);
+ __ Daddiu(numChrs, numChrs, -1);
+ __ Daddiu(srcPtr, srcPtr, 1);
+ __ Daddiu(dstPtr, dstPtr, 2);
+ __ Bnezc(numChrs, &compressed_loop);
+
+ __ Bc(&done);
+ __ Bind(&uncompressed_copy);
+ }
+
// Calculate source address.
__ Daddiu(srcPtr, srcObj, value_offset);
__ Dlsa(srcPtr, srcBegin, srcPtr, char_shift);
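
Both StringEquals rewrites use the packed count field twice. Comparing the raw count words checks length and compression style in a single branch, and the flag then doubles as a shift amount to turn the char count into a byte count for the loop. The Ext/Sll/Srl/Sllv (MIPS32) and Dext/Srl/Sllv (MIPS64) sequences compute, in plain C++ terms:

    uint32_t flag  = count & 1u;             // 0 = compressed, 1 = uncompressed
    uint32_t bytes = (count >> 1u) << flag;  // == chars, or 2 * chars if uncompressed

after which the loop consumes 4 (MIPS32) or 8 (MIPS64) bytes per iteration.
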
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 727ca7d893..0375c66e42 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -306,7 +306,7 @@ class OptimizingCompiler FINAL : public Compiler {
InvokeType invoke_type,
uint16_t class_def_idx,
uint32_t method_idx,
- jobject class_loader,
+ Handle<mirror::ClassLoader> class_loader,
const DexFile& dex_file,
Handle<mirror::DexCache> dex_cache) const OVERRIDE;
@@ -375,7 +375,7 @@ class OptimizingCompiler FINAL : public Compiler {
InvokeType invoke_type,
uint16_t class_def_idx,
uint32_t method_idx,
- jobject class_loader,
+ Handle<mirror::ClassLoader> class_loader,
const DexFile& dex_file,
Handle<mirror::DexCache> dex_cache,
ArtMethod* method,
@@ -875,7 +875,7 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* arena,
InvokeType invoke_type,
uint16_t class_def_idx,
uint32_t method_idx,
- jobject class_loader,
+ Handle<mirror::ClassLoader> class_loader,
const DexFile& dex_file,
Handle<mirror::DexCache> dex_cache,
ArtMethod* method,
@@ -946,11 +946,8 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* arena,
const uint8_t* interpreter_metadata = nullptr;
if (method == nullptr) {
ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<1> hs(soa.Self());
- Handle<mirror::ClassLoader> loader(hs.NewHandle(
- soa.Decode<mirror::ClassLoader>(class_loader)));
method = compiler_driver->ResolveMethod(
- soa, dex_cache, loader, &dex_compilation_unit, method_idx, invoke_type);
+ soa, dex_cache, class_loader, &dex_compilation_unit, method_idx, invoke_type);
}
// For AOT compilation, we may not get a method, for example if its class is erroneous.
// JIT should always have a method.
@@ -959,16 +956,6 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* arena,
graph->SetArtMethod(method);
ScopedObjectAccess soa(Thread::Current());
interpreter_metadata = method->GetQuickenedInfo(class_linker->GetImagePointerSize());
- dex::TypeIndex type_index = method->GetDeclaringClass()->GetDexTypeIndex();
-
- // Update the dex cache if the type is not in it yet. Note that under AOT,
- // the verifier must have set it, but under JIT, there's no guarantee, as we
- // don't necessarily run the verifier.
- // The compiler and the compiler driver assume the compiling class is
- // in the dex cache.
- if (dex_cache->GetResolvedType(type_index) == nullptr) {
- dex_cache->SetResolvedType(type_index, method->GetDeclaringClass());
- }
}
std::unique_ptr<CodeGenerator> codegen(
@@ -1049,7 +1036,7 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
InvokeType invoke_type,
uint16_t class_def_idx,
uint32_t method_idx,
- jobject jclass_loader,
+ Handle<mirror::ClassLoader> jclass_loader,
const DexFile& dex_file,
Handle<mirror::DexCache> dex_cache) const {
CompilerDriver* compiler_driver = GetCompilerDriver();
@@ -1163,7 +1150,6 @@ bool OptimizingCompiler::JitCompile(Thread* self,
Handle<mirror::DexCache> dex_cache(hs.NewHandle(method->GetDexCache()));
DCHECK(method->IsCompilable());
- jobject jclass_loader = class_loader.ToJObject();
const DexFile* dex_file = method->GetDexFile();
const uint16_t class_def_idx = method->GetClassDefIndex();
const DexFile::CodeItem* code_item = dex_file->GetCodeItem(method->GetCodeItemOffset());
@@ -1187,7 +1173,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
invoke_type,
class_def_idx,
method_idx,
- jclass_loader,
+ class_loader,
*dex_file,
dex_cache,
method,
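
The deleted dex-cache seeding was only necessary while the compiler assumed the compiling class was reachable through DexCache::GetResolvedType. With type queries now going through ClassLinker::LookupResolvedType with the class loader, that assumption is gone, so the JIT no longer needs to pre-populate the dex cache before compiling.
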
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index c55fccc7d3..6e332ca59b 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -65,11 +65,13 @@ ReferenceTypeInfo::TypeHandle ReferenceTypePropagation::HandleCache::GetThrowabl
class ReferenceTypePropagation::RTPVisitor : public HGraphDelegateVisitor {
public:
RTPVisitor(HGraph* graph,
+ Handle<mirror::ClassLoader> class_loader,
Handle<mirror::DexCache> hint_dex_cache,
HandleCache* handle_cache,
ArenaVector<HInstruction*>* worklist,
bool is_first_run)
: HGraphDelegateVisitor(graph),
+ class_loader_(class_loader),
hint_dex_cache_(hint_dex_cache),
handle_cache_(handle_cache),
worklist_(worklist),
@@ -101,6 +103,7 @@ class ReferenceTypePropagation::RTPVisitor : public HGraphDelegateVisitor {
bool is_exact);
private:
+ Handle<mirror::ClassLoader> class_loader_;
Handle<mirror::DexCache> hint_dex_cache_;
HandleCache* handle_cache_;
ArenaVector<HInstruction*>* worklist_;
@@ -108,11 +111,13 @@ class ReferenceTypePropagation::RTPVisitor : public HGraphDelegateVisitor {
};
ReferenceTypePropagation::ReferenceTypePropagation(HGraph* graph,
+ Handle<mirror::ClassLoader> class_loader,
Handle<mirror::DexCache> hint_dex_cache,
VariableSizedHandleScope* handles,
bool is_first_run,
const char* name)
: HOptimization(graph, name),
+ class_loader_(class_loader),
hint_dex_cache_(hint_dex_cache),
handle_cache_(handles),
worklist_(graph->GetArena()->Adapter(kArenaAllocReferenceTypePropagation)),
@@ -147,7 +152,12 @@ void ReferenceTypePropagation::ValidateTypes() {
}
void ReferenceTypePropagation::Visit(HInstruction* instruction) {
- RTPVisitor visitor(graph_, hint_dex_cache_, &handle_cache_, &worklist_, is_first_run_);
+ RTPVisitor visitor(graph_,
+ class_loader_,
+ hint_dex_cache_,
+ &handle_cache_,
+ &worklist_,
+ is_first_run_);
instruction->Accept(&visitor);
}
@@ -321,7 +331,12 @@ void ReferenceTypePropagation::Run() {
}
void ReferenceTypePropagation::VisitBasicBlock(HBasicBlock* block) {
- RTPVisitor visitor(graph_, hint_dex_cache_, &handle_cache_, &worklist_, is_first_run_);
+ RTPVisitor visitor(graph_,
+ class_loader_,
+ hint_dex_cache_,
+ &handle_cache_,
+ &worklist_,
+ is_first_run_);
// Handle Phis first as there might be instructions in the same block who depend on them.
for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
VisitPhi(it.Current()->AsPhi());
@@ -542,8 +557,9 @@ void ReferenceTypePropagation::RTPVisitor::UpdateReferenceTypeInfo(HInstruction*
ScopedObjectAccess soa(Thread::Current());
ObjPtr<mirror::DexCache> dex_cache = FindDexCacheWithHint(soa.Self(), dex_file, hint_dex_cache_);
- // Get type from dex cache assuming it was populated by the verifier.
- SetClassAsTypeInfo(instr, dex_cache->GetResolvedType(type_idx), is_exact);
+ ObjPtr<mirror::Class> klass =
+ ClassLinker::LookupResolvedType(type_idx, dex_cache, class_loader_.Get());
+ SetClassAsTypeInfo(instr, klass, is_exact);
}
void ReferenceTypePropagation::RTPVisitor::VisitNewInstance(HNewInstance* instr) {
@@ -556,25 +572,13 @@ void ReferenceTypePropagation::RTPVisitor::VisitNewArray(HNewArray* instr) {
SetClassAsTypeInfo(instr, instr->GetLoadClass()->GetClass().Get(), /* is_exact */ true);
}
-static mirror::Class* GetClassFromDexCache(Thread* self,
- const DexFile& dex_file,
- dex::TypeIndex type_idx,
- Handle<mirror::DexCache> hint_dex_cache)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ObjPtr<mirror::DexCache> dex_cache = FindDexCacheWithHint(self, dex_file, hint_dex_cache);
- // Get type from dex cache assuming it was populated by the verifier.
- return dex_cache->GetResolvedType(type_idx);
-}
-
void ReferenceTypePropagation::RTPVisitor::VisitParameterValue(HParameterValue* instr) {
// We check if the existing type is valid: the inliner may have set it.
if (instr->GetType() == Primitive::kPrimNot && !instr->GetReferenceTypeInfo().IsValid()) {
- ScopedObjectAccess soa(Thread::Current());
- mirror::Class* resolved_class = GetClassFromDexCache(soa.Self(),
- instr->GetDexFile(),
- instr->GetTypeIndex(),
- hint_dex_cache_);
- SetClassAsTypeInfo(instr, resolved_class, /* is_exact */ false);
+ UpdateReferenceTypeInfo(instr,
+ instr->GetTypeIndex(),
+ instr->GetDexFile(),
+ /* is_exact */ false);
}
}
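
VisitParameterValue now funnels through UpdateReferenceTypeInfo, which performs the loader-aware LookupResolvedType, rather than the deleted GetClassFromDexCache helper that trusted the dex cache to have been populated by the verifier; this is the same dex-cache-to-class-loader shift as in the other hunks.
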
diff --git a/compiler/optimizing/reference_type_propagation.h b/compiler/optimizing/reference_type_propagation.h
index 4663471729..215e96786b 100644
--- a/compiler/optimizing/reference_type_propagation.h
+++ b/compiler/optimizing/reference_type_propagation.h
@@ -33,6 +33,7 @@ namespace art {
class ReferenceTypePropagation : public HOptimization {
public:
ReferenceTypePropagation(HGraph* graph,
+ Handle<mirror::ClassLoader> class_loader,
Handle<mirror::DexCache> hint_dex_cache,
VariableSizedHandleScope* handles,
bool is_first_run,
@@ -105,6 +106,8 @@ class ReferenceTypePropagation : public HOptimization {
void ValidateTypes();
+ Handle<mirror::ClassLoader> class_loader_;
+
// Note: hint_dex_cache_ is usually, but not necessarily, the dex cache associated with
// graph_->GetDexFile(). Since we may look up also in other dex files, it's used only
// as a hint, to reduce the number of calls to the costly ClassLinker::FindDexCache().
diff --git a/compiler/optimizing/reference_type_propagation_test.cc b/compiler/optimizing/reference_type_propagation_test.cc
index b061c871b0..84a4bab1a9 100644
--- a/compiler/optimizing/reference_type_propagation_test.cc
+++ b/compiler/optimizing/reference_type_propagation_test.cc
@@ -38,6 +38,7 @@ class ReferenceTypePropagationTest : public CommonCompilerTest {
void SetupPropagation(VariableSizedHandleScope* handles) {
graph_->InitializeInexactObjectRTI(handles);
propagation_ = new (&allocator_) ReferenceTypePropagation(graph_,
+ Handle<mirror::ClassLoader>(),
Handle<mirror::DexCache>(),
handles,
true,
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index 487e4dd498..50ab11bc23 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -499,7 +499,11 @@ GraphAnalysisResult SsaBuilder::BuildSsa() {
// 4) Compute type of reference type instructions. The pass assumes that
// NullConstant has been fixed up.
- ReferenceTypePropagation(graph_, dex_cache_, handles_, /* is_first_run */ true).Run();
+ ReferenceTypePropagation(graph_,
+ class_loader_,
+ dex_cache_,
+ handles_,
+ /* is_first_run */ true).Run();
// 5) HInstructionBuilder duplicated ArrayGet instructions with ambiguous type
// (int/float or long/double) and marked ArraySets with ambiguous input type.
diff --git a/compiler/optimizing/ssa_builder.h b/compiler/optimizing/ssa_builder.h
index 45dac54115..978f113ec4 100644
--- a/compiler/optimizing/ssa_builder.h
+++ b/compiler/optimizing/ssa_builder.h
@@ -48,9 +48,11 @@ namespace art {
class SsaBuilder : public ValueObject {
public:
SsaBuilder(HGraph* graph,
+ Handle<mirror::ClassLoader> class_loader,
Handle<mirror::DexCache> dex_cache,
VariableSizedHandleScope* handles)
: graph_(graph),
+ class_loader_(class_loader),
dex_cache_(dex_cache),
handles_(handles),
agets_fixed_(false),
@@ -115,6 +117,7 @@ class SsaBuilder : public ValueObject {
void RemoveRedundantUninitializedStrings();
HGraph* graph_;
+ Handle<mirror::ClassLoader> class_loader_;
Handle<mirror::DexCache> dex_cache_;
VariableSizedHandleScope* const handles_;
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
index f8e01b7537..1bcc8e1ace 100644
--- a/compiler/optimizing/stack_map_stream.cc
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -38,19 +38,14 @@ void StackMapStream::BeginStackMapEntry(uint32_t dex_pc,
current_entry_.native_pc_code_offset = CodeOffset::FromOffset(native_pc_offset, instruction_set_);
current_entry_.register_mask = register_mask;
current_entry_.sp_mask = sp_mask;
- current_entry_.num_dex_registers = num_dex_registers;
current_entry_.inlining_depth = inlining_depth;
- current_entry_.dex_register_locations_start_index = dex_register_locations_.size();
current_entry_.inline_infos_start_index = inline_infos_.size();
- current_entry_.dex_register_map_hash = 0;
- current_entry_.same_dex_register_map_as_ = kNoSameDexMapFound;
current_entry_.stack_mask_index = 0;
- if (num_dex_registers != 0) {
- current_entry_.live_dex_registers_mask =
- ArenaBitVector::Create(allocator_, num_dex_registers, true, kArenaAllocStackMapStream);
- } else {
- current_entry_.live_dex_registers_mask = nullptr;
- }
+ current_entry_.dex_register_entry.num_dex_registers = num_dex_registers;
+ current_entry_.dex_register_entry.locations_start_index = dex_register_locations_.size();
+ current_entry_.dex_register_entry.live_dex_registers_mask = (num_dex_registers != 0)
+ ? ArenaBitVector::Create(allocator_, num_dex_registers, true, kArenaAllocStackMapStream)
+ : nullptr;
if (sp_mask != nullptr) {
stack_mask_max_ = std::max(stack_mask_max_, sp_mask->GetHighestBitSet());
@@ -65,7 +60,7 @@ void StackMapStream::BeginStackMapEntry(uint32_t dex_pc,
}
void StackMapStream::EndStackMapEntry() {
- current_entry_.same_dex_register_map_as_ = FindEntryWithTheSameDexMap();
+ current_entry_.dex_register_map_index = AddDexRegisterMapEntry(current_entry_.dex_register_entry);
stack_maps_.push_back(current_entry_);
current_entry_ = StackMapEntry();
}
@@ -91,19 +86,15 @@ void StackMapStream::AddDexRegisterEntry(DexRegisterLocation::Kind kind, int32_t
dex_register_locations_.push_back(index);
location_catalog_entries_indices_.Insert(std::make_pair(location, index));
}
-
- if (in_inline_frame_) {
- // TODO: Support sharing DexRegisterMap across InlineInfo.
- DCHECK_LT(current_dex_register_, current_inline_info_.num_dex_registers);
- current_inline_info_.live_dex_registers_mask->SetBit(current_dex_register_);
- } else {
- DCHECK_LT(current_dex_register_, current_entry_.num_dex_registers);
- current_entry_.live_dex_registers_mask->SetBit(current_dex_register_);
- current_entry_.dex_register_map_hash += (1 <<
- (current_dex_register_ % (sizeof(current_entry_.dex_register_map_hash) * kBitsPerByte)));
- current_entry_.dex_register_map_hash += static_cast<uint32_t>(value);
- current_entry_.dex_register_map_hash += static_cast<uint32_t>(kind);
- }
+ DexRegisterMapEntry* const entry = in_inline_frame_
+ ? &current_inline_info_.dex_register_entry
+ : &current_entry_.dex_register_entry;
+ DCHECK_LT(current_dex_register_, entry->num_dex_registers);
+ entry->live_dex_registers_mask->SetBit(current_dex_register_);
+ entry->hash += (1 <<
+ (current_dex_register_ % (sizeof(DexRegisterMapEntry::hash) * kBitsPerByte)));
+ entry->hash += static_cast<uint32_t>(value);
+ entry->hash += static_cast<uint32_t>(kind);
}
current_dex_register_++;
}
@@ -124,20 +115,19 @@ void StackMapStream::BeginInlineInfoEntry(ArtMethod* method,
current_inline_info_.method_index = method->GetDexMethodIndexUnchecked();
}
current_inline_info_.dex_pc = dex_pc;
- current_inline_info_.num_dex_registers = num_dex_registers;
- current_inline_info_.dex_register_locations_start_index = dex_register_locations_.size();
- if (num_dex_registers != 0) {
- current_inline_info_.live_dex_registers_mask =
- ArenaBitVector::Create(allocator_, num_dex_registers, true, kArenaAllocStackMapStream);
- } else {
- current_inline_info_.live_dex_registers_mask = nullptr;
- }
+ current_inline_info_.dex_register_entry.num_dex_registers = num_dex_registers;
+ current_inline_info_.dex_register_entry.locations_start_index = dex_register_locations_.size();
+ current_inline_info_.dex_register_entry.live_dex_registers_mask = (num_dex_registers != 0)
+ ? ArenaBitVector::Create(allocator_, num_dex_registers, true, kArenaAllocStackMapStream)
+ : nullptr;
current_dex_register_ = 0;
}
void StackMapStream::EndInlineInfoEntry() {
+ current_inline_info_.dex_register_map_index =
+ AddDexRegisterMapEntry(current_inline_info_.dex_register_entry);
DCHECK(in_inline_frame_);
- DCHECK_EQ(current_dex_register_, current_inline_info_.num_dex_registers)
+ DCHECK_EQ(current_dex_register_, current_inline_info_.dex_register_entry.num_dex_registers)
<< "Inline information contains less registers than expected";
in_inline_frame_ = false;
inline_infos_.push_back(current_inline_info_);
@@ -193,8 +183,7 @@ size_t StackMapStream::ComputeDexRegisterLocationCatalogSize() const {
return size;
}
-size_t StackMapStream::ComputeDexRegisterMapSize(uint32_t num_dex_registers,
- const BitVector* live_dex_registers_mask) const {
+size_t StackMapStream::DexRegisterMapEntry::ComputeSize(size_t catalog_size) const {
// For num_dex_registers == 0u live_dex_registers_mask may be null.
if (num_dex_registers == 0u) {
return 0u; // No register map will be emitted.
@@ -208,8 +197,7 @@ size_t StackMapStream::ComputeDexRegisterMapSize(uint32_t num_dex_registers,
// Compute the size of the set of live Dex register entries.
size_t number_of_live_dex_registers = live_dex_registers_mask->NumSetBits();
size_t map_entries_size_in_bits =
- DexRegisterMap::SingleEntrySizeInBits(location_catalog_entries_.size())
- * number_of_live_dex_registers;
+ DexRegisterMap::SingleEntrySizeInBits(catalog_size) * number_of_live_dex_registers;
size_t map_entries_size_in_bytes =
RoundUp(map_entries_size_in_bits, kBitsPerByte) / kBitsPerByte;
size += map_entries_size_in_bytes;
@@ -218,18 +206,8 @@ size_t StackMapStream::ComputeDexRegisterMapSize(uint32_t num_dex_registers,
size_t StackMapStream::ComputeDexRegisterMapsSize() const {
size_t size = 0;
- size_t inline_info_index = 0;
- for (const StackMapEntry& entry : stack_maps_) {
- if (entry.same_dex_register_map_as_ == kNoSameDexMapFound) {
- size += ComputeDexRegisterMapSize(entry.num_dex_registers, entry.live_dex_registers_mask);
- } else {
- // Entries with the same dex map will have the same offset.
- }
- for (size_t j = 0; j < entry.inlining_depth; ++j) {
- InlineInfoEntry inline_entry = inline_infos_[inline_info_index++];
- size += ComputeDexRegisterMapSize(inline_entry.num_dex_registers,
- inline_entry.live_dex_registers_mask);
- }
+ for (const DexRegisterMapEntry& entry : dex_register_entries_) {
+ size += entry.ComputeSize(location_catalog_entries_.size());
}
return size;
}
@@ -264,6 +242,30 @@ void StackMapStream::ComputeInlineInfoEncoding(InlineInfoEncoding* encoding,
encoding->SetFromSizes(method_index_max, dex_pc_max, extra_data_max, dex_register_maps_bytes);
}
+size_t StackMapStream::MaybeCopyDexRegisterMap(DexRegisterMapEntry& entry,
+ size_t* current_offset,
+ MemoryRegion dex_register_locations_region) {
+ DCHECK(current_offset != nullptr);
+ if ((entry.num_dex_registers == 0) || (entry.live_dex_registers_mask->NumSetBits() == 0)) {
+ // No dex register map needed.
+ return StackMap::kNoDexRegisterMap;
+ }
+ if (entry.offset == DexRegisterMapEntry::kOffsetUnassigned) {
+ // Not already copied, need to copy and assign an offset.
+ entry.offset = *current_offset;
+ const size_t entry_size = entry.ComputeSize(location_catalog_entries_.size());
+ DexRegisterMap dex_register_map(
+ dex_register_locations_region.Subregion(entry.offset, entry_size));
+ *current_offset += entry_size;
+ // Fill in the map since it was just added.
+ FillInDexRegisterMap(dex_register_map,
+ entry.num_dex_registers,
+ *entry.live_dex_registers_mask,
+ entry.locations_start_index);
+ }
+ return entry.offset;
+}
+
void StackMapStream::FillIn(MemoryRegion region) {
DCHECK_EQ(0u, current_entry_.dex_pc) << "EndStackMapEntry not called after BeginStackMapEntry";
DCHECK_NE(0u, needed_size_) << "PrepareForFillIn not called before FillIn";
@@ -311,35 +313,10 @@ void StackMapStream::FillIn(MemoryRegion region) {
stack_map.SetRegisterMaskIndex(encoding.stack_map.encoding, entry.register_mask_index);
stack_map.SetStackMaskIndex(encoding.stack_map.encoding, entry.stack_mask_index);
- if (entry.num_dex_registers == 0 || (entry.live_dex_registers_mask->NumSetBits() == 0)) {
- // No dex map available.
- stack_map.SetDexRegisterMapOffset(encoding.stack_map.encoding, StackMap::kNoDexRegisterMap);
- } else {
- // Search for an entry with the same dex map.
- if (entry.same_dex_register_map_as_ != kNoSameDexMapFound) {
- // If we have a hit reuse the offset.
- stack_map.SetDexRegisterMapOffset(
- encoding.stack_map.encoding,
- code_info.GetStackMapAt(entry.same_dex_register_map_as_, encoding)
- .GetDexRegisterMapOffset(encoding.stack_map.encoding));
- } else {
- // New dex registers maps should be added to the stack map.
- MemoryRegion register_region = dex_register_locations_region.Subregion(
- next_dex_register_map_offset,
- ComputeDexRegisterMapSize(entry.num_dex_registers, entry.live_dex_registers_mask));
- next_dex_register_map_offset += register_region.size();
- DexRegisterMap dex_register_map(register_region);
- stack_map.SetDexRegisterMapOffset(
- encoding.stack_map.encoding,
- register_region.begin() - dex_register_locations_region.begin());
-
- // Set the dex register location.
- FillInDexRegisterMap(dex_register_map,
- entry.num_dex_registers,
- *entry.live_dex_registers_mask,
- entry.dex_register_locations_start_index);
- }
- }
+ size_t offset = MaybeCopyDexRegisterMap(dex_register_entries_[entry.dex_register_map_index],
+ &next_dex_register_map_offset,
+ dex_register_locations_region);
+ stack_map.SetDexRegisterMapOffset(encoding.stack_map.encoding, offset);
// Set the inlining info.
if (entry.inlining_depth != 0) {
@@ -371,29 +348,13 @@ void StackMapStream::FillIn(MemoryRegion region) {
inline_info.SetExtraDataAtDepth(encoding.inline_info.encoding, depth, 1);
}
inline_info.SetDexPcAtDepth(encoding.inline_info.encoding, depth, inline_entry.dex_pc);
- if (inline_entry.num_dex_registers == 0) {
- // No dex map available.
- inline_info.SetDexRegisterMapOffsetAtDepth(encoding.inline_info.encoding,
- depth,
- StackMap::kNoDexRegisterMap);
- DCHECK(inline_entry.live_dex_registers_mask == nullptr);
- } else {
- MemoryRegion register_region = dex_register_locations_region.Subregion(
- next_dex_register_map_offset,
- ComputeDexRegisterMapSize(inline_entry.num_dex_registers,
- inline_entry.live_dex_registers_mask));
- next_dex_register_map_offset += register_region.size();
- DexRegisterMap dex_register_map(register_region);
- inline_info.SetDexRegisterMapOffsetAtDepth(
- encoding.inline_info.encoding,
- depth,
- register_region.begin() - dex_register_locations_region.begin());
-
- FillInDexRegisterMap(dex_register_map,
- inline_entry.num_dex_registers,
- *inline_entry.live_dex_registers_mask,
- inline_entry.dex_register_locations_start_index);
- }
+ size_t dex_register_map_offset = MaybeCopyDexRegisterMap(
+ dex_register_entries_[inline_entry.dex_register_map_index],
+ &next_dex_register_map_offset,
+ dex_register_locations_region);
+ inline_info.SetDexRegisterMapOffsetAtDepth(encoding.inline_info.encoding,
+ depth,
+ dex_register_map_offset);
}
} else if (encoding.stack_map.encoding.GetInlineInfoEncoding().BitSize() > 0) {
stack_map.SetInlineInfoIndex(encoding.stack_map.encoding, StackMap::kNoInlineInfo);
@@ -448,34 +409,31 @@ void StackMapStream::FillInDexRegisterMap(DexRegisterMap dex_register_map,
}
}
-size_t StackMapStream::FindEntryWithTheSameDexMap() {
- size_t current_entry_index = stack_maps_.size();
- auto entries_it = dex_map_hash_to_stack_map_indices_.find(current_entry_.dex_register_map_hash);
+size_t StackMapStream::AddDexRegisterMapEntry(const DexRegisterMapEntry& entry) {
+ const size_t current_entry_index = dex_register_entries_.size();
+ auto entries_it = dex_map_hash_to_stack_map_indices_.find(entry.hash);
if (entries_it == dex_map_hash_to_stack_map_indices_.end()) {
// We don't have a perfect hash function, so we need a list to collect all stack maps
// which might have the same dex register map.
ArenaVector<uint32_t> stack_map_indices(allocator_->Adapter(kArenaAllocStackMapStream));
stack_map_indices.push_back(current_entry_index);
- dex_map_hash_to_stack_map_indices_.Put(current_entry_.dex_register_map_hash,
- std::move(stack_map_indices));
- return kNoSameDexMapFound;
- }
-
- // We might have collisions, so we need to check whether or not we really have a match.
- for (uint32_t test_entry_index : entries_it->second) {
- if (HaveTheSameDexMaps(GetStackMap(test_entry_index), current_entry_)) {
- return test_entry_index;
+ dex_map_hash_to_stack_map_indices_.Put(entry.hash, std::move(stack_map_indices));
+ } else {
+ // We might have collisions, so we need to check whether or not we really have a match.
+ for (uint32_t test_entry_index : entries_it->second) {
+ if (DexRegisterMapEntryEquals(dex_register_entries_[test_entry_index], entry)) {
+ return test_entry_index;
+ }
}
+ entries_it->second.push_back(current_entry_index);
}
- entries_it->second.push_back(current_entry_index);
- return kNoSameDexMapFound;
+ dex_register_entries_.push_back(entry);
+ return current_entry_index;
}
-bool StackMapStream::HaveTheSameDexMaps(const StackMapEntry& a, const StackMapEntry& b) const {
- if (a.live_dex_registers_mask == nullptr && b.live_dex_registers_mask == nullptr) {
- return true;
- }
- if (a.live_dex_registers_mask == nullptr || b.live_dex_registers_mask == nullptr) {
+bool StackMapStream::DexRegisterMapEntryEquals(const DexRegisterMapEntry& a,
+ const DexRegisterMapEntry& b) const {
+ if ((a.live_dex_registers_mask == nullptr) != (b.live_dex_registers_mask == nullptr)) {
return false;
}
if (a.num_dex_registers != b.num_dex_registers) {
@@ -489,12 +447,12 @@ bool StackMapStream::HaveTheSameDexMaps(const StackMapEntry& a, const StackMapEn
}
size_t number_of_live_dex_registers = a.live_dex_registers_mask->NumSetBits();
DCHECK_LE(number_of_live_dex_registers, dex_register_locations_.size());
- DCHECK_LE(a.dex_register_locations_start_index,
+ DCHECK_LE(a.locations_start_index,
dex_register_locations_.size() - number_of_live_dex_registers);
- DCHECK_LE(b.dex_register_locations_start_index,
+ DCHECK_LE(b.locations_start_index,
dex_register_locations_.size() - number_of_live_dex_registers);
- auto a_begin = dex_register_locations_.begin() + a.dex_register_locations_start_index;
- auto b_begin = dex_register_locations_.begin() + b.dex_register_locations_start_index;
+ auto a_begin = dex_register_locations_.begin() + a.locations_start_index;
+ auto b_begin = dex_register_locations_.begin() + b.locations_start_index;
if (!std::equal(a_begin, a_begin + number_of_live_dex_registers, b_begin)) {
return false;
}
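
Two details of DexRegisterMapEntryEquals are worth isolating: the collapsed null check, where (a == nullptr) != (b == nullptr) rejects exactly the mixed case in one expression, and the std::equal call that compares two windows into the single shared locations array. A compact sketch under simplified types (MaskedSlice and SliceEquals are invented here; the real method also compares the live-register bit masks):

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    // Simplified shape of an entry: an optional liveness mask plus a window
    // [start, start + count) into one shared flat array of locations.
    struct MaskedSlice {
      const std::vector<bool>* live_mask;  // Null when no registers are live.
      size_t start;
      size_t count;
    };

    bool SliceEquals(const MaskedSlice& a, const MaskedSlice& b,
                     const std::vector<int>& shared_locations) {
      if ((a.live_mask == nullptr) != (b.live_mask == nullptr)) {
        return false;  // Exactly one side is empty: not equal.
      }
      if (a.count != b.count) {
        return false;
      }
      if (a.live_mask == nullptr) {
        return true;  // Both entries are empty.
      }
      // Compare the two windows of the shared array element by element.
      auto a_begin = shared_locations.begin() + a.start;
      auto b_begin = shared_locations.begin() + b.start;
      return std::equal(a_begin, a_begin + a.count, b_begin);
    }
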
@@ -597,10 +555,10 @@ void StackMapStream::CheckCodeInfo(MemoryRegion region) const {
CheckDexRegisterMap(code_info,
code_info.GetDexRegisterMapOf(
- stack_map, encoding, entry.num_dex_registers),
- entry.num_dex_registers,
- entry.live_dex_registers_mask,
- entry.dex_register_locations_start_index);
+ stack_map, encoding, entry.dex_register_entry.num_dex_registers),
+ entry.dex_register_entry.num_dex_registers,
+ entry.dex_register_entry.live_dex_registers_mask,
+ entry.dex_register_entry.locations_start_index);
// Check inline info.
DCHECK_EQ(stack_map.HasInlineInfo(stack_map_encoding), (entry.inlining_depth != 0));
@@ -623,10 +581,13 @@ void StackMapStream::CheckCodeInfo(MemoryRegion region) const {
CheckDexRegisterMap(code_info,
code_info.GetDexRegisterMapAtDepth(
- d, inline_info, encoding, inline_entry.num_dex_registers),
- inline_entry.num_dex_registers,
- inline_entry.live_dex_registers_mask,
- inline_entry.dex_register_locations_start_index);
+ d,
+ inline_info,
+ encoding,
+ inline_entry.dex_register_entry.num_dex_registers),
+ inline_entry.dex_register_entry.num_dex_registers,
+ inline_entry.dex_register_entry.live_dex_registers_mask,
+ inline_entry.dex_register_entry.locations_start_index);
}
}
}
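
The fill-in paths above (including the inline-frame path ending in SetDexRegisterMapOffsetAtDepth) all funnel into MaybeCopyDexRegisterMap, whose body falls outside these hunks. Its contract, per the header comment below, is copy-once with a memoized offset. A simplified sketch of that contract (MapEntry and MaybeCopyMap are illustrative; the real code writes into a MemoryRegion, not a vector):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    struct MapEntry {
      static constexpr size_t kOffsetUnassigned = static_cast<size_t>(-1);
      std::vector<uint8_t> bytes;         // Encoded dex register map.
      size_t offset = kOffsetUnassigned;  // Set on the first copy, then reused.
    };

    // The first caller copies the entry's bytes into `region` (which must
    // already be large enough) and claims an offset; later callers, e.g. an
    // inline frame whose map deduplicated to the same entry, get the identical
    // offset back without copying anything.
    size_t MaybeCopyMap(MapEntry& entry, size_t* current_offset,
                        std::vector<uint8_t>& region) {
      if (entry.offset == MapEntry::kOffsetUnassigned) {
        entry.offset = *current_offset;
        std::memcpy(region.data() + entry.offset, entry.bytes.data(), entry.bytes.size());
        *current_offset += entry.bytes.size();
      }
      return entry.offset;
    }
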
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index 08c1d3e3c0..bba3d51e62 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -70,6 +70,7 @@ class StackMapStream : public ValueObject {
inline_infos_(allocator->Adapter(kArenaAllocStackMapStream)),
stack_masks_(allocator->Adapter(kArenaAllocStackMapStream)),
register_masks_(allocator->Adapter(kArenaAllocStackMapStream)),
+ dex_register_entries_(allocator->Adapter(kArenaAllocStackMapStream)),
stack_mask_max_(-1),
dex_pc_max_(0),
register_mask_max_(0),
@@ -89,30 +90,42 @@ class StackMapStream : public ValueObject {
code_info_encoding_.reserve(16);
}
+ // A dex register map entry for a single stack map entry. It records which registers are
+ // live as well as indices into the location catalog.
+ class DexRegisterMapEntry {
+ public:
+ static const size_t kOffsetUnassigned = static_cast<size_t>(-1);
+
+ BitVector* live_dex_registers_mask;
+ uint32_t num_dex_registers;
+ size_t locations_start_index;
+ // Computed fields
+ size_t hash = 0;
+ size_t offset = kOffsetUnassigned;
+
+ size_t ComputeSize(size_t catalog_size) const;
+ };
+
// See runtime/stack_map.h to know what these fields contain.
struct StackMapEntry {
uint32_t dex_pc;
CodeOffset native_pc_code_offset;
uint32_t register_mask;
BitVector* sp_mask;
- uint32_t num_dex_registers;
uint8_t inlining_depth;
- size_t dex_register_locations_start_index;
size_t inline_infos_start_index;
- BitVector* live_dex_registers_mask;
- uint32_t dex_register_map_hash;
- size_t same_dex_register_map_as_;
uint32_t stack_mask_index;
uint32_t register_mask_index;
+ DexRegisterMapEntry dex_register_entry;
+ size_t dex_register_map_index;
};
struct InlineInfoEntry {
uint32_t dex_pc; // DexFile::kDexNoIndex for intrinsified native methods.
ArtMethod* method;
uint32_t method_index;
- uint32_t num_dex_registers;
- BitVector* live_dex_registers_mask;
- size_t dex_register_locations_start_index;
+ DexRegisterMapEntry dex_register_entry;
+ size_t dex_register_map_index;
};
void BeginStackMapEntry(uint32_t dex_pc,
@@ -140,7 +153,8 @@ class StackMapStream : public ValueObject {
}
void SetStackMapNativePcOffset(size_t i, uint32_t native_pc_offset) {
- stack_maps_[i].native_pc_code_offset = CodeOffset::FromOffset(native_pc_offset, instruction_set_);
+ stack_maps_[i].native_pc_code_offset =
+ CodeOffset::FromOffset(native_pc_offset, instruction_set_);
}
// Prepares the stream to fill in a memory region. Must be called before FillIn.
@@ -150,8 +164,6 @@ class StackMapStream : public ValueObject {
private:
size_t ComputeDexRegisterLocationCatalogSize() const;
- size_t ComputeDexRegisterMapSize(uint32_t num_dex_registers,
- const BitVector* live_dex_registers_mask) const;
size_t ComputeDexRegisterMapsSize() const;
void ComputeInlineInfoEncoding(InlineInfoEncoding* encoding,
size_t dex_register_maps_bytes);
@@ -164,15 +176,24 @@ class StackMapStream : public ValueObject {
// Returns the number of unique register masks.
size_t PrepareRegisterMasks();
- // Returns the index of an entry with the same dex register map as the current_entry,
- // or kNoSameDexMapFound if no such entry exists.
- size_t FindEntryWithTheSameDexMap();
- bool HaveTheSameDexMaps(const StackMapEntry& a, const StackMapEntry& b) const;
+ // Deduplicates the entry if possible, returning the index of the matching entry in the
+ // dex_register_entries_ array. If the entry is not a duplicate, it is appended instead.
+ size_t AddDexRegisterMapEntry(const DexRegisterMapEntry& entry);
+
+ // Returns true if the two dex register map entries are equal.
+ bool DexRegisterMapEntryEquals(const DexRegisterMapEntry& a, const DexRegisterMapEntry& b) const;
+
+ // Fills in the given dex register map from the flat dex_register_locations_ array.
void FillInDexRegisterMap(DexRegisterMap dex_register_map,
uint32_t num_dex_registers,
const BitVector& live_dex_registers_mask,
uint32_t start_index_in_dex_register_locations) const;
+ // Returns the offset of the entry's dex register map inside the dex register location
+ // region (see FillIn). Only copies the map if the entry's offset is not already assigned.
+ size_t MaybeCopyDexRegisterMap(DexRegisterMapEntry& entry,
+ size_t* current_offset,
+ MemoryRegion dex_register_locations_region);
void CheckDexRegisterMap(const CodeInfo& code_info,
const DexRegisterMap& dex_register_map,
size_t num_dex_registers,
@@ -199,6 +220,7 @@ class StackMapStream : public ValueObject {
ArenaVector<InlineInfoEntry> inline_infos_;
ArenaVector<uint8_t> stack_masks_;
ArenaVector<uint32_t> register_masks_;
+ ArenaVector<DexRegisterMapEntry> dex_register_entries_;
int stack_mask_max_;
uint32_t dex_pc_max_;
uint32_t register_mask_max_;
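
On DexRegisterMapEntry::ComputeSize(catalog_size): the encoded map carries one liveness bit per dex register plus one catalog index per live register, which is where the "two 2-bit values" arithmetic in the test below comes from. A back-of-the-envelope sketch (the rounding and bit-width rule here are illustrative; the authoritative encoding is in runtime/stack_map.h):

    #include <cstddef>

    // Rough model of the per-entry map size: a live bit per dex register,
    // then one catalog index per *live* register, each rounded up to bytes.
    size_t SketchComputeSize(size_t num_dex_registers,
                             size_t num_live_registers,
                             size_t catalog_size) {
      if (num_dex_registers == 0u) {
        return 0u;  // Register-free entries emit no map at all.
      }
      const size_t live_mask_bytes = (num_dex_registers + 7) / 8;
      // Index width: enough bits to store the catalog size itself, so a
      // catalog of two entries yields 2-bit indices, as in the test below.
      size_t bits_per_index = 0;
      for (size_t v = catalog_size; v != 0; v >>= 1) {
        ++bits_per_index;
      }
      const size_t index_bytes = (num_live_registers * bits_per_index + 7) / 8;
      return live_mask_bytes + index_bytes;  // 1 + 1 bytes in the test's setup.
    }
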
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index bd0aa6dea7..041695187b 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -410,6 +410,100 @@ TEST(StackMapTest, Test2) {
}
}
+TEST(StackMapTest, TestDeduplicateInlineInfoDexRegisterMap) {
+ ArenaPool pool;
+ ArenaAllocator arena(&pool);
+ StackMapStream stream(&arena, kRuntimeISA);
+ ArtMethod art_method;
+
+ ArenaBitVector sp_mask1(&arena, 0, true);
+ sp_mask1.SetBit(2);
+ sp_mask1.SetBit(4);
+ const size_t number_of_dex_registers = 2;
+ const size_t number_of_dex_registers_in_inline_info = 2;
+ stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask1, number_of_dex_registers, 1);
+ stream.AddDexRegisterEntry(Kind::kInStack, 0); // Short location.
+ stream.AddDexRegisterEntry(Kind::kConstant, -2); // Large location.
+ stream.BeginInlineInfoEntry(&art_method, 3, number_of_dex_registers_in_inline_info);
+ stream.AddDexRegisterEntry(Kind::kInStack, 0); // Short location.
+ stream.AddDexRegisterEntry(Kind::kConstant, -2); // Large location.
+ stream.EndInlineInfoEntry();
+ stream.EndStackMapEntry();
+
+ size_t size = stream.PrepareForFillIn();
+ void* memory = arena.Alloc(size, kArenaAllocMisc);
+ MemoryRegion region(memory, size);
+ stream.FillIn(region);
+
+ CodeInfo code_info(region);
+ CodeInfoEncoding encoding = code_info.ExtractEncoding();
+ ASSERT_EQ(1u, code_info.GetNumberOfStackMaps(encoding));
+
+ uint32_t number_of_catalog_entries = code_info.GetNumberOfLocationCatalogEntries(encoding);
+ ASSERT_EQ(2u, number_of_catalog_entries);
+ DexRegisterLocationCatalog location_catalog = code_info.GetDexRegisterLocationCatalog(encoding);
+ // The Dex register location catalog contains:
+ // - one 1-byte short Dex register location, and
+ // - one 5-byte large Dex register location.
+ const size_t expected_location_catalog_size = 1u + 5u;
+ ASSERT_EQ(expected_location_catalog_size, location_catalog.Size());
+
+ // First stack map.
+ {
+ StackMap stack_map = code_info.GetStackMapAt(0, encoding);
+ ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0, encoding)));
+ ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64, encoding)));
+ ASSERT_EQ(0u, stack_map.GetDexPc(encoding.stack_map.encoding));
+ ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding.stack_map.encoding, kRuntimeISA));
+ ASSERT_EQ(0x3u, code_info.GetRegisterMaskOf(encoding, stack_map));
+
+ ASSERT_TRUE(CheckStackMask(code_info, encoding, stack_map, sp_mask1));
+
+ ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map.encoding));
+ DexRegisterMap map(code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers));
+ ASSERT_TRUE(map.IsDexRegisterLive(0));
+ ASSERT_TRUE(map.IsDexRegisterLive(1));
+ ASSERT_EQ(2u, map.GetNumberOfLiveDexRegisters(number_of_dex_registers));
+ // The Dex register map contains:
+ // - one 1-byte live bit mask, and
+ // - one 1-byte set of location catalog entry indices composed of two 2-bit values.
+ size_t expected_map_size = 1u + 1u;
+ ASSERT_EQ(expected_map_size, map.Size());
+
+ ASSERT_EQ(Kind::kInStack, map.GetLocationKind(0, number_of_dex_registers, code_info, encoding));
+ ASSERT_EQ(Kind::kConstant,
+ map.GetLocationKind(1, number_of_dex_registers, code_info, encoding));
+ ASSERT_EQ(Kind::kInStack,
+ map.GetLocationInternalKind(0, number_of_dex_registers, code_info, encoding));
+ ASSERT_EQ(Kind::kConstantLargeValue,
+ map.GetLocationInternalKind(1, number_of_dex_registers, code_info, encoding));
+ ASSERT_EQ(0, map.GetStackOffsetInBytes(0, number_of_dex_registers, code_info, encoding));
+ ASSERT_EQ(-2, map.GetConstant(1, number_of_dex_registers, code_info, encoding));
+
+ const size_t index0 =
+ map.GetLocationCatalogEntryIndex(0, number_of_dex_registers, number_of_catalog_entries);
+ const size_t index1 =
+ map.GetLocationCatalogEntryIndex(1, number_of_dex_registers, number_of_catalog_entries);
+ ASSERT_EQ(0u, index0);
+ ASSERT_EQ(1u, index1);
+ DexRegisterLocation location0 = location_catalog.GetDexRegisterLocation(index0);
+ DexRegisterLocation location1 = location_catalog.GetDexRegisterLocation(index1);
+ ASSERT_EQ(Kind::kInStack, location0.GetKind());
+ ASSERT_EQ(Kind::kConstant, location1.GetKind());
+ ASSERT_EQ(Kind::kInStack, location0.GetInternalKind());
+ ASSERT_EQ(Kind::kConstantLargeValue, location1.GetInternalKind());
+ ASSERT_EQ(0, location0.GetValue());
+ ASSERT_EQ(-2, location1.GetValue());
+
+ // Check that the inline info dex register map was deduplicated to the same offset as the
+ // stack map's.
+ ASSERT_TRUE(stack_map.HasInlineInfo(encoding.stack_map.encoding));
+ InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
+ EXPECT_EQ(inline_info.GetDexRegisterMapOffsetAtDepth(encoding.inline_info.encoding, 0),
+ stack_map.GetDexRegisterMapOffset(encoding.stack_map.encoding));
+ }
+}
+
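
The test above pins down deduplication between a frame and its inline frame. A natural companion check, sketched here rather than taken from the patch, is that two separate safepoints with identical maps also collapse to one offset; it reuses only API calls already visible in this file:

    TEST(StackMapTest, DeduplicateDexRegisterMapAcrossStackMapsSketch) {
      ArenaPool pool;
      ArenaAllocator arena(&pool);
      StackMapStream stream(&arena, kRuntimeISA);

      ArenaBitVector sp_mask(&arena, 0, true);
      const size_t number_of_dex_registers = 1;
      // Two safepoints recording the same single constant location.
      stream.BeginStackMapEntry(0, 4, 0x3, &sp_mask, number_of_dex_registers, 0);
      stream.AddDexRegisterEntry(Kind::kConstant, 0);  // Short location.
      stream.EndStackMapEntry();
      stream.BeginStackMapEntry(1, 8, 0x3, &sp_mask, number_of_dex_registers, 0);
      stream.AddDexRegisterEntry(Kind::kConstant, 0);  // Identical map.
      stream.EndStackMapEntry();

      size_t size = stream.PrepareForFillIn();
      void* memory = arena.Alloc(size, kArenaAllocMisc);
      MemoryRegion region(memory, size);
      stream.FillIn(region);

      CodeInfo code_info(region);
      CodeInfoEncoding encoding = code_info.ExtractEncoding();
      ASSERT_EQ(2u, code_info.GetNumberOfStackMaps(encoding));
      StackMap stack_map0 = code_info.GetStackMapAt(0, encoding);
      StackMap stack_map1 = code_info.GetStackMapAt(1, encoding);
      // Identical maps should deduplicate to a single offset in the region.
      EXPECT_EQ(stack_map0.GetDexRegisterMapOffset(encoding.stack_map.encoding),
                stack_map1.GetDexRegisterMapOffset(encoding.stack_map.encoding));
    }
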
TEST(StackMapTest, TestNonLiveDexRegisters) {
ArenaPool pool;
ArenaAllocator arena(&pool);