Diffstat (limited to 'compiler')
-rw-r--r--  compiler/common_compiler_test.h                    |  12
-rw-r--r--  compiler/compiler.h                                |   2
-rw-r--r--  compiler/dex/mir_field_info.h                      |   4
-rw-r--r--  compiler/dex/mir_graph.h                           |  12
-rw-r--r--  compiler/dex/mir_method_info.h                     |   2
-rw-r--r--  compiler/dex/mir_optimization.cc                   |   3
-rw-r--r--  compiler/dex/mir_optimization_test.cc              |  11
-rw-r--r--  compiler/dex/quick/dex_file_method_inliner.h       |  22
-rw-r--r--  compiler/dex/quick/quick_compiler.h                |   2
-rw-r--r--  compiler/dex/quick_compiler_callbacks.h            |   2
-rw-r--r--  compiler/dex/verification_results.h                |  12
-rw-r--r--  compiler/dex/verified_method.h                     |   8
-rw-r--r--  compiler/driver/compiler_driver.cc                 | 891
-rw-r--r--  compiler/driver/compiler_driver.h                  | 135
-rw-r--r--  compiler/driver/compiler_driver_test.cc            |   4
-rw-r--r--  compiler/elf_writer.h                              |   2
-rw-r--r--  compiler/elf_writer_quick.h                        |   4
-rw-r--r--  compiler/image_writer.cc                           |  25
-rw-r--r--  compiler/image_writer.h                            |  96
-rw-r--r--  compiler/jit/jit_compiler.cc                       |   2
-rw-r--r--  compiler/jit/jit_compiler.h                        |   6
-rw-r--r--  compiler/oat_test.cc                               |   2
-rw-r--r--  compiler/oat_writer.cc                             |  26
-rw-r--r--  compiler/oat_writer.h                              |   4
-rw-r--r--  compiler/optimizing/graph_visualizer.cc            |  25
-rw-r--r--  compiler/optimizing/inliner.cc                     |  12
-rw-r--r--  compiler/optimizing/instruction_simplifier.cc      |  12
-rw-r--r--  compiler/optimizing/licm_test.cc                   | 195
-rw-r--r--  compiler/optimizing/nodes.cc                       |  32
-rw-r--r--  compiler/optimizing/nodes.h                        | 127
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc         |  38
-rw-r--r--  compiler/optimizing/reference_type_propagation.cc  | 446
-rw-r--r--  compiler/optimizing/reference_type_propagation.h   |  17
-rw-r--r--  compiler/trampolines/trampoline_compiler.h         |   4
-rw-r--r--  compiler/utils/arm/assembler_thumb2.cc             |  29
-rw-r--r--  compiler/utils/arm/assembler_thumb2.h              |   4
-rw-r--r--  compiler/utils/arm/assembler_thumb2_test.cc        |  61
-rw-r--r--  compiler/utils/swap_space.cc                       |   1
-rw-r--r--  compiler/utils/swap_space.h                        |   6
39 files changed, 1187 insertions, 1111 deletions
diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h
index d215662645..dc2bc5c3f4 100644
--- a/compiler/common_compiler_test.h
+++ b/compiler/common_compiler_test.h
@@ -46,12 +46,12 @@ class CommonCompilerTest : public CommonRuntimeTest {
// Create an OatMethod based on pointers (for unit tests).
OatFile::OatMethod CreateOatMethod(const void* code);
- void MakeExecutable(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void MakeExecutable(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
static void MakeExecutable(const void* code_start, size_t code_length);
void MakeExecutable(mirror::ClassLoader* class_loader, const char* class_name)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
protected:
virtual void SetUp();
@@ -76,17 +76,17 @@ class CommonCompilerTest : public CommonRuntimeTest {
virtual void TearDown();
void CompileClass(mirror::ClassLoader* class_loader, const char* class_name)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- void CompileMethod(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void CompileMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
void CompileDirectMethod(Handle<mirror::ClassLoader> class_loader, const char* class_name,
const char* method_name, const char* signature)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void CompileVirtualMethod(Handle<mirror::ClassLoader> class_loader, const char* class_name,
const char* method_name, const char* signature)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void ReserveImageSpace();
diff --git a/compiler/compiler.h b/compiler/compiler.h
index e5d1aff08c..fcd3434e68 100644
--- a/compiler/compiler.h
+++ b/compiler/compiler.h
@@ -58,7 +58,7 @@ class Compiler {
const DexFile& dex_file) const = 0;
virtual uintptr_t GetEntryPointOf(ArtMethod* method) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+ SHARED_REQUIRES(Locks::mutator_lock_) = 0;
uint64_t GetMaximumCompilationTimeBeforeWarning() const {
return maximum_compilation_time_before_warning_;
diff --git a/compiler/dex/mir_field_info.h b/compiler/dex/mir_field_info.h
index e4570fd8d3..04c58aca6b 100644
--- a/compiler/dex/mir_field_info.h
+++ b/compiler/dex/mir_field_info.h
@@ -135,7 +135,7 @@ class MirIFieldLoweringInfo : public MirFieldInfo {
// with IGET/IPUT. For fast path fields, retrieve the field offset.
static void Resolve(CompilerDriver* compiler_driver, const DexCompilationUnit* mUnit,
MirIFieldLoweringInfo* field_infos, size_t count)
- LOCKS_EXCLUDED(Locks::mutator_lock_);
+ REQUIRES(!Locks::mutator_lock_);
// Construct an unresolved instance field lowering info.
explicit MirIFieldLoweringInfo(uint16_t field_idx, DexMemAccessType type, bool is_quickened)
@@ -192,7 +192,7 @@ class MirSFieldLoweringInfo : public MirFieldInfo {
// and the type index of the declaring class in the compiled method's dex file.
static void Resolve(CompilerDriver* compiler_driver, const DexCompilationUnit* mUnit,
MirSFieldLoweringInfo* field_infos, size_t count)
- LOCKS_EXCLUDED(Locks::mutator_lock_);
+ REQUIRES(!Locks::mutator_lock_);
// Construct an unresolved static field lowering info.
explicit MirSFieldLoweringInfo(uint16_t field_idx, DexMemAccessType type)
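
Most of the hunks above and below make the same mechanical substitution: the old lock macros SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED and EXCLUSIVE_LOCKS_REQUIRED become the capability-style SHARED_REQUIRES(x), REQUIRES(!x) and REQUIRES(x) understood by Clang's -Wthread-safety analysis. Below is a minimal standalone sketch of how such annotations behave; the macro definitions are illustrative stand-ins, not ART's actual mutex.h.

    // Sketch only: simplified stand-ins for ART's thread-safety macros,
    // built directly on Clang's thread-safety attributes (-Wthread-safety).
    #include <mutex>

    #define CAPABILITY(x)        __attribute__((capability(x)))
    #define REQUIRES(...)        __attribute__((requires_capability(__VA_ARGS__)))
    #define SHARED_REQUIRES(...) __attribute__((requires_shared_capability(__VA_ARGS__)))
    #define ACQUIRE(...)         __attribute__((acquire_capability(__VA_ARGS__)))
    #define RELEASE(...)         __attribute__((release_capability(__VA_ARGS__)))
    #define GUARDED_BY(x)        __attribute__((guarded_by(x)))

    class CAPABILITY("mutex") Mutex {
     public:
      void Lock() ACQUIRE() { mu_.lock(); }
      void Unlock() RELEASE() { mu_.unlock(); }
     private:
      std::mutex mu_;
    };

    Mutex stats_lock_;
    int resolved_types_ GUARDED_BY(stats_lock_) = 0;

    // REQUIRES(lock): the caller must already hold the lock.
    void TypeResolvedLocked() REQUIRES(stats_lock_) {
      ++resolved_types_;
    }

    // REQUIRES(!lock): the caller must NOT hold the lock (the old
    // LOCKS_EXCLUDED), because the function takes it itself.
    void TypeResolved() REQUIRES(!stats_lock_) {
      stats_lock_.Lock();
      ++resolved_types_;
      stats_lock_.Unlock();
    }

    int main() {
      TypeResolved();  // OK: lock not held on entry.
      return 0;
    }

With -Wthread-safety (and -Wthread-safety-negative for the negated form), Clang warns at compile time if a caller enters TypeResolved() while already holding stats_lock_, or calls TypeResolvedLocked() without holding it.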
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index dbe906280f..23b7c4292b 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -173,7 +173,17 @@ enum OatMethodAttributes {
typedef uint16_t BasicBlockId;
static const BasicBlockId NullBasicBlockId = 0;
-static constexpr bool kLeafOptimization = false;
+
+// Leaf optimization is basically the removal of suspend checks from leaf methods.
+// This is incompatible with SuspendCheckElimination (SCE) which eliminates suspend
+// checks from loops that call any non-intrinsic method, since a loop that calls
+// only a leaf method would end up without any suspend checks at all. So turning
+// this on automatically disables the SCE in MIRGraph::EliminateSuspendChecksGate().
+//
+// Since the Optimizing compiler is actually applying the same optimization, Quick
+// must not run SCE anyway, so we enable this optimization as a way to disable SCE
+// while keeping a consistent behavior across the backends, b/22657404.
+static constexpr bool kLeafOptimization = true;
/*
* In general, vreg/sreg describe Dalvik registers that originated with dx. However,
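
The comment added above is the rationale for flipping kLeafOptimization to true: once leaf methods lose their suspend checks, Quick's suspend-check elimination (SCE) has to be gated off (see the mir_optimization.cc hunk below), matching what the Optimizing backend already does (b/22657404). An illustrative sketch of the problematic shape, in C++ syntax standing in for the compiled Java code:

    // Leaf method: no calls inside, so kLeafOptimization removes its
    // suspend check.
    int Leaf(int x) {
      return x * 2;
    }

    int Caller(int n) {
      int sum = 0;
      // SCE removes suspend checks from loops that call a non-intrinsic
      // method -- but if that callee is a leaf whose own check was removed,
      // the loop would spin with no suspend check anywhere and never reach
      // a safepoint.
      for (int i = 0; i < n; ++i) {
        sum += Leaf(i);
      }
      return sum;
    }

Hence EliminateSuspendChecksGate() now refuses to run whenever kLeafOptimization is set.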
diff --git a/compiler/dex/mir_method_info.h b/compiler/dex/mir_method_info.h
index 946c74becf..4512f35a99 100644
--- a/compiler/dex/mir_method_info.h
+++ b/compiler/dex/mir_method_info.h
@@ -99,7 +99,7 @@ class MirMethodLoweringInfo : public MirMethodInfo {
// path methods, retrieve the method's vtable index and direct code and method when applicable.
static void Resolve(CompilerDriver* compiler_driver, const DexCompilationUnit* mUnit,
MirMethodLoweringInfo* method_infos, size_t count)
- LOCKS_EXCLUDED(Locks::mutator_lock_);
+ REQUIRES(!Locks::mutator_lock_);
MirMethodLoweringInfo(uint16_t method_idx, InvokeType type, bool is_quickened)
: MirMethodInfo(method_idx,
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 5bb0ce3ba5..80b7ac1e5b 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -1724,7 +1724,8 @@ void MIRGraph::StringChange() {
bool MIRGraph::EliminateSuspendChecksGate() {
- if ((cu_->disable_opt & (1 << kSuspendCheckElimination)) != 0 || // Disabled.
+ if (kLeafOptimization || // Incompatible (could create loops without suspend checks).
+ (cu_->disable_opt & (1 << kSuspendCheckElimination)) != 0 || // Disabled.
GetMaxNestedLoops() == 0u || // Nothing to do.
GetMaxNestedLoops() >= 32u || // Only 32 bits in suspend_checks_in_loops_[.].
// Exclude 32 as well to keep bit shifts well-defined.
diff --git a/compiler/dex/mir_optimization_test.cc b/compiler/dex/mir_optimization_test.cc
index 10a4337cf5..47123ba28c 100644
--- a/compiler/dex/mir_optimization_test.cc
+++ b/compiler/dex/mir_optimization_test.cc
@@ -467,8 +467,17 @@ class SuspendCheckEliminationTest : public MirOptimizationTest {
cu_.mir_graph->ComputeDominators();
cu_.mir_graph->ComputeTopologicalSortOrder();
cu_.mir_graph->SSATransformationEnd();
+
bool gate_result = cu_.mir_graph->EliminateSuspendChecksGate();
- ASSERT_TRUE(gate_result);
+ ASSERT_NE(gate_result, kLeafOptimization);
+ if (kLeafOptimization) {
+ // Even with kLeafOptimization on and Gate() refusing to allow SCE, we want
+ // to run the SCE test to avoid bitrot, so we need to initialize explicitly.
+ cu_.mir_graph->suspend_checks_in_loops_ =
+ cu_.mir_graph->arena_->AllocArray<uint32_t>(cu_.mir_graph->GetNumBlocks(),
+ kArenaAllocMisc);
+ }
+
TopologicalSortIterator iterator(cu_.mir_graph.get());
bool change = false;
for (BasicBlock* bb = iterator.Next(change); bb != nullptr; bb = iterator.Next(change)) {
diff --git a/compiler/dex/quick/dex_file_method_inliner.h b/compiler/dex/quick/dex_file_method_inliner.h
index 26b41bf54d..a8cb9f0c30 100644
--- a/compiler/dex/quick/dex_file_method_inliner.h
+++ b/compiler/dex/quick/dex_file_method_inliner.h
@@ -62,49 +62,49 @@ class DexFileMethodInliner {
* @return true if the method is a candidate for inlining, false otherwise.
*/
bool AnalyseMethodCode(verifier::MethodVerifier* verifier)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_);
/**
* Check whether a particular method index corresponds to an intrinsic or special function.
*/
- InlineMethodFlags IsIntrinsicOrSpecial(uint32_t method_index) LOCKS_EXCLUDED(lock_);
+ InlineMethodFlags IsIntrinsicOrSpecial(uint32_t method_index) REQUIRES(!lock_);
/**
* Check whether a particular method index corresponds to an intrinsic function.
*/
- bool IsIntrinsic(uint32_t method_index, InlineMethod* intrinsic) LOCKS_EXCLUDED(lock_);
+ bool IsIntrinsic(uint32_t method_index, InlineMethod* intrinsic) REQUIRES(!lock_);
/**
* Generate code for an intrinsic function invocation.
*/
- bool GenIntrinsic(Mir2Lir* backend, CallInfo* info) LOCKS_EXCLUDED(lock_);
+ bool GenIntrinsic(Mir2Lir* backend, CallInfo* info) REQUIRES(!lock_);
/**
* Check whether a particular method index corresponds to a special function.
*/
- bool IsSpecial(uint32_t method_index) LOCKS_EXCLUDED(lock_);
+ bool IsSpecial(uint32_t method_index) REQUIRES(!lock_);
/**
* Generate code for a special function.
*/
- bool GenSpecial(Mir2Lir* backend, uint32_t method_idx) LOCKS_EXCLUDED(lock_);
+ bool GenSpecial(Mir2Lir* backend, uint32_t method_idx) REQUIRES(!lock_);
/**
* Try to inline an invoke.
*/
bool GenInline(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke, uint32_t method_idx)
- LOCKS_EXCLUDED(lock_);
+ REQUIRES(!lock_);
/**
* Gets the thread pointer entrypoint offset for a string init method index and pointer size.
*/
uint32_t GetOffsetForStringInit(uint32_t method_index, size_t pointer_size)
- LOCKS_EXCLUDED(lock_);
+ REQUIRES(!lock_);
/**
* Check whether a particular method index is a string init.
*/
- bool IsStringInitMethodIndex(uint32_t method_index) LOCKS_EXCLUDED(lock_);
+ bool IsStringInitMethodIndex(uint32_t method_index) REQUIRES(!lock_);
/**
* To avoid multiple lookups of a class by its descriptor, we cache its
@@ -351,11 +351,11 @@ class DexFileMethodInliner {
*
* Only DexFileToMethodInlinerMap may call this function to initialize the inliner.
*/
- void FindIntrinsics(const DexFile* dex_file) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ void FindIntrinsics(const DexFile* dex_file) REQUIRES(lock_);
friend class DexFileToMethodInlinerMap;
- bool AddInlineMethod(int32_t method_idx, const InlineMethod& method) LOCKS_EXCLUDED(lock_);
+ bool AddInlineMethod(int32_t method_idx, const InlineMethod& method) REQUIRES(!lock_);
static bool GenInlineConst(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke,
MIR* move_result, const InlineMethod& method);
diff --git a/compiler/dex/quick/quick_compiler.h b/compiler/dex/quick/quick_compiler.h
index 43dd5786af..4a39ab3565 100644
--- a/compiler/dex/quick/quick_compiler.h
+++ b/compiler/dex/quick/quick_compiler.h
@@ -50,7 +50,7 @@ class QuickCompiler : public Compiler {
const DexFile& dex_file) const OVERRIDE;
uintptr_t GetEntryPointOf(ArtMethod* method) const OVERRIDE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static Mir2Lir* GetCodeGenerator(CompilationUnit* cu, void* compilation_unit);
diff --git a/compiler/dex/quick_compiler_callbacks.h b/compiler/dex/quick_compiler_callbacks.h
index d692d26229..03bf57bded 100644
--- a/compiler/dex/quick_compiler_callbacks.h
+++ b/compiler/dex/quick_compiler_callbacks.h
@@ -38,7 +38,7 @@ class QuickCompilerCallbacks FINAL : public CompilerCallbacks {
~QuickCompilerCallbacks() { }
bool MethodVerified(verifier::MethodVerifier* verifier)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
+ SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE;
void ClassRejected(ClassReference ref) OVERRIDE;
diff --git a/compiler/dex/verification_results.h b/compiler/dex/verification_results.h
index 7fc2a2363d..9934f6b13b 100644
--- a/compiler/dex/verification_results.h
+++ b/compiler/dex/verification_results.h
@@ -43,15 +43,15 @@ class VerificationResults {
~VerificationResults();
bool ProcessVerifiedMethod(verifier::MethodVerifier* method_verifier)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(verified_methods_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!verified_methods_lock_);
const VerifiedMethod* GetVerifiedMethod(MethodReference ref)
- LOCKS_EXCLUDED(verified_methods_lock_);
- void RemoveVerifiedMethod(MethodReference ref) LOCKS_EXCLUDED(verified_methods_lock_);
+ REQUIRES(!verified_methods_lock_);
+ void RemoveVerifiedMethod(MethodReference ref) REQUIRES(!verified_methods_lock_);
- void AddRejectedClass(ClassReference ref) LOCKS_EXCLUDED(rejected_classes_lock_);
- bool IsClassRejected(ClassReference ref) LOCKS_EXCLUDED(rejected_classes_lock_);
+ void AddRejectedClass(ClassReference ref) REQUIRES(!rejected_classes_lock_);
+ bool IsClassRejected(ClassReference ref) REQUIRES(!rejected_classes_lock_);
bool IsCandidateForCompilation(MethodReference& method_ref,
const uint32_t access_flags);
diff --git a/compiler/dex/verified_method.h b/compiler/dex/verified_method.h
index bf11839cf0..f7d6d67368 100644
--- a/compiler/dex/verified_method.h
+++ b/compiler/dex/verified_method.h
@@ -44,7 +44,7 @@ class VerifiedMethod {
typedef SafeMap<uint32_t, DexFileReference> DequickenMap;
static const VerifiedMethod* Create(verifier::MethodVerifier* method_verifier, bool compile)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
~VerifiedMethod() = default;
const std::vector<uint8_t>& GetDexGcMap() const {
@@ -107,15 +107,15 @@ class VerifiedMethod {
// Generate devirtualizaion map into devirt_map_.
void GenerateDevirtMap(verifier::MethodVerifier* method_verifier)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Generate dequickening map into dequicken_map_. Returns false if there is an error.
bool GenerateDequickenMap(verifier::MethodVerifier* method_verifier)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Generate safe case set into safe_cast_set_.
void GenerateSafeCastSet(verifier::MethodVerifier* method_verifier)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
std::vector<uint8_t> dex_gc_map_;
DevirtualizationMap devirt_map_;
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index a52bfaeb5b..a35f306612 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -167,69 +167,69 @@ class CompilerDriver::AOTCompilationStats {
#define STATS_LOCK()
#endif
- void TypeInDexCache() {
+ void TypeInDexCache() REQUIRES(!stats_lock_) {
STATS_LOCK();
types_in_dex_cache_++;
}
- void TypeNotInDexCache() {
+ void TypeNotInDexCache() REQUIRES(!stats_lock_) {
STATS_LOCK();
types_not_in_dex_cache_++;
}
- void StringInDexCache() {
+ void StringInDexCache() REQUIRES(!stats_lock_) {
STATS_LOCK();
strings_in_dex_cache_++;
}
- void StringNotInDexCache() {
+ void StringNotInDexCache() REQUIRES(!stats_lock_) {
STATS_LOCK();
strings_not_in_dex_cache_++;
}
- void TypeDoesntNeedAccessCheck() {
+ void TypeDoesntNeedAccessCheck() REQUIRES(!stats_lock_) {
STATS_LOCK();
resolved_types_++;
}
- void TypeNeedsAccessCheck() {
+ void TypeNeedsAccessCheck() REQUIRES(!stats_lock_) {
STATS_LOCK();
unresolved_types_++;
}
- void ResolvedInstanceField() {
+ void ResolvedInstanceField() REQUIRES(!stats_lock_) {
STATS_LOCK();
resolved_instance_fields_++;
}
- void UnresolvedInstanceField() {
+ void UnresolvedInstanceField() REQUIRES(!stats_lock_) {
STATS_LOCK();
unresolved_instance_fields_++;
}
- void ResolvedLocalStaticField() {
+ void ResolvedLocalStaticField() REQUIRES(!stats_lock_) {
STATS_LOCK();
resolved_local_static_fields_++;
}
- void ResolvedStaticField() {
+ void ResolvedStaticField() REQUIRES(!stats_lock_) {
STATS_LOCK();
resolved_static_fields_++;
}
- void UnresolvedStaticField() {
+ void UnresolvedStaticField() REQUIRES(!stats_lock_) {
STATS_LOCK();
unresolved_static_fields_++;
}
// Indicate that type information from the verifier led to devirtualization.
- void PreciseTypeDevirtualization() {
+ void PreciseTypeDevirtualization() REQUIRES(!stats_lock_) {
STATS_LOCK();
type_based_devirtualization_++;
}
// Indicate that a method of the given type was resolved at compile time.
- void ResolvedMethod(InvokeType type) {
+ void ResolvedMethod(InvokeType type) REQUIRES(!stats_lock_) {
DCHECK_LE(type, kMaxInvokeType);
STATS_LOCK();
resolved_methods_[type]++;
@@ -237,7 +237,7 @@ class CompilerDriver::AOTCompilationStats {
// Indicate that a method of the given type was unresolved at compile time as it was in an
// unknown dex file.
- void UnresolvedMethod(InvokeType type) {
+ void UnresolvedMethod(InvokeType type) REQUIRES(!stats_lock_) {
DCHECK_LE(type, kMaxInvokeType);
STATS_LOCK();
unresolved_methods_[type]++;
@@ -245,27 +245,27 @@ class CompilerDriver::AOTCompilationStats {
// Indicate that a type of virtual method dispatch has been converted into a direct method
// dispatch.
- void VirtualMadeDirect(InvokeType type) {
+ void VirtualMadeDirect(InvokeType type) REQUIRES(!stats_lock_) {
DCHECK(type == kVirtual || type == kInterface || type == kSuper);
STATS_LOCK();
virtual_made_direct_[type]++;
}
// Indicate that a method of the given type was able to call directly into boot.
- void DirectCallsToBoot(InvokeType type) {
+ void DirectCallsToBoot(InvokeType type) REQUIRES(!stats_lock_) {
DCHECK_LE(type, kMaxInvokeType);
STATS_LOCK();
direct_calls_to_boot_[type]++;
}
// Indicate that a method of the given type was able to be resolved directly from boot.
- void DirectMethodsToBoot(InvokeType type) {
+ void DirectMethodsToBoot(InvokeType type) REQUIRES(!stats_lock_) {
DCHECK_LE(type, kMaxInvokeType);
STATS_LOCK();
direct_methods_to_boot_[type]++;
}
- void ProcessedInvoke(InvokeType type, int flags) {
+ void ProcessedInvoke(InvokeType type, int flags) REQUIRES(!stats_lock_) {
STATS_LOCK();
if (flags == 0) {
unresolved_methods_[type]++;
@@ -290,13 +290,13 @@ class CompilerDriver::AOTCompilationStats {
}
// A check-cast could be eliminated due to verifier type analysis.
- void SafeCast() {
+ void SafeCast() REQUIRES(!stats_lock_) {
STATS_LOCK();
safe_casts_++;
}
// A check-cast couldn't be eliminated due to verifier type analysis.
- void NotASafeCast() {
+ void NotASafeCast() REQUIRES(!stats_lock_) {
STATS_LOCK();
not_safe_casts_++;
}
@@ -692,7 +692,7 @@ bool CompilerDriver::IsMethodToCompile(const MethodReference& method_ref) const
static void ResolveExceptionsForMethod(
ArtMethod* method_handle, std::set<std::pair<uint16_t, const DexFile*>>& exceptions_to_resolve)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
const DexFile::CodeItem* code_item = method_handle->GetCodeItem();
if (code_item == nullptr) {
return; // native or abstract method
@@ -729,7 +729,7 @@ static void ResolveExceptionsForMethod(
}
static bool ResolveCatchBlockExceptionsClassVisitor(mirror::Class* c, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
auto* exceptions_to_resolve =
reinterpret_cast<std::set<std::pair<uint16_t, const DexFile*>>*>(arg);
const auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
@@ -743,7 +743,7 @@ static bool ResolveCatchBlockExceptionsClassVisitor(mirror::Class* c, void* arg)
}
static bool RecordImageClassesVisitor(mirror::Class* klass, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
std::unordered_set<std::string>* image_classes =
reinterpret_cast<std::unordered_set<std::string>*>(arg);
std::string temp;
@@ -752,8 +752,7 @@ static bool RecordImageClassesVisitor(mirror::Class* klass, void* arg)
}
// Make a list of descriptors for classes to include in the image
-void CompilerDriver::LoadImageClasses(TimingLogger* timings)
- LOCKS_EXCLUDED(Locks::mutator_lock_) {
+void CompilerDriver::LoadImageClasses(TimingLogger* timings) {
CHECK(timings != nullptr);
if (!IsImage()) {
return;
@@ -819,7 +818,7 @@ void CompilerDriver::LoadImageClasses(TimingLogger* timings)
static void MaybeAddToImageClasses(Handle<mirror::Class> c,
std::unordered_set<std::string>* image_classes)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
Thread* self = Thread::Current();
StackHandleScope<1> hs(self);
// Make a copy of the handle so that we don't clobber it doing Assign.
@@ -876,7 +875,7 @@ class ClinitImageUpdate {
// Visitor for VisitReferences.
void operator()(mirror::Object* object, MemberOffset field_offset, bool /* is_static */) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
mirror::Object* ref = object->GetFieldObject<mirror::Object>(field_offset);
if (ref != nullptr) {
VisitClinitClassesObject(ref);
@@ -884,10 +883,15 @@ class ClinitImageUpdate {
}
// java.lang.Reference visitor for VisitReferences.
- void operator()(mirror::Class* /* klass */, mirror::Reference* /* ref */) const {
- }
+ void operator()(mirror::Class* klass ATTRIBUTE_UNUSED, mirror::Reference* ref ATTRIBUTE_UNUSED)
+ const {}
+
+ // Ignore class native roots.
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
+ const {}
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
- void Walk() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void Walk() SHARED_REQUIRES(Locks::mutator_lock_) {
// Use the initial classes as roots for a search.
for (mirror::Class* klass_root : image_classes_) {
VisitClinitClassesObject(klass_root);
@@ -897,7 +901,7 @@ class ClinitImageUpdate {
private:
ClinitImageUpdate(std::unordered_set<std::string>* image_class_descriptors, Thread* self,
ClassLinker* linker)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
+ SHARED_REQUIRES(Locks::mutator_lock_) :
image_class_descriptors_(image_class_descriptors), self_(self) {
CHECK(linker != nullptr);
CHECK(image_class_descriptors != nullptr);
@@ -915,7 +919,7 @@ class ClinitImageUpdate {
}
static bool FindImageClasses(mirror::Class* klass, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ClinitImageUpdate* data = reinterpret_cast<ClinitImageUpdate*>(arg);
std::string temp;
const char* name = klass->GetDescriptor(&temp);
@@ -933,7 +937,7 @@ class ClinitImageUpdate {
}
void VisitClinitClassesObject(mirror::Object* object) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(object != nullptr);
if (marked_objects_.find(object) != marked_objects_.end()) {
// Already processed.
@@ -1569,10 +1573,14 @@ bool CompilerDriver::IsSafeCast(const DexCompilationUnit* mUnit, uint32_t dex_pc
return result;
}
-class ParallelCompilationManager {
+class CompilationVisitor {
public:
- typedef void Callback(const ParallelCompilationManager* manager, size_t index);
+ virtual ~CompilationVisitor() {}
+ virtual void Visit(size_t index) = 0;
+};
+class ParallelCompilationManager {
+ public:
ParallelCompilationManager(ClassLinker* class_linker,
jobject class_loader,
CompilerDriver* compiler,
@@ -1610,14 +1618,15 @@ class ParallelCompilationManager {
return dex_files_;
}
- void ForAll(size_t begin, size_t end, Callback callback, size_t work_units) {
+ void ForAll(size_t begin, size_t end, CompilationVisitor* visitor, size_t work_units)
+ REQUIRES(!*Locks::mutator_lock_) {
Thread* self = Thread::Current();
self->AssertNoPendingException();
CHECK_GT(work_units, 0U);
index_.StoreRelaxed(begin);
for (size_t i = 0; i < work_units; ++i) {
- thread_pool_->AddTask(self, new ForAllClosure(this, end, callback));
+ thread_pool_->AddTask(self, new ForAllClosure(this, end, visitor));
}
thread_pool_->StartWorkers(self);
@@ -1636,10 +1645,10 @@ class ParallelCompilationManager {
private:
class ForAllClosure : public Task {
public:
- ForAllClosure(ParallelCompilationManager* manager, size_t end, Callback* callback)
+ ForAllClosure(ParallelCompilationManager* manager, size_t end, CompilationVisitor* visitor)
: manager_(manager),
end_(end),
- callback_(callback) {}
+ visitor_(visitor) {}
virtual void Run(Thread* self) {
while (true) {
@@ -1647,7 +1656,7 @@ class ParallelCompilationManager {
if (UNLIKELY(index >= end_)) {
break;
}
- callback_(manager_, index);
+ visitor_->Visit(index);
self->AssertNoPendingException();
}
}
@@ -1659,7 +1668,7 @@ class ParallelCompilationManager {
private:
ParallelCompilationManager* const manager_;
const size_t end_;
- Callback* const callback_;
+ CompilationVisitor* const visitor_;
};
AtomicInteger index_;
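
The ParallelCompilationManager hunks above replace the free-function Callback with the small CompilationVisitor interface, so each pass keeps its state in a visitor object instead of re-deriving it from the manager on every call, and ForAll hands the same visitor to every worker task. A simplified, self-contained sketch of the pattern (single-threaded here; names other than CompilationVisitor and ForAll are invented for illustration):

    #include <cstddef>
    #include <iostream>

    class CompilationVisitor {
     public:
      virtual ~CompilationVisitor() {}
      virtual void Visit(size_t index) = 0;
    };

    // Stand-in for ParallelCompilationManager::ForAll(); the real one fans the
    // indices out to a thread pool, so Visit() must be safe to run concurrently
    // on distinct indices.
    void ForAll(size_t begin, size_t end, CompilationVisitor* visitor) {
      for (size_t index = begin; index != end; ++index) {
        visitor->Visit(index);
      }
    }

    class PrintVisitor : public CompilationVisitor {
     public:
      explicit PrintVisitor(const char* tag) : tag_(tag) {}
      void Visit(size_t index) override {
        std::cout << tag_ << " " << index << "\n";
      }
     private:
      const char* const tag_;
    };

    int main() {
      PrintVisitor visitor("class_def");
      ForAll(0, 3, &visitor);  // Previously: ForAll(0, 3, &SomeCallback, ...).
      return 0;
    }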
@@ -1676,7 +1685,7 @@ class ParallelCompilationManager {
// A fast version of SkipClass above if the class pointer is available
// that avoids the expensive FindInClassPath search.
static bool SkipClass(jobject class_loader, const DexFile& dex_file, mirror::Class* klass)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(klass != nullptr);
const DexFile& original_dex_file = *klass->GetDexCache()->GetDexFile();
if (&dex_file != &original_dex_file) {
@@ -1691,7 +1700,7 @@ static bool SkipClass(jobject class_loader, const DexFile& dex_file, mirror::Cla
}
static void CheckAndClearResolveException(Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
CHECK(self->IsExceptionPending());
mirror::Throwable* exception = self->GetException();
std::string temp;
@@ -1717,134 +1726,148 @@ static void CheckAndClearResolveException(Thread* self)
self->ClearException();
}
-static void ResolveClassFieldsAndMethods(const ParallelCompilationManager* manager,
- size_t class_def_index)
- LOCKS_EXCLUDED(Locks::mutator_lock_) {
- ATRACE_CALL();
- Thread* self = Thread::Current();
- jobject jclass_loader = manager->GetClassLoader();
- const DexFile& dex_file = *manager->GetDexFile();
- ClassLinker* class_linker = manager->GetClassLinker();
-
- // If an instance field is final then we need to have a barrier on the return, static final
- // fields are assigned within the lock held for class initialization. Conservatively assume
- // constructor barriers are always required.
- bool requires_constructor_barrier = true;
-
- // Method and Field are the worst. We can't resolve without either
- // context from the code use (to disambiguate virtual vs direct
- // method and instance vs static field) or from class
- // definitions. While the compiler will resolve what it can as it
- // needs it, here we try to resolve fields and methods used in class
- // definitions, since many of them many never be referenced by
- // generated code.
- const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
- ScopedObjectAccess soa(self);
- StackHandleScope<2> hs(soa.Self());
- Handle<mirror::ClassLoader> class_loader(
- hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(dex_file)));
- // Resolve the class.
- mirror::Class* klass = class_linker->ResolveType(dex_file, class_def.class_idx_, dex_cache,
- class_loader);
- bool resolve_fields_and_methods;
- if (klass == nullptr) {
- // Class couldn't be resolved, for example, super-class is in a different dex file. Don't
- // attempt to resolve methods and fields when there is no declaring class.
- CheckAndClearResolveException(soa.Self());
- resolve_fields_and_methods = false;
- } else {
- // We successfully resolved a class, should we skip it?
- if (SkipClass(jclass_loader, dex_file, klass)) {
- return;
- }
- // We want to resolve the methods and fields eagerly.
- resolve_fields_and_methods = true;
- }
- // Note the class_data pointer advances through the headers,
- // static fields, instance fields, direct methods, and virtual
- // methods.
- const uint8_t* class_data = dex_file.GetClassData(class_def);
- if (class_data == nullptr) {
- // Empty class such as a marker interface.
- requires_constructor_barrier = false;
- } else {
- ClassDataItemIterator it(dex_file, class_data);
- while (it.HasNextStaticField()) {
- if (resolve_fields_and_methods) {
- ArtField* field = class_linker->ResolveField(dex_file, it.GetMemberIndex(),
- dex_cache, class_loader, true);
- if (field == nullptr) {
- CheckAndClearResolveException(soa.Self());
- }
- }
- it.Next();
- }
- // We require a constructor barrier if there are final instance fields.
- requires_constructor_barrier = false;
- while (it.HasNextInstanceField()) {
- if (it.MemberIsFinal()) {
- requires_constructor_barrier = true;
+class ResolveClassFieldsAndMethodsVisitor : public CompilationVisitor {
+ public:
+ explicit ResolveClassFieldsAndMethodsVisitor(const ParallelCompilationManager* manager)
+ : manager_(manager) {}
+
+ virtual void Visit(size_t class_def_index) OVERRIDE REQUIRES(!Locks::mutator_lock_) {
+ ATRACE_CALL();
+ Thread* const self = Thread::Current();
+ jobject jclass_loader = manager_->GetClassLoader();
+ const DexFile& dex_file = *manager_->GetDexFile();
+ ClassLinker* class_linker = manager_->GetClassLinker();
+
+ // If an instance field is final then we need to have a barrier on the return, static final
+ // fields are assigned within the lock held for class initialization. Conservatively assume
+ // constructor barriers are always required.
+ bool requires_constructor_barrier = true;
+
+ // Method and Field are the worst. We can't resolve without either
+ // context from the code use (to disambiguate virtual vs direct
+ // method and instance vs static field) or from class
+ // definitions. While the compiler will resolve what it can as it
+ // needs it, here we try to resolve fields and methods used in class
+ // definitions, since many of them many never be referenced by
+ // generated code.
+ const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
+ ScopedObjectAccess soa(self);
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(dex_file)));
+ // Resolve the class.
+ mirror::Class* klass = class_linker->ResolveType(dex_file, class_def.class_idx_, dex_cache,
+ class_loader);
+ bool resolve_fields_and_methods;
+ if (klass == nullptr) {
+ // Class couldn't be resolved, for example, super-class is in a different dex file. Don't
+ // attempt to resolve methods and fields when there is no declaring class.
+ CheckAndClearResolveException(soa.Self());
+ resolve_fields_and_methods = false;
+ } else {
+ // We successfully resolved a class, should we skip it?
+ if (SkipClass(jclass_loader, dex_file, klass)) {
+ return;
}
- if (resolve_fields_and_methods) {
- ArtField* field = class_linker->ResolveField(dex_file, it.GetMemberIndex(),
- dex_cache, class_loader, false);
- if (field == nullptr) {
- CheckAndClearResolveException(soa.Self());
+ // We want to resolve the methods and fields eagerly.
+ resolve_fields_and_methods = true;
+ }
+ // Note the class_data pointer advances through the headers,
+ // static fields, instance fields, direct methods, and virtual
+ // methods.
+ const uint8_t* class_data = dex_file.GetClassData(class_def);
+ if (class_data == nullptr) {
+ // Empty class such as a marker interface.
+ requires_constructor_barrier = false;
+ } else {
+ ClassDataItemIterator it(dex_file, class_data);
+ while (it.HasNextStaticField()) {
+ if (resolve_fields_and_methods) {
+ ArtField* field = class_linker->ResolveField(dex_file, it.GetMemberIndex(),
+ dex_cache, class_loader, true);
+ if (field == nullptr) {
+ CheckAndClearResolveException(soa.Self());
+ }
}
+ it.Next();
}
- it.Next();
- }
- if (resolve_fields_and_methods) {
- while (it.HasNextDirectMethod()) {
- ArtMethod* method = class_linker->ResolveMethod(
- dex_file, it.GetMemberIndex(), dex_cache, class_loader, nullptr,
- it.GetMethodInvokeType(class_def));
- if (method == nullptr) {
- CheckAndClearResolveException(soa.Self());
+ // We require a constructor barrier if there are final instance fields.
+ requires_constructor_barrier = false;
+ while (it.HasNextInstanceField()) {
+ if (it.MemberIsFinal()) {
+ requires_constructor_barrier = true;
+ }
+ if (resolve_fields_and_methods) {
+ ArtField* field = class_linker->ResolveField(dex_file, it.GetMemberIndex(),
+ dex_cache, class_loader, false);
+ if (field == nullptr) {
+ CheckAndClearResolveException(soa.Self());
+ }
}
it.Next();
}
- while (it.HasNextVirtualMethod()) {
- ArtMethod* method = class_linker->ResolveMethod(
- dex_file, it.GetMemberIndex(), dex_cache, class_loader, nullptr,
- it.GetMethodInvokeType(class_def));
- if (method == nullptr) {
- CheckAndClearResolveException(soa.Self());
+ if (resolve_fields_and_methods) {
+ while (it.HasNextDirectMethod()) {
+ ArtMethod* method = class_linker->ResolveMethod(
+ dex_file, it.GetMemberIndex(), dex_cache, class_loader, nullptr,
+ it.GetMethodInvokeType(class_def));
+ if (method == nullptr) {
+ CheckAndClearResolveException(soa.Self());
+ }
+ it.Next();
}
- it.Next();
+ while (it.HasNextVirtualMethod()) {
+ ArtMethod* method = class_linker->ResolveMethod(
+ dex_file, it.GetMemberIndex(), dex_cache, class_loader, nullptr,
+ it.GetMethodInvokeType(class_def));
+ if (method == nullptr) {
+ CheckAndClearResolveException(soa.Self());
+ }
+ it.Next();
+ }
+ DCHECK(!it.HasNext());
}
- DCHECK(!it.HasNext());
+ }
+ if (requires_constructor_barrier) {
+ manager_->GetCompiler()->AddRequiresConstructorBarrier(self, &dex_file, class_def_index);
}
}
- if (requires_constructor_barrier) {
- manager->GetCompiler()->AddRequiresConstructorBarrier(self, &dex_file, class_def_index);
- }
-}
-static void ResolveType(const ParallelCompilationManager* manager, size_t type_idx)
- LOCKS_EXCLUDED(Locks::mutator_lock_) {
- // Class derived values are more complicated, they require the linker and loader.
- ScopedObjectAccess soa(Thread::Current());
- ClassLinker* class_linker = manager->GetClassLinker();
- const DexFile& dex_file = *manager->GetDexFile();
- StackHandleScope<2> hs(soa.Self());
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(dex_file)));
- Handle<mirror::ClassLoader> class_loader(
- hs.NewHandle(soa.Decode<mirror::ClassLoader*>(manager->GetClassLoader())));
- mirror::Class* klass = class_linker->ResolveType(dex_file, type_idx, dex_cache, class_loader);
+ private:
+ const ParallelCompilationManager* const manager_;
+};
- if (klass == nullptr) {
- CHECK(soa.Self()->IsExceptionPending());
- mirror::Throwable* exception = soa.Self()->GetException();
- VLOG(compiler) << "Exception during type resolution: " << exception->Dump();
- if (exception->GetClass()->DescriptorEquals("Ljava/lang/OutOfMemoryError;")) {
- // There's little point continuing compilation if the heap is exhausted.
- LOG(FATAL) << "Out of memory during type resolution for compilation";
+class ResolveTypeVisitor : public CompilationVisitor {
+ public:
+ explicit ResolveTypeVisitor(const ParallelCompilationManager* manager) : manager_(manager) {
+ }
+ virtual void Visit(size_t type_idx) OVERRIDE REQUIRES(!Locks::mutator_lock_) {
+ // Class derived values are more complicated, they require the linker and loader.
+ ScopedObjectAccess soa(Thread::Current());
+ ClassLinker* class_linker = manager_->GetClassLinker();
+ const DexFile& dex_file = *manager_->GetDexFile();
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(dex_file)));
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(manager_->GetClassLoader())));
+ mirror::Class* klass = class_linker->ResolveType(dex_file, type_idx, dex_cache, class_loader);
+
+ if (klass == nullptr) {
+ soa.Self()->AssertPendingException();
+ mirror::Throwable* exception = soa.Self()->GetException();
+ VLOG(compiler) << "Exception during type resolution: " << exception->Dump();
+ if (exception->GetClass()->DescriptorEquals("Ljava/lang/OutOfMemoryError;")) {
+ // There's little point continuing compilation if the heap is exhausted.
+ LOG(FATAL) << "Out of memory during type resolution for compilation";
+ }
+ soa.Self()->ClearException();
}
- soa.Self()->ClearException();
}
-}
+
+ private:
+ const ParallelCompilationManager* const manager_;
+};
void CompilerDriver::ResolveDexFile(jobject class_loader, const DexFile& dex_file,
const std::vector<const DexFile*>& dex_files,
@@ -1860,17 +1883,18 @@ void CompilerDriver::ResolveDexFile(jobject class_loader, const DexFile& dex_fil
// For images we resolve all types, such as array, whereas for applications just those with
// classdefs are resolved by ResolveClassFieldsAndMethods.
TimingLogger::ScopedTiming t("Resolve Types", timings);
- context.ForAll(0, dex_file.NumTypeIds(), ResolveType, thread_count_);
+ ResolveTypeVisitor visitor(&context);
+ context.ForAll(0, dex_file.NumTypeIds(), &visitor, thread_count_);
}
TimingLogger::ScopedTiming t("Resolve MethodsAndFields", timings);
- context.ForAll(0, dex_file.NumClassDefs(), ResolveClassFieldsAndMethods, thread_count_);
+ ResolveClassFieldsAndMethodsVisitor visitor(&context);
+ context.ForAll(0, dex_file.NumClassDefs(), &visitor, thread_count_);
}
void CompilerDriver::SetVerified(jobject class_loader, const std::vector<const DexFile*>& dex_files,
ThreadPool* thread_pool, TimingLogger* timings) {
- for (size_t i = 0; i != dex_files.size(); ++i) {
- const DexFile* dex_file = dex_files[i];
+ for (const DexFile* dex_file : dex_files) {
CHECK(dex_file != nullptr);
SetVerifiedDexFile(class_loader, *dex_file, dex_files, thread_pool, timings);
}
@@ -1878,67 +1902,73 @@ void CompilerDriver::SetVerified(jobject class_loader, const std::vector<const D
void CompilerDriver::Verify(jobject class_loader, const std::vector<const DexFile*>& dex_files,
ThreadPool* thread_pool, TimingLogger* timings) {
- for (size_t i = 0; i != dex_files.size(); ++i) {
- const DexFile* dex_file = dex_files[i];
+ for (const DexFile* dex_file : dex_files) {
CHECK(dex_file != nullptr);
VerifyDexFile(class_loader, *dex_file, dex_files, thread_pool, timings);
}
}
-static void VerifyClass(const ParallelCompilationManager* manager, size_t class_def_index)
- LOCKS_EXCLUDED(Locks::mutator_lock_) {
- ATRACE_CALL();
- ScopedObjectAccess soa(Thread::Current());
- const DexFile& dex_file = *manager->GetDexFile();
- const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
- const char* descriptor = dex_file.GetClassDescriptor(class_def);
- ClassLinker* class_linker = manager->GetClassLinker();
- jobject jclass_loader = manager->GetClassLoader();
- StackHandleScope<3> hs(soa.Self());
- Handle<mirror::ClassLoader> class_loader(
- hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
- Handle<mirror::Class> klass(
- hs.NewHandle(class_linker->FindClass(soa.Self(), descriptor, class_loader)));
- if (klass.Get() == nullptr) {
- CHECK(soa.Self()->IsExceptionPending());
- soa.Self()->ClearException();
+class VerifyClassVisitor : public CompilationVisitor {
+ public:
+ explicit VerifyClassVisitor(const ParallelCompilationManager* manager) : manager_(manager) {}
- /*
- * At compile time, we can still structurally verify the class even if FindClass fails.
- * This is to ensure the class is structurally sound for compilation. An unsound class
- * will be rejected by the verifier and later skipped during compilation in the compiler.
- */
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(dex_file)));
- std::string error_msg;
- if (verifier::MethodVerifier::VerifyClass(soa.Self(), &dex_file, dex_cache, class_loader,
- &class_def, true, &error_msg) ==
- verifier::MethodVerifier::kHardFailure) {
- LOG(ERROR) << "Verification failed on class " << PrettyDescriptor(descriptor)
- << " because: " << error_msg;
- manager->GetCompiler()->SetHadHardVerifierFailure();
- }
- } else if (!SkipClass(jclass_loader, dex_file, klass.Get())) {
- CHECK(klass->IsResolved()) << PrettyClass(klass.Get());
- class_linker->VerifyClass(soa.Self(), klass);
-
- if (klass->IsErroneous()) {
- // ClassLinker::VerifyClass throws, which isn't useful in the compiler.
+ virtual void Visit(size_t class_def_index) REQUIRES(!Locks::mutator_lock_) OVERRIDE {
+ ATRACE_CALL();
+ ScopedObjectAccess soa(Thread::Current());
+ const DexFile& dex_file = *manager_->GetDexFile();
+ const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
+ const char* descriptor = dex_file.GetClassDescriptor(class_def);
+ ClassLinker* class_linker = manager_->GetClassLinker();
+ jobject jclass_loader = manager_->GetClassLoader();
+ StackHandleScope<3> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
+ Handle<mirror::Class> klass(
+ hs.NewHandle(class_linker->FindClass(soa.Self(), descriptor, class_loader)));
+ if (klass.Get() == nullptr) {
CHECK(soa.Self()->IsExceptionPending());
soa.Self()->ClearException();
- manager->GetCompiler()->SetHadHardVerifierFailure();
- }
- CHECK(klass->IsCompileTimeVerified() || klass->IsErroneous())
- << PrettyDescriptor(klass.Get()) << ": state=" << klass->GetStatus();
+ /*
+ * At compile time, we can still structurally verify the class even if FindClass fails.
+ * This is to ensure the class is structurally sound for compilation. An unsound class
+ * will be rejected by the verifier and later skipped during compilation in the compiler.
+ */
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(dex_file)));
+ std::string error_msg;
+ if (verifier::MethodVerifier::VerifyClass(soa.Self(), &dex_file, dex_cache, class_loader,
+ &class_def, true, &error_msg) ==
+ verifier::MethodVerifier::kHardFailure) {
+ LOG(ERROR) << "Verification failed on class " << PrettyDescriptor(descriptor)
+ << " because: " << error_msg;
+ manager_->GetCompiler()->SetHadHardVerifierFailure();
+ }
+ } else if (!SkipClass(jclass_loader, dex_file, klass.Get())) {
+ CHECK(klass->IsResolved()) << PrettyClass(klass.Get());
+ class_linker->VerifyClass(soa.Self(), klass);
+
+ if (klass->IsErroneous()) {
+ // ClassLinker::VerifyClass throws, which isn't useful in the compiler.
+ CHECK(soa.Self()->IsExceptionPending());
+ soa.Self()->ClearException();
+ manager_->GetCompiler()->SetHadHardVerifierFailure();
+ }
- // It is *very* problematic if there are verification errors in the boot classpath. For example,
- // we rely on things working OK without verification when the decryption dialog is brought up.
- // So abort in a debug build if we find this violated.
- DCHECK(!manager->GetCompiler()->IsImage() || klass->IsVerified()) << "Boot classpath class " <<
- PrettyClass(klass.Get()) << " failed to fully verify.";
+ CHECK(klass->IsCompileTimeVerified() || klass->IsErroneous())
+ << PrettyDescriptor(klass.Get()) << ": state=" << klass->GetStatus();
+
+ // It is *very* problematic if there are verification errors in the boot classpath. For example,
+ // we rely on things working OK without verification when the decryption dialog is brought up.
+ // So abort in a debug build if we find this violated.
+ DCHECK(!manager_->GetCompiler()->IsImage() || klass->IsVerified()) << "Boot classpath class "
+ << PrettyClass(klass.Get()) << " failed to fully verify.";
+ }
+ soa.Self()->AssertNoPendingException();
}
- soa.Self()->AssertNoPendingException();
-}
+
+ private:
+ const ParallelCompilationManager* const manager_;
+};
void CompilerDriver::VerifyDexFile(jobject class_loader, const DexFile& dex_file,
const std::vector<const DexFile*>& dex_files,
@@ -1947,48 +1977,56 @@ void CompilerDriver::VerifyDexFile(jobject class_loader, const DexFile& dex_file
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
ParallelCompilationManager context(class_linker, class_loader, this, &dex_file, dex_files,
thread_pool);
- context.ForAll(0, dex_file.NumClassDefs(), VerifyClass, thread_count_);
+ VerifyClassVisitor visitor(&context);
+ context.ForAll(0, dex_file.NumClassDefs(), &visitor, thread_count_);
}
-static void SetVerifiedClass(const ParallelCompilationManager* manager, size_t class_def_index)
- LOCKS_EXCLUDED(Locks::mutator_lock_) {
- ATRACE_CALL();
- ScopedObjectAccess soa(Thread::Current());
- const DexFile& dex_file = *manager->GetDexFile();
- const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
- const char* descriptor = dex_file.GetClassDescriptor(class_def);
- ClassLinker* class_linker = manager->GetClassLinker();
- jobject jclass_loader = manager->GetClassLoader();
- StackHandleScope<3> hs(soa.Self());
- Handle<mirror::ClassLoader> class_loader(
- hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
- Handle<mirror::Class> klass(
- hs.NewHandle(class_linker->FindClass(soa.Self(), descriptor, class_loader)));
- // Class might have failed resolution. Then don't set it to verified.
- if (klass.Get() != nullptr) {
- // Only do this if the class is resolved. If even resolution fails, quickening will go very,
- // very wrong.
- if (klass->IsResolved()) {
- if (klass->GetStatus() < mirror::Class::kStatusVerified) {
- ObjectLock<mirror::Class> lock(soa.Self(), klass);
- // Set class status to verified.
- mirror::Class::SetStatus(klass, mirror::Class::kStatusVerified, soa.Self());
- // Mark methods as pre-verified. If we don't do this, the interpreter will run with
- // access checks.
- klass->SetPreverifiedFlagOnAllMethods(
- GetInstructionSetPointerSize(manager->GetCompiler()->GetInstructionSet()));
- klass->SetPreverified();
+class SetVerifiedClassVisitor : public CompilationVisitor {
+ public:
+ explicit SetVerifiedClassVisitor(const ParallelCompilationManager* manager) : manager_(manager) {}
+
+ virtual void Visit(size_t class_def_index) REQUIRES(!Locks::mutator_lock_) OVERRIDE {
+ ATRACE_CALL();
+ ScopedObjectAccess soa(Thread::Current());
+ const DexFile& dex_file = *manager_->GetDexFile();
+ const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
+ const char* descriptor = dex_file.GetClassDescriptor(class_def);
+ ClassLinker* class_linker = manager_->GetClassLinker();
+ jobject jclass_loader = manager_->GetClassLoader();
+ StackHandleScope<3> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
+ Handle<mirror::Class> klass(
+ hs.NewHandle(class_linker->FindClass(soa.Self(), descriptor, class_loader)));
+ // Class might have failed resolution. Then don't set it to verified.
+ if (klass.Get() != nullptr) {
+ // Only do this if the class is resolved. If even resolution fails, quickening will go very,
+ // very wrong.
+ if (klass->IsResolved()) {
+ if (klass->GetStatus() < mirror::Class::kStatusVerified) {
+ ObjectLock<mirror::Class> lock(soa.Self(), klass);
+ // Set class status to verified.
+ mirror::Class::SetStatus(klass, mirror::Class::kStatusVerified, soa.Self());
+ // Mark methods as pre-verified. If we don't do this, the interpreter will run with
+ // access checks.
+ klass->SetPreverifiedFlagOnAllMethods(
+ GetInstructionSetPointerSize(manager_->GetCompiler()->GetInstructionSet()));
+ klass->SetPreverified();
+ }
+ // Record the final class status if necessary.
+ ClassReference ref(manager_->GetDexFile(), class_def_index);
+ manager_->GetCompiler()->RecordClassStatus(ref, klass->GetStatus());
}
- // Record the final class status if necessary.
- ClassReference ref(manager->GetDexFile(), class_def_index);
- manager->GetCompiler()->RecordClassStatus(ref, klass->GetStatus());
+ } else {
+ Thread* self = soa.Self();
+ DCHECK(self->IsExceptionPending());
+ self->ClearException();
}
- } else {
- Thread* self = soa.Self();
- DCHECK(self->IsExceptionPending());
- self->ClearException();
}
-}
+
+ private:
+ const ParallelCompilationManager* const manager_;
+};
void CompilerDriver::SetVerifiedDexFile(jobject class_loader, const DexFile& dex_file,
const std::vector<const DexFile*>& dex_files,
@@ -1997,99 +2035,107 @@ void CompilerDriver::SetVerifiedDexFile(jobject class_loader, const DexFile& dex
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
ParallelCompilationManager context(class_linker, class_loader, this, &dex_file, dex_files,
thread_pool);
- context.ForAll(0, dex_file.NumClassDefs(), SetVerifiedClass, thread_count_);
+ SetVerifiedClassVisitor visitor(&context);
+ context.ForAll(0, dex_file.NumClassDefs(), &visitor, thread_count_);
}
-static void InitializeClass(const ParallelCompilationManager* manager, size_t class_def_index)
- LOCKS_EXCLUDED(Locks::mutator_lock_) {
- ATRACE_CALL();
- jobject jclass_loader = manager->GetClassLoader();
- const DexFile& dex_file = *manager->GetDexFile();
- const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
- const DexFile::TypeId& class_type_id = dex_file.GetTypeId(class_def.class_idx_);
- const char* descriptor = dex_file.StringDataByIdx(class_type_id.descriptor_idx_);
+class InitializeClassVisitor : public CompilationVisitor {
+ public:
+ explicit InitializeClassVisitor(const ParallelCompilationManager* manager) : manager_(manager) {}
- ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<3> hs(soa.Self());
- Handle<mirror::ClassLoader> class_loader(
- hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
- Handle<mirror::Class> klass(
- hs.NewHandle(manager->GetClassLinker()->FindClass(soa.Self(), descriptor, class_loader)));
-
- if (klass.Get() != nullptr && !SkipClass(jclass_loader, dex_file, klass.Get())) {
- // Only try to initialize classes that were successfully verified.
- if (klass->IsVerified()) {
- // Attempt to initialize the class but bail if we either need to initialize the super-class
- // or static fields.
- manager->GetClassLinker()->EnsureInitialized(soa.Self(), klass, false, false);
- if (!klass->IsInitialized()) {
- // We don't want non-trivial class initialization occurring on multiple threads due to
- // deadlock problems. For example, a parent class is initialized (holding its lock) that
- // refers to a sub-class in its static/class initializer causing it to try to acquire the
- // sub-class' lock. While on a second thread the sub-class is initialized (holding its lock)
- // after first initializing its parents, whose locks are acquired. This leads to a
- // parent-to-child and a child-to-parent lock ordering and consequent potential deadlock.
- // We need to use an ObjectLock due to potential suspension in the interpreting code. Rather
- // than use a special Object for the purpose we use the Class of java.lang.Class.
- Handle<mirror::Class> h_klass(hs.NewHandle(klass->GetClass()));
- ObjectLock<mirror::Class> lock(soa.Self(), h_klass);
- // Attempt to initialize allowing initialization of parent classes but still not static
- // fields.
- manager->GetClassLinker()->EnsureInitialized(soa.Self(), klass, false, true);
+ virtual void Visit(size_t class_def_index) REQUIRES(!Locks::mutator_lock_) OVERRIDE {
+ ATRACE_CALL();
+ jobject jclass_loader = manager_->GetClassLoader();
+ const DexFile& dex_file = *manager_->GetDexFile();
+ const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
+ const DexFile::TypeId& class_type_id = dex_file.GetTypeId(class_def.class_idx_);
+ const char* descriptor = dex_file.StringDataByIdx(class_type_id.descriptor_idx_);
+
+ ScopedObjectAccess soa(Thread::Current());
+ StackHandleScope<3> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
+ Handle<mirror::Class> klass(
+ hs.NewHandle(manager_->GetClassLinker()->FindClass(soa.Self(), descriptor, class_loader)));
+
+ if (klass.Get() != nullptr && !SkipClass(jclass_loader, dex_file, klass.Get())) {
+ // Only try to initialize classes that were successfully verified.
+ if (klass->IsVerified()) {
+ // Attempt to initialize the class but bail if we either need to initialize the super-class
+ // or static fields.
+ manager_->GetClassLinker()->EnsureInitialized(soa.Self(), klass, false, false);
if (!klass->IsInitialized()) {
- // We need to initialize static fields, we only do this for image classes that aren't
- // marked with the $NoPreloadHolder (which implies this should not be initialized early).
- bool can_init_static_fields = manager->GetCompiler()->IsImage() &&
- manager->GetCompiler()->IsImageClass(descriptor) &&
- !StringPiece(descriptor).ends_with("$NoPreloadHolder;");
- if (can_init_static_fields) {
- VLOG(compiler) << "Initializing: " << descriptor;
- // TODO multithreading support. We should ensure the current compilation thread has
- // exclusive access to the runtime and the transaction. To achieve this, we could use
- // a ReaderWriterMutex but we're holding the mutator lock so we fail mutex sanity
- // checks in Thread::AssertThreadSuspensionIsAllowable.
- Runtime* const runtime = Runtime::Current();
- Transaction transaction;
-
- // Run the class initializer in transaction mode.
- runtime->EnterTransactionMode(&transaction);
- const mirror::Class::Status old_status = klass->GetStatus();
- bool success = manager->GetClassLinker()->EnsureInitialized(soa.Self(), klass, true,
- true);
- // TODO we detach transaction from runtime to indicate we quit the transactional
- // mode which prevents the GC from visiting objects modified during the transaction.
- // Ensure GC is not run so don't access freed objects when aborting transaction.
-
- ScopedAssertNoThreadSuspension ants(soa.Self(), "Transaction end");
- runtime->ExitTransactionMode();
-
- if (!success) {
- CHECK(soa.Self()->IsExceptionPending());
- mirror::Throwable* exception = soa.Self()->GetException();
- VLOG(compiler) << "Initialization of " << descriptor << " aborted because of "
- << exception->Dump();
- std::ostream* file_log = manager->GetCompiler()->
- GetCompilerOptions().GetInitFailureOutput();
- if (file_log != nullptr) {
- *file_log << descriptor << "\n";
- *file_log << exception->Dump() << "\n";
+ // We don't want non-trivial class initialization occurring on multiple threads due to
+ // deadlock problems. For example, a parent class is initialized (holding its lock) that
+ // refers to a sub-class in its static/class initializer causing it to try to acquire the
+ // sub-class' lock. While on a second thread the sub-class is initialized (holding its lock)
+ // after first initializing its parents, whose locks are acquired. This leads to a
+ // parent-to-child and a child-to-parent lock ordering and consequent potential deadlock.
+ // We need to use an ObjectLock due to potential suspension in the interpreting code. Rather
+ // than use a special Object for the purpose we use the Class of java.lang.Class.
+ Handle<mirror::Class> h_klass(hs.NewHandle(klass->GetClass()));
+ ObjectLock<mirror::Class> lock(soa.Self(), h_klass);
+ // Attempt to initialize allowing initialization of parent classes but still not static
+ // fields.
+ manager_->GetClassLinker()->EnsureInitialized(soa.Self(), klass, false, true);
+ if (!klass->IsInitialized()) {
+ // We need to initialize static fields, we only do this for image classes that aren't
+ // marked with the $NoPreloadHolder (which implies this should not be initialized early).
+ bool can_init_static_fields = manager_->GetCompiler()->IsImage() &&
+ manager_->GetCompiler()->IsImageClass(descriptor) &&
+ !StringPiece(descriptor).ends_with("$NoPreloadHolder;");
+ if (can_init_static_fields) {
+ VLOG(compiler) << "Initializing: " << descriptor;
+ // TODO multithreading support. We should ensure the current compilation thread has
+ // exclusive access to the runtime and the transaction. To achieve this, we could use
+ // a ReaderWriterMutex but we're holding the mutator lock so we fail mutex sanity
+ // checks in Thread::AssertThreadSuspensionIsAllowable.
+ Runtime* const runtime = Runtime::Current();
+ Transaction transaction;
+
+ // Run the class initializer in transaction mode.
+ runtime->EnterTransactionMode(&transaction);
+ const mirror::Class::Status old_status = klass->GetStatus();
+ bool success = manager_->GetClassLinker()->EnsureInitialized(soa.Self(), klass, true,
+ true);
+ // TODO: we detach the transaction from the runtime to indicate we quit the transactional
+ // mode, which prevents the GC from visiting objects modified during the transaction.
+ // Ensure the GC is not run, so we don't access freed objects when aborting the transaction.
+
+ ScopedAssertNoThreadSuspension ants(soa.Self(), "Transaction end");
+ runtime->ExitTransactionMode();
+
+ if (!success) {
+ CHECK(soa.Self()->IsExceptionPending());
+ mirror::Throwable* exception = soa.Self()->GetException();
+ VLOG(compiler) << "Initialization of " << descriptor << " aborted because of "
+ << exception->Dump();
+ std::ostream* file_log = manager_->GetCompiler()->
+ GetCompilerOptions().GetInitFailureOutput();
+ if (file_log != nullptr) {
+ *file_log << descriptor << "\n";
+ *file_log << exception->Dump() << "\n";
+ }
+ soa.Self()->ClearException();
+ transaction.Rollback();
+ CHECK_EQ(old_status, klass->GetStatus()) << "Previous class status not restored";
}
- soa.Self()->ClearException();
- transaction.Rollback();
- CHECK_EQ(old_status, klass->GetStatus()) << "Previous class status not restored";
}
}
+ soa.Self()->AssertNoPendingException();
}
- soa.Self()->AssertNoPendingException();
}
+ // Record the final class status if necessary.
+ ClassReference ref(manager_->GetDexFile(), class_def_index);
+ manager_->GetCompiler()->RecordClassStatus(ref, klass->GetStatus());
}
- // Record the final class status if necessary.
- ClassReference ref(manager->GetDexFile(), class_def_index);
- manager->GetCompiler()->RecordClassStatus(ref, klass->GetStatus());
+ // Clear any class not found or verification exceptions.
+ soa.Self()->ClearException();
}
- // Clear any class not found or verification exceptions.
- soa.Self()->ClearException();
-}
+
+ private:
+ const ParallelCompilationManager* const manager_;
+};
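
The InitializeClassVisitor above runs each class initializer inside a runtime transaction so that a failed <clinit> can be undone and the class status restored. Condensed to its core, and using only the calls visible in this hunk (the ObjectLock, logging, and the no-suspension scope are omitted), the pattern is roughly:

    // Sketch of the transactional initialization used above; a fragment of the
    // surrounding Visit() method, not standalone code.
    Runtime* const runtime = Runtime::Current();
    Transaction transaction;
    runtime->EnterTransactionMode(&transaction);
    const mirror::Class::Status old_status = klass->GetStatus();
    // Run <clinit> in the interpreter; 'true, true' requests full initialization.
    bool success = manager_->GetClassLinker()->EnsureInitialized(soa.Self(), klass, true, true);
    runtime->ExitTransactionMode();
    if (!success) {
      soa.Self()->ClearException();
      transaction.Rollback();  // Undo every write the initializer performed.
      CHECK_EQ(old_status, klass->GetStatus()) << "Previous class status not restored";
    }
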
void CompilerDriver::InitializeClasses(jobject jni_class_loader, const DexFile& dex_file,
const std::vector<const DexFile*>& dex_files,
@@ -2105,7 +2151,8 @@ void CompilerDriver::InitializeClasses(jobject jni_class_loader, const DexFile&
} else {
thread_count = thread_count_;
}
- context.ForAll(0, dex_file.NumClassDefs(), InitializeClass, thread_count);
+ InitializeClassVisitor visitor(&context);
+ context.ForAll(0, dex_file.NumClassDefs(), &visitor, thread_count);
}
void CompilerDriver::InitializeClasses(jobject class_loader,
@@ -2132,101 +2179,108 @@ void CompilerDriver::Compile(jobject class_loader, const std::vector<const DexFi
VLOG(compiler) << "Compile: " << GetMemoryUsageString(false);
}
-void CompilerDriver::CompileClass(const ParallelCompilationManager* manager,
- size_t class_def_index) {
- ATRACE_CALL();
- const DexFile& dex_file = *manager->GetDexFile();
- const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
- ClassLinker* class_linker = manager->GetClassLinker();
- jobject jclass_loader = manager->GetClassLoader();
- Thread* self = Thread::Current();
- {
- // Use a scoped object access to perform to the quick SkipClass check.
- const char* descriptor = dex_file.GetClassDescriptor(class_def);
- ScopedObjectAccess soa(self);
- StackHandleScope<3> hs(soa.Self());
- Handle<mirror::ClassLoader> class_loader(
- hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
- Handle<mirror::Class> klass(
- hs.NewHandle(class_linker->FindClass(soa.Self(), descriptor, class_loader)));
- if (klass.Get() == nullptr) {
- CHECK(soa.Self()->IsExceptionPending());
- soa.Self()->ClearException();
- } else if (SkipClass(jclass_loader, dex_file, klass.Get())) {
+class CompileClassVisitor : public CompilationVisitor {
+ public:
+ explicit CompileClassVisitor(const ParallelCompilationManager* manager) : manager_(manager) {}
+
+ virtual void Visit(size_t class_def_index) REQUIRES(!Locks::mutator_lock_) OVERRIDE {
+ ATRACE_CALL();
+ const DexFile& dex_file = *manager_->GetDexFile();
+ const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
+ ClassLinker* class_linker = manager_->GetClassLinker();
+ jobject jclass_loader = manager_->GetClassLoader();
+ Thread* self = Thread::Current();
+ {
+ // Use a scoped object access to perform the quick SkipClass check.
+ const char* descriptor = dex_file.GetClassDescriptor(class_def);
+ ScopedObjectAccess soa(self);
+ StackHandleScope<3> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
+ Handle<mirror::Class> klass(
+ hs.NewHandle(class_linker->FindClass(soa.Self(), descriptor, class_loader)));
+ if (klass.Get() == nullptr) {
+ CHECK(soa.Self()->IsExceptionPending());
+ soa.Self()->ClearException();
+ } else if (SkipClass(jclass_loader, dex_file, klass.Get())) {
+ return;
+ }
+ }
+ ClassReference ref(&dex_file, class_def_index);
+ // Skip compiling classes with generic verifier failures since they will still fail at runtime
+ if (manager_->GetCompiler()->verification_results_->IsClassRejected(ref)) {
+ return;
+ }
+ const uint8_t* class_data = dex_file.GetClassData(class_def);
+ if (class_data == nullptr) {
+ // empty class, probably a marker interface
return;
}
- }
- ClassReference ref(&dex_file, class_def_index);
- // Skip compiling classes with generic verifier failures since they will still fail at runtime
- if (manager->GetCompiler()->verification_results_->IsClassRejected(ref)) {
- return;
- }
- const uint8_t* class_data = dex_file.GetClassData(class_def);
- if (class_data == nullptr) {
- // empty class, probably a marker interface
- return;
- }
- CompilerDriver* const driver = manager->GetCompiler();
+ CompilerDriver* const driver = manager_->GetCompiler();
- // Can we run DEX-to-DEX compiler on this class ?
- DexToDexCompilationLevel dex_to_dex_compilation_level = kDontDexToDexCompile;
- {
- ScopedObjectAccess soa(self);
- StackHandleScope<1> hs(soa.Self());
- Handle<mirror::ClassLoader> class_loader(
- hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
- dex_to_dex_compilation_level = driver->GetDexToDexCompilationlevel(
- soa.Self(), class_loader, dex_file, class_def);
- }
- ClassDataItemIterator it(dex_file, class_data);
- // Skip fields
- while (it.HasNextStaticField()) {
- it.Next();
- }
- while (it.HasNextInstanceField()) {
- it.Next();
- }
+ // Can we run the DEX-to-DEX compiler on this class?
+ DexToDexCompilationLevel dex_to_dex_compilation_level = kDontDexToDexCompile;
+ {
+ ScopedObjectAccess soa(self);
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
+ dex_to_dex_compilation_level = driver->GetDexToDexCompilationlevel(
+ soa.Self(), class_loader, dex_file, class_def);
+ }
+ ClassDataItemIterator it(dex_file, class_data);
+ // Skip fields
+ while (it.HasNextStaticField()) {
+ it.Next();
+ }
+ while (it.HasNextInstanceField()) {
+ it.Next();
+ }
- bool compilation_enabled = driver->IsClassToCompile(
- dex_file.StringByTypeIdx(class_def.class_idx_));
+ bool compilation_enabled = driver->IsClassToCompile(
+ dex_file.StringByTypeIdx(class_def.class_idx_));
- // Compile direct methods
- int64_t previous_direct_method_idx = -1;
- while (it.HasNextDirectMethod()) {
- uint32_t method_idx = it.GetMemberIndex();
- if (method_idx == previous_direct_method_idx) {
- // smali can create dex files with two encoded_methods sharing the same method_idx
- // http://code.google.com/p/smali/issues/detail?id=119
+ // Compile direct methods
+ int64_t previous_direct_method_idx = -1;
+ while (it.HasNextDirectMethod()) {
+ uint32_t method_idx = it.GetMemberIndex();
+ if (method_idx == previous_direct_method_idx) {
+ // smali can create dex files with two encoded_methods sharing the same method_idx
+ // http://code.google.com/p/smali/issues/detail?id=119
+ it.Next();
+ continue;
+ }
+ previous_direct_method_idx = method_idx;
+ driver->CompileMethod(self, it.GetMethodCodeItem(), it.GetMethodAccessFlags(),
+ it.GetMethodInvokeType(class_def), class_def_index,
+ method_idx, jclass_loader, dex_file, dex_to_dex_compilation_level,
+ compilation_enabled);
it.Next();
- continue;
- }
- previous_direct_method_idx = method_idx;
- driver->CompileMethod(self, it.GetMethodCodeItem(), it.GetMethodAccessFlags(),
- it.GetMethodInvokeType(class_def), class_def_index,
- method_idx, jclass_loader, dex_file, dex_to_dex_compilation_level,
- compilation_enabled);
- it.Next();
- }
- // Compile virtual methods
- int64_t previous_virtual_method_idx = -1;
- while (it.HasNextVirtualMethod()) {
- uint32_t method_idx = it.GetMemberIndex();
- if (method_idx == previous_virtual_method_idx) {
- // smali can create dex files with two encoded_methods sharing the same method_idx
- // http://code.google.com/p/smali/issues/detail?id=119
+ }
+ // Compile virtual methods
+ int64_t previous_virtual_method_idx = -1;
+ while (it.HasNextVirtualMethod()) {
+ uint32_t method_idx = it.GetMemberIndex();
+ if (method_idx == previous_virtual_method_idx) {
+ // smali can create dex files with two encoded_methods sharing the same method_idx
+ // http://code.google.com/p/smali/issues/detail?id=119
+ it.Next();
+ continue;
+ }
+ previous_virtual_method_idx = method_idx;
+ driver->CompileMethod(self, it.GetMethodCodeItem(), it.GetMethodAccessFlags(),
+ it.GetMethodInvokeType(class_def), class_def_index,
+ method_idx, jclass_loader, dex_file, dex_to_dex_compilation_level,
+ compilation_enabled);
it.Next();
- continue;
}
- previous_virtual_method_idx = method_idx;
- driver->CompileMethod(self, it.GetMethodCodeItem(), it.GetMethodAccessFlags(),
- it.GetMethodInvokeType(class_def), class_def_index,
- method_idx, jclass_loader, dex_file, dex_to_dex_compilation_level,
- compilation_enabled);
- it.Next();
+ DCHECK(!it.HasNext());
}
- DCHECK(!it.HasNext());
-}
+
+ private:
+ const ParallelCompilationManager* const manager_;
+};
void CompilerDriver::CompileDexFile(jobject class_loader, const DexFile& dex_file,
const std::vector<const DexFile*>& dex_files,
@@ -2234,7 +2288,8 @@ void CompilerDriver::CompileDexFile(jobject class_loader, const DexFile& dex_fil
TimingLogger::ScopedTiming t("Compile Dex File", timings);
ParallelCompilationManager context(Runtime::Current()->GetClassLinker(), class_loader, this,
&dex_file, dex_files, thread_pool);
- context.ForAll(0, dex_file.NumClassDefs(), CompilerDriver::CompileClass, thread_count_);
+ CompileClassVisitor visitor(&context);
+ context.ForAll(0, dex_file.NumClassDefs(), &visitor, thread_count_);
}
// Does the runtime for the InstructionSet provide an implementation returned by
@@ -2453,7 +2508,7 @@ bool CompilerDriver::WriteElf(const std::string& android_root,
const std::vector<const art::DexFile*>& dex_files,
OatWriter* oat_writer,
art::File* file)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (kProduce64BitELFFiles && Is64BitInstructionSet(GetInstructionSet())) {
return art::ElfWriterQuick64::Create(file, oat_writer, dex_files, android_root, is_host, *this);
} else {
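
Throughout compiler_driver.cc this change replaces the free functions previously handed to ParallelCompilationManager::ForAll (such as CompileClass and InitializeClass) with small CompilationVisitor objects. The CompilationVisitor base class itself is not shown in these hunks; assuming it is the obvious one-method interface, the refactored pattern looks roughly like this:

    // Assumed shape of the base class; its real declaration lives elsewhere and may differ.
    class CompilationVisitor {
     public:
      virtual ~CompilationVisitor() {}
      virtual void Visit(size_t class_def_index) = 0;
    };

    // Usage as in the hunks above: build the visitor once, pass a pointer to ForAll,
    // which calls Visit(i) for every class_def_index on the worker threads.
    CompileClassVisitor visitor(&context);
    context.ForAll(0, dex_file.NumClassDefs(), &visitor, thread_count_);

Compared with a bare function pointer, the visitor can carry state (here the ParallelCompilationManager) and its Visit() method can be annotated with the new REQUIRES-style thread-safety attributes.
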
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 5cf4044fd4..88e03a231f 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -114,14 +114,15 @@ class CompilerDriver {
void CompileAll(jobject class_loader, const std::vector<const DexFile*>& dex_files,
TimingLogger* timings)
- LOCKS_EXCLUDED(Locks::mutator_lock_);
+ REQUIRES(!Locks::mutator_lock_, !compiled_classes_lock_);
CompiledMethod* CompileMethod(Thread* self, ArtMethod*)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) WARN_UNUSED;
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!compiled_methods_lock_) WARN_UNUSED;
// Compile a single Method.
void CompileOne(Thread* self, ArtMethod* method, TimingLogger* timings)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!compiled_methods_lock_, !compiled_classes_lock_);
VerificationResults* GetVerificationResults() const {
return verification_results_;
@@ -162,54 +163,56 @@ class CompilerDriver {
// Generate the trampolines that are invoked by unresolved direct methods.
const std::vector<uint8_t>* CreateInterpreterToInterpreterBridge() const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
const std::vector<uint8_t>* CreateInterpreterToCompiledCodeBridge() const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
const std::vector<uint8_t>* CreateJniDlsymLookup() const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
const std::vector<uint8_t>* CreateQuickGenericJniTrampoline() const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
const std::vector<uint8_t>* CreateQuickImtConflictTrampoline() const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
const std::vector<uint8_t>* CreateQuickResolutionTrampoline() const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
const std::vector<uint8_t>* CreateQuickToInterpreterBridge() const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
CompiledClass* GetCompiledClass(ClassReference ref) const
- LOCKS_EXCLUDED(compiled_classes_lock_);
+ REQUIRES(!compiled_classes_lock_);
CompiledMethod* GetCompiledMethod(MethodReference ref) const
- LOCKS_EXCLUDED(compiled_methods_lock_);
+ REQUIRES(!compiled_methods_lock_);
size_t GetNonRelativeLinkerPatchCount() const
- LOCKS_EXCLUDED(compiled_methods_lock_);
+ REQUIRES(!compiled_methods_lock_);
// Remove and delete a compiled method.
- void RemoveCompiledMethod(const MethodReference& method_ref);
+ void RemoveCompiledMethod(const MethodReference& method_ref) REQUIRES(!compiled_methods_lock_);
void AddRequiresConstructorBarrier(Thread* self, const DexFile* dex_file,
- uint16_t class_def_index);
+ uint16_t class_def_index)
+ REQUIRES(!freezing_constructor_lock_);
bool RequiresConstructorBarrier(Thread* self, const DexFile* dex_file,
- uint16_t class_def_index) const;
+ uint16_t class_def_index) const
+ REQUIRES(!freezing_constructor_lock_);
// Callbacks from compiler to see what runtime checks must be generated.
bool CanAssumeTypeIsPresentInDexCache(const DexFile& dex_file, uint32_t type_idx);
bool CanAssumeStringIsPresentInDexCache(const DexFile& dex_file, uint32_t string_idx)
- LOCKS_EXCLUDED(Locks::mutator_lock_);
+ REQUIRES(!Locks::mutator_lock_);
// Are runtime access checks necessary in the compiled code?
bool CanAccessTypeWithoutChecks(uint32_t referrer_idx, const DexFile& dex_file,
uint32_t type_idx, bool* type_known_final = nullptr,
bool* type_known_abstract = nullptr,
bool* equals_referrers_class = nullptr)
- LOCKS_EXCLUDED(Locks::mutator_lock_);
+ REQUIRES(!Locks::mutator_lock_);
// Are runtime access and instantiable checks necessary in the code?
bool CanAccessInstantiableTypeWithoutChecks(uint32_t referrer_idx, const DexFile& dex_file,
uint32_t type_idx)
- LOCKS_EXCLUDED(Locks::mutator_lock_);
+ REQUIRES(!Locks::mutator_lock_);
bool CanEmbedTypeInCode(const DexFile& dex_file, uint32_t type_idx,
bool* is_type_initialized, bool* use_direct_type_ptr,
@@ -223,22 +226,22 @@ class CompilerDriver {
  // Get the DexCache for the compilation unit.
mirror::DexCache* GetDexCache(const DexCompilationUnit* mUnit)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
mirror::ClassLoader* GetClassLoader(ScopedObjectAccess& soa, const DexCompilationUnit* mUnit)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Resolve compiling method's class. Returns null on failure.
mirror::Class* ResolveCompilingMethodsClass(
const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
mirror::Class* ResolveClass(
const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, uint16_t type_index,
const DexCompilationUnit* mUnit)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Resolve a field. Returns null on failure, including incompatible class change.
// NOTE: Unlike ClassLinker's ResolveField(), this method enforces is_static.
@@ -246,40 +249,40 @@ class CompilerDriver {
const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
uint32_t field_idx, bool is_static)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Resolve a field with a given dex file.
ArtField* ResolveFieldWithDexFile(
const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, const DexFile* dex_file,
uint32_t field_idx, bool is_static)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Get declaration location of a resolved field.
void GetResolvedFieldDexFileLocation(
ArtField* resolved_field, const DexFile** declaring_dex_file,
uint16_t* declaring_class_idx, uint16_t* declaring_field_idx)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- bool IsFieldVolatile(ArtField* field) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- MemberOffset GetFieldOffset(ArtField* field) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsFieldVolatile(ArtField* field) SHARED_REQUIRES(Locks::mutator_lock_);
+ MemberOffset GetFieldOffset(ArtField* field) SHARED_REQUIRES(Locks::mutator_lock_);
// Find a dex cache for a dex file.
inline mirror::DexCache* FindDexCache(const DexFile* dex_file)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Can we fast-path an IGET/IPUT access to an instance field? If yes, compute the field offset.
std::pair<bool, bool> IsFastInstanceField(
mirror::DexCache* dex_cache, mirror::Class* referrer_class,
ArtField* resolved_field, uint16_t field_idx)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Can we fast-path an SGET/SPUT access to a static field? If yes, compute the type index
// of the declaring class in the referrer's dex file.
std::pair<bool, bool> IsFastStaticField(
mirror::DexCache* dex_cache, mirror::Class* referrer_class,
ArtField* resolved_field, uint16_t field_idx, uint32_t* storage_index)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Return whether the declaring class of `resolved_method` is
// available to `referrer_class`. If this is true, compute the type
@@ -291,34 +294,34 @@ class CompilerDriver {
ArtMethod* resolved_method,
uint16_t method_idx,
uint32_t* storage_index)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
  // Is the static field in the referrer's class?
bool IsStaticFieldInReferrerClass(mirror::Class* referrer_class, ArtField* resolved_field)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Is static field's class initialized?
bool IsStaticFieldsClassInitialized(mirror::Class* referrer_class,
ArtField* resolved_field)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Resolve a method. Returns null on failure, including incompatible class change.
ArtMethod* ResolveMethod(
ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
uint32_t method_idx, InvokeType invoke_type, bool check_incompatible_class_change = true)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
  // Get declaration location of a resolved method.
void GetResolvedMethodDexFileLocation(
ArtMethod* resolved_method, const DexFile** declaring_dex_file,
uint16_t* declaring_class_idx, uint16_t* declaring_method_idx)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Get the index in the vtable of the method.
uint16_t GetResolvedMethodVTableIndex(
ArtMethod* resolved_method, InvokeType type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Can we fast-path an INVOKE? If no, returns 0. If yes, returns a non-zero opaque flags value
// for ProcessedInvoke() and computes the necessary lowering info.
@@ -328,13 +331,13 @@ class CompilerDriver {
mirror::Class* referrer_class, ArtMethod* resolved_method, InvokeType* invoke_type,
MethodReference* target_method, const MethodReference* devirt_target,
uintptr_t* direct_code, uintptr_t* direct_method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Is method's class initialized for an invoke?
// For static invokes to determine whether we need to consider potential call to <clinit>().
// For non-static invokes, assuming a non-null reference, the class is always initialized.
bool IsMethodsClassInitialized(mirror::Class* referrer_class, ArtMethod* resolved_method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Get the layout of dex cache arrays for a dex file. Returns invalid layout if the
// dex cache arrays don't have a fixed layout.
@@ -349,18 +352,18 @@ class CompilerDriver {
ArtField** resolved_field,
mirror::Class** referrer_class,
mirror::DexCache** dex_cache)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Can we fast path instance field access? Computes field's offset and volatility.
bool ComputeInstanceFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit, bool is_put,
MemberOffset* field_offset, bool* is_volatile)
- LOCKS_EXCLUDED(Locks::mutator_lock_);
+ REQUIRES(!Locks::mutator_lock_);
ArtField* ComputeInstanceFieldInfo(uint32_t field_idx,
const DexCompilationUnit* mUnit,
bool is_put,
const ScopedObjectAccess& soa)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Can we fastpath static field access? Computes field's offset, volatility and whether the
@@ -369,7 +372,7 @@ class CompilerDriver {
MemberOffset* field_offset, uint32_t* storage_index,
bool* is_referrers_class, bool* is_volatile, bool* is_initialized,
Primitive::Type* type)
- LOCKS_EXCLUDED(Locks::mutator_lock_);
+ REQUIRES(!Locks::mutator_lock_);
  // Can we fastpath an interface, super class or virtual method call? Computes method's vtable
// index.
@@ -377,7 +380,7 @@ class CompilerDriver {
bool update_stats, bool enable_devirtualization,
InvokeType* type, MethodReference* target_method, int* vtable_idx,
uintptr_t* direct_code, uintptr_t* direct_method)
- LOCKS_EXCLUDED(Locks::mutator_lock_);
+ REQUIRES(!Locks::mutator_lock_);
const VerifiedMethod* GetVerifiedMethod(const DexFile* dex_file, uint32_t method_idx) const;
bool IsSafeCast(const DexCompilationUnit* mUnit, uint32_t dex_pc);
@@ -445,7 +448,7 @@ class CompilerDriver {
bool IsMethodToCompile(const MethodReference& method_ref) const;
void RecordClassStatus(ClassReference ref, mirror::Class::Status status)
- LOCKS_EXCLUDED(compiled_classes_lock_);
+ REQUIRES(!compiled_classes_lock_);
// Checks if the specified method has been verified without failures. Returns
// false if the method is not in the verification results (GetVerificationResults).
@@ -487,7 +490,7 @@ class CompilerDriver {
ArtMember* resolved_member,
uint16_t member_idx,
uint32_t* storage_index)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Can `referrer_class` access the resolved `member`?
// Dispatch call to mirror::Class::CanAccessResolvedField or
@@ -499,17 +502,17 @@ class CompilerDriver {
ArtMember* member,
mirror::DexCache* dex_cache,
uint32_t field_idx)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Can we assume that the klass is initialized?
bool CanAssumeClassIsInitialized(mirror::Class* klass)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool CanReferrerAssumeClassIsInitialized(mirror::Class* referrer_class, mirror::Class* klass)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Can we assume that the klass is loaded?
bool CanAssumeClassIsLoaded(mirror::Class* klass)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// These flags are internal to CompilerDriver for collecting INVOKE resolution statistics.
// The only external contract is that unresolved method has flags 0 and resolved non-0.
@@ -540,71 +543,68 @@ class CompilerDriver {
/*out*/int* stats_flags,
MethodReference* target_method,
uintptr_t* direct_code, uintptr_t* direct_method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
private:
DexToDexCompilationLevel GetDexToDexCompilationlevel(
Thread* self, Handle<mirror::ClassLoader> class_loader, const DexFile& dex_file,
- const DexFile::ClassDef& class_def) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const DexFile::ClassDef& class_def) SHARED_REQUIRES(Locks::mutator_lock_);
void PreCompile(jobject class_loader, const std::vector<const DexFile*>& dex_files,
ThreadPool* thread_pool, TimingLogger* timings)
- LOCKS_EXCLUDED(Locks::mutator_lock_);
+ REQUIRES(!Locks::mutator_lock_, !compiled_classes_lock_);
- void LoadImageClasses(TimingLogger* timings);
+ void LoadImageClasses(TimingLogger* timings) REQUIRES(!Locks::mutator_lock_);
  // Attempt to resolve all types, methods, fields, and strings
// referenced from code in the dex file following PathClassLoader
// ordering semantics.
void Resolve(jobject class_loader, const std::vector<const DexFile*>& dex_files,
ThreadPool* thread_pool, TimingLogger* timings)
- LOCKS_EXCLUDED(Locks::mutator_lock_);
+ REQUIRES(!Locks::mutator_lock_);
void ResolveDexFile(jobject class_loader, const DexFile& dex_file,
const std::vector<const DexFile*>& dex_files,
ThreadPool* thread_pool, TimingLogger* timings)
- LOCKS_EXCLUDED(Locks::mutator_lock_);
+ REQUIRES(!Locks::mutator_lock_);
void Verify(jobject class_loader, const std::vector<const DexFile*>& dex_files,
ThreadPool* thread_pool, TimingLogger* timings);
void VerifyDexFile(jobject class_loader, const DexFile& dex_file,
const std::vector<const DexFile*>& dex_files,
ThreadPool* thread_pool, TimingLogger* timings)
- LOCKS_EXCLUDED(Locks::mutator_lock_);
+ REQUIRES(!Locks::mutator_lock_);
void SetVerified(jobject class_loader, const std::vector<const DexFile*>& dex_files,
ThreadPool* thread_pool, TimingLogger* timings);
void SetVerifiedDexFile(jobject class_loader, const DexFile& dex_file,
const std::vector<const DexFile*>& dex_files,
ThreadPool* thread_pool, TimingLogger* timings)
- LOCKS_EXCLUDED(Locks::mutator_lock_);
+ REQUIRES(!Locks::mutator_lock_);
void InitializeClasses(jobject class_loader, const std::vector<const DexFile*>& dex_files,
ThreadPool* thread_pool, TimingLogger* timings)
- LOCKS_EXCLUDED(Locks::mutator_lock_);
+ REQUIRES(!Locks::mutator_lock_, !compiled_classes_lock_);
void InitializeClasses(jobject class_loader, const DexFile& dex_file,
const std::vector<const DexFile*>& dex_files,
ThreadPool* thread_pool, TimingLogger* timings)
- LOCKS_EXCLUDED(Locks::mutator_lock_, compiled_classes_lock_);
+ REQUIRES(!Locks::mutator_lock_, !compiled_classes_lock_);
- void UpdateImageClasses(TimingLogger* timings) LOCKS_EXCLUDED(Locks::mutator_lock_);
+ void UpdateImageClasses(TimingLogger* timings) REQUIRES(!Locks::mutator_lock_);
static void FindClinitImageClassesCallback(mirror::Object* object, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void Compile(jobject class_loader, const std::vector<const DexFile*>& dex_files,
ThreadPool* thread_pool, TimingLogger* timings);
void CompileDexFile(jobject class_loader, const DexFile& dex_file,
const std::vector<const DexFile*>& dex_files,
ThreadPool* thread_pool, TimingLogger* timings)
- LOCKS_EXCLUDED(Locks::mutator_lock_);
+ REQUIRES(!Locks::mutator_lock_);
void CompileMethod(Thread* self, const DexFile::CodeItem* code_item, uint32_t access_flags,
InvokeType invoke_type, uint16_t class_def_idx, uint32_t method_idx,
jobject class_loader, const DexFile& dex_file,
DexToDexCompilationLevel dex_to_dex_compilation_level,
bool compilation_enabled)
- LOCKS_EXCLUDED(compiled_methods_lock_);
-
- static void CompileClass(const ParallelCompilationManager* context, size_t class_def_index)
- LOCKS_EXCLUDED(Locks::mutator_lock_);
+ REQUIRES(!compiled_methods_lock_);
// Swap pool and allocator used for native allocations. May be file-backed. Needs to be first
// as other fields rely on this.
@@ -776,6 +776,7 @@ class CompilerDriver {
DedupeSet<ArrayRef<const uint8_t>,
SwapVector<uint8_t>, size_t, DedupeHashFunc<const uint8_t>, 4> dedupe_cfi_info_;
+ friend class CompileClassVisitor;
DISALLOW_COPY_AND_ASSIGN(CompilerDriver);
};
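
The header-side changes are a mechanical renaming of the thread-safety annotations: SHARED_LOCKS_REQUIRED(x) becomes SHARED_REQUIRES(x), LOCKS_EXCLUDED(x) becomes the negative-capability form REQUIRES(!x), and, in the later image_writer hunks, EXCLUSIVE_LOCKS_REQUIRED(x) becomes REQUIRES(x). Assuming the macros wrap Clang's -Wthread-safety attributes (their definitions are not part of this diff), the old and new spellings express the same contracts:

    // Old: the caller must not hold compiled_methods_lock_.
    CompiledMethod* GetCompiledMethod(MethodReference ref) const
        LOCKS_EXCLUDED(compiled_methods_lock_);

    // New: the same exclusion written as a negative capability.
    CompiledMethod* GetCompiledMethod(MethodReference ref) const
        REQUIRES(!compiled_methods_lock_);

    // Shared requirement: the caller must hold the mutator lock at least for reading.
    bool IsFieldVolatile(ArtField* field) SHARED_REQUIRES(Locks::mutator_lock_);
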
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index b358f4f396..e35d07da83 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -37,7 +37,7 @@ namespace art {
class CompilerDriverTest : public CommonCompilerTest {
protected:
- void CompileAll(jobject class_loader) LOCKS_EXCLUDED(Locks::mutator_lock_) {
+ void CompileAll(jobject class_loader) REQUIRES(!Locks::mutator_lock_) {
TimingLogger timings("CompilerDriverTest::CompileAll", false, false);
TimingLogger::ScopedTiming t(__FUNCTION__, &timings);
compiler_driver_->CompileAll(class_loader,
@@ -49,7 +49,7 @@ class CompilerDriverTest : public CommonCompilerTest {
void EnsureCompiled(jobject class_loader, const char* class_name, const char* method,
const char* signature, bool is_virtual)
- LOCKS_EXCLUDED(Locks::mutator_lock_) {
+ REQUIRES(!Locks::mutator_lock_) {
CompileAll(class_loader);
Thread::Current()->TransitionFromSuspendedToRunnable();
bool started = runtime_->Start();
diff --git a/compiler/elf_writer.h b/compiler/elf_writer.h
index 8e13b51bbe..03f8ceb306 100644
--- a/compiler/elf_writer.h
+++ b/compiler/elf_writer.h
@@ -57,7 +57,7 @@ class ElfWriter {
const std::vector<const DexFile*>& dex_files,
const std::string& android_root,
bool is_host)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+ SHARED_REQUIRES(Locks::mutator_lock_) = 0;
const CompilerDriver* const compiler_driver_;
File* const elf_file_;
diff --git a/compiler/elf_writer_quick.h b/compiler/elf_writer_quick.h
index fd202eeb5f..83781abeff 100644
--- a/compiler/elf_writer_quick.h
+++ b/compiler/elf_writer_quick.h
@@ -33,7 +33,7 @@ class ElfWriterQuick FINAL : public ElfWriter {
const std::string& android_root,
bool is_host,
const CompilerDriver& driver)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static void EncodeOatPatches(const std::vector<uintptr_t>& locations,
std::vector<uint8_t>* buffer);
@@ -44,7 +44,7 @@ class ElfWriterQuick FINAL : public ElfWriter {
const std::string& android_root,
bool is_host)
OVERRIDE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
private:
ElfWriterQuick(const CompilerDriver& driver, File* elf_file)
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 2b65aa9337..293a488ccf 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -73,7 +73,7 @@ static constexpr bool kBinObjects = true;
static constexpr bool kComputeEagerResolvedStrings = false;
static void CheckNoDexObjectsCallback(Object* obj, void* arg ATTRIBUTE_UNUSED)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
Class* klass = obj->GetClass();
CHECK_NE(PrettyClass(klass), "com.android.dex.Dex");
}
@@ -1035,7 +1035,7 @@ class FixupRootVisitor : public RootVisitor {
}
void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
for (size_t i = 0; i < count; ++i) {
*roots[i] = ImageAddress(*roots[i]);
}
@@ -1043,7 +1043,7 @@ class FixupRootVisitor : public RootVisitor {
void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
for (size_t i = 0; i < count; ++i) {
roots[i]->Assign(ImageAddress(roots[i]->AsMirrorPtr()));
}
@@ -1052,7 +1052,7 @@ class FixupRootVisitor : public RootVisitor {
private:
ImageWriter* const image_writer_;
- mirror::Object* ImageAddress(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Object* ImageAddress(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) {
const size_t offset = image_writer_->GetImageOffset(obj);
auto* const dest = reinterpret_cast<Object*>(image_writer_->image_begin_ + offset);
VLOG(compiler) << "Update root from " << obj << " to " << dest;
@@ -1189,8 +1189,15 @@ class FixupVisitor {
FixupVisitor(ImageWriter* image_writer, Object* copy) : image_writer_(image_writer), copy_(copy) {
}
+ // Ignore class roots since we don't have a way to map them to the destination. These are handled
+ // with other logic.
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
+ const {}
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
+
+
void operator()(Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
Object* ref = obj->GetFieldObject<Object, kVerifyNone>(offset);
// Use SetFieldObjectWithoutWriteBarrier to avoid card marking since we are writing to the
// image.
@@ -1200,8 +1207,7 @@ class FixupVisitor {
// java.lang.ref.Reference visitor.
void operator()(mirror::Class* klass ATTRIBUTE_UNUSED, mirror::Reference* ref) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
copy_->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(
mirror::Reference::ReferentOffset(), image_writer_->GetImageAddress(ref->GetReferent()));
}
@@ -1217,15 +1223,14 @@ class FixupClassVisitor FINAL : public FixupVisitor {
}
void operator()(Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
DCHECK(obj->IsClass());
FixupVisitor::operator()(obj, offset, /*is_static*/false);
}
void operator()(mirror::Class* klass ATTRIBUTE_UNUSED,
mirror::Reference* ref ATTRIBUTE_UNUSED) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
LOG(FATAL) << "Reference not expected here.";
}
};
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index 1523383657..42b1cbf58a 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -69,15 +69,15 @@ class ImageWriter FINAL {
}
template <typename T>
- T* GetImageAddress(T* object) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ T* GetImageAddress(T* object) const SHARED_REQUIRES(Locks::mutator_lock_) {
return object == nullptr ? nullptr :
reinterpret_cast<T*>(image_begin_ + GetImageOffset(object));
}
- ArtMethod* GetImageMethodAddress(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtMethod* GetImageMethodAddress(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
mirror::HeapReference<mirror::Object>* GetDexCacheArrayElementImageAddress(
- const DexFile* dex_file, uint32_t offset) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const DexFile* dex_file, uint32_t offset) const SHARED_REQUIRES(Locks::mutator_lock_) {
auto it = dex_cache_array_starts_.find(dex_file);
DCHECK(it != dex_cache_array_starts_.end());
return reinterpret_cast<mirror::HeapReference<mirror::Object>*>(
@@ -88,7 +88,7 @@ class ImageWriter FINAL {
bool Write(const std::string& image_filename, const std::string& oat_filename,
const std::string& oat_location)
- LOCKS_EXCLUDED(Locks::mutator_lock_);
+ REQUIRES(!Locks::mutator_lock_);
uintptr_t GetOatDataBegin() {
return reinterpret_cast<uintptr_t>(oat_data_begin_);
@@ -98,7 +98,7 @@ class ImageWriter FINAL {
bool AllocMemory();
// Mark the objects defined in this space in the given live bitmap.
- void RecordImageAllocations() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void RecordImageAllocations() SHARED_REQUIRES(Locks::mutator_lock_);
// Classify different kinds of bins that objects end up getting packed into during image writing.
enum Bin {
@@ -165,32 +165,32 @@ class ImageWriter FINAL {
// We use the lock word to store the offset of the object in the image.
void AssignImageOffset(mirror::Object* object, BinSlot bin_slot)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void SetImageOffset(mirror::Object* object, size_t offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool IsImageOffsetAssigned(mirror::Object* object) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- size_t GetImageOffset(mirror::Object* object) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ size_t GetImageOffset(mirror::Object* object) const SHARED_REQUIRES(Locks::mutator_lock_);
void UpdateImageOffset(mirror::Object* obj, uintptr_t offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
- void PrepareDexCacheArraySlots() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void AssignImageBinSlot(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void PrepareDexCacheArraySlots() SHARED_REQUIRES(Locks::mutator_lock_);
+ void AssignImageBinSlot(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
void SetImageBinSlot(mirror::Object* object, BinSlot bin_slot)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool IsImageBinSlotAssigned(mirror::Object* object) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- BinSlot GetImageBinSlot(mirror::Object* object) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ BinSlot GetImageBinSlot(mirror::Object* object) const SHARED_REQUIRES(Locks::mutator_lock_);
- void AddMethodPointerArray(mirror::PointerArray* arr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void AddMethodPointerArray(mirror::PointerArray* arr) SHARED_REQUIRES(Locks::mutator_lock_);
static void* GetImageAddressCallback(void* writer, mirror::Object* obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
return reinterpret_cast<ImageWriter*>(writer)->GetImageAddress(obj);
}
mirror::Object* GetLocalAddress(mirror::Object* object) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
size_t offset = GetImageOffset(object);
uint8_t* dst = image_->Begin() + offset;
return reinterpret_cast<mirror::Object*>(dst);
@@ -209,74 +209,74 @@ class ImageWriter FINAL {
}
// Returns true if the class was in the original requested image classes list.
- bool IsImageClass(mirror::Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsImageClass(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
  // Debug aid that lists the requested image classes.
void DumpImageClasses();
// Preinitializes some otherwise lazy fields (such as Class name) to avoid runtime image dirtying.
void ComputeLazyFieldsForImageClasses()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static bool ComputeLazyFieldsForClassesVisitor(mirror::Class* klass, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Wire dex cache resolved strings to strings in the image to avoid runtime resolution.
- void ComputeEagerResolvedStrings() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void ComputeEagerResolvedStrings() SHARED_REQUIRES(Locks::mutator_lock_);
static void ComputeEagerResolvedStringsCallback(mirror::Object* obj, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Remove unwanted classes from various roots.
- void PruneNonImageClasses() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void PruneNonImageClasses() SHARED_REQUIRES(Locks::mutator_lock_);
static bool NonImageClassesVisitor(mirror::Class* c, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Verify unwanted classes removed.
- void CheckNonImageClassesRemoved() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void CheckNonImageClassesRemoved() SHARED_REQUIRES(Locks::mutator_lock_);
static void CheckNonImageClassesRemovedCallback(mirror::Object* obj, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Lays out where the image objects will be at runtime.
void CalculateNewObjectOffsets()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void CreateHeader(size_t oat_loaded_size, size_t oat_data_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
mirror::ObjectArray<mirror::Object>* CreateImageRoots() const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void CalculateObjectBinSlots(mirror::Object* obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void UnbinObjectsIntoOffset(mirror::Object* obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void WalkInstanceFields(mirror::Object* obj, mirror::Class* klass)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void WalkFieldsInOrder(mirror::Object* obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static void WalkFieldsCallback(mirror::Object* obj, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
static void UnbinObjectsIntoOffsetCallback(mirror::Object* obj, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Creates the contiguous image in memory and adjusts pointers.
- void CopyAndFixupNativeData() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void CopyAndFixupObjects() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void CopyAndFixupNativeData() SHARED_REQUIRES(Locks::mutator_lock_);
+ void CopyAndFixupObjects() SHARED_REQUIRES(Locks::mutator_lock_);
static void CopyAndFixupObjectsCallback(mirror::Object* obj, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void CopyAndFixupObject(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ void CopyAndFixupObject(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
void CopyAndFixupMethod(ArtMethod* orig, ArtMethod* copy)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void FixupClass(mirror::Class* orig, mirror::Class* copy)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void FixupObject(mirror::Object* orig, mirror::Object* copy)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void FixupPointerArray(mirror::Object* dst, mirror::PointerArray* arr, mirror::Class* klass,
- Bin array_type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ Bin array_type) SHARED_REQUIRES(Locks::mutator_lock_);
// Get quick code for non-resolution/imt_conflict/abstract method.
const uint8_t* GetQuickCode(ArtMethod* method, bool* quick_is_interpreted)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
const uint8_t* GetQuickEntryPoint(ArtMethod* method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Patches references in OatFile to expect runtime addresses.
void SetOatChecksumFromElfFile(File* elf_file);
@@ -285,10 +285,10 @@ class ImageWriter FINAL {
size_t GetBinSizeSum(Bin up_to = kBinSize) const;
// Return true if a method is likely to be dirtied at runtime.
- bool WillMethodBeDirty(ArtMethod* m) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool WillMethodBeDirty(ArtMethod* m) const SHARED_REQUIRES(Locks::mutator_lock_);
// Assign the offset for an ArtMethod.
- void AssignMethodOffset(ArtMethod* method, Bin bin) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void AssignMethodOffset(ArtMethod* method, Bin bin) SHARED_REQUIRES(Locks::mutator_lock_);
const CompilerDriver& compiler_driver_;
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index a122cebf50..d70211f9a9 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -55,7 +55,7 @@ extern "C" void jit_unload(void* handle) {
}
extern "C" bool jit_compile_method(void* handle, ArtMethod* method, Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
auto* jit_compiler = reinterpret_cast<JitCompiler*>(handle);
DCHECK(jit_compiler != nullptr);
return jit_compiler->CompileMethod(self, method);
diff --git a/compiler/jit/jit_compiler.h b/compiler/jit/jit_compiler.h
index b0010e0eb2..ef68caa5fa 100644
--- a/compiler/jit/jit_compiler.h
+++ b/compiler/jit/jit_compiler.h
@@ -38,11 +38,11 @@ class JitCompiler {
static JitCompiler* Create();
virtual ~JitCompiler();
bool CompileMethod(Thread* self, ArtMethod* method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// This is in the compiler since the runtime doesn't have access to the compiled method
// structures.
bool AddToCodeCache(ArtMethod* method, const CompiledMethod* compiled_method,
- OatFile::OatMethod* out_method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ OatFile::OatMethod* out_method) SHARED_REQUIRES(Locks::mutator_lock_);
CompilerCallbacks* GetCompilerCallbacks() const;
size_t GetTotalCompileTime() const {
return total_time_;
@@ -63,7 +63,7 @@ class JitCompiler {
const CompiledMethod* compiled_method, uint8_t* reserve_begin, uint8_t* reserve_end,
const uint8_t* mapping_table, const uint8_t* vmap_table, const uint8_t* gc_map);
bool MakeExecutable(CompiledMethod* compiled_method, ArtMethod* method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
DISALLOW_COPY_AND_ASSIGN(JitCompiler);
};
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 074775633f..c98a5f8ba8 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -44,7 +44,7 @@ class OatTest : public CommonCompilerTest {
void CheckMethod(ArtMethod* method,
const OatFile::OatMethod& oat_method,
const DexFile& dex_file)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
const CompiledMethod* compiled_method =
compiler_driver_->GetCompiledMethod(MethodReference(&dex_file,
method->GetDexMethodIndex()));
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 4318ea5b6c..64e748776d 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -365,7 +365,7 @@ class OatWriter::InitCodeMethodVisitor : public OatDexMethodVisitor {
}
bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
OatClass* oat_class = writer_->oat_classes_[oat_class_index_];
CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
@@ -560,7 +560,7 @@ class OatWriter::InitMapMethodVisitor : public OatDexMethodVisitor {
}
bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it ATTRIBUTE_UNUSED)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
OatClass* oat_class = writer_->oat_classes_[oat_class_index_];
CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
@@ -601,7 +601,7 @@ class OatWriter::InitImageMethodVisitor : public OatDexMethodVisitor {
}
bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
OatClass* oat_class = writer_->oat_classes_[oat_class_index_];
CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
@@ -665,7 +665,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
}
bool StartClass(const DexFile* dex_file, size_t class_def_index)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
OatDexMethodVisitor::StartClass(dex_file, class_def_index);
if (dex_cache_ == nullptr || dex_cache_->GetDexFile() != dex_file) {
dex_cache_ = class_linker_->FindDexCache(*dex_file);
@@ -673,7 +673,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
return true;
}
- bool EndClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool EndClass() SHARED_REQUIRES(Locks::mutator_lock_) {
bool result = OatDexMethodVisitor::EndClass();
if (oat_class_index_ == writer_->oat_classes_.size()) {
DCHECK(result); // OatDexMethodVisitor::EndClass() never fails.
@@ -687,7 +687,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
}
bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
OatClass* oat_class = writer_->oat_classes_[oat_class_index_];
const CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
@@ -793,7 +793,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
}
ArtMethod* GetTargetMethod(const LinkerPatch& patch)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
MethodReference ref = patch.TargetMethod();
mirror::DexCache* dex_cache =
(dex_file_ == ref.dex_file) ? dex_cache_ : class_linker_->FindDexCache(*ref.dex_file);
@@ -803,7 +803,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
return method;
}
- uint32_t GetTargetOffset(const LinkerPatch& patch) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ uint32_t GetTargetOffset(const LinkerPatch& patch) SHARED_REQUIRES(Locks::mutator_lock_) {
auto target_it = writer_->method_offset_map_.map.find(patch.TargetMethod());
uint32_t target_offset =
(target_it != writer_->method_offset_map_.map.end()) ? target_it->second : 0u;
@@ -828,7 +828,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
}
mirror::Class* GetTargetType(const LinkerPatch& patch)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
mirror::DexCache* dex_cache = (dex_file_ == patch.TargetTypeDexFile())
? dex_cache_ : class_linker_->FindDexCache(*patch.TargetTypeDexFile());
mirror::Class* type = dex_cache->GetResolvedType(patch.TargetTypeIndex());
@@ -836,7 +836,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
return type;
}
- uint32_t GetDexCacheOffset(const LinkerPatch& patch) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ uint32_t GetDexCacheOffset(const LinkerPatch& patch) SHARED_REQUIRES(Locks::mutator_lock_) {
if (writer_->image_writer_ != nullptr) {
auto* element = writer_->image_writer_->GetDexCacheArrayElementImageAddress(
patch.TargetDexCacheDexFile(), patch.TargetDexCacheElementOffset());
@@ -849,7 +849,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
}
void PatchObjectAddress(std::vector<uint8_t>* code, uint32_t offset, mirror::Object* object)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
// NOTE: Direct method pointers across oat files don't use linker patches. However, direct
// type pointers across oat files do. (TODO: Investigate why.)
if (writer_->image_writer_ != nullptr) {
@@ -865,7 +865,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
}
void PatchMethodAddress(std::vector<uint8_t>* code, uint32_t offset, ArtMethod* method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
// NOTE: Direct method pointers across oat files don't use linker patches. However, direct
// type pointers across oat files do. (TODO: Investigate why.)
if (writer_->image_writer_ != nullptr) {
@@ -882,7 +882,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
}
void PatchCodeAddress(std::vector<uint8_t>* code, uint32_t offset, uint32_t target_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
uint32_t address = writer_->image_writer_ == nullptr ? target_offset :
PointerToLowMemUInt32(writer_->image_writer_->GetOatFileBegin() +
writer_->oat_data_offset_ + target_offset);
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index 82b9377c07..3baf43872e 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -165,9 +165,9 @@ class OatWriter {
size_t InitOatClasses(size_t offset);
size_t InitOatMaps(size_t offset);
size_t InitOatCode(size_t offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
size_t InitOatCodeDexFiles(size_t offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool WriteTables(OutputStream* out, const size_t file_offset);
size_t WriteMaps(OutputStream* out, const size_t file_offset, size_t relative_offset);
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index d6b5636edc..afea40316c 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -396,11 +396,6 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
return strcmp(pass_name_, name) == 0;
}
- bool IsReferenceTypePropagationPass() {
- return strstr(pass_name_, ReferenceTypePropagation::kReferenceTypePropagationPassName)
- != nullptr;
- }
-
void PrintInstruction(HInstruction* instruction) {
output_ << instruction->DebugName();
if (instruction->InputCount() > 0) {
@@ -464,19 +459,25 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
} else {
StartAttributeStream("loop") << "B" << info->GetHeader()->GetBlockId();
}
- } else if (IsReferenceTypePropagationPass() && is_after_pass_) {
+ } else if (IsPass(ReferenceTypePropagation::kReferenceTypePropagationPassName)
+ && is_after_pass_) {
if (instruction->GetType() == Primitive::kPrimNot) {
if (instruction->IsLoadClass()) {
ReferenceTypeInfo info = instruction->AsLoadClass()->GetLoadedClassRTI();
ScopedObjectAccess soa(Thread::Current());
- DCHECK(info.IsValid()) << "Invalid RTI for " << instruction->DebugName();
- StartAttributeStream("klass") << PrettyDescriptor(info.GetTypeHandle().Get());
- StartAttributeStream("exact") << std::boolalpha << info.IsExact() << std::noboolalpha;
+ if (info.GetTypeHandle().GetReference() != nullptr) {
+ StartAttributeStream("klass") << PrettyClass(info.GetTypeHandle().Get());
+ } else {
+ StartAttributeStream("klass") << "unresolved";
+ }
} else {
ReferenceTypeInfo info = instruction->GetReferenceTypeInfo();
- ScopedObjectAccess soa(Thread::Current());
- DCHECK(info.IsValid()) << "Invalid RTI for " << instruction->DebugName();
- StartAttributeStream("klass") << PrettyDescriptor(info.GetTypeHandle().Get());
+ if (info.IsTop()) {
+ StartAttributeStream("klass") << "java.lang.Object";
+ } else {
+ ScopedObjectAccess soa(Thread::Current());
+ StartAttributeStream("klass") << PrettyClass(info.GetTypeHandle().Get());
+ }
StartAttributeStream("exact") << std::boolalpha << info.IsExact() << std::noboolalpha;
}
}
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 1551c1531a..c185b5887b 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -86,7 +86,7 @@ void HInliner::Run() {
}
static bool IsMethodOrDeclaringClassFinal(ArtMethod* method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
return method->IsFinal() || method->GetDeclaringClass()->IsFinal();
}
@@ -96,7 +96,7 @@ static bool IsMethodOrDeclaringClassFinal(ArtMethod* method)
* Return nullptr if the runtime target cannot be proven.
*/
static ArtMethod* FindVirtualOrInterfaceTarget(HInvoke* invoke, ArtMethod* resolved_method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (IsMethodOrDeclaringClassFinal(resolved_method)) {
// No need to lookup further, the resolved method will be the target.
return resolved_method;
@@ -109,8 +109,10 @@ static ArtMethod* FindVirtualOrInterfaceTarget(HInvoke* invoke, ArtMethod* resol
receiver = receiver->InputAt(0);
}
ReferenceTypeInfo info = receiver->GetReferenceTypeInfo();
- DCHECK(info.IsValid()) << "Invalid RTI for " << receiver->DebugName();
- if (!info.IsExact()) {
+ if (info.IsTop()) {
+ // We have no information on the receiver.
+ return nullptr;
+ } else if (!info.IsExact()) {
// We currently only support inlining with known receivers.
// TODO: Remove this check, we should be able to inline final methods
// on unknown receivers.
@@ -162,7 +164,7 @@ static ArtMethod* FindVirtualOrInterfaceTarget(HInvoke* invoke, ArtMethod* resol
static uint32_t FindMethodIndexIn(ArtMethod* method,
const DexFile& dex_file,
uint32_t referrer_index)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (method->GetDexFile()->GetLocation().compare(dex_file.GetLocation()) == 0) {
return method->GetDexMethodIndex();
} else {
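
For orientation before the next file: the inliner above only devirtualizes when the target is provably unique. With the Top-based RTI that means the resolved method or its declaring class is final, or the receiver's type is known exactly; a Top receiver now simply bails out. A toy, self-contained model of that decision (illustrative names, not the ART API):

// Illustrative model of the devirtualization check in FindVirtualOrInterfaceTarget.
struct MethodInfo {
  bool is_final;
  bool declaring_class_is_final;
};

struct ReceiverRTI {
  bool is_top;    // no type information on the receiver
  bool is_exact;  // the dynamic type is known precisely
};

bool CanDevirtualize(const MethodInfo& resolved, const ReceiverRTI& receiver) {
  if (resolved.is_final || resolved.declaring_class_is_final) {
    return true;   // nothing can override the resolved method
  }
  if (receiver.is_top) {
    return false;  // no information on the receiver
  }
  return receiver.is_exact;  // exact type: the runtime target is known statically
}
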
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 1089812beb..b30b6c7bae 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -195,16 +195,16 @@ bool InstructionSimplifierVisitor::IsDominatedByInputNullCheck(HInstruction* ins
// Returns whether doing a type test between the class of `object` against `klass` has
// a statically known outcome. The result of the test is stored in `outcome`.
static bool TypeCheckHasKnownOutcome(HLoadClass* klass, HInstruction* object, bool* outcome) {
- ReferenceTypeInfo obj_rti = object->GetReferenceTypeInfo();
- ScopedObjectAccess soa(Thread::Current());
- if (!obj_rti.IsValid()) {
- // We run the simplifier before the reference type propagation so type info might not be
- // available.
+ if (!klass->IsResolved()) {
+ // If the class couldn't be resolved, it's not safe to compare against it. Its
+ // default type would be Top, which might be wider than the actual class type
+ // and thus produce wrong results.
return false;
}
+ ReferenceTypeInfo obj_rti = object->GetReferenceTypeInfo();
ReferenceTypeInfo class_rti = klass->GetLoadedClassRTI();
- DCHECK(class_rti.IsValid() && class_rti.IsExact());
+ ScopedObjectAccess soa(Thread::Current());
if (class_rti.IsSupertypeOf(obj_rti)) {
*outcome = true;
return true;
diff --git a/compiler/optimizing/licm_test.cc b/compiler/optimizing/licm_test.cc
new file mode 100644
index 0000000000..2fc66e6de4
--- /dev/null
+++ b/compiler/optimizing/licm_test.cc
@@ -0,0 +1,195 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "base/arena_allocator.h"
+#include "builder.h"
+#include "gtest/gtest.h"
+#include "licm.h"
+#include "nodes.h"
+#include "optimizing_unit_test.h"
+#include "side_effects_analysis.h"
+
+namespace art {
+
+/**
+ * Fixture class for the LICM tests.
+ */
+class LICMTest : public testing::Test {
+ public:
+ LICMTest() : pool_(), allocator_(&pool_) {
+ graph_ = CreateGraph(&allocator_);
+ }
+
+ ~LICMTest() { }
+
+ // Builds a singly-nested loop structure in the CFG. Tests can further populate
+ // the basic blocks with instructions to set up interesting scenarios.
+ void BuildLoop() {
+ entry_ = new (&allocator_) HBasicBlock(graph_);
+ loop_preheader_ = new (&allocator_) HBasicBlock(graph_);
+ loop_header_ = new (&allocator_) HBasicBlock(graph_);
+ loop_body_ = new (&allocator_) HBasicBlock(graph_);
+ exit_ = new (&allocator_) HBasicBlock(graph_);
+
+ graph_->AddBlock(entry_);
+ graph_->AddBlock(loop_preheader_);
+ graph_->AddBlock(loop_header_);
+ graph_->AddBlock(loop_body_);
+ graph_->AddBlock(exit_);
+
+ graph_->SetEntryBlock(entry_);
+ graph_->SetExitBlock(exit_);
+
+ // Set up loop flow in the CFG.
+ entry_->AddSuccessor(loop_preheader_);
+ loop_preheader_->AddSuccessor(loop_header_);
+ loop_header_->AddSuccessor(loop_body_);
+ loop_header_->AddSuccessor(exit_);
+ loop_body_->AddSuccessor(loop_header_);
+
+ // Provide boiler-plate instructions.
+ parameter_ = new (&allocator_) HParameterValue(0, Primitive::kPrimNot);
+ entry_->AddInstruction(parameter_);
+ constant_ = new (&allocator_) HConstant(Primitive::kPrimInt);
+ loop_preheader_->AddInstruction(constant_);
+ loop_header_->AddInstruction(new (&allocator_) HIf(parameter_));
+ loop_body_->AddInstruction(new (&allocator_) HGoto());
+ exit_->AddInstruction(new (&allocator_) HExit());
+ }
+
+ // Performs LICM optimizations (after proper set up).
+ void PerformLICM() {
+ ASSERT_TRUE(graph_->TryBuildingSsa());
+ SideEffectsAnalysis side_effects(graph_);
+ side_effects.Run();
+ LICM licm(graph_, side_effects);
+ licm.Run();
+ }
+
+ // General building fields.
+ ArenaPool pool_;
+ ArenaAllocator allocator_;
+ HGraph* graph_;
+
+ // Specific basic blocks.
+ HBasicBlock* entry_;
+ HBasicBlock* loop_preheader_;
+ HBasicBlock* loop_header_;
+ HBasicBlock* loop_body_;
+ HBasicBlock* exit_;
+
+ HInstruction* parameter_; // "this"
+ HInstruction* constant_;
+};
+
+//
+// The actual LICM tests.
+//
+
+TEST_F(LICMTest, ConstantHoisting) {
+ BuildLoop();
+
+ // Populate the loop with instructions: set array to constant.
+ HInstruction* constant = new (&allocator_) HConstant(Primitive::kPrimDouble);
+ loop_body_->InsertInstructionBefore(constant, loop_body_->GetLastInstruction());
+ HInstruction* set_array = new (&allocator_) HArraySet(
+ parameter_, constant_, constant, Primitive::kPrimDouble, 0);
+ loop_body_->InsertInstructionBefore(set_array, loop_body_->GetLastInstruction());
+
+ EXPECT_EQ(constant->GetBlock(), loop_body_);
+ EXPECT_EQ(set_array->GetBlock(), loop_body_);
+ PerformLICM();
+ EXPECT_EQ(constant->GetBlock(), loop_preheader_);
+ EXPECT_EQ(set_array->GetBlock(), loop_body_);
+}
+
+TEST_F(LICMTest, FieldHoisting) {
+ BuildLoop();
+
+ // Populate the loop with instructions: set/get field with different types.
+ HInstruction* get_field = new (&allocator_) HInstanceFieldGet(
+ parameter_, Primitive::kPrimLong, MemberOffset(10),
+ false, kUnknownFieldIndex, graph_->GetDexFile());
+ loop_body_->InsertInstructionBefore(get_field, loop_body_->GetLastInstruction());
+ HInstruction* set_field = new (&allocator_) HInstanceFieldSet(
+ parameter_, constant_, Primitive::kPrimInt, MemberOffset(20),
+ false, kUnknownFieldIndex, graph_->GetDexFile());
+ loop_body_->InsertInstructionBefore(set_field, loop_body_->GetLastInstruction());
+
+ EXPECT_EQ(get_field->GetBlock(), loop_body_);
+ EXPECT_EQ(set_field->GetBlock(), loop_body_);
+ PerformLICM();
+ EXPECT_EQ(get_field->GetBlock(), loop_preheader_);
+ EXPECT_EQ(set_field->GetBlock(), loop_body_);
+}
+
+TEST_F(LICMTest, NoFieldHoisting) {
+ BuildLoop();
+
+ // Populate the loop with instructions: set/get field with same types.
+ HInstruction* get_field = new (&allocator_) HInstanceFieldGet(
+ parameter_, Primitive::kPrimLong, MemberOffset(10),
+ false, kUnknownFieldIndex, graph_->GetDexFile());
+ loop_body_->InsertInstructionBefore(get_field, loop_body_->GetLastInstruction());
+ HInstruction* set_field = new (&allocator_) HInstanceFieldSet(
+ parameter_, get_field, Primitive::kPrimLong, MemberOffset(10),
+ false, kUnknownFieldIndex, graph_->GetDexFile());
+ loop_body_->InsertInstructionBefore(set_field, loop_body_->GetLastInstruction());
+
+ EXPECT_EQ(get_field->GetBlock(), loop_body_);
+ EXPECT_EQ(set_field->GetBlock(), loop_body_);
+ PerformLICM();
+ EXPECT_EQ(get_field->GetBlock(), loop_body_);
+ EXPECT_EQ(set_field->GetBlock(), loop_body_);
+}
+
+TEST_F(LICMTest, ArrayHoisting) {
+ BuildLoop();
+
+ // Populate the loop with instructions: set/get array with different types.
+ HInstruction* get_array = new (&allocator_) HArrayGet(
+ parameter_, constant_, Primitive::kPrimLong);
+ loop_body_->InsertInstructionBefore(get_array, loop_body_->GetLastInstruction());
+ HInstruction* set_array = new (&allocator_) HArraySet(
+ parameter_, constant_, constant_, Primitive::kPrimInt, 0);
+ loop_body_->InsertInstructionBefore(set_array, loop_body_->GetLastInstruction());
+
+ EXPECT_EQ(get_array->GetBlock(), loop_body_);
+ EXPECT_EQ(set_array->GetBlock(), loop_body_);
+ PerformLICM();
+ EXPECT_EQ(get_array->GetBlock(), loop_preheader_);
+ EXPECT_EQ(set_array->GetBlock(), loop_body_);
+}
+
+TEST_F(LICMTest, NoArrayHoisting) {
+ BuildLoop();
+
+ // Populate the loop with instructions: set/get array with same types.
+ HInstruction* get_array = new (&allocator_) HArrayGet(
+ parameter_, constant_, Primitive::kPrimLong);
+ loop_body_->InsertInstructionBefore(get_array, loop_body_->GetLastInstruction());
+ HInstruction* set_array = new (&allocator_) HArraySet(
+ parameter_, get_array, constant_, Primitive::kPrimLong, 0);
+ loop_body_->InsertInstructionBefore(set_array, loop_body_->GetLastInstruction());
+
+ EXPECT_EQ(get_array->GetBlock(), loop_body_);
+ EXPECT_EQ(set_array->GetBlock(), loop_body_);
+ PerformLICM();
+ EXPECT_EQ(get_array->GetBlock(), loop_body_);
+ EXPECT_EQ(set_array->GetBlock(), loop_body_);
+}
+
+} // namespace art
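
The fixture above distinguishes hoistable from non-hoistable loop bodies purely through the side-effects model: a read may move to the preheader only if no write inside the loop can affect it. A minimal sketch of that criterion, under the assumption that side effects are tracked per access kind and primitive type as in the tests (hypothetical names, not the ART LICM code):

// Hypothetical sketch of the hoisting criterion exercised by the tests above.
struct LoopCandidate {
  bool inputs_defined_outside_loop;  // all operands are loop invariant
  bool writes_memory;                // e.g. HInstanceFieldSet, HArraySet
  bool reads_memory;                 // e.g. HInstanceFieldGet, HArrayGet
  bool read_may_alias_loop_write;    // same access kind and primitive type
};

bool CanHoistToPreheader(const LoopCandidate& instr) {
  if (!instr.inputs_defined_outside_loop) return false;
  if (instr.writes_memory) return false;             // stores stay in the body
  if (instr.reads_memory && instr.read_may_alias_loop_write) {
    return false;                                    // NoFieldHoisting / NoArrayHoisting
  }
  return true;                                       // ConstantHoisting, FieldHoisting, ArrayHoisting
}
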
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 296c1b02fc..519fa005a6 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -1757,39 +1757,11 @@ void HGraph::TransformLoopHeaderForBCE(HBasicBlock* header) {
}
}
-void HInstruction::SetReferenceTypeInfo(ReferenceTypeInfo rti) {
- if (kIsDebugBuild) {
- DCHECK_EQ(GetType(), Primitive::kPrimNot);
- ScopedObjectAccess soa(Thread::Current());
- DCHECK(rti.IsValid()) << "Invalid RTI for " << DebugName();
- if (IsBoundType()) {
- // Having the test here spares us from making the method virtual just for
- // the sake of a DCHECK.
- ReferenceTypeInfo upper_bound_rti = AsBoundType()->GetUpperBound();
- DCHECK(upper_bound_rti.IsSupertypeOf(rti))
- << " upper_bound_rti: " << upper_bound_rti
- << " rti: " << rti;
- DCHECK(!upper_bound_rti.GetTypeHandle()->IsFinal() || rti.IsExact());
- }
- }
- reference_type_info_ = rti;
-}
-
-ReferenceTypeInfo::ReferenceTypeInfo() : type_handle_(TypeHandle()), is_exact_(false) {}
-
-ReferenceTypeInfo::ReferenceTypeInfo(TypeHandle type_handle, bool is_exact)
- : type_handle_(type_handle), is_exact_(is_exact) {
- if (kIsDebugBuild) {
- ScopedObjectAccess soa(Thread::Current());
- DCHECK(IsValidHandle(type_handle));
- }
-}
-
std::ostream& operator<<(std::ostream& os, const ReferenceTypeInfo& rhs) {
ScopedObjectAccess soa(Thread::Current());
os << "["
- << " is_valid=" << rhs.IsValid()
- << " type=" << (!rhs.IsValid() ? "?" : PrettyClass(rhs.GetTypeHandle().Get()))
+ << " is_top=" << rhs.IsTop()
+ << " type=" << (rhs.IsTop() ? "?" : PrettyClass(rhs.GetTypeHandle().Get()))
<< " is_exact=" << rhs.IsExact()
<< " ]";
return os;
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 1190fae914..7f446d4cf6 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1460,63 +1460,79 @@ class ReferenceTypeInfo : ValueObject {
public:
typedef Handle<mirror::Class> TypeHandle;
- static ReferenceTypeInfo Create(TypeHandle type_handle, bool is_exact) {
- // The constructor will check that the type_handle is valid.
- return ReferenceTypeInfo(type_handle, is_exact);
+ static ReferenceTypeInfo Create(TypeHandle type_handle, bool is_exact)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (type_handle->IsObjectClass()) {
+ // Override the type handle to be consistent with the case when we get to
+ // Top but don't have the Object class available. It avoids having to guess
+ // what value the type_handle has when it's Top.
+ return ReferenceTypeInfo(TypeHandle(), is_exact, true);
+ } else {
+ return ReferenceTypeInfo(type_handle, is_exact, false);
+ }
}
- static ReferenceTypeInfo CreateInvalid() { return ReferenceTypeInfo(); }
-
- static bool IsValidHandle(TypeHandle handle) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return handle.GetReference() != nullptr;
+ static ReferenceTypeInfo CreateTop(bool is_exact) {
+ return ReferenceTypeInfo(TypeHandle(), is_exact, true);
}
- bool IsValid() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return IsValidHandle(type_handle_);
- }
bool IsExact() const { return is_exact_; }
- bool IsObjectClass() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(IsValid());
- return GetTypeHandle()->IsObjectClass();
- }
- bool IsInterface() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(IsValid());
- return GetTypeHandle()->IsInterface();
+ bool IsTop() const { return is_top_; }
+ bool IsInterface() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ return !IsTop() && GetTypeHandle()->IsInterface();
}
Handle<mirror::Class> GetTypeHandle() const { return type_handle_; }
- bool IsSupertypeOf(ReferenceTypeInfo rti) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(IsValid());
- DCHECK(rti.IsValid());
+ bool IsSupertypeOf(ReferenceTypeInfo rti) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (IsTop()) {
+ // Top (equivalent to java.lang.Object) is a supertype of anything.
+ return true;
+ }
+ if (rti.IsTop()) {
+ // If we get here `this` is not Top() so it can't be a supertype.
+ return false;
+ }
return GetTypeHandle()->IsAssignableFrom(rti.GetTypeHandle().Get());
}
// Returns true if the type information provide the same amount of details.
// Note that it does not mean that the instructions have the same actual type
- // (because the type can be the result of a merge).
- bool IsEqual(ReferenceTypeInfo rti) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (!IsValid() && !rti.IsValid()) {
- // Invalid types are equal.
+ // (e.g. tops are equal but they can be the result of a merge).
+ bool IsEqual(ReferenceTypeInfo rti) SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (IsExact() != rti.IsExact()) {
+ return false;
+ }
+ if (IsTop() && rti.IsTop()) {
+ // `Top` means java.lang.Object, so the types are equivalent.
return true;
}
- if (!IsValid() || !rti.IsValid()) {
- // One is valid, the other not.
+ if (IsTop() || rti.IsTop()) {
+ // If only one is top or object, then they are not equivalent.
+ // NB: We need this extra check because the type_handle of `Top` is invalid
+ // and we cannot inspect its reference.
return false;
}
- return IsExact() == rti.IsExact()
- && GetTypeHandle().Get() == rti.GetTypeHandle().Get();
+
+ // Finally check the types.
+ return GetTypeHandle().Get() == rti.GetTypeHandle().Get();
}
private:
- ReferenceTypeInfo();
- ReferenceTypeInfo(TypeHandle type_handle, bool is_exact);
+ ReferenceTypeInfo() : ReferenceTypeInfo(TypeHandle(), false, true) {}
+ ReferenceTypeInfo(TypeHandle type_handle, bool is_exact, bool is_top)
+ : type_handle_(type_handle), is_exact_(is_exact), is_top_(is_top) {}
// The class of the object.
TypeHandle type_handle_;
// Whether or not the type is exact or a superclass of the actual type.
// Whether or not we have any information about this type.
bool is_exact_;
+ // A true value here means that the object type should be java.lang.Object.
+ // We don't have access to the corresponding mirror object every time so this
+ // flag acts as a substitute. When true, the TypeHandle refers to a null
+ // pointer and should not be used.
+ bool is_top_;
};
std::ostream& operator<<(std::ostream& os, const ReferenceTypeInfo& rhs);
@@ -1534,7 +1550,7 @@ class HInstruction : public ArenaObject<kArenaAllocMisc> {
live_interval_(nullptr),
lifetime_position_(kNoLifetime),
side_effects_(side_effects),
- reference_type_info_(ReferenceTypeInfo::CreateInvalid()) {}
+ reference_type_info_(ReferenceTypeInfo::CreateTop(/* is_exact */ false)) {}
virtual ~HInstruction() {}
@@ -1580,7 +1596,6 @@ class HInstruction : public ArenaObject<kArenaAllocMisc> {
// Does not apply for all instructions, but having this at top level greatly
// simplifies the null check elimination.
- // TODO: Consider merging can_be_null into ReferenceTypeInfo.
virtual bool CanBeNull() const {
DCHECK_EQ(GetType(), Primitive::kPrimNot) << "CanBeNull only applies to reference types";
return true;
@@ -1591,7 +1606,10 @@ class HInstruction : public ArenaObject<kArenaAllocMisc> {
return false;
}
- void SetReferenceTypeInfo(ReferenceTypeInfo rti);
+ void SetReferenceTypeInfo(ReferenceTypeInfo reference_type_info) {
+ DCHECK_EQ(GetType(), Primitive::kPrimNot);
+ reference_type_info_ = reference_type_info;
+ }
ReferenceTypeInfo GetReferenceTypeInfo() const {
DCHECK_EQ(GetType(), Primitive::kPrimNot);
@@ -3589,7 +3607,7 @@ class HInstanceFieldGet : public HExpression<1> {
const DexFile& dex_file)
: HExpression(
field_type,
- SideEffects::SideEffects::FieldReadOfType(field_type, is_volatile)),
+ SideEffects::FieldReadOfType(field_type, is_volatile)),
field_info_(field_offset, field_type, is_volatile, field_idx, dex_file) {
SetRawInputAt(0, value);
}
@@ -3886,7 +3904,7 @@ class HLoadClass : public HExpression<1> {
is_referrers_class_(is_referrers_class),
dex_pc_(dex_pc),
generate_clinit_check_(false),
- loaded_class_rti_(ReferenceTypeInfo::CreateInvalid()) {
+ loaded_class_rti_(ReferenceTypeInfo::CreateTop(/* is_exact */ false)) {
SetRawInputAt(0, current_method);
}
@@ -3937,6 +3955,10 @@ class HLoadClass : public HExpression<1> {
loaded_class_rti_ = rti;
}
+ bool IsResolved() {
+ return loaded_class_rti_.IsExact();
+ }
+
const DexFile& GetDexFile() { return dex_file_; }
bool NeedsDexCache() const OVERRIDE { return !is_referrers_class_; }
@@ -4036,7 +4058,7 @@ class HStaticFieldGet : public HExpression<1> {
const DexFile& dex_file)
: HExpression(
field_type,
- SideEffects::SideEffects::FieldReadOfType(field_type, is_volatile)),
+ SideEffects::FieldReadOfType(field_type, is_volatile)),
field_info_(field_offset, field_type, is_volatile, field_idx, dex_file) {
SetRawInputAt(0, cls);
}
@@ -4179,42 +4201,27 @@ class HInstanceOf : public HExpression<2> {
class HBoundType : public HExpression<1> {
public:
- // Constructs an HBoundType with the given upper_bound.
- // Ensures that the upper_bound is valid.
- HBoundType(HInstruction* input, ReferenceTypeInfo upper_bound, bool upper_can_be_null)
+ HBoundType(HInstruction* input, ReferenceTypeInfo bound_type)
: HExpression(Primitive::kPrimNot, SideEffects::None()),
- upper_bound_(upper_bound),
- upper_can_be_null_(upper_can_be_null) {
+ bound_type_(bound_type) {
DCHECK_EQ(input->GetType(), Primitive::kPrimNot);
SetRawInputAt(0, input);
- SetReferenceTypeInfo(upper_bound_);
}
- // GetUpper* should only be used in reference type propagation.
- const ReferenceTypeInfo& GetUpperBound() const { return upper_bound_; }
- bool GetUpperCanBeNull() const { return upper_can_be_null_; }
+ const ReferenceTypeInfo& GetBoundType() const { return bound_type_; }
- void SetCanBeNull(bool can_be_null) {
- DCHECK(upper_can_be_null_ || !can_be_null);
- can_be_null_ = can_be_null;
+ bool CanBeNull() const OVERRIDE {
+ // `null instanceof ClassX` always returns false, so we can't be null.
+ return false;
}
- bool CanBeNull() const OVERRIDE { return can_be_null_; }
-
DECLARE_INSTRUCTION(BoundType);
private:
// Encodes the most upper class that this instruction can have. In other words
- // it is always the case that GetUpperBound().IsSupertypeOf(GetReferenceType()).
- // It is used to bound the type in cases like:
- // if (x instanceof ClassX) {
- // // uper_bound_ will be ClassX
- // }
- const ReferenceTypeInfo upper_bound_;
- // Represents the top constraint that can_be_null_ cannot exceed (i.e. if this
- // is false then can_be_null_ cannot be true).
- const bool upper_can_be_null_;
- bool can_be_null_;
+ // it is always the case that GetBoundType().IsSupertypeOf(GetReferenceType()).
+ // It is used to bound the type in cases like `if (x instanceof ClassX) {}`
+ const ReferenceTypeInfo bound_type_;
DISALLOW_COPY_AND_ASSIGN(HBoundType);
};
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 601d668995..1c0123e188 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -86,7 +86,7 @@ class CodeVectorAllocator FINAL : public CodeAllocator {
* Filter to apply to the visualizer. Methods whose name contain that filter will
* be dumped.
*/
-static const char* kStringFilter = "";
+static constexpr const char kStringFilter[] = "";
class PassScope;
@@ -105,12 +105,14 @@ class PassObserver : public ValueObject {
visualizer_enabled_(!compiler_driver->GetDumpCfgFileName().empty()),
visualizer_(visualizer_output, graph, *codegen),
graph_in_bad_state_(false) {
- if (strstr(method_name, kStringFilter) == nullptr) {
- timing_logger_enabled_ = visualizer_enabled_ = false;
- }
- if (visualizer_enabled_) {
- visualizer_.PrintHeader(method_name_);
- codegen->SetDisassemblyInformation(&disasm_info_);
+ if (timing_logger_enabled_ || visualizer_enabled_) {
+ if (!IsVerboseMethod(compiler_driver, method_name)) {
+ timing_logger_enabled_ = visualizer_enabled_ = false;
+ }
+ if (visualizer_enabled_) {
+ visualizer_.PrintHeader(method_name_);
+ codegen->SetDisassemblyInformation(&disasm_info_);
+ }
}
}
@@ -169,6 +171,23 @@ class PassObserver : public ValueObject {
}
}
+ static bool IsVerboseMethod(CompilerDriver* compiler_driver, const char* method_name) {
+ // Test an exact match to --verbose-methods. If verbose-methods is set, this overrides an
+ // empty kStringFilter matching all methods.
+ if (compiler_driver->GetCompilerOptions().HasVerboseMethods()) {
+ return compiler_driver->GetCompilerOptions().IsVerboseMethod(method_name);
+ }
+
+ // Test the kStringFilter sub-string. constexpr helper variable to silence unreachable-code
+ // warning when the string is empty.
+ constexpr bool kStringFilterEmpty = arraysize(kStringFilter) <= 1;
+ if (kStringFilterEmpty || strstr(method_name, kStringFilter) != nullptr) {
+ return true;
+ }
+
+ return false;
+ }
+
HGraph* const graph_;
const char* method_name_;
@@ -237,7 +256,7 @@ class OptimizingCompiler FINAL : public Compiler {
}
uintptr_t GetEntryPointOf(ArtMethod* method) const OVERRIDE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) {
return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCodePtrSize(
InstructionSetPointerSize(GetCompilerDriver()->GetInstructionSet())));
}
@@ -380,8 +399,7 @@ static void RunOptimizations(HGraph* graph,
InstructionSimplifier* simplify3 = new (arena) InstructionSimplifier(
graph, stats, "instruction_simplifier_after_bce");
ReferenceTypePropagation* type_propagation2 =
- new (arena) ReferenceTypePropagation(
- graph, handles, "reference_type_propagation_after_inlining");
+ new (arena) ReferenceTypePropagation(graph, handles);
InstructionSimplifier* simplify4 = new (arena) InstructionSimplifier(
graph, stats, "instruction_simplifier_before_codegen");
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index d11a441a6e..68316c2618 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -25,35 +25,19 @@ namespace art {
class RTPVisitor : public HGraphDelegateVisitor {
public:
- RTPVisitor(HGraph* graph,
- StackHandleScopeCollection* handles,
- GrowableArray<HInstruction*>* worklist,
- ReferenceTypeInfo::TypeHandle object_class_handle,
- ReferenceTypeInfo::TypeHandle class_class_handle,
- ReferenceTypeInfo::TypeHandle string_class_handle)
+ RTPVisitor(HGraph* graph, StackHandleScopeCollection* handles)
: HGraphDelegateVisitor(graph),
- handles_(handles),
- object_class_handle_(object_class_handle),
- class_class_handle_(class_class_handle),
- string_class_handle_(string_class_handle),
- worklist_(worklist) {}
+ handles_(handles) {}
- void VisitNullConstant(HNullConstant* null_constant) OVERRIDE;
void VisitNewInstance(HNewInstance* new_instance) OVERRIDE;
void VisitLoadClass(HLoadClass* load_class) OVERRIDE;
- void VisitClinitCheck(HClinitCheck* clinit_check) OVERRIDE;
- void VisitLoadString(HLoadString* instr) OVERRIDE;
void VisitNewArray(HNewArray* instr) OVERRIDE;
- void VisitParameterValue(HParameterValue* instr) OVERRIDE;
void UpdateFieldAccessTypeInfo(HInstruction* instr, const FieldInfo& info);
void SetClassAsTypeInfo(HInstruction* instr, mirror::Class* klass, bool is_exact);
void VisitInstanceFieldGet(HInstanceFieldGet* instr) OVERRIDE;
void VisitStaticFieldGet(HStaticFieldGet* instr) OVERRIDE;
void VisitInvoke(HInvoke* instr) OVERRIDE;
void VisitArrayGet(HArrayGet* instr) OVERRIDE;
- void VisitCheckCast(HCheckCast* instr) OVERRIDE;
- void VisitNullCheck(HNullCheck* instr) OVERRIDE;
- void VisitFakeString(HFakeString* instr) OVERRIDE;
void UpdateReferenceTypeInfo(HInstruction* instr,
uint16_t type_idx,
const DexFile& dex_file,
@@ -61,33 +45,8 @@ class RTPVisitor : public HGraphDelegateVisitor {
private:
StackHandleScopeCollection* handles_;
- ReferenceTypeInfo::TypeHandle object_class_handle_;
- ReferenceTypeInfo::TypeHandle class_class_handle_;
- ReferenceTypeInfo::TypeHandle string_class_handle_;
- GrowableArray<HInstruction*>* worklist_;
-
- static constexpr size_t kDefaultWorklistSize = 8;
};
-ReferenceTypePropagation::ReferenceTypePropagation(HGraph* graph,
- StackHandleScopeCollection* handles,
- const char* name)
- : HOptimization(graph, name),
- handles_(handles),
- worklist_(graph->GetArena(), kDefaultWorklistSize) {
- ClassLinker* linker = Runtime::Current()->GetClassLinker();
- object_class_handle_ = handles_->NewHandle(linker->GetClassRoot(ClassLinker::kJavaLangObject));
- string_class_handle_ = handles_->NewHandle(linker->GetClassRoot(ClassLinker::kJavaLangString));
- class_class_handle_ = handles_->NewHandle(linker->GetClassRoot(ClassLinker::kJavaLangClass));
-
- if (kIsDebugBuild) {
- ScopedObjectAccess soa(Thread::Current());
- DCHECK(ReferenceTypeInfo::IsValidHandle(object_class_handle_));
- DCHECK(ReferenceTypeInfo::IsValidHandle(class_class_handle_));
- DCHECK(ReferenceTypeInfo::IsValidHandle(string_class_handle_));
- }
-}
-
void ReferenceTypePropagation::Run() {
// To properly propagate type info we need to visit in the dominator-based order.
// Reverse post order guarantees a node's dominators are visited first.
@@ -96,122 +55,29 @@ void ReferenceTypePropagation::Run() {
VisitBasicBlock(it.Current());
}
ProcessWorklist();
-
- if (kIsDebugBuild) {
- // TODO: move this to the graph checker.
- ScopedObjectAccess soa(Thread::Current());
- for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
- HBasicBlock* block = it.Current();
- for (HInstructionIterator iti(block->GetInstructions()); !iti.Done(); iti.Advance()) {
- HInstruction* instr = iti.Current();
- if (instr->GetType() == Primitive::kPrimNot) {
- DCHECK(instr->GetReferenceTypeInfo().IsValid())
- << "Invalid RTI for instruction: " << instr->DebugName();
- if (instr->IsBoundType()) {
- DCHECK(instr->AsBoundType()->GetUpperBound().IsValid());
- } else if (instr->IsLoadClass()) {
- DCHECK(instr->AsLoadClass()->GetReferenceTypeInfo().IsExact());
- DCHECK(instr->AsLoadClass()->GetLoadedClassRTI().IsValid());
- } else if (instr->IsNullCheck()) {
- DCHECK(instr->GetReferenceTypeInfo().IsEqual(instr->InputAt(0)->GetReferenceTypeInfo()))
- << "NullCheck " << instr->GetReferenceTypeInfo()
- << "Input(0) " << instr->InputAt(0)->GetReferenceTypeInfo();
- }
- }
- }
- }
- }
}
void ReferenceTypePropagation::VisitBasicBlock(HBasicBlock* block) {
- RTPVisitor visitor(graph_,
- handles_,
- &worklist_,
- object_class_handle_,
- class_class_handle_,
- string_class_handle_);
- // Handle Phis first as there might be instructions in the same block who depend on them.
- for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
- VisitPhi(it.Current()->AsPhi());
- }
+ // TODO: handle other instructions that give type info
+ // (array accesses)
- // Handle instructions.
+ RTPVisitor visitor(graph_, handles_);
+ // Initialize exact types first for faster convergence.
for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
HInstruction* instr = it.Current();
instr->Accept(&visitor);
}
+ // Handle Phis.
+ for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
+ VisitPhi(it.Current()->AsPhi());
+ }
+
// Add extra nodes to bound types.
BoundTypeForIfNotNull(block);
BoundTypeForIfInstanceOf(block);
}
-// Create a bound type for the given object narrowing the type as much as possible.
-// The BoundType upper values for the super type and can_be_null will be taken from
-// load_class.GetLoadedClassRTI() and upper_can_be_null.
-static HBoundType* CreateBoundType(ArenaAllocator* arena,
- HInstruction* obj,
- HLoadClass* load_class,
- bool upper_can_be_null)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- ReferenceTypeInfo obj_rti = obj->GetReferenceTypeInfo();
- ReferenceTypeInfo class_rti = load_class->GetLoadedClassRTI();
- HBoundType* bound_type = new (arena) HBoundType(obj, class_rti, upper_can_be_null);
- // Narrow the type as much as possible.
- if (class_rti.GetTypeHandle()->IsFinal()) {
- bound_type->SetReferenceTypeInfo(
- ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), /* is_exact */ true));
- } else if (obj_rti.IsValid() && class_rti.IsSupertypeOf(obj_rti)) {
- bound_type->SetReferenceTypeInfo(obj_rti);
- } else {
- bound_type->SetReferenceTypeInfo(
- ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), /* is_exact */ false));
- }
- return bound_type;
-}
-
-// Check if we should create a bound type for the given object at the specified
-// position. Because of inlining and the fact we run RTP more than once and we
-// might have a HBoundType already. If we do, we should not create a new one.
-// In this case we also assert that there are no other uses of the object (except
-// the bound type) dominated by the specified dominator_instr or dominator_block.
-static bool ShouldCreateBoundType(HInstruction* position,
- HInstruction* obj,
- ReferenceTypeInfo upper_bound,
- HInstruction* dominator_instr,
- HBasicBlock* dominator_block)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- // If the position where we should insert the bound type is not already a
- // a bound type then we need to create one.
- if (position == nullptr || !position->IsBoundType()) {
- return true;
- }
-
- HBoundType* existing_bound_type = position->AsBoundType();
- if (existing_bound_type->GetUpperBound().IsSupertypeOf(upper_bound)) {
- if (kIsDebugBuild) {
- // Check that the existing HBoundType dominates all the uses.
- for (HUseIterator<HInstruction*> it(obj->GetUses()); !it.Done(); it.Advance()) {
- HInstruction* user = it.Current()->GetUser();
- if (dominator_instr != nullptr) {
- DCHECK(!dominator_instr->StrictlyDominates(user)
- || user == existing_bound_type
- || existing_bound_type->StrictlyDominates(user));
- } else if (dominator_block != nullptr) {
- DCHECK(!dominator_block->Dominates(user->GetBlock())
- || user == existing_bound_type
- || existing_bound_type->StrictlyDominates(user));
- }
- }
- }
- } else {
- // TODO: if the current bound type is a refinement we could update the
- // existing_bound_type with the a new upper limit. However, we also need to
- // update its users and have access to the work list.
- }
- return false;
-}
-
void ReferenceTypePropagation::BoundTypeForIfNotNull(HBasicBlock* block) {
HIf* ifInstruction = block->GetLastInstruction()->AsIf();
if (ifInstruction == nullptr) {
@@ -250,23 +116,8 @@ void ReferenceTypePropagation::BoundTypeForIfNotNull(HBasicBlock* block) {
HInstruction* user = it.Current()->GetUser();
if (notNullBlock->Dominates(user->GetBlock())) {
if (bound_type == nullptr) {
- ScopedObjectAccess soa(Thread::Current());
- HInstruction* insert_point = notNullBlock->GetFirstInstruction();
- ReferenceTypeInfo object_rti = ReferenceTypeInfo::Create(
- object_class_handle_, /* is_exact */ true);
- if (ShouldCreateBoundType(insert_point, obj, object_rti, nullptr, notNullBlock)) {
- bound_type = new (graph_->GetArena()) HBoundType(
- obj, object_rti, /* bound_can_be_null */ false);
- if (obj->GetReferenceTypeInfo().IsValid()) {
- bound_type->SetReferenceTypeInfo(obj->GetReferenceTypeInfo());
- }
- notNullBlock->InsertInstructionBefore(bound_type, insert_point);
- } else {
- // We already have a bound type on the position we would need to insert
- // the new one. The existing bound type should dominate all the users
- // (dchecked) so there's no need to continue.
- break;
- }
+ bound_type = new (graph_->GetArena()) HBoundType(obj, ReferenceTypeInfo::CreateTop(false));
+ notNullBlock->InsertInstructionBefore(bound_type, notNullBlock->GetFirstInstruction());
}
user->ReplaceInput(bound_type, it.Current()->GetIndex());
}
@@ -320,23 +171,25 @@ void ReferenceTypePropagation::BoundTypeForIfInstanceOf(HBasicBlock* block) {
HInstruction* user = it.Current()->GetUser();
if (instanceOfTrueBlock->Dominates(user->GetBlock())) {
if (bound_type == nullptr) {
- ScopedObjectAccess soa(Thread::Current());
HLoadClass* load_class = instanceOf->InputAt(1)->AsLoadClass();
+
+ ReferenceTypeInfo obj_rti = obj->GetReferenceTypeInfo();
ReferenceTypeInfo class_rti = load_class->GetLoadedClassRTI();
- HInstruction* insert_point = instanceOfTrueBlock->GetFirstInstruction();
- if (ShouldCreateBoundType(insert_point, obj, class_rti, nullptr, instanceOfTrueBlock)) {
- bound_type = CreateBoundType(
- graph_->GetArena(),
- obj,
- load_class,
- false /* InstanceOf ensures the object is not null. */);
- instanceOfTrueBlock->InsertInstructionBefore(bound_type, insert_point);
- } else {
- // We already have a bound type on the position we would need to insert
- // the new one. The existing bound type should dominate all the users
- // (dchecked) so there's no need to continue.
- break;
+ bound_type = new (graph_->GetArena()) HBoundType(obj, class_rti);
+
+ // Narrow the type as much as possible.
+ {
+ ScopedObjectAccess soa(Thread::Current());
+ if (!load_class->IsResolved() || class_rti.IsSupertypeOf(obj_rti)) {
+ bound_type->SetReferenceTypeInfo(obj_rti);
+ } else {
+ bound_type->SetReferenceTypeInfo(
+ ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), /* is_exact */ false));
+ }
}
+
+ instanceOfTrueBlock->InsertInstructionBefore(
+ bound_type, instanceOfTrueBlock->GetFirstInstruction());
}
user->ReplaceInput(bound_type, it.Current()->GetIndex());
}
@@ -346,32 +199,11 @@ void ReferenceTypePropagation::BoundTypeForIfInstanceOf(HBasicBlock* block) {
void RTPVisitor::SetClassAsTypeInfo(HInstruction* instr,
mirror::Class* klass,
bool is_exact) {
- if (instr->IsInvokeStaticOrDirect() && instr->AsInvokeStaticOrDirect()->IsStringInit()) {
- // Calls to String.<init> are replaced with a StringFactory.
- if (kIsDebugBuild) {
- ScopedObjectAccess soa(Thread::Current());
- ClassLinker* cl = Runtime::Current()->GetClassLinker();
- mirror::DexCache* dex_cache = cl->FindDexCache(instr->AsInvoke()->GetDexFile());
- ArtMethod* method = dex_cache->GetResolvedMethod(
- instr->AsInvoke()->GetDexMethodIndex(), cl->GetImagePointerSize());
- DCHECK(method != nullptr);
- mirror::Class* declaring_class = method->GetDeclaringClass();
- DCHECK(declaring_class != nullptr);
- DCHECK(declaring_class->IsStringClass())
- << "Expected String class: " << PrettyDescriptor(declaring_class);
- DCHECK(method->IsConstructor())
- << "Expected String.<init>: " << PrettyMethod(method);
- }
- instr->SetReferenceTypeInfo(
- ReferenceTypeInfo::Create(string_class_handle_, /* is_exact */ true));
- } else if (klass != nullptr) {
+ if (klass != nullptr) {
ScopedObjectAccess soa(Thread::Current());
- ReferenceTypeInfo::TypeHandle handle = handles_->NewHandle(klass);
+ MutableHandle<mirror::Class> handle = handles_->NewHandle(klass);
is_exact = is_exact || klass->IsFinal();
instr->SetReferenceTypeInfo(ReferenceTypeInfo::Create(handle, is_exact));
- } else {
- instr->SetReferenceTypeInfo(
- ReferenceTypeInfo::Create(object_class_handle_, /* is_exact */ false));
}
}
@@ -387,11 +219,6 @@ void RTPVisitor::UpdateReferenceTypeInfo(HInstruction* instr,
SetClassAsTypeInfo(instr, dex_cache->GetResolvedType(type_idx), is_exact);
}
-void RTPVisitor::VisitNullConstant(HNullConstant* instr) {
- instr->SetReferenceTypeInfo(
- ReferenceTypeInfo::Create(object_class_handle_, /* is_exact */ false));
-}
-
void RTPVisitor::VisitNewInstance(HNewInstance* instr) {
UpdateReferenceTypeInfo(instr, instr->GetTypeIndex(), instr->GetDexFile(), /* is_exact */ true);
}
@@ -400,13 +227,6 @@ void RTPVisitor::VisitNewArray(HNewArray* instr) {
UpdateReferenceTypeInfo(instr, instr->GetTypeIndex(), instr->GetDexFile(), /* is_exact */ true);
}
-void RTPVisitor::VisitParameterValue(HParameterValue* instr) {
- if (instr->GetType() == Primitive::kPrimNot) {
- // TODO: parse the signature and add precise types for the parameters.
- SetClassAsTypeInfo(instr, nullptr, /* is_exact */ false);
- }
-}
-
void RTPVisitor::UpdateFieldAccessTypeInfo(HInstruction* instr,
const FieldInfo& info) {
// The field index is unknown only during tests.
@@ -418,10 +238,10 @@ void RTPVisitor::UpdateFieldAccessTypeInfo(HInstruction* instr,
ClassLinker* cl = Runtime::Current()->GetClassLinker();
mirror::DexCache* dex_cache = cl->FindDexCache(info.GetDexFile());
ArtField* field = cl->GetResolvedField(info.GetFieldIndex(), dex_cache);
- // TODO: There are certain cases where we can't resolve the field.
- // b/21914925 is open to keep track of a repro case for this issue.
- mirror::Class* klass = (field == nullptr) ? nullptr : field->GetType<false>();
- SetClassAsTypeInfo(instr, klass, /* is_exact */ false);
+ if (field != nullptr) {
+ mirror::Class* klass = field->GetType<false>();
+ SetClassAsTypeInfo(instr, klass, /* is_exact */ false);
+ }
}
void RTPVisitor::VisitInstanceFieldGet(HInstanceFieldGet* instr) {
@@ -438,58 +258,12 @@ void RTPVisitor::VisitLoadClass(HLoadClass* instr) {
Runtime::Current()->GetClassLinker()->FindDexCache(instr->GetDexFile());
// Get type from dex cache assuming it was populated by the verifier.
mirror::Class* resolved_class = dex_cache->GetResolvedType(instr->GetTypeIndex());
- DCHECK(resolved_class != nullptr);
- ReferenceTypeInfo::TypeHandle handle = handles_->NewHandle(resolved_class);
- instr->SetLoadedClassRTI(ReferenceTypeInfo::Create(handle, /* is_exact */ true));
- instr->SetReferenceTypeInfo(ReferenceTypeInfo::Create(class_class_handle_, /* is_exact */ true));
-}
-
-void RTPVisitor::VisitClinitCheck(HClinitCheck* instr) {
- instr->SetReferenceTypeInfo(instr->InputAt(0)->GetReferenceTypeInfo());
-}
-
-void RTPVisitor::VisitLoadString(HLoadString* instr) {
- instr->SetReferenceTypeInfo(ReferenceTypeInfo::Create(string_class_handle_, /* is_exact */ true));
-}
-
-void RTPVisitor::VisitNullCheck(HNullCheck* instr) {
- ScopedObjectAccess soa(Thread::Current());
- ReferenceTypeInfo parent_rti = instr->InputAt(0)->GetReferenceTypeInfo();
- DCHECK(parent_rti.IsValid());
- instr->SetReferenceTypeInfo(parent_rti);
-}
-
-void RTPVisitor::VisitFakeString(HFakeString* instr) {
- instr->SetReferenceTypeInfo(ReferenceTypeInfo::Create(string_class_handle_, /* is_exact */ true));
-}
-
-void RTPVisitor::VisitCheckCast(HCheckCast* check_cast) {
- HInstruction* obj = check_cast->InputAt(0);
- HBoundType* bound_type = nullptr;
- for (HUseIterator<HInstruction*> it(obj->GetUses()); !it.Done(); it.Advance()) {
- HInstruction* user = it.Current()->GetUser();
- if (check_cast->StrictlyDominates(user)) {
- if (bound_type == nullptr) {
- ScopedObjectAccess soa(Thread::Current());
- HLoadClass* load_class = check_cast->InputAt(1)->AsLoadClass();
- ReferenceTypeInfo class_rti = load_class->GetLoadedClassRTI();
- if (ShouldCreateBoundType(check_cast->GetNext(), obj, class_rti, check_cast, nullptr)) {
- bound_type = CreateBoundType(
- GetGraph()->GetArena(),
- obj,
- load_class,
- true /* CheckCast succeeds for nulls. */);
- check_cast->GetBlock()->InsertInstructionAfter(bound_type, check_cast);
- } else {
- // We already have a bound type on the position we would need to insert
- // the new one. The existing bound type should dominate all the users
- // (dchecked) so there's no need to continue.
- break;
- }
- }
- user->ReplaceInput(bound_type, it.Current()->GetIndex());
- }
+ if (resolved_class != nullptr) {
+ Handle<mirror::Class> handle = handles_->NewHandle(resolved_class);
+ instr->SetLoadedClassRTI(ReferenceTypeInfo::Create(handle, /* is_exact */ true));
}
+ Handle<mirror::Class> class_handle = handles_->NewHandle(mirror::Class::GetJavaLangClass());
+ instr->SetReferenceTypeInfo(ReferenceTypeInfo::Create(class_handle, /* is_exact */ true));
}
void ReferenceTypePropagation::VisitPhi(HPhi* phi) {
@@ -516,54 +290,29 @@ void ReferenceTypePropagation::VisitPhi(HPhi* phi) {
ReferenceTypeInfo ReferenceTypePropagation::MergeTypes(const ReferenceTypeInfo& a,
const ReferenceTypeInfo& b) {
- if (!b.IsValid()) {
- return a;
- }
- if (!a.IsValid()) {
- return b;
- }
-
bool is_exact = a.IsExact() && b.IsExact();
+ bool is_top = a.IsTop() || b.IsTop();
Handle<mirror::Class> type_handle;
- if (a.GetTypeHandle().Get() == b.GetTypeHandle().Get()) {
- type_handle = a.GetTypeHandle();
- } else if (a.IsSupertypeOf(b)) {
- type_handle = a.GetTypeHandle();
- is_exact = false;
- } else if (b.IsSupertypeOf(a)) {
- type_handle = b.GetTypeHandle();
- is_exact = false;
- } else {
- // TODO: Find the first common super class.
- type_handle = object_class_handle_;
- is_exact = false;
- }
-
- return ReferenceTypeInfo::Create(type_handle, is_exact);
-}
-
-static void UpdateArrayGet(HArrayGet* instr,
- StackHandleScopeCollection* handles,
- ReferenceTypeInfo::TypeHandle object_class_handle)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK_EQ(Primitive::kPrimNot, instr->GetType());
-
- ReferenceTypeInfo parent_rti = instr->InputAt(0)->GetReferenceTypeInfo();
- DCHECK(parent_rti.IsValid());
-
- Handle<mirror::Class> handle = parent_rti.GetTypeHandle();
- if (handle->IsObjectArrayClass()) {
- ReferenceTypeInfo::TypeHandle component_handle = handles->NewHandle(handle->GetComponentType());
- instr->SetReferenceTypeInfo(
- ReferenceTypeInfo::Create(component_handle, /* is_exact */ false));
- } else {
- // We don't know what the parent actually is, so we fallback to object.
- instr->SetReferenceTypeInfo(
- ReferenceTypeInfo::Create(object_class_handle, /* is_exact */ false));
+ if (!is_top) {
+ if (a.GetTypeHandle().Get() == b.GetTypeHandle().Get()) {
+ type_handle = a.GetTypeHandle();
+ } else if (a.IsSupertypeOf(b)) {
+ type_handle = a.GetTypeHandle();
+ is_exact = false;
+ } else if (b.IsSupertypeOf(a)) {
+ type_handle = b.GetTypeHandle();
+ is_exact = false;
+ } else {
+ // TODO: Find a common super class.
+ is_top = true;
+ is_exact = false;
+ }
}
- return;
+ return is_top
+ ? ReferenceTypeInfo::CreateTop(is_exact)
+ : ReferenceTypeInfo::Create(type_handle, is_exact);
}
bool ReferenceTypePropagation::UpdateReferenceTypeInfo(HInstruction* instr) {
@@ -574,15 +323,6 @@ bool ReferenceTypePropagation::UpdateReferenceTypeInfo(HInstruction* instr) {
UpdateBoundType(instr->AsBoundType());
} else if (instr->IsPhi()) {
UpdatePhi(instr->AsPhi());
- } else if (instr->IsNullCheck()) {
- ReferenceTypeInfo parent_rti = instr->InputAt(0)->GetReferenceTypeInfo();
- if (parent_rti.IsValid()) {
- instr->SetReferenceTypeInfo(parent_rti);
- }
- } else if (instr->IsArrayGet()) {
- // TODO: consider if it's worth "looking back" and bounding the input object
- // to an array type.
- UpdateArrayGet(instr->AsArrayGet(), handles_, object_class_handle_);
} else {
LOG(FATAL) << "Invalid instruction (should not get here)";
}
@@ -600,45 +340,45 @@ void RTPVisitor::VisitInvoke(HInvoke* instr) {
mirror::DexCache* dex_cache = cl->FindDexCache(instr->GetDexFile());
ArtMethod* method = dex_cache->GetResolvedMethod(
instr->GetDexMethodIndex(), cl->GetImagePointerSize());
- mirror::Class* klass = (method == nullptr) ? nullptr : method->GetReturnType(false);
- SetClassAsTypeInfo(instr, klass, /* is_exact */ false);
+ if (method != nullptr) {
+ mirror::Class* klass = method->GetReturnType(false);
+ SetClassAsTypeInfo(instr, klass, /* is_exact */ false);
+ }
}
void RTPVisitor::VisitArrayGet(HArrayGet* instr) {
if (instr->GetType() != Primitive::kPrimNot) {
return;
}
+
+ HInstruction* parent = instr->InputAt(0);
ScopedObjectAccess soa(Thread::Current());
- UpdateArrayGet(instr, handles_, object_class_handle_);
- if (!instr->GetReferenceTypeInfo().IsValid()) {
- worklist_->Add(instr);
+ Handle<mirror::Class> handle = parent->GetReferenceTypeInfo().GetTypeHandle();
+ if (handle.GetReference() != nullptr && handle->IsObjectArrayClass()) {
+ SetClassAsTypeInfo(instr, handle->GetComponentType(), /* is_exact */ false);
}
}
void ReferenceTypePropagation::UpdateBoundType(HBoundType* instr) {
ReferenceTypeInfo new_rti = instr->InputAt(0)->GetReferenceTypeInfo();
- if (!new_rti.IsValid()) {
- return; // No new info yet.
- }
-
- // Make sure that we don't go over the bounded type.
- ReferenceTypeInfo upper_bound_rti = instr->GetUpperBound();
- if (!upper_bound_rti.IsSupertypeOf(new_rti)) {
- new_rti = upper_bound_rti;
+ // Be sure that we don't go over the bounded type.
+ ReferenceTypeInfo bound_rti = instr->GetBoundType();
+ if (!bound_rti.IsSupertypeOf(new_rti)) {
+ new_rti = bound_rti;
}
instr->SetReferenceTypeInfo(new_rti);
}
void ReferenceTypePropagation::UpdatePhi(HPhi* instr) {
ReferenceTypeInfo new_rti = instr->InputAt(0)->GetReferenceTypeInfo();
- if (new_rti.IsValid() && new_rti.IsObjectClass() && !new_rti.IsExact()) {
- // Early return if we are Object and inexact.
+ if (new_rti.IsTop() && !new_rti.IsExact()) {
+ // Early return if we are Top and inexact.
instr->SetReferenceTypeInfo(new_rti);
return;
}
for (size_t i = 1; i < instr->InputCount(); i++) {
new_rti = MergeTypes(new_rti, instr->InputAt(i)->GetReferenceTypeInfo());
- if (new_rti.IsValid() && new_rti.IsObjectClass()) {
+ if (new_rti.IsTop()) {
if (!new_rti.IsExact()) {
break;
} else {
@@ -652,31 +392,21 @@ void ReferenceTypePropagation::UpdatePhi(HPhi* instr) {
// Re-computes and updates the nullability of the instruction. Returns whether or
// not the nullability was changed.
bool ReferenceTypePropagation::UpdateNullability(HInstruction* instr) {
- DCHECK(instr->IsPhi()
- || instr->IsBoundType()
- || instr->IsNullCheck()
- || instr->IsArrayGet());
+ DCHECK(instr->IsPhi() || instr->IsBoundType());
- if (!instr->IsPhi() && !instr->IsBoundType()) {
+ if (!instr->IsPhi()) {
return false;
}
- bool existing_can_be_null = instr->CanBeNull();
- if (instr->IsPhi()) {
- HPhi* phi = instr->AsPhi();
- bool new_can_be_null = false;
- for (size_t i = 0; i < phi->InputCount(); i++) {
- if (phi->InputAt(i)->CanBeNull()) {
- new_can_be_null = true;
- break;
- }
- }
- phi->SetCanBeNull(new_can_be_null);
- } else if (instr->IsBoundType()) {
- HBoundType* bound_type = instr->AsBoundType();
- bound_type->SetCanBeNull(instr->InputAt(0)->CanBeNull() && bound_type->GetUpperCanBeNull());
+ HPhi* phi = instr->AsPhi();
+ bool existing_can_be_null = phi->CanBeNull();
+ bool new_can_be_null = false;
+ for (size_t i = 0; i < phi->InputCount(); i++) {
+ new_can_be_null |= phi->InputAt(i)->CanBeNull();
}
- return existing_can_be_null != instr->CanBeNull();
+ phi->SetCanBeNull(new_can_be_null);
+
+ return existing_can_be_null != new_can_be_null;
}
void ReferenceTypePropagation::ProcessWorklist() {
@@ -689,18 +419,14 @@ void ReferenceTypePropagation::ProcessWorklist() {
}
void ReferenceTypePropagation::AddToWorklist(HInstruction* instruction) {
- DCHECK_EQ(instruction->GetType(), Primitive::kPrimNot)
- << instruction->DebugName() << ":" << instruction->GetType();
+ DCHECK_EQ(instruction->GetType(), Primitive::kPrimNot) << instruction->GetType();
worklist_.Add(instruction);
}
void ReferenceTypePropagation::AddDependentInstructionsToWorklist(HInstruction* instruction) {
for (HUseIterator<HInstruction*> it(instruction->GetUses()); !it.Done(); it.Advance()) {
HInstruction* user = it.Current()->GetUser();
- if (user->IsPhi()
- || user->IsBoundType()
- || user->IsNullCheck()
- || (user->IsArrayGet() && (user->GetType() == Primitive::kPrimNot))) {
+ if (user->IsPhi() || user->IsBoundType()) {
AddToWorklist(user);
}
}
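
With the explicit Top element, MergeTypes above becomes a small join on a lattice: Top absorbs everything, identical handles keep exactness, and unrelated classes widen to Top until a common super class is computed. A standalone model of that join (plain C++, with the class-hierarchy queries passed in rather than taken from mirror::Class):

// Standalone model of the Top-based merge; not the ART types themselves.
struct Rti {
  const void* klass;  // stand-in for Handle<mirror::Class>; nullptr when is_top
  bool is_exact;
  bool is_top;        // treat as java.lang.Object
};

Rti Merge(const Rti& a, const Rti& b, bool a_is_super_of_b, bool b_is_super_of_a) {
  bool is_exact = a.is_exact && b.is_exact;
  bool is_top = a.is_top || b.is_top;   // Top absorbs everything
  const void* klass = nullptr;
  if (!is_top) {
    if (a.klass == b.klass) {
      klass = a.klass;                  // identical types: exactness can survive
    } else if (a_is_super_of_b) {
      klass = a.klass; is_exact = false;
    } else if (b_is_super_of_a) {
      klass = b.klass; is_exact = false;
    } else {
      is_top = true; is_exact = false;  // no common super class tracked yet
    }
  }
  return Rti{klass, is_exact, is_top};
}
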
diff --git a/compiler/optimizing/reference_type_propagation.h b/compiler/optimizing/reference_type_propagation.h
index 9196b56e37..11f5ac91ca 100644
--- a/compiler/optimizing/reference_type_propagation.h
+++ b/compiler/optimizing/reference_type_propagation.h
@@ -30,9 +30,10 @@ namespace art {
*/
class ReferenceTypePropagation : public HOptimization {
public:
- ReferenceTypePropagation(HGraph* graph,
- StackHandleScopeCollection* handles,
- const char* name = kReferenceTypePropagationPassName);
+ ReferenceTypePropagation(HGraph* graph, StackHandleScopeCollection* handles)
+ : HOptimization(graph, kReferenceTypePropagationPassName),
+ handles_(handles),
+ worklist_(graph->GetArena(), kDefaultWorklistSize) {}
void Run() OVERRIDE;
@@ -41,8 +42,8 @@ class ReferenceTypePropagation : public HOptimization {
private:
void VisitPhi(HPhi* phi);
void VisitBasicBlock(HBasicBlock* block);
- void UpdateBoundType(HBoundType* bound_type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void UpdatePhi(HPhi* phi) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void UpdateBoundType(HBoundType* bound_type) SHARED_REQUIRES(Locks::mutator_lock_);
+ void UpdatePhi(HPhi* phi) SHARED_REQUIRES(Locks::mutator_lock_);
void BoundTypeForIfNotNull(HBasicBlock* block);
void BoundTypeForIfInstanceOf(HBasicBlock* block);
void ProcessWorklist();
@@ -53,16 +54,12 @@ class ReferenceTypePropagation : public HOptimization {
bool UpdateReferenceTypeInfo(HInstruction* instr);
ReferenceTypeInfo MergeTypes(const ReferenceTypeInfo& a, const ReferenceTypeInfo& b)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
StackHandleScopeCollection* handles_;
GrowableArray<HInstruction*> worklist_;
- ReferenceTypeInfo::TypeHandle object_class_handle_;
- ReferenceTypeInfo::TypeHandle class_class_handle_;
- ReferenceTypeInfo::TypeHandle string_class_handle_;
-
static constexpr size_t kDefaultWorklistSize = 8;
DISALLOW_COPY_AND_ASSIGN(ReferenceTypePropagation);
diff --git a/compiler/trampolines/trampoline_compiler.h b/compiler/trampolines/trampoline_compiler.h
index bdab2796d8..9fb22452ea 100644
--- a/compiler/trampolines/trampoline_compiler.h
+++ b/compiler/trampolines/trampoline_compiler.h
@@ -27,10 +27,10 @@ namespace art {
// Create code that will invoke the function held in thread local storage.
const std::vector<uint8_t>* CreateTrampoline32(InstructionSet isa, EntryPointCallingConvention abi,
ThreadOffset<4> entry_point_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
const std::vector<uint8_t>* CreateTrampoline64(InstructionSet isa, EntryPointCallingConvention abi,
ThreadOffset<8> entry_point_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
} // namespace art
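
The annotation renames running through this change (SHARED_LOCKS_REQUIRED to SHARED_REQUIRES, LOCKS_EXCLUDED(lock_) to REQUIRES(!lock_)) track Clang's capability-based thread-safety analysis. A generic illustration of how such macros map onto the Clang attributes; this is the pattern from the Clang documentation, not ART's own macro definitions, which may differ:

// Generic Clang thread-safety annotation pattern; ART defines its own variants.
#if defined(__clang__)
#define THREAD_ANNOTATION__(x) __attribute__((x))
#else
#define THREAD_ANNOTATION__(x)
#endif

#define CAPABILITY(x)         THREAD_ANNOTATION__(capability(x))
#define REQUIRES(...)         THREAD_ANNOTATION__(requires_capability(__VA_ARGS__))
#define SHARED_REQUIRES(...)  THREAD_ANNOTATION__(requires_shared_capability(__VA_ARGS__))

struct CAPABILITY("mutex") ToyMutex {};
ToyMutex toy_lock;

// Caller must hold toy_lock, at least for reading (old SHARED_LOCKS_REQUIRED).
void ReadUnderLock() SHARED_REQUIRES(toy_lock);

// Caller must NOT hold toy_lock; the negative capability replaces LOCKS_EXCLUDED.
void AcquiresLockItself() REQUIRES(!toy_lock);
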
diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc
index 413b9eaa8c..b499dddb0c 100644
--- a/compiler/utils/arm/assembler_thumb2.cc
+++ b/compiler/utils/arm/assembler_thumb2.cc
@@ -133,14 +133,27 @@ uint32_t Thumb2Assembler::AdjustFixups() {
AdjustFixupIfNeeded(&fixup, &current_code_size, &fixups_to_recalculate);
}
while (!fixups_to_recalculate.empty()) {
- // Pop the fixup.
- FixupId fixup_id = fixups_to_recalculate.front();
- fixups_to_recalculate.pop_front();
- Fixup* fixup = GetFixup(fixup_id);
- DCHECK_NE(buffer_.Load<int16_t>(fixup->GetLocation()), 0);
- buffer_.Store<int16_t>(fixup->GetLocation(), 0);
- // See if it needs adjustment.
- AdjustFixupIfNeeded(fixup, &current_code_size, &fixups_to_recalculate);
+ do {
+ // Pop the fixup.
+ FixupId fixup_id = fixups_to_recalculate.front();
+ fixups_to_recalculate.pop_front();
+ Fixup* fixup = GetFixup(fixup_id);
+ DCHECK_NE(buffer_.Load<int16_t>(fixup->GetLocation()), 0);
+ buffer_.Store<int16_t>(fixup->GetLocation(), 0);
+ // See if it needs adjustment.
+ AdjustFixupIfNeeded(fixup, &current_code_size, &fixups_to_recalculate);
+ } while (!fixups_to_recalculate.empty());
+
+ if ((current_code_size & 2) != 0 && !literals_.empty()) {
+ // If we need to add padding before literals, this may just push some out of range,
+ // so recalculate all load literals. This makes up for the fact that we don't mark
+ // load literals as dependencies of all previous Fixups even though they actually are.
+ for (Fixup& fixup : fixups_) {
+ if (fixup.IsLoadLiteral()) {
+ AdjustFixupIfNeeded(&fixup, &current_code_size, &fixups_to_recalculate);
+ }
+ }
+ }
}
if (kIsDebugBuild) {
// Check that no fixup is marked as being in fixups_to_recalculate anymore.
diff --git a/compiler/utils/arm/assembler_thumb2.h b/compiler/utils/arm/assembler_thumb2.h
index 838554ee6d..41eb5d36f2 100644
--- a/compiler/utils/arm/assembler_thumb2.h
+++ b/compiler/utils/arm/assembler_thumb2.h
@@ -489,6 +489,10 @@ class Thumb2Assembler FINAL : public ArmAssembler {
return type_;
}
+ bool IsLoadLiteral() const {
+ return GetType() >= kLoadLiteralNarrow;
+ }
+
Size GetOriginalSize() const {
return original_size_;
}
diff --git a/compiler/utils/arm/assembler_thumb2_test.cc b/compiler/utils/arm/assembler_thumb2_test.cc
index 68b7931a0c..004853f224 100644
--- a/compiler/utils/arm/assembler_thumb2_test.cc
+++ b/compiler/utils/arm/assembler_thumb2_test.cc
@@ -950,4 +950,65 @@ TEST_F(AssemblerThumb2Test, LoadLiteralDoubleFar) {
__ GetAdjustedPosition(label.Position()));
}
+TEST_F(AssemblerThumb2Test, LoadLiteralBeyondMax1KiBDueToAlignmentOnSecondPass) {
+ // First part: as TwoCbzBeyondMaxOffset but add one 16-bit instruction to the end,
+ // so that the size is not Aligned<4>(.). On the first pass, the assembler resizes
+ // the second CBZ because it's out of range, then it will resize the first CBZ
+ // which has been pushed out of range. Thus, after the first pass, the code size
+ // will appear Aligned<4>(.) but the final size will not be.
+ Label label0, label1, label2;
+ __ cbz(arm::R0, &label1);
+ constexpr size_t kLdrR0R0Count1 = 63;
+ for (size_t i = 0; i != kLdrR0R0Count1; ++i) {
+ __ ldr(arm::R0, arm::Address(arm::R0));
+ }
+ __ Bind(&label0);
+ __ cbz(arm::R0, &label2);
+ __ Bind(&label1);
+ constexpr size_t kLdrR0R0Count2 = 65;
+ for (size_t i = 0; i != kLdrR0R0Count2; ++i) {
+ __ ldr(arm::R0, arm::Address(arm::R0));
+ }
+ __ Bind(&label2);
+ __ ldr(arm::R0, arm::Address(arm::R0));
+
+ std::string expected_part1 =
+ "cmp r0, #0\n" // cbz r0, label1
+ "beq.n 1f\n" +
+ RepeatInsn(kLdrR0R0Count1, "ldr r0, [r0]\n") +
+ "0:\n"
+ "cmp r0, #0\n" // cbz r0, label2
+ "beq.n 2f\n"
+ "1:\n" +
+ RepeatInsn(kLdrR0R0Count2, "ldr r0, [r0]\n") +
+ "2:\n" // Here the offset is Aligned<4>(.).
+ "ldr r0, [r0]\n"; // Make the first part
+
+ // Second part: as LoadLiteralMax1KiB with the caveat that the offset of the load
+ // literal will not be Aligned<4>(.) but it will appear to be when we process the
+ // instruction during the first pass, so the literal will need padding, which
+ // will push it out of range, and we shall end up with "ldr.w".
+ arm::Literal* literal = __ NewLiteral<int32_t>(0x12345678);
+ __ LoadLiteral(arm::R0, literal);
+ Label label;
+ __ Bind(&label);
+ constexpr size_t kLdrR0R0Count = 511;
+ for (size_t i = 0; i != kLdrR0R0Count; ++i) {
+ __ ldr(arm::R0, arm::Address(arm::R0));
+ }
+
+ std::string expected =
+ expected_part1 +
+ "1:\n"
+ "ldr.w r0, [pc, #((2f - 1b - 2) & ~2)]\n" +
+ RepeatInsn(kLdrR0R0Count, "ldr r0, [r0]\n") +
+ ".align 2, 0\n"
+ "2:\n"
+ ".word 0x12345678\n";
+ DriverStr(expected, "LoadLiteralMax1KiB");
+
+ EXPECT_EQ(static_cast<uint32_t>(label.Position()) + 6u,
+ __ GetAdjustedPosition(label.Position()));
+}
+
} // namespace art
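
The new test above comes down to plain offset arithmetic: the 16-bit Thumb LDR (literal) encoding only reaches a 4-byte-aligned literal up to 255 * 4 = 1020 bytes past the aligned PC, so two bytes of alignment padding inserted on the second pass can push a previously in-range load out of the narrow form and force ldr.w. A small sketch of that check (the shape of the decision only, not the assembler's Fixup machinery):

#include <cstdint>

// Whether a PC-relative load literal still fits the narrow 16-bit Thumb encoding.
// offset_bytes is measured from the instruction's 4-byte-aligned PC to the literal.
bool FitsNarrowLoadLiteral(uint32_t offset_bytes) {
  // T1 encoding: LDR <Rt>, [PC, #imm8*4] -> imm8 is a word offset, max 255 * 4 = 1020.
  return (offset_bytes % 4u) == 0u && offset_bytes <= 1020u;
}

// Adding 2 bytes of padding before the literal pool can flip the result: an offset
// of 1020 fits, but an unaligned offset of 1022 needs padding, and the padded
// offset of 1024 is out of range for the narrow form, hence ldr.w.
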
diff --git a/compiler/utils/swap_space.cc b/compiler/utils/swap_space.cc
index 325ee4fa01..42ed8810f8 100644
--- a/compiler/utils/swap_space.cc
+++ b/compiler/utils/swap_space.cc
@@ -143,7 +143,6 @@ SpaceChunk SwapSpace::NewFileChunk(size_t min_size) {
LOG(ERROR) << "Unable to mmap new swap file chunk.";
LOG(ERROR) << "Current size: " << size_ << " requested: " << next_part << "/" << min_size;
LOG(ERROR) << "Free list:";
- MutexLock lock(Thread::Current(), lock_);
DumpFreeMap(free_by_size_);
LOG(ERROR) << "In free list: " << CollectFree(free_by_start_, free_by_size_);
LOG(FATAL) << "Aborting...";
diff --git a/compiler/utils/swap_space.h b/compiler/utils/swap_space.h
index 691df4a945..f7c772d673 100644
--- a/compiler/utils/swap_space.h
+++ b/compiler/utils/swap_space.h
@@ -60,15 +60,15 @@ class SwapSpace {
public:
SwapSpace(int fd, size_t initial_size);
~SwapSpace();
- void* Alloc(size_t size) LOCKS_EXCLUDED(lock_);
- void Free(void* ptr, size_t size) LOCKS_EXCLUDED(lock_);
+ void* Alloc(size_t size) REQUIRES(!lock_);
+ void Free(void* ptr, size_t size) REQUIRES(!lock_);
size_t GetSize() {
return size_;
}
private:
- SpaceChunk NewFileChunk(size_t min_size);
+ SpaceChunk NewFileChunk(size_t min_size) REQUIRES(lock_);
int fd_;
size_t size_;