-rw-r--r--  compiler/driver/compiler_driver-inl.h | 59
-rw-r--r--  compiler/driver/compiler_driver.h | 36
-rw-r--r--  compiler/oat_writer.cc | 4
-rw-r--r--  compiler/optimizing/builder.h | 5
-rw-r--r--  compiler/optimizing/code_generator_arm.cc | 4
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc | 4
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc | 4
-rw-r--r--  compiler/optimizing/code_generator_mips.cc | 4
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc | 4
-rw-r--r--  compiler/optimizing/code_generator_x86.cc | 4
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc | 3
-rw-r--r--  compiler/optimizing/inliner.cc | 9
-rw-r--r--  compiler/optimizing/instruction_builder.cc | 215
-rw-r--r--  compiler/optimizing/instruction_builder.h | 18
-rw-r--r--  compiler/optimizing/nodes.cc | 9
-rw-r--r--  compiler/optimizing/nodes.h | 21
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc | 1
-rw-r--r--  compiler/optimizing/sharpening.cc | 143
-rw-r--r--  compiler/optimizing/sharpening.h | 12
-rw-r--r--  compiler/optimizing/ssa_builder.cc | 2
-rw-r--r--  compiler/optimizing/stack_map_stream.cc | 2
-rw-r--r--  runtime/Android.bp | 1
-rw-r--r--  runtime/cha.cc | 1
-rw-r--r--  runtime/entrypoints/quick/quick_jni_entrypoints.cc | 2
-rw-r--r--  runtime/gc/collector/concurrent_copying-inl.h | 1
-rw-r--r--  runtime/gc/collector/concurrent_copying.cc | 6
-rw-r--r--  runtime/gc/heap-inl.h | 2
-rw-r--r--  runtime/gc/heap.cc | 1
-rw-r--r--  runtime/handle_scope-inl.h | 2
-rw-r--r--  runtime/handle_scope_test.cc | 2
-rw-r--r--  runtime/indirect_reference_table-inl.h | 2
-rw-r--r--  runtime/indirect_reference_table.cc | 1
-rw-r--r--  runtime/jdwp/object_registry.cc | 1
-rw-r--r--  runtime/jit/jit.cc | 13
-rw-r--r--  runtime/mirror/class-inl.h | 17
-rw-r--r--  runtime/mirror/class.cc | 21
-rw-r--r--  runtime/mirror/class.h | 11
-rw-r--r--  runtime/mirror/method_handle_impl.h | 2
-rw-r--r--  runtime/mirror/method_type_test.cc | 7
-rw-r--r--  runtime/mirror/object-inl.h | 87
-rw-r--r--  runtime/mirror/object.h | 34
-rw-r--r--  runtime/native/java_lang_String.cc | 2
-rw-r--r--  runtime/native/java_lang_Thread.cc | 2
-rw-r--r--  runtime/native/java_lang_reflect_Proxy.cc | 2
-rw-r--r--  runtime/oat_file_manager.cc | 3
-rw-r--r--  runtime/openjdkjvm/OpenjdkJvm.cc | 2
-rw-r--r--  runtime/runtime.cc | 39
-rw-r--r--  runtime/scoped_thread_state_change-inl.h | 4
-rw-r--r--  runtime/scoped_thread_state_change.h | 8
-rw-r--r--  runtime/stack.cc | 2
-rw-r--r--  runtime/thread.cc | 2
-rw-r--r--  runtime/transaction.cc | 171
-rw-r--r--  runtime/transaction.h | 78
-rw-r--r--  runtime/verify_object-inl.h | 22
-rw-r--r--  runtime/verify_object.cc | 47
-rw-r--r--  runtime/verify_object.h | 11
-rw-r--r--  test/552-checker-sharpening/src/Main.java | 6
-rw-r--r--  test/626-const-class-linking/clear_dex_cache_types.cc | 3
-rw-r--r--  test/636-wrong-static-access/expected.txt | 1
-rw-r--r--  test/636-wrong-static-access/info.txt | 2
-rwxr-xr-x  test/636-wrong-static-access/run | 20
-rw-r--r--  test/636-wrong-static-access/src-ex/Foo.java | 38
-rw-r--r--  test/636-wrong-static-access/src/Holder.java | 19
-rw-r--r--  test/636-wrong-static-access/src/Main.java | 39
-rw-r--r--  test/636-wrong-static-access/src2/Holder.java | 19
-rw-r--r--  test/911-get-stack-trace/src/PrintThread.java | 3
-rw-r--r--  test/924-threads/src/Main.java | 17
-rw-r--r--  test/925-threadgroups/src/Main.java | 21
-rw-r--r--  tools/cpp-define-generator/constant_jit.def | 1
69 files changed, 819 insertions, 542 deletions
diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h
index f056dd3c00..f296851ebf 100644
--- a/compiler/driver/compiler_driver-inl.h
+++ b/compiler/driver/compiler_driver-inl.h
@@ -135,65 +135,6 @@ inline bool CompilerDriver::CanAccessResolvedMember<ArtMethod>(
return referrer_class->CanAccessResolvedMethod(access_to, method, dex_cache, field_idx);
}
-template <typename ArtMember>
-inline std::pair<bool, bool> CompilerDriver::IsClassOfStaticMemberAvailableToReferrer(
- mirror::DexCache* dex_cache,
- mirror::Class* referrer_class,
- ArtMember* resolved_member,
- uint16_t member_idx,
- dex::TypeIndex* storage_index) {
- DCHECK(resolved_member->IsStatic());
- if (LIKELY(referrer_class != nullptr)) {
- ObjPtr<mirror::Class> members_class = resolved_member->GetDeclaringClass();
- if (members_class == referrer_class) {
- *storage_index = members_class->GetDexTypeIndex();
- return std::make_pair(true, true);
- }
- if (CanAccessResolvedMember<ArtMember>(
- referrer_class, members_class.Ptr(), resolved_member, dex_cache, member_idx)) {
- // We have the resolved member, we must make it into a index for the referrer
- // in its static storage (which may fail if it doesn't have a slot for it)
- // TODO: for images we can elide the static storage base null check
- // if we know there's a non-null entry in the image
- const DexFile* dex_file = dex_cache->GetDexFile();
- dex::TypeIndex storage_idx(DexFile::kDexNoIndex16);
- if (LIKELY(members_class->GetDexCache() == dex_cache)) {
- // common case where the dex cache of both the referrer and the member are the same,
- // no need to search the dex file
- storage_idx = members_class->GetDexTypeIndex();
- } else {
- // Search dex file for localized ssb index, may fail if member's class is a parent
- // of the class mentioned in the dex file and there is no dex cache entry.
- storage_idx = resolved_member->GetDeclaringClass()->FindTypeIndexInOtherDexFile(*dex_file);
- }
- if (storage_idx.IsValid()) {
- *storage_index = storage_idx;
- return std::make_pair(true, !resolved_member->IsFinal());
- }
- }
- }
- // Conservative defaults.
- *storage_index = dex::TypeIndex(DexFile::kDexNoIndex16);
- return std::make_pair(false, false);
-}
-
-inline std::pair<bool, bool> CompilerDriver::IsFastStaticField(
- mirror::DexCache* dex_cache, mirror::Class* referrer_class,
- ArtField* resolved_field, uint16_t field_idx, dex::TypeIndex* storage_index) {
- return IsClassOfStaticMemberAvailableToReferrer(
- dex_cache, referrer_class, resolved_field, field_idx, storage_index);
-}
-
-inline bool CompilerDriver::IsClassOfStaticMethodAvailableToReferrer(
- mirror::DexCache* dex_cache, mirror::Class* referrer_class,
- ArtMethod* resolved_method, uint16_t method_idx, dex::TypeIndex* storage_index) {
- std::pair<bool, bool> result = IsClassOfStaticMemberAvailableToReferrer(
- dex_cache, referrer_class, resolved_method, method_idx, storage_index);
- // Only the first member of `result` is meaningful, as there is no
- // "write access" to a method.
- return result.first;
-}
-
inline ArtMethod* CompilerDriver::ResolveMethod(
ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 503fe3adfc..5b4c751c4a 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -233,27 +233,6 @@ class CompilerDriver {
ArtField* resolved_field, uint16_t field_idx)
REQUIRES_SHARED(Locks::mutator_lock_);
- // Can we fast-path an SGET/SPUT access to a static field? If yes, compute the type index
- // of the declaring class in the referrer's dex file.
- std::pair<bool, bool> IsFastStaticField(mirror::DexCache* dex_cache,
- mirror::Class* referrer_class,
- ArtField* resolved_field,
- uint16_t field_idx,
- dex::TypeIndex* storage_index)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
- // Return whether the declaring class of `resolved_method` is
- // available to `referrer_class`. If this is true, compute the type
- // index of the declaring class in the referrer's dex file and
- // return it through the out argument `storage_index`; otherwise
- // return DexFile::kDexNoIndex through `storage_index`.
- bool IsClassOfStaticMethodAvailableToReferrer(mirror::DexCache* dex_cache,
- mirror::Class* referrer_class,
- ArtMethod* resolved_method,
- uint16_t method_idx,
- dex::TypeIndex* storage_index)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
// Resolve a method. Returns null on failure, including incompatible class change.
ArtMethod* ResolveMethod(
ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
@@ -379,21 +358,6 @@ class CompilerDriver {
}
private:
- // Return whether the declaring class of `resolved_member` is
- // available to `referrer_class` for read or write access using two
- // Boolean values returned as a pair. If is true at least for read
- // access, compute the type index of the declaring class in the
- // referrer's dex file and return it through the out argument
- // `storage_index`; otherwise return DexFile::kDexNoIndex through
- // `storage_index`.
- template <typename ArtMember>
- std::pair<bool, bool> IsClassOfStaticMemberAvailableToReferrer(mirror::DexCache* dex_cache,
- mirror::Class* referrer_class,
- ArtMember* resolved_member,
- uint16_t member_idx,
- dex::TypeIndex* storage_index)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
// Can `referrer_class` access the resolved `member`?
// Dispatch call to mirror::Class::CanAccessResolvedField or
// mirror::Class::CanAccessResolvedMember depending on the value of
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index bd2c5e3bfc..a16a34b299 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -2266,6 +2266,10 @@ bool OatWriter::LayoutAndWriteDexFile(OutputStream* out, OatDexFile* oat_dex_fil
File* raw_file = oat_dex_file->source_.GetRawFile();
dex_file = DexFile::OpenDex(raw_file->Fd(), location, /* verify_checksum */ true, &error_msg);
}
+ if (dex_file == nullptr) {
+ LOG(ERROR) << "Failed to open dex file for layout:" << error_msg;
+ return false;
+ }
Options options;
options.output_to_memmap_ = true;
DexLayout dex_layout(options, profile_compilation_info_, nullptr);
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index 8cf4089eba..e4ad4222fb 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -32,6 +32,8 @@
namespace art {
+class CodeGenerator;
+
class HGraphBuilder : public ValueObject {
public:
HGraphBuilder(HGraph* graph,
@@ -40,6 +42,7 @@ class HGraphBuilder : public ValueObject {
const DexFile* dex_file,
const DexFile::CodeItem& code_item,
CompilerDriver* driver,
+ CodeGenerator* code_generator,
OptimizingCompilerStats* compiler_stats,
const uint8_t* interpreter_metadata,
Handle<mirror::DexCache> dex_cache,
@@ -61,6 +64,7 @@ class HGraphBuilder : public ValueObject {
dex_compilation_unit,
outer_compilation_unit,
driver,
+ code_generator,
interpreter_metadata,
compiler_stats,
dex_cache,
@@ -89,6 +93,7 @@ class HGraphBuilder : public ValueObject {
/* dex_compilation_unit */ nullptr,
/* outer_compilation_unit */ nullptr,
/* compiler_driver */ nullptr,
+ /* code_generator */ nullptr,
/* interpreter_metadata */ nullptr,
/* compiler_stats */ nullptr,
null_dex_cache_,
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 1893029cad..20cdae3619 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -5722,6 +5722,9 @@ void ParallelMoveResolverARM::RestoreScratch(int reg) {
HLoadClass::LoadKind CodeGeneratorARM::GetSupportedLoadClassKind(
HLoadClass::LoadKind desired_class_load_kind) {
switch (desired_class_load_kind) {
+ case HLoadClass::LoadKind::kInvalid:
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
case HLoadClass::LoadKind::kReferrersClass:
break;
case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
@@ -5852,6 +5855,7 @@ void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFE
break;
}
case HLoadClass::LoadKind::kDexCacheViaMethod:
+ case HLoadClass::LoadKind::kInvalid:
LOG(FATAL) << "UNREACHABLE";
UNREACHABLE();
}
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 26c8254c76..598be4715b 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -4360,6 +4360,9 @@ void InstructionCodeGeneratorARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
HLoadClass::LoadKind CodeGeneratorARM64::GetSupportedLoadClassKind(
HLoadClass::LoadKind desired_class_load_kind) {
switch (desired_class_load_kind) {
+ case HLoadClass::LoadKind::kInvalid:
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
case HLoadClass::LoadKind::kReferrersClass:
break;
case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
@@ -4498,6 +4501,7 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SA
break;
}
case HLoadClass::LoadKind::kDexCacheViaMethod:
+ case HLoadClass::LoadKind::kInvalid:
LOG(FATAL) << "UNREACHABLE";
UNREACHABLE();
}
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index f4d3ec54ee..0d31d830c8 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -5796,6 +5796,9 @@ void ParallelMoveResolverARMVIXL::RestoreScratch(int reg ATTRIBUTE_UNUSED) {
HLoadClass::LoadKind CodeGeneratorARMVIXL::GetSupportedLoadClassKind(
HLoadClass::LoadKind desired_class_load_kind) {
switch (desired_class_load_kind) {
+ case HLoadClass::LoadKind::kInvalid:
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
case HLoadClass::LoadKind::kReferrersClass:
break;
case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
@@ -5916,6 +5919,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) NO_THREAD_
break;
}
case HLoadClass::LoadKind::kDexCacheViaMethod:
+ case HLoadClass::LoadKind::kInvalid:
LOG(FATAL) << "UNREACHABLE";
UNREACHABLE();
}
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 00969443c1..0677dad078 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -5308,6 +5308,9 @@ HLoadClass::LoadKind CodeGeneratorMIPS::GetSupportedLoadClassKind(
bool is_r6 = GetInstructionSetFeatures().IsR6();
bool fallback_load = has_irreducible_loops && !is_r6;
switch (desired_class_load_kind) {
+ case HLoadClass::LoadKind::kInvalid:
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
case HLoadClass::LoadKind::kReferrersClass:
fallback_load = false;
break;
@@ -5660,6 +5663,7 @@ void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAF
break;
}
case HLoadClass::LoadKind::kDexCacheViaMethod:
+ case HLoadClass::LoadKind::kInvalid:
LOG(FATAL) << "UNREACHABLE";
UNREACHABLE();
}
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 55904a3679..4c8dabfede 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -3368,6 +3368,9 @@ HLoadClass::LoadKind CodeGeneratorMIPS64::GetSupportedLoadClassKind(
}
bool fallback_load = false;
switch (desired_class_load_kind) {
+ case HLoadClass::LoadKind::kInvalid:
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
case HLoadClass::LoadKind::kReferrersClass:
break;
case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
@@ -3629,6 +3632,7 @@ void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) NO_THREAD_S
GenerateGcRootFieldLoad(cls, out_loc, out, 0);
break;
case HLoadClass::LoadKind::kDexCacheViaMethod:
+ case HLoadClass::LoadKind::kInvalid:
LOG(FATAL) << "UNREACHABLE";
UNREACHABLE();
}
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 3e795c7bf8..137b55423b 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -6024,6 +6024,9 @@ void ParallelMoveResolverX86::RestoreScratch(int reg) {
HLoadClass::LoadKind CodeGeneratorX86::GetSupportedLoadClassKind(
HLoadClass::LoadKind desired_class_load_kind) {
switch (desired_class_load_kind) {
+ case HLoadClass::LoadKind::kInvalid:
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
case HLoadClass::LoadKind::kReferrersClass:
break;
case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
@@ -6159,6 +6162,7 @@ void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFE
break;
}
case HLoadClass::LoadKind::kDexCacheViaMethod:
+ case HLoadClass::LoadKind::kInvalid:
LOG(FATAL) << "UNREACHABLE";
UNREACHABLE();
}
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index abd8246325..c5367ce86e 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -5427,6 +5427,9 @@ void InstructionCodeGeneratorX86_64::GenerateClassInitializationCheck(
HLoadClass::LoadKind CodeGeneratorX86_64::GetSupportedLoadClassKind(
HLoadClass::LoadKind desired_class_load_kind) {
switch (desired_class_load_kind) {
+ case HLoadClass::LoadKind::kInvalid:
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
case HLoadClass::LoadKind::kReferrersClass:
break;
case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 7772e8f973..b08c7a0615 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -558,9 +558,13 @@ HInstruction* HInliner::AddTypeGuard(HInstruction* receiver,
is_referrer,
invoke_instruction->GetDexPc(),
/* needs_access_check */ false);
+ HLoadClass::LoadKind kind = HSharpening::SharpenClass(
+ load_class, codegen_, compiler_driver_, caller_compilation_unit_);
+ DCHECK(kind != HLoadClass::LoadKind::kInvalid)
+ << "We should always be able to reference a class for inline caches";
+ // Insert before setting the kind, as setting the kind affects the inputs.
bb_cursor->InsertInstructionAfter(load_class, receiver_class);
- // Sharpen after adding the instruction, as the sharpening may remove inputs.
- HSharpening::SharpenClass(load_class, codegen_, compiler_driver_);
+ load_class->SetLoadKind(kind);
// TODO: Extend reference type propagation to understand the guard.
HNotEqual* compare = new (graph_->GetArena()) HNotEqual(load_class, receiver_class);
@@ -1286,6 +1290,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
resolved_method->GetDexFile(),
*code_item,
compiler_driver_,
+ codegen_,
inline_stats.get(),
resolved_method->GetQuickenedInfo(class_linker->GetImagePointerSize()),
dex_cache,
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index cac385ce3c..9a3fd2b054 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -22,6 +22,7 @@
#include "dex_instruction-inl.h"
#include "driver/compiler_options.h"
#include "imtable-inl.h"
+#include "sharpening.h"
#include "scoped_thread_state_change-inl.h"
namespace art {
@@ -847,7 +848,7 @@ bool HInstructionBuilder::BuildInvoke(const Instruction& instruction,
ScopedObjectAccess soa(Thread::Current());
if (invoke_type == kStatic) {
clinit_check = ProcessClinitCheckForInvoke(
- dex_pc, resolved_method, method_idx, &clinit_check_requirement);
+ dex_pc, resolved_method, &clinit_check_requirement);
} else if (invoke_type == kSuper) {
if (IsSameDexFile(*resolved_method->GetDexFile(), *dex_compilation_unit_->GetDexFile())) {
// Update the method index to the one resolved. Note that this may be a no-op if
@@ -941,7 +942,7 @@ bool HInstructionBuilder::BuildNewInstance(dex::TypeIndex type_index, uint32_t d
return false;
}
- HLoadClass* load_class = BuildLoadClass(type_index, dex_pc, /* check_access */ true);
+ HLoadClass* load_class = BuildLoadClass(type_index, dex_pc);
HInstruction* cls = load_class;
Handle<mirror::Class> klass = load_class->GetClass();
@@ -1005,39 +1006,23 @@ bool HInstructionBuilder::IsInitialized(Handle<mirror::Class> cls) const {
HClinitCheck* HInstructionBuilder::ProcessClinitCheckForInvoke(
uint32_t dex_pc,
ArtMethod* resolved_method,
- uint32_t method_idx,
HInvokeStaticOrDirect::ClinitCheckRequirement* clinit_check_requirement) {
- Thread* self = Thread::Current();
- StackHandleScope<2> hs(self);
- Handle<mirror::DexCache> dex_cache = dex_compilation_unit_->GetDexCache();
- Handle<mirror::DexCache> outer_dex_cache = outer_compilation_unit_->GetDexCache();
- Handle<mirror::Class> outer_class(hs.NewHandle(GetOutermostCompilingClass()));
- Handle<mirror::Class> resolved_method_class(hs.NewHandle(resolved_method->GetDeclaringClass()));
-
- // The index at which the method's class is stored in the DexCache's type array.
- dex::TypeIndex storage_index;
- bool is_outer_class = (resolved_method->GetDeclaringClass() == outer_class.Get());
- if (is_outer_class) {
- storage_index = outer_class->GetDexTypeIndex();
- } else if (outer_dex_cache.Get() == dex_cache.Get()) {
- // Get `storage_index` from IsClassOfStaticMethodAvailableToReferrer.
- compiler_driver_->IsClassOfStaticMethodAvailableToReferrer(outer_dex_cache.Get(),
- GetCompilingClass(),
- resolved_method,
- method_idx,
- &storage_index);
- }
+ Handle<mirror::Class> klass = handles_->NewHandle(resolved_method->GetDeclaringClass());
HClinitCheck* clinit_check = nullptr;
-
- if (IsInitialized(resolved_method_class)) {
+ if (IsInitialized(klass)) {
*clinit_check_requirement = HInvokeStaticOrDirect::ClinitCheckRequirement::kNone;
- } else if (storage_index.IsValid()) {
- *clinit_check_requirement = HInvokeStaticOrDirect::ClinitCheckRequirement::kExplicit;
- HLoadClass* cls = BuildLoadClass(
- storage_index, dex_pc, /* check_access */ false, /* outer */ true);
- clinit_check = new (arena_) HClinitCheck(cls, dex_pc);
- AppendInstruction(clinit_check);
+ } else {
+ HLoadClass* cls = BuildLoadClass(klass->GetDexTypeIndex(),
+ klass->GetDexFile(),
+ klass,
+ dex_pc,
+ /* needs_access_check */ false);
+ if (cls != nullptr) {
+ *clinit_check_requirement = HInvokeStaticOrDirect::ClinitCheckRequirement::kExplicit;
+ clinit_check = new (arena_) HClinitCheck(cls, dex_pc);
+ AppendInstruction(clinit_check);
+ }
}
return clinit_check;
}
@@ -1216,9 +1201,7 @@ bool HInstructionBuilder::BuildInstanceFieldAccess(const Instruction& instructio
}
ScopedObjectAccess soa(Thread::Current());
- ArtField* resolved_field =
- compiler_driver_->ComputeInstanceFieldInfo(field_index, dex_compilation_unit_, is_put, soa);
-
+ ArtField* resolved_field = ResolveField(field_index, /* is_static */ false, is_put);
// Generate an explicit null check on the reference, unless the field access
// is unresolved. In that case, we rely on the runtime to perform various
@@ -1336,6 +1319,56 @@ void HInstructionBuilder::BuildUnresolvedStaticFieldAccess(const Instruction& in
}
}
+ArtField* HInstructionBuilder::ResolveField(uint16_t field_idx, bool is_static, bool is_put) {
+ ScopedObjectAccess soa(Thread::Current());
+ StackHandleScope<2> hs(soa.Self());
+
+ ClassLinker* class_linker = dex_compilation_unit_->GetClassLinker();
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
+ soa.Decode<mirror::ClassLoader>(dex_compilation_unit_->GetClassLoader())));
+ Handle<mirror::Class> compiling_class(hs.NewHandle(GetCompilingClass()));
+
+ ArtField* resolved_field = class_linker->ResolveField(*dex_compilation_unit_->GetDexFile(),
+ field_idx,
+ dex_compilation_unit_->GetDexCache(),
+ class_loader,
+ is_static);
+
+ if (UNLIKELY(resolved_field == nullptr)) {
+ // Clean up any exception left by type resolution.
+ soa.Self()->ClearException();
+ return nullptr;
+ }
+
+ // Check static/instance. The class linker has a fast path for looking into the dex cache
+ // and does not check static/instance if it hits it.
+ if (UNLIKELY(resolved_field->IsStatic() != is_static)) {
+ return nullptr;
+ }
+
+ // Check access.
+ if (compiling_class.Get() == nullptr) {
+ if (!resolved_field->IsPublic()) {
+ return nullptr;
+ }
+ } else if (!compiling_class->CanAccessResolvedField(resolved_field->GetDeclaringClass(),
+ resolved_field,
+ dex_compilation_unit_->GetDexCache().Get(),
+ field_idx)) {
+ return nullptr;
+ }
+
+ if (is_put &&
+ resolved_field->IsFinal() &&
+ (compiling_class.Get() != resolved_field->GetDeclaringClass())) {
+ // Final fields can only be updated within their own class.
+ // TODO: Only allow it in constructors. b/34966607.
+ return nullptr;
+ }
+
+ return resolved_field;
+}
+
bool HInstructionBuilder::BuildStaticFieldAccess(const Instruction& instruction,
uint32_t dex_pc,
bool is_put) {
@@ -1343,12 +1376,7 @@ bool HInstructionBuilder::BuildStaticFieldAccess(const Instruction& instruction,
uint16_t field_index = instruction.VRegB_21c();
ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<3> hs(soa.Self());
- Handle<mirror::DexCache> dex_cache = dex_compilation_unit_->GetDexCache();
- Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
- soa.Decode<mirror::ClassLoader>(dex_compilation_unit_->GetClassLoader())));
- ArtField* resolved_field = compiler_driver_->ResolveField(
- soa, dex_cache, class_loader, dex_compilation_unit_, field_index, true);
+ ArtField* resolved_field = ResolveField(field_index, /* is_static */ true, is_put);
if (resolved_field == nullptr) {
MaybeRecordStat(MethodCompilationStat::kUnresolvedField);
@@ -1358,38 +1386,23 @@ bool HInstructionBuilder::BuildStaticFieldAccess(const Instruction& instruction,
}
Primitive::Type field_type = resolved_field->GetTypeAsPrimitiveType();
- Handle<mirror::DexCache> outer_dex_cache = outer_compilation_unit_->GetDexCache();
- Handle<mirror::Class> outer_class(hs.NewHandle(GetOutermostCompilingClass()));
- // The index at which the field's class is stored in the DexCache's type array.
- dex::TypeIndex storage_index;
- bool is_outer_class = (outer_class.Get() == resolved_field->GetDeclaringClass());
- if (is_outer_class) {
- storage_index = outer_class->GetDexTypeIndex();
- } else if (outer_dex_cache.Get() != dex_cache.Get()) {
- // The compiler driver cannot currently understand multiple dex caches involved. Just bailout.
- return false;
- } else {
- // TODO: This is rather expensive. Perf it and cache the results if needed.
- std::pair<bool, bool> pair = compiler_driver_->IsFastStaticField(
- outer_dex_cache.Get(),
- GetCompilingClass(),
- resolved_field,
- field_index,
- &storage_index);
- bool can_easily_access = is_put ? pair.second : pair.first;
- if (!can_easily_access) {
- MaybeRecordStat(MethodCompilationStat::kUnresolvedFieldNotAFastAccess);
- BuildUnresolvedStaticFieldAccess(instruction, dex_pc, is_put, field_type);
- return true;
- }
+ Handle<mirror::Class> klass = handles_->NewHandle(resolved_field->GetDeclaringClass());
+ HLoadClass* constant = BuildLoadClass(klass->GetDexTypeIndex(),
+ klass->GetDexFile(),
+ klass,
+ dex_pc,
+ /* needs_access_check */ false);
+
+ if (constant == nullptr) {
+ // The class cannot be referenced from this compiled code. Generate
+ // an unresolved access.
+ MaybeRecordStat(MethodCompilationStat::kUnresolvedFieldNotAFastAccess);
+ BuildUnresolvedStaticFieldAccess(instruction, dex_pc, is_put, field_type);
+ return true;
}
- HLoadClass* constant = BuildLoadClass(
- storage_index, dex_pc, /* check_access */ false, /* outer */ true);
-
HInstruction* cls = constant;
- Handle<mirror::Class> klass(hs.NewHandle(resolved_field->GetDeclaringClass()));
if (!IsInitialized(klass)) {
cls = new (arena_) HClinitCheck(constant, dex_pc);
AppendInstruction(cls);
@@ -1497,7 +1510,7 @@ void HInstructionBuilder::BuildFilledNewArray(uint32_t dex_pc,
uint32_t* args,
uint32_t register_index) {
HInstruction* length = graph_->GetIntConstant(number_of_vreg_arguments, dex_pc);
- HLoadClass* cls = BuildLoadClass(type_index, dex_pc, /* check_access */ true);
+ HLoadClass* cls = BuildLoadClass(type_index, dex_pc);
HInstruction* object = new (arena_) HNewArray(cls, length, dex_pc);
AppendInstruction(object);
@@ -1627,44 +1640,68 @@ static TypeCheckKind ComputeTypeCheckKind(Handle<mirror::Class> cls)
}
}
-HLoadClass* HInstructionBuilder::BuildLoadClass(dex::TypeIndex type_index,
- uint32_t dex_pc,
- bool check_access,
- bool outer) {
+HLoadClass* HInstructionBuilder::BuildLoadClass(dex::TypeIndex type_index, uint32_t dex_pc) {
ScopedObjectAccess soa(Thread::Current());
- const DexCompilationUnit* compilation_unit =
- outer ? outer_compilation_unit_ : dex_compilation_unit_;
- const DexFile& dex_file = *compilation_unit->GetDexFile();
- StackHandleScope<1> hs(soa.Self());
+ StackHandleScope<2> hs(soa.Self());
+ const DexFile& dex_file = *dex_compilation_unit_->GetDexFile();
Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
soa.Decode<mirror::ClassLoader>(dex_compilation_unit_->GetClassLoader())));
Handle<mirror::Class> klass = handles_->NewHandle(compiler_driver_->ResolveClass(
- soa, compilation_unit->GetDexCache(), class_loader, type_index, compilation_unit));
+ soa, dex_compilation_unit_->GetDexCache(), class_loader, type_index, dex_compilation_unit_));
- bool is_accessible = false;
- if (!check_access) {
- is_accessible = true;
- } else if (klass.Get() != nullptr) {
+ bool needs_access_check = true;
+ if (klass.Get() != nullptr) {
if (klass->IsPublic()) {
- is_accessible = true;
+ needs_access_check = false;
} else {
mirror::Class* compiling_class = GetCompilingClass();
if (compiling_class != nullptr && compiling_class->CanAccess(klass.Get())) {
- is_accessible = true;
+ needs_access_check = false;
}
}
}
+ return BuildLoadClass(type_index, dex_file, klass, dex_pc, needs_access_check);
+}
+
+HLoadClass* HInstructionBuilder::BuildLoadClass(dex::TypeIndex type_index,
+ const DexFile& dex_file,
+ Handle<mirror::Class> klass,
+ uint32_t dex_pc,
+ bool needs_access_check) {
+ // Try to find a reference in the compiling dex file.
+ const DexFile* actual_dex_file = &dex_file;
+ if (!IsSameDexFile(dex_file, *dex_compilation_unit_->GetDexFile())) {
+ dex::TypeIndex local_type_index =
+ klass->FindTypeIndexInOtherDexFile(*dex_compilation_unit_->GetDexFile());
+ if (local_type_index.IsValid()) {
+ type_index = local_type_index;
+ actual_dex_file = dex_compilation_unit_->GetDexFile();
+ }
+ }
+
+ // Note: `klass` must be from `handles_`.
HLoadClass* load_class = new (arena_) HLoadClass(
graph_->GetCurrentMethod(),
type_index,
- dex_file,
+ *actual_dex_file,
klass,
klass.Get() != nullptr && (klass.Get() == GetOutermostCompilingClass()),
dex_pc,
- !is_accessible);
+ needs_access_check);
+ HLoadClass::LoadKind load_kind = HSharpening::SharpenClass(load_class,
+ code_generator_,
+ compiler_driver_,
+ *dex_compilation_unit_);
+
+ if (load_kind == HLoadClass::LoadKind::kInvalid) {
+ // We actually cannot reference this class, we're forced to bail.
+ return nullptr;
+ }
+ // Append the instruction first, as setting the load kind affects the inputs.
AppendInstruction(load_class);
+ load_class->SetLoadKind(load_kind);
return load_class;
}
@@ -1674,7 +1711,7 @@ void HInstructionBuilder::BuildTypeCheck(const Instruction& instruction,
dex::TypeIndex type_index,
uint32_t dex_pc) {
HInstruction* object = LoadLocal(reference, Primitive::kPrimNot);
- HLoadClass* cls = BuildLoadClass(type_index, dex_pc, /* check_access */ true);
+ HLoadClass* cls = BuildLoadClass(type_index, dex_pc);
ScopedObjectAccess soa(Thread::Current());
TypeCheckKind check_kind = ComputeTypeCheckKind(cls->GetClass());
@@ -2498,7 +2535,7 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction,
case Instruction::NEW_ARRAY: {
dex::TypeIndex type_index(instruction.VRegC_22c());
HInstruction* length = LoadLocal(instruction.VRegB_22c(), Primitive::kPrimInt);
- HLoadClass* cls = BuildLoadClass(type_index, dex_pc, /* check_access */ true);
+ HLoadClass* cls = BuildLoadClass(type_index, dex_pc);
AppendInstruction(new (arena_) HNewArray(cls, length, dex_pc));
UpdateLocal(instruction.VRegA_22c(), current_block_->GetLastInstruction());
break;
@@ -2673,7 +2710,7 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction,
case Instruction::CONST_CLASS: {
dex::TypeIndex type_index(instruction.VRegB_21c());
- BuildLoadClass(type_index, dex_pc, /* check_access */ true);
+ BuildLoadClass(type_index, dex_pc);
UpdateLocal(instruction.VRegA_21c(), current_block_->GetLastInstruction());
break;
}
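The instruction_builder.cc hunks above make the builder resolve fields itself through the new HInstructionBuilder::ResolveField() helper instead of asking CompilerDriver, and they route both instance and static accesses to the unresolved path when the helper returns null: on resolution failure, on a static/instance mismatch, on a failed access check, or on a put to a final field outside its declaring class. The stand-alone sketch below models only that bail-out ladder; the toy types and the crude visibility test are illustrative, while the real access check is mirror::Class::CanAccessResolvedField.

#include <iostream>
#include <string>

// Toy stand-ins for ArtField and the compiling class; names are illustrative only.
struct ToyField {
  bool is_static;
  bool is_public;
  bool is_final;
  std::string declaring_class;
};

// Returns true when the compiling class may use the field for this access,
// mirroring the bail-out order of the new ResolveField() helper.
bool CanUseField(const ToyField& field,
                 bool want_static,
                 bool is_put,
                 const std::string& compiling_class,
                 bool same_package) {
  if (field.is_static != want_static) {
    return false;  // e.g. SGET on an instance field: take the unresolved path
  }
  // Crude stand-in for mirror::Class::CanAccessResolvedField().
  bool accessible = field.is_public || same_package ||
                    compiling_class == field.declaring_class;
  if (!accessible) {
    return false;
  }
  if (is_put && field.is_final && compiling_class != field.declaring_class) {
    return false;  // final fields may only be written inside their own class
  }
  return true;
}

int main() {
  ToyField f{/*is_static=*/true, /*is_public=*/false, /*is_final=*/true, "Holder"};
  std::cout << CanUseField(f, true, /*is_put=*/true, "Main", true) << "\n";   // 0: SPUT to another class's final field
  std::cout << CanUseField(f, true, /*is_put=*/false, "Main", true) << "\n";  // 1: SGET is still allowed
  std::cout << CanUseField(f, false, /*is_put=*/false, "Main", true) << "\n"; // 0: IGET on a static field
}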
diff --git a/compiler/optimizing/instruction_builder.h b/compiler/optimizing/instruction_builder.h
index 5efe95094c..3bb680ce44 100644
--- a/compiler/optimizing/instruction_builder.h
+++ b/compiler/optimizing/instruction_builder.h
@@ -31,6 +31,7 @@
namespace art {
+class CodeGenerator;
class Instruction;
class HInstructionBuilder : public ValueObject {
@@ -44,6 +45,7 @@ class HInstructionBuilder : public ValueObject {
DexCompilationUnit* dex_compilation_unit,
const DexCompilationUnit* const outer_compilation_unit,
CompilerDriver* driver,
+ CodeGenerator* code_generator,
const uint8_t* interpreter_metadata,
OptimizingCompilerStats* compiler_stats,
Handle<mirror::DexCache> dex_cache,
@@ -61,6 +63,7 @@ class HInstructionBuilder : public ValueObject {
current_locals_(nullptr),
latest_result_(nullptr),
compiler_driver_(driver),
+ code_generator_(code_generator),
dex_compilation_unit_(dex_compilation_unit),
outer_compilation_unit_(outer_compilation_unit),
interpreter_metadata_(interpreter_metadata),
@@ -228,10 +231,14 @@ class HInstructionBuilder : public ValueObject {
// Builds a `HLoadClass` loading the given `type_index`. If `outer` is true,
// this method will use the outer class's dex file to lookup the type at
// `type_index`.
+ HLoadClass* BuildLoadClass(dex::TypeIndex type_index, uint32_t dex_pc);
+
HLoadClass* BuildLoadClass(dex::TypeIndex type_index,
+ const DexFile& dex_file,
+ Handle<mirror::Class> klass,
uint32_t dex_pc,
- bool check_access,
- bool outer = false);
+ bool needs_access_check)
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns the outer-most compiling method's class.
mirror::Class* GetOutermostCompilingClass() const;
@@ -275,7 +282,6 @@ class HInstructionBuilder : public ValueObject {
HClinitCheck* ProcessClinitCheckForInvoke(
uint32_t dex_pc,
ArtMethod* method,
- uint32_t method_idx,
HInvokeStaticOrDirect::ClinitCheckRequirement* clinit_check_requirement)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -290,6 +296,10 @@ class HInstructionBuilder : public ValueObject {
// not be resolved.
ArtMethod* ResolveMethod(uint16_t method_idx, InvokeType invoke_type);
+ // Try to resolve a field using the class linker. Return null if it could not
+ // be found.
+ ArtField* ResolveField(uint16_t field_idx, bool is_static, bool is_put);
+
ArenaAllocator* const arena_;
HGraph* const graph_;
VariableSizedHandleScope* handles_;
@@ -311,6 +321,8 @@ class HInstructionBuilder : public ValueObject {
CompilerDriver* const compiler_driver_;
+ CodeGenerator* const code_generator_;
+
// The compilation unit of the current method being compiled. Note that
// it can be an inlined method.
DexCompilationUnit* const dex_compilation_unit_;
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 76900f23a9..abbb91a1a9 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -2464,16 +2464,15 @@ bool HLoadClass::InstructionDataEquals(const HInstruction* other) const {
}
}
-void HLoadClass::SetLoadKindInternal(LoadKind load_kind) {
- // Once sharpened, the load kind should not be changed again.
- // Also, kReferrersClass should never be overwritten.
- DCHECK_EQ(GetLoadKind(), LoadKind::kDexCacheViaMethod);
+void HLoadClass::SetLoadKind(LoadKind load_kind) {
SetPackedField<LoadKindField>(load_kind);
- if (load_kind != LoadKind::kDexCacheViaMethod) {
+ if (load_kind != LoadKind::kDexCacheViaMethod &&
+ load_kind != LoadKind::kReferrersClass) {
RemoveAsUserOfInput(0u);
SetRawInputAt(0u, nullptr);
}
+
if (!NeedsEnvironment()) {
RemoveEnvironment();
SetSideEffects(SideEffects::None());
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index acf14aa726..fae164aa0b 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -5508,6 +5508,9 @@ class HLoadClass FINAL : public HInstruction {
public:
// Determines how to load the Class.
enum class LoadKind {
+ // We cannot load this class. See HSharpening::SharpenLoadClass.
+ kInvalid = -1,
+
// Use the Class* from the method's own ArtMethod*.
kReferrersClass,
@@ -5564,18 +5567,7 @@ class HLoadClass FINAL : public HInstruction {
SetPackedFlag<kFlagGenerateClInitCheck>(false);
}
- void SetLoadKind(LoadKind load_kind) {
- SetLoadKindInternal(load_kind);
- }
-
- void SetLoadKindWithTypeReference(LoadKind load_kind,
- const DexFile& dex_file,
- dex::TypeIndex type_index) {
- DCHECK(HasTypeReference(load_kind));
- DCHECK(IsSameDexFile(dex_file_, dex_file));
- DCHECK_EQ(type_index_, type_index);
- SetLoadKindInternal(load_kind);
- }
+ void SetLoadKind(LoadKind load_kind);
LoadKind GetLoadKind() const {
return GetPackedField<LoadKindField>();
@@ -5694,6 +5686,11 @@ class HLoadClass FINAL : public HInstruction {
// for PC-relative loads, i.e. kBssEntry or kBootImageLinkTimePcRelative.
HUserRecord<HInstruction*> special_input_;
+ // A type index and dex file where the class can be accessed. The dex file can be:
+ // - The compiling method's dex file if the class is defined there too.
+ // - The compiling method's dex file if the class is referenced there.
+ // - The dex file where the class is defined. When the load kind can only be
+ // kBssEntry or kDexCacheViaMethod, we cannot emit code for this `HLoadClass`.
const dex::TypeIndex type_index_;
const DexFile& dex_file_;
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 1ab671022b..4bc6ff1b65 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -999,6 +999,7 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* arena,
&dex_file,
*code_item,
compiler_driver,
+ codegen.get(),
compilation_stats_.get(),
interpreter_metadata,
dex_cache,
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index e745c73091..f07f02a719 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -42,8 +42,6 @@ void HSharpening::Run() {
HInstruction* instruction = it.Current();
if (instruction->IsInvokeStaticOrDirect()) {
ProcessInvokeStaticOrDirect(instruction->AsInvokeStaticOrDirect());
- } else if (instruction->IsLoadClass()) {
- ProcessLoadClass(instruction->AsLoadClass());
} else if (instruction->IsLoadString()) {
ProcessLoadString(instruction->AsLoadString());
}
@@ -133,104 +131,93 @@ void HSharpening::ProcessInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
invoke->SetDispatchInfo(dispatch_info);
}
-void HSharpening::ProcessLoadClass(HLoadClass* load_class) {
- ScopedObjectAccess soa(Thread::Current());
- SharpenClass(load_class, codegen_, compiler_driver_);
-}
-
-void HSharpening::SharpenClass(HLoadClass* load_class,
- CodeGenerator* codegen,
- CompilerDriver* compiler_driver) {
+HLoadClass::LoadKind HSharpening::SharpenClass(HLoadClass* load_class,
+ CodeGenerator* codegen,
+ CompilerDriver* compiler_driver,
+ const DexCompilationUnit& dex_compilation_unit) {
Handle<mirror::Class> klass = load_class->GetClass();
DCHECK(load_class->GetLoadKind() == HLoadClass::LoadKind::kDexCacheViaMethod ||
load_class->GetLoadKind() == HLoadClass::LoadKind::kReferrersClass)
<< load_class->GetLoadKind();
DCHECK(!load_class->IsInBootImage()) << "HLoadClass should not be optimized before sharpening.";
+ HLoadClass::LoadKind load_kind = load_class->GetLoadKind();
+
if (load_class->NeedsAccessCheck()) {
// We need to call the runtime anyway, so we simply get the class as that call's return value.
- return;
- }
-
- if (load_class->GetLoadKind() == HLoadClass::LoadKind::kReferrersClass) {
+ } else if (load_kind == HLoadClass::LoadKind::kReferrersClass) {
// Loading from the ArtMethod* is the most efficient retrieval in code size.
// TODO: This may not actually be true for all architectures and
// locations of target classes. The additional register pressure
// for using the ArtMethod* should be considered.
- return;
- }
-
- const DexFile& dex_file = load_class->GetDexFile();
- dex::TypeIndex type_index = load_class->GetTypeIndex();
+ } else {
+ const DexFile& dex_file = load_class->GetDexFile();
+ dex::TypeIndex type_index = load_class->GetTypeIndex();
- bool is_in_boot_image = false;
- HLoadClass::LoadKind desired_load_kind = static_cast<HLoadClass::LoadKind>(-1);
- Runtime* runtime = Runtime::Current();
- if (codegen->GetCompilerOptions().IsBootImage()) {
- // Compiling boot image. Check if the class is a boot image class.
- DCHECK(!runtime->UseJitCompilation());
- if (!compiler_driver->GetSupportBootImageFixup()) {
- // compiler_driver_test. Do not sharpen.
- desired_load_kind = HLoadClass::LoadKind::kDexCacheViaMethod;
- } else if ((klass.Get() != nullptr) && compiler_driver->IsImageClass(
- dex_file.StringDataByIdx(dex_file.GetTypeId(type_index).descriptor_idx_))) {
- is_in_boot_image = true;
- desired_load_kind = codegen->GetCompilerOptions().GetCompilePic()
- ? HLoadClass::LoadKind::kBootImageLinkTimePcRelative
- : HLoadClass::LoadKind::kBootImageLinkTimeAddress;
+ bool is_in_boot_image = false;
+ HLoadClass::LoadKind desired_load_kind = HLoadClass::LoadKind::kInvalid;
+ Runtime* runtime = Runtime::Current();
+ if (codegen->GetCompilerOptions().IsBootImage()) {
+ // Compiling boot image. Check if the class is a boot image class.
+ DCHECK(!runtime->UseJitCompilation());
+ if (!compiler_driver->GetSupportBootImageFixup()) {
+ // compiler_driver_test. Do not sharpen.
+ desired_load_kind = HLoadClass::LoadKind::kDexCacheViaMethod;
+ } else if ((klass.Get() != nullptr) && compiler_driver->IsImageClass(
+ dex_file.StringDataByIdx(dex_file.GetTypeId(type_index).descriptor_idx_))) {
+ is_in_boot_image = true;
+ desired_load_kind = codegen->GetCompilerOptions().GetCompilePic()
+ ? HLoadClass::LoadKind::kBootImageLinkTimePcRelative
+ : HLoadClass::LoadKind::kBootImageLinkTimeAddress;
+ } else {
+ // Not a boot image class.
+ DCHECK(ContainsElement(compiler_driver->GetDexFilesForOatFile(), &dex_file));
+ desired_load_kind = HLoadClass::LoadKind::kBssEntry;
+ }
} else {
- // Not a boot image class.
- DCHECK(ContainsElement(compiler_driver->GetDexFilesForOatFile(), &dex_file));
- desired_load_kind = HLoadClass::LoadKind::kBssEntry;
- }
- } else {
- is_in_boot_image = (klass.Get() != nullptr) &&
- runtime->GetHeap()->ObjectIsInBootImageSpace(klass.Get());
- if (runtime->UseJitCompilation()) {
- // TODO: Make sure we don't set the "compile PIC" flag for JIT as that's bogus.
- // DCHECK(!codegen_->GetCompilerOptions().GetCompilePic());
- if (is_in_boot_image) {
- // TODO: Use direct pointers for all non-moving spaces, not just boot image. Bug: 29530787
+ is_in_boot_image = (klass.Get() != nullptr) &&
+ runtime->GetHeap()->ObjectIsInBootImageSpace(klass.Get());
+ if (runtime->UseJitCompilation()) {
+ // TODO: Make sure we don't set the "compile PIC" flag for JIT as that's bogus.
+ // DCHECK(!codegen_->GetCompilerOptions().GetCompilePic());
+ if (is_in_boot_image) {
+ // TODO: Use direct pointers for all non-moving spaces, not just boot image. Bug: 29530787
+ desired_load_kind = HLoadClass::LoadKind::kBootImageAddress;
+ } else if (klass.Get() != nullptr) {
+ desired_load_kind = HLoadClass::LoadKind::kJitTableAddress;
+ } else {
+ // Class not loaded yet. This happens when the dex code requesting
+ // this `HLoadClass` hasn't been executed in the interpreter.
+ // Fallback to the dex cache.
+ // TODO(ngeoffray): Generate HDeoptimize instead.
+ desired_load_kind = HLoadClass::LoadKind::kDexCacheViaMethod;
+ }
+ } else if (is_in_boot_image && !codegen->GetCompilerOptions().GetCompilePic()) {
+ // AOT app compilation. Check if the class is in the boot image.
desired_load_kind = HLoadClass::LoadKind::kBootImageAddress;
- } else if (klass.Get() != nullptr) {
- desired_load_kind = HLoadClass::LoadKind::kJitTableAddress;
} else {
- // Class not loaded yet. This happens when the dex code requesting
- // this `HLoadClass` hasn't been executed in the interpreter.
- // Fallback to the dex cache.
- // TODO(ngeoffray): Generate HDeoptimize instead.
- desired_load_kind = HLoadClass::LoadKind::kDexCacheViaMethod;
+ // Not JIT and either the klass is not in boot image or we are compiling in PIC mode.
+ desired_load_kind = HLoadClass::LoadKind::kBssEntry;
}
- } else if (is_in_boot_image && !codegen->GetCompilerOptions().GetCompilePic()) {
- // AOT app compilation. Check if the class is in the boot image.
- desired_load_kind = HLoadClass::LoadKind::kBootImageAddress;
- } else {
- // Not JIT and either the klass is not in boot image or we are compiling in PIC mode.
- desired_load_kind = HLoadClass::LoadKind::kBssEntry;
}
- }
- DCHECK_NE(desired_load_kind, static_cast<HLoadClass::LoadKind>(-1));
+ DCHECK_NE(desired_load_kind, HLoadClass::LoadKind::kInvalid);
- if (is_in_boot_image) {
- load_class->MarkInBootImage();
+ if (is_in_boot_image) {
+ load_class->MarkInBootImage();
+ }
+ load_kind = codegen->GetSupportedLoadClassKind(desired_load_kind);
}
- HLoadClass::LoadKind load_kind = codegen->GetSupportedLoadClassKind(desired_load_kind);
- switch (load_kind) {
- case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
- case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
- case HLoadClass::LoadKind::kBssEntry:
- case HLoadClass::LoadKind::kDexCacheViaMethod:
- load_class->SetLoadKindWithTypeReference(load_kind, dex_file, type_index);
- break;
- case HLoadClass::LoadKind::kBootImageAddress:
- case HLoadClass::LoadKind::kJitTableAddress:
- load_class->SetLoadKind(load_kind);
- break;
- default:
- LOG(FATAL) << "Unexpected load kind: " << load_kind;
- UNREACHABLE();
+ if (!IsSameDexFile(load_class->GetDexFile(), *dex_compilation_unit.GetDexFile())) {
+ if ((load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) ||
+ (load_kind == HLoadClass::LoadKind::kBssEntry)) {
+ // We actually cannot reference this class, we're forced to bail.
+ // We cannot reference this class with Bss, as the entrypoint will lookup the class
+ // in the caller's dex file, but that dex file does not reference the class.
+ return HLoadClass::LoadKind::kInvalid;
+ }
}
+ return load_kind;
}
void HSharpening::ProcessLoadString(HLoadString* load_string) {
diff --git a/compiler/optimizing/sharpening.h b/compiler/optimizing/sharpening.h
index ae3d83ef2c..4240b2f339 100644
--- a/compiler/optimizing/sharpening.h
+++ b/compiler/optimizing/sharpening.h
@@ -17,6 +17,7 @@
#ifndef ART_COMPILER_OPTIMIZING_SHARPENING_H_
#define ART_COMPILER_OPTIMIZING_SHARPENING_H_
+#include "nodes.h"
#include "optimization.h"
namespace art {
@@ -24,7 +25,6 @@ namespace art {
class CodeGenerator;
class CompilerDriver;
class DexCompilationUnit;
-class HInvokeStaticOrDirect;
// Optimization that tries to improve the way we dispatch methods and access types,
// fields, etc. Besides actual method sharpening based on receiver type (for example
@@ -47,15 +47,15 @@ class HSharpening : public HOptimization {
static constexpr const char* kSharpeningPassName = "sharpening";
- // Used internally but also by the inliner.
- static void SharpenClass(HLoadClass* load_class,
- CodeGenerator* codegen,
- CompilerDriver* compiler_driver)
+ // Used by the builder and the inliner.
+ static HLoadClass::LoadKind SharpenClass(HLoadClass* load_class,
+ CodeGenerator* codegen,
+ CompilerDriver* compiler_driver,
+ const DexCompilationUnit& dex_compilation_unit)
REQUIRES_SHARED(Locks::mutator_lock_);
private:
void ProcessInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke);
- void ProcessLoadClass(HLoadClass* load_class);
void ProcessLoadString(HLoadString* load_string);
CodeGenerator* codegen_;
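As the sharpening.cc and sharpening.h hunks above show, HSharpening::SharpenClass() now returns the computed HLoadClass::LoadKind instead of mutating the instruction, and it may return kInvalid when the class lives in another dex file and only a kBssEntry or kDexCacheViaMethod load would remain. Its callers in the instruction builder and the inliner insert the HLoadClass into the graph first and only then call SetLoadKind(), because changing the kind can drop the current-method input. Below is a minimal, self-contained sketch of that ordering contract using toy stand-ins rather than the real ART classes.

#include <cassert>
#include <iostream>
#include <memory>
#include <vector>

enum class LoadKind { kInvalid = -1, kReferrersClass, kBootImageAddress, kBssEntry, kDexCacheViaMethod };

// Toy stand-in for HLoadClass; only the kind/input bookkeeping is modeled.
struct ToyLoadClass {
  LoadKind kind = LoadKind::kDexCacheViaMethod;
  bool has_method_input = true;  // models the current-method input
  bool inserted = false;

  void SetLoadKind(LoadKind new_kind) {
    assert(inserted && "insert into the graph before changing the kind");
    kind = new_kind;
    // Kinds that no longer go through the ArtMethod* drop the method input,
    // as HLoadClass::SetLoadKind() does in the nodes.cc hunk above.
    if (new_kind != LoadKind::kDexCacheViaMethod &&
        new_kind != LoadKind::kReferrersClass) {
      has_method_input = false;
    }
  }
};

// Caller pattern from the builder and the inliner: sharpen first, bail on
// kInvalid, insert into the graph, and only then set the kind.
bool BuildAndSharpen(std::vector<std::unique_ptr<ToyLoadClass>>& graph, LoadKind sharpened_kind) {
  auto load = std::make_unique<ToyLoadClass>();
  if (sharpened_kind == LoadKind::kInvalid) {
    return false;  // the class cannot be referenced from this compilation unit
  }
  load->inserted = true;              // AppendInstruction / InsertInstructionAfter
  load->SetLoadKind(sharpened_kind);  // safe only after insertion
  graph.push_back(std::move(load));
  return true;
}

int main() {
  std::vector<std::unique_ptr<ToyLoadClass>> graph;
  std::cout << BuildAndSharpen(graph, LoadKind::kBssEntry) << "\n";  // 1
  std::cout << BuildAndSharpen(graph, LoadKind::kInvalid) << "\n";   // 0
}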
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index ae1e369999..487e4dd498 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -17,8 +17,10 @@
#include "ssa_builder.h"
#include "bytecode_utils.h"
+#include "mirror/class-inl.h"
#include "nodes.h"
#include "reference_type_propagation.h"
+#include "scoped_thread_state_change-inl.h"
#include "ssa_phi_elimination.h"
namespace art {
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
index 668108daa4..e4d480061d 100644
--- a/compiler/optimizing/stack_map_stream.cc
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -18,8 +18,8 @@
#include <unordered_map>
+#include "art_method-inl.h"
#include "base/stl_util.h"
-#include "art_method.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 276f3043d9..1bae245b4e 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -208,6 +208,7 @@ cc_defaults {
"verifier/reg_type_cache.cc",
"verifier/register_line.cc",
"verifier/verifier_deps.cc",
+ "verify_object.cc",
"well_known_classes.cc",
"zip_archive.cc",
diff --git a/runtime/cha.cc b/runtime/cha.cc
index e726bdbcb8..d11b12f700 100644
--- a/runtime/cha.cc
+++ b/runtime/cha.cc
@@ -16,6 +16,7 @@
#include "cha.h"
+#include "art_method-inl.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "runtime.h"
diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
index 670dadcd4d..158c1d6348 100644
--- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
@@ -20,7 +20,7 @@
#include "indirect_reference_table.h"
#include "mirror/object-inl.h"
#include "thread-inl.h"
-#include "verify_object-inl.h"
+#include "verify_object.h"
namespace art {
diff --git a/runtime/gc/collector/concurrent_copying-inl.h b/runtime/gc/collector/concurrent_copying-inl.h
index 7c649525e4..854d0a58ff 100644
--- a/runtime/gc/collector/concurrent_copying-inl.h
+++ b/runtime/gc/collector/concurrent_copying-inl.h
@@ -22,6 +22,7 @@
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/region_space.h"
+#include "mirror/object-inl.h"
#include "lock_word.h"
namespace art {
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 0819ba04f7..f12ad8058d 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -1875,8 +1875,10 @@ class ConcurrentCopying::RefFieldsVisitor {
// Scan ref fields of an object.
inline void ConcurrentCopying::Scan(mirror::Object* to_ref) {
- if (kDisallowReadBarrierDuringScan) {
+ if (kDisallowReadBarrierDuringScan && !Runtime::Current()->IsActiveTransaction()) {
// Avoid all read barriers during visit references to help performance.
+ // Don't do this in transaction mode because we may read the old value of a field which may
+ // trigger read barriers.
Thread::Current()->ModifyDebugDisallowReadBarrier(1);
}
DCHECK(!region_space_->IsInFromSpace(to_ref));
@@ -1885,7 +1887,7 @@ inline void ConcurrentCopying::Scan(mirror::Object* to_ref) {
// Disable the read barrier for a performance reason.
to_ref->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
visitor, visitor);
- if (kDisallowReadBarrierDuringScan) {
+ if (kDisallowReadBarrierDuringScan && !Runtime::Current()->IsActiveTransaction()) {
Thread::Current()->ModifyDebugDisallowReadBarrier(-1);
}
}
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 54f221056a..394e541fd8 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -34,7 +34,7 @@
#include "handle_scope-inl.h"
#include "thread-inl.h"
#include "utils.h"
-#include "verify_object-inl.h"
+#include "verify_object.h"
namespace art {
namespace gc {
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index aa15714595..fc475b5e5a 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -78,6 +78,7 @@
#include "scoped_thread_state_change-inl.h"
#include "handle_scope-inl.h"
#include "thread_list.h"
+#include "verify_object-inl.h"
#include "well_known_classes.h"
namespace art {
diff --git a/runtime/handle_scope-inl.h b/runtime/handle_scope-inl.h
index b212d095cb..077f45e8f3 100644
--- a/runtime/handle_scope-inl.h
+++ b/runtime/handle_scope-inl.h
@@ -23,7 +23,7 @@
#include "handle.h"
#include "obj_ptr-inl.h"
#include "thread-inl.h"
-#include "verify_object-inl.h"
+#include "verify_object.h"
namespace art {
diff --git a/runtime/handle_scope_test.cc b/runtime/handle_scope_test.cc
index aab1d9c224..f888482ae5 100644
--- a/runtime/handle_scope_test.cc
+++ b/runtime/handle_scope_test.cc
@@ -17,10 +17,12 @@
#include <type_traits>
#include "base/enums.h"
+#include "class_linker-inl.h"
#include "common_runtime_test.h"
#include "gtest/gtest.h"
#include "handle.h"
#include "handle_scope-inl.h"
+#include "mirror/class-inl.h"
#include "mirror/object.h"
#include "scoped_thread_state_change-inl.h"
#include "thread.h"
diff --git a/runtime/indirect_reference_table-inl.h b/runtime/indirect_reference_table-inl.h
index 0e66ae96b5..24ee22759c 100644
--- a/runtime/indirect_reference_table-inl.h
+++ b/runtime/indirect_reference_table-inl.h
@@ -25,7 +25,7 @@
#include "gc_root-inl.h"
#include "obj_ptr-inl.h"
#include "runtime-inl.h"
-#include "verify_object-inl.h"
+#include "verify_object.h"
namespace art {
namespace mirror {
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index c7371191b7..9fbb2e9930 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -25,7 +25,6 @@
#include "scoped_thread_state_change-inl.h"
#include "thread.h"
#include "utils.h"
-#include "verify_object-inl.h"
#include <cstdlib>
diff --git a/runtime/jdwp/object_registry.cc b/runtime/jdwp/object_registry.cc
index 170887e397..4615574947 100644
--- a/runtime/jdwp/object_registry.cc
+++ b/runtime/jdwp/object_registry.cc
@@ -19,6 +19,7 @@
#include "handle_scope-inl.h"
#include "jni_internal.h"
#include "mirror/class.h"
+#include "mirror/throwable.h"
#include "obj_ptr-inl.h"
#include "scoped_thread_state_change-inl.h"
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 6deb03dc41..fec3c4f7b8 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -145,7 +145,12 @@ Jit::Jit() : dump_info_on_shutdown_(false),
cumulative_timings_("JIT timings"),
memory_use_("Memory used for compilation", 16),
lock_("JIT memory use lock"),
- use_jit_compilation_(true) {}
+ use_jit_compilation_(true),
+ hot_method_threshold_(0),
+ warm_method_threshold_(0),
+ osr_method_threshold_(0),
+ priority_thread_weight_(0),
+ invoke_transition_weight_(0) {}
Jit* Jit::Create(JitOptions* options, std::string* error_msg) {
DCHECK(options->UseJitCompilation() || options->GetProfileSaverOptions().IsEnabled());
@@ -289,7 +294,11 @@ bool Jit::CompileMethod(ArtMethod* method, Thread* self, bool osr) {
void Jit::CreateThreadPool() {
// There is a DCHECK in the 'AddSamples' method to ensure the thread pool
// is not null when we instrument.
- thread_pool_.reset(new ThreadPool("Jit thread pool", 1));
+
+ // We need peers as we may report the JIT thread, e.g., in the debugger.
+ constexpr bool kJitPoolNeedsPeers = true;
+ thread_pool_.reset(new ThreadPool("Jit thread pool", 1, kJitPoolNeedsPeers));
+
thread_pool_->SetPthreadPriority(kJitPoolThreadPthreadPriority);
Start();
}
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 366a6621d4..2cff47e8b4 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -646,23 +646,6 @@ inline void Class::SetClinitThreadId(pid_t new_clinit_thread_id) {
}
}
-template<VerifyObjectFlags kVerifyFlags>
-inline uint32_t Class::GetAccessFlags() {
- // Check class is loaded/retired or this is java.lang.String that has a
- // circularity issue during loading the names of its members
- DCHECK(IsIdxLoaded<kVerifyFlags>() || IsRetired<kVerifyFlags>() ||
- IsErroneous<static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis)>() ||
- this == String::GetJavaLangString())
- << "IsIdxLoaded=" << IsIdxLoaded<kVerifyFlags>()
- << " IsRetired=" << IsRetired<kVerifyFlags>()
- << " IsErroneous=" <<
- IsErroneous<static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis)>()
- << " IsString=" << (this == String::GetJavaLangString())
- << " status= " << GetStatus<kVerifyFlags>()
- << " descriptor=" << PrettyDescriptor();
- return GetField32<kVerifyFlags>(AccessFlagsOffset());
-}
-
inline String* Class::GetName() {
return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(Class, name_));
}
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index f08d4daf95..1b8f3f83e7 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -1345,5 +1345,26 @@ std::string Class::PrettyClassAndClassLoader() {
return result;
}
+template<VerifyObjectFlags kVerifyFlags> void Class::GetAccessFlagsDCheck() {
+ // Check class is loaded/retired or this is java.lang.String that has a
+ // circularity issue during loading the names of its members
+ DCHECK(IsIdxLoaded<kVerifyFlags>() || IsRetired<kVerifyFlags>() ||
+ IsErroneous<static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis)>() ||
+ this == String::GetJavaLangString())
+ << "IsIdxLoaded=" << IsIdxLoaded<kVerifyFlags>()
+ << " IsRetired=" << IsRetired<kVerifyFlags>()
+ << " IsErroneous=" <<
+ IsErroneous<static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis)>()
+ << " IsString=" << (this == String::GetJavaLangString())
+ << " status= " << GetStatus<kVerifyFlags>()
+ << " descriptor=" << PrettyDescriptor();
+}
+// Instantiate the common cases.
+template void Class::GetAccessFlagsDCheck<kVerifyNone>();
+template void Class::GetAccessFlagsDCheck<kVerifyThis>();
+template void Class::GetAccessFlagsDCheck<kVerifyReads>();
+template void Class::GetAccessFlagsDCheck<kVerifyWrites>();
+template void Class::GetAccessFlagsDCheck<kVerifyAll>();
+
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 2ed2d878be..d34f09c721 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -231,7 +231,13 @@ class MANAGED Class FINAL : public Object {
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE uint32_t GetAccessFlags() REQUIRES_SHARED(Locks::mutator_lock_);
+ ALWAYS_INLINE uint32_t GetAccessFlags() REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (kIsDebugBuild) {
+ GetAccessFlagsDCheck<kVerifyFlags>();
+ }
+ return GetField32<kVerifyFlags>(AccessFlagsOffset());
+ }
+
static MemberOffset AccessFlagsOffset() {
return OFFSET_OF_OBJECT_MEMBER(Class, access_flags_);
}
@@ -1390,6 +1396,9 @@ class MANAGED Class FINAL : public Object {
bool ProxyDescriptorEquals(const char* match) REQUIRES_SHARED(Locks::mutator_lock_);
+ template<VerifyObjectFlags kVerifyFlags>
+ void GetAccessFlagsDCheck() REQUIRES_SHARED(Locks::mutator_lock_);
+
// Check that the pointer size matches the one in the class linker.
ALWAYS_INLINE static void CheckPointerSize(PointerSize pointer_size);
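The pattern in the hunk above — keep the hot getter tiny and inlinable, and push the heavyweight DCHECK body into an out-of-line helper that is only invoked when kIsDebugBuild is set — is a general one. A minimal standalone sketch of the same idea, using a hypothetical Widget class and assert() in place of ART's DCHECK machinery:

#include <cassert>
#include <cstdint>
#include <iostream>

constexpr bool kIsDebugBuild = true;  // stand-in for ART's build-type constant

class Widget {
 public:
  explicit Widget(uint32_t flags) : flags_(flags) {}

  // Hot path: stays small enough to inline everywhere; in release builds the
  // branch below folds to nothing.
  uint32_t GetFlags() const {
    if (kIsDebugBuild) {
      GetFlagsDCheck();
    }
    return flags_;
  }

 private:
  // Heavy sanity checks live out of line (in ART, in the .cc file with
  // explicit instantiations for the common verify-flag values).
  void GetFlagsDCheck() const {
    assert(flags_ != 0u && "flags read before they were initialized");
  }

  uint32_t flags_;
};

int main() {
  Widget w(0x0001u);
  std::cout << w.GetFlags() << "\n";
  return 0;
}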
diff --git a/runtime/mirror/method_handle_impl.h b/runtime/mirror/method_handle_impl.h
index dca30626e0..53d267b52c 100644
--- a/runtime/mirror/method_handle_impl.h
+++ b/runtime/mirror/method_handle_impl.h
@@ -19,7 +19,7 @@
#include "class.h"
#include "gc_root.h"
-#include "object.h"
+#include "object-inl.h"
#include "method_handles.h"
#include "method_type.h"
diff --git a/runtime/mirror/method_type_test.cc b/runtime/mirror/method_type_test.cc
index 03ab93069c..637bafd75e 100644
--- a/runtime/mirror/method_type_test.cc
+++ b/runtime/mirror/method_type_test.cc
@@ -19,12 +19,13 @@
#include <string>
#include <vector>
+#include "class-inl.h"
#include "class_linker.h"
+#include "class_loader.h"
#include "common_runtime_test.h"
#include "handle_scope-inl.h"
-#include "runtime/mirror/class.h"
-#include "runtime/mirror/class_loader.h"
-#include "scoped_thread_state_change.h"
+#include "object_array-inl.h"
+#include "scoped_thread_state_change-inl.h"
namespace art {
namespace mirror {
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 354410e6bf..8e591e4434 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -142,8 +142,10 @@ inline void Object::Wait(Thread* self, int64_t ms, int32_t ns) {
}
inline uint32_t Object::GetReadBarrierState(uintptr_t* fake_address_dependency) {
-#ifdef USE_BAKER_READ_BARRIER
- CHECK(kUseBakerReadBarrier);
+ if (!kUseBakerReadBarrier) {
+ LOG(FATAL) << "Unreachable";
+ UNREACHABLE();
+ }
#if defined(__arm__)
uintptr_t obj = reinterpret_cast<uintptr_t>(this);
uintptr_t result;
@@ -190,37 +192,29 @@ inline uint32_t Object::GetReadBarrierState(uintptr_t* fake_address_dependency)
UNREACHABLE();
UNUSED(fake_address_dependency);
#endif
-#else // !USE_BAKER_READ_BARRIER
- LOG(FATAL) << "Unreachable";
- UNREACHABLE();
- UNUSED(fake_address_dependency);
-#endif
}
inline uint32_t Object::GetReadBarrierState() {
-#ifdef USE_BAKER_READ_BARRIER
+ if (!kUseBakerReadBarrier) {
+ LOG(FATAL) << "Unreachable";
+ UNREACHABLE();
+ }
DCHECK(kUseBakerReadBarrier);
LockWord lw(GetField<uint32_t, /*kIsVolatile*/false>(OFFSET_OF_OBJECT_MEMBER(Object, monitor_)));
uint32_t rb_state = lw.ReadBarrierState();
DCHECK(ReadBarrier::IsValidReadBarrierState(rb_state)) << rb_state;
return rb_state;
-#else
- LOG(FATAL) << "Unreachable";
- UNREACHABLE();
-#endif
}
inline uint32_t Object::GetReadBarrierStateAcquire() {
-#ifdef USE_BAKER_READ_BARRIER
- DCHECK(kUseBakerReadBarrier);
+ if (!kUseBakerReadBarrier) {
+ LOG(FATAL) << "Unreachable";
+ UNREACHABLE();
+ }
LockWord lw(GetFieldAcquire<uint32_t>(OFFSET_OF_OBJECT_MEMBER(Object, monitor_)));
uint32_t rb_state = lw.ReadBarrierState();
DCHECK(ReadBarrier::IsValidReadBarrierState(rb_state)) << rb_state;
return rb_state;
-#else
- LOG(FATAL) << "Unreachable";
- UNREACHABLE();
-#endif
}
inline uint32_t Object::GetMarkBit() {
@@ -233,23 +227,22 @@ inline uint32_t Object::GetMarkBit() {
}
inline void Object::SetReadBarrierState(uint32_t rb_state) {
-#ifdef USE_BAKER_READ_BARRIER
- DCHECK(kUseBakerReadBarrier);
+ if (!kUseBakerReadBarrier) {
+ LOG(FATAL) << "Unreachable";
+ UNREACHABLE();
+ }
DCHECK(ReadBarrier::IsValidReadBarrierState(rb_state)) << rb_state;
LockWord lw = GetLockWord(false);
lw.SetReadBarrierState(rb_state);
SetLockWord(lw, false);
-#else
- LOG(FATAL) << "Unreachable";
- UNREACHABLE();
- UNUSED(rb_state);
-#endif
}
template<bool kCasRelease>
inline bool Object::AtomicSetReadBarrierState(uint32_t expected_rb_state, uint32_t rb_state) {
-#ifdef USE_BAKER_READ_BARRIER
- DCHECK(kUseBakerReadBarrier);
+ if (!kUseBakerReadBarrier) {
+ LOG(FATAL) << "Unreachable";
+ UNREACHABLE();
+ }
DCHECK(ReadBarrier::IsValidReadBarrierState(expected_rb_state)) << expected_rb_state;
DCHECK(ReadBarrier::IsValidReadBarrierState(rb_state)) << rb_state;
LockWord expected_lw;
@@ -272,11 +265,6 @@ inline bool Object::AtomicSetReadBarrierState(uint32_t expected_rb_state, uint32
CasLockWordWeakRelease(expected_lw, new_lw) :
CasLockWordWeakRelaxed(expected_lw, new_lw)));
return true;
-#else
- UNUSED(expected_rb_state, rb_state);
- LOG(FATAL) << "Unreachable";
- UNREACHABLE();
-#endif
}
inline bool Object::AtomicSetMarkBit(uint32_t expected_mark_bit, uint32_t mark_bit) {
@@ -691,19 +679,6 @@ inline void Object::SetFieldShortVolatile(MemberOffset field_offset, int16_t new
field_offset, new_value);
}
-template<VerifyObjectFlags kVerifyFlags, bool kIsVolatile>
-inline int32_t Object::GetField32(MemberOffset field_offset) {
- if (kVerifyFlags & kVerifyThis) {
- VerifyObject(this);
- }
- return GetField<int32_t, kIsVolatile>(field_offset);
-}
-
-template<VerifyObjectFlags kVerifyFlags>
-inline int32_t Object::GetField32Volatile(MemberOffset field_offset) {
- return GetField32<kVerifyFlags, true>(field_offset);
-}
-
template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags,
bool kIsVolatile>
inline void Object::SetField32(MemberOffset field_offset, int32_t new_value) {
@@ -854,28 +829,6 @@ inline void Object::SetField64Volatile(MemberOffset field_offset, int64_t new_va
new_value);
}
-template<typename kSize, bool kIsVolatile>
-inline void Object::SetField(MemberOffset field_offset, kSize new_value) {
- uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
- kSize* addr = reinterpret_cast<kSize*>(raw_addr);
- if (kIsVolatile) {
- reinterpret_cast<Atomic<kSize>*>(addr)->StoreSequentiallyConsistent(new_value);
- } else {
- reinterpret_cast<Atomic<kSize>*>(addr)->StoreJavaData(new_value);
- }
-}
-
-template<typename kSize, bool kIsVolatile>
-inline kSize Object::GetField(MemberOffset field_offset) {
- const uint8_t* raw_addr = reinterpret_cast<const uint8_t*>(this) + field_offset.Int32Value();
- const kSize* addr = reinterpret_cast<const kSize*>(raw_addr);
- if (kIsVolatile) {
- return reinterpret_cast<const Atomic<kSize>*>(addr)->LoadSequentiallyConsistent();
- } else {
- return reinterpret_cast<const Atomic<kSize>*>(addr)->LoadJavaData();
- }
-}
-
template<typename kSize>
inline kSize Object::GetFieldAcquire(MemberOffset field_offset) {
const uint8_t* raw_addr = reinterpret_cast<const uint8_t*>(this) + field_offset.Int32Value();
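Replacing #ifdef USE_BAKER_READ_BARRIER with an ordinary if (!kUseBakerReadBarrier) guard means the disabled branch is still parsed and type-checked in every configuration, while the optimizer removes it when the constant is true. A small illustration of the idiom under an assumed kUseFastPath flag, with abort() standing in for LOG(FATAL)/UNREACHABLE:

#include <cstdint>
#include <cstdio>
#include <cstdlib>

constexpr bool kUseFastPath = true;  // assumed compile-time feature flag

uint32_t ReadStateBits(uint32_t lock_word) {
  if (!kUseFastPath) {
    // Dead when the feature is on, but still compiled — unlike an #ifdef'd-out
    // block, it can never rot silently.
    std::fprintf(stderr, "Unreachable\n");
    std::abort();
  }
  return lock_word >> 28;  // illustrative bit layout, not ART's real one
}

int main() {
  std::printf("%u\n", ReadStateBits(0xA0000001u));
  return 0;
}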
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index db58a60994..4541ce2a42 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -17,6 +17,7 @@
#ifndef ART_RUNTIME_MIRROR_OBJECT_H_
#define ART_RUNTIME_MIRROR_OBJECT_H_
+#include "atomic.h"
#include "base/casts.h"
#include "base/enums.h"
#include "globals.h"
@@ -432,11 +433,18 @@ class MANAGED LOCKABLE Object {
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE int32_t GetField32(MemberOffset field_offset)
- REQUIRES_SHARED(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (kVerifyFlags & kVerifyThis) {
+ VerifyObject(this);
+ }
+ return GetField<int32_t, kIsVolatile>(field_offset);
+ }
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE int32_t GetField32Volatile(MemberOffset field_offset)
- REQUIRES_SHARED(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return GetField32<kVerifyFlags, true>(field_offset);
+ }
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
@@ -611,10 +619,28 @@ class MANAGED LOCKABLE Object {
private:
template<typename kSize, bool kIsVolatile>
ALWAYS_INLINE void SetField(MemberOffset field_offset, kSize new_value)
- REQUIRES_SHARED(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
+ kSize* addr = reinterpret_cast<kSize*>(raw_addr);
+ if (kIsVolatile) {
+ reinterpret_cast<Atomic<kSize>*>(addr)->StoreSequentiallyConsistent(new_value);
+ } else {
+ reinterpret_cast<Atomic<kSize>*>(addr)->StoreJavaData(new_value);
+ }
+ }
+
template<typename kSize, bool kIsVolatile>
ALWAYS_INLINE kSize GetField(MemberOffset field_offset)
- REQUIRES_SHARED(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ const uint8_t* raw_addr = reinterpret_cast<const uint8_t*>(this) + field_offset.Int32Value();
+ const kSize* addr = reinterpret_cast<const kSize*>(raw_addr);
+ if (kIsVolatile) {
+ return reinterpret_cast<const Atomic<kSize>*>(addr)->LoadSequentiallyConsistent();
+ } else {
+ return reinterpret_cast<const Atomic<kSize>*>(addr)->LoadJavaData();
+ }
+ }
+
// Get a field with acquire semantics.
template<typename kSize>
ALWAYS_INLINE kSize GetFieldAcquire(MemberOffset field_offset)
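The SetField/GetField bodies moved into the header both follow the same shape: compute the raw field address, view it through an atomic, and pick sequentially-consistent ordering for volatile Java fields versus a weaker ordering for plain ones. A rough standalone equivalent, assuming relaxed ordering as a stand-in for ART's StoreJavaData/LoadJavaData:

#include <atomic>
#include <cstddef>
#include <cstdint>
#include <iostream>

template <typename T, bool kIsVolatile>
void SetField(uint8_t* base, size_t offset, T value) {
  auto* addr = reinterpret_cast<std::atomic<T>*>(base + offset);
  if (kIsVolatile) {
    addr->store(value, std::memory_order_seq_cst);  // volatile Java field
  } else {
    addr->store(value, std::memory_order_relaxed);  // plain field (assumed ordering)
  }
}

template <typename T, bool kIsVolatile>
T GetField(const uint8_t* base, size_t offset) {
  auto* addr = reinterpret_cast<const std::atomic<T>*>(base + offset);
  return kIsVolatile ? addr->load(std::memory_order_seq_cst)
                     : addr->load(std::memory_order_relaxed);
}

int main() {
  alignas(8) uint8_t object[16] = {};  // fake object storage
  SetField<int32_t, /*kIsVolatile=*/false>(object, 4, 42);
  std::cout << GetField<int32_t, /*kIsVolatile=*/false>(object, 4) << "\n";
  return 0;
}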
diff --git a/runtime/native/java_lang_String.cc b/runtime/native/java_lang_String.cc
index ea266d131d..f1d6ff5f70 100644
--- a/runtime/native/java_lang_String.cc
+++ b/runtime/native/java_lang_String.cc
@@ -25,7 +25,7 @@
#include "scoped_fast_native_object_access-inl.h"
#include "scoped_thread_state_change-inl.h"
#include "ScopedLocalRef.h"
-#include "verify_object-inl.h"
+#include "verify_object.h"
namespace art {
diff --git a/runtime/native/java_lang_Thread.cc b/runtime/native/java_lang_Thread.cc
index fcb017545a..195091f8ab 100644
--- a/runtime/native/java_lang_Thread.cc
+++ b/runtime/native/java_lang_Thread.cc
@@ -25,7 +25,7 @@
#include "ScopedUtfChars.h"
#include "thread.h"
#include "thread_list.h"
-#include "verify_object-inl.h"
+#include "verify_object.h"
namespace art {
diff --git a/runtime/native/java_lang_reflect_Proxy.cc b/runtime/native/java_lang_reflect_Proxy.cc
index ece0338c93..70cd6aaae2 100644
--- a/runtime/native/java_lang_reflect_Proxy.cc
+++ b/runtime/native/java_lang_reflect_Proxy.cc
@@ -22,7 +22,7 @@
#include "mirror/object_array.h"
#include "mirror/string.h"
#include "scoped_fast_native_object_access-inl.h"
-#include "verify_object-inl.h"
+#include "verify_object.h"
namespace art {
diff --git a/runtime/oat_file_manager.cc b/runtime/oat_file_manager.cc
index 33bd0f311d..a46b47075c 100644
--- a/runtime/oat_file_manager.cc
+++ b/runtime/oat_file_manager.cc
@@ -22,6 +22,7 @@
#include "android-base/stringprintf.h"
+#include "art_field-inl.h"
#include "base/logging.h"
#include "base/stl_util.h"
#include "base/systrace.h"
@@ -32,11 +33,13 @@
#include "handle_scope-inl.h"
#include "jni_internal.h"
#include "mirror/class_loader.h"
+#include "mirror/object-inl.h"
#include "oat_file_assistant.h"
#include "obj_ptr-inl.h"
#include "scoped_thread_state_change-inl.h"
#include "thread-inl.h"
#include "thread_list.h"
+#include "well_known_classes.h"
namespace art {
diff --git a/runtime/openjdkjvm/OpenjdkJvm.cc b/runtime/openjdkjvm/OpenjdkJvm.cc
index 2f51e27b2d..bdaad20d7e 100644
--- a/runtime/openjdkjvm/OpenjdkJvm.cc
+++ b/runtime/openjdkjvm/OpenjdkJvm.cc
@@ -46,7 +46,7 @@
#include "scoped_thread_state_change-inl.h"
#include "ScopedUtfChars.h"
#include "mirror/class_loader.h"
-#include "verify_object-inl.h"
+#include "verify_object.h"
#include "base/logging.h"
#include "base/macros.h"
#include "../../libcore/ojluni/src/main/native/jvm.h" // TODO(narayan): fix it
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 693b8f4e2f..9609bee022 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -672,24 +672,6 @@ bool Runtime::Start() {
started_ = true;
- // Create the JIT either if we have to use JIT compilation or save profiling info.
- // TODO(calin): We use the JIT class as a proxy for JIT compilation and for
- // recording profiles. Maybe we should consider changing the name to be more clear it's
- // not only about compiling. b/28295073.
- if (jit_options_->UseJitCompilation() || jit_options_->GetSaveProfilingInfo()) {
- std::string error_msg;
- if (!IsZygote()) {
- // If we are the zygote then we need to wait until after forking to create the code cache
- // due to SELinux restrictions on r/w/x memory regions.
- CreateJit();
- } else if (jit_options_->UseJitCompilation()) {
- if (!jit::Jit::LoadCompilerLibrary(&error_msg)) {
- // Try to load compiler pre zygote to reduce PSS. b/27744947
- LOG(WARNING) << "Failed to load JIT compiler with error " << error_msg;
- }
- }
- }
-
if (!IsImageDex2OatEnabled() || !GetHeap()->HasBootImageSpace()) {
ScopedObjectAccess soa(self);
StackHandleScope<2> hs(soa.Self());
@@ -714,6 +696,27 @@ bool Runtime::Start() {
Thread::FinishStartup();
+ // Create the JIT either if we have to use JIT compilation or save profiling info. This is
+ // done after FinishStartup as the JIT pool needs Java thread peers, which require the main
+ // ThreadGroup to exist.
+ //
+ // TODO(calin): We use the JIT class as a proxy for JIT compilation and for
+ // recording profiles. Maybe we should consider changing the name to be more clear it's
+ // not only about compiling. b/28295073.
+ if (jit_options_->UseJitCompilation() || jit_options_->GetSaveProfilingInfo()) {
+ std::string error_msg;
+ if (!IsZygote()) {
+ // If we are the zygote then we need to wait until after forking to create the code cache
+ // due to SELinux restrictions on r/w/x memory regions.
+ CreateJit();
+ } else if (jit_options_->UseJitCompilation()) {
+ if (!jit::Jit::LoadCompilerLibrary(&error_msg)) {
+ // Try to load compiler pre zygote to reduce PSS. b/27744947
+ LOG(WARNING) << "Failed to load JIT compiler with error " << error_msg;
+ }
+ }
+ }
+
// Send the start phase event. We have to wait till here as this is when the main thread peer
// has just been generated, important root clinits have been run and JNI is completely functional.
{
diff --git a/runtime/scoped_thread_state_change-inl.h b/runtime/scoped_thread_state_change-inl.h
index d4469f4357..000da59bd2 100644
--- a/runtime/scoped_thread_state_change-inl.h
+++ b/runtime/scoped_thread_state_change-inl.h
@@ -110,6 +110,10 @@ inline ScopedObjectAccessUnchecked::ScopedObjectAccessUnchecked(Thread* self)
Locks::mutator_lock_->AssertSharedHeld(Self());
}
+inline ScopedObjectAccess::ScopedObjectAccess(JNIEnv* env) : ScopedObjectAccessUnchecked(env) {}
+inline ScopedObjectAccess::ScopedObjectAccess(Thread* self) : ScopedObjectAccessUnchecked(self) {}
+inline ScopedObjectAccess::~ScopedObjectAccess() {}
+
inline ScopedThreadSuspension::ScopedThreadSuspension(Thread* self, ThreadState suspended_state)
: self_(self), suspended_state_(suspended_state) {
DCHECK(self_ != nullptr);
diff --git a/runtime/scoped_thread_state_change.h b/runtime/scoped_thread_state_change.h
index b4992586ce..24199f76b6 100644
--- a/runtime/scoped_thread_state_change.h
+++ b/runtime/scoped_thread_state_change.h
@@ -159,16 +159,14 @@ class ScopedObjectAccess : public ScopedObjectAccessUnchecked {
public:
ALWAYS_INLINE explicit ScopedObjectAccess(JNIEnv* env)
REQUIRES(!Locks::thread_suspend_count_lock_)
- SHARED_LOCK_FUNCTION(Locks::mutator_lock_)
- : ScopedObjectAccessUnchecked(env) {}
+ SHARED_LOCK_FUNCTION(Locks::mutator_lock_);
ALWAYS_INLINE explicit ScopedObjectAccess(Thread* self)
REQUIRES(!Locks::thread_suspend_count_lock_)
- SHARED_LOCK_FUNCTION(Locks::mutator_lock_)
- : ScopedObjectAccessUnchecked(self) {}
+ SHARED_LOCK_FUNCTION(Locks::mutator_lock_);
// Base class will release share of lock. Invoked after this destructor.
- ~ScopedObjectAccess() UNLOCK_FUNCTION(Locks::mutator_lock_) ALWAYS_INLINE {}
+ ~ScopedObjectAccess() UNLOCK_FUNCTION(Locks::mutator_lock_) ALWAYS_INLINE;
private:
// TODO: remove this constructor. It is used by check JNI's ScopedCheck to make it believe that
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 5ad00a4e55..6e0569bb5d 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -37,7 +37,7 @@
#include "runtime.h"
#include "thread.h"
#include "thread_list.h"
-#include "verify_object-inl.h"
+#include "verify_object.h"
namespace art {
diff --git a/runtime/thread.cc b/runtime/thread.cc
index d843de5e7f..632a380bf0 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -77,7 +77,7 @@
#include "thread-inl.h"
#include "utils.h"
#include "verifier/method_verifier.h"
-#include "verify_object-inl.h"
+#include "verify_object.h"
#include "well_known_classes.h"
#include "interpreter/interpreter.h"
diff --git a/runtime/transaction.cc b/runtime/transaction.cc
index 25369686fd..56ff0a13ac 100644
--- a/runtime/transaction.cc
+++ b/runtime/transaction.cc
@@ -41,12 +41,12 @@ Transaction::~Transaction() {
MutexLock mu(Thread::Current(), log_lock_);
size_t objects_count = object_logs_.size();
size_t field_values_count = 0;
- for (auto it : object_logs_) {
+ for (const auto& it : object_logs_) {
field_values_count += it.second.Size();
}
size_t array_count = array_logs_.size();
size_t array_values_count = 0;
- for (auto it : array_logs_) {
+ for (const auto& it : array_logs_) {
array_values_count += it.second.Size();
}
size_t intern_string_count = intern_string_logs_.size();
@@ -100,24 +100,30 @@ const std::string& Transaction::GetAbortMessage() {
return abort_message_;
}
-void Transaction::RecordWriteFieldBoolean(mirror::Object* obj, MemberOffset field_offset,
- uint8_t value, bool is_volatile) {
+void Transaction::RecordWriteFieldBoolean(mirror::Object* obj,
+ MemberOffset field_offset,
+ uint8_t value,
+ bool is_volatile) {
DCHECK(obj != nullptr);
MutexLock mu(Thread::Current(), log_lock_);
ObjectLog& object_log = object_logs_[obj];
object_log.LogBooleanValue(field_offset, value, is_volatile);
}
-void Transaction::RecordWriteFieldByte(mirror::Object* obj, MemberOffset field_offset,
- int8_t value, bool is_volatile) {
+void Transaction::RecordWriteFieldByte(mirror::Object* obj,
+ MemberOffset field_offset,
+ int8_t value,
+ bool is_volatile) {
DCHECK(obj != nullptr);
MutexLock mu(Thread::Current(), log_lock_);
ObjectLog& object_log = object_logs_[obj];
object_log.LogByteValue(field_offset, value, is_volatile);
}
-void Transaction::RecordWriteFieldChar(mirror::Object* obj, MemberOffset field_offset,
- uint16_t value, bool is_volatile) {
+void Transaction::RecordWriteFieldChar(mirror::Object* obj,
+ MemberOffset field_offset,
+ uint16_t value,
+ bool is_volatile) {
DCHECK(obj != nullptr);
MutexLock mu(Thread::Current(), log_lock_);
ObjectLog& object_log = object_logs_[obj];
@@ -125,8 +131,10 @@ void Transaction::RecordWriteFieldChar(mirror::Object* obj, MemberOffset field_o
}
-void Transaction::RecordWriteFieldShort(mirror::Object* obj, MemberOffset field_offset,
- int16_t value, bool is_volatile) {
+void Transaction::RecordWriteFieldShort(mirror::Object* obj,
+ MemberOffset field_offset,
+ int16_t value,
+ bool is_volatile) {
DCHECK(obj != nullptr);
MutexLock mu(Thread::Current(), log_lock_);
ObjectLog& object_log = object_logs_[obj];
@@ -134,7 +142,9 @@ void Transaction::RecordWriteFieldShort(mirror::Object* obj, MemberOffset field_
}
-void Transaction::RecordWriteField32(mirror::Object* obj, MemberOffset field_offset, uint32_t value,
+void Transaction::RecordWriteField32(mirror::Object* obj,
+ MemberOffset field_offset,
+ uint32_t value,
bool is_volatile) {
DCHECK(obj != nullptr);
MutexLock mu(Thread::Current(), log_lock_);
@@ -142,7 +152,9 @@ void Transaction::RecordWriteField32(mirror::Object* obj, MemberOffset field_off
object_log.Log32BitsValue(field_offset, value, is_volatile);
}
-void Transaction::RecordWriteField64(mirror::Object* obj, MemberOffset field_offset, uint64_t value,
+void Transaction::RecordWriteField64(mirror::Object* obj,
+ MemberOffset field_offset,
+ uint64_t value,
bool is_volatile) {
DCHECK(obj != nullptr);
MutexLock mu(Thread::Current(), log_lock_);
@@ -150,8 +162,10 @@ void Transaction::RecordWriteField64(mirror::Object* obj, MemberOffset field_off
object_log.Log64BitsValue(field_offset, value, is_volatile);
}
-void Transaction::RecordWriteFieldReference(mirror::Object* obj, MemberOffset field_offset,
- mirror::Object* value, bool is_volatile) {
+void Transaction::RecordWriteFieldReference(mirror::Object* obj,
+ MemberOffset field_offset,
+ mirror::Object* value,
+ bool is_volatile) {
DCHECK(obj != nullptr);
MutexLock mu(Thread::Current(), log_lock_);
ObjectLog& object_log = object_logs_[obj];
@@ -163,8 +177,12 @@ void Transaction::RecordWriteArray(mirror::Array* array, size_t index, uint64_t
DCHECK(array->IsArrayInstance());
DCHECK(!array->IsObjectArray());
MutexLock mu(Thread::Current(), log_lock_);
- ArrayLog& array_log = array_logs_[array];
- array_log.LogValue(index, value);
+ auto it = array_logs_.find(array);
+ if (it == array_logs_.end()) {
+ ArrayLog log;
+ it = array_logs_.emplace(array, std::move(log)).first;
+ }
+ it->second.LogValue(index, value);
}
void Transaction::RecordResolveString(ObjPtr<mirror::DexCache> dex_cache,
@@ -172,33 +190,33 @@ void Transaction::RecordResolveString(ObjPtr<mirror::DexCache> dex_cache,
DCHECK(dex_cache != nullptr);
DCHECK_LT(string_idx.index_, dex_cache->GetDexFile()->NumStringIds());
MutexLock mu(Thread::Current(), log_lock_);
- resolve_string_logs_.push_back(ResolveStringLog(dex_cache, string_idx));
+ resolve_string_logs_.emplace_back(dex_cache, string_idx);
}
void Transaction::RecordStrongStringInsertion(ObjPtr<mirror::String> s) {
InternStringLog log(s, InternStringLog::kStrongString, InternStringLog::kInsert);
- LogInternedString(log);
+ LogInternedString(std::move(log));
}
void Transaction::RecordWeakStringInsertion(ObjPtr<mirror::String> s) {
InternStringLog log(s, InternStringLog::kWeakString, InternStringLog::kInsert);
- LogInternedString(log);
+ LogInternedString(std::move(log));
}
void Transaction::RecordStrongStringRemoval(ObjPtr<mirror::String> s) {
InternStringLog log(s, InternStringLog::kStrongString, InternStringLog::kRemove);
- LogInternedString(log);
+ LogInternedString(std::move(log));
}
void Transaction::RecordWeakStringRemoval(ObjPtr<mirror::String> s) {
InternStringLog log(s, InternStringLog::kWeakString, InternStringLog::kRemove);
- LogInternedString(log);
+ LogInternedString(std::move(log));
}
-void Transaction::LogInternedString(const InternStringLog& log) {
+void Transaction::LogInternedString(InternStringLog&& log) {
Locks::intern_table_lock_->AssertExclusiveHeld(Thread::Current());
MutexLock mu(Thread::Current(), log_lock_);
- intern_string_logs_.push_front(log);
+ intern_string_logs_.push_front(std::move(log));
}
void Transaction::Rollback() {
@@ -216,7 +234,7 @@ void Transaction::Rollback() {
void Transaction::UndoObjectModifications() {
// TODO we may not need to restore objects allocated during this transaction. Or we could directly
// remove them from the heap.
- for (auto it : object_logs_) {
+ for (const auto& it : object_logs_) {
it.second.Undo(it.first);
}
object_logs_.clear();
@@ -225,7 +243,7 @@ void Transaction::UndoObjectModifications() {
void Transaction::UndoArrayModifications() {
// TODO we may not need to restore array allocated during this transaction. Or we could directly
// remove them from the heap.
- for (auto it : array_logs_) {
+ for (const auto& it : array_logs_) {
it.second.Undo(it.first);
}
array_logs_.clear();
@@ -235,7 +253,7 @@ void Transaction::UndoInternStringTableModifications() {
InternTable* const intern_table = Runtime::Current()->GetInternTable();
// We want to undo each operation from the most recent to the oldest. List has been filled so the
// most recent operation is at list begin so just have to iterate over it.
- for (InternStringLog& string_log : intern_string_logs_) {
+ for (const InternStringLog& string_log : intern_string_logs_) {
string_log.Undo(intern_table);
}
intern_string_logs_.clear();
@@ -262,7 +280,7 @@ void Transaction::VisitObjectLogs(RootVisitor* visitor) {
std::list<ObjectPair> moving_roots;
// Visit roots.
- for (auto it : object_logs_) {
+ for (auto& it : object_logs_) {
it.second.VisitRoots(visitor);
mirror::Object* old_root = it.first;
mirror::Object* new_root = old_root;
@@ -279,7 +297,7 @@ void Transaction::VisitObjectLogs(RootVisitor* visitor) {
auto old_root_it = object_logs_.find(old_root);
CHECK(old_root_it != object_logs_.end());
CHECK(object_logs_.find(new_root) == object_logs_.end());
- object_logs_.insert(std::make_pair(new_root, old_root_it->second));
+ object_logs_.emplace(new_root, std::move(old_root_it->second));
object_logs_.erase(old_root_it);
}
}
@@ -289,7 +307,7 @@ void Transaction::VisitArrayLogs(RootVisitor* visitor) {
typedef std::pair<mirror::Array*, mirror::Array*> ArrayPair;
std::list<ArrayPair> moving_roots;
- for (auto it : array_logs_) {
+ for (auto& it : array_logs_) {
mirror::Array* old_root = it.first;
CHECK(!old_root->IsObjectArray());
mirror::Array* new_root = old_root;
@@ -306,7 +324,7 @@ void Transaction::VisitArrayLogs(RootVisitor* visitor) {
auto old_root_it = array_logs_.find(old_root);
CHECK(old_root_it != array_logs_.end());
CHECK(array_logs_.find(new_root) == array_logs_.end());
- array_logs_.insert(std::make_pair(new_root, old_root_it->second));
+ array_logs_.emplace(new_root, std::move(old_root_it->second));
array_logs_.erase(old_root_it);
}
}
@@ -347,23 +365,27 @@ void Transaction::ObjectLog::Log64BitsValue(MemberOffset offset, uint64_t value,
LogValue(ObjectLog::k64Bits, offset, value, is_volatile);
}
-void Transaction::ObjectLog::LogReferenceValue(MemberOffset offset, mirror::Object* obj, bool is_volatile) {
+void Transaction::ObjectLog::LogReferenceValue(MemberOffset offset,
+ mirror::Object* obj,
+ bool is_volatile) {
LogValue(ObjectLog::kReference, offset, reinterpret_cast<uintptr_t>(obj), is_volatile);
}
void Transaction::ObjectLog::LogValue(ObjectLog::FieldValueKind kind,
- MemberOffset offset, uint64_t value, bool is_volatile) {
+ MemberOffset offset,
+ uint64_t value,
+ bool is_volatile) {
auto it = field_values_.find(offset.Uint32Value());
if (it == field_values_.end()) {
ObjectLog::FieldValue field_value;
field_value.value = value;
field_value.is_volatile = is_volatile;
field_value.kind = kind;
- field_values_.insert(std::make_pair(offset.Uint32Value(), field_value));
+ field_values_.emplace(offset.Uint32Value(), std::move(field_value));
}
}
-void Transaction::ObjectLog::Undo(mirror::Object* obj) {
+void Transaction::ObjectLog::Undo(mirror::Object* obj) const {
for (auto& it : field_values_) {
// Garbage collector needs to access object's class and array's length. So we don't rollback
// these values.
@@ -377,60 +399,71 @@ void Transaction::ObjectLog::Undo(mirror::Object* obj) {
// Skip Array::length field.
continue;
}
- FieldValue& field_value = it.second;
+ const FieldValue& field_value = it.second;
UndoFieldWrite(obj, field_offset, field_value);
}
}
-void Transaction::ObjectLog::UndoFieldWrite(mirror::Object* obj, MemberOffset field_offset,
- const FieldValue& field_value) {
+void Transaction::ObjectLog::UndoFieldWrite(mirror::Object* obj,
+ MemberOffset field_offset,
+ const FieldValue& field_value) const {
// TODO We may want to abort a transaction while still being in transaction mode. In this case,
// we'd need to disable the check.
constexpr bool kCheckTransaction = true;
switch (field_value.kind) {
case kBoolean:
if (UNLIKELY(field_value.is_volatile)) {
- obj->SetFieldBooleanVolatile<false, kCheckTransaction>(field_offset,
- static_cast<bool>(field_value.value));
+ obj->SetFieldBooleanVolatile<false, kCheckTransaction>(
+ field_offset,
+ static_cast<bool>(field_value.value));
} else {
- obj->SetFieldBoolean<false, kCheckTransaction>(field_offset,
- static_cast<bool>(field_value.value));
+ obj->SetFieldBoolean<false, kCheckTransaction>(
+ field_offset,
+ static_cast<bool>(field_value.value));
}
break;
case kByte:
if (UNLIKELY(field_value.is_volatile)) {
- obj->SetFieldByteVolatile<false, kCheckTransaction>(field_offset,
- static_cast<int8_t>(field_value.value));
+ obj->SetFieldByteVolatile<false, kCheckTransaction>(
+ field_offset,
+ static_cast<int8_t>(field_value.value));
} else {
- obj->SetFieldByte<false, kCheckTransaction>(field_offset,
- static_cast<int8_t>(field_value.value));
+ obj->SetFieldByte<false, kCheckTransaction>(
+ field_offset,
+ static_cast<int8_t>(field_value.value));
}
break;
case kChar:
if (UNLIKELY(field_value.is_volatile)) {
- obj->SetFieldCharVolatile<false, kCheckTransaction>(field_offset,
- static_cast<uint16_t>(field_value.value));
+ obj->SetFieldCharVolatile<false, kCheckTransaction>(
+ field_offset,
+ static_cast<uint16_t>(field_value.value));
} else {
- obj->SetFieldChar<false, kCheckTransaction>(field_offset,
- static_cast<uint16_t>(field_value.value));
+ obj->SetFieldChar<false, kCheckTransaction>(
+ field_offset,
+ static_cast<uint16_t>(field_value.value));
}
break;
case kShort:
if (UNLIKELY(field_value.is_volatile)) {
- obj->SetFieldShortVolatile<false, kCheckTransaction>(field_offset,
- static_cast<int16_t>(field_value.value));
+ obj->SetFieldShortVolatile<false, kCheckTransaction>(
+ field_offset,
+ static_cast<int16_t>(field_value.value));
} else {
- obj->SetFieldShort<false, kCheckTransaction>(field_offset,
- static_cast<int16_t>(field_value.value));
+ obj->SetFieldShort<false, kCheckTransaction>(
+ field_offset,
+ static_cast<int16_t>(field_value.value));
}
break;
case k32Bits:
if (UNLIKELY(field_value.is_volatile)) {
- obj->SetField32Volatile<false, kCheckTransaction>(field_offset,
- static_cast<uint32_t>(field_value.value));
+ obj->SetField32Volatile<false, kCheckTransaction>(
+ field_offset,
+ static_cast<uint32_t>(field_value.value));
} else {
- obj->SetField32<false, kCheckTransaction>(field_offset,
- static_cast<uint32_t>(field_value.value));
+ obj->SetField32<false, kCheckTransaction>(
+ field_offset,
+ static_cast<uint32_t>(field_value.value));
}
break;
case k64Bits:
@@ -442,11 +475,13 @@ void Transaction::ObjectLog::UndoFieldWrite(mirror::Object* obj, MemberOffset fi
break;
case kReference:
if (UNLIKELY(field_value.is_volatile)) {
- obj->SetFieldObjectVolatile<false, kCheckTransaction>(field_offset,
- reinterpret_cast<mirror::Object*>(field_value.value));
+ obj->SetFieldObjectVolatile<false, kCheckTransaction>(
+ field_offset,
+ reinterpret_cast<mirror::Object*>(field_value.value));
} else {
- obj->SetFieldObject<false, kCheckTransaction>(field_offset,
- reinterpret_cast<mirror::Object*>(field_value.value));
+ obj->SetFieldObject<false, kCheckTransaction>(
+ field_offset,
+ reinterpret_cast<mirror::Object*>(field_value.value));
}
break;
default:
@@ -456,7 +491,7 @@ void Transaction::ObjectLog::UndoFieldWrite(mirror::Object* obj, MemberOffset fi
}
void Transaction::ObjectLog::VisitRoots(RootVisitor* visitor) {
- for (auto it : field_values_) {
+ for (auto& it : field_values_) {
FieldValue& field_value = it.second;
if (field_value.kind == ObjectLog::kReference) {
visitor->VisitRootIfNonNull(reinterpret_cast<mirror::Object**>(&field_value.value),
@@ -465,7 +500,7 @@ void Transaction::ObjectLog::VisitRoots(RootVisitor* visitor) {
}
}
-void Transaction::InternStringLog::Undo(InternTable* intern_table) {
+void Transaction::InternStringLog::Undo(InternTable* intern_table) const {
DCHECK(intern_table != nullptr);
switch (string_op_) {
case InternStringLog::kInsert: {
@@ -506,7 +541,7 @@ void Transaction::InternStringLog::VisitRoots(RootVisitor* visitor) {
str_.VisitRoot(visitor, RootInfo(kRootInternedString));
}
-void Transaction::ResolveStringLog::Undo() {
+void Transaction::ResolveStringLog::Undo() const {
dex_cache_.Read()->ClearString(string_idx_);
}
@@ -538,7 +573,7 @@ void Transaction::ArrayLog::LogValue(size_t index, uint64_t value) {
}
}
-void Transaction::ArrayLog::Undo(mirror::Array* array) {
+void Transaction::ArrayLog::Undo(mirror::Array* array) const {
DCHECK(array != nullptr);
DCHECK(array->IsArrayInstance());
Primitive::Type type = array->GetClass()->GetComponentType()->GetPrimitiveType();
@@ -547,8 +582,10 @@ void Transaction::ArrayLog::Undo(mirror::Array* array) {
}
}
-void Transaction::ArrayLog::UndoArrayWrite(mirror::Array* array, Primitive::Type array_type,
- size_t index, uint64_t value) {
+void Transaction::ArrayLog::UndoArrayWrite(mirror::Array* array,
+ Primitive::Type array_type,
+ size_t index,
+ uint64_t value) const {
// TODO We may want to abort a transaction while still being in transaction mode. In this case,
// we'd need to disable the check.
switch (array_type) {
diff --git a/runtime/transaction.h b/runtime/transaction.h
index 1774657d40..7aa98cd33d 100644
--- a/runtime/transaction.h
+++ b/runtime/transaction.h
@@ -56,26 +56,40 @@ class Transaction FINAL {
bool IsAborted() REQUIRES(!log_lock_);
// Record object field changes.
- void RecordWriteFieldBoolean(mirror::Object* obj, MemberOffset field_offset, uint8_t value,
+ void RecordWriteFieldBoolean(mirror::Object* obj,
+ MemberOffset field_offset,
+ uint8_t value,
bool is_volatile)
REQUIRES(!log_lock_);
- void RecordWriteFieldByte(mirror::Object* obj, MemberOffset field_offset, int8_t value,
- bool is_volatile)
+ void RecordWriteFieldByte(mirror::Object* obj,
+ MemberOffset field_offset,
+ int8_t value,
+ bool is_volatile)
REQUIRES(!log_lock_);
- void RecordWriteFieldChar(mirror::Object* obj, MemberOffset field_offset, uint16_t value,
+ void RecordWriteFieldChar(mirror::Object* obj,
+ MemberOffset field_offset,
+ uint16_t value,
bool is_volatile)
REQUIRES(!log_lock_);
- void RecordWriteFieldShort(mirror::Object* obj, MemberOffset field_offset, int16_t value,
+ void RecordWriteFieldShort(mirror::Object* obj,
+ MemberOffset field_offset,
+ int16_t value,
bool is_volatile)
REQUIRES(!log_lock_);
- void RecordWriteField32(mirror::Object* obj, MemberOffset field_offset, uint32_t value,
+ void RecordWriteField32(mirror::Object* obj,
+ MemberOffset field_offset,
+ uint32_t value,
bool is_volatile)
REQUIRES(!log_lock_);
- void RecordWriteField64(mirror::Object* obj, MemberOffset field_offset, uint64_t value,
+ void RecordWriteField64(mirror::Object* obj,
+ MemberOffset field_offset,
+ uint64_t value,
bool is_volatile)
REQUIRES(!log_lock_);
- void RecordWriteFieldReference(mirror::Object* obj, MemberOffset field_offset,
- mirror::Object* value, bool is_volatile)
+ void RecordWriteFieldReference(mirror::Object* obj,
+ MemberOffset field_offset,
+ mirror::Object* value,
+ bool is_volatile)
REQUIRES(!log_lock_);
// Record array change.
@@ -122,13 +136,16 @@ class Transaction FINAL {
void Log64BitsValue(MemberOffset offset, uint64_t value, bool is_volatile);
void LogReferenceValue(MemberOffset offset, mirror::Object* obj, bool is_volatile);
- void Undo(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);
+ void Undo(mirror::Object* obj) const REQUIRES_SHARED(Locks::mutator_lock_);
void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
size_t Size() const {
return field_values_.size();
}
+ ObjectLog() = default;
+ ObjectLog(ObjectLog&& log) = default;
+
private:
enum FieldValueKind {
kBoolean,
@@ -144,33 +161,49 @@ class Transaction FINAL {
uint64_t value;
FieldValueKind kind;
bool is_volatile;
+
+ FieldValue() = default;
+ FieldValue(FieldValue&& log) = default;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(FieldValue);
};
void LogValue(FieldValueKind kind, MemberOffset offset, uint64_t value, bool is_volatile);
- void UndoFieldWrite(mirror::Object* obj, MemberOffset field_offset,
- const FieldValue& field_value) REQUIRES_SHARED(Locks::mutator_lock_);
+ void UndoFieldWrite(mirror::Object* obj,
+ MemberOffset field_offset,
+ const FieldValue& field_value) const REQUIRES_SHARED(Locks::mutator_lock_);
// Maps field's offset to its value.
std::map<uint32_t, FieldValue> field_values_;
+
+ DISALLOW_COPY_AND_ASSIGN(ObjectLog);
};
class ArrayLog : public ValueObject {
public:
void LogValue(size_t index, uint64_t value);
- void Undo(mirror::Array* obj) REQUIRES_SHARED(Locks::mutator_lock_);
+ void Undo(mirror::Array* obj) const REQUIRES_SHARED(Locks::mutator_lock_);
size_t Size() const {
return array_values_.size();
}
+ ArrayLog() = default;
+ ArrayLog(ArrayLog&& log) = default;
+
private:
- void UndoArrayWrite(mirror::Array* array, Primitive::Type array_type, size_t index,
- uint64_t value) REQUIRES_SHARED(Locks::mutator_lock_);
+ void UndoArrayWrite(mirror::Array* array,
+ Primitive::Type array_type,
+ size_t index,
+ uint64_t value) const REQUIRES_SHARED(Locks::mutator_lock_);
// Maps index to value.
// TODO use JValue instead ?
std::map<size_t, uint64_t> array_values_;
+
+ DISALLOW_COPY_AND_ASSIGN(ArrayLog);
};
class InternStringLog : public ValueObject {
@@ -185,31 +218,38 @@ class Transaction FINAL {
};
InternStringLog(ObjPtr<mirror::String> s, StringKind kind, StringOp op);
- void Undo(InternTable* intern_table)
+ void Undo(InternTable* intern_table) const
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::intern_table_lock_);
void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
+ InternStringLog() = default;
+ InternStringLog(InternStringLog&& log) = default;
+
private:
- GcRoot<mirror::String> str_;
+ mutable GcRoot<mirror::String> str_;
const StringKind string_kind_;
const StringOp string_op_;
+
+ DISALLOW_COPY_AND_ASSIGN(InternStringLog);
};
class ResolveStringLog : public ValueObject {
public:
ResolveStringLog(ObjPtr<mirror::DexCache> dex_cache, dex::StringIndex string_idx);
- void Undo() REQUIRES_SHARED(Locks::mutator_lock_);
+ void Undo() const REQUIRES_SHARED(Locks::mutator_lock_);
void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
private:
GcRoot<mirror::DexCache> dex_cache_;
const dex::StringIndex string_idx_;
+
+ DISALLOW_COPY_AND_ASSIGN(ResolveStringLog);
};
- void LogInternedString(const InternStringLog& log)
+ void LogInternedString(InternStringLog&& log)
REQUIRES(Locks::intern_table_lock_)
REQUIRES(!log_lock_);
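The various *Log classes above gain defaulted move constructors plus DISALLOW_COPY_AND_ASSIGN, and call sites switch to emplace/std::move, so log entries holding GC roots are never silently copied. The same container pattern in isolation, with a hypothetical EntryLog in place of ART's InternStringLog:

#include <iostream>
#include <list>
#include <string>
#include <utility>

class EntryLog {
 public:
  explicit EntryLog(std::string name) : name_(std::move(name)) {}
  EntryLog(EntryLog&&) = default;      // movable into containers...
  EntryLog(const EntryLog&) = delete;  // ...but copying is a compile error
  EntryLog& operator=(const EntryLog&) = delete;

  const std::string& name() const { return name_; }

 private:
  std::string name_;
};

int main() {
  std::list<EntryLog> logs;
  EntryLog log("undo-me");
  logs.push_front(std::move(log));  // mirrors LogInternedString(std::move(log))
  logs.emplace_front("undo-me-first");
  std::cout << logs.front().name() << "\n";
  return 0;
}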
diff --git a/runtime/verify_object-inl.h b/runtime/verify_object-inl.h
index 43151dd425..363fde220d 100644
--- a/runtime/verify_object-inl.h
+++ b/runtime/verify_object-inl.h
@@ -19,33 +19,11 @@
#include "verify_object.h"
-#include "gc/heap.h"
#include "mirror/object-inl.h"
#include "obj_ptr-inl.h"
namespace art {
-inline void VerifyObject(ObjPtr<mirror::Object> obj) {
- if (kVerifyObjectSupport > kVerifyObjectModeDisabled && obj != nullptr) {
- if (kVerifyObjectSupport > kVerifyObjectModeFast) {
- // Slow object verification, try the heap right away.
- Runtime::Current()->GetHeap()->VerifyObjectBody(obj);
- } else {
- // Fast object verification, only call the heap if our quick sanity tests fail. The heap will
- // print the diagnostic message.
- bool failed = !IsAligned<kObjectAlignment>(obj.Ptr());
- if (!failed) {
- mirror::Class* c = obj->GetClass<kVerifyNone>();
- failed = failed || !IsAligned<kObjectAlignment>(c);
- failed = failed || !VerifyClassClass(c);
- }
- if (UNLIKELY(failed)) {
- Runtime::Current()->GetHeap()->VerifyObjectBody(obj);
- }
- }
- }
-}
-
inline bool VerifyClassClass(ObjPtr<mirror::Class> c) {
if (UNLIKELY(c == nullptr)) {
return false;
diff --git a/runtime/verify_object.cc b/runtime/verify_object.cc
new file mode 100644
index 0000000000..a031a07a94
--- /dev/null
+++ b/runtime/verify_object.cc
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "verify_object-inl.h"
+
+#include "base/bit_utils.h"
+#include "gc/heap.h"
+#include "globals.h"
+#include "mirror/object-inl.h"
+#include "obj_ptr-inl.h"
+#include "runtime.h"
+
+namespace art {
+
+void VerifyObjectImpl(ObjPtr<mirror::Object> obj) {
+ if (kVerifyObjectSupport > kVerifyObjectModeFast) {
+ // Slow object verification, try the heap right away.
+ Runtime::Current()->GetHeap()->VerifyObjectBody(obj);
+ } else {
+ // Fast object verification, only call the heap if our quick sanity tests fail. The heap will
+ // print the diagnostic message.
+ bool failed = !IsAligned<kObjectAlignment>(obj.Ptr());
+ if (!failed) {
+ mirror::Class* c = obj->GetClass<kVerifyNone>();
+ failed = failed || !IsAligned<kObjectAlignment>(c);
+ failed = failed || !VerifyClassClass(c);
+ }
+ if (UNLIKELY(failed)) {
+ Runtime::Current()->GetHeap()->VerifyObjectBody(obj);
+ }
+ }
+}
+
+} // namespace art
diff --git a/runtime/verify_object.h b/runtime/verify_object.h
index 384e56f7f4..519f7f5f5a 100644
--- a/runtime/verify_object.h
+++ b/runtime/verify_object.h
@@ -53,7 +53,16 @@ static constexpr VerifyObjectFlags kDefaultVerifyFlags = kVerifyNone;
static constexpr VerifyObjectMode kVerifyObjectSupport =
kDefaultVerifyFlags != 0 ? kVerifyObjectModeFast : kVerifyObjectModeDisabled;
-ALWAYS_INLINE void VerifyObject(ObjPtr<mirror::Object> obj) NO_THREAD_SAFETY_ANALYSIS;
+// Implements the actual object checks.
+void VerifyObjectImpl(ObjPtr<mirror::Object> obj) NO_THREAD_SAFETY_ANALYSIS;
+
+// Is a front to optimize out any calls if no verification is enabled.
+ALWAYS_INLINE
+static inline void VerifyObject(ObjPtr<mirror::Object> obj) NO_THREAD_SAFETY_ANALYSIS {
+ if (kVerifyObjectSupport > kVerifyObjectModeDisabled && obj != nullptr) {
+ VerifyObjectImpl(obj);
+ }
+}
// Check that c.getClass() == c.getClass().getClass().
ALWAYS_INLINE bool VerifyClassClass(ObjPtr<mirror::Class> c) NO_THREAD_SAFETY_ANALYSIS;
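The verification entry point is now split in two: a static inline front that compiles away entirely when verification is disabled, and an out-of-line VerifyObjectImpl holding the real checks (added in verify_object.cc above). A minimal sketch of that split, assuming a kVerifyEnabled configuration constant:

#include <cstdio>

constexpr bool kVerifyEnabled = false;  // assumed configuration constant

// Out-of-line slow path: only referenced when verification is enabled.
void VerifyThingImpl(const void* obj) {
  std::fprintf(stderr, "verifying %p\n", obj);
}

// Inline front: with kVerifyEnabled == false, calls fold away to nothing.
inline void VerifyThing(const void* obj) {
  if (kVerifyEnabled && obj != nullptr) {
    VerifyThingImpl(obj);
  }
}

int main() {
  int x = 0;
  VerifyThing(&x);  // no-op in this configuration
  return 0;
}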
diff --git a/test/552-checker-sharpening/src/Main.java b/test/552-checker-sharpening/src/Main.java
index bf0cbe66c1..dd77423870 100644
--- a/test/552-checker-sharpening/src/Main.java
+++ b/test/552-checker-sharpening/src/Main.java
@@ -283,9 +283,6 @@ public class Main {
return "non-boot-image-string";
}
- /// CHECK-START: java.lang.Class Main.$noinline$getStringClass() sharpening (before)
- /// CHECK: LoadClass load_kind:DexCacheViaMethod class_name:java.lang.String
-
/// CHECK-START-X86: java.lang.Class Main.$noinline$getStringClass() sharpening (after)
// Note: load kind depends on PIC/non-PIC
// TODO: Remove DexCacheViaMethod when read barrier config supports BootImageAddress.
@@ -323,9 +320,6 @@ public class Main {
return String.class;
}
- /// CHECK-START: java.lang.Class Main.$noinline$getOtherClass() sharpening (before)
- /// CHECK: LoadClass load_kind:DexCacheViaMethod class_name:Other
-
/// CHECK-START-X86: java.lang.Class Main.$noinline$getOtherClass() sharpening (after)
/// CHECK: LoadClass load_kind:BssEntry class_name:Other
diff --git a/test/626-const-class-linking/clear_dex_cache_types.cc b/test/626-const-class-linking/clear_dex_cache_types.cc
index b035896166..c0aedc199f 100644
--- a/test/626-const-class-linking/clear_dex_cache_types.cc
+++ b/test/626-const-class-linking/clear_dex_cache_types.cc
@@ -15,6 +15,9 @@
*/
#include "jni.h"
+#include "mirror/class-inl.h"
+#include "mirror/class_loader.h"
+#include "mirror/dex_cache-inl.h"
#include "object_lock.h"
#include "scoped_thread_state_change-inl.h"
diff --git a/test/636-wrong-static-access/expected.txt b/test/636-wrong-static-access/expected.txt
new file mode 100644
index 0000000000..6a5618ebc6
--- /dev/null
+++ b/test/636-wrong-static-access/expected.txt
@@ -0,0 +1 @@
+JNI_OnLoad called
diff --git a/test/636-wrong-static-access/info.txt b/test/636-wrong-static-access/info.txt
new file mode 100644
index 0000000000..184d858cb9
--- /dev/null
+++ b/test/636-wrong-static-access/info.txt
@@ -0,0 +1,2 @@
+Test that the compiler checks if a resolved field is
+of the expected static/instance kind.
diff --git a/test/636-wrong-static-access/run b/test/636-wrong-static-access/run
new file mode 100755
index 0000000000..5e999209b8
--- /dev/null
+++ b/test/636-wrong-static-access/run
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Make verification soft fail, to ensure the verifier does not flag
+# the method we want to compile as "non-compilable" because it sees
+# the method will throw IncompatibleClassChangeError.
+exec ${RUN} $@ --verify-soft-fail
diff --git a/test/636-wrong-static-access/src-ex/Foo.java b/test/636-wrong-static-access/src-ex/Foo.java
new file mode 100644
index 0000000000..9e3b7a74c8
--- /dev/null
+++ b/test/636-wrong-static-access/src-ex/Foo.java
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Foo {
+ public static void doTest() {
+ // Execute foo once to make sure the dex cache will be updated.
+ try {
+ foo();
+ throw new Error("Expected IncompatibleClassChangeError");
+ } catch (IncompatibleClassChangeError e) {
+ // Expected.
+ }
+ Main.ensureJitCompiled(Foo.class, "foo");
+ try {
+ foo();
+ throw new Error("Expected IncompatibleClassChangeError");
+ } catch (IncompatibleClassChangeError e) {
+ // Expected.
+ }
+ }
+
+ public static void foo() {
+ System.out.println(Holder.field);
+ }
+}
diff --git a/test/636-wrong-static-access/src/Holder.java b/test/636-wrong-static-access/src/Holder.java
new file mode 100644
index 0000000000..f3b1c5717c
--- /dev/null
+++ b/test/636-wrong-static-access/src/Holder.java
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Holder {
+ public static int field = 42;
+}
diff --git a/test/636-wrong-static-access/src/Main.java b/test/636-wrong-static-access/src/Main.java
new file mode 100644
index 0000000000..bd8548e372
--- /dev/null
+++ b/test/636-wrong-static-access/src/Main.java
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Method;
+
+public class Main {
+ static final String DEX_FILE = System.getenv("DEX_LOCATION") + "/636-wrong-static-access-ex.jar";
+
+ public static void main(String[] args) throws Exception {
+ System.loadLibrary(args[0]);
+ Class<?> pathClassLoader = Class.forName("dalvik.system.PathClassLoader");
+ if (pathClassLoader == null) {
+ throw new AssertionError("Couldn't find path class loader class");
+ }
+ Constructor<?> constructor =
+ pathClassLoader.getDeclaredConstructor(String.class, ClassLoader.class);
+ ClassLoader loader = (ClassLoader) constructor.newInstance(
+ DEX_FILE, ClassLoader.getSystemClassLoader());
+ Class<?> foo = loader.loadClass("Foo");
+ Method doTest = foo.getDeclaredMethod("doTest");
+ doTest.invoke(null);
+ }
+
+ public static native void ensureJitCompiled(Class<?> cls, String methodName);
+}
diff --git a/test/636-wrong-static-access/src2/Holder.java b/test/636-wrong-static-access/src2/Holder.java
new file mode 100644
index 0000000000..a26da24319
--- /dev/null
+++ b/test/636-wrong-static-access/src2/Holder.java
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Holder {
+ public int field = 42;
+}
diff --git a/test/911-get-stack-trace/src/PrintThread.java b/test/911-get-stack-trace/src/PrintThread.java
index 97815ccad9..136fd80d40 100644
--- a/test/911-get-stack-trace/src/PrintThread.java
+++ b/test/911-get-stack-trace/src/PrintThread.java
@@ -44,6 +44,9 @@ public class PrintThread {
if (name.contains("Daemon")) {
// Do not print daemon stacks, as they're non-deterministic.
stackSerialization = "<not printed>";
+ } else if (name.startsWith("Jit thread pool worker")) {
+ // Skip JIT thread pool. It may or may not be there depending on configuration.
+ continue;
} else {
StringBuilder sb = new StringBuilder();
for (String[] stackElement : (String[][])stackInfo[1]) {
diff --git a/test/924-threads/src/Main.java b/test/924-threads/src/Main.java
index 29c4aa330c..f18d70e8e1 100644
--- a/test/924-threads/src/Main.java
+++ b/test/924-threads/src/Main.java
@@ -20,6 +20,7 @@ import java.util.Collections;
import java.util.Comparator;
import java.util.concurrent.CountDownLatch;
import java.util.HashMap;
+import java.util.Iterator;
import java.util.List;
import java.util.Map;
@@ -162,8 +163,20 @@ public class Main {
private static void doAllThreadsTests() {
Thread[] threads = getAllThreads();
- Arrays.sort(threads, THREAD_COMP);
- System.out.println(Arrays.toString(threads));
+ List<Thread> threadList = new ArrayList<>(Arrays.asList(threads));
+
+ // Filter out JIT thread. It may or may not be there depending on configuration.
+ Iterator<Thread> it = threadList.iterator();
+ while (it.hasNext()) {
+ Thread t = it.next();
+ if (t.getName().startsWith("Jit thread pool worker")) {
+ it.remove();
+ break;
+ }
+ }
+
+ Collections.sort(threadList, THREAD_COMP);
+ System.out.println(threadList);
}
private static void doTLSTests() throws Exception {
diff --git a/test/925-threadgroups/src/Main.java b/test/925-threadgroups/src/Main.java
index 3d7a4ca740..bf7441f9bf 100644
--- a/test/925-threadgroups/src/Main.java
+++ b/test/925-threadgroups/src/Main.java
@@ -14,8 +14,12 @@
* limitations under the License.
*/
+import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
public class Main {
public static void main(String[] args) throws Exception {
@@ -64,10 +68,23 @@ public class Main {
Thread[] threads = (Thread[])data[0];
ThreadGroup[] groups = (ThreadGroup[])data[1];
- Arrays.sort(threads, THREAD_COMP);
+ List<Thread> threadList = new ArrayList<>(Arrays.asList(threads));
+
+ // Filter out JIT thread. It may or may not be there depending on configuration.
+ Iterator<Thread> it = threadList.iterator();
+ while (it.hasNext()) {
+ Thread t = it.next();
+ if (t.getName().startsWith("Jit thread pool worker")) {
+ it.remove();
+ break;
+ }
+ }
+
+ Collections.sort(threadList, THREAD_COMP);
+
Arrays.sort(groups, THREADGROUP_COMP);
System.out.println(tg.getName() + ":");
- System.out.println(" " + Arrays.toString(threads));
+ System.out.println(" " + threadList);
System.out.println(" " + Arrays.toString(groups));
if (tg.getParent() != null) {
diff --git a/tools/cpp-define-generator/constant_jit.def b/tools/cpp-define-generator/constant_jit.def
index 82cdbb20f1..5fa5194d00 100644
--- a/tools/cpp-define-generator/constant_jit.def
+++ b/tools/cpp-define-generator/constant_jit.def
@@ -25,6 +25,5 @@
DEFINE_JIT_CONSTANT(CHECK_OSR, int16_t, art::jit::kJitCheckForOSR)
DEFINE_JIT_CONSTANT(HOTNESS_DISABLE, int16_t, art::jit::kJitHotnessDisabled)
-DEFINE_JIT_CONSTANT(CHECK_OSR_THRESHOLD, int16_t, art::jit::Jit::kJitRecheckOSRThreshold)
#undef DEFINE_JIT_CONSTANT