-rw-r--r--build/Android.gtest.mk15
-rw-r--r--compiler/dex/dex_to_dex_compiler.cc4
-rw-r--r--compiler/driver/compiler_driver.cc91
-rw-r--r--compiler/driver/compiler_driver.h52
-rw-r--r--compiler/driver/compiler_options_map.def2
-rw-r--r--compiler/driver/dex_compilation_unit.cc38
-rw-r--r--compiler/driver/dex_compilation_unit.h45
-rw-r--r--compiler/optimizing/builder.cc12
-rw-r--r--compiler/optimizing/builder.h3
-rw-r--r--compiler/optimizing/code_generator.h1
-rw-r--r--compiler/optimizing/code_generator_arm64.cc43
-rw-r--r--compiler/optimizing/code_generator_arm64.h4
-rw-r--r--compiler/optimizing/inliner.cc5
-rw-r--r--compiler/optimizing/instruction_builder.cc183
-rw-r--r--compiler/optimizing/instruction_builder.h27
-rw-r--r--compiler/optimizing/instruction_simplifier_arm.cc5
-rw-r--r--compiler/optimizing/instruction_simplifier_shared.cc4
-rw-r--r--compiler/optimizing/intrinsics.h1
-rw-r--r--compiler/optimizing/intrinsics_arm64.cc34
-rw-r--r--compiler/optimizing/intrinsics_arm_vixl.cc1
-rw-r--r--compiler/optimizing/intrinsics_mips.cc2
-rw-r--r--compiler/optimizing/intrinsics_mips64.cc1
-rw-r--r--compiler/optimizing/intrinsics_x86.cc1
-rw-r--r--compiler/optimizing/intrinsics_x86_64.cc1
-rw-r--r--compiler/optimizing/nodes.h2
-rw-r--r--compiler/optimizing/optimizing_compiler.cc40
-rw-r--r--dex2oat/dex2oat_test.cc415
-rw-r--r--dex2oat/linker/image_test.cc18
-rw-r--r--dex2oat/linker/image_test.h24
-rw-r--r--dex2oat/linker/image_writer.cc10
-rw-r--r--libartbase/arch/instruction_set.cc41
-rw-r--r--libartbase/arch/instruction_set.h48
-rw-r--r--libdexfile/dex/dex_file-inl.h8
-rw-r--r--libdexfile/dex/dex_file.h2
-rw-r--r--runtime/arch/stub_test.cc3
-rw-r--r--runtime/art_method-inl.h1
-rw-r--r--runtime/base/mutex-inl.h2
-rw-r--r--runtime/base/mutex.cc22
-rw-r--r--runtime/class_linker.cc55
-rw-r--r--runtime/class_loader_context.cc578
-rw-r--r--runtime/class_loader_context.h60
-rw-r--r--runtime/class_loader_context_test.cc137
-rw-r--r--runtime/gc/collector/concurrent_copying.cc8
-rw-r--r--runtime/gc/space/image_space.cc2
-rw-r--r--runtime/handle.h5
-rw-r--r--runtime/image.cc2
-rw-r--r--runtime/intern_table-inl.h69
-rw-r--r--runtime/intern_table.cc50
-rw-r--r--runtime/intern_table.h54
-rw-r--r--runtime/intern_table_test.cc4
-rw-r--r--runtime/interpreter/interpreter_intrinsics.cc1
-rw-r--r--runtime/intrinsics_list.h1
-rw-r--r--runtime/mirror/class.cc9
-rw-r--r--runtime/native/dalvik_system_VMRuntime.cc5
-rw-r--r--runtime/native/sun_misc_Unsafe.cc31
-rw-r--r--runtime/oat_file.cc7
-rw-r--r--runtime/runtime.cc13
-rw-r--r--runtime/runtime.h3
-rw-r--r--runtime/thread.cc145
-rw-r--r--runtime/thread.h7
-rw-r--r--runtime/thread_list.cc3
-rw-r--r--runtime/well_known_classes.cc2
-rw-r--r--runtime/well_known_classes.h1
-rw-r--r--test/004-ThreadStress/src-art/Main.java29
-rw-r--r--test/050-sync-test/src/Main.java5
-rw-r--r--test/160-read-barrier-stress/src/Main.java17
-rw-r--r--test/527-checker-array-access-split/src/Main.java69
-rw-r--r--test/580-crc32/expected.txt0
-rw-r--r--test/580-crc32/info.txt1
-rw-r--r--test/580-crc32/src/Main.java131
-rw-r--r--test/913-heaps/expected.txt52
-rw-r--r--test/Android.run-test.mk1
-rw-r--r--test/VerifySoftFailDuringClinit/ClassToInitialize.smali22
-rw-r--r--test/VerifySoftFailDuringClinit/VerifySoftFail.smali27
-rwxr-xr-xtest/etc/run-test-jar2
-rw-r--r--test/knownfailures.json6
-rw-r--r--tools/timeout_dumper/Android.bp39
-rw-r--r--tools/timeout_dumper/timeout_dumper.cc590
78 files changed, 2574 insertions, 880 deletions
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index e2a0a391ab..e610bb11b7 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -163,6 +163,16 @@ $(ART_TEST_HOST_GTEST_VerifierDepsMulti_DEX): $(ART_TEST_GTEST_VerifierDepsMulti
$(ART_TEST_TARGET_GTEST_VerifierDepsMulti_DEX): $(ART_TEST_GTEST_VerifierDepsMulti_SRC) $(HOST_OUT_EXECUTABLES)/smali
$(HOST_OUT_EXECUTABLES)/smali assemble --output $@ $(filter %.smali,$^)
+ART_TEST_GTEST_VerifySoftFailDuringClinit_SRC := $(abspath $(wildcard $(LOCAL_PATH)/VerifySoftFailDuringClinit/*.smali))
+ART_TEST_HOST_GTEST_VerifySoftFailDuringClinit_DEX := $(dir $(ART_TEST_HOST_GTEST_Main_DEX))$(subst Main,VerifySoftFailDuringClinit,$(basename $(notdir $(ART_TEST_HOST_GTEST_Main_DEX))))$(suffix $(ART_TEST_HOST_GTEST_Main_DEX))
+ART_TEST_TARGET_GTEST_VerifySoftFailDuringClinit_DEX := $(dir $(ART_TEST_TARGET_GTEST_Main_DEX))$(subst Main,VerifySoftFailDuringClinit,$(basename $(notdir $(ART_TEST_TARGET_GTEST_Main_DEX))))$(suffix $(ART_TEST_TARGET_GTEST_Main_DEX))
+
+$(ART_TEST_HOST_GTEST_VerifySoftFailDuringClinit_DEX): $(ART_TEST_GTEST_VerifySoftFailDuringClinit_SRC) $(HOST_OUT_EXECUTABLES)/smali
+ $(HOST_OUT_EXECUTABLES)/smali assemble --output $@ $(filter %.smali,$^)
+
+$(ART_TEST_TARGET_GTEST_VerifySoftFailDuringClinit_DEX): $(ART_TEST_GTEST_VerifySoftFailDuringClinit_SRC) $(HOST_OUT_EXECUTABLES)/smali
+ $(HOST_OUT_EXECUTABLES)/smali assemble --output $@ $(filter %.smali,$^)
+
# Dex file dependencies for each gtest.
ART_GTEST_art_dex_file_loader_test_DEX_DEPS := GetMethodSignature Main Nested MultiDex
ART_GTEST_dex2oat_environment_tests_DEX_DEPS := Main MainStripped MultiDex MultiDexModifiedSecondary MyClassNatives Nested VerifierDeps VerifierDepsMulti
@@ -180,7 +190,7 @@ ART_GTEST_dex2oat_image_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_D
ART_GTEST_exception_test_DEX_DEPS := ExceptionHandle
ART_GTEST_hiddenapi_test_DEX_DEPS := HiddenApi
ART_GTEST_hidden_api_test_DEX_DEPS := HiddenApiSignatures
-ART_GTEST_image_test_DEX_DEPS := ImageLayoutA ImageLayoutB DefaultMethods
+ART_GTEST_image_test_DEX_DEPS := ImageLayoutA ImageLayoutB DefaultMethods VerifySoftFailDuringClinit
ART_GTEST_imtable_test_DEX_DEPS := IMTA IMTB
ART_GTEST_instrumentation_test_DEX_DEPS := Instrumentation
ART_GTEST_jni_compiler_test_DEX_DEPS := MyClassNatives
@@ -752,5 +762,8 @@ ART_TEST_TARGET_GTEST_EmptyUncompressed_DEX :=
ART_TEST_GTEST_VerifierDeps_SRC :=
ART_TEST_HOST_GTEST_VerifierDeps_DEX :=
ART_TEST_TARGET_GTEST_VerifierDeps_DEX :=
+ART_TEST_GTEST_VerifySoftFailDuringClinit_SRC :=
+ART_TEST_HOST_GTEST_VerifySoftFailDuringClinit_DEX :=
+ART_TEST_TARGET_GTEST_VerifySoftFailDuringClinit_DEX :=
GTEST_DEX_DIRECTORIES :=
LOCAL_PATH :=
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index ad9a30f8d4..fe99eaa51a 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -376,9 +376,7 @@ void DexToDexCompiler::CompilationState::CompileReturnVoid(Instruction* inst, ui
DCHECK_EQ(inst->Opcode(), Instruction::RETURN_VOID);
if (unit_.IsConstructor()) {
// Are we compiling a non clinit constructor which needs a barrier ?
- if (!unit_.IsStatic() &&
- driver_.RequiresConstructorBarrier(Thread::Current(), unit_.GetDexFile(),
- unit_.GetClassDefIndex())) {
+ if (!unit_.IsStatic() && unit_.RequiresConstructorBarrier()) {
return;
}
}
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index df6e8a83e1..8c276bb706 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -254,7 +254,6 @@ CompilerDriver::CompilerDriver(
verification_results_(verification_results),
compiler_(Compiler::Create(this, compiler_kind)),
compiler_kind_(compiler_kind),
- requires_constructor_barrier_lock_("constructor barrier lock"),
image_classes_(std::move(image_classes)),
number_of_soft_verifier_failures_(0),
had_hard_verifier_failure_(false),
@@ -1580,18 +1579,6 @@ static void CheckAndClearResolveException(Thread* self)
self->ClearException();
}
-bool CompilerDriver::RequiresConstructorBarrier(const DexFile& dex_file,
- uint16_t class_def_idx) const {
- ClassAccessor accessor(dex_file, class_def_idx);
- // We require a constructor barrier if there are final instance fields.
- for (const ClassAccessor::Field& field : accessor.GetInstanceFields()) {
- if (field.IsFinal()) {
- return true;
- }
- }
- return false;
-}
-
class ResolveClassFieldsAndMethodsVisitor : public CompilationVisitor {
public:
explicit ResolveClassFieldsAndMethodsVisitor(const ParallelCompilationManager* manager)
@@ -1635,57 +1622,42 @@ class ResolveClassFieldsAndMethodsVisitor : public CompilationVisitor {
// We want to resolve the methods and fields eagerly.
resolve_fields_and_methods = true;
}
- // If an instance field is final then we need to have a barrier on the return, static final
- // fields are assigned within the lock held for class initialization.
- bool requires_constructor_barrier = false;
- ClassAccessor accessor(dex_file, class_def_index);
- // Optionally resolve fields and methods and figure out if we need a constructor barrier.
- auto method_visitor = [&](const ClassAccessor::Method& method)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- if (resolve_fields_and_methods) {
+ if (resolve_fields_and_methods) {
+ ClassAccessor accessor(dex_file, class_def_index);
+ // Optionally resolve fields and methods and figure out if we need a constructor barrier.
+ auto method_visitor = [&](const ClassAccessor::Method& method)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* resolved = class_linker->ResolveMethod<ClassLinker::ResolveMode::kNoChecks>(
method.GetIndex(),
dex_cache,
class_loader,
- /* referrer */ nullptr,
+ /*referrer=*/ nullptr,
method.GetInvokeType(class_def.access_flags_));
if (resolved == nullptr) {
CheckAndClearResolveException(soa.Self());
}
- }
- };
- accessor.VisitFieldsAndMethods(
- // static fields
- [&](ClassAccessor::Field& field) REQUIRES_SHARED(Locks::mutator_lock_) {
- if (resolve_fields_and_methods) {
+ };
+ accessor.VisitFieldsAndMethods(
+ // static fields
+ [&](ClassAccessor::Field& field) REQUIRES_SHARED(Locks::mutator_lock_) {
ArtField* resolved = class_linker->ResolveField(
- field.GetIndex(), dex_cache, class_loader, /* is_static */ true);
+ field.GetIndex(), dex_cache, class_loader, /*is_static=*/ true);
if (resolved == nullptr) {
CheckAndClearResolveException(soa.Self());
}
- }
- },
- // instance fields
- [&](ClassAccessor::Field& field) REQUIRES_SHARED(Locks::mutator_lock_) {
- if (field.IsFinal()) {
- // We require a constructor barrier if there are final instance fields.
- requires_constructor_barrier = true;
- }
- if (resolve_fields_and_methods) {
+ },
+ // instance fields
+ [&](ClassAccessor::Field& field) REQUIRES_SHARED(Locks::mutator_lock_) {
ArtField* resolved = class_linker->ResolveField(
- field.GetIndex(), dex_cache, class_loader, /* is_static */ false);
+ field.GetIndex(), dex_cache, class_loader, /*is_static=*/ false);
if (resolved == nullptr) {
CheckAndClearResolveException(soa.Self());
}
- }
- },
- /*direct methods*/ method_visitor,
- /*virtual methods*/ method_visitor);
- manager_->GetCompiler()->SetRequiresConstructorBarrier(self,
- &dex_file,
- class_def_index,
- requires_constructor_barrier);
+ },
+ /*direct_method_visitor=*/ method_visitor,
+ /*virtual_method_visitor=*/ method_visitor);
+ }
}
private:
@@ -2832,31 +2804,6 @@ bool CompilerDriver::IsMethodVerifiedWithoutFailures(uint32_t method_idx,
return is_system_class;
}
-void CompilerDriver::SetRequiresConstructorBarrier(Thread* self,
- const DexFile* dex_file,
- uint16_t class_def_index,
- bool requires) {
- WriterMutexLock mu(self, requires_constructor_barrier_lock_);
- requires_constructor_barrier_.emplace(ClassReference(dex_file, class_def_index), requires);
-}
-
-bool CompilerDriver::RequiresConstructorBarrier(Thread* self,
- const DexFile* dex_file,
- uint16_t class_def_index) {
- ClassReference class_ref(dex_file, class_def_index);
- {
- ReaderMutexLock mu(self, requires_constructor_barrier_lock_);
- auto it = requires_constructor_barrier_.find(class_ref);
- if (it != requires_constructor_barrier_.end()) {
- return it->second;
- }
- }
- WriterMutexLock mu(self, requires_constructor_barrier_lock_);
- const bool requires = RequiresConstructorBarrier(*dex_file, class_def_index);
- requires_constructor_barrier_.emplace(class_ref, requires);
- return requires;
-}
-
std::string CompilerDriver::GetMemoryUsageString(bool extended) const {
std::ostringstream oss;
const gc::Heap* const heap = Runtime::Current()->GetHeap();
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 9a83e55c96..f42e555410 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -151,50 +151,6 @@ class CompilerDriver {
void AddCompiledMethod(const MethodReference& method_ref, CompiledMethod* const compiled_method);
CompiledMethod* RemoveCompiledMethod(const MethodReference& method_ref);
- void SetRequiresConstructorBarrier(Thread* self,
- const DexFile* dex_file,
- uint16_t class_def_index,
- bool requires)
- REQUIRES(!requires_constructor_barrier_lock_);
-
- // Do the <init> methods for this class require a constructor barrier (prior to the return)?
- // The answer is "yes", if and only if this class has any instance final fields.
- // (This must not be called for any non-<init> methods; the answer would be "no").
- //
- // ---
- //
- // JLS 17.5.1 "Semantics of final fields" mandates that all final fields are frozen at the end
- // of the invoked constructor. The constructor barrier is a conservative implementation means of
- // enforcing the freezes happen-before the object being constructed is observable by another
- // thread.
- //
- // Note: This question only makes sense for instance constructors;
- // static constructors (despite possibly having finals) never need
- // a barrier.
- //
- // JLS 12.4.2 "Detailed Initialization Procedure" approximately describes
- // class initialization as:
- //
- // lock(class.lock)
- // class.state = initializing
- // unlock(class.lock)
- //
- // invoke <clinit>
- //
- // lock(class.lock)
- // class.state = initialized
- // unlock(class.lock) <-- acts as a release
- //
- // The last operation in the above example acts as an atomic release
- // for any stores in <clinit>, which ends up being stricter
- // than what a constructor barrier needs.
- //
- // See also QuasiAtomic::ThreadFenceForConstructor().
- bool RequiresConstructorBarrier(Thread* self,
- const DexFile* dex_file,
- uint16_t class_def_index)
- REQUIRES(!requires_constructor_barrier_lock_);
-
// Resolve compiling method's class. Returns null on failure.
ObjPtr<mirror::Class> ResolveCompilingMethodsClass(const ScopedObjectAccess& soa,
Handle<mirror::DexCache> dex_cache,
@@ -407,20 +363,12 @@ class CompilerDriver {
void FreeThreadPools();
void CheckThreadPools();
- bool RequiresConstructorBarrier(const DexFile& dex_file, uint16_t class_def_idx) const;
-
const CompilerOptions* const compiler_options_;
VerificationResults* const verification_results_;
std::unique_ptr<Compiler> compiler_;
Compiler::Kind compiler_kind_;
- // All class references that require constructor barriers. If the class reference is not in the
- // set then the result has not yet been computed.
- mutable ReaderWriterMutex requires_constructor_barrier_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
- std::map<ClassReference, bool> requires_constructor_barrier_
- GUARDED_BY(requires_constructor_barrier_lock_);
-
// All class references that this compiler has compiled. Indexed by class defs.
using ClassStateTable = AtomicDexRefMap<ClassReference, ClassStatus>;
ClassStateTable compiled_classes_;
diff --git a/compiler/driver/compiler_options_map.def b/compiler/driver/compiler_options_map.def
index 1ec34ec73a..a593240365 100644
--- a/compiler/driver/compiler_options_map.def
+++ b/compiler/driver/compiler_options_map.def
@@ -52,7 +52,7 @@ COMPILER_OPTIONS_KEY (Unit, Baseline)
COMPILER_OPTIONS_KEY (double, TopKProfileThreshold)
COMPILER_OPTIONS_KEY (bool, AbortOnHardVerifierFailure)
COMPILER_OPTIONS_KEY (bool, AbortOnSoftVerifierFailure)
-COMPILER_OPTIONS_KEY (bool, ResolveStartupConstStrings, kIsDebugBuild)
+COMPILER_OPTIONS_KEY (bool, ResolveStartupConstStrings, false)
COMPILER_OPTIONS_KEY (std::string, DumpInitFailures)
COMPILER_OPTIONS_KEY (std::string, DumpCFG)
COMPILER_OPTIONS_KEY (Unit, DumpCFGAppend)
diff --git a/compiler/driver/dex_compilation_unit.cc b/compiler/driver/dex_compilation_unit.cc
index c90c37d54a..e5a6f0e177 100644
--- a/compiler/driver/dex_compilation_unit.cc
+++ b/compiler/driver/dex_compilation_unit.cc
@@ -16,10 +16,14 @@
#include "dex_compilation_unit.h"
+#include "art_field.h"
#include "base/utils.h"
+#include "dex/class_accessor-inl.h"
#include "dex/code_item_accessors-inl.h"
#include "dex/descriptors_names.h"
+#include "mirror/class-inl.h"
#include "mirror/dex_cache.h"
+#include "scoped_thread_state_change-inl.h"
namespace art {
@@ -31,7 +35,8 @@ DexCompilationUnit::DexCompilationUnit(Handle<mirror::ClassLoader> class_loader,
uint32_t method_idx,
uint32_t access_flags,
const VerifiedMethod* verified_method,
- Handle<mirror::DexCache> dex_cache)
+ Handle<mirror::DexCache> dex_cache,
+ Handle<mirror::Class> compiling_class)
: class_loader_(class_loader),
class_linker_(class_linker),
dex_file_(&dex_file),
@@ -41,7 +46,8 @@ DexCompilationUnit::DexCompilationUnit(Handle<mirror::ClassLoader> class_loader,
access_flags_(access_flags),
verified_method_(verified_method),
dex_cache_(dex_cache),
- code_item_accessor_(dex_file, code_item) {}
+ code_item_accessor_(dex_file, code_item),
+ compiling_class_(compiling_class) {}
const std::string& DexCompilationUnit::GetSymbol() {
if (symbol_.empty()) {
@@ -51,4 +57,32 @@ const std::string& DexCompilationUnit::GetSymbol() {
return symbol_;
}
+bool DexCompilationUnit::RequiresConstructorBarrier() const {
+ // Constructor barriers are applicable only for <init> methods.
+ DCHECK(!IsStatic());
+ DCHECK(IsConstructor());
+
+ // We require a constructor barrier if there are final instance fields.
+ if (GetCompilingClass().GetReference() != nullptr && !GetCompilingClass().IsNull()) {
+ // Decoding class data can be slow, so iterate over fields of the compiling class if resolved.
+ ScopedObjectAccess soa(Thread::Current());
+ ObjPtr<mirror::Class> compiling_class = GetCompilingClass().Get();
+ for (size_t i = 0, size = compiling_class->NumInstanceFields(); i != size; ++i) {
+ ArtField* field = compiling_class->GetInstanceField(i);
+ if (field->IsFinal()) {
+ return true;
+ }
+ }
+ } else {
+ // Iterate over field definitions in the class data.
+ ClassAccessor accessor(*GetDexFile(), GetClassDefIndex());
+ for (const ClassAccessor::Field& field : accessor.GetInstanceFields()) {
+ if (field.IsFinal()) {
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
} // namespace art
diff --git a/compiler/driver/dex_compilation_unit.h b/compiler/driver/dex_compilation_unit.h
index c1ae3c938b..757f0e7695 100644
--- a/compiler/driver/dex_compilation_unit.h
+++ b/compiler/driver/dex_compilation_unit.h
@@ -27,6 +27,7 @@
namespace art {
namespace mirror {
+class Class;
class ClassLoader;
class DexCache;
} // namespace mirror
@@ -43,7 +44,8 @@ class DexCompilationUnit : public DeletableArenaObject<kArenaAllocMisc> {
uint32_t method_idx,
uint32_t access_flags,
const VerifiedMethod* verified_method,
- Handle<mirror::DexCache> dex_cache);
+ Handle<mirror::DexCache> dex_cache,
+ Handle<mirror::Class> compiling_class = Handle<mirror::Class>());
Handle<mirror::ClassLoader> GetClassLoader() const {
return class_loader_;
@@ -117,6 +119,45 @@ class DexCompilationUnit : public DeletableArenaObject<kArenaAllocMisc> {
return code_item_accessor_;
}
+ Handle<mirror::Class> GetCompilingClass() const {
+ return compiling_class_;
+ }
+
+ // Does this <init> method require a constructor barrier (prior to the return)?
+ // The answer is "yes", if and only if the class has any instance final fields.
+ // (This must not be called for any non-<init> methods; the answer would be "no").
+ //
+ // ---
+ //
+ // JLS 17.5.1 "Semantics of final fields" mandates that all final fields are frozen at the end
+ // of the invoked constructor. The constructor barrier is a conservative implementation means of
+ // enforcing the freezes happen-before the object being constructed is observable by another
+ // thread.
+ //
+ // Note: This question only makes sense for instance constructors;
+ // static constructors (despite possibly having finals) never need
+ // a barrier.
+ //
+ // JLS 12.4.2 "Detailed Initialization Procedure" approximately describes
+ // class initialization as:
+ //
+ // lock(class.lock)
+ // class.state = initializing
+ // unlock(class.lock)
+ //
+ // invoke <clinit>
+ //
+ // lock(class.lock)
+ // class.state = initialized
+ // unlock(class.lock) <-- acts as a release
+ //
+ // The last operation in the above example acts as an atomic release
+ // for any stores in <clinit>, which ends up being stricter
+ // than what a constructor barrier needs.
+ //
+ // See also QuasiAtomic::ThreadFenceForConstructor().
+ bool RequiresConstructorBarrier() const;
+
private:
const Handle<mirror::ClassLoader> class_loader_;
@@ -134,6 +175,8 @@ class DexCompilationUnit : public DeletableArenaObject<kArenaAllocMisc> {
const CodeItemDataAccessor code_item_accessor_;
+ Handle<mirror::Class> compiling_class_;
+
std::string symbol_;
};
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index a1a5692ef6..64aa1b9358 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -21,6 +21,7 @@
#include "base/bit_vector-inl.h"
#include "base/logging.h"
#include "block_builder.h"
+#include "code_generator.h"
#include "data_type-inl.h"
#include "dex/verified_method.h"
#include "driver/compiler_options.h"
@@ -40,7 +41,6 @@ HGraphBuilder::HGraphBuilder(HGraph* graph,
const CodeItemDebugInfoAccessor& accessor,
const DexCompilationUnit* dex_compilation_unit,
const DexCompilationUnit* outer_compilation_unit,
- CompilerDriver* driver,
CodeGenerator* code_generator,
OptimizingCompilerStats* compiler_stats,
ArrayRef<const uint8_t> interpreter_metadata,
@@ -50,7 +50,6 @@ HGraphBuilder::HGraphBuilder(HGraph* graph,
code_item_accessor_(accessor),
dex_compilation_unit_(dex_compilation_unit),
outer_compilation_unit_(outer_compilation_unit),
- compiler_driver_(driver),
code_generator_(code_generator),
compilation_stats_(compiler_stats),
interpreter_metadata_(interpreter_metadata),
@@ -67,19 +66,18 @@ HGraphBuilder::HGraphBuilder(HGraph* graph,
code_item_accessor_(accessor),
dex_compilation_unit_(dex_compilation_unit),
outer_compilation_unit_(nullptr),
- compiler_driver_(nullptr),
code_generator_(nullptr),
compilation_stats_(nullptr),
handles_(handles),
return_type_(return_type) {}
bool HGraphBuilder::SkipCompilation(size_t number_of_branches) {
- if (compiler_driver_ == nullptr) {
- // Note that the compiler driver is null when unit testing.
+ if (code_generator_ == nullptr) {
+ // Note that the codegen is null when unit testing.
return false;
}
- const CompilerOptions& compiler_options = compiler_driver_->GetCompilerOptions();
+ const CompilerOptions& compiler_options = code_generator_->GetCompilerOptions();
CompilerFilter::Filter compiler_filter = compiler_options.GetCompilerFilter();
if (compiler_filter == CompilerFilter::kEverything) {
return false;
@@ -131,7 +129,6 @@ GraphAnalysisResult HGraphBuilder::BuildGraph() {
return_type_,
dex_compilation_unit_,
outer_compilation_unit_,
- compiler_driver_,
code_generator_,
interpreter_metadata_,
compilation_stats_,
@@ -203,7 +200,6 @@ void HGraphBuilder::BuildIntrinsicGraph(ArtMethod* method) {
return_type_,
dex_compilation_unit_,
outer_compilation_unit_,
- compiler_driver_,
code_generator_,
interpreter_metadata_,
compilation_stats_,
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index 5a1914ce08..6152740324 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -22,7 +22,6 @@
#include "dex/code_item_accessors.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_file.h"
-#include "driver/compiler_driver.h"
#include "nodes.h"
namespace art {
@@ -38,7 +37,6 @@ class HGraphBuilder : public ValueObject {
const CodeItemDebugInfoAccessor& accessor,
const DexCompilationUnit* dex_compilation_unit,
const DexCompilationUnit* outer_compilation_unit,
- CompilerDriver* driver,
CodeGenerator* code_generator,
OptimizingCompilerStats* compiler_stats,
ArrayRef<const uint8_t> interpreter_metadata,
@@ -70,7 +68,6 @@ class HGraphBuilder : public ValueObject {
// The compilation unit of the enclosing method being compiled.
const DexCompilationUnit* const outer_compilation_unit_;
- CompilerDriver* const compiler_driver_;
CodeGenerator* const code_generator_;
OptimizingCompilerStats* const compilation_stats_;
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 3f560780ce..39966ff8ea 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -59,7 +59,6 @@ static constexpr ReadBarrierOption kCompilerReadBarrierOption =
class Assembler;
class CodeGenerator;
-class CompilerDriver;
class CompilerOptions;
class StackMapStream;
class ParallelMoveResolver;
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index a9acf90762..48b50ea5d6 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -2319,9 +2319,10 @@ void LocationsBuilderARM64::VisitArrayGet(HArrayGet* instruction) {
if (offset >= kReferenceLoadMinFarOffset) {
locations->AddTemp(FixedTempLocation());
}
- } else {
+ } else if (!instruction->GetArray()->IsIntermediateAddress()) {
// We need a non-scratch temporary for the array data pointer in
- // CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier().
+ // CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier() for the case with no
+ // intermediate address.
locations->AddTemp(Location::RequiresRegister());
}
}
@@ -2351,11 +2352,12 @@ void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) {
MacroAssembler* masm = GetVIXLAssembler();
UseScratchRegisterScope temps(masm);
- // The read barrier instrumentation of object ArrayGet instructions
+ // The non-Baker read barrier instrumentation of object ArrayGet instructions
// does not support the HIntermediateAddress instruction.
DCHECK(!((type == DataType::Type::kReference) &&
instruction->GetArray()->IsIntermediateAddress() &&
- kEmitCompilerReadBarrier));
+ kEmitCompilerReadBarrier &&
+ !kUseBakerReadBarrier));
if (type == DataType::Type::kReference && kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
// Object ArrayGet with Baker's read barrier case.
@@ -2363,6 +2365,7 @@ void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) {
// CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier call.
DCHECK(!instruction->CanDoImplicitNullCheckOn(instruction->InputAt(0)));
if (index.IsConstant()) {
+ DCHECK(!instruction->GetArray()->IsIntermediateAddress());
// Array load with a constant index can be treated as a field load.
offset += Int64FromLocation(index) << DataType::SizeShift(type);
Location maybe_temp =
@@ -2375,9 +2378,8 @@ void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) {
/* needs_null_check */ false,
/* use_load_acquire */ false);
} else {
- Register temp = WRegisterFrom(locations->GetTemp(0));
codegen_->GenerateArrayLoadWithBakerReadBarrier(
- out, obj.W(), offset, index, temp, /* needs_null_check */ false);
+ instruction, out, obj.W(), offset, index, /* needs_null_check */ false);
}
} else {
// General case.
@@ -2426,8 +2428,8 @@ void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) {
// input instruction has done it already. See the comment in
// `TryExtractArrayAccessAddress()`.
if (kIsDebugBuild) {
- HIntermediateAddress* tmp = instruction->GetArray()->AsIntermediateAddress();
- DCHECK_EQ(tmp->GetOffset()->AsIntConstant()->GetValueAsUint64(), offset);
+ HIntermediateAddress* interm_addr = instruction->GetArray()->AsIntermediateAddress();
+ DCHECK_EQ(interm_addr->GetOffset()->AsIntConstant()->GetValueAsUint64(), offset);
}
temp = obj;
} else {
@@ -2539,8 +2541,8 @@ void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) {
// input instruction has done it already. See the comment in
// `TryExtractArrayAccessAddress()`.
if (kIsDebugBuild) {
- HIntermediateAddress* tmp = instruction->GetArray()->AsIntermediateAddress();
- DCHECK(tmp->GetOffset()->AsIntConstant()->GetValueAsUint64() == offset);
+ HIntermediateAddress* interm_addr = instruction->GetArray()->AsIntermediateAddress();
+ DCHECK(interm_addr->GetOffset()->AsIntConstant()->GetValueAsUint64() == offset);
}
temp = array;
} else {
@@ -5957,11 +5959,11 @@ void CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier(HInstruction* ins
instruction, ref, obj, src, needs_null_check, use_load_acquire);
}
-void CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier(Location ref,
+void CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier(HArrayGet* instruction,
+ Location ref,
Register obj,
uint32_t data_offset,
Location index,
- Register temp,
bool needs_null_check) {
DCHECK(kEmitCompilerReadBarrier);
DCHECK(kUseBakerReadBarrier);
@@ -6000,9 +6002,24 @@ void CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier(Location ref,
DCHECK(temps.IsAvailable(ip0));
DCHECK(temps.IsAvailable(ip1));
temps.Exclude(ip0, ip1);
+
+ Register temp;
+ if (instruction->GetArray()->IsIntermediateAddress()) {
+ // We do not need to compute the intermediate address from the array: the
+ // input instruction has done it already. See the comment in
+ // `TryExtractArrayAccessAddress()`.
+ if (kIsDebugBuild) {
+ HIntermediateAddress* interm_addr = instruction->GetArray()->AsIntermediateAddress();
+ DCHECK_EQ(interm_addr->GetOffset()->AsIntConstant()->GetValueAsUint64(), data_offset);
+ }
+ temp = obj;
+ } else {
+ temp = WRegisterFrom(instruction->GetLocations()->GetTemp(0));
+ __ Add(temp.X(), obj.X(), Operand(data_offset));
+ }
+
uint32_t custom_data = EncodeBakerReadBarrierArrayData(temp.GetCode());
- __ Add(temp.X(), obj.X(), Operand(data_offset));
{
ExactAssemblyScope guard(GetVIXLAssembler(),
(kPoisonHeapReferences ? 4u : 3u) * vixl::aarch64::kInstructionSize);
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 1ba58b1a9f..ada5742fc0 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -694,11 +694,11 @@ class CodeGeneratorARM64 : public CodeGenerator {
bool use_load_acquire);
// Fast path implementation of ReadBarrier::Barrier for a heap
// reference array load when Baker's read barriers are used.
- void GenerateArrayLoadWithBakerReadBarrier(Location ref,
+ void GenerateArrayLoadWithBakerReadBarrier(HArrayGet* instruction,
+ Location ref,
vixl::aarch64::Register obj,
uint32_t data_offset,
Location index,
- vixl::aarch64::Register temp,
bool needs_null_check);
// Emit code checking the status of the Marking Register, and
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index dd781c288f..f56f9cb84f 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -1757,6 +1757,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
caller_compilation_unit_.GetClassLoader(),
handles_);
+ Handle<mirror::Class> compiling_class = handles_->NewHandle(resolved_method->GetDeclaringClass());
DexCompilationUnit dex_compilation_unit(
class_loader,
class_linker,
@@ -1766,7 +1767,8 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
method_index,
resolved_method->GetAccessFlags(),
/* verified_method */ nullptr,
- dex_cache);
+ dex_cache,
+ compiling_class);
InvokeType invoke_type = invoke_instruction->GetInvokeType();
if (invoke_type == kInterface) {
@@ -1805,7 +1807,6 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
code_item_accessor,
&dex_compilation_unit,
&outer_compilation_unit_,
- compiler_driver_,
codegen_,
inline_stats_,
resolved_method->GetQuickenedInfo(),
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index 63b2705b43..e9b5b5a93d 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -20,11 +20,11 @@
#include "base/arena_bit_vector.h"
#include "base/bit_vector-inl.h"
#include "block_builder.h"
-#include "class_linker.h"
+#include "class_linker-inl.h"
+#include "code_generator.h"
#include "data_type-inl.h"
#include "dex/bytecode_utils.h"
#include "dex/dex_instruction-inl.h"
-#include "driver/compiler_driver-inl.h"
#include "driver/dex_compilation_unit.h"
#include "driver/compiler_options.h"
#include "imtable-inl.h"
@@ -47,7 +47,6 @@ HInstructionBuilder::HInstructionBuilder(HGraph* graph,
DataType::Type return_type,
const DexCompilationUnit* dex_compilation_unit,
const DexCompilationUnit* outer_compilation_unit,
- CompilerDriver* compiler_driver,
CodeGenerator* code_generator,
ArrayRef<const uint8_t> interpreter_metadata,
OptimizingCompilerStats* compiler_stats,
@@ -61,7 +60,6 @@ HInstructionBuilder::HInstructionBuilder(HGraph* graph,
return_type_(return_type),
block_builder_(block_builder),
ssa_builder_(ssa_builder),
- compiler_driver_(compiler_driver),
code_generator_(code_generator),
dex_compilation_unit_(dex_compilation_unit),
outer_compilation_unit_(outer_compilation_unit),
@@ -73,7 +71,8 @@ HInstructionBuilder::HInstructionBuilder(HGraph* graph,
current_locals_(nullptr),
latest_result_(nullptr),
current_this_parameter_(nullptr),
- loop_headers_(local_allocator->Adapter(kArenaAllocGraphBuilder)) {
+ loop_headers_(local_allocator->Adapter(kArenaAllocGraphBuilder)),
+ class_cache_(std::less<dex::TypeIndex>(), local_allocator->Adapter(kArenaAllocGraphBuilder)) {
loop_headers_.reserve(kDefaultNumberOfLoops);
}
@@ -319,8 +318,8 @@ bool HInstructionBuilder::Build() {
// Find locations where we want to generate extra stackmaps for native debugging.
// This allows us to generate the info only at interesting points (for example,
// at start of java statement) rather than before every dex instruction.
- const bool native_debuggable = compiler_driver_ != nullptr &&
- compiler_driver_->GetCompilerOptions().GetNativeDebuggable();
+ const bool native_debuggable = code_generator_ != nullptr &&
+ code_generator_->GetCompilerOptions().GetNativeDebuggable();
ArenaBitVector* native_debug_info_locations = nullptr;
if (native_debuggable) {
native_debug_info_locations = FindNativeDebugInfoLocations();
@@ -709,20 +708,18 @@ void HInstructionBuilder::Binop_22b(const Instruction& instruction, bool reverse
// Does the method being compiled need any constructor barriers being inserted?
// (Always 'false' for methods that aren't <init>.)
-static bool RequiresConstructorBarrier(const DexCompilationUnit* cu, CompilerDriver* driver) {
+static bool RequiresConstructorBarrier(const DexCompilationUnit* cu) {
// Can be null in unit tests only.
if (UNLIKELY(cu == nullptr)) {
return false;
}
- Thread* self = Thread::Current();
- return cu->IsConstructor()
- && !cu->IsStatic()
- // RequiresConstructorBarrier must only be queried for <init> methods;
- // it's effectively "false" for every other method.
- //
- // See CompilerDriver::RequiresConstructBarrier for more explanation.
- && driver->RequiresConstructorBarrier(self, cu->GetDexFile(), cu->GetClassDefIndex());
+ // Constructor barriers are applicable only for <init> methods.
+ if (LIKELY(!cu->IsConstructor() || cu->IsStatic())) {
+ return false;
+ }
+
+ return cu->RequiresConstructorBarrier();
}
// Returns true if `block` has only one successor which starts at the next
@@ -768,7 +765,7 @@ void HInstructionBuilder::BuildReturn(const Instruction& instruction,
// Only <init> (which is a return-void) could possibly have a constructor fence.
// This may insert additional redundant constructor fences from the super constructors.
// TODO: remove redundant constructor fences (b/36656456).
- if (RequiresConstructorBarrier(dex_compilation_unit_, compiler_driver_)) {
+ if (RequiresConstructorBarrier(dex_compilation_unit_)) {
// Compiling instance constructor.
DCHECK_STREQ("<init>", graph_->GetMethodName());
@@ -782,7 +779,7 @@ void HInstructionBuilder::BuildReturn(const Instruction& instruction,
}
AppendInstruction(new (allocator_) HReturnVoid(dex_pc));
} else {
- DCHECK(!RequiresConstructorBarrier(dex_compilation_unit_, compiler_driver_));
+ DCHECK(!RequiresConstructorBarrier(dex_compilation_unit_));
HInstruction* value = LoadLocal(instruction.VRegA(), type);
AppendInstruction(new (allocator_) HReturn(value, dex_pc));
}
@@ -849,7 +846,7 @@ ArtMethod* HInstructionBuilder::ResolveMethod(uint16_t method_idx, InvokeType in
// make this an invoke-unresolved to handle cross-dex invokes or abstract super methods, both of
// which require runtime handling.
if (invoke_type == kSuper) {
- ObjPtr<mirror::Class> compiling_class = ResolveCompilingClass(soa);
+ ObjPtr<mirror::Class> compiling_class = dex_compilation_unit_->GetCompilingClass().Get();
if (compiling_class == nullptr) {
// We could not determine the method's class we need to wait until runtime.
DCHECK(Runtime::Current()->IsAotCompiler());
@@ -879,8 +876,8 @@ ArtMethod* HInstructionBuilder::ResolveMethod(uint16_t method_idx, InvokeType in
// The back-end code generator relies on this check in order to ensure that it will not
// attempt to read the dex_cache with a dex_method_index that is not from the correct
// dex_file. If we didn't do this check then the dex_method_index will not be updated in the
- // builder, which means that the code-generator (and compiler driver during sharpening and
- // inliner, maybe) might invoke an incorrect method.
+ // builder, which means that the code-generator (and sharpening and inliner, maybe)
+ // might invoke an incorrect method.
// TODO: The actual method could still be referenced in the current dex file, so we
// could try locating it.
// TODO: Remove the dex_file restriction.
@@ -969,7 +966,7 @@ bool HInstructionBuilder::BuildInvoke(const Instruction& instruction,
ScopedObjectAccess soa(Thread::Current());
if (invoke_type == kStatic) {
clinit_check =
- ProcessClinitCheckForInvoke(soa, dex_pc, resolved_method, &clinit_check_requirement);
+ ProcessClinitCheckForInvoke(dex_pc, resolved_method, &clinit_check_requirement);
} else if (invoke_type == kSuper) {
if (IsSameDexFile(*resolved_method->GetDexFile(), *dex_compilation_unit_->GetDexFile())) {
// Update the method index to the one resolved. Note that this may be a no-op if
@@ -1055,7 +1052,7 @@ HNewInstance* HInstructionBuilder::BuildNewInstance(dex::TypeIndex type_index, u
HInstruction* cls = load_class;
Handle<mirror::Class> klass = load_class->GetClass();
- if (!IsInitialized(soa, klass)) {
+ if (!IsInitialized(klass)) {
cls = new (allocator_) HClinitCheck(load_class, dex_pc);
AppendInstruction(cls);
}
@@ -1284,7 +1281,7 @@ static bool HasTrivialInitialization(ObjPtr<mirror::Class> cls,
return true;
}
-bool HInstructionBuilder::IsInitialized(ScopedObjectAccess& soa, Handle<mirror::Class> cls) const {
+bool HInstructionBuilder::IsInitialized(Handle<mirror::Class> cls) const {
if (cls == nullptr) {
return false;
}
@@ -1299,7 +1296,7 @@ bool HInstructionBuilder::IsInitialized(ScopedObjectAccess& soa, Handle<mirror::
}
// Assume loaded only if klass is in the boot image. App classes cannot be assumed
// loaded because we don't even know what class loader will be used to load them.
- if (IsInBootImage(cls.Get(), compiler_driver_->GetCompilerOptions())) {
+ if (IsInBootImage(cls.Get(), code_generator_->GetCompilerOptions())) {
return true;
}
}
@@ -1312,29 +1309,20 @@ bool HInstructionBuilder::IsInitialized(ScopedObjectAccess& soa, Handle<mirror::
// can be completely initialized while the superclass is initializing and the subclass
// remains initialized when the superclass initializer throws afterwards. b/62478025
// Note: The HClinitCheck+HInvokeStaticOrDirect merging can still apply.
- ObjPtr<mirror::Class> outermost_cls = ResolveOutermostCompilingClass(soa);
- bool is_outer_static_or_constructor =
- (outer_compilation_unit_->GetAccessFlags() & (kAccStatic | kAccConstructor)) != 0u;
- if (is_outer_static_or_constructor && outermost_cls == cls.Get()) {
+ auto is_static_method_or_constructor_of_cls = [cls](const DexCompilationUnit& compilation_unit)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return (compilation_unit.GetAccessFlags() & (kAccStatic | kAccConstructor)) != 0u &&
+ compilation_unit.GetCompilingClass().Get() == cls.Get();
+ };
+ if (is_static_method_or_constructor_of_cls(*outer_compilation_unit_) ||
+ // Check also the innermost method. Though excessive copies of ClinitCheck can be
+ // eliminated by GVN, that happens only after the decision whether to inline the
+ // graph or not and that may depend on the presence of the ClinitCheck.
+ // TODO: We should walk over the entire inlined method chain, but we don't pass that
+ // information to the builder.
+ is_static_method_or_constructor_of_cls(*dex_compilation_unit_)) {
return true;
}
- // Remember if the compiled class is a subclass of `cls`. By the time this is used
- // below the `outermost_cls` may be invalidated by calling ResolveCompilingClass().
- bool is_subclass = IsSubClass(outermost_cls, cls.Get());
- if (dex_compilation_unit_ != outer_compilation_unit_) {
- // Check also the innermost method. Though excessive copies of ClinitCheck can be
- // eliminated by GVN, that happens only after the decision whether to inline the
- // graph or not and that may depend on the presence of the ClinitCheck.
- // TODO: We should walk over the entire inlined method chain, but we don't pass that
- // information to the builder.
- ObjPtr<mirror::Class> innermost_cls = ResolveCompilingClass(soa);
- bool is_inner_static_or_constructor =
- (dex_compilation_unit_->GetAccessFlags() & (kAccStatic | kAccConstructor)) != 0u;
- if (is_inner_static_or_constructor && innermost_cls == cls.Get()) {
- return true;
- }
- is_subclass = is_subclass || IsSubClass(innermost_cls, cls.Get());
- }
// Otherwise, we may be able to avoid the check if `cls` is a superclass of a method being
// compiled here (anywhere in the inlining chain) as the `cls` must have started initializing
@@ -1355,7 +1343,12 @@ bool HInstructionBuilder::IsInitialized(ScopedObjectAccess& soa, Handle<mirror::
// TODO: We should walk over the entire inlined methods chain, but we don't pass that
// information to the builder. (We could also check if we're guaranteed a non-null instance
// of `cls` at this location but that's outside the scope of the instruction builder.)
- if (is_subclass && HasTrivialInitialization(cls.Get(), compiler_driver_->GetCompilerOptions())) {
+ bool is_subclass = IsSubClass(outer_compilation_unit_->GetCompilingClass().Get(), cls.Get());
+ if (dex_compilation_unit_ != outer_compilation_unit_) {
+ is_subclass = is_subclass ||
+ IsSubClass(dex_compilation_unit_->GetCompilingClass().Get(), cls.Get());
+ }
+ if (is_subclass && HasTrivialInitialization(cls.Get(), code_generator_->GetCompilerOptions())) {
return true;
}
@@ -1363,18 +1356,16 @@ bool HInstructionBuilder::IsInitialized(ScopedObjectAccess& soa, Handle<mirror::
}
HClinitCheck* HInstructionBuilder::ProcessClinitCheckForInvoke(
- ScopedObjectAccess& soa,
uint32_t dex_pc,
ArtMethod* resolved_method,
HInvokeStaticOrDirect::ClinitCheckRequirement* clinit_check_requirement) {
Handle<mirror::Class> klass = handles_->NewHandle(resolved_method->GetDeclaringClass());
HClinitCheck* clinit_check = nullptr;
- if (IsInitialized(soa, klass)) {
+ if (IsInitialized(klass)) {
*clinit_check_requirement = HInvokeStaticOrDirect::ClinitCheckRequirement::kNone;
} else {
- HLoadClass* cls = BuildLoadClass(soa,
- klass->GetDexTypeIndex(),
+ HLoadClass* cls = BuildLoadClass(klass->GetDexTypeIndex(),
klass->GetDexFile(),
klass,
dex_pc,
@@ -1610,43 +1601,6 @@ bool HInstructionBuilder::BuildInstanceFieldAccess(const Instruction& instructio
return true;
}
-static ObjPtr<mirror::Class> ResolveClassFrom(ScopedObjectAccess& soa,
- CompilerDriver* driver,
- const DexCompilationUnit& compilation_unit)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- Handle<mirror::ClassLoader> class_loader = compilation_unit.GetClassLoader();
- Handle<mirror::DexCache> dex_cache = compilation_unit.GetDexCache();
-
- return driver->ResolveCompilingMethodsClass(soa, dex_cache, class_loader, &compilation_unit);
-}
-
-ObjPtr<mirror::Class> HInstructionBuilder::ResolveOutermostCompilingClass(
- ScopedObjectAccess& soa) const {
- return ResolveClassFrom(soa, compiler_driver_, *outer_compilation_unit_);
-}
-
-ObjPtr<mirror::Class> HInstructionBuilder::ResolveCompilingClass(ScopedObjectAccess& soa) const {
- return ResolveClassFrom(soa, compiler_driver_, *dex_compilation_unit_);
-}
-
-bool HInstructionBuilder::IsOutermostCompilingClass(dex::TypeIndex type_index) const {
- ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<2> hs(soa.Self());
- Handle<mirror::DexCache> dex_cache = dex_compilation_unit_->GetDexCache();
- Handle<mirror::ClassLoader> class_loader = dex_compilation_unit_->GetClassLoader();
- Handle<mirror::Class> cls(hs.NewHandle(compiler_driver_->ResolveClass(
- soa, dex_cache, class_loader, type_index, dex_compilation_unit_)));
- Handle<mirror::Class> outer_class(hs.NewHandle(ResolveOutermostCompilingClass(soa)));
-
- // GetOutermostCompilingClass returns null when the class is unresolved
- // (e.g. if it derives from an unresolved class). This is bogus knowing that
- // we are compiling it.
- // When this happens we cannot establish a direct relation between the current
- // class and the outer class, so we return false.
- // (Note that this is only used for optimizing invokes and field accesses)
- return (cls != nullptr) && (outer_class.Get() == cls.Get());
-}
-
void HInstructionBuilder::BuildUnresolvedStaticFieldAccess(const Instruction& instruction,
uint32_t dex_pc,
bool is_put,
@@ -1666,18 +1620,17 @@ void HInstructionBuilder::BuildUnresolvedStaticFieldAccess(const Instruction& in
ArtField* HInstructionBuilder::ResolveField(uint16_t field_idx, bool is_static, bool is_put) {
ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<2> hs(soa.Self());
ClassLinker* class_linker = dex_compilation_unit_->GetClassLinker();
Handle<mirror::ClassLoader> class_loader = dex_compilation_unit_->GetClassLoader();
- Handle<mirror::Class> compiling_class(hs.NewHandle(ResolveCompilingClass(soa)));
ArtField* resolved_field = class_linker->ResolveField(field_idx,
dex_compilation_unit_->GetDexCache(),
class_loader,
is_static);
+ DCHECK_EQ(resolved_field == nullptr, soa.Self()->IsExceptionPending());
if (UNLIKELY(resolved_field == nullptr)) {
- // Clean up any exception left by type resolution.
+ // Clean up any exception left by field resolution.
soa.Self()->ClearException();
return nullptr;
}
@@ -1689,6 +1642,7 @@ ArtField* HInstructionBuilder::ResolveField(uint16_t field_idx, bool is_static,
}
// Check access.
+ Handle<mirror::Class> compiling_class = dex_compilation_unit_->GetCompilingClass();
if (compiling_class == nullptr) {
if (!resolved_field->IsPublic()) {
return nullptr;
@@ -1731,8 +1685,7 @@ void HInstructionBuilder::BuildStaticFieldAccess(const Instruction& instruction,
DataType::Type field_type = GetFieldAccessType(*dex_file_, field_index);
Handle<mirror::Class> klass = handles_->NewHandle(resolved_field->GetDeclaringClass());
- HLoadClass* constant = BuildLoadClass(soa,
- klass->GetDexTypeIndex(),
+ HLoadClass* constant = BuildLoadClass(klass->GetDexTypeIndex(),
klass->GetDexFile(),
klass,
dex_pc,
@@ -1748,7 +1701,7 @@ void HInstructionBuilder::BuildStaticFieldAccess(const Instruction& instruction,
}
HInstruction* cls = constant;
- if (!IsInitialized(soa, klass)) {
+ if (!IsInitialized(klass)) {
cls = new (allocator_) HClinitCheck(constant, dex_pc);
AppendInstruction(cls);
}
@@ -1989,12 +1942,11 @@ HLoadClass* HInstructionBuilder::BuildLoadClass(dex::TypeIndex type_index, uint3
ScopedObjectAccess soa(Thread::Current());
const DexFile& dex_file = *dex_compilation_unit_->GetDexFile();
Handle<mirror::Class> klass = ResolveClass(soa, type_index);
- bool needs_access_check = LoadClassNeedsAccessCheck(soa, klass);
- return BuildLoadClass(soa, type_index, dex_file, klass, dex_pc, needs_access_check);
+ bool needs_access_check = LoadClassNeedsAccessCheck(klass);
+ return BuildLoadClass(type_index, dex_file, klass, dex_pc, needs_access_check);
}
-HLoadClass* HInstructionBuilder::BuildLoadClass(ScopedObjectAccess& soa,
- dex::TypeIndex type_index,
+HLoadClass* HInstructionBuilder::BuildLoadClass(dex::TypeIndex type_index,
const DexFile& dex_file,
Handle<mirror::Class> klass,
uint32_t dex_pc,
@@ -2011,11 +1963,8 @@ HLoadClass* HInstructionBuilder::BuildLoadClass(ScopedObjectAccess& soa,
}
// Note: `klass` must be from `handles_`.
- bool is_referrers_class = false;
- if (klass != nullptr) {
- ObjPtr<mirror::Class> outermost_cls = ResolveOutermostCompilingClass(soa);
- is_referrers_class = (outermost_cls == klass.Get());
- }
+ bool is_referrers_class =
+ (klass != nullptr) && (outer_compilation_unit_->GetCompilingClass().Get() == klass.Get());
HLoadClass* load_class = new (allocator_) HLoadClass(
graph_->GetCurrentMethod(),
type_index,
@@ -2041,22 +1990,28 @@ HLoadClass* HInstructionBuilder::BuildLoadClass(ScopedObjectAccess& soa,
Handle<mirror::Class> HInstructionBuilder::ResolveClass(ScopedObjectAccess& soa,
dex::TypeIndex type_index) {
- Handle<mirror::ClassLoader> class_loader = dex_compilation_unit_->GetClassLoader();
- ObjPtr<mirror::Class> klass = compiler_driver_->ResolveClass(
- soa, dex_compilation_unit_->GetDexCache(), class_loader, type_index, dex_compilation_unit_);
- // TODO: Avoid creating excessive handles if the method references the same class repeatedly.
- // (Use a map on the local_allocator_.)
- return handles_->NewHandle(klass);
+ auto it = class_cache_.find(type_index);
+ if (it != class_cache_.end()) {
+ return it->second;
+ }
+
+ ObjPtr<mirror::Class> klass = dex_compilation_unit_->GetClassLinker()->ResolveType(
+ type_index, dex_compilation_unit_->GetDexCache(), dex_compilation_unit_->GetClassLoader());
+ DCHECK_EQ(klass == nullptr, soa.Self()->IsExceptionPending());
+ soa.Self()->ClearException(); // Clean up the exception left by type resolution if any.
+
+ Handle<mirror::Class> h_klass = handles_->NewHandle(klass);
+ class_cache_.Put(type_index, h_klass);
+ return h_klass;
}
-bool HInstructionBuilder::LoadClassNeedsAccessCheck(ScopedObjectAccess& soa,
- Handle<mirror::Class> klass) {
+bool HInstructionBuilder::LoadClassNeedsAccessCheck(Handle<mirror::Class> klass) {
if (klass == nullptr) {
return true;
} else if (klass->IsPublic()) {
return false;
} else {
- ObjPtr<mirror::Class> compiling_class = ResolveCompilingClass(soa);
+ ObjPtr<mirror::Class> compiling_class = dex_compilation_unit_->GetCompilingClass().Get();
return compiling_class == nullptr || !compiling_class->CanAccess(klass.Get());
}
}
@@ -2085,7 +2040,7 @@ void HInstructionBuilder::BuildTypeCheck(const Instruction& instruction,
ScopedObjectAccess soa(Thread::Current());
const DexFile& dex_file = *dex_compilation_unit_->GetDexFile();
Handle<mirror::Class> klass = ResolveClass(soa, type_index);
- bool needs_access_check = LoadClassNeedsAccessCheck(soa, klass);
+ bool needs_access_check = LoadClassNeedsAccessCheck(klass);
TypeCheckKind check_kind = HSharpening::ComputeTypeCheckKind(
klass.Get(), code_generator_, needs_access_check);
@@ -2103,7 +2058,7 @@ void HInstructionBuilder::BuildTypeCheck(const Instruction& instruction,
bitstring_path_to_root = graph_->GetIntConstant(static_cast<int32_t>(path_to_root), dex_pc);
bitstring_mask = graph_->GetIntConstant(static_cast<int32_t>(mask), dex_pc);
} else {
- class_or_null = BuildLoadClass(soa, type_index, dex_file, klass, dex_pc, needs_access_check);
+ class_or_null = BuildLoadClass(type_index, dex_file, klass, dex_pc, needs_access_check);
}
DCHECK(class_or_null != nullptr);
diff --git a/compiler/optimizing/instruction_builder.h b/compiler/optimizing/instruction_builder.h
index 2ab2139216..d701445946 100644
--- a/compiler/optimizing/instruction_builder.h
+++ b/compiler/optimizing/instruction_builder.h
@@ -34,7 +34,6 @@ class ArenaBitVector;
class ArtField;
class ArtMethod;
class CodeGenerator;
-class CompilerDriver;
class DexCompilationUnit;
class HBasicBlockBuilder;
class Instruction;
@@ -59,7 +58,6 @@ class HInstructionBuilder : public ValueObject {
DataType::Type return_type,
const DexCompilationUnit* dex_compilation_unit,
const DexCompilationUnit* outer_compilation_unit,
- CompilerDriver* compiler_driver,
CodeGenerator* code_generator,
ArrayRef<const uint8_t> interpreter_metadata,
OptimizingCompilerStats* compiler_stats,
@@ -222,8 +220,7 @@ class HInstructionBuilder : public ValueObject {
// Builds a `HLoadClass` loading the given `type_index`.
HLoadClass* BuildLoadClass(dex::TypeIndex type_index, uint32_t dex_pc);
- HLoadClass* BuildLoadClass(ScopedObjectAccess& soa,
- dex::TypeIndex type_index,
+ HLoadClass* BuildLoadClass(dex::TypeIndex type_index,
const DexFile& dex_file,
Handle<mirror::Class> klass,
uint32_t dex_pc,
@@ -233,7 +230,7 @@ class HInstructionBuilder : public ValueObject {
Handle<mirror::Class> ResolveClass(ScopedObjectAccess& soa, dex::TypeIndex type_index)
REQUIRES_SHARED(Locks::mutator_lock_);
- bool LoadClassNeedsAccessCheck(ScopedObjectAccess& soa, Handle<mirror::Class> klass)
+ bool LoadClassNeedsAccessCheck(Handle<mirror::Class> klass)
REQUIRES_SHARED(Locks::mutator_lock_);
// Builds a `HLoadMethodHandle` loading the given `method_handle_index`.
@@ -242,17 +239,6 @@ class HInstructionBuilder : public ValueObject {
// Builds a `HLoadMethodType` loading the given `proto_index`.
void BuildLoadMethodType(dex::ProtoIndex proto_index, uint32_t dex_pc);
- // Returns the outer-most compiling method's class.
- ObjPtr<mirror::Class> ResolveOutermostCompilingClass(ScopedObjectAccess& soa) const
- REQUIRES_SHARED(Locks::mutator_lock_);
-
- // Returns the class whose method is being compiled.
- ObjPtr<mirror::Class> ResolveCompilingClass(ScopedObjectAccess& soa) const
- REQUIRES_SHARED(Locks::mutator_lock_);
-
- // Returns whether `type_index` points to the outer-most compiling method's class.
- bool IsOutermostCompilingClass(dex::TypeIndex type_index) const;
-
void PotentiallySimplifyFakeString(uint16_t original_dex_register,
uint32_t dex_pc,
HInvoke* invoke);
@@ -275,7 +261,6 @@ class HInstructionBuilder : public ValueObject {
void HandleStringInitResult(HInvokeStaticOrDirect* invoke);
HClinitCheck* ProcessClinitCheckForInvoke(
- ScopedObjectAccess& soa,
uint32_t dex_pc,
ArtMethod* method,
HInvokeStaticOrDirect::ClinitCheckRequirement* clinit_check_requirement)
@@ -289,7 +274,7 @@ class HInstructionBuilder : public ValueObject {
void BuildConstructorFenceForAllocation(HInstruction* allocation);
// Return whether the compiler can assume `cls` is initialized.
- bool IsInitialized(ScopedObjectAccess& soa, Handle<mirror::Class> cls) const
+ bool IsInitialized(Handle<mirror::Class> cls) const
REQUIRES_SHARED(Locks::mutator_lock_);
// Try to resolve a method using the class linker. Return null if a method could
@@ -320,8 +305,6 @@ class HInstructionBuilder : public ValueObject {
HBasicBlockBuilder* const block_builder_;
SsaBuilder* const ssa_builder_;
- CompilerDriver* const compiler_driver_;
-
CodeGenerator* const code_generator_;
// The compilation unit of the current method being compiled. Note that
@@ -351,6 +334,10 @@ class HInstructionBuilder : public ValueObject {
ScopedArenaVector<HBasicBlock*> loop_headers_;
+ // Cached resolved types for the current compilation unit's DexFile.
+ // Handle<>s reference entries in the `handles_`.
+ ScopedArenaSafeMap<dex::TypeIndex, Handle<mirror::Class>> class_cache_;
+
static constexpr int kDefaultNumberOfLoops = 2;
DISALLOW_COPY_AND_ASSIGN(HInstructionBuilder);
diff --git a/compiler/optimizing/instruction_simplifier_arm.cc b/compiler/optimizing/instruction_simplifier_arm.cc
index 24fbb6cb4c..f968c19cf9 100644
--- a/compiler/optimizing/instruction_simplifier_arm.cc
+++ b/compiler/optimizing/instruction_simplifier_arm.cc
@@ -202,6 +202,11 @@ void InstructionSimplifierArmVisitor::VisitArrayGet(HArrayGet* instruction) {
return;
}
+ // TODO: Support intermediate address for object arrays on arm.
+ if (type == DataType::Type::kReference) {
+ return;
+ }
+
if (type == DataType::Type::kInt64
|| type == DataType::Type::kFloat32
|| type == DataType::Type::kFloat64) {
diff --git a/compiler/optimizing/instruction_simplifier_shared.cc b/compiler/optimizing/instruction_simplifier_shared.cc
index ccdcb3532d..0f30f662cd 100644
--- a/compiler/optimizing/instruction_simplifier_shared.cc
+++ b/compiler/optimizing/instruction_simplifier_shared.cc
@@ -245,11 +245,11 @@ bool TryExtractArrayAccessAddress(HInstruction* access,
return false;
}
if (kEmitCompilerReadBarrier &&
+ !kUseBakerReadBarrier &&
access->IsArrayGet() &&
access->GetType() == DataType::Type::kReference) {
- // For object arrays, the read barrier instrumentation requires
+ // For object arrays, the non-Baker read barrier instrumentation requires
// the original array pointer.
- // TODO: This can be relaxed for Baker CC.
return false;
}
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index 8245453ab5..5bd1122698 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -24,7 +24,6 @@
namespace art {
-class CompilerDriver;
class DexFile;
// Positive floating-point infinities.
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 7684dc79f2..6d04b0e9d9 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -2916,6 +2916,40 @@ void IntrinsicLocationsBuilderARM64::VisitReachabilityFence(HInvoke* invoke) {
void IntrinsicCodeGeneratorARM64::VisitReachabilityFence(HInvoke* invoke ATTRIBUTE_UNUSED) { }
+void IntrinsicLocationsBuilderARM64::VisitCRC32Update(HInvoke* invoke) {
+ if (!codegen_->GetInstructionSetFeatures().HasCRC()) {
+ return;
+ }
+
+ LocationSummary* locations = new (allocator_) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister());
+}
+
+// Lower the invoke of CRC32.update(int crc, int b).
+void IntrinsicCodeGeneratorARM64::VisitCRC32Update(HInvoke* invoke) {
+ DCHECK(codegen_->GetInstructionSetFeatures().HasCRC());
+
+ MacroAssembler* masm = GetVIXLAssembler();
+
+ Register crc = InputRegisterAt(invoke, 0);
+ Register val = InputRegisterAt(invoke, 1);
+ Register out = OutputRegister(invoke);
+
+ // The general algorithm of the CRC32 calculation is:
+ // crc = ~crc
+ // result = crc32_for_byte(crc, b)
+ // crc = ~result
+ // It is directly lowered to three instructions.
+ __ Mvn(out, crc);
+ __ Crc32b(out, out, val);
+ __ Mvn(out, out);
+}
+
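For reference, a minimal scalar sketch of what the three-instruction lowering above computes for one CRC32.update(crc, b) call, assuming the standard reflected CRC-32 polynomial 0xEDB88320 (the polynomial the AArch64 CRC32B instruction implements); illustrative only, not ART code:

#include <cstdint>

uint32_t Crc32UpdateByte(uint32_t crc, uint8_t b) {
  crc = ~crc;                    // Mvn(out, crc): undo the stored final inversion.
  crc ^= b;                      // Crc32b folds the byte into the accumulator...
  for (int i = 0; i < 8; ++i) {  // ...and advances it by eight bits in hardware.
    crc = (crc >> 1) ^ ((crc & 1u) ? 0xEDB88320u : 0u);
  }
  return ~crc;                   // Mvn(out, out): re-apply the final inversion.
}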
UNIMPLEMENTED_INTRINSIC(ARM64, ReferenceGetReferent)
UNIMPLEMENTED_INTRINSIC(ARM64, StringStringIndexOf);
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index 38e4c8968a..0c463a280e 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -3059,6 +3059,7 @@ UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathRoundDouble) // Could be done by changing
UNIMPLEMENTED_INTRINSIC(ARMVIXL, UnsafeCASLong) // High register pressure.
UNIMPLEMENTED_INTRINSIC(ARMVIXL, SystemArrayCopyChar)
UNIMPLEMENTED_INTRINSIC(ARMVIXL, ReferenceGetReferent)
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, CRC32Update)
UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringStringIndexOf);
UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringStringIndexOfAfter);
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index 6f7f5e49c1..21fb7d7f1c 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -2696,6 +2696,8 @@ UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeCASLong)
UNIMPLEMENTED_INTRINSIC(MIPS, ReferenceGetReferent)
UNIMPLEMENTED_INTRINSIC(MIPS, SystemArrayCopy)
+UNIMPLEMENTED_INTRINSIC(MIPS, CRC32Update)
+
UNIMPLEMENTED_INTRINSIC(MIPS, StringStringIndexOf);
UNIMPLEMENTED_INTRINSIC(MIPS, StringStringIndexOfAfter);
UNIMPLEMENTED_INTRINSIC(MIPS, StringBufferAppend);
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 2eb252908c..4b86f5d423 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -2346,6 +2346,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitReachabilityFence(HInvoke* invoke ATTRIB
UNIMPLEMENTED_INTRINSIC(MIPS64, ReferenceGetReferent)
UNIMPLEMENTED_INTRINSIC(MIPS64, SystemArrayCopy)
+UNIMPLEMENTED_INTRINSIC(MIPS64, CRC32Update)
UNIMPLEMENTED_INTRINSIC(MIPS64, StringStringIndexOf);
UNIMPLEMENTED_INTRINSIC(MIPS64, StringStringIndexOfAfter);
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 3504d7a6f8..6dd4681847 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -2967,6 +2967,7 @@ UNIMPLEMENTED_INTRINSIC(X86, IntegerHighestOneBit)
UNIMPLEMENTED_INTRINSIC(X86, LongHighestOneBit)
UNIMPLEMENTED_INTRINSIC(X86, IntegerLowestOneBit)
UNIMPLEMENTED_INTRINSIC(X86, LongLowestOneBit)
+UNIMPLEMENTED_INTRINSIC(X86, CRC32Update)
UNIMPLEMENTED_INTRINSIC(X86, StringStringIndexOf);
UNIMPLEMENTED_INTRINSIC(X86, StringStringIndexOfAfter);
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 96f6eaaf33..7db26dc9be 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -2732,6 +2732,7 @@ void IntrinsicCodeGeneratorX86_64::VisitReachabilityFence(HInvoke* invoke ATTRIB
UNIMPLEMENTED_INTRINSIC(X86_64, ReferenceGetReferent)
UNIMPLEMENTED_INTRINSIC(X86_64, FloatIsInfinite)
UNIMPLEMENTED_INTRINSIC(X86_64, DoubleIsInfinite)
+UNIMPLEMENTED_INTRINSIC(X86_64, CRC32Update)
UNIMPLEMENTED_INTRINSIC(X86_64, StringStringIndexOf);
UNIMPLEMENTED_INTRINSIC(X86_64, StringStringIndexOfAfter);
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 6ebe89eae1..21243804fb 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -7402,7 +7402,7 @@ class HMemoryBarrier final : public HExpression<0> {
// }
//
// See also:
-// * CompilerDriver::RequiresConstructorBarrier
+// * DexCompilationUnit::RequiresConstructorBarrier
// * QuasiAtomic::ThreadFenceForConstructor
//
class HConstructorFence final : public HVariableInputSizeInstruction {
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index a95ddff188..4f495b6d81 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -870,7 +870,6 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
code_item_accessor,
&dex_compilation_unit,
&dex_compilation_unit,
- compiler_driver,
codegen.get(),
compilation_stats_.get(),
interpreter_metadata,
@@ -991,7 +990,6 @@ CodeGenerator* OptimizingCompiler::TryCompileIntrinsic(
CodeItemDebugInfoAccessor(), // Null code item.
&dex_compilation_unit,
&dex_compilation_unit,
- compiler_driver,
codegen.get(),
compilation_stats_.get(),
/* interpreter_metadata */ ArrayRef<const uint8_t>(),
@@ -1056,6 +1054,15 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
std::unique_ptr<CodeGenerator> codegen;
bool compiled_intrinsic = false;
{
+ ScopedObjectAccess soa(Thread::Current());
+ ArtMethod* method =
+ runtime->GetClassLinker()->ResolveMethod<ClassLinker::ResolveMode::kCheckICCEAndIAE>(
+ method_idx, dex_cache, jclass_loader, /*referrer=*/ nullptr, invoke_type);
+ DCHECK_EQ(method == nullptr, soa.Self()->IsExceptionPending());
+ soa.Self()->ClearException(); // Suppress exception if any.
+ VariableSizedHandleScope handles(soa.Self());
+ Handle<mirror::Class> compiling_class =
+ handles.NewHandle(method != nullptr ? method->GetDeclaringClass() : nullptr);
DexCompilationUnit dex_compilation_unit(
jclass_loader,
runtime->GetClassLinker(),
@@ -1064,12 +1071,9 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
class_def_idx,
method_idx,
access_flags,
- /* verified_method */ nullptr, // Not needed by the Optimizing compiler.
- dex_cache);
- ScopedObjectAccess soa(Thread::Current());
- ArtMethod* method = compiler_driver->ResolveMethod(
- soa, dex_cache, jclass_loader, &dex_compilation_unit, method_idx, invoke_type);
- VariableSizedHandleScope handles(soa.Self());
+ /*verified_method=*/ nullptr, // Not needed by the Optimizing compiler.
+ dex_cache,
+ compiling_class);
// Go to native so that we don't block GC during compilation.
ScopedThreadSuspension sts(soa.Self(), kNative);
if (method != nullptr && UNLIKELY(method->IsIntrinsic())) {
@@ -1171,21 +1175,23 @@ CompiledMethod* OptimizingCompiler::JniCompile(uint32_t access_flags,
if (compiler_options.IsBootImage()) {
ScopedObjectAccess soa(Thread::Current());
ArtMethod* method = runtime->GetClassLinker()->LookupResolvedMethod(
- method_idx, dex_cache.Get(), /* class_loader */ nullptr);
+ method_idx, dex_cache.Get(), /*class_loader=*/ nullptr);
if (method != nullptr && UNLIKELY(method->IsIntrinsic())) {
+ VariableSizedHandleScope handles(soa.Self());
ScopedNullHandle<mirror::ClassLoader> class_loader; // null means boot class path loader.
+ Handle<mirror::Class> compiling_class = handles.NewHandle(method->GetDeclaringClass());
DexCompilationUnit dex_compilation_unit(
class_loader,
runtime->GetClassLinker(),
dex_file,
- /* code_item */ nullptr,
- /* class_def_idx */ DexFile::kDexNoIndex16,
+ /*code_item=*/ nullptr,
+ /*class_def_idx=*/ DexFile::kDexNoIndex16,
method_idx,
access_flags,
- /* verified_method */ nullptr,
- dex_cache);
+ /*verified_method=*/ nullptr,
+ dex_cache,
+ compiling_class);
CodeVectorAllocator code_allocator(&allocator);
- VariableSizedHandleScope handles(soa.Self());
// Go to native so that we don't block GC during compilation.
ScopedThreadSuspension sts(soa.Self(), kNative);
std::unique_ptr<CodeGenerator> codegen(
@@ -1349,6 +1355,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
std::unique_ptr<CodeGenerator> codegen;
{
+ Handle<mirror::Class> compiling_class = handles.NewHandle(method->GetDeclaringClass());
DexCompilationUnit dex_compilation_unit(
class_loader,
runtime->GetClassLinker(),
@@ -1357,8 +1364,9 @@ bool OptimizingCompiler::JitCompile(Thread* self,
class_def_idx,
method_idx,
access_flags,
- /* verified_method */ nullptr,
- dex_cache);
+ /*verified_method=*/ nullptr,
+ dex_cache,
+ compiling_class);
// Go to native so that we don't block GC during compilation.
ScopedThreadSuspension sts(self, kNative);
diff --git a/dex2oat/dex2oat_test.cc b/dex2oat/dex2oat_test.cc
index 10d2b6f5ce..baeebd9371 100644
--- a/dex2oat/dex2oat_test.cc
+++ b/dex2oat/dex2oat_test.cc
@@ -23,6 +23,7 @@
#include <unistd.h>
#include <android-base/logging.h>
+#include <android-base/macros.h>
#include <android-base/stringprintf.h>
#include "common_runtime_test.h"
@@ -90,6 +91,10 @@ class Dex2oatTest : public Dex2oatEnvironmentTest {
args.push_back("--runtime-arg");
args.push_back("-Xnorelocate");
+ // Unless otherwise stated, use a small number of threads, so that potential aborts are
+ // shorter. This can be overridden with extra_args.
+ args.push_back("-j4");
+
args.insert(args.end(), extra_args.begin(), extra_args.end());
int status = Dex2Oat(args, error_msg);
@@ -99,33 +104,33 @@ class Dex2oatTest : public Dex2oatEnvironmentTest {
return status;
}
- void GenerateOdexForTest(
+ ::testing::AssertionResult GenerateOdexForTest(
const std::string& dex_location,
const std::string& odex_location,
CompilerFilter::Filter filter,
const std::vector<std::string>& extra_args = {},
bool expect_success = true,
- bool use_fd = false) {
- GenerateOdexForTest(dex_location,
- odex_location,
- filter,
- extra_args,
- expect_success,
- use_fd,
- [](const OatFile&) {});
+ bool use_fd = false) WARN_UNUSED {
+ return GenerateOdexForTest(dex_location,
+ odex_location,
+ filter,
+ extra_args,
+ expect_success,
+ use_fd,
+ [](const OatFile&) {});
}
bool test_accepts_odex_file_on_failure = false;
template <typename T>
- void GenerateOdexForTest(
+ ::testing::AssertionResult GenerateOdexForTest(
const std::string& dex_location,
const std::string& odex_location,
CompilerFilter::Filter filter,
const std::vector<std::string>& extra_args,
bool expect_success,
bool use_fd,
- T check_oat) {
+ T check_oat) WARN_UNUSED {
std::string error_msg;
int status = GenerateOdexForTestWithStatus({dex_location},
odex_location,
@@ -135,7 +140,10 @@ class Dex2oatTest : public Dex2oatEnvironmentTest {
use_fd);
bool success = (WIFEXITED(status) && WEXITSTATUS(status) == 0);
if (expect_success) {
- ASSERT_TRUE(success) << error_msg << std::endl << output_;
+ if (!success) {
+ return ::testing::AssertionFailure()
+ << "Failed to compile odex: " << error_msg << std::endl << output_;
+ }
// Verify the odex file was generated as expected.
std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
@@ -146,12 +154,16 @@ class Dex2oatTest : public Dex2oatEnvironmentTest {
dex_location.c_str(),
/*reservation=*/ nullptr,
&error_msg));
- ASSERT_TRUE(odex_file.get() != nullptr) << error_msg;
+ if (odex_file == nullptr) {
+ return ::testing::AssertionFailure() << "Could not open odex file: " << error_msg;
+ }
CheckFilter(filter, odex_file->GetCompilerFilter());
check_oat(*(odex_file.get()));
} else {
- ASSERT_FALSE(success) << output_;
+ if (success) {
+ return ::testing::AssertionFailure() << "Compilation succeeded unexpectedly: " << output_;
+ }
error_msg_ = error_msg;
@@ -165,9 +177,12 @@ class Dex2oatTest : public Dex2oatEnvironmentTest {
dex_location.c_str(),
/*reservation=*/ nullptr,
&error_msg));
- ASSERT_TRUE(odex_file.get() == nullptr);
+ if (odex_file != nullptr) {
+ return ::testing::AssertionFailure() << "Unexpectedly able to open odex file: " << error_msg;
+ }
}
}
+ return ::testing::AssertionSuccess();
}
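The change above converts the test helper from a void function using ASSERT_* (whose failures cannot propagate out of the helper) into one returning ::testing::AssertionResult, so each call site decides whether a failure is fatal. A minimal sketch of that gtest pattern, with hypothetical names; [[nodiscard]] plays the role of the WARN_UNUSED annotation on the helper:

#include <string>
#include <gtest/gtest.h>

[[nodiscard]] ::testing::AssertionResult CompileOk(bool success, const std::string& log) {
  if (!success) {
    return ::testing::AssertionFailure() << "compilation failed: " << log;
  }
  return ::testing::AssertionSuccess();
}

TEST(AssertionResultPattern, CallerChoosesSeverity) {
  ASSERT_TRUE(CompileOk(/*success=*/ true, /*log=*/ ""));  // Fatal at this call site if it fails.
  EXPECT_TRUE(CompileOk(/*success=*/ true, /*log=*/ ""));  // Non-fatal at this call site.
}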
// Check the input compiler filter against the generated oat file's filter. May be overridden
@@ -265,7 +280,7 @@ class Dex2oatSwapTest : public Dex2oatTest {
std::string swap_location = GetOdexDir() + "/Dex2OatSwapTest.odex.swap";
copy.push_back("--swap-file=" + swap_location);
}
- GenerateOdexForTest(dex_location, odex_location, CompilerFilter::kSpeed, copy);
+ ASSERT_TRUE(GenerateOdexForTest(dex_location, odex_location, CompilerFilter::kSpeed, copy));
CheckValidity();
ASSERT_TRUE(success_);
@@ -490,7 +505,7 @@ class Dex2oatVeryLargeTest : public Dex2oatTest {
std::vector<std::string> new_args(extra_args);
new_args.push_back("--app-image-file=" + app_image_file);
- GenerateOdexForTest(dex_location, odex_location, filter, new_args);
+ ASSERT_TRUE(GenerateOdexForTest(dex_location, odex_location, filter, new_args));
CheckValidity();
ASSERT_TRUE(success_);
@@ -681,12 +696,12 @@ class Dex2oatLayoutTest : public Dex2oatTest {
copy.push_back("--app-image-file=" + app_image_file_name);
}
}
- GenerateOdexForTest(dex_location,
- odex_location,
- CompilerFilter::kSpeedProfile,
- copy,
- expect_success,
- use_fd);
+ ASSERT_TRUE(GenerateOdexForTest(dex_location,
+ odex_location,
+ CompilerFilter::kSpeedProfile,
+ copy,
+ expect_success,
+ use_fd));
if (app_image_file != nullptr) {
ASSERT_EQ(app_image_file->FlushCloseOrErase(), 0) << "Could not flush and close art file";
}
@@ -877,24 +892,24 @@ class Dex2oatUnquickenTest : public Dex2oatTest {
{
std::string input_vdex = "--input-vdex-fd=-1";
std::string output_vdex = StringPrintf("--output-vdex-fd=%d", vdex_file1->Fd());
- GenerateOdexForTest(dex_location,
- odex_location,
- CompilerFilter::kQuicken,
- { input_vdex, output_vdex },
- /*expect_success=*/ true,
- /*use_fd=*/ true);
+ ASSERT_TRUE(GenerateOdexForTest(dex_location,
+ odex_location,
+ CompilerFilter::kQuicken,
+ { input_vdex, output_vdex },
+ /* expect_success= */ true,
+ /* use_fd= */ true));
EXPECT_GT(vdex_file1->GetLength(), 0u);
}
// Unquicken by running the verify compiler filter on the vdex file.
{
std::string input_vdex = StringPrintf("--input-vdex-fd=%d", vdex_file1->Fd());
std::string output_vdex = StringPrintf("--output-vdex-fd=%d", vdex_file1->Fd());
- GenerateOdexForTest(dex_location,
- odex_location,
- CompilerFilter::kVerify,
- { input_vdex, output_vdex, kDisableCompactDex },
- /*expect_success=*/ true,
- /*use_fd=*/ true);
+ ASSERT_TRUE(GenerateOdexForTest(dex_location,
+ odex_location,
+ CompilerFilter::kVerify,
+ { input_vdex, output_vdex, kDisableCompactDex },
+ /* expect_success= */ true,
+ /* use_fd= */ true));
}
ASSERT_EQ(vdex_file1->FlushCloseOrErase(), 0) << "Could not flush and close vdex file";
CheckResult(dex_location, odex_location);
@@ -918,12 +933,12 @@ class Dex2oatUnquickenTest : public Dex2oatTest {
{
std::string input_vdex = "--input-vdex-fd=-1";
std::string output_vdex = StringPrintf("--output-vdex-fd=%d", vdex_file1->Fd());
- GenerateOdexForTest(dex_location,
- odex_location,
- CompilerFilter::kQuicken,
- { input_vdex, output_vdex, "--compact-dex-level=fast"},
- /*expect_success=*/ true,
- /*use_fd=*/ true);
+ ASSERT_TRUE(GenerateOdexForTest(dex_location,
+ odex_location,
+ CompilerFilter::kQuicken,
+ { input_vdex, output_vdex, "--compact-dex-level=fast"},
+ /* expect_success= */ true,
+ /* use_fd= */ true));
EXPECT_GT(vdex_file1->GetLength(), 0u);
}
@@ -931,12 +946,12 @@ class Dex2oatUnquickenTest : public Dex2oatTest {
{
std::string input_vdex = StringPrintf("--input-vdex-fd=%d", vdex_file1->Fd());
std::string output_vdex = StringPrintf("--output-vdex-fd=%d", vdex_file2->Fd());
- GenerateOdexForTest(dex_location,
- odex_location2,
- CompilerFilter::kVerify,
- { input_vdex, output_vdex, "--compact-dex-level=none"},
- /*expect_success=*/ true,
- /*use_fd=*/ true);
+ ASSERT_TRUE(GenerateOdexForTest(dex_location,
+ odex_location2,
+ CompilerFilter::kVerify,
+ { input_vdex, output_vdex, "--compact-dex-level=none"},
+ /* expect_success= */ true,
+ /* use_fd= */ true));
}
ASSERT_EQ(vdex_file1->FlushCloseOrErase(), 0) << "Could not flush and close vdex file";
ASSERT_EQ(vdex_file2->FlushCloseOrErase(), 0) << "Could not flush and close vdex file";
@@ -992,11 +1007,11 @@ class Dex2oatWatchdogTest : public Dex2oatTest {
std::string swap_location = GetOdexDir() + "/Dex2OatSwapTest.odex.swap";
copy.push_back("--swap-file=" + swap_location);
copy.push_back("-j512"); // Excessive idle threads just slow down dex2oat.
- GenerateOdexForTest(dex_location,
- odex_location,
- CompilerFilter::kSpeed,
- copy,
- expect_success);
+ ASSERT_TRUE(GenerateOdexForTest(dex_location,
+ odex_location,
+ CompilerFilter::kSpeed,
+ copy,
+ expect_success));
}
std::string GetTestDexFileName() {
@@ -1072,13 +1087,13 @@ class Dex2oatClassLoaderContextTest : public Dex2oatTest {
ASSERT_STREQ(expected_classpath_key, classpath);
};
- GenerateOdexForTest(dex_location,
- odex_location,
- CompilerFilter::kQuicken,
- extra_args,
- expected_success,
- /*use_fd*/ false,
- check_oat);
+ ASSERT_TRUE(GenerateOdexForTest(dex_location,
+ odex_location,
+ CompilerFilter::kQuicken,
+ extra_args,
+ expected_success,
+ /*use_fd*/ false,
+ check_oat));
}
std::string GetUsedDexLocation() {
@@ -1135,11 +1150,11 @@ TEST_F(Dex2oatClassLoaderContextTest, ContextWithStrippedDexFilesBackedByOdex) {
Copy(GetDexSrc1(), stripped_classpath);
- GenerateOdexForTest(stripped_classpath,
- odex_for_classpath,
- CompilerFilter::kQuicken,
- {},
- true);
+ ASSERT_TRUE(GenerateOdexForTest(stripped_classpath,
+ odex_for_classpath,
+ CompilerFilter::kQuicken,
+ {},
+ true));
// Strip the dex file
Copy(GetStrippedDexSrc1(), stripped_classpath);
@@ -1216,7 +1231,7 @@ TEST_F(Dex2oatDeterminism, UnloadCompile) {
CompilerFilter::Filter::kQuicken,
&error_msg,
{"--force-determinism", "--avoid-storing-invocation"});
- EXPECT_EQ(res, 0);
+ ASSERT_EQ(res, 0);
Copy(base_oat_name, unload_oat_name);
Copy(base_vdex_name, unload_vdex_name);
std::unique_ptr<File> unload_oat(OS::OpenFileForReading(unload_oat_name.c_str()));
@@ -1233,7 +1248,7 @@ TEST_F(Dex2oatDeterminism, UnloadCompile) {
CompilerFilter::Filter::kQuicken,
&error_msg,
{"--force-determinism", "--avoid-storing-invocation", "--app-image-file=" + app_image_name});
- EXPECT_EQ(res2, 0);
+ ASSERT_EQ(res2, 0);
Copy(base_oat_name, no_unload_oat_name);
Copy(base_vdex_name, no_unload_vdex_name);
std::unique_ptr<File> no_unload_oat(OS::OpenFileForReading(no_unload_oat_name.c_str()));
@@ -1535,26 +1550,26 @@ TEST_F(Dex2oatDedupeCode, DedupeTest) {
std::string out_dir = GetScratchDir();
const std::string base_oat_name = out_dir + "/base.oat";
size_t no_dedupe_size = 0;
- GenerateOdexForTest(dex->GetLocation(),
- base_oat_name,
- CompilerFilter::Filter::kSpeed,
- { "--deduplicate-code=false" },
- true, // expect_success
- false, // use_fd
- [&no_dedupe_size](const OatFile& o) {
- no_dedupe_size = o.Size();
- });
+ ASSERT_TRUE(GenerateOdexForTest(dex->GetLocation(),
+ base_oat_name,
+ CompilerFilter::Filter::kSpeed,
+ { "--deduplicate-code=false" },
+ true, // expect_success
+ false, // use_fd
+ [&no_dedupe_size](const OatFile& o) {
+ no_dedupe_size = o.Size();
+ }));
size_t dedupe_size = 0;
- GenerateOdexForTest(dex->GetLocation(),
- base_oat_name,
- CompilerFilter::Filter::kSpeed,
- { "--deduplicate-code=true" },
- true, // expect_success
- false, // use_fd
- [&dedupe_size](const OatFile& o) {
- dedupe_size = o.Size();
- });
+ ASSERT_TRUE(GenerateOdexForTest(dex->GetLocation(),
+ base_oat_name,
+ CompilerFilter::Filter::kSpeed,
+ { "--deduplicate-code=true" },
+ true, // expect_success
+ false, // use_fd
+ [&dedupe_size](const OatFile& o) {
+ dedupe_size = o.Size();
+ }));
EXPECT_LT(dedupe_size, no_dedupe_size);
}
@@ -1563,15 +1578,15 @@ TEST_F(Dex2oatTest, UncompressedTest) {
std::unique_ptr<const DexFile> dex(OpenTestDexFile("MainUncompressed"));
std::string out_dir = GetScratchDir();
const std::string base_oat_name = out_dir + "/base.oat";
- GenerateOdexForTest(dex->GetLocation(),
- base_oat_name,
- CompilerFilter::Filter::kQuicken,
- { },
- true, // expect_success
- false, // use_fd
- [](const OatFile& o) {
- CHECK(!o.ContainsDexCode());
- });
+ ASSERT_TRUE(GenerateOdexForTest(dex->GetLocation(),
+ base_oat_name,
+ CompilerFilter::Filter::kQuicken,
+ { },
+ true, // expect_success
+ false, // use_fd
+ [](const OatFile& o) {
+ CHECK(!o.ContainsDexCode());
+ }));
}
TEST_F(Dex2oatTest, EmptyUncompressedDexTest) {
@@ -1667,15 +1682,15 @@ TEST_F(Dex2oatTest, CompactDexGenerationFailure) {
std::string out_dir = GetScratchDir();
const std::string oat_filename = out_dir + "/base.oat";
// The dex won't pass the method verifier, only use the verify filter.
- GenerateOdexForTest(temp_dex.GetFilename(),
- oat_filename,
- CompilerFilter::Filter::kVerify,
- { },
- true, // expect_success
- false, // use_fd
- [](const OatFile& o) {
- CHECK(o.ContainsDexCode());
- });
+ ASSERT_TRUE(GenerateOdexForTest(temp_dex.GetFilename(),
+ oat_filename,
+ CompilerFilter::Filter::kVerify,
+ { },
+ true, // expect_success
+ false, // use_fd
+ [](const OatFile& o) {
+ CHECK(o.ContainsDexCode());
+ }));
// Open our generated oat file.
std::string error_msg;
std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
@@ -1718,11 +1733,11 @@ TEST_F(Dex2oatTest, CompactDexGenerationFailureMultiDex) {
}
const std::string& dex_location = apk_file.GetFilename();
const std::string odex_location = GetOdexDir() + "/output.odex";
- GenerateOdexForTest(dex_location,
- odex_location,
- CompilerFilter::kQuicken,
- { "--compact-dex-level=fast" },
- true);
+ ASSERT_TRUE(GenerateOdexForTest(dex_location,
+ odex_location,
+ CompilerFilter::kQuicken,
+ { "--compact-dex-level=fast" },
+ true));
}
TEST_F(Dex2oatTest, StderrLoggerOutput) {
@@ -1732,11 +1747,11 @@ TEST_F(Dex2oatTest, StderrLoggerOutput) {
// Test file doesn't matter.
Copy(GetDexSrc1(), dex_location);
- GenerateOdexForTest(dex_location,
- odex_location,
- CompilerFilter::kQuicken,
- { "--runtime-arg", "-Xuse-stderr-logger" },
- true);
+ ASSERT_TRUE(GenerateOdexForTest(dex_location,
+ odex_location,
+ CompilerFilter::kQuicken,
+ { "--runtime-arg", "-Xuse-stderr-logger" },
+ true));
// Look for some random part of dex2oat logging. With the stderr logger this should be captured,
// even on device.
EXPECT_NE(std::string::npos, output_.find("dex2oat took"));
@@ -1749,11 +1764,11 @@ TEST_F(Dex2oatTest, VerifyCompilationReason) {
// Test file doesn't matter.
Copy(GetDexSrc1(), dex_location);
- GenerateOdexForTest(dex_location,
- odex_location,
- CompilerFilter::kVerify,
- { "--compilation-reason=install" },
- true);
+ ASSERT_TRUE(GenerateOdexForTest(dex_location,
+ odex_location,
+ CompilerFilter::kVerify,
+ { "--compilation-reason=install" },
+ true));
std::string error_msg;
std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
odex_location.c_str(),
@@ -1774,11 +1789,11 @@ TEST_F(Dex2oatTest, VerifyNoCompilationReason) {
// Test file doesn't matter.
Copy(GetDexSrc1(), dex_location);
- GenerateOdexForTest(dex_location,
- odex_location,
- CompilerFilter::kVerify,
- {},
- true);
+ ASSERT_TRUE(GenerateOdexForTest(dex_location,
+ odex_location,
+ CompilerFilter::kVerify,
+ {},
+ true));
std::string error_msg;
std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
odex_location.c_str(),
@@ -1799,14 +1814,13 @@ TEST_F(Dex2oatTest, DontExtract) {
const std::string dex_location = dex->GetLocation();
const std::string odex_location = out_dir + "/base.oat";
const std::string vdex_location = out_dir + "/base.vdex";
- GenerateOdexForTest(dex_location,
- odex_location,
- CompilerFilter::Filter::kVerify,
- { "--copy-dex-files=false" },
- true, // expect_success
- false, // use_fd
- [](const OatFile&) {
- });
+ ASSERT_TRUE(GenerateOdexForTest(dex_location,
+ odex_location,
+ CompilerFilter::Filter::kVerify,
+ { "--copy-dex-files=false" },
+ true, // expect_success
+ false, // use_fd
+ [](const OatFile&) {}));
{
// Check the vdex doesn't have dex.
std::unique_ptr<VdexFile> vdex(VdexFile::Open(vdex_location.c_str(),
@@ -1856,19 +1870,21 @@ TEST_F(Dex2oatTest, DontExtract) {
}
auto generate_and_check = [&](CompilerFilter::Filter filter) {
- GenerateOdexForTest(dex_location,
- odex_location,
- filter,
- { "--dump-timings",
- "--dm-file=" + dm_file.GetFilename(),
- // Pass -Xuse-stderr-logger have dex2oat output in output_ on target.
- "--runtime-arg",
- "-Xuse-stderr-logger" },
- true, // expect_success
- false, // use_fd
- [](const OatFile& o) {
- CHECK(o.ContainsDexCode());
- });
+ output_.clear();
+ ASSERT_TRUE(GenerateOdexForTest(dex_location,
+ odex_location,
+ filter,
+ { "--dump-timings",
+ "--dm-file=" + dm_file.GetFilename(),
+ // Pass -Xuse-stderr-logger to have dex2oat output in output_ on
+ // target.
+ "--runtime-arg",
+ "-Xuse-stderr-logger" },
+ true, // expect_success
+ false, // use_fd
+ [](const OatFile& o) {
+ CHECK(o.ContainsDexCode());
+ }));
// Check the output for "Fast verify", this is printed from --dump-timings.
std::istringstream iss(output_);
std::string line;
@@ -1916,14 +1932,14 @@ TEST_F(Dex2oatTest, QuickenedInput) {
{
std::string input_vdex = "--input-vdex-fd=-1";
std::string output_vdex = StringPrintf("--output-vdex-fd=%d", vdex_output->Fd());
- GenerateOdexForTest(dex_location,
- odex_location,
- CompilerFilter::kQuicken,
- // Disable cdex since we want to compare against the original dex file
- // after unquickening.
- { input_vdex, output_vdex, kDisableCompactDex },
- /*expect_success=*/ true,
- /*use_fd=*/ true);
+ ASSERT_TRUE(GenerateOdexForTest(dex_location,
+ odex_location,
+ CompilerFilter::kQuicken,
+ // Disable cdex since we want to compare against the original
+ // dex file after unquickening.
+ { input_vdex, output_vdex, kDisableCompactDex },
+ /* expect_success= */ true,
+ /* use_fd= */ true));
}
// Unquicken by running the verify compiler filter on the vdex file and verify it matches.
std::string odex_location2 = GetOdexDir() + "/unquickened.odex";
@@ -1932,13 +1948,14 @@ TEST_F(Dex2oatTest, QuickenedInput) {
{
std::string input_vdex = StringPrintf("--input-vdex-fd=%d", vdex_output->Fd());
std::string output_vdex = StringPrintf("--output-vdex-fd=%d", vdex_unquickened->Fd());
- GenerateOdexForTest(dex_location,
- odex_location2,
- CompilerFilter::kVerify,
- // Disable cdex to avoid needing to write out the shared section.
- { input_vdex, output_vdex, kDisableCompactDex },
- /*expect_success=*/ true,
- /*use_fd=*/ true);
+ ASSERT_TRUE(GenerateOdexForTest(dex_location,
+ odex_location2,
+ CompilerFilter::kVerify,
+ // Disable cdex to avoid needing to write out the shared
+ // section.
+ { input_vdex, output_vdex, kDisableCompactDex },
+ /* expect_success= */ true,
+ /* use_fd= */ true));
}
ASSERT_EQ(vdex_unquickened->Flush(), 0) << "Could not flush and close vdex file";
ASSERT_TRUE(success_);
@@ -2046,13 +2063,13 @@ TEST_F(Dex2oatTest, AppImageNoProfile) {
ScratchFile app_image_file;
const std::string out_dir = GetScratchDir();
const std::string odex_location = out_dir + "/base.odex";
- GenerateOdexForTest(GetTestDexFileName("ManyMethods"),
- odex_location,
- CompilerFilter::Filter::kSpeedProfile,
- { "--app-image-fd=" + std::to_string(app_image_file.GetFd()) },
- true, // expect_success
- false, // use_fd
- [](const OatFile&) {});
+ ASSERT_TRUE(GenerateOdexForTest(GetTestDexFileName("ManyMethods"),
+ odex_location,
+ CompilerFilter::Filter::kSpeedProfile,
+ { "--app-image-fd=" + std::to_string(app_image_file.GetFd()) },
+ true, // expect_success
+ false, // use_fd
+ [](const OatFile&) {}));
// Open our generated oat file.
std::string error_msg;
std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
@@ -2105,15 +2122,15 @@ TEST_F(Dex2oatTest, AppImageResolveStrings) {
const std::string out_dir = GetScratchDir();
const std::string odex_location = out_dir + "/base.odex";
const std::string app_image_location = out_dir + "/base.art";
- GenerateOdexForTest(GetTestDexFileName("StringLiterals"),
- odex_location,
- CompilerFilter::Filter::kSpeedProfile,
- { "--app-image-file=" + app_image_location,
- "--resolve-startup-const-strings=true",
- "--profile-file=" + profile_file.GetFilename()},
- /*expect_success=*/ true,
- /*use_fd=*/ false,
- [](const OatFile&) {});
+ ASSERT_TRUE(GenerateOdexForTest(GetTestDexFileName("StringLiterals"),
+ odex_location,
+ CompilerFilter::Filter::kSpeedProfile,
+ { "--app-image-file=" + app_image_location,
+ "--resolve-startup-const-strings=true",
+ "--profile-file=" + profile_file.GetFilename()},
+ /* expect_success= */ true,
+ /* use_fd= */ false,
+ [](const OatFile&) {}));
// Open our generated oat file.
std::string error_msg;
std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
@@ -2178,6 +2195,22 @@ TEST_F(Dex2oatTest, AppImageResolveStrings) {
EXPECT_TRUE(preresolved_seen.find("Other class init") == preresolved_seen.end());
// Expect the sets match.
EXPECT_GE(seen.size(), preresolved_seen.size());
+
+ // Verify what strings are marked as boot image.
+ std::set<std::string> boot_image_strings;
+ std::set<std::string> app_image_strings;
+
+ MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
+ intern_table.VisitInterns([&](const GcRoot<mirror::String>& root)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ boot_image_strings.insert(root.Read()->ToModifiedUtf8());
+ }, /*visit_boot_images=*/true, /*visit_non_boot_images=*/false);
+ intern_table.VisitInterns([&](const GcRoot<mirror::String>& root)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ app_image_strings.insert(root.Read()->ToModifiedUtf8());
+ }, /*visit_boot_images=*/false, /*visit_non_boot_images=*/true);
+ EXPECT_EQ(boot_image_strings.size(), 0u);
+ EXPECT_TRUE(app_image_strings == seen);
}
}
@@ -2204,27 +2237,27 @@ TEST_F(Dex2oatClassLoaderContextTest, StoredClassLoaderContext) {
}
expected_stored_context += + "]";
// The class path should not be valid and should fail being stored.
- GenerateOdexForTest(GetTestDexFileName("ManyMethods"),
- odex_location,
- CompilerFilter::Filter::kQuicken,
- { "--class-loader-context=" + stored_context },
- true, // expect_success
- false, // use_fd
- [&](const OatFile& oat_file) {
+ EXPECT_TRUE(GenerateOdexForTest(GetTestDexFileName("ManyMethods"),
+ odex_location,
+ CompilerFilter::Filter::kQuicken,
+ { "--class-loader-context=" + stored_context },
+ true, // expect_success
+ false, // use_fd
+ [&](const OatFile& oat_file) {
EXPECT_NE(oat_file.GetClassLoaderContext(), stored_context) << output_;
EXPECT_NE(oat_file.GetClassLoaderContext(), valid_context) << output_;
- });
+ }));
// The stored context should match what we expect even though it's invalid.
- GenerateOdexForTest(GetTestDexFileName("ManyMethods"),
- odex_location,
- CompilerFilter::Filter::kQuicken,
- { "--class-loader-context=" + valid_context,
- "--stored-class-loader-context=" + stored_context },
- true, // expect_success
- false, // use_fd
- [&](const OatFile& oat_file) {
+ EXPECT_TRUE(GenerateOdexForTest(GetTestDexFileName("ManyMethods"),
+ odex_location,
+ CompilerFilter::Filter::kQuicken,
+ { "--class-loader-context=" + valid_context,
+ "--stored-class-loader-context=" + stored_context },
+ true, // expect_success
+ false, // use_fd
+ [&](const OatFile& oat_file) {
EXPECT_EQ(oat_file.GetClassLoaderContext(), expected_stored_context) << output_;
- });
+ }));
}
} // namespace art
diff --git a/dex2oat/linker/image_test.cc b/dex2oat/linker/image_test.cc
index b628c9e763..69dac19df9 100644
--- a/dex2oat/linker/image_test.cc
+++ b/dex2oat/linker/image_test.cc
@@ -99,9 +99,9 @@ TEST_F(ImageTest, ImageHeaderIsValid) {
TEST_F(ImageTest, TestDefaultMethods) {
CompilationHelper helper;
Compile(ImageHeader::kStorageModeUncompressed,
- helper,
- "DefaultMethods",
- {"LIface;", "LImpl;", "LIterableBase;"});
+ helper,
+ "DefaultMethods",
+ {"LIface;", "LImpl;", "LIterableBase;"});
PointerSize pointer_size = class_linker_->GetImagePointerSize();
Thread* self = Thread::Current();
@@ -152,5 +152,17 @@ TEST_F(ImageTest, TestDefaultMethods) {
ASSERT_TRUE(class_linker_->IsQuickToInterpreterBridge(code));
}
+// Regression test for a dex2oat crash on a soft verification failure during a
+// class initialization check from the transactional interpreter while
+// running the class initializer for another class.
+TEST_F(ImageTest, TestSoftVerificationFailureDuringClassInitialization) {
+ CompilationHelper helper;
+ Compile(ImageHeader::kStorageModeUncompressed,
+ helper,
+ "VerifySoftFailDuringClinit",
+ /*image_classes=*/ {"LClassToInitialize;"},
+ /*image_classes_failing_aot_clinit=*/ {"LClassToInitialize;"});
+}
+
} // namespace linker
} // namespace art
diff --git a/dex2oat/linker/image_test.h b/dex2oat/linker/image_test.h
index 443ee52d15..9d1a4e7b44 100644
--- a/dex2oat/linker/image_test.h
+++ b/dex2oat/linker/image_test.h
@@ -28,6 +28,7 @@
#include "art_method-inl.h"
#include "base/file_utils.h"
#include "base/hash_set.h"
+#include "base/stl_util.h"
#include "base/unix_file/fd_file.h"
#include "base/utils.h"
#include "class_linker-inl.h"
@@ -81,7 +82,8 @@ class ImageTest : public CommonCompilerTest {
void Compile(ImageHeader::StorageMode storage_mode,
/*out*/ CompilationHelper& out_helper,
const std::string& extra_dex = "",
- const std::initializer_list<std::string>& image_classes = {});
+ const std::initializer_list<std::string>& image_classes = {},
+ const std::initializer_list<std::string>& image_classes_failing_aot_clinit = {});
void SetUpRuntimeOptions(RuntimeOptions* options) override {
CommonCompilerTest::SetUpRuntimeOptions(options);
@@ -370,10 +372,15 @@ inline void ImageTest::DoCompile(ImageHeader::StorageMode storage_mode,
}
}
-inline void ImageTest::Compile(ImageHeader::StorageMode storage_mode,
- CompilationHelper& helper,
- const std::string& extra_dex,
- const std::initializer_list<std::string>& image_classes) {
+inline void ImageTest::Compile(
+ ImageHeader::StorageMode storage_mode,
+ CompilationHelper& helper,
+ const std::string& extra_dex,
+ const std::initializer_list<std::string>& image_classes,
+ const std::initializer_list<std::string>& image_classes_failing_aot_clinit) {
+ for (const std::string& image_class : image_classes_failing_aot_clinit) {
+ ASSERT_TRUE(ContainsElement(image_classes, image_class));
+ }
for (const std::string& image_class : image_classes) {
image_classes_.insert(image_class);
}
@@ -394,7 +401,12 @@ inline void ImageTest::Compile(ImageHeader::StorageMode storage_mode,
ObjPtr<mirror::Class> klass =
class_linker->FindSystemClass(Thread::Current(), image_class.c_str());
EXPECT_TRUE(klass != nullptr);
- EXPECT_TRUE(klass->IsInitialized());
+ EXPECT_TRUE(klass->IsResolved());
+ if (ContainsElement(image_classes_failing_aot_clinit, image_class)) {
+ EXPECT_FALSE(klass->IsInitialized());
+ } else {
+ EXPECT_TRUE(klass->IsInitialized());
+ }
}
}
}
diff --git a/dex2oat/linker/image_writer.cc b/dex2oat/linker/image_writer.cc
index fd10b6b23c..5ca7f0733d 100644
--- a/dex2oat/linker/image_writer.cc
+++ b/dex2oat/linker/image_writer.cc
@@ -2614,17 +2614,19 @@ void ImageWriter::CopyAndFixupNativeData(size_t oat_index) {
CHECK_EQ(intern_table_bytes, image_info.intern_table_bytes_);
// Fixup the pointers in the newly written intern table to contain image addresses.
InternTable temp_intern_table;
- // Note that we require that ReadFromMemory does not make an internal copy of the elements so that
- // the VisitRoots() will update the memory directly rather than the copies.
+ // Note that we require that ReadFromMemory does not make an internal copy of the elements so
+ // that the VisitRoots() will update the memory directly rather than the copies.
// This also relies on visit roots not doing any verification which could fail after we update
// the roots to be the image addresses.
- temp_intern_table.AddTableFromMemory(intern_table_memory_ptr, VoidFunctor());
+ temp_intern_table.AddTableFromMemory(intern_table_memory_ptr,
+ VoidFunctor(),
+ /*is_boot_image=*/ false);
CHECK_EQ(temp_intern_table.Size(), intern_table->Size());
temp_intern_table.VisitRoots(&root_visitor, kVisitRootFlagAllRoots);
// Record relocations. (The root visitor does not get to see the slot addresses.)
MutexLock lock(Thread::Current(), *Locks::intern_table_lock_);
DCHECK(!temp_intern_table.strong_interns_.tables_.empty());
- DCHECK(!temp_intern_table.strong_interns_.tables_[0].empty()); // Inserted at the beginning.
+ DCHECK(!temp_intern_table.strong_interns_.tables_[0].Empty()); // Inserted at the beginning.
}
// Write the class table(s) into the image. class_table_bytes_ may be 0 if there are multiple
// class loaders. Writing multiple class tables into the image is currently unsupported.
diff --git a/libartbase/arch/instruction_set.cc b/libartbase/arch/instruction_set.cc
index a187663062..d47f9362d4 100644
--- a/libartbase/arch/instruction_set.cc
+++ b/libartbase/arch/instruction_set.cc
@@ -105,18 +105,7 @@ size_t GetInstructionSetAlignment(InstructionSet isa) {
UNREACHABLE();
}
-#if !defined(ART_STACK_OVERFLOW_GAP_arm) || !defined(ART_STACK_OVERFLOW_GAP_arm64) || \
- !defined(ART_STACK_OVERFLOW_GAP_mips) || !defined(ART_STACK_OVERFLOW_GAP_mips64) || \
- !defined(ART_STACK_OVERFLOW_GAP_x86) || !defined(ART_STACK_OVERFLOW_GAP_x86_64)
-#error "Missing defines for stack overflow gap"
-#endif
-
-static constexpr size_t kArmStackOverflowReservedBytes = ART_STACK_OVERFLOW_GAP_arm;
-static constexpr size_t kArm64StackOverflowReservedBytes = ART_STACK_OVERFLOW_GAP_arm64;
-static constexpr size_t kMipsStackOverflowReservedBytes = ART_STACK_OVERFLOW_GAP_mips;
-static constexpr size_t kMips64StackOverflowReservedBytes = ART_STACK_OVERFLOW_GAP_mips64;
-static constexpr size_t kX86StackOverflowReservedBytes = ART_STACK_OVERFLOW_GAP_x86;
-static constexpr size_t kX86_64StackOverflowReservedBytes = ART_STACK_OVERFLOW_GAP_x86_64;
+namespace instruction_set_details {
static_assert(IsAligned<kPageSize>(kArmStackOverflowReservedBytes), "ARM gap not page aligned");
static_assert(IsAligned<kPageSize>(kArm64StackOverflowReservedBytes), "ARM64 gap not page aligned");
@@ -144,32 +133,10 @@ static_assert(ART_FRAME_SIZE_LIMIT < kX86StackOverflowReservedBytes,
static_assert(ART_FRAME_SIZE_LIMIT < kX86_64StackOverflowReservedBytes,
"Frame size limit too large");
-size_t GetStackOverflowReservedBytes(InstructionSet isa) {
- switch (isa) {
- case InstructionSet::kArm: // Intentional fall-through.
- case InstructionSet::kThumb2:
- return kArmStackOverflowReservedBytes;
+} // namespace instruction_set_details
- case InstructionSet::kArm64:
- return kArm64StackOverflowReservedBytes;
-
- case InstructionSet::kMips:
- return kMipsStackOverflowReservedBytes;
-
- case InstructionSet::kMips64:
- return kMips64StackOverflowReservedBytes;
-
- case InstructionSet::kX86:
- return kX86StackOverflowReservedBytes;
-
- case InstructionSet::kX86_64:
- return kX86_64StackOverflowReservedBytes;
-
- case InstructionSet::kNone:
- LOG(FATAL) << "kNone has no stack overflow size";
- UNREACHABLE();
- }
- LOG(FATAL) << "Unknown instruction set" << isa;
+NO_RETURN void GetStackOverflowReservedBytesFailure(const char* error_msg) {
+ LOG(FATAL) << error_msg;
UNREACHABLE();
}
diff --git a/libartbase/arch/instruction_set.h b/libartbase/arch/instruction_set.h
index 06bd53a6a9..7e071bd9e2 100644
--- a/libartbase/arch/instruction_set.h
+++ b/libartbase/arch/instruction_set.h
@@ -226,7 +226,53 @@ constexpr size_t GetBytesPerFprSpillLocation(InstructionSet isa) {
InstructionSetAbort(isa);
}
-size_t GetStackOverflowReservedBytes(InstructionSet isa);
+namespace instruction_set_details {
+
+#if !defined(ART_STACK_OVERFLOW_GAP_arm) || !defined(ART_STACK_OVERFLOW_GAP_arm64) || \
+ !defined(ART_STACK_OVERFLOW_GAP_mips) || !defined(ART_STACK_OVERFLOW_GAP_mips64) || \
+ !defined(ART_STACK_OVERFLOW_GAP_x86) || !defined(ART_STACK_OVERFLOW_GAP_x86_64)
+#error "Missing defines for stack overflow gap"
+#endif
+
+static constexpr size_t kArmStackOverflowReservedBytes = ART_STACK_OVERFLOW_GAP_arm;
+static constexpr size_t kArm64StackOverflowReservedBytes = ART_STACK_OVERFLOW_GAP_arm64;
+static constexpr size_t kMipsStackOverflowReservedBytes = ART_STACK_OVERFLOW_GAP_mips;
+static constexpr size_t kMips64StackOverflowReservedBytes = ART_STACK_OVERFLOW_GAP_mips64;
+static constexpr size_t kX86StackOverflowReservedBytes = ART_STACK_OVERFLOW_GAP_x86;
+static constexpr size_t kX86_64StackOverflowReservedBytes = ART_STACK_OVERFLOW_GAP_x86_64;
+
+NO_RETURN void GetStackOverflowReservedBytesFailure(const char* error_msg);
+
+} // namespace instruction_set_details
+
+ALWAYS_INLINE
+constexpr size_t GetStackOverflowReservedBytes(InstructionSet isa) {
+ switch (isa) {
+ case InstructionSet::kArm: // Intentional fall-through.
+ case InstructionSet::kThumb2:
+ return instruction_set_details::kArmStackOverflowReservedBytes;
+
+ case InstructionSet::kArm64:
+ return instruction_set_details::kArm64StackOverflowReservedBytes;
+
+ case InstructionSet::kMips:
+ return instruction_set_details::kMipsStackOverflowReservedBytes;
+
+ case InstructionSet::kMips64:
+ return instruction_set_details::kMips64StackOverflowReservedBytes;
+
+ case InstructionSet::kX86:
+ return instruction_set_details::kX86StackOverflowReservedBytes;
+
+ case InstructionSet::kX86_64:
+ return instruction_set_details::kX86_64StackOverflowReservedBytes;
+
+ case InstructionSet::kNone:
+ instruction_set_details::GetStackOverflowReservedBytesFailure(
+ "kNone has no stack overflow size");
+ }
+ instruction_set_details::GetStackOverflowReservedBytesFailure("Unknown instruction set");
+}
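The hunk above moves the lookup into the header as a constexpr function whose failure paths call an out-of-line NO_RETURN helper. A minimal sketch of the same pattern with hypothetical names; a bad input fails to compile in a constant expression and aborts at runtime:

#include <cstddef>
#include <cstdio>
#include <cstdlib>

[[noreturn]] void LookupFailure(const char* msg) {  // Out of line in a .cc file in practice.
  std::fprintf(stderr, "%s\n", msg);
  std::abort();
}

enum class Isa { kArm, kX86 };

constexpr std::size_t ReservedBytes(Isa isa) {
  switch (isa) {
    case Isa::kArm: return 8 * 1024;
    case Isa::kX86: return 16 * 1024;
  }
  // Not constexpr-evaluable, so constant evaluation of an invalid value is rejected.
  LookupFailure("unknown ISA");
}

static_assert(ReservedBytes(Isa::kArm) == 8 * 1024, "usable in constant expressions");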
// The following definitions create return types for two word-sized entities that will be passed
// in registers so that memory operations for the interface trampolines can be avoided. The entities
diff --git a/libdexfile/dex/dex_file-inl.h b/libdexfile/dex/dex_file-inl.h
index eae7efc2bc..c884eee88b 100644
--- a/libdexfile/dex/dex_file-inl.h
+++ b/libdexfile/dex/dex_file-inl.h
@@ -110,6 +110,14 @@ inline const char* DexFile::GetMethodName(const MethodId& method_id) const {
return StringDataByIdx(method_id.name_idx_);
}
+inline const char* DexFile::GetMethodName(const MethodId& method_id, uint32_t* utf_length) const {
+ return StringDataAndUtf16LengthByIdx(method_id.name_idx_, utf_length);
+}
+
+inline const char* DexFile::GetMethodName(uint32_t idx, uint32_t* utf_length) const {
+ return StringDataAndUtf16LengthByIdx(GetMethodId(idx).name_idx_, utf_length);
+}
+
inline const char* DexFile::GetMethodShorty(uint32_t idx) const {
return StringDataByIdx(GetProtoId(GetMethodId(idx).proto_idx_).shorty_idx_);
}
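The new overloads above return the method name and report its UTF-16 length through an out-parameter in a single lookup. Hypothetical usage, assuming a DexFile reference and a valid method index are in scope:

uint32_t utf16_length = 0u;
const char* name = dex_file.GetMethodName(method_idx, &utf16_length);
// `name` is the MUTF-8 string data; `utf16_length` is its length in UTF-16 code units.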
diff --git a/libdexfile/dex/dex_file.h b/libdexfile/dex/dex_file.h
index 6a52f67646..b3e7ad46c6 100644
--- a/libdexfile/dex/dex_file.h
+++ b/libdexfile/dex/dex_file.h
@@ -641,6 +641,8 @@ class DexFile {
// Returns the name of a method id.
const char* GetMethodName(const MethodId& method_id) const;
+ const char* GetMethodName(const MethodId& method_id, uint32_t* utf_length) const;
+ const char* GetMethodName(uint32_t idx, uint32_t* utf_length) const;
// Returns the shorty of a method by its index.
const char* GetMethodShorty(uint32_t idx) const;
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index 0cc5535f8e..c9774a7d1b 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -309,12 +309,13 @@ class StubTest : public CommonRuntimeTest {
// Use the result from r0
: [arg0] "0"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
[referrer] "r"(referrer), [hidden] "r"(hidden), [fpr_result] "m" (fpr_result)
+ // X18 is a reserved register and must not be listed as clobbered.
// Leave one register unclobbered, which is needed for compiling with
// -fstack-protector-strong. According to AAPCS64 registers x9-x15 are caller-saved,
// which means we should unclobber one of the callee-saved registers that are unused.
// Here we use x20.
// http://b/72613441, Clang 7.0 asks for one more register, so we do not reserve x21.
- : "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19",
+ : "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19",
"x22", "x23", "x24", "x25", "x26", "x27", "x28", "x30",
"d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
"d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index e9c17eeab5..d8da9129ed 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -402,6 +402,7 @@ inline hiddenapi::ApiList ArtMethod::GetHiddenApiAccessFlags()
case Intrinsics::kUnsafeLoadFence:
case Intrinsics::kUnsafeStoreFence:
case Intrinsics::kUnsafeFullFence:
+ case Intrinsics::kCRC32Update:
// These intrinsics are on the light greylist and will fail a DCHECK in
// SetIntrinsic() if their flags change on the respective dex methods.
// Note that the DCHECK currently won't fail if the dex methods are
diff --git a/runtime/base/mutex-inl.h b/runtime/base/mutex-inl.h
index 74db31256b..e775fe4505 100644
--- a/runtime/base/mutex-inl.h
+++ b/runtime/base/mutex-inl.h
@@ -196,7 +196,7 @@ inline void ReaderWriterMutex::SharedUnlock(Thread* self) {
if (num_pending_writers_.load(std::memory_order_seq_cst) > 0 ||
num_pending_readers_.load(std::memory_order_seq_cst) > 0) {
// Wake any exclusive waiters as there are now no readers.
- futex(state_.Address(), FUTEX_WAKE, -1, nullptr, nullptr, 0);
+ futex(state_.Address(), FUTEX_WAKE_PRIVATE, -1, nullptr, nullptr, 0);
}
}
} else {
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 0adcb37a59..ca2ed80306 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -447,7 +447,7 @@ void Mutex::ExclusiveLock(Thread* self) {
if (UNLIKELY(should_respond_to_empty_checkpoint_request_)) {
self->CheckEmptyCheckpointFromMutex();
}
- if (futex(state_.Address(), FUTEX_WAIT, 1, nullptr, nullptr, 0) != 0) {
+ if (futex(state_.Address(), FUTEX_WAIT_PRIVATE, 1, nullptr, nullptr, 0) != 0) {
// EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
// We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
if ((errno != EAGAIN) && (errno != EINTR)) {
@@ -551,7 +551,7 @@ void Mutex::ExclusiveUnlock(Thread* self) {
if (LIKELY(done)) { // Spurious fail?
// Wake a contender.
if (UNLIKELY(num_contenders_.load(std::memory_order_seq_cst) > 0)) {
- futex(state_.Address(), FUTEX_WAKE, 1, nullptr, nullptr, 0);
+ futex(state_.Address(), FUTEX_WAKE_PRIVATE, 1, nullptr, nullptr, 0);
}
}
} else {
@@ -594,7 +594,7 @@ void Mutex::WakeupToRespondToEmptyCheckpoint() {
// Wake up all the waiters so they will respond to the empty checkpoint.
DCHECK(should_respond_to_empty_checkpoint_request_);
if (UNLIKELY(num_contenders_.load(std::memory_order_relaxed) > 0)) {
- futex(state_.Address(), FUTEX_WAKE, -1, nullptr, nullptr, 0);
+ futex(state_.Address(), FUTEX_WAKE_PRIVATE, -1, nullptr, nullptr, 0);
}
#else
LOG(FATAL) << "Non futex case isn't supported.";
@@ -648,7 +648,7 @@ void ReaderWriterMutex::ExclusiveLock(Thread* self) {
if (UNLIKELY(should_respond_to_empty_checkpoint_request_)) {
self->CheckEmptyCheckpointFromMutex();
}
- if (futex(state_.Address(), FUTEX_WAIT, cur_state, nullptr, nullptr, 0) != 0) {
+ if (futex(state_.Address(), FUTEX_WAIT_PRIVATE, cur_state, nullptr, nullptr, 0) != 0) {
// EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
// We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
if ((errno != EAGAIN) && (errno != EINTR)) {
@@ -689,7 +689,7 @@ void ReaderWriterMutex::ExclusiveUnlock(Thread* self) {
// Wake any waiters.
if (UNLIKELY(num_pending_readers_.load(std::memory_order_seq_cst) > 0 ||
num_pending_writers_.load(std::memory_order_seq_cst) > 0)) {
- futex(state_.Address(), FUTEX_WAKE, -1, nullptr, nullptr, 0);
+ futex(state_.Address(), FUTEX_WAKE_PRIVATE, -1, nullptr, nullptr, 0);
}
}
} else {
@@ -727,7 +727,7 @@ bool ReaderWriterMutex::ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32
if (UNLIKELY(should_respond_to_empty_checkpoint_request_)) {
self->CheckEmptyCheckpointFromMutex();
}
- if (futex(state_.Address(), FUTEX_WAIT, cur_state, &rel_ts, nullptr, 0) != 0) {
+ if (futex(state_.Address(), FUTEX_WAIT_PRIVATE, cur_state, &rel_ts, nullptr, 0) != 0) {
if (errno == ETIMEDOUT) {
--num_pending_writers_;
return false; // Timed out.
@@ -768,7 +768,7 @@ void ReaderWriterMutex::HandleSharedLockContention(Thread* self, int32_t cur_sta
if (UNLIKELY(should_respond_to_empty_checkpoint_request_)) {
self->CheckEmptyCheckpointFromMutex();
}
- if (futex(state_.Address(), FUTEX_WAIT, cur_state, nullptr, nullptr, 0) != 0) {
+ if (futex(state_.Address(), FUTEX_WAIT_PRIVATE, cur_state, nullptr, nullptr, 0) != 0) {
if (errno != EAGAIN && errno != EINTR) {
PLOG(FATAL) << "futex wait failed for " << name_;
}
@@ -846,7 +846,7 @@ void ReaderWriterMutex::WakeupToRespondToEmptyCheckpoint() {
DCHECK(should_respond_to_empty_checkpoint_request_);
if (UNLIKELY(num_pending_readers_.load(std::memory_order_relaxed) > 0 ||
num_pending_writers_.load(std::memory_order_relaxed) > 0)) {
- futex(state_.Address(), FUTEX_WAKE, -1, nullptr, nullptr, 0);
+ futex(state_.Address(), FUTEX_WAKE_PRIVATE, -1, nullptr, nullptr, 0);
}
#else
LOG(FATAL) << "Non futex case isn't supported.";
@@ -908,7 +908,7 @@ void ConditionVariable::RequeueWaiters(int32_t count) {
// Move waiters from the condition variable's futex to the guard's futex,
// so that they will be woken up when the mutex is released.
bool done = futex(sequence_.Address(),
- FUTEX_REQUEUE,
+ FUTEX_REQUEUE_PRIVATE,
/* Threads to wake */ 0,
/* Threads to requeue*/ reinterpret_cast<const timespec*>(count),
guard_.state_.Address(),
@@ -947,7 +947,7 @@ void ConditionVariable::WaitHoldingLocks(Thread* self) {
guard_.recursion_count_ = 1;
int32_t cur_sequence = sequence_.load(std::memory_order_relaxed);
guard_.ExclusiveUnlock(self);
- if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, nullptr, nullptr, 0) != 0) {
+ if (futex(sequence_.Address(), FUTEX_WAIT_PRIVATE, cur_sequence, nullptr, nullptr, 0) != 0) {
// Futex failed, check it is an expected error.
// EAGAIN == EWOULDBLK, so we let the caller try again.
// EINTR implies a signal was sent to this thread.
@@ -998,7 +998,7 @@ bool ConditionVariable::TimedWait(Thread* self, int64_t ms, int32_t ns) {
guard_.recursion_count_ = 1;
int32_t cur_sequence = sequence_.load(std::memory_order_relaxed);
guard_.ExclusiveUnlock(self);
- if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, &rel_ts, nullptr, 0) != 0) {
+ if (futex(sequence_.Address(), FUTEX_WAIT_PRIVATE, cur_sequence, &rel_ts, nullptr, 0) != 0) {
if (errno == ETIMEDOUT) {
// Timed out we're done.
timed_out = true;
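The mutex changes above switch every futex call to the *_PRIVATE opcode variants, which tell the kernel the futex word is never shared across processes and let it skip the shared-key lookup on each wait/wake. A minimal Linux-only sketch of wait/wake wrappers in that style; illustrative, not ART's futex() helper:

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <atomic>
#include <cstdint>

static long PrivateFutex(std::atomic<int32_t>* addr, int op, int32_t val) {
  // The kernel treats the address as a plain int32_t word.
  return syscall(SYS_futex, reinterpret_cast<int32_t*>(addr), op, val, nullptr, nullptr, 0);
}

void WaitWhileEquals(std::atomic<int32_t>* word, int32_t expected) {
  while (word->load(std::memory_order_acquire) == expected) {
    // Returns when woken, or spuriously with EAGAIN/EINTR; the loop re-checks the word.
    PrivateFutex(word, FUTEX_WAIT_PRIVATE, expected);
  }
}

void WakeAll(std::atomic<int32_t>* word) {
  PrivateFutex(word, FUTEX_WAKE_PRIVATE, INT32_MAX);  // Wake every waiter on this word.
}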
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index f43791ab06..9ba52c429d 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1486,7 +1486,6 @@ void AppImageLoadingHelper::HandleAppImageStrings(gc::space::ImageSpace* space)
// the strings they point to.
ScopedTrace timing("AppImage:InternString");
- Thread* const self = Thread::Current();
InternTable* const intern_table = Runtime::Current()->GetInternTable();
// Add the intern table, removing any conflicts. For conflicts, store the new address in a map
@@ -1494,19 +1493,46 @@ void AppImageLoadingHelper::HandleAppImageStrings(gc::space::ImageSpace* space)
// TODO: Optimize with a bitmap or bloom filter
SafeMap<mirror::String*, mirror::String*> intern_remap;
intern_table->AddImageStringsToTable(space, [&](InternTable::UnorderedSet& interns)
- REQUIRES_SHARED(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(Locks::intern_table_lock_) {
+ const size_t non_boot_image_strings = intern_table->CountInterns(
+ /*visit_boot_images=*/false,
+ /*visit_non_boot_images=*/true);
VLOG(image) << "AppImage:stringsInInternTableSize = " << interns.size();
- for (auto it = interns.begin(); it != interns.end(); ) {
- ObjPtr<mirror::String> string = it->Read();
- ObjPtr<mirror::String> existing = intern_table->LookupWeak(self, string);
- if (existing == nullptr) {
- existing = intern_table->LookupStrong(self, string);
- }
- if (existing != nullptr) {
- intern_remap.Put(string.Ptr(), existing.Ptr());
- it = interns.erase(it);
- } else {
- ++it;
+ VLOG(image) << "AppImage:nonBootImageInternStrings = " << non_boot_image_strings;
+ // Visit the smaller of the two sets to compute the intersection.
+ if (interns.size() < non_boot_image_strings) {
+ for (auto it = interns.begin(); it != interns.end(); ) {
+ ObjPtr<mirror::String> string = it->Read();
+ ObjPtr<mirror::String> existing = intern_table->LookupWeakLocked(string);
+ if (existing == nullptr) {
+ existing = intern_table->LookupStrongLocked(string);
+ }
+ if (existing != nullptr) {
+ intern_remap.Put(string.Ptr(), existing.Ptr());
+ it = interns.erase(it);
+ } else {
+ ++it;
+ }
+ }
+ } else {
+ intern_table->VisitInterns([&](const GcRoot<mirror::String>& root)
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(Locks::intern_table_lock_) {
+ auto it = interns.find(root);
+ if (it != interns.end()) {
+ ObjPtr<mirror::String> existing = root.Read();
+ intern_remap.Put(it->Read(), existing.Ptr());
+ it = interns.erase(it);
+ }
+ }, /*visit_boot_images=*/false, /*visit_non_boot_images=*/true);
+ }
+ // Sanity check to ensure correctness.
+ if (kIsDebugBuild) {
+ for (GcRoot<mirror::String>& root : interns) {
+ ObjPtr<mirror::String> string = root.Read();
+ CHECK(intern_table->LookupWeakLocked(string) == nullptr) << string->ToModifiedUtf8();
+ CHECK(intern_table->LookupStrongLocked(string) == nullptr) << string->ToModifiedUtf8();
}
}
});
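The rewritten lambda above intersects the app image's intern set with the runtime's non-boot interns by walking whichever side is smaller and probing the other. A generic sketch of that idiom using standard containers, not ART's GcRoot sets:

#include <unordered_set>
#include <vector>

template <typename T>
std::vector<T> Intersect(const std::unordered_set<T>& a, const std::unordered_set<T>& b) {
  const auto& smaller = (a.size() < b.size()) ? a : b;
  const auto& larger = (a.size() < b.size()) ? b : a;
  std::vector<T> result;
  for (const T& value : smaller) {
    if (larger.count(value) != 0) {  // Probe the larger set; O(1) expected per lookup.
      result.push_back(value);
    }
  }
  return result;  // Total work is O(min(|a|, |b|)) probes.
}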
@@ -4903,7 +4929,10 @@ bool ClassLinker::InitializeClass(Thread* self, Handle<mirror::Class> klass,
} else {
CHECK(Runtime::Current()->IsAotCompiler());
CHECK_EQ(klass->GetStatus(), ClassStatus::kRetryVerificationAtRuntime);
+ self->AssertNoPendingException();
+ self->SetException(Runtime::Current()->GetPreAllocatedNoClassDefFoundError());
}
+ self->AssertPendingException();
return false;
} else {
self->AssertNoPendingException();
diff --git a/runtime/class_loader_context.cc b/runtime/class_loader_context.cc
index dd10f3c4dd..de9fe221ff 100644
--- a/runtime/class_loader_context.cc
+++ b/runtime/class_loader_context.cc
@@ -42,6 +42,9 @@ static constexpr char kPathClassLoaderString[] = "PCL";
static constexpr char kDelegateLastClassLoaderString[] = "DLC";
static constexpr char kClassLoaderOpeningMark = '[';
static constexpr char kClassLoaderClosingMark = ']';
+static constexpr char kClassLoaderSharedLibraryOpeningMark = '{';
+static constexpr char kClassLoaderSharedLibraryClosingMark = '}';
+static constexpr char kClassLoaderSharedLibrarySeparator = '#';
static constexpr char kClassLoaderSeparator = ';';
static constexpr char kClasspathSeparator = ':';
static constexpr char kDexFileChecksumSeparator = '*';
@@ -58,17 +61,35 @@ ClassLoaderContext::ClassLoaderContext(bool owns_the_dex_files)
dex_files_open_result_(true),
owns_the_dex_files_(owns_the_dex_files) {}
+// Utility method to add the parent and the shared libraries of `info` to
+// the `work_list`.
+static void AddToWorkList(
+ ClassLoaderContext::ClassLoaderInfo* info,
+ std::vector<ClassLoaderContext::ClassLoaderInfo*>& work_list) {
+ if (info->parent != nullptr) {
+ work_list.push_back(info->parent.get());
+ }
+ for (size_t i = 0; i < info->shared_libraries.size(); ++i) {
+ work_list.push_back(info->shared_libraries[i].get());
+ }
+}
+
ClassLoaderContext::~ClassLoaderContext() {
- if (!owns_the_dex_files_) {
+ if (!owns_the_dex_files_ && class_loader_chain_ != nullptr) {
// If the context does not own the dex/oat files release the unique pointers to
// make sure we do not de-allocate them.
- for (ClassLoaderInfo& info : class_loader_chain_) {
- for (std::unique_ptr<OatFile>& oat_file : info.opened_oat_files) {
+ std::vector<ClassLoaderInfo*> work_list;
+ work_list.push_back(class_loader_chain_.get());
+ while (!work_list.empty()) {
+ ClassLoaderInfo* info = work_list.back();
+ work_list.pop_back();
+ for (std::unique_ptr<OatFile>& oat_file : info->opened_oat_files) {
oat_file.release(); // NOLINT b/117926937
}
- for (std::unique_ptr<const DexFile>& dex_file : info.opened_dex_files) {
+ for (std::unique_ptr<const DexFile>& dex_file : info->opened_dex_files) {
dex_file.release(); // NOLINT b/117926937
}
+ AddToWorkList(info, work_list);
}
}
}
@@ -86,11 +107,16 @@ std::unique_ptr<ClassLoaderContext> ClassLoaderContext::Create(const std::string
}
}
-// The expected format is: "ClassLoaderType1[ClasspathElem1*Checksum1:ClasspathElem2*Checksum2...]".
+// The expected format is:
+// "ClassLoaderType1[ClasspathElem1*Checksum1:ClasspathElem2*Checksum2...]{ClassLoaderType2[...]}".
// The checksum part of the format is expected only if parse_checksums is true.
-bool ClassLoaderContext::ParseClassLoaderSpec(const std::string& class_loader_spec,
- ClassLoaderType class_loader_type,
- bool parse_checksums) {
+std::unique_ptr<ClassLoaderContext::ClassLoaderInfo> ClassLoaderContext::ParseClassLoaderSpec(
+ const std::string& class_loader_spec,
+ bool parse_checksums) {
+ ClassLoaderType class_loader_type = ExtractClassLoaderType(class_loader_spec);
+ if (class_loader_type == kInvalidClassLoader) {
+ return nullptr;
+ }
const char* class_loader_type_str = GetClassLoaderTypeName(class_loader_type);
size_t type_str_size = strlen(class_loader_type_str);
@@ -98,21 +124,24 @@ bool ClassLoaderContext::ParseClassLoaderSpec(const std::string& class_loader_sp
// Check the opening and closing markers.
if (class_loader_spec[type_str_size] != kClassLoaderOpeningMark) {
- return false;
+ return nullptr;
}
- if (class_loader_spec[class_loader_spec.length() - 1] != kClassLoaderClosingMark) {
- return false;
+ if ((class_loader_spec[class_loader_spec.length() - 1] != kClassLoaderClosingMark) &&
+ (class_loader_spec[class_loader_spec.length() - 1] != kClassLoaderSharedLibraryClosingMark)) {
+ return nullptr;
}
+ size_t closing_index = class_loader_spec.find_first_of(kClassLoaderClosingMark);
+
// At this point we know the format is ok; continue and extract the classpath.
// Note that class loaders with an empty class path are allowed.
std::string classpath = class_loader_spec.substr(type_str_size + 1,
- class_loader_spec.length() - type_str_size - 2);
+ closing_index - type_str_size - 1);
- class_loader_chain_.push_back(ClassLoaderInfo(class_loader_type));
+ std::unique_ptr<ClassLoaderInfo> info(new ClassLoaderInfo(class_loader_type));
if (!parse_checksums) {
- Split(classpath, kClasspathSeparator, &class_loader_chain_.back().classpath);
+ Split(classpath, kClasspathSeparator, &info->classpath);
} else {
std::vector<std::string> classpath_elements;
Split(classpath, kClasspathSeparator, &classpath_elements);
@@ -120,18 +149,37 @@ bool ClassLoaderContext::ParseClassLoaderSpec(const std::string& class_loader_sp
std::vector<std::string> dex_file_with_checksum;
Split(element, kDexFileChecksumSeparator, &dex_file_with_checksum);
if (dex_file_with_checksum.size() != 2) {
- return false;
+ return nullptr;
}
uint32_t checksum = 0;
if (!android::base::ParseUint(dex_file_with_checksum[1].c_str(), &checksum)) {
- return false;
+ return nullptr;
}
- class_loader_chain_.back().classpath.push_back(dex_file_with_checksum[0]);
- class_loader_chain_.back().checksums.push_back(checksum);
+ info->classpath.push_back(dex_file_with_checksum[0]);
+ info->checksums.push_back(checksum);
}
}
- return true;
+ if (class_loader_spec[class_loader_spec.length() - 1] == kClassLoaderSharedLibraryClosingMark) {
+ size_t start_index = class_loader_spec.find_first_of(kClassLoaderSharedLibraryOpeningMark);
+ if (start_index == std::string::npos) {
+ return nullptr;
+ }
+ std::string shared_libraries_spec =
+ class_loader_spec.substr(start_index + 1, class_loader_spec.length() - start_index - 2);
+ std::vector<std::string> shared_libraries;
+ Split(shared_libraries_spec, kClassLoaderSharedLibrarySeparator, &shared_libraries);
+ for (const std::string& shared_library_spec : shared_libraries) {
+ std::unique_ptr<ClassLoaderInfo> shared_library(
+ ParseInternal(shared_library_spec, parse_checksums));
+ if (shared_library == nullptr) {
+ return nullptr;
+ }
+ info->shared_libraries.push_back(std::move(shared_library));
+ }
+ }
+
+ return info;
}
// Extracts the class loader type from the given spec.
@@ -157,7 +205,7 @@ bool ClassLoaderContext::Parse(const std::string& spec, bool parse_checksums) {
// By default we load the dex files in a PathClassLoader.
// So an empty spec is equivalent to an empty PathClassLoader (this happens when running
// tests)
- class_loader_chain_.push_back(ClassLoaderInfo(kPathClassLoader));
+ class_loader_chain_.reset(new ClassLoaderInfo(kPathClassLoader));
return true;
}
@@ -169,21 +217,102 @@ bool ClassLoaderContext::Parse(const std::string& spec, bool parse_checksums) {
return true;
}
- std::vector<std::string> class_loaders;
- Split(spec, kClassLoaderSeparator, &class_loaders);
+ CHECK(class_loader_chain_ == nullptr);
+ class_loader_chain_.reset(ParseInternal(spec, parse_checksums));
+ return class_loader_chain_ != nullptr;
+}
- for (const std::string& class_loader : class_loaders) {
- ClassLoaderType type = ExtractClassLoaderType(class_loader);
- if (type == kInvalidClassLoader) {
- LOG(ERROR) << "Invalid class loader type: " << class_loader;
- return false;
+ClassLoaderContext::ClassLoaderInfo* ClassLoaderContext::ParseInternal(
+ const std::string& spec, bool parse_checksums) {
+ CHECK(!spec.empty());
+ CHECK_NE(spec, OatFile::kSpecialSharedLibrary);
+ std::string remaining = spec;
+ std::unique_ptr<ClassLoaderInfo> first(nullptr);
+ ClassLoaderInfo* previous_iteration = nullptr;
+ while (!remaining.empty()) {
+ std::string class_loader_spec;
+ size_t first_class_loader_separator = remaining.find_first_of(kClassLoaderSeparator);
+ size_t first_shared_library_open =
+ remaining.find_first_of(kClassLoaderSharedLibraryOpeningMark);
+ if (first_class_loader_separator == std::string::npos) {
+ // Only one class loader, for example:
+ // PCL[...]
+ class_loader_spec = remaining;
+ remaining = "";
+ } else if ((first_shared_library_open == std::string::npos) ||
+ (first_shared_library_open > first_class_loader_separator)) {
+ // We found a class loader spec without shared libraries, for example:
+ // PCL[...];PCL[...]{...}
+ class_loader_spec = remaining.substr(0, first_class_loader_separator);
+ remaining = remaining.substr(first_class_loader_separator + 1,
+ remaining.size() - first_class_loader_separator - 1);
+ } else {
+ // The class loader spec contains shared libraries. Find the matching closing
+ // shared library marker for it.
+
+ // Counter of opened shared library markers we've encountered so far.
+ uint32_t counter = 1;
+ // The index at which we're operating in the loop.
+ uint32_t string_index = first_shared_library_open + 1;
+ while (counter != 0) {
+ size_t shared_library_close =
+ remaining.find_first_of(kClassLoaderSharedLibraryClosingMark, string_index);
+ size_t shared_library_open =
+ remaining.find_first_of(kClassLoaderSharedLibraryOpeningMark, string_index);
+ if (shared_library_close == std::string::npos) {
+ // No matching closing marker. Return an error.
+ LOG(ERROR) << "Invalid class loader spec: " << class_loader_spec;
+ return nullptr;
+ }
+
+ if ((shared_library_open == std::string::npos) ||
+ (shared_library_close < shared_library_open)) {
+ // We have seen a closing marker. Decrement the counter.
+ --counter;
+ if (counter == 0) {
+ // Found the matching closing marker.
+ class_loader_spec = remaining.substr(0, shared_library_close + 1);
+
+ // Compute the remaining string to analyze.
+ if (remaining.size() == shared_library_close + 1) {
+ remaining = "";
+ } else if ((remaining.size() == shared_library_close + 2) ||
+ (remaining.at(shared_library_close + 1) != kClassLoaderSeparator)) {
+ LOG(ERROR) << "Invalid class loader spec: " << class_loader_spec;
+ return nullptr;
+ } else {
+ remaining = remaining.substr(shared_library_close + 2,
+ remaining.size() - shared_library_close - 2);
+ }
+ } else {
+ // Move the search index forward.
+ string_index = shared_library_close + 1;
+ }
+ } else {
+ // New nested opening marker. Increment the counter and move the search
+ // index after the marker.
+ ++counter;
+ string_index = shared_library_open + 1;
+ }
+ }
}
- if (!ParseClassLoaderSpec(class_loader, type, parse_checksums)) {
- LOG(ERROR) << "Invalid class loader spec: " << class_loader;
- return false;
+
+ std::unique_ptr<ClassLoaderInfo> info =
+ ParseClassLoaderSpec(class_loader_spec, parse_checksums);
+ if (info == nullptr) {
+ LOG(ERROR) << "Invalid class loader spec: " << class_loader_spec;
+ return nullptr;
+ }
+ if (first == nullptr) {
+ first.reset(info.release());
+ previous_iteration = first.get();
+ } else {
+ CHECK(previous_iteration != nullptr);
+ previous_iteration->parent.reset(info.release());
+ previous_iteration = previous_iteration->parent.get();
}
}
- return true;
+ return first.release();
}
// Opens requested class path files and appends them to opened_dex_files. If the dex files have
@@ -208,9 +337,13 @@ bool ClassLoaderContext::OpenDexFiles(InstructionSet isa, const std::string& cla
// TODO(calin): Refine the dex opening interface to be able to tell if an archive contains
// no dex files. So that we can distinguish the real failures...
const ArtDexFileLoader dex_file_loader;
- for (ClassLoaderInfo& info : class_loader_chain_) {
- size_t opened_dex_files_index = info.opened_dex_files.size();
- for (const std::string& cp_elem : info.classpath) {
+ std::vector<ClassLoaderInfo*> work_list;
+ work_list.push_back(class_loader_chain_.get());
+ while (!work_list.empty()) {
+ ClassLoaderInfo* info = work_list.back();
+ work_list.pop_back();
+ size_t opened_dex_files_index = info->opened_dex_files.size();
+ for (const std::string& cp_elem : info->classpath) {
// If path is relative, append it to the provided base directory.
std::string location = cp_elem;
if (location[0] != '/' && !classpath_dir.empty()) {
@@ -225,7 +358,7 @@ bool ClassLoaderContext::OpenDexFiles(InstructionSet isa, const std::string& cla
Runtime::Current()->IsVerificationEnabled(),
/*verify_checksum=*/ true,
&error_msg,
- &info.opened_dex_files)) {
+ &info->opened_dex_files)) {
// If we fail to open the dex file because it's been stripped, try to open the dex file
// from its corresponding oat file.
// This could happen when we need to recompile a prebuilt whose dex code has been stripped.
@@ -237,10 +370,10 @@ bool ClassLoaderContext::OpenDexFiles(InstructionSet isa, const std::string& cla
std::vector<std::unique_ptr<const DexFile>> oat_dex_files;
if (oat_file != nullptr &&
OatFileAssistant::LoadDexFiles(*oat_file, location, &oat_dex_files)) {
- info.opened_oat_files.push_back(std::move(oat_file));
- info.opened_dex_files.insert(info.opened_dex_files.end(),
- std::make_move_iterator(oat_dex_files.begin()),
- std::make_move_iterator(oat_dex_files.end()));
+ info->opened_oat_files.push_back(std::move(oat_file));
+ info->opened_dex_files.insert(info->opened_dex_files.end(),
+ std::make_move_iterator(oat_dex_files.begin()),
+ std::make_move_iterator(oat_dex_files.end()));
} else {
LOG(WARNING) << "Could not open dex files from location: " << location;
dex_files_open_result_ = false;
@@ -257,14 +390,15 @@ bool ClassLoaderContext::OpenDexFiles(InstructionSet isa, const std::string& cla
// This will allow the context to VerifyClassLoaderContextMatch, which expects the multidex
// locations in the class paths.
// Note that this will also remove the paths that could not be opened.
- info.original_classpath = std::move(info.classpath);
- info.classpath.clear();
- info.checksums.clear();
- for (size_t k = opened_dex_files_index; k < info.opened_dex_files.size(); k++) {
- std::unique_ptr<const DexFile>& dex = info.opened_dex_files[k];
- info.classpath.push_back(dex->GetLocation());
- info.checksums.push_back(dex->GetLocationChecksum());
+ info->original_classpath = std::move(info->classpath);
+ info->classpath.clear();
+ info->checksums.clear();
+ for (size_t k = opened_dex_files_index; k < info->opened_dex_files.size(); k++) {
+ std::unique_ptr<const DexFile>& dex = info->opened_dex_files[k];
+ info->classpath.push_back(dex->GetLocation());
+ info->checksums.push_back(dex->GetLocationChecksum());
}
+ AddToWorkList(info, work_list);
}
return dex_files_open_result_;
@@ -275,24 +409,33 @@ bool ClassLoaderContext::RemoveLocationsFromClassPaths(
CHECK(!dex_files_open_attempted_)
<< "RemoveLocationsFromClasspaths cannot be call after OpenDexFiles";
+ if (class_loader_chain_ == nullptr) {
+ return false;
+ }
+
std::set<std::string> canonical_locations;
for (const std::string& location : locations) {
canonical_locations.insert(DexFileLoader::GetDexCanonicalLocation(location.c_str()));
}
bool removed_locations = false;
- for (ClassLoaderInfo& info : class_loader_chain_) {
- size_t initial_size = info.classpath.size();
+ std::vector<ClassLoaderInfo*> work_list;
+ work_list.push_back(class_loader_chain_.get());
+ while (!work_list.empty()) {
+ ClassLoaderInfo* info = work_list.back();
+ work_list.pop_back();
+ size_t initial_size = info->classpath.size();
auto kept_it = std::remove_if(
- info.classpath.begin(),
- info.classpath.end(),
+ info->classpath.begin(),
+ info->classpath.end(),
[canonical_locations](const std::string& location) {
return ContainsElement(canonical_locations,
DexFileLoader::GetDexCanonicalLocation(location.c_str()));
});
- info.classpath.erase(kept_it, info.classpath.end());
- if (initial_size != info.classpath.size()) {
+ info->classpath.erase(kept_it, info->classpath.end());
+ if (initial_size != info->classpath.size()) {
removed_locations = true;
}
+ AddToWorkList(info, work_list);
}
return removed_locations;
}
@@ -315,11 +458,11 @@ std::string ClassLoaderContext::EncodeContext(const std::string& base_dir,
}
if (stored_context != nullptr) {
- DCHECK_EQ(class_loader_chain_.size(), stored_context->class_loader_chain_.size());
+ DCHECK_EQ(GetParentChainSize(), stored_context->GetParentChainSize());
}
std::ostringstream out;
- if (class_loader_chain_.empty()) {
+ if (class_loader_chain_ == nullptr) {
// We can get in this situation if the context was created with a class path containing the
// source dex files which were later removed (happens during run-tests).
out << GetClassLoaderTypeName(kPathClassLoader)
@@ -328,60 +471,120 @@ std::string ClassLoaderContext::EncodeContext(const std::string& base_dir,
return out.str();
}
- for (size_t i = 0; i < class_loader_chain_.size(); i++) {
- const ClassLoaderInfo& info = class_loader_chain_[i];
- if (i > 0) {
- out << kClassLoaderSeparator;
+ EncodeContextInternal(
+ *class_loader_chain_,
+ base_dir,
+ for_dex2oat,
+ (stored_context == nullptr ? nullptr : stored_context->class_loader_chain_.get()),
+ out);
+ return out.str();
+}
+
+void ClassLoaderContext::EncodeContextInternal(const ClassLoaderInfo& info,
+ const std::string& base_dir,
+ bool for_dex2oat,
+ ClassLoaderInfo* stored_info,
+ std::ostringstream& out) const {
+ out << GetClassLoaderTypeName(info.type);
+ out << kClassLoaderOpeningMark;
+ std::set<std::string> seen_locations;
+ SafeMap<std::string, std::string> remap;
+ if (stored_info != nullptr) {
+ for (size_t k = 0; k < info.original_classpath.size(); ++k) {
+ // Note that we don't care if the same name appears twice.
+ remap.Put(info.original_classpath[k], stored_info->classpath[k]);
}
- out << GetClassLoaderTypeName(info.type);
- out << kClassLoaderOpeningMark;
- std::set<std::string> seen_locations;
- SafeMap<std::string, std::string> remap;
- if (stored_context != nullptr) {
- DCHECK_EQ(info.original_classpath.size(),
- stored_context->class_loader_chain_[i].classpath.size());
- for (size_t k = 0; k < info.original_classpath.size(); ++k) {
- // Note that we don't care if the same name appears twice.
- remap.Put(info.original_classpath[k], stored_context->class_loader_chain_[i].classpath[k]);
+ }
+ for (size_t k = 0; k < info.opened_dex_files.size(); k++) {
+ const std::unique_ptr<const DexFile>& dex_file = info.opened_dex_files[k];
+ if (for_dex2oat) {
+ // dex2oat only needs the base location. It cannot accept multidex locations.
+ // So ensure we only add each file once.
+ bool new_insert = seen_locations.insert(
+ DexFileLoader::GetBaseLocation(dex_file->GetLocation())).second;
+ if (!new_insert) {
+ continue;
}
}
- for (size_t k = 0; k < info.opened_dex_files.size(); k++) {
- const std::unique_ptr<const DexFile>& dex_file = info.opened_dex_files[k];
- if (for_dex2oat) {
- // dex2oat only needs the base location. It cannot accept multidex locations.
- // So ensure we only add each file once.
- bool new_insert = seen_locations.insert(
- DexFileLoader::GetBaseLocation(dex_file->GetLocation())).second;
- if (!new_insert) {
- continue;
- }
- }
- std::string location = dex_file->GetLocation();
- // If there is a stored class loader remap, fix up the multidex strings.
- if (!remap.empty()) {
- std::string base_dex_location = DexFileLoader::GetBaseLocation(location);
- auto it = remap.find(base_dex_location);
- CHECK(it != remap.end()) << base_dex_location;
- location = it->second + DexFileLoader::GetMultiDexSuffix(location);
- }
- if (k > 0) {
- out << kClasspathSeparator;
- }
- // Find paths that were relative and convert them back from absolute.
- if (!base_dir.empty() && location.substr(0, base_dir.length()) == base_dir) {
- out << location.substr(base_dir.length() + 1).c_str();
- } else {
- out << location.c_str();
- }
- // dex2oat does not need the checksums.
- if (!for_dex2oat) {
- out << kDexFileChecksumSeparator;
- out << dex_file->GetLocationChecksum();
+ std::string location = dex_file->GetLocation();
+ // If there is a stored class loader remap, fix up the multidex strings.
+ if (!remap.empty()) {
+ std::string base_dex_location = DexFileLoader::GetBaseLocation(location);
+ auto it = remap.find(base_dex_location);
+ CHECK(it != remap.end()) << base_dex_location;
+ location = it->second + DexFileLoader::GetMultiDexSuffix(location);
+ }
+ if (k > 0) {
+ out << kClasspathSeparator;
+ }
+ // Find paths that were relative and convert them back from absolute.
+ if (!base_dir.empty() && location.substr(0, base_dir.length()) == base_dir) {
+ out << location.substr(base_dir.length() + 1).c_str();
+ } else {
+ out << location.c_str();
+ }
+ // dex2oat does not need the checksums.
+ if (!for_dex2oat) {
+ out << kDexFileChecksumSeparator;
+ out << dex_file->GetLocationChecksum();
+ }
+ }
+ out << kClassLoaderClosingMark;
+
+ if (!info.shared_libraries.empty()) {
+ out << kClassLoaderSharedLibraryOpeningMark;
+ for (uint32_t i = 0; i < info.shared_libraries.size(); ++i) {
+ if (i > 0) {
+ out << kClassLoaderSharedLibrarySeparator;
}
+ EncodeContextInternal(
+ *info.shared_libraries[i].get(),
+ base_dir,
+ for_dex2oat,
+ (stored_info == nullptr ? nullptr : stored_info->shared_libraries[i].get()),
+ out);
}
- out << kClassLoaderClosingMark;
+ out << kClassLoaderSharedLibraryClosingMark;
}
- return out.str();
+ if (info.parent != nullptr) {
+ out << kClassLoaderSeparator;
+ EncodeContextInternal(
+ *info.parent.get(),
+ base_dir,
+ for_dex2oat,
+ (stored_info == nullptr ? nullptr : stored_info->parent.get()),
+ out);
+ }
+}
+
+// Returns the WellKnownClass for the given class loader type.
+static jclass GetClassLoaderClass(ClassLoaderContext::ClassLoaderType type) {
+ switch (type) {
+ case ClassLoaderContext::kPathClassLoader:
+ return WellKnownClasses::dalvik_system_PathClassLoader;
+ case ClassLoaderContext::kDelegateLastClassLoader:
+ return WellKnownClasses::dalvik_system_DelegateLastClassLoader;
+ case ClassLoaderContext::kInvalidClassLoader: break; // will fail after the switch.
+ }
+ LOG(FATAL) << "Invalid class loader type " << type;
+ UNREACHABLE();
+}
+
+static jobject CreateClassLoaderInternal(Thread* self,
+ const ClassLoaderContext::ClassLoaderInfo& info)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ CHECK(info.shared_libraries.empty()) << "Class loader shared library not implemented yet";
+ jobject parent = nullptr;
+ if (info.parent != nullptr) {
+ parent = CreateClassLoaderInternal(self, *info.parent.get());
+ }
+ std::vector<const DexFile*> class_path_files = MakeNonOwningPointerVector(
+ info.opened_dex_files);
+ return Runtime::Current()->GetClassLinker()->CreateWellKnownClassLoader(
+ self,
+ class_path_files,
+ GetClassLoaderClass(info.type),
+ parent);
}
jobject ClassLoaderContext::CreateClassLoader(
@@ -393,22 +596,14 @@ jobject ClassLoaderContext::CreateClassLoader(
ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
- if (class_loader_chain_.empty()) {
+ if (class_loader_chain_ == nullptr) {
return class_linker->CreatePathClassLoader(self, compilation_sources);
}
- // Create the class loaders starting from the top most parent (the one on the last position
- // in the chain) but omit the first class loader which will contain the compilation_sources and
- // needs special handling.
- jobject current_parent = nullptr; // the starting parent is the BootClassLoader.
- for (size_t i = class_loader_chain_.size() - 1; i > 0; i--) {
- std::vector<const DexFile*> class_path_files = MakeNonOwningPointerVector(
- class_loader_chain_[i].opened_dex_files);
- current_parent = class_linker->CreateWellKnownClassLoader(
- self,
- class_path_files,
- GetClassLoaderClass(class_loader_chain_[i].type),
- current_parent);
+ // Create the class loader of the parent.
+ jobject parent = nullptr;
+ if (class_loader_chain_->parent != nullptr) {
+ parent = CreateClassLoaderInternal(self, *class_loader_chain_->parent.get());
}
// We set up all the parents. Move on to create the first class loader.
@@ -416,26 +611,34 @@ jobject ClassLoaderContext::CreateClassLoader(
// we need to resolve classes from it the classpath elements come first.
std::vector<const DexFile*> first_class_loader_classpath = MakeNonOwningPointerVector(
- class_loader_chain_[0].opened_dex_files);
+ class_loader_chain_->opened_dex_files);
first_class_loader_classpath.insert(first_class_loader_classpath.end(),
- compilation_sources.begin(),
- compilation_sources.end());
+ compilation_sources.begin(),
+ compilation_sources.end());
return class_linker->CreateWellKnownClassLoader(
self,
first_class_loader_classpath,
- GetClassLoaderClass(class_loader_chain_[0].type),
- current_parent);
+ GetClassLoaderClass(class_loader_chain_->type),
+ parent);
}
std::vector<const DexFile*> ClassLoaderContext::FlattenOpenedDexFiles() const {
CheckDexFilesOpened("FlattenOpenedDexFiles");
std::vector<const DexFile*> result;
- for (const ClassLoaderInfo& info : class_loader_chain_) {
- for (const std::unique_ptr<const DexFile>& dex_file : info.opened_dex_files) {
+ if (class_loader_chain_ == nullptr) {
+ return result;
+ }
+ std::vector<ClassLoaderInfo*> work_list;
+ work_list.push_back(class_loader_chain_.get());
+ while (!work_list.empty()) {
+ ClassLoaderInfo* info = work_list.back();
+ work_list.pop_back();
+ for (const std::unique_ptr<const DexFile>& dex_file : info->opened_dex_files) {
result.push_back(dex_file.get());
}
+ AddToWorkList(info, work_list);
}
return result;
}
@@ -632,12 +835,21 @@ bool ClassLoaderContext::AddInfoToContextFromClassLoader(
GetDexFilesFromDexElementsArray(soa, dex_elements, &dex_files_loaded);
}
- class_loader_chain_.push_back(ClassLoaderContext::ClassLoaderInfo(type));
- ClassLoaderInfo& info = class_loader_chain_.back();
+ ClassLoaderInfo* info = new ClassLoaderContext::ClassLoaderInfo(type);
+ if (class_loader_chain_ == nullptr) {
+ class_loader_chain_.reset(info);
+ } else {
+ ClassLoaderInfo* child = class_loader_chain_.get();
+ while (child->parent != nullptr) {
+ child = child->parent.get();
+ }
+ child->parent.reset(info);
+ }
+
for (const DexFile* dex_file : dex_files_loaded) {
- info.classpath.push_back(dex_file->GetLocation());
- info.checksums.push_back(dex_file->GetLocationChecksum());
- info.opened_dex_files.emplace_back(dex_file);
+ info->classpath.push_back(dex_file->GetLocation());
+ info->checksums.push_back(dex_file->GetLocationChecksum());
+ info->opened_dex_files.emplace_back(dex_file);
}
// We created the ClassLoaderInfo for the current loader. Move on to its parent.
@@ -696,7 +908,7 @@ ClassLoaderContext::VerificationResult ClassLoaderContext::VerifyClassLoaderCont
// collision check.
if (expected_context.special_shared_library_) {
// Special case where we are the only entry in the class path.
- if (class_loader_chain_.size() == 1 && class_loader_chain_[0].classpath.size() == 0) {
+ if (class_loader_chain_->parent == nullptr && class_loader_chain_->classpath.size() == 0) {
return VerificationResult::kVerifies;
}
return VerificationResult::kForcedToSkipChecks;
@@ -704,41 +916,43 @@ ClassLoaderContext::VerificationResult ClassLoaderContext::VerifyClassLoaderCont
return VerificationResult::kForcedToSkipChecks;
}
- if (expected_context.class_loader_chain_.size() != class_loader_chain_.size()) {
- LOG(WARNING) << "ClassLoaderContext size mismatch. expected="
- << expected_context.class_loader_chain_.size()
- << ", actual=" << class_loader_chain_.size()
- << " (" << context_spec << " | " << EncodeContextForOatFile("") << ")";
+ ClassLoaderInfo* info = class_loader_chain_.get();
+ ClassLoaderInfo* expected = expected_context.class_loader_chain_.get();
+ CHECK(info != nullptr);
+ CHECK(expected != nullptr);
+ if (!ClassLoaderInfoMatch(*info, *expected, context_spec, verify_names, verify_checksums)) {
return VerificationResult::kMismatch;
}
+ return VerificationResult::kVerifies;
+}
- for (size_t i = 0; i < class_loader_chain_.size(); i++) {
- const ClassLoaderInfo& info = class_loader_chain_[i];
- const ClassLoaderInfo& expected_info = expected_context.class_loader_chain_[i];
- if (info.type != expected_info.type) {
- LOG(WARNING) << "ClassLoaderContext type mismatch for position " << i
- << ". expected=" << GetClassLoaderTypeName(expected_info.type)
- << ", found=" << GetClassLoaderTypeName(info.type)
+bool ClassLoaderContext::ClassLoaderInfoMatch(
+ const ClassLoaderInfo& info,
+ const ClassLoaderInfo& expected_info,
+ const std::string& context_spec,
+ bool verify_names,
+ bool verify_checksums) const {
+ if (info.type != expected_info.type) {
+ LOG(WARNING) << "ClassLoaderContext type mismatch"
+ << ". expected=" << GetClassLoaderTypeName(expected_info.type)
+ << ", found=" << GetClassLoaderTypeName(info.type)
+ << " (" << context_spec << " | " << EncodeContextForOatFile("") << ")";
+ return false;
+ }
+ if (info.classpath.size() != expected_info.classpath.size()) {
+ LOG(WARNING) << "ClassLoaderContext classpath size mismatch"
+ << ". expected=" << expected_info.classpath.size()
+ << ", found=" << info.classpath.size()
<< " (" << context_spec << " | " << EncodeContextForOatFile("") << ")";
- return VerificationResult::kMismatch;
- }
- if (info.classpath.size() != expected_info.classpath.size()) {
- LOG(WARNING) << "ClassLoaderContext classpath size mismatch for position " << i
- << ". expected=" << expected_info.classpath.size()
- << ", found=" << info.classpath.size()
- << " (" << context_spec << " | " << EncodeContextForOatFile("") << ")";
- return VerificationResult::kMismatch;
- }
-
- if (verify_checksums) {
- DCHECK_EQ(info.classpath.size(), info.checksums.size());
- DCHECK_EQ(expected_info.classpath.size(), expected_info.checksums.size());
- }
+ return false;
+ }
- if (!verify_names) {
- continue;
- }
+ if (verify_checksums) {
+ DCHECK_EQ(info.classpath.size(), info.checksums.size());
+ DCHECK_EQ(expected_info.classpath.size(), expected_info.checksums.size());
+ }
+ if (verify_names) {
for (size_t k = 0; k < info.classpath.size(); k++) {
// Compute the dex location that must be compared.
// We shouldn't do a naive comparison `info.classpath[k] == expected_info.classpath[k]`
@@ -778,34 +992,58 @@ ClassLoaderContext::VerificationResult ClassLoaderContext::VerifyClassLoaderCont
// Compare the locations.
if (dex_name != expected_dex_name) {
- LOG(WARNING) << "ClassLoaderContext classpath element mismatch for position " << i
+ LOG(WARNING) << "ClassLoaderContext classpath element mismatch"
<< ". expected=" << expected_info.classpath[k]
<< ", found=" << info.classpath[k]
<< " (" << context_spec << " | " << EncodeContextForOatFile("") << ")";
- return VerificationResult::kMismatch;
+ return false;
}
// Compare the checksums.
if (info.checksums[k] != expected_info.checksums[k]) {
- LOG(WARNING) << "ClassLoaderContext classpath element checksum mismatch for position " << i
+ LOG(WARNING) << "ClassLoaderContext classpath element checksum mismatch"
<< ". expected=" << expected_info.checksums[k]
<< ", found=" << info.checksums[k]
<< " (" << context_spec << " | " << EncodeContextForOatFile("") << ")";
- return VerificationResult::kMismatch;
+ return false;
}
}
}
- return VerificationResult::kVerifies;
-}
-jclass ClassLoaderContext::GetClassLoaderClass(ClassLoaderType type) {
- switch (type) {
- case kPathClassLoader: return WellKnownClasses::dalvik_system_PathClassLoader;
- case kDelegateLastClassLoader: return WellKnownClasses::dalvik_system_DelegateLastClassLoader;
- case kInvalidClassLoader: break; // will fail after the switch.
+ if (info.shared_libraries.size() != expected_info.shared_libraries.size()) {
+ LOG(WARNING) << "ClassLoaderContext shared library size mismatch. "
+ << "Expected=" << expected_info.classpath.size()
+ << ", found=" << info.classpath.size()
+ << " (" << context_spec << " | " << EncodeContextForOatFile("") << ")";
+ return false;
+ }
+ for (size_t i = 0; i < info.shared_libraries.size(); ++i) {
+ if (!ClassLoaderInfoMatch(*info.shared_libraries[i].get(),
+ *expected_info.shared_libraries[i].get(),
+ context_spec,
+ verify_names,
+ verify_checksums)) {
+ return false;
+ }
+ }
+ if (info.parent.get() == nullptr) {
+ if (expected_info.parent.get() != nullptr) {
+ LOG(WARNING) << "ClassLoaderContext parent mismatch. "
+ << " (" << context_spec << " | " << EncodeContextForOatFile("") << ")";
+ return false;
+ }
+ return true;
+ } else if (expected_info.parent.get() == nullptr) {
+ LOG(WARNING) << "ClassLoaderContext parent mismatch. "
+ << " (" << context_spec << " | " << EncodeContextForOatFile("") << ")";
+ return false;
+ } else {
+ return ClassLoaderInfoMatch(*info.parent.get(),
+ *expected_info.parent.get(),
+ context_spec,
+ verify_names,
+ verify_checksums);
}
- LOG(FATAL) << "Invalid class loader type " << type;
- UNREACHABLE();
}
} // namespace art
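
The parser above splits the spec at top-level ';' separators while treating everything inside a '{...}' shared-library group as opaque, by counting opening and closing markers. A minimal standalone sketch of the same idea, using an invented SplitTopLevel helper rather than the ART code:

// Standalone sketch of the marker-counting split used by ParseInternal.
#include <iostream>
#include <string>
#include <vector>

std::vector<std::string> SplitTopLevel(const std::string& spec) {
  std::vector<std::string> result;
  std::string current;
  int depth = 0;  // Nesting depth of '{' ... '}' shared-library groups.
  for (char c : spec) {
    if (c == '{') {
      ++depth;
    } else if (c == '}') {
      --depth;
    }
    if (c == ';' && depth == 0) {
      result.push_back(current);  // Top-level separator: finish this loader spec.
      current.clear();
    } else {
      current.push_back(c);
    }
  }
  if (!current.empty()) {
    result.push_back(current);
  }
  return result;
}

int main() {
  for (const std::string& s :
       SplitTopLevel("PCL[a.dex:b.dex]{PCL[s1.dex]#PCL[s2.dex]};DLC[c.dex]")) {
    std::cout << s << "\n";
  }
  // Prints:
  //   PCL[a.dex:b.dex]{PCL[s1.dex]#PCL[s2.dex]}
  //   DLC[c.dex]
}
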
diff --git a/runtime/class_loader_context.h b/runtime/class_loader_context.h
index a4268aa09a..37cef8196d 100644
--- a/runtime/class_loader_context.h
+++ b/runtime/class_loader_context.h
@@ -154,10 +154,11 @@ class ClassLoaderContext {
// This will return a context with a single and empty PathClassLoader.
static std::unique_ptr<ClassLoaderContext> Default();
- private:
struct ClassLoaderInfo {
// The type of this class loader.
ClassLoaderType type;
+ // Shared libraries this context has.
+ std::vector<std::unique_ptr<ClassLoaderInfo>> shared_libraries;
// The list of class path elements that this loader loads.
// Note that this list may contain relative paths.
std::vector<std::string> classpath;
@@ -171,13 +172,35 @@ class ClassLoaderContext {
// After OpenDexFiles, in case some of the dex files were opened from their oat files
// this holds the list of opened oat files.
std::vector<std::unique_ptr<OatFile>> opened_oat_files;
+ // The parent class loader.
+ std::unique_ptr<ClassLoaderInfo> parent;
explicit ClassLoaderInfo(ClassLoaderType cl_type) : type(cl_type) {}
};
+ private:
// Creates an empty context (with no class loaders).
ClassLoaderContext();
+ // Returns the class loader in the parent chain at depth `index`.
+ ClassLoaderInfo* GetParent(size_t index) const {
+ ClassLoaderInfo* result = class_loader_chain_.get();
+ while ((result != nullptr) && (index-- != 0)) {
+ result = result->parent.get();
+ }
+ return result;
+ }
+
+ size_t GetParentChainSize() const {
+ size_t result = 0;
+ ClassLoaderInfo* info = class_loader_chain_.get();
+ while (info != nullptr) {
+ ++result;
+ info = info->parent.get();
+ }
+ return result;
+ }
+
// Constructs an empty context.
// `owns_the_dex_files` specifies whether or not the context will own the opened dex files
// present in the class loader chain. If `owns_the_dex_files` is true then OpenDexFiles cannot
@@ -188,13 +211,13 @@ class ClassLoaderContext {
// Reads the class loader spec in place and returns true if the spec is valid and the
// compilation context was constructed.
bool Parse(const std::string& spec, bool parse_checksums = false);
+ ClassLoaderInfo* ParseInternal(const std::string& spec, bool parse_checksums);
- // Attempts to parse a single class loader spec for the given class_loader_type.
- // If successful the class loader spec will be added to the chain.
- // Returns whether or not the operation was successful.
- bool ParseClassLoaderSpec(const std::string& class_loader_spec,
- ClassLoaderType class_loader_type,
- bool parse_checksums = false);
+ // Attempts to parse a single class loader spec.
+ // Returns the ClassLoaderInfo abstraction for this spec, or null if it cannot be parsed.
+ std::unique_ptr<ClassLoaderInfo> ParseClassLoaderSpec(
+ const std::string& class_loader_spec,
+ bool parse_checksums = false);
// CHECKs that the dex files were opened (OpenDexFiles was called and set dex_files_open_result_
// to true). Aborts if not. The `calling_method` is used in the log message to identify the source
@@ -219,6 +242,20 @@ class ClassLoaderContext {
bool for_dex2oat,
ClassLoaderContext* stored_context) const;
+ // Internal version of `EncodeContext`, which will be called recursively
+ // on the parent and shared libraries.
+ void EncodeContextInternal(const ClassLoaderInfo& info,
+ const std::string& base_dir,
+ bool for_dex2oat,
+ ClassLoaderInfo* stored_info,
+ std::ostringstream& out) const;
+
+ bool ClassLoaderInfoMatch(const ClassLoaderInfo& info,
+ const ClassLoaderInfo& expected_info,
+ const std::string& context_spec,
+ bool verify_names,
+ bool verify_checksums) const;
+
// Extracts the class loader type from the given spec.
// Return ClassLoaderContext::kInvalidClassLoader if the class loader type is not
// recognized.
@@ -228,13 +265,8 @@ class ClassLoaderContext {
// The returned format can be used when parsing a context spec.
static const char* GetClassLoaderTypeName(ClassLoaderType type);
- // Returns the WellKnownClass for the given class loader type.
- static jclass GetClassLoaderClass(ClassLoaderType type);
-
- // The class loader chain represented as a vector.
- // The parent of class_loader_chain_[i] is class_loader_chain_[i++].
- // The parent of the last element is assumed to be the boot class loader.
- std::vector<ClassLoaderInfo> class_loader_chain_;
+ // The class loader chain.
+ std::unique_ptr<ClassLoaderInfo> class_loader_chain_;
// Whether or not the class loader context should be ignored at runtime when loading the oat
// files. When true, dex2oat will use OatFile::kSpecialSharedLibrary as the classpath key in
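
With this change the context is no longer a flat vector: each ClassLoaderInfo owns its parent and a list of shared-library children, and the .cc file walks that tree with an explicit work list. A small self-contained sketch of the traversal, with an invented Node type standing in for ClassLoaderInfo:

// Standalone sketch of the parent/shared-library tree and its work-list
// traversal (mirrors AddToWorkList; not the ART types).
#include <iostream>
#include <memory>
#include <vector>

struct Node {
  std::vector<std::unique_ptr<Node>> shared_libraries;
  std::unique_ptr<Node> parent;
};

size_t CountNodes(Node* root) {
  size_t count = 0;
  std::vector<Node*> work_list;
  if (root != nullptr) {
    work_list.push_back(root);
  }
  while (!work_list.empty()) {
    Node* info = work_list.back();
    work_list.pop_back();
    ++count;
    // Push the parent and every shared library, exactly like AddToWorkList.
    if (info->parent != nullptr) {
      work_list.push_back(info->parent.get());
    }
    for (const std::unique_ptr<Node>& lib : info->shared_libraries) {
      work_list.push_back(lib.get());
    }
  }
  return count;
}

int main() {
  auto root = std::make_unique<Node>();
  root->parent = std::make_unique<Node>();
  root->shared_libraries.push_back(std::make_unique<Node>());
  std::cout << CountNodes(root.get()) << "\n";  // 3
}
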
diff --git a/runtime/class_loader_context_test.cc b/runtime/class_loader_context_test.cc
index ea624f1e9c..cb3dc6506f 100644
--- a/runtime/class_loader_context_test.cc
+++ b/runtime/class_loader_context_test.cc
@@ -40,7 +40,7 @@ class ClassLoaderContextTest : public CommonRuntimeTest {
public:
void VerifyContextSize(ClassLoaderContext* context, size_t expected_size) {
ASSERT_TRUE(context != nullptr);
- ASSERT_EQ(expected_size, context->class_loader_chain_.size());
+ ASSERT_EQ(expected_size, context->GetParentChainSize());
}
void VerifyClassLoaderPCL(ClassLoaderContext* context,
@@ -57,6 +57,33 @@ class ClassLoaderContextTest : public CommonRuntimeTest {
context, index, ClassLoaderContext::kDelegateLastClassLoader, classpath);
}
+ void VerifyClassLoaderSharedLibraryPCL(ClassLoaderContext* context,
+ size_t loader_index,
+ size_t shared_library_index,
+ const std::string& classpath) {
+ VerifyClassLoaderInfoSL(
+ context, loader_index, shared_library_index, ClassLoaderContext::kPathClassLoader,
+ classpath);
+ }
+
+ void VerifySharedLibrariesSize(ClassLoaderContext* context,
+ size_t loader_index,
+ size_t expected_size) {
+ ASSERT_TRUE(context != nullptr);
+ ASSERT_GT(context->GetParentChainSize(), loader_index);
+ const ClassLoaderContext::ClassLoaderInfo& info = *context->GetParent(loader_index);
+ ASSERT_EQ(info.shared_libraries.size(), expected_size);
+ }
+
+ void VerifyClassLoaderSharedLibraryDLC(ClassLoaderContext* context,
+ size_t loader_index,
+ size_t shared_library_index,
+ const std::string& classpath) {
+ VerifyClassLoaderInfoSL(
+ context, loader_index, shared_library_index, ClassLoaderContext::kDelegateLastClassLoader,
+ classpath);
+ }
+
void VerifyClassLoaderPCLFromTestDex(ClassLoaderContext* context,
size_t index,
const std::string& test_name) {
@@ -91,7 +118,7 @@ class ClassLoaderContextTest : public CommonRuntimeTest {
ASSERT_TRUE(context != nullptr);
ASSERT_TRUE(context->dex_files_open_attempted_);
ASSERT_TRUE(context->dex_files_open_result_);
- ClassLoaderContext::ClassLoaderInfo& info = context->class_loader_chain_[index];
+ ClassLoaderContext::ClassLoaderInfo& info = *context->GetParent(index);
ASSERT_EQ(all_dex_files->size(), info.classpath.size());
ASSERT_EQ(all_dex_files->size(), info.opened_dex_files.size());
size_t cur_open_dex_index = 0;
@@ -168,14 +195,31 @@ class ClassLoaderContextTest : public CommonRuntimeTest {
ClassLoaderContext::ClassLoaderType type,
const std::string& classpath) {
ASSERT_TRUE(context != nullptr);
- ASSERT_GT(context->class_loader_chain_.size(), index);
- ClassLoaderContext::ClassLoaderInfo& info = context->class_loader_chain_[index];
+ ASSERT_GT(context->GetParentChainSize(), index);
+ ClassLoaderContext::ClassLoaderInfo& info = *context->GetParent(index);
ASSERT_EQ(type, info.type);
std::vector<std::string> expected_classpath;
Split(classpath, ':', &expected_classpath);
ASSERT_EQ(expected_classpath, info.classpath);
}
+ void VerifyClassLoaderInfoSL(ClassLoaderContext* context,
+ size_t loader_index,
+ size_t shared_library_index,
+ ClassLoaderContext::ClassLoaderType type,
+ const std::string& classpath) {
+ ASSERT_TRUE(context != nullptr);
+ ASSERT_GT(context->GetParentChainSize(), loader_index);
+ const ClassLoaderContext::ClassLoaderInfo& info = *context->GetParent(loader_index);
+ ASSERT_GT(info.shared_libraries.size(), shared_library_index);
+ const ClassLoaderContext::ClassLoaderInfo& sl =
+ *info.shared_libraries[shared_library_index].get();
+ ASSERT_EQ(type, sl.type);
+ std::vector<std::string> expected_classpath;
+ Split(classpath, ':', &expected_classpath);
+ ASSERT_EQ(expected_classpath, sl.classpath);
+ }
+
void VerifyClassLoaderFromTestDex(ClassLoaderContext* context,
size_t index,
ClassLoaderContext::ClassLoaderType type,
@@ -223,6 +267,23 @@ TEST_F(ClassLoaderContextTest, ParseValidContextChain) {
VerifyClassLoaderPCL(context.get(), 2, "e.dex");
}
+TEST_F(ClassLoaderContextTest, ParseSharedLibraries) {
+ std::unique_ptr<ClassLoaderContext> context = ClassLoaderContext::Create(
+ "PCL[a.dex:b.dex]{PCL[s1.dex]#PCL[s2.dex:s3.dex]};DLC[c.dex:d.dex]{DLC[s4.dex]}");
+ VerifyContextSize(context.get(), 2);
+ VerifyClassLoaderSharedLibraryPCL(context.get(), 0, 0, "s1.dex");
+ VerifyClassLoaderSharedLibraryPCL(context.get(), 0, 1, "s2.dex:s3.dex");
+ VerifyClassLoaderDLC(context.get(), 1, "c.dex:d.dex");
+ VerifyClassLoaderSharedLibraryDLC(context.get(), 1, 0, "s4.dex");
+}
+
+TEST_F(ClassLoaderContextTest, ParseEnclosingSharedLibraries) {
+ std::unique_ptr<ClassLoaderContext> context = ClassLoaderContext::Create(
+ "PCL[a.dex:b.dex]{PCL[s1.dex]{PCL[s2.dex:s3.dex];PCL[s4.dex]}}");
+ VerifyContextSize(context.get(), 1);
+ VerifyClassLoaderSharedLibraryPCL(context.get(), 0, 0, "s1.dex");
+}
+
TEST_F(ClassLoaderContextTest, ParseValidEmptyContextDLC) {
std::unique_ptr<ClassLoaderContext> context =
ClassLoaderContext::Create("DLC[]");
@@ -230,6 +291,13 @@ TEST_F(ClassLoaderContextTest, ParseValidEmptyContextDLC) {
VerifyClassLoaderDLC(context.get(), 0, "");
}
+TEST_F(ClassLoaderContextTest, ParseValidEmptyContextSharedLibrary) {
+ std::unique_ptr<ClassLoaderContext> context =
+ ClassLoaderContext::Create("DLC[]{}");
+ VerifyContextSize(context.get(), 1);
+ VerifySharedLibrariesSize(context.get(), 0, 0);
+}
+
TEST_F(ClassLoaderContextTest, ParseValidContextSpecialSymbol) {
std::unique_ptr<ClassLoaderContext> context =
ClassLoaderContext::Create(OatFile::kSpecialSharedLibrary);
@@ -243,6 +311,11 @@ TEST_F(ClassLoaderContextTest, ParseInvalidValidContexts) {
ASSERT_TRUE(nullptr == ClassLoaderContext::Create("PCLa.dex]"));
ASSERT_TRUE(nullptr == ClassLoaderContext::Create("PCL{a.dex}"));
ASSERT_TRUE(nullptr == ClassLoaderContext::Create("PCL[a.dex];DLC[b.dex"));
+ ASSERT_TRUE(nullptr == ClassLoaderContext::Create("PCL[a.dex]{ABC};DLC[b.dex"));
+ ASSERT_TRUE(nullptr == ClassLoaderContext::Create("PCL[a.dex]{};DLC[b.dex"));
+ ASSERT_TRUE(nullptr == ClassLoaderContext::Create("DLC[s4.dex]}"));
+ ASSERT_TRUE(nullptr == ClassLoaderContext::Create("DLC[s4.dex]{"));
+ ASSERT_TRUE(nullptr == ClassLoaderContext::Create("DLC{DLC[s4.dex]}"));
}
TEST_F(ClassLoaderContextTest, OpenInvalidDexFiles) {
@@ -662,6 +735,62 @@ TEST_F(ClassLoaderContextTest, VerifyClassLoaderContextMatch) {
ClassLoaderContext::VerificationResult::kMismatch);
}
+TEST_F(ClassLoaderContextTest, VerifyClassLoaderContextMatchWithSL) {
+ std::string context_spec =
+ "PCL[a.dex*123:b.dex*456]{PCL[d.dex*321];PCL[e.dex*654]#PCL[f.dex*098:g.dex*999]}"
+ ";DLC[c.dex*890]";
+ std::unique_ptr<ClassLoaderContext> context = ParseContextWithChecksums(context_spec);
+ // Pretend that we successfully opened the dex files to pass the DCHECKs.
+ // (as it's much easier to test all the corner cases without relying on actual dex files).
+ PretendContextOpenedDexFiles(context.get());
+
+ VerifyContextSize(context.get(), 2);
+ VerifyClassLoaderPCL(context.get(), 0, "a.dex:b.dex");
+ VerifyClassLoaderDLC(context.get(), 1, "c.dex");
+ VerifyClassLoaderSharedLibraryPCL(context.get(), 0, 0, "d.dex");
+ VerifyClassLoaderSharedLibraryPCL(context.get(), 0, 1, "f.dex:g.dex");
+
+ ASSERT_EQ(context->VerifyClassLoaderContextMatch(context_spec),
+ ClassLoaderContext::VerificationResult::kVerifies);
+
+ std::string wrong_class_loader_type =
+ "PCL[a.dex*123:b.dex*456]{DLC[d.dex*321];PCL[e.dex*654]#PCL[f.dex*098:g.dex*999]}"
+ ";DLC[c.dex*890]";
+ ASSERT_EQ(context->VerifyClassLoaderContextMatch(wrong_class_loader_type),
+ ClassLoaderContext::VerificationResult::kMismatch);
+
+ std::string wrong_class_loader_order =
+ "PCL[a.dex*123:b.dex*456]{PCL[f.dex#098:g.dex#999}#PCL[d.dex*321];PCL[e.dex*654]}"
+ ";DLC[c.dex*890]";
+ ASSERT_EQ(context->VerifyClassLoaderContextMatch(wrong_class_loader_order),
+ ClassLoaderContext::VerificationResult::kMismatch);
+
+ std::string wrong_classpath_order =
+ "PCL[a.dex*123:b.dex*456]{PCL[d.dex*321];PCL[e.dex*654]#PCL[g.dex*999:f.dex*098]}"
+ ";DLC[c.dex*890]";
+ ASSERT_EQ(context->VerifyClassLoaderContextMatch(wrong_classpath_order),
+ ClassLoaderContext::VerificationResult::kMismatch);
+
+ std::string wrong_checksum =
+ "PCL[a.dex*123:b.dex*456]{PCL[d.dex*333];PCL[e.dex*654]#PCL[g.dex*999:f.dex*098]}"
+ ";DLC[c.dex*890]";
+ ASSERT_EQ(context->VerifyClassLoaderContextMatch(wrong_checksum),
+ ClassLoaderContext::VerificationResult::kMismatch);
+
+ std::string wrong_extra_class_loader =
+ "PCL[a.dex*123:b.dex*456]"
+ "{PCL[d.dex*321];PCL[e.dex*654]#PCL[f.dex*098:g.dex*999];PCL[i.dex#444]}"
+ ";DLC[c.dex*890]";
+ ASSERT_EQ(context->VerifyClassLoaderContextMatch(wrong_extra_class_loader),
+ ClassLoaderContext::VerificationResult::kMismatch);
+
+ std::string wrong_extra_classpath =
+ "PCL[a.dex*123:b.dex*456]{PCL[d.dex*321:i.dex#444];PCL[e.dex*654]#PCL[f.dex*098:g.dex*999]}"
+ ";DLC[c.dex*890]";
+ ASSERT_EQ(context->VerifyClassLoaderContextMatch(wrong_extra_classpath),
+ ClassLoaderContext::VerificationResult::kMismatch);
+}
+
TEST_F(ClassLoaderContextTest, VerifyClassLoaderContextMatchAfterEncoding) {
jobject class_loader_a = LoadDexInPathClassLoader("ForClassLoaderA", nullptr);
jobject class_loader_b = LoadDexInDelegateLastClassLoader("ForClassLoaderB", class_loader_a);
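
The encoded strings exercised by these tests follow the shape `type[classpath]{sharedLib#sharedLib};parent...`. A compact standalone sketch that produces that shape for a toy tree (invented Loader struct and Encode function; not the ART encoder):

// Standalone sketch of the textual encoding checked by the tests above.
#include <iostream>
#include <memory>
#include <sstream>
#include <string>
#include <vector>

struct Loader {
  std::string type;        // "PCL" or "DLC".
  std::string classpath;   // Already joined with ':' for simplicity.
  std::vector<std::unique_ptr<Loader>> shared_libraries;
  std::unique_ptr<Loader> parent;
};

void Encode(const Loader& info, std::ostringstream& out) {
  out << info.type << '[' << info.classpath << ']';
  if (!info.shared_libraries.empty()) {
    out << '{';
    for (size_t i = 0; i < info.shared_libraries.size(); ++i) {
      if (i > 0) {
        out << '#';
      }
      Encode(*info.shared_libraries[i], out);
    }
    out << '}';
  }
  if (info.parent != nullptr) {
    out << ';';
    Encode(*info.parent, out);
  }
}

int main() {
  Loader root{"PCL", "a.dex:b.dex", {}, nullptr};
  root.shared_libraries.push_back(
      std::unique_ptr<Loader>(new Loader{"PCL", "s1.dex", {}, nullptr}));
  root.parent.reset(new Loader{"DLC", "c.dex", {}, nullptr});
  std::ostringstream out;
  Encode(root, out);
  std::cout << out.str() << "\n";  // PCL[a.dex:b.dex]{PCL[s1.dex]};DLC[c.dex]
}
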
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index e0bbf43622..26a8d1310b 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -1658,7 +1658,9 @@ inline void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
<< " type=" << to_ref->PrettyTypeOf()
<< " young_gen=" << std::boolalpha << young_gen_ << std::noboolalpha
<< " space=" << heap_->DumpSpaceNameFromAddress(to_ref)
- << " region_type=" << rtype;
+ << " region_type=" << rtype
+ // TODO: Temporary; remove this when this is no longer needed (b/116087961).
+ << " runtime->sentinel=" << Runtime::Current()->GetSentinel().Read<kWithoutReadBarrier>();
}
bool add_to_live_bytes = false;
// Invariant: There should be no object from a newly-allocated
@@ -1702,7 +1704,9 @@ inline void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
<< " type=" << to_ref->PrettyTypeOf()
<< " young_gen=" << std::boolalpha << young_gen_ << std::noboolalpha
<< " space=" << heap_->DumpSpaceNameFromAddress(to_ref)
- << " region_type=" << rtype;
+ << " region_type=" << rtype
+ // TODO: Temporary; remove this when this is no longer needed (b/116087961).
+ << " runtime->sentinel=" << Runtime::Current()->GetSentinel().Read<kWithoutReadBarrier>();
}
#ifdef USE_BAKER_OR_BROOKS_READ_BARRIER
mirror::Object* referent = nullptr;
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 96a2cea39f..b46abfbf6e 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -1241,7 +1241,7 @@ class ImageSpace::Loader {
for (GcRoot<mirror::String>& root : strings) {
root = GcRoot<mirror::String>(fixup_adapter(root.Read<kWithoutReadBarrier>()));
}
- });
+ }, /*is_boot_image=*/ false);
}
}
if (VLOG_IS_ON(image)) {
diff --git a/runtime/handle.h b/runtime/handle.h
index 18e503d1de..b13c43ef34 100644
--- a/runtime/handle.h
+++ b/runtime/handle.h
@@ -62,8 +62,9 @@ class Handle : public ValueObject {
return down_cast<T*>(reference_->AsMirrorPtr());
}
- ALWAYS_INLINE bool IsNull() const REQUIRES_SHARED(Locks::mutator_lock_) {
- return Get() == nullptr;
+ ALWAYS_INLINE bool IsNull() const {
+ // It's safe to null-check it without a read barrier.
+ return reference_->IsNull();
}
ALWAYS_INLINE jobject ToJObject() const REQUIRES_SHARED(Locks::mutator_lock_) {
diff --git a/runtime/image.cc b/runtime/image.cc
index 376742afbc..59ac283779 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -26,7 +26,7 @@
namespace art {
const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '6', '6', '\0' }; // Add metadata section.
+const uint8_t ImageHeader::kImageVersion[] = { '0', '6', '7', '\0' }; // Added CRC32 intrinsic.
ImageHeader::ImageHeader(uint32_t image_begin,
uint32_t image_size,
diff --git a/runtime/intern_table-inl.h b/runtime/intern_table-inl.h
index 8c7fb42952..6fc53e9f20 100644
--- a/runtime/intern_table-inl.h
+++ b/runtime/intern_table-inl.h
@@ -29,26 +29,34 @@ inline void InternTable::AddImageStringsToTable(gc::space::ImageSpace* image_spa
const Visitor& visitor) {
DCHECK(image_space != nullptr);
// Only add if we have the interned strings section.
- const ImageSection& section = image_space->GetImageHeader().GetInternedStringsSection();
+ const ImageHeader& header = image_space->GetImageHeader();
+ const ImageSection& section = header.GetInternedStringsSection();
if (section.Size() > 0) {
- AddTableFromMemory(image_space->Begin() + section.Offset(), visitor);
+ AddTableFromMemory(image_space->Begin() + section.Offset(), visitor, !header.IsAppImage());
}
}
template <typename Visitor>
-inline size_t InternTable::AddTableFromMemory(const uint8_t* ptr, const Visitor& visitor) {
+inline size_t InternTable::AddTableFromMemory(const uint8_t* ptr,
+ const Visitor& visitor,
+ bool is_boot_image) {
size_t read_count = 0;
UnorderedSet set(ptr, /*make copy*/false, &read_count);
- // Visit the unordered set, may remove elements.
- visitor(set);
- if (!set.empty()) {
+ {
+ // Hold the lock while calling the visitor to prevent possible race
+ // conditions with another thread adding intern strings.
MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
- strong_interns_.AddInternStrings(std::move(set));
+ // Visit the unordered set, may remove elements.
+ visitor(set);
+ if (!set.empty()) {
+ strong_interns_.AddInternStrings(std::move(set), is_boot_image);
+ }
}
return read_count;
}
-inline void InternTable::Table::AddInternStrings(UnorderedSet&& intern_strings) {
+inline void InternTable::Table::AddInternStrings(UnorderedSet&& intern_strings,
+ bool is_boot_image) {
static constexpr bool kCheckDuplicates = kIsDebugBuild;
if (kCheckDuplicates) {
// Avoid doing read barriers since the space might not yet be added to the heap.
@@ -60,7 +68,50 @@ inline void InternTable::Table::AddInternStrings(UnorderedSet&& intern_strings)
}
}
// Insert at the front since we add new interns into the back.
- tables_.insert(tables_.begin(), std::move(intern_strings));
+ tables_.insert(tables_.begin(),
+ InternalTable(std::move(intern_strings), is_boot_image));
+}
+
+template <typename Visitor>
+inline void InternTable::VisitInterns(const Visitor& visitor,
+ bool visit_boot_images,
+ bool visit_non_boot_images) {
+ auto visit_tables = [&](std::vector<Table::InternalTable>& tables)
+ NO_THREAD_SAFETY_ANALYSIS {
+ for (Table::InternalTable& table : tables) {
+ // Determine if we want to visit the table based on the flags.
+ const bool visit =
+ (visit_boot_images && table.IsBootImage()) ||
+ (visit_non_boot_images && !table.IsBootImage());
+ if (visit) {
+ for (auto& intern : table.set_) {
+ visitor(intern);
+ }
+ }
+ }
+ };
+ visit_tables(strong_interns_.tables_);
+ visit_tables(weak_interns_.tables_);
+}
+
+inline size_t InternTable::CountInterns(bool visit_boot_images,
+ bool visit_non_boot_images) const {
+ size_t ret = 0u;
+ auto visit_tables = [&](const std::vector<Table::InternalTable>& tables)
+ NO_THREAD_SAFETY_ANALYSIS {
+ for (const Table::InternalTable& table : tables) {
+ // Determine if we want to visit the table based on the flags.
+ const bool visit =
+ (visit_boot_images && table.IsBootImage()) ||
+ (visit_non_boot_images && !table.IsBootImage());
+ if (visit) {
+ ret += table.set_.size();
+ }
+ }
+ };
+ visit_tables(strong_interns_.tables_);
+ visit_tables(weak_interns_.tables_);
+ return ret;
}
} // namespace art
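
VisitInterns and CountInterns above walk every table but only touch those whose boot-image flag matches the requested filter. A minimal standalone illustration of that filtering, using invented TaggedTable and VisitFiltered names and plain std::string interns:

// Standalone sketch of flag-filtered table visitation; not the ART types.
#include <iostream>
#include <string>
#include <unordered_set>
#include <vector>

struct TaggedTable {
  std::unordered_set<std::string> set_;
  bool is_boot_image_;
};

template <typename Visitor>
void VisitFiltered(std::vector<TaggedTable>& tables,
                   bool visit_boot_images,
                   bool visit_non_boot_images,
                   const Visitor& visitor) {
  for (TaggedTable& table : tables) {
    // Visit a table only if its boot-image flag matches the requested filter.
    const bool visit = (visit_boot_images && table.is_boot_image_) ||
                       (visit_non_boot_images && !table.is_boot_image_);
    if (!visit) {
      continue;
    }
    for (const std::string& intern : table.set_) {
      visitor(intern);
    }
  }
}

int main() {
  std::vector<TaggedTable> tables;
  tables.push_back(TaggedTable{{"boot1", "boot2"}, /*is_boot_image=*/true});
  tables.push_back(TaggedTable{{"app1"}, /*is_boot_image=*/false});
  size_t count = 0;
  VisitFiltered(tables, /*visit_boot_images=*/false, /*visit_non_boot_images=*/true,
                [&](const std::string&) { ++count; });
  std::cout << count << "\n";  // 1: only the non-boot-image table is visited.
}
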
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index 2449121548..9ac9927cf4 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -351,22 +351,22 @@ size_t InternTable::Table::WriteToMemory(uint8_t* ptr) {
UnorderedSet combined;
if (tables_.size() > 1) {
table_to_write = &combined;
- for (UnorderedSet& table : tables_) {
- for (GcRoot<mirror::String>& string : table) {
+ for (InternalTable& table : tables_) {
+ for (GcRoot<mirror::String>& string : table.set_) {
combined.insert(string);
}
}
} else {
- table_to_write = &tables_.back();
+ table_to_write = &tables_.back().set_;
}
return table_to_write->WriteToMemory(ptr);
}
void InternTable::Table::Remove(ObjPtr<mirror::String> s) {
- for (UnorderedSet& table : tables_) {
- auto it = table.find(GcRoot<mirror::String>(s));
- if (it != table.end()) {
- table.erase(it);
+ for (InternalTable& table : tables_) {
+ auto it = table.set_.find(GcRoot<mirror::String>(s));
+ if (it != table.set_.end()) {
+ table.set_.erase(it);
return;
}
}
@@ -375,9 +375,9 @@ void InternTable::Table::Remove(ObjPtr<mirror::String> s) {
ObjPtr<mirror::String> InternTable::Table::Find(ObjPtr<mirror::String> s) {
Locks::intern_table_lock_->AssertHeld(Thread::Current());
- for (UnorderedSet& table : tables_) {
- auto it = table.find(GcRoot<mirror::String>(s));
- if (it != table.end()) {
+ for (InternalTable& table : tables_) {
+ auto it = table.set_.find(GcRoot<mirror::String>(s));
+ if (it != table.set_.end()) {
return it->Read();
}
}
@@ -386,9 +386,9 @@ ObjPtr<mirror::String> InternTable::Table::Find(ObjPtr<mirror::String> s) {
ObjPtr<mirror::String> InternTable::Table::Find(const Utf8String& string) {
Locks::intern_table_lock_->AssertHeld(Thread::Current());
- for (UnorderedSet& table : tables_) {
- auto it = table.find(string);
- if (it != table.end()) {
+ for (InternalTable& table : tables_) {
+ auto it = table.set_.find(string);
+ if (it != table.set_.end()) {
return it->Read();
}
}
@@ -396,29 +396,29 @@ ObjPtr<mirror::String> InternTable::Table::Find(const Utf8String& string) {
}
void InternTable::Table::AddNewTable() {
- tables_.push_back(UnorderedSet());
+ tables_.push_back(InternalTable());
}
void InternTable::Table::Insert(ObjPtr<mirror::String> s) {
// Always insert into the last table; the image tables come before it and we avoid inserting into these
// to prevent dirty pages.
DCHECK(!tables_.empty());
- tables_.back().insert(GcRoot<mirror::String>(s));
+ tables_.back().set_.insert(GcRoot<mirror::String>(s));
}
void InternTable::Table::VisitRoots(RootVisitor* visitor) {
BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(
visitor, RootInfo(kRootInternedString));
- for (UnorderedSet& table : tables_) {
- for (auto& intern : table) {
+ for (InternalTable& table : tables_) {
+ for (auto& intern : table.set_) {
buffered_visitor.VisitRoot(intern);
}
}
}
void InternTable::Table::SweepWeaks(IsMarkedVisitor* visitor) {
- for (UnorderedSet& table : tables_) {
- SweepWeaks(&table, visitor);
+ for (InternalTable& table : tables_) {
+ SweepWeaks(&table.set_, visitor);
}
}
@@ -440,8 +440,8 @@ size_t InternTable::Table::Size() const {
return std::accumulate(tables_.begin(),
tables_.end(),
0U,
- [](size_t sum, const UnorderedSet& set) {
- return sum + set.size();
+ [](size_t sum, const InternalTable& table) {
+ return sum + table.Size();
});
}
@@ -460,10 +460,10 @@ void InternTable::ChangeWeakRootStateLocked(gc::WeakRootState new_state) {
InternTable::Table::Table() {
Runtime* const runtime = Runtime::Current();
- // Initial table.
- tables_.push_back(UnorderedSet());
- tables_.back().SetLoadFactor(runtime->GetHashTableMinLoadFactor(),
- runtime->GetHashTableMaxLoadFactor());
+ InternalTable initial_table;
+ initial_table.set_.SetLoadFactor(runtime->GetHashTableMinLoadFactor(),
+ runtime->GetHashTableMaxLoadFactor());
+ tables_.push_back(std::move(initial_table));
}
} // namespace art
diff --git a/runtime/intern_table.h b/runtime/intern_table.h
index 1bc89a1048..165d56cf66 100644
--- a/runtime/intern_table.h
+++ b/runtime/intern_table.h
@@ -145,11 +145,15 @@ class InternTable {
ObjPtr<mirror::String> LookupStrong(Thread* self, uint32_t utf16_length, const char* utf8_data)
REQUIRES(!Locks::intern_table_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
+ ObjPtr<mirror::String> LookupStrongLocked(ObjPtr<mirror::String> s)
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
// Lookup a weak intern, returns null if not found.
ObjPtr<mirror::String> LookupWeak(Thread* self, ObjPtr<mirror::String> s)
REQUIRES(!Locks::intern_table_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
+ ObjPtr<mirror::String> LookupWeakLocked(ObjPtr<mirror::String> s)
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
// Total number of interned strings.
size_t Size() const REQUIRES(!Locks::intern_table_lock_);
@@ -163,6 +167,17 @@ class InternTable {
void VisitRoots(RootVisitor* visitor, VisitRootFlags flags)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::intern_table_lock_);
+ // Visit all of the interns in the table.
+ template <typename Visitor>
+ void VisitInterns(const Visitor& visitor,
+ bool visit_boot_images,
+ bool visit_non_boot_images)
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
+
+ // Count the number of intern strings in the table.
+ size_t CountInterns(bool visit_boot_images, bool visit_non_boot_images) const
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
+
void DumpForSigQuit(std::ostream& os) const REQUIRES(!Locks::intern_table_lock_);
void BroadcastForNewInterns();
@@ -194,6 +209,33 @@ class InternTable {
// weak interns and strong interns.
class Table {
public:
+ class InternalTable {
+ public:
+ InternalTable() = default;
+ InternalTable(UnorderedSet&& set, bool is_boot_image)
+ : set_(std::move(set)), is_boot_image_(is_boot_image) {}
+
+ bool Empty() const {
+ return set_.empty();
+ }
+
+ size_t Size() const {
+ return set_.size();
+ }
+
+ bool IsBootImage() const {
+ return is_boot_image_;
+ }
+
+ private:
+ UnorderedSet set_;
+ bool is_boot_image_ = false;
+
+ friend class InternTable;
+ friend class Table;
+ ART_FRIEND_TEST(InternTableTest, CrossHash);
+ };
+
Table();
ObjPtr<mirror::String> Find(ObjPtr<mirror::String> s) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::intern_table_lock_);
@@ -215,7 +257,7 @@ class InternTable {
// debug builds. Returns how many bytes were read.
// NO_THREAD_SAFETY_ANALYSIS for the visitor that may require locks.
template <typename Visitor>
- size_t AddTableFromMemory(const uint8_t* ptr, const Visitor& visitor)
+ size_t AddTableFromMemory(const uint8_t* ptr, const Visitor& visitor, bool is_boot_image)
REQUIRES(!Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
// Write the intern tables to ptr, if there are multiple tables they are combined into a single
// one. Returns how many bytes were written.
@@ -227,12 +269,12 @@ class InternTable {
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
// Add a table to the front of the tables vector.
- void AddInternStrings(UnorderedSet&& intern_strings)
+ void AddInternStrings(UnorderedSet&& intern_strings, bool is_boot_image)
REQUIRES(Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
// We call AddNewTable when we create the zygote to reduce private dirty pages caused by
// modifying the zygote intern table. The back of table is modified when strings are interned.
- std::vector<UnorderedSet> tables_;
+ std::vector<InternalTable> tables_;
friend class InternTable;
friend class linker::ImageWriter;
@@ -247,13 +289,9 @@ class InternTable {
// Add a table from memory to the strong interns.
template <typename Visitor>
- size_t AddTableFromMemory(const uint8_t* ptr, const Visitor& visitor)
+ size_t AddTableFromMemory(const uint8_t* ptr, const Visitor& visitor, bool is_boot_image)
REQUIRES(!Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
- ObjPtr<mirror::String> LookupStrongLocked(ObjPtr<mirror::String> s)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
- ObjPtr<mirror::String> LookupWeakLocked(ObjPtr<mirror::String> s)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
ObjPtr<mirror::String> InsertStrong(ObjPtr<mirror::String> s)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
ObjPtr<mirror::String> InsertWeak(ObjPtr<mirror::String> s)
diff --git a/runtime/intern_table_test.cc b/runtime/intern_table_test.cc
index b3bf1ba41c..b64ca7dafb 100644
--- a/runtime/intern_table_test.cc
+++ b/runtime/intern_table_test.cc
@@ -78,9 +78,9 @@ TEST_F(InternTableTest, CrossHash) {
GcRoot<mirror::String> str(mirror::String::AllocFromModifiedUtf8(soa.Self(), "00000000"));
MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
- for (InternTable::UnorderedSet& table : t.strong_interns_.tables_) {
+ for (InternTable::Table::InternalTable& table : t.strong_interns_.tables_) {
// The negative hash value shall be 32-bit wide on every host.
- ASSERT_TRUE(IsUint<32>(table.hashfn_(str)));
+ ASSERT_TRUE(IsUint<32>(table.set_.hashfn_(str)));
}
}
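
For context, the strong table that grows these InternalTable entries is the pool behind String.intern(); the new is_boot_image_ flag only records which image an entry came from so the new VisitInterns/CountInterns can filter on it. A minimal, hypothetical Java sketch (class name InternSketch is illustrative, not part of the change) of the deduplication the table provides:

public class InternSketch {
  public static void main(String[] args) {
    String a = new StringBuilder("inter").append("ned").toString();
    String b = "interned";                 // Constant-pool string, already interned.
    System.out.println(a == b);            // false: two distinct heap objects.
    System.out.println(a.intern() == b);   // true: intern() returns the canonical entry.
  }
}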
diff --git a/runtime/interpreter/interpreter_intrinsics.cc b/runtime/interpreter/interpreter_intrinsics.cc
index 17b3cd45aa..24a026a92e 100644
--- a/runtime/interpreter/interpreter_intrinsics.cc
+++ b/runtime/interpreter/interpreter_intrinsics.cc
@@ -558,6 +558,7 @@ bool MterpHandleIntrinsic(ShadowFrame* shadow_frame,
UNIMPLEMENTED_CASE(ReferenceGetReferent /* ()Ljava/lang/Object; */)
UNIMPLEMENTED_CASE(IntegerValueOf /* (I)Ljava/lang/Integer; */)
UNIMPLEMENTED_CASE(ThreadInterrupted /* ()Z */)
+ UNIMPLEMENTED_CASE(CRC32Update /* (II)I */)
INTRINSIC_CASE(VarHandleFullFence)
INTRINSIC_CASE(VarHandleAcquireFence)
INTRINSIC_CASE(VarHandleReleaseFence)
diff --git a/runtime/intrinsics_list.h b/runtime/intrinsics_list.h
index 2f91f5dfe0..093dd7f400 100644
--- a/runtime/intrinsics_list.h
+++ b/runtime/intrinsics_list.h
@@ -219,6 +219,7 @@
V(VarHandleLoadLoadFence, kStatic, kNeedsEnvironmentOrCache, kWriteSideEffects, kNoThrow, "Ljava/lang/invoke/VarHandle;", "loadLoadFence", "()V") \
V(VarHandleStoreStoreFence, kStatic, kNeedsEnvironmentOrCache, kReadSideEffects, kNoThrow, "Ljava/lang/invoke/VarHandle;", "storeStoreFence", "()V") \
V(ReachabilityFence, kStatic, kNeedsEnvironmentOrCache, kWriteSideEffects, kNoThrow, "Ljava/lang/ref/Reference;", "reachabilityFence", "(Ljava/lang/Object;)V") \
+ V(CRC32Update, kStatic, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow, "Ljava/util/zip/CRC32;", "update", "(II)I") \
SIGNATURE_POLYMORPHIC_INTRINSICS_LIST(V)
#endif // ART_RUNTIME_INTRINSICS_LIST_H_
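
The new CRC32Update entry corresponds to the private static native int update(int crc, int b) behind java.util.zip.CRC32.update(int). Only the low eight bits of the argument feed the checksum, which is exactly what the 580-crc32 test added below exercises. A minimal, hypothetical sketch of the public API the intrinsic must agree with (class name Crc32UpdateSketch is illustrative):

import java.util.zip.CRC32;

public class Crc32UpdateSketch {
  public static void main(String[] args) {
    CRC32 crc = new CRC32();
    crc.update(0x12345600);   // update(int) consumes only the low byte, here 0x00.
    long highBitsIgnored = crc.getValue();

    crc.reset();
    crc.update(0x00);         // Same low byte, so the checksum must be identical.
    long plainByte = crc.getValue();

    System.out.println(highBitsIgnored == plainByte);  // true (both are 0xD202EF8D)
  }
}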
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index e65505298f..e33e407149 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -630,9 +630,14 @@ ArtMethod* Class::FindClassMethod(ObjPtr<DexCache> dex_cache,
// If we do not have a dex_cache match, try to find the declared method in this class now.
if (this_dex_cache != dex_cache && !GetDeclaredMethodsSlice(pointer_size).empty()) {
DCHECK(name.empty());
- name = dex_file.StringDataByIdx(method_id.name_idx_);
+ // Avoid string comparisons by comparing the respective unicode lengths first.
+ uint32_t length, other_length; // UTF16 length.
+ name = dex_file.GetMethodName(method_id, &length);
for (ArtMethod& method : GetDeclaredMethodsSlice(pointer_size)) {
- if (method.GetName() == name && method.GetSignature() == signature) {
+ DCHECK_NE(method.GetDexMethodIndex(), dex::kDexNoIndex);
+ const char* other_name = method.GetDexFile()->GetMethodName(
+ method.GetDexMethodIndex(), &other_length);
+ if (length == other_length && name == other_name && signature == method.GetSignature()) {
return &method;
}
}
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 0d7160ee9e..32b88e6fa1 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -76,10 +76,6 @@ static void VMRuntime_startJitCompilation(JNIEnv*, jobject) {
static void VMRuntime_disableJitCompilation(JNIEnv*, jobject) {
}
-static jboolean VMRuntime_hasUsedHiddenApi(JNIEnv*, jobject) {
- return Runtime::Current()->HasPendingHiddenApiWarning() ? JNI_TRUE : JNI_FALSE;
-}
-
static void VMRuntime_setHiddenApiExemptions(JNIEnv* env,
jclass,
jobjectArray exemptions) {
@@ -697,7 +693,6 @@ static JNINativeMethod gMethods[] = {
NATIVE_METHOD(VMRuntime, concurrentGC, "()V"),
NATIVE_METHOD(VMRuntime, disableJitCompilation, "()V"),
FAST_NATIVE_METHOD(VMRuntime, hasBootImageSpaces, "()Z"), // Could be CRITICAL.
- NATIVE_METHOD(VMRuntime, hasUsedHiddenApi, "()Z"),
NATIVE_METHOD(VMRuntime, setHiddenApiExemptions, "([Ljava/lang/String;)V"),
NATIVE_METHOD(VMRuntime, setHiddenApiAccessLogSamplingRate, "(I)V"),
NATIVE_METHOD(VMRuntime, getTargetHeapUtilization, "()F"),
diff --git a/runtime/native/sun_misc_Unsafe.cc b/runtime/native/sun_misc_Unsafe.cc
index e021b77dae..a739c2d16e 100644
--- a/runtime/native/sun_misc_Unsafe.cc
+++ b/runtime/native/sun_misc_Unsafe.cc
@@ -31,8 +31,10 @@
#include "mirror/array.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
+#include "art_field-inl.h"
#include "native_util.h"
#include "scoped_fast_native_object_access-inl.h"
+#include "well_known_classes.h"
namespace art {
@@ -504,6 +506,33 @@ static void Unsafe_fullFence(JNIEnv*, jobject) {
std::atomic_thread_fence(std::memory_order_seq_cst);
}
+static void Unsafe_park(JNIEnv* env, jobject, jboolean isAbsolute, jlong time) {
+ ScopedObjectAccess soa(env);
+ Thread::Current()->Park(isAbsolute, time);
+}
+
+static void Unsafe_unpark(JNIEnv* env, jobject, jobject jthread) {
+ art::ScopedFastNativeObjectAccess soa(env);
+ if (jthread == nullptr || !env->IsInstanceOf(jthread, WellKnownClasses::java_lang_Thread)) {
+ ThrowIllegalArgumentException("Argument to unpark() was not a Thread");
+ return;
+ }
+ art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);
+ art::Thread* thread = art::Thread::FromManagedThread(soa, jthread);
+ if (thread != nullptr) {
+ thread->Unpark();
+ } else {
+ // If thread is null, that means that either the thread is not started yet,
+ // or the thread has already terminated. Setting the field to true will be
+ // respected when the thread does start, and is harmless if the thread has
+ // already terminated.
+ ArtField* unparked =
+ jni::DecodeArtField(WellKnownClasses::java_lang_Thread_unparkedBeforeStart);
+ // JNI must use non transactional mode.
+ unparked->SetBoolean<false>(soa.Decode<mirror::Object>(jthread), JNI_TRUE);
+ }
+}
+
static JNINativeMethod gMethods[] = {
FAST_NATIVE_METHOD(Unsafe, compareAndSwapInt, "(Ljava/lang/Object;JII)Z"),
FAST_NATIVE_METHOD(Unsafe, compareAndSwapLong, "(Ljava/lang/Object;JJJ)Z"),
@@ -546,6 +575,8 @@ static JNINativeMethod gMethods[] = {
FAST_NATIVE_METHOD(Unsafe, putShort, "(Ljava/lang/Object;JS)V"),
FAST_NATIVE_METHOD(Unsafe, putFloat, "(Ljava/lang/Object;JF)V"),
FAST_NATIVE_METHOD(Unsafe, putDouble, "(Ljava/lang/Object;JD)V"),
+ FAST_NATIVE_METHOD(Unsafe, unpark, "(Ljava/lang/Object;)V"),
+ NATIVE_METHOD(Unsafe, park, "(ZJ)V"),
// Each of the getFoo variants is overloaded with a call that operates
// directly on a native pointer.
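
The park/unpark natives registered above are what java.util.concurrent.locks.LockSupport calls into, with the (is_absolute, time) pairs being (false, 0) for an untimed park, (false, nanos) for parkNanos, and (true, millis since the epoch) for parkUntil. A minimal, hypothetical usage sketch (class name ParkSketch is illustrative):

import java.util.concurrent.locks.LockSupport;

public class ParkSketch {
  public static void main(String[] args) throws InterruptedException {
    Thread worker = new Thread(() -> {
      LockSupport.park();                                       // Unsafe.park(false, 0): untimed, state WAITING.
      LockSupport.parkNanos(100_000_000L);                      // Unsafe.park(false, nanos): state TIMED_WAITING.
      LockSupport.parkUntil(System.currentTimeMillis() + 100);  // Unsafe.park(true, millis): absolute deadline.
    });
    worker.start();
    Thread.sleep(50);
    LockSupport.unpark(worker);  // Makes the permit available and wakes the untimed park.
    worker.join();
  }
}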
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 7c320d8101..af87637d57 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -1039,11 +1039,8 @@ bool DlOpenOatFile::Dlopen(const std::string& elf_filename,
}
#ifdef ART_TARGET_ANDROID
android_dlextinfo extinfo = {};
- extinfo.flags = ANDROID_DLEXT_FORCE_LOAD | // Force-load, don't reuse handle
- // (open oat files multiple
- // times).
- ANDROID_DLEXT_FORCE_FIXED_VADDR; // Take a non-zero vaddr as absolute
- // (non-pic boot image).
+ extinfo.flags = ANDROID_DLEXT_FORCE_LOAD; // Force-load, don't reuse handle
+ // (open oat files multiple times).
if (reservation != nullptr) {
if (!reservation->IsValid()) {
*error_msg = StringPrintf("Invalid reservation for %s", elf_filename.c_str());
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 34b84f52c6..3c0125b59e 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -791,10 +791,6 @@ bool Runtime::Start() {
std::string error_msg;
if (!jit::Jit::LoadCompiler(&error_msg)) {
LOG(WARNING) << "Failed to load JIT compiler with error " << error_msg;
- } else if (!IsZygote()) {
- // If we are the zygote then we need to wait until after forking to create the code cache
- // due to SELinux restrictions on r/w/x memory regions.
- CreateJitCodeCache(/*rwx_memory_allowed=*/true);
}
}
@@ -900,6 +896,11 @@ void Runtime::InitNonZygoteOrPostFork(
}
if (jit_ == nullptr) {
+ // The system server's code cache was initialized specially. For other zygote forks or
+    // processes, create it now.
+ if (!is_system_server) {
+ CreateJitCodeCache(/*rwx_memory_allowed=*/true);
+ }
// Note that when running ART standalone (not zygote, nor zygote fork),
// the jit may have already been created.
CreateJit();
@@ -1101,6 +1102,10 @@ void Runtime::SetSentinel(mirror::Object* sentinel) {
sentinel_ = GcRoot<mirror::Object>(sentinel);
}
+GcRoot<mirror::Object> Runtime::GetSentinel() {
+ return sentinel_;
+}
+
static inline void CreatePreAllocatedException(Thread* self,
Runtime* runtime,
GcRoot<mirror::Throwable>* exception,
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 4fb0d2ede0..be5b3c119c 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -691,6 +691,9 @@ class Runtime {
// Called from class linker.
void SetSentinel(mirror::Object* sentinel) REQUIRES_SHARED(Locks::mutator_lock_);
+  // For testing purposes only.
+ // TODO: Remove this when this is no longer needed (b/116087961).
+ GcRoot<mirror::Object> GetSentinel() REQUIRES_SHARED(Locks::mutator_lock_);
// Create a normal LinearAlloc or low 4gb version if we are 64 bit AOT compiler.
LinearAlloc* CreateLinearAlloc();
diff --git a/runtime/thread.cc b/runtime/thread.cc
index b5d214def4..66e852a216 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -44,6 +44,7 @@
#include "arch/context.h"
#include "art_field-inl.h"
#include "art_method-inl.h"
+#include "base/atomic.h"
#include "base/bit_utils.h"
#include "base/casts.h"
#include "base/file_utils.h"
@@ -285,6 +286,116 @@ void Thread::AssertHasDeoptimizationContext() {
<< "No deoptimization context for thread " << *this;
}
+enum {
+ kPermitAvailable = 0, // Incrementing consumes the permit
+ kNoPermit = 1, // Incrementing marks as waiter waiting
+ kNoPermitWaiterWaiting = 2
+};
+
+void Thread::Park(bool is_absolute, int64_t time) {
+ DCHECK(this == Thread::Current());
+#if ART_USE_FUTEXES
+ // Consume the permit, or mark as waiting. This cannot cause park_state to go
+ // outside of its valid range (0, 1, 2), because in all cases where 2 is
+ // assigned it is set back to 1 before returning, and this method cannot run
+ // concurrently with itself since it operates on the current thread.
+ int old_state = tls32_.park_state_.fetch_add(1, std::memory_order_relaxed);
+ if (old_state == kNoPermit) {
+    // No permit was available. Block the thread until later.
+ // TODO: Call to signal jvmti here
+ int result = 0;
+ if (!is_absolute && time == 0) {
+ // Thread.getState() is documented to return waiting for untimed parks.
+ ScopedThreadSuspension sts(this, ThreadState::kWaiting);
+ DCHECK_EQ(NumberOfHeldMutexes(), 0u);
+ result = futex(tls32_.park_state_.Address(),
+ FUTEX_WAIT_PRIVATE,
+ /* sleep if val = */ kNoPermitWaiterWaiting,
+ /* timeout */ nullptr,
+ nullptr,
+ 0);
+ } else if (time > 0) {
+ // Only actually suspend and futex_wait if we're going to wait for some
+ // positive amount of time - the kernel will reject negative times with
+ // EINVAL, and a zero time will just noop.
+
+ // Thread.getState() is documented to return timed wait for timed parks.
+ ScopedThreadSuspension sts(this, ThreadState::kTimedWaiting);
+ DCHECK_EQ(NumberOfHeldMutexes(), 0u);
+ timespec timespec;
+ if (is_absolute) {
+ // Time is millis when scheduled for an absolute time
+ timespec.tv_nsec = (time % 1000) * 1000000;
+ timespec.tv_sec = time / 1000;
+ // This odd looking pattern is recommended by futex documentation to
+ // wait until an absolute deadline, with otherwise identical behavior to
+ // FUTEX_WAIT_PRIVATE. This also allows parkUntil() to return at the
+ // correct time when the system clock changes.
+ result = futex(tls32_.park_state_.Address(),
+ FUTEX_WAIT_BITSET_PRIVATE | FUTEX_CLOCK_REALTIME,
+ /* sleep if val = */ kNoPermitWaiterWaiting,
+ &timespec,
+ nullptr,
+ FUTEX_BITSET_MATCH_ANY);
+ } else {
+ // Time is nanos when scheduled for a relative time
+ timespec.tv_sec = time / 1000000000;
+ timespec.tv_nsec = time % 1000000000;
+ result = futex(tls32_.park_state_.Address(),
+ FUTEX_WAIT_PRIVATE,
+ /* sleep if val = */ kNoPermitWaiterWaiting,
+ &timespec,
+ nullptr,
+ 0);
+ }
+ }
+ if (result == -1) {
+ switch (errno) {
+ case EAGAIN:
+ case ETIMEDOUT:
+ case EINTR: break; // park() is allowed to spuriously return
+ default: PLOG(FATAL) << "Failed to park";
+ }
+ }
+ // Mark as no longer waiting, and consume permit if there is one.
+ tls32_.park_state_.store(kNoPermit, std::memory_order_relaxed);
+ // TODO: Call to signal jvmti here
+ } else {
+    // The fetch_add has consumed the permit; return immediately.
+ DCHECK_EQ(old_state, kPermitAvailable);
+ }
+#else
+ #pragma clang diagnostic push
+ #pragma clang diagnostic warning "-W#warnings"
+ #warning "LockSupport.park/unpark implemented as noops without FUTEX support."
+ #pragma clang diagnostic pop
+ UNUSED(is_absolute, time);
+ UNIMPLEMENTED(WARNING);
+ sched_yield();
+#endif
+}
+
+void Thread::Unpark() {
+#if ART_USE_FUTEXES
+ // Set permit available; will be consumed either by fetch_add (when the thread
+ // tries to park) or store (when the parked thread is woken up)
+ if (tls32_.park_state_.exchange(kPermitAvailable, std::memory_order_relaxed)
+ == kNoPermitWaiterWaiting) {
+ int result = futex(tls32_.park_state_.Address(),
+ FUTEX_WAKE_PRIVATE,
+ /* number of waiters = */ 1,
+ nullptr,
+ nullptr,
+ 0);
+ if (result == -1) {
+ PLOG(FATAL) << "Failed to unpark";
+ }
+ }
+#else
+ UNIMPLEMENTED(WARNING);
+#endif
+}
+
void Thread::PushStackedShadowFrame(ShadowFrame* sf, StackedShadowFrameType type) {
StackedShadowFrameRecord* record = new StackedShadowFrameRecord(
sf, type, tlsPtr_.stacked_shadow_frame_record);
@@ -489,6 +600,22 @@ void* Thread::CreateCallback(void* arg) {
runtime->GetRuntimeCallbacks()->ThreadStart(self);
+ // Unpark ourselves if the java peer was unparked before it started (see
+ // b/28845097#comment49 for more information)
+
+ ArtField* unparkedField = jni::DecodeArtField(
+ WellKnownClasses::java_lang_Thread_unparkedBeforeStart);
+ bool should_unpark = false;
+ {
+ // Hold the lock here, so that if another thread calls unpark before the thread starts
+ // we don't observe the unparkedBeforeStart field before the unparker writes to it,
+ // which could cause a lost unpark.
+ art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);
+ should_unpark = unparkedField->GetBoolean(self->tlsPtr_.opeer) == JNI_TRUE;
+ }
+ if (should_unpark) {
+ self->Unpark();
+ }
// Invoke the 'run' method of our java.lang.Thread.
ObjPtr<mirror::Object> receiver = self->tlsPtr_.opeer;
jmethodID mid = WellKnownClasses::java_lang_Thread_run;
@@ -1351,7 +1478,7 @@ bool Thread::PassActiveSuspendBarriers(Thread* self) {
done = pending_threads->CompareAndSetWeakRelaxed(cur_val, cur_val - 1);
#if ART_USE_FUTEXES
if (done && (cur_val - 1) == 0) { // Weak CAS may fail spuriously.
- futex(pending_threads->Address(), FUTEX_WAKE, -1, nullptr, nullptr, 0);
+ futex(pending_threads->Address(), FUTEX_WAKE_PRIVATE, -1, nullptr, nullptr, 0);
}
#endif
} while (!done);
@@ -2133,6 +2260,9 @@ Thread::Thread(bool daemon)
tls32_.state_and_flags.as_struct.flags = 0;
tls32_.state_and_flags.as_struct.state = kNative;
tls32_.interrupted.store(false, std::memory_order_relaxed);
+ // Initialize with no permit; if the java Thread was unparked before being
+ // started, it will unpark itself before calling into java code.
+ tls32_.park_state_.store(kNoPermit, std::memory_order_relaxed);
memset(&tlsPtr_.held_mutexes[0], 0, sizeof(tlsPtr_.held_mutexes));
std::fill(tlsPtr_.rosalloc_runs,
tlsPtr_.rosalloc_runs + kNumRosAllocThreadLocalSizeBracketsInThread,
@@ -2449,12 +2579,15 @@ bool Thread::IsInterrupted() {
}
void Thread::Interrupt(Thread* self) {
- MutexLock mu(self, *wait_mutex_);
- if (tls32_.interrupted.load(std::memory_order_seq_cst)) {
- return;
+ {
+ MutexLock mu(self, *wait_mutex_);
+ if (tls32_.interrupted.load(std::memory_order_seq_cst)) {
+ return;
+ }
+ tls32_.interrupted.store(true, std::memory_order_seq_cst);
+ NotifyLocked(self);
}
- tls32_.interrupted.store(true, std::memory_order_seq_cst);
- NotifyLocked(self);
+ Unpark();
}
void Thread::Notify() {
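
Thread::Interrupt() now calls Unpark() after dropping wait_mutex_, so a thread blocked in LockSupport.park() is woken by an interrupt as well; park() simply returns with the interrupt status still set, and no InterruptedException is involved. A minimal, hypothetical sketch of that interaction (class name InterruptedParkSketch is illustrative):

import java.util.concurrent.locks.LockSupport;

public class InterruptedParkSketch {
  public static void main(String[] args) throws InterruptedException {
    Thread parked = new Thread(() -> {
      LockSupport.park();  // Returns when unparked or when the thread is interrupted.
      System.out.println("interrupted=" + Thread.currentThread().isInterrupted());  // true
    });
    parked.start();
    Thread.sleep(50);
    parked.interrupt();  // Sets the interrupt flag, notifies waiters, and now also unparks.
    parked.join();
  }
}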
diff --git a/runtime/thread.h b/runtime/thread.h
index 941867ce2d..b304cef74d 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -581,6 +581,11 @@ class Thread {
return poison_object_cookie_;
}
+  // Parking for 0ns of relative time means an untimed park; a negative time (though it
+  // should be handled in Java code) returns immediately.
+ void Park(bool is_absolute, int64_t time) REQUIRES_SHARED(Locks::mutator_lock_);
+ void Unpark();
+
private:
void NotifyLocked(Thread* self) REQUIRES(wait_mutex_);
@@ -1543,6 +1548,8 @@ class Thread {
// Thread "interrupted" status; stays raised until queried or thrown.
Atomic<bool32_t> interrupted;
+ AtomicInteger park_state_;
+
// True if the thread is allowed to access a weak ref (Reference::GetReferent() and system
// weaks) and to potentially mark an object alive/gray. This is used for concurrent reference
// processing of the CC collector only. This is thread local so that we can enable/disable weak
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index d21b600566..23e57a2f21 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -764,7 +764,8 @@ void ThreadList::SuspendAllInternal(Thread* self,
int32_t cur_val = pending_threads.load(std::memory_order_relaxed);
if (LIKELY(cur_val > 0)) {
#if ART_USE_FUTEXES
- if (futex(pending_threads.Address(), FUTEX_WAIT, cur_val, &wait_timeout, nullptr, 0) != 0) {
+ if (futex(pending_threads.Address(), FUTEX_WAIT_PRIVATE, cur_val, &wait_timeout, nullptr, 0)
+ != 0) {
// EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
if ((errno != EAGAIN) && (errno != EINTR)) {
if (errno == ETIMEDOUT) {
diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc
index 206418fbc6..94faa626f6 100644
--- a/runtime/well_known_classes.cc
+++ b/runtime/well_known_classes.cc
@@ -128,6 +128,7 @@ jfieldID WellKnownClasses::java_lang_Thread_lock;
jfieldID WellKnownClasses::java_lang_Thread_name;
jfieldID WellKnownClasses::java_lang_Thread_priority;
jfieldID WellKnownClasses::java_lang_Thread_nativePeer;
+jfieldID WellKnownClasses::java_lang_Thread_unparkedBeforeStart;
jfieldID WellKnownClasses::java_lang_ThreadGroup_groups;
jfieldID WellKnownClasses::java_lang_ThreadGroup_ngroups;
jfieldID WellKnownClasses::java_lang_ThreadGroup_mainThreadGroup;
@@ -376,6 +377,7 @@ void WellKnownClasses::Init(JNIEnv* env) {
java_lang_Thread_name = CacheField(env, java_lang_Thread, false, "name", "Ljava/lang/String;");
java_lang_Thread_priority = CacheField(env, java_lang_Thread, false, "priority", "I");
java_lang_Thread_nativePeer = CacheField(env, java_lang_Thread, false, "nativePeer", "J");
+ java_lang_Thread_unparkedBeforeStart = CacheField(env, java_lang_Thread, false, "unparkedBeforeStart", "Z");
java_lang_ThreadGroup_groups = CacheField(env, java_lang_ThreadGroup, false, "groups", "[Ljava/lang/ThreadGroup;");
java_lang_ThreadGroup_ngroups = CacheField(env, java_lang_ThreadGroup, false, "ngroups", "I");
java_lang_ThreadGroup_mainThreadGroup = CacheField(env, java_lang_ThreadGroup, true, "mainThreadGroup", "Ljava/lang/ThreadGroup;");
diff --git a/runtime/well_known_classes.h b/runtime/well_known_classes.h
index ce5ab1df84..8c85228dfc 100644
--- a/runtime/well_known_classes.h
+++ b/runtime/well_known_classes.h
@@ -137,6 +137,7 @@ struct WellKnownClasses {
static jfieldID java_lang_Thread_name;
static jfieldID java_lang_Thread_priority;
static jfieldID java_lang_Thread_nativePeer;
+ static jfieldID java_lang_Thread_unparkedBeforeStart;
static jfieldID java_lang_ThreadGroup_groups;
static jfieldID java_lang_ThreadGroup_ngroups;
static jfieldID java_lang_ThreadGroup_mainThreadGroup;
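
The cached unparkedBeforeStart field closes the race where unpark() targets a thread whose native peer does not exist yet: Unsafe_unpark records the pending permit in the Java Thread object under thread_list_lock_, and Thread::CreateCallback re-reads it under the same lock and unparks the new thread before it runs. A minimal, hypothetical sketch of the scenario this handles (class name UnparkBeforeStartSketch is illustrative):

import java.util.concurrent.locks.LockSupport;

public class UnparkBeforeStartSketch {
  public static void main(String[] args) throws InterruptedException {
    Thread t = new Thread(() -> LockSupport.park());  // Should consume the early permit.
    LockSupport.unpark(t);  // Target not started yet: only unparkedBeforeStart is set.
    t.start();              // CreateCallback sees the flag and unparks the new thread.
    t.join();               // park() returns promptly, so join() completes.
  }
}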
diff --git a/test/004-ThreadStress/src-art/Main.java b/test/004-ThreadStress/src-art/Main.java
index 3a89f4f166..e7178523d3 100644
--- a/test/004-ThreadStress/src-art/Main.java
+++ b/test/004-ThreadStress/src-art/Main.java
@@ -26,6 +26,7 @@ import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Semaphore;
+import java.util.concurrent.locks.LockSupport;
// Run on host with:
// javac ThreadTest.java && java ThreadStress && rm *.class
@@ -52,6 +53,7 @@ import java.util.concurrent.Semaphore;
// -sleep:X .......... frequency of Sleep (double)
// -wait:X ........... frequency of Wait (double)
// -timedwait:X ...... frequency of TimedWait (double)
+// -timedpark:X ...... frequency of TimedPark (double)
// -syncandwork:X .... frequency of SyncAndWork (double)
// -queuedwait:X ..... frequency of QueuedWait (double)
@@ -251,6 +253,18 @@ public class Main implements Runnable {
}
}
+ private final static class TimedPark extends Operation {
+ private final static int SLEEP_TIME = 100;
+
+ public TimedPark() {}
+
+ @Override
+ public boolean perform() {
+      LockSupport.parkNanos(this, SLEEP_TIME * 1000000);
+ return true;
+ }
+ }
+
private final static class SyncAndWork extends Operation {
private final Object lock;
@@ -320,7 +334,8 @@ public class Main implements Runnable {
frequencyMap.put(new NonMovingAlloc(), 0.025); // 5/200
frequencyMap.put(new StackTrace(), 0.1); // 20/200
frequencyMap.put(new Exit(), 0.225); // 45/200
- frequencyMap.put(new Sleep(), 0.125); // 25/200
+ frequencyMap.put(new Sleep(), 0.075); // 15/200
+ frequencyMap.put(new TimedPark(), 0.05); // 10/200
frequencyMap.put(new TimedWait(lock), 0.05); // 10/200
frequencyMap.put(new Wait(lock), 0.075); // 15/200
frequencyMap.put(new QueuedWait(semaphore), 0.05); // 10/200
@@ -341,9 +356,10 @@ public class Main implements Runnable {
private final static Map<Operation, Double> createLockFrequencyMap(Object lock) {
Map<Operation, Double> frequencyMap = new HashMap<Operation, Double>();
frequencyMap.put(new Sleep(), 0.2); // 40/200
- frequencyMap.put(new TimedWait(lock), 0.2); // 40/200
+ frequencyMap.put(new TimedWait(lock), 0.1); // 20/200
frequencyMap.put(new Wait(lock), 0.2); // 40/200
frequencyMap.put(new SyncAndWork(lock), 0.4); // 80/200
+ frequencyMap.put(new TimedPark(), 0.1); // 20/200
return frequencyMap;
}
@@ -389,6 +405,8 @@ public class Main implements Runnable {
op = new Wait(lock);
} else if (split[0].equals("-timedwait")) {
op = new TimedWait(lock);
+ } else if (split[0].equals("-timedpark")) {
+ op = new TimedPark();
} else if (split[0].equals("-syncandwork")) {
op = new SyncAndWork(lock);
} else if (split[0].equals("-queuedwait")) {
@@ -693,7 +711,7 @@ public class Main implements Runnable {
}
// The notifier thread is a daemon that just loops forever to wake
- // up threads in operation Wait.
+ // up threads in operations Wait and Park.
if (lock != null) {
Thread notifier = new Thread("Notifier") {
public void run() {
@@ -701,6 +719,11 @@ public class Main implements Runnable {
synchronized (lock) {
lock.notifyAll();
}
+ for (Thread runner : runners) {
+ if (runner != null) {
+ LockSupport.unpark(runner);
+ }
+ }
}
}
};
diff --git a/test/050-sync-test/src/Main.java b/test/050-sync-test/src/Main.java
index 734b51e811..ba378187d9 100644
--- a/test/050-sync-test/src/Main.java
+++ b/test/050-sync-test/src/Main.java
@@ -133,11 +133,13 @@ class CpuThread extends Thread {
class SleepyThread extends Thread {
private SleepyThread mOther;
private Integer[] mWaitOnMe; // any type of object will do
+ private volatile boolean otherDone;
private static int count = 0;
SleepyThread(SleepyThread other) {
mOther = other;
+ otherDone = false;
mWaitOnMe = new Integer[] { 1, 2 };
setName("thread#" + count);
@@ -158,9 +160,11 @@ class SleepyThread extends Thread {
boolean intr = false;
try {
+ do {
synchronized (mWaitOnMe) {
mWaitOnMe.wait(9000);
}
+ } while (!otherDone);
} catch (InterruptedException ie) {
// Expecting this; interrupted should be false.
System.out.println(Thread.currentThread().getName() +
@@ -182,6 +186,7 @@ class SleepyThread extends Thread {
System.out.println("interrupting other (isAlive="
+ mOther.isAlive() + ")");
mOther.interrupt();
+ mOther.otherDone = true;
}
}
}
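
The 050-sync-test change is the standard guarded-wait idiom: wait(9000) may return early because of a timeout, a spurious wakeup, or an unrelated notify, so the waiter loops on a flag that the other thread sets when it really is done. A generic, hypothetical sketch of the idiom (class name GuardedWait and its members are illustrative, not from the test):

class GuardedWait {
  private final Object lock = new Object();
  private volatile boolean done = false;

  void await() throws InterruptedException {
    synchronized (lock) {
      while (!done) {      // Re-check after every wakeup; wait() may return early.
        lock.wait(9000);
      }
    }
  }

  void finish() {
    done = true;           // Publish the condition before waking the waiter.
    synchronized (lock) {
      lock.notifyAll();
    }
  }
}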
diff --git a/test/160-read-barrier-stress/src/Main.java b/test/160-read-barrier-stress/src/Main.java
index 5865094002..5e49e664be 100644
--- a/test/160-read-barrier-stress/src/Main.java
+++ b/test/160-read-barrier-stress/src/Main.java
@@ -121,6 +121,23 @@ public class Main {
assertSameObject(f4444, la[i4444]);
assertDifferentObject(f4999, la[i4998]);
assertSameObject(f4999, la[i4999]);
+
+ la = largeArray;
+ // Group the ArrayGets so they aren't divided by a function call; this will enable
+        // intermediate address sharing for arm64.
+ Object tmp1 = la[i0];
+ Object tmp2 = la[i0 + 1];
+ Object tmp3 = la[i0 + 1024];
+ Object tmp4 = la[i0 + 4444];
+ Object tmp5 = la[i0 + 4998];
+ Object tmp6 = la[i0 + 4999];
+
+ assertSameObject(f0000, tmp1);
+ assertDifferentObject(f0000, tmp2);
+ assertSameObject(f1024, tmp3);
+ assertSameObject(f4444, tmp4);
+ assertDifferentObject(f4999, tmp5);
+ assertSameObject(f4999, tmp6);
}
}
diff --git a/test/527-checker-array-access-split/src/Main.java b/test/527-checker-array-access-split/src/Main.java
index f83c924de9..f39b5e26a0 100644
--- a/test/527-checker-array-access-split/src/Main.java
+++ b/test/527-checker-array-access-split/src/Main.java
@@ -572,6 +572,75 @@ public class Main {
buf1[end] = 'n';
}
+ //
+ // Check that IntermediateAddress can be shared for object ArrayGets.
+ //
+ /// CHECK-START-ARM64: int Main.checkObjectArrayGet(int, java.lang.Integer[], java.lang.Integer[]) instruction_simplifier_arm64 (before)
+ /// CHECK: <<Parameter:l\d+>> ParameterValue
+ /// CHECK: <<Array:l\d+>> NullCheck [<<Parameter>>]
+ /// CHECK: ArrayGet [<<Array>>,{{i\d+}}]
+ /// CHECK: ArrayGet [<<Array>>,{{i\d+}}]
+ /// CHECK: ArraySet [<<Array>>,{{i\d+}},{{l\d+}}]
+ /// CHECK: ArrayGet [<<Array>>,{{i\d+}}]
+ /// CHECK: ArraySet [<<Array>>,{{i\d+}},{{l\d+}}]
+ /// CHECK: ArraySet [<<Array>>,{{i\d+}},{{l\d+}}]
+
+ /// CHECK-START-ARM64: int Main.checkObjectArrayGet(int, java.lang.Integer[], java.lang.Integer[]) instruction_simplifier_arm64 (after)
+ /// CHECK: <<Parameter:l\d+>> ParameterValue
+ /// CHECK: <<DataOffset:i\d+>> IntConstant 12
+ /// CHECK: <<Array:l\d+>> NullCheck [<<Parameter>>]
+ /// CHECK: <<IntAddr1:i\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>]
+ /// CHECK: ArrayGet [<<IntAddr1>>,{{i\d+}}]
+ /// CHECK: <<IntAddr2:i\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>]
+ /// CHECK: ArrayGet [<<IntAddr2>>,{{i\d+}}]
+ /// CHECK: ArraySet [<<Array>>,{{i\d+}},{{l\d+}}]
+ /// CHECK: <<IntAddr3:i\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>]
+ /// CHECK: ArrayGet [<<IntAddr3>>,{{i\d+}}]
+ /// CHECK: ArraySet [<<Array>>,{{i\d+}},{{l\d+}}]
+ /// CHECK: ArraySet [<<Array>>,{{i\d+}},{{l\d+}}]
+ //
+ /// CHECK-NOT: IntermediateAddress
+
+ /// CHECK-START-ARM64: int Main.checkObjectArrayGet(int, java.lang.Integer[], java.lang.Integer[]) GVN$after_arch (after)
+ /// CHECK: <<Parameter:l\d+>> ParameterValue
+ /// CHECK: <<DataOffset:i\d+>> IntConstant 12
+ /// CHECK: <<Array:l\d+>> NullCheck [<<Parameter>>]
+ /// CHECK: <<IntAddr1:i\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>]
+ /// CHECK: ArrayGet [<<IntAddr1>>,{{i\d+}}]
+ /// CHECK: ArrayGet [<<IntAddr1>>,{{i\d+}}]
+ /// CHECK: ArraySet [<<Array>>,{{i\d+}},{{l\d+}}]
+ /// CHECK: <<IntAddr3:i\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>]
+ /// CHECK: ArrayGet [<<IntAddr3>>,{{i\d+}}]
+ /// CHECK: ArraySet [<<Array>>,{{i\d+}},{{l\d+}}]
+ /// CHECK: ArraySet [<<Array>>,{{i\d+}},{{l\d+}}]
+ //
+ /// CHECK-NOT: IntermediateAddress
+ public final static int checkObjectArrayGet(int index, Integer[] a, Integer[] b) {
+ Integer five = Integer.valueOf(5);
+ int tmp1 = a[index];
+ tmp1 += a[index + 1];
+ a[index + 1] = five;
+ tmp1 += a[index + 2];
+ a[index + 2] = five;
+ a[index + 3] = five;
+ return tmp1;
+ }
+
+ /// CHECK-START-ARM64: int Main.testIntAddressObjDisasm(java.lang.Integer[], int) disassembly (after)
+ /// CHECK: <<IntAddr:i\d+>> IntermediateAddress
+ /// CHECK: add w<<AddrReg:\d+>>, {{w\d+}}, #0xc
+ /// CHECK: ArrayGet [<<IntAddr>>,{{i\d+}}]
+ /// CHECK: ldr {{w\d+}}, [x<<AddrReg>>, x{{\d+}}, lsl #2]
+ /// CHECK: ArrayGet [<<IntAddr>>,{{i\d+}}]
+ /// CHECK: ldr {{w\d+}}, [x<<AddrReg>>, x{{\d+}}, lsl #2]
+
+ /// CHECK-START-ARM64: int Main.testIntAddressObjDisasm(java.lang.Integer[], int) disassembly (after)
+ /// CHECK: add {{w\d+}}, {{w\d+}}, #0xc
+ /// CHECK-NOT: add {{w\d+}}, {{w\d+}}, #0xc
+ private int testIntAddressObjDisasm(Integer[] obj, int x) {
+ return obj[x] + obj[x + 1];
+ }
+
public final static int ARRAY_SIZE = 128;
public static void main(String[] args) {
diff --git a/test/580-crc32/expected.txt b/test/580-crc32/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/580-crc32/expected.txt
diff --git a/test/580-crc32/info.txt b/test/580-crc32/info.txt
new file mode 100644
index 0000000000..24f31e0e1b
--- /dev/null
+++ b/test/580-crc32/info.txt
@@ -0,0 +1 @@
+This test case is used to test java.util.zip.CRC32.
diff --git a/test/580-crc32/src/Main.java b/test/580-crc32/src/Main.java
new file mode 100644
index 0000000000..7fc1273600
--- /dev/null
+++ b/test/580-crc32/src/Main.java
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.zip.CRC32;
+
+/**
+ * The ART compiler can use intrinsics for the java.util.zip.CRC32 method:
+ * private native static int update(int crc, int b)
+ *
+ * As the method is private, it is not possible to check the use of intrinsics
+ * for it directly.
+ * The tests check that correct checksums are produced.
+ */
+public class Main {
+ private static CRC32 crc32 = new CRC32();
+
+ public Main() {
+ }
+
+ public static long TestInt(int value) {
+ crc32.reset();
+ crc32.update(value);
+ return crc32.getValue();
+ }
+
+ public static long TestInt(int... values) {
+ crc32.reset();
+ for (int value : values) {
+ crc32.update(value);
+ }
+ return crc32.getValue();
+ }
+
+ public static void assertEqual(long expected, long actual) {
+ if (expected != actual) {
+ throw new Error("Expected: " + expected + ", found: " + actual);
+ }
+ }
+
+ public static void main(String args[]) {
+ // public void update(int b)
+ //
+ // Tests for checksums of the byte 0x0
+ assertEqual(0xD202EF8DL, TestInt(0x0));
+ assertEqual(0xD202EF8DL, TestInt(0x0100));
+ assertEqual(0xD202EF8DL, TestInt(0x010000));
+ assertEqual(0xD202EF8DL, TestInt(0x01000000));
+ assertEqual(0xD202EF8DL, TestInt(0xff00));
+ assertEqual(0xD202EF8DL, TestInt(0xffff00));
+ assertEqual(0xD202EF8DL, TestInt(0xffffff00));
+ assertEqual(0xD202EF8DL, TestInt(0x1200));
+ assertEqual(0xD202EF8DL, TestInt(0x123400));
+ assertEqual(0xD202EF8DL, TestInt(0x12345600));
+ assertEqual(0xD202EF8DL, TestInt(Integer.MIN_VALUE));
+
+ // Tests for checksums of the byte 0x1
+ assertEqual(0xA505DF1BL, TestInt(0x1));
+ assertEqual(0xA505DF1BL, TestInt(0x0101));
+ assertEqual(0xA505DF1BL, TestInt(0x010001));
+ assertEqual(0xA505DF1BL, TestInt(0x01000001));
+ assertEqual(0xA505DF1BL, TestInt(0xff01));
+ assertEqual(0xA505DF1BL, TestInt(0xffff01));
+ assertEqual(0xA505DF1BL, TestInt(0xffffff01));
+ assertEqual(0xA505DF1BL, TestInt(0x1201));
+ assertEqual(0xA505DF1BL, TestInt(0x123401));
+ assertEqual(0xA505DF1BL, TestInt(0x12345601));
+
+ // Tests for checksums of the byte 0x0f
+ assertEqual(0x42BDF21CL, TestInt(0x0f));
+ assertEqual(0x42BDF21CL, TestInt(0x010f));
+ assertEqual(0x42BDF21CL, TestInt(0x01000f));
+ assertEqual(0x42BDF21CL, TestInt(0x0100000f));
+ assertEqual(0x42BDF21CL, TestInt(0xff0f));
+ assertEqual(0x42BDF21CL, TestInt(0xffff0f));
+ assertEqual(0x42BDF21CL, TestInt(0xffffff0f));
+ assertEqual(0x42BDF21CL, TestInt(0x120f));
+ assertEqual(0x42BDF21CL, TestInt(0x12340f));
+ assertEqual(0x42BDF21CL, TestInt(0x1234560f));
+
+ // Tests for checksums of the byte 0xff
+ assertEqual(0xFF000000L, TestInt(0x00ff));
+ assertEqual(0xFF000000L, TestInt(0x01ff));
+ assertEqual(0xFF000000L, TestInt(0x0100ff));
+ assertEqual(0xFF000000L, TestInt(0x010000ff));
+ assertEqual(0xFF000000L, TestInt(0x0000ffff));
+ assertEqual(0xFF000000L, TestInt(0x00ffffff));
+ assertEqual(0xFF000000L, TestInt(0xffffffff));
+ assertEqual(0xFF000000L, TestInt(0x12ff));
+ assertEqual(0xFF000000L, TestInt(0x1234ff));
+ assertEqual(0xFF000000L, TestInt(0x123456ff));
+ assertEqual(0xFF000000L, TestInt(Integer.MAX_VALUE));
+
+ // Tests for sequences
+ assertEqual(0xFF41D912L, TestInt(0, 0, 0));
+ assertEqual(0xFF41D912L, TestInt(0x0100, 0x010000, 0x01000000));
+ assertEqual(0xFF41D912L, TestInt(0xff00, 0xffff00, 0xffffff00));
+ assertEqual(0xFF41D912L, TestInt(0x1200, 0x123400, 0x12345600));
+
+ assertEqual(0x909FB2F2L, TestInt(1, 1, 1));
+ assertEqual(0x909FB2F2L, TestInt(0x0101, 0x010001, 0x01000001));
+ assertEqual(0x909FB2F2L, TestInt(0xff01, 0xffff01, 0xffffff01));
+ assertEqual(0x909FB2F2L, TestInt(0x1201, 0x123401, 0x12345601));
+
+ assertEqual(0xE33A9F71L, TestInt(0x0f, 0x0f, 0x0f));
+ assertEqual(0xE33A9F71L, TestInt(0x010f, 0x01000f, 0x0100000f));
+ assertEqual(0xE33A9F71L, TestInt(0xff0f, 0xffff0f, 0xffffff0f));
+ assertEqual(0xE33A9F71L, TestInt(0x120f, 0x12340f, 0x1234560f));
+
+ assertEqual(0xFFFFFF00L, TestInt(0x0ff, 0x0ff, 0x0ff));
+ assertEqual(0xFFFFFF00L, TestInt(0x01ff, 0x0100ff, 0x010000ff));
+ assertEqual(0xFFFFFF00L, TestInt(0x00ffff, 0x00ffffff, 0xffffffff));
+ assertEqual(0xFFFFFF00L, TestInt(0x12ff, 0x1234ff, 0x123456ff));
+
+ assertEqual(0xB6CC4292L, TestInt(0x01, 0x02));
+
+ assertEqual(0xB2DE047CL, TestInt(0x0, -1, Integer.MIN_VALUE, Integer.MAX_VALUE));
+ }
+}
diff --git a/test/913-heaps/expected.txt b/test/913-heaps/expected.txt
index 065b854a6a..1bd56d1a53 100644
--- a/test/913-heaps/expected.txt
+++ b/test/913-heaps/expected.txt
@@ -1,9 +1,9 @@
---
true true
-root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=132, length=-1]
+root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=124, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestNonRoot,vreg=8,location= 31])--> 1@1000 [size=16, length=-1]
-root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=132, length=-1]
-root@root --(thread)--> 3000@0 [size=132, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=124, length=-1]
+root@root --(thread)--> 3000@0 [size=124, length=-1]
1001@0 --(superclass)--> 1000@0 [size=123456780000, length=-1]
1002@0 --(interface)--> 2001@0 [size=123456780004, length=-1]
1002@0 --(superclass)--> 1001@0 [size=123456780001, length=-1]
@@ -44,14 +44,14 @@ root@root --(thread)--> 3000@0 [size=132, length=-1]
---
root@root --(jni-global)--> 1@1000 [size=16, length=-1]
root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 1@1000 [size=16, length=-1]
-root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=132, length=-1]
+root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=124, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=10,location= 8])--> 1@1000 [size=16, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=5,location= 8])--> 1@1000 [size=16, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=13,location= 20])--> 1@1000 [size=16, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 20])--> 1@1000 [size=16, length=-1]
-root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=132, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=124, length=-1]
root@root --(thread)--> 1@1000 [size=16, length=-1]
-root@root --(thread)--> 3000@0 [size=132, length=-1]
+root@root --(thread)--> 3000@0 [size=124, length=-1]
1001@0 --(superclass)--> 1000@0 [size=123456780005, length=-1]
1002@0 --(interface)--> 2001@0 [size=123456780009, length=-1]
1002@0 --(superclass)--> 1001@0 [size=123456780006, length=-1]
@@ -90,18 +90,18 @@ root@root --(thread)--> 3000@0 [size=132, length=-1]
5@1002 --(field@9)--> 6@1000 [size=16, length=-1]
6@1000 --(class)--> 1000@0 [size=123456780005, length=-1]
---
-root@root --(thread)--> 3000@0 [size=132, length=-1]
+root@root --(thread)--> 3000@0 [size=124, length=-1]
---
3@1001 --(class)--> 1001@0 [size=123456780011, length=-1]
---
-root@root --(thread)--> 3000@0 [size=132, length=-1]
+root@root --(thread)--> 3000@0 [size=124, length=-1]
---
3@1001 --(class)--> 1001@0 [size=123456780016, length=-1]
---
-root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=132, length=-1]
+root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=124, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestNonRoot,vreg=8,location= 31])--> 1@1000 [size=16, length=-1]
-root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=132, length=-1]
-root@root --(thread)--> 3000@0 [size=132, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=124, length=-1]
+root@root --(thread)--> 3000@0 [size=124, length=-1]
---
1001@0 --(superclass)--> 1000@0 [size=123456780020, length=-1]
3@1001 --(class)--> 1001@0 [size=123456780021, length=-1]
@@ -110,14 +110,14 @@ root@root --(thread)--> 3000@0 [size=132, length=-1]
---
root@root --(jni-global)--> 1@1000 [size=16, length=-1]
root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 1@1000 [size=16, length=-1]
-root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=132, length=-1]
+root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=124, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=10,location= 8])--> 1@1000 [size=16, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=5,location= 8])--> 1@1000 [size=16, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=13,location= 20])--> 1@1000 [size=16, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 20])--> 1@1000 [size=16, length=-1]
-root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=132, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=124, length=-1]
root@root --(thread)--> 1@1000 [size=16, length=-1]
-root@root --(thread)--> 3000@0 [size=132, length=-1]
+root@root --(thread)--> 3000@0 [size=124, length=-1]
---
1001@0 --(superclass)--> 1000@0 [size=123456780025, length=-1]
3@1001 --(class)--> 1001@0 [size=123456780026, length=-1]
@@ -198,10 +198,10 @@ root@root --(thread)--> 1@1000 [size=16, length=-1]
---
---
---- untagged objects
-root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=132, length=-1]
+root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=124, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestNonRoot,vreg=8,location= 31])--> 1@1000 [size=16, length=-1]
-root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=132, length=-1]
-root@root --(thread)--> 3000@0 [size=132, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=124, length=-1]
+root@root --(thread)--> 3000@0 [size=124, length=-1]
1001@0 --(superclass)--> 1000@0 [size=123456780050, length=-1]
1002@0 --(interface)--> 2001@0 [size=123456780054, length=-1]
1002@0 --(superclass)--> 1001@0 [size=123456780051, length=-1]
@@ -242,14 +242,14 @@ root@root --(thread)--> 3000@0 [size=132, length=-1]
---
root@root --(jni-global)--> 1@1000 [size=16, length=-1]
root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 1@1000 [size=16, length=-1]
-root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=132, length=-1]
+root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=124, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=10,location= 8])--> 1@1000 [size=16, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=5,location= 8])--> 1@1000 [size=16, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=13,location= 20])--> 1@1000 [size=16, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 20])--> 1@1000 [size=16, length=-1]
-root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=132, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=124, length=-1]
root@root --(thread)--> 1@1000 [size=16, length=-1]
-root@root --(thread)--> 3000@0 [size=132, length=-1]
+root@root --(thread)--> 3000@0 [size=124, length=-1]
1001@0 --(superclass)--> 1000@0 [size=123456780055, length=-1]
1002@0 --(interface)--> 2001@0 [size=123456780059, length=-1]
1002@0 --(superclass)--> 1001@0 [size=123456780056, length=-1]
@@ -289,9 +289,9 @@ root@root --(thread)--> 3000@0 [size=132, length=-1]
6@1000 --(class)--> 1000@0 [size=123456780055, length=-1]
---
---- tagged classes
-root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=132, length=-1]
-root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=132, length=-1]
-root@root --(thread)--> 3000@0 [size=132, length=-1]
+root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=124, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=124, length=-1]
+root@root --(thread)--> 3000@0 [size=124, length=-1]
1001@0 --(superclass)--> 1000@0 [size=123456780060, length=-1]
1002@0 --(interface)--> 2001@0 [size=123456780064, length=-1]
1002@0 --(superclass)--> 1001@0 [size=123456780061, length=-1]
@@ -316,9 +316,9 @@ root@root --(thread)--> 3000@0 [size=132, length=-1]
5@1002 --(field@8)--> 500@0 [size=20, length=2]
6@1000 --(class)--> 1000@0 [size=123456780060, length=-1]
---
-root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=132, length=-1]
-root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=132, length=-1]
-root@root --(thread)--> 3000@0 [size=132, length=-1]
+root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=124, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=124, length=-1]
+root@root --(thread)--> 3000@0 [size=124, length=-1]
1001@0 --(superclass)--> 1000@0 [size=123456780065, length=-1]
1002@0 --(interface)--> 2001@0 [size=123456780069, length=-1]
1002@0 --(superclass)--> 1001@0 [size=123456780066, length=-1]
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 64c1d4f1b8..148aea48ae 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -59,6 +59,7 @@ ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += $(TARGET_OUT_JAVA_LIBRARIES)/conscrypt-
ART_TEST_HOST_RUN_TEST_DEPENDENCIES := \
$(ART_HOST_EXECUTABLES) \
$(HOST_OUT_EXECUTABLES)/hprof-conv \
+ $(HOST_OUT_EXECUTABLES)/timeout_dumper \
$(OUT_DIR)/$(ART_TEST_LIST_host_$(ART_HOST_ARCH)_libtiagent) \
$(OUT_DIR)/$(ART_TEST_LIST_host_$(ART_HOST_ARCH)_libtiagentd) \
$(OUT_DIR)/$(ART_TEST_LIST_host_$(ART_HOST_ARCH)_libtistress) \
diff --git a/test/VerifySoftFailDuringClinit/ClassToInitialize.smali b/test/VerifySoftFailDuringClinit/ClassToInitialize.smali
new file mode 100644
index 0000000000..0d12ec8d4d
--- /dev/null
+++ b/test/VerifySoftFailDuringClinit/ClassToInitialize.smali
@@ -0,0 +1,22 @@
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LClassToInitialize;
+.super Ljava/lang/Object;
+
+.method public static constructor <clinit>()V
+ .registers 0
+ invoke-static {}, LVerifySoftFail;->empty()V
+ return-void
+.end method
diff --git a/test/VerifySoftFailDuringClinit/VerifySoftFail.smali b/test/VerifySoftFailDuringClinit/VerifySoftFail.smali
new file mode 100644
index 0000000000..e0f49467f0
--- /dev/null
+++ b/test/VerifySoftFailDuringClinit/VerifySoftFail.smali
@@ -0,0 +1,27 @@
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LVerifySoftFail;
+.super Ljava/lang/Object;
+
+.method public static empty()V
+ .registers 0
+ return-void
+.end method
+
+.method public static softFail()V
+ .registers 0
+ invoke-static {}, LMissingClass;->test()V
+ return-void
+.end method
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index 900b1d7759..adb838d965 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -942,7 +942,7 @@ else
# Note: We first send SIGRTMIN+2 (usually 36) to ART, which will induce a full thread dump
# before abort. However, dumping threads might deadlock, so we also use the "-k"
# option to definitely kill the child.
- cmdline="timeout -k 120s -s SIGRTMIN+2 ${TIME_OUT_VALUE}s $cmdline"
+ cmdline="timeout -k 120s -s SIGRTMIN+2 ${TIME_OUT_VALUE}s timeout_dumper $cmdline"
fi
if [ "$DEV_MODE" = "y" ]; then
diff --git a/test/knownfailures.json b/test/knownfailures.json
index 7b401609c3..f0eacfae8b 100644
--- a/test/knownfailures.json
+++ b/test/knownfailures.json
@@ -1110,11 +1110,5 @@
"tests": ["454-get-vreg", "457-regs"],
"variant": "baseline",
"description": ["Tests are expected to fail with baseline."]
- },
- {
- "tests": ["050-sync-test"],
- "variant": "target & gcstress & debug",
- "bug": "b/117597114",
- "description": ["Looks timing dependent"]
}
]
diff --git a/tools/timeout_dumper/Android.bp b/tools/timeout_dumper/Android.bp
new file mode 100644
index 0000000000..dfd54421fd
--- /dev/null
+++ b/tools/timeout_dumper/Android.bp
@@ -0,0 +1,39 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+art_cc_binary {
+ name: "timeout_dumper",
+
+ host_supported: true,
+ target: {
+ darwin: {
+ enabled: false,
+ },
+ },
+ device_supported: false,
+
+ defaults: ["art_defaults"],
+
+ srcs: ["timeout_dumper.cc"],
+
+ shared_libs: [
+ "libbacktrace",
+ "libbase",
+ ],
+ sanitize: {
+ address: true,
+ },
+}
diff --git a/tools/timeout_dumper/timeout_dumper.cc b/tools/timeout_dumper/timeout_dumper.cc
new file mode 100644
index 0000000000..cb3eaa594e
--- /dev/null
+++ b/tools/timeout_dumper/timeout_dumper.cc
@@ -0,0 +1,590 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <dirent.h>
+#include <poll.h>
+#include <sys/prctl.h>
+#include <sys/ptrace.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include <csignal>
+#include <cstdlib>
+#include <cstring>
+#include <thread>
+#include <memory>
+#include <set>
+
+#include <android-base/file.h>
+#include <android-base/logging.h>
+#include <android-base/macros.h>
+#include <android-base/stringprintf.h>
+#include <android-base/strings.h>
+#include <android-base/unique_fd.h>
+#include <backtrace/Backtrace.h>
+#include <backtrace/BacktraceMap.h>
+
+namespace art {
+namespace {
+
+using android::base::StringPrintf;
+using android::base::unique_fd;
+
+constexpr bool kUseAddr2line = true;
+
+namespace timeout_signal {
+
+class SignalSet {
+ public:
+ SignalSet() {
+ if (sigemptyset(&set_) == -1) {
+ PLOG(FATAL) << "sigemptyset failed";
+ }
+ }
+
+ void Add(int signal) {
+ if (sigaddset(&set_, signal) == -1) {
+ PLOG(FATAL) << "sigaddset " << signal << " failed";
+ }
+ }
+
+ void Block() {
+ if (pthread_sigmask(SIG_BLOCK, &set_, nullptr) != 0) {
+ PLOG(FATAL) << "pthread_sigmask failed";
+ }
+ }
+
+ int Wait() {
+ // Sleep in sigwait() until a signal arrives. gdb causes EINTR failures.
+ int signal_number;
+ int rc = TEMP_FAILURE_RETRY(sigwait(&set_, &signal_number));
+ if (rc != 0) {
+ PLOG(FATAL) << "sigwait failed";
+ }
+ return signal_number;
+ }
+
+ private:
+ sigset_t set_;
+};
+
+int GetTimeoutSignal() {
+ return SIGRTMIN + 2;
+}
+
+} // namespace timeout_signal
+
+namespace addr2line {
+
+constexpr const char* kAddr2linePath =
+ "/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8/bin/x86_64-linux-addr2line";
+
+std::unique_ptr<std::string> FindAddr2line() {
+ const char* env_value = getenv("ANDROID_BUILD_TOP");
+ if (env_value != nullptr) {
+ std::string path = std::string(env_value) + kAddr2linePath;
+ if (access(path.c_str(), X_OK) == 0) {
+ return std::make_unique<std::string>(path);
+ }
+ }
+
+ std::string path = std::string(".") + kAddr2linePath;
+ if (access(path.c_str(), X_OK) == 0) {
+ return std::make_unique<std::string>(path);
+ }
+
+ constexpr const char* kHostAddr2line = "/usr/bin/addr2line";
+ if (access(kHostAddr2line, F_OK) == 0) {
+ return std::make_unique<std::string>(kHostAddr2line);
+ }
+
+ return nullptr;
+}
+
+// The state of an open pipe to addr2line. In "server" mode, addr2line takes input on stdin
+// and prints the result to stdout. This struct keeps the state of the open connection.
+struct Addr2linePipe {
+ Addr2linePipe(int in_fd, int out_fd, const std::string& file_name, pid_t pid)
+ : in(in_fd), out(out_fd), file(file_name), child_pid(pid), odd(true) {}
+
+ ~Addr2linePipe() {
+ kill(child_pid, SIGKILL);
+ }
+
+ unique_fd in; // The file descriptor that is connected to the output of addr2line.
+ unique_fd out; // The file descriptor that is connected to the input of addr2line.
+
+ const std::string file; // The file addr2line is working on, so that we know when to close
+ // and restart.
+ const pid_t child_pid; // The pid of the child, which we should kill when we're done.
+ bool odd; // Print state for indentation of lines.
+};
+
+std::unique_ptr<Addr2linePipe> Connect(const std::string& name, const char* args[]) {
+ int caller_to_addr2line[2];
+ int addr2line_to_caller[2];
+
+ if (pipe(caller_to_addr2line) == -1) {
+ return nullptr;
+ }
+ if (pipe(addr2line_to_caller) == -1) {
+ close(caller_to_addr2line[0]);
+ close(caller_to_addr2line[1]);
+ return nullptr;
+ }
+
+ pid_t pid = fork();
+ if (pid == -1) {
+ close(caller_to_addr2line[0]);
+ close(caller_to_addr2line[1]);
+ close(addr2line_to_caller[0]);
+ close(addr2line_to_caller[1]);
+ return nullptr;
+ }
+
+ if (pid == 0) {
+ dup2(caller_to_addr2line[0], STDIN_FILENO);
+ dup2(addr2line_to_caller[1], STDOUT_FILENO);
+
+ close(caller_to_addr2line[0]);
+ close(caller_to_addr2line[1]);
+ close(addr2line_to_caller[0]);
+ close(addr2line_to_caller[1]);
+
+ execv(args[0], const_cast<char* const*>(args));
+ exit(1);
+ } else {
+ close(caller_to_addr2line[0]);
+ close(addr2line_to_caller[1]);
+ return std::make_unique<Addr2linePipe>(addr2line_to_caller[0],
+ caller_to_addr2line[1],
+ name,
+ pid);
+ }
+}
+
+void WritePrefix(std::ostream& os, const char* prefix, bool odd) {
+ if (prefix != nullptr) {
+ os << prefix;
+ }
+ os << " ";
+ if (!odd) {
+ os << " ";
+ }
+}
+
+void Drain(size_t expected,
+ const char* prefix,
+ std::unique_ptr<Addr2linePipe>* pipe /* inout */,
+ std::ostream& os) {
+ DCHECK(pipe != nullptr);
+ DCHECK(pipe->get() != nullptr);
+ int in = pipe->get()->in.get();
+ DCHECK_GE(in, 0);
+
+ bool prefix_written = false;
+
+ for (;;) {
+ constexpr uint32_t kWaitTimeExpectedMilli = 500;
+ constexpr uint32_t kWaitTimeUnexpectedMilli = 50;
+
+ int timeout = expected > 0 ? kWaitTimeExpectedMilli : kWaitTimeUnexpectedMilli;
+ struct pollfd read_fd{in, POLLIN, 0};
+ int retval = TEMP_FAILURE_RETRY(poll(&read_fd, 1, timeout));
+ if (retval == -1) {
+ // An error occurred.
+ pipe->reset();
+ return;
+ }
+
+ if (retval == 0) {
+ // Timeout.
+ return;
+ }
+
+ if (!(read_fd.revents & POLLIN)) {
+ // addr2line call exited.
+ pipe->reset();
+ return;
+ }
+
+ constexpr size_t kMaxBuffer = 128; // Relatively small buffer; we run on an ordinary thread
+ // stack here (not a signal alt stack), so this mainly keeps stack usage modest.
+ char buffer[kMaxBuffer];
+ memset(buffer, 0, kMaxBuffer);
+ int bytes_read = TEMP_FAILURE_RETRY(read(in, buffer, kMaxBuffer - 1));
+ if (bytes_read <= 0) {
+ // EOF or a read error: addr2line went away, so drop the pipe.
+ pipe->reset();
+ return;
+ }
+ buffer[bytes_read] = '\0';
+
+ char* tmp = buffer;
+ while (*tmp != 0) {
+ if (!prefix_written) {
+ WritePrefix(os, prefix, (*pipe)->odd);
+ prefix_written = true;
+ }
+ char* new_line = strchr(tmp, '\n');
+ if (new_line == nullptr) {
+ os << tmp;
+
+ break;
+ } else {
+ // Temporarily NUL-terminate just past the newline so the completed line can be
+ // streamed out, then restore the byte and continue with the rest of the buffer.
+ char saved = *(new_line + 1);
+ *(new_line + 1) = 0;
+ os << tmp;
+ *(new_line + 1) = saved;
+
+ tmp = new_line + 1;
+ prefix_written = false;
+ (*pipe)->odd = !(*pipe)->odd;
+
+ if (expected > 0) {
+ expected--;
+ }
+ }
+ }
+ }
+}
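+ // Drain(0, ...) is the "flush" form: nothing specific is expected, so it polls with the short
+ // timeout and just forwards whatever output is already pending (used when switching to a new
+ // file and after the last frame of a thread).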
+
+void Addr2line(const std::string& addr2line,
+ const std::string& map_src,
+ uintptr_t offset,
+ std::ostream& os,
+ const char* prefix,
+ std::unique_ptr<Addr2linePipe>* pipe /* inout */) {
+ DCHECK(pipe != nullptr);
+
+ if (map_src == "[vdso]" || android::base::EndsWith(map_src, ".vdex")) {
+ // addr2line cannot symbolize the vdso, and .vdex maps correspond to special frames injected
+ // for the interpreter, which carry no line number information either.
+ return;
+ }
+
+ if (*pipe == nullptr || (*pipe)->file != map_src) {
+ if (*pipe != nullptr) {
+ Drain(0, prefix, pipe, os);
+ }
+ pipe->reset(); // Close early.
+
+ const char* args[7] = {
+ addr2line.c_str(),
+ "--functions",
+ "--inlines",
+ "--demangle",
+ "-e",
+ map_src.c_str(),
+ nullptr
+ };
+ *pipe = Connect(map_src, args);
+ }
+
+ Addr2linePipe* pipe_ptr = pipe->get();
+ if (pipe_ptr == nullptr) {
+ // Connect() failed or the pipe was torn down; skip symbolization for this frame.
+ return;
+ }
+
+ // Send the offset.
+ const std::string hex_offset = StringPrintf("%zx\n", offset);
+
+ if (!android::base::WriteFully(pipe_ptr->out.get(), hex_offset.data(), hex_offset.length())) {
+ // Write failed; addr2line has likely exited, so drop the pipe.
+ pipe->reset();
+ return;
+ }
+
+ // Now drain (expecting two lines).
+ Drain(2U, prefix, pipe, os);
+}
+
+} // namespace addr2line
+
+namespace ptrace {
+
+std::set<pid_t> PtraceSiblings(pid_t pid) {
+ std::set<pid_t> ret;
+ std::string task_path = android::base::StringPrintf("/proc/%d/task", pid);
+
+ std::unique_ptr<DIR, int (*)(DIR*)> d(opendir(task_path.c_str()), closedir);
+
+ // Bail early if the task directory cannot be opened.
+ if (d == nullptr) {
+ PLOG(ERROR) << "Failed to scan task folder";
+ return ret;
+ }
+
+ struct dirent* de;
+ while ((de = readdir(d.get())) != nullptr) {
+ // Ignore "." and "..".
+ if (!strcmp(de->d_name, ".") || !strcmp(de->d_name, "..")) {
+ continue;
+ }
+
+ char* end;
+ pid_t tid = strtoul(de->d_name, &end, 10);
+ if (*end) {
+ continue;
+ }
+
+ if (tid == pid) {
+ continue;
+ }
+
+ if (::ptrace(PTRACE_ATTACH, tid, 0, 0) != 0) {
+ PLOG(ERROR) << "Failed to attach to tid " << tid;
+ continue;
+ }
+
+ ret.insert(tid);
+ }
+ return ret;
+}
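+ // The attached siblings are deliberately never PTRACE_DETACHed: once the dump is done the
+ // caller SIGKILLs the whole child process, which makes detaching redundant.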
+
+} // namespace ptrace
+
+bool WaitForSigStoppedOrError(pid_t pid) {
+ int status;
+ pid_t res = TEMP_FAILURE_RETRY(waitpid(pid, &status, 0));
+ if (res == -1) {
+ PLOG(ERROR) << "Failed to waitpid for " << pid;
+ return false;
+ }
+ if (!(WIFSTOPPED(status))) {
+ LOG(ERROR) << "Did not get expected stopped signal for " << pid;
+ return false;
+ }
+ return true;
+}
+
+#ifdef __LP64__
+constexpr bool kIs64Bit = true;
+#else
+constexpr bool kIs64Bit = false;
+#endif
+
+void DumpThread(pid_t pid,
+ pid_t tid,
+ const std::string* addr2line_path,
+ const char* prefix,
+ BacktraceMap* map) {
+ if (pid != tid && !WaitForSigStoppedOrError(tid)) {
+ return;
+ }
+
+ std::unique_ptr<Backtrace> backtrace(Backtrace::Create(pid, tid, map));
+ if (backtrace == nullptr) {
+ LOG(ERROR) << prefix << "(failed to create Backtrace for thread " << tid << ")";
+ return;
+ }
+ backtrace->SetSkipFrames(0);
+ if (!backtrace->Unwind(0, nullptr)) {
+ LOG(ERROR) << prefix << "(backtrace::Unwind failed for thread " << tid
+ << ": " << backtrace->GetErrorString(backtrace->GetError()) << ")";
+ return;
+ }
+ if (backtrace->NumFrames() == 0) {
+ LOG(ERROR) << prefix << "(no native stack frames for thread " << tid << ")";
+ return;
+ }
+
+ std::unique_ptr<addr2line::Addr2linePipe> addr2line_state;
+
+ for (Backtrace::const_iterator it = backtrace->begin();
+ it != backtrace->end(); ++it) {
+ std::ostringstream oss;
+ oss << prefix << StringPrintf("#%02zu pc ", it->num);
+ bool try_addr2line = false;
+ if (!BacktraceMap::IsValid(it->map)) {
+ oss << StringPrintf(kIs64Bit ? "%016" PRIx64 " ???" : "%08" PRIx64 " ???", it->pc);
+ } else {
+ oss << StringPrintf(kIs64Bit ? "%016" PRIx64 " " : "%08" PRIx64 " ", it->rel_pc);
+ if (it->map.name.empty()) {
+ oss << StringPrintf("<anonymous:%" PRIx64 ">", it->map.start);
+ } else {
+ oss << it->map.name;
+ }
+ if (it->map.offset != 0) {
+ oss << StringPrintf(" (offset %" PRIx64 ")", it->map.offset);
+ }
+ oss << " (";
+ if (!it->func_name.empty()) {
+ oss << it->func_name;
+ if (it->func_offset != 0) {
+ oss << "+" << it->func_offset;
+ }
+ // Functions found using the gdb jit interface will be in an empty
+ // map that cannot be found using addr2line.
+ if (!it->map.name.empty()) {
+ try_addr2line = true;
+ }
+ } else {
+ oss << "???";
+ }
+ oss << ")";
+ }
+ LOG(ERROR) << oss.str();
+ if (try_addr2line && addr2line_path != nullptr) {
+ addr2line::Addr2line(*addr2line_path,
+ it->map.name,
+ it->pc - it->map.start,
+ LOG_STREAM(ERROR),
+ prefix,
+ &addr2line_state);
+ }
+ }
+
+ if (addr2line_state != nullptr) {
+ addr2line::Drain(0, prefix, &addr2line_state, LOG_STREAM(ERROR));
+ }
+}
+
+bool WaitForMainSigStop(const std::atomic<bool>& saw_wif_stopped_for_main) {
+ constexpr uint32_t kWaitMicros = 10;
+ constexpr size_t kMaxLoopCount = 10 * 1000 * 1000 / kWaitMicros; // 10s wait.
+
+ for (size_t loop_count = 0; !saw_wif_stopped_for_main; ++loop_count) {
+ if (loop_count > kMaxLoopCount) {
+ LOG(ERROR) << "Waited too long for main pid to stop";
+ return false;
+ }
+
+ timespec tm;
+ tm.tv_sec = 0;
+ tm.tv_nsec = kWaitMicros * 1000;
+ nanosleep(&tm, nullptr);
+ }
+ return true;
+}
+
+void DumpProcess(pid_t forked_pid, const std::atomic<bool>& saw_wif_stopped_for_main) {
+ CHECK_EQ(0, ::ptrace(PTRACE_ATTACH, forked_pid, 0, 0));
+ std::set<pid_t> tids = ptrace::PtraceSiblings(forked_pid);
+ tids.insert(forked_pid);
+
+ // Check whether we have and should use addr2line.
+ std::unique_ptr<std::string> addr2line_path = addr2line::FindAddr2line();
+ if (addr2line_path != nullptr) {
+ LOG(ERROR) << "Found addr2line at " << *addr2line_path;
+ } else {
+ LOG(ERROR) << "Did not find usable addr2line";
+ }
+ bool use_addr2line = kUseAddr2line && addr2line_path != nullptr;
+ LOG(ERROR) << (use_addr2line ? "U" : "Not u") << "sing addr2line";
+
+ if (!WaitForMainSigStop(saw_wif_stopped_for_main)) {
+ return;
+ }
+
+ std::unique_ptr<BacktraceMap> backtrace_map(BacktraceMap::Create(forked_pid));
+ if (backtrace_map == nullptr) {
+ LOG(ERROR) << "Could not create BacktraceMap";
+ return;
+ }
+
+ for (pid_t tid : tids) {
+ LOG(ERROR) << "pid: " << forked_pid << " tid: " << tid;
+ DumpThread(forked_pid,
+ tid,
+ use_addr2line ? addr2line_path.get() : nullptr,
+ " ",
+ backtrace_map.get());
+ }
+}
+
+[[noreturn]]
+void WaitMainLoop(pid_t forked_pid, std::atomic<bool>* saw_wif_stopped_for_main) {
+ for (;;) {
+ // Consider switching to waitid to not get woken up for WIFSTOPPED.
+ int status;
+ pid_t res = TEMP_FAILURE_RETRY(waitpid(forked_pid, &status, 0));
+ if (res == -1) {
+ PLOG(FATAL) << "Failure during waitpid";
+ __builtin_unreachable();
+ }
+
+ if (WIFEXITED(status)) {
+ _exit(WEXITSTATUS(status));
+ __builtin_unreachable();
+ }
+ if (WIFSIGNALED(status)) {
+ _exit(1);
+ __builtin_unreachable();
+ }
+ if (WIFSTOPPED(status)) {
+ *saw_wif_stopped_for_main = true;
+ continue;
+ }
+ if (WIFCONTINUED(status)) {
+ continue;
+ }
+
+ LOG(FATAL) << "Unknown status " << std::hex << status;
+ }
+}
+
+[[noreturn]]
+void SetupAndWait(pid_t forked_pid) {
+ timeout_signal::SignalSet signals;
+ signals.Add(timeout_signal::GetTimeoutSignal());
+ signals.Block();
+
+ std::atomic<bool> saw_wif_stopped_for_main(false);
+
+ std::thread signal_catcher([&]() {
+ signals.Block();
+ int sig = signals.Wait();
+ CHECK_EQ(sig, timeout_signal::GetTimeoutSignal());
+
+ DumpProcess(forked_pid, saw_wif_stopped_for_main);
+
+ // Don't clean up. Just kill the child and exit.
+ kill(forked_pid, SIGKILL);
+ _exit(1);
+ });
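+ // Note: signal_catcher is intentionally never joined; every path below and inside the catcher
+ // ends in _exit(), so this function never returns and the std::thread destructor never runs.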
+
+ WaitMainLoop(forked_pid, &saw_wif_stopped_for_main);
+}
+
+} // namespace
+} // namespace art
+
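+ // Overall flow: fork, exec the wrapped command in the child, and let the parent wait. When the
+ // external time limit expires and GetTimeoutSignal() arrives, the parent dumps every thread of
+ // the child and kills it. A purely hypothetical invocation (the real wrapper and its flags are
+ // not part of this file) might look like:
+ //   timeout -s SIGRTMIN+2 10m ./timeout_dumper dalvikvm <args...>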
+int main(int argc ATTRIBUTE_UNUSED, char** argv) {
+ pid_t orig_ppid = getpid();
+
+ pid_t pid = fork();
+ if (pid == 0) {
+ // Child: ask the kernel to send SIGTERM if the dumper parent dies.
+ if (prctl(PR_SET_PDEATHSIG, SIGTERM) == -1) {
+ _exit(1);
+ }
+
+ // Guard against the race where the parent already exited before the prctl above took effect.
+ if (getppid() != orig_ppid) {
+ _exit(2);
+ }
+
+ // Exec the wrapped command; on success this does not return.
+ execvp(argv[1], &argv[1]);
+
+ _exit(3);
+ __builtin_unreachable();
+ }
+
+ art::SetupAndWait(pid);
+ __builtin_unreachable();
+}