364 files changed, 8037 insertions, 5216 deletions
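The bulk of this change migrates ART's locking annotations from the old Clang thread-safety macro names (SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED, EXCLUSIVE_LOCKS_REQUIRED) to capability-style ones (SHARED_REQUIRES, REQUIRES(lock), REQUIRES(!lock)) and adds -Wthread-safety-negative, which makes Clang check the new negative requirements such as REQUIRES(!Locks::mutator_lock_). The following is a minimal, self-contained sketch of how negative capabilities behave under that flag; the macros here are illustrative wrappers around the documented Clang attributes, not ART's own macro definitions, and the class names are made up for the example.

// Sketch only. Compile with:
//   clang++ -std=c++11 -c -Wthread-safety -Wthread-safety-negative sketch.cc

#define CAPABILITY(x)   __attribute__((capability(x)))
#define ACQUIRE(...)    __attribute__((acquire_capability(__VA_ARGS__)))
#define RELEASE(...)    __attribute__((release_capability(__VA_ARGS__)))
#define REQUIRES(...)   __attribute__((requires_capability(__VA_ARGS__)))
#define GUARDED_BY(x)   __attribute__((guarded_by(x)))

class CAPABILITY("mutex") Mutex {
 public:
  void Lock() ACQUIRE();     // declared only; this sketch is compile-checked, not linked
  void Unlock() RELEASE();
};

class Counter {
 public:
  // REQUIRES(!lock_) is a negative capability: the caller must be able to show
  // it does not already hold lock_. Plain -Wthread-safety ignores this;
  // -Wthread-safety-negative checks it, which catches self-deadlock on
  // non-reentrant mutexes.
  void Increment() REQUIRES(!lock_) {
    lock_.Lock();
    ++value_;
    lock_.Unlock();
  }

  // Would warn with the flags above: Increment() requires !lock_, but lock_
  // is held on entry here.
  // void Broken() REQUIRES(lock_) { Increment(); }

 private:
  Mutex lock_;
  int value_ GUARDED_BY(lock_);
};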
diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk index 5d4feb8136..05cfc42172 100644 --- a/build/Android.common_build.mk +++ b/build/Android.common_build.mk @@ -33,7 +33,13 @@ ART_BUILD_TARGET_NDEBUG ?= true ART_BUILD_TARGET_DEBUG ?= true ART_BUILD_HOST_NDEBUG ?= true ART_BUILD_HOST_DEBUG ?= true -ART_BUILD_HOST_STATIC ?= true + +# Enable the static builds only for checkbuilds. +ifneq (,$(filter checkbuild,$(MAKECMDGOALS))) + ART_BUILD_HOST_STATIC ?= true +else + ART_BUILD_HOST_STATIC ?= false +endif # Asan does not support static linkage ifdef SANITIZE_HOST @@ -132,7 +138,7 @@ ART_TARGET_CLANG_CFLAGS_arm64 += \ -DNVALGRIND # Warn about thread safety violations with clang. -art_clang_cflags := -Wthread-safety +art_clang_cflags := -Wthread-safety -Wthread-safety-negative # Warn if switch fallthroughs aren't annotated. art_clang_cflags += -Wimplicit-fallthrough diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk index 377cd4ed34..4850e6c44e 100644 --- a/build/Android.gtest.mk +++ b/build/Android.gtest.mk @@ -165,6 +165,7 @@ RUNTIME_GTEST_COMMON_SRC_FILES := \ runtime/base/hex_dump_test.cc \ runtime/base/histogram_test.cc \ runtime/base/mutex_test.cc \ + runtime/base/out_test.cc \ runtime/base/scoped_flock_test.cc \ runtime/base/stringprintf_test.cc \ runtime/base/time_utils_test.cc \ @@ -255,6 +256,7 @@ COMPILER_GTEST_COMMON_SRC_FILES := \ compiler/optimizing/graph_checker_test.cc \ compiler/optimizing/graph_test.cc \ compiler/optimizing/gvn_test.cc \ + compiler/optimizing/licm_test.cc \ compiler/optimizing/linearize_test.cc \ compiler/optimizing/liveness_test.cc \ compiler/optimizing/live_interval_test.cc \ diff --git a/build/Android.oat.mk b/build/Android.oat.mk index c70e12deec..0c0c3df34c 100644 --- a/build/Android.oat.mk +++ b/build/Android.oat.mk @@ -64,14 +64,14 @@ define create-core-oat-host-rules core_compile_options += --compiler-filter=interpret-only core_infix := -interpreter endif - ifeq ($(1),interpreter-access-checks) + ifeq ($(1),interp-ac) core_compile_options += --compiler-filter=verify-at-runtime --runtime-arg -Xverify:softfail - core_infix := -interpreter-access-checks + core_infix := -interp-ac endif ifeq ($(1),default) # Default has no infix, no compile options. endif - ifneq ($(filter-out default interpreter interpreter-access-checks jit optimizing,$(1)),) + ifneq ($(filter-out default interpreter interp-ac jit optimizing,$(1)),) #Technically this test is not precise, but hopefully good enough. 
$$(error found $(1) expected default, interpreter, interpreter-access-checks, jit or optimizing) endif @@ -147,14 +147,14 @@ endef $(eval $(call create-core-oat-host-rule-combination,default,,)) $(eval $(call create-core-oat-host-rule-combination,optimizing,,)) $(eval $(call create-core-oat-host-rule-combination,interpreter,,)) -$(eval $(call create-core-oat-host-rule-combination,interpreter-access-checks,,)) +$(eval $(call create-core-oat-host-rule-combination,interp-ac,,)) valgrindHOST_CORE_IMG_OUTS := valgrindHOST_CORE_OAT_OUTS := $(eval $(call create-core-oat-host-rule-combination,default,valgrind,32)) $(eval $(call create-core-oat-host-rule-combination,optimizing,valgrind,32)) $(eval $(call create-core-oat-host-rule-combination,interpreter,valgrind,32)) -$(eval $(call create-core-oat-host-rule-combination,interpreter-access-checks,valgrind,32)) +$(eval $(call create-core-oat-host-rule-combination,interp-ac,valgrind,32)) valgrind-test-art-host-dex2oat-host: $(valgrindHOST_CORE_IMG_OUTS) @@ -184,14 +184,14 @@ define create-core-oat-target-rules core_compile_options += --compiler-filter=interpret-only core_infix := -interpreter endif - ifeq ($(1),interpreter-access-checks) + ifeq ($(1),interp-ac) core_compile_options += --compiler-filter=verify-at-runtime --runtime-arg -Xverify:softfail - core_infix := -interpreter-access-checks + core_infix := -interp-ac endif ifeq ($(1),default) # Default has no infix, no compile options. endif - ifneq ($(filter-out default interpreter interpreter-access-checks jit optimizing,$(1)),) + ifneq ($(filter-out default interpreter interp-ac jit optimizing,$(1)),) # Technically this test is not precise, but hopefully good enough. $$(error found $(1) expected default, interpreter, interpreter-access-checks, jit or optimizing) endif @@ -272,14 +272,14 @@ endef $(eval $(call create-core-oat-target-rule-combination,default,,)) $(eval $(call create-core-oat-target-rule-combination,optimizing,,)) $(eval $(call create-core-oat-target-rule-combination,interpreter,,)) -$(eval $(call create-core-oat-target-rule-combination,interpreter-access-checks,,)) +$(eval $(call create-core-oat-target-rule-combination,interp-ac,,)) valgrindTARGET_CORE_IMG_OUTS := valgrindTARGET_CORE_OAT_OUTS := $(eval $(call create-core-oat-target-rule-combination,default,valgrind,32)) $(eval $(call create-core-oat-target-rule-combination,optimizing,valgrind,32)) $(eval $(call create-core-oat-target-rule-combination,interpreter,valgrind,32)) -$(eval $(call create-core-oat-target-rule-combination,interpreter-access-checks,valgrind,32)) +$(eval $(call create-core-oat-target-rule-combination,interp-ac,valgrind,32)) valgrind-test-art-host-dex2oat-target: $(valgrindTARGET_CORE_IMG_OUTS) diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h index d215662645..dc2bc5c3f4 100644 --- a/compiler/common_compiler_test.h +++ b/compiler/common_compiler_test.h @@ -46,12 +46,12 @@ class CommonCompilerTest : public CommonRuntimeTest { // Create an OatMethod based on pointers (for unit tests). 
OatFile::OatMethod CreateOatMethod(const void* code); - void MakeExecutable(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void MakeExecutable(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_); static void MakeExecutable(const void* code_start, size_t code_length); void MakeExecutable(mirror::ClassLoader* class_loader, const char* class_name) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); protected: virtual void SetUp(); @@ -76,17 +76,17 @@ class CommonCompilerTest : public CommonRuntimeTest { virtual void TearDown(); void CompileClass(mirror::ClassLoader* class_loader, const char* class_name) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - void CompileMethod(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void CompileMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_); void CompileDirectMethod(Handle<mirror::ClassLoader> class_loader, const char* class_name, const char* method_name, const char* signature) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void CompileVirtualMethod(Handle<mirror::ClassLoader> class_loader, const char* class_name, const char* method_name, const char* signature) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void ReserveImageSpace(); diff --git a/compiler/compiler.h b/compiler/compiler.h index e5d1aff08c..fcd3434e68 100644 --- a/compiler/compiler.h +++ b/compiler/compiler.h @@ -58,7 +58,7 @@ class Compiler { const DexFile& dex_file) const = 0; virtual uintptr_t GetEntryPointOf(ArtMethod* method) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0; + SHARED_REQUIRES(Locks::mutator_lock_) = 0; uint64_t GetMaximumCompilationTimeBeforeWarning() const { return maximum_compilation_time_before_warning_; diff --git a/compiler/dex/mir_field_info.h b/compiler/dex/mir_field_info.h index e4570fd8d3..04c58aca6b 100644 --- a/compiler/dex/mir_field_info.h +++ b/compiler/dex/mir_field_info.h @@ -135,7 +135,7 @@ class MirIFieldLoweringInfo : public MirFieldInfo { // with IGET/IPUT. For fast path fields, retrieve the field offset. static void Resolve(CompilerDriver* compiler_driver, const DexCompilationUnit* mUnit, MirIFieldLoweringInfo* field_infos, size_t count) - LOCKS_EXCLUDED(Locks::mutator_lock_); + REQUIRES(!Locks::mutator_lock_); // Construct an unresolved instance field lowering info. explicit MirIFieldLoweringInfo(uint16_t field_idx, DexMemAccessType type, bool is_quickened) @@ -192,7 +192,7 @@ class MirSFieldLoweringInfo : public MirFieldInfo { // and the type index of the declaring class in the compiled method's dex file. static void Resolve(CompilerDriver* compiler_driver, const DexCompilationUnit* mUnit, MirSFieldLoweringInfo* field_infos, size_t count) - LOCKS_EXCLUDED(Locks::mutator_lock_); + REQUIRES(!Locks::mutator_lock_); // Construct an unresolved static field lowering info. explicit MirSFieldLoweringInfo(uint16_t field_idx, DexMemAccessType type) diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h index dbe906280f..23b7c4292b 100644 --- a/compiler/dex/mir_graph.h +++ b/compiler/dex/mir_graph.h @@ -173,7 +173,17 @@ enum OatMethodAttributes { typedef uint16_t BasicBlockId; static const BasicBlockId NullBasicBlockId = 0; -static constexpr bool kLeafOptimization = false; + +// Leaf optimization is basically the removal of suspend checks from leaf methods. 
+// This is incompatible with SuspendCheckElimination (SCE) which eliminates suspend +// checks from loops that call any non-intrinsic method, since a loop that calls +// only a leaf method would end up without any suspend checks at all. So turning +// this on automatically disables the SCE in MIRGraph::EliminateSuspendChecksGate(). +// +// Since the Optimizing compiler is actually applying the same optimization, Quick +// must not run SCE anyway, so we enable this optimization as a way to disable SCE +// while keeping a consistent behavior across the backends, b/22657404. +static constexpr bool kLeafOptimization = true; /* * In general, vreg/sreg describe Dalvik registers that originated with dx. However, diff --git a/compiler/dex/mir_method_info.h b/compiler/dex/mir_method_info.h index 946c74becf..4512f35a99 100644 --- a/compiler/dex/mir_method_info.h +++ b/compiler/dex/mir_method_info.h @@ -99,7 +99,7 @@ class MirMethodLoweringInfo : public MirMethodInfo { // path methods, retrieve the method's vtable index and direct code and method when applicable. static void Resolve(CompilerDriver* compiler_driver, const DexCompilationUnit* mUnit, MirMethodLoweringInfo* method_infos, size_t count) - LOCKS_EXCLUDED(Locks::mutator_lock_); + REQUIRES(!Locks::mutator_lock_); MirMethodLoweringInfo(uint16_t method_idx, InvokeType type, bool is_quickened) : MirMethodInfo(method_idx, diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc index 5bb0ce3ba5..80b7ac1e5b 100644 --- a/compiler/dex/mir_optimization.cc +++ b/compiler/dex/mir_optimization.cc @@ -1724,7 +1724,8 @@ void MIRGraph::StringChange() { bool MIRGraph::EliminateSuspendChecksGate() { - if ((cu_->disable_opt & (1 << kSuspendCheckElimination)) != 0 || // Disabled. + if (kLeafOptimization || // Incompatible (could create loops without suspend checks). + (cu_->disable_opt & (1 << kSuspendCheckElimination)) != 0 || // Disabled. GetMaxNestedLoops() == 0u || // Nothing to do. GetMaxNestedLoops() >= 32u || // Only 32 bits in suspend_checks_in_loops_[.]. // Exclude 32 as well to keep bit shifts well-defined. diff --git a/compiler/dex/mir_optimization_test.cc b/compiler/dex/mir_optimization_test.cc index 10a4337cf5..47123ba28c 100644 --- a/compiler/dex/mir_optimization_test.cc +++ b/compiler/dex/mir_optimization_test.cc @@ -467,8 +467,17 @@ class SuspendCheckEliminationTest : public MirOptimizationTest { cu_.mir_graph->ComputeDominators(); cu_.mir_graph->ComputeTopologicalSortOrder(); cu_.mir_graph->SSATransformationEnd(); + bool gate_result = cu_.mir_graph->EliminateSuspendChecksGate(); - ASSERT_TRUE(gate_result); + ASSERT_NE(gate_result, kLeafOptimization); + if (kLeafOptimization) { + // Even with kLeafOptimization on and Gate() refusing to allow SCE, we want + // to run the SCE test to avoid bitrot, so we need to initialize explicitly. 
+ cu_.mir_graph->suspend_checks_in_loops_ = + cu_.mir_graph->arena_->AllocArray<uint32_t>(cu_.mir_graph->GetNumBlocks(), + kArenaAllocMisc); + } + TopologicalSortIterator iterator(cu_.mir_graph.get()); bool change = false; for (BasicBlock* bb = iterator.Next(change); bb != nullptr; bb = iterator.Next(change)) { diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc index 2568ee3064..b83d132d41 100644 --- a/compiler/dex/quick/dex_file_method_inliner.cc +++ b/compiler/dex/quick/dex_file_method_inliner.cc @@ -56,6 +56,7 @@ static constexpr bool kIntrinsicIsStatic[] = { false, // kIntrinsicCharAt false, // kIntrinsicCompareTo false, // kIntrinsicGetCharsNoCheck + false, // kIntrinsicEquals false, // kIntrinsicIsEmptyOrLength false, // kIntrinsicIndexOf true, // kIntrinsicNewStringFromBytes @@ -92,6 +93,7 @@ static_assert(kIntrinsicIsStatic[kIntrinsicRoundDouble], "RoundDouble must be st static_assert(!kIntrinsicIsStatic[kIntrinsicReferenceGetReferent], "Get must not be static"); static_assert(!kIntrinsicIsStatic[kIntrinsicCharAt], "CharAt must not be static"); static_assert(!kIntrinsicIsStatic[kIntrinsicCompareTo], "CompareTo must not be static"); +static_assert(!kIntrinsicIsStatic[kIntrinsicEquals], "Equals must not be static"); static_assert(!kIntrinsicIsStatic[kIntrinsicGetCharsNoCheck], "GetCharsNoCheck must not be static"); static_assert(!kIntrinsicIsStatic[kIntrinsicIsEmptyOrLength], "IsEmptyOrLength must not be static"); static_assert(!kIntrinsicIsStatic[kIntrinsicIndexOf], "IndexOf must not be static"); @@ -189,6 +191,7 @@ const char* const DexFileMethodInliner::kNameCacheNames[] = { "getReferent", // kNameCacheReferenceGet "charAt", // kNameCacheCharAt "compareTo", // kNameCacheCompareTo + "equals", // kNameCacheEquals "getCharsNoCheck", // kNameCacheGetCharsNoCheck "isEmpty", // kNameCacheIsEmpty "indexOf", // kNameCacheIndexOf @@ -280,6 +283,8 @@ const DexFileMethodInliner::ProtoDef DexFileMethodInliner::kProtoCacheDefs[] = { { kClassCacheVoid, 2, { kClassCacheLong, kClassCacheLong } }, // kProtoCacheJS_V { kClassCacheVoid, 2, { kClassCacheLong, kClassCacheShort } }, + // kProtoCacheObject_Z + { kClassCacheBoolean, 1, { kClassCacheJavaLangObject } }, // kProtoCacheObjectJII_Z { kClassCacheBoolean, 4, { kClassCacheJavaLangObject, kClassCacheLong, kClassCacheInt, kClassCacheInt } }, @@ -411,6 +416,7 @@ const DexFileMethodInliner::IntrinsicDef DexFileMethodInliner::kIntrinsicMethods INTRINSIC(JavaLangString, CharAt, I_C, kIntrinsicCharAt, 0), INTRINSIC(JavaLangString, CompareTo, String_I, kIntrinsicCompareTo, 0), + INTRINSIC(JavaLangString, Equals, Object_Z, kIntrinsicEquals, 0), INTRINSIC(JavaLangString, GetCharsNoCheck, IICharArrayI_V, kIntrinsicGetCharsNoCheck, 0), INTRINSIC(JavaLangString, IsEmpty, _Z, kIntrinsicIsEmptyOrLength, kIntrinsicFlagIsEmpty), INTRINSIC(JavaLangString, IndexOf, II_I, kIntrinsicIndexOf, kIntrinsicFlagNone), @@ -581,6 +587,9 @@ bool DexFileMethodInliner::GenIntrinsic(Mir2Lir* backend, CallInfo* info) { return backend->GenInlinedCharAt(info); case kIntrinsicCompareTo: return backend->GenInlinedStringCompareTo(info); + case kIntrinsicEquals: + // Quick does not implement this intrinsic. 
+ return false; case kIntrinsicGetCharsNoCheck: return backend->GenInlinedStringGetCharsNoCheck(info); case kIntrinsicIsEmptyOrLength: diff --git a/compiler/dex/quick/dex_file_method_inliner.h b/compiler/dex/quick/dex_file_method_inliner.h index 26b41bf54d..0969ff8b2d 100644 --- a/compiler/dex/quick/dex_file_method_inliner.h +++ b/compiler/dex/quick/dex_file_method_inliner.h @@ -62,49 +62,49 @@ class DexFileMethodInliner { * @return true if the method is a candidate for inlining, false otherwise. */ bool AnalyseMethodCode(verifier::MethodVerifier* verifier) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_); /** * Check whether a particular method index corresponds to an intrinsic or special function. */ - InlineMethodFlags IsIntrinsicOrSpecial(uint32_t method_index) LOCKS_EXCLUDED(lock_); + InlineMethodFlags IsIntrinsicOrSpecial(uint32_t method_index) REQUIRES(!lock_); /** * Check whether a particular method index corresponds to an intrinsic function. */ - bool IsIntrinsic(uint32_t method_index, InlineMethod* intrinsic) LOCKS_EXCLUDED(lock_); + bool IsIntrinsic(uint32_t method_index, InlineMethod* intrinsic) REQUIRES(!lock_); /** * Generate code for an intrinsic function invocation. */ - bool GenIntrinsic(Mir2Lir* backend, CallInfo* info) LOCKS_EXCLUDED(lock_); + bool GenIntrinsic(Mir2Lir* backend, CallInfo* info) REQUIRES(!lock_); /** * Check whether a particular method index corresponds to a special function. */ - bool IsSpecial(uint32_t method_index) LOCKS_EXCLUDED(lock_); + bool IsSpecial(uint32_t method_index) REQUIRES(!lock_); /** * Generate code for a special function. */ - bool GenSpecial(Mir2Lir* backend, uint32_t method_idx) LOCKS_EXCLUDED(lock_); + bool GenSpecial(Mir2Lir* backend, uint32_t method_idx) REQUIRES(!lock_); /** * Try to inline an invoke. */ bool GenInline(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke, uint32_t method_idx) - LOCKS_EXCLUDED(lock_); + REQUIRES(!lock_); /** * Gets the thread pointer entrypoint offset for a string init method index and pointer size. */ uint32_t GetOffsetForStringInit(uint32_t method_index, size_t pointer_size) - LOCKS_EXCLUDED(lock_); + REQUIRES(!lock_); /** * Check whether a particular method index is a string init. */ - bool IsStringInitMethodIndex(uint32_t method_index) LOCKS_EXCLUDED(lock_); + bool IsStringInitMethodIndex(uint32_t method_index) REQUIRES(!lock_); /** * To avoid multiple lookups of a class by its descriptor, we cache its @@ -170,6 +170,7 @@ class DexFileMethodInliner { kNameCacheReferenceGetReferent, kNameCacheCharAt, kNameCacheCompareTo, + kNameCacheEquals, kNameCacheGetCharsNoCheck, kNameCacheIsEmpty, kNameCacheIndexOf, @@ -242,6 +243,7 @@ class DexFileMethodInliner { kProtoCacheJJ_J, kProtoCacheJJ_V, kProtoCacheJS_V, + kProtoCacheObject_Z, kProtoCacheObjectJII_Z, kProtoCacheObjectJJJ_Z, kProtoCacheObjectJObjectObject_Z, @@ -351,11 +353,11 @@ class DexFileMethodInliner { * * Only DexFileToMethodInlinerMap may call this function to initialize the inliner. 
*/ - void FindIntrinsics(const DexFile* dex_file) EXCLUSIVE_LOCKS_REQUIRED(lock_); + void FindIntrinsics(const DexFile* dex_file) REQUIRES(lock_); friend class DexFileToMethodInlinerMap; - bool AddInlineMethod(int32_t method_idx, const InlineMethod& method) LOCKS_EXCLUDED(lock_); + bool AddInlineMethod(int32_t method_idx, const InlineMethod& method) REQUIRES(!lock_); static bool GenInlineConst(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke, MIR* move_result, const InlineMethod& method); diff --git a/compiler/dex/quick/quick_cfi_test.cc b/compiler/dex/quick/quick_cfi_test.cc index dd68dd40c6..16c161e320 100644 --- a/compiler/dex/quick/quick_cfi_test.cc +++ b/compiler/dex/quick/quick_cfi_test.cc @@ -36,7 +36,7 @@ namespace art { // Run the tests only on host. -#ifndef HAVE_ANDROID_OS +#ifndef __ANDROID__ class QuickCFITest : public CFITest { public: @@ -56,6 +56,8 @@ class QuickCFITest : public CFITest { CompilerOptions::kDefaultSmallMethodThreshold, CompilerOptions::kDefaultTinyMethodThreshold, CompilerOptions::kDefaultNumDexMethodsThreshold, + CompilerOptions::kDefaultInlineDepthLimit, + CompilerOptions::kDefaultInlineMaxCodeUnits, false, CompilerOptions::kDefaultTopKProfileThreshold, false, @@ -134,6 +136,6 @@ TEST_ISA(kX86_64) TEST_ISA(kMips) TEST_ISA(kMips64) -#endif // HAVE_ANDROID_OS +#endif // __ANDROID__ } // namespace art diff --git a/compiler/dex/quick/quick_compiler.h b/compiler/dex/quick/quick_compiler.h index 43dd5786af..4a39ab3565 100644 --- a/compiler/dex/quick/quick_compiler.h +++ b/compiler/dex/quick/quick_compiler.h @@ -50,7 +50,7 @@ class QuickCompiler : public Compiler { const DexFile& dex_file) const OVERRIDE; uintptr_t GetEntryPointOf(ArtMethod* method) const OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static Mir2Lir* GetCodeGenerator(CompilationUnit* cu, void* compilation_unit); diff --git a/compiler/dex/quick/x86/quick_assemble_x86_test.cc b/compiler/dex/quick/x86/quick_assemble_x86_test.cc index 798e23fbac..98e9f38d52 100644 --- a/compiler/dex/quick/x86/quick_assemble_x86_test.cc +++ b/compiler/dex/quick/x86/quick_assemble_x86_test.cc @@ -39,6 +39,8 @@ class QuickAssembleX86TestBase : public testing::Test { CompilerOptions::kDefaultSmallMethodThreshold, CompilerOptions::kDefaultTinyMethodThreshold, CompilerOptions::kDefaultNumDexMethodsThreshold, + CompilerOptions::kDefaultInlineDepthLimit, + CompilerOptions::kDefaultInlineMaxCodeUnits, false, CompilerOptions::kDefaultTopKProfileThreshold, false, diff --git a/compiler/dex/quick_compiler_callbacks.h b/compiler/dex/quick_compiler_callbacks.h index d692d26229..03bf57bded 100644 --- a/compiler/dex/quick_compiler_callbacks.h +++ b/compiler/dex/quick_compiler_callbacks.h @@ -38,7 +38,7 @@ class QuickCompilerCallbacks FINAL : public CompilerCallbacks { ~QuickCompilerCallbacks() { } bool MethodVerified(verifier::MethodVerifier* verifier) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE; + SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE; void ClassRejected(ClassReference ref) OVERRIDE; diff --git a/compiler/dex/verification_results.h b/compiler/dex/verification_results.h index 7fc2a2363d..9934f6b13b 100644 --- a/compiler/dex/verification_results.h +++ b/compiler/dex/verification_results.h @@ -43,15 +43,15 @@ class VerificationResults { ~VerificationResults(); bool ProcessVerifiedMethod(verifier::MethodVerifier* method_verifier) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(verified_methods_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + 
REQUIRES(!verified_methods_lock_); const VerifiedMethod* GetVerifiedMethod(MethodReference ref) - LOCKS_EXCLUDED(verified_methods_lock_); - void RemoveVerifiedMethod(MethodReference ref) LOCKS_EXCLUDED(verified_methods_lock_); + REQUIRES(!verified_methods_lock_); + void RemoveVerifiedMethod(MethodReference ref) REQUIRES(!verified_methods_lock_); - void AddRejectedClass(ClassReference ref) LOCKS_EXCLUDED(rejected_classes_lock_); - bool IsClassRejected(ClassReference ref) LOCKS_EXCLUDED(rejected_classes_lock_); + void AddRejectedClass(ClassReference ref) REQUIRES(!rejected_classes_lock_); + bool IsClassRejected(ClassReference ref) REQUIRES(!rejected_classes_lock_); bool IsCandidateForCompilation(MethodReference& method_ref, const uint32_t access_flags); diff --git a/compiler/dex/verified_method.h b/compiler/dex/verified_method.h index bf11839cf0..f7d6d67368 100644 --- a/compiler/dex/verified_method.h +++ b/compiler/dex/verified_method.h @@ -44,7 +44,7 @@ class VerifiedMethod { typedef SafeMap<uint32_t, DexFileReference> DequickenMap; static const VerifiedMethod* Create(verifier::MethodVerifier* method_verifier, bool compile) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ~VerifiedMethod() = default; const std::vector<uint8_t>& GetDexGcMap() const { @@ -107,15 +107,15 @@ class VerifiedMethod { // Generate devirtualizaion map into devirt_map_. void GenerateDevirtMap(verifier::MethodVerifier* method_verifier) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Generate dequickening map into dequicken_map_. Returns false if there is an error. bool GenerateDequickenMap(verifier::MethodVerifier* method_verifier) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Generate safe case set into safe_cast_set_. 
void GenerateSafeCastSet(verifier::MethodVerifier* method_verifier) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); std::vector<uint8_t> dex_gc_map_; DevirtualizationMap devirt_map_; diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc index a52bfaeb5b..affa52a37a 100644 --- a/compiler/driver/compiler_driver.cc +++ b/compiler/driver/compiler_driver.cc @@ -167,69 +167,69 @@ class CompilerDriver::AOTCompilationStats { #define STATS_LOCK() #endif - void TypeInDexCache() { + void TypeInDexCache() REQUIRES(!stats_lock_) { STATS_LOCK(); types_in_dex_cache_++; } - void TypeNotInDexCache() { + void TypeNotInDexCache() REQUIRES(!stats_lock_) { STATS_LOCK(); types_not_in_dex_cache_++; } - void StringInDexCache() { + void StringInDexCache() REQUIRES(!stats_lock_) { STATS_LOCK(); strings_in_dex_cache_++; } - void StringNotInDexCache() { + void StringNotInDexCache() REQUIRES(!stats_lock_) { STATS_LOCK(); strings_not_in_dex_cache_++; } - void TypeDoesntNeedAccessCheck() { + void TypeDoesntNeedAccessCheck() REQUIRES(!stats_lock_) { STATS_LOCK(); resolved_types_++; } - void TypeNeedsAccessCheck() { + void TypeNeedsAccessCheck() REQUIRES(!stats_lock_) { STATS_LOCK(); unresolved_types_++; } - void ResolvedInstanceField() { + void ResolvedInstanceField() REQUIRES(!stats_lock_) { STATS_LOCK(); resolved_instance_fields_++; } - void UnresolvedInstanceField() { + void UnresolvedInstanceField() REQUIRES(!stats_lock_) { STATS_LOCK(); unresolved_instance_fields_++; } - void ResolvedLocalStaticField() { + void ResolvedLocalStaticField() REQUIRES(!stats_lock_) { STATS_LOCK(); resolved_local_static_fields_++; } - void ResolvedStaticField() { + void ResolvedStaticField() REQUIRES(!stats_lock_) { STATS_LOCK(); resolved_static_fields_++; } - void UnresolvedStaticField() { + void UnresolvedStaticField() REQUIRES(!stats_lock_) { STATS_LOCK(); unresolved_static_fields_++; } // Indicate that type information from the verifier led to devirtualization. - void PreciseTypeDevirtualization() { + void PreciseTypeDevirtualization() REQUIRES(!stats_lock_) { STATS_LOCK(); type_based_devirtualization_++; } // Indicate that a method of the given type was resolved at compile time. - void ResolvedMethod(InvokeType type) { + void ResolvedMethod(InvokeType type) REQUIRES(!stats_lock_) { DCHECK_LE(type, kMaxInvokeType); STATS_LOCK(); resolved_methods_[type]++; @@ -237,7 +237,7 @@ class CompilerDriver::AOTCompilationStats { // Indicate that a method of the given type was unresolved at compile time as it was in an // unknown dex file. - void UnresolvedMethod(InvokeType type) { + void UnresolvedMethod(InvokeType type) REQUIRES(!stats_lock_) { DCHECK_LE(type, kMaxInvokeType); STATS_LOCK(); unresolved_methods_[type]++; @@ -245,27 +245,27 @@ class CompilerDriver::AOTCompilationStats { // Indicate that a type of virtual method dispatch has been converted into a direct method // dispatch. - void VirtualMadeDirect(InvokeType type) { + void VirtualMadeDirect(InvokeType type) REQUIRES(!stats_lock_) { DCHECK(type == kVirtual || type == kInterface || type == kSuper); STATS_LOCK(); virtual_made_direct_[type]++; } // Indicate that a method of the given type was able to call directly into boot. - void DirectCallsToBoot(InvokeType type) { + void DirectCallsToBoot(InvokeType type) REQUIRES(!stats_lock_) { DCHECK_LE(type, kMaxInvokeType); STATS_LOCK(); direct_calls_to_boot_[type]++; } // Indicate that a method of the given type was able to be resolved directly from boot. 
- void DirectMethodsToBoot(InvokeType type) { + void DirectMethodsToBoot(InvokeType type) REQUIRES(!stats_lock_) { DCHECK_LE(type, kMaxInvokeType); STATS_LOCK(); direct_methods_to_boot_[type]++; } - void ProcessedInvoke(InvokeType type, int flags) { + void ProcessedInvoke(InvokeType type, int flags) REQUIRES(!stats_lock_) { STATS_LOCK(); if (flags == 0) { unresolved_methods_[type]++; @@ -290,13 +290,13 @@ class CompilerDriver::AOTCompilationStats { } // A check-cast could be eliminated due to verifier type analysis. - void SafeCast() { + void SafeCast() REQUIRES(!stats_lock_) { STATS_LOCK(); safe_casts_++; } // A check-cast couldn't be eliminated due to verifier type analysis. - void NotASafeCast() { + void NotASafeCast() REQUIRES(!stats_lock_) { STATS_LOCK(); not_safe_casts_++; } @@ -690,70 +690,79 @@ bool CompilerDriver::IsMethodToCompile(const MethodReference& method_ref) const return methods_to_compile_->find(tmp.c_str()) != methods_to_compile_->end(); } -static void ResolveExceptionsForMethod( - ArtMethod* method_handle, std::set<std::pair<uint16_t, const DexFile*>>& exceptions_to_resolve) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - const DexFile::CodeItem* code_item = method_handle->GetCodeItem(); - if (code_item == nullptr) { - return; // native or abstract method - } - if (code_item->tries_size_ == 0) { - return; // nothing to process - } - const uint8_t* encoded_catch_handler_list = DexFile::GetCatchHandlerData(*code_item, 0); - size_t num_encoded_catch_handlers = DecodeUnsignedLeb128(&encoded_catch_handler_list); - for (size_t i = 0; i < num_encoded_catch_handlers; i++) { - int32_t encoded_catch_handler_size = DecodeSignedLeb128(&encoded_catch_handler_list); - bool has_catch_all = false; - if (encoded_catch_handler_size <= 0) { - encoded_catch_handler_size = -encoded_catch_handler_size; - has_catch_all = true; - } - for (int32_t j = 0; j < encoded_catch_handler_size; j++) { - uint16_t encoded_catch_handler_handlers_type_idx = - DecodeUnsignedLeb128(&encoded_catch_handler_list); - // Add to set of types to resolve if not already in the dex cache resolved types - if (!method_handle->IsResolvedTypeIdx(encoded_catch_handler_handlers_type_idx)) { - exceptions_to_resolve.insert( - std::pair<uint16_t, const DexFile*>(encoded_catch_handler_handlers_type_idx, - method_handle->GetDexFile())); +class ResolveCatchBlockExceptionsClassVisitor : public ClassVisitor { + public: + ResolveCatchBlockExceptionsClassVisitor( + std::set<std::pair<uint16_t, const DexFile*>>& exceptions_to_resolve) + : exceptions_to_resolve_(exceptions_to_resolve) {} + + void ResolveExceptionsForMethod(ArtMethod* method_handle) SHARED_REQUIRES(Locks::mutator_lock_) { + const DexFile::CodeItem* code_item = method_handle->GetCodeItem(); + if (code_item == nullptr) { + return; // native or abstract method + } + if (code_item->tries_size_ == 0) { + return; // nothing to process + } + const uint8_t* encoded_catch_handler_list = DexFile::GetCatchHandlerData(*code_item, 0); + size_t num_encoded_catch_handlers = DecodeUnsignedLeb128(&encoded_catch_handler_list); + for (size_t i = 0; i < num_encoded_catch_handlers; i++) { + int32_t encoded_catch_handler_size = DecodeSignedLeb128(&encoded_catch_handler_list); + bool has_catch_all = false; + if (encoded_catch_handler_size <= 0) { + encoded_catch_handler_size = -encoded_catch_handler_size; + has_catch_all = true; + } + for (int32_t j = 0; j < encoded_catch_handler_size; j++) { + uint16_t encoded_catch_handler_handlers_type_idx = + 
DecodeUnsignedLeb128(&encoded_catch_handler_list); + // Add to set of types to resolve if not already in the dex cache resolved types + if (!method_handle->IsResolvedTypeIdx(encoded_catch_handler_handlers_type_idx)) { + exceptions_to_resolve_.emplace(encoded_catch_handler_handlers_type_idx, + method_handle->GetDexFile()); + } + // ignore address associated with catch handler + DecodeUnsignedLeb128(&encoded_catch_handler_list); + } + if (has_catch_all) { + // ignore catch all address + DecodeUnsignedLeb128(&encoded_catch_handler_list); } - // ignore address associated with catch handler - DecodeUnsignedLeb128(&encoded_catch_handler_list); - } - if (has_catch_all) { - // ignore catch all address - DecodeUnsignedLeb128(&encoded_catch_handler_list); } } -} -static bool ResolveCatchBlockExceptionsClassVisitor(mirror::Class* c, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - auto* exceptions_to_resolve = - reinterpret_cast<std::set<std::pair<uint16_t, const DexFile*>>*>(arg); - const auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize(); - for (auto& m : c->GetVirtualMethods(pointer_size)) { - ResolveExceptionsForMethod(&m, *exceptions_to_resolve); + virtual bool Visit(mirror::Class* c) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { + const auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize(); + for (auto& m : c->GetVirtualMethods(pointer_size)) { + ResolveExceptionsForMethod(&m); + } + for (auto& m : c->GetDirectMethods(pointer_size)) { + ResolveExceptionsForMethod(&m); + } + return true; } - for (auto& m : c->GetDirectMethods(pointer_size)) { - ResolveExceptionsForMethod(&m, *exceptions_to_resolve); + + private: + std::set<std::pair<uint16_t, const DexFile*>>& exceptions_to_resolve_; +}; + +class RecordImageClassesVisitor : public ClassVisitor { + public: + explicit RecordImageClassesVisitor(std::unordered_set<std::string>* image_classes) + : image_classes_(image_classes) {} + + bool Visit(mirror::Class* klass) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { + std::string temp; + image_classes_->insert(klass->GetDescriptor(&temp)); + return true; } - return true; -} -static bool RecordImageClassesVisitor(mirror::Class* klass, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - std::unordered_set<std::string>* image_classes = - reinterpret_cast<std::unordered_set<std::string>*>(arg); - std::string temp; - image_classes->insert(klass->GetDescriptor(&temp)); - return true; -} + private: + std::unordered_set<std::string>* const image_classes_; +}; // Make a list of descriptors for classes to include in the image -void CompilerDriver::LoadImageClasses(TimingLogger* timings) - LOCKS_EXCLUDED(Locks::mutator_lock_) { +void CompilerDriver::LoadImageClasses(TimingLogger* timings) { CHECK(timings != nullptr); if (!IsImage()) { return; @@ -788,8 +797,8 @@ void CompilerDriver::LoadImageClasses(TimingLogger* timings) hs.NewHandle(class_linker->FindSystemClass(self, "Ljava/lang/Throwable;"))); do { unresolved_exception_types.clear(); - class_linker->VisitClasses(ResolveCatchBlockExceptionsClassVisitor, - &unresolved_exception_types); + ResolveCatchBlockExceptionsClassVisitor visitor(unresolved_exception_types); + class_linker->VisitClasses(&visitor); for (const std::pair<uint16_t, const DexFile*>& exception_type : unresolved_exception_types) { uint16_t exception_type_idx = exception_type.first; const DexFile* dex_file = exception_type.second; @@ -812,14 +821,15 @@ void CompilerDriver::LoadImageClasses(TimingLogger* timings) // We 
walk the roots looking for classes so that we'll pick up the // above classes plus any classes them depend on such super // classes, interfaces, and the required ClassLinker roots. - class_linker->VisitClasses(RecordImageClassesVisitor, image_classes_.get()); + RecordImageClassesVisitor visitor(image_classes_.get()); + class_linker->VisitClasses(&visitor); CHECK_NE(image_classes_->size(), 0U); } static void MaybeAddToImageClasses(Handle<mirror::Class> c, std::unordered_set<std::string>* image_classes) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { Thread* self = Thread::Current(); StackHandleScope<1> hs(self); // Make a copy of the handle so that we don't clobber it doing Assign. @@ -876,7 +886,7 @@ class ClinitImageUpdate { // Visitor for VisitReferences. void operator()(mirror::Object* object, MemberOffset field_offset, bool /* is_static */) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { mirror::Object* ref = object->GetFieldObject<mirror::Object>(field_offset); if (ref != nullptr) { VisitClinitClassesObject(ref); @@ -884,10 +894,15 @@ class ClinitImageUpdate { } // java.lang.Reference visitor for VisitReferences. - void operator()(mirror::Class* /* klass */, mirror::Reference* /* ref */) const { - } + void operator()(mirror::Class* klass ATTRIBUTE_UNUSED, mirror::Reference* ref ATTRIBUTE_UNUSED) + const {} - void Walk() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + // Ignore class native roots. + void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) + const {} + void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {} + + void Walk() SHARED_REQUIRES(Locks::mutator_lock_) { // Use the initial classes as roots for a search. for (mirror::Class* klass_root : image_classes_) { VisitClinitClassesObject(klass_root); @@ -895,9 +910,32 @@ class ClinitImageUpdate { } private: + class FindImageClassesVisitor : public ClassVisitor { + public: + explicit FindImageClassesVisitor(ClinitImageUpdate* data) : data_(data) {} + + bool Visit(mirror::Class* klass) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { + std::string temp; + const char* name = klass->GetDescriptor(&temp); + if (data_->image_class_descriptors_->find(name) != data_->image_class_descriptors_->end()) { + data_->image_classes_.push_back(klass); + } else { + // Check whether it is initialized and has a clinit. They must be kept, too. + if (klass->IsInitialized() && klass->FindClassInitializer( + Runtime::Current()->GetClassLinker()->GetImagePointerSize()) != nullptr) { + data_->image_classes_.push_back(klass); + } + } + return true; + } + + private: + ClinitImageUpdate* const data_; + }; + ClinitImageUpdate(std::unordered_set<std::string>* image_class_descriptors, Thread* self, ClassLinker* linker) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : + SHARED_REQUIRES(Locks::mutator_lock_) : image_class_descriptors_(image_class_descriptors), self_(self) { CHECK(linker != nullptr); CHECK(image_class_descriptors != nullptr); @@ -911,29 +949,12 @@ class ClinitImageUpdate { // Find all the already-marked classes. 
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); - linker->VisitClasses(FindImageClasses, this); - } - - static bool FindImageClasses(mirror::Class* klass, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - ClinitImageUpdate* data = reinterpret_cast<ClinitImageUpdate*>(arg); - std::string temp; - const char* name = klass->GetDescriptor(&temp); - if (data->image_class_descriptors_->find(name) != data->image_class_descriptors_->end()) { - data->image_classes_.push_back(klass); - } else { - // Check whether it is initialized and has a clinit. They must be kept, too. - if (klass->IsInitialized() && klass->FindClassInitializer( - Runtime::Current()->GetClassLinker()->GetImagePointerSize()) != nullptr) { - data->image_classes_.push_back(klass); - } - } - - return true; + FindImageClassesVisitor visitor(this); + linker->VisitClasses(&visitor); } void VisitClinitClassesObject(mirror::Object* object) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(object != nullptr); if (marked_objects_.find(object) != marked_objects_.end()) { // Already processed. @@ -1569,10 +1590,14 @@ bool CompilerDriver::IsSafeCast(const DexCompilationUnit* mUnit, uint32_t dex_pc return result; } -class ParallelCompilationManager { +class CompilationVisitor { public: - typedef void Callback(const ParallelCompilationManager* manager, size_t index); + virtual ~CompilationVisitor() {} + virtual void Visit(size_t index) = 0; +}; +class ParallelCompilationManager { + public: ParallelCompilationManager(ClassLinker* class_linker, jobject class_loader, CompilerDriver* compiler, @@ -1610,14 +1635,15 @@ class ParallelCompilationManager { return dex_files_; } - void ForAll(size_t begin, size_t end, Callback callback, size_t work_units) { + void ForAll(size_t begin, size_t end, CompilationVisitor* visitor, size_t work_units) + REQUIRES(!*Locks::mutator_lock_) { Thread* self = Thread::Current(); self->AssertNoPendingException(); CHECK_GT(work_units, 0U); index_.StoreRelaxed(begin); for (size_t i = 0; i < work_units; ++i) { - thread_pool_->AddTask(self, new ForAllClosure(this, end, callback)); + thread_pool_->AddTask(self, new ForAllClosure(this, end, visitor)); } thread_pool_->StartWorkers(self); @@ -1636,10 +1662,10 @@ class ParallelCompilationManager { private: class ForAllClosure : public Task { public: - ForAllClosure(ParallelCompilationManager* manager, size_t end, Callback* callback) + ForAllClosure(ParallelCompilationManager* manager, size_t end, CompilationVisitor* visitor) : manager_(manager), end_(end), - callback_(callback) {} + visitor_(visitor) {} virtual void Run(Thread* self) { while (true) { @@ -1647,7 +1673,7 @@ class ParallelCompilationManager { if (UNLIKELY(index >= end_)) { break; } - callback_(manager_, index); + visitor_->Visit(index); self->AssertNoPendingException(); } } @@ -1659,7 +1685,7 @@ class ParallelCompilationManager { private: ParallelCompilationManager* const manager_; const size_t end_; - Callback* const callback_; + CompilationVisitor* const visitor_; }; AtomicInteger index_; @@ -1676,7 +1702,7 @@ class ParallelCompilationManager { // A fast version of SkipClass above if the class pointer is available // that avoids the expensive FindInClassPath search. 
static bool SkipClass(jobject class_loader, const DexFile& dex_file, mirror::Class* klass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(klass != nullptr); const DexFile& original_dex_file = *klass->GetDexCache()->GetDexFile(); if (&dex_file != &original_dex_file) { @@ -1691,7 +1717,7 @@ static bool SkipClass(jobject class_loader, const DexFile& dex_file, mirror::Cla } static void CheckAndClearResolveException(Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { CHECK(self->IsExceptionPending()); mirror::Throwable* exception = self->GetException(); std::string temp; @@ -1717,134 +1743,148 @@ static void CheckAndClearResolveException(Thread* self) self->ClearException(); } -static void ResolveClassFieldsAndMethods(const ParallelCompilationManager* manager, - size_t class_def_index) - LOCKS_EXCLUDED(Locks::mutator_lock_) { - ATRACE_CALL(); - Thread* self = Thread::Current(); - jobject jclass_loader = manager->GetClassLoader(); - const DexFile& dex_file = *manager->GetDexFile(); - ClassLinker* class_linker = manager->GetClassLinker(); - - // If an instance field is final then we need to have a barrier on the return, static final - // fields are assigned within the lock held for class initialization. Conservatively assume - // constructor barriers are always required. - bool requires_constructor_barrier = true; - - // Method and Field are the worst. We can't resolve without either - // context from the code use (to disambiguate virtual vs direct - // method and instance vs static field) or from class - // definitions. While the compiler will resolve what it can as it - // needs it, here we try to resolve fields and methods used in class - // definitions, since many of them many never be referenced by - // generated code. - const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index); - ScopedObjectAccess soa(self); - StackHandleScope<2> hs(soa.Self()); - Handle<mirror::ClassLoader> class_loader( - hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader))); - Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(dex_file))); - // Resolve the class. - mirror::Class* klass = class_linker->ResolveType(dex_file, class_def.class_idx_, dex_cache, - class_loader); - bool resolve_fields_and_methods; - if (klass == nullptr) { - // Class couldn't be resolved, for example, super-class is in a different dex file. Don't - // attempt to resolve methods and fields when there is no declaring class. - CheckAndClearResolveException(soa.Self()); - resolve_fields_and_methods = false; - } else { - // We successfully resolved a class, should we skip it? - if (SkipClass(jclass_loader, dex_file, klass)) { - return; - } - // We want to resolve the methods and fields eagerly. - resolve_fields_and_methods = true; - } - // Note the class_data pointer advances through the headers, - // static fields, instance fields, direct methods, and virtual - // methods. - const uint8_t* class_data = dex_file.GetClassData(class_def); - if (class_data == nullptr) { - // Empty class such as a marker interface. 
- requires_constructor_barrier = false; - } else { - ClassDataItemIterator it(dex_file, class_data); - while (it.HasNextStaticField()) { - if (resolve_fields_and_methods) { - ArtField* field = class_linker->ResolveField(dex_file, it.GetMemberIndex(), - dex_cache, class_loader, true); - if (field == nullptr) { - CheckAndClearResolveException(soa.Self()); - } - } - it.Next(); - } - // We require a constructor barrier if there are final instance fields. - requires_constructor_barrier = false; - while (it.HasNextInstanceField()) { - if (it.MemberIsFinal()) { - requires_constructor_barrier = true; +class ResolveClassFieldsAndMethodsVisitor : public CompilationVisitor { + public: + explicit ResolveClassFieldsAndMethodsVisitor(const ParallelCompilationManager* manager) + : manager_(manager) {} + + void Visit(size_t class_def_index) OVERRIDE REQUIRES(!Locks::mutator_lock_) { + ATRACE_CALL(); + Thread* const self = Thread::Current(); + jobject jclass_loader = manager_->GetClassLoader(); + const DexFile& dex_file = *manager_->GetDexFile(); + ClassLinker* class_linker = manager_->GetClassLinker(); + + // If an instance field is final then we need to have a barrier on the return, static final + // fields are assigned within the lock held for class initialization. Conservatively assume + // constructor barriers are always required. + bool requires_constructor_barrier = true; + + // Method and Field are the worst. We can't resolve without either + // context from the code use (to disambiguate virtual vs direct + // method and instance vs static field) or from class + // definitions. While the compiler will resolve what it can as it + // needs it, here we try to resolve fields and methods used in class + // definitions, since many of them many never be referenced by + // generated code. + const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index); + ScopedObjectAccess soa(self); + StackHandleScope<2> hs(soa.Self()); + Handle<mirror::ClassLoader> class_loader( + hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader))); + Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(dex_file))); + // Resolve the class. + mirror::Class* klass = class_linker->ResolveType(dex_file, class_def.class_idx_, dex_cache, + class_loader); + bool resolve_fields_and_methods; + if (klass == nullptr) { + // Class couldn't be resolved, for example, super-class is in a different dex file. Don't + // attempt to resolve methods and fields when there is no declaring class. + CheckAndClearResolveException(soa.Self()); + resolve_fields_and_methods = false; + } else { + // We successfully resolved a class, should we skip it? + if (SkipClass(jclass_loader, dex_file, klass)) { + return; } - if (resolve_fields_and_methods) { - ArtField* field = class_linker->ResolveField(dex_file, it.GetMemberIndex(), - dex_cache, class_loader, false); - if (field == nullptr) { - CheckAndClearResolveException(soa.Self()); + // We want to resolve the methods and fields eagerly. + resolve_fields_and_methods = true; + } + // Note the class_data pointer advances through the headers, + // static fields, instance fields, direct methods, and virtual + // methods. + const uint8_t* class_data = dex_file.GetClassData(class_def); + if (class_data == nullptr) { + // Empty class such as a marker interface. 
+ requires_constructor_barrier = false; + } else { + ClassDataItemIterator it(dex_file, class_data); + while (it.HasNextStaticField()) { + if (resolve_fields_and_methods) { + ArtField* field = class_linker->ResolveField(dex_file, it.GetMemberIndex(), + dex_cache, class_loader, true); + if (field == nullptr) { + CheckAndClearResolveException(soa.Self()); + } } + it.Next(); } - it.Next(); - } - if (resolve_fields_and_methods) { - while (it.HasNextDirectMethod()) { - ArtMethod* method = class_linker->ResolveMethod( - dex_file, it.GetMemberIndex(), dex_cache, class_loader, nullptr, - it.GetMethodInvokeType(class_def)); - if (method == nullptr) { - CheckAndClearResolveException(soa.Self()); + // We require a constructor barrier if there are final instance fields. + requires_constructor_barrier = false; + while (it.HasNextInstanceField()) { + if (it.MemberIsFinal()) { + requires_constructor_barrier = true; + } + if (resolve_fields_and_methods) { + ArtField* field = class_linker->ResolveField(dex_file, it.GetMemberIndex(), + dex_cache, class_loader, false); + if (field == nullptr) { + CheckAndClearResolveException(soa.Self()); + } } it.Next(); } - while (it.HasNextVirtualMethod()) { - ArtMethod* method = class_linker->ResolveMethod( - dex_file, it.GetMemberIndex(), dex_cache, class_loader, nullptr, - it.GetMethodInvokeType(class_def)); - if (method == nullptr) { - CheckAndClearResolveException(soa.Self()); + if (resolve_fields_and_methods) { + while (it.HasNextDirectMethod()) { + ArtMethod* method = class_linker->ResolveMethod( + dex_file, it.GetMemberIndex(), dex_cache, class_loader, nullptr, + it.GetMethodInvokeType(class_def)); + if (method == nullptr) { + CheckAndClearResolveException(soa.Self()); + } + it.Next(); } - it.Next(); + while (it.HasNextVirtualMethod()) { + ArtMethod* method = class_linker->ResolveMethod( + dex_file, it.GetMemberIndex(), dex_cache, class_loader, nullptr, + it.GetMethodInvokeType(class_def)); + if (method == nullptr) { + CheckAndClearResolveException(soa.Self()); + } + it.Next(); + } + DCHECK(!it.HasNext()); } - DCHECK(!it.HasNext()); + } + if (requires_constructor_barrier) { + manager_->GetCompiler()->AddRequiresConstructorBarrier(self, &dex_file, class_def_index); } } - if (requires_constructor_barrier) { - manager->GetCompiler()->AddRequiresConstructorBarrier(self, &dex_file, class_def_index); - } -} -static void ResolveType(const ParallelCompilationManager* manager, size_t type_idx) - LOCKS_EXCLUDED(Locks::mutator_lock_) { - // Class derived values are more complicated, they require the linker and loader. - ScopedObjectAccess soa(Thread::Current()); - ClassLinker* class_linker = manager->GetClassLinker(); - const DexFile& dex_file = *manager->GetDexFile(); - StackHandleScope<2> hs(soa.Self()); - Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(dex_file))); - Handle<mirror::ClassLoader> class_loader( - hs.NewHandle(soa.Decode<mirror::ClassLoader*>(manager->GetClassLoader()))); - mirror::Class* klass = class_linker->ResolveType(dex_file, type_idx, dex_cache, class_loader); + private: + const ParallelCompilationManager* const manager_; +}; - if (klass == nullptr) { - CHECK(soa.Self()->IsExceptionPending()); - mirror::Throwable* exception = soa.Self()->GetException(); - VLOG(compiler) << "Exception during type resolution: " << exception->Dump(); - if (exception->GetClass()->DescriptorEquals("Ljava/lang/OutOfMemoryError;")) { - // There's little point continuing compilation if the heap is exhausted. 
- LOG(FATAL) << "Out of memory during type resolution for compilation"; +class ResolveTypeVisitor : public CompilationVisitor { + public: + explicit ResolveTypeVisitor(const ParallelCompilationManager* manager) : manager_(manager) { + } + virtual void Visit(size_t type_idx) OVERRIDE REQUIRES(!Locks::mutator_lock_) { + // Class derived values are more complicated, they require the linker and loader. + ScopedObjectAccess soa(Thread::Current()); + ClassLinker* class_linker = manager_->GetClassLinker(); + const DexFile& dex_file = *manager_->GetDexFile(); + StackHandleScope<2> hs(soa.Self()); + Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(dex_file))); + Handle<mirror::ClassLoader> class_loader( + hs.NewHandle(soa.Decode<mirror::ClassLoader*>(manager_->GetClassLoader()))); + mirror::Class* klass = class_linker->ResolveType(dex_file, type_idx, dex_cache, class_loader); + + if (klass == nullptr) { + soa.Self()->AssertPendingException(); + mirror::Throwable* exception = soa.Self()->GetException(); + VLOG(compiler) << "Exception during type resolution: " << exception->Dump(); + if (exception->GetClass()->DescriptorEquals("Ljava/lang/OutOfMemoryError;")) { + // There's little point continuing compilation if the heap is exhausted. + LOG(FATAL) << "Out of memory during type resolution for compilation"; + } + soa.Self()->ClearException(); } - soa.Self()->ClearException(); } -} + + private: + const ParallelCompilationManager* const manager_; +}; void CompilerDriver::ResolveDexFile(jobject class_loader, const DexFile& dex_file, const std::vector<const DexFile*>& dex_files, @@ -1860,17 +1900,18 @@ void CompilerDriver::ResolveDexFile(jobject class_loader, const DexFile& dex_fil // For images we resolve all types, such as array, whereas for applications just those with // classdefs are resolved by ResolveClassFieldsAndMethods. 
TimingLogger::ScopedTiming t("Resolve Types", timings); - context.ForAll(0, dex_file.NumTypeIds(), ResolveType, thread_count_); + ResolveTypeVisitor visitor(&context); + context.ForAll(0, dex_file.NumTypeIds(), &visitor, thread_count_); } TimingLogger::ScopedTiming t("Resolve MethodsAndFields", timings); - context.ForAll(0, dex_file.NumClassDefs(), ResolveClassFieldsAndMethods, thread_count_); + ResolveClassFieldsAndMethodsVisitor visitor(&context); + context.ForAll(0, dex_file.NumClassDefs(), &visitor, thread_count_); } void CompilerDriver::SetVerified(jobject class_loader, const std::vector<const DexFile*>& dex_files, ThreadPool* thread_pool, TimingLogger* timings) { - for (size_t i = 0; i != dex_files.size(); ++i) { - const DexFile* dex_file = dex_files[i]; + for (const DexFile* dex_file : dex_files) { CHECK(dex_file != nullptr); SetVerifiedDexFile(class_loader, *dex_file, dex_files, thread_pool, timings); } @@ -1878,67 +1919,73 @@ void CompilerDriver::SetVerified(jobject class_loader, const std::vector<const D void CompilerDriver::Verify(jobject class_loader, const std::vector<const DexFile*>& dex_files, ThreadPool* thread_pool, TimingLogger* timings) { - for (size_t i = 0; i != dex_files.size(); ++i) { - const DexFile* dex_file = dex_files[i]; + for (const DexFile* dex_file : dex_files) { CHECK(dex_file != nullptr); VerifyDexFile(class_loader, *dex_file, dex_files, thread_pool, timings); } } -static void VerifyClass(const ParallelCompilationManager* manager, size_t class_def_index) - LOCKS_EXCLUDED(Locks::mutator_lock_) { - ATRACE_CALL(); - ScopedObjectAccess soa(Thread::Current()); - const DexFile& dex_file = *manager->GetDexFile(); - const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index); - const char* descriptor = dex_file.GetClassDescriptor(class_def); - ClassLinker* class_linker = manager->GetClassLinker(); - jobject jclass_loader = manager->GetClassLoader(); - StackHandleScope<3> hs(soa.Self()); - Handle<mirror::ClassLoader> class_loader( - hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader))); - Handle<mirror::Class> klass( - hs.NewHandle(class_linker->FindClass(soa.Self(), descriptor, class_loader))); - if (klass.Get() == nullptr) { - CHECK(soa.Self()->IsExceptionPending()); - soa.Self()->ClearException(); +class VerifyClassVisitor : public CompilationVisitor { + public: + explicit VerifyClassVisitor(const ParallelCompilationManager* manager) : manager_(manager) {} - /* - * At compile time, we can still structurally verify the class even if FindClass fails. - * This is to ensure the class is structurally sound for compilation. An unsound class - * will be rejected by the verifier and later skipped during compilation in the compiler. - */ - Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(dex_file))); - std::string error_msg; - if (verifier::MethodVerifier::VerifyClass(soa.Self(), &dex_file, dex_cache, class_loader, - &class_def, true, &error_msg) == - verifier::MethodVerifier::kHardFailure) { - LOG(ERROR) << "Verification failed on class " << PrettyDescriptor(descriptor) - << " because: " << error_msg; - manager->GetCompiler()->SetHadHardVerifierFailure(); - } - } else if (!SkipClass(jclass_loader, dex_file, klass.Get())) { - CHECK(klass->IsResolved()) << PrettyClass(klass.Get()); - class_linker->VerifyClass(soa.Self(), klass); - - if (klass->IsErroneous()) { - // ClassLinker::VerifyClass throws, which isn't useful in the compiler. 
+ virtual void Visit(size_t class_def_index) REQUIRES(!Locks::mutator_lock_) OVERRIDE { + ATRACE_CALL(); + ScopedObjectAccess soa(Thread::Current()); + const DexFile& dex_file = *manager_->GetDexFile(); + const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index); + const char* descriptor = dex_file.GetClassDescriptor(class_def); + ClassLinker* class_linker = manager_->GetClassLinker(); + jobject jclass_loader = manager_->GetClassLoader(); + StackHandleScope<3> hs(soa.Self()); + Handle<mirror::ClassLoader> class_loader( + hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader))); + Handle<mirror::Class> klass( + hs.NewHandle(class_linker->FindClass(soa.Self(), descriptor, class_loader))); + if (klass.Get() == nullptr) { CHECK(soa.Self()->IsExceptionPending()); soa.Self()->ClearException(); - manager->GetCompiler()->SetHadHardVerifierFailure(); - } - CHECK(klass->IsCompileTimeVerified() || klass->IsErroneous()) - << PrettyDescriptor(klass.Get()) << ": state=" << klass->GetStatus(); + /* + * At compile time, we can still structurally verify the class even if FindClass fails. + * This is to ensure the class is structurally sound for compilation. An unsound class + * will be rejected by the verifier and later skipped during compilation in the compiler. + */ + Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(dex_file))); + std::string error_msg; + if (verifier::MethodVerifier::VerifyClass(soa.Self(), &dex_file, dex_cache, class_loader, + &class_def, true, &error_msg) == + verifier::MethodVerifier::kHardFailure) { + LOG(ERROR) << "Verification failed on class " << PrettyDescriptor(descriptor) + << " because: " << error_msg; + manager_->GetCompiler()->SetHadHardVerifierFailure(); + } + } else if (!SkipClass(jclass_loader, dex_file, klass.Get())) { + CHECK(klass->IsResolved()) << PrettyClass(klass.Get()); + class_linker->VerifyClass(soa.Self(), klass); + + if (klass->IsErroneous()) { + // ClassLinker::VerifyClass throws, which isn't useful in the compiler. + CHECK(soa.Self()->IsExceptionPending()); + soa.Self()->ClearException(); + manager_->GetCompiler()->SetHadHardVerifierFailure(); + } - // It is *very* problematic if there are verification errors in the boot classpath. For example, - // we rely on things working OK without verification when the decryption dialog is brought up. - // So abort in a debug build if we find this violated. - DCHECK(!manager->GetCompiler()->IsImage() || klass->IsVerified()) << "Boot classpath class " << - PrettyClass(klass.Get()) << " failed to fully verify."; + CHECK(klass->IsCompileTimeVerified() || klass->IsErroneous()) + << PrettyDescriptor(klass.Get()) << ": state=" << klass->GetStatus(); + + // It is *very* problematic if there are verification errors in the boot classpath. For example, + // we rely on things working OK without verification when the decryption dialog is brought up. + // So abort in a debug build if we find this violated. 
+ DCHECK(!manager_->GetCompiler()->IsImage() || klass->IsVerified()) << "Boot classpath class " + << PrettyClass(klass.Get()) << " failed to fully verify."; + } + soa.Self()->AssertNoPendingException(); } - soa.Self()->AssertNoPendingException(); -} + + private: + const ParallelCompilationManager* const manager_; +}; void CompilerDriver::VerifyDexFile(jobject class_loader, const DexFile& dex_file, const std::vector<const DexFile*>& dex_files, @@ -1947,48 +1994,56 @@ void CompilerDriver::VerifyDexFile(jobject class_loader, const DexFile& dex_file ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); ParallelCompilationManager context(class_linker, class_loader, this, &dex_file, dex_files, thread_pool); - context.ForAll(0, dex_file.NumClassDefs(), VerifyClass, thread_count_); + VerifyClassVisitor visitor(&context); + context.ForAll(0, dex_file.NumClassDefs(), &visitor, thread_count_); } -static void SetVerifiedClass(const ParallelCompilationManager* manager, size_t class_def_index) - LOCKS_EXCLUDED(Locks::mutator_lock_) { - ATRACE_CALL(); - ScopedObjectAccess soa(Thread::Current()); - const DexFile& dex_file = *manager->GetDexFile(); - const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index); - const char* descriptor = dex_file.GetClassDescriptor(class_def); - ClassLinker* class_linker = manager->GetClassLinker(); - jobject jclass_loader = manager->GetClassLoader(); - StackHandleScope<3> hs(soa.Self()); - Handle<mirror::ClassLoader> class_loader( - hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader))); - Handle<mirror::Class> klass( - hs.NewHandle(class_linker->FindClass(soa.Self(), descriptor, class_loader))); - // Class might have failed resolution. Then don't set it to verified. - if (klass.Get() != nullptr) { - // Only do this if the class is resolved. If even resolution fails, quickening will go very, - // very wrong. - if (klass->IsResolved()) { - if (klass->GetStatus() < mirror::Class::kStatusVerified) { - ObjectLock<mirror::Class> lock(soa.Self(), klass); - // Set class status to verified. - mirror::Class::SetStatus(klass, mirror::Class::kStatusVerified, soa.Self()); - // Mark methods as pre-verified. If we don't do this, the interpreter will run with - // access checks. - klass->SetPreverifiedFlagOnAllMethods( - GetInstructionSetPointerSize(manager->GetCompiler()->GetInstructionSet())); - klass->SetPreverified(); +class SetVerifiedClassVisitor : public CompilationVisitor { + public: + explicit SetVerifiedClassVisitor(const ParallelCompilationManager* manager) : manager_(manager) {} + + virtual void Visit(size_t class_def_index) REQUIRES(!Locks::mutator_lock_) OVERRIDE { + ATRACE_CALL(); + ScopedObjectAccess soa(Thread::Current()); + const DexFile& dex_file = *manager_->GetDexFile(); + const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index); + const char* descriptor = dex_file.GetClassDescriptor(class_def); + ClassLinker* class_linker = manager_->GetClassLinker(); + jobject jclass_loader = manager_->GetClassLoader(); + StackHandleScope<3> hs(soa.Self()); + Handle<mirror::ClassLoader> class_loader( + hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader))); + Handle<mirror::Class> klass( + hs.NewHandle(class_linker->FindClass(soa.Self(), descriptor, class_loader))); + // Class might have failed resolution. Then don't set it to verified. + if (klass.Get() != nullptr) { + // Only do this if the class is resolved. If even resolution fails, quickening will go very, + // very wrong. 
+ if (klass->IsResolved()) { + if (klass->GetStatus() < mirror::Class::kStatusVerified) { + ObjectLock<mirror::Class> lock(soa.Self(), klass); + // Set class status to verified. + mirror::Class::SetStatus(klass, mirror::Class::kStatusVerified, soa.Self()); + // Mark methods as pre-verified. If we don't do this, the interpreter will run with + // access checks. + klass->SetPreverifiedFlagOnAllMethods( + GetInstructionSetPointerSize(manager_->GetCompiler()->GetInstructionSet())); + klass->SetPreverified(); + } + // Record the final class status if necessary. + ClassReference ref(manager_->GetDexFile(), class_def_index); + manager_->GetCompiler()->RecordClassStatus(ref, klass->GetStatus()); } - // Record the final class status if necessary. - ClassReference ref(manager->GetDexFile(), class_def_index); - manager->GetCompiler()->RecordClassStatus(ref, klass->GetStatus()); + } else { + Thread* self = soa.Self(); + DCHECK(self->IsExceptionPending()); + self->ClearException(); } - } else { - Thread* self = soa.Self(); - DCHECK(self->IsExceptionPending()); - self->ClearException(); } -} + + private: + const ParallelCompilationManager* const manager_; +}; void CompilerDriver::SetVerifiedDexFile(jobject class_loader, const DexFile& dex_file, const std::vector<const DexFile*>& dex_files, @@ -1997,99 +2052,107 @@ void CompilerDriver::SetVerifiedDexFile(jobject class_loader, const DexFile& dex ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); ParallelCompilationManager context(class_linker, class_loader, this, &dex_file, dex_files, thread_pool); - context.ForAll(0, dex_file.NumClassDefs(), SetVerifiedClass, thread_count_); + SetVerifiedClassVisitor visitor(&context); + context.ForAll(0, dex_file.NumClassDefs(), &visitor, thread_count_); } -static void InitializeClass(const ParallelCompilationManager* manager, size_t class_def_index) - LOCKS_EXCLUDED(Locks::mutator_lock_) { - ATRACE_CALL(); - jobject jclass_loader = manager->GetClassLoader(); - const DexFile& dex_file = *manager->GetDexFile(); - const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index); - const DexFile::TypeId& class_type_id = dex_file.GetTypeId(class_def.class_idx_); - const char* descriptor = dex_file.StringDataByIdx(class_type_id.descriptor_idx_); +class InitializeClassVisitor : public CompilationVisitor { + public: + explicit InitializeClassVisitor(const ParallelCompilationManager* manager) : manager_(manager) {} - ScopedObjectAccess soa(Thread::Current()); - StackHandleScope<3> hs(soa.Self()); - Handle<mirror::ClassLoader> class_loader( - hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader))); - Handle<mirror::Class> klass( - hs.NewHandle(manager->GetClassLinker()->FindClass(soa.Self(), descriptor, class_loader))); - - if (klass.Get() != nullptr && !SkipClass(jclass_loader, dex_file, klass.Get())) { - // Only try to initialize classes that were successfully verified. - if (klass->IsVerified()) { - // Attempt to initialize the class but bail if we either need to initialize the super-class - // or static fields. - manager->GetClassLinker()->EnsureInitialized(soa.Self(), klass, false, false); - if (!klass->IsInitialized()) { - // We don't want non-trivial class initialization occurring on multiple threads due to - // deadlock problems. For example, a parent class is initialized (holding its lock) that - // refers to a sub-class in its static/class initializer causing it to try to acquire the - // sub-class' lock. 
While on a second thread the sub-class is initialized (holding its lock) - // after first initializing its parents, whose locks are acquired. This leads to a - // parent-to-child and a child-to-parent lock ordering and consequent potential deadlock. - // We need to use an ObjectLock due to potential suspension in the interpreting code. Rather - // than use a special Object for the purpose we use the Class of java.lang.Class. - Handle<mirror::Class> h_klass(hs.NewHandle(klass->GetClass())); - ObjectLock<mirror::Class> lock(soa.Self(), h_klass); - // Attempt to initialize allowing initialization of parent classes but still not static - // fields. - manager->GetClassLinker()->EnsureInitialized(soa.Self(), klass, false, true); + virtual void Visit(size_t class_def_index) REQUIRES(!Locks::mutator_lock_) OVERRIDE { + ATRACE_CALL(); + jobject jclass_loader = manager_->GetClassLoader(); + const DexFile& dex_file = *manager_->GetDexFile(); + const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index); + const DexFile::TypeId& class_type_id = dex_file.GetTypeId(class_def.class_idx_); + const char* descriptor = dex_file.StringDataByIdx(class_type_id.descriptor_idx_); + + ScopedObjectAccess soa(Thread::Current()); + StackHandleScope<3> hs(soa.Self()); + Handle<mirror::ClassLoader> class_loader( + hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader))); + Handle<mirror::Class> klass( + hs.NewHandle(manager_->GetClassLinker()->FindClass(soa.Self(), descriptor, class_loader))); + + if (klass.Get() != nullptr && !SkipClass(jclass_loader, dex_file, klass.Get())) { + // Only try to initialize classes that were successfully verified. + if (klass->IsVerified()) { + // Attempt to initialize the class but bail if we either need to initialize the super-class + // or static fields. + manager_->GetClassLinker()->EnsureInitialized(soa.Self(), klass, false, false); if (!klass->IsInitialized()) { - // We need to initialize static fields, we only do this for image classes that aren't - // marked with the $NoPreloadHolder (which implies this should not be initialized early). - bool can_init_static_fields = manager->GetCompiler()->IsImage() && - manager->GetCompiler()->IsImageClass(descriptor) && - !StringPiece(descriptor).ends_with("$NoPreloadHolder;"); - if (can_init_static_fields) { - VLOG(compiler) << "Initializing: " << descriptor; - // TODO multithreading support. We should ensure the current compilation thread has - // exclusive access to the runtime and the transaction. To achieve this, we could use - // a ReaderWriterMutex but we're holding the mutator lock so we fail mutex sanity - // checks in Thread::AssertThreadSuspensionIsAllowable. - Runtime* const runtime = Runtime::Current(); - Transaction transaction; - - // Run the class initializer in transaction mode. - runtime->EnterTransactionMode(&transaction); - const mirror::Class::Status old_status = klass->GetStatus(); - bool success = manager->GetClassLinker()->EnsureInitialized(soa.Self(), klass, true, - true); - // TODO we detach transaction from runtime to indicate we quit the transactional - // mode which prevents the GC from visiting objects modified during the transaction. - // Ensure GC is not run so don't access freed objects when aborting transaction. 
- - ScopedAssertNoThreadSuspension ants(soa.Self(), "Transaction end"); - runtime->ExitTransactionMode(); - - if (!success) { - CHECK(soa.Self()->IsExceptionPending()); - mirror::Throwable* exception = soa.Self()->GetException(); - VLOG(compiler) << "Initialization of " << descriptor << " aborted because of " - << exception->Dump(); - std::ostream* file_log = manager->GetCompiler()-> - GetCompilerOptions().GetInitFailureOutput(); - if (file_log != nullptr) { - *file_log << descriptor << "\n"; - *file_log << exception->Dump() << "\n"; + // We don't want non-trivial class initialization occurring on multiple threads due to + // deadlock problems. For example, a parent class is initialized (holding its lock) that + // refers to a sub-class in its static/class initializer causing it to try to acquire the + // sub-class' lock. While on a second thread the sub-class is initialized (holding its lock) + // after first initializing its parents, whose locks are acquired. This leads to a + // parent-to-child and a child-to-parent lock ordering and consequent potential deadlock. + // We need to use an ObjectLock due to potential suspension in the interpreting code. Rather + // than use a special Object for the purpose we use the Class of java.lang.Class. + Handle<mirror::Class> h_klass(hs.NewHandle(klass->GetClass())); + ObjectLock<mirror::Class> lock(soa.Self(), h_klass); + // Attempt to initialize allowing initialization of parent classes but still not static + // fields. + manager_->GetClassLinker()->EnsureInitialized(soa.Self(), klass, false, true); + if (!klass->IsInitialized()) { + // We need to initialize static fields, we only do this for image classes that aren't + // marked with the $NoPreloadHolder (which implies this should not be initialized early). + bool can_init_static_fields = manager_->GetCompiler()->IsImage() && + manager_->GetCompiler()->IsImageClass(descriptor) && + !StringPiece(descriptor).ends_with("$NoPreloadHolder;"); + if (can_init_static_fields) { + VLOG(compiler) << "Initializing: " << descriptor; + // TODO multithreading support. We should ensure the current compilation thread has + // exclusive access to the runtime and the transaction. To achieve this, we could use + // a ReaderWriterMutex but we're holding the mutator lock so we fail mutex sanity + // checks in Thread::AssertThreadSuspensionIsAllowable. + Runtime* const runtime = Runtime::Current(); + Transaction transaction; + + // Run the class initializer in transaction mode. + runtime->EnterTransactionMode(&transaction); + const mirror::Class::Status old_status = klass->GetStatus(); + bool success = manager_->GetClassLinker()->EnsureInitialized(soa.Self(), klass, true, + true); + // TODO we detach transaction from runtime to indicate we quit the transactional + // mode which prevents the GC from visiting objects modified during the transaction. + // Ensure GC is not run so don't access freed objects when aborting transaction. 
+ + ScopedAssertNoThreadSuspension ants(soa.Self(), "Transaction end"); + runtime->ExitTransactionMode(); + + if (!success) { + CHECK(soa.Self()->IsExceptionPending()); + mirror::Throwable* exception = soa.Self()->GetException(); + VLOG(compiler) << "Initialization of " << descriptor << " aborted because of " + << exception->Dump(); + std::ostream* file_log = manager_->GetCompiler()-> + GetCompilerOptions().GetInitFailureOutput(); + if (file_log != nullptr) { + *file_log << descriptor << "\n"; + *file_log << exception->Dump() << "\n"; + } + soa.Self()->ClearException(); + transaction.Rollback(); + CHECK_EQ(old_status, klass->GetStatus()) << "Previous class status not restored"; } - soa.Self()->ClearException(); - transaction.Rollback(); - CHECK_EQ(old_status, klass->GetStatus()) << "Previous class status not restored"; } } + soa.Self()->AssertNoPendingException(); } - soa.Self()->AssertNoPendingException(); } + // Record the final class status if necessary. + ClassReference ref(manager_->GetDexFile(), class_def_index); + manager_->GetCompiler()->RecordClassStatus(ref, klass->GetStatus()); } - // Record the final class status if necessary. - ClassReference ref(manager->GetDexFile(), class_def_index); - manager->GetCompiler()->RecordClassStatus(ref, klass->GetStatus()); + // Clear any class not found or verification exceptions. + soa.Self()->ClearException(); } - // Clear any class not found or verification exceptions. - soa.Self()->ClearException(); -} + + private: + const ParallelCompilationManager* const manager_; +}; void CompilerDriver::InitializeClasses(jobject jni_class_loader, const DexFile& dex_file, const std::vector<const DexFile*>& dex_files, @@ -2105,7 +2168,8 @@ void CompilerDriver::InitializeClasses(jobject jni_class_loader, const DexFile& } else { thread_count = thread_count_; } - context.ForAll(0, dex_file.NumClassDefs(), InitializeClass, thread_count); + InitializeClassVisitor visitor(&context); + context.ForAll(0, dex_file.NumClassDefs(), &visitor, thread_count); } void CompilerDriver::InitializeClasses(jobject class_loader, @@ -2132,101 +2196,108 @@ void CompilerDriver::Compile(jobject class_loader, const std::vector<const DexFi VLOG(compiler) << "Compile: " << GetMemoryUsageString(false); } -void CompilerDriver::CompileClass(const ParallelCompilationManager* manager, - size_t class_def_index) { - ATRACE_CALL(); - const DexFile& dex_file = *manager->GetDexFile(); - const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index); - ClassLinker* class_linker = manager->GetClassLinker(); - jobject jclass_loader = manager->GetClassLoader(); - Thread* self = Thread::Current(); - { - // Use a scoped object access to perform to the quick SkipClass check. 
- const char* descriptor = dex_file.GetClassDescriptor(class_def); - ScopedObjectAccess soa(self); - StackHandleScope<3> hs(soa.Self()); - Handle<mirror::ClassLoader> class_loader( - hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader))); - Handle<mirror::Class> klass( - hs.NewHandle(class_linker->FindClass(soa.Self(), descriptor, class_loader))); - if (klass.Get() == nullptr) { - CHECK(soa.Self()->IsExceptionPending()); - soa.Self()->ClearException(); - } else if (SkipClass(jclass_loader, dex_file, klass.Get())) { +class CompileClassVisitor : public CompilationVisitor { + public: + explicit CompileClassVisitor(const ParallelCompilationManager* manager) : manager_(manager) {} + + virtual void Visit(size_t class_def_index) REQUIRES(!Locks::mutator_lock_) OVERRIDE { + ATRACE_CALL(); + const DexFile& dex_file = *manager_->GetDexFile(); + const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index); + ClassLinker* class_linker = manager_->GetClassLinker(); + jobject jclass_loader = manager_->GetClassLoader(); + Thread* self = Thread::Current(); + { + // Use a scoped object access to perform to the quick SkipClass check. + const char* descriptor = dex_file.GetClassDescriptor(class_def); + ScopedObjectAccess soa(self); + StackHandleScope<3> hs(soa.Self()); + Handle<mirror::ClassLoader> class_loader( + hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader))); + Handle<mirror::Class> klass( + hs.NewHandle(class_linker->FindClass(soa.Self(), descriptor, class_loader))); + if (klass.Get() == nullptr) { + CHECK(soa.Self()->IsExceptionPending()); + soa.Self()->ClearException(); + } else if (SkipClass(jclass_loader, dex_file, klass.Get())) { + return; + } + } + ClassReference ref(&dex_file, class_def_index); + // Skip compiling classes with generic verifier failures since they will still fail at runtime + if (manager_->GetCompiler()->verification_results_->IsClassRejected(ref)) { + return; + } + const uint8_t* class_data = dex_file.GetClassData(class_def); + if (class_data == nullptr) { + // empty class, probably a marker interface return; } - } - ClassReference ref(&dex_file, class_def_index); - // Skip compiling classes with generic verifier failures since they will still fail at runtime - if (manager->GetCompiler()->verification_results_->IsClassRejected(ref)) { - return; - } - const uint8_t* class_data = dex_file.GetClassData(class_def); - if (class_data == nullptr) { - // empty class, probably a marker interface - return; - } - CompilerDriver* const driver = manager->GetCompiler(); + CompilerDriver* const driver = manager_->GetCompiler(); - // Can we run DEX-to-DEX compiler on this class ? - DexToDexCompilationLevel dex_to_dex_compilation_level = kDontDexToDexCompile; - { - ScopedObjectAccess soa(self); - StackHandleScope<1> hs(soa.Self()); - Handle<mirror::ClassLoader> class_loader( - hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader))); - dex_to_dex_compilation_level = driver->GetDexToDexCompilationlevel( - soa.Self(), class_loader, dex_file, class_def); - } - ClassDataItemIterator it(dex_file, class_data); - // Skip fields - while (it.HasNextStaticField()) { - it.Next(); - } - while (it.HasNextInstanceField()) { - it.Next(); - } + // Can we run DEX-to-DEX compiler on this class ? 
+ DexToDexCompilationLevel dex_to_dex_compilation_level = kDontDexToDexCompile; + { + ScopedObjectAccess soa(self); + StackHandleScope<1> hs(soa.Self()); + Handle<mirror::ClassLoader> class_loader( + hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader))); + dex_to_dex_compilation_level = driver->GetDexToDexCompilationlevel( + soa.Self(), class_loader, dex_file, class_def); + } + ClassDataItemIterator it(dex_file, class_data); + // Skip fields + while (it.HasNextStaticField()) { + it.Next(); + } + while (it.HasNextInstanceField()) { + it.Next(); + } - bool compilation_enabled = driver->IsClassToCompile( - dex_file.StringByTypeIdx(class_def.class_idx_)); + bool compilation_enabled = driver->IsClassToCompile( + dex_file.StringByTypeIdx(class_def.class_idx_)); - // Compile direct methods - int64_t previous_direct_method_idx = -1; - while (it.HasNextDirectMethod()) { - uint32_t method_idx = it.GetMemberIndex(); - if (method_idx == previous_direct_method_idx) { - // smali can create dex files with two encoded_methods sharing the same method_idx - // http://code.google.com/p/smali/issues/detail?id=119 + // Compile direct methods + int64_t previous_direct_method_idx = -1; + while (it.HasNextDirectMethod()) { + uint32_t method_idx = it.GetMemberIndex(); + if (method_idx == previous_direct_method_idx) { + // smali can create dex files with two encoded_methods sharing the same method_idx + // http://code.google.com/p/smali/issues/detail?id=119 + it.Next(); + continue; + } + previous_direct_method_idx = method_idx; + driver->CompileMethod(self, it.GetMethodCodeItem(), it.GetMethodAccessFlags(), + it.GetMethodInvokeType(class_def), class_def_index, + method_idx, jclass_loader, dex_file, dex_to_dex_compilation_level, + compilation_enabled); it.Next(); - continue; - } - previous_direct_method_idx = method_idx; - driver->CompileMethod(self, it.GetMethodCodeItem(), it.GetMethodAccessFlags(), - it.GetMethodInvokeType(class_def), class_def_index, - method_idx, jclass_loader, dex_file, dex_to_dex_compilation_level, - compilation_enabled); - it.Next(); - } - // Compile virtual methods - int64_t previous_virtual_method_idx = -1; - while (it.HasNextVirtualMethod()) { - uint32_t method_idx = it.GetMemberIndex(); - if (method_idx == previous_virtual_method_idx) { - // smali can create dex files with two encoded_methods sharing the same method_idx - // http://code.google.com/p/smali/issues/detail?id=119 + } + // Compile virtual methods + int64_t previous_virtual_method_idx = -1; + while (it.HasNextVirtualMethod()) { + uint32_t method_idx = it.GetMemberIndex(); + if (method_idx == previous_virtual_method_idx) { + // smali can create dex files with two encoded_methods sharing the same method_idx + // http://code.google.com/p/smali/issues/detail?id=119 + it.Next(); + continue; + } + previous_virtual_method_idx = method_idx; + driver->CompileMethod(self, it.GetMethodCodeItem(), it.GetMethodAccessFlags(), + it.GetMethodInvokeType(class_def), class_def_index, + method_idx, jclass_loader, dex_file, dex_to_dex_compilation_level, + compilation_enabled); it.Next(); - continue; } - previous_virtual_method_idx = method_idx; - driver->CompileMethod(self, it.GetMethodCodeItem(), it.GetMethodAccessFlags(), - it.GetMethodInvokeType(class_def), class_def_index, - method_idx, jclass_loader, dex_file, dex_to_dex_compilation_level, - compilation_enabled); - it.Next(); + DCHECK(!it.HasNext()); } - DCHECK(!it.HasNext()); -} + + private: + const ParallelCompilationManager* const manager_; +}; void 
CompilerDriver::CompileDexFile(jobject class_loader, const DexFile& dex_file, const std::vector<const DexFile*>& dex_files, @@ -2234,7 +2305,8 @@ void CompilerDriver::CompileDexFile(jobject class_loader, const DexFile& dex_fil TimingLogger::ScopedTiming t("Compile Dex File", timings); ParallelCompilationManager context(Runtime::Current()->GetClassLinker(), class_loader, this, &dex_file, dex_files, thread_pool); - context.ForAll(0, dex_file.NumClassDefs(), CompilerDriver::CompileClass, thread_count_); + CompileClassVisitor visitor(&context); + context.ForAll(0, dex_file.NumClassDefs(), &visitor, thread_count_); } // Does the runtime for the InstructionSet provide an implementation returned by @@ -2453,7 +2525,7 @@ bool CompilerDriver::WriteElf(const std::string& android_root, const std::vector<const art::DexFile*>& dex_files, OatWriter* oat_writer, art::File* file) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (kProduce64BitELFFiles && Is64BitInstructionSet(GetInstructionSet())) { return art::ElfWriterQuick64::Create(file, oat_writer, dex_files, android_root, is_host, *this); } else { diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h index 5cf4044fd4..88e03a231f 100644 --- a/compiler/driver/compiler_driver.h +++ b/compiler/driver/compiler_driver.h @@ -114,14 +114,15 @@ class CompilerDriver { void CompileAll(jobject class_loader, const std::vector<const DexFile*>& dex_files, TimingLogger* timings) - LOCKS_EXCLUDED(Locks::mutator_lock_); + REQUIRES(!Locks::mutator_lock_, !compiled_classes_lock_); CompiledMethod* CompileMethod(Thread* self, ArtMethod*) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) WARN_UNUSED; + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!compiled_methods_lock_) WARN_UNUSED; // Compile a single Method. void CompileOne(Thread* self, ArtMethod* method, TimingLogger* timings) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!compiled_methods_lock_, !compiled_classes_lock_); VerificationResults* GetVerificationResults() const { return verification_results_; @@ -162,54 +163,56 @@ class CompilerDriver { // Generate the trampolines that are invoked by unresolved direct methods. 
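A note on the pattern in the compiler_driver.cc hunks above: each per-class callback (a free function taking the ParallelCompilationManager plus an index) becomes a small CompilationVisitor subclass whose state lives in members, and ForAll now receives a pointer to that visitor instead of a function. A minimal standalone sketch of the same shape follows; it compiles on its own, and the Worklist/PrintVisitor names are illustrative only, not ART types (the real ForAll also fans the work out over a thread pool, which is elided here).

#include <cstddef>
#include <iostream>
#include <vector>

// Stand-in for the visitor interface: one virtual hook per work-item index.
class CompilationVisitor {
 public:
  virtual ~CompilationVisitor() {}
  virtual void Visit(size_t index) = 0;
};

// Stand-in for the manager: owns shared state and drives a visitor over an
// index range (serially here, for brevity).
class Worklist {
 public:
  explicit Worklist(const std::vector<int>* items) : items_(items) {}

  void ForAll(size_t begin, size_t end, CompilationVisitor* visitor) const {
    for (size_t i = begin; i < end; ++i) {
      visitor->Visit(i);
    }
  }

  int Get(size_t i) const { return (*items_)[i]; }

 private:
  const std::vector<int>* const items_;
};

// The converted form: what used to arrive through a context argument is now a
// member, and the old callback body moves into Visit().
class PrintVisitor : public CompilationVisitor {
 public:
  explicit PrintVisitor(const Worklist* list) : list_(list) {}

  void Visit(size_t index) override {
    std::cout << "item " << index << " = " << list_->Get(index) << "\n";
  }

 private:
  const Worklist* const list_;
};

int main() {
  std::vector<int> items = {3, 1, 4};
  Worklist list(&items);
  PrintVisitor visitor(&list);
  list.ForAll(0, items.size(), &visitor);
  return 0;
}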
const std::vector<uint8_t>* CreateInterpreterToInterpreterBridge() const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); const std::vector<uint8_t>* CreateInterpreterToCompiledCodeBridge() const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); const std::vector<uint8_t>* CreateJniDlsymLookup() const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); const std::vector<uint8_t>* CreateQuickGenericJniTrampoline() const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); const std::vector<uint8_t>* CreateQuickImtConflictTrampoline() const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); const std::vector<uint8_t>* CreateQuickResolutionTrampoline() const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); const std::vector<uint8_t>* CreateQuickToInterpreterBridge() const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); CompiledClass* GetCompiledClass(ClassReference ref) const - LOCKS_EXCLUDED(compiled_classes_lock_); + REQUIRES(!compiled_classes_lock_); CompiledMethod* GetCompiledMethod(MethodReference ref) const - LOCKS_EXCLUDED(compiled_methods_lock_); + REQUIRES(!compiled_methods_lock_); size_t GetNonRelativeLinkerPatchCount() const - LOCKS_EXCLUDED(compiled_methods_lock_); + REQUIRES(!compiled_methods_lock_); // Remove and delete a compiled method. - void RemoveCompiledMethod(const MethodReference& method_ref); + void RemoveCompiledMethod(const MethodReference& method_ref) REQUIRES(!compiled_methods_lock_); void AddRequiresConstructorBarrier(Thread* self, const DexFile* dex_file, - uint16_t class_def_index); + uint16_t class_def_index) + REQUIRES(!freezing_constructor_lock_); bool RequiresConstructorBarrier(Thread* self, const DexFile* dex_file, - uint16_t class_def_index) const; + uint16_t class_def_index) const + REQUIRES(!freezing_constructor_lock_); // Callbacks from compiler to see what runtime checks must be generated. bool CanAssumeTypeIsPresentInDexCache(const DexFile& dex_file, uint32_t type_idx); bool CanAssumeStringIsPresentInDexCache(const DexFile& dex_file, uint32_t string_idx) - LOCKS_EXCLUDED(Locks::mutator_lock_); + REQUIRES(!Locks::mutator_lock_); // Are runtime access checks necessary in the compiled code? bool CanAccessTypeWithoutChecks(uint32_t referrer_idx, const DexFile& dex_file, uint32_t type_idx, bool* type_known_final = nullptr, bool* type_known_abstract = nullptr, bool* equals_referrers_class = nullptr) - LOCKS_EXCLUDED(Locks::mutator_lock_); + REQUIRES(!Locks::mutator_lock_); // Are runtime access and instantiable checks necessary in the code? bool CanAccessInstantiableTypeWithoutChecks(uint32_t referrer_idx, const DexFile& dex_file, uint32_t type_idx) - LOCKS_EXCLUDED(Locks::mutator_lock_); + REQUIRES(!Locks::mutator_lock_); bool CanEmbedTypeInCode(const DexFile& dex_file, uint32_t type_idx, bool* is_type_initialized, bool* use_direct_type_ptr, @@ -223,22 +226,22 @@ class CompilerDriver { // Get the DexCache for the mirror::DexCache* GetDexCache(const DexCompilationUnit* mUnit) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); mirror::ClassLoader* GetClassLoader(ScopedObjectAccess& soa, const DexCompilationUnit* mUnit) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Resolve compiling method's class. 
Returns null on failure. mirror::Class* ResolveCompilingMethodsClass( const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache, Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); mirror::Class* ResolveClass( const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache, Handle<mirror::ClassLoader> class_loader, uint16_t type_index, const DexCompilationUnit* mUnit) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Resolve a field. Returns null on failure, including incompatible class change. // NOTE: Unlike ClassLinker's ResolveField(), this method enforces is_static. @@ -246,40 +249,40 @@ class CompilerDriver { const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache, Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit, uint32_t field_idx, bool is_static) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Resolve a field with a given dex file. ArtField* ResolveFieldWithDexFile( const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache, Handle<mirror::ClassLoader> class_loader, const DexFile* dex_file, uint32_t field_idx, bool is_static) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Get declaration location of a resolved field. void GetResolvedFieldDexFileLocation( ArtField* resolved_field, const DexFile** declaring_dex_file, uint16_t* declaring_class_idx, uint16_t* declaring_field_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - bool IsFieldVolatile(ArtField* field) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - MemberOffset GetFieldOffset(ArtField* field) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsFieldVolatile(ArtField* field) SHARED_REQUIRES(Locks::mutator_lock_); + MemberOffset GetFieldOffset(ArtField* field) SHARED_REQUIRES(Locks::mutator_lock_); // Find a dex cache for a dex file. inline mirror::DexCache* FindDexCache(const DexFile* dex_file) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Can we fast-path an IGET/IPUT access to an instance field? If yes, compute the field offset. std::pair<bool, bool> IsFastInstanceField( mirror::DexCache* dex_cache, mirror::Class* referrer_class, ArtField* resolved_field, uint16_t field_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Can we fast-path an SGET/SPUT access to a static field? If yes, compute the type index // of the declaring class in the referrer's dex file. std::pair<bool, bool> IsFastStaticField( mirror::DexCache* dex_cache, mirror::Class* referrer_class, ArtField* resolved_field, uint16_t field_idx, uint32_t* storage_index) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Return whether the declaring class of `resolved_method` is // available to `referrer_class`. If this is true, compute the type @@ -291,34 +294,34 @@ class CompilerDriver { ArtMethod* resolved_method, uint16_t method_idx, uint32_t* storage_index) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Is static field's in referrer's class? bool IsStaticFieldInReferrerClass(mirror::Class* referrer_class, ArtField* resolved_field) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Is static field's class initialized? 
bool IsStaticFieldsClassInitialized(mirror::Class* referrer_class, ArtField* resolved_field) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Resolve a method. Returns null on failure, including incompatible class change. ArtMethod* ResolveMethod( ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache, Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit, uint32_t method_idx, InvokeType invoke_type, bool check_incompatible_class_change = true) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Get declaration location of a resolved field. void GetResolvedMethodDexFileLocation( ArtMethod* resolved_method, const DexFile** declaring_dex_file, uint16_t* declaring_class_idx, uint16_t* declaring_method_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Get the index in the vtable of the method. uint16_t GetResolvedMethodVTableIndex( ArtMethod* resolved_method, InvokeType type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Can we fast-path an INVOKE? If no, returns 0. If yes, returns a non-zero opaque flags value // for ProcessedInvoke() and computes the necessary lowering info. @@ -328,13 +331,13 @@ class CompilerDriver { mirror::Class* referrer_class, ArtMethod* resolved_method, InvokeType* invoke_type, MethodReference* target_method, const MethodReference* devirt_target, uintptr_t* direct_code, uintptr_t* direct_method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Is method's class initialized for an invoke? // For static invokes to determine whether we need to consider potential call to <clinit>(). // For non-static invokes, assuming a non-null reference, the class is always initialized. bool IsMethodsClassInitialized(mirror::Class* referrer_class, ArtMethod* resolved_method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Get the layout of dex cache arrays for a dex file. Returns invalid layout if the // dex cache arrays don't have a fixed layout. @@ -349,18 +352,18 @@ class CompilerDriver { ArtField** resolved_field, mirror::Class** referrer_class, mirror::DexCache** dex_cache) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Can we fast path instance field access? Computes field's offset and volatility. bool ComputeInstanceFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit, bool is_put, MemberOffset* field_offset, bool* is_volatile) - LOCKS_EXCLUDED(Locks::mutator_lock_); + REQUIRES(!Locks::mutator_lock_); ArtField* ComputeInstanceFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit, bool is_put, const ScopedObjectAccess& soa) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Can we fastpath static field access? Computes field's offset, volatility and whether the @@ -369,7 +372,7 @@ class CompilerDriver { MemberOffset* field_offset, uint32_t* storage_index, bool* is_referrers_class, bool* is_volatile, bool* is_initialized, Primitive::Type* type) - LOCKS_EXCLUDED(Locks::mutator_lock_); + REQUIRES(!Locks::mutator_lock_); // Can we fastpath a interface, super class or virtual method call? Computes method's vtable // index. 
@@ -377,7 +380,7 @@ class CompilerDriver { bool update_stats, bool enable_devirtualization, InvokeType* type, MethodReference* target_method, int* vtable_idx, uintptr_t* direct_code, uintptr_t* direct_method) - LOCKS_EXCLUDED(Locks::mutator_lock_); + REQUIRES(!Locks::mutator_lock_); const VerifiedMethod* GetVerifiedMethod(const DexFile* dex_file, uint32_t method_idx) const; bool IsSafeCast(const DexCompilationUnit* mUnit, uint32_t dex_pc); @@ -445,7 +448,7 @@ class CompilerDriver { bool IsMethodToCompile(const MethodReference& method_ref) const; void RecordClassStatus(ClassReference ref, mirror::Class::Status status) - LOCKS_EXCLUDED(compiled_classes_lock_); + REQUIRES(!compiled_classes_lock_); // Checks if the specified method has been verified without failures. Returns // false if the method is not in the verification results (GetVerificationResults). @@ -487,7 +490,7 @@ class CompilerDriver { ArtMember* resolved_member, uint16_t member_idx, uint32_t* storage_index) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Can `referrer_class` access the resolved `member`? // Dispatch call to mirror::Class::CanAccessResolvedField or @@ -499,17 +502,17 @@ class CompilerDriver { ArtMember* member, mirror::DexCache* dex_cache, uint32_t field_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Can we assume that the klass is initialized? bool CanAssumeClassIsInitialized(mirror::Class* klass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool CanReferrerAssumeClassIsInitialized(mirror::Class* referrer_class, mirror::Class* klass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Can we assume that the klass is loaded? bool CanAssumeClassIsLoaded(mirror::Class* klass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // These flags are internal to CompilerDriver for collecting INVOKE resolution statistics. // The only external contract is that unresolved method has flags 0 and resolved non-0. @@ -540,71 +543,68 @@ class CompilerDriver { /*out*/int* stats_flags, MethodReference* target_method, uintptr_t* direct_code, uintptr_t* direct_method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); private: DexToDexCompilationLevel GetDexToDexCompilationlevel( Thread* self, Handle<mirror::ClassLoader> class_loader, const DexFile& dex_file, - const DexFile::ClassDef& class_def) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const DexFile::ClassDef& class_def) SHARED_REQUIRES(Locks::mutator_lock_); void PreCompile(jobject class_loader, const std::vector<const DexFile*>& dex_files, ThreadPool* thread_pool, TimingLogger* timings) - LOCKS_EXCLUDED(Locks::mutator_lock_); + REQUIRES(!Locks::mutator_lock_, !compiled_classes_lock_); - void LoadImageClasses(TimingLogger* timings); + void LoadImageClasses(TimingLogger* timings) REQUIRES(!Locks::mutator_lock_); // Attempt to resolve all type, methods, fields, and strings // referenced from code in the dex file following PathClassLoader // ordering semantics. 
void Resolve(jobject class_loader, const std::vector<const DexFile*>& dex_files, ThreadPool* thread_pool, TimingLogger* timings) - LOCKS_EXCLUDED(Locks::mutator_lock_); + REQUIRES(!Locks::mutator_lock_); void ResolveDexFile(jobject class_loader, const DexFile& dex_file, const std::vector<const DexFile*>& dex_files, ThreadPool* thread_pool, TimingLogger* timings) - LOCKS_EXCLUDED(Locks::mutator_lock_); + REQUIRES(!Locks::mutator_lock_); void Verify(jobject class_loader, const std::vector<const DexFile*>& dex_files, ThreadPool* thread_pool, TimingLogger* timings); void VerifyDexFile(jobject class_loader, const DexFile& dex_file, const std::vector<const DexFile*>& dex_files, ThreadPool* thread_pool, TimingLogger* timings) - LOCKS_EXCLUDED(Locks::mutator_lock_); + REQUIRES(!Locks::mutator_lock_); void SetVerified(jobject class_loader, const std::vector<const DexFile*>& dex_files, ThreadPool* thread_pool, TimingLogger* timings); void SetVerifiedDexFile(jobject class_loader, const DexFile& dex_file, const std::vector<const DexFile*>& dex_files, ThreadPool* thread_pool, TimingLogger* timings) - LOCKS_EXCLUDED(Locks::mutator_lock_); + REQUIRES(!Locks::mutator_lock_); void InitializeClasses(jobject class_loader, const std::vector<const DexFile*>& dex_files, ThreadPool* thread_pool, TimingLogger* timings) - LOCKS_EXCLUDED(Locks::mutator_lock_); + REQUIRES(!Locks::mutator_lock_, !compiled_classes_lock_); void InitializeClasses(jobject class_loader, const DexFile& dex_file, const std::vector<const DexFile*>& dex_files, ThreadPool* thread_pool, TimingLogger* timings) - LOCKS_EXCLUDED(Locks::mutator_lock_, compiled_classes_lock_); + REQUIRES(!Locks::mutator_lock_, !compiled_classes_lock_); - void UpdateImageClasses(TimingLogger* timings) LOCKS_EXCLUDED(Locks::mutator_lock_); + void UpdateImageClasses(TimingLogger* timings) REQUIRES(!Locks::mutator_lock_); static void FindClinitImageClassesCallback(mirror::Object* object, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void Compile(jobject class_loader, const std::vector<const DexFile*>& dex_files, ThreadPool* thread_pool, TimingLogger* timings); void CompileDexFile(jobject class_loader, const DexFile& dex_file, const std::vector<const DexFile*>& dex_files, ThreadPool* thread_pool, TimingLogger* timings) - LOCKS_EXCLUDED(Locks::mutator_lock_); + REQUIRES(!Locks::mutator_lock_); void CompileMethod(Thread* self, const DexFile::CodeItem* code_item, uint32_t access_flags, InvokeType invoke_type, uint16_t class_def_idx, uint32_t method_idx, jobject class_loader, const DexFile& dex_file, DexToDexCompilationLevel dex_to_dex_compilation_level, bool compilation_enabled) - LOCKS_EXCLUDED(compiled_methods_lock_); - - static void CompileClass(const ParallelCompilationManager* context, size_t class_def_index) - LOCKS_EXCLUDED(Locks::mutator_lock_); + REQUIRES(!compiled_methods_lock_); // Swap pool and allocator used for native allocations. May be file-backed. Needs to be first // as other fields rely on this. 
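Aside on the annotation rename that runs through compiler_driver.h above: SHARED_LOCKS_REQUIRED(x) becomes SHARED_REQUIRES(x), and LOCKS_EXCLUDED(x) becomes the negative requirement REQUIRES(!x), meaning "the caller must not hold x". Negative requirements are checked transitively at call sites by Clang's -Wthread-safety-negative, which is why the requirement has to be threaded up through callers such as CompileAll and PreCompile rather than stated only on the leaf functions. A small self-contained sketch using the raw Clang attributes (gLock, gValue and Increment are illustrative names, not ART code; ART defines its own wrapper macros for these attributes, which differ in detail):

// Check with: clang++ -std=c++11 -Wthread-safety -Wthread-safety-negative -c sketch.cc
#include <mutex>

#define CAPABILITY(x)  __attribute__((capability(x)))
#define REQUIRES(...)  __attribute__((requires_capability(__VA_ARGS__)))
#define ACQUIRE(...)   __attribute__((acquire_capability(__VA_ARGS__)))
#define RELEASE(...)   __attribute__((release_capability(__VA_ARGS__)))
#define GUARDED_BY(x)  __attribute__((guarded_by(x)))

// Tiny capability wrapper around std::mutex, in the style of the Clang
// thread-safety documentation.
class CAPABILITY("mutex") Mutex {
 public:
  void Lock() ACQUIRE() { mu_.lock(); }
  void Unlock() RELEASE() { mu_.unlock(); }

 private:
  std::mutex mu_;
};

Mutex gLock;
int gValue GUARDED_BY(gLock) = 0;

// REQUIRES(!gLock): the caller must not already hold gLock, because this
// function acquires it. Unlike the old LOCKS_EXCLUDED, the requirement is
// enforced at every call site when -Wthread-safety-negative is enabled.
void Increment() REQUIRES(!gLock) {
  gLock.Lock();
  ++gValue;
  gLock.Unlock();
}

// Callers must carry the negative requirement as well, which mirrors how the
// header changes above add REQUIRES(!...) to the public CompilerDriver entry
// points and not only to the functions that actually take the locks.
void IncrementTwice() REQUIRES(!gLock) {
  Increment();
  Increment();
}

int main() {
  // Deliberately does nothing: a real caller of Increment() would also need
  // REQUIRES(!gLock) to stay warning-free under -Wthread-safety-negative.
  return 0;
}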
@@ -776,6 +776,7 @@ class CompilerDriver { DedupeSet<ArrayRef<const uint8_t>, SwapVector<uint8_t>, size_t, DedupeHashFunc<const uint8_t>, 4> dedupe_cfi_info_; + friend class CompileClassVisitor; DISALLOW_COPY_AND_ASSIGN(CompilerDriver); }; diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc index b358f4f396..e35d07da83 100644 --- a/compiler/driver/compiler_driver_test.cc +++ b/compiler/driver/compiler_driver_test.cc @@ -37,7 +37,7 @@ namespace art { class CompilerDriverTest : public CommonCompilerTest { protected: - void CompileAll(jobject class_loader) LOCKS_EXCLUDED(Locks::mutator_lock_) { + void CompileAll(jobject class_loader) REQUIRES(!Locks::mutator_lock_) { TimingLogger timings("CompilerDriverTest::CompileAll", false, false); TimingLogger::ScopedTiming t(__FUNCTION__, &timings); compiler_driver_->CompileAll(class_loader, @@ -49,7 +49,7 @@ class CompilerDriverTest : public CommonCompilerTest { void EnsureCompiled(jobject class_loader, const char* class_name, const char* method, const char* signature, bool is_virtual) - LOCKS_EXCLUDED(Locks::mutator_lock_) { + REQUIRES(!Locks::mutator_lock_) { CompileAll(class_loader); Thread::Current()->TransitionFromSuspendedToRunnable(); bool started = runtime_->Start(); diff --git a/compiler/driver/compiler_options.cc b/compiler/driver/compiler_options.cc index 226e6b7952..3f5a1eabb6 100644 --- a/compiler/driver/compiler_options.cc +++ b/compiler/driver/compiler_options.cc @@ -27,6 +27,8 @@ CompilerOptions::CompilerOptions() small_method_threshold_(kDefaultSmallMethodThreshold), tiny_method_threshold_(kDefaultTinyMethodThreshold), num_dex_methods_threshold_(kDefaultNumDexMethodsThreshold), + inline_depth_limit_(kDefaultInlineDepthLimit), + inline_max_code_units_(kDefaultInlineMaxCodeUnits), include_patch_information_(kDefaultIncludePatchInformation), top_k_profile_threshold_(kDefaultTopKProfileThreshold), debuggable_(false), @@ -52,6 +54,8 @@ CompilerOptions::CompilerOptions(CompilerFilter compiler_filter, size_t small_method_threshold, size_t tiny_method_threshold, size_t num_dex_methods_threshold, + size_t inline_depth_limit, + size_t inline_max_code_units, bool include_patch_information, double top_k_profile_threshold, bool debuggable, @@ -71,6 +75,8 @@ CompilerOptions::CompilerOptions(CompilerFilter compiler_filter, small_method_threshold_(small_method_threshold), tiny_method_threshold_(tiny_method_threshold), num_dex_methods_threshold_(num_dex_methods_threshold), + inline_depth_limit_(inline_depth_limit), + inline_max_code_units_(inline_max_code_units), include_patch_information_(include_patch_information), top_k_profile_threshold_(top_k_profile_threshold), debuggable_(debuggable), diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h index fe681e2a53..17b19dd51e 100644 --- a/compiler/driver/compiler_options.h +++ b/compiler/driver/compiler_options.h @@ -51,6 +51,8 @@ class CompilerOptions FINAL { static constexpr double kDefaultTopKProfileThreshold = 90.0; static const bool kDefaultGenerateDebugInfo = kIsDebugBuild; static const bool kDefaultIncludePatchInformation = false; + static const size_t kDefaultInlineDepthLimit = 3; + static const size_t kDefaultInlineMaxCodeUnits = 18; CompilerOptions(); ~CompilerOptions(); @@ -61,6 +63,8 @@ class CompilerOptions FINAL { size_t small_method_threshold, size_t tiny_method_threshold, size_t num_dex_methods_threshold, + size_t inline_depth_limit, + size_t inline_max_code_units, bool include_patch_information, double 
top_k_profile_threshold, bool debuggable, @@ -137,6 +141,14 @@ class CompilerOptions FINAL { return num_dex_methods_threshold_; } + size_t GetInlineDepthLimit() const { + return inline_depth_limit_; + } + + size_t GetInlineMaxCodeUnits() const { + return inline_max_code_units_; + } + double GetTopKProfileThreshold() const { return top_k_profile_threshold_; } @@ -202,6 +214,8 @@ class CompilerOptions FINAL { const size_t small_method_threshold_; const size_t tiny_method_threshold_; const size_t num_dex_methods_threshold_; + const size_t inline_depth_limit_; + const size_t inline_max_code_units_; const bool include_patch_information_; // When using a profile file only the top K% of the profiled samples will be compiled. const double top_k_profile_threshold_; diff --git a/compiler/dwarf/dwarf_test.cc b/compiler/dwarf/dwarf_test.cc index 4d423d007f..a07d27c1d2 100644 --- a/compiler/dwarf/dwarf_test.cc +++ b/compiler/dwarf/dwarf_test.cc @@ -27,7 +27,7 @@ namespace art { namespace dwarf { // Run the tests only on host since we need objdump. -#ifndef HAVE_ANDROID_OS +#ifndef __ANDROID__ constexpr CFIFormat kCFIFormat = DW_DEBUG_FRAME_FORMAT; @@ -336,7 +336,7 @@ TEST_F(DwarfTest, DebugInfo) { CheckObjdumpOutput(is64bit, "-W"); } -#endif // HAVE_ANDROID_OS +#endif // __ANDROID__ } // namespace dwarf } // namespace art diff --git a/compiler/elf_writer.h b/compiler/elf_writer.h index 8e13b51bbe..03f8ceb306 100644 --- a/compiler/elf_writer.h +++ b/compiler/elf_writer.h @@ -57,7 +57,7 @@ class ElfWriter { const std::vector<const DexFile*>& dex_files, const std::string& android_root, bool is_host) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0; + SHARED_REQUIRES(Locks::mutator_lock_) = 0; const CompilerDriver* const compiler_driver_; File* const elf_file_; diff --git a/compiler/elf_writer_quick.h b/compiler/elf_writer_quick.h index fd202eeb5f..83781abeff 100644 --- a/compiler/elf_writer_quick.h +++ b/compiler/elf_writer_quick.h @@ -33,7 +33,7 @@ class ElfWriterQuick FINAL : public ElfWriter { const std::string& android_root, bool is_host, const CompilerDriver& driver) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void EncodeOatPatches(const std::vector<uintptr_t>& locations, std::vector<uint8_t>* buffer); @@ -44,7 +44,7 @@ class ElfWriterQuick FINAL : public ElfWriter { const std::string& android_root, bool is_host) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); private: ElfWriterQuick(const CompilerDriver& driver, File* elf_file) diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc index 2b65aa9337..dda36fa2ef 100644 --- a/compiler/image_writer.cc +++ b/compiler/image_writer.cc @@ -73,7 +73,7 @@ static constexpr bool kBinObjects = true; static constexpr bool kComputeEagerResolvedStrings = false; static void CheckNoDexObjectsCallback(Object* obj, void* arg ATTRIBUTE_UNUSED) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { Class* klass = obj->GetClass(); CHECK_NE(PrettyClass(klass), "com.android.dex.Dex"); } @@ -539,16 +539,19 @@ bool ImageWriter::AllocMemory() { return true; } +class ComputeLazyFieldsForClassesVisitor : public ClassVisitor { + public: + bool Visit(Class* c) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { + StackHandleScope<1> hs(Thread::Current()); + mirror::Class::ComputeName(hs.NewHandle(c)); + return true; + } +}; + void ImageWriter::ComputeLazyFieldsForImageClasses() { ClassLinker* class_linker = 
Runtime::Current()->GetClassLinker(); - class_linker->VisitClassesWithoutClassesLock(ComputeLazyFieldsForClassesVisitor, nullptr); -} - -bool ImageWriter::ComputeLazyFieldsForClassesVisitor(Class* c, void* /*arg*/) { - Thread* self = Thread::Current(); - StackHandleScope<1> hs(self); - mirror::Class::ComputeName(hs.NewHandle(c)); - return true; + ComputeLazyFieldsForClassesVisitor visitor; + class_linker->VisitClassesWithoutClassesLock(&visitor); } void ImageWriter::ComputeEagerResolvedStringsCallback(Object* obj, void* arg ATTRIBUTE_UNUSED) { @@ -592,9 +595,20 @@ bool ImageWriter::IsImageClass(Class* klass) { return compiler_driver_.IsImageClass(klass->GetDescriptor(&temp)); } -struct NonImageClasses { - ImageWriter* image_writer; - std::set<std::string>* non_image_classes; +class NonImageClassesVisitor : public ClassVisitor { + public: + explicit NonImageClassesVisitor(ImageWriter* image_writer) : image_writer_(image_writer) {} + + bool Visit(Class* klass) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { + if (!image_writer_->IsImageClass(klass)) { + std::string temp; + non_image_classes_.insert(klass->GetDescriptor(&temp)); + } + return true; + } + + std::set<std::string> non_image_classes_; + ImageWriter* const image_writer_; }; void ImageWriter::PruneNonImageClasses() { @@ -606,14 +620,11 @@ void ImageWriter::PruneNonImageClasses() { Thread* self = Thread::Current(); // Make a list of classes we would like to prune. - std::set<std::string> non_image_classes; - NonImageClasses context; - context.image_writer = this; - context.non_image_classes = &non_image_classes; - class_linker->VisitClasses(NonImageClassesVisitor, &context); + NonImageClassesVisitor visitor(this); + class_linker->VisitClasses(&visitor); // Remove the undesired classes from the class roots. 
- for (const std::string& it : non_image_classes) { + for (const std::string& it : visitor.non_image_classes_) { bool result = class_linker->RemoveClass(it.c_str(), nullptr); DCHECK(result); } @@ -669,15 +680,6 @@ void ImageWriter::PruneNonImageClasses() { class_linker->DropFindArrayClassCache(); } -bool ImageWriter::NonImageClassesVisitor(Class* klass, void* arg) { - NonImageClasses* context = reinterpret_cast<NonImageClasses*>(arg); - if (!context->image_writer->IsImageClass(klass)) { - std::string temp; - context->non_image_classes->insert(klass->GetDescriptor(&temp)); - } - return true; -} - void ImageWriter::CheckNonImageClassesRemoved() { if (compiler_driver_.GetImageClasses() != nullptr) { gc::Heap* heap = Runtime::Current()->GetHeap(); @@ -1035,7 +1037,7 @@ class FixupRootVisitor : public RootVisitor { } void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { for (size_t i = 0; i < count; ++i) { *roots[i] = ImageAddress(*roots[i]); } @@ -1043,7 +1045,7 @@ class FixupRootVisitor : public RootVisitor { void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { for (size_t i = 0; i < count; ++i) { roots[i]->Assign(ImageAddress(roots[i]->AsMirrorPtr())); } @@ -1052,7 +1054,7 @@ class FixupRootVisitor : public RootVisitor { private: ImageWriter* const image_writer_; - mirror::Object* ImageAddress(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Object* ImageAddress(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) { const size_t offset = image_writer_->GetImageOffset(obj); auto* const dest = reinterpret_cast<Object*>(image_writer_->image_begin_ + offset); VLOG(compiler) << "Update root from " << obj << " to " << dest; @@ -1189,8 +1191,15 @@ class FixupVisitor { FixupVisitor(ImageWriter* image_writer, Object* copy) : image_writer_(image_writer), copy_(copy) { } + // Ignore class roots since we don't have a way to map them to the destination. These are handled + // with other logic. + void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) + const {} + void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {} + + void operator()(Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { + REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { Object* ref = obj->GetFieldObject<Object, kVerifyNone>(offset); // Use SetFieldObjectWithoutWriteBarrier to avoid card marking since we are writing to the // image. @@ -1200,8 +1209,7 @@ class FixupVisitor { // java.lang.ref.Reference visitor. 
void operator()(mirror::Class* klass ATTRIBUTE_UNUSED, mirror::Reference* ref) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) { copy_->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>( mirror::Reference::ReferentOffset(), image_writer_->GetImageAddress(ref->GetReferent())); } @@ -1217,15 +1225,14 @@ class FixupClassVisitor FINAL : public FixupVisitor { } void operator()(Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { + REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { DCHECK(obj->IsClass()); FixupVisitor::operator()(obj, offset, /*is_static*/false); } void operator()(mirror::Class* klass ATTRIBUTE_UNUSED, mirror::Reference* ref ATTRIBUTE_UNUSED) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) { LOG(FATAL) << "Reference not expected here."; } }; diff --git a/compiler/image_writer.h b/compiler/image_writer.h index 1523383657..cabd918354 100644 --- a/compiler/image_writer.h +++ b/compiler/image_writer.h @@ -69,15 +69,15 @@ class ImageWriter FINAL { } template <typename T> - T* GetImageAddress(T* object) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + T* GetImageAddress(T* object) const SHARED_REQUIRES(Locks::mutator_lock_) { return object == nullptr ? nullptr : reinterpret_cast<T*>(image_begin_ + GetImageOffset(object)); } - ArtMethod* GetImageMethodAddress(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ArtMethod* GetImageMethodAddress(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_); mirror::HeapReference<mirror::Object>* GetDexCacheArrayElementImageAddress( - const DexFile* dex_file, uint32_t offset) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + const DexFile* dex_file, uint32_t offset) const SHARED_REQUIRES(Locks::mutator_lock_) { auto it = dex_cache_array_starts_.find(dex_file); DCHECK(it != dex_cache_array_starts_.end()); return reinterpret_cast<mirror::HeapReference<mirror::Object>*>( @@ -88,7 +88,7 @@ class ImageWriter FINAL { bool Write(const std::string& image_filename, const std::string& oat_filename, const std::string& oat_location) - LOCKS_EXCLUDED(Locks::mutator_lock_); + REQUIRES(!Locks::mutator_lock_); uintptr_t GetOatDataBegin() { return reinterpret_cast<uintptr_t>(oat_data_begin_); @@ -98,7 +98,7 @@ class ImageWriter FINAL { bool AllocMemory(); // Mark the objects defined in this space in the given live bitmap. - void RecordImageAllocations() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void RecordImageAllocations() SHARED_REQUIRES(Locks::mutator_lock_); // Classify different kinds of bins that objects end up getting packed into during image writing. enum Bin { @@ -165,32 +165,32 @@ class ImageWriter FINAL { // We use the lock word to store the offset of the object in the image. 
void AssignImageOffset(mirror::Object* object, BinSlot bin_slot) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void SetImageOffset(mirror::Object* object, size_t offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool IsImageOffsetAssigned(mirror::Object* object) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - size_t GetImageOffset(mirror::Object* object) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); + size_t GetImageOffset(mirror::Object* object) const SHARED_REQUIRES(Locks::mutator_lock_); void UpdateImageOffset(mirror::Object* obj, uintptr_t offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - void PrepareDexCacheArraySlots() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void AssignImageBinSlot(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void PrepareDexCacheArraySlots() SHARED_REQUIRES(Locks::mutator_lock_); + void AssignImageBinSlot(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_); void SetImageBinSlot(mirror::Object* object, BinSlot bin_slot) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool IsImageBinSlotAssigned(mirror::Object* object) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - BinSlot GetImageBinSlot(mirror::Object* object) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); + BinSlot GetImageBinSlot(mirror::Object* object) const SHARED_REQUIRES(Locks::mutator_lock_); - void AddMethodPointerArray(mirror::PointerArray* arr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void AddMethodPointerArray(mirror::PointerArray* arr) SHARED_REQUIRES(Locks::mutator_lock_); static void* GetImageAddressCallback(void* writer, mirror::Object* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return reinterpret_cast<ImageWriter*>(writer)->GetImageAddress(obj); } mirror::Object* GetLocalAddress(mirror::Object* object) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { size_t offset = GetImageOffset(object); uint8_t* dst = image_->Begin() + offset; return reinterpret_cast<mirror::Object*>(dst); @@ -209,74 +209,70 @@ class ImageWriter FINAL { } // Returns true if the class was in the original requested image classes list. - bool IsImageClass(mirror::Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsImageClass(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_); // Debug aid that list of requested image classes. void DumpImageClasses(); // Preinitializes some otherwise lazy fields (such as Class name) to avoid runtime image dirtying. void ComputeLazyFieldsForImageClasses() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static bool ComputeLazyFieldsForClassesVisitor(mirror::Class* klass, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Wire dex cache resolved strings to strings in the image to avoid runtime resolution. - void ComputeEagerResolvedStrings() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void ComputeEagerResolvedStrings() SHARED_REQUIRES(Locks::mutator_lock_); static void ComputeEagerResolvedStringsCallback(mirror::Object* obj, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Remove unwanted classes from various roots. 
- void PruneNonImageClasses() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static bool NonImageClassesVisitor(mirror::Class* c, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void PruneNonImageClasses() SHARED_REQUIRES(Locks::mutator_lock_); // Verify unwanted classes removed. - void CheckNonImageClassesRemoved() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void CheckNonImageClassesRemoved() SHARED_REQUIRES(Locks::mutator_lock_); static void CheckNonImageClassesRemovedCallback(mirror::Object* obj, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Lays out where the image objects will be at runtime. void CalculateNewObjectOffsets() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void CreateHeader(size_t oat_loaded_size, size_t oat_data_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); mirror::ObjectArray<mirror::Object>* CreateImageRoots() const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void CalculateObjectBinSlots(mirror::Object* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void UnbinObjectsIntoOffset(mirror::Object* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void WalkInstanceFields(mirror::Object* obj, mirror::Class* klass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void WalkFieldsInOrder(mirror::Object* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void WalkFieldsCallback(mirror::Object* obj, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void UnbinObjectsIntoOffsetCallback(mirror::Object* obj, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Creates the contiguous image in memory and adjusts pointers. - void CopyAndFixupNativeData() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void CopyAndFixupObjects() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void CopyAndFixupNativeData() SHARED_REQUIRES(Locks::mutator_lock_); + void CopyAndFixupObjects() SHARED_REQUIRES(Locks::mutator_lock_); static void CopyAndFixupObjectsCallback(mirror::Object* obj, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void CopyAndFixupObject(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); + void CopyAndFixupObject(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_); void CopyAndFixupMethod(ArtMethod* orig, ArtMethod* copy) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void FixupClass(mirror::Class* orig, mirror::Class* copy) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void FixupObject(mirror::Object* orig, mirror::Object* copy) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void FixupPointerArray(mirror::Object* dst, mirror::PointerArray* arr, mirror::Class* klass, - Bin array_type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + Bin array_type) SHARED_REQUIRES(Locks::mutator_lock_); // Get quick code for non-resolution/imt_conflict/abstract method. 
const uint8_t* GetQuickCode(ArtMethod* method, bool* quick_is_interpreted) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); const uint8_t* GetQuickEntryPoint(ArtMethod* method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Patches references in OatFile to expect runtime addresses. void SetOatChecksumFromElfFile(File* elf_file); @@ -285,10 +281,10 @@ class ImageWriter FINAL { size_t GetBinSizeSum(Bin up_to = kBinSize) const; // Return true if a method is likely to be dirtied at runtime. - bool WillMethodBeDirty(ArtMethod* m) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool WillMethodBeDirty(ArtMethod* m) const SHARED_REQUIRES(Locks::mutator_lock_); // Assign the offset for an ArtMethod. - void AssignMethodOffset(ArtMethod* method, Bin bin) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void AssignMethodOffset(ArtMethod* method, Bin bin) SHARED_REQUIRES(Locks::mutator_lock_); const CompilerDriver& compiler_driver_; @@ -376,6 +372,7 @@ class ImageWriter FINAL { friend class FixupClassVisitor; friend class FixupRootVisitor; friend class FixupVisitor; + friend class NonImageClassesVisitor; DISALLOW_COPY_AND_ASSIGN(ImageWriter); }; diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc index a122cebf50..c95bac24fd 100644 --- a/compiler/jit/jit_compiler.cc +++ b/compiler/jit/jit_compiler.cc @@ -55,7 +55,7 @@ extern "C" void jit_unload(void* handle) { } extern "C" bool jit_compile_method(void* handle, ArtMethod* method, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { auto* jit_compiler = reinterpret_cast<JitCompiler*>(handle); DCHECK(jit_compiler != nullptr); return jit_compiler->CompileMethod(self, method); @@ -71,6 +71,8 @@ JitCompiler::JitCompiler() : total_time_(0) { CompilerOptions::kDefaultSmallMethodThreshold, CompilerOptions::kDefaultTinyMethodThreshold, CompilerOptions::kDefaultNumDexMethodsThreshold, + CompilerOptions::kDefaultInlineDepthLimit, + CompilerOptions::kDefaultInlineMaxCodeUnits, /* include_patch_information */ false, CompilerOptions::kDefaultTopKProfileThreshold, Runtime::Current()->IsDebuggable(), diff --git a/compiler/jit/jit_compiler.h b/compiler/jit/jit_compiler.h index b0010e0eb2..ef68caa5fa 100644 --- a/compiler/jit/jit_compiler.h +++ b/compiler/jit/jit_compiler.h @@ -38,11 +38,11 @@ class JitCompiler { static JitCompiler* Create(); virtual ~JitCompiler(); bool CompileMethod(Thread* self, ArtMethod* method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // This is in the compiler since the runtime doesn't have access to the compiled method // structures. 
bool AddToCodeCache(ArtMethod* method, const CompiledMethod* compiled_method, - OatFile::OatMethod* out_method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + OatFile::OatMethod* out_method) SHARED_REQUIRES(Locks::mutator_lock_); CompilerCallbacks* GetCompilerCallbacks() const; size_t GetTotalCompileTime() const { return total_time_; @@ -63,7 +63,7 @@ class JitCompiler { const CompiledMethod* compiled_method, uint8_t* reserve_begin, uint8_t* reserve_end, const uint8_t* mapping_table, const uint8_t* vmap_table, const uint8_t* gc_map); bool MakeExecutable(CompiledMethod* compiled_method, ArtMethod* method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); DISALLOW_COPY_AND_ASSIGN(JitCompiler); }; diff --git a/compiler/jni/jni_cfi_test.cc b/compiler/jni/jni_cfi_test.cc index 016f28ef1e..0bfe8a276a 100644 --- a/compiler/jni/jni_cfi_test.cc +++ b/compiler/jni/jni_cfi_test.cc @@ -28,7 +28,7 @@ namespace art { // Run the tests only on host. -#ifndef HAVE_ANDROID_OS +#ifndef __ANDROID__ class JNICFITest : public CFITest { public: @@ -88,6 +88,6 @@ TEST_ISA(kX86_64) TEST_ISA(kMips) TEST_ISA(kMips64) -#endif // HAVE_ANDROID_OS +#endif // __ANDROID__ } // namespace art diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc index 074775633f..88dc29e6ab 100644 --- a/compiler/oat_test.cc +++ b/compiler/oat_test.cc @@ -44,7 +44,7 @@ class OatTest : public CommonCompilerTest { void CheckMethod(ArtMethod* method, const OatFile::OatMethod& oat_method, const DexFile& dex_file) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { const CompiledMethod* compiled_method = compiler_driver_->GetCompiledMethod(MethodReference(&dex_file, method->GetDexMethodIndex())); @@ -183,7 +183,7 @@ TEST_F(OatTest, OatHeaderSizeCheck) { EXPECT_EQ(72U, sizeof(OatHeader)); EXPECT_EQ(4U, sizeof(OatMethodOffsets)); EXPECT_EQ(28U, sizeof(OatQuickMethodHeader)); - EXPECT_EQ(112 * GetInstructionSetPointerSize(kRuntimeISA), sizeof(QuickEntryPoints)); + EXPECT_EQ(113 * GetInstructionSetPointerSize(kRuntimeISA), sizeof(QuickEntryPoints)); } TEST_F(OatTest, OatHeaderIsValid) { diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc index 4318ea5b6c..64e748776d 100644 --- a/compiler/oat_writer.cc +++ b/compiler/oat_writer.cc @@ -365,7 +365,7 @@ class OatWriter::InitCodeMethodVisitor : public OatDexMethodVisitor { } bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { OatClass* oat_class = writer_->oat_classes_[oat_class_index_]; CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index); @@ -560,7 +560,7 @@ class OatWriter::InitMapMethodVisitor : public OatDexMethodVisitor { } bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it ATTRIBUTE_UNUSED) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { OatClass* oat_class = writer_->oat_classes_[oat_class_index_]; CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index); @@ -601,7 +601,7 @@ class OatWriter::InitImageMethodVisitor : public OatDexMethodVisitor { } bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { OatClass* oat_class = writer_->oat_classes_[oat_class_index_]; CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index); 
@@ -665,7 +665,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor { } bool StartClass(const DexFile* dex_file, size_t class_def_index) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { OatDexMethodVisitor::StartClass(dex_file, class_def_index); if (dex_cache_ == nullptr || dex_cache_->GetDexFile() != dex_file) { dex_cache_ = class_linker_->FindDexCache(*dex_file); @@ -673,7 +673,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor { return true; } - bool EndClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool EndClass() SHARED_REQUIRES(Locks::mutator_lock_) { bool result = OatDexMethodVisitor::EndClass(); if (oat_class_index_ == writer_->oat_classes_.size()) { DCHECK(result); // OatDexMethodVisitor::EndClass() never fails. @@ -687,7 +687,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor { } bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { OatClass* oat_class = writer_->oat_classes_[oat_class_index_]; const CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index); @@ -793,7 +793,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor { } ArtMethod* GetTargetMethod(const LinkerPatch& patch) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { MethodReference ref = patch.TargetMethod(); mirror::DexCache* dex_cache = (dex_file_ == ref.dex_file) ? dex_cache_ : class_linker_->FindDexCache(*ref.dex_file); @@ -803,7 +803,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor { return method; } - uint32_t GetTargetOffset(const LinkerPatch& patch) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + uint32_t GetTargetOffset(const LinkerPatch& patch) SHARED_REQUIRES(Locks::mutator_lock_) { auto target_it = writer_->method_offset_map_.map.find(patch.TargetMethod()); uint32_t target_offset = (target_it != writer_->method_offset_map_.map.end()) ? target_it->second : 0u; @@ -828,7 +828,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor { } mirror::Class* GetTargetType(const LinkerPatch& patch) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { mirror::DexCache* dex_cache = (dex_file_ == patch.TargetTypeDexFile()) ? dex_cache_ : class_linker_->FindDexCache(*patch.TargetTypeDexFile()); mirror::Class* type = dex_cache->GetResolvedType(patch.TargetTypeIndex()); @@ -836,7 +836,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor { return type; } - uint32_t GetDexCacheOffset(const LinkerPatch& patch) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + uint32_t GetDexCacheOffset(const LinkerPatch& patch) SHARED_REQUIRES(Locks::mutator_lock_) { if (writer_->image_writer_ != nullptr) { auto* element = writer_->image_writer_->GetDexCacheArrayElementImageAddress( patch.TargetDexCacheDexFile(), patch.TargetDexCacheElementOffset()); @@ -849,7 +849,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor { } void PatchObjectAddress(std::vector<uint8_t>* code, uint32_t offset, mirror::Object* object) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // NOTE: Direct method pointers across oat files don't use linker patches. However, direct // type pointers across oat files do. (TODO: Investigate why.) 
if (writer_->image_writer_ != nullptr) { @@ -865,7 +865,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor { } void PatchMethodAddress(std::vector<uint8_t>* code, uint32_t offset, ArtMethod* method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // NOTE: Direct method pointers across oat files don't use linker patches. However, direct // type pointers across oat files do. (TODO: Investigate why.) if (writer_->image_writer_ != nullptr) { @@ -882,7 +882,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor { } void PatchCodeAddress(std::vector<uint8_t>* code, uint32_t offset, uint32_t target_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { uint32_t address = writer_->image_writer_ == nullptr ? target_offset : PointerToLowMemUInt32(writer_->image_writer_->GetOatFileBegin() + writer_->oat_data_offset_ + target_offset); diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h index 82b9377c07..3baf43872e 100644 --- a/compiler/oat_writer.h +++ b/compiler/oat_writer.h @@ -165,9 +165,9 @@ class OatWriter { size_t InitOatClasses(size_t offset); size_t InitOatMaps(size_t offset); size_t InitOatCode(size_t offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); size_t InitOatCodeDexFiles(size_t offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool WriteTables(OutputStream* out, const size_t file_offset); size_t WriteMaps(OutputStream* out, const size_t file_offset, size_t relative_offset); diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc index e15eff9056..676b8421cd 100644 --- a/compiler/optimizing/code_generator_x86.cc +++ b/compiler/optimizing/code_generator_x86.cc @@ -4535,7 +4535,11 @@ void ParallelMoveResolverX86::EmitSwap(size_t index) { Location destination = move->GetDestination(); if (source.IsRegister() && destination.IsRegister()) { - __ xchgl(destination.AsRegister<Register>(), source.AsRegister<Register>()); + // Use XOR swap algorithm to avoid serializing XCHG instruction or using a temporary. + DCHECK_NE(destination.AsRegister<Register>(), source.AsRegister<Register>()); + __ xorl(destination.AsRegister<Register>(), source.AsRegister<Register>()); + __ xorl(source.AsRegister<Register>(), destination.AsRegister<Register>()); + __ xorl(destination.AsRegister<Register>(), source.AsRegister<Register>()); } else if (source.IsRegister() && destination.IsStackSlot()) { Exchange(source.AsRegister<Register>(), destination.GetStackIndex()); } else if (source.IsStackSlot() && destination.IsRegister()) { diff --git a/compiler/optimizing/graph_checker.cc b/compiler/optimizing/graph_checker.cc index cfebb77dd7..e4bc9e68e1 100644 --- a/compiler/optimizing/graph_checker.cc +++ b/compiler/optimizing/graph_checker.cc @@ -89,6 +89,33 @@ void GraphChecker::VisitBasicBlock(HBasicBlock* block) { block->GetBlockId())); } + // Ensure that the only Return(Void) and Throw jump to Exit. An exiting + // TryBoundary may be between a Throw and the Exit if the Throw is in a try. 
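A side note on the ParallelMoveResolverX86::EmitSwap change above: the xchgl is replaced by the classic three-XOR swap, which exchanges two registers without a temporary. A minimal sketch of the identity, assuming (as the DCHECK_NE does) that the two operands are distinct; if they alias, the first XOR zeroes the value and it stays zero:

#include <cassert>
#include <cstdint>

// Swap *a and *b in place without a temporary.
void XorSwap(uint32_t* a, uint32_t* b) {
  assert(a != b);  // mirrors the DCHECK_NE on the two registers
  *a ^= *b;        // *a == a0 ^ b0
  *b ^= *a;        // *b == b0 ^ (a0 ^ b0) == a0
  *a ^= *b;        // *a == (a0 ^ b0) ^ a0 == b0
}

The three xorl instructions emitted above are exactly this sequence applied to the destination and source registers.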
+ if (block->IsExitBlock()) { + for (size_t i = 0, e = block->GetPredecessors().Size(); i < e; ++i) { + HBasicBlock* predecessor = block->GetPredecessors().Get(i); + if (predecessor->IsSingleTryBoundary() + && !predecessor->GetLastInstruction()->AsTryBoundary()->IsEntry()) { + HBasicBlock* real_predecessor = predecessor->GetSinglePredecessor(); + HInstruction* last_instruction = real_predecessor->GetLastInstruction(); + if (!last_instruction->IsThrow()) { + AddError(StringPrintf("Unexpected TryBoundary between %s:%d and Exit.", + last_instruction->DebugName(), + last_instruction->GetId())); + } + } else { + HInstruction* last_instruction = predecessor->GetLastInstruction(); + if (!last_instruction->IsReturn() + && !last_instruction->IsReturnVoid() + && !last_instruction->IsThrow()) { + AddError(StringPrintf("Unexpected instruction %s:%d jumps into the exit block.", + last_instruction->DebugName(), + last_instruction->GetId())); + } + } + } + } + // Visit this block's list of phis. for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) { HInstruction* current = it.Current(); @@ -328,6 +355,39 @@ void GraphChecker::VisitInstanceOf(HInstanceOf* instruction) { void SSAChecker::VisitBasicBlock(HBasicBlock* block) { super_type::VisitBasicBlock(block); + // Ensure that only catch blocks have exceptional predecessors, and if they do + // these are instructions which throw into them. + if (block->IsCatchBlock()) { + for (size_t i = 0, e = block->GetExceptionalPredecessors().Size(); i < e; ++i) { + HInstruction* thrower = block->GetExceptionalPredecessors().Get(i); + HBasicBlock* try_block = thrower->GetBlock(); + if (!thrower->CanThrow()) { + AddError(StringPrintf("Exceptional predecessor %s:%d of catch block %d does not throw.", + thrower->DebugName(), + thrower->GetId(), + block->GetBlockId())); + } else if (!try_block->IsInTry()) { + AddError(StringPrintf("Exceptional predecessor %s:%d of catch block %d " + "is not in a try block.", + thrower->DebugName(), + thrower->GetId(), + block->GetBlockId())); + } else if (!try_block->GetTryEntry()->HasExceptionHandler(*block)) { + AddError(StringPrintf("Catch block %d is not an exception handler of " + "its exceptional predecessor %s:%d.", + block->GetBlockId(), + thrower->DebugName(), + thrower->GetId())); + } + } + } else { + if (!block->GetExceptionalPredecessors().IsEmpty()) { + AddError(StringPrintf("Normal block %d has %zu exceptional predecessors.", + block->GetBlockId(), + block->GetExceptionalPredecessors().Size())); + } + } + // Ensure that catch blocks are not normal successors, and normal blocks are // never exceptional successors. const size_t num_normal_successors = block->NumberOfNormalSuccessors(); @@ -512,6 +572,7 @@ void SSAChecker::CheckLoop(HBasicBlock* loop_header) { void SSAChecker::VisitInstruction(HInstruction* instruction) { super_type::VisitInstruction(instruction); + HBasicBlock* block = instruction->GetBlock(); // Ensure an instruction dominates all its uses. for (HUseIterator<HInstruction*> use_it(instruction->GetUses()); @@ -543,6 +604,24 @@ void SSAChecker::VisitInstruction(HInstruction* instruction) { } } } + + // Ensure that throwing instructions in try blocks are listed as exceptional + // predecessors in their exception handlers. 
+ if (instruction->CanThrow() && block->IsInTry()) { + for (HExceptionHandlerIterator handler_it(*block->GetTryEntry()); + !handler_it.Done(); + handler_it.Advance()) { + if (!handler_it.Current()->GetExceptionalPredecessors().Contains(instruction)) { + AddError(StringPrintf("Instruction %s:%d is in try block %d and can throw " + "but its exception handler %d does not list it in " + "its exceptional predecessors.", + instruction->DebugName(), + instruction->GetId(), + block->GetBlockId(), + handler_it.Current()->GetBlockId())); + } + } + } } static Primitive::Type PrimitiveKind(Primitive::Type type) { @@ -590,11 +669,32 @@ void SSAChecker::VisitPhi(HPhi* phi) { if (phi->IsCatchPhi()) { // The number of inputs of a catch phi corresponds to the total number of // throwing instructions caught by this catch block. + const GrowableArray<HInstruction*>& predecessors = + phi->GetBlock()->GetExceptionalPredecessors(); + if (phi->InputCount() != predecessors.Size()) { + AddError(StringPrintf( + "Phi %d in catch block %d has %zu inputs, " + "but catch block %d has %zu exceptional predecessors.", + phi->GetId(), phi->GetBlock()->GetBlockId(), phi->InputCount(), + phi->GetBlock()->GetBlockId(), predecessors.Size())); + } else { + for (size_t i = 0, e = phi->InputCount(); i < e; ++i) { + HInstruction* input = phi->InputAt(i); + HInstruction* thrower = predecessors.Get(i); + if (!input->StrictlyDominates(thrower)) { + AddError(StringPrintf( + "Input %d at index %zu of phi %d from catch block %d does not " + "dominate the throwing instruction %s:%d.", + input->GetId(), i, phi->GetId(), phi->GetBlock()->GetBlockId(), + thrower->DebugName(), thrower->GetId())); + } + } + } } else { // Ensure the number of inputs of a non-catch phi is the same as the number // of its predecessors. 
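For readers less used to SSA form, the arity rule being checked in this visitor can be restated compactly; the sketch below uses generic stand-in types rather than ART's HPhi and HBasicBlock:

#include <cstddef>
#include <vector>

struct Block {
  std::vector<Block*> predecessors;
};

// A non-catch phi merges one value per incoming control-flow edge, so its
// input count must equal its block's predecessor count. Catch phis instead
// take one input per throwing instruction covered by the catch block, which
// is what the exceptional-predecessor checks above enforce.
struct Phi {
  Block* block = nullptr;
  std::vector<int> inputs;  // one value id per incoming edge
};

bool PhiArityIsConsistent(const Phi& phi) {
  return phi.inputs.size() == phi.block->predecessors.size();
}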
const GrowableArray<HBasicBlock*>& predecessors = - phi->GetBlock()->GetPredecessors(); + phi->GetBlock()->GetPredecessors(); if (phi->InputCount() != predecessors.Size()) { AddError(StringPrintf( "Phi %d in block %d has %zu inputs, " diff --git a/compiler/optimizing/graph_checker_test.cc b/compiler/optimizing/graph_checker_test.cc index eca0d9344f..0f6677519e 100644 --- a/compiler/optimizing/graph_checker_test.cc +++ b/compiler/optimizing/graph_checker_test.cc @@ -25,14 +25,14 @@ namespace art { * Create a simple control-flow graph composed of two blocks: * * BasicBlock 0, succ: 1 - * 0: Goto 1 + * 0: ReturnVoid 1 * BasicBlock 1, pred: 0 * 1: Exit */ HGraph* CreateSimpleCFG(ArenaAllocator* allocator) { HGraph* graph = CreateGraph(allocator); HBasicBlock* entry_block = new (allocator) HBasicBlock(graph); - entry_block->AddInstruction(new (allocator) HGoto()); + entry_block->AddInstruction(new (allocator) HReturnVoid()); graph->AddBlock(entry_block); graph->SetEntryBlock(entry_block); HBasicBlock* exit_block = new (allocator) HBasicBlock(graph); diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc index d6b5636edc..069a7a460b 100644 --- a/compiler/optimizing/graph_visualizer.cc +++ b/compiler/optimizing/graph_visualizer.cc @@ -386,6 +386,7 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor { StartAttributeStream("recursive") << std::boolalpha << invoke->IsRecursive() << std::noboolalpha; + StartAttributeStream("intrinsic") << invoke->GetIntrinsic(); } void VisitTryBoundary(HTryBoundary* try_boundary) OVERRIDE { @@ -464,21 +465,19 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor { } else { StartAttributeStream("loop") << "B" << info->GetHeader()->GetBlockId(); } - } else if (IsReferenceTypePropagationPass() && is_after_pass_) { - if (instruction->GetType() == Primitive::kPrimNot) { - if (instruction->IsLoadClass()) { - ReferenceTypeInfo info = instruction->AsLoadClass()->GetLoadedClassRTI(); - ScopedObjectAccess soa(Thread::Current()); - DCHECK(info.IsValid()) << "Invalid RTI for " << instruction->DebugName(); - StartAttributeStream("klass") << PrettyDescriptor(info.GetTypeHandle().Get()); - StartAttributeStream("exact") << std::boolalpha << info.IsExact() << std::noboolalpha; - } else { - ReferenceTypeInfo info = instruction->GetReferenceTypeInfo(); - ScopedObjectAccess soa(Thread::Current()); - DCHECK(info.IsValid()) << "Invalid RTI for " << instruction->DebugName(); - StartAttributeStream("klass") << PrettyDescriptor(info.GetTypeHandle().Get()); - StartAttributeStream("exact") << std::boolalpha << info.IsExact() << std::noboolalpha; - } + } else if (IsReferenceTypePropagationPass() + && (instruction->GetType() == Primitive::kPrimNot)) { + ReferenceTypeInfo info = instruction->IsLoadClass() + ? 
instruction->AsLoadClass()->GetLoadedClassRTI() + : instruction->GetReferenceTypeInfo(); + ScopedObjectAccess soa(Thread::Current()); + if (info.IsValid()) { + StartAttributeStream("klass") << PrettyDescriptor(info.GetTypeHandle().Get()); + StartAttributeStream("can_be_null") + << std::boolalpha << instruction->CanBeNull() << std::noboolalpha; + StartAttributeStream("exact") << std::boolalpha << info.IsExact() << std::noboolalpha; + } else { + DCHECK(!is_after_pass_) << "Type info should be valid after reference type propagation"; } } if (disasm_info_ != nullptr) { diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc index 1551c1531a..01065959d8 100644 --- a/compiler/optimizing/inliner.cc +++ b/compiler/optimizing/inliner.cc @@ -22,8 +22,10 @@ #include "constant_folding.h" #include "dead_code_elimination.h" #include "driver/compiler_driver-inl.h" +#include "driver/compiler_options.h" #include "driver/dex_compilation_unit.h" #include "instruction_simplifier.h" +#include "intrinsics.h" #include "mirror/class_loader.h" #include "mirror/dex_cache.h" #include "nodes.h" @@ -38,9 +40,6 @@ namespace art { -static constexpr int kMaxInlineCodeUnits = 18; -static constexpr int kDepthLimit = 3; - void HInliner::Run() { if (graph_->IsDebuggable()) { // For simplicity, we currently never inline when the graph is debuggable. This avoids @@ -86,7 +85,7 @@ void HInliner::Run() { } static bool IsMethodOrDeclaringClassFinal(ArtMethod* method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return method->IsFinal() || method->GetDeclaringClass()->IsFinal(); } @@ -96,7 +95,7 @@ static bool IsMethodOrDeclaringClassFinal(ArtMethod* method) * Return nullptr if the runtime target cannot be proven. */ static ArtMethod* FindVirtualOrInterfaceTarget(HInvoke* invoke, ArtMethod* resolved_method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (IsMethodOrDeclaringClassFinal(resolved_method)) { // No need to lookup further, the resolved method will be the target. 
return resolved_method; @@ -162,7 +161,7 @@ static ArtMethod* FindVirtualOrInterfaceTarget(HInvoke* invoke, ArtMethod* resol static uint32_t FindMethodIndexIn(ArtMethod* method, const DexFile& dex_file, uint32_t referrer_index) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (method->GetDexFile()->GetLocation().compare(dex_file.GetLocation()) == 0) { return method->GetDexMethodIndex(); } else { @@ -219,7 +218,8 @@ bool HInliner::TryInline(HInvoke* invoke_instruction, uint32_t method_index) con return false; } - if (code_item->insns_size_in_code_units_ > kMaxInlineCodeUnits) { + size_t inline_max_code_units = compiler_driver_->GetCompilerOptions().GetInlineMaxCodeUnits(); + if (code_item->insns_size_in_code_units_ > inline_max_code_units) { VLOG(compiler) << "Method " << PrettyMethod(method_index, caller_dex_file) << " is too big to inline"; return false; @@ -271,11 +271,11 @@ bool HInliner::TryBuildAndInline(ArtMethod* resolved_method, const DexFile::CodeItem* code_item = resolved_method->GetCodeItem(); const DexFile& callee_dex_file = *resolved_method->GetDexFile(); uint32_t method_index = resolved_method->GetDexMethodIndex(); - + ClassLinker* class_linker = caller_compilation_unit_.GetClassLinker(); DexCompilationUnit dex_compilation_unit( nullptr, caller_compilation_unit_.GetClassLoader(), - caller_compilation_unit_.GetClassLinker(), + class_linker, *resolved_method->GetDexFile(), code_item, resolved_method->GetDeclaringClass()->GetDexClassDefIndex(), @@ -356,8 +356,10 @@ bool HInliner::TryBuildAndInline(ArtMethod* resolved_method, HConstantFolding fold(callee_graph); ReferenceTypePropagation type_propagation(callee_graph, handles_); InstructionSimplifier simplify(callee_graph, stats_); + IntrinsicsRecognizer intrinsics(callee_graph, compiler_driver_); HOptimization* optimizations[] = { + &intrinsics, &dce, &fold, &type_propagation, @@ -369,7 +371,7 @@ bool HInliner::TryBuildAndInline(ArtMethod* resolved_method, optimization->Run(); } - if (depth_ + 1 < kDepthLimit) { + if (depth_ + 1 < compiler_driver_->GetCompilerOptions().GetInlineDepthLimit()) { HInliner inliner(callee_graph, outer_compilation_unit_, dex_compilation_unit, @@ -448,7 +450,33 @@ bool HInliner::TryBuildAndInline(ArtMethod* resolved_method, } } - callee_graph->InlineInto(graph_, invoke_instruction); + HInstruction* return_replacement = callee_graph->InlineInto(graph_, invoke_instruction); + + // When merging the graph we might create a new NullConstant in the caller graph which does + // not have the chance to be typed. We assign the correct type here so that we can keep the + // assertion that every reference has a valid type. This also simplifies checks along the way. + HNullConstant* null_constant = graph_->GetNullConstant(); + if (!null_constant->GetReferenceTypeInfo().IsValid()) { + ReferenceTypeInfo::TypeHandle obj_handle = + handles_->NewHandle(class_linker->GetClassRoot(ClassLinker::kJavaLangObject)); + null_constant->SetReferenceTypeInfo( + ReferenceTypeInfo::Create(obj_handle, false /* is_exact */)); + } + + if ((return_replacement != nullptr) + && (return_replacement->GetType() == Primitive::kPrimNot)) { + if (!return_replacement->GetReferenceTypeInfo().IsValid()) { + // Make sure that we have a valid type for the return. We may get an invalid one when + // we inline invokes with multiple branches and create a Phi for the result. 
+ // TODO: we could be more precise by merging the phi inputs but that requires + // some functionality from the reference type propagation. + DCHECK(return_replacement->IsPhi()); + ReferenceTypeInfo::TypeHandle return_handle = + handles_->NewHandle(resolved_method->GetReturnType()); + return_replacement->SetReferenceTypeInfo(ReferenceTypeInfo::Create( + return_handle, return_handle->IsFinal() /* is_exact */)); + } + } return true; } diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc index 1089812beb..d3911456fb 100644 --- a/compiler/optimizing/instruction_simplifier.cc +++ b/compiler/optimizing/instruction_simplifier.cc @@ -195,6 +195,7 @@ bool InstructionSimplifierVisitor::IsDominatedByInputNullCheck(HInstruction* ins // Returns whether doing a type test between the class of `object` against `klass` has // a statically known outcome. The result of the test is stored in `outcome`. static bool TypeCheckHasKnownOutcome(HLoadClass* klass, HInstruction* object, bool* outcome) { + DCHECK(!object->IsNullConstant()) << "Null constants should be special cased"; ReferenceTypeInfo obj_rti = object->GetReferenceTypeInfo(); ScopedObjectAccess soa(Thread::Current()); if (!obj_rti.IsValid()) { diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc index 8ef13e125e..f24b15289e 100644 --- a/compiler/optimizing/intrinsics.cc +++ b/compiler/optimizing/intrinsics.cc @@ -187,6 +187,8 @@ static Intrinsics GetIntrinsic(InlineMethod method) { return Intrinsics::kStringCharAt; case kIntrinsicCompareTo: return Intrinsics::kStringCompareTo; + case kIntrinsicEquals: + return Intrinsics::kStringEquals; case kIntrinsicGetCharsNoCheck: return Intrinsics::kStringGetCharsNoCheck; case kIntrinsicIsEmptyOrLength: @@ -359,7 +361,7 @@ void IntrinsicsRecognizer::Run() { std::ostream& operator<<(std::ostream& os, const Intrinsics& intrinsic) { switch (intrinsic) { case Intrinsics::kNone: - os << "No intrinsic."; + os << "None"; break; #define OPTIMIZING_INTRINSICS(Name, IsStatic) \ case Intrinsics::k ## Name: \ diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc index b4dbf75f0a..3f157c6e36 100644 --- a/compiler/optimizing/intrinsics_arm.cc +++ b/compiler/optimizing/intrinsics_arm.cc @@ -1068,6 +1068,7 @@ UNIMPLEMENTED_INTRINSIC(UnsafeCASLong) // High register pressure. 
UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar) UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent) UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck) +UNIMPLEMENTED_INTRINSIC(StringEquals) #undef UNIMPLEMENTED_INTRINSIC diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc index 78ac167a87..aeae5764a5 100644 --- a/compiler/optimizing/intrinsics_arm64.cc +++ b/compiler/optimizing/intrinsics_arm64.cc @@ -1202,6 +1202,7 @@ void IntrinsicCodeGeneratorARM64::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar) UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent) UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck) +UNIMPLEMENTED_INTRINSIC(StringEquals) #undef UNIMPLEMENTED_INTRINSIC diff --git a/compiler/optimizing/intrinsics_list.h b/compiler/optimizing/intrinsics_list.h index 2c9248f52c..6e6f17328d 100644 --- a/compiler/optimizing/intrinsics_list.h +++ b/compiler/optimizing/intrinsics_list.h @@ -60,6 +60,7 @@ V(MemoryPokeShortNative, kStatic) \ V(StringCharAt, kDirect) \ V(StringCompareTo, kDirect) \ + V(StringEquals, kDirect) \ V(StringGetCharsNoCheck, kDirect) \ V(StringIndexOf, kDirect) \ V(StringIndexOfAfter, kDirect) \ diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc index 0d6ca09f31..6399ee80bc 100644 --- a/compiler/optimizing/intrinsics_x86.cc +++ b/compiler/optimizing/intrinsics_x86.cc @@ -945,6 +945,97 @@ void IntrinsicCodeGeneratorX86::VisitStringCompareTo(HInvoke* invoke) { __ Bind(slow_path->GetExitLabel()); } +void IntrinsicLocationsBuilderX86::VisitStringEquals(HInvoke* invoke) { + LocationSummary* locations = new (arena_) LocationSummary(invoke, + LocationSummary::kNoCall, + kIntrinsified); + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, Location::RequiresRegister()); + + // Request temporary registers, ECX and EDI needed for repe_cmpsw instruction. + locations->AddTemp(Location::RegisterLocation(ECX)); + locations->AddTemp(Location::RegisterLocation(EDI)); + + // Set output, ESI needed for repe_cmpsw instruction anyways. + locations->SetOut(Location::RegisterLocation(ESI)); +} + +void IntrinsicCodeGeneratorX86::VisitStringEquals(HInvoke* invoke) { + X86Assembler* assembler = GetAssembler(); + LocationSummary* locations = invoke->GetLocations(); + + Register str = locations->InAt(0).AsRegister<Register>(); + Register arg = locations->InAt(1).AsRegister<Register>(); + Register ecx = locations->GetTemp(0).AsRegister<Register>(); + Register edi = locations->GetTemp(1).AsRegister<Register>(); + Register esi = locations->Out().AsRegister<Register>(); + + Label end; + Label return_true; + Label return_false; + + // Get offsets of count, value, and class fields within a string object. + const uint32_t count_offset = mirror::String::CountOffset().Uint32Value(); + const uint32_t value_offset = mirror::String::ValueOffset().Uint32Value(); + const uint32_t class_offset = mirror::Object::ClassOffset().Uint32Value(); + + // Note that the null check must have been done earlier. + DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0))); + + // Check if input is null, return false if it is. + __ cmpl(arg, Immediate(0)); + __ j(kEqual, &return_false); + + // Instanceof check for the argument by comparing class fields. + // All string objects must have the same type since String cannot be subclassed. + // Receiver must be a string object, so its class field is equal to all strings' class fields. 
+ // If the argument is a string object, its class field must be equal to receiver's class field. + __ movl(ecx, Address(str, class_offset)); + __ cmpl(ecx, Address(arg, class_offset)); + __ j(kNotEqual, &return_false); + + // Reference equality check, return true if same reference. + __ cmpl(str, arg); + __ j(kEqual, &return_true); + + // Load length of receiver string. + __ movl(ecx, Address(str, count_offset)); + // Check if lengths are equal, return false if they're not. + __ cmpl(ecx, Address(arg, count_offset)); + __ j(kNotEqual, &return_false); + // Return true if both strings are empty. + __ cmpl(ecx, Immediate(0)); + __ j(kEqual, &return_true); + + // Load starting addresses of string values into ESI/EDI as required for repe_cmpsw instruction. + __ leal(esi, Address(str, value_offset)); + __ leal(edi, Address(arg, value_offset)); + + // Divide string length by 2 to compare characters 2 at a time and adjust for odd lengths. + __ addl(ecx, Immediate(1)); + __ shrl(ecx, Immediate(1)); + + // Assertions that must hold in order to compare strings 2 characters at a time. + DCHECK_ALIGNED(value_offset, 4); + static_assert(IsAligned<4>(kObjectAlignment), "String of odd length is not zero padded"); + + // Loop to compare strings two characters at a time starting at the beginning of the string. + __ repe_cmpsl(); + // If strings are not equal, zero flag will be cleared. + __ j(kNotEqual, &return_false); + + // Return true and exit the function. + // If loop does not result in returning false, we return true. + __ Bind(&return_true); + __ movl(esi, Immediate(1)); + __ jmp(&end); + + // Return false and exit the function. + __ Bind(&return_false); + __ movl(esi, Immediate(0)); + __ Bind(&end); +} + static void CreateStringIndexOfLocations(HInvoke* invoke, ArenaAllocator* allocator, bool start_at_zero) { diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc index ea342e9382..6e737d6d3c 100644 --- a/compiler/optimizing/intrinsics_x86_64.cc +++ b/compiler/optimizing/intrinsics_x86_64.cc @@ -1615,6 +1615,7 @@ void IntrinsicCodeGeneratorX86_64::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSE UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck) UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar) UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent) +UNIMPLEMENTED_INTRINSIC(StringEquals) #undef UNIMPLEMENTED_INTRINSIC diff --git a/compiler/optimizing/licm_test.cc b/compiler/optimizing/licm_test.cc new file mode 100644 index 0000000000..2fc66e6de4 --- /dev/null +++ b/compiler/optimizing/licm_test.cc @@ -0,0 +1,195 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "base/arena_allocator.h" +#include "builder.h" +#include "gtest/gtest.h" +#include "licm.h" +#include "nodes.h" +#include "optimizing_unit_test.h" +#include "side_effects_analysis.h" + +namespace art { + +/** + * Fixture class for the LICM tests. 
+ */ +class LICMTest : public testing::Test { + public: + LICMTest() : pool_(), allocator_(&pool_) { + graph_ = CreateGraph(&allocator_); + } + + ~LICMTest() { } + + // Builds a singly-nested loop structure in CFG. Tests can further populate + // the basic blocks with instructions to set up interesting scenarios. + void BuildLoop() { + entry_ = new (&allocator_) HBasicBlock(graph_); + loop_preheader_ = new (&allocator_) HBasicBlock(graph_); + loop_header_ = new (&allocator_) HBasicBlock(graph_); + loop_body_ = new (&allocator_) HBasicBlock(graph_); + exit_ = new (&allocator_) HBasicBlock(graph_); + + graph_->AddBlock(entry_); + graph_->AddBlock(loop_preheader_); + graph_->AddBlock(loop_header_); + graph_->AddBlock(loop_body_); + graph_->AddBlock(exit_); + + graph_->SetEntryBlock(entry_); + graph_->SetExitBlock(exit_); + + // Set up loop flow in CFG. + entry_->AddSuccessor(loop_preheader_); + loop_preheader_->AddSuccessor(loop_header_); + loop_header_->AddSuccessor(loop_body_); + loop_header_->AddSuccessor(exit_); + loop_body_->AddSuccessor(loop_header_); + + // Provide boiler-plate instructions. + parameter_ = new (&allocator_) HParameterValue(0, Primitive::kPrimNot); + entry_->AddInstruction(parameter_); + constant_ = new (&allocator_) HConstant(Primitive::kPrimInt); + loop_preheader_->AddInstruction(constant_); + loop_header_->AddInstruction(new (&allocator_) HIf(parameter_)); + loop_body_->AddInstruction(new (&allocator_) HGoto()); + exit_->AddInstruction(new (&allocator_) HExit()); + } + + // Performs LICM optimizations (after proper set up). + void PerformLICM() { + ASSERT_TRUE(graph_->TryBuildingSsa()); + SideEffectsAnalysis side_effects(graph_); + side_effects.Run(); + LICM licm(graph_, side_effects); + licm.Run(); + } + + // General building fields. + ArenaPool pool_; + ArenaAllocator allocator_; + HGraph* graph_; + + // Specific basic blocks. + HBasicBlock* entry_; + HBasicBlock* loop_preheader_; + HBasicBlock* loop_header_; + HBasicBlock* loop_body_; + HBasicBlock* exit_; + + HInstruction* parameter_; // "this" + HInstruction* constant_; +}; + +// +// The actual LICM tests. +// + +TEST_F(LICMTest, ConstantHoisting) { + BuildLoop(); + + // Populate the loop with instructions: set array to constant. + HInstruction* constant = new (&allocator_) HConstant(Primitive::kPrimDouble); + loop_body_->InsertInstructionBefore(constant, loop_body_->GetLastInstruction()); + HInstruction* set_array = new (&allocator_) HArraySet( + parameter_, constant_, constant, Primitive::kPrimDouble, 0); + loop_body_->InsertInstructionBefore(set_array, loop_body_->GetLastInstruction()); + + EXPECT_EQ(constant->GetBlock(), loop_body_); + EXPECT_EQ(set_array->GetBlock(), loop_body_); + PerformLICM(); + EXPECT_EQ(constant->GetBlock(), loop_preheader_); + EXPECT_EQ(set_array->GetBlock(), loop_body_); +} + +TEST_F(LICMTest, FieldHoisting) { + BuildLoop(); + + // Populate the loop with instructions: set/get field with different types. 
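In source-level terms, the scenario this FieldHoisting test builds looks roughly like the following. The C++ analogue is illustrative only (the real test manipulates ART's HInstruction graph directly); it relies on the read and the write using different primitive types, which is what lets the side-effects analysis treat the load as loop-invariant:

#include <cstdint>

struct Obj {
  int64_t f_long;  // only read inside the loop
  int32_t f_int;   // only written inside the loop
};

// Before LICM: the load of f_long is repeated on every iteration even though
// the loop's only store writes a field of a different type and cannot alias it.
int64_t SumBefore(Obj* obj, int32_t c, int n) {
  int64_t sum = 0;
  for (int i = 0; i < n; ++i) {
    int64_t v = obj->f_long;
    obj->f_int = c;
    sum += v;
  }
  return sum;
}

// After LICM, the shape the test expects: the loop-invariant load is hoisted
// to the preheader while the store stays in the loop body.
int64_t SumAfter(Obj* obj, int32_t c, int n) {
  const int64_t v = obj->f_long;
  int64_t sum = 0;
  for (int i = 0; i < n; ++i) {
    obj->f_int = c;
    sum += v;
  }
  return sum;
}

The NoFieldHoisting test that follows covers the aliasing case: when the loop stores to the same field it reads, both accesses must stay inside the loop.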
+ HInstruction* get_field = new (&allocator_) HInstanceFieldGet( + parameter_, Primitive::kPrimLong, MemberOffset(10), + false, kUnknownFieldIndex, graph_->GetDexFile()); + loop_body_->InsertInstructionBefore(get_field, loop_body_->GetLastInstruction()); + HInstruction* set_field = new (&allocator_) HInstanceFieldSet( + parameter_, constant_, Primitive::kPrimInt, MemberOffset(20), + false, kUnknownFieldIndex, graph_->GetDexFile()); + loop_body_->InsertInstructionBefore(set_field, loop_body_->GetLastInstruction()); + + EXPECT_EQ(get_field->GetBlock(), loop_body_); + EXPECT_EQ(set_field->GetBlock(), loop_body_); + PerformLICM(); + EXPECT_EQ(get_field->GetBlock(), loop_preheader_); + EXPECT_EQ(set_field->GetBlock(), loop_body_); +} + +TEST_F(LICMTest, NoFieldHoisting) { + BuildLoop(); + + // Populate the loop with instructions: set/get field with same types. + HInstruction* get_field = new (&allocator_) HInstanceFieldGet( + parameter_, Primitive::kPrimLong, MemberOffset(10), + false, kUnknownFieldIndex, graph_->GetDexFile()); + loop_body_->InsertInstructionBefore(get_field, loop_body_->GetLastInstruction()); + HInstruction* set_field = new (&allocator_) HInstanceFieldSet( + parameter_, get_field, Primitive::kPrimLong, MemberOffset(10), + false, kUnknownFieldIndex, graph_->GetDexFile()); + loop_body_->InsertInstructionBefore(set_field, loop_body_->GetLastInstruction()); + + EXPECT_EQ(get_field->GetBlock(), loop_body_); + EXPECT_EQ(set_field->GetBlock(), loop_body_); + PerformLICM(); + EXPECT_EQ(get_field->GetBlock(), loop_body_); + EXPECT_EQ(set_field->GetBlock(), loop_body_); +} + +TEST_F(LICMTest, ArrayHoisting) { + BuildLoop(); + + // Populate the loop with instructions: set/get array with different types. + HInstruction* get_array = new (&allocator_) HArrayGet( + parameter_, constant_, Primitive::kPrimLong); + loop_body_->InsertInstructionBefore(get_array, loop_body_->GetLastInstruction()); + HInstruction* set_array = new (&allocator_) HArraySet( + parameter_, constant_, constant_, Primitive::kPrimInt, 0); + loop_body_->InsertInstructionBefore(set_array, loop_body_->GetLastInstruction()); + + EXPECT_EQ(get_array->GetBlock(), loop_body_); + EXPECT_EQ(set_array->GetBlock(), loop_body_); + PerformLICM(); + EXPECT_EQ(get_array->GetBlock(), loop_preheader_); + EXPECT_EQ(set_array->GetBlock(), loop_body_); +} + +TEST_F(LICMTest, NoArrayHoisting) { + BuildLoop(); + + // Populate the loop with instructions: set/get array with same types. 
+ HInstruction* get_array = new (&allocator_) HArrayGet( + parameter_, constant_, Primitive::kPrimLong); + loop_body_->InsertInstructionBefore(get_array, loop_body_->GetLastInstruction()); + HInstruction* set_array = new (&allocator_) HArraySet( + parameter_, get_array, constant_, Primitive::kPrimLong, 0); + loop_body_->InsertInstructionBefore(set_array, loop_body_->GetLastInstruction()); + + EXPECT_EQ(get_array->GetBlock(), loop_body_); + EXPECT_EQ(set_array->GetBlock(), loop_body_); + PerformLICM(); + EXPECT_EQ(get_array->GetBlock(), loop_body_); + EXPECT_EQ(set_array->GetBlock(), loop_body_); +} + +} // namespace art diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc index 296c1b02fc..61dadc2704 100644 --- a/compiler/optimizing/nodes.cc +++ b/compiler/optimizing/nodes.cc @@ -564,6 +564,13 @@ bool HBasicBlock::Dominates(HBasicBlock* other) const { return false; } +void HBasicBlock::AddExceptionalPredecessor(HInstruction* exceptional_predecessor) { + DCHECK(exceptional_predecessor->CanThrow()); + DCHECK(exceptional_predecessor->GetBlock()->IsInTry()); + DCHECK(exceptional_predecessor->GetBlock()->GetTryEntry()->HasExceptionHandler(*this)); + exceptional_predecessors_.Add(exceptional_predecessor); +} + static void UpdateInputsUsers(HInstruction* instruction) { for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) { instruction->InputAt(i)->AddUseAt(instruction, i); @@ -1225,10 +1232,12 @@ bool HTryBoundary::HasSameExceptionHandlersAs(const HTryBoundary& other) const { return false; } - // Exception handler lists cannot contain duplicates, which makes it - // sufficient to test inclusion only in one direction. - for (HExceptionHandlerIterator it(other); !it.Done(); it.Advance()) { - if (!HasExceptionHandler(*it.Current())) { + // Exception handlers need to be stored in the same order. + for (HExceptionHandlerIterator it1(*this), it2(other); + !it1.Done(); + it1.Advance(), it2.Advance()) { + DCHECK(!it2.Done()); + if (it1.Current() != it2.Current()) { return false; } } @@ -1485,7 +1494,7 @@ void HGraph::DeleteDeadBlock(HBasicBlock* block) { blocks_.Put(block->GetBlockId(), nullptr); } -void HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) { +HInstruction* HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) { DCHECK(HasExitBlock()) << "Unimplemented scenario"; // Update the environments in this graph to have the invoke's environment // as parent. @@ -1510,6 +1519,7 @@ void HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) { outer_graph->SetHasBoundsChecks(true); } + HInstruction* return_value = nullptr; if (GetBlocks().Size() == 3) { // Simple case of an entry block, a body block, and an exit block. // Put the body block's instruction into `invoke`'s block. @@ -1524,7 +1534,8 @@ void HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) { // Replace the invoke with the return value of the inlined graph. if (last->IsReturn()) { - invoke->ReplaceWith(last->InputAt(0)); + return_value = last->InputAt(0); + invoke->ReplaceWith(return_value); } else { DCHECK(last->IsReturnVoid()); } @@ -1546,7 +1557,6 @@ void HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) { // Update all predecessors of the exit block (now the `to` block) // to not `HReturn` but `HGoto` instead. 
- HInstruction* return_value = nullptr; bool returns_void = to->GetPredecessors().Get(0)->GetLastInstruction()->IsReturnVoid(); if (to->GetPredecessors().Size() == 1) { HBasicBlock* predecessor = to->GetPredecessors().Get(0); @@ -1680,6 +1690,8 @@ void HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) { // Finally remove the invoke from the caller. invoke->GetBlock()->RemoveInstruction(invoke); + + return return_value; } /* diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h index 1190fae914..9b8521d968 100644 --- a/compiler/optimizing/nodes.h +++ b/compiler/optimizing/nodes.h @@ -58,6 +58,7 @@ class SsaBuilder; static const int kDefaultNumberOfBlocks = 8; static const int kDefaultNumberOfSuccessors = 2; static const int kDefaultNumberOfPredecessors = 2; +static const int kDefaultNumberOfExceptionalPredecessors = 0; static const int kDefaultNumberOfDominatedBlocks = 1; static const int kDefaultNumberOfBackEdges = 1; @@ -210,7 +211,9 @@ class HGraph : public ArenaObject<kArenaAllocMisc> { void ComputeTryBlockInformation(); // Inline this graph in `outer_graph`, replacing the given `invoke` instruction. - void InlineInto(HGraph* outer_graph, HInvoke* invoke); + // Returns the instruction used to replace the invoke expression or null if the + // invoke is for a void method. + HInstruction* InlineInto(HGraph* outer_graph, HInvoke* invoke); // Need to add a couple of blocks to test if the loop body is entered and // put deoptimization instructions, etc. @@ -306,7 +309,12 @@ class HGraph : public ArenaObject<kArenaAllocMisc> { // already, it is created and inserted into the graph. This method is only for // integral types. HConstant* GetConstant(Primitive::Type type, int64_t value); + + // TODO: This is problematic for the consistency of reference type propagation + // because it can be created anytime after the pass and thus it will be left + // with an invalid type. 
HNullConstant* GetNullConstant(); + HIntConstant* GetIntConstant(int32_t value) { return CreateConstant(value, &cached_int_constants_); } @@ -557,6 +565,7 @@ class HBasicBlock : public ArenaObject<kArenaAllocMisc> { explicit HBasicBlock(HGraph* graph, uint32_t dex_pc = kNoDexPc) : graph_(graph), predecessors_(graph->GetArena(), kDefaultNumberOfPredecessors), + exceptional_predecessors_(graph->GetArena(), kDefaultNumberOfExceptionalPredecessors), successors_(graph->GetArena(), kDefaultNumberOfSuccessors), loop_information_(nullptr), dominator_(nullptr), @@ -571,6 +580,10 @@ class HBasicBlock : public ArenaObject<kArenaAllocMisc> { return predecessors_; } + const GrowableArray<HInstruction*>& GetExceptionalPredecessors() const { + return exceptional_predecessors_; + } + const GrowableArray<HBasicBlock*>& GetSuccessors() const { return successors_; } @@ -639,6 +652,8 @@ class HBasicBlock : public ArenaObject<kArenaAllocMisc> { HInstruction* GetLastPhi() const { return phis_.last_instruction_; } const HInstructionList& GetPhis() const { return phis_; } + void AddExceptionalPredecessor(HInstruction* exceptional_predecessor); + void AddSuccessor(HBasicBlock* block) { successors_.Add(block); block->predecessors_.Add(this); @@ -678,6 +693,10 @@ class HBasicBlock : public ArenaObject<kArenaAllocMisc> { predecessors_.Delete(block); } + void RemoveExceptionalPredecessor(HInstruction* instruction) { + exceptional_predecessors_.Delete(instruction); + } + void RemoveSuccessor(HBasicBlock* block) { successors_.Delete(block); } @@ -714,6 +733,15 @@ class HBasicBlock : public ArenaObject<kArenaAllocMisc> { return -1; } + size_t GetExceptionalPredecessorIndexOf(HInstruction* exceptional_predecessor) const { + for (size_t i = 0, e = exceptional_predecessors_.Size(); i < e; ++i) { + if (exceptional_predecessors_.Get(i) == exceptional_predecessor) { + return i; + } + } + return -1; + } + size_t GetSuccessorIndexOf(HBasicBlock* successor) const { for (size_t i = 0, e = successors_.Size(); i < e; ++i) { if (successors_.Get(i) == successor) { @@ -874,6 +902,7 @@ class HBasicBlock : public ArenaObject<kArenaAllocMisc> { private: HGraph* graph_; GrowableArray<HBasicBlock*> predecessors_; + GrowableArray<HInstruction*> exceptional_predecessors_; GrowableArray<HBasicBlock*> successors_; HInstructionList instructions_; HInstructionList phis_; @@ -1467,26 +1496,27 @@ class ReferenceTypeInfo : ValueObject { static ReferenceTypeInfo CreateInvalid() { return ReferenceTypeInfo(); } - static bool IsValidHandle(TypeHandle handle) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + static bool IsValidHandle(TypeHandle handle) SHARED_REQUIRES(Locks::mutator_lock_) { return handle.GetReference() != nullptr; } - bool IsValid() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsValid() const SHARED_REQUIRES(Locks::mutator_lock_) { return IsValidHandle(type_handle_); } bool IsExact() const { return is_exact_; } - bool IsObjectClass() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + + bool IsObjectClass() const SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(IsValid()); return GetTypeHandle()->IsObjectClass(); } - bool IsInterface() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsInterface() const SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(IsValid()); return GetTypeHandle()->IsInterface(); } Handle<mirror::Class> GetTypeHandle() const { return type_handle_; } - bool IsSupertypeOf(ReferenceTypeInfo rti) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsSupertypeOf(ReferenceTypeInfo rti) const 
SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(IsValid()); DCHECK(rti.IsValid()); return GetTypeHandle()->IsAssignableFrom(rti.GetTypeHandle().Get()); @@ -1495,7 +1525,7 @@ class ReferenceTypeInfo : ValueObject { // Returns true if the type information provide the same amount of details. // Note that it does not mean that the instructions have the same actual type // (because the type can be the result of a merge). - bool IsEqual(ReferenceTypeInfo rti) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsEqual(ReferenceTypeInfo rti) SHARED_REQUIRES(Locks::mutator_lock_) { if (!IsValid() && !rti.IsValid()) { // Invalid types are equal. return true; @@ -3589,7 +3619,7 @@ class HInstanceFieldGet : public HExpression<1> { const DexFile& dex_file) : HExpression( field_type, - SideEffects::SideEffects::FieldReadOfType(field_type, is_volatile)), + SideEffects::FieldReadOfType(field_type, is_volatile)), field_info_(field_offset, field_type, is_volatile, field_idx, dex_file) { SetRawInputAt(0, value); } @@ -4036,7 +4066,7 @@ class HStaticFieldGet : public HExpression<1> { const DexFile& dex_file) : HExpression( field_type, - SideEffects::SideEffects::FieldReadOfType(field_type, is_volatile)), + SideEffects::FieldReadOfType(field_type, is_volatile)), field_info_(field_offset, field_type, is_volatile, field_idx, dex_file) { SetRawInputAt(0, cls); } @@ -4184,7 +4214,8 @@ class HBoundType : public HExpression<1> { HBoundType(HInstruction* input, ReferenceTypeInfo upper_bound, bool upper_can_be_null) : HExpression(Primitive::kPrimNot, SideEffects::None()), upper_bound_(upper_bound), - upper_can_be_null_(upper_can_be_null) { + upper_can_be_null_(upper_can_be_null), + can_be_null_(upper_can_be_null) { DCHECK_EQ(input->GetType(), Primitive::kPrimNot); SetRawInputAt(0, input); SetReferenceTypeInfo(upper_bound_); diff --git a/compiler/optimizing/optimizing_cfi_test.cc b/compiler/optimizing/optimizing_cfi_test.cc index fe3bb1a2b4..f455571636 100644 --- a/compiler/optimizing/optimizing_cfi_test.cc +++ b/compiler/optimizing/optimizing_cfi_test.cc @@ -29,7 +29,7 @@ namespace art { // Run the tests only on host. -#ifndef HAVE_ANDROID_OS +#ifndef __ANDROID__ class OptimizingCFITest : public CFITest { public: @@ -125,6 +125,6 @@ TEST_ISA(kArm64) TEST_ISA(kX86) TEST_ISA(kX86_64) -#endif // HAVE_ANDROID_OS +#endif // __ANDROID__ } // namespace art diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc index 601d668995..6a50b7d4a4 100644 --- a/compiler/optimizing/optimizing_compiler.cc +++ b/compiler/optimizing/optimizing_compiler.cc @@ -86,7 +86,7 @@ class CodeVectorAllocator FINAL : public CodeAllocator { * Filter to apply to the visualizer. Methods whose name contain that filter will * be dumped. 
*/ -static const char* kStringFilter = ""; +static constexpr const char kStringFilter[] = ""; class PassScope; @@ -105,12 +105,14 @@ class PassObserver : public ValueObject { visualizer_enabled_(!compiler_driver->GetDumpCfgFileName().empty()), visualizer_(visualizer_output, graph, *codegen), graph_in_bad_state_(false) { - if (strstr(method_name, kStringFilter) == nullptr) { - timing_logger_enabled_ = visualizer_enabled_ = false; - } - if (visualizer_enabled_) { - visualizer_.PrintHeader(method_name_); - codegen->SetDisassemblyInformation(&disasm_info_); + if (timing_logger_enabled_ || visualizer_enabled_) { + if (!IsVerboseMethod(compiler_driver, method_name)) { + timing_logger_enabled_ = visualizer_enabled_ = false; + } + if (visualizer_enabled_) { + visualizer_.PrintHeader(method_name_); + codegen->SetDisassemblyInformation(&disasm_info_); + } } } @@ -169,6 +171,23 @@ class PassObserver : public ValueObject { } } + static bool IsVerboseMethod(CompilerDriver* compiler_driver, const char* method_name) { + // Test an exact match to --verbose-methods. If verbose-methods is set, this overrides an + // empty kStringFilter matching all methods. + if (compiler_driver->GetCompilerOptions().HasVerboseMethods()) { + return compiler_driver->GetCompilerOptions().IsVerboseMethod(method_name); + } + + // Test the kStringFilter sub-string. constexpr helper variable to silence unreachable-code + // warning when the string is empty. + constexpr bool kStringFilterEmpty = arraysize(kStringFilter) <= 1; + if (kStringFilterEmpty || strstr(method_name, kStringFilter) != nullptr) { + return true; + } + + return false; + } + HGraph* const graph_; const char* method_name_; @@ -237,7 +256,7 @@ class OptimizingCompiler FINAL : public Compiler { } uintptr_t GetEntryPointOf(ArtMethod* method) const OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCodePtrSize( InstructionSetPointerSize(GetCompilerDriver()->GetInstructionSet()))); } @@ -350,6 +369,36 @@ static void RunOptimizations(HOptimization* optimizations[], } } +static void MaybeRunInliner(HGraph* graph, + CompilerDriver* driver, + OptimizingCompilerStats* stats, + const DexCompilationUnit& dex_compilation_unit, + PassObserver* pass_observer, + StackHandleScopeCollection* handles) { + const CompilerOptions& compiler_options = driver->GetCompilerOptions(); + bool should_inline = (compiler_options.GetInlineDepthLimit() > 0) + && (compiler_options.GetInlineMaxCodeUnits() > 0); + if (!should_inline) { + return; + } + + ArenaAllocator* arena = graph->GetArena(); + HInliner* inliner = new (arena) HInliner( + graph, dex_compilation_unit, dex_compilation_unit, driver, handles, stats); + ReferenceTypePropagation* type_propagation = + new (arena) ReferenceTypePropagation(graph, handles, + "reference_type_propagation_after_inlining"); + + HOptimization* optimizations[] = { + inliner, + // Run another type propagation phase: inlining will open up more opportunities + // to remove checkcast/instanceof and null checks. 
+ type_propagation, + }; + + RunOptimizations(optimizations, arraysize(optimizations), pass_observer); +} + static void RunOptimizations(HGraph* graph, CompilerDriver* driver, OptimizingCompilerStats* stats, @@ -364,10 +413,6 @@ static void RunOptimizations(HGraph* graph, HConstantFolding* fold1 = new (arena) HConstantFolding(graph); InstructionSimplifier* simplify1 = new (arena) InstructionSimplifier(graph, stats); HBooleanSimplifier* boolean_simplify = new (arena) HBooleanSimplifier(graph); - - HInliner* inliner = new (arena) HInliner( - graph, dex_compilation_unit, dex_compilation_unit, driver, handles, stats); - HConstantFolding* fold2 = new (arena) HConstantFolding(graph, "constant_folding_after_inlining"); SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph); GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects); @@ -379,29 +424,29 @@ static void RunOptimizations(HGraph* graph, graph, stats, "instruction_simplifier_after_types"); InstructionSimplifier* simplify3 = new (arena) InstructionSimplifier( graph, stats, "instruction_simplifier_after_bce"); - ReferenceTypePropagation* type_propagation2 = - new (arena) ReferenceTypePropagation( - graph, handles, "reference_type_propagation_after_inlining"); InstructionSimplifier* simplify4 = new (arena) InstructionSimplifier( graph, stats, "instruction_simplifier_before_codegen"); IntrinsicsRecognizer* intrinsics = new (arena) IntrinsicsRecognizer(graph, driver); - HOptimization* optimizations[] = { + HOptimization* optimizations1[] = { intrinsics, fold1, simplify1, type_propagation, dce1, - simplify2, - inliner, - // Run another type propagation phase: inlining will open up more opprotunities - // to remove checkast/instanceof and null checks. - type_propagation2, + simplify2 + }; + + RunOptimizations(optimizations1, arraysize(optimizations1), pass_observer); + + MaybeRunInliner(graph, driver, stats, dex_compilation_unit, pass_observer, handles); + + HOptimization* optimizations2[] = { // BooleanSimplifier depends on the InstructionSimplifier removing redundant // suspend checks to recognize empty blocks. boolean_simplify, - fold2, + fold2, // TODO: if we don't inline we can also skip fold2. side_effects, gvn, licm, @@ -414,7 +459,7 @@ static void RunOptimizations(HGraph* graph, simplify4, }; - RunOptimizations(optimizations, arraysize(optimizations), pass_observer); + RunOptimizations(optimizations2, arraysize(optimizations2), pass_observer); } // The stack map we generate must be 4-byte aligned on ARM. 
Since existing diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc index d11a441a6e..d1c1134565 100644 --- a/compiler/optimizing/reference_type_propagation.cc +++ b/compiler/optimizing/reference_type_propagation.cc @@ -153,7 +153,7 @@ static HBoundType* CreateBoundType(ArenaAllocator* arena, HInstruction* obj, HLoadClass* load_class, bool upper_can_be_null) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ReferenceTypeInfo obj_rti = obj->GetReferenceTypeInfo(); ReferenceTypeInfo class_rti = load_class->GetLoadedClassRTI(); HBoundType* bound_type = new (arena) HBoundType(obj, class_rti, upper_can_be_null); @@ -180,7 +180,7 @@ static bool ShouldCreateBoundType(HInstruction* position, ReferenceTypeInfo upper_bound, HInstruction* dominator_instr, HBasicBlock* dominator_block) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // If the position where we should insert the bound type is not already a // a bound type then we need to create one. if (position == nullptr || !position->IsBoundType()) { @@ -388,6 +388,8 @@ void RTPVisitor::UpdateReferenceTypeInfo(HInstruction* instr, } void RTPVisitor::VisitNullConstant(HNullConstant* instr) { + // TODO: The null constant could be bound contextually (e.g. based on return statements) + // to a more precise type. instr->SetReferenceTypeInfo( ReferenceTypeInfo::Create(object_class_handle_, /* is_exact */ false)); } @@ -438,8 +440,10 @@ void RTPVisitor::VisitLoadClass(HLoadClass* instr) { Runtime::Current()->GetClassLinker()->FindDexCache(instr->GetDexFile()); // Get type from dex cache assuming it was populated by the verifier. mirror::Class* resolved_class = dex_cache->GetResolvedType(instr->GetTypeIndex()); - DCHECK(resolved_class != nullptr); - ReferenceTypeInfo::TypeHandle handle = handles_->NewHandle(resolved_class); + // TODO: investigating why we are still getting unresolved classes: b/22821472. + ReferenceTypeInfo::TypeHandle handle = (resolved_class != nullptr) + ? 
handles_->NewHandle(resolved_class) + : object_class_handle_; instr->SetLoadedClassRTI(ReferenceTypeInfo::Create(handle, /* is_exact */ true)); instr->SetReferenceTypeInfo(ReferenceTypeInfo::Create(class_class_handle_, /* is_exact */ true)); } @@ -546,7 +550,7 @@ ReferenceTypeInfo ReferenceTypePropagation::MergeTypes(const ReferenceTypeInfo& static void UpdateArrayGet(HArrayGet* instr, StackHandleScopeCollection* handles, ReferenceTypeInfo::TypeHandle object_class_handle) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK_EQ(Primitive::kPrimNot, instr->GetType()); ReferenceTypeInfo parent_rti = instr->InputAt(0)->GetReferenceTypeInfo(); diff --git a/compiler/optimizing/reference_type_propagation.h b/compiler/optimizing/reference_type_propagation.h index 9196b56e37..14d4a82e9b 100644 --- a/compiler/optimizing/reference_type_propagation.h +++ b/compiler/optimizing/reference_type_propagation.h @@ -41,8 +41,8 @@ class ReferenceTypePropagation : public HOptimization { private: void VisitPhi(HPhi* phi); void VisitBasicBlock(HBasicBlock* block); - void UpdateBoundType(HBoundType* bound_type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void UpdatePhi(HPhi* phi) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void UpdateBoundType(HBoundType* bound_type) SHARED_REQUIRES(Locks::mutator_lock_); + void UpdatePhi(HPhi* phi) SHARED_REQUIRES(Locks::mutator_lock_); void BoundTypeForIfNotNull(HBasicBlock* block); void BoundTypeForIfInstanceOf(HBasicBlock* block); void ProcessWorklist(); @@ -53,7 +53,7 @@ class ReferenceTypePropagation : public HOptimization { bool UpdateReferenceTypeInfo(HInstruction* instr); ReferenceTypeInfo MergeTypes(const ReferenceTypeInfo& a, const ReferenceTypeInfo& b) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); StackHandleScopeCollection* handles_; diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc index ff2e6ad821..2c34e4dd03 100644 --- a/compiler/optimizing/ssa_builder.cc +++ b/compiler/optimizing/ssa_builder.cc @@ -570,7 +570,9 @@ void SsaBuilder::VisitInstruction(HInstruction* instruction) { if (instruction->GetBlock()->IsInTry() && instruction->CanThrow()) { HTryBoundary* try_block = instruction->GetBlock()->GetTryEntry(); for (HExceptionHandlerIterator it(*try_block); !it.Done(); it.Advance()) { - GrowableArray<HInstruction*>* handler_locals = GetLocalsFor(it.Current()); + HBasicBlock* handler = it.Current(); + handler->AddExceptionalPredecessor(instruction); + GrowableArray<HInstruction*>* handler_locals = GetLocalsFor(handler); for (size_t i = 0, e = current_locals_->Size(); i < e; ++i) { HInstruction* local_value = current_locals_->Get(i); if (local_value != nullptr) { diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc index 701dbb019b..40502c173b 100644 --- a/compiler/optimizing/ssa_liveness_analysis.cc +++ b/compiler/optimizing/ssa_liveness_analysis.cc @@ -225,7 +225,7 @@ void SsaLivenessAnalysis::ComputeLiveRanges() { // SsaLivenessAnalysis. 
for (size_t i = 0, e = environment->Size(); i < e; ++i) { HInstruction* instruction = environment->GetInstructionAt(i); - bool should_be_live = ShouldBeLiveForEnvironment(instruction); + bool should_be_live = ShouldBeLiveForEnvironment(current, instruction); if (should_be_live) { DCHECK(instruction->HasSsaIndex()); live_in->SetBit(instruction->GetSsaIndex()); diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h index 220ee6a8d0..a7044de850 100644 --- a/compiler/optimizing/ssa_liveness_analysis.h +++ b/compiler/optimizing/ssa_liveness_analysis.h @@ -1201,8 +1201,14 @@ class SsaLivenessAnalysis : public ValueObject { // Update the live_out set of the block and returns whether it has changed. bool UpdateLiveOut(const HBasicBlock& block); - static bool ShouldBeLiveForEnvironment(HInstruction* instruction) { + // Returns whether `instruction` in an HEnvironment held by `env_holder` + // should be kept live by the HEnvironment. + static bool ShouldBeLiveForEnvironment(HInstruction* env_holder, + HInstruction* instruction) { if (instruction == nullptr) return false; + // A value that's not live in compiled code may still be needed in interpreter, + // due to code motion, etc. + if (env_holder->IsDeoptimize()) return true; if (instruction->GetBlock()->GetGraph()->IsDebuggable()) return true; return instruction->GetType() == Primitive::kPrimNot; } diff --git a/compiler/trampolines/trampoline_compiler.h b/compiler/trampolines/trampoline_compiler.h index bdab2796d8..9fb22452ea 100644 --- a/compiler/trampolines/trampoline_compiler.h +++ b/compiler/trampolines/trampoline_compiler.h @@ -27,10 +27,10 @@ namespace art { // Create code that will invoke the function held in thread local storage. const std::vector<uint8_t>* CreateTrampoline32(InstructionSet isa, EntryPointCallingConvention abi, ThreadOffset<4> entry_point_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); const std::vector<uint8_t>* CreateTrampoline64(InstructionSet isa, EntryPointCallingConvention abi, ThreadOffset<8> entry_point_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); } // namespace art diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc index 413b9eaa8c..b499dddb0c 100644 --- a/compiler/utils/arm/assembler_thumb2.cc +++ b/compiler/utils/arm/assembler_thumb2.cc @@ -133,14 +133,27 @@ uint32_t Thumb2Assembler::AdjustFixups() { AdjustFixupIfNeeded(&fixup, &current_code_size, &fixups_to_recalculate); } while (!fixups_to_recalculate.empty()) { - // Pop the fixup. - FixupId fixup_id = fixups_to_recalculate.front(); - fixups_to_recalculate.pop_front(); - Fixup* fixup = GetFixup(fixup_id); - DCHECK_NE(buffer_.Load<int16_t>(fixup->GetLocation()), 0); - buffer_.Store<int16_t>(fixup->GetLocation(), 0); - // See if it needs adjustment. - AdjustFixupIfNeeded(fixup, &current_code_size, &fixups_to_recalculate); + do { + // Pop the fixup. + FixupId fixup_id = fixups_to_recalculate.front(); + fixups_to_recalculate.pop_front(); + Fixup* fixup = GetFixup(fixup_id); + DCHECK_NE(buffer_.Load<int16_t>(fixup->GetLocation()), 0); + buffer_.Store<int16_t>(fixup->GetLocation(), 0); + // See if it needs adjustment.
+ AdjustFixupIfNeeded(fixup, &current_code_size, &fixups_to_recalculate); + } while (!fixups_to_recalculate.empty()); + + if ((current_code_size & 2) != 0 && !literals_.empty()) { + // If we need to add padding before literals, this may just push some out of range, + // so recalculate all load literals. This makes up for the fact that we don't mark + // load literal as a dependency of all previous Fixups even though it actually is. + for (Fixup& fixup : fixups_) { + if (fixup.IsLoadLiteral()) { + AdjustFixupIfNeeded(&fixup, &current_code_size, &fixups_to_recalculate); + } + } + } } if (kIsDebugBuild) { // Check that no fixup is marked as being in fixups_to_recalculate anymore. diff --git a/compiler/utils/arm/assembler_thumb2.h b/compiler/utils/arm/assembler_thumb2.h index 838554ee6d..41eb5d36f2 100644 --- a/compiler/utils/arm/assembler_thumb2.h +++ b/compiler/utils/arm/assembler_thumb2.h @@ -489,6 +489,10 @@ class Thumb2Assembler FINAL : public ArmAssembler { return type_; } + bool IsLoadLiteral() const { + return GetType() >= kLoadLiteralNarrow; + } + Size GetOriginalSize() const { return original_size_; } diff --git a/compiler/utils/arm/assembler_thumb2_test.cc b/compiler/utils/arm/assembler_thumb2_test.cc index 68b7931a0c..004853f224 100644 --- a/compiler/utils/arm/assembler_thumb2_test.cc +++ b/compiler/utils/arm/assembler_thumb2_test.cc @@ -950,4 +950,65 @@ TEST_F(AssemblerThumb2Test, LoadLiteralDoubleFar) { __ GetAdjustedPosition(label.Position())); } +TEST_F(AssemblerThumb2Test, LoadLiteralBeyondMax1KiBDueToAlignmentOnSecondPass) { + // First part: as TwoCbzBeyondMaxOffset but add one 16-bit instruction to the end, + // so that the size is not Aligned<4>(.). On the first pass, the assembler resizes + // the second CBZ because it's out of range, then it will resize the first CBZ + // which has been pushed out of range. Thus, after the first pass, the code size + // will appear Aligned<4>(.) but the final size will not be. + Label label0, label1, label2; + __ cbz(arm::R0, &label1); + constexpr size_t kLdrR0R0Count1 = 63; + for (size_t i = 0; i != kLdrR0R0Count1; ++i) { + __ ldr(arm::R0, arm::Address(arm::R0)); + } + __ Bind(&label0); + __ cbz(arm::R0, &label2); + __ Bind(&label1); + constexpr size_t kLdrR0R0Count2 = 65; + for (size_t i = 0; i != kLdrR0R0Count2; ++i) { + __ ldr(arm::R0, arm::Address(arm::R0)); + } + __ Bind(&label2); + __ ldr(arm::R0, arm::Address(arm::R0)); + + std::string expected_part1 = + "cmp r0, #0\n" // cbz r0, label1 + "beq.n 1f\n" + + RepeatInsn(kLdrR0R0Count1, "ldr r0, [r0]\n") + + "0:\n" + "cmp r0, #0\n" // cbz r0, label2 + "beq.n 2f\n" + "1:\n" + + RepeatInsn(kLdrR0R0Count2, "ldr r0, [r0]\n") + + "2:\n" // Here the offset is Aligned<4>(.). + "ldr r0, [r0]\n"; // Make the first part + + // Second part: as LoadLiteralMax1KiB with the caveat that the offset of the load + // literal will not be Aligned<4>(.) but it will appear to be when we process the + // instruction during the first pass, so the literal will need a padding and it + // will push the literal out of range, so we shall end up with "ldr.w".
+ arm::Literal* literal = __ NewLiteral<int32_t>(0x12345678); + __ LoadLiteral(arm::R0, literal); + Label label; + __ Bind(&label); + constexpr size_t kLdrR0R0Count = 511; + for (size_t i = 0; i != kLdrR0R0Count; ++i) { + __ ldr(arm::R0, arm::Address(arm::R0)); + } + + std::string expected = + expected_part1 + + "1:\n" + "ldr.w r0, [pc, #((2f - 1b - 2) & ~2)]\n" + + RepeatInsn(kLdrR0R0Count, "ldr r0, [r0]\n") + + ".align 2, 0\n" + "2:\n" + ".word 0x12345678\n"; + DriverStr(expected, "LoadLiteralMax1KiB"); + + EXPECT_EQ(static_cast<uint32_t>(label.Position()) + 6u, + __ GetAdjustedPosition(label.Position())); +} + } // namespace art diff --git a/compiler/utils/assembler_thumb_test.cc b/compiler/utils/assembler_thumb_test.cc index 20f61f942b..cb01cea8ef 100644 --- a/compiler/utils/assembler_thumb_test.cc +++ b/compiler/utils/assembler_thumb_test.cc @@ -32,7 +32,7 @@ namespace arm { // Include results file (generated manually) #include "assembler_thumb_test_expected.cc.inc" -#ifndef HAVE_ANDROID_OS +#ifndef __ANDROID__ // This controls whether the results are printed to the // screen or compared against the expected output. // To generate new expected output, set this to true and @@ -72,7 +72,7 @@ void InitResults() { } std::string GetToolsDir() { -#ifndef HAVE_ANDROID_OS +#ifndef __ANDROID__ // This will only work on the host. There is no as, objcopy or objdump on the device. static std::string toolsdir; @@ -89,7 +89,7 @@ std::string GetToolsDir() { } void DumpAndCheck(std::vector<uint8_t>& code, const char* testname, const char* const* results) { -#ifndef HAVE_ANDROID_OS +#ifndef __ANDROID__ static std::string toolsdir = GetToolsDir(); ScratchFile file; diff --git a/compiler/utils/swap_space.cc b/compiler/utils/swap_space.cc index 325ee4fa01..42ed8810f8 100644 --- a/compiler/utils/swap_space.cc +++ b/compiler/utils/swap_space.cc @@ -143,7 +143,6 @@ SpaceChunk SwapSpace::NewFileChunk(size_t min_size) { LOG(ERROR) << "Unable to mmap new swap file chunk."; LOG(ERROR) << "Current size: " << size_ << " requested: " << next_part << "/" << min_size; LOG(ERROR) << "Free list:"; - MutexLock lock(Thread::Current(), lock_); DumpFreeMap(free_by_size_); LOG(ERROR) << "In free list: " << CollectFree(free_by_start_, free_by_size_); LOG(FATAL) << "Aborting..."; diff --git a/compiler/utils/swap_space.h b/compiler/utils/swap_space.h index 691df4a945..f7c772d673 100644 --- a/compiler/utils/swap_space.h +++ b/compiler/utils/swap_space.h @@ -60,15 +60,15 @@ class SwapSpace { public: SwapSpace(int fd, size_t initial_size); ~SwapSpace(); - void* Alloc(size_t size) LOCKS_EXCLUDED(lock_); - void Free(void* ptr, size_t size) LOCKS_EXCLUDED(lock_); + void* Alloc(size_t size) REQUIRES(!lock_); + void Free(void* ptr, size_t size) REQUIRES(!lock_); size_t GetSize() { return size_; } private: - SpaceChunk NewFileChunk(size_t min_size); + SpaceChunk NewFileChunk(size_t min_size) REQUIRES(lock_); int fd_; size_t size_; diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc index fa85ada864..a614193c96 100644 --- a/compiler/utils/x86/assembler_x86.cc +++ b/compiler/utils/x86/assembler_x86.cc @@ -1515,6 +1515,21 @@ void X86Assembler::repne_scasw() { } +void X86Assembler::repe_cmpsw() { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0xF3); + EmitUint8(0xA7); +} + + +void X86Assembler::repe_cmpsl() { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF3); + EmitUint8(0xA7); +} + + X86Assembler* X86Assembler::lock() { 
AssemblerBuffer::EnsureCapacity ensured(&buffer_); EmitUint8(0xF0); diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h index d1b4e1dc5f..ae8d7a1a1d 100644 --- a/compiler/utils/x86/assembler_x86.h +++ b/compiler/utils/x86/assembler_x86.h @@ -465,6 +465,8 @@ class X86Assembler FINAL : public Assembler { void jmp(Label* label); void repne_scasw(); + void repe_cmpsw(); + void repe_cmpsl(); X86Assembler* lock(); void cmpxchgl(const Address& address, Register reg); diff --git a/compiler/utils/x86/assembler_x86_test.cc b/compiler/utils/x86/assembler_x86_test.cc index aacc57bb0c..7663580793 100644 --- a/compiler/utils/x86/assembler_x86_test.cc +++ b/compiler/utils/x86/assembler_x86_test.cc @@ -196,4 +196,16 @@ TEST_F(AssemblerX86Test, Repnescasw) { DriverStr(expected, "Repnescasw"); } +TEST_F(AssemblerX86Test, Repecmpsw) { + GetAssembler()->repe_cmpsw(); + const char* expected = "repe cmpsw\n"; + DriverStr(expected, "Repecmpsw"); +} + +TEST_F(AssemblerX86Test, Repecmpsl) { + GetAssembler()->repe_cmpsl(); + const char* expected = "repe cmpsl\n"; + DriverStr(expected, "Repecmpsl"); +} + } // namespace art diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc index f35f51c494..1dd4a2e3e9 100644 --- a/compiler/utils/x86_64/assembler_x86_64.cc +++ b/compiler/utils/x86_64/assembler_x86_64.cc @@ -2073,6 +2073,21 @@ void X86_64Assembler::repne_scasw() { } +void X86_64Assembler::repe_cmpsw() { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0xF3); + EmitUint8(0xA7); +} + + +void X86_64Assembler::repe_cmpsl() { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF3); + EmitUint8(0xA7); +} + + void X86_64Assembler::LoadDoubleConstant(XmmRegister dst, double value) { // TODO: Need to have a code constants table. int64_t constant = bit_cast<int64_t, double>(value); diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h index 61ffeab1e8..89a5606399 100644 --- a/compiler/utils/x86_64/assembler_x86_64.h +++ b/compiler/utils/x86_64/assembler_x86_64.h @@ -603,6 +603,8 @@ class X86_64Assembler FINAL : public Assembler { void bswapq(CpuRegister dst); void repne_scasw(); + void repe_cmpsw(); + void repe_cmpsl(); // // Macros for High-level operations. diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc index 6da5c35731..e1e4c32d8f 100644 --- a/compiler/utils/x86_64/assembler_x86_64_test.cc +++ b/compiler/utils/x86_64/assembler_x86_64_test.cc @@ -35,7 +35,7 @@ TEST(AssemblerX86_64, CreateBuffer) { ASSERT_EQ(static_cast<size_t>(5), buffer.Size()); } -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ static constexpr size_t kRandomIterations = 1000; // Devices might be puny, don't stress them... #else static constexpr size_t kRandomIterations = 100000; // Hosts are pretty powerful. 
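The sweep from SHARED_LOCKS_REQUIRED/LOCKS_EXCLUDED to SHARED_REQUIRES/REQUIRES across these files moves to Clang's capability-style thread-safety attributes, and the swap_space.h hunk above shows both flavors: Alloc() and Free() take REQUIRES(!lock_), a negative capability saying the caller must not already hold lock_, while NewFileChunk() takes REQUIRES(lock_), meaning it may only be called with the lock held; SHARED_REQUIRES is the shared-lock counterpart used for Locks::mutator_lock_ throughout. Below is a minimal, self-contained sketch of the idea; the macro definitions and the SwapArena class are illustrative stand-ins, not ART's actual wrappers or types.

// Minimal sketch of Clang capability annotations (illustrative macros, not
// ART's real definitions).
#include <cstddef>
#include <mutex>
#include <new>

#if defined(__clang__)
#define TS_ATTRIBUTE(x) __attribute__((x))
#else
#define TS_ATTRIBUTE(x)  // no-op on other compilers
#endif

#define CAPABILITY(name)     TS_ATTRIBUTE(capability(name))
#define REQUIRES(...)        TS_ATTRIBUTE(requires_capability(__VA_ARGS__))
#define SHARED_REQUIRES(...) TS_ATTRIBUTE(requires_shared_capability(__VA_ARGS__))
#define ACQUIRE(...)         TS_ATTRIBUTE(acquire_capability(__VA_ARGS__))
#define RELEASE(...)         TS_ATTRIBUTE(release_capability(__VA_ARGS__))

class CAPABILITY("mutex") Mutex {
 public:
  void Lock() ACQUIRE() { mu_.lock(); }
  void Unlock() RELEASE() { mu_.unlock(); }
 private:
  std::mutex mu_;
};

class SwapArena {  // hypothetical class, loosely modeled on the SwapSpace API
 public:
  // REQUIRES(!lock_): callers must not hold lock_, because Alloc acquires it.
  void* Alloc(size_t size) REQUIRES(!lock_) {
    lock_.Lock();
    void* chunk = NewChunkLocked(size);
    lock_.Unlock();
    return chunk;
  }

 private:
  // REQUIRES(lock_): may only run with lock_ held; an unlocked call site is
  // flagged by -Wthread-safety at compile time.
  void* NewChunkLocked(size_t size) REQUIRES(lock_) { return ::operator new(size); }

  Mutex lock_;
};

With both annotations in place the analysis catches the two symmetric mistakes: calling Alloc() while already holding lock_ (reported when negative-capability checking is enabled) and calling NewChunkLocked() without holding it.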
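The new repe_cmpsw()/repe_cmpsl() emitters just append prefixes to the CMPS opcode (0x66 operand-size plus 0xF3 REPE in front of 0xA7 for the 16-bit form, 0xF3 0xA7 alone for the 32-bit form), so the interesting part is the instruction's behavior: it compares two buffers element by element through the source/destination index registers and stops at the first mismatch or when the count runs out. A rough C++ model of the 16-bit variant, assuming the forward direction (DF = 0); this describes the x86 semantics and is not ART code.

#include <cstddef>
#include <cstdint>

// Models `repe cmpsw` with lhs/rhs playing rsi/rdi and count playing rcx:
// compare 16-bit elements while they are equal and the count is non-zero.
// Returns the leftover count; the flags of the last comparison (not modeled
// here) tell the caller whether the buffers matched or how they ordered.
size_t RepeCmpsw(const uint16_t* lhs, const uint16_t* rhs, size_t count) {
  while (count != 0) {
    uint16_t a = *lhs++;
    uint16_t b = *rhs++;
    --count;     // the count is consumed for the element just compared
    if (a != b) {
      break;     // REPE stops as soon as a comparison clears ZF
    }
  }
  return count;
}

repe cmpsl is the same loop over 32-bit elements, which is why its emitter omits the 0x66 prefix.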
@@ -1263,4 +1263,16 @@ TEST_F(AssemblerX86_64Test, Repnescasw) { DriverStr(expected, "Repnescasw"); } +TEST_F(AssemblerX86_64Test, Repecmpsw) { + GetAssembler()->repe_cmpsw(); + const char* expected = "repe cmpsw\n"; + DriverStr(expected, "Repecmpsw"); +} + +TEST_F(AssemblerX86_64Test, Repecmpsl) { + GetAssembler()->repe_cmpsl(); + const char* expected = "repe cmpsl\n"; + DriverStr(expected, "Repecmpsl"); +} + } // namespace art diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc index 74ec2ed756..75d6137eae 100644 --- a/dex2oat/dex2oat.cc +++ b/dex2oat/dex2oat.cc @@ -280,6 +280,18 @@ NO_RETURN static void Usage(const char* fmt, ...) { UsageError(" Example: --num-dex-method=%d", CompilerOptions::kDefaultNumDexMethodsThreshold); UsageError(" Default: %d", CompilerOptions::kDefaultNumDexMethodsThreshold); UsageError(""); + UsageError(" --inline-depth-limit=<depth-limit>: the depth limit of inlining for fine tuning"); + UsageError(" the compiler. A zero value will disable inlining. Honored only by Optimizing."); + UsageError(" Example: --inline-depth-limit=%d", CompilerOptions::kDefaultInlineDepthLimit); + UsageError(" Default: %d", CompilerOptions::kDefaultInlineDepthLimit); + UsageError(""); + UsageError(" --inline-max-code-units=<code-units-count>: the maximum code units that a method"); + UsageError(" can have to be considered for inlining. A zero value will disable inlining."); + UsageError(" Honored only by Optimizing."); + UsageError(" Example: --inline-max-code-units=%d", + CompilerOptions::kDefaultInlineMaxCodeUnits); + UsageError(" Default: %d", CompilerOptions::kDefaultInlineMaxCodeUnits); + UsageError(""); UsageError(" --dump-timing: display a breakdown of where time was spent"); UsageError(""); UsageError(" --include-patch-information: Include patching information so the generated code"); @@ -550,6 +562,8 @@ class Dex2Oat FINAL { int small_method_threshold = CompilerOptions::kDefaultSmallMethodThreshold; int tiny_method_threshold = CompilerOptions::kDefaultTinyMethodThreshold; int num_dex_methods_threshold = CompilerOptions::kDefaultNumDexMethodsThreshold; + int inline_depth_limit = CompilerOptions::kDefaultInlineDepthLimit; + int inline_max_code_units = CompilerOptions::kDefaultInlineMaxCodeUnits; // Profile file to use double top_k_profile_threshold = CompilerOptions::kDefaultTopKProfileThreshold; @@ -720,6 +734,22 @@ class Dex2Oat FINAL { if (num_dex_methods_threshold < 0) { Usage("--num-dex-methods passed a negative value %s", num_dex_methods_threshold); } + } else if (option.starts_with("--inline-depth-limit=")) { + const char* limit = option.substr(strlen("--inline-depth-limit=")).data(); + if (!ParseInt(limit, &inline_depth_limit)) { + Usage("Failed to parse --inline-depth-limit '%s' as an integer", limit); + } + if (inline_depth_limit < 0) { + Usage("--inline-depth-limit passed a negative value %s", inline_depth_limit); + } + } else if (option.starts_with("--inline-max-code-units=")) { + const char* code_units = option.substr(strlen("--inline-max-code-units=")).data(); + if (!ParseInt(code_units, &inline_max_code_units)) { + Usage("Failed to parse --inline-max-code-units '%s' as an integer", code_units); + } + if (inline_max_code_units < 0) { + Usage("--inline-max-code-units passed a negative value %s", inline_max_code_units); + } } else if (option == "--host") { is_host_ = true; } else if (option == "--runtime-arg") { @@ -992,6 +1022,8 @@ class Dex2Oat FINAL { small_method_threshold, tiny_method_threshold, num_dex_methods_threshold, + inline_depth_limit, + 
inline_max_code_units, include_patch_information, top_k_profile_threshold, debuggable, @@ -1660,7 +1692,7 @@ class Dex2Oat FINAL { // Let the ImageWriter write the image file. If we do not compile PIC, also fix up the oat file. bool CreateImageFile() - LOCKS_EXCLUDED(Locks::mutator_lock_) { + REQUIRES(!Locks::mutator_lock_) { CHECK(image_writer_ != nullptr); if (!image_writer_->Write(image_filename_, oat_unstripped_, oat_location_)) { LOG(ERROR) << "Failed to create image file " << image_filename_; diff --git a/disassembler/disassembler_x86.cc b/disassembler/disassembler_x86.cc index 2ead4a2af5..44787a7ac8 100644 --- a/disassembler/disassembler_x86.cc +++ b/disassembler/disassembler_x86.cc @@ -1117,6 +1117,9 @@ DISASSEMBLER_ENTRY(cmp, opcode1 = opcode_tmp.c_str(); } break; + case 0xA7: + opcode1 = (prefix[2] == 0x66 ? "cmpsw" : "cmpsl"); + break; case 0xAF: opcode1 = (prefix[2] == 0x66 ? "scasw" : "scasl"); break; diff --git a/imgdiag/imgdiag.cc b/imgdiag/imgdiag.cc index dce5206608..304d4e5860 100644 --- a/imgdiag/imgdiag.cc +++ b/imgdiag/imgdiag.cc @@ -56,7 +56,7 @@ class ImgDiagDumper { image_location_(image_location), image_diff_pid_(image_diff_pid) {} - bool Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool Dump() SHARED_REQUIRES(Locks::mutator_lock_) { std::ostream& os = *os_; os << "MAGIC: " << image_header_.GetMagic() << "\n\n"; @@ -92,7 +92,7 @@ class ImgDiagDumper { return str.substr(idx + 1); } - bool DumpImageDiff(pid_t image_diff_pid) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool DumpImageDiff(pid_t image_diff_pid) SHARED_REQUIRES(Locks::mutator_lock_) { std::ostream& os = *os_; { @@ -140,7 +140,7 @@ class ImgDiagDumper { // Look at /proc/$pid/mem and only diff the things from there bool DumpImageDiffMap(pid_t image_diff_pid, const backtrace_map_t& boot_map) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { std::ostream& os = *os_; const size_t pointer_size = InstructionSetPointerSize( Runtime::Current()->GetInstructionSet()); @@ -683,7 +683,7 @@ class ImgDiagDumper { } static std::string GetClassDescriptor(mirror::Class* klass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { CHECK(klass != nullptr); std::string descriptor; diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc index 93254547d7..b8b6a5f7c0 100644 --- a/oatdump/oatdump.cc +++ b/oatdump/oatdump.cc @@ -499,7 +499,7 @@ class OatDumper { return oat_file_.GetOatHeader().GetInstructionSet(); } - const void* GetQuickOatCode(ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + const void* GetQuickOatCode(ArtMethod* m) SHARED_REQUIRES(Locks::mutator_lock_) { for (size_t i = 0; i < oat_dex_files_.size(); i++) { const OatFile::OatDexFile* oat_dex_file = oat_dex_files_[i]; CHECK(oat_dex_file != nullptr); @@ -1462,7 +1462,7 @@ class ImageDumper { image_header_(image_header), oat_dumper_options_(oat_dumper_options) {} - bool Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool Dump() SHARED_REQUIRES(Locks::mutator_lock_) { std::ostream& os = *os_; std::ostream& indent_os = vios_.Stream(); @@ -1664,7 +1664,7 @@ class ImageDumper { private: static void PrettyObjectValue(std::ostream& os, mirror::Class* type, mirror::Object* value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { CHECK(type != nullptr); if (value == nullptr) { os << StringPrintf("null %s\n", PrettyDescriptor(type).c_str()); @@ -1681,7 +1681,7 @@ class ImageDumper { } static void PrintField(std::ostream& os, 
ArtField* field, mirror::Object* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { os << StringPrintf("%s: ", field->GetName()); switch (field->GetTypeAsPrimitiveType()) { case Primitive::kPrimLong: @@ -1734,7 +1734,7 @@ class ImageDumper { } static void DumpFields(std::ostream& os, mirror::Object* obj, mirror::Class* klass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { mirror::Class* super = klass->GetSuperClass(); if (super != nullptr) { DumpFields(os, obj, super); @@ -1750,7 +1750,7 @@ class ImageDumper { } const void* GetQuickOatCodeBegin(ArtMethod* m) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { const void* quick_code = m->GetEntryPointFromQuickCompiledCodePtrSize( InstructionSetPointerSize(oat_dumper_->GetOatInstructionSet())); if (Runtime::Current()->GetClassLinker()->IsQuickResolutionStub(quick_code)) { @@ -1763,7 +1763,7 @@ class ImageDumper { } uint32_t GetQuickOatCodeSize(ArtMethod* m) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { const uint32_t* oat_code_begin = reinterpret_cast<const uint32_t*>(GetQuickOatCodeBegin(m)); if (oat_code_begin == nullptr) { return 0; @@ -1772,7 +1772,7 @@ class ImageDumper { } const void* GetQuickOatCodeEnd(ArtMethod* m) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { const uint8_t* oat_code_begin = reinterpret_cast<const uint8_t*>(GetQuickOatCodeBegin(m)); if (oat_code_begin == nullptr) { return nullptr; @@ -1780,7 +1780,7 @@ class ImageDumper { return oat_code_begin + GetQuickOatCodeSize(m); } - static void Callback(mirror::Object* obj, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + static void Callback(mirror::Object* obj, void* arg) SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(obj != nullptr); DCHECK(arg != nullptr); ImageDumper* state = reinterpret_cast<ImageDumper*>(arg); @@ -1882,7 +1882,7 @@ class ImageDumper { } void DumpMethod(ArtMethod* method, ImageDumper* state, std::ostream& indent_os) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(method != nullptr); const auto image_pointer_size = InstructionSetPointerSize(state->oat_dumper_->GetOatInstructionSet()); @@ -2070,7 +2070,7 @@ class ImageDumper { } void DumpOutliers(std::ostream& os) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { size_t sum_of_sizes = 0; size_t sum_of_sizes_squared = 0; size_t sum_of_expansion = 0; @@ -2171,7 +2171,7 @@ class ImageDumper { } void Dump(std::ostream& os, std::ostream& indent_os) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { { os << "art_file_bytes = " << PrettySize(file_bytes) << "\n\n" << "art_file_bytes = header_bytes + object_bytes + alignment_bytes\n"; diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc index dbd1d23634..1ed65974dc 100644 --- a/patchoat/patchoat.cc +++ b/patchoat/patchoat.cc @@ -445,7 +445,7 @@ class FixupRootVisitor : public RootVisitor { } void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { for (size_t i = 0; i < count; ++i) { *roots[i] = patch_oat_->RelocatedAddressOfPointer(*roots[i]); } @@ -453,7 +453,7 @@ class FixupRootVisitor : public RootVisitor { void VisitRoots(mirror::CompressedReference<mirror::Object>** 
roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { for (size_t i = 0; i < count; ++i) { roots[i]->Assign(patch_oat_->RelocatedAddressOfPointer(roots[i]->AsMirrorPtr())); } diff --git a/patchoat/patchoat.h b/patchoat/patchoat.h index 23abca8c7e..466dacb492 100644 --- a/patchoat/patchoat.h +++ b/patchoat/patchoat.h @@ -94,16 +94,16 @@ class PatchOat { bool new_oat_out); // Output oat was newly created? static void BitmapCallback(mirror::Object* obj, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { reinterpret_cast<PatchOat*>(arg)->VisitObject(obj); } void VisitObject(mirror::Object* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void FixupMethod(ArtMethod* object, ArtMethod* copy) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void FixupNativePointerArray(mirror::PointerArray* object) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool InHeap(mirror::Object*); // Patches oat in place, modifying the oat_file given to the constructor. @@ -113,13 +113,13 @@ class PatchOat { template <typename ElfFileImpl> bool PatchOatHeader(ElfFileImpl* oat_file); - bool PatchImage() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void PatchArtFields(const ImageHeader* image_header) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void PatchArtMethods(const ImageHeader* image_header) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool PatchImage() SHARED_REQUIRES(Locks::mutator_lock_); + void PatchArtFields(const ImageHeader* image_header) SHARED_REQUIRES(Locks::mutator_lock_); + void PatchArtMethods(const ImageHeader* image_header) SHARED_REQUIRES(Locks::mutator_lock_); void PatchInternedStrings(const ImageHeader* image_header) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void PatchDexFileArrays(mirror::ObjectArray<mirror::Object>* img_roots) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool WriteElf(File* out); bool WriteImage(File* out); @@ -177,10 +177,15 @@ class PatchOat { PatchVisitor(PatchOat* patcher, mirror::Object* copy) : patcher_(patcher), copy_(copy) {} ~PatchVisitor() {} void operator() (mirror::Object* obj, MemberOffset off, bool b) const - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_); + REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_); // For reference classes. void operator() (mirror::Class* cls, mirror::Reference* ref) const - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_); + REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_); + // TODO: Consider using these for updating native class roots? 
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) + const {} + void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {} + private: PatchOat* const patcher_; mirror::Object* const copy_; diff --git a/runtime/Android.mk b/runtime/Android.mk index fe79e72031..ce3e6d1f37 100644 --- a/runtime/Android.mk +++ b/runtime/Android.mk @@ -39,6 +39,7 @@ LIBART_COMMON_SRC_FILES := \ base/unix_file/random_access_file_utils.cc \ check_jni.cc \ class_linker.cc \ + class_table.cc \ common_throws.cc \ debugger.cc \ dex_file.cc \ @@ -340,10 +341,13 @@ LIBART_ENUM_OPERATOR_OUT_HEADER_FILES := \ LIBART_CFLAGS := -DBUILDING_LIBART=1 +LIBART_TARGET_CFLAGS := +LIBART_HOST_CFLAGS := + ifeq ($(MALLOC_IMPL),dlmalloc) - LIBART_CFLAGS += -DUSE_DLMALLOC + LIBART_TARGET_CFLAGS += -DUSE_DLMALLOC else - LIBART_CFLAGS += -DUSE_JEMALLOC + LIBART_TARGET_CFLAGS += -DUSE_JEMALLOC endif # Default dex2oat instruction set features. @@ -439,8 +443,10 @@ $$(ENUM_OPERATOR_OUT_GEN): $$(GENERATED_SRC_DIR)/%_operator_out.cc : $(LOCAL_PAT LOCAL_CFLAGS := $$(LIBART_CFLAGS) LOCAL_LDFLAGS := $$(LIBART_LDFLAGS) ifeq ($$(art_target_or_host),target) + LOCAL_CFLAGS += $$(LIBART_TARGET_CFLAGS) LOCAL_LDFLAGS += $$(LIBART_TARGET_LDFLAGS) else #host + LOCAL_CFLAGS += $$(LIBART_HOST_CFLAGS) LOCAL_LDFLAGS += $$(LIBART_HOST_LDFLAGS) ifeq ($$(art_static_or_shared),static) LOCAL_LDFLAGS += -static @@ -580,4 +586,6 @@ LIBART_HOST_SRC_FILES_32 := LIBART_HOST_SRC_FILES_64 := LIBART_ENUM_OPERATOR_OUT_HEADER_FILES := LIBART_CFLAGS := +LIBART_TARGET_CFLAGS := +LIBART_HOST_CFLAGS := build-libart := diff --git a/runtime/arch/arm/context_arm.h b/runtime/arch/arm/context_arm.h index a58aecbc6b..77bb5c8399 100644 --- a/runtime/arch/arm/context_arm.h +++ b/runtime/arch/arm/context_arm.h @@ -35,7 +35,7 @@ class ArmContext : public Context { void Reset() OVERRIDE; - void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); void SetSP(uintptr_t new_sp) OVERRIDE { SetGPR(SP, new_sp); diff --git a/runtime/arch/arm/entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc index 2f2654d4f6..be9af9871d 100644 --- a/runtime/arch/arm/entrypoints_init_arm.cc +++ b/runtime/arch/arm/entrypoints_init_arm.cc @@ -171,6 +171,7 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints, // Read barrier qpoints->pReadBarrierJni = ReadBarrierJni; + qpoints->pReadBarrierSlow = artReadBarrierSlow; } } // namespace art diff --git a/runtime/arch/arm/instruction_set_features_arm.cc b/runtime/arch/arm/instruction_set_features_arm.cc index f8590d3bd2..28d1942f0a 100644 --- a/runtime/arch/arm/instruction_set_features_arm.cc +++ b/runtime/arch/arm/instruction_set_features_arm.cc @@ -16,7 +16,7 @@ #include "instruction_set_features_arm.h" -#if defined(HAVE_ANDROID_OS) && defined(__arm__) +#if defined(__ANDROID__) && defined(__arm__) #include <sys/auxv.h> #include <asm/hwcap.h> #endif @@ -166,7 +166,7 @@ const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromHwcap() { bool has_div = false; bool has_lpae = false; -#if defined(HAVE_ANDROID_OS) && defined(__arm__) +#if defined(__ANDROID__) && defined(__arm__) uint64_t hwcaps = getauxval(AT_HWCAP); LOG(INFO) << "hwcaps=" << hwcaps; if ((hwcaps & HWCAP_IDIVT) != 0) { diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S index 20001109a6..f6d954f4f1 100644 
--- a/runtime/arch/arm/quick_entrypoints_arm.S +++ b/runtime/arch/arm/quick_entrypoints_arm.S @@ -51,7 +51,6 @@ sub sp, #12 @ 3 words of space, bottom word will hold Method* .cfi_adjust_cfa_offset 12 RUNTIME_CURRENT1 \rTemp1, \rTemp2 @ Load Runtime::Current into rTemp1. - THIS_LOAD_REQUIRES_READ_BARRIER ldr \rTemp1, [\rTemp1, #RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET] @ rTemp1 is kSaveAll Method*. str \rTemp1, [sp, #0] @ Place Method* at bottom of stack. str sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET] @ Place sp in Thread::Current()->top_quick_frame. @@ -79,7 +78,6 @@ sub sp, #4 @ bottom word will hold Method* .cfi_adjust_cfa_offset 4 RUNTIME_CURRENT2 \rTemp1, \rTemp2 @ Load Runtime::Current into rTemp1. - THIS_LOAD_REQUIRES_READ_BARRIER ldr \rTemp1, [\rTemp1, #RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET] @ rTemp1 is kRefsOnly Method*. str \rTemp1, [sp, #0] @ Place Method* at bottom of stack. str sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET] @ Place sp in Thread::Current()->top_quick_frame. @@ -139,7 +137,6 @@ .macro SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME rTemp1, rTemp2 SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_REGISTERS_ONLY RUNTIME_CURRENT3 \rTemp1, \rTemp2 @ Load Runtime::Current into rTemp1. - THIS_LOAD_REQUIRES_READ_BARRIER @ rTemp1 is kRefsAndArgs Method*. ldr \rTemp1, [\rTemp1, #RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET] str \rTemp1, [sp, #0] @ Place Method* at bottom of stack. @@ -171,7 +168,6 @@ .cfi_adjust_cfa_offset -40 .endm - .macro RETURN_IF_RESULT_IS_ZERO cbnz r0, 1f @ result non-zero branch over bx lr @ return @@ -588,6 +584,59 @@ ENTRY art_quick_check_cast bkpt END art_quick_check_cast +// Restore rReg's value from [sp, #offset] if rReg is not the same as rExclude. +.macro POP_REG_NE rReg, offset, rExclude + .ifnc \rReg, \rExclude + ldr \rReg, [sp, #\offset] @ restore rReg + .cfi_restore \rReg + .endif +.endm + + /* + * Macro to insert read barrier, only used in art_quick_aput_obj. + * rObj and rDest are registers, offset is a defined literal such as MIRROR_OBJECT_CLASS_OFFSET. + * TODO: When read barrier has a fast path, add heap unpoisoning support for the fast path. + */ +.macro READ_BARRIER rDest, rObj, offset +#ifdef USE_READ_BARRIER + push {r0-r3, ip, lr} @ 6 words for saved registers (used in art_quick_aput_obj) + .cfi_adjust_cfa_offset 24 + .cfi_rel_offset r0, 0 + .cfi_rel_offset r1, 4 + .cfi_rel_offset r2, 8 + .cfi_rel_offset r3, 12 + .cfi_rel_offset ip, 16 + .cfi_rel_offset lr, 20 + sub sp, #8 @ push padding + .cfi_adjust_cfa_offset 8 + @ mov r0, r0 @ pass ref in r0 (no-op for now since parameter ref is unused) + .ifnc \rObj, r1 + mov r1, \rObj @ pass rObj + .endif + mov r2, #\offset @ pass offset + bl artReadBarrierSlow @ artReadBarrierSlow(ref, rObj, offset) + @ No need to unpoison return value in r0, artReadBarrierSlow() would do the unpoisoning. + .ifnc \rDest, r0 + mov \rDest, r0 @ save return value in rDest + .endif + add sp, #8 @ pop padding + .cfi_adjust_cfa_offset -8 + POP_REG_NE r0, 0, \rDest @ conditionally restore saved registers + POP_REG_NE r1, 4, \rDest + POP_REG_NE r2, 8, \rDest + POP_REG_NE r3, 12, \rDest + POP_REG_NE ip, 16, \rDest + add sp, #20 + .cfi_adjust_cfa_offset -20 + pop {lr} @ restore lr + .cfi_adjust_cfa_offset -4 + .cfi_restore lr +#else + ldr \rDest, [\rObj, #\offset] + UNPOISON_HEAP_REF \rDest +#endif // USE_READ_BARRIER +.endm + /* * Entry from managed code for array put operations of objects where the value being stored * needs to be checked for compatibility. 
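Logically, the READ_BARRIER macro above replaces each ldr + UNPOISON_HEAP_REF pair: with USE_READ_BARRIER disabled it is still that plain load, and with it enabled the macro spills the caller-saved registers and calls artReadBarrierSlow(ref, obj, offset), taking the returned (already unpoisoned) reference as the loaded value. A rough C++ model of that choice follows; the helper names are hypothetical and the artReadBarrierSlow prototype is paraphrased from the assembly comments, so treat this as a sketch rather than the runtime's real declarations.

#include <cstdint>

struct Object;  // opaque heap object for the sketch

// Paraphrased from the `artReadBarrierSlow(ref, obj, offset)` comments in the
// macro; the first argument is currently unused by the stub, which simply
// passes whatever happens to be in r0/x0.
extern "C" Object* artReadBarrierSlow(Object* ref, Object* obj, uint32_t offset);

Object* UnpoisonHeapRef(uint32_t raw_ref);  // hypothetical helper

Object* LoadHeapReference(Object* obj, uint32_t offset) {
#ifdef USE_READ_BARRIER
  // Slow path on every load for now (the TODO in the macro notes that a fast
  // path would also need heap-unpoisoning support). The runtime hands back
  // the reference the caller should use.
  return artReadBarrierSlow(/* ref= */ nullptr, obj, offset);
#else
  // Plain 32-bit reference load at obj + offset, then unpoison.
  uint32_t raw = *reinterpret_cast<const uint32_t*>(
      reinterpret_cast<const uint8_t*>(obj) + offset);
  return UnpoisonHeapRef(raw);
#endif
}

The spill/restore choreography around the call (with POP_REG_NE skipping the destination register so the result survives the restore) is what makes the macro so much larger than the two instructions it replaces, which matters for the branch-range change in the aput stub below.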
@@ -609,15 +658,21 @@ ENTRY art_quick_aput_obj_with_bound_check b art_quick_throw_array_bounds END art_quick_aput_obj_with_bound_check +#ifdef USE_READ_BARRIER + .extern artReadBarrierSlow +#endif .hidden art_quick_aput_obj ENTRY art_quick_aput_obj +#ifdef USE_READ_BARRIER + @ The offset to .Ldo_aput_null is too large to use cbz due to expansion from READ_BARRIER macro. + tst r2, r2 + beq .Ldo_aput_null +#else cbz r2, .Ldo_aput_null - ldr r3, [r0, #MIRROR_OBJECT_CLASS_OFFSET] - UNPOISON_HEAP_REF r3 - ldr ip, [r2, #MIRROR_OBJECT_CLASS_OFFSET] - UNPOISON_HEAP_REF ip - ldr r3, [r3, #MIRROR_CLASS_COMPONENT_TYPE_OFFSET] - UNPOISON_HEAP_REF r3 +#endif // USE_READ_BARRIER + READ_BARRIER r3, r0, MIRROR_OBJECT_CLASS_OFFSET + READ_BARRIER ip, r2, MIRROR_OBJECT_CLASS_OFFSET + READ_BARRIER r3, r3, MIRROR_CLASS_COMPONENT_TYPE_OFFSET cmp r3, ip @ value's type == array's component type - trivial assignability bne .Lcheck_assignability .Ldo_aput: diff --git a/runtime/arch/arm64/context_arm64.h b/runtime/arch/arm64/context_arm64.h index 0383ad628a..1c99f3c42d 100644 --- a/runtime/arch/arm64/context_arm64.h +++ b/runtime/arch/arm64/context_arm64.h @@ -35,7 +35,7 @@ class Arm64Context : public Context { void Reset() OVERRIDE; - void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); void SetSP(uintptr_t new_sp) OVERRIDE { SetGPR(SP, new_sp); diff --git a/runtime/arch/arm64/entrypoints_init_arm64.cc b/runtime/arch/arm64/entrypoints_init_arm64.cc index 2ce2a29bbf..0f06727d0d 100644 --- a/runtime/arch/arm64/entrypoints_init_arm64.cc +++ b/runtime/arch/arm64/entrypoints_init_arm64.cc @@ -155,6 +155,7 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints, // Read barrier qpoints->pReadBarrierJni = ReadBarrierJni; + qpoints->pReadBarrierSlow = artReadBarrierSlow; }; } // namespace art diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S index 6d9b44a1d2..8ba3d4392d 100644 --- a/runtime/arch/arm64/quick_entrypoints_arm64.S +++ b/runtime/arch/arm64/quick_entrypoints_arm64.S @@ -31,8 +31,6 @@ ldr xIP0, [xIP0] // xIP0 = & (art::Runtime * art::Runtime.instance_) . // xIP0 = (ArtMethod*) Runtime.instance_.callee_save_methods[kRefAndArgs] . - THIS_LOAD_REQUIRES_READ_BARRIER - // Loads appropriate callee-save-method. ldr xIP0, [xIP0, RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET ] @@ -95,8 +93,6 @@ ldr xIP0, [xIP0] // xIP0 = & (art::Runtime * art::Runtime.instance_) . // xIP0 = (ArtMethod*) Runtime.instance_.callee_save_methods[kRefOnly] . - THIS_LOAD_REQUIRES_READ_BARRIER - // Loads appropriate callee-save-method. ldr xIP0, [xIP0, RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET ] @@ -251,7 +247,6 @@ ldr xIP0, [xIP0] // xIP0 = & (art::Runtime * art::Runtime.instance_) . // xIP0 = (ArtMethod*) Runtime.instance_.callee_save_methods[kRefAndArgs] . - THIS_LOAD_REQUIRES_READ_BARRIER ldr xIP0, [xIP0, RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET ] SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_INTERNAL @@ -542,18 +537,18 @@ SAVE_SIZE_AND_METHOD=SAVE_SIZE+8 // W10 - temporary add x9, sp, #8 // Destination address is bottom of stack + null. - // Use \@ to differentiate between macro invocations. -.LcopyParams\@: + // Copy parameters into the stack. Use numeric label as this is a macro and Clang's assembler + // does not have unique-id variables. 
+1: cmp w2, #0 - beq .LendCopyParams\@ + beq 2f sub w2, w2, #4 // Need 65536 bytes of range. ldr w10, [x1, x2] str w10, [x9, x2] - b .LcopyParams\@ - -.LendCopyParams\@: + b 1b +2: // Store null into ArtMethod* at bottom of frame. str xzr, [sp] .endm @@ -592,26 +587,29 @@ SAVE_SIZE_AND_METHOD=SAVE_SIZE+8 // Store result (w0/x0/s0/d0) appropriately, depending on resultType. ldrb w10, [x5] + // Check the return type and store the correct register into the jvalue in memory. + // Use numeric label as this is a macro and Clang's assembler does not have unique-id variables. + // Don't set anything for a void type. cmp w10, #'V' - beq .Lexit_art_quick_invoke_stub\@ + beq 3f + // Is it a double? cmp w10, #'D' - bne .Lreturn_is_float\@ + bne 1f str d0, [x4] - b .Lexit_art_quick_invoke_stub\@ + b 3f -.Lreturn_is_float\@: +1: // Is it a float? cmp w10, #'F' - bne .Lreturn_is_int\@ + bne 2f str s0, [x4] - b .Lexit_art_quick_invoke_stub\@ + b 3f - // Just store x0. Doesn't matter if it is 64 or 32 bits. -.Lreturn_is_int\@: +2: // Just store x0. Doesn't matter if it is 64 or 32 bits. str x0, [x4] -.Lexit_art_quick_invoke_stub\@: +3: // Finish up. ldp x2, x19, [xFP, #32] // Restore stack pointer and x19. .cfi_restore x19 mov sp, x2 @@ -1119,6 +1117,62 @@ ENTRY art_quick_check_cast brk 0 // We should not return here... END art_quick_check_cast +// Restore xReg's value from [sp, #offset] if xReg is not the same as xExclude. +.macro POP_REG_NE xReg, offset, xExclude + .ifnc \xReg, \xExclude + ldr \xReg, [sp, #\offset] // restore xReg + .cfi_restore \xReg + .endif +.endm + + /* + * Macro to insert read barrier, only used in art_quick_aput_obj. + * xDest, wDest and xObj are registers, offset is a defined literal such as + * MIRROR_OBJECT_CLASS_OFFSET. Dest needs both x and w versions of the same register to handle + * name mismatch between instructions. This macro uses the lower 32b of register when possible. + * TODO: When read barrier has a fast path, add heap unpoisoning support for the fast path. + */ +.macro READ_BARRIER xDest, wDest, xObj, offset +#ifdef USE_READ_BARRIER + // Store registers used in art_quick_aput_obj (x0-x4, LR), stack is 16B aligned. + stp x0, x1, [sp, #-48]! + .cfi_adjust_cfa_offset 48 + .cfi_rel_offset x0, 0 + .cfi_rel_offset x1, 8 + stp x2, x3, [sp, #16] + .cfi_rel_offset x2, 16 + .cfi_rel_offset x3, 24 + stp x4, xLR, [sp, #32] + .cfi_rel_offset x4, 32 + .cfi_rel_offset x30, 40 + + // mov x0, x0 // pass ref in x0 (no-op for now since parameter ref is unused) + .ifnc \xObj, x1 + mov x1, \xObj // pass xObj + .endif + mov w2, #\offset // pass offset + bl artReadBarrierSlow // artReadBarrierSlow(ref, xObj, offset) + // No need to unpoison return value in w0, artReadBarrierSlow() would do the unpoisoning. + .ifnc \wDest, w0 + mov \wDest, w0 // save return value in wDest + .endif + + // Conditionally restore saved registers + POP_REG_NE x0, 0, \xDest + POP_REG_NE x1, 8, \xDest + POP_REG_NE x2, 16, \xDest + POP_REG_NE x3, 24, \xDest + POP_REG_NE x4, 32, \xDest + ldr xLR, [sp, #40] + .cfi_restore x30 + add sp, sp, #48 + .cfi_adjust_cfa_offset -48 +#else + ldr \wDest, [\xObj, #\offset] // Heap reference = 32b. This also zero-extends to \xDest. + UNPOISON_HEAP_REF \wDest +#endif // USE_READ_BARRIER +.endm + /* * Entry from managed code for array put operations of objects where the value being stored * needs to be checked for compatibility. 
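With the barrier macros in place, art_quick_aput_obj (the ARM version above, and the arm64 and MIPS variants that follow) performs its three reference loads through READ_BARRIER; on ARM even the leading null check becomes tst/beq because the expanded macro pushes the branch target beyond cbz's short forward range. The overall shape of the stub's check is sketched below, with every name an illustrative stand-in rather than a runtime symbol.

#include <cstdint>

struct Object;
struct Class;

// Stand-ins for the three READ_BARRIER loads and the slow-path helpers.
Class* LoadClassWithBarrier(Object* obj);                 // obj->klass_
Class* LoadComponentTypeWithBarrier(Class* array_class);  // klass->component_type_
bool IsAssignableSlowPath(Class* component_type, Class* value_class);
void StoreReference(Object* array, int32_t index, Object* value);  // poison + str
void ThrowArrayStoreException(Object* array, Object* value);

void AputObject(Object* array, int32_t index, Object* value) {
  if (value == nullptr) {
    StoreReference(array, index, nullptr);  // null stores skip the type check
    return;
  }
  Class* component = LoadComponentTypeWithBarrier(LoadClassWithBarrier(array));
  Class* value_class = LoadClassWithBarrier(value);
  if (component == value_class ||                      // trivial assignability
      IsAssignableSlowPath(component, value_class)) {  // otherwise ask the runtime
    StoreReference(array, index, value);  // the real stub also marks the GC card here
    return;
  }
  ThrowArrayStoreException(array, value);  // stand-in for the stub's throw path
}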
@@ -1146,17 +1200,17 @@ ENTRY art_quick_aput_obj_with_bound_check b art_quick_throw_array_bounds END art_quick_aput_obj_with_bound_check +#ifdef USE_READ_BARRIER + .extern artReadBarrierSlow +#endif ENTRY art_quick_aput_obj cbz x2, .Ldo_aput_null - ldr w3, [x0, #MIRROR_OBJECT_CLASS_OFFSET] // Heap reference = 32b + READ_BARRIER x3, w3, x0, MIRROR_OBJECT_CLASS_OFFSET // Heap reference = 32b // This also zero-extends to x3 - UNPOISON_HEAP_REF w3 - ldr w4, [x2, #MIRROR_OBJECT_CLASS_OFFSET] // Heap reference = 32b + READ_BARRIER x4, w4, x2, MIRROR_OBJECT_CLASS_OFFSET // Heap reference = 32b // This also zero-extends to x4 - UNPOISON_HEAP_REF w4 - ldr w3, [x3, #MIRROR_CLASS_COMPONENT_TYPE_OFFSET] // Heap reference = 32b + READ_BARRIER x3, w3, x3, MIRROR_CLASS_COMPONENT_TYPE_OFFSET // Heap reference = 32b // This also zero-extends to x3 - UNPOISON_HEAP_REF w3 cmp w3, w4 // value's type == array's component type - trivial assignability bne .Lcheck_assignability .Ldo_aput: diff --git a/runtime/arch/context.h b/runtime/arch/context.h index f86f9ae117..9ef761e981 100644 --- a/runtime/arch/context.h +++ b/runtime/arch/context.h @@ -42,7 +42,7 @@ class Context { // Reads values from callee saves in the given frame. The frame also holds // the method that holds the layout. virtual void FillCalleeSaves(const StackVisitor& fr) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0; + SHARED_REQUIRES(Locks::mutator_lock_) = 0; // Sets the stack pointer value. virtual void SetSP(uintptr_t new_sp) = 0; diff --git a/runtime/arch/instruction_set_features_test.cc b/runtime/arch/instruction_set_features_test.cc index e6f4e7ab01..99c2d4dc74 100644 --- a/runtime/arch/instruction_set_features_test.cc +++ b/runtime/arch/instruction_set_features_test.cc @@ -18,7 +18,7 @@ #include <gtest/gtest.h> -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ #include "cutils/properties.h" #endif @@ -26,7 +26,7 @@ namespace art { -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ #if defined(__aarch64__) TEST(InstructionSetFeaturesTest, DISABLED_FeaturesFromSystemPropertyVariant) { LOG(WARNING) << "Test disabled due to no CPP define for A53 erratum 835769"; @@ -111,7 +111,7 @@ TEST(InstructionSetFeaturesTest, FeaturesFromCpuInfo) { } #endif -#ifndef HAVE_ANDROID_OS +#ifndef __ANDROID__ TEST(InstructionSetFeaturesTest, HostFeaturesFromCppDefines) { std::string error_msg; std::unique_ptr<const InstructionSetFeatures> default_features( diff --git a/runtime/arch/mips/context_mips.h b/runtime/arch/mips/context_mips.h index d01b95e5f6..38cf29a6aa 100644 --- a/runtime/arch/mips/context_mips.h +++ b/runtime/arch/mips/context_mips.h @@ -34,7 +34,7 @@ class MipsContext : public Context { void Reset() OVERRIDE; - void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); void SetSP(uintptr_t new_sp) OVERRIDE { SetGPR(SP, new_sp); diff --git a/runtime/arch/mips/entrypoints_direct_mips.h b/runtime/arch/mips/entrypoints_direct_mips.h index b1aa3ee63f..f9c53152f6 100644 --- a/runtime/arch/mips/entrypoints_direct_mips.h +++ b/runtime/arch/mips/entrypoints_direct_mips.h @@ -44,7 +44,8 @@ static constexpr bool IsDirectEntrypoint(QuickEntrypointEnum entrypoint) { entrypoint == kQuickCmpgDouble || entrypoint == kQuickCmpgFloat || entrypoint == kQuickCmplDouble || - entrypoint == kQuickCmplFloat; + entrypoint == kQuickCmplFloat || + entrypoint == kQuickReadBarrierSlow; } } // namespace art diff --git 
a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc index 09a018ebc6..4e4b91fdcd 100644 --- a/runtime/arch/mips/entrypoints_init_mips.cc +++ b/runtime/arch/mips/entrypoints_init_mips.cc @@ -279,6 +279,8 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints, qpoints->pReadBarrierJni = ReadBarrierJni; static_assert(!IsDirectEntrypoint(kQuickReadBarrierJni), "Non-direct C stub marked direct."); + qpoints->pReadBarrierSlow = artReadBarrierSlow; + static_assert(IsDirectEntrypoint(kQuickReadBarrierSlow), "Direct C stub not marked direct."); }; } // namespace art diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S index 2819f92a0d..4d5004f444 100644 --- a/runtime/arch/mips/quick_entrypoints_mips.S +++ b/runtime/arch/mips/quick_entrypoints_mips.S @@ -79,7 +79,6 @@ lw $t0, %got(_ZN3art7Runtime9instance_E)($gp) lw $t0, 0($t0) - THIS_LOAD_REQUIRES_READ_BARRIER lw $t0, RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET($t0) sw $t0, 0($sp) # Place Method* at bottom of stack. sw $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame. @@ -127,7 +126,6 @@ lw $t0, %got(_ZN3art7Runtime9instance_E)($gp) lw $t0, 0($t0) - THIS_LOAD_REQUIRES_READ_BARRIER lw $t0, RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET($t0) sw $t0, 0($sp) # Place Method* at bottom of stack. sw $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame. @@ -219,7 +217,6 @@ SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_REGISTERS_ONLY lw $t0, %got(_ZN3art7Runtime9instance_E)($gp) lw $t0, 0($t0) - THIS_LOAD_REQUIRES_READ_BARRIER lw $t0, RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET($t0) sw $t0, 0($sp) # Place Method* at bottom of stack. sw $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame. @@ -627,6 +624,76 @@ ENTRY art_quick_check_cast END art_quick_check_cast /* + * Restore rReg's value from offset($sp) if rReg is not the same as rExclude. + * nReg is the register number for rReg. + */ +.macro POP_REG_NE rReg, nReg, offset, rExclude + .ifnc \rReg, \rExclude + lw \rReg, \offset($sp) # restore rReg + .cfi_restore \nReg + .endif +.endm + + /* + * Macro to insert read barrier, only used in art_quick_aput_obj. + * rObj and rDest are registers, offset is a defined literal such as MIRROR_OBJECT_CLASS_OFFSET. + * TODO: When read barrier has a fast path, add heap unpoisoning support for the fast path. + */ +.macro READ_BARRIER rDest, rObj, offset +#ifdef USE_READ_BARRIER + # saved registers used in art_quick_aput_obj: a0-a2, t0-t1, t9, ra. 8 words for 16B alignment. + addiu $sp, $sp, -32 + .cfi_adjust_cfa_offset 32 + sw $ra, 28($sp) + .cfi_rel_offset 31, 28 + sw $t9, 24($sp) + .cfi_rel_offset 25, 24 + sw $t1, 20($sp) + .cfi_rel_offset 9, 20 + sw $t0, 16($sp) + .cfi_rel_offset 8, 16 + sw $a2, 8($sp) # padding slot at offset 12 (padding can be any slot in the 32B) + .cfi_rel_offset 6, 8 + sw $a1, 4($sp) + .cfi_rel_offset 5, 4 + sw $a0, 0($sp) + .cfi_rel_offset 4, 0 + + # move $a0, $a0 # pass ref in a0 (no-op for now since parameter ref is unused) + .ifnc \rObj, $a1 + move $a1, \rObj # pass rObj + .endif + addiu $a2, $zero, \offset # pass offset + jal artReadBarrierSlow # artReadBarrierSlow(ref, rObj, offset) + addiu $sp, $sp, -16 # Use branch delay slot to reserve argument slots on the stack + # before the call to artReadBarrierSlow. 
+ addiu $sp, $sp, 16 # restore stack after call to artReadBarrierSlow + # No need to unpoison return value in v0, artReadBarrierSlow() would do the unpoisoning. + move \rDest, $v0 # save return value in rDest + # (rDest cannot be v0 in art_quick_aput_obj) + + lw $a0, 0($sp) # restore registers except rDest + # (rDest can only be t0 or t1 in art_quick_aput_obj) + .cfi_restore 4 + lw $a1, 4($sp) + .cfi_restore 5 + lw $a2, 8($sp) + .cfi_restore 6 + POP_REG_NE $t0, 8, 16, \rDest + POP_REG_NE $t1, 9, 20, \rDest + lw $t9, 24($sp) + .cfi_restore 25 + lw $ra, 28($sp) # restore $ra + .cfi_restore 31 + addiu $sp, $sp, 32 + .cfi_adjust_cfa_offset -32 +#else + lw \rDest, \offset(\rObj) + UNPOISON_HEAP_REF \rDest +#endif // USE_READ_BARRIER +.endm + + /* * Entry from managed code for array put operations of objects where the value being stored * needs to be checked for compatibility. * a0 = array, a1 = index, a2 = value @@ -648,15 +715,15 @@ ENTRY art_quick_aput_obj_with_bound_check move $a1, $t0 END art_quick_aput_obj_with_bound_check +#ifdef USE_READ_BARRIER + .extern artReadBarrierSlow +#endif ENTRY art_quick_aput_obj beqz $a2, .Ldo_aput_null nop - lw $t0, MIRROR_OBJECT_CLASS_OFFSET($a0) - UNPOISON_HEAP_REF $t0 - lw $t1, MIRROR_OBJECT_CLASS_OFFSET($a2) - UNPOISON_HEAP_REF $t1 - lw $t0, MIRROR_CLASS_COMPONENT_TYPE_OFFSET($t0) - UNPOISON_HEAP_REF $t0 + READ_BARRIER $t0, $a0, MIRROR_OBJECT_CLASS_OFFSET + READ_BARRIER $t1, $a2, MIRROR_OBJECT_CLASS_OFFSET + READ_BARRIER $t0, $t0, MIRROR_CLASS_COMPONENT_TYPE_OFFSET bne $t1, $t0, .Lcheck_assignability # value's type == array's component type - trivial assignability nop .Ldo_aput: diff --git a/runtime/arch/mips64/context_mips64.h b/runtime/arch/mips64/context_mips64.h index ebc036cf98..e4a144f420 100644 --- a/runtime/arch/mips64/context_mips64.h +++ b/runtime/arch/mips64/context_mips64.h @@ -34,7 +34,7 @@ class Mips64Context : public Context { void Reset() OVERRIDE; - void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); void SetSP(uintptr_t new_sp) OVERRIDE { SetGPR(SP, new_sp); diff --git a/runtime/arch/mips64/entrypoints_init_mips64.cc b/runtime/arch/mips64/entrypoints_init_mips64.cc index 4904af9cfc..ec02d5ab69 100644 --- a/runtime/arch/mips64/entrypoints_init_mips64.cc +++ b/runtime/arch/mips64/entrypoints_init_mips64.cc @@ -186,6 +186,7 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints, // Read barrier qpoints->pReadBarrierJni = ReadBarrierJni; + qpoints->pReadBarrierSlow = artReadBarrierSlow; }; } // namespace art diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S index abca70b363..c30e6ca93f 100644 --- a/runtime/arch/mips64/quick_entrypoints_mips64.S +++ b/runtime/arch/mips64/quick_entrypoints_mips64.S @@ -89,7 +89,6 @@ # load appropriate callee-save-method ld $t1, %got(_ZN3art7Runtime9instance_E)($gp) ld $t1, 0($t1) - THIS_LOAD_REQUIRES_READ_BARRIER ld $t1, RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET($t1) sd $t1, 0($sp) # Place ArtMethod* at bottom of stack. sd $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame. @@ -132,7 +131,6 @@ # load appropriate callee-save-method ld $t1, %got(_ZN3art7Runtime9instance_E)($gp) ld $t1, 0($t1) - THIS_LOAD_REQUIRES_READ_BARRIER ld $t1, RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET($t1) sd $t1, 0($sp) # Place Method* at bottom of stack. 
sd $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame. @@ -255,7 +253,6 @@ # load appropriate callee-save-method ld $t1, %got(_ZN3art7Runtime9instance_E)($gp) ld $t1, 0($t1) - THIS_LOAD_REQUIRES_READ_BARRIER ld $t1, RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET($t1) sd $t1, 0($sp) # Place Method* at bottom of stack. sd $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame. @@ -888,6 +885,77 @@ ENTRY art_quick_check_cast move $a2, rSELF # pass Thread::Current END art_quick_check_cast + + /* + * Restore rReg's value from offset($sp) if rReg is not the same as rExclude. + * nReg is the register number for rReg. + */ +.macro POP_REG_NE rReg, nReg, offset, rExclude + .ifnc \rReg, \rExclude + ld \rReg, \offset($sp) # restore rReg + .cfi_restore \nReg + .endif +.endm + + /* + * Macro to insert read barrier, only used in art_quick_aput_obj. + * rObj and rDest are registers, offset is a defined literal such as MIRROR_OBJECT_CLASS_OFFSET. + * TODO: When read barrier has a fast path, add heap unpoisoning support for the fast path. + */ +.macro READ_BARRIER rDest, rObj, offset +#ifdef USE_READ_BARRIER + # saved registers used in art_quick_aput_obj: a0-a2, t0-t1, t9, ra. 16B-aligned. + daddiu $sp, $sp, -64 + .cfi_adjust_cfa_offset 64 + sd $ra, 56($sp) + .cfi_rel_offset 31, 56 + sd $t9, 48($sp) + .cfi_rel_offset 25, 48 + sd $t1, 40($sp) + .cfi_rel_offset 13, 40 + sd $t0, 32($sp) + .cfi_rel_offset 12, 32 + sd $a2, 16($sp) # padding slot at offset 24 (padding can be any slot in the 64B) + .cfi_rel_offset 6, 16 + sd $a1, 8($sp) + .cfi_rel_offset 5, 8 + sd $a0, 0($sp) + .cfi_rel_offset 4, 0 + + # move $a0, $a0 # pass ref in a0 (no-op for now since parameter ref is unused) + .ifnc \rObj, $a1 + move $a1, \rObj # pass rObj + .endif + daddiu $a2, $zero, \offset # pass offset + jal artReadBarrierSlow # artReadBarrierSlow(ref, rObj, offset) + .cpreturn # Restore gp from t8 in branch delay slot. + # t8 may be clobbered in artReadBarrierSlow. + # No need to unpoison return value in v0, artReadBarrierSlow() would do the unpoisoning. + move \rDest, $v0 # save return value in rDest + # (rDest cannot be v0 in art_quick_aput_obj) + + ld $a0, 0($sp) # restore registers except rDest + # (rDest can only be t0 or t1 in art_quick_aput_obj) + .cfi_restore 4 + ld $a1, 8($sp) + .cfi_restore 5 + ld $a2, 16($sp) + .cfi_restore 6 + POP_REG_NE $t0, 12, 32, \rDest + POP_REG_NE $t1, 13, 40, \rDest + ld $t9, 48($sp) + .cfi_restore 25 + ld $ra, 56($sp) # restore $ra + .cfi_restore 31 + daddiu $sp, $sp, 64 + .cfi_adjust_cfa_offset -64 + SETUP_GP # set up gp because we are not returning +#else + lwu \rDest, \offset(\rObj) + UNPOISON_HEAP_REF \rDest +#endif // USE_READ_BARRIER +.endm + /* * Entry from managed code for array put operations of objects where the value being stored * needs to be checked for compatibility. 
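For reference, the READ_BARRIER macros above (the mips and mips64 variants) make the same choice: with USE_READ_BARRIER defined, the reference load becomes a call to artReadBarrierSlow(ref, obj, offset); otherwise it stays a plain heap-reference load followed by UNPOISON_HEAP_REF. A minimal C++ sketch of that contract, using the artReadBarrierSlow signature declared elsewhere in this patch; the pointer-compression details in the #else branch are illustrative assumptions, not taken from the patch:

#include <cstdint>

namespace mirror { class Object; }

// Signature declared by this patch for the assembly stubs; conceptually it
// returns the (possibly updated) reference stored at obj + offset.
extern "C" mirror::Object* artReadBarrierSlow(mirror::Object* ref,
                                              mirror::Object* obj,
                                              uint32_t offset);

// Illustrative equivalent of the READ_BARRIER macro body.
inline mirror::Object* ReadBarrierLoad(mirror::Object* obj, uint32_t offset) {
#ifdef USE_READ_BARRIER
  // The ref argument is unused for now, matching the "pass ref" no-op comments above.
  return artReadBarrierSlow(nullptr, obj, offset);
#else
  // Plain 32-bit heap-reference load; UNPOISON_HEAP_REF would undo heap
  // poisoning here before the reference is widened to a native pointer.
  uint32_t compressed = *reinterpret_cast<const uint32_t*>(
      reinterpret_cast<const uint8_t*>(obj) + offset);
  return reinterpret_cast<mirror::Object*>(static_cast<uintptr_t>(compressed));
#endif
}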
@@ -913,12 +981,9 @@ END art_quick_aput_obj_with_bound_check ENTRY art_quick_aput_obj beq $a2, $zero, .Ldo_aput_null nop - lwu $t0, MIRROR_OBJECT_CLASS_OFFSET($a0) - UNPOISON_HEAP_REF $t0 - lwu $t1, MIRROR_OBJECT_CLASS_OFFSET($a2) - UNPOISON_HEAP_REF $t1 - lwu $t0, MIRROR_CLASS_COMPONENT_TYPE_OFFSET($t0) - UNPOISON_HEAP_REF $t0 + READ_BARRIER $t0, $a0, MIRROR_OBJECT_CLASS_OFFSET + READ_BARRIER $t1, $a2, MIRROR_OBJECT_CLASS_OFFSET + READ_BARRIER $t0, $t0, MIRROR_CLASS_COMPONENT_TYPE_OFFSET bne $t1, $t0, .Lcheck_assignability # value's type == array's component type - trivial assignability nop .Ldo_aput: diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc index 05b42f5604..cf7db34ca1 100644 --- a/runtime/arch/stub_test.cc +++ b/runtime/arch/stub_test.cc @@ -1124,8 +1124,6 @@ TEST_F(StubTest, CheckCast) { TEST_F(StubTest, APutObj) { - TEST_DISABLED_FOR_READ_BARRIER(); - #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \ (defined(__x86_64__) && !defined(__APPLE__)) Thread* self = Thread::Current(); @@ -1258,8 +1256,6 @@ TEST_F(StubTest, APutObj) { } TEST_F(StubTest, AllocObject) { - TEST_DISABLED_FOR_READ_BARRIER(); - #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \ (defined(__x86_64__) && !defined(__APPLE__)) // This will lead to OOM error messages in the log. @@ -1385,8 +1381,6 @@ TEST_F(StubTest, AllocObject) { } TEST_F(StubTest, AllocObjectArray) { - TEST_DISABLED_FOR_READ_BARRIER(); - #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \ (defined(__x86_64__) && !defined(__APPLE__)) // TODO: Check the "Unresolved" allocation stubs @@ -1474,8 +1468,6 @@ TEST_F(StubTest, AllocObjectArray) { TEST_F(StubTest, StringCompareTo) { - TEST_DISABLED_FOR_READ_BARRIER(); - #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__)) // TODO: Check the "Unresolved" allocation stubs @@ -1557,7 +1549,7 @@ TEST_F(StubTest, StringCompareTo) { static void GetSetBooleanStatic(ArtField* f, Thread* self, ArtMethod* referrer, StubTest* test) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \ (defined(__x86_64__) && !defined(__APPLE__)) constexpr size_t num_values = 5; @@ -1588,7 +1580,7 @@ static void GetSetBooleanStatic(ArtField* f, Thread* self, } static void GetSetByteStatic(ArtField* f, Thread* self, ArtMethod* referrer, StubTest* test) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \ (defined(__x86_64__) && !defined(__APPLE__)) int8_t values[] = { -128, -64, 0, 64, 127 }; @@ -1619,7 +1611,7 @@ static void GetSetByteStatic(ArtField* f, Thread* self, ArtMethod* referrer, static void GetSetBooleanInstance(Handle<mirror::Object>* obj, ArtField* f, Thread* self, ArtMethod* referrer, StubTest* test) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \ (defined(__x86_64__) && !defined(__APPLE__)) uint8_t values[] = { 0, true, 2, 128, 0xFF }; @@ -1654,7 +1646,7 @@ static void GetSetBooleanInstance(Handle<mirror::Object>* obj, ArtField* f, Thre } static void GetSetByteInstance(Handle<mirror::Object>* obj, ArtField* f, Thread* self, ArtMethod* 
referrer, StubTest* test) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \ (defined(__x86_64__) && !defined(__APPLE__)) int8_t values[] = { -128, -64, 0, 64, 127 }; @@ -1689,7 +1681,7 @@ static void GetSetByteInstance(Handle<mirror::Object>* obj, ArtField* f, static void GetSetCharStatic(ArtField* f, Thread* self, ArtMethod* referrer, StubTest* test) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \ (defined(__x86_64__) && !defined(__APPLE__)) uint16_t values[] = { 0, 1, 2, 255, 32768, 0xFFFF }; @@ -1719,7 +1711,7 @@ static void GetSetCharStatic(ArtField* f, Thread* self, ArtMethod* referrer, } static void GetSetShortStatic(ArtField* f, Thread* self, ArtMethod* referrer, StubTest* test) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \ (defined(__x86_64__) && !defined(__APPLE__)) int16_t values[] = { -0x7FFF, -32768, 0, 255, 32767, 0x7FFE }; @@ -1750,7 +1742,7 @@ static void GetSetShortStatic(ArtField* f, Thread* self, static void GetSetCharInstance(Handle<mirror::Object>* obj, ArtField* f, Thread* self, ArtMethod* referrer, StubTest* test) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \ (defined(__x86_64__) && !defined(__APPLE__)) uint16_t values[] = { 0, 1, 2, 255, 32768, 0xFFFF }; @@ -1784,7 +1776,7 @@ static void GetSetCharInstance(Handle<mirror::Object>* obj, ArtField* f, } static void GetSetShortInstance(Handle<mirror::Object>* obj, ArtField* f, Thread* self, ArtMethod* referrer, StubTest* test) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \ (defined(__x86_64__) && !defined(__APPLE__)) int16_t values[] = { -0x7FFF, -32768, 0, 255, 32767, 0x7FFE }; @@ -1819,7 +1811,7 @@ static void GetSetShortInstance(Handle<mirror::Object>* obj, ArtField* f, static void GetSet32Static(ArtField* f, Thread* self, ArtMethod* referrer, StubTest* test) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \ (defined(__x86_64__) && !defined(__APPLE__)) uint32_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF }; @@ -1855,7 +1847,7 @@ static void GetSet32Static(ArtField* f, Thread* self, ArtMethod* referrer, static void GetSet32Instance(Handle<mirror::Object>* obj, ArtField* f, Thread* self, ArtMethod* referrer, StubTest* test) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \ (defined(__x86_64__) && !defined(__APPLE__)) uint32_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF }; @@ -1896,7 +1888,7 @@ static void GetSet32Instance(Handle<mirror::Object>* obj, ArtField* f, static void set_and_check_static(uint32_t f_idx, mirror::Object* val, Thread* self, ArtMethod* referrer, StubTest* test) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { 
test->Invoke3WithReferrer(static_cast<size_t>(f_idx), reinterpret_cast<size_t>(val), 0U, @@ -1916,7 +1908,7 @@ static void set_and_check_static(uint32_t f_idx, mirror::Object* val, Thread* se static void GetSetObjStatic(ArtField* f, Thread* self, ArtMethod* referrer, StubTest* test) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \ (defined(__x86_64__) && !defined(__APPLE__)) set_and_check_static(f->GetDexFieldIndex(), nullptr, self, referrer, test); @@ -1940,7 +1932,7 @@ static void GetSetObjStatic(ArtField* f, Thread* self, ArtMethod* referrer, static void set_and_check_instance(ArtField* f, mirror::Object* trg, mirror::Object* val, Thread* self, ArtMethod* referrer, StubTest* test) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()), reinterpret_cast<size_t>(trg), reinterpret_cast<size_t>(val), @@ -1963,7 +1955,7 @@ static void set_and_check_instance(ArtField* f, mirror::Object* trg, static void GetSetObjInstance(Handle<mirror::Object>* obj, ArtField* f, Thread* self, ArtMethod* referrer, StubTest* test) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \ (defined(__x86_64__) && !defined(__APPLE__)) set_and_check_instance(f, obj->Get(), nullptr, self, referrer, test); @@ -1986,7 +1978,7 @@ static void GetSetObjInstance(Handle<mirror::Object>* obj, ArtField* f, static void GetSet64Static(ArtField* f, Thread* self, ArtMethod* referrer, StubTest* test) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { #if (defined(__x86_64__) && !defined(__APPLE__)) || (defined(__mips__) && defined(__LP64__)) || \ defined(__aarch64__) uint64_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF, 0xFFFFFFFFFFFF }; @@ -2017,7 +2009,7 @@ static void GetSet64Static(ArtField* f, Thread* self, ArtMethod* referrer, static void GetSet64Instance(Handle<mirror::Object>* obj, ArtField* f, Thread* self, ArtMethod* referrer, StubTest* test) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { #if (defined(__x86_64__) && !defined(__APPLE__)) || (defined(__mips__) && defined(__LP64__)) || \ defined(__aarch64__) uint64_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF, 0xFFFFFFFFFFFF }; @@ -2152,8 +2144,6 @@ static void TestFields(Thread* self, StubTest* test, Primitive::Type test_type) } TEST_F(StubTest, Fields8) { - TEST_DISABLED_FOR_READ_BARRIER(); - Thread* self = Thread::Current(); self->TransitionFromSuspendedToRunnable(); @@ -2166,8 +2156,6 @@ TEST_F(StubTest, Fields8) { } TEST_F(StubTest, Fields16) { - TEST_DISABLED_FOR_READ_BARRIER(); - Thread* self = Thread::Current(); self->TransitionFromSuspendedToRunnable(); @@ -2180,8 +2168,6 @@ TEST_F(StubTest, Fields16) { } TEST_F(StubTest, Fields32) { - TEST_DISABLED_FOR_READ_BARRIER(); - Thread* self = Thread::Current(); self->TransitionFromSuspendedToRunnable(); @@ -2193,8 +2179,6 @@ TEST_F(StubTest, Fields32) { } TEST_F(StubTest, FieldsObj) { - TEST_DISABLED_FOR_READ_BARRIER(); - Thread* self = Thread::Current(); self->TransitionFromSuspendedToRunnable(); @@ -2206,8 +2190,6 @@ TEST_F(StubTest, FieldsObj) { } TEST_F(StubTest, Fields64) { - TEST_DISABLED_FOR_READ_BARRIER(); - Thread* self = Thread::Current(); 
self->TransitionFromSuspendedToRunnable(); @@ -2221,8 +2203,6 @@ TEST_F(StubTest, Fields64) { TEST_F(StubTest, IMT) { #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \ (defined(__x86_64__) && !defined(__APPLE__)) - TEST_DISABLED_FOR_READ_BARRIER(); - Thread* self = Thread::Current(); ScopedObjectAccess soa(self); @@ -2342,8 +2322,6 @@ TEST_F(StubTest, IMT) { TEST_F(StubTest, StringIndexOf) { #if defined(__arm__) || defined(__aarch64__) - TEST_DISABLED_FOR_READ_BARRIER(); - Thread* self = Thread::Current(); ScopedObjectAccess soa(self); // garbage is created during ClassLinker::Init @@ -2416,4 +2394,40 @@ TEST_F(StubTest, StringIndexOf) { #endif } +TEST_F(StubTest, ReadBarrier) { +#if defined(ART_USE_READ_BARRIER) && (defined(__i386__) || defined(__arm__) || \ + defined(__aarch64__) || defined(__mips__) || (defined(__x86_64__) && !defined(__APPLE__))) + Thread* self = Thread::Current(); + + const uintptr_t readBarrierSlow = StubTest::GetEntrypoint(self, kQuickReadBarrierSlow); + + // Create an object + ScopedObjectAccess soa(self); + // garbage is created during ClassLinker::Init + + StackHandleScope<2> hs(soa.Self()); + Handle<mirror::Class> c( + hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;"))); + + // Build an object instance + Handle<mirror::Object> obj(hs.NewHandle(c->AllocObject(soa.Self()))); + + EXPECT_FALSE(self->IsExceptionPending()); + + size_t result = Invoke3(0U, reinterpret_cast<size_t>(obj.Get()), + mirror::Object::ClassOffset().SizeValue(), readBarrierSlow, self); + + EXPECT_FALSE(self->IsExceptionPending()); + EXPECT_NE(reinterpret_cast<size_t>(nullptr), result); + mirror::Class* klass = reinterpret_cast<mirror::Class*>(result); + EXPECT_EQ(klass, obj->GetClass()); + + // Tests done. +#else + LOG(INFO) << "Skipping read_barrier_slow"; + // Force-print to std::cout so it's also outside the logcat. + std::cout << "Skipping read_barrier_slow" << std::endl; +#endif +} + } // namespace art diff --git a/runtime/arch/x86/context_x86.h b/runtime/arch/x86/context_x86.h index a783d48ed2..c4a11d8a88 100644 --- a/runtime/arch/x86/context_x86.h +++ b/runtime/arch/x86/context_x86.h @@ -34,7 +34,7 @@ class X86Context : public Context { void Reset() OVERRIDE; - void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); void SetSP(uintptr_t new_sp) OVERRIDE { SetGPR(ESP, new_sp); diff --git a/runtime/arch/x86/entrypoints_init_x86.cc b/runtime/arch/x86/entrypoints_init_x86.cc index 737f4d1c5b..e2632c103b 100644 --- a/runtime/arch/x86/entrypoints_init_x86.cc +++ b/runtime/arch/x86/entrypoints_init_x86.cc @@ -28,6 +28,9 @@ namespace art { extern "C" uint32_t art_quick_is_assignable(const mirror::Class* klass, const mirror::Class* ref_class); +// Read barrier entrypoints. 
+extern "C" mirror::Object* art_quick_read_barrier_slow(mirror::Object*, mirror::Object*, uint32_t); + void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints, QuickEntryPoints* qpoints) { // Interpreter @@ -141,6 +144,7 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints, // Read barrier qpoints->pReadBarrierJni = ReadBarrierJni; + qpoints->pReadBarrierSlow = art_quick_read_barrier_slow; }; } // namespace art diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S index ebfb3faf4b..1da5a2ff17 100644 --- a/runtime/arch/x86/quick_entrypoints_x86.S +++ b/runtime/arch/x86/quick_entrypoints_x86.S @@ -33,7 +33,6 @@ MACRO2(SETUP_SAVE_ALL_CALLEE_SAVE_FRAME, got_reg, temp_reg) movl SYMBOL(_ZN3art7Runtime9instance_E)@GOT(REG_VAR(got_reg)), REG_VAR(temp_reg) movl (REG_VAR(temp_reg)), REG_VAR(temp_reg) // Push save all callee-save method. - THIS_LOAD_REQUIRES_READ_BARRIER pushl RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET(REG_VAR(temp_reg)) CFI_ADJUST_CFA_OFFSET(4) // Store esp as the top quick frame. @@ -60,7 +59,6 @@ MACRO2(SETUP_REFS_ONLY_CALLEE_SAVE_FRAME, got_reg, temp_reg) movl SYMBOL(_ZN3art7Runtime9instance_E)@GOT(REG_VAR(got_reg)), REG_VAR(temp_reg) movl (REG_VAR(temp_reg)), REG_VAR(temp_reg) // Push save all callee-save method. - THIS_LOAD_REQUIRES_READ_BARRIER pushl RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET(REG_VAR(temp_reg)) CFI_ADJUST_CFA_OFFSET(4) // Store esp as the top quick frame. @@ -106,7 +104,6 @@ MACRO2(SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME, got_reg, temp_reg) movl SYMBOL(_ZN3art7Runtime9instance_E)@GOT(REG_VAR(got_reg)), REG_VAR(temp_reg) movl (REG_VAR(temp_reg)), REG_VAR(temp_reg) // Push save all callee-save method. - THIS_LOAD_REQUIRES_READ_BARRIER pushl RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET(REG_VAR(temp_reg)) CFI_ADJUST_CFA_OFFSET(4) // Store esp as the stop quick frame. @@ -1126,6 +1123,53 @@ DEFINE_FUNCTION art_quick_check_cast UNREACHABLE END_FUNCTION art_quick_check_cast +// Restore reg's value if reg is not the same as exclude_reg, otherwise just adjust stack. +MACRO2(POP_REG_NE, reg, exclude_reg) + .ifc RAW_VAR(reg), RAW_VAR(exclude_reg) + addl MACRO_LITERAL(4), %esp + CFI_ADJUST_CFA_OFFSET(-4) + .else + POP RAW_VAR(reg) + .endif +END_MACRO + + /* + * Macro to insert read barrier, only used in art_quick_aput_obj. + * obj_reg and dest_reg are registers, offset is a defined literal such as + * MIRROR_OBJECT_CLASS_OFFSET. + * pop_eax is a boolean flag, indicating if eax is popped after the call. + * TODO: When read barrier has a fast path, add heap unpoisoning support for the fast path. + */ +MACRO4(READ_BARRIER, obj_reg, offset, dest_reg, pop_eax) +#ifdef USE_READ_BARRIER + PUSH eax // save registers used in art_quick_aput_obj + PUSH ebx + PUSH edx + PUSH ecx + // Outgoing argument set up + pushl MACRO_LITERAL((RAW_VAR(offset))) // pass offset, double parentheses are necessary + CFI_ADJUST_CFA_OFFSET(4) + PUSH RAW_VAR(obj_reg) // pass obj_reg + PUSH eax // pass ref, just pass eax for now since parameter ref is unused + call SYMBOL(artReadBarrierSlow) // artReadBarrierSlow(ref, obj_reg, offset) + // No need to unpoison return value in eax, artReadBarrierSlow() would do the unpoisoning. 
+ .ifnc RAW_VAR(dest_reg), eax + movl %eax, REG_VAR(dest_reg) // save loaded ref in dest_reg + .endif + addl MACRO_LITERAL(12), %esp // pop arguments + CFI_ADJUST_CFA_OFFSET(-12) + POP_REG_NE ecx, RAW_VAR(dest_reg) // Restore args except dest_reg + POP_REG_NE edx, RAW_VAR(dest_reg) + POP_REG_NE ebx, RAW_VAR(dest_reg) + .ifc RAW_VAR(pop_eax), true + POP_REG_NE eax, RAW_VAR(dest_reg) + .endif +#else + movl RAW_VAR(offset)(REG_VAR(obj_reg)), REG_VAR(dest_reg) + UNPOISON_HEAP_REF RAW_VAR(dest_reg) +#endif // USE_READ_BARRIER +END_MACRO + /* * Entry from managed code for array put operations of objects where the value being stored * needs to be checked for compatibility. @@ -1149,17 +1193,20 @@ END_FUNCTION art_quick_aput_obj_with_bound_check DEFINE_FUNCTION art_quick_aput_obj test %edx, %edx // store of null jz .Ldo_aput_null - movl MIRROR_OBJECT_CLASS_OFFSET(%eax), %ebx - UNPOISON_HEAP_REF ebx - movl MIRROR_CLASS_COMPONENT_TYPE_OFFSET(%ebx), %ebx - UNPOISON_HEAP_REF ebx + READ_BARRIER eax, MIRROR_OBJECT_CLASS_OFFSET, ebx, true + READ_BARRIER ebx, MIRROR_CLASS_COMPONENT_TYPE_OFFSET, ebx, true // value's type == array's component type - trivial assignability -#ifdef USE_HEAP_POISONING - PUSH eax // save eax +#if defined(USE_READ_BARRIER) + READ_BARRIER edx, MIRROR_OBJECT_CLASS_OFFSET, eax, false + cmpl %eax, %ebx + POP eax // restore eax from the push in the beginning of READ_BARRIER macro +#elif defined(USE_HEAP_POISONING) + PUSH eax // save eax + // Cannot call READ_BARRIER macro here, because the above push messes up stack alignment. movl MIRROR_OBJECT_CLASS_OFFSET(%edx), %eax UNPOISON_HEAP_REF eax cmpl %eax, %ebx - POP eax // restore eax + POP eax // restore eax #else cmpl MIRROR_OBJECT_CLASS_OFFSET(%edx), %ebx #endif @@ -1181,6 +1228,8 @@ DEFINE_FUNCTION art_quick_aput_obj subl LITERAL(8), %esp // alignment padding CFI_ADJUST_CFA_OFFSET(8) #ifdef USE_HEAP_POISONING + // This load does not need read barrier, since edx is unchanged and there's no GC safe point + // from last read of MIRROR_OBJECT_CLASS_OFFSET(%edx). movl MIRROR_OBJECT_CLASS_OFFSET(%edx), %eax // pass arg2 - type of the value to be stored UNPOISON_HEAP_REF eax PUSH eax @@ -1696,5 +1745,15 @@ DEFINE_FUNCTION art_nested_signal_return UNREACHABLE END_FUNCTION art_nested_signal_return +DEFINE_FUNCTION art_quick_read_barrier_slow + PUSH edx // pass arg3 - offset + PUSH ecx // pass arg2 - obj + PUSH eax // pass arg1 - ref + call SYMBOL(artReadBarrierSlow) // artReadBarrierSlow(ref, obj, offset) + addl LITERAL(12), %esp // pop arguments + CFI_ADJUST_CFA_OFFSET(-12) + ret +END_FUNCTION art_quick_read_barrier_slow + // TODO: implement these! 
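One point worth spelling out across the entrypoint hunks: x86 here (and x86-64 further down) install a small assembly trampoline, art_quick_read_barrier_slow, which moves the incoming arguments into the C calling convention and calls artReadBarrierSlow, while the mips and mips64 ports install the C entrypoint directly. A hedged sketch of that wiring, with QuickEntryPointsSketch standing in for the real QuickEntryPoints struct (an assumption for illustration only):

#include <cstdint>

namespace mirror { class Object; }

extern "C" mirror::Object* artReadBarrierSlow(mirror::Object*, mirror::Object*, uint32_t);
extern "C" mirror::Object* art_quick_read_barrier_slow(mirror::Object*, mirror::Object*, uint32_t);

// Illustrative stand-in for the pReadBarrierSlow slot in QuickEntryPoints.
struct QuickEntryPointsSketch {
  mirror::Object* (*pReadBarrierSlow)(mirror::Object*, mirror::Object*, uint32_t);
};

inline void InitReadBarrierSlowEntrypoint(QuickEntryPointsSketch* qpoints) {
#if defined(__i386__) || defined(__x86_64__)
  qpoints->pReadBarrierSlow = art_quick_read_barrier_slow;  // assembly stub added by this patch
#else
  qpoints->pReadBarrierSlow = artReadBarrierSlow;           // as in the mips/mips64 hunks above
#endif
}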
UNIMPLEMENTED art_quick_memcmp16 diff --git a/runtime/arch/x86_64/asm_support_x86_64.S b/runtime/arch/x86_64/asm_support_x86_64.S index 706ae58d91..cf0039c84e 100644 --- a/runtime/arch/x86_64/asm_support_x86_64.S +++ b/runtime/arch/x86_64/asm_support_x86_64.S @@ -24,6 +24,7 @@ #define MACRO1(macro_name, macro_arg1) .macro macro_name macro_arg1 #define MACRO2(macro_name, macro_arg1, macro_arg2) .macro macro_name macro_arg1, macro_arg2 #define MACRO3(macro_name, macro_arg1, macro_arg2, macro_arg3) .macro macro_name macro_arg1, macro_arg2, macro_arg3 +#define MACRO4(macro_name, macro_arg1, macro_arg2, macro_arg3, macro_arg4) .macro macro_name macro_arg1, macro_arg2, macro_arg3, macro_arg4 #define END_MACRO .endm #if defined(__clang__) diff --git a/runtime/arch/x86_64/context_x86_64.h b/runtime/arch/x86_64/context_x86_64.h index c9b0ff6b72..30bb9ec362 100644 --- a/runtime/arch/x86_64/context_x86_64.h +++ b/runtime/arch/x86_64/context_x86_64.h @@ -34,7 +34,7 @@ class X86_64Context : public Context { void Reset() OVERRIDE; - void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); void SetSP(uintptr_t new_sp) OVERRIDE { SetGPR(RSP, new_sp); diff --git a/runtime/arch/x86_64/entrypoints_init_x86_64.cc b/runtime/arch/x86_64/entrypoints_init_x86_64.cc index d0ab9d5d49..ef1bb5f9a7 100644 --- a/runtime/arch/x86_64/entrypoints_init_x86_64.cc +++ b/runtime/arch/x86_64/entrypoints_init_x86_64.cc @@ -29,6 +29,9 @@ namespace art { extern "C" uint32_t art_quick_assignable_from_code(const mirror::Class* klass, const mirror::Class* ref_class); +// Read barrier entrypoints. +extern "C" mirror::Object* art_quick_read_barrier_slow(mirror::Object*, mirror::Object*, uint32_t); + void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints, QuickEntryPoints* qpoints) { #if defined(__APPLE__) @@ -145,6 +148,7 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints, // Read barrier qpoints->pReadBarrierJni = ReadBarrierJni; + qpoints->pReadBarrierSlow = art_quick_read_barrier_slow; #endif // __APPLE__ }; diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S index 7e7d789c8d..f4c9488260 100644 --- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S +++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S @@ -66,7 +66,6 @@ MACRO0(SETUP_SAVE_ALL_CALLEE_SAVE_FRAME) movq %xmm14, 24(%rsp) movq %xmm15, 32(%rsp) // R10 := ArtMethod* for save all callee save frame method. - THIS_LOAD_REQUIRES_READ_BARRIER movq RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET(%r10), %r10 // Store ArtMethod* to bottom of stack. movq %r10, 0(%rsp) @@ -109,7 +108,6 @@ MACRO0(SETUP_REFS_ONLY_CALLEE_SAVE_FRAME) movq %xmm14, 24(%rsp) movq %xmm15, 32(%rsp) // R10 := ArtMethod* for refs only callee save frame method. - THIS_LOAD_REQUIRES_READ_BARRIER movq RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET(%r10), %r10 // Store ArtMethod* to bottom of stack. movq %r10, 0(%rsp) @@ -168,7 +166,6 @@ MACRO0(SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME) subq MACRO_LITERAL(80 + 4 * 8), %rsp CFI_ADJUST_CFA_OFFSET(80 + 4 * 8) // R10 := ArtMethod* for ref and args callee save frame method. - THIS_LOAD_REQUIRES_READ_BARRIER movq RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET(%r10), %r10 // Save FPRs. movq %xmm0, 16(%rsp) @@ -920,8 +917,12 @@ DEFINE_FUNCTION art_quick_alloc_object_tlab // Fast path tlab allocation. 
// RDI: uint32_t type_idx, RSI: ArtMethod* // RDX, RCX, R8, R9: free. RAX: return val. + // TODO: Add read barrier when this function is used. + // Might need a special macro since rsi and edx is 32b/64b mismatched. movl ART_METHOD_DEX_CACHE_TYPES_OFFSET(%rsi), %edx // Load dex cache resolved types array UNPOISON_HEAP_REF edx + // TODO: Add read barrier when this function is used. + // Might need to break down into multiple instructions to get the base address in a register. // Load the class movl MIRROR_OBJECT_ARRAY_DATA_OFFSET(%rdx, %rdi, MIRROR_OBJECT_ARRAY_COMPONENT_SIZE), %edx UNPOISON_HEAP_REF edx @@ -1127,19 +1128,23 @@ END_FUNCTION art_quick_unlock_object DEFINE_FUNCTION art_quick_check_cast PUSH rdi // Save args for exc PUSH rsi + subq LITERAL(8), %rsp // Alignment padding. + CFI_ADJUST_CFA_OFFSET(8) SETUP_FP_CALLEE_SAVE_FRAME call SYMBOL(artIsAssignableFromCode) // (Class* klass, Class* ref_klass) testq %rax, %rax jz 1f // jump forward if not assignable RESTORE_FP_CALLEE_SAVE_FRAME - addq LITERAL(16), %rsp // pop arguments - CFI_ADJUST_CFA_OFFSET(-16) + addq LITERAL(24), %rsp // pop arguments + CFI_ADJUST_CFA_OFFSET(-24) ret - CFI_ADJUST_CFA_OFFSET(16 + 4 * 8) // Reset unwind info so following code unwinds. + CFI_ADJUST_CFA_OFFSET(24 + 4 * 8) // Reset unwind info so following code unwinds. 1: RESTORE_FP_CALLEE_SAVE_FRAME + addq LITERAL(8), %rsp // pop padding + CFI_ADJUST_CFA_OFFSET(-8) POP rsi // Pop arguments POP rdi SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context @@ -1149,6 +1154,60 @@ DEFINE_FUNCTION art_quick_check_cast END_FUNCTION art_quick_check_cast +// Restore reg's value if reg is not the same as exclude_reg, otherwise just adjust stack. +MACRO2(POP_REG_NE, reg, exclude_reg) + .ifc RAW_VAR(reg), RAW_VAR(exclude_reg) + addq MACRO_LITERAL(8), %rsp + CFI_ADJUST_CFA_OFFSET(-8) + .else + POP RAW_VAR(reg) + .endif +END_MACRO + + /* + * Macro to insert read barrier, used in art_quick_aput_obj and art_quick_alloc_object_tlab. + * obj_reg and dest_reg{32|64} are registers, offset is a defined literal such as + * MIRROR_OBJECT_CLASS_OFFSET. dest_reg needs two versions to handle the mismatch between + * 64b PUSH/POP and 32b argument. + * TODO: When read barrier has a fast path, add heap unpoisoning support for the fast path. + * + * As with art_quick_aput_obj* functions, the 64b versions are in comments. + */ +MACRO4(READ_BARRIER, obj_reg, offset, dest_reg32, dest_reg64) +#ifdef USE_READ_BARRIER + PUSH rax // save registers that might be used + PUSH rdi + PUSH rsi + PUSH rdx + PUSH rcx + SETUP_FP_CALLEE_SAVE_FRAME + // Outgoing argument set up + // movl %edi, %edi // pass ref, no-op for now since parameter ref is unused + // // movq %rdi, %rdi + movl REG_VAR(obj_reg), %esi // pass obj_reg + // movq REG_VAR(obj_reg), %rsi + movl MACRO_LITERAL((RAW_VAR(offset))), %edx // pass offset, double parentheses are necessary + // movq MACRO_LITERAL((RAW_VAR(offset))), %rdx + call SYMBOL(artReadBarrierSlow) // artReadBarrierSlow(ref, obj_reg, offset) + // No need to unpoison return value in rax, artReadBarrierSlow() would do the unpoisoning. 
+ .ifnc RAW_VAR(dest_reg32), eax + // .ifnc RAW_VAR(dest_reg64), rax + movl %eax, REG_VAR(dest_reg32) // save loaded ref in dest_reg + // movq %rax, REG_VAR(dest_reg64) + .endif + RESTORE_FP_CALLEE_SAVE_FRAME + POP_REG_NE rcx, RAW_VAR(dest_reg64) // Restore registers except dest_reg + POP_REG_NE rdx, RAW_VAR(dest_reg64) + POP_REG_NE rsi, RAW_VAR(dest_reg64) + POP_REG_NE rdi, RAW_VAR(dest_reg64) + POP_REG_NE rax, RAW_VAR(dest_reg64) +#else + movl RAW_VAR(offset)(REG_VAR(obj_reg)), REG_VAR(dest_reg32) + // movq RAW_VAR(offset)(REG_VAR(obj_reg)), REG_VAR(dest_reg64) + UNPOISON_HEAP_REF RAW_VAR(dest_reg32) // UNPOISON_HEAP_REF only takes a 32b register +#endif // USE_READ_BARRIER +END_MACRO + /* * Entry from managed code for array put operations of objects where the value being stored * needs to be checked for compatibility. @@ -1193,15 +1252,13 @@ DEFINE_FUNCTION art_quick_aput_obj testl %edx, %edx // store of null // test %rdx, %rdx jz .Ldo_aput_null - movl MIRROR_OBJECT_CLASS_OFFSET(%edi), %ecx -// movq MIRROR_OBJECT_CLASS_OFFSET(%rdi), %rcx - UNPOISON_HEAP_REF ecx - movl MIRROR_CLASS_COMPONENT_TYPE_OFFSET(%ecx), %ecx -// movq MIRROR_CLASS_COMPONENT_TYPE_OFFSET(%rcx), %rcx - UNPOISON_HEAP_REF ecx -#ifdef USE_HEAP_POISONING - movl MIRROR_OBJECT_CLASS_OFFSET(%edx), %eax // rax is free. - UNPOISON_HEAP_REF eax + READ_BARRIER edi, MIRROR_OBJECT_CLASS_OFFSET, ecx, rcx + // READ_BARRIER rdi, MIRROR_OBJECT_CLASS_OFFSET, ecx, rcx + READ_BARRIER ecx, MIRROR_CLASS_COMPONENT_TYPE_OFFSET, ecx, rcx + // READ_BARRIER rcx, MIRROR_CLASS_COMPONENT_TYPE_OFFSET, ecx, rcx +#if defined(USE_HEAP_POISONING) || defined(USE_READ_BARRIER) + READ_BARRIER edx, MIRROR_OBJECT_CLASS_OFFSET, eax, rax // rax is free. + // READ_BARRIER rdx, MIRROR_OBJECT_CLASS_OFFSET, eax, rax cmpl %eax, %ecx // value's type == array's component type - trivial assignability #else cmpl MIRROR_OBJECT_CLASS_OFFSET(%edx), %ecx // value's type == array's component type - trivial assignability @@ -1226,13 +1283,16 @@ DEFINE_FUNCTION art_quick_aput_obj PUSH rdi PUSH rsi PUSH rdx - subq LITERAL(8), %rsp // Alignment padding. - CFI_ADJUST_CFA_OFFSET(8) SETUP_FP_CALLEE_SAVE_FRAME - // "Uncompress" = do nothing, as already zero-extended on load. - movl MIRROR_OBJECT_CLASS_OFFSET(%edx), %esi // Pass arg2 = value's class. - UNPOISON_HEAP_REF esi +#if defined(USE_HEAP_POISONING) || defined(USE_READ_BARRIER) + // The load of MIRROR_OBJECT_CLASS_OFFSET(%edx) is redundant, eax still holds the value. + movl %eax, %esi // Pass arg2 = value's class. + // movq %rax, %rsi +#else + // "Uncompress" = do nothing, as already zero-extended on load. + movl MIRROR_OBJECT_CLASS_OFFSET(%edx), %esi // Pass arg2 = value's class. +#endif movq %rcx, %rdi // Pass arg1 = array's component type. call SYMBOL(artIsAssignableFromCode) // (Class* a, Class* b) @@ -1243,8 +1303,6 @@ DEFINE_FUNCTION art_quick_aput_obj RESTORE_FP_CALLEE_SAVE_FRAME // Restore arguments. - addq LITERAL(8), %rsp - CFI_ADJUST_CFA_OFFSET(-8) POP rdx POP rsi POP rdi @@ -1258,12 +1316,10 @@ DEFINE_FUNCTION art_quick_aput_obj movb %dl, (%rdx, %rdi) // Note: this assumes that top 32b of %rdi are zero // movb %dl, (%rdx, %rdi) ret - CFI_ADJUST_CFA_OFFSET(32 + 4 * 8) // Reset unwind info so following code unwinds. + CFI_ADJUST_CFA_OFFSET(24 + 4 * 8) // Reset unwind info so following code unwinds. .Lthrow_array_store_exception: RESTORE_FP_CALLEE_SAVE_FRAME // Restore arguments. 
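The x86-64 padding changes in these hunks (art_quick_check_cast gains an 8-byte pad, art_quick_aput_obj loses one, and art_quick_assignable_from_code plus the new art_quick_read_barrier_slow gain one) all follow from the SysV requirement that rsp be 16-byte aligned at each call. A quick arithmetic check, assuming the FP callee-save area is the 4 * 8 bytes that the unwind resets already reference:

#include <cstdint>

// At entry to these stubs the pushed return address leaves rsp at 8 mod 16,
// and the SysV x86-64 ABI wants rsp 16-byte aligned at each subsequent call.
constexpr uint64_t kReturnAddress = 8;       // already on the stack at entry
constexpr uint64_t kFpSaveArea    = 4 * 8;   // matches the "4 * 8" terms in the unwind resets

// art_quick_check_cast: two GPR pushes now need the extra 8-byte pad.
static_assert((kReturnAddress + 2 * 8 + 8 + kFpSaveArea) % 16 == 0,
              "aligned at the call to artIsAssignableFromCode");
static_assert(2 * 8 + 8 + kFpSaveArea == 24 + 4 * 8,
              "matches CFI_ADJUST_CFA_OFFSET(24 + 4 * 8)");

// art_quick_aput_obj: three GPR pushes are already aligned, so its old pad goes away.
static_assert((kReturnAddress + 3 * 8 + kFpSaveArea) % 16 == 0,
              "aligned at the call to artIsAssignableFromCode");
static_assert(3 * 8 + kFpSaveArea == 24 + 4 * 8,
              "matches the new unwind reset value");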
- addq LITERAL(8), %rsp - CFI_ADJUST_CFA_OFFSET(-8) POP rdx POP rsi POP rdi @@ -1717,7 +1773,11 @@ UNIMPLEMENTED art_quick_memcmp16 DEFINE_FUNCTION art_quick_assignable_from_code SETUP_FP_CALLEE_SAVE_FRAME + subq LITERAL(8), %rsp // Alignment padding. + CFI_ADJUST_CFA_OFFSET(8) call SYMBOL(artIsAssignableFromCode) // (const mirror::Class*, const mirror::Class*) + addq LITERAL(8), %rsp + CFI_ADJUST_CFA_OFFSET(-8) RESTORE_FP_CALLEE_SAVE_FRAME ret END_FUNCTION art_quick_assignable_from_code @@ -1733,3 +1793,14 @@ DEFINE_FUNCTION art_nested_signal_return call PLT_SYMBOL(longjmp) UNREACHABLE END_FUNCTION art_nested_signal_return + +DEFINE_FUNCTION art_quick_read_barrier_slow + SETUP_FP_CALLEE_SAVE_FRAME + subq LITERAL(8), %rsp // Alignment padding. + CFI_ADJUST_CFA_OFFSET(8) + call SYMBOL(artReadBarrierSlow) // artReadBarrierSlow(ref, obj, offset) + addq LITERAL(8), %rsp + CFI_ADJUST_CFA_OFFSET(-8) + RESTORE_FP_CALLEE_SAVE_FRAME + ret +END_FUNCTION art_quick_read_barrier_slow diff --git a/runtime/art_field-inl.h b/runtime/art_field-inl.h index 73beb1f168..5138cc99bf 100644 --- a/runtime/art_field-inl.h +++ b/runtime/art_field-inl.h @@ -253,7 +253,7 @@ inline void ArtField::SetObject(mirror::Object* object, mirror::Object* l) { SetObj<kTransactionActive>(object, l); } -inline const char* ArtField::GetName() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +inline const char* ArtField::GetName() SHARED_REQUIRES(Locks::mutator_lock_) { uint32_t field_index = GetDexFieldIndex(); if (UNLIKELY(GetDeclaringClass()->IsProxyClass())) { DCHECK(IsStatic()); @@ -264,7 +264,7 @@ inline const char* ArtField::GetName() SHARED_LOCKS_REQUIRED(Locks::mutator_lock return dex_file->GetFieldName(dex_file->GetFieldId(field_index)); } -inline const char* ArtField::GetTypeDescriptor() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +inline const char* ArtField::GetTypeDescriptor() SHARED_REQUIRES(Locks::mutator_lock_) { uint32_t field_index = GetDexFieldIndex(); if (UNLIKELY(GetDeclaringClass()->IsProxyClass())) { DCHECK(IsStatic()); @@ -278,11 +278,11 @@ inline const char* ArtField::GetTypeDescriptor() SHARED_LOCKS_REQUIRED(Locks::mu } inline Primitive::Type ArtField::GetTypeAsPrimitiveType() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return Primitive::GetType(GetTypeDescriptor()[0]); } -inline bool ArtField::IsPrimitiveType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +inline bool ArtField::IsPrimitiveType() SHARED_REQUIRES(Locks::mutator_lock_) { return GetTypeAsPrimitiveType() != Primitive::kPrimNot; } @@ -304,15 +304,15 @@ inline mirror::Class* ArtField::GetType() { return type; } -inline size_t ArtField::FieldSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +inline size_t ArtField::FieldSize() SHARED_REQUIRES(Locks::mutator_lock_) { return Primitive::ComponentSize(GetTypeAsPrimitiveType()); } -inline mirror::DexCache* ArtField::GetDexCache() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +inline mirror::DexCache* ArtField::GetDexCache() SHARED_REQUIRES(Locks::mutator_lock_) { return GetDeclaringClass()->GetDexCache(); } -inline const DexFile* ArtField::GetDexFile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +inline const DexFile* ArtField::GetDexFile() SHARED_REQUIRES(Locks::mutator_lock_) { return GetDexCache()->GetDexFile(); } diff --git a/runtime/art_field.h b/runtime/art_field.h index 7a03723d00..fa0694b8ca 100644 --- a/runtime/art_field.h +++ b/runtime/art_field.h @@ -42,27 +42,27 @@ class ArtField FINAL { public: ArtField(); - mirror::Class* 
GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::Class* GetDeclaringClass() SHARED_REQUIRES(Locks::mutator_lock_); void SetDeclaringClass(mirror::Class *new_declaring_class) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - uint32_t GetAccessFlags() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + uint32_t GetAccessFlags() SHARED_REQUIRES(Locks::mutator_lock_); - void SetAccessFlags(uint32_t new_access_flags) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetAccessFlags(uint32_t new_access_flags) SHARED_REQUIRES(Locks::mutator_lock_) { // Not called within a transaction. access_flags_ = new_access_flags; } - bool IsPublic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsPublic() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags() & kAccPublic) != 0; } - bool IsStatic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsStatic() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags() & kAccStatic) != 0; } - bool IsFinal() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsFinal() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags() & kAccFinal) != 0; } @@ -76,115 +76,116 @@ class ArtField FINAL { } // Offset to field within an Object. - MemberOffset GetOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + MemberOffset GetOffset() SHARED_REQUIRES(Locks::mutator_lock_); static MemberOffset OffsetOffset() { return MemberOffset(OFFSETOF_MEMBER(ArtField, offset_)); } - MemberOffset GetOffsetDuringLinking() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + MemberOffset GetOffsetDuringLinking() SHARED_REQUIRES(Locks::mutator_lock_); - void SetOffset(MemberOffset num_bytes) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetOffset(MemberOffset num_bytes) SHARED_REQUIRES(Locks::mutator_lock_); // field access, null object for static fields - uint8_t GetBoolean(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + uint8_t GetBoolean(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive> - void SetBoolean(mirror::Object* object, uint8_t z) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetBoolean(mirror::Object* object, uint8_t z) SHARED_REQUIRES(Locks::mutator_lock_); - int8_t GetByte(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + int8_t GetByte(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive> - void SetByte(mirror::Object* object, int8_t b) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetByte(mirror::Object* object, int8_t b) SHARED_REQUIRES(Locks::mutator_lock_); - uint16_t GetChar(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + uint16_t GetChar(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive> - void SetChar(mirror::Object* object, uint16_t c) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetChar(mirror::Object* object, uint16_t c) SHARED_REQUIRES(Locks::mutator_lock_); - int16_t GetShort(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + int16_t GetShort(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive> - void SetShort(mirror::Object* object, int16_t s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetShort(mirror::Object* object, int16_t s) SHARED_REQUIRES(Locks::mutator_lock_); - int32_t GetInt(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + int32_t 
GetInt(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive> - void SetInt(mirror::Object* object, int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetInt(mirror::Object* object, int32_t i) SHARED_REQUIRES(Locks::mutator_lock_); - int64_t GetLong(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + int64_t GetLong(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive> - void SetLong(mirror::Object* object, int64_t j) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetLong(mirror::Object* object, int64_t j) SHARED_REQUIRES(Locks::mutator_lock_); - float GetFloat(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + float GetFloat(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive> - void SetFloat(mirror::Object* object, float f) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetFloat(mirror::Object* object, float f) SHARED_REQUIRES(Locks::mutator_lock_); - double GetDouble(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + double GetDouble(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive> - void SetDouble(mirror::Object* object, double d) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetDouble(mirror::Object* object, double d) SHARED_REQUIRES(Locks::mutator_lock_); - mirror::Object* GetObject(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::Object* GetObject(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive> void SetObject(mirror::Object* object, mirror::Object* l) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Raw field accesses. - uint32_t Get32(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + uint32_t Get32(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive> void Set32(mirror::Object* object, uint32_t new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - uint64_t Get64(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + uint64_t Get64(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive> - void Set64(mirror::Object* object, uint64_t new_value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void Set64(mirror::Object* object, uint64_t new_value) SHARED_REQUIRES(Locks::mutator_lock_); - mirror::Object* GetObj(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::Object* GetObj(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive> void SetObj(mirror::Object* object, mirror::Object* new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); + // NO_THREAD_SAFETY_ANALYSIS since we don't know what the callback requires. template<typename RootVisitorType> - void VisitRoots(RootVisitorType& visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void VisitRoots(RootVisitorType& visitor) NO_THREAD_SAFETY_ANALYSIS; - bool IsVolatile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsVolatile() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags() & kAccVolatile) != 0; } // Returns an instance field with this offset in the given class or null if not found. 
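On the VisitRoots change a few lines above: the visitor type is a template parameter, so the thread-safety analysis cannot know what locks an arbitrary callback requires at this declaration, hence NO_THREAD_SAFETY_ANALYSIS rather than SHARED_REQUIRES. A minimal, self-contained sketch; the macro definition here is assumed for illustration, built on Clang's documented no_thread_safety_analysis attribute:

#if defined(__clang__)
#define NO_THREAD_SAFETY_ANALYSIS __attribute__((no_thread_safety_analysis))
#else
#define NO_THREAD_SAFETY_ANALYSIS
#endif

// The analysis is suppressed here because RootVisitorType's own lock
// requirements, if any, are only checked where the callback is defined.
template <typename RootVisitorType>
void VisitRootsSketch(RootVisitorType& visitor) NO_THREAD_SAFETY_ANALYSIS {
  visitor();  // hypothetical callback invocation; the real VisitRoots visits each root
}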
static ArtField* FindInstanceFieldWithOffset(mirror::Class* klass, uint32_t field_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Returns a static field with this offset in the given class or null if not found. static ArtField* FindStaticFieldWithOffset(mirror::Class* klass, uint32_t field_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - const char* GetName() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const char* GetName() SHARED_REQUIRES(Locks::mutator_lock_); // Resolves / returns the name from the dex cache. mirror::String* GetStringName(Thread* self, bool resolve) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - const char* GetTypeDescriptor() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const char* GetTypeDescriptor() SHARED_REQUIRES(Locks::mutator_lock_); - Primitive::Type GetTypeAsPrimitiveType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + Primitive::Type GetTypeAsPrimitiveType() SHARED_REQUIRES(Locks::mutator_lock_); - bool IsPrimitiveType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsPrimitiveType() SHARED_REQUIRES(Locks::mutator_lock_); template <bool kResolve> - mirror::Class* GetType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::Class* GetType() SHARED_REQUIRES(Locks::mutator_lock_); - size_t FieldSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + size_t FieldSize() SHARED_REQUIRES(Locks::mutator_lock_); - mirror::DexCache* GetDexCache() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::DexCache* GetDexCache() SHARED_REQUIRES(Locks::mutator_lock_); - const DexFile* GetDexFile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const DexFile* GetDexFile() SHARED_REQUIRES(Locks::mutator_lock_); GcRoot<mirror::Class>& DeclaringClassRoot() { return declaring_class_; @@ -192,11 +193,11 @@ class ArtField FINAL { private: mirror::Class* ProxyFindSystemClass(const char* descriptor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - mirror::Class* ResolveGetType(uint32_t type_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); + mirror::Class* ResolveGetType(uint32_t type_idx) SHARED_REQUIRES(Locks::mutator_lock_); mirror::String* ResolveGetStringName(Thread* self, const DexFile& dex_file, uint32_t string_idx, mirror::DexCache* dex_cache) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); GcRoot<mirror::Class> declaring_class_; diff --git a/runtime/art_method.cc b/runtime/art_method.cc index 7673418fd1..f37e0407ca 100644 --- a/runtime/art_method.cc +++ b/runtime/art_method.cc @@ -19,6 +19,7 @@ #include "arch/context.h" #include "art_field-inl.h" #include "art_method-inl.h" +#include "base/out.h" #include "base/stringpiece.h" #include "dex_file-inl.h" #include "dex_instruction.h" @@ -94,7 +95,7 @@ size_t ArtMethod::NumArgRegisters(const StringPiece& shorty) { } static bool HasSameNameAndSignature(ArtMethod* method1, ArtMethod* method2) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedAssertNoThreadSuspension ants(Thread::Current(), "HasSameNameAndSignature"); const DexFile* dex_file = method1->GetDexFile(); const DexFile::MethodId& mid = dex_file->GetMethodId(method1->GetDexMethodIndex()); @@ -455,7 +456,7 @@ void ArtMethod::Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue* // Counts the number of references in the parameter list of the corresponding method. 
// Note: Thus does _not_ include "this" for non-static methods. static uint32_t GetNumberOfReferenceArgsWithoutReceiver(ArtMethod* method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { uint32_t shorty_len; const char* shorty = method->GetShorty(&shorty_len); uint32_t refs = 0; @@ -565,7 +566,7 @@ bool ArtMethod::EqualParameters(Handle<mirror::ObjectArray<mirror::Class>> param const uint8_t* ArtMethod::GetQuickenedInfo() { bool found = false; OatFile::OatMethod oat_method = - Runtime::Current()->GetClassLinker()->FindOatMethodFor(this, &found); + Runtime::Current()->GetClassLinker()->FindOatMethodFor(this, outof(found)); if (!found || (oat_method.GetQuickCode() != nullptr)) { return nullptr; } diff --git a/runtime/art_method.h b/runtime/art_method.h index 4169c5ebd9..90352b7c08 100644 --- a/runtime/art_method.h +++ b/runtime/art_method.h @@ -54,24 +54,24 @@ class ArtMethod FINAL { static ArtMethod* FromReflectedMethod(const ScopedObjectAccessAlreadyRunnable& soa, jobject jlr_method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - ALWAYS_INLINE mirror::Class* GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ALWAYS_INLINE mirror::Class* GetDeclaringClass() SHARED_REQUIRES(Locks::mutator_lock_); ALWAYS_INLINE mirror::Class* GetDeclaringClassNoBarrier() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ALWAYS_INLINE mirror::Class* GetDeclaringClassUnchecked() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void SetDeclaringClass(mirror::Class *new_declaring_class) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static MemberOffset DeclaringClassOffset() { return MemberOffset(OFFSETOF_MEMBER(ArtMethod, declaring_class_)); } - ALWAYS_INLINE uint32_t GetAccessFlags() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ALWAYS_INLINE uint32_t GetAccessFlags() SHARED_REQUIRES(Locks::mutator_lock_); void SetAccessFlags(uint32_t new_access_flags) { // Not called within a transaction. @@ -79,35 +79,35 @@ class ArtMethod FINAL { } // Approximate what kind of method call would be used for this method. - InvokeType GetInvokeType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + InvokeType GetInvokeType() SHARED_REQUIRES(Locks::mutator_lock_); // Returns true if the method is declared public. - bool IsPublic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsPublic() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags() & kAccPublic) != 0; } // Returns true if the method is declared private. - bool IsPrivate() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsPrivate() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags() & kAccPrivate) != 0; } // Returns true if the method is declared static. - bool IsStatic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsStatic() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags() & kAccStatic) != 0; } // Returns true if the method is a constructor. - bool IsConstructor() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsConstructor() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags() & kAccConstructor) != 0; } // Returns true if the method is a class initializer. 
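The art_method.cc hunk above also switches FindOatMethodFor from a raw &found out-parameter to outof(found), via the newly included base/out.h. That header is not shown in this section, so the following is only a rough sketch of the idea; everything beyond the out/outof names is an assumption:

// Rough sketch of an out-parameter wrapper in the spirit of outof(); the real
// base/out.h may differ substantially.
template <typename T>
class out {
 public:
  explicit out(T& param) : param_(&param) {}
  T& operator*() const { return *param_; }
  T* operator->() const { return param_; }
 private:
  T* param_;
};

template <typename T>
out<T> outof(T& param) {
  // Makes the out-parameter explicit at the call site: f(outof(found)) instead of f(&found).
  return out<T>(param);
}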
- bool IsClassInitializer() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsClassInitializer() SHARED_REQUIRES(Locks::mutator_lock_) { return IsConstructor() && IsStatic(); } // Returns true if the method is static, private, or a constructor. - bool IsDirect() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsDirect() SHARED_REQUIRES(Locks::mutator_lock_) { return IsDirect(GetAccessFlags()); } @@ -116,56 +116,56 @@ class ArtMethod FINAL { } // Returns true if the method is declared synchronized. - bool IsSynchronized() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsSynchronized() SHARED_REQUIRES(Locks::mutator_lock_) { uint32_t synchonized = kAccSynchronized | kAccDeclaredSynchronized; return (GetAccessFlags() & synchonized) != 0; } - bool IsFinal() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsFinal() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags() & kAccFinal) != 0; } - bool IsMiranda() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsMiranda() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags() & kAccMiranda) != 0; } - bool IsNative() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsNative() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags() & kAccNative) != 0; } - bool ShouldNotInline() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool ShouldNotInline() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags() & kAccDontInline) != 0; } - void SetShouldNotInline() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetShouldNotInline() SHARED_REQUIRES(Locks::mutator_lock_) { SetAccessFlags(GetAccessFlags() | kAccDontInline); } - bool IsFastNative() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsFastNative() SHARED_REQUIRES(Locks::mutator_lock_) { uint32_t mask = kAccFastNative | kAccNative; return (GetAccessFlags() & mask) == mask; } - bool IsAbstract() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsAbstract() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags() & kAccAbstract) != 0; } - bool IsSynthetic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsSynthetic() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags() & kAccSynthetic) != 0; } - bool IsProxyMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsProxyMethod() SHARED_REQUIRES(Locks::mutator_lock_); - bool IsPreverified() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsPreverified() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags() & kAccPreverified) != 0; } - void SetPreverified() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetPreverified() SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(!IsPreverified()); SetAccessFlags(GetAccessFlags() | kAccPreverified); } - bool IsOptimized(size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsOptimized(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_) { // Temporary solution for detecting if a method has been optimized: the compiler // does not create a GC map. Instead, the vmap table contains the stack map // (as in stack_map.h). 
@@ -175,18 +175,18 @@ class ArtMethod FINAL { && GetNativeGcMap(pointer_size) == nullptr; } - bool CheckIncompatibleClassChange(InvokeType type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool CheckIncompatibleClassChange(InvokeType type) SHARED_REQUIRES(Locks::mutator_lock_); - uint16_t GetMethodIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + uint16_t GetMethodIndex() SHARED_REQUIRES(Locks::mutator_lock_); // Doesn't do erroneous / unresolved class checks. - uint16_t GetMethodIndexDuringLinking() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + uint16_t GetMethodIndexDuringLinking() SHARED_REQUIRES(Locks::mutator_lock_); - size_t GetVtableIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + size_t GetVtableIndex() SHARED_REQUIRES(Locks::mutator_lock_) { return GetMethodIndex(); } - void SetMethodIndex(uint16_t new_method_index) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetMethodIndex(uint16_t new_method_index) SHARED_REQUIRES(Locks::mutator_lock_) { // Not called within a transaction. method_index_ = new_method_index; } @@ -211,7 +211,7 @@ class ArtMethod FINAL { // Number of 32bit registers that would be required to hold all the arguments static size_t NumArgRegisters(const StringPiece& shorty); - ALWAYS_INLINE uint32_t GetDexMethodIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ALWAYS_INLINE uint32_t GetDexMethodIndex() SHARED_REQUIRES(Locks::mutator_lock_); void SetDexMethodIndex(uint32_t new_idx) { // Not called within a transaction. @@ -227,36 +227,36 @@ class ArtMethod FINAL { } ALWAYS_INLINE mirror::PointerArray* GetDexCacheResolvedMethods() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ALWAYS_INLINE ArtMethod* GetDexCacheResolvedMethod(uint16_t method_idx, size_t ptr_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ALWAYS_INLINE void SetDexCacheResolvedMethod(uint16_t method_idx, ArtMethod* new_method, size_t ptr_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ALWAYS_INLINE void SetDexCacheResolvedMethods(mirror::PointerArray* new_dex_cache_methods) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool HasDexCacheResolvedMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); + bool HasDexCacheResolvedMethods() SHARED_REQUIRES(Locks::mutator_lock_); bool HasSameDexCacheResolvedMethods(ArtMethod* other) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool HasSameDexCacheResolvedMethods(mirror::PointerArray* other_cache) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template <bool kWithCheck = true> mirror::Class* GetDexCacheResolvedType(uint32_t type_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void SetDexCacheResolvedTypes(mirror::ObjectArray<mirror::Class>* new_dex_cache_types) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool HasDexCacheResolvedTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool HasSameDexCacheResolvedTypes(ArtMethod* other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); + bool HasDexCacheResolvedTypes() SHARED_REQUIRES(Locks::mutator_lock_); + bool HasSameDexCacheResolvedTypes(ArtMethod* other) SHARED_REQUIRES(Locks::mutator_lock_); bool HasSameDexCacheResolvedTypes(mirror::ObjectArray<mirror::Class>* other_cache) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + 
SHARED_REQUIRES(Locks::mutator_lock_); // Get the Class* from the type index into this method's dex cache. mirror::Class* GetClassFromTypeIndex(uint16_t type_idx, bool resolve) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Find the method that this method overrides. - ArtMethod* FindOverriddenMethod(size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ArtMethod* FindOverriddenMethod(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_); // Find the method index for this method within other_dexfile. If this method isn't present then // return DexFile::kDexNoIndex. The name_and_signature_idx MUST refer to a MethodId with the same @@ -264,10 +264,10 @@ class ArtMethod FINAL { // in the other_dexfile. uint32_t FindDexMethodIndexInOtherDexFile(const DexFile& other_dexfile, uint32_t name_and_signature_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue* result, const char* shorty) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); const void* GetEntryPointFromQuickCompiledCode() { return GetEntryPointFromQuickCompiledCodePtrSize(sizeof(void*)); @@ -287,7 +287,7 @@ class ArtMethod FINAL { entry_point_from_quick_compiled_code, pointer_size); } - uint32_t GetCodeSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + uint32_t GetCodeSize() SHARED_REQUIRES(Locks::mutator_lock_); // Check whether the given PC is within the quick compiled code associated with this method's // quick entrypoint. This code isn't robust for instrumentation, etc. and is only used for @@ -297,12 +297,12 @@ class ArtMethod FINAL { reinterpret_cast<uintptr_t>(GetEntryPointFromQuickCompiledCode()), pc); } - void AssertPcIsWithinQuickCode(uintptr_t pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void AssertPcIsWithinQuickCode(uintptr_t pc) SHARED_REQUIRES(Locks::mutator_lock_); // Returns true if the entrypoint points to the interpreter, as // opposed to the compiled code, that is, this method will be // interpretered on invocation. - bool IsEntrypointInterpreter() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsEntrypointInterpreter() SHARED_REQUIRES(Locks::mutator_lock_); uint32_t GetQuickOatCodeOffset(); void SetQuickOatCodeOffset(uint32_t code_offset); @@ -317,37 +317,37 @@ class ArtMethod FINAL { // Actual entry point pointer to compiled oat code or null. const void* GetQuickOatEntryPoint(size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Actual pointer to compiled oat code or null. const void* GetQuickOatCodePointer(size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return EntryPointToCodePointer(GetQuickOatEntryPoint(pointer_size)); } // Callers should wrap the uint8_t* in a MappingTable instance for convenient access. const uint8_t* GetMappingTable(size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); const uint8_t* GetMappingTable(const void* code_pointer, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Callers should wrap the uint8_t* in a VmapTable instance for convenient access. 
const uint8_t* GetVmapTable(size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); const uint8_t* GetVmapTable(const void* code_pointer, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - const uint8_t* GetQuickenedInfo() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const uint8_t* GetQuickenedInfo() SHARED_REQUIRES(Locks::mutator_lock_); - CodeInfo GetOptimizedCodeInfo() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + CodeInfo GetOptimizedCodeInfo() SHARED_REQUIRES(Locks::mutator_lock_); // Callers should wrap the uint8_t* in a GcMap instance for convenient access. const uint8_t* GetNativeGcMap(size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); const uint8_t* GetNativeGcMap(const void* code_pointer, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template <bool kCheckFrameSize = true> - uint32_t GetFrameSizeInBytes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + uint32_t GetFrameSizeInBytes() SHARED_REQUIRES(Locks::mutator_lock_) { uint32_t result = GetQuickFrameInfo().FrameSizeInBytes(); if (kCheckFrameSize) { DCHECK_LE(static_cast<size_t>(kStackAlignment), result); @@ -355,30 +355,30 @@ class ArtMethod FINAL { return result; } - QuickMethodFrameInfo GetQuickFrameInfo() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + QuickMethodFrameInfo GetQuickFrameInfo() SHARED_REQUIRES(Locks::mutator_lock_); QuickMethodFrameInfo GetQuickFrameInfo(const void* code_pointer) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - FrameOffset GetReturnPcOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + FrameOffset GetReturnPcOffset() SHARED_REQUIRES(Locks::mutator_lock_) { return GetReturnPcOffset(GetFrameSizeInBytes()); } FrameOffset GetReturnPcOffset(uint32_t frame_size_in_bytes) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK_EQ(frame_size_in_bytes, GetFrameSizeInBytes()); return FrameOffset(frame_size_in_bytes - sizeof(void*)); } - FrameOffset GetHandleScopeOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + FrameOffset GetHandleScopeOffset() SHARED_REQUIRES(Locks::mutator_lock_) { constexpr size_t handle_scope_offset = sizeof(ArtMethod*); DCHECK_LT(handle_scope_offset, GetFrameSizeInBytes()); return FrameOffset(handle_scope_offset); } void RegisterNative(const void* native_method, bool is_fast) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - void UnregisterNative() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void UnregisterNative() SHARED_REQUIRES(Locks::mutator_lock_); static MemberOffset EntryPointFromJniOffset(size_t pointer_size) { return MemberOffset(PtrSizedFieldsOffset(pointer_size) + OFFSETOF_MEMBER( @@ -397,7 +397,7 @@ class ArtMethod FINAL { return GetEntryPoint<void*>(EntryPointFromJniOffset(pointer_size), pointer_size); } - void SetEntryPointFromJni(const void* entrypoint) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetEntryPointFromJni(const void* entrypoint) SHARED_REQUIRES(Locks::mutator_lock_) { SetEntryPointFromJniPtrSize(entrypoint, sizeof(void*)); } ALWAYS_INLINE void SetEntryPointFromJniPtrSize(const void* entrypoint, size_t pointer_size) { @@ -409,34 +409,34 @@ class ArtMethod FINAL { ALWAYS_INLINE bool IsRuntimeMethod(); // Is this a hand crafted method used for something like describing callee saves? 
- bool IsCalleeSaveMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsCalleeSaveMethod() SHARED_REQUIRES(Locks::mutator_lock_); - bool IsResolutionMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsResolutionMethod() SHARED_REQUIRES(Locks::mutator_lock_); - bool IsImtConflictMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsImtConflictMethod() SHARED_REQUIRES(Locks::mutator_lock_); - bool IsImtUnimplementedMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsImtUnimplementedMethod() SHARED_REQUIRES(Locks::mutator_lock_); - uintptr_t NativeQuickPcOffset(const uintptr_t pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + uintptr_t NativeQuickPcOffset(const uintptr_t pc) SHARED_REQUIRES(Locks::mutator_lock_); #ifdef NDEBUG uintptr_t NativeQuickPcOffset(const uintptr_t pc, const void* quick_entry_point) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return pc - reinterpret_cast<uintptr_t>(quick_entry_point); } #else uintptr_t NativeQuickPcOffset(const uintptr_t pc, const void* quick_entry_point) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); #endif // Converts a native PC to a dex PC. uint32_t ToDexPc(const uintptr_t pc, bool abort_on_failure = true) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Converts a dex PC to a native PC. uintptr_t ToNativeQuickPc(const uint32_t dex_pc, bool abort_on_failure = true) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - MethodReference ToMethodReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + MethodReference ToMethodReference() SHARED_REQUIRES(Locks::mutator_lock_) { return MethodReference(GetDexFile(), GetDexMethodIndex()); } @@ -445,63 +445,64 @@ class ArtMethod FINAL { // a move-exception instruction is present. uint32_t FindCatchBlock(Handle<mirror::Class> exception_type, uint32_t dex_pc, bool* has_no_move_exception) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); + // NO_THREAD_SAFETY_ANALYSIS since we don't know what the callback requires. 
template<typename RootVisitorType> - void VisitRoots(RootVisitorType& visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void VisitRoots(RootVisitorType& visitor) NO_THREAD_SAFETY_ANALYSIS; - const DexFile* GetDexFile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const DexFile* GetDexFile() SHARED_REQUIRES(Locks::mutator_lock_); - const char* GetDeclaringClassDescriptor() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const char* GetDeclaringClassDescriptor() SHARED_REQUIRES(Locks::mutator_lock_); - const char* GetShorty() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + const char* GetShorty() SHARED_REQUIRES(Locks::mutator_lock_) { uint32_t unused_length; return GetShorty(&unused_length); } - const char* GetShorty(uint32_t* out_length) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const char* GetShorty(uint32_t* out_length) SHARED_REQUIRES(Locks::mutator_lock_); - const Signature GetSignature() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const Signature GetSignature() SHARED_REQUIRES(Locks::mutator_lock_); - ALWAYS_INLINE const char* GetName() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ALWAYS_INLINE const char* GetName() SHARED_REQUIRES(Locks::mutator_lock_); - mirror::String* GetNameAsString(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::String* GetNameAsString(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_); - const DexFile::CodeItem* GetCodeItem() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const DexFile::CodeItem* GetCodeItem() SHARED_REQUIRES(Locks::mutator_lock_); - bool IsResolvedTypeIdx(uint16_t type_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsResolvedTypeIdx(uint16_t type_idx) SHARED_REQUIRES(Locks::mutator_lock_); - int32_t GetLineNumFromDexPC(uint32_t dex_pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + int32_t GetLineNumFromDexPC(uint32_t dex_pc) SHARED_REQUIRES(Locks::mutator_lock_); - const DexFile::ProtoId& GetPrototype() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const DexFile::ProtoId& GetPrototype() SHARED_REQUIRES(Locks::mutator_lock_); - const DexFile::TypeList* GetParameterTypeList() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const DexFile::TypeList* GetParameterTypeList() SHARED_REQUIRES(Locks::mutator_lock_); - const char* GetDeclaringClassSourceFile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const char* GetDeclaringClassSourceFile() SHARED_REQUIRES(Locks::mutator_lock_); - uint16_t GetClassDefIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + uint16_t GetClassDefIndex() SHARED_REQUIRES(Locks::mutator_lock_); - const DexFile::ClassDef& GetClassDef() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const DexFile::ClassDef& GetClassDef() SHARED_REQUIRES(Locks::mutator_lock_); - const char* GetReturnTypeDescriptor() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const char* GetReturnTypeDescriptor() SHARED_REQUIRES(Locks::mutator_lock_); const char* GetTypeDescriptorFromTypeIdx(uint16_t type_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // May cause thread suspension due to GetClassFromTypeIdx calling ResolveType this caused a large // number of bugs at call sites. 
- mirror::Class* GetReturnType(bool resolve = true) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::Class* GetReturnType(bool resolve = true) SHARED_REQUIRES(Locks::mutator_lock_); - mirror::ClassLoader* GetClassLoader() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::ClassLoader* GetClassLoader() SHARED_REQUIRES(Locks::mutator_lock_); - mirror::DexCache* GetDexCache() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::DexCache* GetDexCache() SHARED_REQUIRES(Locks::mutator_lock_); ALWAYS_INLINE ArtMethod* GetInterfaceMethodIfProxy(size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // May cause thread suspension due to class resolution. bool EqualParameters(Handle<mirror::ObjectArray<mirror::Class>> params) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Size of an instance of this object. static size_t ObjectSize(size_t pointer_size) { @@ -510,10 +511,10 @@ class ArtMethod FINAL { } void CopyFrom(const ArtMethod* src, size_t image_pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ALWAYS_INLINE mirror::ObjectArray<mirror::Class>* GetDexCacheResolvedTypes() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); protected: // Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses". diff --git a/runtime/asm_support.h b/runtime/asm_support.h index b1d0841964..350a0d4c15 100644 --- a/runtime/asm_support.h +++ b/runtime/asm_support.h @@ -103,17 +103,20 @@ ADD_TEST_EQ(THREAD_EXCEPTION_OFFSET, ADD_TEST_EQ(THREAD_TOP_QUICK_FRAME_OFFSET, art::Thread::TopOfManagedStackOffset<__SIZEOF_POINTER__>().Int32Value()) -// Offset of field Thread::tlsPtr_.managed_stack.top_quick_frame_. +// Offset of field Thread::tlsPtr_.self. #define THREAD_SELF_OFFSET (THREAD_CARD_TABLE_OFFSET + (9 * __SIZEOF_POINTER__)) ADD_TEST_EQ(THREAD_SELF_OFFSET, art::Thread::SelfOffset<__SIZEOF_POINTER__>().Int32Value()) -#define THREAD_LOCAL_POS_OFFSET (THREAD_CARD_TABLE_OFFSET + 150 * __SIZEOF_POINTER__) +// Offset of field Thread::tlsPtr_.thread_local_pos. +#define THREAD_LOCAL_POS_OFFSET (THREAD_CARD_TABLE_OFFSET + 151 * __SIZEOF_POINTER__) ADD_TEST_EQ(THREAD_LOCAL_POS_OFFSET, art::Thread::ThreadLocalPosOffset<__SIZEOF_POINTER__>().Int32Value()) +// Offset of field Thread::tlsPtr_.thread_local_end. #define THREAD_LOCAL_END_OFFSET (THREAD_LOCAL_POS_OFFSET + __SIZEOF_POINTER__) ADD_TEST_EQ(THREAD_LOCAL_END_OFFSET, art::Thread::ThreadLocalEndOffset<__SIZEOF_POINTER__>().Int32Value()) +// Offset of field Thread::tlsPtr_.thread_local_objects. #define THREAD_LOCAL_OBJECTS_OFFSET (THREAD_LOCAL_POS_OFFSET + 2 * __SIZEOF_POINTER__) ADD_TEST_EQ(THREAD_LOCAL_OBJECTS_OFFSET, art::Thread::ThreadLocalObjectsOffset<__SIZEOF_POINTER__>().Int32Value()) diff --git a/runtime/barrier.h b/runtime/barrier.h index 0e7f61ef71..02f9f58ff0 100644 --- a/runtime/barrier.h +++ b/runtime/barrier.h @@ -39,10 +39,10 @@ class Barrier { virtual ~Barrier(); // Pass through the barrier, decrement the count but do not block. - void Pass(Thread* self); + void Pass(Thread* self) REQUIRES(!lock_); // Wait on the barrier, decrement the count. - void Wait(Thread* self); + void Wait(Thread* self) REQUIRES(!lock_); // The following three calls are only safe if we somehow know that no other thread both // - has been woken up, and @@ -51,18 +51,18 @@ class Barrier { // to sleep, resulting in a deadlock. 
// Increment the count by delta, wait on condition if count is non zero. - void Increment(Thread* self, int delta) LOCKS_EXCLUDED(lock_); + void Increment(Thread* self, int delta) REQUIRES(!lock_); // Increment the count by delta, wait on condition if count is non zero, with a timeout. Returns // true if time out occurred. - bool Increment(Thread* self, int delta, uint32_t timeout_ms) LOCKS_EXCLUDED(lock_); + bool Increment(Thread* self, int delta, uint32_t timeout_ms) REQUIRES(!lock_); // Set the count to a new value. This should only be used if there is no possibility that // another thread is still in Wait(). See above. - void Init(Thread* self, int count); + void Init(Thread* self, int count) REQUIRES(!lock_); private: - void SetCountLocked(Thread* self, int count) EXCLUSIVE_LOCKS_REQUIRED(lock_); + void SetCountLocked(Thread* self, int count) REQUIRES(lock_); // Counter, when this reaches 0 all people blocked on the barrier are signalled. int count_ GUARDED_BY(lock_); diff --git a/runtime/base/arena_allocator.h b/runtime/base/arena_allocator.h index d977941daa..c4b36ee766 100644 --- a/runtime/base/arena_allocator.h +++ b/runtime/base/arena_allocator.h @@ -182,12 +182,12 @@ class ArenaPool { public: explicit ArenaPool(bool use_malloc = true, bool low_4gb = false); ~ArenaPool(); - Arena* AllocArena(size_t size) LOCKS_EXCLUDED(lock_); - void FreeArenaChain(Arena* first) LOCKS_EXCLUDED(lock_); - size_t GetBytesAllocated() const LOCKS_EXCLUDED(lock_); + Arena* AllocArena(size_t size) REQUIRES(!lock_); + void FreeArenaChain(Arena* first) REQUIRES(!lock_); + size_t GetBytesAllocated() const REQUIRES(!lock_); // Trim the maps in arenas by madvising, used by JIT to reduce memory usage. This only works if // use_malloc is false. - void TrimMaps() LOCKS_EXCLUDED(lock_); + void TrimMaps() REQUIRES(!lock_); private: const bool use_malloc_; diff --git a/runtime/base/hash_set.h b/runtime/base/hash_set.h index 709d9ae771..d110fe30b7 100644 --- a/runtime/base/hash_set.h +++ b/runtime/base/hash_set.h @@ -470,31 +470,31 @@ class HashSet { } void DeallocateStorage() { - if (num_buckets_ != 0) { - if (owns_data_) { - for (size_t i = 0; i < NumBuckets(); ++i) { - allocfn_.destroy(allocfn_.address(data_[i])); - } + if (owns_data_) { + for (size_t i = 0; i < NumBuckets(); ++i) { + allocfn_.destroy(allocfn_.address(data_[i])); + } + if (data_ != nullptr) { allocfn_.deallocate(data_, NumBuckets()); - owns_data_ = false; } - data_ = nullptr; - num_buckets_ = 0; + owns_data_ = false; } + data_ = nullptr; + num_buckets_ = 0; } // Expand the set based on the load factors. void Expand() { size_t min_index = static_cast<size_t>(Size() / min_load_factor_); - if (min_index < kMinBuckets) { - min_index = kMinBuckets; - } // Resize based on the minimum load factor. Resize(min_index); } // Expand / shrink the table to the new specified size. void Resize(size_t new_size) { + if (new_size < kMinBuckets) { + new_size = kMinBuckets; + } DCHECK_GE(new_size, Size()); T* const old_data = data_; size_t old_num_buckets = num_buckets_; diff --git a/runtime/base/logging.cc b/runtime/base/logging.cc index 859de4bd5b..7a620e375b 100644 --- a/runtime/base/logging.cc +++ b/runtime/base/logging.cc @@ -26,7 +26,7 @@ #include "utils.h" // Headers for LogMessage::LogLine. -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ #include "cutils/log.h" #else #include <sys/types.h> @@ -47,7 +47,7 @@ static std::unique_ptr<std::string> gProgramInvocationShortName; // Print INTERNAL_FATAL messages directly instead of at destruction time.
This only works on the // host right now: for the device, a stream buf collating output into lines and calling LogLine or // lower-level logging is necessary. -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ static constexpr bool kPrintInternalFatalDirectly = false; #else static constexpr bool kPrintInternalFatalDirectly = !kIsTargetBuild; @@ -234,7 +234,7 @@ std::ostream& LogMessage::stream() { return data_->GetBuffer(); } -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ static const android_LogPriority kLogSeverityToAndroidLogPriority[] = { ANDROID_LOG_VERBOSE, ANDROID_LOG_DEBUG, ANDROID_LOG_INFO, ANDROID_LOG_WARN, ANDROID_LOG_ERROR, ANDROID_LOG_FATAL, ANDROID_LOG_FATAL @@ -245,7 +245,7 @@ static_assert(arraysize(kLogSeverityToAndroidLogPriority) == INTERNAL_FATAL + 1, void LogMessage::LogLine(const char* file, unsigned int line, LogSeverity log_severity, const char* message) { -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ const char* tag = ProgramInvocationShortName(); int priority = kLogSeverityToAndroidLogPriority[log_severity]; if (priority == ANDROID_LOG_FATAL) { @@ -264,7 +264,7 @@ void LogMessage::LogLine(const char* file, unsigned int line, LogSeverity log_se void LogMessage::LogLineLowStack(const char* file, unsigned int line, LogSeverity log_severity, const char* message) { -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ // Use android_writeLog() to avoid stack-based buffers used by android_printLog(). const char* tag = ProgramInvocationShortName(); int priority = kLogSeverityToAndroidLogPriority[log_severity]; diff --git a/runtime/base/logging.h b/runtime/base/logging.h index 93d4edcb20..2cd1a4de9f 100644 --- a/runtime/base/logging.h +++ b/runtime/base/logging.h @@ -237,7 +237,7 @@ class LogMessage { public: LogMessage(const char* file, unsigned int line, LogSeverity severity, int error); - ~LogMessage(); // TODO: enable LOCKS_EXCLUDED(Locks::logging_lock_). + ~LogMessage(); // TODO: enable REQUIRES(!Locks::logging_lock_). // Returns the stream associated with the message, the LogMessage performs output when it goes // out of scope. diff --git a/runtime/base/macros.h b/runtime/base/macros.h index 5c596471c2..1d5dee23f8 100644 --- a/runtime/base/macros.h +++ b/runtime/base/macros.h @@ -244,18 +244,14 @@ template<typename... T> void UNUSED(const T&...) {} #define ACQUIRED_AFTER(...) THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__)) #define ACQUIRED_BEFORE(...) THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__)) -#define EXCLUSIVE_LOCKS_REQUIRED(...) THREAD_ANNOTATION_ATTRIBUTE__(exclusive_locks_required(__VA_ARGS__)) #define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x)) #define GUARDED_VAR THREAD_ANNOTATION_ATTRIBUTE__(guarded) -#define LOCKABLE THREAD_ANNOTATION_ATTRIBUTE__(lockable) #define LOCK_RETURNED(x) THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x)) -#define LOCKS_EXCLUDED(...) THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__)) #define NO_THREAD_SAFETY_ANALYSIS THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis) #define PT_GUARDED_BY(x) // THREAD_ANNOTATION_ATTRIBUTE__(point_to_guarded_by(x)) #define PT_GUARDED_VAR THREAD_ANNOTATION_ATTRIBUTE__(point_to_guarded) #define SCOPED_LOCKABLE THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable) -#define SHARED_LOCKS_REQUIRED(...) THREAD_ANNOTATION_ATTRIBUTE__(shared_locks_required(__VA_ARGS__)) #if defined(__clang__) #define EXCLUSIVE_LOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(exclusive_lock_function(__VA_ARGS__)) @@ -263,12 +259,43 @@ template<typename... T> void UNUSED(const T&...) 
{} #define SHARED_LOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(shared_lock_function(__VA_ARGS__)) #define SHARED_TRYLOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(shared_trylock_function(__VA_ARGS__)) #define UNLOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(unlock_function(__VA_ARGS__)) +#define REQUIRES(...) THREAD_ANNOTATION_ATTRIBUTE__(requires_capability(__VA_ARGS__)) +#define SHARED_REQUIRES(...) THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__)) +#define CAPABILITY(...) THREAD_ANNOTATION_ATTRIBUTE__(capability(__VA_ARGS__)) +#define SHARED_CAPABILITY(...) THREAD_ANNOTATION_ATTRIBUTE__(shared_capability(__VA_ARGS__)) +#define ASSERT_CAPABILITY(...) THREAD_ANNOTATION_ATTRIBUTE__(assert_capability(__VA_ARGS__)) +#define ASSERT_SHARED_CAPABILITY(...) THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_capability(__VA_ARGS__)) +#define RETURN_CAPABILITY(...) THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(__VA_ARGS__)) +#define TRY_ACQUIRE(...) THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_capability(__VA_ARGS__)) +#define TRY_ACQUIRE_SHARED(...) THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_shared_capability(__VA_ARGS__)) +#define ACQUIRE(...) THREAD_ANNOTATION_ATTRIBUTE__(acquire_capability(__VA_ARGS__)) +#define ACQUIRE_SHARED(...) THREAD_ANNOTATION_ATTRIBUTE__(acquire_shared_capability(__VA_ARGS__)) +#define RELEASE(...) THREAD_ANNOTATION_ATTRIBUTE__(release_capability(__VA_ARGS__)) +#define RELEASE_SHARED(...) THREAD_ANNOTATION_ATTRIBUTE__(release_shared_capability(__VA_ARGS__)) +#define SCOPED_CAPABILITY THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable) #else #define EXCLUSIVE_LOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(exclusive_lock(__VA_ARGS__)) #define EXCLUSIVE_TRYLOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(exclusive_trylock(__VA_ARGS__)) #define SHARED_LOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(shared_lock(__VA_ARGS__)) #define SHARED_TRYLOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(shared_trylock(__VA_ARGS__)) #define UNLOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(unlock(__VA_ARGS__)) +#define REQUIRES(...) +#define SHARED_REQUIRES(...) +#define CAPABILITY(...) +#define SHARED_CAPABILITY(...) +#define ASSERT_CAPABILITY(...) +#define ASSERT_SHARED_CAPABILITY(...) +#define RETURN_CAPABILITY(...) +#define TRY_ACQUIRE(...) +#define TRY_ACQUIRE_SHARED(...) +#define ACQUIRE(...) +#define ACQUIRE_SHARED(...) +#define RELEASE(...) +#define RELEASE_SHARED(...) +#define SCOPED_CAPABILITY #endif +#define LOCKABLE CAPABILITY("mutex") +#define SHARED_LOCKABLE SHARED_CAPABILITY("mutex") + #endif // ART_RUNTIME_BASE_MACROS_H_ diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc index c591a51886..62cfb5243c 100644 --- a/runtime/base/mutex.cc +++ b/runtime/base/mutex.cc @@ -62,6 +62,7 @@ Mutex* Locks::thread_suspend_count_lock_ = nullptr; Mutex* Locks::trace_lock_ = nullptr; Mutex* Locks::unexpected_signal_lock_ = nullptr; Mutex* Locks::lambda_table_lock_ = nullptr; +Uninterruptible Roles::uninterruptible_; struct AllMutexData { // A guard for all_mutexes_ that's not a mutex (Mutexes must CAS to acquire and busy wait). 
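The REQUIRES/SHARED_REQUIRES/ACQUIRE/RELEASE/CAPABILITY macros introduced in runtime/base/macros.h above expand to clang's requires_capability, acquire_capability, release_capability and capability attributes on clang builds, and to nothing otherwise. As a rough sketch only — the SimpleMutex and Counter classes below are hypothetical and not part of this change — code annotated with these macros is expected to compose roughly like this, including the REQUIRES(!lock_) negative-capability form used throughout this patch:

// Hypothetical sketch, not from this patch; assumes "base/macros.h" above is on the include path.
#include <mutex>

#include "base/macros.h"

// A minimal capability type; in the runtime, art::Mutex plays this role.
class CAPABILITY("mutex") SimpleMutex {
 public:
  void Lock() ACQUIRE() { mu_.lock(); }
  void Unlock() RELEASE() { mu_.unlock(); }
  // Returning *this from operator! is what lets callers spell the negative
  // capability REQUIRES(!lock_), mirroring what this patch adds to art::Mutex.
  const SimpleMutex& operator!() const { return *this; }

 private:
  std::mutex mu_;  // std::mutex is used only to keep the sketch self-contained.
};

class Counter {
 public:
  // Caller must *not* already hold lock_; this method acquires and releases it.
  void Increment() REQUIRES(!lock_) {
    lock_.Lock();
    ++value_;
    lock_.Unlock();
  }

  // Caller must hold lock_ for the duration of the call.
  int GetLocked() const REQUIRES(lock_) { return value_; }

 private:
  SimpleMutex lock_;
  int value_ GUARDED_BY(lock_) = 0;
};

With clang's thread-safety analysis enabled, calling Increment() while lock_ is already held, or calling GetLocked() without holding it, should then be flagged at compile time.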
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h index 5b258e5ddb..d0504d993f 100644 --- a/runtime/base/mutex.h +++ b/runtime/base/mutex.h @@ -43,8 +43,8 @@ namespace art { -class LOCKABLE ReaderWriterMutex; -class LOCKABLE MutatorMutex; +class SHARED_LOCKABLE ReaderWriterMutex; +class SHARED_LOCKABLE MutatorMutex; class ScopedContentionRecorder; class Thread; @@ -214,35 +214,37 @@ class LOCKABLE Mutex : public BaseMutex { virtual bool IsMutex() const { return true; } // Block until mutex is free then acquire exclusive access. - void ExclusiveLock(Thread* self) EXCLUSIVE_LOCK_FUNCTION(); - void Lock(Thread* self) EXCLUSIVE_LOCK_FUNCTION() { ExclusiveLock(self); } + void ExclusiveLock(Thread* self) ACQUIRE(); + void Lock(Thread* self) ACQUIRE() { ExclusiveLock(self); } // Returns true if acquires exclusive access, false otherwise. - bool ExclusiveTryLock(Thread* self) EXCLUSIVE_TRYLOCK_FUNCTION(true); - bool TryLock(Thread* self) EXCLUSIVE_TRYLOCK_FUNCTION(true) { return ExclusiveTryLock(self); } + bool ExclusiveTryLock(Thread* self) TRY_ACQUIRE(true); + bool TryLock(Thread* self) TRY_ACQUIRE(true) { return ExclusiveTryLock(self); } // Release exclusive access. - void ExclusiveUnlock(Thread* self) UNLOCK_FUNCTION(); - void Unlock(Thread* self) UNLOCK_FUNCTION() { ExclusiveUnlock(self); } + void ExclusiveUnlock(Thread* self) RELEASE(); + void Unlock(Thread* self) RELEASE() { ExclusiveUnlock(self); } // Is the current thread the exclusive holder of the Mutex. bool IsExclusiveHeld(const Thread* self) const; // Assert that the Mutex is exclusively held by the current thread. - void AssertExclusiveHeld(const Thread* self) { + void AssertExclusiveHeld(const Thread* self) ASSERT_CAPABILITY(this) { if (kDebugLocking && (gAborting == 0)) { CHECK(IsExclusiveHeld(self)) << *this; } } - void AssertHeld(const Thread* self) { AssertExclusiveHeld(self); } + void AssertHeld(const Thread* self) ASSERT_CAPABILITY(this) { AssertExclusiveHeld(self); } // Assert that the Mutex is not held by the current thread. - void AssertNotHeldExclusive(const Thread* self) { + void AssertNotHeldExclusive(const Thread* self) ASSERT_CAPABILITY(!*this) { if (kDebugLocking && (gAborting == 0)) { CHECK(!IsExclusiveHeld(self)) << *this; } } - void AssertNotHeld(const Thread* self) { AssertNotHeldExclusive(self); } + void AssertNotHeld(const Thread* self) ASSERT_CAPABILITY(!*this) { + AssertNotHeldExclusive(self); + } // Id associated with exclusive owner. No memory ordering semantics if called from a thread other // than the owner. @@ -255,6 +257,9 @@ class LOCKABLE Mutex : public BaseMutex { virtual void Dump(std::ostream& os) const; + // For negative capabilities in clang annotations. + const Mutex& operator!() const { return *this; } + private: #if ART_USE_FUTEXES // 0 is unheld, 1 is held. @@ -290,7 +295,7 @@ class LOCKABLE Mutex : public BaseMutex { // Shared(n) | Block | error | SharedLock(n+1)* | Shared(n-1) or Free // * for large values of n the SharedLock may block. std::ostream& operator<<(std::ostream& os, const ReaderWriterMutex& mu); -class LOCKABLE ReaderWriterMutex : public BaseMutex { +class SHARED_LOCKABLE ReaderWriterMutex : public BaseMutex { public: explicit ReaderWriterMutex(const char* name, LockLevel level = kDefaultMutexLevel); ~ReaderWriterMutex(); @@ -298,12 +303,12 @@ class LOCKABLE ReaderWriterMutex : public BaseMutex { virtual bool IsReaderWriterMutex() const { return true; } // Block until ReaderWriterMutex is free then acquire exclusive access. 
- void ExclusiveLock(Thread* self) EXCLUSIVE_LOCK_FUNCTION(); - void WriterLock(Thread* self) EXCLUSIVE_LOCK_FUNCTION() { ExclusiveLock(self); } + void ExclusiveLock(Thread* self) ACQUIRE(); + void WriterLock(Thread* self) ACQUIRE() { ExclusiveLock(self); } // Release exclusive access. - void ExclusiveUnlock(Thread* self) UNLOCK_FUNCTION(); - void WriterUnlock(Thread* self) UNLOCK_FUNCTION() { ExclusiveUnlock(self); } + void ExclusiveUnlock(Thread* self) RELEASE(); + void WriterUnlock(Thread* self) RELEASE() { ExclusiveUnlock(self); } // Block until ReaderWriterMutex is free and acquire exclusive access. Returns true on success // or false if timeout is reached. @@ -313,15 +318,15 @@ class LOCKABLE ReaderWriterMutex : public BaseMutex { #endif // Block until ReaderWriterMutex is shared or free then acquire a share on the access. - void SharedLock(Thread* self) SHARED_LOCK_FUNCTION() ALWAYS_INLINE; - void ReaderLock(Thread* self) SHARED_LOCK_FUNCTION() { SharedLock(self); } + void SharedLock(Thread* self) ACQUIRE_SHARED() ALWAYS_INLINE; + void ReaderLock(Thread* self) ACQUIRE_SHARED() { SharedLock(self); } // Try to acquire share of ReaderWriterMutex. - bool SharedTryLock(Thread* self) EXCLUSIVE_TRYLOCK_FUNCTION(true); + bool SharedTryLock(Thread* self) SHARED_TRYLOCK_FUNCTION(true); // Release a share of the access. - void SharedUnlock(Thread* self) UNLOCK_FUNCTION() ALWAYS_INLINE; - void ReaderUnlock(Thread* self) UNLOCK_FUNCTION() { SharedUnlock(self); } + void SharedUnlock(Thread* self) RELEASE_SHARED() ALWAYS_INLINE; + void ReaderUnlock(Thread* self) RELEASE_SHARED() { SharedUnlock(self); } // Is the current thread the exclusive holder of the ReaderWriterMutex. bool IsExclusiveHeld(const Thread* self) const; @@ -368,6 +373,9 @@ class LOCKABLE ReaderWriterMutex : public BaseMutex { virtual void Dump(std::ostream& os) const; + // For negative capabilities in clang annotations. + const ReaderWriterMutex& operator!() const { return *this; } + private: #if ART_USE_FUTEXES // Out-of-inline path for handling contention for a SharedLock. @@ -402,7 +410,7 @@ class LOCKABLE ReaderWriterMutex : public BaseMutex { // suspended states before exclusive ownership of the mutator mutex is sought. // std::ostream& operator<<(std::ostream& os, const MutatorMutex& mu); -class LOCKABLE MutatorMutex : public ReaderWriterMutex { +class SHARED_LOCKABLE MutatorMutex : public ReaderWriterMutex { public: explicit MutatorMutex(const char* name, LockLevel level = kDefaultMutexLevel) : ReaderWriterMutex(name, level) {} @@ -410,6 +418,9 @@ class LOCKABLE MutatorMutex : public ReaderWriterMutex { virtual bool IsMutatorMutex() const { return true; } + // For negative capabilities in clang annotations. + const MutatorMutex& operator!() const { return *this; } + private: friend class Thread; void TransitionFromRunnableToSuspended(Thread* self) UNLOCK_FUNCTION() ALWAYS_INLINE; @@ -458,13 +469,13 @@ class ConditionVariable { // Scoped locker/unlocker for a regular Mutex that acquires mu upon construction and releases it // upon destruction. 
-class SCOPED_LOCKABLE MutexLock { +class SCOPED_CAPABILITY MutexLock { public: - explicit MutexLock(Thread* self, Mutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) : self_(self), mu_(mu) { + explicit MutexLock(Thread* self, Mutex& mu) ACQUIRE(mu) : self_(self), mu_(mu) { mu_.ExclusiveLock(self_); } - ~MutexLock() UNLOCK_FUNCTION() { + ~MutexLock() RELEASE() { mu_.ExclusiveUnlock(self_); } @@ -478,14 +489,14 @@ class SCOPED_LOCKABLE MutexLock { // Scoped locker/unlocker for a ReaderWriterMutex that acquires read access to mu upon // construction and releases it upon destruction. -class SCOPED_LOCKABLE ReaderMutexLock { +class SCOPED_CAPABILITY ReaderMutexLock { public: - explicit ReaderMutexLock(Thread* self, ReaderWriterMutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) : + explicit ReaderMutexLock(Thread* self, ReaderWriterMutex& mu) ACQUIRE(mu) : self_(self), mu_(mu) { mu_.SharedLock(self_); } - ~ReaderMutexLock() UNLOCK_FUNCTION() { + ~ReaderMutexLock() RELEASE() { mu_.SharedUnlock(self_); } @@ -500,7 +511,7 @@ class SCOPED_LOCKABLE ReaderMutexLock { // Scoped locker/unlocker for a ReaderWriterMutex that acquires write access to mu upon // construction and releases it upon destruction. -class SCOPED_LOCKABLE WriterMutexLock { +class SCOPED_CAPABILITY WriterMutexLock { public: explicit WriterMutexLock(Thread* self, ReaderWriterMutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) : self_(self), mu_(mu) { @@ -520,6 +531,17 @@ class SCOPED_LOCKABLE WriterMutexLock { // "WriterMutexLock mu(lock)". #define WriterMutexLock(x) static_assert(0, "WriterMutexLock declaration missing variable name") +// For StartNoThreadSuspension and EndNoThreadSuspension. +class CAPABILITY("role") Role { + public: + void Acquire() ACQUIRE() {} + void Release() RELEASE() {} + const Role& operator!() const { return *this; } +}; + +class Uninterruptible : public Role { +}; + // Global mutexes corresponding to the levels above. class Locks { public: @@ -655,6 +677,11 @@ class Locks { static Mutex* lambda_table_lock_ ACQUIRED_AFTER(mutator_lock_); }; +class Roles { + public: + static Uninterruptible uninterruptible_; +}; + } // namespace art #endif // ART_RUNTIME_BASE_MUTEX_H_ diff --git a/runtime/base/mutex_test.cc b/runtime/base/mutex_test.cc index 3750c815e9..340550f02e 100644 --- a/runtime/base/mutex_test.cc +++ b/runtime/base/mutex_test.cc @@ -101,18 +101,18 @@ struct RecursiveLockWait { : mu("test mutex", kDefaultMutexLevel, true), cv("test condition variable", mu) { } - static void* Callback(void* arg) { - RecursiveLockWait* state = reinterpret_cast<RecursiveLockWait*>(arg); - state->mu.Lock(Thread::Current()); - state->cv.Signal(Thread::Current()); - state->mu.Unlock(Thread::Current()); - return nullptr; - } - Mutex mu; ConditionVariable cv; }; +static void* RecursiveLockWaitCallback(void* arg) { + RecursiveLockWait* state = reinterpret_cast<RecursiveLockWait*>(arg); + state->mu.Lock(Thread::Current()); + state->cv.Signal(Thread::Current()); + state->mu.Unlock(Thread::Current()); + return nullptr; +} + // GCC has trouble with our mutex tests, so we have to turn off thread safety analysis. 
static void RecursiveLockWaitTest() NO_THREAD_SAFETY_ANALYSIS { RecursiveLockWait state; @@ -120,8 +120,7 @@ static void RecursiveLockWaitTest() NO_THREAD_SAFETY_ANALYSIS { state.mu.Lock(Thread::Current()); pthread_t pthread; - int pthread_create_result = pthread_create(&pthread, nullptr, RecursiveLockWait::Callback, - &state); + int pthread_create_result = pthread_create(&pthread, nullptr, RecursiveLockWaitCallback, &state); ASSERT_EQ(0, pthread_create_result); state.cv.Wait(Thread::Current()); diff --git a/runtime/base/out.h b/runtime/base/out.h new file mode 100644 index 0000000000..7b4bc1216c --- /dev/null +++ b/runtime/base/out.h @@ -0,0 +1,279 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_BASE_OUT_H_ +#define ART_RUNTIME_BASE_OUT_H_ +#include <base/macros.h> +#include <base/logging.h> + +#include <memory> +// A zero-overhead abstraction marker that means this value is meant to be used as an out +// parameter for functions. It mimics semantics of a pointer that the function will +// dereference and output its value into. +// +// Inspired by the 'out' language keyword in C#. +// +// Declaration example: +// int do_work(size_t args, out<int> result); +// // returns 0 on success, sets result, otherwise error code +// +// Use-site example: +// // (1) -- out of a local variable or field +// int res; +// if (do_work(1, outof(res))) { +// cout << "success: " << res; +// } +// // (2) -- out of an iterator +// std::vector<int> list = {1}; +// std::vector<int>::iterator it = list.begin(); +// if (do_work(2, outof_iterator(*it))) { +// cout << "success: " << list[0]; +// } +// // (3) -- out of a pointer +// int* array = &some_other_value; +// if (do_work(3, outof_ptr(array))) { +// cout << "success: " << *array; +// } +// +// The type will also automatically decay into a C-style pointer for compatibility +// with calling legacy code that expects pointers. +// +// Declaration example: +// void write_data(int* res) { *res = 5; } +// +// Use-site example: +// int data; +// write_data(outof(data)); +// // data is now '5' +// (The other outof_* functions can be used analogously when the target is a C-style pointer). +// +// --------------- +// +// Other typical pointer operations such as addition, subtraction, etc. are banned +// since there is exactly one value being output. +// +namespace art { + +// Forward declarations. See below for specific functions. +template <typename T> +struct out_convertible; // Implicitly converts to out<T> or T*. + +// Helper function that automatically infers 'T' +// +// Returns a type that is implicitly convertible to either out<T> or T* depending +// on the call site.
+// +// Example: +// int do_work(size_t args, out<int> result); +// // returns 0 on success, sets result, otherwise error code +// +// Usage: +// int res; +// if (do_work(1, outof(res))) { +// cout << "success: " << res; +// } +template <typename T> +out_convertible<T> outof(T& param) ALWAYS_INLINE; + +// Helper function that automatically infers 'T' from a container<T>::iterator. +// To use when the argument is already inside an iterator. +// +// Returns a type that is implicitly convertible to either out<T> or T* depending +// on the call site. +// +// Example: +// int do_work(size_t args, out<int> result); +// // returns 0 on success, sets result, otherwise error code +// +// Usage: +// std::vector<int> list = {1}; +// std::vector<int>::iterator it = list.begin(); +// if (do_work(2, outof_iterator(*it))) { +// cout << "success: " << list[0]; +// } +template <typename It> +auto ALWAYS_INLINE outof_iterator(It iter) + -> out_convertible<typename std::remove_reference<decltype(*iter)>::type>; + +// Helper function that automatically infers 'T'. +// To use when the argument is already a pointer. +// +// ptr must be not-null, else a DCHECK failure will occur. +// +// Returns a type that is implicitly convertible to either out<T> or T* depending +// on the call site. +// +// Example: +// int do_work(size_t args, out<int> result); +// // returns 0 on success, sets result, otherwise error code +// +// Usage: +// int* array = &some_other_value; +// if (do_work(3, outof_ptr(array))) { +// cout << "success: " << *array; +// } +template <typename T> +out_convertible<T> outof_ptr(T* ptr) ALWAYS_INLINE; + +// Zero-overhead wrapper around a non-null non-const pointer meant to be used to output +// the result of parameters. There are no other extra guarantees. +// +// The most common use case is to treat this like a typical pointer argument, for example: +// +// void write_out_5(out<int> x) { +// *x = 5; +// } +// +// The following operations are supported: +// operator* -> use like a pointer (guaranteed to be non-null) +// == and != -> compare against other pointers for (in)equality +// begin/end -> use in standard C++ algorithms as if it was an iterator +template <typename T> +struct out { + // Has to be mutable lref. Otherwise how would you write something as output into it? + explicit inline out(T& param) + : param_(param) {} + + // Model a single-element iterator (or pointer) to the parameter. + inline T& operator *() { + return param_; + } + + // Model dereferencing fields/methods on a pointer. + inline T* operator->() { + return std::addressof(param_); + } + + // + // Comparison against this or other pointers. + // + template <typename T2> + inline bool operator==(const T2* other) const { + return std::addressof(param_) == other; + } + + template <typename T2> + inline bool operator==(const out<T>& other) const { + return std::addressof(param_) == std::addressof(other.param_); + } + + // An out-parameter is never null. + inline bool operator==(std::nullptr_t) const { + return false; + } + + template <typename T2> + inline bool operator!=(const T2* other) const { + return std::addressof(param_) != other; + } + + template <typename T2> + inline bool operator!=(const out<T>& other) const { + return std::addressof(param_) != std::addressof(other.param_); + } + + // An out-parameter is never null. + inline bool operator!=(std::nullptr_t) const { + return true; + } + + // + // Iterator interface implementation. Use with standard algorithms.
+ // TODO: (add items in iterator_traits if this is truly useful). + // + + inline T* begin() { + return std::addressof(param_); + } + + inline const T* begin() const { + return std::addressof(param_); + } + + inline T* end() { + return std::addressof(param_) + 1; + } + + inline const T* end() const { + return std::addressof(param_) + 1; + } + + private: + T& param_; +}; + +// +// IMPLEMENTATION DETAILS +// + +// +// This intermediate type should not be used directly by user code. +// +// It enables 'outof(x)' to be passed into functions that expect either +// an out<T> **or** a regular C-style pointer (T*). +// +template <typename T> +struct out_convertible { + explicit inline out_convertible(T& param) + : param_(param) { + } + + // Implicitly convert into an out<T> for standard usage. + inline operator out<T>() { + return out<T>(param_); + } + + // Implicitly convert into a '*' for legacy usage. + inline operator T*() { + return std::addressof(param_); + } + private: + T& param_; +}; + +// Helper function that automatically infers 'T' +template <typename T> +inline out_convertible<T> outof(T& param) { + return out_convertible<T>(param); +} + +// Helper function that automatically infers 'T'. +// To use when the argument is already inside an iterator. +template <typename It> +inline auto outof_iterator(It iter) + -> out_convertible<typename std::remove_reference<decltype(*iter)>::type> { + return outof(*iter); +} + +// Helper function that automatically infers 'T'. +// To use when the argument is already a pointer. +template <typename T> +inline out_convertible<T> outof_ptr(T* ptr) { + DCHECK(ptr != nullptr); + return outof(*ptr); +} + +// Helper function that automatically infers 'T'. +// Forwards an out parameter from one function into another. +template <typename T> +inline out_convertible<T> outof_forward(out<T>& out_param) { + T& param = *out_param; + return out_convertible<T>(param); +} + +} // namespace art +#endif // ART_RUNTIME_BASE_OUT_H_ diff --git a/runtime/base/out_fwd.h b/runtime/base/out_fwd.h new file mode 100644 index 0000000000..6b2f926429 --- /dev/null +++ b/runtime/base/out_fwd.h @@ -0,0 +1,30 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_BASE_OUT_FWD_H_ +#define ART_RUNTIME_BASE_OUT_FWD_H_ + +// Forward declaration for "out<T>". See <out.h> for more information. +// Other headers use only the forward declaration. + +// Callers of functions that take an out<T> parameter should #include <out.h> to get outof_. +// which constructs out<T> through type inference. 
+namespace art { +template <typename T> +struct out; +} // namespace art + +#endif // ART_RUNTIME_BASE_OUT_FWD_H_ diff --git a/runtime/base/out_test.cc b/runtime/base/out_test.cc new file mode 100644 index 0000000000..427420035b --- /dev/null +++ b/runtime/base/out_test.cc @@ -0,0 +1,99 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "out.h" + +#include <algorithm> +#include <gtest/gtest.h> + +namespace art { + +struct OutTest : public testing::Test { + // Multiplies values less than 10 by two, stores the result and returns 0. + // Returns -1 if the original value was not multiplied by two. + static int multiply_small_values_by_two(size_t args, out<int> result) { + if (args < 10) { + *result = args * 2; + return 0; + } else { + return -1; + } + } +}; + +extern "C" int multiply_small_values_by_two_legacy(size_t args, int* result) { + if (args < 10) { + *result = args * 2; + return 0; + } else { + return -1; + } +} + +TEST_F(OutTest, TraditionalCall) { + // For calling traditional C++ functions. + int res; + EXPECT_EQ(multiply_small_values_by_two(1, outof(res)), 0); + EXPECT_EQ(2, res); +} + +TEST_F(OutTest, LegacyCall) { + // For calling legacy, e.g. C-style functions. + int res2; + EXPECT_EQ(0, multiply_small_values_by_two_legacy(1, outof(res2))); + EXPECT_EQ(2, res2); +} + +TEST_F(OutTest, CallFromIterator) { + // For calling a function with a parameter originating as an iterator. + std::vector<int> list = {1, 2, 3}; // NOLINT [whitespace/labels] [4] + std::vector<int>::iterator it = list.begin(); + + EXPECT_EQ(0, multiply_small_values_by_two(2, outof_iterator(it))); + EXPECT_EQ(4, list[0]); +} + +TEST_F(OutTest, CallFromPointer) { + // For calling a function with a parameter originating as a C-pointer. + std::vector<int> list = {1, 2, 3}; // NOLINT [whitespace/labels] [4] + + int* list_ptr = &list[2]; // 3 + + EXPECT_EQ(0, multiply_small_values_by_two(2, outof_ptr(list_ptr))); + EXPECT_EQ(4, list[2]); +} + +TEST_F(OutTest, OutAsIterator) { + // For using the out<T> parameter as an iterator inside of the callee. + std::vector<int> list; + int x = 100; + out<int> out_from_x = outof(x); + + for (const int& val : out_from_x) { + list.push_back(val); + } + + ASSERT_EQ(1u, list.size()); + EXPECT_EQ(100, list[0]); + + // A more typical use-case would be to use std algorithms + EXPECT_NE(out_from_x.end(), + std::find(out_from_x.begin(), + out_from_x.end(), + 100)); // Search for '100' in out. 
+} + +} // namespace art diff --git a/runtime/base/timing_logger.h b/runtime/base/timing_logger.h index b300109e31..e10cd2452a 100644 --- a/runtime/base/timing_logger.h +++ b/runtime/base/timing_logger.h @@ -33,17 +33,17 @@ class CumulativeLogger { explicit CumulativeLogger(const std::string& name); ~CumulativeLogger(); void Start(); - void End() LOCKS_EXCLUDED(lock_); - void Reset() LOCKS_EXCLUDED(lock_); - void Dump(std::ostream& os) const LOCKS_EXCLUDED(lock_); + void End() REQUIRES(!lock_); + void Reset() REQUIRES(!lock_); + void Dump(std::ostream& os) const REQUIRES(!lock_); uint64_t GetTotalNs() const { return GetTotalTime() * kAdjust; } // Allow the name to be modified, particularly when the cumulative logger is a field within a // parent class that is unable to determine the "name" of a sub-class. - void SetName(const std::string& name) LOCKS_EXCLUDED(lock_); - void AddLogger(const TimingLogger& logger) LOCKS_EXCLUDED(lock_); - size_t GetIterations() const; + void SetName(const std::string& name) REQUIRES(!lock_); + void AddLogger(const TimingLogger& logger) REQUIRES(!lock_); + size_t GetIterations() const REQUIRES(!lock_); private: class HistogramComparator { @@ -58,8 +58,8 @@ class CumulativeLogger { static constexpr size_t kInitialBucketSize = 50; // 50 microseconds. void AddPair(const std::string &label, uint64_t delta_time) - EXCLUSIVE_LOCKS_REQUIRED(lock_); - void DumpHistogram(std::ostream &os) const EXCLUSIVE_LOCKS_REQUIRED(lock_); + REQUIRES(lock_); + void DumpHistogram(std::ostream &os) const REQUIRES(lock_); uint64_t GetTotalTime() const { return total_time_; } diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc index 0ae32f4785..38bc8186d5 100644 --- a/runtime/check_jni.cc +++ b/runtime/check_jni.cc @@ -16,6 +16,7 @@ #include "check_jni.h" +#include <iomanip> #include <sys/mman.h> #include <zlib.h> @@ -155,7 +156,7 @@ class ScopedCheck { * Assumes "jobj" has already been validated. */ bool CheckInstanceFieldID(ScopedObjectAccess& soa, jobject java_object, jfieldID fid) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { mirror::Object* o = soa.Decode<mirror::Object*>(java_object); if (o == nullptr) { AbortF("field operation on NULL object: %p", java_object); @@ -199,7 +200,7 @@ class ScopedCheck { */ bool CheckMethodAndSig(ScopedObjectAccess& soa, jobject jobj, jclass jc, jmethodID mid, Primitive::Type type, InvokeType invoke) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod* m = CheckMethodID(soa, mid); if (m == nullptr) { return false; @@ -246,7 +247,7 @@ class ScopedCheck { * Assumes "java_class" has already been validated. */ bool CheckStaticFieldID(ScopedObjectAccess& soa, jclass java_class, jfieldID fid) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { mirror::Class* c = soa.Decode<mirror::Class*>(java_class); ArtField* f = CheckFieldID(soa, fid); if (f == nullptr) { @@ -269,7 +270,7 @@ class ScopedCheck { * Instances of "java_class" must be instances of the method's declaring class. */ bool CheckStaticMethod(ScopedObjectAccess& soa, jclass java_class, jmethodID mid) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod* m = CheckMethodID(soa, mid); if (m == nullptr) { return false; @@ -290,7 +291,7 @@ class ScopedCheck { * will be handled automatically by the instanceof check.) 
*/ bool CheckVirtualMethod(ScopedObjectAccess& soa, jobject java_object, jmethodID mid) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod* m = CheckMethodID(soa, mid); if (m == nullptr) { return false; @@ -343,7 +344,7 @@ class ScopedCheck { * Use the kFlag_NullableUtf flag where 'u' field(s) are nullable. */ bool Check(ScopedObjectAccess& soa, bool entry, const char* fmt, JniValueType* args) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod* traceMethod = nullptr; if (has_method_ && soa.Vm()->IsTracingEnabled()) { // We need to guard some of the invocation interface's calls: a bad caller might @@ -443,7 +444,7 @@ class ScopedCheck { } bool CheckReflectedMethod(ScopedObjectAccess& soa, jobject jmethod) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { mirror::Object* method = soa.Decode<mirror::Object*>(jmethod); if (method == nullptr) { AbortF("expected non-null method"); @@ -461,7 +462,7 @@ class ScopedCheck { } bool CheckConstructor(ScopedObjectAccess& soa, jmethodID mid) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod* method = soa.DecodeMethod(mid); if (method == nullptr) { AbortF("expected non-null constructor"); @@ -475,7 +476,7 @@ class ScopedCheck { } bool CheckReflectedField(ScopedObjectAccess& soa, jobject jfield) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { mirror::Object* field = soa.Decode<mirror::Object*>(jfield); if (field == nullptr) { AbortF("expected non-null java.lang.reflect.Field"); @@ -491,7 +492,7 @@ class ScopedCheck { } bool CheckThrowable(ScopedObjectAccess& soa, jthrowable jobj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { mirror::Object* obj = soa.Decode<mirror::Object*>(jobj); if (!obj->GetClass()->IsThrowableClass()) { AbortF("expected java.lang.Throwable but got object of type " @@ -502,7 +503,7 @@ class ScopedCheck { } bool CheckThrowableClass(ScopedObjectAccess& soa, jclass jc) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { mirror::Class* c = soa.Decode<mirror::Class*>(jc); if (!c->IsThrowableClass()) { AbortF("expected java.lang.Throwable class but got object of " @@ -533,7 +534,7 @@ class ScopedCheck { } bool CheckInstantiableNonArray(ScopedObjectAccess& soa, jclass jc) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { mirror::Class* c = soa.Decode<mirror::Class*>(jc); if (!c->IsInstantiableNonArray()) { AbortF("can't make objects of type %s: %p", PrettyDescriptor(c).c_str(), c); @@ -543,7 +544,7 @@ class ScopedCheck { } bool CheckPrimitiveArrayType(ScopedObjectAccess& soa, jarray array, Primitive::Type type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (!CheckArray(soa, array)) { return false; } @@ -558,7 +559,7 @@ class ScopedCheck { bool CheckFieldAccess(ScopedObjectAccess& soa, jobject obj, jfieldID fid, bool is_static, Primitive::Type type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (is_static && !CheckStaticFieldID(soa, down_cast<jclass>(obj), fid)) { return false; } @@ -619,7 +620,7 @@ class ScopedCheck { * to "running" mode before doing the checks. 
*/ bool CheckInstance(ScopedObjectAccess& soa, InstanceKind kind, jobject java_object, bool null_ok) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { const char* what = nullptr; switch (kind) { case kClass: @@ -715,7 +716,7 @@ class ScopedCheck { } bool CheckPossibleHeapValue(ScopedObjectAccess& soa, char fmt, JniValueType arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { switch (fmt) { case 'a': // jarray return CheckArray(soa, arg.a); @@ -785,7 +786,7 @@ class ScopedCheck { void TracePossibleHeapValue(ScopedObjectAccess& soa, bool entry, char fmt, JniValueType arg, std::string* msg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { switch (fmt) { case 'L': // jobject fall-through. case 'a': // jarray fall-through. @@ -946,7 +947,7 @@ class ScopedCheck { * Since we're dealing with objects, switch to "running" mode. */ bool CheckArray(ScopedObjectAccess& soa, jarray java_array) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (UNLIKELY(java_array == nullptr)) { AbortF("jarray was NULL"); return false; @@ -983,7 +984,7 @@ class ScopedCheck { } ArtField* CheckFieldID(ScopedObjectAccess& soa, jfieldID fid) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (fid == nullptr) { AbortF("jfieldID was NULL"); return nullptr; @@ -999,7 +1000,7 @@ class ScopedCheck { } ArtMethod* CheckMethodID(ScopedObjectAccess& soa, jmethodID mid) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (mid == nullptr) { AbortF("jmethodID was NULL"); return nullptr; @@ -1014,7 +1015,7 @@ class ScopedCheck { return m; } - bool CheckThread(JNIEnv* env) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool CheckThread(JNIEnv* env) SHARED_REQUIRES(Locks::mutator_lock_) { Thread* self = Thread::Current(); if (self == nullptr) { AbortF("a thread (tid %d) is making JNI calls without being attached", GetTid()); @@ -1083,10 +1084,29 @@ class ScopedCheck { } const char* errorKind = nullptr; - uint8_t utf8 = CheckUtfBytes(bytes, &errorKind); + const uint8_t* utf8 = CheckUtfBytes(bytes, &errorKind); if (errorKind != nullptr) { + // This is an expensive loop that will resize often, but this isn't supposed to hit in + // practice anyways. + std::ostringstream oss; + oss << std::hex; + const uint8_t* tmp = reinterpret_cast<const uint8_t*>(bytes); + while (*tmp != 0) { + if (tmp == utf8) { + oss << "<"; + } + oss << "0x" << std::setfill('0') << std::setw(2) << static_cast<uint32_t>(*tmp); + if (tmp == utf8) { + oss << '>'; + } + tmp++; + if (*tmp != 0) { + oss << ' '; + } + } + AbortF("input is not valid Modified UTF-8: illegal %s byte %#x\n" - " string: '%s'", errorKind, utf8, bytes); + " string: '%s'\n input: '%s'", errorKind, *utf8, bytes, oss.str().c_str()); return false; } return true; @@ -1094,11 +1114,11 @@ class ScopedCheck { // Checks whether |bytes| is valid modified UTF-8. We also accept 4 byte UTF // sequences in place of encoded surrogate pairs. - static uint8_t CheckUtfBytes(const char* bytes, const char** errorKind) { + static const uint8_t* CheckUtfBytes(const char* bytes, const char** errorKind) { while (*bytes != '\0') { - uint8_t utf8 = *(bytes++); + const uint8_t* utf8 = reinterpret_cast<const uint8_t*>(bytes++); // Switch on the high four bits. 
- switch (utf8 >> 4) { + switch (*utf8 >> 4) { case 0x00: case 0x01: case 0x02: @@ -1118,11 +1138,11 @@ class ScopedCheck { return utf8; case 0x0f: // Bit pattern 1111, which might be the start of a 4 byte sequence. - if ((utf8 & 0x08) == 0) { + if ((*utf8 & 0x08) == 0) { // Bit pattern 1111 0xxx, which is the start of a 4 byte sequence. // We consume one continuation byte here, and fall through to consume two more. - utf8 = *(bytes++); - if ((utf8 & 0xc0) != 0x80) { + utf8 = reinterpret_cast<const uint8_t*>(bytes++); + if ((*utf8 & 0xc0) != 0x80) { *errorKind = "continuation"; return utf8; } @@ -1135,8 +1155,8 @@ class ScopedCheck { FALLTHROUGH_INTENDED; case 0x0e: // Bit pattern 1110, so there are two additional bytes. - utf8 = *(bytes++); - if ((utf8 & 0xc0) != 0x80) { + utf8 = reinterpret_cast<const uint8_t*>(bytes++); + if ((*utf8 & 0xc0) != 0x80) { *errorKind = "continuation"; return utf8; } @@ -1146,8 +1166,8 @@ class ScopedCheck { case 0x0c: case 0x0d: // Bit pattern 110x, so there is one additional byte. - utf8 = *(bytes++); - if ((utf8 & 0xc0) != 0x80) { + utf8 = reinterpret_cast<const uint8_t*>(bytes++); + if ((*utf8 & 0xc0) != 0x80) { *errorKind = "continuation"; return utf8; } @@ -2670,7 +2690,7 @@ class CheckJNI { static bool CheckCallArgs(ScopedObjectAccess& soa, ScopedCheck& sc, JNIEnv* env, jobject obj, jclass c, jmethodID mid, InvokeType invoke) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { bool checked; switch (invoke) { case kVirtual: { diff --git a/runtime/check_reference_map_visitor.h b/runtime/check_reference_map_visitor.h index 504b7536f6..3155b518a8 100644 --- a/runtime/check_reference_map_visitor.h +++ b/runtime/check_reference_map_visitor.h @@ -28,10 +28,10 @@ namespace art { // holding references. 
class CheckReferenceMapVisitor : public StackVisitor { public: - explicit CheckReferenceMapVisitor(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + explicit CheckReferenceMapVisitor(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_) : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {} - bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod* m = GetMethod(); if (m->IsCalleeSaveMethod() || m->IsNative()) { CHECK_EQ(GetDexPc(), DexFile::kDexNoIndex); @@ -52,7 +52,7 @@ class CheckReferenceMapVisitor : public StackVisitor { } void CheckReferences(int* registers, int number_of_references, uint32_t native_pc_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (GetMethod()->IsOptimized(sizeof(void*))) { CheckOptimizedMethod(registers, number_of_references, native_pc_offset); } else { @@ -62,7 +62,7 @@ class CheckReferenceMapVisitor : public StackVisitor { private: void CheckOptimizedMethod(int* registers, int number_of_references, uint32_t native_pc_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod* m = GetMethod(); CodeInfo code_info = m->GetOptimizedCodeInfo(); StackMapEncoding encoding = code_info.ExtractEncoding(); @@ -104,7 +104,7 @@ class CheckReferenceMapVisitor : public StackVisitor { } void CheckQuickMethod(int* registers, int number_of_references, uint32_t native_pc_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod* m = GetMethod(); NativePcOffsetToReferenceMap map(m->GetNativeGcMap(sizeof(void*))); const uint8_t* ref_bitmap = map.FindBitMap(native_pc_offset); diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h index 21b63c61a2..c08417f542 100644 --- a/runtime/class_linker-inl.h +++ b/runtime/class_linker-inl.h @@ -117,8 +117,10 @@ inline ArtMethod* ClassLinker::GetResolvedMethod(uint32_t method_idx, ArtMethod* return resolved_method; } -inline ArtMethod* ClassLinker::ResolveMethod(Thread* self, uint32_t method_idx, - ArtMethod* referrer, InvokeType type) { +inline ArtMethod* ClassLinker::ResolveMethod(Thread* self, + uint32_t method_idx, + ArtMethod* referrer, + InvokeType type) { ArtMethod* resolved_method = GetResolvedMethod(method_idx, referrer); if (UNLIKELY(resolved_method == nullptr)) { mirror::Class* declaring_class = referrer->GetDeclaringClass(); @@ -143,7 +145,8 @@ inline ArtField* ClassLinker::GetResolvedField( return GetResolvedField(field_idx, field_declaring_class->GetDexCache()); } -inline ArtField* ClassLinker::ResolveField(uint32_t field_idx, ArtMethod* referrer, +inline ArtField* ClassLinker::ResolveField(uint32_t field_idx, + ArtMethod* referrer, bool is_static) { mirror::Class* declaring_class = referrer->GetDeclaringClass(); ArtField* resolved_field = GetResolvedField(field_idx, declaring_class); @@ -187,7 +190,7 @@ inline mirror::IfTable* ClassLinker::AllocIfTable(Thread* self, size_t ifcount) } inline mirror::Class* ClassLinker::GetClassRoot(ClassRoot class_root) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(!class_roots_.IsNull()); mirror::ObjectArray<mirror::Class>* class_roots = class_roots_.Read(); mirror::Class* klass = class_roots->Get(class_root); diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc index 8f7862a3b9..5f5b42f7df 100644 --- a/runtime/class_linker.cc +++ 
b/runtime/class_linker.cc @@ -30,6 +30,7 @@ #include "base/arena_allocator.h" #include "base/casts.h" #include "base/logging.h" +#include "base/out.h" #include "base/scoped_arena_containers.h" #include "base/scoped_flock.h" #include "base/stl_util.h" @@ -91,7 +92,7 @@ static constexpr bool kDuplicateClassesCheck = false; static void ThrowNoClassDefFoundError(const char* fmt, ...) __attribute__((__format__(__printf__, 1, 2))) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void ThrowNoClassDefFoundError(const char* fmt, ...) { va_list args; va_start(args, fmt); @@ -100,14 +101,12 @@ static void ThrowNoClassDefFoundError(const char* fmt, ...) { va_end(args); } -bool ClassLinker::HasInitWithString( - Thread* self, ClassLinker* class_linker, const char* descriptor) { +bool ClassLinker::HasInitWithString(Thread* self, const char* descriptor) { ArtMethod* method = self->GetCurrentMethod(nullptr); StackHandleScope<1> hs(self); Handle<mirror::ClassLoader> class_loader(hs.NewHandle(method != nullptr ? - method->GetDeclaringClass()->GetClassLoader() - : nullptr)); - mirror::Class* exception_class = class_linker->FindClass(self, descriptor, class_loader); + method->GetDeclaringClass()->GetClassLoader() : nullptr)); + mirror::Class* exception_class = FindClass(self, descriptor, class_loader); if (exception_class == nullptr) { // No exc class ~ no <init>-with-string. @@ -144,7 +143,7 @@ void ClassLinker::ThrowEarlierClassFailure(mirror::Class* c) { std::string temp; const char* descriptor = c->GetVerifyErrorClass()->GetDescriptor(&temp); - if (HasInitWithString(self, this, descriptor)) { + if (HasInitWithString(self, descriptor)) { self->ThrowNewException(descriptor, PrettyDescriptor(c).c_str()); } else { self->ThrowNewException(descriptor, nullptr); @@ -157,7 +156,7 @@ void ClassLinker::ThrowEarlierClassFailure(mirror::Class* c) { } static void VlogClassInitializationFailure(Handle<mirror::Class> klass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (VLOG_IS_ON(class_linker)) { std::string temp; LOG(INFO) << "Failed to initialize class " << klass->GetDescriptor(&temp) << " from " @@ -166,7 +165,7 @@ static void VlogClassInitializationFailure(Handle<mirror::Class> klass) } static void WrapExceptionInInitializer(Handle<mirror::Class> klass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { Thread* self = Thread::Current(); JNIEnv* env = self->GetJniEnv(); @@ -200,7 +199,7 @@ struct FieldGapsComparator { return lhs.size < rhs.size || (lhs.size == rhs.size && lhs.start_offset > rhs.start_offset); } }; -typedef std::priority_queue<FieldGap, std::vector<FieldGap>, FieldGapsComparator> FieldGaps; +using FieldGaps = std::priority_queue<FieldGap, std::vector<FieldGap>, FieldGapsComparator>; // Adds largest aligned gaps to queue of gaps. static void AddFieldGap(uint32_t gap_start, uint32_t gap_end, FieldGaps* gaps) { @@ -228,7 +227,7 @@ static void ShuffleForward(size_t* current_field_idx, MemberOffset* field_offset, std::deque<ArtField*>* grouped_and_sorted_fields, FieldGaps* gaps) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(current_field_idx != nullptr); DCHECK(grouped_and_sorted_fields != nullptr); DCHECK(gaps != nullptr); @@ -777,7 +776,8 @@ class DexFileAndClassPair : ValueObject { // be from multidex, which resolves correctly). 
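The FieldGaps alias above keeps its comparator, which orders gaps so the largest aligned gap is popped first, ties going to the earlier offset. A standalone sketch of that queue, with placeholder names and only the ordering rule taken from the patch:

// Illustrative sketch of the gap queue behind the FieldGaps alias: a max-heap
// that always yields the largest remaining alignment gap first, so field
// layout can fill big gaps before small ones.
#include <cstdint>
#include <queue>
#include <vector>

struct Gap {
  uint32_t start_offset;
  uint32_t size;
};

struct LargestGapFirst {
  bool operator()(const Gap& lhs, const Gap& rhs) const {
    // priority_queue pops the "greatest" element, so returning true means
    // lhs has lower priority than rhs.
    return lhs.size < rhs.size ||
           (lhs.size == rhs.size && lhs.start_offset > rhs.start_offset);
  }
};

using GapQueue = std::priority_queue<Gap, std::vector<Gap>, LargestGapFirst>;

// usage: GapQueue gaps; gaps.push(Gap{8, 4}); gaps.push(Gap{16, 2});
//        gaps.top() is the 4-byte gap at offset 8.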
}; -static void AddDexFilesFromOat(const OatFile* oat_file, bool already_loaded, +static void AddDexFilesFromOat(const OatFile* oat_file, + bool already_loaded, std::priority_queue<DexFileAndClassPair>* heap) { const std::vector<const OatDexFile*>& oat_dex_files = oat_file->GetOatDexFiles(); for (const OatDexFile* oat_dex_file : oat_dex_files) { @@ -838,7 +838,7 @@ const OatFile* ClassLinker::GetPrimaryOatFile() { // against the following top element. If the descriptor is the same, it is now checked whether // the two elements agree on whether their dex file was from an already-loaded oat-file or the // new oat file. Any disagreement indicates a collision. -bool ClassLinker::HasCollisions(const OatFile* oat_file, std::string* error_msg) { +bool ClassLinker::HasCollisions(const OatFile* oat_file, out<std::string> error_msg) { if (!kDuplicateClassesCheck) { return false; } @@ -903,10 +903,9 @@ bool ClassLinker::HasCollisions(const OatFile* oat_file, std::string* error_msg) } std::vector<std::unique_ptr<const DexFile>> ClassLinker::OpenDexFilesFromOat( - const char* dex_location, const char* oat_location, - std::vector<std::string>* error_msgs) { - CHECK(error_msgs != nullptr); - + const char* dex_location, + const char* oat_location, + out<std::vector<std::string>> error_msgs) { // Verify we aren't holding the mutator lock, which could starve GC if we // have to generate or relocate an oat file. Locks::mutator_lock_->AssertNotHeld(Thread::Current()); @@ -948,7 +947,7 @@ std::vector<std::unique_ptr<const DexFile>> ClassLinker::OpenDexFilesFromOat( std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile(); if (oat_file.get() != nullptr) { // Take the file only if it has no collisions, or we must take it because of preopting. - bool accept_oat_file = !HasCollisions(oat_file.get(), &error_msg); + bool accept_oat_file = !HasCollisions(oat_file.get(), outof(error_msg)); if (!accept_oat_file) { // Failed the collision check. Print warning. 
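HasCollisions, described in the comment above, pops (descriptor, source) pairs from a priority queue and flags a collision when adjacent entries share a descriptor but disagree on their source. A simplified sketch of that merge-style duplicate check, with plain strings standing in for DexFileAndClassPair:

// Illustrative sketch of the duplicate-descriptor check: pop the smallest
// descriptor, compare it with the new top, and report a clash when the same
// descriptor appears from two different sources.
#include <functional>
#include <queue>
#include <string>
#include <utility>
#include <vector>

using Entry = std::pair<std::string, bool>;  // (class descriptor, already_loaded)

bool HasDuplicateAcrossSources(const std::vector<Entry>& entries) {
  // std::greater makes the smallest descriptor the top element.
  std::priority_queue<Entry, std::vector<Entry>, std::greater<Entry>> queue;
  for (const Entry& e : entries) {
    queue.push(e);
  }
  while (queue.size() > 1) {
    Entry current = queue.top();
    queue.pop();
    const Entry& next = queue.top();
    if (current.first == next.first && current.second != next.second) {
      return true;  // same descriptor from an already-loaded file and a new one
    }
  }
  return false;
}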
if (Runtime::Current()->IsDexFileFallbackEnabled()) { @@ -982,8 +981,7 @@ std::vector<std::unique_ptr<const DexFile>> ClassLinker::OpenDexFilesFromOat( if (source_oat_file != nullptr) { dex_files = oat_file_assistant.LoadDexFiles(*source_oat_file, dex_location); if (dex_files.empty()) { - error_msgs->push_back("Failed to open dex files from " - + source_oat_file->GetLocation()); + error_msgs->push_back("Failed to open dex files from " + source_oat_file->GetLocation()); } } @@ -1019,9 +1017,10 @@ const OatFile* ClassLinker::FindOpenedOatFileFromOatLocation(const std::string& return nullptr; } -static void SanityCheckArtMethod(ArtMethod* m, mirror::Class* expected_class, +static void SanityCheckArtMethod(ArtMethod* m, + mirror::Class* expected_class, gc::space::ImageSpace* space) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (m->IsRuntimeMethod()) { CHECK(m->GetDeclaringClass() == nullptr) << PrettyMethod(m); } else if (m->IsMiranda()) { @@ -1037,9 +1036,11 @@ static void SanityCheckArtMethod(ArtMethod* m, mirror::Class* expected_class, } } -static void SanityCheckArtMethodPointerArray( - mirror::PointerArray* arr, mirror::Class* expected_class, size_t pointer_size, - gc::space::ImageSpace* space) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +static void SanityCheckArtMethodPointerArray(mirror::PointerArray* arr, + mirror::Class* expected_class, + size_t pointer_size, + gc::space::ImageSpace* space) + SHARED_REQUIRES(Locks::mutator_lock_) { CHECK(arr != nullptr); for (int32_t j = 0; j < arr->GetLength(); ++j) { auto* method = arr->GetElementPtrSize<ArtMethod*>(j, pointer_size); @@ -1054,7 +1055,7 @@ static void SanityCheckArtMethodPointerArray( } static void SanityCheckObjectsCallback(mirror::Object* obj, void* arg ATTRIBUTE_UNUSED) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(obj != nullptr); CHECK(obj->GetClass() != nullptr) << "Null class in object " << obj; CHECK(obj->GetClass()->GetClass() != nullptr) << "Null class class " << obj; @@ -1238,11 +1239,8 @@ void ClassLinker::InitFromImage() { bool ClassLinker::ClassInClassTable(mirror::Class* klass) { ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_); - auto it = class_table_.Find(GcRoot<mirror::Class>(klass)); - if (it == class_table_.end()) { - return false; - } - return it->Read() == klass; + ClassTable* const class_table = ClassTableForClassLoader(klass->GetClassLoader()); + return class_table != nullptr && class_table->Contains(klass); } void ClassLinker::VisitClassRoots(RootVisitor* visitor, VisitRootFlags flags) { @@ -1254,8 +1252,7 @@ void ClassLinker::VisitClassRoots(RootVisitor* visitor, VisitRootFlags flags) { // There is 3 GC cases to handle: // Non moving concurrent: // This case is easy to handle since the reference members of ArtMethod and ArtFields are held - // live by the class and class roots. In this case we probably don't even need to call - // VisitNativeRoots. + // live by the class and class roots. // // Moving non-concurrent: // This case needs to call visit VisitNativeRoots in case the classes or dex cache arrays move. @@ -1266,35 +1263,30 @@ void ClassLinker::VisitClassRoots(RootVisitor* visitor, VisitRootFlags flags) { // Moving concurrent: // Need to make sure to not copy ArtMethods without doing read barriers since the roots are // marked concurrently and we don't hold the classlinker_classes_lock_ when we do the copy. 
- for (GcRoot<mirror::Class>& root : class_table_) { - buffered_visitor.VisitRoot(root); - if ((flags & kVisitRootFlagNonMoving) == 0) { - // Don't bother visiting ArtField and ArtMethod if kVisitRootFlagNonMoving is set since - // these roots are all reachable from the class or dex cache. - root.Read()->VisitNativeRoots(buffered_visitor, image_pointer_size_); + std::vector<std::pair<GcRoot<mirror::ClassLoader>, ClassTable*>> reinsert; + for (auto it = classes_.begin(); it != classes_.end(); ) { + it->second->VisitRoots(visitor, flags); + const GcRoot<mirror::ClassLoader>& root = it->first; + mirror::ClassLoader* old_ref = root.Read<kWithoutReadBarrier>(); + root.VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal)); + mirror::ClassLoader* new_ref = root.Read<kWithoutReadBarrier>(); + if (new_ref != old_ref) { + reinsert.push_back(*it); + it = classes_.erase(it); + } else { + ++it; } } - // PreZygote classes can't move so we won't need to update fields' declaring classes. - for (GcRoot<mirror::Class>& root : pre_zygote_class_table_) { - buffered_visitor.VisitRoot(root); - if ((flags & kVisitRootFlagNonMoving) == 0) { - root.Read()->VisitNativeRoots(buffered_visitor, image_pointer_size_); - } + for (auto& pair : reinsert) { + classes_.Put(pair.first, pair.second); } } else if ((flags & kVisitRootFlagNewRoots) != 0) { for (auto& root : new_class_roots_) { mirror::Class* old_ref = root.Read<kWithoutReadBarrier>(); - old_ref->VisitNativeRoots(buffered_visitor, image_pointer_size_); root.VisitRoot(visitor, RootInfo(kRootStickyClass)); mirror::Class* new_ref = root.Read<kWithoutReadBarrier>(); - if (UNLIKELY(new_ref != old_ref)) { - // Uh ohes, GC moved a root in the log. Need to search the class_table and update the - // corresponding object. This is slow, but luckily for us, this may only happen with a - // concurrent moving GC. - auto it = class_table_.Find(GcRoot<mirror::Class>(old_ref)); - DCHECK(it != class_table_.end()); - *it = GcRoot<mirror::Class>(new_ref); - } + // Concurrent moving GC marked new roots through the to-space invariant. + CHECK_EQ(new_ref, old_ref); } } buffered_visitor.Flush(); // Flush before clearing new_class_roots_. @@ -1343,91 +1335,103 @@ void ClassLinker::VisitRoots(RootVisitor* visitor, VisitRootFlags flags) { } } -void ClassLinker::VisitClasses(ClassVisitor* visitor, void* arg) { - if (dex_cache_image_class_lookup_required_) { - MoveImageClassesToClassTable(); - } - // TODO: why isn't this a ReaderMutexLock? - WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_); - for (GcRoot<mirror::Class>& root : class_table_) { - if (!visitor(root.Read(), arg)) { - return; - } - } - for (GcRoot<mirror::Class>& root : pre_zygote_class_table_) { - if (!visitor(root.Read(), arg)) { +void ClassLinker::VisitClassesInternal(ClassVisitor* visitor) { + for (auto& pair : classes_) { + ClassTable* const class_table = pair.second; + if (!class_table->Visit(visitor)) { return; } } } -static bool GetClassesVisitorSet(mirror::Class* c, void* arg) { - std::set<mirror::Class*>* classes = reinterpret_cast<std::set<mirror::Class*>*>(arg); - classes->insert(c); - return true; +void ClassLinker::VisitClasses(ClassVisitor* visitor) { + if (dex_cache_image_class_lookup_required_) { + MoveImageClassesToClassTable(); + } + Thread* const self = Thread::Current(); + ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_); + // Not safe to have thread suspension when we are holding a lock. 
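The VisitClassRoots rewrite above has to cope with a moving GC updating the class-loader pointer that keys the classes_ map, so changed entries are pulled out and re-inserted after visiting. A generic sketch of that erase-and-reinsert pattern (container and callback types are placeholders, not ART's):

// Illustrative sketch: if visiting a root updates the pointer used as the map
// key, the entry is removed and re-inserted so the map stays consistent with
// its ordering/hashing.
#include <map>
#include <utility>
#include <vector>

template <typename Key, typename Value, typename Visit>
void VisitKeysThatMayMove(std::map<Key, Value>& table, Visit visit) {
  std::vector<std::pair<Key, Value>> reinsert;
  for (auto it = table.begin(); it != table.end(); ) {
    Key old_key = it->first;
    Key new_key = visit(old_key);  // a moving GC may hand back an updated pointer
    if (new_key != old_key) {
      reinsert.emplace_back(new_key, it->second);
      it = table.erase(it);
    } else {
      ++it;
    }
  }
  for (auto& pair : reinsert) {
    table.insert(std::move(pair));
  }
}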
+ if (self != nullptr) { + ScopedAssertNoThreadSuspension nts(self, __FUNCTION__); + VisitClassesInternal(visitor); + } else { + VisitClassesInternal(visitor); + } } -struct GetClassesVisitorArrayArg { - Handle<mirror::ObjectArray<mirror::Class>>* classes; - int32_t index; - bool success; +class GetClassesInToVector : public ClassVisitor { + public: + bool Visit(mirror::Class* klass) OVERRIDE { + classes_.push_back(klass); + return true; + } + std::vector<mirror::Class*> classes_; }; -static bool GetClassesVisitorArray(mirror::Class* c, void* varg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - GetClassesVisitorArrayArg* arg = reinterpret_cast<GetClassesVisitorArrayArg*>(varg); - if (arg->index < (*arg->classes)->GetLength()) { - (*arg->classes)->Set(arg->index, c); - arg->index++; - return true; - } else { - arg->success = false; +class GetClassInToObjectArray : public ClassVisitor { + public: + explicit GetClassInToObjectArray(mirror::ObjectArray<mirror::Class>* arr) + : arr_(arr), index_(0) {} + + bool Visit(mirror::Class* klass) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { + ++index_; + if (index_ <= arr_->GetLength()) { + arr_->Set(index_ - 1, klass); + return true; + } return false; } -} -void ClassLinker::VisitClassesWithoutClassesLock(ClassVisitor* visitor, void* arg) { + bool Succeeded() const SHARED_REQUIRES(Locks::mutator_lock_) { + return index_ <= arr_->GetLength(); + } + + private: + mirror::ObjectArray<mirror::Class>* const arr_; + int32_t index_; +}; + +void ClassLinker::VisitClassesWithoutClassesLock(ClassVisitor* visitor) { // TODO: it may be possible to avoid secondary storage if we iterate over dex caches. The problem // is avoiding duplicates. if (!kMovingClasses) { - std::set<mirror::Class*> classes; - VisitClasses(GetClassesVisitorSet, &classes); - for (mirror::Class* klass : classes) { - if (!visitor(klass, arg)) { + GetClassesInToVector accumulator; + VisitClasses(&accumulator); + for (mirror::Class* klass : accumulator.classes_) { + if (!visitor->Visit(klass)) { return; } } } else { - Thread* self = Thread::Current(); + Thread* const self = Thread::Current(); StackHandleScope<1> hs(self); - MutableHandle<mirror::ObjectArray<mirror::Class>> classes = - hs.NewHandle<mirror::ObjectArray<mirror::Class>>(nullptr); - GetClassesVisitorArrayArg local_arg; - local_arg.classes = &classes; - local_arg.success = false; + auto classes = hs.NewHandle<mirror::ObjectArray<mirror::Class>>(nullptr); // We size the array assuming classes won't be added to the class table during the visit. // If this assumption fails we iterate again. - while (!local_arg.success) { + while (true) { size_t class_table_size; { ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_); - class_table_size = class_table_.Size() + pre_zygote_class_table_.Size(); + // Add 100 in case new classes get loaded when we are filling in the object array. + class_table_size = NumZygoteClasses() + NumNonZygoteClasses() + 100; } mirror::Class* class_type = mirror::Class::GetJavaLangClass(); mirror::Class* array_of_class = FindArrayClass(self, &class_type); classes.Assign( mirror::ObjectArray<mirror::Class>::Alloc(self, array_of_class, class_table_size)); CHECK(classes.Get() != nullptr); // OOME. 
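The visitor classes above replace the old bool(*)(mirror::Class*, void*) callbacks with ClassVisitor objects that carry their own state. A minimal sketch of that pattern with stand-in types:

// Illustrative sketch of the visitor-object pattern: state lives in the
// visitor instead of a void* argument, and returning false stops iteration.
#include <vector>

struct FakeClass {};

class FakeClassVisitor {
 public:
  virtual ~FakeClassVisitor() {}
  virtual bool Visit(FakeClass* klass) = 0;  // false means stop early
};

class CollectIntoVector : public FakeClassVisitor {
 public:
  bool Visit(FakeClass* klass) override {
    classes_.push_back(klass);
    return true;  // never stop early
  }
  std::vector<FakeClass*> classes_;
};

void VisitAll(const std::vector<FakeClass*>& table, FakeClassVisitor* visitor) {
  for (FakeClass* klass : table) {
    if (!visitor->Visit(klass)) {
      return;
    }
  }
}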
- local_arg.index = 0; - local_arg.success = true; - VisitClasses(GetClassesVisitorArray, &local_arg); + GetClassInToObjectArray accumulator(classes.Get()); + VisitClasses(&accumulator); + if (accumulator.Succeeded()) { + break; + } } for (int32_t i = 0; i < classes->GetLength(); ++i) { // If the class table shrank during creation of the clases array we expect null elements. If // the class table grew then the loop repeats. If classes are created after the loop has // finished then we don't visit. mirror::Class* klass = classes->Get(i); - if (klass != nullptr && !visitor(klass, arg)) { + if (klass != nullptr && !visitor->Visit(klass)) { return; } } @@ -1455,6 +1459,7 @@ ClassLinker::~ClassLinker() { mirror::LongArray::ResetArrayClass(); mirror::ShortArray::ResetArrayClass(); STLDeleteElements(&oat_files_); + STLDeleteValues(&classes_); } mirror::PointerArray* ClassLinker::AllocPointerArray(Thread* self, size_t length) { @@ -1501,7 +1506,8 @@ mirror::DexCache* ClassLinker::AllocDexCache(Thread* self, const DexFile& dex_fi return dex_cache.Get(); } -mirror::Class* ClassLinker::AllocClass(Thread* self, mirror::Class* java_lang_Class, +mirror::Class* ClassLinker::AllocClass(Thread* self, + mirror::Class* java_lang_Class, uint32_t class_size) { DCHECK_GE(class_size, sizeof(mirror::Class)); gc::Heap* heap = Runtime::Current()->GetHeap(); @@ -1520,13 +1526,14 @@ mirror::Class* ClassLinker::AllocClass(Thread* self, uint32_t class_size) { return AllocClass(self, GetClassRoot(kJavaLangClass), class_size); } -mirror::ObjectArray<mirror::StackTraceElement>* ClassLinker::AllocStackTraceElementArray( - Thread* self, size_t length) { +mirror::ObjectArray<mirror::StackTraceElement>* +ClassLinker::AllocStackTraceElementArray(Thread* self, size_t length) { return mirror::ObjectArray<mirror::StackTraceElement>::Alloc( self, GetClassRoot(kJavaLangStackTraceElementArrayClass), length); } -mirror::Class* ClassLinker::EnsureResolved(Thread* self, const char* descriptor, +mirror::Class* ClassLinker::EnsureResolved(Thread* self, + const char* descriptor, mirror::Class* klass) { DCHECK(klass != nullptr); @@ -1585,7 +1592,8 @@ typedef std::pair<const DexFile*, const DexFile::ClassDef*> ClassPathEntry; // Search a collection of DexFiles for a descriptor ClassPathEntry FindInClassPath(const char* descriptor, - size_t hash, const std::vector<const DexFile*>& class_path) { + size_t hash, + const std::vector<const DexFile*>& class_path) { for (const DexFile* dex_file : class_path) { const DexFile::ClassDef* dex_class_def = dex_file->FindClassDef(descriptor, hash); if (dex_class_def != nullptr) { @@ -1597,23 +1605,24 @@ ClassPathEntry FindInClassPath(const char* descriptor, static bool IsBootClassLoader(ScopedObjectAccessAlreadyRunnable& soa, mirror::ClassLoader* class_loader) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return class_loader == nullptr || class_loader->GetClass() == soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_BootClassLoader); } bool ClassLinker::FindClassInPathClassLoader(ScopedObjectAccessAlreadyRunnable& soa, - Thread* self, const char* descriptor, + Thread* self, + const char* descriptor, size_t hash, Handle<mirror::ClassLoader> class_loader, - mirror::Class** result) { + out<mirror::Class*> result) { // Termination case: boot class-loader. if (IsBootClassLoader(soa, class_loader.Get())) { // The boot class loader, search the boot class path. 
ClassPathEntry pair = FindInClassPath(descriptor, hash, boot_class_path_); if (pair.second != nullptr) { - mirror::Class* klass = LookupClass(self, descriptor, hash, nullptr); + mirror::Class* klass = LookupClass(self, descriptor, hash, nullptr /* no classloader */); if (klass != nullptr) { *result = EnsureResolved(self, descriptor, klass); } else { @@ -1715,7 +1724,8 @@ bool ClassLinker::FindClassInPathClassLoader(ScopedObjectAccessAlreadyRunnable& return true; } -mirror::Class* ClassLinker::FindClass(Thread* self, const char* descriptor, +mirror::Class* ClassLinker::FindClass(Thread* self, + const char* descriptor, Handle<mirror::ClassLoader> class_loader) { DCHECK_NE(*descriptor, '\0') << "descriptor is empty string"; DCHECK(self != nullptr); @@ -1751,7 +1761,7 @@ mirror::Class* ClassLinker::FindClass(Thread* self, const char* descriptor, } else { ScopedObjectAccessUnchecked soa(self); mirror::Class* cp_klass; - if (FindClassInPathClassLoader(soa, self, descriptor, hash, class_loader, &cp_klass)) { + if (FindClassInPathClassLoader(soa, self, descriptor, hash, class_loader, outof(cp_klass))) { // The chain was understood. So the value in cp_klass is either the class we were looking // for, or not found. if (cp_klass != nullptr) { @@ -1804,7 +1814,9 @@ mirror::Class* ClassLinker::FindClass(Thread* self, const char* descriptor, UNREACHABLE(); } -mirror::Class* ClassLinker::DefineClass(Thread* self, const char* descriptor, size_t hash, +mirror::Class* ClassLinker::DefineClass(Thread* self, + const char* descriptor, + size_t hash, Handle<mirror::ClassLoader> class_loader, const DexFile& dex_file, const DexFile::ClassDef& dex_class_def) { @@ -1890,7 +1902,7 @@ mirror::Class* ClassLinker::DefineClass(Thread* self, const char* descriptor, si auto interfaces = hs.NewHandle<mirror::ObjectArray<mirror::Class>>(nullptr); MutableHandle<mirror::Class> h_new_class = hs.NewHandle<mirror::Class>(nullptr); - if (!LinkClass(self, descriptor, klass, interfaces, &h_new_class)) { + if (!LinkClass(self, descriptor, klass, interfaces, outof(h_new_class))) { // Linking failed. 
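The outof(...) call sites above and below go with the out<T> parameters introduced in this change; the wrapper makes out-parameters explicit at both the declaration and the call site. The real base/out.h almost certainly differs in detail, so the sketch below uses its own names (out_param, out_of) and only shows the general shape:

// Minimal sketch of an out-parameter wrapper: a non-null handle to the
// caller's variable, with an explicit marker at the call site.
template <typename T>
class out_param {
 public:
  explicit out_param(T& param) : param_(&param) {}
  T& operator*() const { return *param_; }
  T* operator->() const { return param_; }
 private:
  T* param_;  // never null, unlike a raw T* out-parameter
};

template <typename T>
out_param<T> out_of(T& param) {  // call-site marker, analogous to outof(value)
  return out_param<T>(param);
}

// Hypothetical caller showing both ends of the contract.
bool FindClassIndex(const char* descriptor, out_param<int> result) {
  *result = (descriptor != nullptr && descriptor[0] == 'L') ? 0 : -1;
  return *result >= 0;
}
// usage: int index; if (FindClassIndex("LFoo;", out_of(index))) { /* ... */ }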
if (!klass->IsErroneous()) { mirror::Class::SetStatus(klass, mirror::Class::kStatusError, self); @@ -1973,8 +1985,9 @@ uint32_t ClassLinker::SizeOfClassWithoutEmbeddedTables(const DexFile& dex_file, image_pointer_size_); } -OatFile::OatClass ClassLinker::FindOatClass(const DexFile& dex_file, uint16_t class_def_idx, - bool* found) { +OatFile::OatClass ClassLinker::FindOatClass(const DexFile& dex_file, + uint16_t class_def_idx, + out<bool> found) { DCHECK_NE(class_def_idx, DexFile::kDexNoIndex16); const OatFile::OatDexFile* oat_dex_file = dex_file.GetOatDexFile(); if (oat_dex_file == nullptr) { @@ -1985,7 +1998,8 @@ OatFile::OatClass ClassLinker::FindOatClass(const DexFile& dex_file, uint16_t cl return oat_dex_file->GetOatClass(class_def_idx); } -static uint32_t GetOatMethodIndexFromMethodIndex(const DexFile& dex_file, uint16_t class_def_idx, +static uint32_t GetOatMethodIndexFromMethodIndex(const DexFile& dex_file, + uint16_t class_def_idx, uint32_t method_idx) { const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_idx); const uint8_t* class_data = dex_file.GetClassData(class_def); @@ -2019,7 +2033,7 @@ static uint32_t GetOatMethodIndexFromMethodIndex(const DexFile& dex_file, uint16 UNREACHABLE(); } -const OatFile::OatMethod ClassLinker::FindOatMethodFor(ArtMethod* method, bool* found) { +const OatFile::OatMethod ClassLinker::FindOatMethodFor(ArtMethod* method, out<bool> found) { // Although we overwrite the trampoline of non-static methods, we may get here via the resolution // method for direct methods (or virtual methods made direct). mirror::Class* declaring_class = method->GetDeclaringClass(); @@ -2051,7 +2065,7 @@ const OatFile::OatMethod ClassLinker::FindOatMethodFor(ArtMethod* method, bool* method->GetDexMethodIndex())); OatFile::OatClass oat_class = FindOatClass(*declaring_class->GetDexCache()->GetDexFile(), declaring_class->GetDexClassDefIndex(), - found); + outof_forward(found)); if (!(*found)) { return OatFile::OatMethod::Invalid(); } @@ -2065,7 +2079,7 @@ const void* ClassLinker::GetQuickOatCodeFor(ArtMethod* method) { return GetQuickProxyInvokeHandler(); } bool found; - OatFile::OatMethod oat_method = FindOatMethodFor(method, &found); + OatFile::OatMethod oat_method = FindOatMethodFor(method, outof(found)); if (found) { auto* code = oat_method.GetQuickCode(); if (code != nullptr) { @@ -2091,7 +2105,7 @@ const void* ClassLinker::GetOatMethodQuickCodeFor(ArtMethod* method) { return nullptr; } bool found; - OatFile::OatMethod oat_method = FindOatMethodFor(method, &found); + OatFile::OatMethod oat_method = FindOatMethodFor(method, outof(found)); if (found) { return oat_method.GetQuickCode(); } @@ -2105,10 +2119,11 @@ const void* ClassLinker::GetOatMethodQuickCodeFor(ArtMethod* method) { return nullptr; } -const void* ClassLinker::GetQuickOatCodeFor(const DexFile& dex_file, uint16_t class_def_idx, +const void* ClassLinker::GetQuickOatCodeFor(const DexFile& dex_file, + uint16_t class_def_idx, uint32_t method_idx) { bool found; - OatFile::OatClass oat_class = FindOatClass(dex_file, class_def_idx, &found); + OatFile::OatClass oat_class = FindOatClass(dex_file, class_def_idx, outof(found)); if (!found) { return nullptr; } @@ -2118,7 +2133,7 @@ const void* ClassLinker::GetQuickOatCodeFor(const DexFile& dex_file, uint16_t cl // Returns true if the method must run with interpreter, false otherwise. 
static bool NeedsInterpreter(ArtMethod* method, const void* quick_code) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (quick_code == nullptr) { // No code: need interpreter. // May return true for native code, in the case of generic JNI @@ -2159,7 +2174,7 @@ void ClassLinker::FixupStaticTrampolines(mirror::Class* klass) { } bool has_oat_class; OatFile::OatClass oat_class = FindOatClass(dex_file, klass->GetDexClassDefIndex(), - &has_oat_class); + outof(has_oat_class)); // Link the code of methods skipped by LinkCode. for (size_t method_index = 0; it.HasNextDirectMethod(); ++method_index, it.Next()) { ArtMethod* method = klass->GetDirectMethod(method_index, image_pointer_size_); @@ -2187,7 +2202,8 @@ void ClassLinker::FixupStaticTrampolines(mirror::Class* klass) { // Ignore virtual methods on the iterator. } -void ClassLinker::LinkCode(ArtMethod* method, const OatFile::OatClass* oat_class, +void ClassLinker::LinkCode(ArtMethod* method, + const OatFile::OatClass* oat_class, uint32_t class_def_method_index) { Runtime* const runtime = Runtime::Current(); if (runtime->IsAotCompiler()) { @@ -2239,8 +2255,10 @@ void ClassLinker::LinkCode(ArtMethod* method, const OatFile::OatClass* oat_class } } -void ClassLinker::SetupClass(const DexFile& dex_file, const DexFile::ClassDef& dex_class_def, - Handle<mirror::Class> klass, mirror::ClassLoader* class_loader) { +void ClassLinker::SetupClass(const DexFile& dex_file, + const DexFile::ClassDef& dex_class_def, + Handle<mirror::Class> klass, + mirror::ClassLoader* class_loader) { CHECK(klass.Get() != nullptr); CHECK(klass->GetDexCache() != nullptr); CHECK_EQ(mirror::Class::kStatusNotReady, klass->GetStatus()); @@ -2260,7 +2278,8 @@ void ClassLinker::SetupClass(const DexFile& dex_file, const DexFile::ClassDef& d CHECK(klass->GetDexCacheStrings() != nullptr); } -void ClassLinker::LoadClass(Thread* self, const DexFile& dex_file, +void ClassLinker::LoadClass(Thread* self, + const DexFile& dex_file, const DexFile::ClassDef& dex_class_def, Handle<mirror::Class> klass) { const uint8_t* class_data = dex_file.GetClassData(dex_class_def); @@ -2270,7 +2289,7 @@ void ClassLinker::LoadClass(Thread* self, const DexFile& dex_file, bool has_oat_class = false; if (Runtime::Current()->IsStarted() && !Runtime::Current()->IsAotCompiler()) { OatFile::OatClass oat_class = FindOatClass(dex_file, klass->GetDexClassDefIndex(), - &has_oat_class); + outof(has_oat_class)); if (has_oat_class) { LoadClassMembers(self, dex_file, class_data, klass, &oat_class); } @@ -2299,7 +2318,8 @@ ArtMethod* ClassLinker::AllocArtMethodArray(Thread* self, size_t length) { return reinterpret_cast<ArtMethod*>(ptr); } -void ClassLinker::LoadClassMembers(Thread* self, const DexFile& dex_file, +void ClassLinker::LoadClassMembers(Thread* self, + const DexFile& dex_file, const uint8_t* class_data, Handle<mirror::Class> klass, const OatFile::OatClass* oat_class) { @@ -2389,10 +2409,13 @@ void ClassLinker::LoadClassMembers(Thread* self, const DexFile& dex_file, } DCHECK(!it.HasNext()); } + // Ensure that the card is marked so that remembered sets pick up native roots. 
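The WriteBarrierEveryFieldOf calls added here and in later hunks dirty the card for a class after its native fields change, so remembered sets and mod-union tables re-scan it. A rough conceptual sketch of what marking a card means (constants and names below are illustrative, not ART's heap API):

// Conceptual sketch: a card table maps fixed-size heap windows to one byte;
// dirtying the byte tells the next GC pass to re-scan that window.
#include <cstddef>
#include <cstdint>

constexpr size_t kCardShift = 10;     // one card covers 1 KiB in this sketch
constexpr uint8_t kCardDirty = 0x70;  // arbitrary non-zero "dirty" value

inline void MarkCardFor(uint8_t* card_table,
                        const uint8_t* heap_begin,
                        const void* written_object) {
  const size_t offset =
      reinterpret_cast<uintptr_t>(written_object) - reinterpret_cast<uintptr_t>(heap_begin);
  // Whole-object granularity, in the spirit of WriteBarrierEveryFieldOf.
  card_table[offset >> kCardShift] = kCardDirty;
}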
+ Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(klass.Get()); self->AllowThreadSuspension(); } -void ClassLinker::LoadField(const ClassDataItemIterator& it, Handle<mirror::Class> klass, +void ClassLinker::LoadField(const ClassDataItemIterator& it, + Handle<mirror::Class> klass, ArtField* dst) { const uint32_t field_idx = it.GetMemberIndex(); dst->SetDexFieldIndex(field_idx); @@ -2400,8 +2423,11 @@ void ClassLinker::LoadField(const ClassDataItemIterator& it, Handle<mirror::Clas dst->SetAccessFlags(it.GetFieldAccessFlags()); } -void ClassLinker::LoadMethod(Thread* self, const DexFile& dex_file, const ClassDataItemIterator& it, - Handle<mirror::Class> klass, ArtMethod* dst) { +void ClassLinker::LoadMethod(Thread* self, + const DexFile& dex_file, + const ClassDataItemIterator& it, + Handle<mirror::Class> klass, + ArtMethod* dst) { uint32_t dex_method_idx = it.GetMemberIndex(); const DexFile::MethodId& method_id = dex_file.GetMethodId(dex_method_idx); const char* method_name = dex_file.StringDataByIdx(method_id.name_idx_); @@ -2470,8 +2496,8 @@ void ClassLinker::AppendToBootClassPath(const DexFile& dex_file, bool ClassLinker::IsDexFileRegisteredLocked(const DexFile& dex_file) { dex_lock_.AssertSharedHeld(Thread::Current()); - for (size_t i = 0; i != dex_caches_.size(); ++i) { - mirror::DexCache* dex_cache = GetDexCache(i); + for (GcRoot<mirror::DexCache>& root : dex_caches_) { + mirror::DexCache* dex_cache = root.Read(); if (dex_cache->GetDexFile() == &dex_file) { return true; } @@ -2601,7 +2627,9 @@ mirror::Class* ClassLinker::InitializePrimitiveClass(mirror::Class* primitive_cl // array class; that always comes from the base element class. // // Returns null with an exception raised on failure. -mirror::Class* ClassLinker::CreateArrayClass(Thread* self, const char* descriptor, size_t hash, +mirror::Class* ClassLinker::CreateArrayClass(Thread* self, + const char* descriptor, + size_t hash, Handle<mirror::ClassLoader> class_loader) { // Identify the underlying component type CHECK_EQ('[', descriptor[0]); @@ -2769,8 +2797,7 @@ mirror::Class* ClassLinker::FindPrimitiveClass(char type) { return nullptr; } -mirror::Class* ClassLinker::InsertClass(const char* descriptor, mirror::Class* klass, - size_t hash) { +mirror::Class* ClassLinker::InsertClass(const char* descriptor, mirror::Class* klass, size_t hash) { if (VLOG_IS_ON(class_linker)) { mirror::DexCache* dex_cache = klass->GetDexCache(); std::string source; @@ -2781,11 +2808,13 @@ mirror::Class* ClassLinker::InsertClass(const char* descriptor, mirror::Class* k LOG(INFO) << "Loaded class " << descriptor << source; } WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_); - mirror::Class* existing = LookupClassFromTableLocked(descriptor, klass->GetClassLoader(), hash); + mirror::ClassLoader* const class_loader = klass->GetClassLoader(); + ClassTable* const class_table = InsertClassTableForClassLoader(class_loader); + mirror::Class* existing = class_table->Lookup(descriptor, hash); if (existing != nullptr) { return existing; } - if (kIsDebugBuild && !klass->IsTemp() && klass->GetClassLoader() == nullptr && + if (kIsDebugBuild && !klass->IsTemp() && class_loader == nullptr && dex_cache_image_class_lookup_required_) { // Check a class loaded with the system class loader matches one in the image if the class // is in the image. 
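InsertClass above now routes through a per-class-loader ClassTable, obtained with InsertClassTableForClassLoader (create on first use) or ClassTableForClassLoader (may return null). A simplified sketch of that registry shape with placeholder types:

// Illustrative sketch of a per-loader table registry: one table per class
// loader, created lazily, looked up without creating when only reading.
#include <map>
#include <memory>
#include <string>

struct FakeClassTable {
  std::map<std::string, void*> classes;  // descriptor -> class, stand-in only
};

class FakeRegistry {
 public:
  // Like InsertClassTableForClassLoader: returns the loader's table, creating it on first use.
  FakeClassTable* GetOrCreate(const void* loader) {
    std::unique_ptr<FakeClassTable>& slot = tables_[loader];  // null on first access
    if (slot == nullptr) {
      slot.reset(new FakeClassTable);
    }
    return slot.get();
  }
  // Like ClassTableForClassLoader: null if the loader has never defined a class.
  FakeClassTable* GetIfExists(const void* loader) const {
    auto it = tables_.find(loader);
    return it != tables_.end() ? it->second.get() : nullptr;
  }
 private:
  std::map<const void*, std::unique_ptr<FakeClassTable>> tables_;
};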
@@ -2795,118 +2824,64 @@ mirror::Class* ClassLinker::InsertClass(const char* descriptor, mirror::Class* k } } VerifyObject(klass); - class_table_.InsertWithHash(GcRoot<mirror::Class>(klass), hash); + class_table->InsertWithHash(klass, hash); if (log_new_class_table_roots_) { new_class_roots_.push_back(GcRoot<mirror::Class>(klass)); } return nullptr; } -void ClassLinker::UpdateClassVirtualMethods(mirror::Class* klass, ArtMethod* new_methods, +void ClassLinker::UpdateClassVirtualMethods(mirror::Class* klass, + ArtMethod* new_methods, size_t new_num_methods) { - // classlinker_classes_lock_ is used to guard against races between root marking and changing the - // direct and virtual method pointers. - WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_); + // TODO: Fix the race condition here. b/22832610 klass->SetNumVirtualMethods(new_num_methods); klass->SetVirtualMethodsPtr(new_methods); - if (log_new_class_table_roots_) { - new_class_roots_.push_back(GcRoot<mirror::Class>(klass)); - } -} - -mirror::Class* ClassLinker::UpdateClass(const char* descriptor, mirror::Class* klass, - size_t hash) { - WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_); - auto existing_it = class_table_.FindWithHash(std::make_pair(descriptor, klass->GetClassLoader()), - hash); - CHECK(existing_it != class_table_.end()); - mirror::Class* existing = existing_it->Read(); - CHECK_NE(existing, klass) << descriptor; - CHECK(!existing->IsResolved()) << descriptor; - CHECK_EQ(klass->GetStatus(), mirror::Class::kStatusResolving) << descriptor; - - CHECK(!klass->IsTemp()) << descriptor; - if (kIsDebugBuild && klass->GetClassLoader() == nullptr && - dex_cache_image_class_lookup_required_) { - // Check a class loaded with the system class loader matches one in the image if the class - // is in the image. - existing = LookupClassFromImage(descriptor); - if (existing != nullptr) { - CHECK_EQ(klass, existing) << descriptor; - } - } - VerifyObject(klass); - - // Update the element in the hash set. - *existing_it = GcRoot<mirror::Class>(klass); - if (log_new_class_table_roots_) { - new_class_roots_.push_back(GcRoot<mirror::Class>(klass)); - } - - return existing; + // Need to mark the card so that the remembered sets and mod union tables get update. 
+ Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(klass); } bool ClassLinker::RemoveClass(const char* descriptor, mirror::ClassLoader* class_loader) { WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_); - auto pair = std::make_pair(descriptor, class_loader); - auto it = class_table_.Find(pair); - if (it != class_table_.end()) { - class_table_.Erase(it); - return true; - } - it = pre_zygote_class_table_.Find(pair); - if (it != pre_zygote_class_table_.end()) { - pre_zygote_class_table_.Erase(it); - return true; - } - return false; + ClassTable* const class_table = ClassTableForClassLoader(class_loader); + return class_table != nullptr && class_table->Remove(descriptor); } -mirror::Class* ClassLinker::LookupClass(Thread* self, const char* descriptor, size_t hash, +mirror::Class* ClassLinker::LookupClass(Thread* self, + const char* descriptor, + size_t hash, mirror::ClassLoader* class_loader) { { ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_); - mirror::Class* result = LookupClassFromTableLocked(descriptor, class_loader, hash); - if (result != nullptr) { - return result; + ClassTable* const class_table = ClassTableForClassLoader(class_loader); + if (class_table != nullptr) { + mirror::Class* result = class_table->Lookup(descriptor, hash); + if (result != nullptr) { + return result; + } } } if (class_loader != nullptr || !dex_cache_image_class_lookup_required_) { return nullptr; - } else { - // Lookup failed but need to search dex_caches_. - mirror::Class* result = LookupClassFromImage(descriptor); - if (result != nullptr) { - InsertClass(descriptor, result, hash); - } else { - // Searching the image dex files/caches failed, we don't want to get into this situation - // often as map searches are faster, so after kMaxFailedDexCacheLookups move all image - // classes into the class table. - constexpr uint32_t kMaxFailedDexCacheLookups = 1000; - if (++failed_dex_cache_class_lookups_ > kMaxFailedDexCacheLookups) { - MoveImageClassesToClassTable(); - } - } - return result; } -} - -mirror::Class* ClassLinker::LookupClassFromTableLocked(const char* descriptor, - mirror::ClassLoader* class_loader, - size_t hash) { - auto descriptor_pair = std::make_pair(descriptor, class_loader); - auto it = pre_zygote_class_table_.FindWithHash(descriptor_pair, hash); - if (it == pre_zygote_class_table_.end()) { - it = class_table_.FindWithHash(descriptor_pair, hash); - if (it == class_table_.end()) { - return nullptr; + // Lookup failed but need to search dex_caches_. + mirror::Class* result = LookupClassFromImage(descriptor); + if (result != nullptr) { + result = InsertClass(descriptor, result, hash); + } else { + // Searching the image dex files/caches failed, we don't want to get into this situation + // often as map searches are faster, so after kMaxFailedDexCacheLookups move all image + // classes into the class table. 
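The fallback above counts failed image dex-cache lookups and, past kMaxFailedDexCacheLookups, bulk-moves image classes into the class table because map lookups are cheaper than repeated scans. A tiny sketch of that count-misses-then-switch shape (names, threshold, and stub bodies are placeholders):

// Illustrative sketch: tolerate a bounded number of slow-path misses, then pay
// the one-time cost of building the fast index.
#include <cstdint>
#include <string>
#include <unordered_map>

class LazyIndex {
 public:
  const std::string* Lookup(const std::string& key) {
    auto it = index_.find(key);
    if (it != index_.end()) {
      return &it->second;
    }
    const std::string* slow = SlowLookup(key);
    if (slow == nullptr && ++failed_lookups_ > kMaxFailedLookups) {
      BuildFullIndex();  // done at most rarely, analogous to MoveImageClassesToClassTable
    }
    return slow;
  }

 private:
  static constexpr uint32_t kMaxFailedLookups = 1000;
  const std::string* SlowLookup(const std::string& key) {
    (void)key;
    return nullptr;  // stand-in for the expensive image dex-cache scan
  }
  void BuildFullIndex() {
    // stand-in for populating index_ in bulk from the slow source.
  }
  std::unordered_map<std::string, std::string> index_;
  uint32_t failed_lookups_ = 0;
};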
+ constexpr uint32_t kMaxFailedDexCacheLookups = 1000; + if (++failed_dex_cache_class_lookups_ > kMaxFailedDexCacheLookups) { + MoveImageClassesToClassTable(); } } - return it->Read(); + return result; } static mirror::ObjectArray<mirror::DexCache>* GetImageDexCaches() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { gc::space::ImageSpace* image = Runtime::Current()->GetHeap()->GetImageSpace(); CHECK(image != nullptr); mirror::Object* root = image->GetImageHeader().GetImageRoot(ImageHeader::kDexCaches); @@ -2922,6 +2897,7 @@ void ClassLinker::MoveImageClassesToClassTable() { ScopedAssertNoThreadSuspension ants(self, "Moving image classes to class table"); mirror::ObjectArray<mirror::DexCache>* dex_caches = GetImageDexCaches(); std::string temp; + ClassTable* const class_table = InsertClassTableForClassLoader(nullptr); for (int32_t i = 0; i < dex_caches->GetLength(); i++) { mirror::DexCache* dex_cache = dex_caches->Get(i); mirror::ObjectArray<mirror::Class>* types = dex_cache->GetResolvedTypes(); @@ -2931,12 +2907,12 @@ void ClassLinker::MoveImageClassesToClassTable() { DCHECK(klass->GetClassLoader() == nullptr); const char* descriptor = klass->GetDescriptor(&temp); size_t hash = ComputeModifiedUtf8Hash(descriptor); - mirror::Class* existing = LookupClassFromTableLocked(descriptor, nullptr, hash); + mirror::Class* existing = class_table->Lookup(descriptor, hash); if (existing != nullptr) { CHECK_EQ(existing, klass) << PrettyClassAndClassLoader(existing) << " != " << PrettyClassAndClassLoader(klass); } else { - class_table_.Insert(GcRoot<mirror::Class>(klass)); + class_table->Insert(klass); if (log_new_class_table_roots_) { new_class_roots_.push_back(GcRoot<mirror::Class>(klass)); } @@ -2949,9 +2925,9 @@ void ClassLinker::MoveImageClassesToClassTable() { void ClassLinker::MoveClassTableToPreZygote() { WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_); - DCHECK(pre_zygote_class_table_.Empty()); - pre_zygote_class_table_ = std::move(class_table_); - class_table_.Clear(); + for (auto& class_table : classes_) { + class_table.second->FreezeSnapshot(); + } } mirror::Class* ClassLinker::LookupClassFromImage(const char* descriptor) { @@ -2977,37 +2953,21 @@ mirror::Class* ClassLinker::LookupClassFromImage(const char* descriptor) { return nullptr; } -void ClassLinker::LookupClasses(const char* descriptor, std::vector<mirror::Class*>& result) { +void ClassLinker::LookupClasses(const char* descriptor, + out<std::vector<mirror::Class*>> out_result) { + std::vector<mirror::Class*>& result = *out_result; result.clear(); if (dex_cache_image_class_lookup_required_) { MoveImageClassesToClassTable(); } WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_); - while (true) { - auto it = class_table_.Find(descriptor); - if (it == class_table_.end()) { - break; + for (auto& pair : classes_) { + // There can only be one class with the same descriptor per class loader. + ClassTable* const class_table = pair.second; + mirror::Class* klass = class_table->Lookup(descriptor, ComputeModifiedUtf8Hash(descriptor)); + if (klass != nullptr) { + result.push_back(klass); } - result.push_back(it->Read()); - class_table_.Erase(it); - } - for (mirror::Class* k : result) { - class_table_.Insert(GcRoot<mirror::Class>(k)); - } - size_t pre_zygote_start = result.size(); - // Now handle the pre zygote table. - // Note: This dirties the pre-zygote table but shouldn't be an issue since LookupClasses is only - // called from the debugger. 
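MoveClassTableToPreZygote now freezes a snapshot inside each ClassTable instead of swapping whole tables. The real ClassTable is more involved, but the idea can be sketched as a two-level set where lookups consult the frozen part first and inserts only touch the mutable part:

// Illustrative sketch of the snapshot idea behind FreezeSnapshot: the frozen
// (pre-zygote) contents become read-only, so they never need to be copied,
// while new insertions accumulate in a separate mutable set.
#include <string>
#include <unordered_set>

class FrozenOverlaySet {
 public:
  void Insert(const std::string& descriptor) { mutable_.insert(descriptor); }
  bool Contains(const std::string& descriptor) const {
    return frozen_.count(descriptor) != 0 || mutable_.count(descriptor) != 0;
  }
  // After this call the current contents are read-only; new inserts start fresh.
  void FreezeSnapshot() {
    frozen_.insert(mutable_.begin(), mutable_.end());
    mutable_.clear();
  }
 private:
  std::unordered_set<std::string> frozen_;
  std::unordered_set<std::string> mutable_;
};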
- while (true) { - auto it = pre_zygote_class_table_.Find(descriptor); - if (it == pre_zygote_class_table_.end()) { - break; - } - result.push_back(it->Read()); - pre_zygote_class_table_.Erase(it); - } - for (size_t i = pre_zygote_start; i < result.size(); ++i) { - pre_zygote_class_table_.Insert(GcRoot<mirror::Class>(result[i])); } } @@ -3172,7 +3132,8 @@ void ClassLinker::EnsurePreverifiedMethods(Handle<mirror::Class> klass) { } } -bool ClassLinker::VerifyClassUsingOatFile(const DexFile& dex_file, mirror::Class* klass, +bool ClassLinker::VerifyClassUsingOatFile(const DexFile& dex_file, + mirror::Class* klass, mirror::Class::Status& oat_file_class_status) { // If we're compiling, we can only verify the class using the oat file if // we are not compiling the image or if the class we're verifying is not part of @@ -3277,14 +3238,13 @@ void ClassLinker::ResolveMethodExceptionHandlerTypes(const DexFile& dex_file, } const uint8_t* handlers_ptr = DexFile::GetCatchHandlerData(*code_item, 0); uint32_t handlers_size = DecodeUnsignedLeb128(&handlers_ptr); - ClassLinker* linker = Runtime::Current()->GetClassLinker(); for (uint32_t idx = 0; idx < handlers_size; idx++) { CatchHandlerIterator iterator(handlers_ptr); for (; iterator.HasNext(); iterator.Next()) { // Ensure exception types are resolved so that they don't need resolution to be delivered, // unresolved exception types will be ignored by exception delivery if (iterator.GetHandlerTypeIndex() != DexFile::kDexNoIndex16) { - mirror::Class* exception_type = linker->ResolveType(iterator.GetHandlerTypeIndex(), method); + mirror::Class* exception_type = ResolveType(iterator.GetHandlerTypeIndex(), method); if (exception_type == nullptr) { DCHECK(Thread::Current()->IsExceptionPending()); Thread::Current()->ClearException(); @@ -3295,9 +3255,12 @@ void ClassLinker::ResolveMethodExceptionHandlerTypes(const DexFile& dex_file, } } -mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccessAlreadyRunnable& soa, jstring name, - jobjectArray interfaces, jobject loader, - jobjectArray methods, jobjectArray throws) { +mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccessAlreadyRunnable& soa, + jstring name, + jobjectArray interfaces, + jobject loader, + jobjectArray methods, + jobjectArray throws) { Thread* self = soa.Self(); StackHandleScope<10> hs(self); MutableHandle<mirror::Class> klass(hs.NewHandle( @@ -3316,7 +3279,7 @@ mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccessAlreadyRunnable& klass->SetDexCache(GetClassRoot(kJavaLangReflectProxy)->GetDexCache()); mirror::Class::SetStatus(klass, mirror::Class::kStatusIdx, self); std::string descriptor(GetDescriptorForProxy(klass.Get())); - size_t hash = ComputeModifiedUtf8Hash(descriptor.c_str()); + const size_t hash = ComputeModifiedUtf8Hash(descriptor.c_str()); // Insert the class before loading the fields as the field roots // (ArtField::declaring_class_) are only visited from the class @@ -3392,7 +3355,7 @@ mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccessAlreadyRunnable& // The new class will replace the old one in the class table. 
Handle<mirror::ObjectArray<mirror::Class>> h_interfaces( hs.NewHandle(soa.Decode<mirror::ObjectArray<mirror::Class>*>(interfaces))); - if (!LinkClass(self, descriptor.c_str(), klass, h_interfaces, &new_class)) { + if (!LinkClass(self, descriptor.c_str(), klass, h_interfaces, outof(new_class))) { mirror::Class::SetStatus(klass, mirror::Class::kStatusError, self); return nullptr; } @@ -3450,8 +3413,7 @@ std::string ClassLinker::GetDescriptorForProxy(mirror::Class* proxy_class) { return DotToDescriptor(name->ToModifiedUtf8().c_str()); } -ArtMethod* ClassLinker::FindMethodForProxy(mirror::Class* proxy_class, - ArtMethod* proxy_method) { +ArtMethod* ClassLinker::FindMethodForProxy(mirror::Class* proxy_class, ArtMethod* proxy_method) { DCHECK(proxy_class->IsProxyClass()); DCHECK(proxy_method->IsProxyMethod()); { @@ -3498,7 +3460,8 @@ void ClassLinker::CheckProxyConstructor(ArtMethod* constructor) const { DCHECK(constructor->IsPublic()); } -void ClassLinker::CreateProxyMethod(Handle<mirror::Class> klass, ArtMethod* prototype, +void ClassLinker::CreateProxyMethod(Handle<mirror::Class> klass, + ArtMethod* prototype, ArtMethod* out) { // Ensure prototype is in dex cache so that we can use the dex cache to look up the overridden // prototype method @@ -3544,7 +3507,8 @@ void ClassLinker::CheckProxyMethod(ArtMethod* method, ArtMethod* prototype) cons CHECK_EQ(np->GetReturnType(), prototype->GetReturnType()); } -bool ClassLinker::CanWeInitializeClass(mirror::Class* klass, bool can_init_statics, +bool ClassLinker::CanWeInitializeClass(mirror::Class* klass, + bool can_init_statics, bool can_init_parents) { if (can_init_statics && can_init_parents) { return true; @@ -3574,8 +3538,10 @@ bool ClassLinker::CanWeInitializeClass(mirror::Class* klass, bool can_init_stati return CanWeInitializeClass(super_class, can_init_statics, can_init_parents); } -bool ClassLinker::InitializeClass(Thread* self, Handle<mirror::Class> klass, - bool can_init_statics, bool can_init_parents) { +bool ClassLinker::InitializeClass(Thread* self, + Handle<mirror::Class> klass, + bool can_init_statics, + bool can_init_parents) { // see JLS 3rd edition, 12.4.2 "Detailed Initialization Procedure" for the locking protocol // Are we already initialized and therefore done? @@ -3644,7 +3610,7 @@ bool ClassLinker::InitializeClass(Thread* self, Handle<mirror::Class> klass, return true; } // No. That's fine. Wait for another thread to finish initializing. - return WaitForInitializeClass(klass, self, lock); + return WaitForInitializeClass(klass, self, &lock); } if (!ValidateSuperClassDescriptors(klass)) { @@ -3778,13 +3744,16 @@ bool ClassLinker::InitializeClass(Thread* self, Handle<mirror::Class> klass, return success; } -bool ClassLinker::WaitForInitializeClass(Handle<mirror::Class> klass, Thread* self, - ObjectLock<mirror::Class>& lock) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +bool ClassLinker::WaitForInitializeClass(Handle<mirror::Class> klass, + Thread* self, + ObjectLock<mirror::Class>* lock) + SHARED_REQUIRES(Locks::mutator_lock_) { + DCHECK(lock != nullptr); + while (true) { self->AssertNoPendingException(); CHECK(!klass->IsInitialized()); - lock.WaitIgnoringInterrupts(); + lock->WaitIgnoringInterrupts(); // When we wake up, repeat the test for init-in-progress. 
If // there's an exception pending (only possible if @@ -3824,7 +3793,7 @@ static void ThrowSignatureCheckResolveReturnTypeException(Handle<mirror::Class> Handle<mirror::Class> super_klass, ArtMethod* method, ArtMethod* m) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(Thread::Current()->IsExceptionPending()); DCHECK(!m->IsProxyMethod()); const DexFile* dex_file = m->GetDexFile(); @@ -3847,8 +3816,9 @@ static void ThrowSignatureCheckResolveArgException(Handle<mirror::Class> klass, Handle<mirror::Class> super_klass, ArtMethod* method, ArtMethod* m, - uint32_t index, uint32_t arg_type_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + uint32_t index, + uint32_t arg_type_idx) + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(Thread::Current()->IsExceptionPending()); DCHECK(!m->IsProxyMethod()); const DexFile* dex_file = m->GetDexFile(); @@ -3868,7 +3838,7 @@ static void ThrowSignatureMismatch(Handle<mirror::Class> klass, Handle<mirror::Class> super_klass, ArtMethod* method, const std::string& error_msg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ThrowLinkageError(klass.Get(), "Class %s method %s resolves differently in %s %s: %s", PrettyDescriptor(klass.Get()).c_str(), @@ -3883,7 +3853,7 @@ static bool HasSameSignatureWithDifferentClassLoaders(Thread* self, Handle<mirror::Class> super_klass, ArtMethod* method1, ArtMethod* method2) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { { StackHandleScope<1> hs(self); Handle<mirror::Class> return_type(hs.NewHandle(method1->GetReturnType())); @@ -4009,7 +3979,8 @@ bool ClassLinker::ValidateSuperClassDescriptors(Handle<mirror::Class> klass) { return true; } -bool ClassLinker::EnsureInitialized(Thread* self, Handle<mirror::Class> c, bool can_init_fields, +bool ClassLinker::EnsureInitialized(Thread* self, Handle<mirror::Class> c, + bool can_init_fields, bool can_init_parents) { DCHECK(c.Get() != nullptr); if (c->IsInitialized()) { @@ -4030,7 +4001,7 @@ bool ClassLinker::EnsureInitialized(Thread* self, Handle<mirror::Class> c, bool void ClassLinker::FixupTemporaryDeclaringClass(mirror::Class* temp_class, mirror::Class* new_class) { ArtField* fields = new_class->GetIFields(); - DCHECK_EQ(temp_class->NumInstanceFields(), new_class->NumInstanceFields()); + DCHECK_EQ(temp_class->NumInstanceFields(), 0u); for (size_t i = 0, count = new_class->NumInstanceFields(); i < count; i++) { if (fields[i].GetDeclaringClass() == temp_class) { fields[i].SetDeclaringClass(new_class); @@ -4038,31 +4009,56 @@ void ClassLinker::FixupTemporaryDeclaringClass(mirror::Class* temp_class, } fields = new_class->GetSFields(); - DCHECK_EQ(temp_class->NumStaticFields(), new_class->NumStaticFields()); + DCHECK_EQ(temp_class->NumStaticFields(), 0u); for (size_t i = 0, count = new_class->NumStaticFields(); i < count; i++) { if (fields[i].GetDeclaringClass() == temp_class) { fields[i].SetDeclaringClass(new_class); } } - DCHECK_EQ(temp_class->NumDirectMethods(), new_class->NumDirectMethods()); + DCHECK_EQ(temp_class->NumDirectMethods(), 0u); for (auto& method : new_class->GetDirectMethods(image_pointer_size_)) { if (method.GetDeclaringClass() == temp_class) { method.SetDeclaringClass(new_class); } } - DCHECK_EQ(temp_class->NumVirtualMethods(), new_class->NumVirtualMethods()); + DCHECK_EQ(temp_class->NumVirtualMethods(), 0u); for (auto& method : new_class->GetVirtualMethods(image_pointer_size_)) { if (method.GetDeclaringClass() == temp_class) { 
method.SetDeclaringClass(new_class); } } + + // Make sure the remembered set and mod-union tables know that we updated some of the native + // roots. + Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(new_class); +} + +ClassTable* ClassLinker::InsertClassTableForClassLoader(mirror::ClassLoader* class_loader) { + auto it = classes_.find(GcRoot<mirror::ClassLoader>(class_loader)); + if (it != classes_.end()) { + return it->second; + } + // Class table for loader not found, add it to the table. + auto* const class_table = new ClassTable; + classes_.Put(GcRoot<mirror::ClassLoader>(class_loader), class_table); + return class_table; +} + +ClassTable* ClassLinker::ClassTableForClassLoader(mirror::ClassLoader* class_loader) { + auto it = classes_.find(GcRoot<mirror::ClassLoader>(class_loader)); + if (it != classes_.end()) { + return it->second; + } + return nullptr; } -bool ClassLinker::LinkClass(Thread* self, const char* descriptor, Handle<mirror::Class> klass, +bool ClassLinker::LinkClass(Thread* self, + const char* descriptor, + Handle<mirror::Class> klass, Handle<mirror::ObjectArray<mirror::Class>> interfaces, - MutableHandle<mirror::Class>* h_new_class_out) { + out<MutableHandle<mirror::Class>> h_new_class_out) { CHECK_EQ(mirror::Class::kStatusLoaded, klass->GetStatus()); if (!LinkSuperClass(klass)) { @@ -4070,14 +4066,14 @@ bool ClassLinker::LinkClass(Thread* self, const char* descriptor, Handle<mirror: } ArtMethod* imt[mirror::Class::kImtSize]; std::fill_n(imt, arraysize(imt), Runtime::Current()->GetImtUnimplementedMethod()); - if (!LinkMethods(self, klass, interfaces, imt)) { + if (!LinkMethods(self, klass, interfaces, outof(imt))) { return false; } if (!LinkInstanceFields(self, klass)) { return false; } size_t class_size; - if (!LinkStaticFields(self, klass, &class_size)) { + if (!LinkStaticFields(self, klass, outof(class_size))) { return false; } CreateReferenceInstanceOffsets(klass); @@ -4101,6 +4097,14 @@ bool ClassLinker::LinkClass(Thread* self, const char* descriptor, Handle<mirror: // Retire the temporary class and create the correctly sized resolved class. StackHandleScope<1> hs(self); auto h_new_class = hs.NewHandle(klass->CopyOf(self, class_size, imt, image_pointer_size_)); + // Set array lengths to 0 since we don't want the GC to visit two different classes with the + // same ArtFields with the same If this occurs, it causes bugs in remembered sets since the GC + // may not see any references to the from space and clean the card. Though there was references + // to the from space that got marked by the first class. 
+ klass->SetNumDirectMethods(0); + klass->SetNumVirtualMethods(0); + klass->SetNumStaticFields(0); + klass->SetNumInstanceFields(0); if (UNLIKELY(h_new_class.Get() == nullptr)) { self->AssertPendingOOMException(); mirror::Class::SetStatus(klass, mirror::Class::kStatusError, self); @@ -4110,9 +4114,26 @@ bool ClassLinker::LinkClass(Thread* self, const char* descriptor, Handle<mirror: CHECK_EQ(h_new_class->GetClassSize(), class_size); ObjectLock<mirror::Class> lock(self, h_new_class); FixupTemporaryDeclaringClass(klass.Get(), h_new_class.Get()); - mirror::Class* existing = UpdateClass(descriptor, h_new_class.Get(), - ComputeModifiedUtf8Hash(descriptor)); - CHECK(existing == nullptr || existing == klass.Get()); + + { + WriterMutexLock mu(self, *Locks::classlinker_classes_lock_); + mirror::ClassLoader* const class_loader = h_new_class.Get()->GetClassLoader(); + ClassTable* const table = InsertClassTableForClassLoader(class_loader); + mirror::Class* existing = table->UpdateClass(descriptor, h_new_class.Get(), + ComputeModifiedUtf8Hash(descriptor)); + CHECK_EQ(existing, klass.Get()); + if (kIsDebugBuild && class_loader == nullptr && dex_cache_image_class_lookup_required_) { + // Check a class loaded with the system class loader matches one in the image if the class + // is in the image. + mirror::Class* const image_class = LookupClassFromImage(descriptor); + if (image_class != nullptr) { + CHECK_EQ(klass.Get(), existing) << descriptor; + } + } + if (log_new_class_table_roots_) { + new_class_roots_.push_back(GcRoot<mirror::Class>(h_new_class.Get())); + } + } // This will notify waiters on temp class that saw the not yet resolved class in the // class_table_ during EnsureResolved. @@ -4128,30 +4149,31 @@ bool ClassLinker::LinkClass(Thread* self, const char* descriptor, Handle<mirror: return true; } -static void CountMethodsAndFields(ClassDataItemIterator& dex_data, - size_t* virtual_methods, - size_t* direct_methods, - size_t* static_fields, - size_t* instance_fields) { +static void CountMethodsAndFields(ClassDataItemIterator* dex_data, + out<size_t> virtual_methods, + out<size_t> direct_methods, + out<size_t> static_fields, + out<size_t> instance_fields) { + DCHECK(dex_data != nullptr); *virtual_methods = *direct_methods = *static_fields = *instance_fields = 0; - while (dex_data.HasNextStaticField()) { - dex_data.Next(); + while (dex_data->HasNextStaticField()) { + dex_data->Next(); (*static_fields)++; } - while (dex_data.HasNextInstanceField()) { - dex_data.Next(); + while (dex_data->HasNextInstanceField()) { + dex_data->Next(); (*instance_fields)++; } - while (dex_data.HasNextDirectMethod()) { + while (dex_data->HasNextDirectMethod()) { (*direct_methods)++; - dex_data.Next(); + dex_data->Next(); } - while (dex_data.HasNextVirtualMethod()) { + while (dex_data->HasNextVirtualMethod()) { (*virtual_methods)++; - dex_data.Next(); + dex_data->Next(); } - DCHECK(!dex_data.HasNext()); + DCHECK(!dex_data->HasNext()); } static void DumpClass(std::ostream& os, @@ -4185,8 +4207,10 @@ static void DumpClass(std::ostream& os, } } -static std::string DumpClasses(const DexFile& dex_file1, const DexFile::ClassDef& dex_class_def1, - const DexFile& dex_file2, const DexFile::ClassDef& dex_class_def2) { +static std::string DumpClasses(const DexFile& dex_file1, + const DexFile::ClassDef& dex_class_def1, + const DexFile& dex_file2, + const DexFile::ClassDef& dex_class_def2) { std::ostringstream os; DumpClass(os, dex_file1, dex_class_def1, " (Compile time)"); DumpClass(os, dex_file2, dex_class_def2, " (Runtime)"); @@ 
-4196,20 +4220,28 @@ static std::string DumpClasses(const DexFile& dex_file1, const DexFile::ClassDef // Very simple structural check on whether the classes match. Only compares the number of // methods and fields. -static bool SimpleStructuralCheck(const DexFile& dex_file1, const DexFile::ClassDef& dex_class_def1, - const DexFile& dex_file2, const DexFile::ClassDef& dex_class_def2, +static bool SimpleStructuralCheck(const DexFile& dex_file1, + const DexFile::ClassDef& dex_class_def1, + const DexFile& dex_file2, + const DexFile::ClassDef& dex_class_def2, std::string* error_msg) { ClassDataItemIterator dex_data1(dex_file1, dex_file1.GetClassData(dex_class_def1)); ClassDataItemIterator dex_data2(dex_file2, dex_file2.GetClassData(dex_class_def2)); // Counters for current dex file. size_t dex_virtual_methods1, dex_direct_methods1, dex_static_fields1, dex_instance_fields1; - CountMethodsAndFields(dex_data1, &dex_virtual_methods1, &dex_direct_methods1, &dex_static_fields1, - &dex_instance_fields1); + CountMethodsAndFields(&dex_data1, + outof(dex_virtual_methods1), + outof(dex_direct_methods1), + outof(dex_static_fields1), + outof(dex_instance_fields1)); // Counters for compile-time dex file. size_t dex_virtual_methods2, dex_direct_methods2, dex_static_fields2, dex_instance_fields2; - CountMethodsAndFields(dex_data2, &dex_virtual_methods2, &dex_direct_methods2, &dex_static_fields2, - &dex_instance_fields2); + CountMethodsAndFields(&dex_data2, + outof(dex_virtual_methods2), + outof(dex_direct_methods2), + outof(dex_static_fields2), + outof(dex_instance_fields2)); if (dex_virtual_methods1 != dex_virtual_methods2) { std::string class_dump = DumpClasses(dex_file1, dex_class_def1, dex_file2, dex_class_def2); @@ -4245,7 +4277,7 @@ static bool CheckSuperClassChange(Handle<mirror::Class> klass, const DexFile& dex_file, const DexFile::ClassDef& class_def, mirror::Class* super_class) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // Check for unexpected changes in the superclass. // Quick check 1) is the super_class class-loader the boot class loader? This always has // precedence. @@ -4412,9 +4444,10 @@ bool ClassLinker::LinkSuperClass(Handle<mirror::Class> klass) { } // Populate the class vtable and itable. Compute return type indices. -bool ClassLinker::LinkMethods(Thread* self, Handle<mirror::Class> klass, +bool ClassLinker::LinkMethods(Thread* self, + Handle<mirror::Class> klass, Handle<mirror::ObjectArray<mirror::Class>> interfaces, - ArtMethod** out_imt) { + out<ArtMethod* [mirror::Class::kImtSize]> out_imt) { self->AllowThreadSuspension(); if (klass->IsInterface()) { // No vtable. @@ -4429,7 +4462,10 @@ bool ClassLinker::LinkMethods(Thread* self, Handle<mirror::Class> klass, } else if (!LinkVirtualMethods(self, klass)) { // Link virtual methods first. return false; } - return LinkInterfaceMethods(self, klass, interfaces, out_imt); // Link interface method last. + return LinkInterfaceMethods(self, + klass, + interfaces, + outof_forward(out_imt)); // Link interface method last. } // Comparator for name and signature of a method, used in finding overriding methods. 
Implementation @@ -4438,7 +4474,7 @@ bool ClassLinker::LinkMethods(Thread* self, Handle<mirror::Class> klass, class MethodNameAndSignatureComparator FINAL : public ValueObject { public: explicit MethodNameAndSignatureComparator(ArtMethod* method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : + SHARED_REQUIRES(Locks::mutator_lock_) : dex_file_(method->GetDexFile()), mid_(&dex_file_->GetMethodId(method->GetDexMethodIndex())), name_(nullptr), name_len_(0) { DCHECK(!method->IsProxyMethod()) << PrettyMethod(method); @@ -4452,7 +4488,7 @@ class MethodNameAndSignatureComparator FINAL : public ValueObject { } bool HasSameNameAndSignature(ArtMethod* other) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(!other->IsProxyMethod()) << PrettyMethod(other); const DexFile* other_dex_file = other->GetDexFile(); const DexFile::MethodId& other_mid = other_dex_file->GetMethodId(other->GetDexMethodIndex()); @@ -4482,13 +4518,15 @@ class MethodNameAndSignatureComparator FINAL : public ValueObject { class LinkVirtualHashTable { public: - LinkVirtualHashTable(Handle<mirror::Class> klass, size_t hash_size, uint32_t* hash_table, + LinkVirtualHashTable(Handle<mirror::Class> klass, + size_t hash_size, + uint32_t* hash_table, size_t image_pointer_size) : klass_(klass), hash_size_(hash_size), hash_table_(hash_table), image_pointer_size_(image_pointer_size) { std::fill(hash_table_, hash_table_ + hash_size_, invalid_index_); } - void Add(uint32_t virtual_method_index) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void Add(uint32_t virtual_method_index) SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod* local_method = klass_->GetVirtualMethodDuringLinking( virtual_method_index, image_pointer_size_); const char* name = local_method->GetInterfaceMethodIfProxy(image_pointer_size_)->GetName(); @@ -4503,7 +4541,7 @@ class LinkVirtualHashTable { hash_table_[index] = virtual_method_index; } uint32_t FindAndRemove(MethodNameAndSignatureComparator* comparator) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { const char* name = comparator->GetName(); uint32_t hash = ComputeModifiedUtf8Hash(name); size_t index = hash % hash_size_; @@ -4686,9 +4724,12 @@ bool ClassLinker::LinkVirtualMethods(Thread* self, Handle<mirror::Class> klass) return true; } -bool ClassLinker::LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass, +bool ClassLinker::LinkInterfaceMethods(Thread* self, + Handle<mirror::Class> klass, Handle<mirror::ObjectArray<mirror::Class>> interfaces, - ArtMethod** out_imt) { + out<ArtMethod* [mirror::Class::kImtSize]> out_imt_array) { + auto& out_imt = *out_imt_array; + StackHandleScope<3> hs(self); Runtime* const runtime = Runtime::Current(); const bool has_superclass = klass->HasSuperClass(); @@ -4876,7 +4917,7 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass } } - auto* old_cause = self->StartAssertNoThreadSuspension( + const char* old_cause = self->StartAssertNoThreadSuspension( "Copying ArtMethods for LinkInterfaceMethods"); for (size_t i = 0; i < ifcount; ++i) { size_t num_methods = iftable->GetInterface(i)->NumVirtualMethods(); @@ -4987,6 +5028,7 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass self, old_virtuals, old_method_count * method_size, new_method_count * method_size)); if (UNLIKELY(virtuals == nullptr)) { self->AssertPendingOOMException(); + self->EndAssertNoThreadSuspension(old_cause); return false; } ScopedArenaUnorderedMap<ArtMethod*, 
ArtMethod*> move_table(allocator.Adapter()); @@ -5096,16 +5138,20 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass bool ClassLinker::LinkInstanceFields(Thread* self, Handle<mirror::Class> klass) { CHECK(klass.Get() != nullptr); - return LinkFields(self, klass, false, nullptr); + size_t class_size_dont_care; + UNUSED(class_size_dont_care); // This doesn't get set for instance fields. + return LinkFields(self, klass, false, outof(class_size_dont_care)); } -bool ClassLinker::LinkStaticFields(Thread* self, Handle<mirror::Class> klass, size_t* class_size) { +bool ClassLinker::LinkStaticFields(Thread* self, + Handle<mirror::Class> klass, + out<size_t> class_size) { CHECK(klass.Get() != nullptr); - return LinkFields(self, klass, true, class_size); + return LinkFields(self, klass, true, outof_forward(class_size)); } struct LinkFieldsComparator { - explicit LinkFieldsComparator() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + explicit LinkFieldsComparator() SHARED_REQUIRES(Locks::mutator_lock_) { } // No thread safety analysis as will be called from STL. Checked lock held in constructor. bool operator()(ArtField* field1, ArtField* field2) @@ -5138,8 +5184,10 @@ struct LinkFieldsComparator { } }; -bool ClassLinker::LinkFields(Thread* self, Handle<mirror::Class> klass, bool is_static, - size_t* class_size) { +bool ClassLinker::LinkFields(Thread* self, + Handle<mirror::Class> klass, + bool is_static, + out<size_t> class_size) { self->AllowThreadSuspension(); const size_t num_fields = is_static ? klass->NumStaticFields() : klass->NumInstanceFields(); ArtField* const fields = is_static ? klass->GetSFields() : klass->GetIFields(); @@ -5310,7 +5358,8 @@ void ClassLinker::CreateReferenceInstanceOffsets(Handle<mirror::Class> klass) { klass->SetReferenceInstanceOffsets(reference_offsets); } -mirror::String* ClassLinker::ResolveString(const DexFile& dex_file, uint32_t string_idx, +mirror::String* ClassLinker::ResolveString(const DexFile& dex_file, + uint32_t string_idx, Handle<mirror::DexCache> dex_cache) { DCHECK(dex_cache.Get() != nullptr); mirror::String* resolved = dex_cache->GetResolvedString(string_idx); @@ -5324,7 +5373,8 @@ mirror::String* ClassLinker::ResolveString(const DexFile& dex_file, uint32_t str return string; } -mirror::Class* ClassLinker::ResolveType(const DexFile& dex_file, uint16_t type_idx, +mirror::Class* ClassLinker::ResolveType(const DexFile& dex_file, + uint16_t type_idx, mirror::Class* referrer) { StackHandleScope<2> hs(Thread::Current()); Handle<mirror::DexCache> dex_cache(hs.NewHandle(referrer->GetDexCache())); @@ -5332,7 +5382,8 @@ mirror::Class* ClassLinker::ResolveType(const DexFile& dex_file, uint16_t type_i return ResolveType(dex_file, type_idx, dex_cache, class_loader); } -mirror::Class* ClassLinker::ResolveType(const DexFile& dex_file, uint16_t type_idx, +mirror::Class* ClassLinker::ResolveType(const DexFile& dex_file, + uint16_t type_idx, Handle<mirror::DexCache> dex_cache, Handle<mirror::ClassLoader> class_loader) { DCHECK(dex_cache.Get() != nullptr); @@ -5365,7 +5416,8 @@ mirror::Class* ClassLinker::ResolveType(const DexFile& dex_file, uint16_t type_i return resolved; } -ArtMethod* ClassLinker::ResolveMethod(const DexFile& dex_file, uint32_t method_idx, +ArtMethod* ClassLinker::ResolveMethod(const DexFile& dex_file, + uint32_t method_idx, Handle<mirror::DexCache> dex_cache, Handle<mirror::ClassLoader> class_loader, ArtMethod* referrer, InvokeType type) { @@ -5522,9 +5574,11 @@ ArtMethod* ClassLinker::ResolveMethod(const DexFile& 
dex_file, uint32_t method_i } } -ArtField* ClassLinker::ResolveField(const DexFile& dex_file, uint32_t field_idx, +ArtField* ClassLinker::ResolveField(const DexFile& dex_file, + uint32_t field_idx, Handle<mirror::DexCache> dex_cache, - Handle<mirror::ClassLoader> class_loader, bool is_static) { + Handle<mirror::ClassLoader> class_loader, + bool is_static) { DCHECK(dex_cache.Get() != nullptr); ArtField* resolved = dex_cache->GetResolvedField(field_idx, image_pointer_size_); if (resolved != nullptr) { @@ -5563,7 +5617,8 @@ ArtField* ClassLinker::ResolveField(const DexFile& dex_file, uint32_t field_idx, return resolved; } -ArtField* ClassLinker::ResolveFieldJLS(const DexFile& dex_file, uint32_t field_idx, +ArtField* ClassLinker::ResolveFieldJLS(const DexFile& dex_file, + uint32_t field_idx, Handle<mirror::DexCache> dex_cache, Handle<mirror::ClassLoader> class_loader) { DCHECK(dex_cache.Get() != nullptr); @@ -5593,7 +5648,8 @@ ArtField* ClassLinker::ResolveFieldJLS(const DexFile& dex_file, uint32_t field_i return resolved; } -const char* ClassLinker::MethodShorty(uint32_t method_idx, ArtMethod* referrer, +const char* ClassLinker::MethodShorty(uint32_t method_idx, + ArtMethod* referrer, uint32_t* length) { mirror::Class* declaring_class = referrer->GetDeclaringClass(); mirror::DexCache* dex_cache = declaring_class->GetDexCache(); @@ -5602,23 +5658,22 @@ const char* ClassLinker::MethodShorty(uint32_t method_idx, ArtMethod* referrer, return dex_file.GetMethodShorty(method_id, length); } -void ClassLinker::DumpAllClasses(int flags) { - if (dex_cache_image_class_lookup_required_) { - MoveImageClassesToClassTable(); - } - // TODO: at the time this was written, it wasn't safe to call PrettyField with the ClassLinker - // lock held, because it might need to resolve a field's type, which would try to take the lock. 
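// Editorial aside, not part of the patch: the reworked signatures in this file
// (LinkMethods, LinkStaticFields, CountMethodsAndFields, FindOatMethodFor, ...) replace raw
// T* out-parameters with out<T> objects built at the call site via outof(...). The real
// wrapper is declared through base/out_fwd.h and is not shown in this hunk, so the sketch
// below is only an assumption about its minimal shape; the names OutParam/OutOf are made up
// to avoid suggesting this is the actual ART implementation.
#include <cassert>
#include <cstddef>

template <typename T>
class OutParam {            // minimal stand-in for an out<T>-style wrapper
 public:
  explicit OutParam(T& param) : param_(&param) {}
  T& operator*() const { return *param_; }
  T* operator->() const { return param_; }
 private:
  T* param_;                // never null, unlike a raw T* out-parameter
};

template <typename T>
OutParam<T> OutOf(T& param) {  // call-site marker, analogous to outof(class_size)
  return OutParam<T>(param);
}

// Mirrors the style of the new LinkStaticFields/LinkFields, which write through out<size_t>.
static bool ComputeClassSize(OutParam<size_t> class_size) {
  *class_size = 64u;        // the callee writes the result through the wrapper
  return true;
}

int main() {
  size_t class_size = 0;
  bool ok = ComputeClassSize(OutOf(class_size));
  assert(ok && class_size == 64u);
  return 0;
}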
- std::vector<mirror::Class*> all_classes; - { - ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_); - for (GcRoot<mirror::Class>& it : class_table_) { - all_classes.push_back(it.Read()); - } - } +class DumpClassVisitor : public ClassVisitor { + public: + explicit DumpClassVisitor(int flags) : flags_(flags) {} - for (size_t i = 0; i < all_classes.size(); ++i) { - all_classes[i]->DumpClass(std::cerr, flags); + bool Visit(mirror::Class* klass) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { + klass->DumpClass(LOG(ERROR), flags_); + return true; } + + private: + const int flags_; +}; + +void ClassLinker::DumpAllClasses(int flags) { + DumpClassVisitor visitor(flags); + VisitClasses(&visitor); } static OatFile::OatMethod CreateOatMethod(const void* code) { @@ -5671,8 +5726,24 @@ void ClassLinker::DumpForSigQuit(std::ostream& os) { MoveImageClassesToClassTable(); } ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_); - os << "Zygote loaded classes=" << pre_zygote_class_table_.Size() << " post zygote classes=" - << class_table_.Size() << "\n"; + os << "Zygote loaded classes=" << NumZygoteClasses() << " post zygote classes=" + << NumNonZygoteClasses() << "\n"; +} + +size_t ClassLinker::NumZygoteClasses() const { + size_t sum = 0; + for (auto& pair : classes_) { + sum += pair.second->NumZygoteClasses(); + } + return sum; +} + +size_t ClassLinker::NumNonZygoteClasses() const { + size_t sum = 0; + for (auto& pair : classes_) { + sum += pair.second->NumNonZygoteClasses(); + } + return sum; } size_t ClassLinker::NumLoadedClasses() { @@ -5681,7 +5752,7 @@ size_t ClassLinker::NumLoadedClasses() { } ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_); // Only return non zygote classes since these are the ones which apps which care about. - return class_table_.Size(); + return NumNonZygoteClasses(); } pid_t ClassLinker::GetClassesLockOwner() { @@ -5752,43 +5823,6 @@ const char* ClassLinker::GetClassRootDescriptor(ClassRoot class_root) { return descriptor; } -std::size_t ClassLinker::ClassDescriptorHashEquals::operator()(const GcRoot<mirror::Class>& root) - const { - std::string temp; - return ComputeModifiedUtf8Hash(root.Read()->GetDescriptor(&temp)); -} - -bool ClassLinker::ClassDescriptorHashEquals::operator()(const GcRoot<mirror::Class>& a, - const GcRoot<mirror::Class>& b) const { - if (a.Read()->GetClassLoader() != b.Read()->GetClassLoader()) { - return false; - } - std::string temp; - return a.Read()->DescriptorEquals(b.Read()->GetDescriptor(&temp)); -} - -std::size_t ClassLinker::ClassDescriptorHashEquals::operator()( - const std::pair<const char*, mirror::ClassLoader*>& element) const { - return ComputeModifiedUtf8Hash(element.first); -} - -bool ClassLinker::ClassDescriptorHashEquals::operator()( - const GcRoot<mirror::Class>& a, const std::pair<const char*, mirror::ClassLoader*>& b) const { - if (a.Read()->GetClassLoader() != b.second) { - return false; - } - return a.Read()->DescriptorEquals(b.first); -} - -bool ClassLinker::ClassDescriptorHashEquals::operator()(const GcRoot<mirror::Class>& a, - const char* descriptor) const { - return a.Read()->DescriptorEquals(descriptor); -} - -std::size_t ClassLinker::ClassDescriptorHashEquals::operator()(const char* descriptor) const { - return ComputeModifiedUtf8Hash(descriptor); -} - bool ClassLinker::MayBeCalledWithDirectCodePointer(ArtMethod* m) { if (Runtime::Current()->UseJit()) { // JIT can have direct code pointers from any method to any other method. 
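// Editorial aside, not part of the patch: the hunks above replace the old copy-then-dump walk
// in DumpAllClasses with a small abstract visitor (DumpClassVisitor), and DumpForSigQuit now
// reports totals by summing one ClassTable per class loader (NumZygoteClasses /
// NumNonZygoteClasses iterate classes_). The compact model below shows both ideas with demo
// types; none of these names are the real ART classes.
#include <cstddef>
#include <iostream>
#include <map>
#include <vector>

struct DemoClass { const char* name; };   // stand-in for mirror::Class

// Stand-in for ClassTable: classes present at zygote fork vs. classes loaded afterwards.
struct DemoTable {
  std::vector<DemoClass*> zygote_classes;
  std::vector<DemoClass*> post_zygote_classes;
  size_t NumZygoteClasses() const { return zygote_classes.size(); }
  size_t NumNonZygoteClasses() const { return post_zygote_classes.size(); }
};

// Abstract visitor in the style of the new ClassVisitor: return false to stop the walk early.
class DemoClassVisitor {
 public:
  virtual ~DemoClassVisitor() {}
  virtual bool Visit(DemoClass* klass) = 0;
};

class PrintingVisitor : public DemoClassVisitor {
 public:
  bool Visit(DemoClass* klass) override {
    std::cerr << klass->name << "\n";
    return true;  // keep walking, like DumpClassVisitor::Visit
  }
};

// Summing per-loader tables, as the new NumZygoteClasses/NumNonZygoteClasses do over classes_.
size_t CountZygoteClasses(const std::map<void*, DemoTable*>& tables) {
  size_t sum = 0;
  for (const auto& pair : tables) {
    sum += pair.second->NumZygoteClasses();
  }
  return sum;
}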
@@ -5822,7 +5856,8 @@ bool ClassLinker::MayBeCalledWithDirectCodePointer(ArtMethod* m) { } } -jobject ClassLinker::CreatePathClassLoader(Thread* self, std::vector<const DexFile*>& dex_files) { +jobject ClassLinker::CreatePathClassLoader(Thread* self, + const std::vector<const DexFile*>& dex_files) { // SOAAlreadyRunnable is protected, and we need something to add a global reference. // We could move the jobject to the callers, but all call-sites do this... ScopedObjectAccessUnchecked soa(self); diff --git a/runtime/class_linker.h b/runtime/class_linker.h index b60cba43d0..54f1f3dac8 100644 --- a/runtime/class_linker.h +++ b/runtime/class_linker.h @@ -25,6 +25,8 @@ #include "base/hash_set.h" #include "base/macros.h" #include "base/mutex.h" +#include "base/out_fwd.h" +#include "class_table.h" #include "dex_file.h" #include "gc_root.h" #include "jni.h" @@ -56,8 +58,6 @@ class Runtime; class ScopedObjectAccessAlreadyRunnable; template<size_t kNumReferences> class PACKED(4) StackHandleScope; -typedef bool (ClassVisitor)(mirror::Class* c, void* arg); - enum VisitRootFlags : uint8_t; class ClassLinker { @@ -109,16 +109,19 @@ class ClassLinker { // Initialize class linker by bootstraping from dex files. void InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> boot_class_path) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!dex_lock_); // Initialize class linker from one or more images. - void InitFromImage() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void InitFromImage() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_); // Finds a class by its descriptor, loading it if necessary. // If class_loader is null, searches boot_class_path_. - mirror::Class* FindClass(Thread* self, const char* descriptor, + mirror::Class* FindClass(Thread* self, + const char* descriptor, Handle<mirror::ClassLoader> class_loader) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!dex_lock_); // Finds a class in the path class loader, loading it if necessary without using JNI. Hash // function is supposed to be ComputeModifiedUtf8Hash(descriptor). Returns true if the @@ -126,19 +129,24 @@ class ClassLinker { // was encountered while walking the parent chain (currently only BootClassLoader and // PathClassLoader are supported). bool FindClassInPathClassLoader(ScopedObjectAccessAlreadyRunnable& soa, - Thread* self, const char* descriptor, size_t hash, + Thread* self, + const char* descriptor, + size_t hash, Handle<mirror::ClassLoader> class_loader, - mirror::Class** result) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + out<mirror::Class*> result) + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!dex_lock_); // Finds a class by its descriptor using the "system" class loader, ie by searching the // boot_class_path_. mirror::Class* FindSystemClass(Thread* self, const char* descriptor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!dex_lock_); // Finds the array class given for the element class. - mirror::Class* FindArrayClass(Thread* self, mirror::Class** element_class) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::Class* FindArrayClass(Thread* self, /* in parameter */ mirror::Class** element_class) + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!dex_lock_); // Returns true if the class linker is initialized. 
bool IsInitialized() const { @@ -146,100 +154,123 @@ class ClassLinker { } // Define a new a class based on a ClassDef from a DexFile - mirror::Class* DefineClass(Thread* self, const char* descriptor, size_t hash, + mirror::Class* DefineClass(Thread* self, + const char* descriptor, + size_t hash, Handle<mirror::ClassLoader> class_loader, - const DexFile& dex_file, const DexFile::ClassDef& dex_class_def) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const DexFile& dex_file, + const DexFile::ClassDef& dex_class_def) + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!dex_lock_); // Finds a class by its descriptor, returning null if it isn't wasn't loaded // by the given 'class_loader'. - mirror::Class* LookupClass(Thread* self, const char* descriptor, size_t hash, - mirror::ClassLoader* class_loader) - LOCKS_EXCLUDED(Locks::classlinker_classes_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::Class* LookupClass(Thread* self, + const char* descriptor, + size_t hash, + mirror::ClassLoader* + class_loader) + REQUIRES(!Locks::classlinker_classes_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Finds all the classes with the given descriptor, regardless of ClassLoader. - void LookupClasses(const char* descriptor, std::vector<mirror::Class*>& classes) - LOCKS_EXCLUDED(Locks::classlinker_classes_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void LookupClasses(const char* descriptor, out<std::vector<mirror::Class*>> classes) + REQUIRES(!Locks::classlinker_classes_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); - mirror::Class* FindPrimitiveClass(char type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::Class* FindPrimitiveClass(char type) SHARED_REQUIRES(Locks::mutator_lock_); // General class unloading is not supported, this is used to prune // unwanted classes during image writing. - bool RemoveClass(const char* descriptor, mirror::ClassLoader* class_loader) - LOCKS_EXCLUDED(Locks::classlinker_classes_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool RemoveClass(const char* descriptor, + mirror::ClassLoader* class_loader) + REQUIRES(!Locks::classlinker_classes_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); void DumpAllClasses(int flags) - LOCKS_EXCLUDED(Locks::classlinker_classes_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::classlinker_classes_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); void DumpForSigQuit(std::ostream& os) - LOCKS_EXCLUDED(Locks::classlinker_classes_lock_); + REQUIRES(!Locks::classlinker_classes_lock_); size_t NumLoadedClasses() - LOCKS_EXCLUDED(Locks::classlinker_classes_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::classlinker_classes_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Resolve a String with the given index from the DexFile, storing the // result in the DexCache. The referrer is used to identify the // target DexCache and ClassLoader to use for resolution. mirror::String* ResolveString(uint32_t string_idx, ArtMethod* referrer) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Resolve a String with the given index from the DexFile, storing the // result in the DexCache. 
- mirror::String* ResolveString(const DexFile& dex_file, uint32_t string_idx, + mirror::String* ResolveString(const DexFile& dex_file, + uint32_t string_idx, Handle<mirror::DexCache> dex_cache) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Resolve a Type with the given index from the DexFile, storing the // result in the DexCache. The referrer is used to identity the // target DexCache and ClassLoader to use for resolution. - mirror::Class* ResolveType(const DexFile& dex_file, uint16_t type_idx, mirror::Class* referrer) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::Class* ResolveType(const DexFile& dex_file, + uint16_t type_idx, + mirror::Class* referrer) + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!dex_lock_, !Roles::uninterruptible_); // Resolve a Type with the given index from the DexFile, storing the // result in the DexCache. The referrer is used to identify the // target DexCache and ClassLoader to use for resolution. - mirror::Class* ResolveType(uint16_t type_idx, ArtMethod* referrer) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::Class* ResolveType(uint16_t type_idx, + ArtMethod* referrer) + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!dex_lock_, !Roles::uninterruptible_); - mirror::Class* ResolveType(uint16_t type_idx, ArtField* referrer) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::Class* ResolveType(uint16_t type_idx, + ArtField* referrer) + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!dex_lock_, !Roles::uninterruptible_); // Resolve a type with the given ID from the DexFile, storing the // result in DexCache. The ClassLoader is used to search for the // type, since it may be referenced from but not contained within // the given DexFile. - mirror::Class* ResolveType(const DexFile& dex_file, uint16_t type_idx, + mirror::Class* ResolveType(const DexFile& dex_file, + uint16_t type_idx, Handle<mirror::DexCache> dex_cache, Handle<mirror::ClassLoader> class_loader) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!dex_lock_, !Roles::uninterruptible_); // Resolve a method with a given ID from the DexFile, storing the // result in DexCache. The ClassLinker and ClassLoader are used as // in ResolveType. What is unique is the method type argument which // is used to determine if this method is a direct, static, or // virtual method. 
- ArtMethod* ResolveMethod(const DexFile& dex_file, uint32_t method_idx, + ArtMethod* ResolveMethod(const DexFile& dex_file, + uint32_t method_idx, Handle<mirror::DexCache> dex_cache, - Handle<mirror::ClassLoader> class_loader, ArtMethod* referrer, + Handle<mirror::ClassLoader> class_loader, + ArtMethod* referrer, InvokeType type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!dex_lock_, !Roles::uninterruptible_); ArtMethod* GetResolvedMethod(uint32_t method_idx, ArtMethod* referrer) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ArtMethod* ResolveMethod(Thread* self, uint32_t method_idx, ArtMethod* referrer, InvokeType type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!dex_lock_, !Roles::uninterruptible_); ArtField* GetResolvedField(uint32_t field_idx, mirror::Class* field_declaring_class) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ArtField* GetResolvedField(uint32_t field_idx, mirror::DexCache* dex_cache) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ArtField* ResolveField(uint32_t field_idx, ArtMethod* referrer, bool is_static) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!dex_lock_, !Roles::uninterruptible_); // Resolve a field with a given ID from the DexFile, storing the // result in DexCache. The ClassLinker and ClassLoader are used as @@ -247,45 +278,50 @@ class ClassLinker { // used to determine if we are resolving a static or non-static // field. ArtField* ResolveField(const DexFile& dex_file, - uint32_t field_idx, - Handle<mirror::DexCache> dex_cache, - Handle<mirror::ClassLoader> class_loader, - bool is_static) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + uint32_t field_idx, + Handle<mirror::DexCache> dex_cache, + Handle<mirror::ClassLoader> class_loader, + bool is_static) + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!dex_lock_, !Roles::uninterruptible_); // Resolve a field with a given ID from the DexFile, storing the // result in DexCache. The ClassLinker and ClassLoader are used as // in ResolveType. No is_static argument is provided so that Java // field resolution semantics are followed. - ArtField* ResolveFieldJLS(const DexFile& dex_file, uint32_t field_idx, + ArtField* ResolveFieldJLS(const DexFile& dex_file, + uint32_t field_idx, Handle<mirror::DexCache> dex_cache, Handle<mirror::ClassLoader> class_loader) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!dex_lock_, !Roles::uninterruptible_); // Get shorty from method index without resolution. Used to do handlerization. const char* MethodShorty(uint32_t method_idx, ArtMethod* referrer, uint32_t* length) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Returns true on success, false if there's an exception pending. // can_run_clinit=false allows the compiler to attempt to init a class, // given the restriction that no <clinit> execution is possible. 
- bool EnsureInitialized(Thread* self, Handle<mirror::Class> c, bool can_init_fields, + bool EnsureInitialized(Thread* self, + Handle<mirror::Class> c, + bool can_init_fields, bool can_init_parents) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!dex_lock_, !Roles::uninterruptible_); // Initializes classes that have instances in the image but that have // <clinit> methods so they could not be initialized by the compiler. - void RunRootClinits() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void RunRootClinits() SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!dex_lock_, !Roles::uninterruptible_); void RegisterDexFile(const DexFile& dex_file) - LOCKS_EXCLUDED(dex_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!dex_lock_) SHARED_REQUIRES(Locks::mutator_lock_); void RegisterDexFile(const DexFile& dex_file, Handle<mirror::DexCache> dex_cache) - LOCKS_EXCLUDED(dex_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!dex_lock_) SHARED_REQUIRES(Locks::mutator_lock_); const OatFile* RegisterOatFile(const OatFile* oat_file) - LOCKS_EXCLUDED(dex_lock_); + REQUIRES(!dex_lock_); const std::vector<const DexFile*>& GetBootClassPath() { return boot_class_path_; @@ -293,34 +329,36 @@ class ClassLinker { // Returns the first non-image oat file in the class path. const OatFile* GetPrimaryOatFile() - LOCKS_EXCLUDED(dex_lock_); + REQUIRES(!dex_lock_); - void VisitClasses(ClassVisitor* visitor, void* arg) - LOCKS_EXCLUDED(Locks::classlinker_classes_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void VisitClasses(ClassVisitor* visitor) + REQUIRES(!Locks::classlinker_classes_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Less efficient variant of VisitClasses that copies the class_table_ into secondary storage // so that it can visit individual classes without holding the doesn't hold the // Locks::classlinker_classes_lock_. As the Locks::classlinker_classes_lock_ isn't held this code // can race with insertion and deletion of classes while the visitor is being called. - void VisitClassesWithoutClassesLock(ClassVisitor* visitor, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void VisitClassesWithoutClassesLock(ClassVisitor* visitor) + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!dex_lock_); void VisitClassRoots(RootVisitor* visitor, VisitRootFlags flags) - LOCKS_EXCLUDED(Locks::classlinker_classes_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::classlinker_classes_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); void VisitRoots(RootVisitor* visitor, VisitRootFlags flags) - LOCKS_EXCLUDED(dex_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!dex_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); mirror::DexCache* FindDexCache(const DexFile& dex_file) - LOCKS_EXCLUDED(dex_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!dex_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); bool IsDexFileRegistered(const DexFile& dex_file) - LOCKS_EXCLUDED(dex_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!dex_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); void FixupDexCaches(ArtMethod* resolution_method) - LOCKS_EXCLUDED(dex_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!dex_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Finds or creates the oat file holding dex_location. 
Then loads and returns // all corresponding dex files (there may be more than one dex file loaded @@ -336,83 +374,101 @@ class ClassLinker { // This method should not be called with the mutator_lock_ held, because it // could end up starving GC if we need to generate or relocate any oat // files. - std::vector<std::unique_ptr<const DexFile>> OpenDexFilesFromOat( - const char* dex_location, const char* oat_location, - std::vector<std::string>* error_msgs) - LOCKS_EXCLUDED(dex_lock_, Locks::mutator_lock_); + std::vector<std::unique_ptr<const DexFile>> OpenDexFilesFromOat(const char* dex_location, + const char* oat_location, + out<std::vector<std::string>> + error_msgs) + REQUIRES(!dex_lock_, !Locks::mutator_lock_); // Allocate an instance of a java.lang.Object. - mirror::Object* AllocObject(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::Object* AllocObject(Thread* self) + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Roles::uninterruptible_); // TODO: replace this with multiple methods that allocate the correct managed type. template <class T> mirror::ObjectArray<T>* AllocObjectArray(Thread* self, size_t length) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Roles::uninterruptible_); mirror::ObjectArray<mirror::Class>* AllocClassArray(Thread* self, size_t length) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Roles::uninterruptible_); mirror::ObjectArray<mirror::String>* AllocStringArray(Thread* self, size_t length) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Roles::uninterruptible_); + + ArtField* AllocArtFieldArray(Thread* self, size_t length); ArtMethod* AllocArtMethodArray(Thread* self, size_t length); mirror::PointerArray* AllocPointerArray(Thread* self, size_t length) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Roles::uninterruptible_); mirror::IfTable* AllocIfTable(Thread* self, size_t ifcount) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - ArtField* AllocArtFieldArray(Thread* self, size_t length) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Roles::uninterruptible_); mirror::ObjectArray<mirror::StackTraceElement>* AllocStackTraceElementArray(Thread* self, size_t length) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Roles::uninterruptible_); void VerifyClass(Thread* self, Handle<mirror::Class> klass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool VerifyClassUsingOatFile(const DexFile& dex_file, mirror::Class* klass, + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!dex_lock_); + bool VerifyClassUsingOatFile(const DexFile& dex_file, + mirror::Class* klass, mirror::Class::Status& oat_file_class_status) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!dex_lock_); void ResolveClassExceptionHandlerTypes(const DexFile& dex_file, Handle<mirror::Class> klass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!dex_lock_); void ResolveMethodExceptionHandlerTypes(const DexFile& dex_file, ArtMethod* klass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - mirror::Class* CreateProxyClass(ScopedObjectAccessAlreadyRunnable& soa, jstring name, - jobjectArray interfaces, jobject loader, jobjectArray methods, + SHARED_REQUIRES(Locks::mutator_lock_) + 
REQUIRES(!dex_lock_); + + mirror::Class* CreateProxyClass(ScopedObjectAccessAlreadyRunnable& soa, + jstring name, + jobjectArray interfaces, + jobject loader, + jobjectArray methods, jobjectArray throws) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); std::string GetDescriptorForProxy(mirror::Class* proxy_class) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - ArtMethod* FindMethodForProxy(mirror::Class* proxy_class, ArtMethod* proxy_method) - LOCKS_EXCLUDED(dex_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); + ArtMethod* FindMethodForProxy(mirror::Class* proxy_class, + ArtMethod* proxy_method) + REQUIRES(!dex_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Get the oat code for a method when its class isn't yet initialized const void* GetQuickOatCodeFor(ArtMethod* method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Get the oat code for a method from a method index. - const void* GetQuickOatCodeFor(const DexFile& dex_file, uint16_t class_def_idx, + const void* GetQuickOatCodeFor(const DexFile& dex_file, + uint16_t class_def_idx, uint32_t method_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Get compiled code for a method, return null if no code // exists. This is unlike Get..OatCodeFor which will return a bridge // or interpreter entrypoint. const void* GetOatMethodQuickCodeFor(ArtMethod* method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - const OatFile::OatMethod FindOatMethodFor(ArtMethod* method, bool* found) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const OatFile::OatMethod FindOatMethodFor(ArtMethod* method, out<bool> found) + SHARED_REQUIRES(Locks::mutator_lock_); pid_t GetClassesLockOwner(); // For SignalCatcher. pid_t GetDexLockOwner(); // For SignalCatcher. - mirror::Class* GetClassRoot(ClassRoot class_root) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::Class* GetClassRoot(ClassRoot class_root) SHARED_REQUIRES(Locks::mutator_lock_); static const char* GetClassRootDescriptor(ClassRoot class_root); @@ -431,20 +487,20 @@ class ClassLinker { // Set the entrypoints up for method to the given code. void SetEntryPointsToCompiledCode(ArtMethod* method, const void* method_code) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Set the entrypoints up for method to the enter the interpreter. void SetEntryPointsToInterpreter(ArtMethod* method) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Attempts to insert a class into a class table. Returns null if // the class was inserted, otherwise returns an existing class with // the same descriptor and ClassLoader. mirror::Class* InsertClass(const char* descriptor, mirror::Class* klass, size_t hash) - LOCKS_EXCLUDED(Locks::classlinker_classes_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::classlinker_classes_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); - mirror::ObjectArray<mirror::Class>* GetClassRoots() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::ObjectArray<mirror::Class>* GetClassRoots() SHARED_REQUIRES(Locks::mutator_lock_) { mirror::ObjectArray<mirror::Class>* class_roots = class_roots_.Read(); DCHECK(class_roots != nullptr); return class_roots; @@ -452,23 +508,25 @@ class ClassLinker { // Move all of the image classes into the class table for faster lookups. 
void MoveImageClassesToClassTable() - LOCKS_EXCLUDED(Locks::classlinker_classes_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::classlinker_classes_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Move the class table to the pre-zygote table to reduce memory usage. This works by ensuring // that no more classes are ever added to the pre zygote table which makes it that the pages // always remain shared dirty instead of private dirty. void MoveClassTableToPreZygote() - LOCKS_EXCLUDED(Locks::classlinker_classes_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::classlinker_classes_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Returns true if the method can be called with its direct code pointer, false otherwise. bool MayBeCalledWithDirectCodePointer(ArtMethod* m) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!dex_lock_); // Creates a GlobalRef PathClassLoader that can be used to load classes from the given dex files. // Note: the objects are not completely set up. Do not use this outside of tests and the compiler. - jobject CreatePathClassLoader(Thread* self, std::vector<const DexFile*>& dex_files) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + jobject CreatePathClassLoader(Thread* self, const std::vector<const DexFile*>& dex_files) + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!dex_lock_); size_t GetImagePointerSize() const { DCHECK(ValidPointerSize(image_pointer_size_)) << image_pointer_size_; @@ -477,48 +535,77 @@ class ClassLinker { // Used by image writer for checking. bool ClassInClassTable(mirror::Class* klass) - LOCKS_EXCLUDED(Locks::classlinker_classes_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::classlinker_classes_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); ArtMethod* CreateRuntimeMethod(); // Clear the ArrayClass cache. This is necessary when cleaning up for the image, as the cache // entries are roots, but potentially not image classes. - void DropFindArrayClassCache() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void DropFindArrayClassCache() SHARED_REQUIRES(Locks::mutator_lock_); private: + class CompareClassLoaderGcRoot { + public: + bool operator()(const GcRoot<mirror::ClassLoader>& a, const GcRoot<mirror::ClassLoader>& b) + const SHARED_REQUIRES(Locks::mutator_lock_) { + return a.Read() < b.Read(); + } + }; + + typedef SafeMap<GcRoot<mirror::ClassLoader>, ClassTable*, CompareClassLoaderGcRoot> + ClassLoaderClassTable; + + void VisitClassesInternal(ClassVisitor* visitor) + REQUIRES(Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_); + + // Returns the number of zygote and image classes. + size_t NumZygoteClasses() const REQUIRES(Locks::classlinker_classes_lock_); + + // Returns the number of non zygote nor image classes. 
+ size_t NumNonZygoteClasses() const REQUIRES(Locks::classlinker_classes_lock_); + OatFile& GetImageOatFile(gc::space::ImageSpace* space) - LOCKS_EXCLUDED(dex_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!dex_lock_) SHARED_REQUIRES(Locks::mutator_lock_); - void FinishInit(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void FinishInit(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!dex_lock_, !Roles::uninterruptible_); // For early bootstrapping by Init mirror::Class* AllocClass(Thread* self, mirror::Class* java_lang_Class, uint32_t class_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Roles::uninterruptible_); // Alloc* convenience functions to avoid needing to pass in mirror::Class* // values that are known to the ClassLinker such as // kObjectArrayClass and kJavaLangString etc. mirror::Class* AllocClass(Thread* self, uint32_t class_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Roles::uninterruptible_); mirror::DexCache* AllocDexCache(Thread* self, const DexFile& dex_file) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Roles::uninterruptible_); mirror::Class* CreatePrimitiveClass(Thread* self, Primitive::Type type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Roles::uninterruptible_); mirror::Class* InitializePrimitiveClass(mirror::Class* primitive_class, Primitive::Type type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Roles::uninterruptible_); - - mirror::Class* CreateArrayClass(Thread* self, const char* descriptor, size_t hash, + mirror::Class* CreateArrayClass(Thread* self, + const char* descriptor, + size_t hash, Handle<mirror::ClassLoader> class_loader) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!dex_lock_, !Roles::uninterruptible_); void AppendToBootClassPath(Thread* self, const DexFile& dex_file) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!dex_lock_); void AppendToBootClassPath(const DexFile& dex_file, Handle<mirror::DexCache> dex_cache) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!dex_lock_); // Precomputes size needed for Class, in the case of a non-temporary class this size must be // sufficient to hold all static fields. @@ -527,133 +614,157 @@ class ClassLinker { // Setup the classloader, class def index, type idx so that we can insert this class in the class // table. 
- void SetupClass(const DexFile& dex_file, const DexFile::ClassDef& dex_class_def, - Handle<mirror::Class> klass, mirror::ClassLoader* class_loader) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - void LoadClass(Thread* self, const DexFile& dex_file, const DexFile::ClassDef& dex_class_def, + void SetupClass(const DexFile& dex_file, + const DexFile::ClassDef& dex_class_def, + Handle<mirror::Class> klass, + mirror::ClassLoader* class_loader) + SHARED_REQUIRES(Locks::mutator_lock_); + + void LoadClass(Thread* self, + const DexFile& dex_file, + const DexFile::ClassDef& dex_class_def, Handle<mirror::Class> klass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void LoadClassMembers(Thread* self, const DexFile& dex_file, const uint8_t* class_data, - Handle<mirror::Class> klass, const OatFile::OatClass* oat_class) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - void LoadField(const ClassDataItemIterator& it, Handle<mirror::Class> klass, + SHARED_REQUIRES(Locks::mutator_lock_); + void LoadClassMembers(Thread* self, + const DexFile& dex_file, + const uint8_t* class_data, + Handle<mirror::Class> klass, + const OatFile::OatClass* oat_class) + SHARED_REQUIRES(Locks::mutator_lock_); + + void LoadField(const ClassDataItemIterator& it, + Handle<mirror::Class> klass, ArtField* dst) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - void LoadMethod(Thread* self, const DexFile& dex_file, const ClassDataItemIterator& it, - Handle<mirror::Class> klass, ArtMethod* dst) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void LoadMethod(Thread* self, + const DexFile& dex_file, + const ClassDataItemIterator& it, + Handle<mirror::Class> klass, + ArtMethod* dst) + SHARED_REQUIRES(Locks::mutator_lock_); - void FixupStaticTrampolines(mirror::Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void FixupStaticTrampolines(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_); // Finds the associated oat class for a dex_file and descriptor. Returns an invalid OatClass on // error and sets found to false. 
- OatFile::OatClass FindOatClass(const DexFile& dex_file, uint16_t class_def_idx, bool* found) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + OatFile::OatClass FindOatClass(const DexFile& dex_file, uint16_t class_def_idx, out<bool> found) + SHARED_REQUIRES(Locks::mutator_lock_); void RegisterDexFileLocked(const DexFile& dex_file, Handle<mirror::DexCache> dex_cache) - EXCLUSIVE_LOCKS_REQUIRED(dex_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(dex_lock_) SHARED_REQUIRES(Locks::mutator_lock_); bool IsDexFileRegisteredLocked(const DexFile& dex_file) - SHARED_LOCKS_REQUIRED(dex_lock_, Locks::mutator_lock_); + SHARED_REQUIRES(dex_lock_, Locks::mutator_lock_); - bool InitializeClass(Thread* self, Handle<mirror::Class> klass, bool can_run_clinit, + bool InitializeClass(Thread* self, + Handle<mirror::Class> klass, + bool can_run_clinit, bool can_init_parents) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool WaitForInitializeClass(Handle<mirror::Class> klass, Thread* self, - ObjectLock<mirror::Class>& lock); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!dex_lock_); + bool WaitForInitializeClass(Handle<mirror::Class> klass, + Thread* self, + ObjectLock<mirror::Class>* lock); bool ValidateSuperClassDescriptors(Handle<mirror::Class> klass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - bool IsSameDescriptorInDifferentClassContexts(Thread* self, const char* descriptor, + bool IsSameDescriptorInDifferentClassContexts(Thread* self, + const char* descriptor, Handle<mirror::ClassLoader> class_loader1, Handle<mirror::ClassLoader> class_loader2) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - bool IsSameMethodSignatureInDifferentClassContexts(Thread* self, ArtMethod* method, - mirror::Class* klass1, mirror::Class* klass2) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsSameMethodSignatureInDifferentClassContexts(Thread* self, + ArtMethod* method, + mirror::Class* klass1, + mirror::Class* klass2) + SHARED_REQUIRES(Locks::mutator_lock_); - bool LinkClass(Thread* self, const char* descriptor, Handle<mirror::Class> klass, + bool LinkClass(Thread* self, + const char* descriptor, + Handle<mirror::Class> klass, Handle<mirror::ObjectArray<mirror::Class>> interfaces, - MutableHandle<mirror::Class>* h_new_class_out) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + out<MutableHandle<mirror::Class>> h_new_class_out) + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Locks::classlinker_classes_lock_); bool LinkSuperClass(Handle<mirror::Class> klass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool LoadSuperAndInterfaces(Handle<mirror::Class> klass, const DexFile& dex_file) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!dex_lock_); - bool LinkMethods(Thread* self, Handle<mirror::Class> klass, + bool LinkMethods(Thread* self, + Handle<mirror::Class> klass, Handle<mirror::ObjectArray<mirror::Class>> interfaces, - ArtMethod** out_imt) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + out<ArtMethod* [mirror::Class::kImtSize]> out_imt) + SHARED_REQUIRES(Locks::mutator_lock_); bool LinkVirtualMethods(Thread* self, Handle<mirror::Class> klass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - bool LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass, + bool LinkInterfaceMethods(Thread* self, + Handle<mirror::Class> klass, Handle<mirror::ObjectArray<mirror::Class>> 
interfaces, - ArtMethod** out_imt) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + out<ArtMethod* [mirror::Class::kImtSize]> out_imt) + SHARED_REQUIRES(Locks::mutator_lock_); - bool LinkStaticFields(Thread* self, Handle<mirror::Class> klass, size_t* class_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool LinkStaticFields(Thread* self, + Handle<mirror::Class> klass, + out<size_t> class_size) + SHARED_REQUIRES(Locks::mutator_lock_); bool LinkInstanceFields(Thread* self, Handle<mirror::Class> klass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool LinkFields(Thread* self, Handle<mirror::Class> klass, bool is_static, size_t* class_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void LinkCode(ArtMethod* method, const OatFile::OatClass* oat_class, + SHARED_REQUIRES(Locks::mutator_lock_); + bool LinkFields(Thread* self, Handle<mirror::Class> klass, bool is_static, out<size_t> class_size) + SHARED_REQUIRES(Locks::mutator_lock_); + void LinkCode(ArtMethod* method, + const OatFile::OatClass* oat_class, uint32_t class_def_method_index) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void CreateReferenceInstanceOffsets(Handle<mirror::Class> klass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void CheckProxyConstructor(ArtMethod* constructor) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void CheckProxyMethod(ArtMethod* method, ArtMethod* prototype) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // For use by ImageWriter to find DexCaches for its roots ReaderWriterMutex* DexLock() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCK_RETURNED(dex_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) LOCK_RETURNED(dex_lock_) { return &dex_lock_; } - size_t GetDexCacheCount() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, dex_lock_) { + size_t GetDexCacheCount() SHARED_REQUIRES(Locks::mutator_lock_, dex_lock_) { return dex_caches_.size(); } - mirror::DexCache* GetDexCache(size_t idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, dex_lock_); + mirror::DexCache* GetDexCache(size_t idx) SHARED_REQUIRES(Locks::mutator_lock_, dex_lock_); const OatFile* FindOpenedOatFileFromOatLocation(const std::string& oat_location) - LOCKS_EXCLUDED(dex_lock_); + REQUIRES(!dex_lock_); // Returns the boot image oat file. - const OatFile* GetBootOatFile() SHARED_LOCKS_REQUIRED(dex_lock_); + const OatFile* GetBootOatFile() SHARED_REQUIRES(dex_lock_); void CreateProxyConstructor(Handle<mirror::Class> klass, ArtMethod* out) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void CreateProxyMethod(Handle<mirror::Class> klass, ArtMethod* prototype, ArtMethod* out) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Ensures that methods have the kAccPreverified bit set. We use the kAccPreverfied bit on the // class access flags to determine whether this has been done before. 
void EnsurePreverifiedMethods(Handle<mirror::Class> c) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - mirror::Class* LookupClassFromTableLocked(const char* descriptor, - mirror::ClassLoader* class_loader, - size_t hash) - SHARED_LOCKS_REQUIRED(Locks::classlinker_classes_lock_, Locks::mutator_lock_); - - mirror::Class* UpdateClass(const char* descriptor, mirror::Class* klass, size_t hash) - LOCKS_EXCLUDED(Locks::classlinker_classes_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); mirror::Class* LookupClassFromImage(const char* descriptor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); + + // Returns null if not found. + ClassTable* ClassTableForClassLoader(mirror::ClassLoader* class_loader) + SHARED_REQUIRES(Locks::mutator_lock_, Locks::classlinker_classes_lock_); + // Insert a new class table if not found. + ClassTable* InsertClassTableForClassLoader(mirror::ClassLoader* class_loader) + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::classlinker_classes_lock_); // EnsureResolved is called to make sure that a class in the class_table_ has been resolved // before returning it to the caller. Its the responsibility of the thread that placed the class @@ -662,13 +773,13 @@ class ClassLinker { // retire a class, the version of the class in the table is returned and this may differ from // the class passed in. mirror::Class* EnsureResolved(Thread* self, const char* descriptor, mirror::Class* klass) - WARN_UNUSED SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + WARN_UNUSED SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_); void FixupTemporaryDeclaringClass(mirror::Class* temp_class, mirror::Class* new_class) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void SetClassRoot(ClassRoot class_root, mirror::Class* klass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Return the quick generic JNI stub for testing. const void* GetRuntimeQuickGenericJniStub() const; @@ -677,20 +788,23 @@ class ClassLinker { // class. // Note: Currently we only store the descriptor, so we cannot throw the exact throwable, only // a recreation with a custom string. - void ThrowEarlierClassFailure(mirror::Class* c) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void ThrowEarlierClassFailure(mirror::Class* c) SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!dex_lock_); // Check for duplicate class definitions of the given oat file against all open oat files. 
- bool HasCollisions(const OatFile* oat_file, std::string* error_msg) LOCKS_EXCLUDED(dex_lock_); + bool HasCollisions(const OatFile* oat_file, out<std::string> error_msg) REQUIRES(!dex_lock_); - bool HasInitWithString(Thread* self, ClassLinker* class_linker, const char* descriptor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool HasInitWithString(Thread* self, const char* descriptor) + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_); bool CanWeInitializeClass(mirror::Class* klass, bool can_init_statics, bool can_init_parents) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - void UpdateClassVirtualMethods(mirror::Class* klass, ArtMethod* new_methods, + void UpdateClassVirtualMethods(mirror::Class* klass, + ArtMethod* new_methods, size_t new_num_methods) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::classlinker_classes_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Locks::classlinker_classes_lock_); std::vector<const DexFile*> boot_class_path_; std::vector<std::unique_ptr<const DexFile>> opened_dex_files_; @@ -700,43 +814,11 @@ class ClassLinker { std::vector<GcRoot<mirror::DexCache>> dex_caches_ GUARDED_BY(dex_lock_); std::vector<const OatFile*> oat_files_ GUARDED_BY(dex_lock_); - class ClassDescriptorHashEquals { - public: - // Same class loader and descriptor. - std::size_t operator()(const GcRoot<mirror::Class>& root) const NO_THREAD_SAFETY_ANALYSIS; - bool operator()(const GcRoot<mirror::Class>& a, const GcRoot<mirror::Class>& b) const - NO_THREAD_SAFETY_ANALYSIS; - // Same class loader and descriptor. - std::size_t operator()(const std::pair<const char*, mirror::ClassLoader*>& element) const - NO_THREAD_SAFETY_ANALYSIS; - bool operator()(const GcRoot<mirror::Class>& a, - const std::pair<const char*, mirror::ClassLoader*>& b) const - NO_THREAD_SAFETY_ANALYSIS; - // Same descriptor. - bool operator()(const GcRoot<mirror::Class>& a, const char* descriptor) const - NO_THREAD_SAFETY_ANALYSIS; - std::size_t operator()(const char* descriptor) const NO_THREAD_SAFETY_ANALYSIS; - }; - class GcRootEmptyFn { - public: - void MakeEmpty(GcRoot<mirror::Class>& item) const { - item = GcRoot<mirror::Class>(); - } - bool IsEmpty(const GcRoot<mirror::Class>& item) const { - return item.IsNull(); - } - }; + // This contains strong roots. To enable concurrent root scanning of the class table. + ClassLoaderClassTable classes_ GUARDED_BY(Locks::classlinker_classes_lock_); - // hash set which hashes class descriptor, and compares descriptors nad class loaders. Results - // should be compared for a matching Class descriptor and class loader. - typedef HashSet<GcRoot<mirror::Class>, GcRootEmptyFn, ClassDescriptorHashEquals, - ClassDescriptorHashEquals, TrackingAllocator<GcRoot<mirror::Class>, kAllocatorTagClassTable>> - Table; - // This contains strong roots. To enable concurrent root scanning of - // the class table, be careful to use a read barrier when accessing this. - Table class_table_ GUARDED_BY(Locks::classlinker_classes_lock_); - Table pre_zygote_class_table_ GUARDED_BY(Locks::classlinker_classes_lock_); - std::vector<GcRoot<mirror::Class>> new_class_roots_; + // New class roots, only used by CMS since the GC needs to mark these in the pause. + std::vector<GcRoot<mirror::Class>> new_class_roots_ GUARDED_BY(Locks::classlinker_classes_lock_); // Do we need to search dex caches to find image classes? 
bool dex_cache_image_class_lookup_required_; diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc index a4e0227a6b..3f8259ab6c 100644 --- a/runtime/class_linker_test.cc +++ b/runtime/class_linker_test.cc @@ -46,7 +46,7 @@ namespace art { class ClassLinkerTest : public CommonRuntimeTest { protected: void AssertNonExistentClass(const std::string& descriptor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { Thread* self = Thread::Current(); EXPECT_TRUE(class_linker_->FindSystemClass(self, descriptor.c_str()) == nullptr); EXPECT_TRUE(self->IsExceptionPending()); @@ -58,13 +58,13 @@ class ClassLinkerTest : public CommonRuntimeTest { } void AssertPrimitiveClass(const std::string& descriptor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { Thread* self = Thread::Current(); AssertPrimitiveClass(descriptor, class_linker_->FindSystemClass(self, descriptor.c_str())); } void AssertPrimitiveClass(const std::string& descriptor, mirror::Class* primitive) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ASSERT_TRUE(primitive != nullptr); ASSERT_TRUE(primitive->GetClass() != nullptr); ASSERT_EQ(primitive->GetClass(), primitive->GetClass()->GetClass()); @@ -102,7 +102,7 @@ class ClassLinkerTest : public CommonRuntimeTest { void AssertArrayClass(const std::string& array_descriptor, const std::string& component_type, mirror::ClassLoader* class_loader) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { Thread* self = Thread::Current(); StackHandleScope<2> hs(self); Handle<mirror::ClassLoader> loader(hs.NewHandle(class_loader)); @@ -116,7 +116,7 @@ class ClassLinkerTest : public CommonRuntimeTest { } void AssertArrayClass(const std::string& array_descriptor, Handle<mirror::Class> array) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ASSERT_TRUE(array.Get() != nullptr); ASSERT_TRUE(array->GetClass() != nullptr); ASSERT_EQ(array->GetClass(), array->GetClass()->GetClass()); @@ -159,7 +159,7 @@ class ClassLinkerTest : public CommonRuntimeTest { EXPECT_EQ(class_linker_->FindArrayClass(self, &array_ptr), array.Get()); } - void AssertMethod(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void AssertMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_) { EXPECT_TRUE(method != nullptr); EXPECT_TRUE(method->GetDeclaringClass() != nullptr); EXPECT_TRUE(method->GetName() != nullptr); @@ -174,7 +174,7 @@ class ClassLinkerTest : public CommonRuntimeTest { } void AssertField(mirror::Class* klass, ArtField* field) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { EXPECT_TRUE(field != nullptr); EXPECT_EQ(klass, field->GetDeclaringClass()); EXPECT_TRUE(field->GetName() != nullptr); @@ -182,7 +182,7 @@ class ClassLinkerTest : public CommonRuntimeTest { } void AssertClass(const std::string& descriptor, Handle<mirror::Class> klass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { std::string temp; EXPECT_STREQ(descriptor.c_str(), klass->GetDescriptor(&temp)); if (descriptor == "Ljava/lang/Object;") { @@ -319,7 +319,7 @@ class ClassLinkerTest : public CommonRuntimeTest { } void AssertDexFileClass(mirror::ClassLoader* class_loader, const std::string& descriptor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ASSERT_TRUE(descriptor != nullptr); Thread* self = 
Thread::Current(); StackHandleScope<1> hs(self); @@ -339,7 +339,7 @@ class ClassLinkerTest : public CommonRuntimeTest { } void AssertDexFile(const DexFile& dex, mirror::ClassLoader* class_loader) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // Verify all the classes defined in this file for (size_t i = 0; i < dex.NumClassDefs(); i++) { const DexFile::ClassDef& class_def = dex.GetClassDef(i); @@ -385,7 +385,7 @@ struct CheckOffsets { std::string class_descriptor; std::vector<CheckOffset> offsets; - bool Check() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool Check() SHARED_REQUIRES(Locks::mutator_lock_) { Thread* self = Thread::Current(); mirror::Class* klass = Runtime::Current()->GetClassLinker()->FindSystemClass(self, class_descriptor.c_str()); @@ -1107,7 +1107,7 @@ TEST_F(ClassLinkerTest, ValidatePredefinedClassSizes) { } static void CheckMethod(ArtMethod* method, bool verified) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (!method->IsNative() && !method->IsAbstract()) { EXPECT_EQ((method->GetAccessFlags() & kAccPreverified) != 0U, verified) << PrettyMethod(method, true); @@ -1115,7 +1115,7 @@ static void CheckMethod(ArtMethod* method, bool verified) } static void CheckPreverified(mirror::Class* c, bool preverified) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { EXPECT_EQ((c->GetAccessFlags() & kAccPreverified) != 0U, preverified) << "Class " << PrettyClass(c) << " not as expected"; for (auto& m : c->GetDirectMethods(sizeof(void*))) { diff --git a/runtime/class_table.cc b/runtime/class_table.cc new file mode 100644 index 0000000000..c245d4e780 --- /dev/null +++ b/runtime/class_table.cc @@ -0,0 +1,148 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "class_table.h" + +#include "mirror/class-inl.h" + +namespace art { + +ClassTable::ClassTable() { + classes_.push_back(ClassSet()); +} + +void ClassTable::FreezeSnapshot() { + classes_.push_back(ClassSet()); +} + +bool ClassTable::Contains(mirror::Class* klass) { + for (ClassSet& class_set : classes_) { + auto it = class_set.Find(GcRoot<mirror::Class>(klass)); + if (it != class_set.end()) { + return it->Read() == klass; + } + } + return false; +} + +mirror::Class* ClassTable::UpdateClass(const char* descriptor, mirror::Class* klass, size_t hash) { + // Should only be updating latest table. 
+ auto existing_it = classes_.back().FindWithHash(descriptor, hash); + if (kIsDebugBuild && existing_it == classes_.back().end()) { + for (const ClassSet& class_set : classes_) { + if (class_set.FindWithHash(descriptor, hash) != class_set.end()) { + LOG(FATAL) << "Updating class found in frozen table " << descriptor; + } + } + LOG(FATAL) << "Updating class not found " << descriptor; + } + mirror::Class* const existing = existing_it->Read(); + CHECK_NE(existing, klass) << descriptor; + CHECK(!existing->IsResolved()) << descriptor; + CHECK_EQ(klass->GetStatus(), mirror::Class::kStatusResolving) << descriptor; + CHECK(!klass->IsTemp()) << descriptor; + VerifyObject(klass); + // Update the element in the hash set with the new class. This is safe to do since the descriptor + // doesn't change. + *existing_it = GcRoot<mirror::Class>(klass); + return existing; +} + +void ClassTable::VisitRoots(RootVisitor* visitor, VisitRootFlags flags ATTRIBUTE_UNUSED) { + BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor( + visitor, RootInfo(kRootStickyClass)); + for (ClassSet& class_set : classes_) { + for (GcRoot<mirror::Class>& root : class_set) { + buffered_visitor.VisitRoot(root); + } + } +} + +bool ClassTable::Visit(ClassVisitor* visitor) { + for (ClassSet& class_set : classes_) { + for (GcRoot<mirror::Class>& root : class_set) { + if (!visitor->Visit(root.Read())) { + return false; + } + } + } + return true; +} + +size_t ClassTable::NumZygoteClasses() const { + size_t sum = 0; + for (size_t i = 0; i < classes_.size() - 1; ++i) { + sum += classes_[i].Size(); + } + return sum; +} + +size_t ClassTable::NumNonZygoteClasses() const { + return classes_.back().Size(); +} + +mirror::Class* ClassTable::Lookup(const char* descriptor, size_t hash) { + for (ClassSet& class_set : classes_) { + auto it = class_set.FindWithHash(descriptor, hash); + if (it != class_set.end()) { + return it->Read(); + } + } + return nullptr; +} + +void ClassTable::Insert(mirror::Class* klass) { + classes_.back().Insert(GcRoot<mirror::Class>(klass)); +} + +void ClassTable::InsertWithHash(mirror::Class* klass, size_t hash) { + classes_.back().InsertWithHash(GcRoot<mirror::Class>(klass), hash); +} + +bool ClassTable::Remove(const char* descriptor) { + for (ClassSet& class_set : classes_) { + auto it = class_set.Find(descriptor); + if (it != class_set.end()) { + class_set.Erase(it); + return true; + } + } + return false; +} + +std::size_t ClassTable::ClassDescriptorHashEquals::operator()(const GcRoot<mirror::Class>& root) + const { + std::string temp; + return ComputeModifiedUtf8Hash(root.Read()->GetDescriptor(&temp)); +} + +bool ClassTable::ClassDescriptorHashEquals::operator()(const GcRoot<mirror::Class>& a, + const GcRoot<mirror::Class>& b) const { + DCHECK_EQ(a.Read()->GetClassLoader(), b.Read()->GetClassLoader()); + std::string temp; + return a.Read()->DescriptorEquals(b.Read()->GetDescriptor(&temp)); +} + +bool ClassTable::ClassDescriptorHashEquals::operator()(const GcRoot<mirror::Class>& a, + const char* descriptor) const { + return a.Read()->DescriptorEquals(descriptor); +} + +std::size_t ClassTable::ClassDescriptorHashEquals::operator()(const char* descriptor) const { + return ComputeModifiedUtf8Hash(descriptor); +} + +} // namespace art diff --git a/runtime/class_table.h b/runtime/class_table.h new file mode 100644 index 0000000000..252a47dd25 --- /dev/null +++ b/runtime/class_table.h @@ -0,0 +1,122 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 
(the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_CLASS_TABLE_H_ +#define ART_RUNTIME_CLASS_TABLE_H_ + +#include <string> +#include <utility> +#include <vector> + +#include "base/allocator.h" +#include "base/hash_set.h" +#include "base/macros.h" +#include "base/mutex.h" +#include "dex_file.h" +#include "gc_root.h" +#include "object_callbacks.h" +#include "runtime.h" + +namespace art { + +namespace mirror { + class ClassLoader; +} // namespace mirror + +class ClassVisitor { + public: + virtual ~ClassVisitor() {} + // Return true to continue visiting. + virtual bool Visit(mirror::Class* klass) = 0; +}; + +// Each loader has a ClassTable +class ClassTable { + public: + ClassTable(); + + // Used by image writer for checking. + bool Contains(mirror::Class* klass) + REQUIRES(Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_); + + // Freeze the current class tables by allocating a new table and never updating or modifying the + // existing table. This helps prevents dirty pages after caused by inserting after zygote fork. + void FreezeSnapshot() + REQUIRES(Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_); + + // Returns the number of classes in previous snapshots. + size_t NumZygoteClasses() const REQUIRES(Locks::classlinker_classes_lock_); + + // Returns all off the classes in the lastest snapshot. + size_t NumNonZygoteClasses() const REQUIRES(Locks::classlinker_classes_lock_); + + // Update a class in the table with the new class. Returns the existing class which was replaced. + mirror::Class* UpdateClass(const char* descriptor, mirror::Class* new_klass, size_t hash) + REQUIRES(Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_); + + void VisitRoots(RootVisitor* visitor, VisitRootFlags flags) + REQUIRES(Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_); + + // Return false if the callback told us to exit. + bool Visit(ClassVisitor* visitor) + REQUIRES(Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_); + + mirror::Class* Lookup(const char* descriptor, size_t hash) + SHARED_REQUIRES(Locks::classlinker_classes_lock_, Locks::mutator_lock_); + + void Insert(mirror::Class* klass) + REQUIRES(Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_); + void InsertWithHash(mirror::Class* klass, size_t hash) + REQUIRES(Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_); + + // Returns true if the class was found and removed, false otherwise. + bool Remove(const char* descriptor) + REQUIRES(Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_); + + private: + class ClassDescriptorHashEquals { + public: + // Same class loader and descriptor. + std::size_t operator()(const GcRoot<mirror::Class>& root) const NO_THREAD_SAFETY_ANALYSIS; + bool operator()(const GcRoot<mirror::Class>& a, const GcRoot<mirror::Class>& b) const + NO_THREAD_SAFETY_ANALYSIS;; + // Same descriptor. 
+ bool operator()(const GcRoot<mirror::Class>& a, const char* descriptor) const + NO_THREAD_SAFETY_ANALYSIS; + std::size_t operator()(const char* descriptor) const NO_THREAD_SAFETY_ANALYSIS; + }; + class GcRootEmptyFn { + public: + void MakeEmpty(GcRoot<mirror::Class>& item) const { + item = GcRoot<mirror::Class>(); + } + bool IsEmpty(const GcRoot<mirror::Class>& item) const { + return item.IsNull(); + } + }; + // Hash set which hashes class descriptor, and compares descriptors and class loaders. Results + // should be compared for a matching Class descriptor and class loader. + typedef HashSet<GcRoot<mirror::Class>, GcRootEmptyFn, ClassDescriptorHashEquals, + ClassDescriptorHashEquals, TrackingAllocator<GcRoot<mirror::Class>, kAllocatorTagClassTable>> + ClassSet; + + // TODO: shard lock to have one per class loader. + std::vector<ClassSet> classes_ GUARDED_BY(Locks::classlinker_classes_lock_); +}; + +} // namespace art + +#endif // ART_RUNTIME_CLASS_TABLE_H_ diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h index 2332f97895..a474ae6361 100644 --- a/runtime/common_runtime_test.h +++ b/runtime/common_runtime_test.h @@ -122,12 +122,12 @@ class CommonRuntimeTest : public testing::Test { std::string GetTestDexFileName(const char* name); std::vector<std::unique_ptr<const DexFile>> OpenTestDexFiles(const char* name) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); std::unique_ptr<const DexFile> OpenTestDexFile(const char* name) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - jobject LoadDex(const char* dex_name) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + jobject LoadDex(const char* dex_name) SHARED_REQUIRES(Locks::mutator_lock_); std::string android_data_; std::string dalvik_cache_; diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc index 3acd366cd2..de692d1368 100644 --- a/runtime/common_throws.cc +++ b/runtime/common_throws.cc @@ -34,7 +34,7 @@ namespace art { static void AddReferrerLocation(std::ostream& os, mirror::Class* referrer) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (referrer != nullptr) { std::string location(referrer->GetLocation()); if (!location.empty()) { @@ -46,7 +46,7 @@ static void AddReferrerLocation(std::ostream& os, mirror::Class* referrer) static void ThrowException(const char* exception_descriptor, mirror::Class* referrer, const char* fmt, va_list* args = nullptr) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { std::ostringstream msg; if (args != nullptr) { std::string vmsg; @@ -62,7 +62,7 @@ static void ThrowException(const char* exception_descriptor, static void ThrowWrappedException(const char* exception_descriptor, mirror::Class* referrer, const char* fmt, va_list* args = nullptr) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { std::ostringstream msg; if (args != nullptr) { std::string vmsg; @@ -336,7 +336,7 @@ void ThrowNullPointerExceptionForFieldAccess(ArtField* field, bool is_read) { static void ThrowNullPointerExceptionForMethodAccessImpl(uint32_t method_idx, const DexFile& dex_file, InvokeType type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { std::ostringstream msg; msg << "Attempt to invoke " << type << " method '" << PrettyMethod(method_idx, dex_file, true) << "' on a null object reference"; diff --git a/runtime/common_throws.h b/runtime/common_throws.h index 
b391c5b92e..2402e6f7a0 100644 --- a/runtime/common_throws.h +++ b/runtime/common_throws.h @@ -33,169 +33,169 @@ class StringPiece; // AbstractMethodError void ThrowAbstractMethodError(ArtMethod* method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; // ArithmeticException -void ThrowArithmeticExceptionDivideByZero() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; +void ThrowArithmeticExceptionDivideByZero() SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; // ArrayIndexOutOfBoundsException void ThrowArrayIndexOutOfBoundsException(int index, int length) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; // ArrayStoreException void ThrowArrayStoreException(mirror::Class* element_class, mirror::Class* array_class) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; // ClassCircularityError void ThrowClassCircularityError(mirror::Class* c) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; // ClassCastException void ThrowClassCastException(mirror::Class* dest_type, mirror::Class* src_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; void ThrowClassCastException(const char* msg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; // ClassFormatError void ThrowClassFormatError(mirror::Class* referrer, const char* fmt, ...) __attribute__((__format__(__printf__, 2, 3))) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; // IllegalAccessError void ThrowIllegalAccessErrorClass(mirror::Class* referrer, mirror::Class* accessed) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; void ThrowIllegalAccessErrorClassForMethodDispatch(mirror::Class* referrer, mirror::Class* accessed, ArtMethod* called, InvokeType type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; void ThrowIllegalAccessErrorMethod(mirror::Class* referrer, ArtMethod* accessed) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; void ThrowIllegalAccessErrorField(mirror::Class* referrer, ArtField* accessed) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; void ThrowIllegalAccessErrorFinalField(ArtMethod* referrer, ArtField* accessed) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; void ThrowIllegalAccessError(mirror::Class* referrer, const char* fmt, ...) 
__attribute__((__format__(__printf__, 2, 3))) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; // IllegalAccessException void ThrowIllegalAccessException(const char* msg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; // IllegalArgumentException void ThrowIllegalArgumentException(const char* msg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; // IncompatibleClassChangeError void ThrowIncompatibleClassChangeError(InvokeType expected_type, InvokeType found_type, ArtMethod* method, ArtMethod* referrer) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; void ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(ArtMethod* interface_method, mirror::Object* this_object, ArtMethod* referrer) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; void ThrowIncompatibleClassChangeErrorField(ArtField* resolved_field, bool is_static, ArtMethod* referrer) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; void ThrowIncompatibleClassChangeError(mirror::Class* referrer, const char* fmt, ...) __attribute__((__format__(__printf__, 2, 3))) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; // IOException void ThrowIOException(const char* fmt, ...) __attribute__((__format__(__printf__, 1, 2))) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; void ThrowWrappedIOException(const char* fmt, ...) __attribute__((__format__(__printf__, 1, 2))) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; // LinkageError void ThrowLinkageError(mirror::Class* referrer, const char* fmt, ...) __attribute__((__format__(__printf__, 2, 3))) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; void ThrowWrappedLinkageError(mirror::Class* referrer, const char* fmt, ...) 
__attribute__((__format__(__printf__, 2, 3))) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; // NegativeArraySizeException void ThrowNegativeArraySizeException(int size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; void ThrowNegativeArraySizeException(const char* msg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; // NoSuchFieldError void ThrowNoSuchFieldError(const StringPiece& scope, mirror::Class* c, const StringPiece& type, const StringPiece& name) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void ThrowNoSuchFieldException(mirror::Class* c, const StringPiece& name) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // NoSuchMethodError void ThrowNoSuchMethodError(InvokeType type, mirror::Class* c, const StringPiece& name, const Signature& signature) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; void ThrowNoSuchMethodError(uint32_t method_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; // NullPointerException void ThrowNullPointerExceptionForFieldAccess(ArtField* field, bool is_read) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; void ThrowNullPointerExceptionForMethodAccess(uint32_t method_idx, InvokeType type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; void ThrowNullPointerExceptionForMethodAccess(ArtMethod* method, InvokeType type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; void ThrowNullPointerExceptionFromDexPC() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; void ThrowNullPointerException(const char* msg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; // RuntimeException void ThrowRuntimeException(const char* fmt, ...) __attribute__((__format__(__printf__, 1, 2))) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; // VerifyError void ThrowVerifyError(mirror::Class* referrer, const char* fmt, ...) 
__attribute__((__format__(__printf__, 2, 3))) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; } // namespace art diff --git a/runtime/compiler_callbacks.h b/runtime/compiler_callbacks.h index b296e39c5e..af7b04f62e 100644 --- a/runtime/compiler_callbacks.h +++ b/runtime/compiler_callbacks.h @@ -38,7 +38,7 @@ class CompilerCallbacks { virtual ~CompilerCallbacks() { } virtual bool MethodVerified(verifier::MethodVerifier* verifier) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0; + SHARED_REQUIRES(Locks::mutator_lock_) = 0; virtual void ClassRejected(ClassReference ref) = 0; // Return true if we should attempt to relocate to a random base address if we have not already diff --git a/runtime/debugger.cc b/runtime/debugger.cc index eccebf1fc4..1865516939 100644 --- a/runtime/debugger.cc +++ b/runtime/debugger.cc @@ -24,6 +24,7 @@ #include "art_field-inl.h" #include "art_method-inl.h" #include "base/time_utils.h" +#include "base/out.h" #include "class_linker.h" #include "class_linker-inl.h" #include "dex_file-inl.h" @@ -72,7 +73,7 @@ class Breakpoint { public: Breakpoint(ArtMethod* method, uint32_t dex_pc, DeoptimizationRequest::Kind deoptimization_kind) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : method_(nullptr), dex_pc_(dex_pc), deoptimization_kind_(deoptimization_kind) { CHECK(deoptimization_kind_ == DeoptimizationRequest::kNothing || deoptimization_kind_ == DeoptimizationRequest::kSelectiveDeoptimization || @@ -81,14 +82,14 @@ class Breakpoint { method_ = soa.EncodeMethod(method); } - Breakpoint(const Breakpoint& other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + Breakpoint(const Breakpoint& other) SHARED_REQUIRES(Locks::mutator_lock_) : method_(nullptr), dex_pc_(other.dex_pc_), deoptimization_kind_(other.deoptimization_kind_) { ScopedObjectAccessUnchecked soa(Thread::Current()); method_ = soa.EncodeMethod(other.Method()); } - ArtMethod* Method() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ArtMethod* Method() const SHARED_REQUIRES(Locks::mutator_lock_) { ScopedObjectAccessUnchecked soa(Thread::Current()); return soa.DecodeMethod(method_); } @@ -111,7 +112,7 @@ class Breakpoint { }; static std::ostream& operator<<(std::ostream& os, const Breakpoint& rhs) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { os << StringPrintf("Breakpoint[%s @%#x]", PrettyMethod(rhs.Method()).c_str(), rhs.DexPc()); return os; } @@ -123,7 +124,7 @@ class DebugInstrumentationListener FINAL : public instrumentation::Instrumentati void MethodEntered(Thread* thread, mirror::Object* this_object, ArtMethod* method, uint32_t dex_pc) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { if (method->IsNative()) { // TODO: post location events is a suspension point and native method entry stubs aren't. return; @@ -149,7 +150,7 @@ class DebugInstrumentationListener FINAL : public instrumentation::Instrumentati void MethodExited(Thread* thread, mirror::Object* this_object, ArtMethod* method, uint32_t dex_pc, const JValue& return_value) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { if (method->IsNative()) { // TODO: post location events is a suspension point and native method entry stubs aren't. 
return; @@ -166,7 +167,7 @@ class DebugInstrumentationListener FINAL : public instrumentation::Instrumentati void MethodUnwind(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object ATTRIBUTE_UNUSED, ArtMethod* method, uint32_t dex_pc) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { // We're not recorded to listen to this kind of event, so complain. LOG(ERROR) << "Unexpected method unwind event in debugger " << PrettyMethod(method) << " " << dex_pc; @@ -174,7 +175,7 @@ class DebugInstrumentationListener FINAL : public instrumentation::Instrumentati void DexPcMoved(Thread* thread, mirror::Object* this_object, ArtMethod* method, uint32_t new_dex_pc) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { if (IsListeningToMethodExit() && IsReturn(method, new_dex_pc)) { // We also listen to kMethodExited instrumentation event and the current instruction is a // RETURN so we know the MethodExited method is going to be called right after us. Like in @@ -195,47 +196,47 @@ class DebugInstrumentationListener FINAL : public instrumentation::Instrumentati void FieldRead(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object, ArtMethod* method, uint32_t dex_pc, ArtField* field) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { Dbg::PostFieldAccessEvent(method, dex_pc, this_object, field); } void FieldWritten(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object, ArtMethod* method, uint32_t dex_pc, ArtField* field, const JValue& field_value) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { Dbg::PostFieldModificationEvent(method, dex_pc, this_object, field, &field_value); } void ExceptionCaught(Thread* thread ATTRIBUTE_UNUSED, mirror::Throwable* exception_object) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { Dbg::PostException(exception_object); } // We only care about how many backward branches were executed in the Jit. 
void BackwardBranch(Thread* /*thread*/, ArtMethod* method, int32_t dex_pc_offset) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { LOG(ERROR) << "Unexpected backward branch event in debugger " << PrettyMethod(method) << " " << dex_pc_offset; } private: static bool IsReturn(ArtMethod* method, uint32_t dex_pc) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { const DexFile::CodeItem* code_item = method->GetCodeItem(); const Instruction* instruction = Instruction::At(&code_item->insns_[dex_pc]); return instruction->IsReturn(); } - static bool IsListeningToDexPcMoved() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + static bool IsListeningToDexPcMoved() SHARED_REQUIRES(Locks::mutator_lock_) { return IsListeningTo(instrumentation::Instrumentation::kDexPcMoved); } - static bool IsListeningToMethodExit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + static bool IsListeningToMethodExit() SHARED_REQUIRES(Locks::mutator_lock_) { return IsListeningTo(instrumentation::Instrumentation::kMethodExited); } static bool IsListeningTo(instrumentation::Instrumentation::InstrumentationEvent event) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return (Dbg::GetInstrumentationEvents() & event) != 0; } @@ -298,8 +299,8 @@ bool SingleStepControl::ContainsDexPc(uint32_t dex_pc) const { } static bool IsBreakpoint(const ArtMethod* m, uint32_t dex_pc) - LOCKS_EXCLUDED(Locks::breakpoint_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + REQUIRES(!Locks::breakpoint_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) { ReaderMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_); for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) { if (gBreakpoints[i].DexPc() == dex_pc && gBreakpoints[i].Method() == m) { @@ -311,7 +312,7 @@ static bool IsBreakpoint(const ArtMethod* m, uint32_t dex_pc) } static bool IsSuspendedForDebugger(ScopedObjectAccessUnchecked& soa, Thread* thread) - LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) { + REQUIRES(!Locks::thread_suspend_count_lock_) { MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_); // A thread may be suspended for GC; in this code, we really want to know whether // there's a debugger suspension active. 
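The hunks above follow the pattern used throughout this change: LOCKS_EXCLUDED(x) becomes the negative-capability form REQUIRES(!x), and SHARED_LOCKS_REQUIRED(x) becomes SHARED_REQUIRES(x). With negative capabilities, "must not already hold x" is part of the function's contract that callers have to establish, rather than an advisory exclusion, and Clang only checks it when -Wthread-safety-negative is enabled. The following is a minimal, self-contained sketch of how such annotations read; the macro definitions mirror the Clang thread-safety documentation, and ToyMutex/ToyBreakpoints are hypothetical names for illustration only, not ART's real macros or types.

// Illustrative thread-safety macros in the style of the Clang documentation.
#include <mutex>

#if defined(__clang__)
#define CAPABILITY(x)              __attribute__((capability(x)))
#define REQUIRES(...)              __attribute__((requires_capability(__VA_ARGS__)))
#define SHARED_REQUIRES(...)       __attribute__((requires_shared_capability(__VA_ARGS__)))
#define ACQUIRE(...)               __attribute__((acquire_capability(__VA_ARGS__)))
#define RELEASE(...)               __attribute__((release_capability(__VA_ARGS__)))
#define GUARDED_BY(x)              __attribute__((guarded_by(x)))
#define NO_THREAD_SAFETY_ANALYSIS  __attribute__((no_thread_safety_analysis))
#else
#define CAPABILITY(x)
#define REQUIRES(...)
#define SHARED_REQUIRES(...)
#define ACQUIRE(...)
#define RELEASE(...)
#define GUARDED_BY(x)
#define NO_THREAD_SAFETY_ANALYSIS
#endif

// A toy lock annotated as a capability; the bodies opt out of analysis the
// way real lock wrappers usually do.
class CAPABILITY("mutex") ToyMutex {
 public:
  void Lock() ACQUIRE() NO_THREAD_SAFETY_ANALYSIS { mu_.lock(); }
  void Unlock() RELEASE() NO_THREAD_SAFETY_ANALYSIS { mu_.unlock(); }

 private:
  std::mutex mu_;
};

class ToyBreakpoints {
 private:
  ToyMutex lock_;
  int count_ GUARDED_BY(lock_) = 0;

 public:
  // Caller must already hold lock_, at least in shared (reader) mode.
  int CountLocked() const SHARED_REQUIRES(lock_) { return count_; }

  // Caller must NOT already hold lock_: the negative-capability form that
  // replaces the old LOCKS_EXCLUDED(lock_) annotation.
  void Add() REQUIRES(!lock_) {
    lock_.Lock();
    ++count_;
    lock_.Unlock();
  }
};

Compiled with clang++ -Wthread-safety -Wthread-safety-negative, a caller of Add() that already holds lock_, or that does not itself establish !lock_ (for example by carrying its own REQUIRES(!lock_) annotation), is diagnosed at compile time, which is what makes the exclusion transitive rather than advisory.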
@@ -319,7 +320,7 @@ static bool IsSuspendedForDebugger(ScopedObjectAccessUnchecked& soa, Thread* thr } static mirror::Array* DecodeNonNullArray(JDWP::RefTypeId id, JDWP::JdwpError* error) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(id, error); if (o == nullptr) { *error = JDWP::ERR_INVALID_OBJECT; @@ -334,7 +335,7 @@ static mirror::Array* DecodeNonNullArray(JDWP::RefTypeId id, JDWP::JdwpError* er } static mirror::Class* DecodeClass(JDWP::RefTypeId id, JDWP::JdwpError* error) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(id, error); if (o == nullptr) { *error = JDWP::ERR_INVALID_OBJECT; @@ -350,8 +351,8 @@ static mirror::Class* DecodeClass(JDWP::RefTypeId id, JDWP::JdwpError* error) static Thread* DecodeThread(ScopedObjectAccessUnchecked& soa, JDWP::ObjectId thread_id, JDWP::JdwpError* error) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::thread_suspend_count_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_) { mirror::Object* thread_peer = Dbg::GetObjectRegistry()->Get<mirror::Object*>(thread_id, error); if (thread_peer == nullptr) { // This isn't even an object. @@ -381,14 +382,14 @@ static JDWP::JdwpTag BasicTagFromDescriptor(const char* descriptor) { } static JDWP::JdwpTag BasicTagFromClass(mirror::Class* klass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { std::string temp; const char* descriptor = klass->GetDescriptor(&temp); return BasicTagFromDescriptor(descriptor); } static JDWP::JdwpTag TagFromClass(const ScopedObjectAccessUnchecked& soa, mirror::Class* c) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { CHECK(c != nullptr); if (c->IsArrayClass()) { return JDWP::JT_ARRAY; @@ -764,7 +765,7 @@ JDWP::JdwpError Dbg::GetOwnedMonitors(JDWP::ObjectId thread_id, OwnedMonitorVisitor(Thread* thread, Context* context, std::vector<JDWP::ObjectId>* monitor_vector, std::vector<uint32_t>* stack_depth_vector) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames), current_stack_depth(0), monitors(monitor_vector), @@ -781,7 +782,7 @@ JDWP::JdwpError Dbg::GetOwnedMonitors(JDWP::ObjectId thread_id, } static void AppendOwnedMonitors(mirror::Object* owned_monitor, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { OwnedMonitorVisitor* visitor = reinterpret_cast<OwnedMonitorVisitor*>(arg); visitor->monitors->push_back(gRegistry->Add(owned_monitor)); visitor->stack_depths->push_back(visitor->current_stack_depth); @@ -948,33 +949,27 @@ JDWP::JdwpError Dbg::GetReflectedType(JDWP::RefTypeId class_id, JDWP::ExpandBuf* return JDWP::ERR_NONE; } -void Dbg::GetClassList(std::vector<JDWP::RefTypeId>* classes) { - // Get the complete list of reference classes (i.e. all classes except - // the primitive types). - // Returns a newly-allocated buffer full of RefTypeId values. - struct ClassListCreator { - explicit ClassListCreator(std::vector<JDWP::RefTypeId>* classes_in) : classes(classes_in) { - } +// Get the complete list of reference classes (i.e. all classes except +// the primitive types). 
+// Returns a newly-allocated buffer full of RefTypeId values. +class ClassListCreator : public ClassVisitor { + public: + explicit ClassListCreator(std::vector<JDWP::RefTypeId>* classes) : classes_(classes) {} - static bool Visit(mirror::Class* c, void* arg) { - return reinterpret_cast<ClassListCreator*>(arg)->Visit(c); - } - - // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses - // annotalysis. - bool Visit(mirror::Class* c) NO_THREAD_SAFETY_ANALYSIS { - if (!c->IsPrimitive()) { - classes->push_back(gRegistry->AddRefType(c)); - } - return true; + bool Visit(mirror::Class* c) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { + if (!c->IsPrimitive()) { + classes_->push_back(Dbg::GetObjectRegistry()->AddRefType(c)); } + return true; + } - std::vector<JDWP::RefTypeId>* const classes; - }; + private: + std::vector<JDWP::RefTypeId>* const classes_; +}; +void Dbg::GetClassList(std::vector<JDWP::RefTypeId>* classes) { ClassListCreator clc(classes); - Runtime::Current()->GetClassLinker()->VisitClassesWithoutClassesLock(ClassListCreator::Visit, - &clc); + Runtime::Current()->GetClassLinker()->VisitClassesWithoutClassesLock(&clc); } JDWP::JdwpError Dbg::GetClassInfo(JDWP::RefTypeId class_id, JDWP::JdwpTypeTag* pTypeTag, @@ -1006,7 +1001,7 @@ JDWP::JdwpError Dbg::GetClassInfo(JDWP::RefTypeId class_id, JDWP::JdwpTypeTag* p void Dbg::FindLoadedClassBySignature(const char* descriptor, std::vector<JDWP::RefTypeId>* ids) { std::vector<mirror::Class*> classes; - Runtime::Current()->GetClassLinker()->LookupClasses(descriptor, classes); + Runtime::Current()->GetClassLinker()->LookupClasses(descriptor, outof(classes)); ids->clear(); for (size_t i = 0; i < classes.size(); ++i) { ids->push_back(gRegistry->Add(classes[i])); @@ -1270,17 +1265,17 @@ JDWP::FieldId Dbg::ToFieldId(const ArtField* f) { } static JDWP::MethodId ToMethodId(const ArtMethod* m) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return static_cast<JDWP::MethodId>(reinterpret_cast<uintptr_t>(m)); } static ArtField* FromFieldId(JDWP::FieldId fid) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return reinterpret_cast<ArtField*>(static_cast<uintptr_t>(fid)); } static ArtMethod* FromMethodId(JDWP::MethodId mid) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return reinterpret_cast<ArtMethod*>(static_cast<uintptr_t>(mid)); } @@ -1326,10 +1321,7 @@ bool Dbg::MatchInstance(JDWP::ObjectId expected_instance_id, mirror::Object* eve return modifier_instance == event_instance; } -void Dbg::SetJdwpLocation(JDWP::JdwpLocation* location, ArtMethod* m, uint32_t dex_pc) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_) { +void Dbg::SetJdwpLocation(JDWP::JdwpLocation* location, ArtMethod* m, uint32_t dex_pc) { if (m == nullptr) { memset(location, 0, sizeof(*location)); } else { @@ -1376,7 +1368,7 @@ static uint32_t MangleAccessFlags(uint32_t accessFlags) { * the end. */ static uint16_t MangleSlot(uint16_t slot, ArtMethod* m) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { const DexFile::CodeItem* code_item = m->GetCodeItem(); if (code_item == nullptr) { // We should not get here for a method without code (native, proxy or abstract). Log it and @@ -1398,7 +1390,7 @@ static uint16_t MangleSlot(uint16_t slot, ArtMethod* m) * slots to dex style argument placement. 
*/ static uint16_t DemangleSlot(uint16_t slot, ArtMethod* m, JDWP::JdwpError* error) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { const DexFile::CodeItem* code_item = m->GetCodeItem(); if (code_item == nullptr) { // We should not get here for a method without code (native, proxy or abstract). Log it and @@ -1424,7 +1416,8 @@ static uint16_t DemangleSlot(uint16_t slot, ArtMethod* m, JDWP::JdwpError* error return DexFile::kDexNoIndex16; } -JDWP::JdwpError Dbg::OutputDeclaredFields(JDWP::RefTypeId class_id, bool with_generic, JDWP::ExpandBuf* pReply) { +JDWP::JdwpError Dbg::OutputDeclaredFields(JDWP::RefTypeId class_id, bool with_generic, + JDWP::ExpandBuf* pReply) { JDWP::JdwpError error; mirror::Class* c = DecodeClass(class_id, &error); if (c == nullptr) { @@ -1437,7 +1430,8 @@ JDWP::JdwpError Dbg::OutputDeclaredFields(JDWP::RefTypeId class_id, bool with_ge expandBufAdd4BE(pReply, instance_field_count + static_field_count); for (size_t i = 0; i < instance_field_count + static_field_count; ++i) { - ArtField* f = (i < instance_field_count) ? c->GetInstanceField(i) : c->GetStaticField(i - instance_field_count); + ArtField* f = (i < instance_field_count) ? c->GetInstanceField(i) : + c->GetStaticField(i - instance_field_count); expandBufAddFieldId(pReply, ToFieldId(f)); expandBufAddUtf8String(pReply, f->GetName()); expandBufAddUtf8String(pReply, f->GetTypeDescriptor()); @@ -1553,7 +1547,7 @@ void Dbg::OutputVariableTable(JDWP::RefTypeId, JDWP::MethodId method_id, bool wi static void Callback(void* context, uint16_t slot, uint32_t startAddress, uint32_t endAddress, const char* name, const char* descriptor, const char* signature) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DebugCallbackContext* pContext = reinterpret_cast<DebugCallbackContext*>(context); VLOG(jdwp) << StringPrintf(" %2zd: %d(%d) '%s' '%s' '%s' actual slot=%d mangled slot=%d", @@ -1641,7 +1635,7 @@ JDWP::JdwpTag Dbg::GetStaticFieldBasicTag(JDWP::FieldId field_id) { } static JValue GetArtFieldValue(ArtField* f, mirror::Object* o) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { Primitive::Type fieldType = f->GetTypeAsPrimitiveType(); JValue field_value; switch (fieldType) { @@ -1688,7 +1682,7 @@ static JValue GetArtFieldValue(ArtField* f, mirror::Object* o) static JDWP::JdwpError GetFieldValueImpl(JDWP::RefTypeId ref_type_id, JDWP::ObjectId object_id, JDWP::FieldId field_id, JDWP::ExpandBuf* pReply, bool is_static) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { JDWP::JdwpError error; mirror::Class* c = DecodeClass(ref_type_id, &error); if (ref_type_id != 0 && c == nullptr) { @@ -1744,7 +1738,7 @@ JDWP::JdwpError Dbg::GetStaticFieldValue(JDWP::RefTypeId ref_type_id, JDWP::Fiel } static JDWP::JdwpError SetArtFieldValue(ArtField* f, mirror::Object* o, uint64_t value, int width) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { Primitive::Type fieldType = f->GetTypeAsPrimitiveType(); // Debugging only happens at runtime so we know we are not running in a transaction. 
static constexpr bool kNoTransactionMode = false; @@ -1815,7 +1809,7 @@ static JDWP::JdwpError SetArtFieldValue(ArtField* f, mirror::Object* o, uint64_t static JDWP::JdwpError SetFieldValueImpl(JDWP::ObjectId object_id, JDWP::FieldId field_id, uint64_t value, int width, bool is_static) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { JDWP::JdwpError error; mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(object_id, &error); if ((!is_static && o == nullptr) || error != JDWP::ERR_NONE) { @@ -1945,7 +1939,7 @@ JDWP::JdwpError Dbg::GetThreadGroup(JDWP::ObjectId thread_id, JDWP::ExpandBuf* p static mirror::Object* DecodeThreadGroup(ScopedObjectAccessUnchecked& soa, JDWP::ObjectId thread_group_id, JDWP::JdwpError* error) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { mirror::Object* thread_group = Dbg::GetObjectRegistry()->Get<mirror::Object*>(thread_group_id, error); if (*error != JDWP::ERR_NONE) { @@ -2004,7 +1998,7 @@ JDWP::JdwpError Dbg::GetThreadGroupParent(JDWP::ObjectId thread_group_id, JDWP:: static void GetChildThreadGroups(ScopedObjectAccessUnchecked& soa, mirror::Object* thread_group, std::vector<JDWP::ObjectId>* child_thread_group_ids) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { CHECK(thread_group != nullptr); // Get the ArrayList<ThreadGroup> "groups" out of this thread group... @@ -2158,7 +2152,7 @@ JDWP::JdwpError Dbg::Interrupt(JDWP::ObjectId thread_id) { static bool IsInDesiredThreadGroup(ScopedObjectAccessUnchecked& soa, mirror::Object* desired_thread_group, mirror::Object* peer) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // Do we want threads from all thread groups? if (desired_thread_group == nullptr) { return true; @@ -2202,7 +2196,7 @@ void Dbg::GetThreads(mirror::Object* thread_group, std::vector<JDWP::ObjectId>* } } -static int GetStackDepth(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +static int GetStackDepth(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_) { struct CountStackDepthVisitor : public StackVisitor { explicit CountStackDepthVisitor(Thread* thread_in) : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames), @@ -2245,7 +2239,7 @@ JDWP::JdwpError Dbg::GetThreadFrames(JDWP::ObjectId thread_id, size_t start_fram public: GetFrameVisitor(Thread* thread, size_t start_frame_in, size_t frame_count_in, JDWP::ExpandBuf* buf_in) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames), depth_(0), start_frame_(start_frame_in), @@ -2254,7 +2248,7 @@ JDWP::JdwpError Dbg::GetThreadFrames(JDWP::ObjectId thread_id, size_t start_fram expandBufAdd4BE(buf_, frame_count_); } - bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { if (GetMethod()->IsRuntimeMethod()) { return true; // The debugger can't do anything useful with a frame that has no Method*. 
} @@ -2366,7 +2360,7 @@ void Dbg::SuspendSelf() { struct GetThisVisitor : public StackVisitor { GetThisVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id_in) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames), this_object(nullptr), frame_id(frame_id_in) {} @@ -2408,7 +2402,7 @@ JDWP::JdwpError Dbg::GetThisObject(JDWP::ObjectId thread_id, JDWP::FrameId frame class FindFrameVisitor FINAL : public StackVisitor { public: FindFrameVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames), frame_id_(frame_id), error_(JDWP::ERR_INVALID_FRAMEID) {} @@ -2482,14 +2476,14 @@ JDWP::JdwpError Dbg::GetLocalValues(JDWP::Request* request, JDWP::ExpandBuf* pRe constexpr JDWP::JdwpError kStackFrameLocalAccessError = JDWP::ERR_ABSENT_INFORMATION; static std::string GetStackContextAsString(const StackVisitor& visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return StringPrintf(" at DEX pc 0x%08x in method %s", visitor.GetDexPc(false), PrettyMethod(visitor.GetMethod()).c_str()); } static JDWP::JdwpError FailGetLocalValue(const StackVisitor& visitor, uint16_t vreg, JDWP::JdwpTag tag) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { LOG(ERROR) << "Failed to read " << tag << " local from register v" << vreg << GetStackContextAsString(visitor); return kStackFrameLocalAccessError; @@ -2651,7 +2645,7 @@ JDWP::JdwpError Dbg::SetLocalValues(JDWP::Request* request) { template<typename T> static JDWP::JdwpError FailSetLocalValue(const StackVisitor& visitor, uint16_t vreg, JDWP::JdwpTag tag, T value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { LOG(ERROR) << "Failed to write " << tag << " local " << value << " (0x" << std::hex << value << ") into register v" << vreg << GetStackContextAsString(visitor); @@ -2736,7 +2730,7 @@ JDWP::JdwpError Dbg::SetLocalValue(StackVisitor& visitor, int slot, JDWP::JdwpTa } static void SetEventLocation(JDWP::EventLocation* location, ArtMethod* m, uint32_t dex_pc) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(location != nullptr); if (m == nullptr) { memset(location, 0, sizeof(*location)); @@ -2814,7 +2808,7 @@ void Dbg::PostFieldModificationEvent(ArtMethod* m, int dex_pc, class CatchLocationFinder : public StackVisitor { public: CatchLocationFinder(Thread* self, const Handle<mirror::Throwable>& exception, Context* context) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : StackVisitor(self, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames), self_(self), exception_(exception), @@ -2826,7 +2820,7 @@ class CatchLocationFinder : public StackVisitor { throw_dex_pc_(DexFile::kDexNoIndex) { } - bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod* method = GetMethod(); DCHECK(method != nullptr); if (method->IsRuntimeMethod()) { @@ -2860,15 +2854,15 @@ class CatchLocationFinder : public StackVisitor { return true; // Continue stack walk. 
} - ArtMethod* GetCatchMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ArtMethod* GetCatchMethod() SHARED_REQUIRES(Locks::mutator_lock_) { return catch_method_; } - ArtMethod* GetThrowMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ArtMethod* GetThrowMethod() SHARED_REQUIRES(Locks::mutator_lock_) { return throw_method_; } - mirror::Object* GetThisAtThrow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Object* GetThisAtThrow() SHARED_REQUIRES(Locks::mutator_lock_) { return this_at_throw_.Get(); } @@ -3170,7 +3164,7 @@ void Dbg::ManageDeoptimization() { } static bool IsMethodPossiblyInlined(Thread* self, ArtMethod* m) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { const DexFile::CodeItem* code_item = m->GetCodeItem(); if (code_item == nullptr) { // TODO We should not be asked to watch location in a native or abstract method so the code item @@ -3191,7 +3185,7 @@ static bool IsMethodPossiblyInlined(Thread* self, ArtMethod* m) } static const Breakpoint* FindFirstBreakpointForMethod(ArtMethod* m) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::breakpoint_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_, Locks::breakpoint_lock_) { for (Breakpoint& breakpoint : gBreakpoints) { if (breakpoint.Method() == m) { return &breakpoint; @@ -3208,7 +3202,7 @@ bool Dbg::MethodHasAnyBreakpoints(ArtMethod* method) { // Sanity checks all existing breakpoints on the same method. static void SanityCheckExistingBreakpoints(ArtMethod* m, DeoptimizationRequest::Kind deoptimization_kind) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::breakpoint_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_, Locks::breakpoint_lock_) { for (const Breakpoint& breakpoint : gBreakpoints) { if (breakpoint.Method() == m) { CHECK_EQ(deoptimization_kind, breakpoint.GetDeoptimizationKind()); @@ -3237,7 +3231,7 @@ static void SanityCheckExistingBreakpoints(ArtMethod* m, static DeoptimizationRequest::Kind GetRequiredDeoptimizationKind(Thread* self, ArtMethod* m, const Breakpoint** existing_brkpt) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (!Dbg::RequiresDeoptimization()) { // We already run in interpreter-only mode so we don't need to deoptimize anything. VLOG(jdwp) << "No need for deoptimization when fully running with interpreter for method " @@ -3498,8 +3492,8 @@ bool Dbg::IsForcedInterpreterNeededForUpcallImpl(Thread* thread, ArtMethod* m) { class ScopedThreadSuspension { public: ScopedThreadSuspension(Thread* self, JDWP::ObjectId thread_id) - LOCKS_EXCLUDED(Locks::thread_list_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : + REQUIRES(!Locks::thread_list_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : thread_(nullptr), error_(JDWP::ERR_NONE), self_suspend_(false), @@ -3560,7 +3554,7 @@ JDWP::JdwpError Dbg::ConfigureStep(JDWP::ObjectId thread_id, JDWP::JdwpStepSize // Work out what ArtMethod* we're in, the current line number, and how deep the stack currently // is for step-out. 
struct SingleStepStackVisitor : public StackVisitor { - explicit SingleStepStackVisitor(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + explicit SingleStepStackVisitor(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_) : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames), stack_depth(0), method(nullptr), @@ -4419,7 +4413,7 @@ class HeapChunkContext { needHeader_ = false; } - void Flush() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void Flush() SHARED_REQUIRES(Locks::mutator_lock_) { if (pieceLenField_ == nullptr) { // Flush immediately post Reset (maybe back-to-back Flush). Ignore. CHECK(needHeader_); @@ -4435,13 +4429,13 @@ class HeapChunkContext { } static void HeapChunkJavaCallback(void* start, void* end, size_t used_bytes, void* arg) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, + SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) { reinterpret_cast<HeapChunkContext*>(arg)->HeapChunkJavaCallback(start, end, used_bytes); } static void HeapChunkNativeCallback(void* start, void* end, size_t used_bytes, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { reinterpret_cast<HeapChunkContext*>(arg)->HeapChunkNativeCallback(start, end, used_bytes); } @@ -4461,7 +4455,7 @@ class HeapChunkContext { } // Returns true if the object is not an empty chunk. - bool ProcessRecord(void* start, size_t used_bytes) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool ProcessRecord(void* start, size_t used_bytes) SHARED_REQUIRES(Locks::mutator_lock_) { // Note: heap call backs cannot manipulate the heap upon which they are crawling, care is taken // in the following code not to allocate memory, by ensuring buf_ is of the correct size if (used_bytes == 0) { @@ -4498,7 +4492,7 @@ class HeapChunkContext { } void HeapChunkNativeCallback(void* start, void* /*end*/, size_t used_bytes) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (ProcessRecord(start, used_bytes)) { uint8_t state = ExamineNativeObject(start); AppendChunk(state, start, used_bytes + chunk_overhead_, true /*is_native*/); @@ -4507,7 +4501,7 @@ class HeapChunkContext { } void HeapChunkJavaCallback(void* start, void* /*end*/, size_t used_bytes) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) { if (ProcessRecord(start, used_bytes)) { // Determine the type of this chunk. // OLD-TODO: if context.merge, see if this chunk is different from the last chunk. @@ -4519,7 +4513,7 @@ class HeapChunkContext { } void AppendChunk(uint8_t state, void* ptr, size_t length, bool is_native) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // Make sure there's enough room left in the buffer. // We need to use two bytes for every fractional 256 allocation units used by the chunk plus // 17 bytes for any header. @@ -4552,12 +4546,12 @@ class HeapChunkContext { *p_++ = length - 1; } - uint8_t ExamineNativeObject(const void* p) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + uint8_t ExamineNativeObject(const void* p) SHARED_REQUIRES(Locks::mutator_lock_) { return p == nullptr ? 
HPSG_STATE(SOLIDITY_FREE, 0) : HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE); } uint8_t ExamineJavaObject(mirror::Object* o) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { if (o == nullptr) { return HPSG_STATE(SOLIDITY_FREE, 0); } @@ -4607,7 +4601,7 @@ class HeapChunkContext { }; static void BumpPointerSpaceCallback(mirror::Object* obj, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) { const size_t size = RoundUp(obj->SizeOf(), kObjectAlignment); HeapChunkContext::HeapChunkJavaCallback( obj, reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(obj) + size), size, arg); @@ -4633,7 +4627,7 @@ void Dbg::DdmSendHeapSegments(bool native) { // Send a series of heap segment chunks. HeapChunkContext context(what == HPSG_WHAT_MERGED_OBJECTS, native); if (native) { -#if defined(HAVE_ANDROID_OS) && defined(USE_DLMALLOC) +#if defined(__ANDROID__) && defined(USE_DLMALLOC) dlmalloc_inspect_all(HeapChunkContext::HeapChunkNativeCallback, &context); HeapChunkContext::HeapChunkNativeCallback(nullptr, nullptr, 0, &context); // Indicate end of a space. #else @@ -4772,7 +4766,7 @@ class StringTable { }; static const char* GetMethodSourceFile(ArtMethod* method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(method != nullptr); const char* source_file = method->GetDeclaringClassSourceFile(); return (source_file != nullptr) ? source_file : ""; diff --git a/runtime/debugger.h b/runtime/debugger.h index fd7d46c37e..a9fa6ce8cb 100644 --- a/runtime/debugger.h +++ b/runtime/debugger.h @@ -79,7 +79,7 @@ struct DebugInvokeReq { JDWP::ExpandBuf* const reply; void VisitRoots(RootVisitor* visitor, const RootInfo& root_info) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); private: DISALLOW_COPY_AND_ASSIGN(DebugInvokeReq); @@ -155,15 +155,15 @@ class DeoptimizationRequest { DeoptimizationRequest() : kind_(kNothing), instrumentation_event_(0), method_(nullptr) {} DeoptimizationRequest(const DeoptimizationRequest& other) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : kind_(other.kind_), instrumentation_event_(other.instrumentation_event_) { // Create a new JNI global reference for the method. SetMethod(other.Method()); } - ArtMethod* Method() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ArtMethod* Method() const SHARED_REQUIRES(Locks::mutator_lock_); - void SetMethod(ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetMethod(ArtMethod* m) SHARED_REQUIRES(Locks::mutator_lock_); // Name 'Kind()' would collide with the above enum name. Kind GetKind() const { @@ -205,7 +205,7 @@ class Dbg { static void StopJdwp(); // Invoked by the GC in case we need to keep DDMS informed. - static void GcDidFinish() LOCKS_EXCLUDED(Locks::mutator_lock_); + static void GcDidFinish() REQUIRES(!Locks::mutator_lock_); // Return the DebugInvokeReq for the current thread. 
static DebugInvokeReq* GetInvokeReq(); @@ -219,8 +219,8 @@ class Dbg { */ static void Connected(); static void GoActive() - LOCKS_EXCLUDED(Locks::breakpoint_lock_, Locks::deoptimization_lock_, Locks::mutator_lock_); - static void Disconnected() LOCKS_EXCLUDED(Locks::deoptimization_lock_, Locks::mutator_lock_); + REQUIRES(!Locks::breakpoint_lock_, !Locks::deoptimization_lock_, !Locks::mutator_lock_); + static void Disconnected() REQUIRES(!Locks::deoptimization_lock_, !Locks::mutator_lock_); static void Dispose() { gDisposed = true; } @@ -239,8 +239,7 @@ class Dbg { // Returns true if a method has any breakpoints. static bool MethodHasAnyBreakpoints(ArtMethod* method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::breakpoint_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::breakpoint_lock_); static bool IsDisposed() { return gDisposed; @@ -254,248 +253,233 @@ class Dbg { static int64_t LastDebuggerActivity(); static void UndoDebuggerSuspensions() - LOCKS_EXCLUDED(Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_); + REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_); /* * Class, Object, Array */ static std::string GetClassName(JDWP::RefTypeId id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static std::string GetClassName(mirror::Class* klass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError GetClassObject(JDWP::RefTypeId id, JDWP::ObjectId* class_object_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError GetSuperclass(JDWP::RefTypeId id, JDWP::RefTypeId* superclass_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError GetClassLoader(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError GetModifiers(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError GetReflectedType(JDWP::RefTypeId class_id, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void GetClassList(std::vector<JDWP::RefTypeId>* classes) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError GetClassInfo(JDWP::RefTypeId class_id, JDWP::JdwpTypeTag* pTypeTag, uint32_t* pStatus, std::string* pDescriptor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void FindLoadedClassBySignature(const char* descriptor, std::vector<JDWP::RefTypeId>* ids) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError GetReferenceType(JDWP::ObjectId object_id, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError GetSignature(JDWP::RefTypeId ref_type_id, std::string* signature) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError GetSourceFile(JDWP::RefTypeId ref_type_id, std::string* source_file) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError GetObjectTag(JDWP::ObjectId object_id, uint8_t* tag) - 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static size_t GetTagWidth(JDWP::JdwpTag tag); static JDWP::JdwpError GetArrayLength(JDWP::ObjectId array_id, int32_t* length) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError OutputArray(JDWP::ObjectId array_id, int offset, int count, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError SetArrayElements(JDWP::ObjectId array_id, int offset, int count, JDWP::Request* request) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError CreateString(const std::string& str, JDWP::ObjectId* new_string_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError CreateObject(JDWP::RefTypeId class_id, JDWP::ObjectId* new_object_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError CreateArrayObject(JDWP::RefTypeId array_class_id, uint32_t length, JDWP::ObjectId* new_array_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // // Event filtering. // static bool MatchThread(JDWP::ObjectId expected_thread_id, Thread* event_thread) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static bool MatchLocation(const JDWP::JdwpLocation& expected_location, const JDWP::EventLocation& event_location) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static bool MatchType(mirror::Class* event_class, JDWP::RefTypeId class_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static bool MatchField(JDWP::RefTypeId expected_type_id, JDWP::FieldId expected_field_id, ArtField* event_field) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static bool MatchInstance(JDWP::ObjectId expected_instance_id, mirror::Object* event_instance) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // // Monitors. // static JDWP::JdwpError GetMonitorInfo(JDWP::ObjectId object_id, JDWP::ExpandBuf* reply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError GetOwnedMonitors(JDWP::ObjectId thread_id, std::vector<JDWP::ObjectId>* monitors, std::vector<uint32_t>* stack_depths) - LOCKS_EXCLUDED(Locks::thread_list_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError GetContendedMonitor(JDWP::ObjectId thread_id, JDWP::ObjectId* contended_monitor) - LOCKS_EXCLUDED(Locks::thread_list_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_); // // Heap. 
// static JDWP::JdwpError GetInstanceCounts(const std::vector<JDWP::RefTypeId>& class_ids, std::vector<uint64_t>* counts) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError GetInstances(JDWP::RefTypeId class_id, int32_t max_count, std::vector<JDWP::ObjectId>* instances) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError GetReferringObjects(JDWP::ObjectId object_id, int32_t max_count, std::vector<JDWP::ObjectId>* referring_objects) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError DisableCollection(JDWP::ObjectId object_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError EnableCollection(JDWP::ObjectId object_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError IsCollected(JDWP::ObjectId object_id, bool* is_collected) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void DisposeObject(JDWP::ObjectId object_id, uint32_t reference_count) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // // Methods and fields. // static std::string GetMethodName(JDWP::MethodId method_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError OutputDeclaredFields(JDWP::RefTypeId ref_type_id, bool with_generic, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError OutputDeclaredMethods(JDWP::RefTypeId ref_type_id, bool with_generic, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError OutputDeclaredInterfaces(JDWP::RefTypeId ref_type_id, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void OutputLineTable(JDWP::RefTypeId ref_type_id, JDWP::MethodId method_id, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void OutputVariableTable(JDWP::RefTypeId ref_type_id, JDWP::MethodId id, bool with_generic, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void OutputMethodReturnValue(JDWP::MethodId method_id, const JValue* return_value, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void OutputFieldValue(JDWP::FieldId field_id, const JValue* field_value, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError GetBytecodes(JDWP::RefTypeId class_id, JDWP::MethodId method_id, std::vector<uint8_t>* bytecodes) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static std::string GetFieldName(JDWP::FieldId field_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpTag GetFieldBasicTag(JDWP::FieldId field_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpTag GetStaticFieldBasicTag(JDWP::FieldId field_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError 
GetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError SetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id, uint64_t value, int width) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError GetStaticFieldValue(JDWP::RefTypeId ref_type_id, JDWP::FieldId field_id, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError SetStaticFieldValue(JDWP::FieldId field_id, uint64_t value, int width) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError StringToUtf8(JDWP::ObjectId string_id, std::string* str) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void OutputJValue(JDWP::JdwpTag tag, const JValue* return_value, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); /* * Thread, ThreadGroup, Frame */ static JDWP::JdwpError GetThreadName(JDWP::ObjectId thread_id, std::string* name) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::thread_list_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::thread_list_lock_); static JDWP::JdwpError GetThreadGroup(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::thread_list_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::thread_list_lock_); static JDWP::JdwpError GetThreadGroupName(JDWP::ObjectId thread_group_id, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError GetThreadGroupParent(JDWP::ObjectId thread_group_id, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError GetThreadGroupChildren(JDWP::ObjectId thread_group_id, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::ObjectId GetSystemThreadGroupId() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpThreadStatus ToJdwpThreadStatus(ThreadState state); static JDWP::JdwpError GetThreadStatus(JDWP::ObjectId thread_id, JDWP::JdwpThreadStatus* pThreadStatus, JDWP::JdwpSuspendStatus* pSuspendStatus) - LOCKS_EXCLUDED(Locks::thread_list_lock_); + REQUIRES(!Locks::thread_list_lock_); static JDWP::JdwpError GetThreadDebugSuspendCount(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply) - LOCKS_EXCLUDED(Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_); + REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_); // static void WaitForSuspend(JDWP::ObjectId thread_id); // Fills 'thread_ids' with the threads in the given thread group. If thread_group_id == 0, // returns all threads. 
static void GetThreads(mirror::Object* thread_group, std::vector<JDWP::ObjectId>* thread_ids) - LOCKS_EXCLUDED(Locks::thread_list_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError GetThreadFrameCount(JDWP::ObjectId thread_id, size_t* result) - LOCKS_EXCLUDED(Locks::thread_list_lock_); + REQUIRES(!Locks::thread_list_lock_); static JDWP::JdwpError GetThreadFrames(JDWP::ObjectId thread_id, size_t start_frame, size_t frame_count, JDWP::ExpandBuf* buf) - LOCKS_EXCLUDED(Locks::thread_list_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_); - static JDWP::ObjectId GetThreadSelfId() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static JDWP::ObjectId GetThreadId(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static JDWP::ObjectId GetThreadSelfId() SHARED_REQUIRES(Locks::mutator_lock_); + static JDWP::ObjectId GetThreadId(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_); static void SuspendVM() - LOCKS_EXCLUDED(Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_); + REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_); static void ResumeVM() - LOCKS_EXCLUDED(Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_); + REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_); static JDWP::JdwpError SuspendThread(JDWP::ObjectId thread_id, bool request_suspension = true) - LOCKS_EXCLUDED(Locks::mutator_lock_, - Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_); + REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, + !Locks::thread_suspend_count_lock_); static void ResumeThread(JDWP::ObjectId thread_id) - LOCKS_EXCLUDED(Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); static void SuspendSelf(); static JDWP::JdwpError GetThisObject(JDWP::ObjectId thread_id, JDWP::FrameId frame_id, JDWP::ObjectId* result) - LOCKS_EXCLUDED(Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError GetLocalValues(JDWP::Request* request, JDWP::ExpandBuf* pReply) - LOCKS_EXCLUDED(Locks::thread_list_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError SetLocalValues(JDWP::Request* request) - LOCKS_EXCLUDED(Locks::thread_list_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError Interrupt(JDWP::ObjectId thread_id) - LOCKS_EXCLUDED(Locks::thread_list_lock_); + REQUIRES(!Locks::thread_list_lock_); /* * Debugger notification @@ -508,47 +492,42 @@ class Dbg { }; static void PostFieldAccessEvent(ArtMethod* m, int dex_pc, mirror::Object* this_object, ArtField* f) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void PostFieldModificationEvent(ArtMethod* m, int dex_pc, mirror::Object* this_object, ArtField* f, const JValue* field_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void 
PostException(mirror::Throwable* exception) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void PostThreadStart(Thread* t) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void PostThreadDeath(Thread* t) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void PostClassPrepare(mirror::Class* c) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void UpdateDebugger(Thread* thread, mirror::Object* this_object, ArtMethod* method, uint32_t new_dex_pc, int event_flags, const JValue* return_value) - LOCKS_EXCLUDED(Locks::breakpoint_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::breakpoint_lock_) SHARED_REQUIRES(Locks::mutator_lock_); // Indicates whether we need deoptimization for debugging. static bool RequiresDeoptimization(); // Records deoptimization request in the queue. static void RequestDeoptimization(const DeoptimizationRequest& req) - LOCKS_EXCLUDED(Locks::deoptimization_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::deoptimization_lock_) SHARED_REQUIRES(Locks::mutator_lock_); // Manage deoptimization after updating JDWP events list. Suspends all threads, processes each // request and finally resumes all threads. static void ManageDeoptimization() - LOCKS_EXCLUDED(Locks::deoptimization_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::deoptimization_lock_) SHARED_REQUIRES(Locks::mutator_lock_); // Breakpoints. static void WatchLocation(const JDWP::JdwpLocation* pLoc, DeoptimizationRequest* req) - LOCKS_EXCLUDED(Locks::breakpoint_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::breakpoint_lock_) SHARED_REQUIRES(Locks::mutator_lock_); static void UnwatchLocation(const JDWP::JdwpLocation* pLoc, DeoptimizationRequest* req) - LOCKS_EXCLUDED(Locks::breakpoint_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::breakpoint_lock_) SHARED_REQUIRES(Locks::mutator_lock_); /* * Forced interpreter checkers for single-step and continue support. @@ -557,7 +536,7 @@ class Dbg { // Indicates whether we need to force the use of interpreter to invoke a method. // This allows to single-step or continue into the called method. static bool IsForcedInterpreterNeededForCalling(Thread* thread, ArtMethod* m) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (!IsDebuggerActive()) { return false; } @@ -568,7 +547,7 @@ class Dbg { // method through the resolution trampoline. This allows to single-step or continue into // the called method. static bool IsForcedInterpreterNeededForResolution(Thread* thread, ArtMethod* m) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (!IsDebuggerActive()) { return false; } @@ -579,7 +558,7 @@ class Dbg { // a method through the resolution trampoline. This allows to deoptimize the stack for // debugging when we returned from the called method. static bool IsForcedInstrumentationNeededForResolution(Thread* thread, ArtMethod* m) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (!IsDebuggerActive()) { return false; } @@ -590,7 +569,7 @@ class Dbg { // interpreter into the runtime. This allows to deoptimize the stack and continue // execution with interpreter for debugging. 
static bool IsForcedInterpreterNeededForUpcall(Thread* thread, ArtMethod* m) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (!IsDebuggerActive()) { return false; } @@ -600,10 +579,9 @@ class Dbg { // Single-stepping. static JDWP::JdwpError ConfigureStep(JDWP::ObjectId thread_id, JDWP::JdwpStepSize size, JDWP::JdwpStepDepth depth) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void UnconfigureStep(JDWP::ObjectId thread_id) - LOCKS_EXCLUDED(Locks::thread_list_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_); /* * Invoke support @@ -623,9 +601,8 @@ class Dbg { JDWP::MethodId method_id, uint32_t arg_count, uint64_t arg_values[], JDWP::JdwpTag* arg_types, uint32_t options) - LOCKS_EXCLUDED(Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Called by the event thread to execute a method prepared by the JDWP thread in the given // DebugInvokeReq object. Once the invocation completes, the event thread attaches a reply @@ -642,30 +619,29 @@ class Dbg { * DDM support. */ static void DdmSendThreadNotification(Thread* t, uint32_t type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void DdmSetThreadNotification(bool enable) - LOCKS_EXCLUDED(Locks::thread_list_lock_); + REQUIRES(!Locks::thread_list_lock_); static bool DdmHandlePacket(JDWP::Request* request, uint8_t** pReplyBuf, int* pReplyLen); - static void DdmConnected() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static void DdmDisconnected() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static void DdmConnected() SHARED_REQUIRES(Locks::mutator_lock_); + static void DdmDisconnected() SHARED_REQUIRES(Locks::mutator_lock_); static void DdmSendChunk(uint32_t type, const std::vector<uint8_t>& bytes) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void DdmSendChunk(uint32_t type, size_t len, const uint8_t* buf) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void DdmSendChunkV(uint32_t type, const iovec* iov, int iov_count) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void VisitRoots(RootVisitor* visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); /* * Allocation tracking support. 
*/ - static void SetAllocTrackingEnabled(bool enabled) LOCKS_EXCLUDED(Locks::alloc_tracker_lock_); + static void SetAllocTrackingEnabled(bool enabled) REQUIRES(!Locks::alloc_tracker_lock_); static jbyteArray GetRecentAllocations() - LOCKS_EXCLUDED(Locks::alloc_tracker_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static void DumpRecentAllocations() LOCKS_EXCLUDED(Locks::alloc_tracker_lock_); + REQUIRES(!Locks::alloc_tracker_lock_) SHARED_REQUIRES(Locks::mutator_lock_); + static void DumpRecentAllocations() REQUIRES(!Locks::alloc_tracker_lock_); enum HpifWhen { HPIF_WHEN_NEVER = 0, @@ -674,7 +650,7 @@ class Dbg { HPIF_WHEN_EVERY_GC = 3 }; static int DdmHandleHpifChunk(HpifWhen when) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); enum HpsgWhen { HPSG_WHEN_NEVER = 0, @@ -687,78 +663,76 @@ class Dbg { static bool DdmHandleHpsgNhsgChunk(HpsgWhen when, HpsgWhat what, bool native); static void DdmSendHeapInfo(HpifWhen reason) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void DdmSendHeapSegments(bool native) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static ObjectRegistry* GetObjectRegistry() { return gRegistry; } static JDWP::JdwpTag TagFromObject(const ScopedObjectAccessUnchecked& soa, mirror::Object* o) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpTypeTag GetTypeTag(mirror::Class* klass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::FieldId ToFieldId(const ArtField* f) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void SetJdwpLocation(JDWP::JdwpLocation* location, ArtMethod* m, uint32_t dex_pc) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_); static JDWP::JdwpState* GetJdwpState(); - static uint32_t GetInstrumentationEvents() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + static uint32_t GetInstrumentationEvents() SHARED_REQUIRES(Locks::mutator_lock_) { return instrumentation_events_; } private: static void ExecuteMethodWithoutPendingException(ScopedObjectAccess& soa, DebugInvokeReq* pReq) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void BuildInvokeReply(JDWP::ExpandBuf* pReply, uint32_t request_id, JDWP::JdwpTag result_tag, uint64_t result_value, JDWP::ObjectId exception) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError GetLocalValue(const StackVisitor& visitor, ScopedObjectAccessUnchecked& soa, int slot, JDWP::JdwpTag tag, uint8_t* buf, size_t width) - LOCKS_EXCLUDED(Locks::thread_list_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError SetLocalValue(StackVisitor& visitor, int slot, JDWP::JdwpTag tag, uint64_t value, size_t width) - LOCKS_EXCLUDED(Locks::thread_list_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_); - static void DdmBroadcast(bool connect) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static void DdmBroadcast(bool connect) SHARED_REQUIRES(Locks::mutator_lock_); static void PostThreadStartOrStop(Thread*, uint32_t) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + 
SHARED_REQUIRES(Locks::mutator_lock_); static void PostLocationEvent(ArtMethod* method, int pcOffset, mirror::Object* thisPtr, int eventFlags, const JValue* return_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void ProcessDeoptimizationRequest(const DeoptimizationRequest& request) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::mutator_lock_); static void RequestDeoptimizationLocked(const DeoptimizationRequest& req) - EXCLUSIVE_LOCKS_REQUIRED(Locks::deoptimization_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::deoptimization_lock_) SHARED_REQUIRES(Locks::mutator_lock_); static bool IsForcedInterpreterNeededForCallingImpl(Thread* thread, ArtMethod* m) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static bool IsForcedInterpreterNeededForResolutionImpl(Thread* thread, ArtMethod* m) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static bool IsForcedInstrumentationNeededForResolutionImpl(Thread* thread, ArtMethod* m) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static bool IsForcedInterpreterNeededForUpcallImpl(Thread* thread, ArtMethod* m) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Indicates whether the debugger is making requests. static bool gDebuggerActive; diff --git a/runtime/dex_file.h b/runtime/dex_file.h index 3a15f1ac70..ceefdecac9 100644 --- a/runtime/dex_file.h +++ b/runtime/dex_file.h @@ -870,7 +870,7 @@ class DexFile { // // This is used by runtime; therefore use art::Method not art::DexFile::Method. int32_t GetLineNumFromPC(ArtMethod* method, uint32_t rel_pc) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void DecodeDebugInfo(const CodeItem* code_item, bool is_static, uint32_t method_idx, DexDebugNewPositionCb position_cb, DexDebugNewLocalCb local_cb, @@ -1314,10 +1314,10 @@ class EncodedStaticFieldValueIterator { EncodedStaticFieldValueIterator(const DexFile& dex_file, Handle<mirror::DexCache>* dex_cache, Handle<mirror::ClassLoader>* class_loader, ClassLinker* linker, const DexFile::ClassDef& class_def) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive> - void ReadValueToField(ArtField* field) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void ReadValueToField(ArtField* field) const SHARED_REQUIRES(Locks::mutator_lock_); bool HasNext() const { return pos_ < array_size_; } diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h index de925b7e8c..3e15cc5446 100644 --- a/runtime/entrypoints/entrypoint_utils-inl.h +++ b/runtime/entrypoints/entrypoint_utils-inl.h @@ -41,7 +41,7 @@ namespace art { inline ArtMethod* GetResolvedMethod(ArtMethod* outer_method, const InlineInfo& inline_info, uint8_t inlining_depth) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { uint32_t method_index = inline_info.GetMethodIndexAtDepth(inlining_depth); InvokeType invoke_type = static_cast<InvokeType>( inline_info.GetInvokeTypeAtDepth(inlining_depth)); @@ -74,7 +74,7 @@ inline ArtMethod* GetResolvedMethod(ArtMethod* outer_method, inline ArtMethod* GetCalleeSaveMethodCaller(ArtMethod** sp, Runtime::CalleeSaveType type, bool do_caller_check = false) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + 
SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(type)); const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA, type); @@ -110,7 +110,7 @@ inline ArtMethod* GetCalleeSaveMethodCaller(ArtMethod** sp, } inline ArtMethod* GetCalleeSaveMethodCaller(Thread* self, Runtime::CalleeSaveType type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return GetCalleeSaveMethodCaller( self->GetManagedStack()->GetTopQuickFrame(), type, true /* do_caller_check */); } @@ -403,7 +403,7 @@ inline ArtField* FindFieldFromCode(uint32_t field_idx, ArtMethod* referrer, // Explicit template declarations of FindFieldFromCode for all field access types. #define EXPLICIT_FIND_FIELD_FROM_CODE_TEMPLATE_DECL(_type, _access_check) \ -template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE \ +template SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE \ ArtField* FindFieldFromCode<_type, _access_check>(uint32_t field_idx, \ ArtMethod* referrer, \ Thread* self, size_t expected_size) \ @@ -531,7 +531,7 @@ inline ArtMethod* FindMethodFromCode(uint32_t method_idx, mirror::Object** this_ // Explicit template declarations of FindMethodFromCode for all invoke types. #define EXPLICIT_FIND_METHOD_FROM_CODE_TEMPLATE_DECL(_type, _access_check) \ - template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE \ + template SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE \ ArtMethod* FindMethodFromCode<_type, _access_check>(uint32_t method_idx, \ mirror::Object** this_object, \ ArtMethod** referrer, \ diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc index fc7f8b782a..eaf26bc462 100644 --- a/runtime/entrypoints/entrypoint_utils.cc +++ b/runtime/entrypoints/entrypoint_utils.cc @@ -38,7 +38,7 @@ static inline mirror::Class* CheckFilledNewArrayAlloc(uint32_t type_idx, ArtMethod* referrer, Thread* self, bool access_check) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (UNLIKELY(component_count < 0)) { ThrowNegativeArraySizeException(component_count); return nullptr; // Failure diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h index 47865a2a80..dc04c0ac98 100644 --- a/runtime/entrypoints/entrypoint_utils.h +++ b/runtime/entrypoints/entrypoint_utils.h @@ -45,12 +45,12 @@ template <const bool kAccessCheck> ALWAYS_INLINE inline mirror::Class* CheckObjectAlloc(uint32_t type_idx, ArtMethod* method, Thread* self, bool* slow_path) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ALWAYS_INLINE inline mirror::Class* CheckClassInitializedForObjectAlloc(mirror::Class* klass, Thread* self, bool* slow_path) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Given the context of a calling Method, use its DexCache to resolve a type to a Class. If it // cannot be resolved, throw an error. If it can, use it to create an instance. @@ -61,21 +61,21 @@ ALWAYS_INLINE inline mirror::Object* AllocObjectFromCode(uint32_t type_idx, ArtMethod* method, Thread* self, gc::AllocatorType allocator_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Given the context of a calling Method and a resolved class, create an instance. 
template <bool kInstrumented> ALWAYS_INLINE inline mirror::Object* AllocObjectFromCodeResolved(mirror::Class* klass, Thread* self, gc::AllocatorType allocator_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Given the context of a calling Method and an initialized class, create an instance. template <bool kInstrumented> ALWAYS_INLINE inline mirror::Object* AllocObjectFromCodeInitialized(mirror::Class* klass, Thread* self, gc::AllocatorType allocator_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template <bool kAccessCheck> @@ -83,7 +83,7 @@ ALWAYS_INLINE inline mirror::Class* CheckArrayAlloc(uint32_t type_idx, int32_t component_count, ArtMethod* method, bool* slow_path) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Given the context of a calling Method, use its DexCache to resolve a type to an array Class. If // it cannot be resolved, throw an error. If it can, use it to create an array. @@ -95,7 +95,7 @@ ALWAYS_INLINE inline mirror::Array* AllocArrayFromCode(uint32_t type_idx, ArtMethod* method, Thread* self, gc::AllocatorType allocator_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template <bool kAccessCheck, bool kInstrumented> ALWAYS_INLINE inline mirror::Array* AllocArrayFromCodeResolved(mirror::Class* klass, @@ -103,13 +103,13 @@ ALWAYS_INLINE inline mirror::Array* AllocArrayFromCodeResolved(mirror::Class* kl ArtMethod* method, Thread* self, gc::AllocatorType allocator_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); extern mirror::Array* CheckAndAllocArrayFromCode(uint32_t type_idx, int32_t component_count, ArtMethod* method, Thread* self, bool access_check, gc::AllocatorType allocator_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); extern mirror::Array* CheckAndAllocArrayFromCodeInstrumented(uint32_t type_idx, int32_t component_count, @@ -117,7 +117,7 @@ extern mirror::Array* CheckAndAllocArrayFromCodeInstrumented(uint32_t type_idx, Thread* self, bool access_check, gc::AllocatorType allocator_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Type of find field operation for fast and slow case. enum FindFieldType { @@ -134,47 +134,47 @@ enum FindFieldType { template<FindFieldType type, bool access_check> inline ArtField* FindFieldFromCode( uint32_t field_idx, ArtMethod* referrer, Thread* self, size_t expected_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<InvokeType type, bool access_check> inline ArtMethod* FindMethodFromCode( uint32_t method_idx, mirror::Object** this_object, ArtMethod** referrer, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Fast path field resolution that can't initialize classes or throw exceptions. inline ArtField* FindFieldFast( uint32_t field_idx, ArtMethod* referrer, FindFieldType type, size_t expected_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Fast path method resolution that can't throw exceptions. 
inline ArtMethod* FindMethodFast( uint32_t method_idx, mirror::Object* this_object, ArtMethod* referrer, bool access_check, InvokeType type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); inline mirror::Class* ResolveVerifyAndClinit( uint32_t type_idx, ArtMethod* referrer, Thread* self, bool can_run_clinit, bool verify_access) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); -extern void ThrowStackOverflowError(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); +extern void ThrowStackOverflowError(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_); inline mirror::String* ResolveStringFromCode(ArtMethod* referrer, uint32_t string_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // TODO: annotalysis disabled as monitor semantics are maintained in Java code. inline void UnlockJniSynchronizedMethod(jobject locked, Thread* self) NO_THREAD_SAFETY_ANALYSIS; void CheckReferenceResult(mirror::Object* o, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); JValue InvokeProxyInvocationHandler(ScopedObjectAccessAlreadyRunnable& soa, const char* shorty, jobject rcvr_jobj, jobject interface_art_method_jobj, std::vector<jvalue>& args) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool FillArrayData(mirror::Object* obj, const Instruction::ArrayDataPayload* payload) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template <typename INT_TYPE, typename FLOAT_TYPE> inline INT_TYPE art_float_to_integral(FLOAT_TYPE f); diff --git a/runtime/entrypoints/quick/callee_save_frame.h b/runtime/entrypoints/quick/callee_save_frame.h index 7a4415846c..331de91a40 100644 --- a/runtime/entrypoints/quick/callee_save_frame.h +++ b/runtime/entrypoints/quick/callee_save_frame.h @@ -39,32 +39,32 @@ class ScopedQuickEntrypointChecks { explicit ScopedQuickEntrypointChecks(Thread *self, bool entry_check = kIsDebugBuild, bool exit_check = kIsDebugBuild) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : self_(self), exit_check_(exit_check) { + SHARED_REQUIRES(Locks::mutator_lock_) : self_(self), exit_check_(exit_check) { if (entry_check) { TestsOnEntry(); } } - ScopedQuickEntrypointChecks() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + ScopedQuickEntrypointChecks() SHARED_REQUIRES(Locks::mutator_lock_) : self_(kIsDebugBuild ? 
Thread::Current() : nullptr), exit_check_(kIsDebugBuild) { if (kIsDebugBuild) { TestsOnEntry(); } } - ~ScopedQuickEntrypointChecks() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ~ScopedQuickEntrypointChecks() SHARED_REQUIRES(Locks::mutator_lock_) { if (exit_check_) { TestsOnExit(); } } private: - void TestsOnEntry() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void TestsOnEntry() SHARED_REQUIRES(Locks::mutator_lock_) { Locks::mutator_lock_->AssertSharedHeld(self_); self_->VerifyStack(); } - void TestsOnExit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void TestsOnExit() SHARED_REQUIRES(Locks::mutator_lock_) { Locks::mutator_lock_->AssertSharedHeld(self_); self_->VerifyStack(); } diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc index f56b5e45b6..9311791a42 100644 --- a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc @@ -30,7 +30,7 @@ static constexpr bool kUseTlabFastPath = true; #define GENERATE_ENTRYPOINTS_FOR_ALLOCATOR_INST(suffix, suffix2, instrumented_bool, allocator_type) \ extern "C" mirror::Object* artAllocObjectFromCode ##suffix##suffix2( \ uint32_t type_idx, ArtMethod* method, Thread* self) \ - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \ + SHARED_REQUIRES(Locks::mutator_lock_) { \ ScopedQuickEntrypointChecks sqec(self); \ if (kUseTlabFastPath && !instrumented_bool && allocator_type == gc::kAllocatorTypeTLAB) { \ mirror::Class* klass = method->GetDexCacheResolvedType<false>(type_idx); \ @@ -57,7 +57,7 @@ extern "C" mirror::Object* artAllocObjectFromCode ##suffix##suffix2( \ } \ extern "C" mirror::Object* artAllocObjectFromCodeResolved##suffix##suffix2( \ mirror::Class* klass, ArtMethod* method, Thread* self) \ - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \ + SHARED_REQUIRES(Locks::mutator_lock_) { \ UNUSED(method); \ ScopedQuickEntrypointChecks sqec(self); \ if (kUseTlabFastPath && !instrumented_bool && allocator_type == gc::kAllocatorTypeTLAB) { \ @@ -84,7 +84,7 @@ extern "C" mirror::Object* artAllocObjectFromCodeResolved##suffix##suffix2( \ } \ extern "C" mirror::Object* artAllocObjectFromCodeInitialized##suffix##suffix2( \ mirror::Class* klass, ArtMethod* method, Thread* self) \ - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \ + SHARED_REQUIRES(Locks::mutator_lock_) { \ UNUSED(method); \ ScopedQuickEntrypointChecks sqec(self); \ if (kUseTlabFastPath && !instrumented_bool && allocator_type == gc::kAllocatorTypeTLAB) { \ @@ -109,34 +109,34 @@ extern "C" mirror::Object* artAllocObjectFromCodeInitialized##suffix##suffix2( \ } \ extern "C" mirror::Object* artAllocObjectFromCodeWithAccessCheck##suffix##suffix2( \ uint32_t type_idx, ArtMethod* method, Thread* self) \ - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \ + SHARED_REQUIRES(Locks::mutator_lock_) { \ ScopedQuickEntrypointChecks sqec(self); \ return AllocObjectFromCode<true, instrumented_bool>(type_idx, method, self, allocator_type); \ } \ extern "C" mirror::Array* artAllocArrayFromCode##suffix##suffix2( \ uint32_t type_idx, int32_t component_count, ArtMethod* method, Thread* self) \ - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \ + SHARED_REQUIRES(Locks::mutator_lock_) { \ ScopedQuickEntrypointChecks sqec(self); \ return AllocArrayFromCode<false, instrumented_bool>(type_idx, component_count, method, self, \ allocator_type); \ } \ extern "C" mirror::Array* artAllocArrayFromCodeResolved##suffix##suffix2( \ mirror::Class* klass, int32_t component_count, ArtMethod* method, 
Thread* self) \ - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \ + SHARED_REQUIRES(Locks::mutator_lock_) { \ ScopedQuickEntrypointChecks sqec(self); \ return AllocArrayFromCodeResolved<false, instrumented_bool>(klass, component_count, method, self, \ allocator_type); \ } \ extern "C" mirror::Array* artAllocArrayFromCodeWithAccessCheck##suffix##suffix2( \ uint32_t type_idx, int32_t component_count, ArtMethod* method, Thread* self) \ - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \ + SHARED_REQUIRES(Locks::mutator_lock_) { \ ScopedQuickEntrypointChecks sqec(self); \ return AllocArrayFromCode<true, instrumented_bool>(type_idx, component_count, method, self, \ allocator_type); \ } \ extern "C" mirror::Array* artCheckAndAllocArrayFromCode##suffix##suffix2( \ uint32_t type_idx, int32_t component_count, ArtMethod* method, Thread* self) \ - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \ + SHARED_REQUIRES(Locks::mutator_lock_) { \ ScopedQuickEntrypointChecks sqec(self); \ if (!instrumented_bool) { \ return CheckAndAllocArrayFromCode(type_idx, component_count, method, self, false, allocator_type); \ @@ -146,7 +146,7 @@ extern "C" mirror::Array* artCheckAndAllocArrayFromCode##suffix##suffix2( \ } \ extern "C" mirror::Array* artCheckAndAllocArrayFromCodeWithAccessCheck##suffix##suffix2( \ uint32_t type_idx, int32_t component_count, ArtMethod* method, Thread* self) \ - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \ + SHARED_REQUIRES(Locks::mutator_lock_) { \ ScopedQuickEntrypointChecks sqec(self); \ if (!instrumented_bool) { \ return CheckAndAllocArrayFromCode(type_idx, component_count, method, self, true, allocator_type); \ @@ -157,7 +157,7 @@ extern "C" mirror::Array* artCheckAndAllocArrayFromCodeWithAccessCheck##suffix## extern "C" mirror::String* artAllocStringFromBytesFromCode##suffix##suffix2( \ mirror::ByteArray* byte_array, int32_t high, int32_t offset, int32_t byte_count, \ Thread* self) \ - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \ + SHARED_REQUIRES(Locks::mutator_lock_) { \ ScopedQuickEntrypointChecks sqec(self); \ StackHandleScope<1> hs(self); \ Handle<mirror::ByteArray> handle_array(hs.NewHandle(byte_array)); \ @@ -166,7 +166,7 @@ extern "C" mirror::String* artAllocStringFromBytesFromCode##suffix##suffix2( \ } \ extern "C" mirror::String* artAllocStringFromCharsFromCode##suffix##suffix2( \ int32_t offset, int32_t char_count, mirror::CharArray* char_array, Thread* self) \ - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \ + SHARED_REQUIRES(Locks::mutator_lock_) { \ StackHandleScope<1> hs(self); \ Handle<mirror::CharArray> handle_array(hs.NewHandle(char_array)); \ return mirror::String::AllocFromCharArray<instrumented_bool>(self, char_count, handle_array, \ @@ -174,7 +174,7 @@ extern "C" mirror::String* artAllocStringFromCharsFromCode##suffix##suffix2( \ } \ extern "C" mirror::String* artAllocStringFromStringFromCode##suffix##suffix2( \ mirror::String* string, Thread* self) \ - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \ + SHARED_REQUIRES(Locks::mutator_lock_) { \ StackHandleScope<1> hs(self); \ Handle<mirror::String> handle_string(hs.NewHandle(string)); \ return mirror::String::AllocFromString<instrumented_bool>(self, handle_string->GetLength(), \ diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.h b/runtime/entrypoints/quick/quick_alloc_entrypoints.h index ec0aef57a7..14a8e0428b 100644 --- a/runtime/entrypoints/quick/quick_alloc_entrypoints.h +++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.h @@ -31,10 +31,10 @@ void 
ResetQuickAllocEntryPoints(QuickEntryPoints* qpoints); // holding the runtime shutdown lock and the mutator lock when we update the entrypoints. void SetQuickAllocEntryPointsAllocator(gc::AllocatorType allocator) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::runtime_shutdown_lock_); + REQUIRES(Locks::mutator_lock_, Locks::runtime_shutdown_lock_); void SetQuickAllocEntryPointsInstrumented(bool instrumented) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::runtime_shutdown_lock_); + REQUIRES(Locks::mutator_lock_, Locks::runtime_shutdown_lock_); } // namespace art diff --git a/runtime/entrypoints/quick/quick_cast_entrypoints.cc b/runtime/entrypoints/quick/quick_cast_entrypoints.cc index 37de380151..968ac534b3 100644 --- a/runtime/entrypoints/quick/quick_cast_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_cast_entrypoints.cc @@ -21,7 +21,7 @@ namespace art { // Assignable test for code, won't throw. Null and equality tests already performed extern "C" uint32_t artIsAssignableFromCode(mirror::Class* klass, mirror::Class* ref_class) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(klass != nullptr); DCHECK(ref_class != nullptr); return klass->IsAssignableFrom(ref_class) ? 1 : 0; diff --git a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc index f1b54459df..a4feac1ea1 100644 --- a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc @@ -28,7 +28,7 @@ namespace art { -extern "C" NO_RETURN void artDeoptimize(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +extern "C" NO_RETURN void artDeoptimize(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); if (VLOG_IS_ON(deopt)) { diff --git a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc index 3cefc47fd2..b12b1189c2 100644 --- a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc @@ -26,7 +26,7 @@ namespace art { extern "C" mirror::Class* artInitializeStaticStorageFromCode(uint32_t type_idx, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // Called to ensure static storage base is initialized for direct static field reads and writes. // A class may be accessing another class' fields when it doesn't have access, as access has been // given by inheritance. @@ -36,7 +36,7 @@ extern "C" mirror::Class* artInitializeStaticStorageFromCode(uint32_t type_idx, } extern "C" mirror::Class* artInitializeTypeFromCode(uint32_t type_idx, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // Called when method->dex_cache_resolved_types_[] misses. ScopedQuickEntrypointChecks sqec(self); auto* caller = GetCalleeSaveMethodCaller(self, Runtime::kRefsOnly); @@ -44,7 +44,7 @@ extern "C" mirror::Class* artInitializeTypeFromCode(uint32_t type_idx, Thread* s } extern "C" mirror::Class* artInitializeTypeAndVerifyAccessFromCode(uint32_t type_idx, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // Called when caller isn't guaranteed to have access to a type and the dex cache may be // unpopulated. 
ScopedQuickEntrypointChecks sqec(self); @@ -53,7 +53,7 @@ extern "C" mirror::Class* artInitializeTypeAndVerifyAccessFromCode(uint32_t type } extern "C" mirror::String* artResolveStringFromCode(int32_t string_idx, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); auto* caller = GetCalleeSaveMethodCaller(self, Runtime::kRefsOnly); return ResolveStringFromCode(caller, string_idx); diff --git a/runtime/entrypoints/quick/quick_entrypoints.h b/runtime/entrypoints/quick/quick_entrypoints.h index cef2510451..3d3f7a1bdb 100644 --- a/runtime/entrypoints/quick/quick_entrypoints.h +++ b/runtime/entrypoints/quick/quick_entrypoints.h @@ -20,6 +20,7 @@ #include <jni.h> #include "base/macros.h" +#include "base/mutex.h" #include "offsets.h" #define QUICK_ENTRYPOINT_OFFSET(ptr_size, x) \ @@ -71,6 +72,16 @@ extern void ReadBarrierJni(mirror::CompressedReference<mirror::Object>* handle_o Thread* self) NO_THREAD_SAFETY_ANALYSIS HOT_ATTR; +// Read barrier entrypoints. +// Compilers for ARM, ARM64, MIPS, MIPS64 can insert a call to this function directly. +// For x86 and x86_64, compilers need a wrapper assembly function, to handle mismatch in ABI. +// This is the read barrier slow path for instance and static fields and reference-type arrays. +// TODO: Currently the read barrier does not have a fast path for compilers to directly generate. +// Ideally the slow path should only take one parameter "ref". +extern "C" mirror::Object* artReadBarrierSlow(mirror::Object* ref, mirror::Object* obj, + uint32_t offset) + SHARED_REQUIRES(Locks::mutator_lock_) HOT_ATTR; + } // namespace art #endif // ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ENTRYPOINTS_H_ diff --git a/runtime/entrypoints/quick/quick_entrypoints_list.h b/runtime/entrypoints/quick/quick_entrypoints_list.h index 60bbf4ac82..73d8ae76ae 100644 --- a/runtime/entrypoints/quick/quick_entrypoints_list.h +++ b/runtime/entrypoints/quick/quick_entrypoints_list.h @@ -145,7 +145,8 @@ V(NewStringFromStringBuffer, void) \ V(NewStringFromStringBuilder, void) \ \ - V(ReadBarrierJni, void, mirror::CompressedReference<mirror::Object>*, Thread*) + V(ReadBarrierJni, void, mirror::CompressedReference<mirror::Object>*, Thread*) \ + V(ReadBarrierSlow, mirror::Object*, mirror::Object*, mirror::Object*, uint32_t) #endif // ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ENTRYPOINTS_LIST_H_ #undef ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ENTRYPOINTS_LIST_H_ // #define is only for lint. 
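(Editor's aside — illustrative sketch, not part of the patch.) The new read-barrier slow path is declared in quick_entrypoints.h and registered as one more V(...) row in quick_entrypoints_list.h above; its definition follows later in this patch, in quick_field_entrypoints.cc. For readers unfamiliar with the pattern, the hypothetical sketch below (generic stand-in names and types, not ART's actual expansion) shows how such an X-macro list is typically consumed: each row is expanded once into a function-pointer slot that generated code can reach at a fixed offset (compare the QUICK_ENTRYPOINT_OFFSET macro visible in the context above) and once into a printable slot name, so the single added row is enough to register ReadBarrierSlow with every consumer of the list.

// Illustrative only; stand-in types and names, not ART's definitions.
#include <cstdint>

struct Object;  // stand-in for art::mirror::Object

// A two-row stand-in for QUICK_ENTRYPOINT_LIST; each row is V(name, return type, argument types...).
#define MY_ENTRYPOINT_LIST(V) \
  V(ReadBarrierJni, void, void*, void*) \
  V(ReadBarrierSlow, Object*, Object*, Object*, uint32_t)

// Expansion 1: a struct of function-pointer slots (pReadBarrierJni, pReadBarrierSlow, ...)
// that generated code can call through.
struct MyEntryPoints {
#define ENTRYPOINT_SLOT(name, rettype, ...) rettype (*p##name)(__VA_ARGS__);
  MY_ENTRYPOINT_LIST(ENTRYPOINT_SLOT)
#undef ENTRYPOINT_SLOT
};

// Expansion 2: the same list reused to produce readable slot names for debug output.
constexpr const char* kEntryPointNames[] = {
#define ENTRYPOINT_NAME(name, ...) "p" #name,
  MY_ENTRYPOINT_LIST(ENTRYPOINT_NAME)
#undef ENTRYPOINT_NAME
};

Because every consumer re-expands the same list, no expansion site has to change when a row such as ReadBarrierSlow is added; only the declaration and the definition elsewhere in the patch are needed.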
diff --git a/runtime/entrypoints/quick/quick_field_entrypoints.cc b/runtime/entrypoints/quick/quick_field_entrypoints.cc index 871cf3c256..0a1d80648d 100644 --- a/runtime/entrypoints/quick/quick_field_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_field_entrypoints.cc @@ -27,7 +27,7 @@ namespace art { extern "C" int8_t artGetByteStaticFromCode(uint32_t field_idx, ArtMethod* referrer, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int8_t)); if (LIKELY(field != nullptr)) { @@ -42,7 +42,7 @@ extern "C" int8_t artGetByteStaticFromCode(uint32_t field_idx, ArtMethod* referr extern "C" uint8_t artGetBooleanStaticFromCode(uint32_t field_idx, ArtMethod* referrer, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int8_t)); if (LIKELY(field != nullptr)) { @@ -57,7 +57,7 @@ extern "C" uint8_t artGetBooleanStaticFromCode(uint32_t field_idx, ArtMethod* re extern "C" int16_t artGetShortStaticFromCode(uint32_t field_idx, ArtMethod* referrer, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int16_t)); if (LIKELY(field != nullptr)) { @@ -73,7 +73,7 @@ extern "C" int16_t artGetShortStaticFromCode(uint32_t field_idx, ArtMethod* refe extern "C" uint16_t artGetCharStaticFromCode(uint32_t field_idx, ArtMethod* referrer, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int16_t)); if (LIKELY(field != nullptr)) { @@ -89,7 +89,7 @@ extern "C" uint16_t artGetCharStaticFromCode(uint32_t field_idx, extern "C" uint32_t artGet32StaticFromCode(uint32_t field_idx, ArtMethod* referrer, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int32_t)); if (LIKELY(field != nullptr)) { @@ -105,7 +105,7 @@ extern "C" uint32_t artGet32StaticFromCode(uint32_t field_idx, extern "C" uint64_t artGet64StaticFromCode(uint32_t field_idx, ArtMethod* referrer, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int64_t)); if (LIKELY(field != nullptr)) { @@ -121,7 +121,7 @@ extern "C" uint64_t artGet64StaticFromCode(uint32_t field_idx, extern "C" mirror::Object* artGetObjStaticFromCode(uint32_t field_idx, ArtMethod* referrer, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, StaticObjectRead, sizeof(mirror::HeapReference<mirror::Object>)); @@ -138,7 +138,7 @@ extern "C" mirror::Object* artGetObjStaticFromCode(uint32_t field_idx, extern "C" int8_t artGetByteInstanceFromCode(uint32_t field_idx, mirror::Object* obj, ArtMethod* referrer, Thread* self) - 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int8_t)); if (LIKELY(field != nullptr && obj != nullptr)) { @@ -158,7 +158,7 @@ extern "C" int8_t artGetByteInstanceFromCode(uint32_t field_idx, mirror::Object* extern "C" uint8_t artGetBooleanInstanceFromCode(uint32_t field_idx, mirror::Object* obj, ArtMethod* referrer, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int8_t)); if (LIKELY(field != nullptr && obj != nullptr)) { @@ -177,7 +177,7 @@ extern "C" uint8_t artGetBooleanInstanceFromCode(uint32_t field_idx, mirror::Obj } extern "C" int16_t artGetShortInstanceFromCode(uint32_t field_idx, mirror::Object* obj, ArtMethod* referrer, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int16_t)); if (LIKELY(field != nullptr && obj != nullptr)) { @@ -197,7 +197,7 @@ extern "C" int16_t artGetShortInstanceFromCode(uint32_t field_idx, mirror::Objec extern "C" uint16_t artGetCharInstanceFromCode(uint32_t field_idx, mirror::Object* obj, ArtMethod* referrer, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int16_t)); if (LIKELY(field != nullptr && obj != nullptr)) { @@ -217,7 +217,7 @@ extern "C" uint16_t artGetCharInstanceFromCode(uint32_t field_idx, mirror::Objec extern "C" uint32_t artGet32InstanceFromCode(uint32_t field_idx, mirror::Object* obj, ArtMethod* referrer, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int32_t)); if (LIKELY(field != nullptr && obj != nullptr)) { @@ -237,7 +237,7 @@ extern "C" uint32_t artGet32InstanceFromCode(uint32_t field_idx, mirror::Object* extern "C" uint64_t artGet64InstanceFromCode(uint32_t field_idx, mirror::Object* obj, ArtMethod* referrer, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int64_t)); if (LIKELY(field != nullptr && obj != nullptr)) { @@ -258,7 +258,7 @@ extern "C" uint64_t artGet64InstanceFromCode(uint32_t field_idx, mirror::Object* extern "C" mirror::Object* artGetObjInstanceFromCode(uint32_t field_idx, mirror::Object* obj, ArtMethod* referrer, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, InstanceObjectRead, sizeof(mirror::HeapReference<mirror::Object>)); @@ -279,7 +279,7 @@ extern "C" mirror::Object* artGetObjInstanceFromCode(uint32_t field_idx, mirror: extern "C" int artSet8StaticFromCode(uint32_t field_idx, uint32_t new_value, ArtMethod* referrer, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { 
ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int8_t)); if (LIKELY(field != nullptr)) { @@ -310,7 +310,7 @@ extern "C" int artSet8StaticFromCode(uint32_t field_idx, uint32_t new_value, extern "C" int artSet16StaticFromCode(uint32_t field_idx, uint16_t new_value, ArtMethod* referrer, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int16_t)); if (LIKELY(field != nullptr)) { @@ -341,7 +341,7 @@ extern "C" int artSet16StaticFromCode(uint32_t field_idx, uint16_t new_value, extern "C" int artSet32StaticFromCode(uint32_t field_idx, uint32_t new_value, ArtMethod* referrer, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int32_t)); if (LIKELY(field != nullptr)) { @@ -360,7 +360,7 @@ extern "C" int artSet32StaticFromCode(uint32_t field_idx, uint32_t new_value, extern "C" int artSet64StaticFromCode(uint32_t field_idx, ArtMethod* referrer, uint64_t new_value, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int64_t)); if (LIKELY(field != nullptr)) { @@ -379,7 +379,7 @@ extern "C" int artSet64StaticFromCode(uint32_t field_idx, ArtMethod* referrer, extern "C" int artSetObjStaticFromCode(uint32_t field_idx, mirror::Object* new_value, ArtMethod* referrer, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, StaticObjectWrite, sizeof(mirror::HeapReference<mirror::Object>)); @@ -402,7 +402,7 @@ extern "C" int artSetObjStaticFromCode(uint32_t field_idx, mirror::Object* new_v extern "C" int artSet8InstanceFromCode(uint32_t field_idx, mirror::Object* obj, uint8_t new_value, ArtMethod* referrer, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int8_t)); if (LIKELY(field != nullptr && obj != nullptr)) { @@ -441,7 +441,7 @@ extern "C" int artSet8InstanceFromCode(uint32_t field_idx, mirror::Object* obj, extern "C" int artSet16InstanceFromCode(uint32_t field_idx, mirror::Object* obj, uint16_t new_value, ArtMethod* referrer, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int16_t)); if (LIKELY(field != nullptr && obj != nullptr)) { @@ -481,7 +481,7 @@ extern "C" int artSet16InstanceFromCode(uint32_t field_idx, mirror::Object* obj, extern "C" int artSet32InstanceFromCode(uint32_t field_idx, mirror::Object* obj, uint32_t new_value, ArtMethod* referrer, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int32_t)); if (LIKELY(field != nullptr && obj != 
nullptr)) { @@ -509,7 +509,7 @@ extern "C" int artSet32InstanceFromCode(uint32_t field_idx, mirror::Object* obj, extern "C" int artSet64InstanceFromCode(uint32_t field_idx, mirror::Object* obj, uint64_t new_value, ArtMethod* referrer, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int64_t)); if (LIKELY(field != nullptr && obj != nullptr)) { @@ -534,7 +534,7 @@ extern "C" int artSet64InstanceFromCode(uint32_t field_idx, mirror::Object* obj, extern "C" int artSetObjInstanceFromCode(uint32_t field_idx, mirror::Object* obj, mirror::Object* new_value, ArtMethod* referrer, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, InstanceObjectWrite, sizeof(mirror::HeapReference<mirror::Object>)); @@ -557,4 +557,16 @@ extern "C" int artSetObjInstanceFromCode(uint32_t field_idx, mirror::Object* obj return -1; // failure } +// TODO: Currently the read barrier does not have a fast path. Ideally the slow path should only +// take one parameter "ref", which is generated by the fast path. +extern "C" mirror::Object* artReadBarrierSlow(mirror::Object* ref ATTRIBUTE_UNUSED, + mirror::Object* obj, uint32_t offset) { + DCHECK(kUseReadBarrier); + uint8_t* raw_addr = reinterpret_cast<uint8_t*>(obj) + offset; + mirror::HeapReference<mirror::Object>* ref_addr = + reinterpret_cast<mirror::HeapReference<mirror::Object>*>(raw_addr); + return ReadBarrier::Barrier<mirror::Object, kWithReadBarrier, true>(obj, MemberOffset(offset), + ref_addr); +} + } // namespace art diff --git a/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc b/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc index d3991cdb78..22b2fa3f45 100644 --- a/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc @@ -26,7 +26,7 @@ namespace art { */ extern "C" int artHandleFillArrayDataFromCode(uint32_t payload_offset, mirror::Array* array, ArtMethod* method, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); const uint16_t* const insns = method->GetCodeItem()->insns_; const Instruction::ArrayDataPayload* payload = diff --git a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc index 2b5c15bcbd..ad5ee8475e 100644 --- a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc @@ -28,7 +28,7 @@ extern "C" const void* artInstrumentationMethodEntryFromCode(ArtMethod* method, mirror::Object* this_object, Thread* self, uintptr_t lr) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // Instrumentation changes the stack. Thus, when exiting, the stack cannot be verified, so skip // that part. 
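The artReadBarrierSlow entrypoint added to quick_field_entrypoints.cc above is the one functional addition in that file; the rest is the annotation rename. As a hedged illustration only (ReadObjectFieldWithBarrier is a hypothetical helper, not ART or generated code), a reference-field load routed through the new entrypoint could look like the sketch below; per the TODO, no fast path exists yet, so the slow call performs the whole barrier.

    // Sketch: assumes the usual ART runtime headers (mirror/object.h etc.) are available.
    static mirror::Object* ReadObjectFieldWithBarrier(mirror::Object* obj, uint32_t offset)
        SHARED_REQUIRES(Locks::mutator_lock_) {
      uint8_t* raw_addr = reinterpret_cast<uint8_t*>(obj) + offset;
      auto* ref_addr = reinterpret_cast<mirror::HeapReference<mirror::Object>*>(raw_addr);
      mirror::Object* ref = ref_addr->AsMirrorPtr();  // plain load: the would-be fast path
      if (kUseReadBarrier) {
        ref = artReadBarrierSlow(ref, obj, offset);   // full barrier via the new entrypoint
      }
      return ref;
    }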
ScopedQuickEntrypointChecks sqec(self, kIsDebugBuild, false); @@ -50,7 +50,7 @@ extern "C" const void* artInstrumentationMethodEntryFromCode(ArtMethod* method, extern "C" TwoWordReturn artInstrumentationMethodExitFromCode(Thread* self, ArtMethod** sp, uint64_t gpr_result, uint64_t fpr_result) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // Compute address of return PC and sanity check that it currently holds 0. size_t return_pc_offset = GetCalleeSaveReturnPcOffset(kRuntimeISA, Runtime::kRefsOnly); uintptr_t* return_pc = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(sp) + diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc index de225ad8e8..f69c39e8bc 100644 --- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc @@ -63,7 +63,7 @@ static void GoToRunnable(Thread* self) NO_THREAD_SAFETY_ANALYSIS { } static void PopLocalReferences(uint32_t saved_local_ref_cookie, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { JNIEnvExt* env = self->GetJniEnv(); env->locals.SetSegmentState(env->local_ref_cookie); env->local_ref_cookie = saved_local_ref_cookie; diff --git a/runtime/entrypoints/quick/quick_lock_entrypoints.cc b/runtime/entrypoints/quick/quick_lock_entrypoints.cc index 4423c08288..3bf001e249 100644 --- a/runtime/entrypoints/quick/quick_lock_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_lock_entrypoints.cc @@ -21,7 +21,7 @@ namespace art { extern "C" int artLockObjectFromCode(mirror::Object* obj, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS /* EXCLUSIVE_LOCK_FUNCTION(Monitor::monitor_lock_) */ { ScopedQuickEntrypointChecks sqec(self); if (UNLIKELY(obj == nullptr)) { @@ -41,7 +41,7 @@ extern "C" int artLockObjectFromCode(mirror::Object* obj, Thread* self) } extern "C" int artUnlockObjectFromCode(mirror::Object* obj, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS /* UNLOCK_FUNCTION(Monitor::monitor_lock_) */ { ScopedQuickEntrypointChecks sqec(self); if (UNLIKELY(obj == nullptr)) { diff --git a/runtime/entrypoints/quick/quick_thread_entrypoints.cc b/runtime/entrypoints/quick/quick_thread_entrypoints.cc index 87e0c6eecd..47b3eff40d 100644 --- a/runtime/entrypoints/quick/quick_thread_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_thread_entrypoints.cc @@ -19,7 +19,7 @@ namespace art { -extern "C" void artTestSuspendFromCode(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +extern "C" void artTestSuspendFromCode(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) { // Called when suspend count check value is 0 and thread->suspend_count_ != 0 ScopedQuickEntrypointChecks sqec(self); self->CheckSuspend(); diff --git a/runtime/entrypoints/quick/quick_throw_entrypoints.cc b/runtime/entrypoints/quick/quick_throw_entrypoints.cc index f22edc1b9e..5a82b3ae2e 100644 --- a/runtime/entrypoints/quick/quick_throw_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_throw_entrypoints.cc @@ -25,14 +25,14 @@ namespace art { // Deliver an exception that's pending on thread helping set up a callee save frame on the way. 
extern "C" NO_RETURN void artDeliverPendingExceptionFromCode(Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); self->QuickDeliverException(); } // Called by generated call to throw an exception. extern "C" NO_RETURN void artDeliverExceptionFromCode(mirror::Throwable* exception, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { /* * exception may be null, in which case this routine should * throw NPE. NOTE: this is a convenience for generated code, @@ -51,7 +51,7 @@ extern "C" NO_RETURN void artDeliverExceptionFromCode(mirror::Throwable* excepti // Called by generated call to throw a NPE exception. extern "C" NO_RETURN void artThrowNullPointerExceptionFromCode(Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); self->NoteSignalBeingHandled(); ThrowNullPointerExceptionFromDexPC(); @@ -61,7 +61,7 @@ extern "C" NO_RETURN void artThrowNullPointerExceptionFromCode(Thread* self) // Called by generated call to throw an arithmetic divide by zero exception. extern "C" NO_RETURN void artThrowDivZeroFromCode(Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ThrowArithmeticExceptionDivideByZero(); self->QuickDeliverException(); @@ -69,14 +69,14 @@ extern "C" NO_RETURN void artThrowDivZeroFromCode(Thread* self) // Called by generated call to throw an array index out of bounds exception. extern "C" NO_RETURN void artThrowArrayBoundsFromCode(int index, int length, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ThrowArrayIndexOutOfBoundsException(index, length); self->QuickDeliverException(); } extern "C" NO_RETURN void artThrowStackOverflowFromCode(Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); self->NoteSignalBeingHandled(); ThrowStackOverflowError(self); @@ -85,7 +85,7 @@ extern "C" NO_RETURN void artThrowStackOverflowFromCode(Thread* self) } extern "C" NO_RETURN void artThrowNoSuchMethodFromCode(int32_t method_idx, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ThrowNoSuchMethodError(method_idx); self->QuickDeliverException(); @@ -94,7 +94,7 @@ extern "C" NO_RETURN void artThrowNoSuchMethodFromCode(int32_t method_idx, Threa extern "C" NO_RETURN void artThrowClassCastException(mirror::Class* dest_type, mirror::Class* src_type, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); DCHECK(!dest_type->IsAssignableFrom(src_type)); ThrowClassCastException(dest_type, src_type); @@ -103,7 +103,7 @@ extern "C" NO_RETURN void artThrowClassCastException(mirror::Class* dest_type, extern "C" NO_RETURN void artThrowArrayStoreException(mirror::Object* array, mirror::Object* value, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ThrowArrayStoreException(value->GetClass(), array->GetClass()); self->QuickDeliverException(); diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc 
b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc index 4f76ebdd40..6fe2bb61e0 100644 --- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc @@ -280,7 +280,7 @@ class QuickArgumentVisitor { // kRefAndArgs runtime method. Since 'this' is a reference, it is located in the // 1st GPR. static mirror::Object* GetProxyThisObject(ArtMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { CHECK((*sp)->IsProxyMethod()); CHECK_EQ(kQuickCalleeSaveFrame_RefAndArgs_FrameSize, (*sp)->GetFrameSizeInBytes()); CHECK_GT(kNumQuickGprArgs, 0u); @@ -291,19 +291,19 @@ class QuickArgumentVisitor { return reinterpret_cast<StackReference<mirror::Object>*>(this_arg_address)->AsMirrorPtr(); } - static ArtMethod* GetCallingMethod(ArtMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + static ArtMethod* GetCallingMethod(ArtMethod** sp) SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK((*sp)->IsCalleeSaveMethod()); return GetCalleeSaveMethodCaller(sp, Runtime::kRefsAndArgs); } - static ArtMethod* GetOuterMethod(ArtMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + static ArtMethod* GetOuterMethod(ArtMethod** sp) SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK((*sp)->IsCalleeSaveMethod()); uint8_t* previous_sp = reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize; return *reinterpret_cast<ArtMethod**>(previous_sp); } - static uint32_t GetCallingDexPc(ArtMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + static uint32_t GetCallingDexPc(ArtMethod** sp) SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK((*sp)->IsCalleeSaveMethod()); const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA, Runtime::kRefsAndArgs); ArtMethod** caller_sp = reinterpret_cast<ArtMethod**>( @@ -329,14 +329,14 @@ class QuickArgumentVisitor { } // For the given quick ref and args quick frame, return the caller's PC. - static uintptr_t GetCallingPc(ArtMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + static uintptr_t GetCallingPc(ArtMethod** sp) SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK((*sp)->IsCalleeSaveMethod()); uint8_t* lr = reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_LrOffset; return *reinterpret_cast<uintptr_t*>(lr); } QuickArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty, - uint32_t shorty_len) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : + uint32_t shorty_len) SHARED_REQUIRES(Locks::mutator_lock_) : is_static_(is_static), shorty_(shorty), shorty_len_(shorty_len), gpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset), fpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset), @@ -421,7 +421,7 @@ class QuickArgumentVisitor { } } - void VisitArguments() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void VisitArguments() SHARED_REQUIRES(Locks::mutator_lock_) { // (a) 'stack_args_' should point to the first method's argument // (b) whatever the argument type it is, the 'stack_index_' should // be moved forward along with every visiting. @@ -571,7 +571,7 @@ class QuickArgumentVisitor { // Returns the 'this' object of a proxy method. This function is only used by StackVisitor. It // allows to use the QuickArgumentVisitor constants without moving all the code in its own module. 
extern "C" mirror::Object* artQuickGetProxyThisObject(ArtMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return QuickArgumentVisitor::GetProxyThisObject(sp); } @@ -582,7 +582,7 @@ class BuildQuickShadowFrameVisitor FINAL : public QuickArgumentVisitor { uint32_t shorty_len, ShadowFrame* sf, size_t first_arg_reg) : QuickArgumentVisitor(sp, is_static, shorty, shorty_len), sf_(sf), cur_reg_(first_arg_reg) {} - void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE; + void Visit() SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE; private: ShadowFrame* const sf_; @@ -625,7 +625,7 @@ void BuildQuickShadowFrameVisitor::Visit() { } extern "C" uint64_t artQuickToInterpreterBridge(ArtMethod* method, Thread* self, ArtMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // Ensure we don't get thread suspension until the object arguments are safely in the shadow // frame. ScopedQuickEntrypointChecks sqec(self); @@ -692,9 +692,9 @@ class BuildQuickArgumentVisitor FINAL : public QuickArgumentVisitor { ScopedObjectAccessUnchecked* soa, std::vector<jvalue>* args) : QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa), args_(args) {} - void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE; + void Visit() SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE; - void FixupReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void FixupReferences() SHARED_REQUIRES(Locks::mutator_lock_); private: ScopedObjectAccessUnchecked* const soa_; @@ -753,7 +753,7 @@ void BuildQuickArgumentVisitor::FixupReferences() { // field within the proxy object, which will box the primitive arguments and deal with error cases. extern "C" uint64_t artQuickProxyInvokeHandler( ArtMethod* proxy_method, mirror::Object* receiver, Thread* self, ArtMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(proxy_method->IsProxyMethod()) << PrettyMethod(proxy_method); DCHECK(receiver->GetClass()->IsProxyClass()) << PrettyMethod(proxy_method); // Ensure we don't get thread suspension until the object arguments are safely in jobjects. @@ -809,9 +809,9 @@ class RememberForGcArgumentVisitor FINAL : public QuickArgumentVisitor { uint32_t shorty_len, ScopedObjectAccessUnchecked* soa) : QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa) {} - void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE; + void Visit() SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE; - void FixupReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void FixupReferences() SHARED_REQUIRES(Locks::mutator_lock_); private: ScopedObjectAccessUnchecked* const soa_; @@ -842,7 +842,7 @@ void RememberForGcArgumentVisitor::FixupReferences() { // Lazily resolve a method for quick. Called by stub code. extern "C" const void* artQuickResolutionTrampoline( ArtMethod* called, mirror::Object* receiver, Thread* self, ArtMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // The resolution trampoline stashes the resolved method into the callee-save frame to transport // it. Thus, when exiting, the stack cannot be verified (as the resolved method most likely // does not have the same stack layout as the callee-save method). 
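The rename running through these entrypoint files — SHARED_LOCKS_REQUIRED becoming SHARED_REQUIRES, with later hunks likewise turning EXCLUSIVE_LOCKS_REQUIRED into REQUIRES and LOCKS_EXCLUDED into REQUIRES(!...) — tracks Clang's capability-based thread-safety attributes. A minimal sketch of how such macros can be defined (illustrative only; the real definitions live in art/runtime/base/macros.h and may differ in detail):

    #if defined(__clang__)
    #define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
    #else
    #define THREAD_ANNOTATION_ATTRIBUTE__(x)  // annotations compile away elsewhere
    #endif

    // Exclusive requirement; REQUIRES(!mu) additionally expresses "must not hold mu",
    // which is what the former LOCKS_EXCLUDED(mu) conveyed.
    #define REQUIRES(...) THREAD_ANNOTATION_ATTRIBUTE__(requires_capability(__VA_ARGS__))

    // Shared (reader) requirement, replacing SHARED_LOCKS_REQUIRED.
    #define SHARED_REQUIRES(...) THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__))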
@@ -1196,7 +1196,7 @@ template<class T> class BuildNativeCallFrameStateMachine { return gpr_index_ > 0; } - void AdvanceHandleScope(mirror::Object* ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void AdvanceHandleScope(mirror::Object* ptr) SHARED_REQUIRES(Locks::mutator_lock_) { uintptr_t handle = PushHandle(ptr); if (HaveHandleScopeGpr()) { gpr_index_--; @@ -1384,7 +1384,7 @@ template<class T> class BuildNativeCallFrameStateMachine { void PushStack(uintptr_t val) { delegate_->PushStack(val); } - uintptr_t PushHandle(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + uintptr_t PushHandle(mirror::Object* ref) SHARED_REQUIRES(Locks::mutator_lock_) { return delegate_->PushHandle(ref); } @@ -1443,11 +1443,11 @@ class ComputeNativeCallFrameSize { } virtual void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { UNUSED(sm); } - void Walk(const char* shorty, uint32_t shorty_len) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void Walk(const char* shorty, uint32_t shorty_len) SHARED_REQUIRES(Locks::mutator_lock_) { BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize> sm(this); WalkHeader(&sm); @@ -1519,7 +1519,7 @@ class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize { // // Note: assumes ComputeAll() has been run before. void LayoutCalleeSaveFrame(Thread* self, ArtMethod*** m, void* sp, HandleScope** handle_scope) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod* method = **m; DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), sizeof(void*)); @@ -1560,7 +1560,7 @@ class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize { // Re-layout the callee-save frame (insert a handle-scope). Then add space for the cookie. // Returns the new bottom. Note: this may be unaligned. uint8_t* LayoutJNISaveFrame(Thread* self, ArtMethod*** m, void* sp, HandleScope** handle_scope) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // First, fix up the layout of the callee-save frame. // We have to squeeze in the HandleScope, and relocate the method pointer. LayoutCalleeSaveFrame(self, m, sp, handle_scope); @@ -1578,7 +1578,7 @@ class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize { uint8_t* ComputeLayout(Thread* self, ArtMethod*** m, const char* shorty, uint32_t shorty_len, HandleScope** handle_scope, uintptr_t** start_stack, uintptr_t** start_gpr, uint32_t** start_fpr) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { Walk(shorty, shorty_len); // JNI part. @@ -1594,7 +1594,7 @@ class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize { // Add JNIEnv* and jobj/jclass before the shorty-derived elements. 
void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); private: uint32_t num_handle_scope_references_; @@ -1650,7 +1650,7 @@ class FillNativeCall { cur_stack_arg_++; } - virtual uintptr_t PushHandle(mirror::Object*) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + virtual uintptr_t PushHandle(mirror::Object*) SHARED_REQUIRES(Locks::mutator_lock_) { LOG(FATAL) << "(Non-JNI) Native call does not use handles."; UNREACHABLE(); } @@ -1688,16 +1688,16 @@ class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor { } } - void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE; + void Visit() SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE; - void FinalizeHandleScope(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void FinalizeHandleScope(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_); StackReference<mirror::Object>* GetFirstHandleScopeEntry() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return handle_scope_->GetHandle(0).GetReference(); } - jobject GetFirstHandleScopeJObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + jobject GetFirstHandleScopeJObject() const SHARED_REQUIRES(Locks::mutator_lock_) { return handle_scope_->GetHandle(0).ToJObject(); } @@ -1713,7 +1713,7 @@ class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor { HandleScope* handle_scope) : FillNativeCall(gpr_regs, fpr_regs, stack_args), handle_scope_(handle_scope), cur_entry_(0) {} - uintptr_t PushHandle(mirror::Object* ref) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + uintptr_t PushHandle(mirror::Object* ref) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args, HandleScope* scope) { FillNativeCall::Reset(gpr_regs, fpr_regs, stack_args); @@ -1721,7 +1721,7 @@ class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor { cur_entry_ = 0U; } - void ResetRemainingScopeSlots() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void ResetRemainingScopeSlots() SHARED_REQUIRES(Locks::mutator_lock_) { // Initialize padding entries. size_t expected_slots = handle_scope_->NumberOfReferences(); while (cur_entry_ < expected_slots) { @@ -1841,7 +1841,7 @@ void artQuickGenericJniEndJNINonRef(Thread* self, uint32_t cookie, jobject lock) * 2) An error, if the value is negative. */ extern "C" TwoWordReturn artQuickGenericJniTrampoline(Thread* self, ArtMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod* called = *sp; DCHECK(called->IsNative()) << PrettyMethod(called, true); uint32_t shorty_len = 0; @@ -1914,7 +1914,7 @@ extern "C" TwoWordReturn artQuickGenericJniTrampoline(Thread* self, ArtMethod** * unlocking. */ extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self, jvalue result, uint64_t result_f) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrame(); uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp); ArtMethod* called = *sp; @@ -1971,7 +1971,7 @@ extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self, jvalue result, // for the method pointer. // // It is valid to use this, as at the usage points here (returns from C functions) we are assuming -// to hold the mutator lock (see SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) annotations). 
+// to hold the mutator lock (see SHARED_REQUIRES(Locks::mutator_lock_) annotations). template<InvokeType type, bool access_check> static TwoWordReturn artInvokeCommon(uint32_t method_idx, mirror::Object* this_object, Thread* self, @@ -2013,7 +2013,7 @@ static TwoWordReturn artInvokeCommon(uint32_t method_idx, mirror::Object* this_o // Explicit artInvokeCommon template function declarations to please analysis tool. #define EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(type, access_check) \ - template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) \ + template SHARED_REQUIRES(Locks::mutator_lock_) \ TwoWordReturn artInvokeCommon<type, access_check>( \ uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp) @@ -2032,31 +2032,31 @@ EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, true); // See comments in runtime_support_asm.S extern "C" TwoWordReturn artInvokeInterfaceTrampolineWithAccessCheck( uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return artInvokeCommon<kInterface, true>(method_idx, this_object, self, sp); } extern "C" TwoWordReturn artInvokeDirectTrampolineWithAccessCheck( uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return artInvokeCommon<kDirect, true>(method_idx, this_object, self, sp); } extern "C" TwoWordReturn artInvokeStaticTrampolineWithAccessCheck( uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return artInvokeCommon<kStatic, true>(method_idx, this_object, self, sp); } extern "C" TwoWordReturn artInvokeSuperTrampolineWithAccessCheck( uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return artInvokeCommon<kSuper, true>(method_idx, this_object, self, sp); } extern "C" TwoWordReturn artInvokeVirtualTrampolineWithAccessCheck( uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return artInvokeCommon<kVirtual, true>(method_idx, this_object, self, sp); } @@ -2064,7 +2064,7 @@ extern "C" TwoWordReturn artInvokeVirtualTrampolineWithAccessCheck( extern "C" TwoWordReturn artInvokeInterfaceTrampoline(uint32_t dex_method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); // The optimizing compiler currently does not inline methods that have an interface // invocation. 
We use the outer method directly to avoid fetching a stack map, which is diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc index c05c93555c..f7a3cd53cd 100644 --- a/runtime/entrypoints_order_test.cc +++ b/runtime/entrypoints_order_test.cc @@ -311,8 +311,9 @@ class EntrypointsOrderTest : public CommonRuntimeTest { sizeof(void*)); EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pNewStringFromStringBuilder, pReadBarrierJni, sizeof(void*)); + EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pReadBarrierJni, pReadBarrierSlow, sizeof(void*)); - CHECKED(OFFSETOF_MEMBER(QuickEntryPoints, pReadBarrierJni) + CHECKED(OFFSETOF_MEMBER(QuickEntryPoints, pReadBarrierSlow) + sizeof(void*) == sizeof(QuickEntryPoints), QuickEntryPoints_all); } }; diff --git a/runtime/gc/accounting/atomic_stack.h b/runtime/gc/accounting/atomic_stack.h index 93f32e8d2e..55b1772675 100644 --- a/runtime/gc/accounting/atomic_stack.h +++ b/runtime/gc/accounting/atomic_stack.h @@ -74,12 +74,12 @@ class AtomicStack { // Beware: Mixing atomic pushes and atomic pops will cause ABA problem. // Returns false if we overflowed the stack. - bool AtomicPushBackIgnoreGrowthLimit(T* value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool AtomicPushBackIgnoreGrowthLimit(T* value) SHARED_REQUIRES(Locks::mutator_lock_) { return AtomicPushBackInternal(value, capacity_); } // Returns false if we overflowed the stack. - bool AtomicPushBack(T* value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool AtomicPushBack(T* value) SHARED_REQUIRES(Locks::mutator_lock_) { return AtomicPushBackInternal(value, growth_limit_); } @@ -87,7 +87,7 @@ class AtomicStack { // slots. Returns false if we overflowed the stack. bool AtomicBumpBack(size_t num_slots, StackReference<T>** start_address, StackReference<T>** end_address) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (kIsDebugBuild) { debug_is_sorted_ = false; } @@ -113,7 +113,7 @@ class AtomicStack { return true; } - void AssertAllZero() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void AssertAllZero() SHARED_REQUIRES(Locks::mutator_lock_) { if (kIsDebugBuild) { for (size_t i = 0; i < capacity_; ++i) { DCHECK_EQ(begin_[i].AsMirrorPtr(), static_cast<T*>(nullptr)) << "i=" << i; @@ -121,7 +121,7 @@ class AtomicStack { } } - void PushBack(T* value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void PushBack(T* value) SHARED_REQUIRES(Locks::mutator_lock_) { if (kIsDebugBuild) { debug_is_sorted_ = false; } @@ -131,7 +131,7 @@ class AtomicStack { begin_[index].Assign(value); } - T* PopBack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + T* PopBack() SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK_GT(back_index_.LoadRelaxed(), front_index_.LoadRelaxed()); // Decrement the back index non atomically. back_index_.StoreRelaxed(back_index_.LoadRelaxed() - 1); @@ -194,12 +194,12 @@ class AtomicStack { } } - bool ContainsSorted(const T* value) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool ContainsSorted(const T* value) const SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(debug_is_sorted_); return std::binary_search(Begin(), End(), value, ObjectComparator()); } - bool Contains(const T* value) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool Contains(const T* value) const SHARED_REQUIRES(Locks::mutator_lock_) { for (auto cur = Begin(), end = End(); cur != end; ++cur) { if (cur->AsMirrorPtr() == value) { return true; @@ -221,7 +221,7 @@ class AtomicStack { // Returns false if we overflowed the stack. 
bool AtomicPushBackInternal(T* value, size_t limit) ALWAYS_INLINE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (kIsDebugBuild) { debug_is_sorted_ = false; } diff --git a/runtime/gc/accounting/card_table.h b/runtime/gc/accounting/card_table.h index 34e6aa31f2..88a6c6c6e4 100644 --- a/runtime/gc/accounting/card_table.h +++ b/runtime/gc/accounting/card_table.h @@ -107,8 +107,8 @@ class CardTable { size_t Scan(SpaceBitmap<kObjectAlignment>* bitmap, uint8_t* scan_begin, uint8_t* scan_end, const Visitor& visitor, const uint8_t minimum_age = kCardDirty) const - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::heap_bitmap_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Assertion used to check the given address is covered by the card table void CheckAddrIsInCardTable(const uint8_t* addr) const; diff --git a/runtime/gc/accounting/heap_bitmap.h b/runtime/gc/accounting/heap_bitmap.h index 1648aef51f..0b96979a30 100644 --- a/runtime/gc/accounting/heap_bitmap.h +++ b/runtime/gc/accounting/heap_bitmap.h @@ -35,34 +35,34 @@ namespace accounting { class HeapBitmap { public: - bool Test(const mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); - void Clear(const mirror::Object* obj) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + bool Test(const mirror::Object* obj) SHARED_REQUIRES(Locks::heap_bitmap_lock_); + void Clear(const mirror::Object* obj) REQUIRES(Locks::heap_bitmap_lock_); template<typename LargeObjectSetVisitor> bool Set(const mirror::Object* obj, const LargeObjectSetVisitor& visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) ALWAYS_INLINE; + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::heap_bitmap_lock_) ALWAYS_INLINE; template<typename LargeObjectSetVisitor> bool AtomicTestAndSet(const mirror::Object* obj, const LargeObjectSetVisitor& visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) ALWAYS_INLINE; + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::heap_bitmap_lock_) ALWAYS_INLINE; ContinuousSpaceBitmap* GetContinuousSpaceBitmap(const mirror::Object* obj) const; LargeObjectBitmap* GetLargeObjectBitmap(const mirror::Object* obj) const; void Walk(ObjectCallback* callback, void* arg) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + SHARED_REQUIRES(Locks::heap_bitmap_lock_); template <typename Visitor> void Visit(const Visitor& visitor) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::heap_bitmap_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Find and replace a bitmap pointer, this is used by for the bitmap swapping in the GC. void ReplaceBitmap(ContinuousSpaceBitmap* old_bitmap, ContinuousSpaceBitmap* new_bitmap) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + REQUIRES(Locks::heap_bitmap_lock_); // Find and replace a object set pointer, this is used by for the bitmap swapping in the GC. 
void ReplaceLargeObjectBitmap(LargeObjectBitmap* old_bitmap, LargeObjectBitmap* new_bitmap) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + REQUIRES(Locks::heap_bitmap_lock_); explicit HeapBitmap(Heap* heap) : heap_(heap) {} diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc index 009254b8c1..157f609312 100644 --- a/runtime/gc/accounting/mod_union_table.cc +++ b/runtime/gc/accounting/mod_union_table.cc @@ -100,7 +100,7 @@ class ModUnionUpdateObjectReferencesVisitor { // Extra parameters are required since we use this same visitor signature for checking objects. void operator()(Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // Only add the reference if it is non null and fits our criteria. mirror::HeapReference<Object>* const obj_ptr = obj->GetFieldObjectReferenceAddr(offset); mirror::Object* ref = obj_ptr->AsMirrorPtr(); @@ -110,6 +110,18 @@ class ModUnionUpdateObjectReferencesVisitor { } } + void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const + SHARED_REQUIRES(Locks::mutator_lock_) { + if (kIsDebugBuild && !root->IsNull()) { + VisitRoot(root); + } + } + + void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const + SHARED_REQUIRES(Locks::mutator_lock_) { + DCHECK(from_space_->HasAddress(root->AsMirrorPtr())); + } + private: MarkObjectVisitor* const visitor_; // Space which we are scanning @@ -131,8 +143,8 @@ class ModUnionScanImageRootVisitor { contains_reference_to_other_space_(contains_reference_to_other_space) {} void operator()(Object* root) const - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + REQUIRES(Locks::heap_bitmap_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(root != nullptr); ModUnionUpdateObjectReferencesVisitor ref_visitor(visitor_, from_space_, immune_space_, contains_reference_to_other_space_); @@ -163,8 +175,8 @@ class AddToReferenceArrayVisitor { } // Extra parameters are required since we use this same visitor signature for checking objects. - void operator()(Object* obj, MemberOffset offset, bool /*is_static*/) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void operator()(Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const + SHARED_REQUIRES(Locks::mutator_lock_) { mirror::HeapReference<Object>* ref_ptr = obj->GetFieldObjectReferenceAddr(offset); mirror::Object* ref = ref_ptr->AsMirrorPtr(); // Only add the reference if it is non null and fits our criteria. @@ -174,6 +186,18 @@ class AddToReferenceArrayVisitor { } } + void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const + SHARED_REQUIRES(Locks::mutator_lock_) { + if (kIsDebugBuild && !root->IsNull()) { + VisitRoot(root); + } + } + + void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const + SHARED_REQUIRES(Locks::mutator_lock_) { + DCHECK(!mod_union_table_->ShouldAddReference(root->AsMirrorPtr())); + } + private: ModUnionTableReferenceCache* const mod_union_table_; std::vector<mirror::HeapReference<Object>*>* const references_; @@ -188,7 +212,7 @@ class ModUnionReferenceVisitor { } void operator()(Object* obj) const - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) { // We don't have an early exit since we use the visitor pattern, an early // exit should significantly speed this up. 
AddToReferenceArrayVisitor visitor(mod_union_table_, references_); @@ -208,8 +232,8 @@ class CheckReferenceVisitor { } // Extra parameters are required since we use this same visitor signature for checking objects. - void operator()(Object* obj, MemberOffset offset, bool /*is_static*/) const - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) { + void operator()(Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const + SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) { mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset); if (ref != nullptr && mod_union_table_->ShouldAddReference(ref) && references_.find(ref) == references_.end()) { @@ -228,6 +252,18 @@ class CheckReferenceVisitor { } } + void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const + SHARED_REQUIRES(Locks::mutator_lock_) { + if (kIsDebugBuild && !root->IsNull()) { + VisitRoot(root); + } + } + + void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const + SHARED_REQUIRES(Locks::mutator_lock_) { + DCHECK(!mod_union_table_->ShouldAddReference(root->AsMirrorPtr())); + } + private: ModUnionTableReferenceCache* const mod_union_table_; const std::set<const Object*>& references_; @@ -237,7 +273,7 @@ class ModUnionCheckReferences { public: explicit ModUnionCheckReferences(ModUnionTableReferenceCache* mod_union_table, const std::set<const Object*>& references) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + REQUIRES(Locks::heap_bitmap_lock_) : mod_union_table_(mod_union_table), references_(references) { } diff --git a/runtime/gc/accounting/mod_union_table.h b/runtime/gc/accounting/mod_union_table.h index 520cc1cc4c..5888193e7b 100644 --- a/runtime/gc/accounting/mod_union_table.h +++ b/runtime/gc/accounting/mod_union_table.h @@ -82,7 +82,7 @@ class ModUnionTable { // for said cards. Exclusive lock is required since verify sometimes uses // SpaceBitmap::VisitMarkedRange and VisitMarkedRange can't know if the callback will modify the // bitmap or not. - virtual void Verify() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) = 0; + virtual void Verify() REQUIRES(Locks::heap_bitmap_lock_) = 0; // Returns true if a card is marked inside the mod union table. Used for testing. The address // doesn't need to be aligned. @@ -118,21 +118,21 @@ class ModUnionTableReferenceCache : public ModUnionTable { // Update table based on cleared cards and mark all references to the other spaces. void UpdateAndMarkReferences(MarkObjectVisitor* visitor) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::heap_bitmap_lock_); // Exclusive lock is required since verify uses SpaceBitmap::VisitMarkedRange and // VisitMarkedRange can't know if the callback will modify the bitmap or not. void Verify() OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::heap_bitmap_lock_); // Function that tells whether or not to add a reference to the table. 
virtual bool ShouldAddReference(const mirror::Object* ref) const = 0; virtual bool ContainsCardFor(uintptr_t addr) OVERRIDE; - virtual void Dump(std::ostream& os) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + virtual void Dump(std::ostream& os) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); virtual void SetCards() OVERRIDE; @@ -158,8 +158,8 @@ class ModUnionTableCardCache : public ModUnionTable { // Mark all references to the alloc space(s). virtual void UpdateAndMarkReferences(MarkObjectVisitor* visitor) OVERRIDE - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::heap_bitmap_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Nothing to verify. virtual void Verify() OVERRIDE {} diff --git a/runtime/gc/accounting/mod_union_table_test.cc b/runtime/gc/accounting/mod_union_table_test.cc index aad8a25ed3..edab1b0a60 100644 --- a/runtime/gc/accounting/mod_union_table_test.cc +++ b/runtime/gc/accounting/mod_union_table_test.cc @@ -46,7 +46,7 @@ class ModUnionTableTest : public CommonRuntimeTest { } mirror::ObjectArray<mirror::Object>* AllocObjectArray( Thread* self, space::ContinuousMemMapAllocSpace* space, size_t component_count) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { auto* klass = GetObjectArrayClass(self, space); const size_t size = mirror::ComputeArraySize(component_count, 2); size_t bytes_allocated = 0, bytes_tl_bulk_allocated; @@ -67,7 +67,7 @@ class ModUnionTableTest : public CommonRuntimeTest { private: mirror::Class* GetObjectArrayClass(Thread* self, space::ContinuousMemMapAllocSpace* space) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (java_lang_object_array_ == nullptr) { java_lang_object_array_ = Runtime::Current()->GetClassLinker()->GetClassRoot(ClassLinker::kObjectArrayClass); @@ -97,12 +97,12 @@ class CollectVisitedVisitor : public MarkObjectVisitor { public: explicit CollectVisitedVisitor(std::set<mirror::Object*>* out) : out_(out) {} virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* ref) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(ref != nullptr); MarkObject(ref->AsMirrorPtr()); } virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(obj != nullptr); out_->insert(obj); return obj; diff --git a/runtime/gc/accounting/remembered_set.cc b/runtime/gc/accounting/remembered_set.cc index 23ab8df2e1..b9f24f348f 100644 --- a/runtime/gc/accounting/remembered_set.cc +++ b/runtime/gc/accounting/remembered_set.cc @@ -67,8 +67,8 @@ class RememberedSetReferenceVisitor { : collector_(collector), target_space_(target_space), contains_reference_to_target_space_(contains_reference_to_target_space) {} - void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(obj != nullptr); mirror::HeapReference<mirror::Object>* ref_ptr = obj->GetFieldObjectReferenceAddr(offset); if (target_space_->HasAddress(ref_ptr->AsMirrorPtr())) { @@ -79,14 +79,29 @@ class RememberedSetReferenceVisitor { } void operator()(mirror::Class* klass, mirror::Reference* ref) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - 
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) { if (target_space_->HasAddress(ref->GetReferent())) { *contains_reference_to_target_space_ = true; collector_->DelayReferenceReferent(klass, ref); } } + void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const + SHARED_REQUIRES(Locks::mutator_lock_) { + if (!root->IsNull()) { + VisitRoot(root); + } + } + + void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const + SHARED_REQUIRES(Locks::mutator_lock_) { + if (target_space_->HasAddress(root->AsMirrorPtr())) { + *contains_reference_to_target_space_ = true; + root->Assign(collector_->MarkObject(root->AsMirrorPtr())); + DCHECK(!target_space_->HasAddress(root->AsMirrorPtr())); + } + } + private: collector::GarbageCollector* const collector_; space::ContinuousSpace* const target_space_; @@ -101,8 +116,8 @@ class RememberedSetObjectVisitor { : collector_(collector), target_space_(target_space), contains_reference_to_target_space_(contains_reference_to_target_space) {} - void operator()(mirror::Object* obj) const EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void operator()(mirror::Object* obj) const REQUIRES(Locks::heap_bitmap_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) { RememberedSetReferenceVisitor visitor(target_space_, contains_reference_to_target_space_, collector_); obj->VisitReferences<kMovingClasses>(visitor, visitor); diff --git a/runtime/gc/accounting/remembered_set.h b/runtime/gc/accounting/remembered_set.h index affe863daa..3a0dcf798d 100644 --- a/runtime/gc/accounting/remembered_set.h +++ b/runtime/gc/accounting/remembered_set.h @@ -56,8 +56,8 @@ class RememberedSet { // Mark through all references to the target space. void UpdateAndMarkReferences(space::ContinuousSpace* target_space, collector::GarbageCollector* collector) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::heap_bitmap_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); void Dump(std::ostream& os); diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc index cdeaa50cee..7914b66769 100644 --- a/runtime/gc/accounting/space_bitmap.cc +++ b/runtime/gc/accounting/space_bitmap.cc @@ -188,7 +188,7 @@ template<size_t kAlignment> void SpaceBitmap<kAlignment>::WalkInstanceFields(SpaceBitmap<kAlignment>* visited, ObjectCallback* callback, mirror::Object* obj, mirror::Class* klass, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // Visit fields of parent classes first. mirror::Class* super = klass->GetSuperClass(); if (super != nullptr) { diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h index e0661b6454..b8ff471c69 100644 --- a/runtime/gc/accounting/space_bitmap.h +++ b/runtime/gc/accounting/space_bitmap.h @@ -123,7 +123,7 @@ class SpaceBitmap { // Visit the live objects in the range [visit_begin, visit_end). // TODO: Use lock annotations when clang is fixed. - // EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + // REQUIRES(Locks::heap_bitmap_lock_) SHARED_REQUIRES(Locks::mutator_lock_); template <typename Visitor> void VisitMarkedRange(uintptr_t visit_begin, uintptr_t visit_end, const Visitor& visitor) const NO_THREAD_SAFETY_ANALYSIS; @@ -131,12 +131,12 @@ class SpaceBitmap { // Visits set bits in address order. 
The callback is not permitted to change the bitmap bits or // max during the traversal. void Walk(ObjectCallback* callback, void* arg) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + SHARED_REQUIRES(Locks::heap_bitmap_lock_); // Visits set bits with an in order traversal. The callback is not permitted to change the bitmap // bits or max during the traversal. void InOrderWalk(ObjectCallback* callback, void* arg) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); + SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_); // Walk through the bitmaps in increasing address order, and find the object pointers that // correspond to garbage objects. Call <callback> zero or more times with lists of these object @@ -204,12 +204,12 @@ class SpaceBitmap { // For an unvisited object, visit it then all its children found via fields. static void WalkFieldsInOrder(SpaceBitmap* visited, ObjectCallback* callback, mirror::Object* obj, - void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void* arg) SHARED_REQUIRES(Locks::mutator_lock_); // Walk instance fields of the given Class. Separate function to allow recursion on the super // class. static void WalkInstanceFields(SpaceBitmap<kAlignment>* visited, ObjectCallback* callback, mirror::Object* obj, mirror::Class* klass, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Backing storage for bitmap. std::unique_ptr<MemMap> mem_map_; diff --git a/runtime/gc/allocation_record.cc b/runtime/gc/allocation_record.cc index 3108b7ca3c..16c9354137 100644 --- a/runtime/gc/allocation_record.cc +++ b/runtime/gc/allocation_record.cc @@ -20,7 +20,7 @@ #include "base/stl_util.h" #include "stack.h" -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ #include "cutils/properties.h" #endif @@ -42,7 +42,7 @@ const char* AllocRecord::GetClassDescriptor(std::string* storage) const { } void AllocRecordObjectMap::SetProperties() { -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ // Check whether there's a system property overriding the max number of records. const char* propertyName = "dalvik.vm.allocTrackerMax"; char allocMaxString[PROPERTY_VALUE_MAX]; @@ -111,8 +111,8 @@ void AllocRecordObjectMap::VisitRoots(RootVisitor* visitor) { } static inline void SweepClassObject(AllocRecord* record, IsMarkedVisitor* visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::alloc_tracker_lock_) { GcRoot<mirror::Class>& klass = record->GetClassGcRoot(); // This does not need a read barrier because this is called by GC. 
mirror::Object* old_object = klass.Read<kWithoutReadBarrier>(); @@ -177,7 +177,7 @@ void AllocRecordObjectMap::DisallowNewAllocationRecords() { struct AllocRecordStackVisitor : public StackVisitor { AllocRecordStackVisitor(Thread* thread, AllocRecordStackTrace* trace_in, size_t max) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames), trace(trace_in), depth(0), diff --git a/runtime/gc/allocation_record.h b/runtime/gc/allocation_record.h index 933363b7fc..0a4f53226d 100644 --- a/runtime/gc/allocation_record.h +++ b/runtime/gc/allocation_record.h @@ -39,7 +39,7 @@ class AllocRecordStackTraceElement { public: AllocRecordStackTraceElement() : method_(nullptr), dex_pc_(0) {} - int32_t ComputeLineNumber() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + int32_t ComputeLineNumber() const SHARED_REQUIRES(Locks::mutator_lock_); ArtMethod* GetMethod() const { return method_; @@ -184,14 +184,14 @@ class AllocRecord { return trace_->GetTid(); } - mirror::Class* GetClass() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Class* GetClass() const SHARED_REQUIRES(Locks::mutator_lock_) { return klass_.Read(); } const char* GetClassDescriptor(std::string* storage) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - GcRoot<mirror::Class>& GetClassGcRoot() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + GcRoot<mirror::Class>& GetClassGcRoot() SHARED_REQUIRES(Locks::mutator_lock_) { return klass_; } @@ -221,12 +221,12 @@ class AllocRecordObjectMap { // in order to make sure the AllocRecordObjectMap object is not null. static void RecordAllocation(Thread* self, mirror::Object* obj, mirror::Class* klass, size_t byte_count) - LOCKS_EXCLUDED(Locks::alloc_tracker_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::alloc_tracker_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); - static void SetAllocTrackingEnabled(bool enabled) LOCKS_EXCLUDED(Locks::alloc_tracker_lock_); + static void SetAllocTrackingEnabled(bool enabled) REQUIRES(!Locks::alloc_tracker_lock_); - AllocRecordObjectMap() EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_) + AllocRecordObjectMap() REQUIRES(Locks::alloc_tracker_lock_) : alloc_record_max_(kDefaultNumAllocRecords), recent_record_max_(kDefaultNumRecentRecords), max_stack_depth_(kDefaultAllocStackDepth), @@ -238,8 +238,8 @@ class AllocRecordObjectMap { ~AllocRecordObjectMap(); void Put(mirror::Object* obj, AllocRecord* record) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::alloc_tracker_lock_) { if (entries_.size() == alloc_record_max_) { delete entries_.front().second; entries_.pop_front(); @@ -247,23 +247,23 @@ class AllocRecordObjectMap { entries_.emplace_back(GcRoot<mirror::Object>(obj), record); } - size_t Size() const SHARED_LOCKS_REQUIRED(Locks::alloc_tracker_lock_) { + size_t Size() const SHARED_REQUIRES(Locks::alloc_tracker_lock_) { return entries_.size(); } - size_t GetRecentAllocationSize() const SHARED_LOCKS_REQUIRED(Locks::alloc_tracker_lock_) { + size_t GetRecentAllocationSize() const SHARED_REQUIRES(Locks::alloc_tracker_lock_) { CHECK_LE(recent_record_max_, alloc_record_max_); size_t sz = entries_.size(); return std::min(recent_record_max_, sz); } void VisitRoots(RootVisitor* visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - 
EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::alloc_tracker_lock_); void SweepAllocationRecords(IsMarkedVisitor* visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::alloc_tracker_lock_); // Allocation tracking could be enabled by user in between DisallowNewAllocationRecords() and // AllowNewAllocationRecords(), in which case new allocation records can be added although they @@ -272,34 +272,34 @@ class AllocRecordObjectMap { // swept from the list. But missing the first few records is acceptable for using the button to // enable allocation tracking. void DisallowNewAllocationRecords() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::alloc_tracker_lock_); void AllowNewAllocationRecords() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::alloc_tracker_lock_); // TODO: Is there a better way to hide the entries_'s type? EntryList::iterator Begin() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::alloc_tracker_lock_) { return entries_.begin(); } EntryList::iterator End() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::alloc_tracker_lock_) { return entries_.end(); } EntryList::reverse_iterator RBegin() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::alloc_tracker_lock_) { return entries_.rbegin(); } EntryList::reverse_iterator REnd() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::alloc_tracker_lock_) { return entries_.rend(); } @@ -318,7 +318,7 @@ class AllocRecordObjectMap { // see the comment in typedef of EntryList EntryList entries_ GUARDED_BY(Locks::alloc_tracker_lock_); - void SetProperties() EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_); + void SetProperties() REQUIRES(Locks::alloc_tracker_lock_); }; } // namespace gc diff --git a/runtime/gc/allocator/dlmalloc.h b/runtime/gc/allocator/dlmalloc.h index 0e91a4372c..0558921a56 100644 --- a/runtime/gc/allocator/dlmalloc.h +++ b/runtime/gc/allocator/dlmalloc.h @@ -35,7 +35,7 @@ #include "../../bionic/libc/upstream-dlmalloc/malloc.h" #pragma GCC diagnostic pop -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ // Define dlmalloc routines from bionic that cannot be included directly because of redefining // symbols from the include above. extern "C" void dlmalloc_inspect_all(void(*handler)(void*, void *, size_t, void*), void* arg); diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc index abaa97f2db..470bc1cb22 100644 --- a/runtime/gc/allocator/rosalloc.cc +++ b/runtime/gc/allocator/rosalloc.cc @@ -1170,7 +1170,7 @@ size_t RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) { // First mark slots to free in the bulk free bit map without locking the // size bracket locks. On host, unordered_set is faster than vector + flag. 
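Alongside the lock annotations, allocation_record.cc, dlmalloc.h and rosalloc.cc (continuing just below) switch their platform guard from the build-system define HAVE_ANDROID_OS to the compiler-predefined __ANDROID__. A hedged sketch of the pattern, reusing the dalvik.vm.allocTrackerMax property read shown earlier (GetAllocTrackerMax is a hypothetical helper, not ART code):

    #include <cstddef>
    #include <cstdlib>
    #ifdef __ANDROID__
    #include "cutils/properties.h"
    static size_t GetAllocTrackerMax(size_t default_max) {
      char value[PROPERTY_VALUE_MAX];
      // property_get returns the length of the value written into the buffer.
      if (property_get("dalvik.vm.allocTrackerMax", value, "") > 0) {
        return static_cast<size_t>(atoi(value));
      }
      return default_max;
    }
    #else
    static size_t GetAllocTrackerMax(size_t default_max) { return default_max; }  // host build
    #endif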
-#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ std::vector<Run*> runs; #else std::unordered_set<Run*, hash_run, eq_run> runs; @@ -1237,7 +1237,7 @@ size_t RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) { DCHECK_EQ(run->magic_num_, kMagicNum); // Set the bit in the bulk free bit map. freed_bytes += run->MarkBulkFreeBitMap(ptr); -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ if (!run->to_be_bulk_freed_) { run->to_be_bulk_freed_ = true; runs.push_back(run); @@ -1252,7 +1252,7 @@ size_t RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) { // union the bulk free bit map into the thread-local free bit map // (for thread-local runs.) for (Run* run : runs) { -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ DCHECK(run->to_be_bulk_freed_); run->to_be_bulk_freed_ = false; #endif diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h index c356a39531..a7f29af274 100644 --- a/runtime/gc/allocator/rosalloc.h +++ b/runtime/gc/allocator/rosalloc.h @@ -51,7 +51,7 @@ class RosAlloc { bool IsFree() const { return !kIsDebugBuild || magic_num_ == kMagicNumFree; } - size_t ByteSize(RosAlloc* rosalloc) const EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) { + size_t ByteSize(RosAlloc* rosalloc) const REQUIRES(rosalloc->lock_) { const uint8_t* fpr_base = reinterpret_cast<const uint8_t*>(this); size_t pm_idx = rosalloc->ToPageMapIndex(fpr_base); size_t byte_size = rosalloc->free_page_run_size_map_[pm_idx]; @@ -60,7 +60,7 @@ class RosAlloc { return byte_size; } void SetByteSize(RosAlloc* rosalloc, size_t byte_size) - EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) { + REQUIRES(rosalloc->lock_) { DCHECK_EQ(byte_size % kPageSize, static_cast<size_t>(0)); uint8_t* fpr_base = reinterpret_cast<uint8_t*>(this); size_t pm_idx = rosalloc->ToPageMapIndex(fpr_base); @@ -69,20 +69,20 @@ class RosAlloc { void* Begin() { return reinterpret_cast<void*>(this); } - void* End(RosAlloc* rosalloc) EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) { + void* End(RosAlloc* rosalloc) REQUIRES(rosalloc->lock_) { uint8_t* fpr_base = reinterpret_cast<uint8_t*>(this); uint8_t* end = fpr_base + ByteSize(rosalloc); return end; } bool IsLargerThanPageReleaseThreshold(RosAlloc* rosalloc) - EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) { + REQUIRES(rosalloc->lock_) { return ByteSize(rosalloc) >= rosalloc->page_release_size_threshold_; } bool IsAtEndOfSpace(RosAlloc* rosalloc) - EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) { + REQUIRES(rosalloc->lock_) { return reinterpret_cast<uint8_t*>(this) + ByteSize(rosalloc) == rosalloc->base_ + rosalloc->footprint_; } - bool ShouldReleasePages(RosAlloc* rosalloc) EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) { + bool ShouldReleasePages(RosAlloc* rosalloc) REQUIRES(rosalloc->lock_) { switch (rosalloc->page_release_mode_) { case kPageReleaseModeNone: return false; @@ -99,7 +99,7 @@ class RosAlloc { return false; } } - void ReleasePages(RosAlloc* rosalloc) EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) { + void ReleasePages(RosAlloc* rosalloc) REQUIRES(rosalloc->lock_) { uint8_t* start = reinterpret_cast<uint8_t*>(this); size_t byte_size = ByteSize(rosalloc); DCHECK_EQ(byte_size % kPageSize, static_cast<size_t>(0)); @@ -254,8 +254,8 @@ class RosAlloc { std::string Dump(); // Verify for debugging. 
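Editor's note: the HAVE_ANDROID_OS guards here become __ANDROID__, the macro predefined by Android toolchains, and they select a different container for the bulk-free run set on device versus host. A minimal sketch of that pattern follows; the types are simplified stand-ins for RosAlloc's Run bookkeeping, not the real implementation.

#include <cstdio>
#include <unordered_set>
#include <vector>

struct Run { bool to_be_bulk_freed = false; };

#ifdef __ANDROID__
using RunSet = std::vector<Run*>;         // On device: vector plus a per-run flag.
#else
using RunSet = std::unordered_set<Run*>;  // On host: hashing is the faster choice here.
#endif

// Queue a run for bulk freeing exactly once, whichever container is in use.
void Collect(RunSet* runs, Run* run) {
#ifdef __ANDROID__
  if (!run->to_be_bulk_freed) {
    run->to_be_bulk_freed = true;
    runs->push_back(run);
  }
#else
  runs->insert(run);
#endif
}

int main() {
  Run a, b;
  RunSet runs;
  Collect(&runs, &a);
  Collect(&runs, &b);
  Collect(&runs, &a);  // Duplicate is filtered either way.
  std::printf("%zu runs queued\n", runs.size());
  return 0;
}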
void Verify(Thread* self, RosAlloc* rosalloc, bool running_on_memory_tool) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_); + REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::thread_list_lock_); private: // The common part of MarkFreeBitMap() and MarkThreadLocalFreeBitMap(). Returns the bracket @@ -512,51 +512,51 @@ class RosAlloc { // Page-granularity alloc/free void* AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type) - EXCLUSIVE_LOCKS_REQUIRED(lock_); + REQUIRES(lock_); // Returns how many bytes were freed. - size_t FreePages(Thread* self, void* ptr, bool already_zero) EXCLUSIVE_LOCKS_REQUIRED(lock_); + size_t FreePages(Thread* self, void* ptr, bool already_zero) REQUIRES(lock_); // Allocate/free a run slot. void* AllocFromRun(Thread* self, size_t size, size_t* bytes_allocated, size_t* usable_size, size_t* bytes_tl_bulk_allocated) - LOCKS_EXCLUDED(lock_); + REQUIRES(!lock_); // Allocate/free a run slot without acquiring locks. - // TODO: EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) + // TODO: REQUIRES(Locks::mutator_lock_) void* AllocFromRunThreadUnsafe(Thread* self, size_t size, size_t* bytes_allocated, size_t* usable_size, size_t* bytes_tl_bulk_allocated) - LOCKS_EXCLUDED(lock_); - void* AllocFromCurrentRunUnlocked(Thread* self, size_t idx); + REQUIRES(!lock_); + void* AllocFromCurrentRunUnlocked(Thread* self, size_t idx) REQUIRES(!lock_); // Returns the bracket size. size_t FreeFromRun(Thread* self, void* ptr, Run* run) - LOCKS_EXCLUDED(lock_); + REQUIRES(!lock_); // Used to allocate a new thread local run for a size bracket. - Run* AllocRun(Thread* self, size_t idx) LOCKS_EXCLUDED(lock_); + Run* AllocRun(Thread* self, size_t idx) REQUIRES(!lock_); // Used to acquire a new/reused run for a size bracket. Used when a // thread-local or current run gets full. - Run* RefillRun(Thread* self, size_t idx) LOCKS_EXCLUDED(lock_); + Run* RefillRun(Thread* self, size_t idx) REQUIRES(!lock_); // The internal of non-bulk Free(). - size_t FreeInternal(Thread* self, void* ptr) LOCKS_EXCLUDED(lock_); + size_t FreeInternal(Thread* self, void* ptr) REQUIRES(!lock_); // Allocates large objects. void* AllocLargeObject(Thread* self, size_t size, size_t* bytes_allocated, size_t* usable_size, size_t* bytes_tl_bulk_allocated) - LOCKS_EXCLUDED(lock_); + REQUIRES(!lock_); // Revoke a run by adding it to non_full_runs_ or freeing the pages. - void RevokeRun(Thread* self, size_t idx, Run* run); + void RevokeRun(Thread* self, size_t idx, Run* run) REQUIRES(!lock_); // Revoke the current runs which share an index with the thread local runs. - void RevokeThreadUnsafeCurrentRuns(); + void RevokeThreadUnsafeCurrentRuns() REQUIRES(!lock_); // Release a range of pages. - size_t ReleasePageRange(uint8_t* start, uint8_t* end) EXCLUSIVE_LOCKS_REQUIRED(lock_); + size_t ReleasePageRange(uint8_t* start, uint8_t* end) REQUIRES(lock_); // Dumps the page map for debugging. 
- std::string DumpPageMap() EXCLUSIVE_LOCKS_REQUIRED(lock_); + std::string DumpPageMap() REQUIRES(lock_); public: RosAlloc(void* base, size_t capacity, size_t max_capacity, @@ -570,11 +570,11 @@ class RosAlloc { template<bool kThreadSafe = true> void* Alloc(Thread* self, size_t size, size_t* bytes_allocated, size_t* usable_size, size_t* bytes_tl_bulk_allocated) - LOCKS_EXCLUDED(lock_); + REQUIRES(!lock_); size_t Free(Thread* self, void* ptr) - LOCKS_EXCLUDED(bulk_free_lock_); + REQUIRES(!bulk_free_lock_, !lock_); size_t BulkFree(Thread* self, void** ptrs, size_t num_ptrs) - LOCKS_EXCLUDED(bulk_free_lock_); + REQUIRES(!bulk_free_lock_, !lock_); // Returns true if the given allocation request can be allocated in // an existing thread local run without allocating a new run. @@ -589,7 +589,7 @@ class RosAlloc { ALWAYS_INLINE size_t MaxBytesBulkAllocatedFor(size_t size); // Returns the size of the allocated slot for a given allocated memory chunk. - size_t UsableSize(const void* ptr); + size_t UsableSize(const void* ptr) REQUIRES(!lock_); // Returns the size of the allocated slot for a given size. size_t UsableSize(size_t bytes) { if (UNLIKELY(bytes > kLargeSizeThreshold)) { @@ -600,33 +600,33 @@ class RosAlloc { } // Try to reduce the current footprint by releasing the free page // run at the end of the memory region, if any. - bool Trim(); + bool Trim() REQUIRES(!lock_); // Iterates over all the memory slots and apply the given function. void InspectAll(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg), void* arg) - LOCKS_EXCLUDED(lock_); + REQUIRES(!lock_); // Release empty pages. - size_t ReleasePages() LOCKS_EXCLUDED(lock_); + size_t ReleasePages() REQUIRES(!lock_); // Returns the current footprint. - size_t Footprint() LOCKS_EXCLUDED(lock_); + size_t Footprint() REQUIRES(!lock_); // Returns the current capacity, maximum footprint. - size_t FootprintLimit() LOCKS_EXCLUDED(lock_); + size_t FootprintLimit() REQUIRES(!lock_); // Update the current capacity. - void SetFootprintLimit(size_t bytes) LOCKS_EXCLUDED(lock_); + void SetFootprintLimit(size_t bytes) REQUIRES(!lock_); // Releases the thread-local runs assigned to the given thread back to the common set of runs. // Returns the total bytes of free slots in the revoked thread local runs. This is to be // subtracted from Heap::num_bytes_allocated_ to cancel out the ahead-of-time counting. - size_t RevokeThreadLocalRuns(Thread* thread); + size_t RevokeThreadLocalRuns(Thread* thread) REQUIRES(!lock_, !bulk_free_lock_); // Releases the thread-local runs assigned to all the threads back to the common set of runs. // Returns the total bytes of free slots in the revoked thread local runs. This is to be // subtracted from Heap::num_bytes_allocated_ to cancel out the ahead-of-time counting. - size_t RevokeAllThreadLocalRuns() LOCKS_EXCLUDED(Locks::thread_list_lock_); + size_t RevokeAllThreadLocalRuns() REQUIRES(!Locks::thread_list_lock_, !lock_, !bulk_free_lock_); // Assert the thread local runs of a thread are revoked. - void AssertThreadLocalRunsAreRevoked(Thread* thread); + void AssertThreadLocalRunsAreRevoked(Thread* thread) REQUIRES(!bulk_free_lock_); // Assert all the thread local runs are revoked. - void AssertAllThreadLocalRunsAreRevoked() LOCKS_EXCLUDED(Locks::thread_list_lock_); + void AssertAllThreadLocalRunsAreRevoked() REQUIRES(!Locks::thread_list_lock_, !bulk_free_lock_); static Run* GetDedicatedFullRun() { return dedicated_full_run_; @@ -647,9 +647,11 @@ class RosAlloc { } // Verify for debugging. 
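Editor's note: throughout rosalloc.h, helpers that run with lock_ held keep REQUIRES(lock_), while entry points that take the lock themselves move from LOCKS_EXCLUDED(lock_) to REQUIRES(!lock_). The payoff of the negated form is that Clang can now flag a call chain that would re-acquire a non-reentrant lock. A hedged sketch with hypothetical names (not ART's classes):

#include <cstddef>
#include <mutex>
#include <new>

#if defined(__clang__)
#define TS(x) __attribute__((x))
#else
#define TS(x)
#endif

class TS(capability("mutex")) Lock {
 public:
  void Acquire() TS(acquire_capability()) { mu_.lock(); }
  void Release() TS(release_capability()) { mu_.unlock(); }
 private:
  std::mutex mu_;
};

class Allocator {
 public:
  // Public entry point: takes lock_ itself, so callers must not hold it.
  void* Alloc(size_t size) TS(requires_capability(!lock_)) {
    lock_.Acquire();
    void* ptr = AllocPages(size);
    lock_.Release();
    return ptr;
  }

 private:
  // Page-level helper: only ever called with lock_ already held.
  void* AllocPages(size_t size) TS(requires_capability(lock_)) {
    return ::operator new(size);
  }

  // With -Wthread-safety-negative, clang would reject a helper like this: it
  // holds lock_ yet calls Alloc(), which would try to acquire lock_ again.
  // void SelfDeadlock() TS(requires_capability(lock_)) { Alloc(8); }

  Lock lock_;
};

int main() {
  Allocator allocator;
  void* ptr = allocator.Alloc(16);
  ::operator delete(ptr);
  return 0;
}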
- void Verify() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); + void Verify() REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !bulk_free_lock_, + !lock_); - void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes); + void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) + REQUIRES(!bulk_free_lock_, !lock_); private: friend std::ostream& operator<<(std::ostream& os, const RosAlloc::PageMapKind& rhs); diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc index c803655cc6..ec689f8528 100644 --- a/runtime/gc/collector/concurrent_copying.cc +++ b/runtime/gc/collector/concurrent_copying.cc @@ -185,7 +185,7 @@ class ThreadFlipVisitor : public Closure { : concurrent_copying_(concurrent_copying), use_tlab_(use_tlab) { } - virtual void Run(Thread* thread) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + virtual void Run(Thread* thread) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { // Note: self is not necessarily equal to thread since thread may be suspended. Thread* self = Thread::Current(); CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc) @@ -221,7 +221,7 @@ class FlipCallback : public Closure { : concurrent_copying_(concurrent_copying) { } - virtual void Run(Thread* thread) OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) { + virtual void Run(Thread* thread) OVERRIDE REQUIRES(Locks::mutator_lock_) { ConcurrentCopying* cc = concurrent_copying_; TimingLogger::ScopedTiming split("(Paused)FlipCallback", cc->GetTimings()); // Note: self is not necessarily equal to thread since thread may be suspended. @@ -290,8 +290,8 @@ class ConcurrentCopyingImmuneSpaceObjVisitor { explicit ConcurrentCopyingImmuneSpaceObjVisitor(ConcurrentCopying* cc) : collector_(cc) {} - void operator()(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { + void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::heap_bitmap_lock_) { DCHECK(obj != nullptr); DCHECK(collector_->immune_region_.ContainsObject(obj)); accounting::ContinuousSpaceBitmap* cc_bitmap = @@ -599,7 +599,7 @@ class ConcurrentCopyingVerifyNoFromSpaceRefsVisitor : public SingleRootVisitor { : collector_(collector) {} void operator()(mirror::Object* ref) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE { + SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE { if (ref == nullptr) { // OK. 
return; @@ -624,7 +624,7 @@ class ConcurrentCopyingVerifyNoFromSpaceRefsVisitor : public SingleRootVisitor { } void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(root != nullptr); operator()(root); } @@ -638,19 +638,32 @@ class ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor { explicit ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor(ConcurrentCopying* collector) : collector_(collector) {} - void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE { + void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const + SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE { mirror::Object* ref = obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset); ConcurrentCopyingVerifyNoFromSpaceRefsVisitor visitor(collector_); visitor(ref); } void operator()(mirror::Class* klass, mirror::Reference* ref) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE { + SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE { CHECK(klass->IsTypeOfReferenceClass()); this->operator()(ref, mirror::Reference::ReferentOffset(), false); } + void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const + SHARED_REQUIRES(Locks::mutator_lock_) { + if (!root->IsNull()) { + VisitRoot(root); + } + } + + void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const + SHARED_REQUIRES(Locks::mutator_lock_) { + ConcurrentCopyingVerifyNoFromSpaceRefsVisitor visitor(collector_); + visitor(root->AsMirrorPtr()); + } + private: ConcurrentCopying* const collector_; }; @@ -660,11 +673,11 @@ class ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor { explicit ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor(ConcurrentCopying* collector) : collector_(collector) {} void operator()(mirror::Object* obj) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectCallback(obj, collector_); } static void ObjectCallback(mirror::Object* obj, void *arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { CHECK(obj != nullptr); ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg); space::RegionSpace* region_space = collector->RegionSpace(); @@ -733,7 +746,7 @@ class ConcurrentCopyingAssertToSpaceInvariantRefsVisitor { : collector_(collector) {} void operator()(mirror::Object* ref) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE { + SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE { if (ref == nullptr) { // OK. 
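Editor's note: this change adds VisitRootIfNonNull/VisitRoot(CompressedReference*) hooks to the field visitors so that class roots reached while scanning an object are checked or marked as well. A simplified, self-contained sketch of that visitor shape, with toy types standing in for mirror::Object and mirror::CompressedReference:

#include <cstdio>

struct Object { int id; };

// Simplified stand-in for mirror::CompressedReference<Object>.
class CompressedReference {
 public:
  explicit CompressedReference(Object* ref) : ref_(ref) {}
  bool IsNull() const { return ref_ == nullptr; }
  Object* AsMirrorPtr() const { return ref_; }
 private:
  Object* ref_;
};

class RefFieldsVisitor {
 public:
  // Field reference, as before.
  void operator()(Object* obj, int offset) const {
    std::printf("field ref: obj=%d offset=%d\n", obj->id, offset);
  }
  // New hooks: class-embedded roots reached while scanning an object.
  void VisitRootIfNonNull(CompressedReference* root) const {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }
  void VisitRoot(CompressedReference* root) const {
    std::printf("root ref: obj=%d\n", root->AsMirrorPtr()->id);
  }
};

int main() {
  Object a{1}, b{2};
  CompressedReference root(&b), null_root(nullptr);
  RefFieldsVisitor visitor;
  visitor(&a, 8);
  visitor.VisitRootIfNonNull(&root);
  visitor.VisitRootIfNonNull(&null_root);  // Skipped: null root.
  return 0;
}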
return; @@ -750,18 +763,31 @@ class ConcurrentCopyingAssertToSpaceInvariantFieldVisitor { explicit ConcurrentCopyingAssertToSpaceInvariantFieldVisitor(ConcurrentCopying* collector) : collector_(collector) {} - void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE { + void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const + SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE { mirror::Object* ref = obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset); ConcurrentCopyingAssertToSpaceInvariantRefsVisitor visitor(collector_); visitor(ref); } - void operator()(mirror::Class* klass, mirror::Reference* /* ref */) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE { + void operator()(mirror::Class* klass, mirror::Reference* ref ATTRIBUTE_UNUSED) const + SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE { CHECK(klass->IsTypeOfReferenceClass()); } + void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const + SHARED_REQUIRES(Locks::mutator_lock_) { + if (!root->IsNull()) { + VisitRoot(root); + } + } + + void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const + SHARED_REQUIRES(Locks::mutator_lock_) { + ConcurrentCopyingAssertToSpaceInvariantRefsVisitor visitor(collector_); + visitor(root->AsMirrorPtr()); + } + private: ConcurrentCopying* const collector_; }; @@ -771,11 +797,11 @@ class ConcurrentCopyingAssertToSpaceInvariantObjectVisitor { explicit ConcurrentCopyingAssertToSpaceInvariantObjectVisitor(ConcurrentCopying* collector) : collector_(collector) {} void operator()(mirror::Object* obj) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectCallback(obj, collector_); } static void ObjectCallback(mirror::Object* obj, void *arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { CHECK(obj != nullptr); ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg); space::RegionSpace* region_space = collector->RegionSpace(); @@ -1130,8 +1156,8 @@ class ConcurrentCopyingClearBlackPtrsVisitor { #ifndef USE_BAKER_OR_BROOKS_READ_BARRIER NO_RETURN #endif - void operator()(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { + void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::heap_bitmap_lock_) { DCHECK(obj != nullptr); DCHECK(collector_->heap_->GetMarkBitmap()->Test(obj)) << obj; DCHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::BlackPtr()) << obj; @@ -1277,8 +1303,8 @@ class ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor { public: explicit ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor(ConcurrentCopying* cc) : collector_(cc) {} - void operator()(mirror::Object* ref) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { + void operator()(mirror::Object* ref) const SHARED_REQUIRES(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::heap_bitmap_lock_) { DCHECK(ref != nullptr); DCHECK(collector_->region_space_bitmap_->Test(ref)) << ref; DCHECK(collector_->region_space_->IsInUnevacFromSpace(ref)) << ref; @@ -1335,7 +1361,7 @@ class RootPrinter { template <class MirrorType> ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<MirrorType>* root) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + 
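Editor's note: several visitors switch from an unnamed bool /* is_static */ parameter to bool is_static ATTRIBUTE_UNUSED, keeping the self-documenting name while still silencing -Wunused-parameter. A tiny sketch, assuming ATTRIBUTE_UNUSED wraps __attribute__((unused)) as ART's macro does:

#if defined(__GNUC__) || defined(__clang__)
#define ATTRIBUTE_UNUSED __attribute__((unused))
#else
#define ATTRIBUTE_UNUSED
#endif

// Unnamed parameter: nothing to warn about, but the name is lost from the signature.
void VisitA(int /* is_static */) {}

// Named but marked unused: keeps the descriptive name without the warning.
void VisitB(int is_static ATTRIBUTE_UNUSED) {}

int main() {
  VisitA(1);
  VisitB(1);
  return 0;
}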
SHARED_REQUIRES(Locks::mutator_lock_) { if (!root->IsNull()) { VisitRoot(root); } @@ -1343,13 +1369,13 @@ class RootPrinter { template <class MirrorType> void VisitRoot(mirror::Object** root) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { LOG(INTERNAL_FATAL) << "root=" << root << " ref=" << *root; } template <class MirrorType> void VisitRoot(mirror::CompressedReference<MirrorType>* root) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { LOG(INTERNAL_FATAL) << "root=" << root << " ref=" << root->AsMirrorPtr(); } }; @@ -1489,17 +1515,29 @@ class ConcurrentCopyingRefFieldsVisitor { : collector_(collector) {} void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) - const ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { + const ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::heap_bitmap_lock_) { collector_->Process(obj, offset); } void operator()(mirror::Class* klass, mirror::Reference* ref) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE { + SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE { CHECK(klass->IsTypeOfReferenceClass()); collector_->DelayReferenceReferent(klass, ref); } + void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const + SHARED_REQUIRES(Locks::mutator_lock_) { + if (!root->IsNull()) { + VisitRoot(root); + } + } + + void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const + SHARED_REQUIRES(Locks::mutator_lock_) { + collector_->MarkRoot(root); + } + private: ConcurrentCopying* const collector_; }; @@ -1513,7 +1551,8 @@ void ConcurrentCopying::Scan(mirror::Object* to_ref) { // Process a field. inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset) { - mirror::Object* ref = obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset); + mirror::Object* ref = obj->GetFieldObject< + mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset); if (ref == nullptr || region_space_->IsInToSpace(ref)) { return; } @@ -1530,8 +1569,8 @@ inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset) // It was updated by the mutator. break; } - } while (!obj->CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier<false, false, kVerifyNone>( - offset, expected_ref, new_ref)); + } while (!obj->CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier< + false, false, kVerifyNone>(offset, expected_ref, new_ref)); } // Process some roots. 
@@ -1559,22 +1598,18 @@ void ConcurrentCopying::VisitRoots( } } -void ConcurrentCopying::VisitRoots( - mirror::CompressedReference<mirror::Object>** roots, size_t count, - const RootInfo& info ATTRIBUTE_UNUSED) { - for (size_t i = 0; i < count; ++i) { - mirror::CompressedReference<mirror::Object>* root = roots[i]; - mirror::Object* ref = root->AsMirrorPtr(); - if (ref == nullptr || region_space_->IsInToSpace(ref)) { - continue; - } - mirror::Object* to_ref = Mark(ref); - if (to_ref == ref) { - continue; - } +void ConcurrentCopying::MarkRoot(mirror::CompressedReference<mirror::Object>* root) { + DCHECK(!root->IsNull()); + mirror::Object* const ref = root->AsMirrorPtr(); + if (region_space_->IsInToSpace(ref)) { + return; + } + mirror::Object* to_ref = Mark(ref); + if (to_ref != ref) { auto* addr = reinterpret_cast<Atomic<mirror::CompressedReference<mirror::Object>>*>(root); auto expected_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(ref); auto new_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(to_ref); + // If the cas fails, then it was updated by the mutator. do { if (ref != addr->LoadRelaxed().AsMirrorPtr()) { // It was updated by the mutator. @@ -1584,6 +1619,17 @@ void ConcurrentCopying::VisitRoots( } } +void ConcurrentCopying::VisitRoots( + mirror::CompressedReference<mirror::Object>** roots, size_t count, + const RootInfo& info ATTRIBUTE_UNUSED) { + for (size_t i = 0; i < count; ++i) { + mirror::CompressedReference<mirror::Object>* const root = roots[i]; + if (!root->IsNull()) { + MarkRoot(root); + } + } +} + // Fill the given memory block with a dummy object. Used to fill in a // copy of objects that was lost in race. void ConcurrentCopying::FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size) { diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h index f1317b8f78..a4fd71c0a5 100644 --- a/runtime/gc/collector/concurrent_copying.h +++ b/runtime/gc/collector/concurrent_copying.h @@ -62,14 +62,15 @@ class ConcurrentCopying : public GarbageCollector { ConcurrentCopying(Heap* heap, const std::string& name_prefix = ""); ~ConcurrentCopying(); - virtual void RunPhases() OVERRIDE; - void InitializePhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void MarkingPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void ReclaimPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void FinishPhase(); - - void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::heap_bitmap_lock_); + virtual void RunPhases() OVERRIDE REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_); + void InitializePhase() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_); + void MarkingPhase() SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_); + void ReclaimPhase() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_); + void FinishPhase() REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_); + + void BindBitmaps() SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Locks::heap_bitmap_lock_); virtual GcType GetGcType() const OVERRIDE { return kGcTypePartial; } @@ -85,14 +86,15 @@ class ConcurrentCopying : public GarbageCollector { return region_space_; } void AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset, mirror::Object* ref) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void AssertToSpaceInvariant(GcRootSource* gc_root_source, mirror::Object* ref) - 
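Editor's note: the new ConcurrentCopying::MarkRoot() installs the forwarded pointer with a weak CAS loop and backs off if a mutator has already updated the root. A minimal sketch of that retry pattern, using std::atomic in place of ART's Atomic<> and CompressedReference types:

#include <atomic>
#include <cstdio>

struct Object { int id; };

// Updates *root from `from` to `to`, unless a mutator got there first.
void UpdateRoot(std::atomic<Object*>* root, Object* from, Object* to) {
  Object* expected = from;
  do {
    if (root->load(std::memory_order_relaxed) != from) {
      return;  // The mutator already wrote a newer value; leave it alone.
    }
    expected = from;  // compare_exchange_weak rewrites expected on failure.
  } while (!root->compare_exchange_weak(expected, to, std::memory_order_relaxed));
}

int main() {
  Object old_obj{1}, new_obj{2};
  std::atomic<Object*> root{&old_obj};
  UpdateRoot(&root, &old_obj, &new_obj);
  std::printf("root now points to id=%d\n", root.load()->id);
  return 0;
}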
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool IsInToSpace(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_); + bool IsInToSpace(mirror::Object* ref) SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(ref != nullptr); return IsMarked(ref) == ref; } - mirror::Object* Mark(mirror::Object* from_ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::Object* Mark(mirror::Object* from_ref) SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_); bool IsMarking() const { return is_marking_; } @@ -105,68 +107,79 @@ class ConcurrentCopying : public GarbageCollector { bool IsWeakRefAccessEnabled() { return weak_ref_access_enabled_.LoadRelaxed(); } - void RevokeThreadLocalMarkStack(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void RevokeThreadLocalMarkStack(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!mark_stack_lock_); private: - void PushOntoMarkStack(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - mirror::Object* Copy(mirror::Object* from_ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void Scan(mirror::Object* to_ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void PushOntoMarkStack(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!mark_stack_lock_); + mirror::Object* Copy(mirror::Object* from_ref) SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!skipped_blocks_lock_, !mark_stack_lock_); + void Scan(mirror::Object* to_ref) SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!mark_stack_lock_); void Process(mirror::Object* obj, MemberOffset offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_ , !skipped_blocks_lock_); virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_); + void MarkRoot(mirror::CompressedReference<mirror::Object>* root) + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_); virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count, const RootInfo& info) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void VerifyNoFromSpaceReferences() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_); + void VerifyNoFromSpaceReferences() REQUIRES(Locks::mutator_lock_); accounting::ObjectStack* GetAllocationStack(); accounting::ObjectStack* GetLiveStack(); - virtual void ProcessMarkStack() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool ProcessMarkStackOnce() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void ProcessMarkStackRef(mirror::Object* to_ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + virtual void ProcessMarkStack() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!mark_stack_lock_); + bool ProcessMarkStackOnce() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_); + void ProcessMarkStackRef(mirror::Object* to_ref) SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!mark_stack_lock_); size_t ProcessThreadLocalMarkStacks(bool disable_weak_ref_access) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_); void RevokeThreadLocalMarkStacks(bool disable_weak_ref_access) - 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void SwitchToSharedMarkStackMode() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void SwitchToGcExclusiveMarkStackMode() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); + void SwitchToSharedMarkStackMode() SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!mark_stack_lock_); + void SwitchToGcExclusiveMarkStackMode() SHARED_REQUIRES(Locks::mutator_lock_); virtual void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void ProcessReferences(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); + void ProcessReferences(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_); virtual mirror::Object* MarkObject(mirror::Object* from_ref) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_); virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_); virtual mirror::Object* IsMarked(mirror::Object* from_ref) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* field) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void SweepSystemWeaks(Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_); void Sweep(bool swap_bitmaps) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_); void SweepLargeObjects(bool swap_bitmaps) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_); void ClearBlackPtrs() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_); void FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); mirror::Object* AllocateInSkippedBlock(size_t alloc_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void CheckEmptyMarkStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void IssueEmptyCheckpoint() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool IsOnAllocStack(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!skipped_blocks_lock_); + void CheckEmptyMarkStack() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_); + void IssueEmptyCheckpoint() SHARED_REQUIRES(Locks::mutator_lock_); + bool IsOnAllocStack(mirror::Object* ref) SHARED_REQUIRES(Locks::mutator_lock_); mirror::Object* GetFwdPtr(mirror::Object* from_ref) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void FlipThreadRoots() LOCKS_EXCLUDED(Locks::mutator_lock_); - void SwapStacks(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); + void FlipThreadRoots() REQUIRES(!Locks::mutator_lock_); 
+ void SwapStacks(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_); void RecordLiveStackFreezeSize(Thread* self); void ComputeUnevacFromSpaceLiveRatio(); void LogFromSpaceRefHolder(mirror::Object* obj, MemberOffset offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj, mirror::Object* ref) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void ReenableWeakRefAccess(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); + void ReenableWeakRefAccess(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_); space::RegionSpace* region_space_; // The underlying region space. std::unique_ptr<Barrier> gc_barrier_; diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h index cfc4f963e2..954c80ec7b 100644 --- a/runtime/gc/collector/garbage_collector.h +++ b/runtime/gc/collector/garbage_collector.h @@ -142,7 +142,7 @@ class GarbageCollector : public RootVisitor, public IsMarkedVisitor, public Mark virtual GcType GetGcType() const = 0; virtual CollectorType GetCollectorType() const = 0; // Run the garbage collector. - void Run(GcCause gc_cause, bool clear_soft_references); + void Run(GcCause gc_cause, bool clear_soft_references) REQUIRES(!pause_histogram_lock_); Heap* GetHeap() const { return heap_; } @@ -150,11 +150,11 @@ class GarbageCollector : public RootVisitor, public IsMarkedVisitor, public Mark const CumulativeLogger& GetCumulativeTimings() const { return cumulative_timings_; } - void ResetCumulativeStatistics(); + void ResetCumulativeStatistics() REQUIRES(!pause_histogram_lock_); // Swap the live and mark bitmaps of spaces that are active for the collector. For partial GC, // this is the allocation space, for full GC then we swap the zygote bitmaps too. - void SwapBitmaps() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); - uint64_t GetTotalPausedTimeNs() LOCKS_EXCLUDED(pause_histogram_lock_); + void SwapBitmaps() REQUIRES(Locks::heap_bitmap_lock_); + uint64_t GetTotalPausedTimeNs() REQUIRES(!pause_histogram_lock_); int64_t GetTotalFreedBytes() const { return total_freed_bytes_; } @@ -162,7 +162,7 @@ class GarbageCollector : public RootVisitor, public IsMarkedVisitor, public Mark return total_freed_objects_; } // Reset the cumulative timings and pause histogram. - void ResetMeasurements(); + void ResetMeasurements() REQUIRES(!pause_histogram_lock_); // Returns the estimated throughput in bytes / second. uint64_t GetEstimatedMeanThroughput() const; // Returns how many GC iterations have been run. @@ -179,23 +179,23 @@ class GarbageCollector : public RootVisitor, public IsMarkedVisitor, public Mark void RecordFree(const ObjectBytePair& freed); // Record a free of large objects. void RecordFreeLOS(const ObjectBytePair& freed); - void DumpPerformanceInfo(std::ostream& os) LOCKS_EXCLUDED(pause_histogram_lock_); + void DumpPerformanceInfo(std::ostream& os) REQUIRES(!pause_histogram_lock_); // Helper functions for querying if objects are marked. These are used for processing references, // and will be used for reading system weaks while the GC is running. virtual mirror::Object* IsMarked(mirror::Object* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0; + SHARED_REQUIRES(Locks::mutator_lock_) = 0; virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0; + SHARED_REQUIRES(Locks::mutator_lock_) = 0; // Used by reference processor. 
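Editor's note: the pure-virtual GC interface methods (IsMarked, MarkObject, MarkHeapReference, DelayReferenceReferent) carry their lock annotations on the abstract declarations, so every call made through the interface is checked against the same contract. A hedged sketch with simplified names, using C++17 std::shared_mutex in place of ART's reader-writer mutator lock:

#include <shared_mutex>

#if defined(__clang__)
#define TS(x) __attribute__((x))
#else
#define TS(x)
#endif

class TS(capability("mutex")) ReaderWriterLock {
 public:
  void SharedLock() TS(acquire_shared_capability()) { mu_.lock_shared(); }
  void SharedUnlock() TS(release_shared_capability()) { mu_.unlock_shared(); }
 private:
  std::shared_mutex mu_;
};

ReaderWriterLock mutator_lock;

struct Object {};

// Abstract GC interface: implementations may touch heap objects, so the
// requirement is stated once on the pure-virtual declaration.
class IsMarkedVisitor {
 public:
  virtual ~IsMarkedVisitor() {}
  virtual Object* IsMarked(Object* obj) TS(requires_shared_capability(mutator_lock)) = 0;
};

class TrivialVisitor : public IsMarkedVisitor {
 public:
  Object* IsMarked(Object* obj) override TS(requires_shared_capability(mutator_lock)) {
    return obj;  // Pretend every object is marked.
  }
};

// Callers going through the interface are checked against that contract.
void SweepWeaks(IsMarkedVisitor* visitor, Object* obj) TS(requires_capability(!mutator_lock)) {
  mutator_lock.SharedLock();
  visitor->IsMarked(obj);
  mutator_lock.SharedUnlock();
}

int main() {
  TrivialVisitor visitor;
  Object obj;
  SweepWeaks(&visitor, &obj);
  return 0;
}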
- virtual void ProcessMarkStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0; + virtual void ProcessMarkStack() SHARED_REQUIRES(Locks::mutator_lock_) = 0; // Force mark an object. virtual mirror::Object* MarkObject(mirror::Object* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0; + SHARED_REQUIRES(Locks::mutator_lock_) = 0; virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0; + SHARED_REQUIRES(Locks::mutator_lock_) = 0; virtual void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0; + SHARED_REQUIRES(Locks::mutator_lock_) = 0; protected: // Run all of the GC phases. diff --git a/runtime/gc/collector/immune_region.h b/runtime/gc/collector/immune_region.h index 30144f0b16..3ead501046 100644 --- a/runtime/gc/collector/immune_region.h +++ b/runtime/gc/collector/immune_region.h @@ -41,7 +41,7 @@ class ImmuneRegion { ImmuneRegion(); void Reset(); bool AddContinuousSpace(space::ContinuousSpace* space) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + REQUIRES(Locks::heap_bitmap_lock_); bool ContainsSpace(const space::ContinuousSpace* space) const; // Returns true if an object is inside of the immune region (assumed to be marked). bool ContainsObject(const mirror::Object* obj) const ALWAYS_INLINE { diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc index 0623fd41a1..4b2c588dae 100644 --- a/runtime/gc/collector/mark_compact.cc +++ b/runtime/gc/collector/mark_compact.cc @@ -89,7 +89,7 @@ class CalculateObjectForwardingAddressVisitor { public: explicit CalculateObjectForwardingAddressVisitor(MarkCompact* collector) : collector_(collector) {} - void operator()(mirror::Object* obj) const EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, + void operator()(mirror::Object* obj) const REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { DCHECK_ALIGNED(obj, space::BumpPointerSpace::kAlignment); DCHECK(collector_->IsMarked(obj) != nullptr); @@ -301,8 +301,8 @@ class UpdateRootVisitor : public RootVisitor { } void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) - OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { + OVERRIDE REQUIRES(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::heap_bitmap_lock_) { for (size_t i = 0; i < count; ++i) { mirror::Object* obj = *roots[i]; mirror::Object* new_obj = collector_->GetMarkedForwardAddress(obj); @@ -315,8 +315,8 @@ class UpdateRootVisitor : public RootVisitor { void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) - OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { + OVERRIDE REQUIRES(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::heap_bitmap_lock_) { for (size_t i = 0; i < count; ++i) { mirror::Object* obj = roots[i]->AsMirrorPtr(); mirror::Object* new_obj = collector_->GetMarkedForwardAddress(obj); @@ -335,8 +335,8 @@ class UpdateObjectReferencesVisitor { public: explicit UpdateObjectReferencesVisitor(MarkCompact* collector) : collector_(collector) { } - void operator()(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE { + void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::heap_bitmap_lock_) + REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE { 
collector_->UpdateObjectReferences(obj); } @@ -428,16 +428,29 @@ class UpdateReferenceVisitor { } void operator()(mirror::Object* obj, MemberOffset offset, bool /*is_static*/) const - ALWAYS_INLINE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { + ALWAYS_INLINE REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { collector_->UpdateHeapReference(obj->GetFieldObjectReferenceAddr<kVerifyNone>(offset)); } void operator()(mirror::Class* /*klass*/, mirror::Reference* ref) const - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { + REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { collector_->UpdateHeapReference( ref->GetFieldObjectReferenceAddr<kVerifyNone>(mirror::Reference::ReferentOffset())); } + // TODO: Remove NO_THREAD_SAFETY_ANALYSIS when clang better understands visitors. + void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const + NO_THREAD_SAFETY_ANALYSIS { + if (!root->IsNull()) { + VisitRoot(root); + } + } + + void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const + NO_THREAD_SAFETY_ANALYSIS { + root->Assign(collector_->GetMarkedForwardAddress(root->AsMirrorPtr())); + } + private: MarkCompact* const collector_; }; @@ -491,8 +504,8 @@ class MoveObjectVisitor { public: explicit MoveObjectVisitor(MarkCompact* collector) : collector_(collector) { } - void operator()(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE { + void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::heap_bitmap_lock_) + REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE { collector_->MoveObject(obj, obj->SizeOf()); } @@ -564,17 +577,30 @@ class MarkCompactMarkObjectVisitor { } void operator()(mirror::Object* obj, MemberOffset offset, bool /*is_static*/) const ALWAYS_INLINE - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { + REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { // Object was already verified when we scanned it. collector_->MarkObject(obj->GetFieldObject<mirror::Object, kVerifyNone>(offset)); } void operator()(mirror::Class* klass, mirror::Reference* ref) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::heap_bitmap_lock_) { collector_->DelayReferenceReferent(klass, ref); } + // TODO: Remove NO_THREAD_SAFETY_ANALYSIS when clang better understands visitors. 
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const + NO_THREAD_SAFETY_ANALYSIS { + if (!root->IsNull()) { + VisitRoot(root); + } + } + + void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const + NO_THREAD_SAFETY_ANALYSIS { + collector_->MarkObject(root->AsMirrorPtr()); + } + private: MarkCompact* const collector_; }; diff --git a/runtime/gc/collector/mark_compact.h b/runtime/gc/collector/mark_compact.h index 89d66b5104..8d91939057 100644 --- a/runtime/gc/collector/mark_compact.h +++ b/runtime/gc/collector/mark_compact.h @@ -64,13 +64,13 @@ class MarkCompact : public GarbageCollector { virtual void RunPhases() OVERRIDE NO_THREAD_SAFETY_ANALYSIS; void InitializePhase(); - void MarkingPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::heap_bitmap_lock_); - void ReclaimPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::heap_bitmap_lock_); - void FinishPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); + void MarkingPhase() REQUIRES(Locks::mutator_lock_) + REQUIRES(!Locks::heap_bitmap_lock_); + void ReclaimPhase() REQUIRES(Locks::mutator_lock_) + REQUIRES(!Locks::heap_bitmap_lock_); + void FinishPhase() REQUIRES(Locks::mutator_lock_); void MarkReachableObjects() - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_); + REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_); virtual GcType GetGcType() const OVERRIDE { return kGcTypePartial; } @@ -88,106 +88,106 @@ class MarkCompact : public GarbageCollector { void FindDefaultMarkBitmap(); void ScanObject(mirror::Object* obj) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); + REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_); // Marks the root set at the start of a garbage collection. void MarkRoots() - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); + REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_); // Bind the live bits to the mark bits of bitmaps for spaces that are never collected, ie // the image. Mark that portion of the heap as immune. - void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::heap_bitmap_lock_); + void BindBitmaps() SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Locks::heap_bitmap_lock_); void UnBindBitmaps() - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + REQUIRES(Locks::heap_bitmap_lock_); - void ProcessReferences(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); + void ProcessReferences(Thread* self) REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::mutator_lock_); // Sweeps unmarked objects to complete the garbage collection. - void Sweep(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + void Sweep(bool swap_bitmaps) REQUIRES(Locks::heap_bitmap_lock_); // Sweeps unmarked objects to complete the garbage collection. 
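Editor's note: the new root hooks in mark_compact.cc carry NO_THREAD_SAFETY_ANALYSIS plus a TODO to remove it once Clang understands visitors better; the required locks are held by the templated scan code that invokes the visitor, but the analysis cannot prove that at the hook itself, so checking is suppressed locally. An illustrative sketch with hypothetical names:

#include <mutex>

#if defined(__clang__)
#define TS(x) __attribute__((x))
#else
#define TS(x)
#endif
#define NO_THREAD_SAFETY_ANALYSIS TS(no_thread_safety_analysis)

class TS(capability("mutex")) Lock {
 public:
  void Acquire() TS(acquire_capability()) { mu_.lock(); }
  void Release() TS(release_capability()) { mu_.unlock(); }
 private:
  std::mutex mu_;
};

Lock heap_bitmap_lock;
int marked_count TS(guarded_by(heap_bitmap_lock)) = 0;

struct MarkVisitor {
  // Callers (the templated scan loop) hold heap_bitmap_lock, but the analysis
  // cannot see that here, so the check is suppressed rather than annotated.
  void VisitRoot() const NO_THREAD_SAFETY_ANALYSIS {
    ++marked_count;
  }
};

template <typename Visitor>
void ScanObject(const Visitor& visitor) TS(requires_capability(heap_bitmap_lock)) {
  visitor.VisitRoot();
}

int main() {
  MarkVisitor visitor;
  heap_bitmap_lock.Acquire();
  ScanObject(visitor);
  heap_bitmap_lock.Release();
  return 0;
}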
- void SweepLargeObjects(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + void SweepLargeObjects(bool swap_bitmaps) REQUIRES(Locks::heap_bitmap_lock_); void SweepSystemWeaks() - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); + SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_); virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) - OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_); + OVERRIDE REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_); virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count, const RootInfo& info) - OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_); + OVERRIDE REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_); // Schedules an unmarked object for reference processing. void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); + SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_); protected: // Returns null if the object is not marked, otherwise returns the forwarding address (same as // object for non movable things). mirror::Object* GetMarkedForwardAddress(mirror::Object* object) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + REQUIRES(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::heap_bitmap_lock_); // Marks or unmarks a large object based on whether or not set is true. If set is true, then we // mark, otherwise we unmark. bool MarkLargeObject(const mirror::Object* obj) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::heap_bitmap_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Expand mark stack to 2x its current size. - void ResizeMarkStack(size_t new_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void ResizeMarkStack(size_t new_size) SHARED_REQUIRES(Locks::mutator_lock_); // Returns true if we should sweep the space. bool ShouldSweepSpace(space::ContinuousSpace* space) const; // Push an object onto the mark stack. - void MarkStackPush(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void MarkStackPush(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_); void UpdateAndMarkModUnion() - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::heap_bitmap_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Recursively blackens objects on the mark stack. void ProcessMarkStack() - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_); + REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_); // 3 pass mark compact approach. - void Compact() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_); + void Compact() REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_); // Calculate the forwarding address of objects marked as "live" in the objects_before_forwarding // bitmap. void CalculateObjectForwardingAddresses() - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_); + REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_); // Update the references of objects by using the forwarding addresses. 
- void UpdateReferences() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_); + void UpdateReferences() REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_); // Move objects and restore lock words. - void MoveObjects() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); + void MoveObjects() REQUIRES(Locks::mutator_lock_); // Move a single object to its forward address. - void MoveObject(mirror::Object* obj, size_t len) EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); + void MoveObject(mirror::Object* obj, size_t len) REQUIRES(Locks::mutator_lock_); // Mark a single object. virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); + REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_); virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* obj_ptr) OVERRIDE - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); + REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_); virtual mirror::Object* IsMarked(mirror::Object* obj) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::heap_bitmap_lock_) + REQUIRES(Locks::mutator_lock_); virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); - void ForwardObject(mirror::Object* obj) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, + SHARED_REQUIRES(Locks::heap_bitmap_lock_) + REQUIRES(Locks::mutator_lock_); + void ForwardObject(mirror::Object* obj) REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_); // Update a single heap reference. void UpdateHeapReference(mirror::HeapReference<mirror::Object>* reference) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::heap_bitmap_lock_) + REQUIRES(Locks::mutator_lock_); // Update all of the references of a single object. void UpdateObjectReferences(mirror::Object* obj) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::heap_bitmap_lock_) + REQUIRES(Locks::mutator_lock_); // Revoke all the thread-local buffers. void RevokeAllThreadLocalBuffers(); diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc index abb1d3dcf7..7f2c2048f6 100644 --- a/runtime/gc/collector/mark_sweep.cc +++ b/runtime/gc/collector/mark_sweep.cc @@ -365,7 +365,7 @@ class MarkSweepMarkObjectSlowPath { : mark_sweep_(mark_sweep), holder_(holder), offset_(offset) { } - void operator()(const mirror::Object* obj) const ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS { + void operator()(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS { if (kProfileLargeObjects) { // TODO: Differentiate between marking and testing somehow. 
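Editor's note: mark_compact.h describes a three-pass scheme: compute forwarding addresses for live objects, rewrite references through them, then move the objects. The toy sketch below shows only the first pass, sliding live objects toward the start of the space; it illustrates the idea on a made-up layout and is not ART's implementation.

#include <cstddef>
#include <cstdio>
#include <map>
#include <vector>

struct ToyObject {
  size_t offset;  // Current offset in the space.
  size_t size;
  bool live;
};

// Pass 1: assign each live object its compacted (forwarding) offset.
std::map<size_t, size_t> CalculateForwardingAddresses(const std::vector<ToyObject>& objects) {
  std::map<size_t, size_t> forwarding;  // old offset -> new offset
  size_t bump = 0;                      // Next free offset in the compacted space.
  for (const ToyObject& obj : objects) {  // Assumed sorted by offset.
    if (obj.live) {
      forwarding[obj.offset] = bump;
      bump += obj.size;
    }
  }
  return forwarding;
}

int main() {
  std::vector<ToyObject> objects = {{0, 16, true}, {16, 32, false}, {48, 16, true}};
  for (const auto& entry : CalculateForwardingAddresses(objects)) {
    std::printf("object at %zu forwards to %zu\n", entry.first, entry.second);
  }
  return 0;
}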
++mark_sweep_->large_object_test_; @@ -522,7 +522,7 @@ class VerifyRootMarkedVisitor : public SingleRootVisitor { explicit VerifyRootMarkedVisitor(MarkSweep* collector) : collector_(collector) { } void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { CHECK(collector_->IsMarked(root) != nullptr) << info.ToString(); } @@ -547,7 +547,7 @@ void MarkSweep::VisitRoots(mirror::CompressedReference<mirror::Object>** roots, class VerifyRootVisitor : public SingleRootVisitor { public: void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { // See if the root is on any space bitmap. auto* heap = Runtime::Current()->GetHeap(); if (heap->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == nullptr) { @@ -597,8 +597,7 @@ class ScanObjectVisitor { : mark_sweep_(mark_sweep) {} void operator()(mirror::Object* obj) const ALWAYS_INLINE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) { if (kCheckLocks) { Locks::mutator_lock_->AssertSharedHeld(Thread::Current()); Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current()); @@ -616,8 +615,8 @@ class DelayReferenceReferentVisitor { } void operator()(mirror::Class* klass, mirror::Reference* ref) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::heap_bitmap_lock_) { collector_->DelayReferenceReferent(klass, ref); } @@ -649,13 +648,33 @@ class MarkStackTask : public Task { protected: class MarkObjectParallelVisitor { public: - explicit MarkObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task, - MarkSweep* mark_sweep) ALWAYS_INLINE - : chunk_task_(chunk_task), mark_sweep_(mark_sweep) {} + ALWAYS_INLINE explicit MarkObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task, + MarkSweep* mark_sweep) + : chunk_task_(chunk_task), mark_sweep_(mark_sweep) {} - void operator()(mirror::Object* obj, MemberOffset offset, bool /* static */) const ALWAYS_INLINE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset); + void operator()(mirror::Object* obj, MemberOffset offset, bool /* static */) const + ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) { + Mark(obj->GetFieldObject<mirror::Object>(offset)); + } + + void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const + SHARED_REQUIRES(Locks::mutator_lock_) { + if (!root->IsNull()) { + VisitRoot(root); + } + } + + void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const + SHARED_REQUIRES(Locks::mutator_lock_) { + if (kCheckLocks) { + Locks::mutator_lock_->AssertSharedHeld(Thread::Current()); + Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current()); + } + Mark(root->AsMirrorPtr()); + } + + private: + void Mark(mirror::Object* ref) const ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) { if (ref != nullptr && mark_sweep_->MarkObjectParallel(ref)) { if (kUseFinger) { std::atomic_thread_fence(std::memory_order_seq_cst); @@ -668,7 +687,6 @@ class MarkStackTask : public Task { } } - private: MarkStackTask<kUseFinger>* const chunk_task_; MarkSweep* const mark_sweep_; 
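Editor's note: MarkObjectParallelVisitor is reshaped so the field operator() and the new root hooks all funnel into a single private Mark() helper. A simplified sketch of that structure, with toy types and no parallelism:

#include <cstdio>
#include <vector>

struct Object { bool marked = false; };

class ParallelMarkVisitor {
 public:
  explicit ParallelMarkVisitor(std::vector<Object*>* mark_stack) : mark_stack_(mark_stack) {}

  // Field reference reached while scanning an object.
  void operator()(Object* field_ref) const { Mark(field_ref); }

  // Root reference embedded in a class, possibly null.
  void VisitRootIfNonNull(Object* root) const {
    if (root != nullptr) {
      VisitRoot(root);
    }
  }
  void VisitRoot(Object* root) const { Mark(root); }

 private:
  // The single marking path shared by field and root visits.
  void Mark(Object* ref) const {
    if (ref != nullptr && !ref->marked) {
      ref->marked = true;
      mark_stack_->push_back(ref);
    }
  }

  std::vector<Object*>* const mark_stack_;
};

int main() {
  std::vector<Object*> stack;
  Object a, b;
  ParallelMarkVisitor visitor(&stack);
  visitor(&a);
  visitor.VisitRootIfNonNull(&b);
  visitor.VisitRootIfNonNull(nullptr);
  std::printf("%zu objects queued for scanning\n", stack.size());
  return 0;
}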
}; @@ -679,8 +697,8 @@ class MarkStackTask : public Task { : chunk_task_(chunk_task) {} // No thread safety analysis since multiple threads will use this visitor. - void operator()(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { + void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::heap_bitmap_lock_) { MarkSweep* const mark_sweep = chunk_task_->mark_sweep_; MarkObjectParallelVisitor mark_visitor(chunk_task_, mark_sweep); DelayReferenceReferentVisitor ref_visitor(mark_sweep); @@ -707,7 +725,7 @@ class MarkStackTask : public Task { size_t mark_stack_pos_; ALWAYS_INLINE void MarkStackPush(mirror::Object* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (UNLIKELY(mark_stack_pos_ == kMaxSize)) { // Mark stack overflow, give 1/2 the stack to the thread pool as a new work task. mark_stack_pos_ /= 2; @@ -725,8 +743,8 @@ class MarkStackTask : public Task { } // Scans all of the objects - virtual void Run(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { + virtual void Run(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::heap_bitmap_lock_) { UNUSED(self); ScanObjectParallelVisitor visitor(this); // TODO: Tune this. @@ -1015,7 +1033,7 @@ class VerifySystemWeakVisitor : public IsMarkedVisitor { explicit VerifySystemWeakVisitor(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {} virtual mirror::Object* IsMarked(mirror::Object* obj) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { mark_sweep_->VerifyIsLive(obj); return obj; } @@ -1048,8 +1066,8 @@ class CheckpointMarkThreadRoots : public Closure, public RootVisitor { } void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::heap_bitmap_lock_) { for (size_t i = 0; i < count; ++i) { mark_sweep_->MarkObjectNonNullParallel(*roots[i]); } @@ -1057,8 +1075,8 @@ class CheckpointMarkThreadRoots : public Closure, public RootVisitor { void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::heap_bitmap_lock_) { for (size_t i = 0; i < count; ++i) { mark_sweep_->MarkObjectNonNullParallel(roots[i]->AsMirrorPtr()); } @@ -1259,8 +1277,8 @@ class MarkVisitor { } void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const - ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { + ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::heap_bitmap_lock_) { if (kCheckLocks) { Locks::mutator_lock_->AssertSharedHeld(Thread::Current()); Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current()); @@ -1268,6 +1286,22 @@ class MarkVisitor { mark_sweep_->MarkObject(obj->GetFieldObject<mirror::Object>(offset), obj, offset); } + void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const + SHARED_REQUIRES(Locks::mutator_lock_) 
REQUIRES(Locks::heap_bitmap_lock_) { + if (!root->IsNull()) { + VisitRoot(root); + } + } + + void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) { + if (kCheckLocks) { + Locks::mutator_lock_->AssertSharedHeld(Thread::Current()); + Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current()); + } + mark_sweep_->MarkObject(root->AsMirrorPtr()); + } + private: MarkSweep* const mark_sweep_; }; diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h index 7692b06fe1..606be63d93 100644 --- a/runtime/gc/collector/mark_sweep.h +++ b/runtime/gc/collector/mark_sweep.h @@ -58,15 +58,14 @@ class MarkSweep : public GarbageCollector { ~MarkSweep() {} - virtual void RunPhases() OVERRIDE NO_THREAD_SAFETY_ANALYSIS; + virtual void RunPhases() OVERRIDE REQUIRES(!mark_stack_lock_); void InitializePhase(); - void MarkingPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void PausePhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); - void ReclaimPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void FinishPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void MarkingPhase() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_); + void PausePhase() REQUIRES(Locks::mutator_lock_, !mark_stack_lock_); + void ReclaimPhase() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_); + void FinishPhase(); virtual void MarkReachableObjects() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_); bool IsConcurrent() const { return is_concurrent_; @@ -88,113 +87,96 @@ class MarkSweep : public GarbageCollector { // Marks all objects in the root set at the start of a garbage collection. void MarkRoots(Thread* self) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_); void MarkNonThreadRoots() - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_); void MarkConcurrentRoots(VisitRootFlags flags) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_); void MarkRootsCheckpoint(Thread* self, bool revoke_ros_alloc_thread_local_buffers_at_checkpoint) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_); // Builds a mark stack and recursively mark until it empties. void RecursiveMark() - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_); // Bind the live bits to the mark bits of bitmaps for spaces that are never collected, ie // the image. Mark that portion of the heap as immune. - virtual void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + virtual void BindBitmaps() SHARED_REQUIRES(Locks::mutator_lock_); // Builds a mark stack with objects on dirty cards and recursively mark until it empties. 
void RecursiveMarkDirtyObjects(bool paused, uint8_t minimum_age) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_); // Remarks the root set after completing the concurrent mark. void ReMarkRoots() - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_); void ProcessReferences(Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_); // Update and mark references from immune spaces. void UpdateAndMarkModUnion() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_); // Pre clean cards to reduce how much work is needed in the pause. void PreCleanCards() - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_); // Sweeps unmarked objects to complete the garbage collection. Virtual as by default it sweeps // all allocation spaces. Partial and sticky GCs want to just sweep a subset of the heap. - virtual void Sweep(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + virtual void Sweep(bool swap_bitmaps) REQUIRES(Locks::heap_bitmap_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Sweeps unmarked objects to complete the garbage collection. - void SweepLargeObjects(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + void SweepLargeObjects(bool swap_bitmaps) REQUIRES(Locks::heap_bitmap_lock_); // Sweep only pointers within an array. WARNING: Trashes objects. void SweepArray(accounting::ObjectStack* allocation_stack_, bool swap_bitmaps) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_); // Blackens an object. void ScanObject(mirror::Object* obj) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_); // No thread safety analysis due to lambdas. template<typename MarkVisitor, typename ReferenceVisitor> void ScanObjectVisit(mirror::Object* obj, const MarkVisitor& visitor, const ReferenceVisitor& ref_visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_); void SweepSystemWeaks(Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_); static mirror::Object* VerifySystemWeakIsLiveCallback(mirror::Object* obj, void* arg) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); + SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_); void VerifySystemWeaks() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_); + SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_); // Verify that an object is live, either in a live bitmap or in the allocation stack. 
void VerifyIsLive(const mirror::Object* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_); + SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_); virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_); virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_); virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count, const RootInfo& info) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_); // Marks an object. virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_); void MarkObject(mirror::Object* obj, mirror::Object* holder, MemberOffset offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_); virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* ref) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_); Barrier& GetBarrier() { return *gc_barrier_; @@ -202,21 +184,20 @@ class MarkSweep : public GarbageCollector { // Schedules an unmarked object for reference processing. void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); + SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_); protected: // Returns object if the object is marked in the heap bitmap, otherwise null. virtual mirror::Object* IsMarked(mirror::Object* object) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + SHARED_REQUIRES(Locks::heap_bitmap_lock_); void MarkObjectNonNull(mirror::Object* obj, mirror::Object* holder = nullptr, MemberOffset offset = MemberOffset(0)) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_); // Marks an object atomically, safe to use from multiple threads. void MarkObjectNonNullParallel(mirror::Object* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_); // Returns true if we need to add obj to a mark stack. bool MarkObjectParallel(mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS; @@ -227,36 +208,34 @@ class MarkSweep : public GarbageCollector { NO_THREAD_SAFETY_ANALYSIS; // Expand mark stack to 2x its current size. 
- void ExpandMarkStack() EXCLUSIVE_LOCKS_REQUIRED(mark_stack_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void ResizeMarkStack(size_t new_size) EXCLUSIVE_LOCKS_REQUIRED(mark_stack_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void ExpandMarkStack() REQUIRES(mark_stack_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); + void ResizeMarkStack(size_t new_size) REQUIRES(mark_stack_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Returns how many threads we should use for the current GC phase based on if we are paused, // whether or not we care about pauses. size_t GetThreadCount(bool paused) const; // Push a single reference on a mark stack. - void PushOnMarkStack(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void PushOnMarkStack(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!mark_stack_lock_); // Blackens objects grayed during a garbage collection. void ScanGrayObjects(bool paused, uint8_t minimum_age) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_); - virtual void ProcessMarkStack() OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + virtual void ProcessMarkStack() OVERRIDE REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) { ProcessMarkStack(false); } // Recursively blackens objects on the mark stack. void ProcessMarkStack(bool paused) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_); void ProcessMarkStackParallel(size_t thread_count) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_); // Used to Get around thread safety annotations. The call is from MarkingPhase and is guarded by // IsExclusiveHeld. diff --git a/runtime/gc/collector/partial_mark_sweep.h b/runtime/gc/collector/partial_mark_sweep.h index 1a211cd3b6..7b69bce0e1 100644 --- a/runtime/gc/collector/partial_mark_sweep.h +++ b/runtime/gc/collector/partial_mark_sweep.h @@ -37,7 +37,7 @@ class PartialMarkSweep : public MarkSweep { // Bind the live bits to the mark bits of bitmaps for spaces that aren't collected for partial // collections, ie the Zygote space. Also mark this space is immune. Virtual as overridden by // StickyMarkSweep. 
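The rewritten mark_sweep.h signatures above use three distinct annotations: REQUIRES(mark_stack_lock_) for functions that must be entered with the lock held exclusively (ExpandMarkStack, ResizeMarkStack), SHARED_REQUIRES(...) where a reader hold is enough, and the negative form REQUIRES(!mark_stack_lock_) for functions that take the lock themselves and therefore must not be entered with it already held. The sketch below uses the attribute spellings from the Clang thread-safety documentation; the macro names and the declaration-only Mutex wrapper are illustrative, and ART's own macros are assumed to expand to equivalent attributes.

// Compile with: clang++ -c -Wthread-safety -Wthread-safety-negative sketch.cc
#define CAPABILITY(x)        __attribute__((capability(x)))
#define ACQUIRE(...)         __attribute__((acquire_capability(__VA_ARGS__)))
#define RELEASE(...)         __attribute__((release_capability(__VA_ARGS__)))
#define REQUIRES(...)        __attribute__((requires_capability(__VA_ARGS__)))
#define SHARED_REQUIRES(...) __attribute__((requires_shared_capability(__VA_ARGS__)))

// Declaration-only lock wrapper, as in the Clang documentation's example; a
// real one would delegate to pthreads or std::mutex.
class CAPABILITY("mutex") Mutex {
 public:
  void Lock() ACQUIRE();
  void Unlock() RELEASE();
};

Mutex mark_stack_lock_;

// Positive requirement: only callable while mark_stack_lock_ is held exclusively.
void ExpandMarkStackSketch() REQUIRES(mark_stack_lock_) { /* grow the stack */ }

// Shared requirement: a reader (shared) hold of the capability suffices.
void InspectMarkStackSketch() SHARED_REQUIRES(mark_stack_lock_) { /* read only */ }

// Negative requirement: the caller must provably NOT hold the lock, because
// this function acquires it itself; -Wthread-safety-negative flags call sites
// that might already hold it and would self-deadlock on a non-reentrant mutex.
void PushOnMarkStackSketch() REQUIRES(!mark_stack_lock_) {
  mark_stack_lock_.Lock();
  ExpandMarkStackSketch();  // fine here: the lock is held
  mark_stack_lock_.Unlock();
}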
- virtual void BindBitmaps() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + virtual void BindBitmaps() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); private: DISALLOW_IMPLICIT_CONSTRUCTORS(PartialMarkSweep); diff --git a/runtime/gc/collector/semi_space-inl.h b/runtime/gc/collector/semi_space-inl.h index a7de44fc93..06d20f583a 100644 --- a/runtime/gc/collector/semi_space-inl.h +++ b/runtime/gc/collector/semi_space-inl.h @@ -83,6 +83,14 @@ inline void SemiSpace::MarkObject( } } +template<bool kPoisonReferences> +inline void SemiSpace::MarkObjectIfNotInToSpace( + mirror::ObjectReference<kPoisonReferences, mirror::Object>* obj_ptr) { + if (!to_space_->HasAddress(obj_ptr->AsMirrorPtr())) { + MarkObject(obj_ptr); + } +} + } // namespace collector } // namespace gc } // namespace art diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc index 2a9f47a577..63def2452b 100644 --- a/runtime/gc/collector/semi_space.cc +++ b/runtime/gc/collector/semi_space.cc @@ -273,8 +273,7 @@ void SemiSpace::MarkingPhase() { class SemiSpaceScanObjectVisitor { public: explicit SemiSpaceScanObjectVisitor(SemiSpace* ss) : semi_space_(ss) {} - void operator()(Object* obj) const EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, - Locks::heap_bitmap_lock_) { + void operator()(Object* obj) const REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { DCHECK(obj != nullptr); semi_space_->ScanObject(obj); } @@ -289,15 +288,33 @@ class SemiSpaceVerifyNoFromSpaceReferencesVisitor { from_space_(from_space) {} void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE { + SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE { mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset); if (from_space_->HasAddress(ref)) { Runtime::Current()->GetHeap()->DumpObject(LOG(INFO), obj); LOG(FATAL) << ref << " found in from space"; } } + + // TODO: Remove NO_THREAD_SAFETY_ANALYSIS when clang better understands visitors. + void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const + NO_THREAD_SAFETY_ANALYSIS { + if (!root->IsNull()) { + VisitRoot(root); + } + } + + void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const + NO_THREAD_SAFETY_ANALYSIS { + if (kIsDebugBuild) { + Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current()); + Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current()); + } + CHECK(!from_space_->HasAddress(root->AsMirrorPtr())); + } + private: - space::ContinuousMemMapAllocSpace* from_space_; + space::ContinuousMemMapAllocSpace* const from_space_; }; void SemiSpace::VerifyNoFromSpaceReferences(Object* obj) { @@ -310,10 +327,11 @@ class SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor { public: explicit SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor(SemiSpace* ss) : semi_space_(ss) {} void operator()(Object* obj) const - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) { DCHECK(obj != nullptr); semi_space_->VerifyNoFromSpaceReferences(obj); } + private: SemiSpace* const semi_space_; }; @@ -665,17 +683,35 @@ class SemiSpaceMarkObjectVisitor { } void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const ALWAYS_INLINE - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { + REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { // Object was already verified when we scanned it. 
collector_->MarkObject(obj->GetFieldObjectReferenceAddr<kVerifyNone>(offset)); } void operator()(mirror::Class* klass, mirror::Reference* ref) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { + REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { collector_->DelayReferenceReferent(klass, ref); } + // TODO: Remove NO_THREAD_SAFETY_ANALYSIS when clang better understands visitors. + void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const + NO_THREAD_SAFETY_ANALYSIS { + if (!root->IsNull()) { + VisitRoot(root); + } + } + + void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const + NO_THREAD_SAFETY_ANALYSIS { + if (kIsDebugBuild) { + Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current()); + Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current()); + } + // We may visit the same root multiple times, so avoid marking things in the to-space since + // this is not handled by the GC. + collector_->MarkObjectIfNotInToSpace(root); + } + private: SemiSpace* const collector_; }; diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h index 6b7ea0d31a..b9246ca2fc 100644 --- a/runtime/gc/collector/semi_space.h +++ b/runtime/gc/collector/semi_space.h @@ -66,13 +66,13 @@ class SemiSpace : public GarbageCollector { virtual void RunPhases() OVERRIDE NO_THREAD_SAFETY_ANALYSIS; virtual void InitializePhase(); - virtual void MarkingPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::heap_bitmap_lock_); - virtual void ReclaimPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::heap_bitmap_lock_); - virtual void FinishPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); + virtual void MarkingPhase() REQUIRES(Locks::mutator_lock_) + REQUIRES(!Locks::heap_bitmap_lock_); + virtual void ReclaimPhase() REQUIRES(Locks::mutator_lock_) + REQUIRES(!Locks::heap_bitmap_lock_); + virtual void FinishPhase() REQUIRES(Locks::mutator_lock_); void MarkReachableObjects() - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_); + REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_); virtual GcType GetGcType() const OVERRIDE { return kGcTypePartial; } @@ -101,94 +101,98 @@ class SemiSpace : public GarbageCollector { // Updates obj_ptr if the object has moved. 
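Where the analysis cannot see the caller's locks at all, the hunks above keep NO_THREAD_SAFETY_ANALYSIS and substitute explicit runtime assertions in debug builds (the kIsDebugBuild blocks asserting the mutator and heap bitmap locks); the TODO notes this is a stopgap until clang can follow these visitor templates. A generic sketch of that fallback, with an assumed owner-tracking lock rather than ART's Mutex:

#include <atomic>
#include <cassert>
#include <mutex>
#include <thread>

#define NO_THREAD_SAFETY_ANALYSIS __attribute__((no_thread_safety_analysis))

#ifndef NDEBUG
static constexpr bool kIsDebugBuild = true;
#else
static constexpr bool kIsDebugBuild = false;
#endif

// Toy lock that remembers its owner so "is it held by this thread?" can be
// asserted at runtime when the static analysis has been switched off.
class OwnerTrackingLock {
 public:
  void Lock() { mu_.lock(); owner_.store(std::this_thread::get_id()); }
  void Unlock() { owner_.store(std::thread::id()); mu_.unlock(); }
  void AssertHeldByCurrentThread() const {
    assert(owner_.load() == std::this_thread::get_id());
  }
 private:
  std::mutex mu_;
  std::atomic<std::thread::id> owner_{std::thread::id()};
};

OwnerTrackingLock heap_bitmap_lock_;

// The annotation is dropped because a templated visitor hides the caller's
// lock state from clang; a debug-only runtime assertion covers the gap.
void VisitRootSketch(const void* root) NO_THREAD_SAFETY_ANALYSIS {
  if (kIsDebugBuild) {
    heap_bitmap_lock_.AssertHeldByCurrentThread();
  }
  (void)root;  // ...verify or mark the root here...
}

int main() {
  heap_bitmap_lock_.Lock();
  int dummy = 0;
  VisitRootSketch(&dummy);  // OK: the lock is held by this thread
  heap_bitmap_lock_.Unlock();
  return 0;
}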
template<bool kPoisonReferences> void MarkObject(mirror::ObjectReference<kPoisonReferences, mirror::Object>* obj_ptr) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); + REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_); + + template<bool kPoisonReferences> + void MarkObjectIfNotInToSpace(mirror::ObjectReference<kPoisonReferences, mirror::Object>* obj_ptr) + REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_); virtual mirror::Object* MarkObject(mirror::Object* root) OVERRIDE - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); + REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_); virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* obj_ptr) OVERRIDE - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); + REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_); void ScanObject(mirror::Object* obj) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); + REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_); void VerifyNoFromSpaceReferences(mirror::Object* obj) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); + SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_); // Marks the root set at the start of a garbage collection. void MarkRoots() - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); + REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_); // Bind the live bits to the mark bits of bitmaps for spaces that are never collected, ie // the image. Mark that portion of the heap as immune. - virtual void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::heap_bitmap_lock_); + virtual void BindBitmaps() SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Locks::heap_bitmap_lock_); void UnBindBitmaps() - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + REQUIRES(Locks::heap_bitmap_lock_); - void ProcessReferences(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); + void ProcessReferences(Thread* self) REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::mutator_lock_); // Sweeps unmarked objects to complete the garbage collection. - virtual void Sweep(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + virtual void Sweep(bool swap_bitmaps) REQUIRES(Locks::heap_bitmap_lock_); // Sweeps unmarked objects to complete the garbage collection. 
- void SweepLargeObjects(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + void SweepLargeObjects(bool swap_bitmaps) REQUIRES(Locks::heap_bitmap_lock_); void SweepSystemWeaks() - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); + SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_); virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) OVERRIDE - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_); + REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_); virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count, const RootInfo& info) OVERRIDE - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_); + REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_); virtual mirror::Object* MarkNonForwardedObject(mirror::Object* obj) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); + REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_); // Schedules an unmarked object for reference processing. void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); + SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_); protected: // Returns null if the object is not marked, otherwise returns the forwarding address (same as // object for non movable things). virtual mirror::Object* IsMarked(mirror::Object* object) OVERRIDE - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + REQUIRES(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::heap_bitmap_lock_); virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* object) OVERRIDE - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + REQUIRES(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::heap_bitmap_lock_); // Marks or unmarks a large object based on whether or not set is true. If set is true, then we // mark, otherwise we unmark. bool MarkLargeObject(const mirror::Object* obj) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::heap_bitmap_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Expand mark stack to 2x its current size. - void ResizeMarkStack(size_t new_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void ResizeMarkStack(size_t new_size) SHARED_REQUIRES(Locks::mutator_lock_); // Returns true if we should sweep the space. virtual bool ShouldSweepSpace(space::ContinuousSpace* space) const; // Push an object onto the mark stack. - void MarkStackPush(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void MarkStackPush(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_); void UpdateAndMarkModUnion() - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::heap_bitmap_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Recursively blackens objects on the mark stack. void ProcessMarkStack() - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_); + REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_); inline mirror::Object* GetForwardingAddressInFromSpace(mirror::Object* obj) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Revoke all the thread-local buffers. 
void RevokeAllThreadLocalBuffers(); diff --git a/runtime/gc/collector/sticky_mark_sweep.h b/runtime/gc/collector/sticky_mark_sweep.h index b9ef137e89..e8e70de228 100644 --- a/runtime/gc/collector/sticky_mark_sweep.h +++ b/runtime/gc/collector/sticky_mark_sweep.h @@ -36,15 +36,15 @@ class StickyMarkSweep FINAL : public PartialMarkSweep { protected: // Bind the live bits to the mark bits of bitmaps for all spaces, all spaces other than the // alloc space will be marked as immune. - void BindBitmaps() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void BindBitmaps() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); void MarkReachableObjects() OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::heap_bitmap_lock_); void Sweep(bool swap_bitmaps) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::heap_bitmap_lock_); private: DISALLOW_IMPLICIT_CONSTRUCTORS(StickyMarkSweep); diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc index 2b94cf1691..5f617bd274 100644 --- a/runtime/gc/heap.cc +++ b/runtime/gc/heap.cc @@ -1277,7 +1277,7 @@ void Heap::TrimSpaces(Thread* self) { FinishGC(self, collector::kGcTypeNone); size_t native_reclaimed = 0; -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ // Only trim the native heap if we don't care about pauses. if (!CareAboutPauseTimes()) { #if defined(USE_DLMALLOC) @@ -1290,7 +1290,7 @@ void Heap::TrimSpaces(Thread* self) { UNIMPLEMENTED(WARNING) << "Add trimming support"; #endif } -#endif // HAVE_ANDROID_OS +#endif // __ANDROID__ uint64_t end_ns = NanoTime(); VLOG(heap) << "Heap trim of managed (duration=" << PrettyDuration(gc_heap_end_ns - start_ns) << ", advised=" << PrettySize(managed_reclaimed) << ") and native (duration=" @@ -1695,11 +1695,11 @@ uint64_t Heap::GetBytesAllocatedEver() const { class InstanceCounter { public: InstanceCounter(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from, uint64_t* counts) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : classes_(classes), use_is_assignable_from_(use_is_assignable_from), counts_(counts) { } static void Callback(mirror::Object* obj, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { InstanceCounter* instance_counter = reinterpret_cast<InstanceCounter*>(arg); mirror::Class* instance_class = obj->GetClass(); CHECK(instance_class != nullptr); @@ -1731,11 +1731,11 @@ void Heap::CountInstances(const std::vector<mirror::Class*>& classes, bool use_i class InstanceCollector { public: InstanceCollector(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : class_(c), max_count_(max_count), instances_(instances) { } static void Callback(mirror::Object* obj, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { DCHECK(arg != nullptr); InstanceCollector* instance_collector = reinterpret_cast<InstanceCollector*>(arg); if (obj->GetClass() == instance_collector->class_) { @@ -1763,12 +1763,12 @@ class ReferringObjectsFinder { public: ReferringObjectsFinder(mirror::Object* object, int32_t max_count, std::vector<mirror::Object*>& 
referring_objects) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : object_(object), max_count_(max_count), referring_objects_(referring_objects) { } static void Callback(mirror::Object* obj, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { reinterpret_cast<ReferringObjectsFinder*>(arg)->operator()(obj); } @@ -1780,14 +1780,18 @@ class ReferringObjectsFinder { } // For Object::VisitReferences. - void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const + SHARED_REQUIRES(Locks::mutator_lock_) { mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset); if (ref == object_ && (max_count_ == 0 || referring_objects_.size() < max_count_)) { referring_objects_.push_back(obj); } } + void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) + const {} + void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {} + private: const mirror::Object* const object_; const uint32_t max_count_; @@ -2111,7 +2115,7 @@ class ZygoteCompactingCollector FINAL : public collector::SemiSpace { const bool is_running_on_memory_tool_; static void Callback(mirror::Object* obj, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(arg != nullptr); BinContext* context = reinterpret_cast<BinContext*>(arg); ZygoteCompactingCollector* collector = context->collector_; @@ -2139,7 +2143,7 @@ class ZygoteCompactingCollector FINAL : public collector::SemiSpace { } virtual mirror::Object* MarkNonForwardedObject(mirror::Object* obj) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) { + REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) { size_t obj_size = obj->SizeOf(); size_t alloc_size = RoundUp(obj_size, kObjectAlignment); mirror::Object* forward_address; @@ -2583,7 +2587,7 @@ class RootMatchesObjectVisitor : public SingleRootVisitor { explicit RootMatchesObjectVisitor(const mirror::Object* obj) : obj_(obj) { } void VisitRoot(mirror::Object* root, const RootInfo& info) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { if (root == obj_) { LOG(INFO) << "Object " << obj_ << " is a root " << info.ToString(); } @@ -2605,23 +2609,22 @@ class ScanVisitor { class VerifyReferenceVisitor : public SingleRootVisitor { public: explicit VerifyReferenceVisitor(Heap* heap, Atomic<size_t>* fail_count, bool verify_referent) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) + SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) : heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {} size_t GetFailureCount() const { return fail_count_->LoadSequentiallyConsistent(); } - void operator()(mirror::Class* klass, mirror::Reference* ref) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - UNUSED(klass); + void operator()(mirror::Class* klass ATTRIBUTE_UNUSED, mirror::Reference* ref) const + SHARED_REQUIRES(Locks::mutator_lock_) { if (verify_referent_) { VerifyReference(ref, ref->GetReferent(), mirror::Reference::ReferentOffset()); } } - void operator()(mirror::Object* obj, MemberOffset offset, bool /*is_static*/) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void 
operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const + SHARED_REQUIRES(Locks::mutator_lock_) { VerifyReference(obj, obj->GetFieldObject<mirror::Object>(offset), offset); } @@ -2629,8 +2632,20 @@ class VerifyReferenceVisitor : public SingleRootVisitor { return heap_->IsLiveObjectLocked(obj, true, false, true); } - void VisitRoot(mirror::Object* root, const RootInfo& root_info) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const + SHARED_REQUIRES(Locks::mutator_lock_) { + if (!root->IsNull()) { + VisitRoot(root); + } + } + void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const + SHARED_REQUIRES(Locks::mutator_lock_) { + const_cast<VerifyReferenceVisitor*>(this)->VisitRoot( + root->AsMirrorPtr(), RootInfo(kRootVMInternal)); + } + + virtual void VisitRoot(mirror::Object* root, const RootInfo& root_info) OVERRIDE + SHARED_REQUIRES(Locks::mutator_lock_) { if (root == nullptr) { LOG(ERROR) << "Root is null with info " << root_info.GetType(); } else if (!VerifyReference(nullptr, root, MemberOffset(0))) { @@ -2747,8 +2762,8 @@ class VerifyObjectVisitor { : heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) { } - void operator()(mirror::Object* obj) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { + void operator()(mirror::Object* obj) + SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { // Note: we are verifying the references in obj but not obj itself, this is because obj must // be live or else how did we find it in the live bitmap? VerifyReferenceVisitor visitor(heap_, fail_count_, verify_referent_); @@ -2757,13 +2772,12 @@ class VerifyObjectVisitor { } static void VisitCallback(mirror::Object* obj, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { VerifyObjectVisitor* visitor = reinterpret_cast<VerifyObjectVisitor*>(arg); visitor->operator()(obj); } - void VerifyRoots() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::heap_bitmap_lock_) { + void VerifyRoots() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_) { ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_); VerifyReferenceVisitor visitor(heap_, fail_count_, verify_referent_); Runtime::Current()->VisitRoots(&visitor); @@ -2855,11 +2869,16 @@ size_t Heap::VerifyHeapReferences(bool verify_referents) { class VerifyReferenceCardVisitor { public: VerifyReferenceCardVisitor(Heap* heap, bool* failed) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, + SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) : heap_(heap), failed_(failed) { } + // There is no card marks for native roots on a class. + void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) + const {} + void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {} + // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for // annotalysis on visitors. 
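These hunks also switch the unused-parameter idiom: a UNUSED(klass) statement in the body becomes klass ATTRIBUTE_UNUSED on the parameter itself, which records the intent at the signature. A short comparison, assuming ATTRIBUTE_UNUSED expands to the usual GCC/Clang attribute:

#define ATTRIBUTE_UNUSED __attribute__((__unused__))  // assumed expansion

// Old idiom: a UNUSED(x) macro typically expands to a cast-to-void statement.
int VisitOldSketch(int klass, int ref) {
  (void)klass;  // silences -Wunused-parameter from inside the body
  return ref + 1;
}

// New idiom used above: annotate the parameter at the declaration.
int VisitNewSketch(int klass ATTRIBUTE_UNUSED, int ref) {
  return ref + 1;
}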
void operator()(mirror::Object* obj, MemberOffset offset, bool is_static) const @@ -2932,7 +2951,7 @@ class VerifyLiveStackReferences { failed_(false) {} void operator()(mirror::Object* obj) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { VerifyReferenceCardVisitor visitor(heap_, const_cast<bool*>(&failed_)); obj->VisitReferences<true>(visitor, VoidFunctor()); } @@ -3425,7 +3444,7 @@ class Heap::ConcurrentGCTask : public HeapTask { const bool force_full_; // If true, force full (or partial) collection. }; -static bool CanAddHeapTask(Thread* self) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_) { +static bool CanAddHeapTask(Thread* self) REQUIRES(!Locks::runtime_shutdown_lock_) { Runtime* runtime = Runtime::Current(); return runtime != nullptr && runtime->IsFinishedStarting() && !runtime->IsShuttingDown(self) && !self->IsHandlingStackOverflow(); diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h index ee3d510812..09c18b8f75 100644 --- a/runtime/gc/heap.h +++ b/runtime/gc/heap.h @@ -188,26 +188,30 @@ class Heap { template <bool kInstrumented, typename PreFenceVisitor> mirror::Object* AllocObject(Thread* self, mirror::Class* klass, size_t num_bytes, const PreFenceVisitor& pre_fence_visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return AllocObjectWithAllocator<kInstrumented, true>(self, klass, num_bytes, - GetCurrentAllocator(), - pre_fence_visitor); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_, + !Roles::uninterruptible_) { + return AllocObjectWithAllocator<kInstrumented, true>( + self, klass, num_bytes, GetCurrentAllocator(), pre_fence_visitor); } template <bool kInstrumented, typename PreFenceVisitor> mirror::Object* AllocNonMovableObject(Thread* self, mirror::Class* klass, size_t num_bytes, const PreFenceVisitor& pre_fence_visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return AllocObjectWithAllocator<kInstrumented, true>(self, klass, num_bytes, - GetCurrentNonMovingAllocator(), - pre_fence_visitor); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_, + !Roles::uninterruptible_) { + return AllocObjectWithAllocator<kInstrumented, true>( + self, klass, num_bytes, GetCurrentNonMovingAllocator(), pre_fence_visitor); } template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor> ALWAYS_INLINE mirror::Object* AllocObjectWithAllocator( Thread* self, mirror::Class* klass, size_t byte_count, AllocatorType allocator, const PreFenceVisitor& pre_fence_visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_, + !Roles::uninterruptible_); AllocatorType GetCurrentAllocator() const { return current_allocator_; @@ -219,29 +223,29 @@ class Heap { // Visit all of the live objects in the heap. 
void VisitObjects(ObjectCallback callback, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::heap_bitmap_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_); void VisitObjectsPaused(ObjectCallback callback, void* arg) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::heap_bitmap_lock_); + REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_); void CheckPreconditionsForAllocObject(mirror::Class* c, size_t byte_count) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - void RegisterNativeAllocation(JNIEnv* env, size_t bytes); - void RegisterNativeFree(JNIEnv* env, size_t bytes); + void RegisterNativeAllocation(JNIEnv* env, size_t bytes) + REQUIRES(!*gc_complete_lock_, !*pending_task_lock_); + void RegisterNativeFree(JNIEnv* env, size_t bytes) + REQUIRES(!*gc_complete_lock_, !*pending_task_lock_); // Change the allocator, updates entrypoints. void ChangeAllocator(AllocatorType allocator) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_); + REQUIRES(Locks::mutator_lock_, !Locks::runtime_shutdown_lock_); // Transition the garbage collector during runtime, may copy objects from one space to another. - void TransitionCollector(CollectorType collector_type); + void TransitionCollector(CollectorType collector_type) REQUIRES(!*gc_complete_lock_); // Change the collector to be one of the possible options (MS, CMS, SS). void ChangeCollector(CollectorType collector_type) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::mutator_lock_); // The given reference is believed to be to an object in the Java heap, check the soundness of it. // TODO: NO_THREAD_SAFETY_ANALYSIS since we call this everywhere and it is impossible to find a @@ -249,61 +253,64 @@ class Heap { void VerifyObjectBody(mirror::Object* o) NO_THREAD_SAFETY_ANALYSIS; // Check sanity of all live references. - void VerifyHeap() LOCKS_EXCLUDED(Locks::heap_bitmap_lock_); + void VerifyHeap() REQUIRES(!Locks::heap_bitmap_lock_); // Returns how many failures occured. size_t VerifyHeapReferences(bool verify_referents = true) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::mutator_lock_, !*gc_complete_lock_); bool VerifyMissingCardMarks() - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); + REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_); // A weaker test than IsLiveObject or VerifyObject that doesn't require the heap lock, // and doesn't abort on error, allowing the caller to report more // meaningful diagnostics. bool IsValidObjectAddress(const mirror::Object* obj) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Faster alternative to IsHeapAddress since finding if an object is in the large object space is // very slow. bool IsNonDiscontinuousSpaceHeapAddress(const mirror::Object* obj) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Returns true if 'obj' is a live heap object, false otherwise (including for invalid addresses). // Requires the heap lock to be held. 
bool IsLiveObjectLocked(mirror::Object* obj, bool search_allocation_stack = true, bool search_live_stack = true, bool sorted = false) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); + SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_); // Returns true if there is any chance that the object (obj) will move. - bool IsMovableObject(const mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsMovableObject(const mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_); // Enables us to compacting GC until objects are released. - void IncrementDisableMovingGC(Thread* self); - void DecrementDisableMovingGC(Thread* self); + void IncrementDisableMovingGC(Thread* self) REQUIRES(!*gc_complete_lock_); + void DecrementDisableMovingGC(Thread* self) REQUIRES(!*gc_complete_lock_); // Clear all of the mark bits, doesn't clear bitmaps which have the same live bits as mark bits. - void ClearMarkedObjects() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + void ClearMarkedObjects() REQUIRES(Locks::heap_bitmap_lock_); // Initiates an explicit garbage collection. - void CollectGarbage(bool clear_soft_references); + void CollectGarbage(bool clear_soft_references) + REQUIRES(!*gc_complete_lock_, !*pending_task_lock_); // Does a concurrent GC, should only be called by the GC daemon thread // through runtime. - void ConcurrentGC(Thread* self, bool force_full) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_); + void ConcurrentGC(Thread* self, bool force_full) + REQUIRES(!Locks::runtime_shutdown_lock_, !*gc_complete_lock_, !*pending_task_lock_); // Implements VMDebug.countInstancesOfClass and JDWP VM_InstanceCount. // The boolean decides whether to use IsAssignableFrom or == when comparing classes. void CountInstances(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from, uint64_t* counts) - LOCKS_EXCLUDED(Locks::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Implements JDWP RT_Instances. void GetInstances(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances) - LOCKS_EXCLUDED(Locks::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Implements JDWP OR_ReferringObjects. - void GetReferringObjects(mirror::Object* o, int32_t max_count, std::vector<mirror::Object*>& referring_objects) - LOCKS_EXCLUDED(Locks::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void GetReferringObjects(mirror::Object* o, int32_t max_count, + std::vector<mirror::Object*>& referring_objects) + REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Removes the growth limit on the alloc space so it may grow to its maximum capacity. Used to // implement dalvik.system.VMRuntime.clearGrowthLimit. @@ -311,7 +318,7 @@ class Heap { // Make the current growth limit the new maximum capacity, unmaps pages at the end of spaces // which will never be used. Used to implement dalvik.system.VMRuntime.clampGrowthLimit. - void ClampGrowthLimit() LOCKS_EXCLUDED(Locks::heap_bitmap_lock_); + void ClampGrowthLimit() REQUIRES(!Locks::heap_bitmap_lock_); // Target ideal heap utilization ratio, implements // dalvik.system.VMRuntime.getTargetHeapUtilization. 
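A spelling detail in the Heap annotations above: members such as gc_complete_lock_ and pending_task_lock_ are Mutex pointers, and the negative requirements are written with a dereference (!*gc_complete_lock_), presumably so the capability expression names the Mutex object rather than the pointer. A short sketch, repeating the macros and the declaration-only Mutex from the earlier annotation sketch so it stands alone:

#define CAPABILITY(x)  __attribute__((capability(x)))
#define ACQUIRE(...)   __attribute__((acquire_capability(__VA_ARGS__)))
#define RELEASE(...)   __attribute__((release_capability(__VA_ARGS__)))
#define REQUIRES(...)  __attribute__((requires_capability(__VA_ARGS__)))

class CAPABILITY("mutex") Mutex {
 public:
  void Lock() ACQUIRE();
  void Unlock() RELEASE();
};

class HeapSketch {
 public:
  // Must be entered without the lock: the method takes *gc_complete_lock_ itself.
  void FinishGC() REQUIRES(!*gc_complete_lock_) {
    gc_complete_lock_->Lock();
    // ...record that the GC finished, wake up waiters...
    gc_complete_lock_->Unlock();
  }

 private:
  Mutex* gc_complete_lock_ = nullptr;  // pointer member, hence the '*' in the annotation
};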
@@ -326,9 +333,9 @@ class Heap { // Set the heap's private space pointers to be the same as the space based on it's type. Public // due to usage by tests. void SetSpaceAsDefault(space::ContinuousSpace* continuous_space) - LOCKS_EXCLUDED(Locks::heap_bitmap_lock_); - void AddSpace(space::Space* space) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_); - void RemoveSpace(space::Space* space) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_); + REQUIRES(!Locks::heap_bitmap_lock_); + void AddSpace(space::Space* space) REQUIRES(!Locks::heap_bitmap_lock_); + void RemoveSpace(space::Space* space) REQUIRES(!Locks::heap_bitmap_lock_); // Set target ideal heap utilization ratio, implements // dalvik.system.VMRuntime.setTargetHeapUtilization. @@ -341,10 +348,11 @@ class Heap { // Blocks the caller until the garbage collector becomes idle and returns the type of GC we // waited for. collector::GcType WaitForGcToComplete(GcCause cause, Thread* self) - LOCKS_EXCLUDED(gc_complete_lock_); + REQUIRES(!*gc_complete_lock_); // Update the heap's process state to a new value, may cause compaction to occur. - void UpdateProcessState(ProcessState process_state); + void UpdateProcessState(ProcessState process_state) + REQUIRES(!*pending_task_lock_, !*gc_complete_lock_); const std::vector<space::ContinuousSpace*>& GetContinuousSpaces() const { return continuous_spaces_; @@ -428,7 +436,7 @@ class Heap { } // Returns the number of objects currently allocated. - size_t GetObjectsAllocated() const LOCKS_EXCLUDED(Locks::heap_bitmap_lock_); + size_t GetObjectsAllocated() const REQUIRES(!Locks::heap_bitmap_lock_); // Returns the total number of objects allocated since the heap was created. uint64_t GetObjectsAllocatedEver() const; @@ -487,13 +495,13 @@ class Heap { bool fail_ok) const; space::Space* FindSpaceFromObject(const mirror::Object*, bool fail_ok) const; - void DumpForSigQuit(std::ostream& os); + void DumpForSigQuit(std::ostream& os) REQUIRES(!*gc_complete_lock_); // Do a pending collector transition. - void DoPendingCollectorTransition(); + void DoPendingCollectorTransition() REQUIRES(!*gc_complete_lock_); // Deflate monitors, ... and trim the spaces. - void Trim(Thread* self) LOCKS_EXCLUDED(gc_complete_lock_); + void Trim(Thread* self) REQUIRES(!*gc_complete_lock_); void RevokeThreadLocalBuffers(Thread* thread); void RevokeRosAllocThreadLocalBuffers(Thread* thread); @@ -501,17 +509,17 @@ class Heap { void AssertThreadLocalBuffersAreRevoked(Thread* thread); void AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked(); void RosAllocVerification(TimingLogger* timings, const char* name) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::mutator_lock_); - accounting::HeapBitmap* GetLiveBitmap() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { + accounting::HeapBitmap* GetLiveBitmap() SHARED_REQUIRES(Locks::heap_bitmap_lock_) { return live_bitmap_.get(); } - accounting::HeapBitmap* GetMarkBitmap() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { + accounting::HeapBitmap* GetMarkBitmap() SHARED_REQUIRES(Locks::heap_bitmap_lock_) { return mark_bitmap_.get(); } - accounting::ObjectStack* GetLiveStack() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { + accounting::ObjectStack* GetLiveStack() SHARED_REQUIRES(Locks::heap_bitmap_lock_) { return live_stack_.get(); } @@ -519,13 +527,12 @@ class Heap { // Mark and empty stack. 
void FlushAllocStack() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::heap_bitmap_lock_); // Revoke all the thread-local allocation stacks. void RevokeAllThreadLocalAllocationStacks(Thread* self) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_, Locks::thread_list_lock_); + REQUIRES(Locks::mutator_lock_, !Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_); // Mark all the objects in the allocation stack in the specified bitmap. // TODO: Refactor? @@ -533,23 +540,21 @@ class Heap { accounting::SpaceBitmap<kObjectAlignment>* bitmap2, accounting::SpaceBitmap<kLargeObjectAlignment>* large_objects, accounting::ObjectStack* stack) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_); // Mark the specified allocation stack as live. void MarkAllocStackAsLive(accounting::ObjectStack* stack) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_); // Unbind any bound bitmaps. - void UnBindBitmaps() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + void UnBindBitmaps() REQUIRES(Locks::heap_bitmap_lock_); // DEPRECATED: Should remove in "near" future when support for multiple image spaces is added. // Assumes there is only one image space. space::ImageSpace* GetImageSpace() const; // Permenantly disable moving garbage collection. - void DisableMovingGc(); + void DisableMovingGc() REQUIRES(!*gc_complete_lock_); space::DlMallocSpace* GetDlMallocSpace() const { return dlmalloc_space_; @@ -595,8 +600,8 @@ class Heap { std::string SafePrettyTypeOf(mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS; // GC performance measuring - void DumpGcPerformanceInfo(std::ostream& os); - void ResetGcPerformanceInfo(); + void DumpGcPerformanceInfo(std::ostream& os) REQUIRES(!*gc_complete_lock_); + void ResetGcPerformanceInfo() REQUIRES(!*gc_complete_lock_); // Returns true if we currently care about pause times. bool CareAboutPauseTimes() const { @@ -656,16 +661,16 @@ class Heap { return false; } - bool IsMovingGCDisabled(Thread* self) { + bool IsMovingGCDisabled(Thread* self) REQUIRES(!*gc_complete_lock_) { MutexLock mu(self, *gc_complete_lock_); return disable_moving_gc_count_ > 0; } // Request an asynchronous trim. - void RequestTrim(Thread* self) LOCKS_EXCLUDED(pending_task_lock_); + void RequestTrim(Thread* self) REQUIRES(!*pending_task_lock_); // Request asynchronous GC. - void RequestConcurrentGC(Thread* self, bool force_full) LOCKS_EXCLUDED(pending_task_lock_); + void RequestConcurrentGC(Thread* self, bool force_full) REQUIRES(!*pending_task_lock_); // Whether or not we may use a garbage collector, used so that we only create collectors we need. 
bool MayUseCollector(CollectorType type) const; @@ -680,8 +685,8 @@ class Heap { uint64_t GetGcTime() const; uint64_t GetBlockingGcCount() const; uint64_t GetBlockingGcTime() const; - void DumpGcCountRateHistogram(std::ostream& os) const; - void DumpBlockingGcCountRateHistogram(std::ostream& os) const; + void DumpGcCountRateHistogram(std::ostream& os) const REQUIRES(!*gc_complete_lock_); + void DumpBlockingGcCountRateHistogram(std::ostream& os) const REQUIRES(!*gc_complete_lock_); // Allocation tracking support // Callers to this function use double-checked locking to ensure safety on allocation_records_ @@ -689,33 +694,33 @@ class Heap { return alloc_tracking_enabled_.LoadRelaxed(); } - void SetAllocTrackingEnabled(bool enabled) EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_) { + void SetAllocTrackingEnabled(bool enabled) REQUIRES(Locks::alloc_tracker_lock_) { alloc_tracking_enabled_.StoreRelaxed(enabled); } AllocRecordObjectMap* GetAllocationRecords() const - EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_) { + REQUIRES(Locks::alloc_tracker_lock_) { return allocation_records_.get(); } void SetAllocationRecords(AllocRecordObjectMap* records) - EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_); + REQUIRES(Locks::alloc_tracker_lock_); void VisitAllocationRecords(RootVisitor* visitor) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::alloc_tracker_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Locks::alloc_tracker_lock_); void SweepAllocationRecords(IsMarkedVisitor* visitor) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::alloc_tracker_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Locks::alloc_tracker_lock_); void DisallowNewAllocationRecords() const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::alloc_tracker_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Locks::alloc_tracker_lock_); void AllowNewAllocationRecords() const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::alloc_tracker_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Locks::alloc_tracker_lock_); private: class ConcurrentGCTask; @@ -726,10 +731,10 @@ class Heap { collector::GarbageCollector* Compact(space::ContinuousMemMapAllocSpace* target_space, space::ContinuousMemMapAllocSpace* source_space, GcCause gc_cause) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::mutator_lock_); void LogGC(GcCause gc_cause, collector::GarbageCollector* collector); - void FinishGC(Thread* self, collector::GcType gc_type) LOCKS_EXCLUDED(gc_complete_lock_); + void FinishGC(Thread* self, collector::GcType gc_type) REQUIRES(!*gc_complete_lock_); // Create a mem map with a preferred base address. 
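The allocation-tracking accessors above pair a relaxed atomic flag (IsAllocTrackingEnabled) with Locks::alloc_tracker_lock_ guarding the records themselves; that is the double-checked locking the comment refers to: the flag answers the common "tracking is off" case without taking the lock, and the lock plus a re-check protect the actual record storage. A generic sketch of the shape with std::atomic and std::mutex; the record type and method names here are placeholders:

#include <atomic>
#include <memory>
#include <mutex>
#include <vector>

class AllocTrackingSketch {
 public:
  void RecordAllocation(void* obj) {
    // Fast path: no lock when tracking is disabled (the common case).
    if (!enabled_.load(std::memory_order_relaxed)) {
      return;
    }
    std::lock_guard<std::mutex> lock(tracker_lock_);
    if (!enabled_.load(std::memory_order_relaxed)) {
      return;  // re-check: tracking may have been disabled before we got the lock
    }
    records_->push_back(obj);
  }

  void SetEnabled(bool enabled) {
    std::lock_guard<std::mutex> lock(tracker_lock_);
    if (enabled && records_ == nullptr) {
      records_.reset(new std::vector<void*>());
    }
    enabled_.store(enabled, std::memory_order_relaxed);
  }

 private:
  std::atomic<bool> enabled_{false};
  std::mutex tracker_lock_;                      // stand-in for alloc_tracker_lock_
  std::unique_ptr<std::vector<void*>> records_;  // only touched with the lock held
};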
static MemMap* MapAnonymousPreferredAddress(const char* name, uint8_t* request_begin, @@ -758,10 +763,10 @@ class Heap { collector_type == kCollectorTypeHomogeneousSpaceCompact; } bool ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ALWAYS_INLINE void CheckConcurrentGC(Thread* self, size_t new_num_bytes_allocated, mirror::Object** obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*pending_task_lock_, !*gc_complete_lock_); accounting::ObjectStack* GetMarkStack() { return mark_stack_.get(); @@ -771,7 +776,8 @@ class Heap { template <bool kInstrumented, typename PreFenceVisitor> mirror::Object* AllocLargeObject(Thread* self, mirror::Class** klass, size_t byte_count, const PreFenceVisitor& pre_fence_visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_); // Handles Allocate()'s slow allocation path with GC involved after // an initial allocation attempt failed. @@ -779,17 +785,17 @@ class Heap { size_t* bytes_allocated, size_t* usable_size, size_t* bytes_tl_bulk_allocated, mirror::Class** klass) - LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::thread_suspend_count_lock_, !*gc_complete_lock_, !*pending_task_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Allocate into a specific space. mirror::Object* AllocateInto(Thread* self, space::AllocSpace* space, mirror::Class* c, size_t bytes) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Need to do this with mutators paused so that somebody doesn't accidentally allocate into the // wrong space. - void SwapSemiSpaces() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); + void SwapSemiSpaces() REQUIRES(Locks::mutator_lock_); // Try to allocate a number of bytes, this function never does any GCs. Needs to be inlined so // that the switch statement is constant optimized in the entrypoints. @@ -798,17 +804,17 @@ class Heap { size_t alloc_size, size_t* bytes_allocated, size_t* usable_size, size_t* bytes_tl_bulk_allocated) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template <bool kGrow> ALWAYS_INLINE bool IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t alloc_size); // Returns true if the address passed in is within the address range of a continuous space. bool IsValidContinuousSpaceObjectAddress(const mirror::Object* obj) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Run the finalizers. If timeout is non zero, then we use the VMRuntime version. void RunFinalization(JNIEnv* env, uint64_t timeout); @@ -816,36 +822,34 @@ class Heap { // Blocks the caller until the garbage collector becomes idle and returns the type of GC we // waited for. 
collector::GcType WaitForGcToCompleteLocked(GcCause cause, Thread* self) - EXCLUSIVE_LOCKS_REQUIRED(gc_complete_lock_); + REQUIRES(gc_complete_lock_); void RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time) - LOCKS_EXCLUDED(pending_task_lock_); + REQUIRES(!*pending_task_lock_); void RequestConcurrentGCAndSaveObject(Thread* self, bool force_full, mirror::Object** obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*pending_task_lock_); bool IsGCRequestPending() const; // Sometimes CollectGarbageInternal decides to run a different Gc than you requested. Returns // which type of Gc was actually ran. collector::GcType CollectGarbageInternal(collector::GcType gc_plan, GcCause gc_cause, bool clear_soft_references) - LOCKS_EXCLUDED(gc_complete_lock_, - Locks::heap_bitmap_lock_, - Locks::thread_suspend_count_lock_); + REQUIRES(!*gc_complete_lock_, !Locks::heap_bitmap_lock_, !Locks::thread_suspend_count_lock_, + !*pending_task_lock_); void PreGcVerification(collector::GarbageCollector* gc) - LOCKS_EXCLUDED(Locks::mutator_lock_); + REQUIRES(!Locks::mutator_lock_, !*gc_complete_lock_); void PreGcVerificationPaused(collector::GarbageCollector* gc) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::mutator_lock_, !*gc_complete_lock_); void PrePauseRosAllocVerification(collector::GarbageCollector* gc) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::mutator_lock_); void PreSweepingGcVerification(collector::GarbageCollector* gc) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::heap_bitmap_lock_); + REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_); void PostGcVerification(collector::GarbageCollector* gc) - LOCKS_EXCLUDED(Locks::mutator_lock_); + REQUIRES(!Locks::mutator_lock_, !*gc_complete_lock_); void PostGcVerificationPaused(collector::GarbageCollector* gc) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::mutator_lock_, !*gc_complete_lock_); // Update the watermark for the native allocated bytes based on the current number of native // bytes allocated and the target utilization ratio. @@ -855,7 +859,7 @@ class Heap { collector::GarbageCollector* FindCollectorByGcType(collector::GcType gc_type); // Create a new alloc space and compact default alloc space to it. - HomogeneousSpaceCompactResult PerformHomogeneousSpaceCompact(); + HomogeneousSpaceCompactResult PerformHomogeneousSpaceCompact() REQUIRES(!*gc_complete_lock_); // Create the main free list malloc space, either a RosAlloc space or DlMalloc space. void CreateMainMallocSpace(MemMap* mem_map, size_t initial_size, size_t growth_limit, @@ -876,10 +880,10 @@ class Heap { size_t GetPercentFree(); static void VerificationCallback(mirror::Object* obj, void* arg) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + SHARED_REQUIRES(Locks::heap_bitmap_lock_); // Swap the allocation stack with the live stack. - void SwapStacks(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SwapStacks(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_); // Clear cards and update the mod union table. When process_alloc_space_cards is true, // if clear_alloc_space_cards is true, then we clear cards instead of ageing them. We do @@ -889,15 +893,15 @@ class Heap { // Push an object onto the allocation stack. 
void PushOnAllocationStack(Thread* self, mirror::Object** obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*gc_complete_lock_, !*pending_task_lock_); void PushOnAllocationStackWithInternalGC(Thread* self, mirror::Object** obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*gc_complete_lock_, !*pending_task_lock_); void PushOnThreadLocalAllocationStackWithInternalGC(Thread* thread, mirror::Object** obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*gc_complete_lock_, !*pending_task_lock_); void ClearConcurrentGCRequest(); - void ClearPendingTrim(Thread* self) LOCKS_EXCLUDED(pending_task_lock_); - void ClearPendingCollectorTransition(Thread* self) LOCKS_EXCLUDED(pending_task_lock_); + void ClearPendingTrim(Thread* self) REQUIRES(!*pending_task_lock_); + void ClearPendingCollectorTransition(Thread* self) REQUIRES(!*pending_task_lock_); // What kind of concurrency behavior is the runtime after? Currently true for concurrent mark // sweep GC, false for other GC types. @@ -906,23 +910,23 @@ class Heap { } // Trim the managed and native spaces by releasing unused memory back to the OS. - void TrimSpaces(Thread* self) LOCKS_EXCLUDED(gc_complete_lock_); + void TrimSpaces(Thread* self) REQUIRES(!*gc_complete_lock_); // Trim 0 pages at the end of reference tables. void TrimIndirectReferenceTables(Thread* self); void VisitObjectsInternal(ObjectCallback callback, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::heap_bitmap_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_); void VisitObjectsInternalRegionSpace(ObjectCallback callback, void* arg) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::heap_bitmap_lock_); + REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_); - void UpdateGcCountRateHistograms() EXCLUSIVE_LOCKS_REQUIRED(gc_complete_lock_); + void UpdateGcCountRateHistograms() REQUIRES(gc_complete_lock_); // GC stress mode attempts to do one GC per unique backtrace. void CheckGcStressMode(Thread* self, mirror::Object** obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_); // All-known continuous spaces, where objects lie within fixed bounds. std::vector<space::ContinuousSpace*> continuous_spaces_; diff --git a/runtime/gc/reference_processor.h b/runtime/gc/reference_processor.h index 95877d13af..d9dfedb464 100644 --- a/runtime/gc/reference_processor.h +++ b/runtime/gc/reference_processor.h @@ -48,39 +48,39 @@ class ReferenceProcessor { explicit ReferenceProcessor(); void ProcessReferences(bool concurrent, TimingLogger* timings, bool clear_soft_references, gc::collector::GarbageCollector* collector) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - LOCKS_EXCLUDED(Locks::reference_processor_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::heap_bitmap_lock_) + REQUIRES(!Locks::reference_processor_lock_); // The slow path bool is contained in the reference class object, can only be set once // Only allow setting this with mutators suspended so that we can avoid using a lock in the // GetReferent fast path as an optimization. 
- void EnableSlowPath() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void EnableSlowPath() SHARED_REQUIRES(Locks::mutator_lock_); void BroadcastForSlowPath(Thread* self); // Decode the referent, may block if references are being processed. mirror::Object* GetReferent(Thread* self, mirror::Reference* reference) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::reference_processor_lock_); - void EnqueueClearedReferences(Thread* self) LOCKS_EXCLUDED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::reference_processor_lock_); + void EnqueueClearedReferences(Thread* self) REQUIRES(!Locks::mutator_lock_); void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref, collector::GarbageCollector* collector) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void UpdateRoots(IsMarkedVisitor* visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_); + SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_); // Make a circular list with reference if it is not enqueued. Uses the finalizer queue lock. bool MakeCircularListIfUnenqueued(mirror::FinalizerReference* reference) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::reference_processor_lock_, - Locks::reference_queue_finalizer_references_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Locks::reference_processor_lock_, + !Locks::reference_queue_finalizer_references_lock_); private: - bool SlowPathEnabled() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool SlowPathEnabled() SHARED_REQUIRES(Locks::mutator_lock_); // Called by ProcessReferences. - void DisableSlowPath(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(Locks::reference_processor_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void DisableSlowPath(Thread* self) REQUIRES(Locks::reference_processor_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // If we are preserving references it means that some dead objects may become live, we use start // and stop preserving to block mutators using GetReferrent from getting access to these // referents. - void StartPreservingReferences(Thread* self) LOCKS_EXCLUDED(Locks::reference_processor_lock_); - void StopPreservingReferences(Thread* self) LOCKS_EXCLUDED(Locks::reference_processor_lock_); + void StartPreservingReferences(Thread* self) REQUIRES(!Locks::reference_processor_lock_); + void StopPreservingReferences(Thread* self) REQUIRES(!Locks::reference_processor_lock_); // Collector which is clearing references, used by the GetReferent to return referents which are // already marked. collector::GarbageCollector* collector_ GUARDED_BY(Locks::reference_processor_lock_); diff --git a/runtime/gc/reference_queue.h b/runtime/gc/reference_queue.h index 7d9ddf698e..aabac97742 100644 --- a/runtime/gc/reference_queue.h +++ b/runtime/gc/reference_queue.h @@ -22,6 +22,7 @@ #include <vector> #include "atomic.h" +#include "base/mutex.h" #include "base/timing_logger.h" #include "globals.h" #include "jni.h" @@ -53,39 +54,39 @@ class ReferenceQueue { // since it uses a lock to avoid a race between checking for the references presence and adding // it. void AtomicEnqueueIfNotEnqueued(Thread* self, mirror::Reference* ref) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*lock_); // Enqueue a reference, unlike EnqueuePendingReference, enqueue reference checks that the // reference IsEnqueueable. 
Not thread safe, used when mutators are paused to minimize lock // overhead. - void EnqueueReference(mirror::Reference* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void EnqueueReference(mirror::Reference* ref) SHARED_REQUIRES(Locks::mutator_lock_); // Enqueue a reference without checking that it is enqueable. - void EnqueuePendingReference(mirror::Reference* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void EnqueuePendingReference(mirror::Reference* ref) SHARED_REQUIRES(Locks::mutator_lock_); // Dequeue the first reference (returns list_). - mirror::Reference* DequeuePendingReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::Reference* DequeuePendingReference() SHARED_REQUIRES(Locks::mutator_lock_); // Enqueues finalizer references with white referents. White referents are blackened, moved to // the zombie field, and the referent field is cleared. void EnqueueFinalizerReferences(ReferenceQueue* cleared_references, collector::GarbageCollector* collector) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Walks the reference list marking any references subject to the reference clearing policy. // References with a black referent are removed from the list. References with white referents // biased toward saving are blackened and also removed from the list. void ForwardSoftReferences(MarkObjectVisitor* visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Unlink the reference list clearing references objects with white referents. Cleared references // registered to a reference queue are scheduled for appending by the heap worker thread. void ClearWhiteReferences(ReferenceQueue* cleared_references, collector::GarbageCollector* collector) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - void Dump(std::ostream& os) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - size_t GetLength() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void Dump(std::ostream& os) const SHARED_REQUIRES(Locks::mutator_lock_); + size_t GetLength() const SHARED_REQUIRES(Locks::mutator_lock_); bool IsEmpty() const { return list_ == nullptr; @@ -93,13 +94,13 @@ class ReferenceQueue { void Clear() { list_ = nullptr; } - mirror::Reference* GetList() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Reference* GetList() SHARED_REQUIRES(Locks::mutator_lock_) { return list_; } // Visits list_, currently only used for the mark compact GC. void UpdateRoots(IsMarkedVisitor* visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); private: // Lock, used for parallel GC reference enqueuing. 
It allows for multiple threads simultaneously diff --git a/runtime/gc/space/bump_pointer_space-inl.h b/runtime/gc/space/bump_pointer_space-inl.h index 338a41eaac..2263797d4a 100644 --- a/runtime/gc/space/bump_pointer_space-inl.h +++ b/runtime/gc/space/bump_pointer_space-inl.h @@ -87,7 +87,7 @@ inline mirror::Object* BumpPointerSpace::AllocNonvirtual(size_t num_bytes) { } inline size_t BumpPointerSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { size_t num_bytes = obj->SizeOf(); if (usable_size != nullptr) { *usable_size = RoundUp(num_bytes, kAlignment); diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h index df43606485..0e27d8467b 100644 --- a/runtime/gc/space/bump_pointer_space.h +++ b/runtime/gc/space/bump_pointer_space.h @@ -51,14 +51,14 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace { // Thread-unsafe allocation for when mutators are suspended, used by the semispace collector. mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated, size_t* usable_size, size_t* bytes_tl_bulk_allocated) - OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); + OVERRIDE REQUIRES(Locks::mutator_lock_); mirror::Object* AllocNonvirtual(size_t num_bytes); mirror::Object* AllocNonvirtualWithoutAccounting(size_t num_bytes); // Return the storage space required by obj. size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return AllocationSizeNonvirtual(obj, usable_size); } @@ -72,7 +72,7 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace { } size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Removes the fork time growth limit on capacity, allowing the application to allocate up to the // maximum reserved size of the heap. @@ -99,19 +99,21 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace { } // Reset the space to empty. 
- void Clear() OVERRIDE LOCKS_EXCLUDED(block_lock_); + void Clear() OVERRIDE REQUIRES(!block_lock_); void Dump(std::ostream& os) const; - size_t RevokeThreadLocalBuffers(Thread* thread) LOCKS_EXCLUDED(block_lock_); - size_t RevokeAllThreadLocalBuffers() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_, - Locks::thread_list_lock_); - void AssertThreadLocalBuffersAreRevoked(Thread* thread) LOCKS_EXCLUDED(block_lock_); - void AssertAllThreadLocalBuffersAreRevoked() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_, - Locks::thread_list_lock_); - - uint64_t GetBytesAllocated() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - uint64_t GetObjectsAllocated() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + size_t RevokeThreadLocalBuffers(Thread* thread) REQUIRES(!block_lock_); + size_t RevokeAllThreadLocalBuffers() + REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !block_lock_); + void AssertThreadLocalBuffersAreRevoked(Thread* thread) REQUIRES(!block_lock_); + void AssertAllThreadLocalBuffersAreRevoked() + REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !block_lock_); + + uint64_t GetBytesAllocated() SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!*Locks::runtime_shutdown_lock_, !*Locks::thread_list_lock_, !block_lock_); + uint64_t GetObjectsAllocated() SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!*Locks::runtime_shutdown_lock_, !*Locks::thread_list_lock_, !block_lock_); bool IsEmpty() const { return Begin() == End(); } @@ -130,10 +132,10 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace { // Return the object which comes after obj, while ensuring alignment. static mirror::Object* GetNextObject(mirror::Object* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Allocate a new TLAB, returns false if the allocation failed. - bool AllocNewTlab(Thread* self, size_t bytes); + bool AllocNewTlab(Thread* self, size_t bytes) REQUIRES(!block_lock_); BumpPointerSpace* AsBumpPointerSpace() OVERRIDE { return this; @@ -141,7 +143,7 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace { // Go through all of the blocks and visit the continuous objects. void Walk(ObjectCallback* callback, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!block_lock_); accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() OVERRIDE; @@ -152,7 +154,7 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace { } void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Object alignment within the space. static constexpr size_t kAlignment = 8; @@ -161,13 +163,13 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace { BumpPointerSpace(const std::string& name, MemMap* mem_map); // Allocate a raw block of bytes. - uint8_t* AllocBlock(size_t bytes) EXCLUSIVE_LOCKS_REQUIRED(block_lock_); - void RevokeThreadLocalBuffersLocked(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(block_lock_); + uint8_t* AllocBlock(size_t bytes) REQUIRES(block_lock_); + void RevokeThreadLocalBuffersLocked(Thread* thread) REQUIRES(block_lock_); // The main block is an unbounded block where objects go when there are no other blocks. This // enables us to maintain tightly packed objects when you are not using thread local buffers for // allocation. The main block starts at the space Begin(). 
- void UpdateMainBlock() EXCLUSIVE_LOCKS_REQUIRED(block_lock_); + void UpdateMainBlock() REQUIRES(block_lock_); uint8_t* growth_end_; AtomicInteger objects_allocated_; // Accumulated from revoked thread local regions. diff --git a/runtime/gc/space/dlmalloc_space.h b/runtime/gc/space/dlmalloc_space.h index ab527a42df..eab757a13e 100644 --- a/runtime/gc/space/dlmalloc_space.h +++ b/runtime/gc/space/dlmalloc_space.h @@ -50,11 +50,11 @@ class DlMallocSpace : public MallocSpace { virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated, size_t* usable_size, size_t* bytes_tl_bulk_allocated) - OVERRIDE LOCKS_EXCLUDED(lock_); + OVERRIDE REQUIRES(!lock_); // Virtual to allow MemoryToolMallocSpace to intercept. virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated, size_t* usable_size, size_t* bytes_tl_bulk_allocated) - OVERRIDE LOCKS_EXCLUDED(lock_) { + OVERRIDE REQUIRES(!lock_) { return AllocNonvirtual(self, num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated); } @@ -64,12 +64,12 @@ class DlMallocSpace : public MallocSpace { } // Virtual to allow MemoryToolMallocSpace to intercept. virtual size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE - LOCKS_EXCLUDED(lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Virtual to allow MemoryToolMallocSpace to intercept. virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE - LOCKS_EXCLUDED(lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!lock_) + SHARED_REQUIRES(Locks::mutator_lock_); size_t MaxBytesBulkAllocatedFor(size_t num_bytes) OVERRIDE { return num_bytes; @@ -86,7 +86,7 @@ class DlMallocSpace : public MallocSpace { // Faster non-virtual allocation path. mirror::Object* AllocNonvirtual(Thread* self, size_t num_bytes, size_t* bytes_allocated, size_t* usable_size, size_t* bytes_tl_bulk_allocated) - LOCKS_EXCLUDED(lock_); + REQUIRES(!lock_); // Faster non-virtual allocation size path. size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size); @@ -104,7 +104,7 @@ class DlMallocSpace : public MallocSpace { // Perform a mspace_inspect_all which calls back for each allocation chunk. The chunk may not be // in use, indicated by num_bytes equaling zero. - void Walk(WalkCallback callback, void* arg) OVERRIDE LOCKS_EXCLUDED(lock_); + void Walk(WalkCallback callback, void* arg) OVERRIDE REQUIRES(!lock_); // Returns the number of bytes that the space has currently obtained from the system. This is // greater or equal to the amount of live data in the space. 
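The conversion these space hunks keep repeating — LOCKS_EXCLUDED(lock_) becoming REQUIRES(!lock_) and EXCLUSIVE_LOCKS_REQUIRED(lock_) becoming REQUIRES(lock_) — is easiest to see in a standalone sketch. The code below is illustrative only: SketchMutex and SketchSpace are hypothetical, and the macros are local stand-ins for Clang's raw capability attributes rather than ART's real base/mutex.h definitions. The point is that the negative form is a capability Clang can verify at call sites once -Wthread-safety-negative is enabled, whereas the older exclusion annotation gave much weaker checking.

#include <cstddef>
#include <mutex>

#define CAPABILITY(x)  __attribute__((capability(x)))
#define REQUIRES(...)  __attribute__((requires_capability(__VA_ARGS__)))
#define ACQUIRE(...)   __attribute__((acquire_capability(__VA_ARGS__)))
#define RELEASE(...)   __attribute__((release_capability(__VA_ARGS__)))
#define GUARDED_BY(x)  __attribute__((guarded_by(x)))

// Minimal annotated mutex so the sketch is self-contained.
class CAPABILITY("mutex") SketchMutex {
 public:
  void Lock() ACQUIRE() { mu_.lock(); }
  void Unlock() RELEASE() { mu_.unlock(); }
 private:
  std::mutex mu_;
};

class SketchSpace {
 public:
  // Negative capability: the caller must NOT already hold lock_, because the
  // body acquires it. This is the checked replacement for LOCKS_EXCLUDED.
  size_t Alloc(size_t num_bytes) REQUIRES(!lock_) {
    lock_.Lock();
    size_t result = AllocLocked(num_bytes);
    lock_.Unlock();
    return result;
  }

 private:
  // Positive capability: the caller must hold lock_, the same contract the
  // old EXCLUSIVE_LOCKS_REQUIRED expressed.
  size_t AllocLocked(size_t num_bytes) REQUIRES(lock_) {
    bytes_allocated_ += num_bytes;
    return num_bytes;
  }

  SketchMutex lock_;
  size_t bytes_allocated_ GUARDED_BY(lock_) = 0;
};

With these annotations, a caller that invokes Alloc() while already holding lock_ gets a compile-time thread-safety warning instead of a silent self-deadlock risk.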
@@ -136,7 +136,7 @@ class DlMallocSpace : public MallocSpace { } void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); protected: DlMallocSpace(MemMap* mem_map, size_t initial_size, const std::string& name, void* mspace, @@ -147,7 +147,7 @@ class DlMallocSpace : public MallocSpace { mirror::Object* AllocWithoutGrowthLocked(Thread* self, size_t num_bytes, size_t* bytes_allocated, size_t* usable_size, size_t* bytes_tl_bulk_allocated) - EXCLUSIVE_LOCKS_REQUIRED(lock_); + REQUIRES(lock_); void* CreateAllocator(void* base, size_t morecore_start, size_t initial_size, size_t /*maximum_size*/, bool /*low_memory_mode*/) OVERRIDE { diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h index 93ff8aaff7..215c18b8d9 100644 --- a/runtime/gc/space/image_space.h +++ b/runtime/gc/space/image_space.h @@ -44,7 +44,7 @@ class ImageSpace : public MemMapSpace { // used to transfer ownership of the OatFile to the ClassLinker when // it is initialized. static ImageSpace* Create(const char* image, InstructionSet image_isa, std::string* error_msg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Reads the image header from the specified image location for the // instruction set image_isa or dies trying. @@ -64,10 +64,10 @@ class ImageSpace : public MemMapSpace { // Releases the OatFile from the ImageSpace so it can be transfer to // the caller, presumably the ClassLinker. OatFile* ReleaseOatFile() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void VerifyImageAllocations() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); const ImageHeader& GetImageHeader() const { return *reinterpret_cast<ImageHeader*>(Begin()); @@ -130,13 +130,13 @@ class ImageSpace : public MemMapSpace { // the OatFile in /data/dalvik-cache if necessary. static ImageSpace* Init(const char* image_filename, const char* image_location, bool validate_oat_file, std::string* error_msg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); OatFile* OpenOatFile(const char* image, std::string* error_msg) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool ValidateOatFile(std::string* error_msg) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); friend class Space; diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h index 45ed0cd75f..c726998ea2 100644 --- a/runtime/gc/space/large_object_space.h +++ b/runtime/gc/space/large_object_space.h @@ -96,7 +96,7 @@ class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace { return Begin() <= byte_obj && byte_obj < End(); } void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Return true if the large object is a zygote large object. Potentially slow. virtual bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const = 0; @@ -130,11 +130,12 @@ class LargeObjectMapSpace : public LargeObjectSpace { // of malloc. static LargeObjectMapSpace* Create(const std::string& name); // Return the storage space required by obj. 
- size_t AllocationSize(mirror::Object* obj, size_t* usable_size); + size_t AllocationSize(mirror::Object* obj, size_t* usable_size) REQUIRES(!lock_); mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated, - size_t* usable_size, size_t* bytes_tl_bulk_allocated); - size_t Free(Thread* self, mirror::Object* ptr); - void Walk(DlMallocSpace::WalkCallback, void* arg) OVERRIDE LOCKS_EXCLUDED(lock_); + size_t* usable_size, size_t* bytes_tl_bulk_allocated) + REQUIRES(!lock_); + size_t Free(Thread* self, mirror::Object* ptr) REQUIRES(!lock_); + void Walk(DlMallocSpace::WalkCallback, void* arg) OVERRIDE REQUIRES(!lock_); // TODO: disabling thread safety analysis as this may be called when we already hold lock_. bool Contains(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS; @@ -146,8 +147,8 @@ class LargeObjectMapSpace : public LargeObjectSpace { explicit LargeObjectMapSpace(const std::string& name); virtual ~LargeObjectMapSpace() {} - bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const OVERRIDE LOCKS_EXCLUDED(lock_); - void SetAllLargeObjectsAsZygoteObjects(Thread* self) OVERRIDE LOCKS_EXCLUDED(lock_); + bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const OVERRIDE REQUIRES(!lock_); + void SetAllLargeObjectsAsZygoteObjects(Thread* self) OVERRIDE REQUIRES(!lock_); // Used to ensure mutual exclusion when the allocation spaces data structures are being modified. mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; @@ -163,12 +164,13 @@ class FreeListSpace FINAL : public LargeObjectSpace { virtual ~FreeListSpace(); static FreeListSpace* Create(const std::string& name, uint8_t* requested_begin, size_t capacity); size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE - EXCLUSIVE_LOCKS_REQUIRED(lock_); + REQUIRES(lock_); mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated, - size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE; - size_t Free(Thread* self, mirror::Object* obj) OVERRIDE; - void Walk(DlMallocSpace::WalkCallback callback, void* arg) OVERRIDE LOCKS_EXCLUDED(lock_); - void Dump(std::ostream& os) const; + size_t* usable_size, size_t* bytes_tl_bulk_allocated) + OVERRIDE REQUIRES(!lock_); + size_t Free(Thread* self, mirror::Object* obj) OVERRIDE REQUIRES(!lock_); + void Walk(DlMallocSpace::WalkCallback callback, void* arg) OVERRIDE REQUIRES(!lock_); + void Dump(std::ostream& os) const REQUIRES(!lock_); protected: FreeListSpace(const std::string& name, MemMap* mem_map, uint8_t* begin, uint8_t* end); @@ -186,9 +188,9 @@ class FreeListSpace FINAL : public LargeObjectSpace { return GetAllocationAddressForSlot(GetSlotIndexForAllocationInfo(info)); } // Removes header from the free blocks set by finding the corresponding iterator and erasing it. - void RemoveFreePrev(AllocationInfo* info) EXCLUSIVE_LOCKS_REQUIRED(lock_); + void RemoveFreePrev(AllocationInfo* info) REQUIRES(lock_); bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const OVERRIDE; - void SetAllLargeObjectsAsZygoteObjects(Thread* self) OVERRIDE; + void SetAllLargeObjectsAsZygoteObjects(Thread* self) OVERRIDE REQUIRES(!lock_); class SortByPrevFree { public: diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h index 6c689cd890..4e56c4a429 100644 --- a/runtime/gc/space/malloc_space.h +++ b/runtime/gc/space/malloc_space.h @@ -63,9 +63,9 @@ class MallocSpace : public ContinuousMemMapAllocSpace { // amount of the storage space that may be used by obj. 
virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) = 0; virtual size_t Free(Thread* self, mirror::Object* ptr) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0; + SHARED_REQUIRES(Locks::mutator_lock_) = 0; virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0; + SHARED_REQUIRES(Locks::mutator_lock_) = 0; // Returns the maximum bytes that could be allocated for the given // size in bulk, that is the maximum value for the @@ -160,8 +160,8 @@ class MallocSpace : public ContinuousMemMapAllocSpace { size_t maximum_size, bool low_memory_mode) = 0; virtual void RegisterRecentFree(mirror::Object* ptr) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(lock_); virtual accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() { return &SweepCallback; @@ -196,7 +196,7 @@ class MallocSpace : public ContinuousMemMapAllocSpace { private: static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); DISALLOW_COPY_AND_ASSIGN(MallocSpace); }; diff --git a/runtime/gc/space/memory_tool_malloc_space.h b/runtime/gc/space/memory_tool_malloc_space.h index 64c6f35e1d..fe39e05a19 100644 --- a/runtime/gc/space/memory_tool_malloc_space.h +++ b/runtime/gc/space/memory_tool_malloc_space.h @@ -38,15 +38,15 @@ class MemoryToolMallocSpace FINAL : public BaseMallocSpaceType { size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE; mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated, size_t* usable_size, size_t* bytes_tl_bulk_allocated) - OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); + OVERRIDE REQUIRES(Locks::mutator_lock_); size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE; size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void RegisterRecentFree(mirror::Object* ptr) OVERRIDE { UNUSED(ptr); diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h index db005f7558..66fd62cee1 100644 --- a/runtime/gc/space/region_space-inl.h +++ b/runtime/gc/space/region_space-inl.h @@ -138,8 +138,7 @@ inline mirror::Object* RegionSpace::Region::Alloc(size_t num_bytes, size_t* byte return reinterpret_cast<mirror::Object*>(old_top); } -inline size_t RegionSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +inline size_t RegionSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size) { size_t num_bytes = obj->SizeOf(); if (usable_size != nullptr) { if (LIKELY(num_bytes <= kRegionSize)) { diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h index 19109f0d59..14e800595c 100644 --- a/runtime/gc/space/region_space.h +++ b/runtime/gc/space/region_space.h @@ -42,29 +42,31 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace { // Allocate num_bytes, returns null if the space is full. 
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated, - size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE; + size_t* usable_size, size_t* bytes_tl_bulk_allocated) + OVERRIDE REQUIRES(!region_lock_); // Thread-unsafe allocation for when mutators are suspended, used by the semispace collector. mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated, size_t* usable_size, size_t* bytes_tl_bulk_allocated) - OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); + OVERRIDE REQUIRES(Locks::mutator_lock_) REQUIRES(!region_lock_); // The main allocation routine. template<bool kForEvac> ALWAYS_INLINE mirror::Object* AllocNonvirtual(size_t num_bytes, size_t* bytes_allocated, size_t* usable_size, - size_t* bytes_tl_bulk_allocated); + size_t* bytes_tl_bulk_allocated) + REQUIRES(!region_lock_); // Allocate/free large objects (objects that are larger than the region size.) template<bool kForEvac> mirror::Object* AllocLarge(size_t num_bytes, size_t* bytes_allocated, size_t* usable_size, - size_t* bytes_tl_bulk_allocated); - void FreeLarge(mirror::Object* large_obj, size_t bytes_allocated); + size_t* bytes_tl_bulk_allocated) REQUIRES(!region_lock_); + void FreeLarge(mirror::Object* large_obj, size_t bytes_allocated) REQUIRES(!region_lock_); // Return the storage space required by obj. size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!region_lock_) { return AllocationSizeNonvirtual(obj, usable_size); } size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!region_lock_); size_t Free(Thread*, mirror::Object*) OVERRIDE { UNIMPLEMENTED(FATAL); @@ -83,19 +85,19 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace { return nullptr; } - void Clear() OVERRIDE LOCKS_EXCLUDED(region_lock_); + void Clear() OVERRIDE REQUIRES(!region_lock_); void Dump(std::ostream& os) const; - void DumpRegions(std::ostream& os); - void DumpNonFreeRegions(std::ostream& os); + void DumpRegions(std::ostream& os) REQUIRES(!region_lock_); + void DumpNonFreeRegions(std::ostream& os) REQUIRES(!region_lock_); - size_t RevokeThreadLocalBuffers(Thread* thread) LOCKS_EXCLUDED(region_lock_); - void RevokeThreadLocalBuffersLocked(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(region_lock_); - size_t RevokeAllThreadLocalBuffers() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_, - Locks::thread_list_lock_); - void AssertThreadLocalBuffersAreRevoked(Thread* thread) LOCKS_EXCLUDED(region_lock_); - void AssertAllThreadLocalBuffersAreRevoked() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_, - Locks::thread_list_lock_); + size_t RevokeThreadLocalBuffers(Thread* thread) REQUIRES(!region_lock_); + void RevokeThreadLocalBuffersLocked(Thread* thread) REQUIRES(region_lock_); + size_t RevokeAllThreadLocalBuffers() + REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !region_lock_); + void AssertThreadLocalBuffersAreRevoked(Thread* thread) REQUIRES(!region_lock_); + void AssertAllThreadLocalBuffersAreRevoked() + REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !region_lock_); enum class RegionType : uint8_t { kRegionTypeAll, // All types. @@ -112,24 +114,24 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace { kRegionStateLargeTail, // Large tail (non-first regions of a large allocation). 
}; - template<RegionType kRegionType> uint64_t GetBytesAllocatedInternal(); - template<RegionType kRegionType> uint64_t GetObjectsAllocatedInternal(); - uint64_t GetBytesAllocated() { + template<RegionType kRegionType> uint64_t GetBytesAllocatedInternal() REQUIRES(!region_lock_); + template<RegionType kRegionType> uint64_t GetObjectsAllocatedInternal() REQUIRES(!region_lock_); + uint64_t GetBytesAllocated() REQUIRES(!region_lock_) { return GetBytesAllocatedInternal<RegionType::kRegionTypeAll>(); } - uint64_t GetObjectsAllocated() { + uint64_t GetObjectsAllocated() REQUIRES(!region_lock_) { return GetObjectsAllocatedInternal<RegionType::kRegionTypeAll>(); } - uint64_t GetBytesAllocatedInFromSpace() { + uint64_t GetBytesAllocatedInFromSpace() REQUIRES(!region_lock_) { return GetBytesAllocatedInternal<RegionType::kRegionTypeFromSpace>(); } - uint64_t GetObjectsAllocatedInFromSpace() { + uint64_t GetObjectsAllocatedInFromSpace() REQUIRES(!region_lock_) { return GetObjectsAllocatedInternal<RegionType::kRegionTypeFromSpace>(); } - uint64_t GetBytesAllocatedInUnevacFromSpace() { + uint64_t GetBytesAllocatedInUnevacFromSpace() REQUIRES(!region_lock_) { return GetBytesAllocatedInternal<RegionType::kRegionTypeUnevacFromSpace>(); } - uint64_t GetObjectsAllocatedInUnevacFromSpace() { + uint64_t GetObjectsAllocatedInUnevacFromSpace() REQUIRES(!region_lock_) { return GetObjectsAllocatedInternal<RegionType::kRegionTypeUnevacFromSpace>(); } @@ -148,12 +150,12 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace { // Go through all of the blocks and visit the continuous objects. void Walk(ObjectCallback* callback, void* arg) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) { + REQUIRES(Locks::mutator_lock_) { WalkInternal<false>(callback, arg); } void WalkToSpace(ObjectCallback* callback, void* arg) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) { + REQUIRES(Locks::mutator_lock_) { WalkInternal<true>(callback, arg); } @@ -161,7 +163,7 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace { return nullptr; } void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!region_lock_); // Object alignment within the space. 
static constexpr size_t kAlignment = kObjectAlignment; @@ -201,22 +203,22 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace { } void SetFromSpace(accounting::ReadBarrierTable* rb_table, bool force_evacuate_all) - LOCKS_EXCLUDED(region_lock_); + REQUIRES(!region_lock_); - size_t FromSpaceSize(); - size_t UnevacFromSpaceSize(); - size_t ToSpaceSize(); - void ClearFromSpace(); + size_t FromSpaceSize() REQUIRES(!region_lock_); + size_t UnevacFromSpaceSize() REQUIRES(!region_lock_); + size_t ToSpaceSize() REQUIRES(!region_lock_); + void ClearFromSpace() REQUIRES(!region_lock_); void AddLiveBytes(mirror::Object* ref, size_t alloc_size) { Region* reg = RefToRegionUnlocked(ref); reg->AddLiveBytes(alloc_size); } - void AssertAllRegionLiveBytesZeroOrCleared(); + void AssertAllRegionLiveBytesZeroOrCleared() REQUIRES(!region_lock_); - void RecordAlloc(mirror::Object* ref); - bool AllocNewTlab(Thread* self); + void RecordAlloc(mirror::Object* ref) REQUIRES(!region_lock_); + bool AllocNewTlab(Thread* self) REQUIRES(!region_lock_); uint32_t Time() { return time_; @@ -476,7 +478,7 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace { friend class RegionSpace; }; - Region* RefToRegion(mirror::Object* ref) LOCKS_EXCLUDED(region_lock_) { + Region* RefToRegion(mirror::Object* ref) REQUIRES(!region_lock_) { MutexLock mu(Thread::Current(), region_lock_); return RefToRegionLocked(ref); } @@ -492,7 +494,7 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace { return RefToRegionLocked(ref); } - Region* RefToRegionLocked(mirror::Object* ref) EXCLUSIVE_LOCKS_REQUIRED(region_lock_) { + Region* RefToRegionLocked(mirror::Object* ref) REQUIRES(region_lock_) { DCHECK(HasAddress(ref)); uintptr_t offset = reinterpret_cast<uintptr_t>(ref) - reinterpret_cast<uintptr_t>(Begin()); size_t reg_idx = offset / kRegionSize; @@ -504,7 +506,7 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace { } mirror::Object* GetNextObject(mirror::Object* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); Mutex region_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; diff --git a/runtime/gc/space/rosalloc_space.h b/runtime/gc/space/rosalloc_space.h index 9dc6f31e60..bc1473850c 100644 --- a/runtime/gc/space/rosalloc_space.h +++ b/runtime/gc/space/rosalloc_space.h @@ -48,7 +48,7 @@ class RosAllocSpace : public MallocSpace { mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated, size_t* usable_size, size_t* bytes_tl_bulk_allocated) - OVERRIDE LOCKS_EXCLUDED(lock_); + OVERRIDE REQUIRES(!lock_); mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated, size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE { return AllocNonvirtual(self, num_bytes, bytes_allocated, usable_size, @@ -56,7 +56,7 @@ class RosAllocSpace : public MallocSpace { } mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated, size_t* usable_size, size_t* bytes_tl_bulk_allocated) - OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) { + OVERRIDE REQUIRES(Locks::mutator_lock_) { return AllocNonvirtualThreadUnsafe(self, num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated); } @@ -64,9 +64,9 @@ class RosAllocSpace : public MallocSpace { return AllocationSizeNonvirtual<true>(obj, usable_size); } size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) 
OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); mirror::Object* AllocNonvirtual(Thread* self, size_t num_bytes, size_t* bytes_allocated, size_t* usable_size, size_t* bytes_tl_bulk_allocated) { @@ -104,7 +104,7 @@ class RosAllocSpace : public MallocSpace { } size_t Trim() OVERRIDE; - void Walk(WalkCallback callback, void* arg) OVERRIDE LOCKS_EXCLUDED(lock_); + void Walk(WalkCallback callback, void* arg) OVERRIDE REQUIRES(!lock_); size_t GetFootprint() OVERRIDE; size_t GetFootprintLimit() OVERRIDE; void SetFootprintLimit(size_t limit) OVERRIDE; @@ -134,7 +134,7 @@ class RosAllocSpace : public MallocSpace { return this; } - void Verify() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) { + void Verify() REQUIRES(Locks::mutator_lock_) { rosalloc_->Verify(); } @@ -166,11 +166,11 @@ class RosAllocSpace : public MallocSpace { void InspectAllRosAlloc(void (*callback)(void *start, void *end, size_t num_bytes, void* callback_arg), void* arg, bool do_null_callback_at_end) - LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_, Locks::thread_list_lock_); + REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_); void InspectAllRosAllocWithSuspendAll( void (*callback)(void *start, void *end, size_t num_bytes, void* callback_arg), void* arg, bool do_null_callback_at_end) - LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_, Locks::thread_list_lock_); + REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_); // Underlying rosalloc. allocator::RosAlloc* rosalloc_; diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h index 871ebac8a7..fc558cf8e4 100644 --- a/runtime/gc/space/space.h +++ b/runtime/gc/space/space.h @@ -219,7 +219,7 @@ class AllocSpace { virtual mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated, size_t* usable_size, size_t* bytes_tl_bulk_allocated) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) { + REQUIRES(Locks::mutator_lock_) { return Alloc(self, num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated); } @@ -420,10 +420,9 @@ class ContinuousMemMapAllocSpace : public MemMapSpace, public AllocSpace { return this; } - bool HasBoundBitmaps() const EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); - void BindLiveToMarkBitmap() - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); - void UnBindBitmaps() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + bool HasBoundBitmaps() const REQUIRES(Locks::heap_bitmap_lock_); + void BindLiveToMarkBitmap() REQUIRES(Locks::heap_bitmap_lock_); + void UnBindBitmaps() REQUIRES(Locks::heap_bitmap_lock_); // Swap the live and mark bitmaps of this space. This is used by the GC for concurrent sweeping. 
void SwapBitmaps(); diff --git a/runtime/gc/space/space_test.h b/runtime/gc/space/space_test.h index 6e0e0d24c7..4d2db11ac2 100644 --- a/runtime/gc/space/space_test.h +++ b/runtime/gc/space/space_test.h @@ -49,7 +49,7 @@ class SpaceTest : public CommonRuntimeTest { heap->SetSpaceAsDefault(space); } - mirror::Class* GetByteArrayClass(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Class* GetByteArrayClass(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) { StackHandleScope<1> hs(self); auto null_loader(hs.NewHandle<mirror::ClassLoader>(nullptr)); if (byte_array_class_ == nullptr) { @@ -65,7 +65,7 @@ class SpaceTest : public CommonRuntimeTest { mirror::Object* Alloc(space::MallocSpace* alloc_space, Thread* self, size_t bytes, size_t* bytes_allocated, size_t* usable_size, size_t* bytes_tl_bulk_allocated) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { StackHandleScope<1> hs(self); Handle<mirror::Class> byte_array_class(hs.NewHandle(GetByteArrayClass(self))); mirror::Object* obj = alloc_space->Alloc(self, bytes, bytes_allocated, usable_size, @@ -79,7 +79,7 @@ class SpaceTest : public CommonRuntimeTest { mirror::Object* AllocWithGrowth(space::MallocSpace* alloc_space, Thread* self, size_t bytes, size_t* bytes_allocated, size_t* usable_size, size_t* bytes_tl_bulk_allocated) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { StackHandleScope<1> hs(self); Handle<mirror::Class> byte_array_class(hs.NewHandle(GetByteArrayClass(self))); mirror::Object* obj = alloc_space->AllocWithGrowth(self, bytes, bytes_allocated, usable_size, @@ -91,7 +91,7 @@ class SpaceTest : public CommonRuntimeTest { } void InstallClass(mirror::Object* o, mirror::Class* byte_array_class, size_t size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // Note the minimum size, which is the size of a zero-length byte array. 
EXPECT_GE(size, SizeOfZeroLengthByteArray()); EXPECT_TRUE(byte_array_class != nullptr); diff --git a/runtime/gc/space/zygote_space.h b/runtime/gc/space/zygote_space.h index 934a234345..f2889e2301 100644 --- a/runtime/gc/space/zygote_space.h +++ b/runtime/gc/space/zygote_space.h @@ -33,7 +33,7 @@ class ZygoteSpace FINAL : public ContinuousMemMapAllocSpace { static ZygoteSpace* Create(const std::string& name, MemMap* mem_map, accounting::ContinuousSpaceBitmap* live_bitmap, accounting::ContinuousSpaceBitmap* mark_bitmap) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void Dump(std::ostream& os) const; @@ -77,7 +77,7 @@ class ZygoteSpace FINAL : public ContinuousMemMapAllocSpace { } void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); protected: virtual accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() { diff --git a/runtime/gc/task_processor.h b/runtime/gc/task_processor.h index 5f486192f0..e40fa06319 100644 --- a/runtime/gc/task_processor.h +++ b/runtime/gc/task_processor.h @@ -54,17 +54,17 @@ class TaskProcessor { public: TaskProcessor(); virtual ~TaskProcessor(); - void AddTask(Thread* self, HeapTask* task) LOCKS_EXCLUDED(lock_); - HeapTask* GetTask(Thread* self) LOCKS_EXCLUDED(lock_); - void Start(Thread* self) LOCKS_EXCLUDED(lock_); + void AddTask(Thread* self, HeapTask* task) REQUIRES(!*lock_); + HeapTask* GetTask(Thread* self) REQUIRES(!*lock_); + void Start(Thread* self) REQUIRES(!*lock_); // Stop tells the RunAllTasks to finish up the remaining tasks as soon as // possible then return. - void Stop(Thread* self) LOCKS_EXCLUDED(lock_); - void RunAllTasks(Thread* self) LOCKS_EXCLUDED(lock_); - bool IsRunning() const LOCKS_EXCLUDED(lock_); + void Stop(Thread* self) REQUIRES(!*lock_); + void RunAllTasks(Thread* self) REQUIRES(!*lock_); + bool IsRunning() const REQUIRES(!*lock_); void UpdateTargetRunTime(Thread* self, HeapTask* target_time, uint64_t new_target_time) - LOCKS_EXCLUDED(lock_); - Thread* GetRunningThread() const LOCKS_EXCLUDED(lock_); + REQUIRES(!*lock_); + Thread* GetRunningThread() const REQUIRES(!*lock_); private: class CompareByTargetRunTime { diff --git a/runtime/gc_root.h b/runtime/gc_root.h index bb604f04c5..83471e6b96 100644 --- a/runtime/gc_root.h +++ b/runtime/gc_root.h @@ -91,24 +91,24 @@ class RootVisitor { // Single root version, not overridable. ALWAYS_INLINE void VisitRoot(mirror::Object** roots, const RootInfo& info) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { VisitRoots(&roots, 1, info); } // Single root version, not overridable. ALWAYS_INLINE void VisitRootIfNonNull(mirror::Object** roots, const RootInfo& info) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (*roots != nullptr) { VisitRoot(roots, info); } } virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0; + SHARED_REQUIRES(Locks::mutator_lock_) = 0; virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count, const RootInfo& info) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0; + SHARED_REQUIRES(Locks::mutator_lock_) = 0; }; // Only visits roots one at a time, doesn't handle updating roots. 
Used when performance isn't @@ -116,7 +116,7 @@ class RootVisitor { class SingleRootVisitor : public RootVisitor { private: void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { for (size_t i = 0; i < count; ++i) { VisitRoot(*roots[i], info); } @@ -124,7 +124,7 @@ class SingleRootVisitor : public RootVisitor { void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count, const RootInfo& info) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { for (size_t i = 0; i < count; ++i) { VisitRoot(roots[i]->AsMirrorPtr(), info); } @@ -169,10 +169,10 @@ class GcRoot { public: template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier> ALWAYS_INLINE MirrorType* Read(GcRootSource* gc_root_source = nullptr) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void VisitRoot(RootVisitor* visitor, const RootInfo& info) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(!IsNull()); mirror::CompressedReference<mirror::Object>* roots[1] = { &root_ }; visitor->VisitRoots(roots, 1u, info); @@ -180,7 +180,7 @@ class GcRoot { } void VisitRootIfNonNull(RootVisitor* visitor, const RootInfo& info) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (!IsNull()) { VisitRoot(visitor, info); } @@ -195,7 +195,7 @@ class GcRoot { return root_.IsNull(); } - ALWAYS_INLINE GcRoot(MirrorType* ref = nullptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ALWAYS_INLINE GcRoot(MirrorType* ref = nullptr) SHARED_REQUIRES(Locks::mutator_lock_); private: // Root visitors take pointers to root_ and place the min CompressedReference** arrays. 
We use a @@ -222,7 +222,7 @@ class BufferedRootVisitor { template <class MirrorType> ALWAYS_INLINE void VisitRootIfNonNull(GcRoot<MirrorType>& root) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (!root.IsNull()) { VisitRoot(root); } @@ -230,27 +230,27 @@ class BufferedRootVisitor { template <class MirrorType> ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<MirrorType>* root) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (!root->IsNull()) { VisitRoot(root); } } template <class MirrorType> - void VisitRoot(GcRoot<MirrorType>& root) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void VisitRoot(GcRoot<MirrorType>& root) SHARED_REQUIRES(Locks::mutator_lock_) { VisitRoot(root.AddressWithoutBarrier()); } template <class MirrorType> void VisitRoot(mirror::CompressedReference<MirrorType>* root) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (UNLIKELY(buffer_pos_ >= kBufferSize)) { Flush(); } roots_[buffer_pos_++] = root; } - void Flush() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void Flush() SHARED_REQUIRES(Locks::mutator_lock_) { visitor_->VisitRoots(roots_, buffer_pos_, root_info_); buffer_pos_ = 0; } diff --git a/runtime/handle.h b/runtime/handle.h index d94d87552a..f939ec5018 100644 --- a/runtime/handle.h +++ b/runtime/handle.h @@ -50,19 +50,19 @@ class Handle : public ValueObject { ALWAYS_INLINE explicit Handle(StackReference<T>* reference) : reference_(reference) { } - ALWAYS_INLINE T& operator*() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ALWAYS_INLINE T& operator*() const SHARED_REQUIRES(Locks::mutator_lock_) { return *Get(); } - ALWAYS_INLINE T* operator->() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ALWAYS_INLINE T* operator->() const SHARED_REQUIRES(Locks::mutator_lock_) { return Get(); } - ALWAYS_INLINE T* Get() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ALWAYS_INLINE T* Get() const SHARED_REQUIRES(Locks::mutator_lock_) { return down_cast<T*>(reference_->AsMirrorPtr()); } - ALWAYS_INLINE jobject ToJObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ALWAYS_INLINE jobject ToJObject() const SHARED_REQUIRES(Locks::mutator_lock_) { if (UNLIKELY(reference_->AsMirrorPtr() == nullptr)) { // Special case so that we work with NullHandles. 
return nullptr; @@ -71,12 +71,12 @@ class Handle : public ValueObject { } ALWAYS_INLINE StackReference<mirror::Object>* GetReference() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return reference_; } ALWAYS_INLINE const StackReference<mirror::Object>* GetReference() const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return reference_; } @@ -108,22 +108,22 @@ class MutableHandle : public Handle<T> { } ALWAYS_INLINE MutableHandle(const MutableHandle<T>& handle) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : Handle<T>(handle.reference_) { } ALWAYS_INLINE MutableHandle<T>& operator=(const MutableHandle<T>& handle) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { Handle<T>::operator=(handle); return *this; } ALWAYS_INLINE explicit MutableHandle(StackReference<T>* reference) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : Handle<T>(reference) { } - ALWAYS_INLINE T* Assign(T* reference) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ALWAYS_INLINE T* Assign(T* reference) SHARED_REQUIRES(Locks::mutator_lock_) { StackReference<mirror::Object>* ref = Handle<T>::GetReference(); T* old = down_cast<T*>(ref->AsMirrorPtr()); ref->Assign(reference); @@ -131,12 +131,12 @@ class MutableHandle : public Handle<T> { } template<typename S> - explicit MutableHandle(const MutableHandle<S>& handle) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + explicit MutableHandle(const MutableHandle<S>& handle) SHARED_REQUIRES(Locks::mutator_lock_) : Handle<T>(handle) { } template<typename S> - explicit MutableHandle(StackReference<S>* reference) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + explicit MutableHandle(StackReference<S>* reference) SHARED_REQUIRES(Locks::mutator_lock_) : Handle<T>(reference) { } diff --git a/runtime/handle_scope.h b/runtime/handle_scope.h index 9a0e52efd3..5ed8ef0ed5 100644 --- a/runtime/handle_scope.h +++ b/runtime/handle_scope.h @@ -60,16 +60,16 @@ class PACKED(4) HandleScope { } ALWAYS_INLINE mirror::Object* GetReference(size_t i) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ALWAYS_INLINE Handle<mirror::Object> GetHandle(size_t i) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ALWAYS_INLINE MutableHandle<mirror::Object> GetMutableHandle(size_t i) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ALWAYS_INLINE void SetReference(size_t i, mirror::Object* object) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ALWAYS_INLINE bool Contains(StackReference<mirror::Object>* handle_scope_entry) const; @@ -150,14 +150,14 @@ class PACKED(4) StackHandleScope FINAL : public HandleScope { ALWAYS_INLINE ~StackHandleScope(); template<class T> - ALWAYS_INLINE MutableHandle<T> NewHandle(T* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ALWAYS_INLINE MutableHandle<T> NewHandle(T* object) SHARED_REQUIRES(Locks::mutator_lock_); template<class T> ALWAYS_INLINE HandleWrapper<T> NewHandleWrapper(T** object) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ALWAYS_INLINE void SetReference(size_t i, mirror::Object* object) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); Thread* Self() const { return self_; @@ -165,7 +165,7 @@ class PACKED(4) StackHandleScope FINAL 
: public HandleScope { private: template<class T> - ALWAYS_INLINE MutableHandle<T> GetHandle(size_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ALWAYS_INLINE MutableHandle<T> GetHandle(size_t i) SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK_LT(i, kNumReferences); return MutableHandle<T>(&GetReferences()[i]); } @@ -209,7 +209,7 @@ class StackHandleScopeCollection { } template<class T> - MutableHandle<T> NewHandle(T* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + MutableHandle<T> NewHandle(T* object) SHARED_REQUIRES(Locks::mutator_lock_) { if (scopes_.empty() || current_scope_num_refs_ >= kNumReferencesPerScope) { StackHandleScope<kNumReferencesPerScope>* scope = new StackHandleScope<kNumReferencesPerScope>(self_); diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc index 71a69aa5dd..e67ea3fa8f 100644 --- a/runtime/hprof/hprof.cc +++ b/runtime/hprof/hprof.cc @@ -240,7 +240,7 @@ class EndianOutput { } void AddIdList(mirror::ObjectArray<mirror::Object>* values) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { const int32_t length = values->GetLength(); for (int32_t i = 0; i < length; ++i) { AddObjectId(values->GetWithoutChecks(i)); @@ -429,8 +429,7 @@ class Hprof : public SingleRootVisitor { } void Dump() - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::heap_bitmap_lock_, Locks::alloc_tracker_lock_) { + REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !Locks::alloc_tracker_lock_) { { MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_); if (Runtime::Current()->GetHeap()->IsAllocTrackingEnabled()) { @@ -471,26 +470,26 @@ class Hprof : public SingleRootVisitor { private: static void VisitObjectCallback(mirror::Object* obj, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(obj != nullptr); DCHECK(arg != nullptr); reinterpret_cast<Hprof*>(arg)->DumpHeapObject(obj); } void DumpHeapObject(mirror::Object* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void DumpHeapClass(mirror::Class* klass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void DumpHeapArray(mirror::Array* obj, mirror::Class* klass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void DumpHeapInstanceObject(mirror::Object* obj, mirror::Class* klass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void ProcessHeap(bool header_first) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) { + REQUIRES(Locks::mutator_lock_) { // Reset current heap and object count. current_heap_ = HPROF_HEAP_DEFAULT; objects_in_segment_ = 0; @@ -504,7 +503,7 @@ class Hprof : public SingleRootVisitor { } } - void ProcessBody() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) { + void ProcessBody() REQUIRES(Locks::mutator_lock_) { Runtime* const runtime = Runtime::Current(); // Walk the roots and the heap. output_->StartNewRecord(HPROF_TAG_HEAP_DUMP_SEGMENT, kHprofTime); @@ -517,7 +516,7 @@ class Hprof : public SingleRootVisitor { output_->EndRecord(); } - void ProcessHeader(bool string_first) EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) { + void ProcessHeader(bool string_first) REQUIRES(Locks::mutator_lock_) { // Write the header. WriteFixedHeader(); // Write the string and class tables, and any stack traces, to the header. 
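The conversions above collapse the old pair of attributes, EXCLUSIVE_LOCKS_REQUIRED plus LOCKS_EXCLUDED, into a single REQUIRES list in which a leading `!` marks a negative capability: the caller must not already hold that lock, typically because the annotated function acquires it itself (as Dump() does above with Locks::alloc_tracker_lock_). The following is a minimal standalone sketch of the same idea using Clang's thread-safety attributes directly; the macro, class, and function names (CAPABILITY, Lock, RecordAllocation, ...) are illustrative stand-ins, not ART's definitions.

#include <mutex>

// Hypothetical shorthands for Clang's thread-safety attributes.
#define CAPABILITY(x)   __attribute__((capability(x)))
#define REQUIRES(...)   __attribute__((requires_capability(__VA_ARGS__)))
#define GUARDED_BY(x)   __attribute__((guarded_by(x)))
#define ACQUIRE(...)    __attribute__((acquire_capability(__VA_ARGS__)))
#define RELEASE(...)    __attribute__((release_capability(__VA_ARGS__)))

// A toy lock annotated as a capability so the analysis can track it.
class CAPABILITY("mutex") Lock {
 public:
  void Acquire() ACQUIRE() { mu_.lock(); }
  void Release() RELEASE() { mu_.unlock(); }
 private:
  std::mutex mu_;
};

Lock alloc_lock;
int allocation_count GUARDED_BY(alloc_lock) = 0;

// Old style would be EXCLUSIVE_LOCKS_REQUIRED(alloc_lock): caller holds the lock.
void RecordAllocationLocked() REQUIRES(alloc_lock) {
  ++allocation_count;
}

// Old style would be LOCKS_EXCLUDED(alloc_lock): the negative requirement says
// the caller must not hold alloc_lock, because this function acquires it.
void RecordAllocation() REQUIRES(!alloc_lock) {
  alloc_lock.Acquire();
  RecordAllocationLocked();
  alloc_lock.Release();
}

Compiled with clang++ -Wthread-safety (and the negative-capability warnings enabled), calling RecordAllocation() while alloc_lock is held, or RecordAllocationLocked() while it is not, should be flagged at compile time.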
@@ -536,7 +535,7 @@ class Hprof : public SingleRootVisitor { output_->EndRecord(); } - void WriteClassTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void WriteClassTable() SHARED_REQUIRES(Locks::mutator_lock_) { for (const auto& p : classes_) { mirror::Class* c = p.first; HprofClassSerialNumber sn = p.second; @@ -585,11 +584,11 @@ class Hprof : public SingleRootVisitor { } void VisitRoot(mirror::Object* obj, const RootInfo& root_info) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); void MarkRootObject(const mirror::Object* obj, jobject jni_obj, HprofHeapTag heap_tag, uint32_t thread_serial); - HprofClassObjectId LookupClassId(mirror::Class* c) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + HprofClassObjectId LookupClassId(mirror::Class* c) SHARED_REQUIRES(Locks::mutator_lock_) { if (c != nullptr) { auto it = classes_.find(c); if (it == classes_.end()) { @@ -604,7 +603,7 @@ class Hprof : public SingleRootVisitor { } HprofStackTraceSerialNumber LookupStackTraceSerialNumber(const mirror::Object* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { auto r = allocation_records_.find(obj); if (r == allocation_records_.end()) { return kHprofNullStackTrace; @@ -616,7 +615,7 @@ class Hprof : public SingleRootVisitor { } } - HprofStringId LookupStringId(mirror::String* string) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + HprofStringId LookupStringId(mirror::String* string) SHARED_REQUIRES(Locks::mutator_lock_) { return LookupStringId(string->ToModifiedUtf8()); } @@ -634,7 +633,7 @@ class Hprof : public SingleRootVisitor { return id; } - HprofStringId LookupClassNameId(mirror::Class* c) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + HprofStringId LookupClassNameId(mirror::Class* c) SHARED_REQUIRES(Locks::mutator_lock_) { return LookupStringId(PrettyDescriptor(c)); } @@ -662,7 +661,7 @@ class Hprof : public SingleRootVisitor { __ AddU4(static_cast<uint32_t>(nowMs & 0xFFFFFFFF)); } - void WriteStackTraces() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void WriteStackTraces() SHARED_REQUIRES(Locks::mutator_lock_) { // Write a dummy stack trace record so the analysis tools don't freak out. output_->StartNewRecord(HPROF_TAG_STACK_TRACE, kHprofTime); __ AddStackTraceSerialNumber(kHprofNullStackTrace); @@ -725,7 +724,7 @@ class Hprof : public SingleRootVisitor { } bool DumpToDdmsBuffered(size_t overall_size ATTRIBUTE_UNUSED, size_t max_length ATTRIBUTE_UNUSED) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) { + REQUIRES(Locks::mutator_lock_) { LOG(FATAL) << "Unimplemented"; UNREACHABLE(); // // Send the data off to DDMS. @@ -738,7 +737,7 @@ class Hprof : public SingleRootVisitor { } bool DumpToFile(size_t overall_size, size_t max_length) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) { + REQUIRES(Locks::mutator_lock_) { // Where exactly are we writing to? 
int out_fd; if (fd_ >= 0) { @@ -787,7 +786,7 @@ class Hprof : public SingleRootVisitor { } bool DumpToDdmsDirect(size_t overall_size, size_t max_length, uint32_t chunk_type) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) { + REQUIRES(Locks::mutator_lock_) { CHECK(direct_to_ddms_); JDWP::JdwpState* state = Dbg::GetJdwpState(); CHECK(state != nullptr); @@ -818,7 +817,7 @@ class Hprof : public SingleRootVisitor { } void PopulateAllocationTrackingTraces() - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::alloc_tracker_lock_) { + REQUIRES(Locks::mutator_lock_, Locks::alloc_tracker_lock_) { gc::AllocRecordObjectMap* records = Runtime::Current()->GetHeap()->GetAllocationRecords(); CHECK(records != nullptr); HprofStackTraceSerialNumber next_trace_sn = kHprofNullStackTrace + 1; diff --git a/runtime/image.h b/runtime/image.h index d856f218af..cc98ba64a1 100644 --- a/runtime/image.h +++ b/runtime/image.h @@ -156,9 +156,9 @@ class PACKED(4) ImageHeader { } mirror::Object* GetImageRoot(ImageRoot image_root) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); mirror::ObjectArray<mirror::Object>* GetImageRoots() const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void RelocateImage(off_t delta); diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc index 20e42221bb..c9ba6cfada 100644 --- a/runtime/indirect_reference_table.cc +++ b/runtime/indirect_reference_table.cc @@ -28,14 +28,16 @@ namespace art { +static constexpr bool kDumpStackOnNonLocalReference = false; + template<typename T> class MutatorLockedDumpable { public: explicit MutatorLockedDumpable(T& value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : value_(value) { + SHARED_REQUIRES(Locks::mutator_lock_) : value_(value) { } - void Dump(std::ostream& os) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void Dump(std::ostream& os) const SHARED_REQUIRES(Locks::mutator_lock_) { value_.Dump(os); } @@ -47,7 +49,7 @@ class MutatorLockedDumpable { template<typename T> std::ostream& operator<<(std::ostream& os, const MutatorLockedDumpable<T>& rhs) -// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) however annotalysis +// TODO: should be SHARED_REQUIRES(Locks::mutator_lock_) however annotalysis // currently fails for this. NO_THREAD_SAFETY_ANALYSIS { rhs.Dump(os); @@ -183,7 +185,9 @@ bool IndirectReferenceTable::Remove(uint32_t cookie, IndirectRef iref) { if (env->check_jni) { ScopedObjectAccess soa(self); LOG(WARNING) << "Attempt to remove non-JNI local reference, dumping thread"; - self->Dump(LOG(WARNING)); + if (kDumpStackOnNonLocalReference) { + self->Dump(LOG(WARNING)); + } } return true; } diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h index dea5dfdf90..798b48cc44 100644 --- a/runtime/indirect_reference_table.h +++ b/runtime/indirect_reference_table.h @@ -199,7 +199,7 @@ union IRTSegmentState { static const size_t kIRTPrevCount = kIsDebugBuild ? 
7 : 3; class IrtEntry { public: - void Add(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void Add(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) { ++serial_; if (serial_ == kIRTPrevCount) { serial_ = 0; @@ -228,11 +228,11 @@ static_assert(sizeof(IrtEntry) == (1 + kIRTPrevCount) * sizeof(uint32_t), class IrtIterator { public: explicit IrtIterator(IrtEntry* table, size_t i, size_t capacity) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : table_(table), i_(i), capacity_(capacity) { } - IrtIterator& operator++() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + IrtIterator& operator++() SHARED_REQUIRES(Locks::mutator_lock_) { ++i_; return *this; } @@ -278,7 +278,7 @@ class IndirectReferenceTable { * failed during expansion). */ IndirectRef Add(uint32_t cookie, mirror::Object* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); /* * Given an IndirectRef in the table, return the Object it refers to. @@ -286,14 +286,14 @@ class IndirectReferenceTable { * Returns kInvalidIndirectRefObject if iref is invalid. */ template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier> - mirror::Object* Get(IndirectRef iref) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + mirror::Object* Get(IndirectRef iref) const SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE; // Synchronized get which reads a reference, acquiring a lock if necessary. template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier> mirror::Object* SynchronizedGet(Thread* /*self*/, ReaderWriterMutex* /*mutex*/, IndirectRef iref) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return Get<kReadBarrierOption>(iref); } @@ -302,7 +302,7 @@ class IndirectReferenceTable { * * Updates an existing indirect reference to point to a new object. */ - void Update(IndirectRef iref, mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void Update(IndirectRef iref, mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_); /* * Remove an existing entry. @@ -317,7 +317,7 @@ class IndirectReferenceTable { void AssertEmpty(); - void Dump(std::ostream& os) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void Dump(std::ostream& os) const SHARED_REQUIRES(Locks::mutator_lock_); /* * Return the #of entries in the entire table. This includes holes, and @@ -337,7 +337,7 @@ class IndirectReferenceTable { } void VisitRoots(RootVisitor* visitor, const RootInfo& root_info) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); uint32_t GetSegmentState() const { return segment_state_.all; @@ -352,7 +352,7 @@ class IndirectReferenceTable { } // Release pages past the end of the table that may have previously held references. - void Trim() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void Trim() SHARED_REQUIRES(Locks::mutator_lock_); private: // Extract the table index from an indirect reference. 
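A second distinction the renamed macros carry through indirect_reference_table.h above (and the rest of the change) is shared versus exclusive access: SHARED_LOCKS_REQUIRED becomes SHARED_REQUIRES, meaning holding the lock for reading is enough, while EXCLUSIVE_LOCKS_REQUIRED becomes plain REQUIRES. Below is a small sketch of that split, again with invented names (RwLock, table_lock, Size, Insert) rather than ART's ReaderWriterMutex, and using Clang's attributes directly, which is likely close to, though not necessarily identical to, what the ART macros expand to.

#include <shared_mutex>

// A toy reader-writer lock annotated as a capability.
class __attribute__((capability("mutex"))) RwLock {
 public:
  void WriterLock()   __attribute__((acquire_capability()))        { mu_.lock(); }
  void WriterUnlock() __attribute__((release_capability()))        { mu_.unlock(); }
  void ReaderLock()   __attribute__((acquire_shared_capability())) { mu_.lock_shared(); }
  void ReaderUnlock() __attribute__((release_shared_capability())) { mu_.unlock_shared(); }
 private:
  std::shared_mutex mu_;  // C++17
};

RwLock table_lock;
int table_size __attribute__((guarded_by(table_lock))) = 0;

// Reading only needs the capability held shared: the SHARED_REQUIRES case.
int Size() __attribute__((requires_shared_capability(table_lock))) {
  return table_size;
}

// Writing needs it held exclusively: the plain REQUIRES case.
void Insert() __attribute__((requires_capability(table_lock))) {
  ++table_size;
}

Under -Wthread-safety, calling Insert() with only ReaderLock() held is reported, while Size() is satisfied by either ReaderLock() or WriterLock().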
diff --git a/runtime/indirect_reference_table_test.cc b/runtime/indirect_reference_table_test.cc index c20002bdf9..f376ec0c6d 100644 --- a/runtime/indirect_reference_table_test.cc +++ b/runtime/indirect_reference_table_test.cc @@ -26,7 +26,7 @@ namespace art { class IndirectReferenceTableTest : public CommonRuntimeTest {}; static void CheckDump(IndirectReferenceTable* irt, size_t num_objects, size_t num_unique) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { std::ostringstream oss; irt->Dump(oss); if (num_objects == 0) { diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc index abe9dc24ed..e28d578121 100644 --- a/runtime/instrumentation.cc +++ b/runtime/instrumentation.cc @@ -49,12 +49,20 @@ constexpr bool kVerboseInstrumentation = false; static constexpr StackVisitor::StackWalkKind kInstrumentationStackWalk = StackVisitor::StackWalkKind::kSkipInlinedFrames; -static bool InstallStubsClassVisitor(mirror::Class* klass, void* arg) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) { - Instrumentation* instrumentation = reinterpret_cast<Instrumentation*>(arg); - instrumentation->InstallStubsForClass(klass); - return true; // we visit all classes. -} +class InstallStubsClassVisitor : public ClassVisitor { + public: + explicit InstallStubsClassVisitor(Instrumentation* instrumentation) + : instrumentation_(instrumentation) {} + + bool Visit(mirror::Class* klass) OVERRIDE REQUIRES(Locks::mutator_lock_) { + instrumentation_->InstallStubsForClass(klass); + return true; // we visit all classes. + } + + private: + Instrumentation* const instrumentation_; +}; + Instrumentation::Instrumentation() : instrumentation_stubs_installed_(false), entry_exit_stubs_installed_(false), @@ -87,7 +95,7 @@ void Instrumentation::InstallStubsForClass(mirror::Class* klass) { } static void UpdateEntrypoints(ArtMethod* method, const void* quick_code) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { Runtime* const runtime = Runtime::Current(); jit::Jit* jit = runtime->GetJit(); if (jit != nullptr) { @@ -151,7 +159,7 @@ void Instrumentation::InstallStubsForMethod(ArtMethod* method) { // Since we may already have done this previously, we need to push new instrumentation frame before // existing instrumentation frames. static void InstrumentationInstallStack(Thread* thread, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { struct InstallStackVisitor FINAL : public StackVisitor { InstallStackVisitor(Thread* thread_in, Context* context, uintptr_t instrumentation_exit_pc) : StackVisitor(thread_in, context, kInstrumentationStackWalk), @@ -161,7 +169,7 @@ static void InstrumentationInstallStack(Thread* thread, void* arg) last_return_pc_(0) { } - bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod* m = GetMethod(); if (m == nullptr) { if (kVerboseInstrumentation) { @@ -291,7 +299,7 @@ static void InstrumentationInstallStack(Thread* thread, void* arg) // Removes the instrumentation exit pc as the return PC for every quick frame. 
static void InstrumentationRestoreStack(Thread* thread, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { struct RestoreStackVisitor FINAL : public StackVisitor { RestoreStackVisitor(Thread* thread_in, uintptr_t instrumentation_exit_pc, Instrumentation* instrumentation) @@ -302,7 +310,7 @@ static void InstrumentationRestoreStack(Thread* thread, void* arg) instrumentation_stack_(thread_in->GetInstrumentationStack()), frames_removed_(0) {} - bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { if (instrumentation_stack_->size() == 0) { return false; // Stop. } @@ -563,14 +571,16 @@ void Instrumentation::ConfigureStubs(const char* key, InstrumentationLevel desir entry_exit_stubs_installed_ = true; interpreter_stubs_installed_ = false; } - runtime->GetClassLinker()->VisitClasses(InstallStubsClassVisitor, this); + InstallStubsClassVisitor visitor(this); + runtime->GetClassLinker()->VisitClasses(&visitor); instrumentation_stubs_installed_ = true; MutexLock mu(self, *Locks::thread_list_lock_); runtime->GetThreadList()->ForEach(InstrumentationInstallStack, this); } else { interpreter_stubs_installed_ = false; entry_exit_stubs_installed_ = false; - runtime->GetClassLinker()->VisitClasses(InstallStubsClassVisitor, this); + InstallStubsClassVisitor visitor(this); + runtime->GetClassLinker()->VisitClasses(&visitor); // Restore stack only if there is no method currently deoptimized. bool empty; { @@ -931,7 +941,7 @@ void Instrumentation::ExceptionCaughtEvent(Thread* thread, static void CheckStackDepth(Thread* self, const InstrumentationStackFrame& instrumentation_frame, int delta) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { size_t frame_id = StackVisitor::ComputeNumFrames(self, kInstrumentationStackWalk) + delta; if (frame_id != instrumentation_frame.frame_id_) { LOG(ERROR) << "Expected frame_id=" << frame_id << " but found " diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h index db8e9c2508..93ff567dc3 100644 --- a/runtime/instrumentation.h +++ b/runtime/instrumentation.h @@ -63,24 +63,24 @@ struct InstrumentationListener { // Call-back for when a method is entered. virtual void MethodEntered(Thread* thread, mirror::Object* this_object, ArtMethod* method, - uint32_t dex_pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0; + uint32_t dex_pc) SHARED_REQUIRES(Locks::mutator_lock_) = 0; // Call-back for when a method is exited. virtual void MethodExited(Thread* thread, mirror::Object* this_object, ArtMethod* method, uint32_t dex_pc, const JValue& return_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0; + SHARED_REQUIRES(Locks::mutator_lock_) = 0; // Call-back for when a method is popped due to an exception throw. A method will either cause a // MethodExited call-back or a MethodUnwind call-back when its activation is removed. virtual void MethodUnwind(Thread* thread, mirror::Object* this_object, ArtMethod* method, uint32_t dex_pc) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0; + SHARED_REQUIRES(Locks::mutator_lock_) = 0; // Call-back for when the dex pc moves in a method. virtual void DexPcMoved(Thread* thread, mirror::Object* this_object, ArtMethod* method, uint32_t new_dex_pc) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0; + SHARED_REQUIRES(Locks::mutator_lock_) = 0; // Call-back for when we read from a field. 
virtual void FieldRead(Thread* thread, mirror::Object* this_object, ArtMethod* method, @@ -92,11 +92,11 @@ struct InstrumentationListener { // Call-back when an exception is caught. virtual void ExceptionCaught(Thread* thread, mirror::Throwable* exception_object) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0; + SHARED_REQUIRES(Locks::mutator_lock_) = 0; // Call-back for when we get a backward branch. virtual void BackwardBranch(Thread* thread, ArtMethod* method, int32_t dex_pc_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0; + SHARED_REQUIRES(Locks::mutator_lock_) = 0; }; // Instrumentation is a catch-all for when extra information is required from the runtime. The @@ -129,90 +129,83 @@ class Instrumentation { // for saying you should have suspended all threads (installing stubs while threads are running // will break). void AddListener(InstrumentationListener* listener, uint32_t events) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::classlinker_classes_lock_); + REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::classlinker_classes_lock_); // Removes a listener possibly removing instrumentation stubs. void RemoveListener(InstrumentationListener* listener, uint32_t events) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::classlinker_classes_lock_); + REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::classlinker_classes_lock_); // Deoptimization. void EnableDeoptimization() - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(deoptimized_methods_lock_); + REQUIRES(Locks::mutator_lock_, !deoptimized_methods_lock_); void DisableDeoptimization(const char* key) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(deoptimized_methods_lock_); + REQUIRES(Locks::mutator_lock_, !deoptimized_methods_lock_); bool AreAllMethodsDeoptimized() const { return interpreter_stubs_installed_; } - bool ShouldNotifyMethodEnterExitEvents() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool ShouldNotifyMethodEnterExitEvents() const SHARED_REQUIRES(Locks::mutator_lock_); // Executes everything with interpreter. void DeoptimizeEverything(const char* key) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::classlinker_classes_lock_); + REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::classlinker_classes_lock_, + !deoptimized_methods_lock_); // Executes everything with compiled code (or interpreter if there is no code). void UndeoptimizeEverything(const char* key) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::classlinker_classes_lock_); + REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::classlinker_classes_lock_, + !deoptimized_methods_lock_); // Deoptimize a method by forcing its execution with the interpreter. Nevertheless, a static // method (except a class initializer) set to the resolution trampoline will be deoptimized only // once its declaring class is initialized. void Deoptimize(ArtMethod* method) - LOCKS_EXCLUDED(Locks::thread_list_lock_, deoptimized_methods_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !deoptimized_methods_lock_); // Undeoptimze the method by restoring its entrypoints. 
Nevertheless, a static method // (except a class initializer) set to the resolution trampoline will be updated only once its // declaring class is initialized. void Undeoptimize(ArtMethod* method) - LOCKS_EXCLUDED(Locks::thread_list_lock_, deoptimized_methods_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !deoptimized_methods_lock_); // Indicates whether the method has been deoptimized so it is executed with the interpreter. bool IsDeoptimized(ArtMethod* method) - LOCKS_EXCLUDED(deoptimized_methods_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!deoptimized_methods_lock_) SHARED_REQUIRES(Locks::mutator_lock_); // Enable method tracing by installing instrumentation entry/exit stubs or interpreter. void EnableMethodTracing(const char* key, bool needs_interpreter = kDeoptimizeForAccurateMethodEntryExitListeners) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::classlinker_classes_lock_); + REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::classlinker_classes_lock_, + !deoptimized_methods_lock_); // Disable method tracing by uninstalling instrumentation entry/exit stubs or interpreter. void DisableMethodTracing(const char* key) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::classlinker_classes_lock_); + REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::classlinker_classes_lock_, + !deoptimized_methods_lock_); InterpreterHandlerTable GetInterpreterHandlerTable() const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return interpreter_handler_table_; } - void InstrumentQuickAllocEntryPoints() LOCKS_EXCLUDED(Locks::instrument_entrypoints_lock_); - void UninstrumentQuickAllocEntryPoints() LOCKS_EXCLUDED(Locks::instrument_entrypoints_lock_); + void InstrumentQuickAllocEntryPoints() REQUIRES(!Locks::instrument_entrypoints_lock_); + void UninstrumentQuickAllocEntryPoints() REQUIRES(!Locks::instrument_entrypoints_lock_); void InstrumentQuickAllocEntryPointsLocked() - EXCLUSIVE_LOCKS_REQUIRED(Locks::instrument_entrypoints_lock_) - LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::runtime_shutdown_lock_); + REQUIRES(Locks::instrument_entrypoints_lock_, !Locks::thread_list_lock_, + !Locks::runtime_shutdown_lock_); void UninstrumentQuickAllocEntryPointsLocked() - EXCLUSIVE_LOCKS_REQUIRED(Locks::instrument_entrypoints_lock_) - LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::runtime_shutdown_lock_); - void ResetQuickAllocEntryPoints() EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_); + REQUIRES(Locks::instrument_entrypoints_lock_, !Locks::thread_list_lock_, + !Locks::runtime_shutdown_lock_); + void ResetQuickAllocEntryPoints() REQUIRES(Locks::runtime_shutdown_lock_); // Update the code of a method respecting any installed stubs. void UpdateMethodsCode(ArtMethod* method, const void* quick_code) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_); // Get the quick code for the given method. More efficient than asking the class linker as it // will short-cut to GetCode if instrumentation and static method resolution stubs aren't // installed. 
const void* GetQuickCodeFor(ArtMethod* method, size_t pointer_size) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void ForceInterpretOnly() { interpret_only_ = true; @@ -232,39 +225,39 @@ class Instrumentation { return instrumentation_stubs_installed_; } - bool HasMethodEntryListeners() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool HasMethodEntryListeners() const SHARED_REQUIRES(Locks::mutator_lock_) { return have_method_entry_listeners_; } - bool HasMethodExitListeners() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool HasMethodExitListeners() const SHARED_REQUIRES(Locks::mutator_lock_) { return have_method_exit_listeners_; } - bool HasMethodUnwindListeners() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool HasMethodUnwindListeners() const SHARED_REQUIRES(Locks::mutator_lock_) { return have_method_unwind_listeners_; } - bool HasDexPcListeners() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool HasDexPcListeners() const SHARED_REQUIRES(Locks::mutator_lock_) { return have_dex_pc_listeners_; } - bool HasFieldReadListeners() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool HasFieldReadListeners() const SHARED_REQUIRES(Locks::mutator_lock_) { return have_field_read_listeners_; } - bool HasFieldWriteListeners() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool HasFieldWriteListeners() const SHARED_REQUIRES(Locks::mutator_lock_) { return have_field_write_listeners_; } - bool HasExceptionCaughtListeners() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool HasExceptionCaughtListeners() const SHARED_REQUIRES(Locks::mutator_lock_) { return have_exception_caught_listeners_; } - bool HasBackwardBranchListeners() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool HasBackwardBranchListeners() const SHARED_REQUIRES(Locks::mutator_lock_) { return have_backward_branch_listeners_; } - bool IsActive() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsActive() const SHARED_REQUIRES(Locks::mutator_lock_) { return have_dex_pc_listeners_ || have_method_entry_listeners_ || have_method_exit_listeners_ || have_field_read_listeners_ || have_field_write_listeners_ || have_exception_caught_listeners_ || have_method_unwind_listeners_; @@ -274,7 +267,7 @@ class Instrumentation { // listeners into executing code and get method enter events for methods already on the stack. void MethodEnterEvent(Thread* thread, mirror::Object* this_object, ArtMethod* method, uint32_t dex_pc) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (UNLIKELY(HasMethodEntryListeners())) { MethodEnterEventImpl(thread, this_object, method, dex_pc); } @@ -284,7 +277,7 @@ class Instrumentation { void MethodExitEvent(Thread* thread, mirror::Object* this_object, ArtMethod* method, uint32_t dex_pc, const JValue& return_value) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (UNLIKELY(HasMethodExitListeners())) { MethodExitEventImpl(thread, this_object, method, dex_pc, return_value); } @@ -293,12 +286,12 @@ class Instrumentation { // Inform listeners that a method has been exited due to an exception. void MethodUnwindEvent(Thread* thread, mirror::Object* this_object, ArtMethod* method, uint32_t dex_pc) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Inform listeners that the dex pc has moved (only supported by the interpreter). 
void DexPcMovedEvent(Thread* thread, mirror::Object* this_object, ArtMethod* method, uint32_t dex_pc) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (UNLIKELY(HasDexPcListeners())) { DexPcMovedEventImpl(thread, this_object, method, dex_pc); } @@ -306,7 +299,7 @@ class Instrumentation { // Inform listeners that a backward branch has been taken (only supported by the interpreter). void BackwardBranch(Thread* thread, ArtMethod* method, int32_t offset) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (UNLIKELY(HasBackwardBranchListeners())) { BackwardBranchImpl(thread, method, offset); } @@ -316,7 +309,7 @@ class Instrumentation { void FieldReadEvent(Thread* thread, mirror::Object* this_object, ArtMethod* method, uint32_t dex_pc, ArtField* field) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (UNLIKELY(HasFieldReadListeners())) { FieldReadEventImpl(thread, this_object, method, dex_pc, field); } @@ -326,7 +319,7 @@ class Instrumentation { void FieldWriteEvent(Thread* thread, mirror::Object* this_object, ArtMethod* method, uint32_t dex_pc, ArtField* field, const JValue& field_value) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (UNLIKELY(HasFieldWriteListeners())) { FieldWriteEventImpl(thread, this_object, method, dex_pc, field, field_value); } @@ -334,30 +327,31 @@ class Instrumentation { // Inform listeners that an exception was caught. void ExceptionCaughtEvent(Thread* thread, mirror::Throwable* exception_object) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Called when an instrumented method is entered. The intended link register (lr) is saved so // that returning causes a branch to the method exit stub. Generates method enter events. void PushInstrumentationStackFrame(Thread* self, mirror::Object* this_object, ArtMethod* method, uintptr_t lr, bool interpreter_entry) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Called when an instrumented method is exited. Removes the pushed instrumentation frame // returning the intended link register. Generates method exit events. TwoWordReturn PopInstrumentationStackFrame(Thread* self, uintptr_t* return_pc, uint64_t gpr_result, uint64_t fpr_result) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_); // Pops an instrumentation frame from the current thread and generate an unwind event. void PopMethodForUnwind(Thread* self, bool is_deoptimization) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Call back for configure stubs. - void InstallStubsForClass(mirror::Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void InstallStubsForClass(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!deoptimized_methods_lock_); void InstallStubsForMethod(ArtMethod* method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_); private: InstrumentationLevel GetCurrentInstrumentationLevel() const; @@ -368,11 +362,10 @@ class Instrumentation { // instrumentation level it needs. Therefore the current instrumentation level // becomes the highest instrumentation level required by a client. 
void ConfigureStubs(const char* key, InstrumentationLevel desired_instrumentation_level) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::classlinker_classes_lock_, - deoptimized_methods_lock_); + REQUIRES(Locks::mutator_lock_, !deoptimized_methods_lock_, !Locks::thread_list_lock_, + !Locks::classlinker_classes_lock_); - void UpdateInterpreterHandlerTable() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) { + void UpdateInterpreterHandlerTable() REQUIRES(Locks::mutator_lock_) { interpreter_handler_table_ = IsActive() ? kAlternativeHandlerTable : kMainHandlerTable; } @@ -382,38 +375,36 @@ class Instrumentation { void MethodEnterEventImpl(Thread* thread, mirror::Object* this_object, ArtMethod* method, uint32_t dex_pc) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void MethodExitEventImpl(Thread* thread, mirror::Object* this_object, ArtMethod* method, uint32_t dex_pc, const JValue& return_value) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void DexPcMovedEventImpl(Thread* thread, mirror::Object* this_object, ArtMethod* method, uint32_t dex_pc) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void BackwardBranchImpl(Thread* thread, ArtMethod* method, int32_t offset) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void FieldReadEventImpl(Thread* thread, mirror::Object* this_object, ArtMethod* method, uint32_t dex_pc, ArtField* field) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void FieldWriteEventImpl(Thread* thread, mirror::Object* this_object, ArtMethod* method, uint32_t dex_pc, ArtField* field, const JValue& field_value) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Read barrier-aware utility functions for accessing deoptimized_methods_ bool AddDeoptimizedMethod(ArtMethod* method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(deoptimized_methods_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(deoptimized_methods_lock_); bool IsDeoptimizedMethod(ArtMethod* method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, deoptimized_methods_lock_); + SHARED_REQUIRES(Locks::mutator_lock_, deoptimized_methods_lock_); bool RemoveDeoptimizedMethod(ArtMethod* method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(deoptimized_methods_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(deoptimized_methods_lock_); ArtMethod* BeginDeoptimizedMethod() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, deoptimized_methods_lock_); + SHARED_REQUIRES(Locks::mutator_lock_, deoptimized_methods_lock_); bool IsDeoptimizedMethodsEmpty() const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, deoptimized_methods_lock_); + SHARED_REQUIRES(Locks::mutator_lock_, deoptimized_methods_lock_); // Have we hijacked ArtMethod::code_ so that it calls instrumentation/interpreter code? 
bool instrumentation_stubs_installed_; @@ -508,7 +499,7 @@ struct InstrumentationStackFrame { interpreter_entry_(interpreter_entry) { } - std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() const SHARED_REQUIRES(Locks::mutator_lock_); mirror::Object* this_object_; ArtMethod* method_; diff --git a/runtime/instrumentation_test.cc b/runtime/instrumentation_test.cc index 85bb8c4197..b49f7e1bfa 100644 --- a/runtime/instrumentation_test.cc +++ b/runtime/instrumentation_test.cc @@ -44,7 +44,7 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio mirror::Object* this_object ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED, uint32_t dex_pc ATTRIBUTE_UNUSED) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { received_method_enter_event = true; } @@ -53,7 +53,7 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio ArtMethod* method ATTRIBUTE_UNUSED, uint32_t dex_pc ATTRIBUTE_UNUSED, const JValue& return_value ATTRIBUTE_UNUSED) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { received_method_exit_event = true; } @@ -61,7 +61,7 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio mirror::Object* this_object ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED, uint32_t dex_pc ATTRIBUTE_UNUSED) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { received_method_unwind_event = true; } @@ -69,7 +69,7 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio mirror::Object* this_object ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED, uint32_t new_dex_pc ATTRIBUTE_UNUSED) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { received_dex_pc_moved_event = true; } @@ -78,7 +78,7 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio ArtMethod* method ATTRIBUTE_UNUSED, uint32_t dex_pc ATTRIBUTE_UNUSED, ArtField* field ATTRIBUTE_UNUSED) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { received_field_read_event = true; } @@ -88,20 +88,20 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio uint32_t dex_pc ATTRIBUTE_UNUSED, ArtField* field ATTRIBUTE_UNUSED, const JValue& field_value ATTRIBUTE_UNUSED) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { received_field_written_event = true; } void ExceptionCaught(Thread* thread ATTRIBUTE_UNUSED, mirror::Throwable* exception_object ATTRIBUTE_UNUSED) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { received_exception_caught_event = true; } void BackwardBranch(Thread* thread ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED, int32_t dex_pc_offset ATTRIBUTE_UNUSED) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { received_backward_branch_event = true; } @@ -198,7 +198,7 @@ class InstrumentationTest : public CommonRuntimeTest { } void DeoptimizeMethod(Thread* self, ArtMethod* method, bool enable_deoptimization) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { Runtime* runtime = Runtime::Current(); instrumentation::Instrumentation* instrumentation = 
runtime->GetInstrumentation(); self->TransitionFromRunnableToSuspended(kSuspended); @@ -213,7 +213,7 @@ class InstrumentationTest : public CommonRuntimeTest { void UndeoptimizeMethod(Thread* self, ArtMethod* method, const char* key, bool disable_deoptimization) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { Runtime* runtime = Runtime::Current(); instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation(); self->TransitionFromRunnableToSuspended(kSuspended); @@ -227,7 +227,7 @@ class InstrumentationTest : public CommonRuntimeTest { } void DeoptimizeEverything(Thread* self, const char* key, bool enable_deoptimization) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { Runtime* runtime = Runtime::Current(); instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation(); self->TransitionFromRunnableToSuspended(kSuspended); @@ -241,7 +241,7 @@ class InstrumentationTest : public CommonRuntimeTest { } void UndeoptimizeEverything(Thread* self, const char* key, bool disable_deoptimization) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { Runtime* runtime = Runtime::Current(); instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation(); self->TransitionFromRunnableToSuspended(kSuspended); @@ -255,7 +255,7 @@ class InstrumentationTest : public CommonRuntimeTest { } void EnableMethodTracing(Thread* self, const char* key, bool needs_interpreter) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { Runtime* runtime = Runtime::Current(); instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation(); self->TransitionFromRunnableToSuspended(kSuspended); @@ -266,7 +266,7 @@ class InstrumentationTest : public CommonRuntimeTest { } void DisableMethodTracing(Thread* self, const char* key) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { Runtime* runtime = Runtime::Current(); instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation(); self->TransitionFromRunnableToSuspended(kSuspended); @@ -278,7 +278,7 @@ class InstrumentationTest : public CommonRuntimeTest { private: static bool HasEventListener(const instrumentation::Instrumentation* instr, uint32_t event_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { switch (event_type) { case instrumentation::Instrumentation::kMethodEntered: return instr->HasMethodEntryListeners(); @@ -305,7 +305,7 @@ class InstrumentationTest : public CommonRuntimeTest { static void ReportEvent(const instrumentation::Instrumentation* instr, uint32_t event_type, Thread* self, ArtMethod* method, mirror::Object* obj, uint32_t dex_pc) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { switch (event_type) { case instrumentation::Instrumentation::kMethodEntered: instr->MethodEnterEvent(self, obj, method, dex_pc); diff --git a/runtime/intern_table.h b/runtime/intern_table.h index ef08d74c7f..0be66759ac 100644 --- a/runtime/intern_table.h +++ b/runtime/intern_table.h @@ -58,70 +58,74 @@ class InternTable { // Interns a potentially new string in the 'strong' table. May cause thread suspension. 
mirror::String* InternStrong(int32_t utf16_length, const char* utf8_data) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); - // Only used by image writer. - mirror::String* InternImageString(mirror::String* s) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + // Only used by image writer. Special version that may not cause thread suspension since the GC + // can not be running while we are doing image writing. + mirror::String* InternImageString(mirror::String* s) SHARED_REQUIRES(Locks::mutator_lock_); // Interns a potentially new string in the 'strong' table. May cause thread suspension. - mirror::String* InternStrong(const char* utf8_data) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::String* InternStrong(const char* utf8_data) SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Roles::uninterruptible_); // Interns a potentially new string in the 'strong' table. May cause thread suspension. - mirror::String* InternStrong(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::String* InternStrong(mirror::String* s) SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Roles::uninterruptible_); // Interns a potentially new string in the 'weak' table. May cause thread suspension. - mirror::String* InternWeak(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::String* InternWeak(mirror::String* s) SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Roles::uninterruptible_); - void SweepInternTableWeaks(IsMarkedVisitor* visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SweepInternTableWeaks(IsMarkedVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Locks::intern_table_lock_); - bool ContainsWeak(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool ContainsWeak(mirror::String* s) SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Locks::intern_table_lock_); // Total number of interned strings. - size_t Size() const LOCKS_EXCLUDED(Locks::intern_table_lock_); + size_t Size() const REQUIRES(!Locks::intern_table_lock_); // Total number of weakly live interned strings. - size_t StrongSize() const LOCKS_EXCLUDED(Locks::intern_table_lock_); + size_t StrongSize() const REQUIRES(!Locks::intern_table_lock_); // Total number of strongly live interned strings. 
- size_t WeakSize() const LOCKS_EXCLUDED(Locks::intern_table_lock_); + size_t WeakSize() const REQUIRES(!Locks::intern_table_lock_); void VisitRoots(RootVisitor* visitor, VisitRootFlags flags) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::intern_table_lock_); - void DumpForSigQuit(std::ostream& os) const; + void DumpForSigQuit(std::ostream& os) const REQUIRES(!Locks::intern_table_lock_); - void DisallowNewInterns() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void AllowNewInterns() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void EnsureNewInternsDisallowed() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void BroadcastForNewInterns() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void EnsureNewWeakInternsDisallowed() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void DisallowNewInterns() SHARED_REQUIRES(Locks::mutator_lock_); + void AllowNewInterns() SHARED_REQUIRES(Locks::mutator_lock_); + void EnsureNewInternsDisallowed() SHARED_REQUIRES(Locks::mutator_lock_); + void BroadcastForNewInterns() SHARED_REQUIRES(Locks::mutator_lock_); + void EnsureNewWeakInternsDisallowed() SHARED_REQUIRES(Locks::mutator_lock_); // Adds all of the resolved image strings from the image space into the intern table. The // advantage of doing this is preventing expensive DexFile::FindStringId calls. void AddImageStringsToTable(gc::space::ImageSpace* image_space) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::intern_table_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::intern_table_lock_); + // Copy the post zygote tables to pre zygote to save memory by preventing dirty pages. void SwapPostZygoteWithPreZygote() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::intern_table_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::intern_table_lock_); // Add an intern table which was serialized to the image. void AddImageInternTable(gc::space::ImageSpace* image_space) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::intern_table_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::intern_table_lock_); // Read the intern table from memory. The elements aren't copied, the intern hash set data will // point to somewhere within ptr. Only reads the strong interns. - size_t ReadFromMemory(const uint8_t* ptr) LOCKS_EXCLUDED(Locks::intern_table_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + size_t ReadFromMemory(const uint8_t* ptr) REQUIRES(!Locks::intern_table_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Write the post zygote intern table to a pointer. Only writes the strong interns since it is // expected that there is no weak interns since this is called from the image writer. - size_t WriteToMemory(uint8_t* ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::intern_table_lock_); + size_t WriteToMemory(uint8_t* ptr) SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Locks::intern_table_lock_); // Change the weak root state. May broadcast to waiters. void ChangeWeakRootState(gc::WeakRootState new_state) - LOCKS_EXCLUDED(Locks::intern_table_lock_); + REQUIRES(!Locks::intern_table_lock_); private: class StringHashEquals { @@ -144,39 +148,33 @@ class InternTable { // weak interns and strong interns. 
class Table { public: - mirror::String* Find(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_); - void Insert(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_); + mirror::String* Find(mirror::String* s) SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::intern_table_lock_); + void Insert(mirror::String* s) SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::intern_table_lock_); void Remove(mirror::String* s) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_); void VisitRoots(RootVisitor* visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_); void SweepWeaks(IsMarkedVisitor* visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_); - void SwapPostZygoteWithPreZygote() EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_); - size_t Size() const EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_); + void SwapPostZygoteWithPreZygote() REQUIRES(Locks::intern_table_lock_); + size_t Size() const REQUIRES(Locks::intern_table_lock_); // Read pre zygote table is called from ReadFromMemory which happens during runtime creation // when we load the image intern table. Returns how many bytes were read. size_t ReadIntoPreZygoteTable(const uint8_t* ptr) - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::intern_table_lock_) SHARED_REQUIRES(Locks::mutator_lock_); // The image writer calls WritePostZygoteTable through WriteToMemory, it writes the interns in // the post zygote table. Returns how many bytes were written. size_t WriteFromPostZygoteTable(uint8_t* ptr) - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::intern_table_lock_) SHARED_REQUIRES(Locks::mutator_lock_); private: typedef HashSet<GcRoot<mirror::String>, GcRootEmptyFn, StringHashEquals, StringHashEquals, TrackingAllocator<GcRoot<mirror::String>, kAllocatorTagInternTable>> UnorderedSet; void SweepWeaks(UnorderedSet* set, IsMarkedVisitor* visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_); // We call SwapPostZygoteWithPreZygote when we create the zygote to reduce private dirty pages // caused by modifying the zygote intern table hash table. The pre zygote table are the @@ -188,57 +186,43 @@ class InternTable { // Insert if non null, otherwise return null. 
mirror::String* Insert(mirror::String* s, bool is_strong, bool holding_locks) - LOCKS_EXCLUDED(Locks::intern_table_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::intern_table_lock_) SHARED_REQUIRES(Locks::mutator_lock_); mirror::String* LookupStrong(mirror::String* s) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_); mirror::String* LookupWeak(mirror::String* s) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_); mirror::String* InsertStrong(mirror::String* s) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_); mirror::String* InsertWeak(mirror::String* s) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_); void RemoveStrong(mirror::String* s) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_); void RemoveWeak(mirror::String* s) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_); // Transaction rollback access. mirror::String* LookupStringFromImage(mirror::String* s) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_); mirror::String* InsertStrongFromTransaction(mirror::String* s) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_); mirror::String* InsertWeakFromTransaction(mirror::String* s) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_); void RemoveStrongFromTransaction(mirror::String* s) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_); void RemoveWeakFromTransaction(mirror::String* s) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_); - friend class Transaction; + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_); size_t ReadFromMemoryLocked(const uint8_t* ptr) - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::intern_table_lock_) SHARED_REQUIRES(Locks::mutator_lock_); // Change the weak root state. May broadcast to waiters. void ChangeWeakRootStateLocked(gc::WeakRootState new_state) - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_); + REQUIRES(Locks::intern_table_lock_); // Wait until we can read weak roots. 
- void WaitUntilAccessible(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void WaitUntilAccessible(Thread* self) + REQUIRES(Locks::intern_table_lock_) SHARED_REQUIRES(Locks::mutator_lock_); bool image_added_to_intern_table_ GUARDED_BY(Locks::intern_table_lock_); bool log_new_roots_ GUARDED_BY(Locks::intern_table_lock_); @@ -256,6 +240,9 @@ class InternTable { Table weak_interns_ GUARDED_BY(Locks::intern_table_lock_); // Weak root state, used for concurrent system weak processing and more. gc::WeakRootState weak_root_state_ GUARDED_BY(Locks::intern_table_lock_); + + friend class Transaction; + DISALLOW_COPY_AND_ASSIGN(InternTable); }; } // namespace art diff --git a/runtime/intern_table_test.cc b/runtime/intern_table_test.cc index c987180d40..b60b32d06b 100644 --- a/runtime/intern_table_test.cc +++ b/runtime/intern_table_test.cc @@ -62,7 +62,7 @@ TEST_F(InternTableTest, Size) { class TestPredicate : public IsMarkedVisitor { public: - mirror::Object* IsMarked(mirror::Object* s) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Object* IsMarked(mirror::Object* s) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { bool erased = false; for (auto it = expected_.begin(), end = expected_.end(); it != end; ++it) { if (*it == s) { diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc index 26860e7100..6c6232c437 100644 --- a/runtime/interpreter/interpreter.cc +++ b/runtime/interpreter/interpreter.cc @@ -28,7 +28,7 @@ namespace interpreter { static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& shorty, Object* receiver, uint32_t* args, JValue* result) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // TODO: The following enters JNI code using a typedef-ed function rather than the JNI compiler, // it should be removed and JNI compiled stubs used instead. ScopedObjectAccessUnchecked soa(self); @@ -240,23 +240,23 @@ JValue ExecuteGotoImpl(Thread*, const DexFile::CodeItem*, ShadowFrame&, JValue) UNREACHABLE(); } // Explicit definitions of ExecuteGotoImpl. 
-template<> SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) +template<> SHARED_REQUIRES(Locks::mutator_lock_) JValue ExecuteGotoImpl<true, false>(Thread* self, const DexFile::CodeItem* code_item, ShadowFrame& shadow_frame, JValue result_register); -template<> SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) +template<> SHARED_REQUIRES(Locks::mutator_lock_) JValue ExecuteGotoImpl<false, false>(Thread* self, const DexFile::CodeItem* code_item, ShadowFrame& shadow_frame, JValue result_register); -template<> SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) +template<> SHARED_REQUIRES(Locks::mutator_lock_) JValue ExecuteGotoImpl<true, true>(Thread* self, const DexFile::CodeItem* code_item, ShadowFrame& shadow_frame, JValue result_register); -template<> SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) +template<> SHARED_REQUIRES(Locks::mutator_lock_) JValue ExecuteGotoImpl<false, true>(Thread* self, const DexFile::CodeItem* code_item, ShadowFrame& shadow_frame, JValue result_register); #endif static JValue Execute(Thread* self, const DexFile::CodeItem* code_item, ShadowFrame& shadow_frame, JValue result_register) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static inline JValue Execute(Thread* self, const DexFile::CodeItem* code_item, ShadowFrame& shadow_frame, JValue result_register) { @@ -395,7 +395,7 @@ void EnterInterpreterFromInvoke(Thread* self, ArtMethod* method, Object* receive } void EnterInterpreterFromDeoptimize(Thread* self, ShadowFrame* shadow_frame, JValue* ret_val) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { JValue value; // Set value to last known result in case the shadow frame chain is empty. value.SetJ(ret_val->GetJ()); diff --git a/runtime/interpreter/interpreter.h b/runtime/interpreter/interpreter.h index 446c5bb4a5..61140a24cf 100644 --- a/runtime/interpreter/interpreter.h +++ b/runtime/interpreter/interpreter.h @@ -35,26 +35,26 @@ namespace interpreter { // Called by ArtMethod::Invoke, shadow frames arguments are taken from the args array. 
extern void EnterInterpreterFromInvoke(Thread* self, ArtMethod* method, mirror::Object* receiver, uint32_t* args, JValue* result) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); extern void EnterInterpreterFromDeoptimize(Thread* self, ShadowFrame* shadow_frame, JValue* ret_val) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); extern JValue EnterInterpreterFromEntryPoint(Thread* self, const DexFile::CodeItem* code_item, ShadowFrame* shadow_frame) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); } // namespace interpreter extern "C" void artInterpreterToInterpreterBridge(Thread* self, const DexFile::CodeItem* code_item, ShadowFrame* shadow_frame, JValue* result) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); extern "C" void artInterpreterToCompiledCodeBridge(Thread* self, const DexFile::CodeItem* code_item, ShadowFrame* shadow_frame, JValue* result) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); } // namespace art diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc index 0980ea1bc2..9de9e8ada2 100644 --- a/runtime/interpreter/interpreter_common.cc +++ b/runtime/interpreter/interpreter_common.cc @@ -192,7 +192,7 @@ EXPLICIT_DO_IGET_QUICK_TEMPLATE_DECL(Primitive::kPrimNot); // iget-object-q template<Primitive::Type field_type> static JValue GetFieldValue(const ShadowFrame& shadow_frame, uint32_t vreg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { JValue field_value; switch (field_type) { case Primitive::kPrimBoolean: @@ -450,7 +450,7 @@ void UnexpectedOpcode(const Instruction* inst, const ShadowFrame& shadow_frame) // Assign register 'src_reg' from shadow_frame to register 'dest_reg' into new_shadow_frame. static inline void AssignRegister(ShadowFrame* new_shadow_frame, const ShadowFrame& shadow_frame, size_t dest_reg, size_t src_reg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // Uint required, so that sign extension does not make this wrong on 64b systems uint32_t src_value = shadow_frame.GetVReg(src_reg); mirror::Object* o = shadow_frame.GetVRegReference<kVerifyNone>(src_reg); @@ -482,7 +482,7 @@ void AbortTransactionV(Thread* self, const char* fmt, va_list args) { } // Separate declaration is required solely for the attributes. -template<bool is_range, bool do_assignability_check> SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) +template<bool is_range, bool do_assignability_check> SHARED_REQUIRES(Locks::mutator_lock_) static inline bool DoCallCommon(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_frame, @@ -491,7 +491,7 @@ static inline bool DoCallCommon(ArtMethod* called_method, uint32_t arg[Instruction::kMaxVarArgRegs], uint32_t vregC) ALWAYS_INLINE; -SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) +SHARED_REQUIRES(Locks::mutator_lock_) static inline bool NeedsInterpreter(Thread* self, ShadowFrame* new_shadow_frame) ALWAYS_INLINE; static inline bool NeedsInterpreter(Thread* self, ShadowFrame* new_shadow_frame) { @@ -834,7 +834,7 @@ bool DoFilledNewArray(const Instruction* inst, const ShadowFrame& shadow_frame, return true; } -// TODO fix thread analysis: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_). +// TODO fix thread analysis: should be SHARED_REQUIRES(Locks::mutator_lock_). 
template<typename T> static void RecordArrayElementsInTransactionImpl(mirror::PrimitiveArray<T>* array, int32_t count) NO_THREAD_SAFETY_ANALYSIS { @@ -845,7 +845,7 @@ static void RecordArrayElementsInTransactionImpl(mirror::PrimitiveArray<T>* arra } void RecordArrayElementsInTransaction(mirror::Array* array, int32_t count) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(Runtime::Current()->IsActiveTransaction()); DCHECK(array != nullptr); DCHECK_LE(count, array->GetLength()); @@ -884,7 +884,7 @@ void RecordArrayElementsInTransaction(mirror::Array* array, int32_t count) // Explicit DoCall template function declarations. #define EXPLICIT_DO_CALL_TEMPLATE_DECL(_is_range, _do_assignability_check) \ - template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) \ + template SHARED_REQUIRES(Locks::mutator_lock_) \ bool DoCall<_is_range, _do_assignability_check>(ArtMethod* method, Thread* self, \ ShadowFrame& shadow_frame, \ const Instruction* inst, uint16_t inst_data, \ @@ -897,7 +897,7 @@ EXPLICIT_DO_CALL_TEMPLATE_DECL(true, true); // Explicit DoLambdaCall template function declarations. #define EXPLICIT_DO_LAMBDA_CALL_TEMPLATE_DECL(_is_range, _do_assignability_check) \ - template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) \ + template SHARED_REQUIRES(Locks::mutator_lock_) \ bool DoLambdaCall<_is_range, _do_assignability_check>(ArtMethod* method, Thread* self, \ ShadowFrame& shadow_frame, \ const Instruction* inst, \ @@ -911,7 +911,7 @@ EXPLICIT_DO_LAMBDA_CALL_TEMPLATE_DECL(true, true); // Explicit DoFilledNewArray template function declarations. #define EXPLICIT_DO_FILLED_NEW_ARRAY_TEMPLATE_DECL(_is_range_, _check, _transaction_active) \ - template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) \ + template SHARED_REQUIRES(Locks::mutator_lock_) \ bool DoFilledNewArray<_is_range_, _check, _transaction_active>(const Instruction* inst, \ const ShadowFrame& shadow_frame, \ Thread* self, JValue* result) diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h index 9babb18325..a6cccef617 100644 --- a/runtime/interpreter/interpreter_common.h +++ b/runtime/interpreter/interpreter_common.h @@ -72,7 +72,7 @@ extern JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowFrame& shadow_frame, JValue result_register); void ThrowNullPointerExceptionFromInterpreter() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static inline void DoMonitorEnter(Thread* self, Object* ref) NO_THREAD_SAFETY_ANALYSIS { ref->MonitorEnter(self); @@ -84,13 +84,13 @@ static inline void DoMonitorExit(Thread* self, Object* ref) NO_THREAD_SAFETY_ANA void AbortTransactionF(Thread* self, const char* fmt, ...) __attribute__((__format__(__printf__, 2, 3))) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void AbortTransactionV(Thread* self, const char* fmt, va_list args) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void RecordArrayElementsInTransaction(mirror::Array* array, int32_t count) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Invokes the given method. This is part of the invocation support and is used by DoInvoke and // DoInvokeVirtualQuick functions. @@ -114,7 +114,7 @@ bool DoLambdaCall(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_fr // // If the validation fails, return false and raise an exception. 
static inline bool IsValidLambdaTargetOrThrow(ArtMethod* called_method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { bool success = false; if (UNLIKELY(called_method == nullptr)) { @@ -191,7 +191,7 @@ static inline bool DoCreateLambda(Thread* self, ShadowFrame& shadow_frame, // (Exceptions are thrown by creating a new exception and then being put in the thread TLS) static inline ArtMethod* ReadLambdaClosureFromVRegsOrThrow(ShadowFrame& shadow_frame, uint32_t vreg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // TODO(iam): Introduce a closure abstraction that will contain the captured variables // instead of just an ArtMethod. // This is temporarily using 2 vregs because a native ArtMethod can be up to 64-bit, @@ -306,32 +306,32 @@ static inline bool DoInvokeVirtualQuick(Thread* self, ShadowFrame& shadow_frame, // Returns true on success, otherwise throws an exception and returns false. template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check> bool DoFieldGet(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst, - uint16_t inst_data) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + uint16_t inst_data) SHARED_REQUIRES(Locks::mutator_lock_); // Handles iget-quick, iget-wide-quick and iget-object-quick instructions. // Returns true on success, otherwise throws an exception and returns false. template<Primitive::Type field_type> bool DoIGetQuick(ShadowFrame& shadow_frame, const Instruction* inst, uint16_t inst_data) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Handles iput-XXX and sput-XXX instructions. // Returns true on success, otherwise throws an exception and returns false. template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check, bool transaction_active> bool DoFieldPut(Thread* self, const ShadowFrame& shadow_frame, const Instruction* inst, - uint16_t inst_data) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + uint16_t inst_data) SHARED_REQUIRES(Locks::mutator_lock_); // Handles iput-quick, iput-wide-quick and iput-object-quick instructions. // Returns true on success, otherwise throws an exception and returns false. template<Primitive::Type field_type, bool transaction_active> bool DoIPutQuick(const ShadowFrame& shadow_frame, const Instruction* inst, uint16_t inst_data) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Handles string resolution for const-string and const-string-jumbo instructions. Also ensures the // java.lang.String class is initialized. static inline String* ResolveString(Thread* self, ShadowFrame& shadow_frame, uint32_t string_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { Class* java_lang_string_class = String::GetJavaLangString(); if (UNLIKELY(!java_lang_string_class->IsInitialized())) { ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); @@ -358,7 +358,7 @@ static inline String* ResolveString(Thread* self, ShadowFrame& shadow_frame, uin // Returns true on success, otherwise throws a java.lang.ArithmeticException and return false. 
static inline bool DoIntDivide(ShadowFrame& shadow_frame, size_t result_reg, int32_t dividend, int32_t divisor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { constexpr int32_t kMinInt = std::numeric_limits<int32_t>::min(); if (UNLIKELY(divisor == 0)) { ThrowArithmeticExceptionDivideByZero(); @@ -376,7 +376,7 @@ static inline bool DoIntDivide(ShadowFrame& shadow_frame, size_t result_reg, // Returns true on success, otherwise throws a java.lang.ArithmeticException and return false. static inline bool DoIntRemainder(ShadowFrame& shadow_frame, size_t result_reg, int32_t dividend, int32_t divisor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { constexpr int32_t kMinInt = std::numeric_limits<int32_t>::min(); if (UNLIKELY(divisor == 0)) { ThrowArithmeticExceptionDivideByZero(); @@ -394,7 +394,7 @@ static inline bool DoIntRemainder(ShadowFrame& shadow_frame, size_t result_reg, // Returns true on success, otherwise throws a java.lang.ArithmeticException and return false. static inline bool DoLongDivide(ShadowFrame& shadow_frame, size_t result_reg, int64_t dividend, int64_t divisor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { const int64_t kMinLong = std::numeric_limits<int64_t>::min(); if (UNLIKELY(divisor == 0)) { ThrowArithmeticExceptionDivideByZero(); @@ -412,7 +412,7 @@ static inline bool DoLongDivide(ShadowFrame& shadow_frame, size_t result_reg, // Returns true on success, otherwise throws a java.lang.ArithmeticException and return false. static inline bool DoLongRemainder(ShadowFrame& shadow_frame, size_t result_reg, int64_t dividend, int64_t divisor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { const int64_t kMinLong = std::numeric_limits<int64_t>::min(); if (UNLIKELY(divisor == 0)) { ThrowArithmeticExceptionDivideByZero(); @@ -436,7 +436,7 @@ bool DoFilledNewArray(const Instruction* inst, const ShadowFrame& shadow_frame, // Returns the branch offset to the next instruction to execute. static inline int32_t DoPackedSwitch(const Instruction* inst, const ShadowFrame& shadow_frame, uint16_t inst_data) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(inst->Opcode() == Instruction::PACKED_SWITCH); const uint16_t* switch_data = reinterpret_cast<const uint16_t*>(inst) + inst->VRegB_31t(); int32_t test_val = shadow_frame.GetVReg(inst->VRegA_31t(inst_data)); @@ -464,7 +464,7 @@ static inline int32_t DoPackedSwitch(const Instruction* inst, const ShadowFrame& // Returns the branch offset to the next instruction to execute. 
static inline int32_t DoSparseSwitch(const Instruction* inst, const ShadowFrame& shadow_frame, uint16_t inst_data) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(inst->Opcode() == Instruction::SPARSE_SWITCH); const uint16_t* switch_data = reinterpret_cast<const uint16_t*>(inst) + inst->VRegB_31t(); int32_t test_val = shadow_frame.GetVReg(inst->VRegA_31t(inst_data)); @@ -497,7 +497,7 @@ static inline int32_t DoSparseSwitch(const Instruction* inst, const ShadowFrame& template <bool _do_check> static inline bool DoBoxLambda(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst, - uint16_t inst_data) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + uint16_t inst_data) SHARED_REQUIRES(Locks::mutator_lock_) { /* * box-lambda vA, vB /// opcode 0xf8, format 22x * - vA is the target register where the Object representation of the closure will be stored into @@ -529,7 +529,7 @@ static inline bool DoBoxLambda(Thread* self, ShadowFrame& shadow_frame, const In return true; } -template <bool _do_check> SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) +template <bool _do_check> SHARED_REQUIRES(Locks::mutator_lock_) static inline bool DoUnboxLambda(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst, @@ -553,7 +553,7 @@ static inline bool DoUnboxLambda(Thread* self, ArtMethod* unboxed_closure = nullptr; // Raise an exception if unboxing fails. if (!Runtime::Current()->GetLambdaBoxTable()->UnboxLambda(boxed_closure_object, - &unboxed_closure)) { + outof(unboxed_closure))) { CHECK(self->IsExceptionPending()); return false; } @@ -565,15 +565,15 @@ static inline bool DoUnboxLambda(Thread* self, uint32_t FindNextInstructionFollowingException(Thread* self, ShadowFrame& shadow_frame, uint32_t dex_pc, const instrumentation::Instrumentation* instrumentation) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); NO_RETURN void UnexpectedOpcode(const Instruction* inst, const ShadowFrame& shadow_frame) __attribute__((cold)) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static inline void TraceExecution(const ShadowFrame& shadow_frame, const Instruction* inst, const uint32_t dex_pc) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { constexpr bool kTracing = false; if (kTracing) { #define TRACE_LOG std::cerr @@ -605,7 +605,7 @@ static inline bool IsBackwardBranch(int32_t branch_offset) { // Explicitly instantiate all DoInvoke functions. #define EXPLICIT_DO_INVOKE_TEMPLATE_DECL(_type, _is_range, _do_check) \ - template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) \ + template SHARED_REQUIRES(Locks::mutator_lock_) \ bool DoInvoke<_type, _is_range, _do_check>(Thread* self, ShadowFrame& shadow_frame, \ const Instruction* inst, uint16_t inst_data, \ JValue* result) @@ -626,7 +626,7 @@ EXPLICIT_DO_INVOKE_ALL_TEMPLATE_DECL(kInterface) // invoke-interface/range. // Explicitly instantiate all DoInvokeVirtualQuick functions. #define EXPLICIT_DO_INVOKE_VIRTUAL_QUICK_TEMPLATE_DECL(_is_range) \ - template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) \ + template SHARED_REQUIRES(Locks::mutator_lock_) \ bool DoInvokeVirtualQuick<_is_range>(Thread* self, ShadowFrame& shadow_frame, \ const Instruction* inst, uint16_t inst_data, \ JValue* result) @@ -637,7 +637,7 @@ EXPLICIT_DO_INVOKE_VIRTUAL_QUICK_TEMPLATE_DECL(true); // invoke-virtual-quick- // Explicitly instantiate all DoCreateLambda functions. 
#define EXPLICIT_DO_CREATE_LAMBDA_DECL(_do_check) \ -template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) \ +template SHARED_REQUIRES(Locks::mutator_lock_) \ bool DoCreateLambda<_do_check>(Thread* self, ShadowFrame& shadow_frame, \ const Instruction* inst) @@ -647,7 +647,7 @@ EXPLICIT_DO_CREATE_LAMBDA_DECL(true); // create-lambda // Explicitly instantiate all DoInvokeLambda functions. #define EXPLICIT_DO_INVOKE_LAMBDA_DECL(_do_check) \ -template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) \ +template SHARED_REQUIRES(Locks::mutator_lock_) \ bool DoInvokeLambda<_do_check>(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst, \ uint16_t inst_data, JValue* result); @@ -657,7 +657,7 @@ EXPLICIT_DO_INVOKE_LAMBDA_DECL(true); // invoke-lambda // Explicitly instantiate all DoBoxLambda functions. #define EXPLICIT_DO_BOX_LAMBDA_DECL(_do_check) \ -template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) \ +template SHARED_REQUIRES(Locks::mutator_lock_) \ bool DoBoxLambda<_do_check>(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst, \ uint16_t inst_data); @@ -667,7 +667,7 @@ EXPLICIT_DO_BOX_LAMBDA_DECL(true); // box-lambda // Explicitly instantiate all DoUnBoxLambda functions. #define EXPLICIT_DO_UNBOX_LAMBDA_DECL(_do_check) \ -template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) \ +template SHARED_REQUIRES(Locks::mutator_lock_) \ bool DoUnboxLambda<_do_check>(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst, \ uint16_t inst_data); diff --git a/runtime/interpreter/interpreter_goto_table_impl.cc b/runtime/interpreter/interpreter_goto_table_impl.cc index ec923b6eb2..7027cbfc52 100644 --- a/runtime/interpreter/interpreter_goto_table_impl.cc +++ b/runtime/interpreter/interpreter_goto_table_impl.cc @@ -2536,16 +2536,16 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF } // NOLINT(readability/fn_size) // Explicit definitions of ExecuteGotoImpl. -template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) HOT_ATTR +template SHARED_REQUIRES(Locks::mutator_lock_) HOT_ATTR JValue ExecuteGotoImpl<true, false>(Thread* self, const DexFile::CodeItem* code_item, ShadowFrame& shadow_frame, JValue result_register); -template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) HOT_ATTR +template SHARED_REQUIRES(Locks::mutator_lock_) HOT_ATTR JValue ExecuteGotoImpl<false, false>(Thread* self, const DexFile::CodeItem* code_item, ShadowFrame& shadow_frame, JValue result_register); -template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) +template SHARED_REQUIRES(Locks::mutator_lock_) JValue ExecuteGotoImpl<true, true>(Thread* self, const DexFile::CodeItem* code_item, ShadowFrame& shadow_frame, JValue result_register); -template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) +template SHARED_REQUIRES(Locks::mutator_lock_) JValue ExecuteGotoImpl<false, true>(Thread* self, const DexFile::CodeItem* code_item, ShadowFrame& shadow_frame, JValue result_register); diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc index 78090bbe0c..544f7886e9 100644 --- a/runtime/interpreter/interpreter_switch_impl.cc +++ b/runtime/interpreter/interpreter_switch_impl.cc @@ -2283,16 +2283,16 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item, } // NOLINT(readability/fn_size) // Explicit definitions of ExecuteSwitchImpl. 
-template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) HOT_ATTR +template SHARED_REQUIRES(Locks::mutator_lock_) HOT_ATTR JValue ExecuteSwitchImpl<true, false>(Thread* self, const DexFile::CodeItem* code_item, ShadowFrame& shadow_frame, JValue result_register); -template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) HOT_ATTR +template SHARED_REQUIRES(Locks::mutator_lock_) HOT_ATTR JValue ExecuteSwitchImpl<false, false>(Thread* self, const DexFile::CodeItem* code_item, ShadowFrame& shadow_frame, JValue result_register); -template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) +template SHARED_REQUIRES(Locks::mutator_lock_) JValue ExecuteSwitchImpl<true, true>(Thread* self, const DexFile::CodeItem* code_item, ShadowFrame& shadow_frame, JValue result_register); -template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) +template SHARED_REQUIRES(Locks::mutator_lock_) JValue ExecuteSwitchImpl<false, true>(Thread* self, const DexFile::CodeItem* code_item, ShadowFrame& shadow_frame, JValue result_register); diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc index 43e24faed3..22701ac7fd 100644 --- a/runtime/interpreter/unstarted_runtime.cc +++ b/runtime/interpreter/unstarted_runtime.cc @@ -46,7 +46,7 @@ namespace interpreter { static void AbortTransactionOrFail(Thread* self, const char* fmt, ...) __attribute__((__format__(__printf__, 2, 3))) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void AbortTransactionOrFail(Thread* self, const char* fmt, ...) { va_list args; @@ -69,7 +69,7 @@ static void UnstartedRuntimeFindClass(Thread* self, Handle<mirror::String> class Handle<mirror::ClassLoader> class_loader, JValue* result, const std::string& method_name, bool initialize_class, bool abort_if_not_found) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { CHECK(className.Get() != nullptr); std::string descriptor(DotToDescriptor(className->ToModifiedUtf8().c_str())); ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); @@ -99,7 +99,7 @@ static void UnstartedRuntimeFindClass(Thread* self, Handle<mirror::String> class // actually the transaction abort exception. This must not be wrapped, as it signals an // initialization abort. static void CheckExceptionGenerateClassNotFound(Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (self->IsExceptionPending()) { // If it is not the transaction abort exception, wrap it. 
std::string type(PrettyTypeOf(self->GetException())); @@ -111,7 +111,7 @@ static void CheckExceptionGenerateClassNotFound(Thread* self) } static mirror::String* GetClassName(Thread* self, ShadowFrame* shadow_frame, size_t arg_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { mirror::Object* param = shadow_frame->GetVRegReference(arg_offset); if (param == nullptr) { AbortTransactionOrFail(self, "Null-pointer in Class.forName."); @@ -294,7 +294,7 @@ static void PrimitiveArrayCopy(Thread* self, mirror::Array* src_array, int32_t src_pos, mirror::Array* dst_array, int32_t dst_pos, int32_t length) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (src_array->GetClass()->GetComponentType() != dst_array->GetClass()->GetComponentType()) { AbortTransactionOrFail(self, "Types mismatched in arraycopy: %s vs %s.", PrettyDescriptor(src_array->GetClass()->GetComponentType()).c_str(), @@ -490,7 +490,7 @@ void UnstartedRuntime::UnstartedDoubleDoubleToRawLongBits( } static mirror::Object* GetDexFromDexCache(Thread* self, mirror::DexCache* dex_cache) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { const DexFile* dex_file = dex_cache->GetDexFile(); if (dex_file == nullptr) { return nullptr; @@ -601,7 +601,7 @@ void UnstartedRuntime::UnstartedMemoryPeekLong( static void UnstartedMemoryPeekArray( Primitive::Type type, Thread* self, ShadowFrame* shadow_frame, size_t arg_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { int64_t address_long = shadow_frame->GetVRegLong(arg_offset); mirror::Object* obj = shadow_frame->GetVRegReference(arg_offset + 2); if (obj == nullptr) { @@ -840,7 +840,7 @@ void UnstartedRuntime::UnstartedStringFastSubstring( // This allows getting the char array for new style of String objects during compilation. void UnstartedRuntime::UnstartedStringToCharArray( Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { mirror::String* string = shadow_frame->GetVRegReference(arg_offset)->AsString(); if (string == nullptr) { AbortTransactionOrFail(self, "String.charAt with null object"); diff --git a/runtime/interpreter/unstarted_runtime.h b/runtime/interpreter/unstarted_runtime.h index a357d5fa18..03d7026ef7 100644 --- a/runtime/interpreter/unstarted_runtime.h +++ b/runtime/interpreter/unstarted_runtime.h @@ -52,14 +52,14 @@ class UnstartedRuntime { ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void Jni(Thread* self, ArtMethod* method, mirror::Object* receiver, uint32_t* args, JValue* result) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); private: // Methods that intercept available libcore implementations. 
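
The substitution running through these hunks is uniform: SHARED_LOCKS_REQUIRED(lock) becomes SHARED_REQUIRES(lock), EXCLUSIVE_LOCKS_REQUIRED(lock) becomes REQUIRES(lock), and in the java_vm_ext and JDWP hunks that follow, LOCKS_EXCLUDED(lock) becomes the negative-capability form REQUIRES(!lock), which states the "caller must not already hold this lock" precondition explicitly. The fragment below is only an illustrative sketch of that annotation style, written against Clang's documented thread-safety attributes rather than ART's own macro headers; Mutex, gRegistryLock, AddEntry and the other names are invented for the example.

// Minimal, self-contained sketch of capability-style annotations (not ART's
// actual macro definitions). Check with:
//   clang++ -c -Wthread-safety -Wthread-safety-negative sketch.cc
#define CAPABILITY(x)        __attribute__((capability(x)))
#define GUARDED_BY(x)        __attribute__((guarded_by(x)))
#define REQUIRES(...)        __attribute__((requires_capability(__VA_ARGS__)))
#define SHARED_REQUIRES(...) __attribute__((requires_shared_capability(__VA_ARGS__)))
#define ACQUIRE(...)         __attribute__((acquire_capability(__VA_ARGS__)))
#define RELEASE(...)         __attribute__((release_capability(__VA_ARGS__)))

// Declarations only; a stand-in lock type for the example, not a real one.
class CAPABILITY("mutex") Mutex {
 public:
  void Lock() ACQUIRE();
  void Unlock() RELEASE();
};

Mutex gRegistryLock;                             // hypothetical lock name
int gRegistrySize GUARDED_BY(gRegistryLock) = 0;

// Exclusive requirement: the caller must already hold gRegistryLock.
void AddEntryLocked() REQUIRES(gRegistryLock) { ++gRegistrySize; }

// Negative requirement: the caller must NOT hold gRegistryLock, because this
// function acquires it itself. With -Wthread-safety-negative, functions that
// take the lock are expected to declare REQUIRES(!gRegistryLock), so the
// "not already held" contract propagates up the call graph.
void AddEntry() REQUIRES(!gRegistryLock) {
  gRegistryLock.Lock();
  ++gRegistrySize;
  gRegistryLock.Unlock();
}

Compared with the old LOCKS_EXCLUDED hint, the negative form is an ordinary precondition of the function, so callers are asked to establish (and declare) it rather than merely being warned at the point of a direct violation.
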
@@ -68,7 +68,7 @@ class UnstartedRuntime { ShadowFrame* shadow_frame, \ JValue* result, \ size_t arg_offset) \ - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); #include "unstarted_runtime_list.h" UNSTARTED_RUNTIME_DIRECT_LIST(UNSTARTED_DIRECT) #undef UNSTARTED_RUNTIME_DIRECT_LIST @@ -82,7 +82,7 @@ class UnstartedRuntime { mirror::Object* receiver, \ uint32_t* args, \ JValue* result) \ - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); #include "unstarted_runtime_list.h" UNSTARTED_RUNTIME_JNI_LIST(UNSTARTED_JNI) #undef UNSTARTED_RUNTIME_DIRECT_LIST diff --git a/runtime/interpreter/unstarted_runtime_test.cc b/runtime/interpreter/unstarted_runtime_test.cc index 4b672e06f4..a1ae2aab9c 100644 --- a/runtime/interpreter/unstarted_runtime_test.cc +++ b/runtime/interpreter/unstarted_runtime_test.cc @@ -42,7 +42,7 @@ class UnstartedRuntimeTest : public CommonRuntimeTest { ShadowFrame* shadow_frame, \ JValue* result, \ size_t arg_offset) \ - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \ + SHARED_REQUIRES(Locks::mutator_lock_) { \ interpreter::UnstartedRuntime::Unstarted ## Name(self, shadow_frame, result, arg_offset); \ } #include "unstarted_runtime_list.h" @@ -58,7 +58,7 @@ class UnstartedRuntimeTest : public CommonRuntimeTest { mirror::Object* receiver, \ uint32_t* args, \ JValue* result) \ - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \ + SHARED_REQUIRES(Locks::mutator_lock_) { \ interpreter::UnstartedRuntime::UnstartedJNI ## Name(self, method, receiver, args, result); \ } #include "unstarted_runtime_list.h" diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc index 36e3aa3b58..9d41018c1b 100644 --- a/runtime/java_vm_ext.cc +++ b/runtime/java_vm_ext.cc @@ -87,7 +87,7 @@ class SharedLibrary { * If the call has not yet finished in another thread, wait for it. */ bool CheckOnLoadResult() - LOCKS_EXCLUDED(jni_on_load_lock_) { + REQUIRES(!jni_on_load_lock_) { Thread* self = Thread::Current(); bool okay; { @@ -112,7 +112,7 @@ class SharedLibrary { return okay; } - void SetResult(bool result) LOCKS_EXCLUDED(jni_on_load_lock_) { + void SetResult(bool result) REQUIRES(!jni_on_load_lock_) { Thread* self = Thread::Current(); MutexLock mu(self, jni_on_load_lock_); @@ -210,8 +210,8 @@ class Libraries { // See section 11.3 "Linking Native Methods" of the JNI spec. void* FindNativeMethod(ArtMethod* m, std::string& detail) - EXCLUSIVE_LOCKS_REQUIRED(Locks::jni_libraries_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + REQUIRES(Locks::jni_libraries_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) { std::string jni_short_name(JniShortName(m)); std::string jni_long_name(JniLongName(m)); const mirror::ClassLoader* declaring_class_loader = m->GetDeclaringClass()->GetClassLoader(); diff --git a/runtime/java_vm_ext.h b/runtime/java_vm_ext.h index 97fbbc5d43..d70fc47c61 100644 --- a/runtime/java_vm_ext.h +++ b/runtime/java_vm_ext.h @@ -77,7 +77,7 @@ class JavaVMExt : public JavaVM { // such as NewByteArray. // If -verbose:third-party-jni is on, we want to log any JNI function calls // made by a third-party native method. - bool ShouldTrace(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool ShouldTrace(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_); /** * Loads the given shared library. 'path' is an absolute pathname. @@ -93,56 +93,57 @@ class JavaVMExt : public JavaVM { * using dlsym(3) on every native library that's been loaded so far. 
*/ void* FindCodeForNativeMethod(ArtMethod* m) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void DumpForSigQuit(std::ostream& os) - LOCKS_EXCLUDED(Locks::jni_libraries_lock_, globals_lock_, weak_globals_lock_); + REQUIRES(!Locks::jni_libraries_lock_, !globals_lock_, !weak_globals_lock_); void DumpReferenceTables(std::ostream& os) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!globals_lock_, !weak_globals_lock_); bool SetCheckJniEnabled(bool enabled); - void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!globals_lock_); - void DisallowNewWeakGlobals() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void AllowNewWeakGlobals() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void EnsureNewWeakGlobalsDisallowed() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void BroadcastForNewWeakGlobals() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void DisallowNewWeakGlobals() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_); + void AllowNewWeakGlobals() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_); + void EnsureNewWeakGlobalsDisallowed() SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!weak_globals_lock_); + void BroadcastForNewWeakGlobals() SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!weak_globals_lock_); jobject AddGlobalRef(Thread* self, mirror::Object* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!globals_lock_); jweak AddWeakGlobalRef(Thread* self, mirror::Object* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_); - void DeleteGlobalRef(Thread* self, jobject obj); + void DeleteGlobalRef(Thread* self, jobject obj) REQUIRES(!globals_lock_); - void DeleteWeakGlobalRef(Thread* self, jweak obj); + void DeleteWeakGlobalRef(Thread* self, jweak obj) REQUIRES(!weak_globals_lock_); void SweepJniWeakGlobals(IsMarkedVisitor* visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_); mirror::Object* DecodeGlobal(Thread* self, IndirectRef ref) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void UpdateGlobal(Thread* self, IndirectRef ref, mirror::Object* result) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(globals_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!globals_lock_); mirror::Object* DecodeWeakGlobal(Thread* self, IndirectRef ref) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_); void UpdateWeakGlobal(Thread* self, IndirectRef ref, mirror::Object* result) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(weak_globals_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_); const JNIInvokeInterface* GetUncheckedFunctions() const { return unchecked_functions_; } - void TrimGlobals() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(globals_lock_); + void TrimGlobals() SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!globals_lock_); private: Runtime* const runtime_; diff --git a/runtime/jdwp/jdwp.h b/runtime/jdwp/jdwp.h index 7c48985dfe..f5ac9d0241 100644 --- a/runtime/jdwp/jdwp.h +++ b/runtime/jdwp/jdwp.h @@ -88,7 +88,7 @@ struct JdwpLocation { uint64_t dex_pc; }; std::ostream& 
operator<<(std::ostream& os, const JdwpLocation& rhs) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool operator==(const JdwpLocation& lhs, const JdwpLocation& rhs); bool operator!=(const JdwpLocation& lhs, const JdwpLocation& rhs); @@ -130,7 +130,7 @@ struct JdwpState { * Returns a newly-allocated JdwpState struct on success, or nullptr on failure. */ static JdwpState* Create(const JdwpOptions* options) - LOCKS_EXCLUDED(Locks::mutator_lock_); + REQUIRES(!Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS; ~JdwpState(); @@ -155,15 +155,15 @@ struct JdwpState { // thread (command handler) so no event thread posts an event while // it processes a command. This must be called only from the debugger // thread. - void AcquireJdwpTokenForCommand() LOCKS_EXCLUDED(jdwp_token_lock_); - void ReleaseJdwpTokenForCommand() LOCKS_EXCLUDED(jdwp_token_lock_); + void AcquireJdwpTokenForCommand() REQUIRES(!jdwp_token_lock_); + void ReleaseJdwpTokenForCommand() REQUIRES(!jdwp_token_lock_); // Acquires/releases the JDWP synchronization token for the event thread // so no other thread (debugger thread or event thread) interleaves with // it when posting an event. This must NOT be called from the debugger // thread, only event thread. - void AcquireJdwpTokenForEvent(ObjectId threadId) LOCKS_EXCLUDED(jdwp_token_lock_); - void ReleaseJdwpTokenForEvent() LOCKS_EXCLUDED(jdwp_token_lock_); + void AcquireJdwpTokenForEvent(ObjectId threadId) REQUIRES(!jdwp_token_lock_); + void ReleaseJdwpTokenForEvent() REQUIRES(!jdwp_token_lock_); /* * These notify the debug code that something interesting has happened. This @@ -183,7 +183,7 @@ struct JdwpState { * The VM has finished initializing. Only called when the debugger is * connected at the time initialization completes. */ - void PostVMStart() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void PostVMStart() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!jdwp_token_lock_); /* * A location of interest has been reached. This is used for breakpoints, @@ -199,8 +199,7 @@ struct JdwpState { */ void PostLocationEvent(const EventLocation* pLoc, mirror::Object* thisPtr, int eventFlags, const JValue* returnValue) - LOCKS_EXCLUDED(event_list_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!event_list_lock_, !jdwp_token_lock_) SHARED_REQUIRES(Locks::mutator_lock_); /* * A field of interest has been accessed or modified. This is used for field access and field @@ -211,8 +210,7 @@ struct JdwpState { */ void PostFieldEvent(const EventLocation* pLoc, ArtField* field, mirror::Object* thisPtr, const JValue* fieldValue, bool is_modification) - LOCKS_EXCLUDED(event_list_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!event_list_lock_, !jdwp_token_lock_) SHARED_REQUIRES(Locks::mutator_lock_); /* * An exception has been thrown. @@ -221,22 +219,19 @@ struct JdwpState { */ void PostException(const EventLocation* pThrowLoc, mirror::Throwable* exception_object, const EventLocation* pCatchLoc, mirror::Object* thisPtr) - LOCKS_EXCLUDED(event_list_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!event_list_lock_, !jdwp_token_lock_) SHARED_REQUIRES(Locks::mutator_lock_); /* * A thread has started or stopped. */ void PostThreadChange(Thread* thread, bool start) - LOCKS_EXCLUDED(event_list_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!event_list_lock_, !jdwp_token_lock_) SHARED_REQUIRES(Locks::mutator_lock_); /* * Class has been prepared. 
*/ void PostClassPrepare(mirror::Class* klass) - LOCKS_EXCLUDED(event_list_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!event_list_lock_, !jdwp_token_lock_) SHARED_REQUIRES(Locks::mutator_lock_); /* * The VM is about to stop. @@ -244,7 +239,7 @@ struct JdwpState { bool PostVMDeath(); // Called if/when we realize we're talking to DDMS. - void NotifyDdmsActive() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void NotifyDdmsActive() SHARED_REQUIRES(Locks::mutator_lock_); void SetupChunkHeader(uint32_t type, size_t data_len, size_t header_size, uint8_t* out_header); @@ -253,23 +248,23 @@ struct JdwpState { * Send up a chunk of DDM data. */ void DdmSendChunkV(uint32_t type, const iovec* iov, int iov_count) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - bool HandlePacket(); + bool HandlePacket() REQUIRES(!shutdown_lock_, !jdwp_token_lock_); void SendRequest(ExpandBuf* pReq); void ResetState() - LOCKS_EXCLUDED(event_list_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!event_list_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); /* atomic ops to get next serial number */ uint32_t NextRequestSerial(); uint32_t NextEventSerial(); void Run() - LOCKS_EXCLUDED(Locks::mutator_lock_, - Locks::thread_suspend_count_lock_); + REQUIRES(!Locks::mutator_lock_, !Locks::thread_suspend_count_lock_, !thread_start_lock_, + !attach_lock_, !event_list_lock_); /* * Register an event by adding it to the event list. @@ -278,48 +273,45 @@ struct JdwpState { * may discard its pointer after calling this. */ JdwpError RegisterEvent(JdwpEvent* pEvent) - LOCKS_EXCLUDED(event_list_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!event_list_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); /* * Unregister an event, given the requestId. */ void UnregisterEventById(uint32_t requestId) - LOCKS_EXCLUDED(event_list_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!event_list_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); /* * Unregister all events. 
*/ void UnregisterAll() - LOCKS_EXCLUDED(event_list_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!event_list_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); private: explicit JdwpState(const JdwpOptions* options); - size_t ProcessRequest(Request* request, ExpandBuf* pReply, bool* skip_reply); + size_t ProcessRequest(Request* request, ExpandBuf* pReply, bool* skip_reply) + REQUIRES(!jdwp_token_lock_); bool InvokeInProgress(); bool IsConnected(); void SuspendByPolicy(JdwpSuspendPolicy suspend_policy, JDWP::ObjectId thread_self_id) - LOCKS_EXCLUDED(Locks::mutator_lock_); + REQUIRES(!Locks::mutator_lock_); void SendRequestAndPossiblySuspend(ExpandBuf* pReq, JdwpSuspendPolicy suspend_policy, ObjectId threadId) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!jdwp_token_lock_); void CleanupMatchList(const std::vector<JdwpEvent*>& match_list) - EXCLUSIVE_LOCKS_REQUIRED(event_list_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(event_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_); void EventFinish(ExpandBuf* pReq); bool FindMatchingEvents(JdwpEventKind eventKind, const ModBasket& basket, std::vector<JdwpEvent*>* match_list) - LOCKS_EXCLUDED(event_list_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!event_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_); void FindMatchingEventsLocked(JdwpEventKind eventKind, const ModBasket& basket, std::vector<JdwpEvent*>* match_list) - EXCLUSIVE_LOCKS_REQUIRED(event_list_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(event_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_); void UnregisterEvent(JdwpEvent* pEvent) - EXCLUSIVE_LOCKS_REQUIRED(event_list_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(event_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_); void SendBufferedRequest(uint32_t type, const std::vector<iovec>& iov); /* @@ -351,8 +343,8 @@ struct JdwpState { * events at the same time, so we grab a mutex in the SetWaitForJdwpToken * call, and release it in the ClearWaitForJdwpToken call. 
*/ - void SetWaitForJdwpToken(ObjectId threadId) LOCKS_EXCLUDED(jdwp_token_lock_); - void ClearWaitForJdwpToken() LOCKS_EXCLUDED(jdwp_token_lock_); + void SetWaitForJdwpToken(ObjectId threadId) REQUIRES(!jdwp_token_lock_); + void ClearWaitForJdwpToken() REQUIRES(!jdwp_token_lock_); public: // TODO: fix privacy const JdwpOptions* options_; @@ -415,9 +407,9 @@ struct JdwpState { bool processing_request_ GUARDED_BY(shutdown_lock_); }; -std::string DescribeField(const FieldId& field_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); -std::string DescribeMethod(const MethodId& method_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); -std::string DescribeRefTypeId(const RefTypeId& ref_type_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); +std::string DescribeField(const FieldId& field_id) SHARED_REQUIRES(Locks::mutator_lock_); +std::string DescribeMethod(const MethodId& method_id) SHARED_REQUIRES(Locks::mutator_lock_); +std::string DescribeRefTypeId(const RefTypeId& ref_type_id) SHARED_REQUIRES(Locks::mutator_lock_); class Request { public: @@ -433,9 +425,9 @@ class Request { uint32_t ReadUnsigned32(const char* what); - FieldId ReadFieldId() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + FieldId ReadFieldId() SHARED_REQUIRES(Locks::mutator_lock_); - MethodId ReadMethodId() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + MethodId ReadMethodId() SHARED_REQUIRES(Locks::mutator_lock_); ObjectId ReadObjectId(const char* specific_kind); @@ -447,7 +439,7 @@ class Request { ObjectId ReadThreadGroupId(); - RefTypeId ReadRefTypeId() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + RefTypeId ReadRefTypeId() SHARED_REQUIRES(Locks::mutator_lock_); FrameId ReadFrameId(); @@ -461,7 +453,7 @@ class Request { JdwpTypeTag ReadTypeTag(); - JdwpLocation ReadLocation() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + JdwpLocation ReadLocation() SHARED_REQUIRES(Locks::mutator_lock_); JdwpModKind ReadModKind(); diff --git a/runtime/jdwp/jdwp_adb.cc b/runtime/jdwp/jdwp_adb.cc index adc2912e58..51952c4923 100644 --- a/runtime/jdwp/jdwp_adb.cc +++ b/runtime/jdwp/jdwp_adb.cc @@ -24,7 +24,7 @@ #include "base/stringprintf.h" #include "jdwp/jdwp_priv.h" -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ #include "cutils/sockets.h" #endif @@ -224,7 +224,7 @@ bool JdwpAdbState::Accept() { */ int ret = connect(control_sock_, &control_addr_.controlAddrPlain, control_addr_len_); if (!ret) { -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ if (!socket_peer_is_trusted(control_sock_)) { if (shutdown(control_sock_, SHUT_RDWR)) { PLOG(ERROR) << "trouble shutting down socket"; diff --git a/runtime/jdwp/jdwp_event.cc b/runtime/jdwp/jdwp_event.cc index 14f097f72a..5d21f1716e 100644 --- a/runtime/jdwp/jdwp_event.cc +++ b/runtime/jdwp/jdwp_event.cc @@ -447,7 +447,7 @@ static bool PatternMatch(const char* pattern, const std::string& target) { * need to do this even if later mods cause us to ignore the event. 
*/ static bool ModsMatch(JdwpEvent* pEvent, const ModBasket& basket) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { JdwpEventMod* pMod = pEvent->mods; for (int i = pEvent->modCount; i > 0; i--, pMod++) { @@ -784,7 +784,7 @@ void JdwpState::PostVMStart() { static void LogMatchingEventsAndThread(const std::vector<JdwpEvent*> match_list, ObjectId thread_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { for (size_t i = 0, e = match_list.size(); i < e; ++i) { JdwpEvent* pEvent = match_list[i]; VLOG(jdwp) << "EVENT #" << i << ": " << pEvent->eventKind @@ -800,7 +800,7 @@ static void LogMatchingEventsAndThread(const std::vector<JdwpEvent*> match_list, static void SetJdwpLocationFromEventLocation(const JDWP::EventLocation* event_location, JDWP::JdwpLocation* jdwp_location) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(event_location != nullptr); DCHECK(jdwp_location != nullptr); Dbg::SetJdwpLocation(jdwp_location, event_location->method, event_location->dex_pc); diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc index d4e2656b7e..f449406d19 100644 --- a/runtime/jdwp/jdwp_handler.cc +++ b/runtime/jdwp/jdwp_handler.cc @@ -53,7 +53,7 @@ std::string DescribeRefTypeId(const RefTypeId& ref_type_id) { } static JdwpError WriteTaggedObject(ExpandBuf* reply, ObjectId object_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { uint8_t tag; JdwpError rc = Dbg::GetObjectTag(object_id, &tag); if (rc == ERR_NONE) { @@ -64,7 +64,7 @@ static JdwpError WriteTaggedObject(ExpandBuf* reply, ObjectId object_id) } static JdwpError WriteTaggedObjectList(ExpandBuf* reply, const std::vector<ObjectId>& objects) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { expandBufAdd4BE(reply, objects.size()); for (size_t i = 0; i < objects.size(); ++i) { JdwpError rc = WriteTaggedObject(reply, objects[i]); @@ -84,7 +84,7 @@ static JdwpError WriteTaggedObjectList(ExpandBuf* reply, const std::vector<Objec static JdwpError RequestInvoke(JdwpState*, Request* request, ObjectId thread_id, ObjectId object_id, RefTypeId class_id, MethodId method_id, bool is_constructor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { CHECK(!is_constructor || object_id != 0); int32_t arg_count = request->ReadSigned32("argument count"); @@ -123,7 +123,7 @@ static JdwpError RequestInvoke(JdwpState*, Request* request, } static JdwpError VM_Version(JdwpState*, Request*, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // Text information on runtime version. std::string version(StringPrintf("Android Runtime %s", Runtime::Current()->GetVersion())); expandBufAddUtf8String(pReply, version); @@ -147,7 +147,7 @@ static JdwpError VM_Version(JdwpState*, Request*, ExpandBuf* pReply) * been loaded by multiple class loaders. */ static JdwpError VM_ClassesBySignature(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { std::string classDescriptor(request->ReadUtf8String()); std::vector<RefTypeId> ids; @@ -179,7 +179,7 @@ static JdwpError VM_ClassesBySignature(JdwpState*, Request* request, ExpandBuf* * to be suspended, and that violates some JDWP expectations. 
*/ static JdwpError VM_AllThreads(JdwpState*, Request*, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { std::vector<ObjectId> thread_ids; Dbg::GetThreads(nullptr /* all thread groups */, &thread_ids); @@ -195,7 +195,7 @@ static JdwpError VM_AllThreads(JdwpState*, Request*, ExpandBuf* pReply) * List all thread groups that do not have a parent. */ static JdwpError VM_TopLevelThreadGroups(JdwpState*, Request*, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { /* * TODO: maintain a list of parentless thread groups in the VM. * @@ -214,7 +214,7 @@ static JdwpError VM_TopLevelThreadGroups(JdwpState*, Request*, ExpandBuf* pReply * Respond with the sizes of the basic debugger types. */ static JdwpError VM_IDSizes(JdwpState*, Request*, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { expandBufAdd4BE(pReply, sizeof(FieldId)); expandBufAdd4BE(pReply, sizeof(MethodId)); expandBufAdd4BE(pReply, sizeof(ObjectId)); @@ -224,7 +224,7 @@ static JdwpError VM_IDSizes(JdwpState*, Request*, ExpandBuf* pReply) } static JdwpError VM_Dispose(JdwpState*, Request*, ExpandBuf*) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { Dbg::Dispose(); return ERR_NONE; } @@ -236,7 +236,7 @@ static JdwpError VM_Dispose(JdwpState*, Request*, ExpandBuf*) * This needs to increment the "suspend count" on all threads. */ static JdwpError VM_Suspend(JdwpState*, Request*, ExpandBuf*) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { Thread* self = Thread::Current(); self->TransitionFromRunnableToSuspended(kWaitingForDebuggerSuspension); Dbg::SuspendVM(); @@ -248,13 +248,13 @@ static JdwpError VM_Suspend(JdwpState*, Request*, ExpandBuf*) * Resume execution. Decrements the "suspend count" of all threads. */ static JdwpError VM_Resume(JdwpState*, Request*, ExpandBuf*) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { Dbg::ResumeVM(); return ERR_NONE; } static JdwpError VM_Exit(JdwpState* state, Request* request, ExpandBuf*) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { uint32_t exit_status = request->ReadUnsigned32("exit_status"); state->ExitAfterReplying(exit_status); return ERR_NONE; @@ -267,7 +267,7 @@ static JdwpError VM_Exit(JdwpState* state, Request* request, ExpandBuf*) * string "java.util.Arrays".) 
*/ static JdwpError VM_CreateString(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { std::string str(request->ReadUtf8String()); ObjectId string_id; JdwpError status = Dbg::CreateString(str, &string_id); @@ -279,7 +279,7 @@ static JdwpError VM_CreateString(JdwpState*, Request* request, ExpandBuf* pReply } static JdwpError VM_ClassPaths(JdwpState*, Request*, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { expandBufAddUtf8String(pReply, "/"); std::vector<std::string> class_path; @@ -300,7 +300,7 @@ static JdwpError VM_ClassPaths(JdwpState*, Request*, ExpandBuf* pReply) } static JdwpError VM_DisposeObjects(JdwpState*, Request* request, ExpandBuf*) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { size_t object_count = request->ReadUnsigned32("object_count"); for (size_t i = 0; i < object_count; ++i) { ObjectId object_id = request->ReadObjectId(); @@ -311,7 +311,7 @@ static JdwpError VM_DisposeObjects(JdwpState*, Request* request, ExpandBuf*) } static JdwpError VM_Capabilities(JdwpState*, Request*, ExpandBuf* reply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { expandBufAdd1(reply, true); // canWatchFieldModification expandBufAdd1(reply, true); // canWatchFieldAccess expandBufAdd1(reply, true); // canGetBytecodes @@ -323,7 +323,7 @@ static JdwpError VM_Capabilities(JdwpState*, Request*, ExpandBuf* reply) } static JdwpError VM_CapabilitiesNew(JdwpState*, Request* request, ExpandBuf* reply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // The first few capabilities are the same as those reported by the older call. VM_Capabilities(nullptr, request, reply); @@ -350,7 +350,7 @@ static JdwpError VM_CapabilitiesNew(JdwpState*, Request* request, ExpandBuf* rep } static JdwpError VM_AllClassesImpl(ExpandBuf* pReply, bool descriptor_and_status, bool generic) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { std::vector<JDWP::RefTypeId> classes; Dbg::GetClassList(&classes); @@ -381,17 +381,17 @@ static JdwpError VM_AllClassesImpl(ExpandBuf* pReply, bool descriptor_and_status } static JdwpError VM_AllClasses(JdwpState*, Request*, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return VM_AllClassesImpl(pReply, true, false); } static JdwpError VM_AllClassesWithGeneric(JdwpState*, Request*, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return VM_AllClassesImpl(pReply, true, true); } static JdwpError VM_InstanceCounts(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { int32_t class_count = request->ReadSigned32("class count"); if (class_count < 0) { return ERR_ILLEGAL_ARGUMENT; @@ -415,7 +415,7 @@ static JdwpError VM_InstanceCounts(JdwpState*, Request* request, ExpandBuf* pRep } static JdwpError RT_Modifiers(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { RefTypeId refTypeId = request->ReadRefTypeId(); return Dbg::GetModifiers(refTypeId, pReply); } @@ -424,7 +424,7 @@ static JdwpError RT_Modifiers(JdwpState*, Request* request, ExpandBuf* pReply) * Get values from static fields in a reference type. 
*/ static JdwpError RT_GetValues(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { RefTypeId refTypeId = request->ReadRefTypeId(); int32_t field_count = request->ReadSigned32("field count"); expandBufAdd4BE(pReply, field_count); @@ -442,7 +442,7 @@ static JdwpError RT_GetValues(JdwpState*, Request* request, ExpandBuf* pReply) * Get the name of the source file in which a reference type was declared. */ static JdwpError RT_SourceFile(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { RefTypeId refTypeId = request->ReadRefTypeId(); std::string source_file; JdwpError status = Dbg::GetSourceFile(refTypeId, &source_file); @@ -457,7 +457,7 @@ static JdwpError RT_SourceFile(JdwpState*, Request* request, ExpandBuf* pReply) * Return the current status of the reference type. */ static JdwpError RT_Status(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { RefTypeId refTypeId = request->ReadRefTypeId(); JDWP::JdwpTypeTag type_tag; uint32_t class_status; @@ -473,7 +473,7 @@ static JdwpError RT_Status(JdwpState*, Request* request, ExpandBuf* pReply) * Return interfaces implemented directly by this class. */ static JdwpError RT_Interfaces(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { RefTypeId refTypeId = request->ReadRefTypeId(); return Dbg::OutputDeclaredInterfaces(refTypeId, pReply); } @@ -482,7 +482,7 @@ static JdwpError RT_Interfaces(JdwpState*, Request* request, ExpandBuf* pReply) * Return the class object corresponding to this type. */ static JdwpError RT_ClassObject(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { RefTypeId refTypeId = request->ReadRefTypeId(); ObjectId class_object_id; JdwpError status = Dbg::GetClassObject(refTypeId, &class_object_id); @@ -500,13 +500,13 @@ static JdwpError RT_ClassObject(JdwpState*, Request* request, ExpandBuf* pReply) * JDB seems interested, but DEX files don't currently support this. */ static JdwpError RT_SourceDebugExtension(JdwpState*, Request*, ExpandBuf*) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { /* referenceTypeId in, string out */ return ERR_ABSENT_INFORMATION; } static JdwpError RT_Signature(JdwpState*, Request* request, ExpandBuf* pReply, bool with_generic) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { RefTypeId refTypeId = request->ReadRefTypeId(); std::string signature; @@ -522,12 +522,12 @@ static JdwpError RT_Signature(JdwpState*, Request* request, ExpandBuf* pReply, b } static JdwpError RT_Signature(JdwpState* state, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return RT_Signature(state, request, pReply, false); } static JdwpError RT_SignatureWithGeneric(JdwpState* state, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return RT_Signature(state, request, pReply, true); } @@ -536,7 +536,7 @@ static JdwpError RT_SignatureWithGeneric(JdwpState* state, Request* request, Exp * reference type, or null if it was loaded by the system loader. 
*/ static JdwpError RT_ClassLoader(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { RefTypeId refTypeId = request->ReadRefTypeId(); return Dbg::GetClassLoader(refTypeId, pReply); } @@ -546,14 +546,14 @@ static JdwpError RT_ClassLoader(JdwpState*, Request* request, ExpandBuf* pReply) * fields declared by a class. */ static JdwpError RT_FieldsWithGeneric(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { RefTypeId refTypeId = request->ReadRefTypeId(); return Dbg::OutputDeclaredFields(refTypeId, true, pReply); } // Obsolete equivalent of FieldsWithGeneric, without the generic type information. static JdwpError RT_Fields(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { RefTypeId refTypeId = request->ReadRefTypeId(); return Dbg::OutputDeclaredFields(refTypeId, false, pReply); } @@ -563,20 +563,20 @@ static JdwpError RT_Fields(JdwpState*, Request* request, ExpandBuf* pReply) * methods declared by a class. */ static JdwpError RT_MethodsWithGeneric(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { RefTypeId refTypeId = request->ReadRefTypeId(); return Dbg::OutputDeclaredMethods(refTypeId, true, pReply); } // Obsolete equivalent of MethodsWithGeneric, without the generic type information. static JdwpError RT_Methods(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { RefTypeId refTypeId = request->ReadRefTypeId(); return Dbg::OutputDeclaredMethods(refTypeId, false, pReply); } static JdwpError RT_Instances(JdwpState*, Request* request, ExpandBuf* reply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { RefTypeId class_id = request->ReadRefTypeId(); int32_t max_count = request->ReadSigned32("max count"); if (max_count < 0) { @@ -596,7 +596,7 @@ static JdwpError RT_Instances(JdwpState*, Request* request, ExpandBuf* reply) * Return the immediate superclass of a class. */ static JdwpError CT_Superclass(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { RefTypeId class_id = request->ReadRefTypeId(); RefTypeId superClassId; JdwpError status = Dbg::GetSuperclass(class_id, &superClassId); @@ -611,7 +611,7 @@ static JdwpError CT_Superclass(JdwpState*, Request* request, ExpandBuf* pReply) * Set static class values. 
*/ static JdwpError CT_SetValues(JdwpState* , Request* request, ExpandBuf*) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { RefTypeId class_id = request->ReadRefTypeId(); int32_t values_count = request->ReadSigned32("values count"); @@ -641,7 +641,7 @@ static JdwpError CT_SetValues(JdwpState* , Request* request, ExpandBuf*) */ static JdwpError CT_InvokeMethod(JdwpState* state, Request* request, ExpandBuf* pReply ATTRIBUTE_UNUSED) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { RefTypeId class_id = request->ReadRefTypeId(); ObjectId thread_id = request->ReadThreadId(); MethodId method_id = request->ReadMethodId(); @@ -658,7 +658,7 @@ static JdwpError CT_InvokeMethod(JdwpState* state, Request* request, */ static JdwpError CT_NewInstance(JdwpState* state, Request* request, ExpandBuf* pReply ATTRIBUTE_UNUSED) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { RefTypeId class_id = request->ReadRefTypeId(); ObjectId thread_id = request->ReadThreadId(); MethodId method_id = request->ReadMethodId(); @@ -675,7 +675,7 @@ static JdwpError CT_NewInstance(JdwpState* state, Request* request, * Create a new array object of the requested type and length. */ static JdwpError AT_newInstance(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { RefTypeId arrayTypeId = request->ReadRefTypeId(); int32_t length = request->ReadSigned32("length"); @@ -693,7 +693,7 @@ static JdwpError AT_newInstance(JdwpState*, Request* request, ExpandBuf* pReply) * Return line number information for the method, if present. */ static JdwpError M_LineTable(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { RefTypeId refTypeId = request->ReadRefTypeId(); MethodId method_id = request->ReadMethodId(); @@ -704,7 +704,7 @@ static JdwpError M_LineTable(JdwpState*, Request* request, ExpandBuf* pReply) static JdwpError M_VariableTable(JdwpState*, Request* request, ExpandBuf* pReply, bool generic) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { RefTypeId class_id = request->ReadRefTypeId(); MethodId method_id = request->ReadMethodId(); @@ -717,17 +717,17 @@ static JdwpError M_VariableTable(JdwpState*, Request* request, ExpandBuf* pReply } static JdwpError M_VariableTable(JdwpState* state, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return M_VariableTable(state, request, pReply, false); } static JdwpError M_VariableTableWithGeneric(JdwpState* state, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return M_VariableTable(state, request, pReply, true); } static JdwpError M_Bytecodes(JdwpState*, Request* request, ExpandBuf* reply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { RefTypeId class_id = request->ReadRefTypeId(); MethodId method_id = request->ReadMethodId(); @@ -753,7 +753,7 @@ static JdwpError M_Bytecodes(JdwpState*, Request* request, ExpandBuf* reply) * passed in here. 
*/ static JdwpError OR_ReferenceType(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectId object_id = request->ReadObjectId(); return Dbg::GetReferenceType(object_id, pReply); } @@ -762,7 +762,7 @@ static JdwpError OR_ReferenceType(JdwpState*, Request* request, ExpandBuf* pRepl * Get values from the fields of an object. */ static JdwpError OR_GetValues(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectId object_id = request->ReadObjectId(); int32_t field_count = request->ReadSigned32("field count"); @@ -782,7 +782,7 @@ static JdwpError OR_GetValues(JdwpState*, Request* request, ExpandBuf* pReply) * Set values in the fields of an object. */ static JdwpError OR_SetValues(JdwpState*, Request* request, ExpandBuf*) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectId object_id = request->ReadObjectId(); int32_t field_count = request->ReadSigned32("field count"); @@ -804,7 +804,7 @@ static JdwpError OR_SetValues(JdwpState*, Request* request, ExpandBuf*) } static JdwpError OR_MonitorInfo(JdwpState*, Request* request, ExpandBuf* reply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectId object_id = request->ReadObjectId(); return Dbg::GetMonitorInfo(object_id, reply); } @@ -822,7 +822,7 @@ static JdwpError OR_MonitorInfo(JdwpState*, Request* request, ExpandBuf* reply) */ static JdwpError OR_InvokeMethod(JdwpState* state, Request* request, ExpandBuf* pReply ATTRIBUTE_UNUSED) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectId object_id = request->ReadObjectId(); ObjectId thread_id = request->ReadThreadId(); RefTypeId class_id = request->ReadRefTypeId(); @@ -832,19 +832,19 @@ static JdwpError OR_InvokeMethod(JdwpState* state, Request* request, } static JdwpError OR_DisableCollection(JdwpState*, Request* request, ExpandBuf*) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectId object_id = request->ReadObjectId(); return Dbg::DisableCollection(object_id); } static JdwpError OR_EnableCollection(JdwpState*, Request* request, ExpandBuf*) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectId object_id = request->ReadObjectId(); return Dbg::EnableCollection(object_id); } static JdwpError OR_IsCollected(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectId object_id = request->ReadObjectId(); bool is_collected; JdwpError rc = Dbg::IsCollected(object_id, &is_collected); @@ -853,7 +853,7 @@ static JdwpError OR_IsCollected(JdwpState*, Request* request, ExpandBuf* pReply) } static JdwpError OR_ReferringObjects(JdwpState*, Request* request, ExpandBuf* reply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectId object_id = request->ReadObjectId(); int32_t max_count = request->ReadSigned32("max count"); if (max_count < 0) { @@ -873,7 +873,7 @@ static JdwpError OR_ReferringObjects(JdwpState*, Request* request, ExpandBuf* re * Return the string value in a string object. 
*/ static JdwpError SR_Value(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectId stringObject = request->ReadObjectId(); std::string str; JDWP::JdwpError error = Dbg::StringToUtf8(stringObject, &str); @@ -892,7 +892,7 @@ static JdwpError SR_Value(JdwpState*, Request* request, ExpandBuf* pReply) * Return a thread's name. */ static JdwpError TR_Name(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectId thread_id = request->ReadThreadId(); std::string name; @@ -913,7 +913,7 @@ static JdwpError TR_Name(JdwpState*, Request* request, ExpandBuf* pReply) * resume it; only the JDI is allowed to resume it. */ static JdwpError TR_Suspend(JdwpState*, Request* request, ExpandBuf*) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectId thread_id = request->ReadThreadId(); if (thread_id == Dbg::GetThreadSelfId()) { @@ -932,7 +932,7 @@ static JdwpError TR_Suspend(JdwpState*, Request* request, ExpandBuf*) * Resume the specified thread. */ static JdwpError TR_Resume(JdwpState*, Request* request, ExpandBuf*) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectId thread_id = request->ReadThreadId(); if (thread_id == Dbg::GetThreadSelfId()) { @@ -948,7 +948,7 @@ static JdwpError TR_Resume(JdwpState*, Request* request, ExpandBuf*) * Return status of specified thread. */ static JdwpError TR_Status(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectId thread_id = request->ReadThreadId(); JDWP::JdwpThreadStatus threadStatus; @@ -970,7 +970,7 @@ static JdwpError TR_Status(JdwpState*, Request* request, ExpandBuf* pReply) * Return the thread group that the specified thread is a member of. */ static JdwpError TR_ThreadGroup(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectId thread_id = request->ReadThreadId(); return Dbg::GetThreadGroup(thread_id, pReply); } @@ -982,7 +982,7 @@ static JdwpError TR_ThreadGroup(JdwpState*, Request* request, ExpandBuf* pReply) * be THREAD_NOT_SUSPENDED. */ static JdwpError TR_Frames(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectId thread_id = request->ReadThreadId(); uint32_t start_frame = request->ReadUnsigned32("start frame"); uint32_t length = request->ReadUnsigned32("length"); @@ -1014,7 +1014,7 @@ static JdwpError TR_Frames(JdwpState*, Request* request, ExpandBuf* pReply) * Returns the #of frames on the specified thread, which must be suspended. 
*/ static JdwpError TR_FrameCount(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectId thread_id = request->ReadThreadId(); size_t frame_count; @@ -1028,7 +1028,7 @@ static JdwpError TR_FrameCount(JdwpState*, Request* request, ExpandBuf* pReply) } static JdwpError TR_OwnedMonitors(Request* request, ExpandBuf* reply, bool with_stack_depths) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectId thread_id = request->ReadThreadId(); std::vector<ObjectId> monitors; @@ -1052,17 +1052,17 @@ static JdwpError TR_OwnedMonitors(Request* request, ExpandBuf* reply, bool with_ } static JdwpError TR_OwnedMonitors(JdwpState*, Request* request, ExpandBuf* reply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return TR_OwnedMonitors(request, reply, false); } static JdwpError TR_OwnedMonitorsStackDepthInfo(JdwpState*, Request* request, ExpandBuf* reply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return TR_OwnedMonitors(request, reply, true); } static JdwpError TR_CurrentContendedMonitor(JdwpState*, Request* request, ExpandBuf* reply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectId thread_id = request->ReadThreadId(); ObjectId contended_monitor; @@ -1074,7 +1074,7 @@ static JdwpError TR_CurrentContendedMonitor(JdwpState*, Request* request, Expand } static JdwpError TR_Interrupt(JdwpState*, Request* request, ExpandBuf* reply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { UNUSED(reply); ObjectId thread_id = request->ReadThreadId(); return Dbg::Interrupt(thread_id); @@ -1087,7 +1087,7 @@ static JdwpError TR_Interrupt(JdwpState*, Request* request, ExpandBuf* reply) * its suspend count recently.) */ static JdwpError TR_DebugSuspendCount(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectId thread_id = request->ReadThreadId(); return Dbg::GetThreadDebugSuspendCount(thread_id, pReply); } @@ -1098,7 +1098,7 @@ static JdwpError TR_DebugSuspendCount(JdwpState*, Request* request, ExpandBuf* p * The Eclipse debugger recognizes "main" and "system" as special. */ static JdwpError TGR_Name(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectId thread_group_id = request->ReadThreadGroupId(); return Dbg::GetThreadGroupName(thread_group_id, pReply); } @@ -1108,7 +1108,7 @@ static JdwpError TGR_Name(JdwpState*, Request* request, ExpandBuf* pReply) * thread group. */ static JdwpError TGR_Parent(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectId thread_group_id = request->ReadThreadGroupId(); return Dbg::GetThreadGroupParent(thread_group_id, pReply); } @@ -1118,7 +1118,7 @@ static JdwpError TGR_Parent(JdwpState*, Request* request, ExpandBuf* pReply) * specified thread group. 
*/ static JdwpError TGR_Children(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectId thread_group_id = request->ReadThreadGroupId(); return Dbg::GetThreadGroupChildren(thread_group_id, pReply); } @@ -1127,7 +1127,7 @@ static JdwpError TGR_Children(JdwpState*, Request* request, ExpandBuf* pReply) * Return the #of components in the array. */ static JdwpError AR_Length(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectId array_id = request->ReadArrayId(); int32_t length; @@ -1146,7 +1146,7 @@ static JdwpError AR_Length(JdwpState*, Request* request, ExpandBuf* pReply) * Return the values from an array. */ static JdwpError AR_GetValues(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectId array_id = request->ReadArrayId(); uint32_t offset = request->ReadUnsigned32("offset"); uint32_t length = request->ReadUnsigned32("length"); @@ -1157,7 +1157,7 @@ static JdwpError AR_GetValues(JdwpState*, Request* request, ExpandBuf* pReply) * Set values in an array. */ static JdwpError AR_SetValues(JdwpState*, Request* request, ExpandBuf*) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectId array_id = request->ReadArrayId(); uint32_t offset = request->ReadUnsigned32("offset"); uint32_t count = request->ReadUnsigned32("count"); @@ -1165,7 +1165,7 @@ static JdwpError AR_SetValues(JdwpState*, Request* request, ExpandBuf*) } static JdwpError CLR_VisibleClasses(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { request->ReadObjectId(); // classLoaderObject // TODO: we should only return classes which have the given class loader as a defining or // initiating loader. The former would be easy; the latter is hard, because we don't have @@ -1179,7 +1179,7 @@ static JdwpError CLR_VisibleClasses(JdwpState*, Request* request, ExpandBuf* pRe * Reply with a requestID. */ static JdwpError ER_Set(JdwpState* state, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { JdwpEventKind event_kind = request->ReadEnum1<JdwpEventKind>("event kind"); JdwpSuspendPolicy suspend_policy = request->ReadEnum1<JdwpSuspendPolicy>("suspend policy"); int32_t modifier_count = request->ReadSigned32("modifier count"); @@ -1322,7 +1322,7 @@ static JdwpError ER_Set(JdwpState* state, Request* request, ExpandBuf* pReply) } static JdwpError ER_Clear(JdwpState* state, Request* request, ExpandBuf*) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { request->ReadEnum1<JdwpEventKind>("event kind"); uint32_t requestId = request->ReadUnsigned32("request id"); @@ -1336,7 +1336,7 @@ static JdwpError ER_Clear(JdwpState* state, Request* request, ExpandBuf*) * Return the values of arguments and local variables. */ static JdwpError SF_GetValues(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return Dbg::GetLocalValues(request, pReply); } @@ -1344,12 +1344,12 @@ static JdwpError SF_GetValues(JdwpState*, Request* request, ExpandBuf* pReply) * Set the values of arguments and local variables. 
*/ static JdwpError SF_SetValues(JdwpState*, Request* request, ExpandBuf*) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return Dbg::SetLocalValues(request); } static JdwpError SF_ThisObject(JdwpState*, Request* request, ExpandBuf* reply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectId thread_id = request->ReadThreadId(); FrameId frame_id = request->ReadFrameId(); @@ -1370,7 +1370,7 @@ static JdwpError SF_ThisObject(JdwpState*, Request* request, ExpandBuf* reply) * that, or I have no idea what this is for.) */ static JdwpError COR_ReflectedType(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { RefTypeId class_object_id = request->ReadRefTypeId(); return Dbg::GetReflectedType(class_object_id, pReply); } @@ -1379,7 +1379,7 @@ static JdwpError COR_ReflectedType(JdwpState*, Request* request, ExpandBuf* pRep * Handle a DDM packet with a single chunk in it. */ static JdwpError DDM_Chunk(JdwpState* state, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { state->NotifyDdmsActive(); uint8_t* replyBuf = nullptr; int replyLen = -1; diff --git a/runtime/jdwp/jdwp_main.cc b/runtime/jdwp/jdwp_main.cc index 6bc5e27f85..5a9a0f5006 100644 --- a/runtime/jdwp/jdwp_main.cc +++ b/runtime/jdwp/jdwp_main.cc @@ -248,7 +248,7 @@ JdwpState* JdwpState::Create(const JdwpOptions* options) { case kJdwpTransportSocket: InitSocketTransport(state.get(), options); break; -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ case kJdwpTransportAndroidAdb: InitAdbTransport(state.get(), options); break; @@ -256,12 +256,12 @@ JdwpState* JdwpState::Create(const JdwpOptions* options) { default: LOG(FATAL) << "Unknown transport: " << options->transport; } - { /* * Grab a mutex before starting the thread. This ensures they * won't signal the cond var before we're waiting. */ + state->thread_start_lock_.AssertNotHeld(self); MutexLock thread_start_locker(self, state->thread_start_lock_); /* diff --git a/runtime/jdwp/jdwp_priv.h b/runtime/jdwp/jdwp_priv.h index d58467d108..29314f6274 100644 --- a/runtime/jdwp/jdwp_priv.h +++ b/runtime/jdwp/jdwp_priv.h @@ -86,8 +86,8 @@ class JdwpNetStateBase { void Close(); - ssize_t WritePacket(ExpandBuf* pReply, size_t length) LOCKS_EXCLUDED(socket_lock_); - ssize_t WriteBufferedPacket(const std::vector<iovec>& iov) LOCKS_EXCLUDED(socket_lock_); + ssize_t WritePacket(ExpandBuf* pReply, size_t length) REQUIRES(!socket_lock_); + ssize_t WriteBufferedPacket(const std::vector<iovec>& iov) REQUIRES(!socket_lock_); Mutex* GetSocketLock() { return &socket_lock_; } diff --git a/runtime/jdwp/object_registry.cc b/runtime/jdwp/object_registry.cc index 2b28f7df5a..3fbad36e43 100644 --- a/runtime/jdwp/object_registry.cc +++ b/runtime/jdwp/object_registry.cc @@ -63,13 +63,13 @@ JDWP::ObjectId ObjectRegistry::Add(Handle<T> obj_h) { // Explicit template instantiation. 
template -SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) -LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::thread_suspend_count_lock_) +SHARED_REQUIRES(Locks::mutator_lock_) +REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_) JDWP::ObjectId ObjectRegistry::Add(Handle<mirror::Object> obj_h); template -SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) -LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::thread_suspend_count_lock_) +SHARED_REQUIRES(Locks::mutator_lock_) +REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_) JDWP::ObjectId ObjectRegistry::Add(Handle<mirror::Throwable> obj_h); template<class T> diff --git a/runtime/jdwp/object_registry.h b/runtime/jdwp/object_registry.h index 4c149cdac7..17490f4db5 100644 --- a/runtime/jdwp/object_registry.h +++ b/runtime/jdwp/object_registry.h @@ -63,28 +63,24 @@ class ObjectRegistry { ObjectRegistry(); JDWP::ObjectId Add(mirror::Object* o) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_, !lock_); JDWP::RefTypeId AddRefType(mirror::Class* c) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_, !lock_); template<class T> JDWP::ObjectId Add(Handle<T> obj_h) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_, !lock_); JDWP::RefTypeId AddRefType(Handle<mirror::Class> c_h) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_, !lock_); template<typename T> T Get(JDWP::ObjectId id, JDWP::JdwpError* error) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_) { if (id == 0) { *error = JDWP::ERR_NONE; return nullptr; @@ -92,47 +88,42 @@ class ObjectRegistry { return down_cast<T>(InternalGet(id, error)); } - void Clear() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void Clear() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_); void DisableCollection(JDWP::ObjectId id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_); void EnableCollection(JDWP::ObjectId id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_); bool IsCollected(JDWP::ObjectId id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_); void DisposeObject(JDWP::ObjectId id, uint32_t reference_count) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_); // This is needed to get the jobject instead of the Object*. // Avoid using this and use standard Get when possible. 
- jobject GetJObject(JDWP::ObjectId id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + jobject GetJObject(JDWP::ObjectId id) SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_); private: template<class T> JDWP::ObjectId InternalAdd(Handle<T> obj_h) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(lock_, - Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!lock_, !Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_); mirror::Object* InternalGet(JDWP::ObjectId id, JDWP::JdwpError* error) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_); void Demote(ObjectRegistryEntry& entry) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(lock_); void Promote(ObjectRegistryEntry& entry) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(lock_); bool ContainsLocked(Thread* self, mirror::Object* o, int32_t identity_hash_code, ObjectRegistryEntry** out_entry) - EXCLUSIVE_LOCKS_REQUIRED(lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(lock_) SHARED_REQUIRES(Locks::mutator_lock_); Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; std::multimap<int32_t, ObjectRegistryEntry*> object_to_entry_ GUARDED_BY(lock_); diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h index dbd8977d91..ca6e7ea1f8 100644 --- a/runtime/jit/jit.h +++ b/runtime/jit/jit.h @@ -48,7 +48,7 @@ class Jit { virtual ~Jit(); static Jit* Create(JitOptions* options, std::string* error_msg); bool CompileMethod(ArtMethod* method, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void CreateInstrumentationCache(size_t compile_threshold); void CreateThreadPool(); CompilerCallbacks* GetCompilerCallbacks() { diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h index c1ea921834..9707f6f29d 100644 --- a/runtime/jit/jit_code_cache.h +++ b/runtime/jit/jit_code_cache.h @@ -78,27 +78,27 @@ class JitCodeCache { // Return true if the code cache contains the code pointer which si the entrypoint of the method. bool ContainsMethod(ArtMethod* method) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Return true if the code cache contains a code ptr. bool ContainsCodePtr(const void* ptr) const; // Reserve a region of code of size at least "size". Returns null if there is no more room. - uint8_t* ReserveCode(Thread* self, size_t size) LOCKS_EXCLUDED(lock_); + uint8_t* ReserveCode(Thread* self, size_t size) REQUIRES(!lock_); // Add a data array of size (end - begin) with the associated contents, returns null if there // is no more room. uint8_t* AddDataArray(Thread* self, const uint8_t* begin, const uint8_t* end) - LOCKS_EXCLUDED(lock_); + REQUIRES(!lock_); // Get code for a method, returns null if it is not in the jit cache. const void* GetCodeFor(ArtMethod* method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_); // Save the compiled code for a method so that GetCodeFor(method) will return old_code_ptr if the // entrypoint isn't within the cache. 
void SaveCompiledCode(ArtMethod* method, const void* old_code_ptr) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_); private: // Takes ownership of code_mem_map. diff --git a/runtime/jit/jit_instrumentation.h b/runtime/jit/jit_instrumentation.h index 27894eb6c2..0deaf8ad02 100644 --- a/runtime/jit/jit_instrumentation.h +++ b/runtime/jit/jit_instrumentation.h @@ -47,9 +47,9 @@ class JitInstrumentationCache { public: explicit JitInstrumentationCache(size_t hot_method_threshold); void AddSamples(Thread* self, ArtMethod* method, size_t samples) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_); void SignalCompiled(Thread* self, ArtMethod* method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_); void CreateThreadPool(); void DeleteThreadPool(); @@ -68,7 +68,7 @@ class JitInstrumentationListener : public instrumentation::InstrumentationListen virtual void MethodEntered(Thread* thread, mirror::Object* /*this_object*/, ArtMethod* method, uint32_t /*dex_pc*/) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { instrumentation_cache_->AddSamples(thread, method, 1); } virtual void MethodExited(Thread* /*thread*/, mirror::Object* /*this_object*/, @@ -92,7 +92,7 @@ class JitInstrumentationListener : public instrumentation::InstrumentationListen // We only care about how many dex instructions were executed in the Jit. virtual void BackwardBranch(Thread* thread, ArtMethod* method, int32_t dex_pc_offset) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { CHECK_LE(dex_pc_offset, 0); instrumentation_cache_->AddSamples(thread, method, 1); } diff --git a/runtime/jni_env_ext.cc b/runtime/jni_env_ext.cc index 84fc404b46..b18b430403 100644 --- a/runtime/jni_env_ext.cc +++ b/runtime/jni_env_ext.cc @@ -63,14 +63,14 @@ JNIEnvExt::JNIEnvExt(Thread* self_in, JavaVMExt* vm_in) JNIEnvExt::~JNIEnvExt() { } -jobject JNIEnvExt::NewLocalRef(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +jobject JNIEnvExt::NewLocalRef(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) { if (obj == nullptr) { return nullptr; } return reinterpret_cast<jobject>(locals.Add(local_ref_cookie, obj)); } -void JNIEnvExt::DeleteLocalRef(jobject obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +void JNIEnvExt::DeleteLocalRef(jobject obj) SHARED_REQUIRES(Locks::mutator_lock_) { if (obj != nullptr) { locals.Remove(local_ref_cookie, reinterpret_cast<IndirectRef>(obj)); } @@ -86,14 +86,14 @@ void JNIEnvExt::DumpReferenceTables(std::ostream& os) { monitors.Dump(os); } -void JNIEnvExt::PushFrame(int capacity) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +void JNIEnvExt::PushFrame(int capacity) SHARED_REQUIRES(Locks::mutator_lock_) { UNUSED(capacity); // cpplint gets confused with (int) and thinks its a cast. // TODO: take 'capacity' into account. 
stacked_local_ref_cookies.push_back(local_ref_cookie); local_ref_cookie = locals.GetSegmentState(); } -void JNIEnvExt::PopFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +void JNIEnvExt::PopFrame() SHARED_REQUIRES(Locks::mutator_lock_) { locals.SetSegmentState(local_ref_cookie); local_ref_cookie = stacked_local_ref_cookies.back(); stacked_local_ref_cookies.pop_back(); diff --git a/runtime/jni_env_ext.h b/runtime/jni_env_ext.h index 29d912cb01..9b55536e98 100644 --- a/runtime/jni_env_ext.h +++ b/runtime/jni_env_ext.h @@ -39,7 +39,7 @@ struct JNIEnvExt : public JNIEnv { ~JNIEnvExt(); void DumpReferenceTables(std::ostream& os) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void SetCheckJniEnabled(bool enabled); @@ -48,7 +48,7 @@ struct JNIEnvExt : public JNIEnv { template<typename T> T AddLocalReference(mirror::Object* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static Offset SegmentStateOffset(); @@ -60,8 +60,8 @@ struct JNIEnvExt : public JNIEnv { return Offset(OFFSETOF_MEMBER(JNIEnvExt, self)); } - jobject NewLocalRef(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void DeleteLocalRef(jobject obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + jobject NewLocalRef(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_); + void DeleteLocalRef(jobject obj) SHARED_REQUIRES(Locks::mutator_lock_); Thread* const self; JavaVMExt* const vm; diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc index cc176b7c71..6a716b5e0d 100644 --- a/runtime/jni_internal.cc +++ b/runtime/jni_internal.cc @@ -89,7 +89,7 @@ static std::string NormalizeJniClassDescriptor(const char* name) { static void ThrowNoSuchMethodError(ScopedObjectAccess& soa, mirror::Class* c, const char* name, const char* sig, const char* kind) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { std::string temp; soa.Self()->ThrowNewExceptionF("Ljava/lang/NoSuchMethodError;", "no %s method \"%s.%s%s\"", @@ -98,7 +98,7 @@ static void ThrowNoSuchMethodError(ScopedObjectAccess& soa, mirror::Class* c, static void ReportInvalidJNINativeMethod(const ScopedObjectAccess& soa, mirror::Class* c, const char* kind, jint idx, bool return_errors) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { LOG(return_errors ? 
ERROR : FATAL) << "Failed to register native method in " << PrettyDescriptor(c) << " in " << c->GetDexCache()->GetLocation()->ToModifiedUtf8() << ": " << kind << " is null at index " << idx; @@ -107,7 +107,7 @@ static void ReportInvalidJNINativeMethod(const ScopedObjectAccess& soa, mirror:: } static mirror::Class* EnsureInitialized(Thread* self, mirror::Class* klass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (LIKELY(klass->IsInitialized())) { return klass; } @@ -121,7 +121,7 @@ static mirror::Class* EnsureInitialized(Thread* self, mirror::Class* klass) static jmethodID FindMethodID(ScopedObjectAccess& soa, jclass jni_class, const char* name, const char* sig, bool is_static) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { mirror::Class* c = EnsureInitialized(soa.Self(), soa.Decode<mirror::Class*>(jni_class)); if (c == nullptr) { return nullptr; @@ -148,7 +148,7 @@ static jmethodID FindMethodID(ScopedObjectAccess& soa, jclass jni_class, } static mirror::ClassLoader* GetClassLoader(const ScopedObjectAccess& soa) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod* method = soa.Self()->GetCurrentMethod(nullptr); // If we are running Runtime.nativeLoad, use the overriding ClassLoader it set. if (method == soa.DecodeMethod(WellKnownClasses::java_lang_Runtime_nativeLoad)) { @@ -179,7 +179,7 @@ static mirror::ClassLoader* GetClassLoader(const ScopedObjectAccess& soa) static jfieldID FindFieldID(const ScopedObjectAccess& soa, jclass jni_class, const char* name, const char* sig, bool is_static) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { StackHandleScope<2> hs(soa.Self()); Handle<mirror::Class> c( hs.NewHandle(EnsureInitialized(soa.Self(), soa.Decode<mirror::Class*>(jni_class)))); @@ -227,7 +227,7 @@ static jfieldID FindFieldID(const ScopedObjectAccess& soa, jclass jni_class, con static void ThrowAIOOBE(ScopedObjectAccess& soa, mirror::Array* array, jsize start, jsize length, const char* identifier) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { std::string type(PrettyTypeOf(array)); soa.Self()->ThrowNewExceptionF("Ljava/lang/ArrayIndexOutOfBoundsException;", "%s offset=%d length=%d %s.length=%d", @@ -236,14 +236,14 @@ static void ThrowAIOOBE(ScopedObjectAccess& soa, mirror::Array* array, jsize sta static void ThrowSIOOBE(ScopedObjectAccess& soa, jsize start, jsize length, jsize array_length) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { soa.Self()->ThrowNewExceptionF("Ljava/lang/StringIndexOutOfBoundsException;", "offset=%d length=%d string.length()=%d", start, length, array_length); } int ThrowNewException(JNIEnv* env, jclass exception_class, const char* msg, jobject cause) - LOCKS_EXCLUDED(Locks::mutator_lock_) { + REQUIRES(!Locks::mutator_lock_) { // Turn the const char* into a java.lang.String. 
ScopedLocalRef<jstring> s(env, env->NewStringUTF(msg)); if (msg != nullptr && s.get() == nullptr) { @@ -314,7 +314,7 @@ static JavaVMExt* JavaVmExtFromEnv(JNIEnv* env) { template <bool kNative> static ArtMethod* FindMethod(mirror::Class* c, const StringPiece& name, const StringPiece& sig) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize(); for (auto& method : c->GetDirectMethods(pointer_size)) { if (kNative == method.IsNative() && name == method.GetName() && method.GetSignature() == sig) { @@ -2321,7 +2321,7 @@ class JNI { private: static jint EnsureLocalCapacityInternal(ScopedObjectAccess& soa, jint desired_capacity, const char* caller) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // TODO: we should try to expand the table if necessary. if (desired_capacity < 0 || desired_capacity > static_cast<jint>(kLocalsMax)) { LOG(ERROR) << "Invalid capacity given to " << caller << ": " << desired_capacity; @@ -2350,7 +2350,7 @@ class JNI { template <typename JArrayT, typename ElementT, typename ArtArrayT> static ArtArrayT* DecodeAndCheckArrayType(ScopedObjectAccess& soa, JArrayT java_array, const char* fn_name, const char* operation) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ArtArrayT* array = soa.Decode<ArtArrayT*>(java_array); if (UNLIKELY(ArtArrayT::GetArrayClass() != array->GetClass())) { soa.Vm()->JniAbortF(fn_name, @@ -2407,7 +2407,7 @@ class JNI { static void ReleasePrimitiveArray(ScopedObjectAccess& soa, mirror::Array* array, size_t component_size, void* elements, jint mode) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { void* array_data = array->GetRawData(component_size, 0); gc::Heap* heap = Runtime::Current()->GetHeap(); bool is_copy = array_data != elements; diff --git a/runtime/lambda/box_table.cc b/runtime/lambda/box_table.cc index 64a6076aea..22cc820b73 100644 --- a/runtime/lambda/box_table.cc +++ b/runtime/lambda/box_table.cc @@ -94,8 +94,7 @@ mirror::Object* BoxTable::BoxLambda(const ClosureType& closure) { return method_as_object; } -bool BoxTable::UnboxLambda(mirror::Object* object, ClosureType* out_closure) { - DCHECK(object != nullptr); +bool BoxTable::UnboxLambda(mirror::Object* object, out<ClosureType> out_closure) { *out_closure = nullptr; // Note that we do not need to access lambda_table_lock_ here diff --git a/runtime/lambda/box_table.h b/runtime/lambda/box_table.h index 12d3ff3ac6..c6d3d0c0fb 100644 --- a/runtime/lambda/box_table.h +++ b/runtime/lambda/box_table.h @@ -18,6 +18,7 @@ #include "base/allocator.h" #include "base/hash_map.h" +#include "base/out.h" #include "gc_root.h" #include "base/macros.h" #include "base/mutex.h" @@ -48,30 +49,28 @@ class BoxTable FINAL { // Boxes a closure into an object. Returns null and throws an exception on failure. mirror::Object* BoxLambda(const ClosureType& closure) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::lambda_table_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::lambda_table_lock_); // Unboxes an object back into the lambda. Returns false and throws an exception on failure. 
- bool UnboxLambda(mirror::Object* object, ClosureType* out_closure) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool UnboxLambda(mirror::Object* object, out<ClosureType> out_closure) + SHARED_REQUIRES(Locks::mutator_lock_); // Sweep weak references to lambda boxes. Update the addresses if the objects have been // moved, and delete them from the table if the objects have been cleaned up. void SweepWeakBoxedLambdas(IsMarkedVisitor* visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::lambda_table_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::lambda_table_lock_); // GC callback: Temporarily block anyone from touching the map. void DisallowNewWeakBoxedLambdas() - LOCKS_EXCLUDED(Locks::lambda_table_lock_); + REQUIRES(!Locks::lambda_table_lock_); // GC callback: Unblock any readers who have been queued waiting to touch the map. void AllowNewWeakBoxedLambdas() - LOCKS_EXCLUDED(Locks::lambda_table_lock_); + REQUIRES(!Locks::lambda_table_lock_); // GC callback: Verify that the state is now blocking anyone from touching the map. void EnsureNewWeakBoxedLambdasDisallowed() - LOCKS_EXCLUDED(Locks::lambda_table_lock_); + REQUIRES(!Locks::lambda_table_lock_); BoxTable(); ~BoxTable() = default; @@ -93,11 +92,11 @@ class BoxTable FINAL { // Attempt to look up the lambda in the map, or return null if it's not there yet. ValueType FindBoxedLambda(const ClosureType& closure) const - SHARED_LOCKS_REQUIRED(Locks::lambda_table_lock_); + SHARED_REQUIRES(Locks::lambda_table_lock_); // If the GC has come in and temporarily disallowed touching weaks, block until is it allowed. void BlockUntilWeaksAllowed() - SHARED_LOCKS_REQUIRED(Locks::lambda_table_lock_); + SHARED_REQUIRES(Locks::lambda_table_lock_); // EmptyFn implementation for art::HashMap struct EmptyFn { diff --git a/runtime/linear_alloc.h b/runtime/linear_alloc.h index c10ddfda9f..743ee77e17 100644 --- a/runtime/linear_alloc.h +++ b/runtime/linear_alloc.h @@ -28,24 +28,24 @@ class LinearAlloc { public: explicit LinearAlloc(ArenaPool* pool); - void* Alloc(Thread* self, size_t size) LOCKS_EXCLUDED(lock_); + void* Alloc(Thread* self, size_t size) REQUIRES(!lock_); // Realloc never frees the input pointer, it is the caller's job to do this if necessary. - void* Realloc(Thread* self, void* ptr, size_t old_size, size_t new_size) LOCKS_EXCLUDED(lock_); + void* Realloc(Thread* self, void* ptr, size_t old_size, size_t new_size) REQUIRES(!lock_); // Allocate and construct an array of structs of type T. template<class T> - T* AllocArray(Thread* self, size_t elements) { + T* AllocArray(Thread* self, size_t elements) REQUIRES(!lock_) { return reinterpret_cast<T*>(Alloc(self, elements * sizeof(T))); } // Return the number of bytes used in the allocator. - size_t GetUsedMemory() const LOCKS_EXCLUDED(lock_); + size_t GetUsedMemory() const REQUIRES(!lock_); - ArenaPool* GetArenaPool() LOCKS_EXCLUDED(lock_); + ArenaPool* GetArenaPool() REQUIRES(!lock_); // Return true if the linear alloc contrains an address. 
- bool Contains(void* ptr) const; + bool Contains(void* ptr) const REQUIRES(!lock_); private: mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc index 8df8f96ea9..d9ad7dc0c2 100644 --- a/runtime/mem_map.cc +++ b/runtime/mem_map.cc @@ -280,7 +280,7 @@ MemMap* MemMap::MapAnonymous(const char* name, uint8_t* expected_ptr, size_t byt ScopedFd fd(-1); #ifdef USE_ASHMEM -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ const bool use_ashmem = true; #else // When not on Android ashmem is faked using files in /tmp. Ensure that such files won't diff --git a/runtime/mem_map.h b/runtime/mem_map.h index 01e29c90db..196a7f6292 100644 --- a/runtime/mem_map.h +++ b/runtime/mem_map.h @@ -92,7 +92,7 @@ class MemMap { std::string* error_msg); // Releases the memory mapping. - ~MemMap() LOCKS_EXCLUDED(Locks::mem_maps_lock_); + ~MemMap() REQUIRES(!Locks::mem_maps_lock_); const std::string& GetName() const { return name_; @@ -142,25 +142,25 @@ class MemMap { std::string* error_msg); static bool CheckNoGaps(MemMap* begin_map, MemMap* end_map) - LOCKS_EXCLUDED(Locks::mem_maps_lock_); + REQUIRES(!Locks::mem_maps_lock_); static void DumpMaps(std::ostream& os, bool terse = false) - LOCKS_EXCLUDED(Locks::mem_maps_lock_); + REQUIRES(!Locks::mem_maps_lock_); typedef AllocationTrackingMultiMap<void*, MemMap*, kAllocatorTagMaps> Maps; - static void Init() LOCKS_EXCLUDED(Locks::mem_maps_lock_); - static void Shutdown() LOCKS_EXCLUDED(Locks::mem_maps_lock_); + static void Init() REQUIRES(!Locks::mem_maps_lock_); + static void Shutdown() REQUIRES(!Locks::mem_maps_lock_); private: MemMap(const std::string& name, uint8_t* begin, size_t size, void* base_begin, size_t base_size, - int prot, bool reuse, size_t redzone_size = 0) LOCKS_EXCLUDED(Locks::mem_maps_lock_); + int prot, bool reuse, size_t redzone_size = 0) REQUIRES(!Locks::mem_maps_lock_); static void DumpMapsLocked(std::ostream& os, bool terse) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mem_maps_lock_); + REQUIRES(Locks::mem_maps_lock_); static bool HasMemMap(MemMap* map) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mem_maps_lock_); + REQUIRES(Locks::mem_maps_lock_); static MemMap* GetLargestMemMapAt(void* address) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mem_maps_lock_); + REQUIRES(Locks::mem_maps_lock_); const std::string name_; uint8_t* const begin_; // Start of data. diff --git a/runtime/mirror/abstract_method.h b/runtime/mirror/abstract_method.h index 6240b3be7a..dc084be06e 100644 --- a/runtime/mirror/abstract_method.h +++ b/runtime/mirror/abstract_method.h @@ -34,12 +34,13 @@ namespace mirror { class MANAGED AbstractMethod : public AccessibleObject { public: // Called from Constructor::CreateFromArtMethod, Method::CreateFromArtMethod. - bool CreateFromArtMethod(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool CreateFromArtMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Roles::uninterruptible_); - ArtMethod* GetArtMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ArtMethod* GetArtMethod() SHARED_REQUIRES(Locks::mutator_lock_); // Only used by the image writer. 
- void SetArtMethod(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - mirror::Class* GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetArtMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_); + mirror::Class* GetDeclaringClass() SHARED_REQUIRES(Locks::mutator_lock_); private: static MemberOffset ArtMethodOffset() { diff --git a/runtime/mirror/accessible_object.h b/runtime/mirror/accessible_object.h index 6d4c0f6fb3..dcf5118d11 100644 --- a/runtime/mirror/accessible_object.h +++ b/runtime/mirror/accessible_object.h @@ -36,12 +36,12 @@ class MANAGED AccessibleObject : public Object { } template<bool kTransactionActive> - void SetAccessible(bool value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetAccessible(bool value) SHARED_REQUIRES(Locks::mutator_lock_) { UNUSED(padding_); return SetFieldBoolean<kTransactionActive>(FlagOffset(), value ? 1u : 0u); } - bool IsAccessible() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsAccessible() SHARED_REQUIRES(Locks::mutator_lock_) { return GetFieldBoolean(FlagOffset()); } diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h index 88d75abd3c..3d540297e5 100644 --- a/runtime/mirror/array-inl.h +++ b/runtime/mirror/array-inl.h @@ -101,7 +101,7 @@ class SetLengthVisitor { } void operator()(Object* obj, size_t usable_size) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { UNUSED(usable_size); // Avoid AsArray as object is not yet in live bitmap or allocation stack. Array* array = down_cast<Array*>(obj); @@ -126,7 +126,7 @@ class SetLengthToUsableSizeVisitor { } void operator()(Object* obj, size_t usable_size) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // Avoid AsArray as object is not yet in live bitmap or allocation stack. 
Array* array = down_cast<Array*>(obj); // DCHECK(array->IsArrayInstance()); diff --git a/runtime/mirror/array.cc b/runtime/mirror/array.cc index d72c03ff86..4128689bb7 100644 --- a/runtime/mirror/array.cc +++ b/runtime/mirror/array.cc @@ -43,7 +43,7 @@ namespace mirror { static Array* RecursiveCreateMultiArray(Thread* self, Handle<Class> array_class, int current_dimension, Handle<mirror::IntArray> dimensions) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { int32_t array_length = dimensions->Get(current_dimension); StackHandleScope<1> hs(self); Handle<Array> new_array( diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h index e65611d3c5..b27a8849ed 100644 --- a/runtime/mirror/array.h +++ b/runtime/mirror/array.h @@ -39,21 +39,21 @@ class MANAGED Array : public Object { template <bool kIsInstrumented, bool kFillUsable = false> ALWAYS_INLINE static Array* Alloc(Thread* self, Class* array_class, int32_t component_count, size_t component_size_shift, gc::AllocatorType allocator_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); static Array* CreateMultiArray(Thread* self, Handle<Class> element_class, Handle<IntArray> dimensions) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, ReadBarrierOption kReadBarrierOption = kWithReadBarrier> - size_t SizeOf() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + size_t SizeOf() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - ALWAYS_INLINE int32_t GetLength() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ALWAYS_INLINE int32_t GetLength() SHARED_REQUIRES(Locks::mutator_lock_) { return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Array, length_)); } - void SetLength(int32_t length) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetLength(int32_t length) SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK_GE(length, 0); // We use non transactional version since we can't undo this write. We also disable checking // since it would fail during a transaction. @@ -67,7 +67,7 @@ class MANAGED Array : public Object { static MemberOffset DataOffset(size_t component_size); void* GetRawData(size_t component_size, int32_t index) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { intptr_t data = reinterpret_cast<intptr_t>(this) + DataOffset(component_size).Int32Value() + + (index * component_size); return reinterpret_cast<void*>(data); @@ -82,16 +82,18 @@ class MANAGED Array : public Object { // Returns true if the index is valid. If not, throws an ArrayIndexOutOfBoundsException and // returns false. 
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - ALWAYS_INLINE bool CheckIsValidIndex(int32_t index) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ALWAYS_INLINE bool CheckIsValidIndex(int32_t index) SHARED_REQUIRES(Locks::mutator_lock_); - Array* CopyOf(Thread* self, int32_t new_length) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + Array* CopyOf(Thread* self, int32_t new_length) SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Roles::uninterruptible_); protected: - void ThrowArrayStoreException(Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void ThrowArrayStoreException(Object* object) SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Roles::uninterruptible_); private: void ThrowArrayIndexOutOfBoundsException(int32_t index) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // The number of array elements. int32_t length_; @@ -107,32 +109,32 @@ class MANAGED PrimitiveArray : public Array { typedef T ElementType; static PrimitiveArray<T>* Alloc(Thread* self, size_t length) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); - const T* GetData() const ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + const T* GetData() const ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) { return reinterpret_cast<const T*>(GetRawData(sizeof(T), 0)); } - T* GetData() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + T* GetData() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) { return reinterpret_cast<T*>(GetRawData(sizeof(T), 0)); } - T Get(int32_t i) ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + T Get(int32_t i) ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_); - T GetWithoutChecks(int32_t i) ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + T GetWithoutChecks(int32_t i) ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(CheckIsValidIndex(i)); return GetData()[i]; } - void Set(int32_t i, T value) ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void Set(int32_t i, T value) ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_); // TODO fix thread safety analysis broken by the use of template. This should be - // SHARED_LOCKS_REQUIRED(Locks::mutator_lock_). + // SHARED_REQUIRES(Locks::mutator_lock_). template<bool kTransactionActive, bool kCheckTransaction = true> void Set(int32_t i, T value) ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS; // TODO fix thread safety analysis broken by the use of template. This should be - // SHARED_LOCKS_REQUIRED(Locks::mutator_lock_). + // SHARED_REQUIRES(Locks::mutator_lock_). template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> @@ -144,7 +146,7 @@ class MANAGED PrimitiveArray : public Array { * and the arrays non-null. */ void Memmove(int32_t dst_pos, PrimitiveArray<T>* src, int32_t src_pos, int32_t count) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); /* * Works like memcpy(), except we guarantee not to allow tearing of array values (ie using @@ -152,7 +154,7 @@ class MANAGED PrimitiveArray : public Array { * and the arrays non-null. 
*/ void Memcpy(int32_t dst_pos, PrimitiveArray<T>* src, int32_t src_pos, int32_t count) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void SetArrayClass(Class* array_class) { CHECK(array_class_.IsNull()); @@ -160,7 +162,7 @@ class MANAGED PrimitiveArray : public Array { array_class_ = GcRoot<Class>(array_class); } - static Class* GetArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + static Class* GetArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(!array_class_.IsNull()); return array_class_.Read(); } @@ -170,7 +172,7 @@ class MANAGED PrimitiveArray : public Array { array_class_ = GcRoot<Class>(nullptr); } - static void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_); private: static GcRoot<Class> array_class_; @@ -183,11 +185,11 @@ class PointerArray : public Array { public: template<typename T> T GetElementPtrSize(uint32_t idx, size_t ptr_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive = false, bool kUnchecked = false, typename T> void SetElementPtrSize(uint32_t idx, T element, size_t ptr_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); }; } // namespace mirror diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h index 73a194d3ce..6568487df9 100644 --- a/runtime/mirror/class-inl.h +++ b/runtime/mirror/class-inl.h @@ -680,6 +680,8 @@ inline void Class::VisitReferences(mirror::Class* klass, const Visitor& visitor) // linked yet. VisitStaticFieldsReferences<kVisitClass>(this, visitor); } + // Since this class is reachable, we must also visit the associated roots when we scan it. + VisitNativeRoots(visitor, Runtime::Current()->GetClassLinker()->GetImagePointerSize()); } template<ReadBarrierOption kReadBarrierOption> @@ -816,20 +818,22 @@ void mirror::Class::VisitNativeRoots(Visitor& visitor, size_t pointer_size) { if (sfields != nullptr) { for (size_t i = 0, count = NumStaticFields(); i < count; ++i) { auto* f = &sfields[i]; + // Visit roots first in case the declaring class gets moved. + f->VisitRoots(visitor); if (kIsDebugBuild && IsResolved()) { CHECK_EQ(f->GetDeclaringClass(), this) << GetStatus(); } - f->VisitRoots(visitor); } } ArtField* const ifields = GetIFieldsUnchecked(); if (ifields != nullptr) { for (size_t i = 0, count = NumInstanceFields(); i < count; ++i) { auto* f = &ifields[i]; + // Visit roots first in case the declaring class gets moved. 
+ f->VisitRoots(visitor); if (kIsDebugBuild && IsResolved()) { CHECK_EQ(f->GetDeclaringClass(), this) << GetStatus(); } - f->VisitRoots(visitor); } } // We may see GetDirectMethodsPtr() == null with NumDirectMethods() != 0 if the root marking diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc index 5bd65837d8..701ba4a78b 100644 --- a/runtime/mirror/class.cc +++ b/runtime/mirror/class.cc @@ -835,7 +835,7 @@ class CopyClassVisitor { } void operator()(mirror::Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { StackHandleScope<1> hs(self_); Handle<mirror::Class> h_new_class_obj(hs.NewHandle(obj->AsClass())); mirror::Object::CopyObject(self_, h_new_class_obj.Get(), orig_->Get(), copy_bytes_); diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h index cc63dec602..d95bcd80e5 100644 --- a/runtime/mirror/class.h +++ b/runtime/mirror/class.h @@ -127,7 +127,7 @@ class MANAGED Class FINAL : public Object { }; template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - Status GetStatus() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Status GetStatus() SHARED_REQUIRES(Locks::mutator_lock_) { static_assert(sizeof(Status) == sizeof(uint32_t), "Size of status not equal to uint32"); return static_cast<Status>( GetField32Volatile<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, status_))); @@ -135,7 +135,7 @@ class MANAGED Class FINAL : public Object { // This is static because 'this' may be moved by GC. static void SetStatus(Handle<Class> h_this, Status new_status, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); static MemberOffset StatusOffset() { return OFFSET_OF_OBJECT_MEMBER(Class, status_); @@ -143,146 +143,146 @@ class MANAGED Class FINAL : public Object { // Returns true if the class has been retired. template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsRetired() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsRetired() SHARED_REQUIRES(Locks::mutator_lock_) { return GetStatus<kVerifyFlags>() == kStatusRetired; } // Returns true if the class has failed to link. template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsErroneous() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsErroneous() SHARED_REQUIRES(Locks::mutator_lock_) { return GetStatus<kVerifyFlags>() == kStatusError; } // Returns true if the class has been loaded. template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsIdxLoaded() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsIdxLoaded() SHARED_REQUIRES(Locks::mutator_lock_) { return GetStatus<kVerifyFlags>() >= kStatusIdx; } // Returns true if the class has been loaded. template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsLoaded() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsLoaded() SHARED_REQUIRES(Locks::mutator_lock_) { return GetStatus<kVerifyFlags>() >= kStatusLoaded; } // Returns true if the class has been linked. template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsResolved() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsResolved() SHARED_REQUIRES(Locks::mutator_lock_) { return GetStatus<kVerifyFlags>() >= kStatusResolved; } // Returns true if the class was compile-time verified. 
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsCompileTimeVerified() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsCompileTimeVerified() SHARED_REQUIRES(Locks::mutator_lock_) { return GetStatus<kVerifyFlags>() >= kStatusRetryVerificationAtRuntime; } // Returns true if the class has been verified. template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsVerified() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsVerified() SHARED_REQUIRES(Locks::mutator_lock_) { return GetStatus<kVerifyFlags>() >= kStatusVerified; } // Returns true if the class is initializing. template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsInitializing() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsInitializing() SHARED_REQUIRES(Locks::mutator_lock_) { return GetStatus<kVerifyFlags>() >= kStatusInitializing; } // Returns true if the class is initialized. template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsInitialized() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsInitialized() SHARED_REQUIRES(Locks::mutator_lock_) { return GetStatus<kVerifyFlags>() == kStatusInitialized; } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - ALWAYS_INLINE uint32_t GetAccessFlags() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ALWAYS_INLINE uint32_t GetAccessFlags() SHARED_REQUIRES(Locks::mutator_lock_); static MemberOffset AccessFlagsOffset() { return OFFSET_OF_OBJECT_MEMBER(Class, access_flags_); } - void SetAccessFlags(uint32_t new_access_flags) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetAccessFlags(uint32_t new_access_flags) SHARED_REQUIRES(Locks::mutator_lock_); // Returns true if the class is an interface. - ALWAYS_INLINE bool IsInterface() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ALWAYS_INLINE bool IsInterface() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags() & kAccInterface) != 0; } // Returns true if the class is declared public. - ALWAYS_INLINE bool IsPublic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ALWAYS_INLINE bool IsPublic() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags() & kAccPublic) != 0; } // Returns true if the class is declared final. - ALWAYS_INLINE bool IsFinal() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ALWAYS_INLINE bool IsFinal() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags() & kAccFinal) != 0; } - ALWAYS_INLINE bool IsFinalizable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ALWAYS_INLINE bool IsFinalizable() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags() & kAccClassIsFinalizable) != 0; } - ALWAYS_INLINE void SetFinalizable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ALWAYS_INLINE void SetFinalizable() SHARED_REQUIRES(Locks::mutator_lock_) { uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_)); SetAccessFlags(flags | kAccClassIsFinalizable); } - ALWAYS_INLINE bool IsStringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ALWAYS_INLINE bool IsStringClass() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetField32(AccessFlagsOffset()) & kAccClassIsStringClass) != 0; } - ALWAYS_INLINE void SetStringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ALWAYS_INLINE void SetStringClass() SHARED_REQUIRES(Locks::mutator_lock_) { uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_)); SetAccessFlags(flags | kAccClassIsStringClass); } // Returns true if the class is abstract. 
- ALWAYS_INLINE bool IsAbstract() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ALWAYS_INLINE bool IsAbstract() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags() & kAccAbstract) != 0; } // Returns true if the class is an annotation. - ALWAYS_INLINE bool IsAnnotation() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ALWAYS_INLINE bool IsAnnotation() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags() & kAccAnnotation) != 0; } // Returns true if the class is synthetic. - ALWAYS_INLINE bool IsSynthetic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ALWAYS_INLINE bool IsSynthetic() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags() & kAccSynthetic) != 0; } // Returns true if the class can avoid access checks. - bool IsPreverified() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsPreverified() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags() & kAccPreverified) != 0; } - void SetPreverified() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetPreverified() SHARED_REQUIRES(Locks::mutator_lock_) { uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_)); SetAccessFlags(flags | kAccPreverified); } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsTypeOfReferenceClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsTypeOfReferenceClass() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags<kVerifyFlags>() & kAccClassIsReference) != 0; } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsWeakReferenceClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsWeakReferenceClass() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags<kVerifyFlags>() & kAccClassIsWeakReference) != 0; } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsSoftReferenceClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsSoftReferenceClass() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags<kVerifyFlags>() & kAccReferenceFlagsMask) == kAccClassIsReference; } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsFinalizerReferenceClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsFinalizerReferenceClass() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags<kVerifyFlags>() & kAccClassIsFinalizerReference) != 0; } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsPhantomReferenceClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsPhantomReferenceClass() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags<kVerifyFlags>() & kAccClassIsPhantomReference) != 0; } @@ -291,7 +291,7 @@ class MANAGED Class FINAL : public Object { // For array classes, where all the classes are final due to there being no sub-classes, an // Object[] may be assigned to by a String[] but a String[] may not be assigned to by other // types as the component is final. - bool CannotBeAssignedFromOtherTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool CannotBeAssignedFromOtherTypes() SHARED_REQUIRES(Locks::mutator_lock_) { if (!IsArrayClass()) { return IsFinal(); } else { @@ -306,18 +306,19 @@ class MANAGED Class FINAL : public Object { // Returns true if this class is the placeholder and should retire and // be replaced with a class with the right size for embedded imt/vtable. 
- bool IsTemp() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsTemp() SHARED_REQUIRES(Locks::mutator_lock_) { Status s = GetStatus(); return s < Status::kStatusResolving && ShouldHaveEmbeddedImtAndVTable(); } - String* GetName() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Returns the cached name. - void SetName(String* name) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Sets the cached name. + String* GetName() SHARED_REQUIRES(Locks::mutator_lock_); // Returns the cached name. + void SetName(String* name) SHARED_REQUIRES(Locks::mutator_lock_); // Sets the cached name. // Computes the name, then sets the cached value. - static String* ComputeName(Handle<Class> h_this) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static String* ComputeName(Handle<Class> h_this) SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Roles::uninterruptible_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsProxyClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsProxyClass() SHARED_REQUIRES(Locks::mutator_lock_) { // Read access flags without using getter as whether something is a proxy can be checked in // any loaded state // TODO: switch to a check if the super class is java.lang.reflect.Proxy? @@ -326,9 +327,9 @@ class MANAGED Class FINAL : public Object { } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - Primitive::Type GetPrimitiveType() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + Primitive::Type GetPrimitiveType() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_); - void SetPrimitiveType(Primitive::Type new_type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetPrimitiveType(Primitive::Type new_type) SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK_EQ(sizeof(Primitive::Type), sizeof(int32_t)); int32_t v32 = static_cast<int32_t>(new_type); DCHECK_EQ(v32 & 0xFFFF, v32) << "upper 16 bits aren't zero"; @@ -338,81 +339,82 @@ class MANAGED Class FINAL : public Object { } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - size_t GetPrimitiveTypeSizeShift() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + size_t GetPrimitiveTypeSizeShift() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_); // Returns true if the class is a primitive type. 
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsPrimitive() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsPrimitive() SHARED_REQUIRES(Locks::mutator_lock_) { return GetPrimitiveType<kVerifyFlags>() != Primitive::kPrimNot; } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsPrimitiveBoolean() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsPrimitiveBoolean() SHARED_REQUIRES(Locks::mutator_lock_) { return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimBoolean; } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsPrimitiveByte() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsPrimitiveByte() SHARED_REQUIRES(Locks::mutator_lock_) { return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimByte; } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsPrimitiveChar() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsPrimitiveChar() SHARED_REQUIRES(Locks::mutator_lock_) { return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimChar; } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsPrimitiveShort() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsPrimitiveShort() SHARED_REQUIRES(Locks::mutator_lock_) { return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimShort; } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsPrimitiveInt() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsPrimitiveInt() SHARED_REQUIRES(Locks::mutator_lock_) { return GetPrimitiveType() == Primitive::kPrimInt; } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsPrimitiveLong() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsPrimitiveLong() SHARED_REQUIRES(Locks::mutator_lock_) { return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimLong; } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsPrimitiveFloat() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsPrimitiveFloat() SHARED_REQUIRES(Locks::mutator_lock_) { return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimFloat; } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsPrimitiveDouble() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsPrimitiveDouble() SHARED_REQUIRES(Locks::mutator_lock_) { return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimDouble; } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsPrimitiveVoid() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsPrimitiveVoid() SHARED_REQUIRES(Locks::mutator_lock_) { return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimVoid; } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsPrimitiveArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsPrimitiveArray() SHARED_REQUIRES(Locks::mutator_lock_) { return IsArrayClass<kVerifyFlags>() && GetComponentType<static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis)>()-> IsPrimitive(); } // Depth of class from java.lang.Object - uint32_t Depth() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + uint32_t Depth() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, ReadBarrierOption kReadBarrierOption = kWithReadBarrier> - bool IsArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + bool IsArrayClass() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, ReadBarrierOption kReadBarrierOption = kWithReadBarrier> - bool IsClassClass() 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsClassClass() SHARED_REQUIRES(Locks::mutator_lock_); - bool IsThrowableClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsThrowableClass() SHARED_REQUIRES(Locks::mutator_lock_); template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier> - bool IsReferenceClass() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsReferenceClass() const SHARED_REQUIRES(Locks::mutator_lock_); static MemberOffset ComponentTypeOffset() { return OFFSET_OF_OBJECT_MEMBER(Class, component_type_); @@ -420,9 +422,9 @@ class MANAGED Class FINAL : public Object { template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, ReadBarrierOption kReadBarrierOption = kWithReadBarrier> - Class* GetComponentType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + Class* GetComponentType() SHARED_REQUIRES(Locks::mutator_lock_); - void SetComponentType(Class* new_component_type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetComponentType(Class* new_component_type) SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(GetComponentType() == nullptr); DCHECK(new_component_type != nullptr); // Component type is invariant: use non-transactional mode without check. @@ -430,43 +432,43 @@ class MANAGED Class FINAL : public Object { } template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier> - size_t GetComponentSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + size_t GetComponentSize() SHARED_REQUIRES(Locks::mutator_lock_) { return 1U << GetComponentSizeShift(); } template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier> - size_t GetComponentSizeShift() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + size_t GetComponentSizeShift() SHARED_REQUIRES(Locks::mutator_lock_) { return GetComponentType<kDefaultVerifyFlags, kReadBarrierOption>()->GetPrimitiveTypeSizeShift(); } - bool IsObjectClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsObjectClass() SHARED_REQUIRES(Locks::mutator_lock_) { return !IsPrimitive() && GetSuperClass() == nullptr; } - bool IsInstantiableNonArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsInstantiableNonArray() SHARED_REQUIRES(Locks::mutator_lock_) { return !IsPrimitive() && !IsInterface() && !IsAbstract() && !IsArrayClass(); } - bool IsInstantiable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsInstantiable() SHARED_REQUIRES(Locks::mutator_lock_) { return (!IsPrimitive() && !IsInterface() && !IsAbstract()) || (IsAbstract() && IsArrayClass()); } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsObjectArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsObjectArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) { return GetComponentType<kVerifyFlags>() != nullptr && !GetComponentType<kVerifyFlags>()->IsPrimitive(); } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsIntArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsIntArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) { constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis); auto* component_type = GetComponentType<kVerifyFlags>(); return component_type != nullptr && component_type->template IsPrimitiveInt<kNewFlags>(); } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsLongArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsLongArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) { constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis); auto* 
component_type = GetComponentType<kVerifyFlags>(); return component_type != nullptr && component_type->template IsPrimitiveLong<kNewFlags>(); @@ -475,16 +477,16 @@ class MANAGED Class FINAL : public Object { // Creates a raw object instance but does not invoke the default constructor. template<bool kIsInstrumented, bool kCheckAddFinalizer = true> ALWAYS_INLINE Object* Alloc(Thread* self, gc::AllocatorType allocator_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); Object* AllocObject(Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); Object* AllocNonMovableObject(Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, ReadBarrierOption kReadBarrierOption = kWithReadBarrier> - bool IsVariableSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsVariableSize() SHARED_REQUIRES(Locks::mutator_lock_) { // Classes, arrays, and strings vary in size, and so the object_size_ field cannot // be used to Get their instance size return IsClassClass<kVerifyFlags, kReadBarrierOption>() || @@ -493,17 +495,17 @@ class MANAGED Class FINAL : public Object { template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, ReadBarrierOption kReadBarrierOption = kWithReadBarrier> - uint32_t SizeOf() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + uint32_t SizeOf() SHARED_REQUIRES(Locks::mutator_lock_) { return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, class_size_)); } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - uint32_t GetClassSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + uint32_t GetClassSize() SHARED_REQUIRES(Locks::mutator_lock_) { return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, class_size_)); } void SetClassSize(uint32_t new_class_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Compute how many bytes would be used a class with the given elements. static uint32_t ComputeClassSize(bool has_embedded_tables, @@ -529,31 +531,31 @@ class MANAGED Class FINAL : public Object { template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, ReadBarrierOption kReadBarrierOption = kWithReadBarrier> - uint32_t GetObjectSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + uint32_t GetObjectSize() SHARED_REQUIRES(Locks::mutator_lock_); static MemberOffset ObjectSizeOffset() { return OFFSET_OF_OBJECT_MEMBER(Class, object_size_); } - void SetObjectSize(uint32_t new_object_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetObjectSize(uint32_t new_object_size) SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(!IsVariableSize()); // Not called within a transaction. return SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, object_size_), new_object_size); } void SetObjectSizeWithoutChecks(uint32_t new_object_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // Not called within a transaction. return SetField32<false, false, kVerifyNone>( OFFSET_OF_OBJECT_MEMBER(Class, object_size_), new_object_size); } // Returns true if this class is in the same packages as that class. 
- bool IsInSamePackage(Class* that) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsInSamePackage(Class* that) SHARED_REQUIRES(Locks::mutator_lock_); static bool IsInSamePackage(const StringPiece& descriptor1, const StringPiece& descriptor2); // Returns true if this class can access that class. - bool CanAccess(Class* that) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool CanAccess(Class* that) SHARED_REQUIRES(Locks::mutator_lock_) { return that->IsPublic() || this->IsInSamePackage(that); } @@ -561,7 +563,7 @@ class MANAGED Class FINAL : public Object { // Note that access to the class isn't checked in case the declaring class is protected and the // method has been exposed by a public sub-class bool CanAccessMember(Class* access_to, uint32_t member_flags) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // Classes can access all of their own members if (this == access_to) { return true; @@ -589,34 +591,34 @@ class MANAGED Class FINAL : public Object { // referenced by the FieldId in the DexFile in case the declaring class is inaccessible. bool CanAccessResolvedField(Class* access_to, ArtField* field, DexCache* dex_cache, uint32_t field_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool CheckResolvedFieldAccess(Class* access_to, ArtField* field, uint32_t field_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Can this class access a resolved method? // Note that access to methods's class is checked and this may require looking up the class // referenced by the MethodId in the DexFile in case the declaring class is inaccessible. bool CanAccessResolvedMethod(Class* access_to, ArtMethod* resolved_method, DexCache* dex_cache, uint32_t method_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template <InvokeType throw_invoke_type> bool CheckResolvedMethodAccess(Class* access_to, ArtMethod* resolved_method, uint32_t method_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - bool IsSubClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsSubClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_); // Can src be assigned to this class? For example, String can be assigned to Object (by an // upcast), however, an Object cannot be assigned to a String as a potentially exception throwing // downcast would be necessary. Similarly for interfaces, a class that implements (or an interface // that extends) another can be assigned to its parent, but not vice-versa. All Classes may assign // to themselves. Classes for primitive types may not assign to each other. - ALWAYS_INLINE bool IsAssignableFrom(Class* src) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ALWAYS_INLINE bool IsAssignableFrom(Class* src) SHARED_REQUIRES(Locks::mutator_lock_); - ALWAYS_INLINE Class* GetSuperClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ALWAYS_INLINE Class* GetSuperClass() SHARED_REQUIRES(Locks::mutator_lock_); - void SetSuperClass(Class *new_super_class) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetSuperClass(Class *new_super_class) SHARED_REQUIRES(Locks::mutator_lock_) { // Super class is assigned once, except during class linker initialization. 
Class* old_super_class = GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(Class, super_class_)); DCHECK(old_super_class == nullptr || old_super_class == new_super_class); @@ -624,7 +626,7 @@ class MANAGED Class FINAL : public Object { SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, super_class_), new_super_class); } - bool HasSuperClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool HasSuperClass() SHARED_REQUIRES(Locks::mutator_lock_) { return GetSuperClass() != nullptr; } @@ -632,9 +634,9 @@ class MANAGED Class FINAL : public Object { return MemberOffset(OFFSETOF_MEMBER(Class, super_class_)); } - ClassLoader* GetClassLoader() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ClassLoader* GetClassLoader() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_); - void SetClassLoader(ClassLoader* new_cl) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetClassLoader(ClassLoader* new_cl) SHARED_REQUIRES(Locks::mutator_lock_); static MemberOffset DexCacheOffset() { return MemberOffset(OFFSETOF_MEMBER(Class, dex_cache_)); @@ -646,83 +648,83 @@ class MANAGED Class FINAL : public Object { kDumpClassInitialized = (1 << 2), }; - void DumpClass(std::ostream& os, int flags) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void DumpClass(std::ostream& os, int flags) SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - DexCache* GetDexCache() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + DexCache* GetDexCache() SHARED_REQUIRES(Locks::mutator_lock_); // Also updates the dex_cache_strings_ variable from new_dex_cache. - void SetDexCache(DexCache* new_dex_cache) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetDexCache(DexCache* new_dex_cache) SHARED_REQUIRES(Locks::mutator_lock_); ALWAYS_INLINE StrideIterator<ArtMethod> DirectMethodsBegin(size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ALWAYS_INLINE StrideIterator<ArtMethod> DirectMethodsEnd(size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetDirectMethods(size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - ArtMethod* GetDirectMethodsPtr() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);\ + ArtMethod* GetDirectMethodsPtr() SHARED_REQUIRES(Locks::mutator_lock_); void SetDirectMethodsPtr(ArtMethod* new_direct_methods) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Used by image writer. void SetDirectMethodsPtrUnchecked(ArtMethod* new_direct_methods) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ALWAYS_INLINE ArtMethod* GetDirectMethod(size_t i, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Use only when we are allocating populating the method arrays. ALWAYS_INLINE ArtMethod* GetDirectMethodUnchecked(size_t i, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ALWAYS_INLINE ArtMethod* GetVirtualMethodUnchecked(size_t i, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Returns the number of static, private, and constructor methods. 
- ALWAYS_INLINE uint32_t NumDirectMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ALWAYS_INLINE uint32_t NumDirectMethods() SHARED_REQUIRES(Locks::mutator_lock_) { return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_direct_methods_)); } - void SetNumDirectMethods(uint32_t num) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetNumDirectMethods(uint32_t num) SHARED_REQUIRES(Locks::mutator_lock_) { return SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, num_direct_methods_), num); } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - ALWAYS_INLINE ArtMethod* GetVirtualMethodsPtr() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ALWAYS_INLINE ArtMethod* GetVirtualMethodsPtr() SHARED_REQUIRES(Locks::mutator_lock_); ALWAYS_INLINE StrideIterator<ArtMethod> VirtualMethodsBegin(size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ALWAYS_INLINE StrideIterator<ArtMethod> VirtualMethodsEnd(size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetVirtualMethods(size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void SetVirtualMethodsPtr(ArtMethod* new_virtual_methods) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Returns the number of non-inherited virtual methods. - ALWAYS_INLINE uint32_t NumVirtualMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ALWAYS_INLINE uint32_t NumVirtualMethods() SHARED_REQUIRES(Locks::mutator_lock_) { return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_virtual_methods_)); } - void SetNumVirtualMethods(uint32_t num) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetNumVirtualMethods(uint32_t num) SHARED_REQUIRES(Locks::mutator_lock_) { return SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, num_virtual_methods_), num); } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> ArtMethod* GetVirtualMethod(size_t i, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ArtMethod* GetVirtualMethodDuringLinking(size_t i, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - ALWAYS_INLINE PointerArray* GetVTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ALWAYS_INLINE PointerArray* GetVTable() SHARED_REQUIRES(Locks::mutator_lock_); - ALWAYS_INLINE PointerArray* GetVTableDuringLinking() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ALWAYS_INLINE PointerArray* GetVTableDuringLinking() SHARED_REQUIRES(Locks::mutator_lock_); - void SetVTable(PointerArray* new_vtable) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetVTable(PointerArray* new_vtable) SHARED_REQUIRES(Locks::mutator_lock_); static MemberOffset VTableOffset() { return OFFSET_OF_OBJECT_MEMBER(Class, vtable_); @@ -732,362 +734,363 @@ class MANAGED Class FINAL : public Object { return MemberOffset(sizeof(Class)); } - bool ShouldHaveEmbeddedImtAndVTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool ShouldHaveEmbeddedImtAndVTable() SHARED_REQUIRES(Locks::mutator_lock_) { return IsInstantiable(); } - bool HasVTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool HasVTable() SHARED_REQUIRES(Locks::mutator_lock_); static MemberOffset EmbeddedImTableEntryOffset(uint32_t i, size_t pointer_size); static MemberOffset EmbeddedVTableEntryOffset(uint32_t i, size_t pointer_size); 
ArtMethod* GetEmbeddedImTableEntry(uint32_t i, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void SetEmbeddedImTableEntry(uint32_t i, ArtMethod* method, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - int32_t GetVTableLength() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + int32_t GetVTableLength() SHARED_REQUIRES(Locks::mutator_lock_); ArtMethod* GetVTableEntry(uint32_t i, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - int32_t GetEmbeddedVTableLength() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + int32_t GetEmbeddedVTableLength() SHARED_REQUIRES(Locks::mutator_lock_); - void SetEmbeddedVTableLength(int32_t len) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetEmbeddedVTableLength(int32_t len) SHARED_REQUIRES(Locks::mutator_lock_); ArtMethod* GetEmbeddedVTableEntry(uint32_t i, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void SetEmbeddedVTableEntry(uint32_t i, ArtMethod* method, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); inline void SetEmbeddedVTableEntryUnchecked(uint32_t i, ArtMethod* method, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void PopulateEmbeddedImtAndVTable(ArtMethod* const (&methods)[kImtSize], size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Given a method implemented by this class but potentially from a super class, return the // specific implementation method for this class. ArtMethod* FindVirtualMethodForVirtual(ArtMethod* method, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Given a method implemented by this class' super class, return the specific implementation // method for this class. ArtMethod* FindVirtualMethodForSuper(ArtMethod* method, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Given a method implemented by this class, but potentially from a // super class or interface, return the specific implementation // method for this class. 
ArtMethod* FindVirtualMethodForInterface(ArtMethod* method, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE; + SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE; ArtMethod* FindVirtualMethodForVirtualOrInterface(ArtMethod* method, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ArtMethod* FindInterfaceMethod(const StringPiece& name, const StringPiece& signature, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ArtMethod* FindInterfaceMethod(const StringPiece& name, const Signature& signature, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ArtMethod* FindInterfaceMethod(const DexCache* dex_cache, uint32_t dex_method_idx, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ArtMethod* FindDeclaredDirectMethod(const StringPiece& name, const StringPiece& signature, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ArtMethod* FindDeclaredDirectMethod(const StringPiece& name, const Signature& signature, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ArtMethod* FindDeclaredDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ArtMethod* FindDirectMethod(const StringPiece& name, const StringPiece& signature, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ArtMethod* FindDirectMethod(const StringPiece& name, const Signature& signature, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ArtMethod* FindDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ArtMethod* FindDeclaredVirtualMethod(const StringPiece& name, const StringPiece& signature, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ArtMethod* FindDeclaredVirtualMethod(const StringPiece& name, const Signature& signature, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ArtMethod* FindDeclaredVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ArtMethod* FindVirtualMethod(const StringPiece& name, const StringPiece& signature, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ArtMethod* FindVirtualMethod(const StringPiece& name, const Signature& signature, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ArtMethod* FindVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - ArtMethod* FindClassInitializer(size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ArtMethod* FindClassInitializer(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_); - ALWAYS_INLINE int32_t GetIfTableCount() 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ALWAYS_INLINE int32_t GetIfTableCount() SHARED_REQUIRES(Locks::mutator_lock_); - ALWAYS_INLINE IfTable* GetIfTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ALWAYS_INLINE IfTable* GetIfTable() SHARED_REQUIRES(Locks::mutator_lock_); - ALWAYS_INLINE void SetIfTable(IfTable* new_iftable) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ALWAYS_INLINE void SetIfTable(IfTable* new_iftable) SHARED_REQUIRES(Locks::mutator_lock_); // Get instance fields of the class (See also GetSFields). - ArtField* GetIFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ArtField* GetIFields() SHARED_REQUIRES(Locks::mutator_lock_); - void SetIFields(ArtField* new_ifields) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetIFields(ArtField* new_ifields) SHARED_REQUIRES(Locks::mutator_lock_); // Unchecked edition has no verification flags. - void SetIFieldsUnchecked(ArtField* new_sfields) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetIFieldsUnchecked(ArtField* new_sfields) SHARED_REQUIRES(Locks::mutator_lock_); - uint32_t NumInstanceFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + uint32_t NumInstanceFields() SHARED_REQUIRES(Locks::mutator_lock_) { return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_instance_fields_)); } - void SetNumInstanceFields(uint32_t num) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetNumInstanceFields(uint32_t num) SHARED_REQUIRES(Locks::mutator_lock_) { return SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, num_instance_fields_), num); } - ArtField* GetInstanceField(uint32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ArtField* GetInstanceField(uint32_t i) SHARED_REQUIRES(Locks::mutator_lock_); // Returns the number of instance fields containing reference types. - uint32_t NumReferenceInstanceFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + uint32_t NumReferenceInstanceFields() SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(IsResolved() || IsErroneous()); return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_instance_fields_)); } - uint32_t NumReferenceInstanceFieldsDuringLinking() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + uint32_t NumReferenceInstanceFieldsDuringLinking() SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(IsLoaded() || IsErroneous()); return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_instance_fields_)); } - void SetNumReferenceInstanceFields(uint32_t new_num) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetNumReferenceInstanceFields(uint32_t new_num) SHARED_REQUIRES(Locks::mutator_lock_) { // Not called within a transaction. SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_instance_fields_), new_num); } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - uint32_t GetReferenceInstanceOffsets() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + uint32_t GetReferenceInstanceOffsets() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_); void SetReferenceInstanceOffsets(uint32_t new_reference_offsets) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Get the offset of the first reference instance field. Other reference instance fields follow. MemberOffset GetFirstReferenceInstanceFieldOffset() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Returns the number of static fields containing reference types. 
- uint32_t NumReferenceStaticFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + uint32_t NumReferenceStaticFields() SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(IsResolved() || IsErroneous()); return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_static_fields_)); } - uint32_t NumReferenceStaticFieldsDuringLinking() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + uint32_t NumReferenceStaticFieldsDuringLinking() SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(IsLoaded() || IsErroneous() || IsRetired()); return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_static_fields_)); } - void SetNumReferenceStaticFields(uint32_t new_num) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetNumReferenceStaticFields(uint32_t new_num) SHARED_REQUIRES(Locks::mutator_lock_) { // Not called within a transaction. SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_static_fields_), new_num); } // Get the offset of the first reference static field. Other reference static fields follow. MemberOffset GetFirstReferenceStaticFieldOffset(size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Get the offset of the first reference static field. Other reference static fields follow. MemberOffset GetFirstReferenceStaticFieldOffsetDuringLinking(size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Gets the static fields of the class. - ArtField* GetSFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ArtField* GetSFields() SHARED_REQUIRES(Locks::mutator_lock_); - void SetSFields(ArtField* new_sfields) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetSFields(ArtField* new_sfields) SHARED_REQUIRES(Locks::mutator_lock_); // Unchecked edition has no verification flags. - void SetSFieldsUnchecked(ArtField* new_sfields) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetSFieldsUnchecked(ArtField* new_sfields) SHARED_REQUIRES(Locks::mutator_lock_); - uint32_t NumStaticFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + uint32_t NumStaticFields() SHARED_REQUIRES(Locks::mutator_lock_) { return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_static_fields_)); } - void SetNumStaticFields(uint32_t num) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetNumStaticFields(uint32_t num) SHARED_REQUIRES(Locks::mutator_lock_) { return SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, num_static_fields_), num); } // TODO: uint16_t - ArtField* GetStaticField(uint32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ArtField* GetStaticField(uint32_t i) SHARED_REQUIRES(Locks::mutator_lock_); // Find a static or instance field using the JLS resolution order static ArtField* FindField(Thread* self, Handle<Class> klass, const StringPiece& name, const StringPiece& type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Finds the given instance field in this class or a superclass. ArtField* FindInstanceField(const StringPiece& name, const StringPiece& type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Finds the given instance field in this class or a superclass, only searches classes that // have the same dex cache. 
ArtField* FindInstanceField(const DexCache* dex_cache, uint32_t dex_field_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ArtField* FindDeclaredInstanceField(const StringPiece& name, const StringPiece& type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ArtField* FindDeclaredInstanceField(const DexCache* dex_cache, uint32_t dex_field_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Finds the given static field in this class or a superclass. static ArtField* FindStaticField(Thread* self, Handle<Class> klass, const StringPiece& name, const StringPiece& type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Finds the given static field in this class or superclass, only searches classes that // have the same dex cache. static ArtField* FindStaticField(Thread* self, Handle<Class> klass, const DexCache* dex_cache, uint32_t dex_field_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ArtField* FindDeclaredStaticField(const StringPiece& name, const StringPiece& type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ArtField* FindDeclaredStaticField(const DexCache* dex_cache, uint32_t dex_field_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - pid_t GetClinitThreadId() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + pid_t GetClinitThreadId() SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(IsIdxLoaded() || IsErroneous()); return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, clinit_thread_id_)); } - void SetClinitThreadId(pid_t new_clinit_thread_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetClinitThreadId(pid_t new_clinit_thread_id) SHARED_REQUIRES(Locks::mutator_lock_); - Class* GetVerifyErrorClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Class* GetVerifyErrorClass() SHARED_REQUIRES(Locks::mutator_lock_) { // DCHECK(IsErroneous()); return GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(Class, verify_error_class_)); } - uint16_t GetDexClassDefIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + uint16_t GetDexClassDefIndex() SHARED_REQUIRES(Locks::mutator_lock_) { return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, dex_class_def_idx_)); } - void SetDexClassDefIndex(uint16_t class_def_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetDexClassDefIndex(uint16_t class_def_idx) SHARED_REQUIRES(Locks::mutator_lock_) { // Not called within a transaction. SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, dex_class_def_idx_), class_def_idx); } - uint16_t GetDexTypeIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + uint16_t GetDexTypeIndex() SHARED_REQUIRES(Locks::mutator_lock_) { return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, dex_type_idx_)); } - void SetDexTypeIndex(uint16_t type_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetDexTypeIndex(uint16_t type_idx) SHARED_REQUIRES(Locks::mutator_lock_) { // Not called within a transaction. 
SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, dex_type_idx_), type_idx); } - static Class* GetJavaLangClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + static Class* GetJavaLangClass() SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(HasJavaLangClass()); return java_lang_Class_.Read(); } - static bool HasJavaLangClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + static bool HasJavaLangClass() SHARED_REQUIRES(Locks::mutator_lock_) { return !java_lang_Class_.IsNull(); } // Can't call this SetClass or else gets called instead of Object::SetClass in places. - static void SetClassClass(Class* java_lang_Class) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static void SetClassClass(Class* java_lang_Class) SHARED_REQUIRES(Locks::mutator_lock_); static void ResetClass(); static void VisitRoots(RootVisitor* visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Visit native roots visits roots which are keyed off the native pointers such as ArtFields and // ArtMethods. template<class Visitor> void VisitNativeRoots(Visitor& visitor, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // When class is verified, set the kAccPreverified flag on each method. void SetPreverifiedFlagOnAllMethods(size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template <bool kVisitClass, typename Visitor> void VisitReferences(mirror::Class* klass, const Visitor& visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Get the descriptor of the class. In a few cases a std::string is required, rather than // always create one the storage argument is populated and its internal c_str() returned. We do // this to avoid memory allocation in the common case. 
- const char* GetDescriptor(std::string* storage) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const char* GetDescriptor(std::string* storage) SHARED_REQUIRES(Locks::mutator_lock_); - const char* GetArrayDescriptor(std::string* storage) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const char* GetArrayDescriptor(std::string* storage) SHARED_REQUIRES(Locks::mutator_lock_); - bool DescriptorEquals(const char* match) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool DescriptorEquals(const char* match) SHARED_REQUIRES(Locks::mutator_lock_); - const DexFile::ClassDef* GetClassDef() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const DexFile::ClassDef* GetClassDef() SHARED_REQUIRES(Locks::mutator_lock_); - ALWAYS_INLINE uint32_t NumDirectInterfaces() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ALWAYS_INLINE uint32_t NumDirectInterfaces() SHARED_REQUIRES(Locks::mutator_lock_); - uint16_t GetDirectInterfaceTypeIdx(uint32_t idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + uint16_t GetDirectInterfaceTypeIdx(uint32_t idx) SHARED_REQUIRES(Locks::mutator_lock_); static mirror::Class* GetDirectInterface(Thread* self, Handle<mirror::Class> klass, uint32_t idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - const char* GetSourceFile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const char* GetSourceFile() SHARED_REQUIRES(Locks::mutator_lock_); - std::string GetLocation() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string GetLocation() SHARED_REQUIRES(Locks::mutator_lock_); - const DexFile& GetDexFile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const DexFile& GetDexFile() SHARED_REQUIRES(Locks::mutator_lock_); - const DexFile::TypeList* GetInterfaceTypeList() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const DexFile::TypeList* GetInterfaceTypeList() SHARED_REQUIRES(Locks::mutator_lock_); // Asserts we are initialized or initializing in the given thread. void AssertInitializedOrInitializingInThread(Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); Class* CopyOf(Thread* self, int32_t new_length, ArtMethod* const (&imt)[mirror::Class::kImtSize], - size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + size_t pointer_size) + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); // For proxy class only. - ObjectArray<Class>* GetInterfaces() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ObjectArray<Class>* GetInterfaces() SHARED_REQUIRES(Locks::mutator_lock_); // For proxy class only. - ObjectArray<ObjectArray<Class>>* GetThrows() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ObjectArray<ObjectArray<Class>>* GetThrows() SHARED_REQUIRES(Locks::mutator_lock_); // For reference class only. 
- MemberOffset GetDisableIntrinsicFlagOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - MemberOffset GetSlowPathFlagOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool GetSlowPathEnabled() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void SetSlowPath(bool enabled) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + MemberOffset GetDisableIntrinsicFlagOffset() SHARED_REQUIRES(Locks::mutator_lock_); + MemberOffset GetSlowPathFlagOffset() SHARED_REQUIRES(Locks::mutator_lock_); + bool GetSlowPathEnabled() SHARED_REQUIRES(Locks::mutator_lock_); + void SetSlowPath(bool enabled) SHARED_REQUIRES(Locks::mutator_lock_); - ObjectArray<String>* GetDexCacheStrings() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ObjectArray<String>* GetDexCacheStrings() SHARED_REQUIRES(Locks::mutator_lock_); void SetDexCacheStrings(ObjectArray<String>* new_dex_cache_strings) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static MemberOffset DexCacheStringsOffset() { return OFFSET_OF_OBJECT_MEMBER(Class, dex_cache_strings_); } @@ -1095,7 +1098,7 @@ class MANAGED Class FINAL : public Object { // May cause thread suspension due to EqualParameters. ArtMethod* GetDeclaredConstructor( Thread* self, Handle<mirror::ObjectArray<mirror::Class>> args) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Used to initialize a class in the allocation code path to ensure it is guarded by a StoreStore // fence. @@ -1105,7 +1108,7 @@ class MANAGED Class FINAL : public Object { } void operator()(mirror::Object* obj, size_t usable_size) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); private: const uint32_t class_size_; @@ -1114,7 +1117,7 @@ class MANAGED Class FINAL : public Object { }; // Returns true if the class loader is null, ie the class loader is the boot strap class loader. 
- bool IsBootStrapClassLoaded() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsBootStrapClassLoaded() SHARED_REQUIRES(Locks::mutator_lock_) { return GetClassLoader() == nullptr; } @@ -1127,34 +1130,34 @@ class MANAGED Class FINAL : public Object { } ALWAYS_INLINE ArtMethod* GetDirectMethodsPtrUnchecked() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ALWAYS_INLINE ArtMethod* GetVirtualMethodsPtrUnchecked() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); private: - void SetVerifyErrorClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetVerifyErrorClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_); template <bool throw_on_failure, bool use_referrers_cache> bool ResolvedFieldAccessTest(Class* access_to, ArtField* field, uint32_t field_idx, DexCache* dex_cache) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template <bool throw_on_failure, bool use_referrers_cache, InvokeType throw_invoke_type> bool ResolvedMethodAccessTest(Class* access_to, ArtMethod* resolved_method, uint32_t method_idx, DexCache* dex_cache) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - bool Implements(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool IsArrayAssignableFromArray(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool IsAssignableFromArray(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool Implements(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_); + bool IsArrayAssignableFromArray(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_); + bool IsAssignableFromArray(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_); - void CheckObjectAlloc() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void CheckObjectAlloc() SHARED_REQUIRES(Locks::mutator_lock_); // Unchecked editions are for root visiting. - ArtField* GetSFieldsUnchecked() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - ArtField* GetIFieldsUnchecked() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ArtField* GetSFieldsUnchecked() SHARED_REQUIRES(Locks::mutator_lock_); + ArtField* GetIFieldsUnchecked() SHARED_REQUIRES(Locks::mutator_lock_); - bool ProxyDescriptorEquals(const char* match) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool ProxyDescriptorEquals(const char* match) SHARED_REQUIRES(Locks::mutator_lock_); // Check that the pointer size matches the one in the class linker. 
ALWAYS_INLINE static void CheckPointerSize(size_t pointer_size); diff --git a/runtime/mirror/class_loader.h b/runtime/mirror/class_loader.h index b10a296f18..134f1cdee4 100644 --- a/runtime/mirror/class_loader.h +++ b/runtime/mirror/class_loader.h @@ -32,7 +32,7 @@ class MANAGED ClassLoader : public Object { static constexpr uint32_t InstanceSize() { return sizeof(ClassLoader); } - ClassLoader* GetParent() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ClassLoader* GetParent() SHARED_REQUIRES(Locks::mutator_lock_) { return GetFieldObject<ClassLoader>(OFFSET_OF_OBJECT_MEMBER(ClassLoader, parent_)); } diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h index 0ce83ec746..ba49a15f22 100644 --- a/runtime/mirror/dex_cache.h +++ b/runtime/mirror/dex_cache.h @@ -48,12 +48,12 @@ class MANAGED DexCache FINAL : public Object { void Init(const DexFile* dex_file, String* location, ObjectArray<String>* strings, ObjectArray<Class>* types, PointerArray* methods, PointerArray* fields, - size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_); void Fixup(ArtMethod* trampoline, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - String* GetLocation() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + String* GetLocation() SHARED_REQUIRES(Locks::mutator_lock_) { return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(DexCache, location_)); } @@ -73,76 +73,76 @@ class MANAGED DexCache FINAL : public Object { return OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_methods_); } - size_t NumStrings() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + size_t NumStrings() SHARED_REQUIRES(Locks::mutator_lock_) { return GetStrings()->GetLength(); } - size_t NumResolvedTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + size_t NumResolvedTypes() SHARED_REQUIRES(Locks::mutator_lock_) { return GetResolvedTypes()->GetLength(); } - size_t NumResolvedMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + size_t NumResolvedMethods() SHARED_REQUIRES(Locks::mutator_lock_) { return GetResolvedMethods()->GetLength(); } - size_t NumResolvedFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + size_t NumResolvedFields() SHARED_REQUIRES(Locks::mutator_lock_) { return GetResolvedFields()->GetLength(); } - String* GetResolvedString(uint32_t string_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + String* GetResolvedString(uint32_t string_idx) SHARED_REQUIRES(Locks::mutator_lock_) { return GetStrings()->Get(string_idx); } void SetResolvedString(uint32_t string_idx, String* resolved) ALWAYS_INLINE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // TODO default transaction support. GetStrings()->Set(string_idx, resolved); } Class* GetResolvedType(uint32_t type_idx) ALWAYS_INLINE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return GetResolvedTypes()->Get(type_idx); } void SetResolvedType(uint32_t type_idx, Class* resolved) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ALWAYS_INLINE ArtMethod* GetResolvedMethod(uint32_t method_idx, size_t ptr_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ALWAYS_INLINE void SetResolvedMethod(uint32_t method_idx, ArtMethod* resolved, size_t ptr_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Pointer sized variant, used for patching. 
ALWAYS_INLINE ArtField* GetResolvedField(uint32_t idx, size_t ptr_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Pointer sized variant, used for patching. ALWAYS_INLINE void SetResolvedField(uint32_t idx, ArtField* field, size_t ptr_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - ObjectArray<String>* GetStrings() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ObjectArray<String>* GetStrings() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) { return GetFieldObject<ObjectArray<String>>(StringsOffset()); } - ObjectArray<Class>* GetResolvedTypes() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ObjectArray<Class>* GetResolvedTypes() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) { return GetFieldObject<ObjectArray<Class>>( OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_types_)); } - PointerArray* GetResolvedMethods() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + PointerArray* GetResolvedMethods() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) { return GetFieldObject<PointerArray>(ResolvedMethodsOffset()); } - PointerArray* GetResolvedFields() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + PointerArray* GetResolvedFields() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) { return GetFieldObject<PointerArray>(ResolvedFieldsOffset()); } - const DexFile* GetDexFile() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + const DexFile* GetDexFile() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) { return GetFieldPtr<const DexFile*>(OFFSET_OF_OBJECT_MEMBER(DexCache, dex_file_)); } - void SetDexFile(const DexFile* dex_file) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + void SetDexFile(const DexFile* dex_file) SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE { return SetFieldPtr<false>(OFFSET_OF_OBJECT_MEMBER(DexCache, dex_file_), dex_file); } diff --git a/runtime/mirror/field.h b/runtime/mirror/field.h index d927f0c258..edaddbd2e7 100644 --- a/runtime/mirror/field.h +++ b/runtime/mirror/field.h @@ -36,66 +36,66 @@ class String; // C++ mirror of java.lang.reflect.Field. 
class MANAGED Field : public AccessibleObject { public: - static mirror::Class* StaticClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + static mirror::Class* StaticClass() SHARED_REQUIRES(Locks::mutator_lock_) { return static_class_.Read(); } - static mirror::Class* ArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + static mirror::Class* ArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) { return array_class_.Read(); } - ALWAYS_INLINE uint32_t GetDexFieldIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ALWAYS_INLINE uint32_t GetDexFieldIndex() SHARED_REQUIRES(Locks::mutator_lock_) { return GetField32(OFFSET_OF_OBJECT_MEMBER(Field, dex_field_index_)); } - mirror::Class* GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Class* GetDeclaringClass() SHARED_REQUIRES(Locks::mutator_lock_) { return GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(Field, declaring_class_)); } - uint32_t GetAccessFlags() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + uint32_t GetAccessFlags() SHARED_REQUIRES(Locks::mutator_lock_) { return GetField32(OFFSET_OF_OBJECT_MEMBER(Field, access_flags_)); } - bool IsStatic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsStatic() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags() & kAccStatic) != 0; } - bool IsFinal() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsFinal() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags() & kAccFinal) != 0; } - bool IsVolatile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsVolatile() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags() & kAccVolatile) != 0; } ALWAYS_INLINE Primitive::Type GetTypeAsPrimitiveType() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return GetType()->GetPrimitiveType(); } - mirror::Class* GetType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Class* GetType() SHARED_REQUIRES(Locks::mutator_lock_) { return GetFieldObject<mirror::Class>(OFFSET_OF_OBJECT_MEMBER(Field, type_)); } - int32_t GetOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + int32_t GetOffset() SHARED_REQUIRES(Locks::mutator_lock_) { return GetField32(OFFSET_OF_OBJECT_MEMBER(Field, offset_)); } - static void SetClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static void ResetClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static void SetClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_); + static void ResetClass() SHARED_REQUIRES(Locks::mutator_lock_); - static void SetArrayClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static void ResetArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static void SetArrayClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_); + static void ResetArrayClass() SHARED_REQUIRES(Locks::mutator_lock_); - static void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_); // Slow, try to use only for PrettyField and such. 
- ArtField* GetArtField() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ArtField* GetArtField() SHARED_REQUIRES(Locks::mutator_lock_); template <bool kTransactionActive = false> static mirror::Field* CreateFromArtField(Thread* self, ArtField* field, bool force_resolve) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); private: HeapReference<mirror::Class> declaring_class_; @@ -105,27 +105,27 @@ class MANAGED Field : public AccessibleObject { int32_t offset_; template<bool kTransactionActive> - void SetDeclaringClass(mirror::Class* c) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetDeclaringClass(mirror::Class* c) SHARED_REQUIRES(Locks::mutator_lock_) { SetFieldObject<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, declaring_class_), c); } template<bool kTransactionActive> - void SetType(mirror::Class* type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetType(mirror::Class* type) SHARED_REQUIRES(Locks::mutator_lock_) { SetFieldObject<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, type_), type); } template<bool kTransactionActive> - void SetAccessFlags(uint32_t flags) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetAccessFlags(uint32_t flags) SHARED_REQUIRES(Locks::mutator_lock_) { SetField32<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, access_flags_), flags); } template<bool kTransactionActive> - void SetDexFieldIndex(uint32_t idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetDexFieldIndex(uint32_t idx) SHARED_REQUIRES(Locks::mutator_lock_) { SetField32<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, dex_field_index_), idx); } template<bool kTransactionActive> - void SetOffset(uint32_t offset) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetOffset(uint32_t offset) SHARED_REQUIRES(Locks::mutator_lock_) { SetField32<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, offset_), offset); } diff --git a/runtime/mirror/iftable.h b/runtime/mirror/iftable.h index 1ea5beeae3..b21ecdf6e8 100644 --- a/runtime/mirror/iftable.h +++ b/runtime/mirror/iftable.h @@ -25,34 +25,34 @@ namespace mirror { class MANAGED IfTable FINAL : public ObjectArray<Object> { public: - ALWAYS_INLINE Class* GetInterface(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ALWAYS_INLINE Class* GetInterface(int32_t i) SHARED_REQUIRES(Locks::mutator_lock_) { Class* interface = GetWithoutChecks((i * kMax) + kInterface)->AsClass(); DCHECK(interface != nullptr); return interface; } ALWAYS_INLINE void SetInterface(int32_t i, Class* interface) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - PointerArray* GetMethodArray(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + PointerArray* GetMethodArray(int32_t i) SHARED_REQUIRES(Locks::mutator_lock_) { auto* method_array = down_cast<PointerArray*>(Get((i * kMax) + kMethodArray)); DCHECK(method_array != nullptr); return method_array; } - size_t GetMethodArrayCount(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + size_t GetMethodArrayCount(int32_t i) SHARED_REQUIRES(Locks::mutator_lock_) { auto* method_array = down_cast<PointerArray*>(Get((i * kMax) + kMethodArray)); return method_array == nullptr ? 
0u : method_array->GetLength(); } - void SetMethodArray(int32_t i, PointerArray* arr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetMethodArray(int32_t i, PointerArray* arr) SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(arr != nullptr); auto idx = i * kMax + kMethodArray; DCHECK(Get(idx) == nullptr); Set<false>(idx, arr); } - size_t Count() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + size_t Count() SHARED_REQUIRES(Locks::mutator_lock_) { return GetLength() / kMax; } diff --git a/runtime/mirror/method.h b/runtime/mirror/method.h index 42c76c045c..0c28e4f580 100644 --- a/runtime/mirror/method.h +++ b/runtime/mirror/method.h @@ -29,25 +29,25 @@ class Class; class MANAGED Method : public AbstractMethod { public: static Method* CreateFromArtMethod(Thread* self, ArtMethod* method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); - static mirror::Class* StaticClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + static mirror::Class* StaticClass() SHARED_REQUIRES(Locks::mutator_lock_) { return static_class_.Read(); } - static void SetClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static void SetClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_); - static void ResetClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static void ResetClass() SHARED_REQUIRES(Locks::mutator_lock_); - static mirror::Class* ArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + static mirror::Class* ArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) { return array_class_.Read(); } - static void SetArrayClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static void SetArrayClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_); - static void ResetArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static void ResetArrayClass() SHARED_REQUIRES(Locks::mutator_lock_); - static void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_); private: static GcRoot<Class> static_class_; // java.lang.reflect.Method.class. 
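For readers unfamiliar with the annotations being renamed throughout these hunks: SHARED_REQUIRES(lock) marks a function whose caller must hold the lock at least for reading, while REQUIRES(!lock) states a negative capability, i.e. the caller must not already hold the lock on entry. The following is a minimal, self-contained sketch of how such macros can map onto Clang's thread-safety attributes; the names ToyMutex, g_lock, g_value, ReadValue, and ReadWithLock are illustrative only and not taken from this patch, and the macro expansions shown are an assumption rather than ART's actual definitions.

#include <cstdio>

// Assumed expansions; ART defines its own equivalents elsewhere and they may differ.
#define CAPABILITY(x)        __attribute__((capability(x)))
#define GUARDED_BY(x)        __attribute__((guarded_by(x)))
#define REQUIRES(...)        __attribute__((requires_capability(__VA_ARGS__)))
#define SHARED_REQUIRES(...) __attribute__((requires_shared_capability(__VA_ARGS__)))
#define ACQUIRE(...)         __attribute__((acquire_capability(__VA_ARGS__)))
#define RELEASE(...)         __attribute__((release_capability(__VA_ARGS__)))

// Toy lock standing in for something like Locks::mutator_lock_.
class CAPABILITY("mutex") ToyMutex {
 public:
  void Lock() ACQUIRE() {}
  void Unlock() RELEASE() {}
};

ToyMutex g_lock;
int g_value GUARDED_BY(g_lock) = 0;

// Old spelling: SHARED_LOCKS_REQUIRED(g_lock). Caller must hold g_lock, shared or exclusive.
int ReadValue() SHARED_REQUIRES(g_lock) { return g_value; }

// Negative capability: the caller must NOT already hold g_lock, because it is acquired here.
// This is the shape of the REQUIRES(!...) annotations added elsewhere in this change.
int ReadWithLock() REQUIRES(!g_lock) {
  g_lock.Lock();
  int v = ReadValue();  // OK: an exclusive hold satisfies the shared requirement.
  g_lock.Unlock();
  return v;
}

int main() {
  // Under strict negative-capability analysis this caller would also be expected to
  // declare REQUIRES(!g_lock); omitted here to keep the sketch short.
  std::printf("%d\n", ReadWithLock());
  return 0;
}

Compiled with clang++ -Wthread-safety -Wthread-safety-negative, dropping the REQUIRES(!g_lock) annotation from ReadWithLock, or calling ReadValue without the lock held, should produce a thread-safety warning; the REQUIRES(!Roles::uninterruptible_) and REQUIRES(!Locks::thread_list_lock_, ...) annotations added elsewhere in this change follow the same negative-capability pattern.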
@@ -60,25 +60,25 @@ class MANAGED Method : public AbstractMethod { class MANAGED Constructor: public AbstractMethod { public: static Constructor* CreateFromArtMethod(Thread* self, ArtMethod* method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); - static mirror::Class* StaticClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + static mirror::Class* StaticClass() SHARED_REQUIRES(Locks::mutator_lock_) { return static_class_.Read(); } - static void SetClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static void SetClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_); - static void ResetClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static void ResetClass() SHARED_REQUIRES(Locks::mutator_lock_); - static mirror::Class* ArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + static mirror::Class* ArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) { return array_class_.Read(); } - static void SetArrayClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static void SetArrayClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_); - static void ResetArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static void ResetArrayClass() SHARED_REQUIRES(Locks::mutator_lock_); - static void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_); private: static GcRoot<Class> static_class_; // java.lang.reflect.Constructor.class. diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h index e019d5aa72..c5610b5a2e 100644 --- a/runtime/mirror/object-inl.h +++ b/runtime/mirror/object-inl.h @@ -477,7 +477,7 @@ inline int8_t Object::GetFieldByteVolatile(MemberOffset field_offset) { template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags, bool kIsVolatile> inline void Object::SetFieldBoolean(MemberOffset field_offset, uint8_t new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (kCheckTransaction) { DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction()); } @@ -495,7 +495,7 @@ inline void Object::SetFieldBoolean(MemberOffset field_offset, uint8_t new_value template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags, bool kIsVolatile> inline void Object::SetFieldByte(MemberOffset field_offset, int8_t new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (kCheckTransaction) { DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction()); } diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc index b177e2f579..80decaa53d 100644 --- a/runtime/mirror/object.cc +++ b/runtime/mirror/object.cc @@ -47,7 +47,7 @@ class CopyReferenceFieldsWithReadBarrierVisitor { : dest_obj_(dest_obj) {} void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const - ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) { // GetFieldObject() contains a RB. 
Object* ref = obj->GetFieldObject<Object>(offset); // No WB here as a large object space does not have a card table @@ -56,13 +56,18 @@ class CopyReferenceFieldsWithReadBarrierVisitor { } void operator()(mirror::Class* klass, mirror::Reference* ref) const - ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) { // Copy java.lang.ref.Reference.referent which isn't visited in // Object::VisitReferences(). DCHECK(klass->IsTypeOfReferenceClass()); this->operator()(ref, mirror::Reference::ReferentOffset(), false); } + // Unused since we don't copy class native roots. + void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) + const {} + void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {} + private: Object* const dest_obj_; }; @@ -107,7 +112,7 @@ class CopyObjectVisitor { } void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { Object::CopyObject(self_, obj, orig_->Get(), num_bytes_); } diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h index f1c96b5007..eea9f3751e 100644 --- a/runtime/mirror/object.h +++ b/runtime/mirror/object.h @@ -84,40 +84,40 @@ class MANAGED LOCKABLE Object { template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, ReadBarrierOption kReadBarrierOption = kWithReadBarrier> - ALWAYS_INLINE Class* GetClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ALWAYS_INLINE Class* GetClass() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - void SetClass(Class* new_klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetClass(Class* new_klass) SHARED_REQUIRES(Locks::mutator_lock_); - Object* GetReadBarrierPointer() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + Object* GetReadBarrierPointer() SHARED_REQUIRES(Locks::mutator_lock_); #ifndef USE_BAKER_OR_BROOKS_READ_BARRIER NO_RETURN #endif - void SetReadBarrierPointer(Object* rb_ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetReadBarrierPointer(Object* rb_ptr) SHARED_REQUIRES(Locks::mutator_lock_); #ifndef USE_BAKER_OR_BROOKS_READ_BARRIER NO_RETURN #endif bool AtomicSetReadBarrierPointer(Object* expected_rb_ptr, Object* rb_ptr) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void AssertReadBarrierPointer() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); + void AssertReadBarrierPointer() const SHARED_REQUIRES(Locks::mutator_lock_); // The verifier treats all interfaces as java.lang.Object and relies on runtime checks in // invoke-interface to detect incompatible interface types. 
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool VerifierInstanceOf(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool VerifierInstanceOf(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - ALWAYS_INLINE bool InstanceOf(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ALWAYS_INLINE bool InstanceOf(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, ReadBarrierOption kReadBarrierOption = kWithReadBarrier> - size_t SizeOf() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + size_t SizeOf() SHARED_REQUIRES(Locks::mutator_lock_); - Object* Clone(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + Object* Clone(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Roles::uninterruptible_); int32_t IdentityHashCode() const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_); static MemberOffset MonitorOffset() { return OFFSET_OF_OBJECT_MEMBER(Object, monitor_); @@ -126,298 +126,298 @@ class MANAGED LOCKABLE Object { // As_volatile can be false if the mutators are suspended. This is an optimization since it // avoids the barriers. template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - LockWord GetLockWord(bool as_volatile) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + LockWord GetLockWord(bool as_volatile) SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - void SetLockWord(LockWord new_val, bool as_volatile) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetLockWord(LockWord new_val, bool as_volatile) SHARED_REQUIRES(Locks::mutator_lock_); bool CasLockWordWeakSequentiallyConsistent(LockWord old_val, LockWord new_val) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool CasLockWordWeakRelaxed(LockWord old_val, LockWord new_val) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); uint32_t GetLockOwnerThreadId(); - mirror::Object* MonitorEnter(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + mirror::Object* MonitorEnter(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) EXCLUSIVE_LOCK_FUNCTION(); - bool MonitorExit(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + bool MonitorExit(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) UNLOCK_FUNCTION(); - void Notify(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void NotifyAll(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void Wait(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void Wait(Thread* self, int64_t timeout, int32_t nanos) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void Notify(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_); + void NotifyAll(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_); + void Wait(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_); + void Wait(Thread* self, int64_t timeout, int32_t nanos) SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, ReadBarrierOption kReadBarrierOption = kWithReadBarrier> - bool IsClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsClass() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, 
ReadBarrierOption kReadBarrierOption = kWithReadBarrier> - Class* AsClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + Class* AsClass() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsObjectArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsObjectArray() SHARED_REQUIRES(Locks::mutator_lock_); template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - ObjectArray<T>* AsObjectArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ObjectArray<T>* AsObjectArray() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, ReadBarrierOption kReadBarrierOption = kWithReadBarrier> - bool IsArrayInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsArrayInstance() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, ReadBarrierOption kReadBarrierOption = kWithReadBarrier> - Array* AsArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + Array* AsArray() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - BooleanArray* AsBooleanArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + BooleanArray* AsBooleanArray() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - ByteArray* AsByteArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ByteArray* AsByteArray() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - ByteArray* AsByteSizedArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ByteArray* AsByteSizedArray() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - CharArray* AsCharArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + CharArray* AsCharArray() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - ShortArray* AsShortArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ShortArray* AsShortArray() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - ShortArray* AsShortSizedArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ShortArray* AsShortSizedArray() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsIntArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsIntArray() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - IntArray* AsIntArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + IntArray* AsIntArray() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsLongArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsLongArray() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - LongArray* AsLongArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + LongArray* AsLongArray() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsFloatArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsFloatArray() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - FloatArray* AsFloatArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + FloatArray* AsFloatArray() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = 
kDefaultVerifyFlags> - bool IsDoubleArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsDoubleArray() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - DoubleArray* AsDoubleArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + DoubleArray* AsDoubleArray() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, ReadBarrierOption kReadBarrierOption = kWithReadBarrier> - bool IsString() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsString() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, ReadBarrierOption kReadBarrierOption = kWithReadBarrier> - String* AsString() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + String* AsString() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - Throwable* AsThrowable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + Throwable* AsThrowable() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsReferenceInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsReferenceInstance() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - Reference* AsReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + Reference* AsReference() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsWeakReferenceInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsWeakReferenceInstance() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsSoftReferenceInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsSoftReferenceInstance() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsFinalizerReferenceInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsFinalizerReferenceInstance() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - FinalizerReference* AsFinalizerReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + FinalizerReference* AsFinalizerReference() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsPhantomReferenceInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsPhantomReferenceInstance() SHARED_REQUIRES(Locks::mutator_lock_); // Accessor for Java type fields. 
template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, ReadBarrierOption kReadBarrierOption = kWithReadBarrier, bool kIsVolatile = false> ALWAYS_INLINE T* GetFieldObject(MemberOffset field_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, ReadBarrierOption kReadBarrierOption = kWithReadBarrier> ALWAYS_INLINE T* GetFieldObjectVolatile(MemberOffset field_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false> ALWAYS_INLINE void SetFieldObjectWithoutWriteBarrier(MemberOffset field_offset, Object* new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false> ALWAYS_INLINE void SetFieldObject(MemberOffset field_offset, Object* new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> ALWAYS_INLINE void SetFieldObjectVolatile(MemberOffset field_offset, Object* new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> bool CasFieldWeakSequentiallyConsistentObject(MemberOffset field_offset, Object* old_value, Object* new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> bool CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier(MemberOffset field_offset, Object* old_value, Object* new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> bool CasFieldStrongSequentiallyConsistentObject(MemberOffset field_offset, Object* old_value, Object* new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> bool CasFieldStrongSequentiallyConsistentObjectWithoutWriteBarrier(MemberOffset field_offset, Object* old_value, Object* new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> HeapReference<Object>* GetFieldObjectReferenceAddr(MemberOffset field_offset); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false> ALWAYS_INLINE uint8_t GetFieldBoolean(MemberOffset field_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false> ALWAYS_INLINE int8_t GetFieldByte(MemberOffset field_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> ALWAYS_INLINE uint8_t 
GetFieldBooleanVolatile(MemberOffset field_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> ALWAYS_INLINE int8_t GetFieldByteVolatile(MemberOffset field_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false> ALWAYS_INLINE void SetFieldBoolean(MemberOffset field_offset, uint8_t new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false> ALWAYS_INLINE void SetFieldByte(MemberOffset field_offset, int8_t new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> ALWAYS_INLINE void SetFieldBooleanVolatile(MemberOffset field_offset, uint8_t new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> ALWAYS_INLINE void SetFieldByteVolatile(MemberOffset field_offset, int8_t new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false> ALWAYS_INLINE uint16_t GetFieldChar(MemberOffset field_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false> ALWAYS_INLINE int16_t GetFieldShort(MemberOffset field_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> ALWAYS_INLINE uint16_t GetFieldCharVolatile(MemberOffset field_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> ALWAYS_INLINE int16_t GetFieldShortVolatile(MemberOffset field_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false> ALWAYS_INLINE void SetFieldChar(MemberOffset field_offset, uint16_t new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false> ALWAYS_INLINE void SetFieldShort(MemberOffset field_offset, int16_t new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> ALWAYS_INLINE void SetFieldCharVolatile(MemberOffset field_offset, uint16_t new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> ALWAYS_INLINE void 
SetFieldShortVolatile(MemberOffset field_offset, int16_t new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false> ALWAYS_INLINE int32_t GetField32(MemberOffset field_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> ALWAYS_INLINE int32_t GetField32Volatile(MemberOffset field_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false> ALWAYS_INLINE void SetField32(MemberOffset field_offset, int32_t new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> ALWAYS_INLINE void SetField32Volatile(MemberOffset field_offset, int32_t new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> ALWAYS_INLINE bool CasFieldWeakSequentiallyConsistent32(MemberOffset field_offset, int32_t old_value, int32_t new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> bool CasFieldWeakRelaxed32(MemberOffset field_offset, int32_t old_value, int32_t new_value) ALWAYS_INLINE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> bool CasFieldStrongSequentiallyConsistent32(MemberOffset field_offset, int32_t old_value, int32_t new_value) ALWAYS_INLINE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false> ALWAYS_INLINE int64_t GetField64(MemberOffset field_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> ALWAYS_INLINE int64_t GetField64Volatile(MemberOffset field_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false> ALWAYS_INLINE void SetField64(MemberOffset field_offset, int64_t new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> ALWAYS_INLINE void SetField64Volatile(MemberOffset field_offset, int64_t new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> bool CasFieldWeakSequentiallyConsistent64(MemberOffset field_offset, int64_t old_value, int64_t new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); 
template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> bool CasFieldStrongSequentiallyConsistent64(MemberOffset field_offset, int64_t old_value, int64_t new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, typename T> void SetFieldPtr(MemberOffset field_offset, T new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { SetFieldPtrWithSize<kTransactionActive, kCheckTransaction, kVerifyFlags>( field_offset, new_value, sizeof(void*)); } @@ -426,7 +426,7 @@ class MANAGED LOCKABLE Object { VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, typename T> ALWAYS_INLINE void SetFieldPtrWithSize(MemberOffset field_offset, T new_value, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(pointer_size == 4 || pointer_size == 8) << pointer_size; if (pointer_size == 4) { intptr_t ptr = reinterpret_cast<intptr_t>(new_value); @@ -439,13 +439,13 @@ class MANAGED LOCKABLE Object { } } // TODO fix thread safety analysis broken by the use of template. This should be - // SHARED_LOCKS_REQUIRED(Locks::mutator_lock_). + // SHARED_REQUIRES(Locks::mutator_lock_). template <const bool kVisitClass, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, typename Visitor, typename JavaLangRefVisitor = VoidFunctor> void VisitReferences(const Visitor& visitor, const JavaLangRefVisitor& ref_visitor) NO_THREAD_SAFETY_ANALYSIS; - ArtField* FindFieldByOffset(MemberOffset offset) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ArtField* FindFieldByOffset(MemberOffset offset) SHARED_REQUIRES(Locks::mutator_lock_); // Used by object_test. 
static void SetHashCodeSeed(uint32_t new_seed); @@ -456,13 +456,13 @@ class MANAGED LOCKABLE Object { // Accessors for non-Java type fields template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false> T GetFieldPtr(MemberOffset field_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return GetFieldPtrWithSize<T, kVerifyFlags, kIsVolatile>(field_offset, sizeof(void*)); } template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false> ALWAYS_INLINE T GetFieldPtrWithSize(MemberOffset field_offset, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(pointer_size == 4 || pointer_size == 8) << pointer_size; if (pointer_size == 4) { return reinterpret_cast<T>(GetField32<kVerifyFlags, kIsVolatile>(field_offset)); @@ -480,25 +480,25 @@ class MANAGED LOCKABLE Object { NO_THREAD_SAFETY_ANALYSIS; template<bool kVisitClass, typename Visitor> void VisitInstanceFieldsReferences(mirror::Class* klass, const Visitor& visitor) HOT_ATTR - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<bool kVisitClass, typename Visitor> void VisitStaticFieldsReferences(mirror::Class* klass, const Visitor& visitor) HOT_ATTR - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); private: template<typename kSize, bool kIsVolatile> ALWAYS_INLINE void SetField(MemberOffset field_offset, kSize new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<typename kSize, bool kIsVolatile> ALWAYS_INLINE kSize GetField(MemberOffset field_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Verify the type correctness of stores to fields. // TODO: This can cause thread suspension and isn't moving GC safe. void CheckFieldAssignmentImpl(MemberOffset field_offset, Object* new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void CheckFieldAssignment(MemberOffset field_offset, Object* new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (kCheckFieldAssignments) { CheckFieldAssignmentImpl(field_offset, new_value); } @@ -509,7 +509,7 @@ class MANAGED LOCKABLE Object { // Class::CopyOf(). 
static Object* CopyObject(Thread* self, mirror::Object* dest, mirror::Object* src, size_t num_bytes) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static Atomic<uint32_t> hash_code_seed; diff --git a/runtime/mirror/object_array.h b/runtime/mirror/object_array.h index 5eddc18745..607b000488 100644 --- a/runtime/mirror/object_array.h +++ b/runtime/mirror/object_array.h @@ -32,21 +32,21 @@ class MANAGED ObjectArray: public Array { static ObjectArray<T>* Alloc(Thread* self, Class* object_array_class, int32_t length, gc::AllocatorType allocator_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); static ObjectArray<T>* Alloc(Thread* self, Class* object_array_class, int32_t length) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); - T* Get(int32_t i) ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + T* Get(int32_t i) ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_); // Returns true if the object can be stored into the array. If not, throws // an ArrayStoreException and returns false. - // TODO fix thread safety analysis: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_). + // TODO fix thread safety analysis: should be SHARED_REQUIRES(Locks::mutator_lock_). template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> bool CheckAssignable(T* object) NO_THREAD_SAFETY_ANALYSIS; - ALWAYS_INLINE void Set(int32_t i, T* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - // TODO fix thread safety analysis: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_). + ALWAYS_INLINE void Set(int32_t i, T* object) SHARED_REQUIRES(Locks::mutator_lock_); + // TODO fix thread safety analysis: should be SHARED_REQUIRES(Locks::mutator_lock_). template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> ALWAYS_INLINE void Set(int32_t i, T* object) NO_THREAD_SAFETY_ANALYSIS; @@ -54,37 +54,37 @@ class MANAGED ObjectArray: public Array { // Set element without bound and element type checks, to be used in limited // circumstances, such as during boot image writing. // TODO fix thread safety analysis broken by the use of template. This should be - // SHARED_LOCKS_REQUIRED(Locks::mutator_lock_). + // SHARED_REQUIRES(Locks::mutator_lock_). template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> ALWAYS_INLINE void SetWithoutChecks(int32_t i, T* object) NO_THREAD_SAFETY_ANALYSIS; // TODO fix thread safety analysis broken by the use of template. This should be - // SHARED_LOCKS_REQUIRED(Locks::mutator_lock_). + // SHARED_REQUIRES(Locks::mutator_lock_). template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> ALWAYS_INLINE void SetWithoutChecksAndWriteBarrier(int32_t i, T* object) NO_THREAD_SAFETY_ANALYSIS; - ALWAYS_INLINE T* GetWithoutChecks(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ALWAYS_INLINE T* GetWithoutChecks(int32_t i) SHARED_REQUIRES(Locks::mutator_lock_); // Copy src into this array (dealing with overlaps as memmove does) without assignability checks. 
void AssignableMemmove(int32_t dst_pos, ObjectArray<T>* src, int32_t src_pos, - int32_t count) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + int32_t count) SHARED_REQUIRES(Locks::mutator_lock_); // Copy src into this array assuming no overlap and without assignability checks. void AssignableMemcpy(int32_t dst_pos, ObjectArray<T>* src, int32_t src_pos, - int32_t count) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + int32_t count) SHARED_REQUIRES(Locks::mutator_lock_); // Copy src into this array with assignability checks. void AssignableCheckingMemcpy(int32_t dst_pos, ObjectArray<T>* src, int32_t src_pos, int32_t count, bool throw_exception) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ObjectArray<T>* CopyOf(Thread* self, int32_t new_length) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); // TODO fix thread safety analysis broken by the use of template. This should be - // SHARED_LOCKS_REQUIRED(Locks::mutator_lock_). + // SHARED_REQUIRES(Locks::mutator_lock_). template<const bool kVisitClass, typename Visitor> void VisitReferences(const Visitor& visitor) NO_THREAD_SAFETY_ANALYSIS; diff --git a/runtime/mirror/object_reference.h b/runtime/mirror/object_reference.h index 055be8524c..2a5c88e29f 100644 --- a/runtime/mirror/object_reference.h +++ b/runtime/mirror/object_reference.h @@ -33,11 +33,11 @@ class Object; template<bool kPoisonReferences, class MirrorType> class MANAGED ObjectReference { public: - MirrorType* AsMirrorPtr() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + MirrorType* AsMirrorPtr() const SHARED_REQUIRES(Locks::mutator_lock_) { return UnCompress(); } - void Assign(MirrorType* other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void Assign(MirrorType* other) SHARED_REQUIRES(Locks::mutator_lock_) { reference_ = Compress(other); } @@ -56,18 +56,18 @@ class MANAGED ObjectReference { protected: ObjectReference<kPoisonReferences, MirrorType>(MirrorType* mirror_ptr) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : reference_(Compress(mirror_ptr)) { } // Compress reference to its bit representation. - static uint32_t Compress(MirrorType* mirror_ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + static uint32_t Compress(MirrorType* mirror_ptr) SHARED_REQUIRES(Locks::mutator_lock_) { uintptr_t as_bits = reinterpret_cast<uintptr_t>(mirror_ptr); return static_cast<uint32_t>(kPoisonReferences ? -as_bits : as_bits); } // Uncompress an encoded reference from its bit representation. - MirrorType* UnCompress() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + MirrorType* UnCompress() const SHARED_REQUIRES(Locks::mutator_lock_) { uintptr_t as_bits = kPoisonReferences ? 
-reference_ : reference_; return reinterpret_cast<MirrorType*>(as_bits); } @@ -83,11 +83,11 @@ template<class MirrorType> class MANAGED HeapReference : public ObjectReference<kPoisonHeapReferences, MirrorType> { public: static HeapReference<MirrorType> FromMirrorPtr(MirrorType* mirror_ptr) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return HeapReference<MirrorType>(mirror_ptr); } private: - HeapReference<MirrorType>(MirrorType* mirror_ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + HeapReference<MirrorType>(MirrorType* mirror_ptr) SHARED_REQUIRES(Locks::mutator_lock_) : ObjectReference<kPoisonHeapReferences, MirrorType>(mirror_ptr) {} }; @@ -95,16 +95,16 @@ class MANAGED HeapReference : public ObjectReference<kPoisonHeapReferences, Mirr template<class MirrorType> class MANAGED CompressedReference : public mirror::ObjectReference<false, MirrorType> { public: - CompressedReference<MirrorType>() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + CompressedReference<MirrorType>() SHARED_REQUIRES(Locks::mutator_lock_) : mirror::ObjectReference<false, MirrorType>(nullptr) {} static CompressedReference<MirrorType> FromMirrorPtr(MirrorType* p) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return CompressedReference<MirrorType>(p); } private: - CompressedReference<MirrorType>(MirrorType* p) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + CompressedReference<MirrorType>(MirrorType* p) SHARED_REQUIRES(Locks::mutator_lock_) : mirror::ObjectReference<false, MirrorType>(p) {} }; diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc index 85ea28f9f5..f5a04457e7 100644 --- a/runtime/mirror/object_test.cc +++ b/runtime/mirror/object_test.cc @@ -48,7 +48,7 @@ class ObjectTest : public CommonRuntimeTest { const char* utf8_in, const char* utf16_expected_le, int32_t expected_hash) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { std::unique_ptr<uint16_t[]> utf16_expected(new uint16_t[expected_utf16_length]); for (int32_t i = 0; i < expected_utf16_length; i++) { uint16_t ch = (((utf16_expected_le[i*2 + 0] & 0xff) << 8) | diff --git a/runtime/mirror/reference.h b/runtime/mirror/reference.h index 4bbdb99553..51ae760515 100644 --- a/runtime/mirror/reference.h +++ b/runtime/mirror/reference.h @@ -62,49 +62,49 @@ class MANAGED Reference : public Object { return OFFSET_OF_OBJECT_MEMBER(Reference, referent_); } template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier> - Object* GetReferent() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Object* GetReferent() SHARED_REQUIRES(Locks::mutator_lock_) { return GetFieldObjectVolatile<Object, kDefaultVerifyFlags, kReadBarrierOption>( ReferentOffset()); } template<bool kTransactionActive> - void SetReferent(Object* referent) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetReferent(Object* referent) SHARED_REQUIRES(Locks::mutator_lock_) { SetFieldObjectVolatile<kTransactionActive>(ReferentOffset(), referent); } template<bool kTransactionActive> - void ClearReferent() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void ClearReferent() SHARED_REQUIRES(Locks::mutator_lock_) { SetFieldObjectVolatile<kTransactionActive>(ReferentOffset(), nullptr); } // Volatile read/write is not necessary since the java pending next is only accessed from // the java threads for cleared references. Once these cleared references have a null referent, // we never end up reading their pending next from the GC again. 
- Reference* GetPendingNext() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Reference* GetPendingNext() SHARED_REQUIRES(Locks::mutator_lock_) { return GetFieldObject<Reference>(PendingNextOffset()); } template<bool kTransactionActive> - void SetPendingNext(Reference* pending_next) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetPendingNext(Reference* pending_next) SHARED_REQUIRES(Locks::mutator_lock_) { SetFieldObject<kTransactionActive>(PendingNextOffset(), pending_next); } - bool IsEnqueued() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsEnqueued() SHARED_REQUIRES(Locks::mutator_lock_) { // Since the references are stored as cyclic lists it means that once enqueued, the pending // next is always non-null. return GetPendingNext() != nullptr; } - bool IsEnqueuable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsEnqueuable() SHARED_REQUIRES(Locks::mutator_lock_); template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier> - static Class* GetJavaLangRefReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + static Class* GetJavaLangRefReference() SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(!java_lang_ref_Reference_.IsNull()); return java_lang_ref_Reference_.Read<kReadBarrierOption>(); } static void SetClass(Class* klass); static void ResetClass(); - static void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_); private: // Note: This avoids a read barrier, it should only be used by the GC. - HeapReference<Object>* GetReferentReferenceAddr() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + HeapReference<Object>* GetReferentReferenceAddr() SHARED_REQUIRES(Locks::mutator_lock_) { return GetFieldObjectReferenceAddr<kDefaultVerifyFlags>(ReferentOffset()); } @@ -130,10 +130,10 @@ class MANAGED FinalizerReference : public Reference { } template<bool kTransactionActive> - void SetZombie(Object* zombie) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetZombie(Object* zombie) SHARED_REQUIRES(Locks::mutator_lock_) { return SetFieldObjectVolatile<kTransactionActive>(ZombieOffset(), zombie); } - Object* GetZombie() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Object* GetZombie() SHARED_REQUIRES(Locks::mutator_lock_) { return GetFieldObjectVolatile<Object>(ZombieOffset()); } diff --git a/runtime/mirror/stack_trace_element.h b/runtime/mirror/stack_trace_element.h index dc7131e46e..1167391e23 100644 --- a/runtime/mirror/stack_trace_element.h +++ b/runtime/mirror/stack_trace_element.h @@ -31,32 +31,32 @@ namespace mirror { // C++ mirror of java.lang.StackTraceElement class MANAGED StackTraceElement FINAL : public Object { public: - String* GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + String* GetDeclaringClass() SHARED_REQUIRES(Locks::mutator_lock_) { return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, declaring_class_)); } - String* GetMethodName() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + String* GetMethodName() SHARED_REQUIRES(Locks::mutator_lock_) { return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, method_name_)); } - String* GetFileName() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + String* GetFileName() SHARED_REQUIRES(Locks::mutator_lock_) { return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, file_name_)); } - int32_t GetLineNumber() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + int32_t GetLineNumber() SHARED_REQUIRES(Locks::mutator_lock_) { 
return GetField32(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, line_number_)); } static StackTraceElement* Alloc(Thread* self, Handle<String> declaring_class, Handle<String> method_name, Handle<String> file_name, int32_t line_number) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); static void SetClass(Class* java_lang_StackTraceElement); static void ResetClass(); static void VisitRoots(RootVisitor* visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static Class* GetStackTraceElement() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_); + static Class* GetStackTraceElement() SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(!java_lang_StackTraceElement_.IsNull()); return java_lang_StackTraceElement_.Read(); } @@ -71,7 +71,7 @@ class MANAGED StackTraceElement FINAL : public Object { template<bool kTransactionActive> void Init(Handle<String> declaring_class, Handle<String> method_name, Handle<String> file_name, int32_t line_number) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static GcRoot<Class> java_lang_StackTraceElement_; diff --git a/runtime/mirror/string-inl.h b/runtime/mirror/string-inl.h index b689057426..3a39f587da 100644 --- a/runtime/mirror/string-inl.h +++ b/runtime/mirror/string-inl.h @@ -42,7 +42,7 @@ class SetStringCountVisitor { } void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // Avoid AsString as object is not yet in live bitmap or allocation stack. String* string = down_cast<String*>(obj); string->SetCount(count_); @@ -61,7 +61,7 @@ class SetStringCountAndBytesVisitor { } void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // Avoid AsString as object is not yet in live bitmap or allocation stack. String* string = down_cast<String*>(obj); string->SetCount(count_); @@ -88,7 +88,7 @@ class SetStringCountAndValueVisitorFromCharArray { } void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // Avoid AsString as object is not yet in live bitmap or allocation stack. String* string = down_cast<String*>(obj); string->SetCount(count_); @@ -111,7 +111,7 @@ class SetStringCountAndValueVisitorFromString { } void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // Avoid AsString as object is not yet in live bitmap or allocation stack. 
String* string = down_cast<String*>(obj); string->SetCount(count_); diff --git a/runtime/mirror/string.h b/runtime/mirror/string.h index af06385401..eb2e1f6977 100644 --- a/runtime/mirror/string.h +++ b/runtime/mirror/string.h @@ -49,87 +49,87 @@ class MANAGED String FINAL : public Object { return OFFSET_OF_OBJECT_MEMBER(String, value_); } - uint16_t* GetValue() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + uint16_t* GetValue() SHARED_REQUIRES(Locks::mutator_lock_) { return &value_[0]; } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - size_t SizeOf() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + size_t SizeOf() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - int32_t GetLength() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + int32_t GetLength() SHARED_REQUIRES(Locks::mutator_lock_) { return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(String, count_)); } - void SetCount(int32_t new_count) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetCount(int32_t new_count) SHARED_REQUIRES(Locks::mutator_lock_) { // Count is invariant so use non-transactional mode. Also disable check as we may run inside // a transaction. DCHECK_LE(0, new_count); SetField32<false, false>(OFFSET_OF_OBJECT_MEMBER(String, count_), new_count); } - int32_t GetHashCode() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + int32_t GetHashCode() SHARED_REQUIRES(Locks::mutator_lock_); // Computes, stores, and returns the hash code. - int32_t ComputeHashCode() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + int32_t ComputeHashCode() SHARED_REQUIRES(Locks::mutator_lock_); - int32_t GetUtfLength() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + int32_t GetUtfLength() SHARED_REQUIRES(Locks::mutator_lock_); - uint16_t CharAt(int32_t index) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + uint16_t CharAt(int32_t index) SHARED_REQUIRES(Locks::mutator_lock_); - void SetCharAt(int32_t index, uint16_t c) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetCharAt(int32_t index, uint16_t c) SHARED_REQUIRES(Locks::mutator_lock_); - String* Intern() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + String* Intern() SHARED_REQUIRES(Locks::mutator_lock_); template <bool kIsInstrumented, typename PreFenceVisitor> ALWAYS_INLINE static String* Alloc(Thread* self, int32_t utf16_length, gc::AllocatorType allocator_type, const PreFenceVisitor& pre_fence_visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); template <bool kIsInstrumented> ALWAYS_INLINE static String* AllocFromByteArray(Thread* self, int32_t byte_length, Handle<ByteArray> array, int32_t offset, int32_t high_byte, gc::AllocatorType allocator_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); template <bool kIsInstrumented> ALWAYS_INLINE static String* AllocFromCharArray(Thread* self, int32_t count, Handle<CharArray> array, int32_t offset, gc::AllocatorType allocator_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); template <bool kIsInstrumented> ALWAYS_INLINE static String* AllocFromString(Thread* self, int32_t string_length, Handle<String> string, int32_t offset, gc::AllocatorType allocator_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); static String* AllocFromStrings(Thread* self, Handle<String> 
string, Handle<String> string2) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); static String* AllocFromUtf16(Thread* self, int32_t utf16_length, const uint16_t* utf16_data_in) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); static String* AllocFromModifiedUtf8(Thread* self, const char* utf) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); static String* AllocFromModifiedUtf8(Thread* self, int32_t utf16_length, const char* utf8_data_in) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); // TODO: This is only used in the interpreter to compare against // entries from a dex files constant pool (ArtField names). Should // we unify this with Equals(const StringPiece&); ? - bool Equals(const char* modified_utf8) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool Equals(const char* modified_utf8) SHARED_REQUIRES(Locks::mutator_lock_); // TODO: This is only used to compare DexCache.location with // a dex_file's location (which is an std::string). Do we really // need this in mirror::String just for that one usage ? bool Equals(const StringPiece& modified_utf8) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - bool Equals(String* that) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool Equals(String* that) SHARED_REQUIRES(Locks::mutator_lock_); // Compare UTF-16 code point values not in a locale-sensitive manner int Compare(int32_t utf16_length, const char* utf8_data_in); @@ -137,21 +137,22 @@ class MANAGED String FINAL : public Object { // TODO: do we need this overload? give it a more intention-revealing name. bool Equals(const uint16_t* that_chars, int32_t that_offset, int32_t that_length) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Create a modified UTF-8 encoded std::string from a java/lang/String object. 
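// The allocating entry points above (the String::Alloc* family, and
// StackTraceElement::Alloc earlier) additionally gain
// REQUIRES(!Roles::uninterruptible_): allocation may have to suspend the
// thread for GC, so it must not be reached from a region marked
// uninterruptible. A role like this can be modelled as a capability that is
// never contended, only asserted. Sketch with assumed names; this is not
// ART's actual Roles definition.
#include <cstddef>

class __attribute__((capability("role"))) Role {
 public:
  void Acquire() __attribute__((acquire_capability()));
  void Release() __attribute__((release_capability()));
};

struct Roles {
  static Role uninterruptible_;  // assumed stand-in for ART's Roles::uninterruptible_
};

// Mirrors the REQUIRES(!Roles::uninterruptible_) clauses above: allocation is
// only legal outside uninterruptible regions, and that is now checkable like
// any other negative capability.
void* AllocObjectSketch(size_t num_bytes)
    __attribute__((requires_capability(!Roles::uninterruptible_)));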
- std::string ToModifiedUtf8() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string ToModifiedUtf8() SHARED_REQUIRES(Locks::mutator_lock_); - int32_t FastIndexOf(int32_t ch, int32_t start) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + int32_t FastIndexOf(int32_t ch, int32_t start) SHARED_REQUIRES(Locks::mutator_lock_); - int32_t CompareTo(String* other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + int32_t CompareTo(String* other) SHARED_REQUIRES(Locks::mutator_lock_); - CharArray* ToCharArray(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + CharArray* ToCharArray(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Roles::uninterruptible_); void GetChars(int32_t start, int32_t end, Handle<CharArray> array, int32_t index) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - static Class* GetJavaLangString() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + static Class* GetJavaLangString() SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(!java_lang_String_.IsNull()); return java_lang_String_.Read(); } @@ -159,10 +160,10 @@ class MANAGED String FINAL : public Object { static void SetClass(Class* java_lang_String); static void ResetClass(); static void VisitRoots(RootVisitor* visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); private: - void SetHashCode(int32_t new_hash_code) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetHashCode(int32_t new_hash_code) SHARED_REQUIRES(Locks::mutator_lock_) { // Hash code is invariant so use non-transactional mode. Also disable check as we may run inside // a transaction. DCHECK_EQ(0, GetField32(OFFSET_OF_OBJECT_MEMBER(String, hash_code_))); diff --git a/runtime/mirror/throwable.cc b/runtime/mirror/throwable.cc index 1c21edbc42..e8633def48 100644 --- a/runtime/mirror/throwable.cc +++ b/runtime/mirror/throwable.cc @@ -53,7 +53,7 @@ void Throwable::SetCause(Throwable* cause) { } } -void Throwable::SetStackState(Object* state) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +void Throwable::SetStackState(Object* state) SHARED_REQUIRES(Locks::mutator_lock_) { CHECK(state != nullptr); if (Runtime::Current()->IsActiveTransaction()) { SetFieldObjectVolatile<true>(OFFSET_OF_OBJECT_MEMBER(Throwable, stack_state_), state); diff --git a/runtime/mirror/throwable.h b/runtime/mirror/throwable.h index 9cc0b6f5c4..0f488dc46a 100644 --- a/runtime/mirror/throwable.h +++ b/runtime/mirror/throwable.h @@ -31,38 +31,38 @@ namespace mirror { // C++ mirror of java.lang.Throwable class MANAGED Throwable : public Object { public: - void SetDetailMessage(String* new_detail_message) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetDetailMessage(String* new_detail_message) SHARED_REQUIRES(Locks::mutator_lock_); - String* GetDetailMessage() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + String* GetDetailMessage() SHARED_REQUIRES(Locks::mutator_lock_) { return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(Throwable, detail_message_)); } - std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() SHARED_REQUIRES(Locks::mutator_lock_); // This is a runtime version of initCause, you shouldn't use it if initCause may have been // overridden. Also it asserts rather than throwing exceptions. Currently this is only used // in cases like the verifier where the checks cannot fail and initCause isn't overridden. 
- void SetCause(Throwable* cause) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void SetStackState(Object* state) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool IsCheckedException() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetCause(Throwable* cause) SHARED_REQUIRES(Locks::mutator_lock_); + void SetStackState(Object* state) SHARED_REQUIRES(Locks::mutator_lock_); + bool IsCheckedException() SHARED_REQUIRES(Locks::mutator_lock_); - static Class* GetJavaLangThrowable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + static Class* GetJavaLangThrowable() SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(!java_lang_Throwable_.IsNull()); return java_lang_Throwable_.Read(); } - int32_t GetStackDepth() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + int32_t GetStackDepth() SHARED_REQUIRES(Locks::mutator_lock_); static void SetClass(Class* java_lang_Throwable); static void ResetClass(); static void VisitRoots(RootVisitor* visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); private: - Object* GetStackState() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Object* GetStackState() SHARED_REQUIRES(Locks::mutator_lock_) { return GetFieldObjectVolatile<Object>(OFFSET_OF_OBJECT_MEMBER(Throwable, stack_state_)); } - Object* GetStackTrace() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Object* GetStackTrace() SHARED_REQUIRES(Locks::mutator_lock_) { return GetFieldObjectVolatile<Object>(OFFSET_OF_OBJECT_MEMBER(Throwable, stack_trace_)); } diff --git a/runtime/monitor.cc b/runtime/monitor.cc index fd9c1b17e1..da6ee259b5 100644 --- a/runtime/monitor.cc +++ b/runtime/monitor.cc @@ -298,7 +298,7 @@ static void ThrowIllegalMonitorStateExceptionF(const char* fmt, ...) __attribute__((format(printf, 1, 2))); static void ThrowIllegalMonitorStateExceptionF(const char* fmt, ...) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { va_list args; va_start(args, fmt); Thread* self = Thread::Current(); @@ -667,11 +667,9 @@ void Monitor::InflateThinLocked(Thread* self, Handle<mirror::Object> obj, LockWo // Suspend the owner, inflate. First change to blocked and give up mutator_lock_. self->SetMonitorEnterObject(obj.Get()); bool timed_out; - Thread* owner; - { - ScopedThreadStateChange tsc(self, kBlocked); - owner = thread_list->SuspendThreadByThreadId(owner_thread_id, false, &timed_out); - } + self->TransitionFromRunnableToSuspended(kBlocked); + Thread* owner = thread_list->SuspendThreadByThreadId(owner_thread_id, false, &timed_out); + self->TransitionFromSuspendedToRunnable(); if (owner != nullptr) { // We succeeded in suspending the thread, check the lock's status didn't change. 
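// In Monitor::InflateThinLocked above, the ScopedThreadStateChange block
// around SuspendThreadByThreadId is replaced by explicit
// TransitionFromRunnableToSuspended / TransitionFromSuspendedToRunnable calls.
// Presumably this makes the temporary release of the shared mutator lock
// visible to the thread-safety analysis at the call site rather than hidden
// inside the scoped helper; that motivation is an inference, not stated in the
// diff. Sketch of the pattern with assumed names and annotations:
class __attribute__((capability("mutex"))) MutatorLockSketch {};
MutatorLockSketch mutator_lock_sketch;

void TransitionToSuspendedSketch()
    __attribute__((release_shared_capability(mutator_lock_sketch)));
void TransitionToRunnableSketch()
    __attribute__((acquire_shared_capability(mutator_lock_sketch)));
void BlockingSuspendOtherThreadSketch();

void InflateSketch()
    __attribute__((requires_shared_capability(mutator_lock_sketch))) {
  TransitionToSuspendedSketch();        // analysis: shared mutator lock dropped
  BlockingSuspendOtherThreadSketch();   // safe to block while suspended
  TransitionToRunnableSketch();         // analysis: shared mutator lock re-held
}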
lock_word = obj->GetLockWord(true); @@ -1083,7 +1081,7 @@ bool Monitor::IsValidLockWord(LockWord lock_word) { } } -bool Monitor::IsLocked() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +bool Monitor::IsLocked() SHARED_REQUIRES(Locks::mutator_lock_) { MutexLock mu(Thread::Current(), monitor_lock_); return owner_ != nullptr; } @@ -1189,7 +1187,7 @@ class MonitorDeflateVisitor : public IsMarkedVisitor { MonitorDeflateVisitor() : self_(Thread::Current()), deflate_count_(0) {} virtual mirror::Object* IsMarked(mirror::Object* object) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (Monitor::Deflate(self_, object)) { DCHECK_NE(object->GetLockWord(true).GetState(), LockWord::kFatLocked); ++deflate_count_; diff --git a/runtime/monitor.h b/runtime/monitor.h index 09a6cb6ffe..3ca8954308 100644 --- a/runtime/monitor.h +++ b/runtime/monitor.h @@ -62,34 +62,36 @@ class Monitor { static uint32_t GetLockOwnerThreadId(mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS; // TODO: Reading lock owner without holding lock is racy. + // NO_THREAD_SAFETY_ANALYSIS for mon->Lock. static mirror::Object* MonitorEnter(Thread* thread, mirror::Object* obj) EXCLUSIVE_LOCK_FUNCTION(obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS; + + // NO_THREAD_SAFETY_ANALYSIS for mon->Unlock. static bool MonitorExit(Thread* thread, mirror::Object* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - UNLOCK_FUNCTION(obj); + SHARED_REQUIRES(Locks::mutator_lock_) + UNLOCK_FUNCTION(obj) NO_THREAD_SAFETY_ANALYSIS; - static void Notify(Thread* self, mirror::Object* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + static void Notify(Thread* self, mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) { DoNotify(self, obj, false); } - static void NotifyAll(Thread* self, mirror::Object* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + static void NotifyAll(Thread* self, mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) { DoNotify(self, obj, true); } // Object.wait(). Also called for class init. + // NO_THREAD_SAFETY_ANALYSIS for mon->Wait. static void Wait(Thread* self, mirror::Object* obj, int64_t ms, int32_t ns, bool interruptShouldThrow, ThreadState why) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS; static void DescribeWait(std::ostream& os, const Thread* thread) - LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::thread_suspend_count_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Used to implement JDWP's ThreadReference.CurrentContendedMonitor. static mirror::Object* GetContendedMonitor(Thread* thread) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Calls 'callback' once for each lock held in the single stack frame represented by // the current state of 'stack_visitor'. @@ -97,12 +99,12 @@ class Monitor { // is necessary when we have already aborted but want to dump the stack as much as we can. 
static void VisitLocks(StackVisitor* stack_visitor, void (*callback)(mirror::Object*, void*), void* callback_context, bool abort_on_failure = true) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static bool IsValidLockWord(LockWord lock_word); template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier> - mirror::Object* GetObject() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Object* GetObject() SHARED_REQUIRES(Locks::mutator_lock_) { return obj_.Read<kReadBarrierOption>(); } @@ -114,7 +116,7 @@ class Monitor { int32_t GetHashCode(); - bool IsLocked() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsLocked() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!monitor_lock_); bool HasHashCode() const { return hash_code_.LoadRelaxed() != 0; @@ -126,12 +128,13 @@ class Monitor { // Inflate the lock on obj. May fail to inflate for spurious reasons, always re-check. static void InflateThinLocked(Thread* self, Handle<mirror::Object> obj, LockWord lock_word, - uint32_t hash_code) NO_THREAD_SAFETY_ANALYSIS; + uint32_t hash_code) SHARED_REQUIRES(Locks::mutator_lock_); + // Not exclusive because ImageWriter calls this during a Heap::VisitObjects() that + // does not allow a thread suspension in the middle. TODO: maybe make this exclusive. + // NO_THREAD_SAFETY_ANALYSIS for monitor->monitor_lock_. static bool Deflate(Thread* self, mirror::Object* obj) - // Not exclusive because ImageWriter calls this during a Heap::VisitObjects() that - // does not allow a thread suspension in the middle. TODO: maybe make this exclusive. - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS; #ifndef __LP64__ void* operator new(size_t size) { @@ -149,57 +152,58 @@ class Monitor { private: explicit Monitor(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); explicit Monitor(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code, - MonitorId id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + MonitorId id) SHARED_REQUIRES(Locks::mutator_lock_); // Install the monitor into its object, may fail if another thread installs a different monitor // first. bool Install(Thread* self) - LOCKS_EXCLUDED(monitor_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!monitor_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Links a thread into a monitor's wait set. The monitor lock must be held by the caller of this // routine. - void AppendToWaitSet(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(monitor_lock_); + void AppendToWaitSet(Thread* thread) REQUIRES(monitor_lock_); // Unlinks a thread from a monitor's wait set. The monitor lock must be held by the caller of // this routine. - void RemoveFromWaitSet(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(monitor_lock_); + void RemoveFromWaitSet(Thread* thread) REQUIRES(monitor_lock_); // Changes the shape of a monitor from thin to fat, preserving the internal lock state. The // calling thread must own the lock or the owner must be suspended. There's a race with other // threads inflating the lock, installing hash codes and spurious failures. The caller should // re-read the lock word following the call. 
static void Inflate(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + NO_THREAD_SAFETY_ANALYSIS; // For m->Install(self) void LogContentionEvent(Thread* self, uint32_t wait_ms, uint32_t sample_percent, const char* owner_filename, uint32_t owner_line_number) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void FailedUnlock(mirror::Object* obj, Thread* expected_owner, Thread* found_owner, Monitor* mon) - LOCKS_EXCLUDED(Locks::thread_list_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::thread_list_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); void Lock(Thread* self) - LOCKS_EXCLUDED(monitor_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!monitor_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); bool Unlock(Thread* thread) - LOCKS_EXCLUDED(monitor_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!monitor_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); static void DoNotify(Thread* self, mirror::Object* obj, bool notify_all) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS; // For mon->Notify. void Notify(Thread* self) - LOCKS_EXCLUDED(monitor_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!monitor_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); void NotifyAll(Thread* self) - LOCKS_EXCLUDED(monitor_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!monitor_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Wait on a monitor until timeout, interrupt, or notification. Used for Object.wait() and @@ -222,15 +226,15 @@ class Monitor { // Since we're allowed to wake up "early", we clamp extremely long durations to return at the end // of the 32-bit time epoch. void Wait(Thread* self, int64_t msec, int32_t nsec, bool interruptShouldThrow, ThreadState why) - LOCKS_EXCLUDED(monitor_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!monitor_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Translates the provided method and pc into its declaring class' source file and line number. 
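// Several declarations in monitor.h above pair NO_THREAD_SAFETY_ANALYSIS with
// a comment naming the lock it stands in for ("for mon->Lock", "for
// mon->Wait", "for monitor->monitor_lock_"): the lock being taken belongs to
// whichever Monitor the object resolves to at runtime, and the static analysis
// has no way to name that capability in an annotation. Sketch of the pattern;
// the types and helper below are illustrative assumptions.
#define NO_THREAD_SAFETY_ANALYSIS_SKETCH __attribute__((no_thread_safety_analysis))

class __attribute__((capability("mutex"))) InstanceLockSketch {
 public:
  void Lock() __attribute__((acquire_capability()));
  void Unlock() __attribute__((release_capability()));
};

struct MonitorSketch {
  InstanceLockSketch lock_;
};

MonitorSketch* LookupMonitorSketch(void* obj);  // which monitor? runtime-only

// "NO_THREAD_SAFETY_ANALYSIS for mon->Lock": the requirement is documented in
// the comment because the analysis cannot express "the lock of whatever
// monitor LookupMonitorSketch() returns".
void MonitorEnterSketch(void* obj) NO_THREAD_SAFETY_ANALYSIS_SKETCH {
  LookupMonitorSketch(obj)->lock_.Lock();
}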
void TranslateLocation(ArtMethod* method, uint32_t pc, const char** source_file, uint32_t* line_number) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - uint32_t GetOwnerThreadId(); + uint32_t GetOwnerThreadId() REQUIRES(!monitor_lock_); static bool (*is_sensitive_thread_hook_)(); static uint32_t lock_profiling_threshold_; @@ -285,17 +289,16 @@ class MonitorList { MonitorList(); ~MonitorList(); - void Add(Monitor* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void Add(Monitor* m) SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!monitor_list_lock_); void SweepMonitorList(IsMarkedVisitor* visitor) - LOCKS_EXCLUDED(monitor_list_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void DisallowNewMonitors() LOCKS_EXCLUDED(monitor_list_lock_); - void AllowNewMonitors() LOCKS_EXCLUDED(monitor_list_lock_); - void EnsureNewMonitorsDisallowed() LOCKS_EXCLUDED(monitor_list_lock_); - void BroadcastForNewMonitors() LOCKS_EXCLUDED(monitor_list_lock_); + REQUIRES(!monitor_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_); + void DisallowNewMonitors() REQUIRES(!monitor_list_lock_); + void AllowNewMonitors() REQUIRES(!monitor_list_lock_); + void EnsureNewMonitorsDisallowed() REQUIRES(!monitor_list_lock_); + void BroadcastForNewMonitors() REQUIRES(!monitor_list_lock_); // Returns how many monitors were deflated. - size_t DeflateMonitors() LOCKS_EXCLUDED(monitor_list_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); + size_t DeflateMonitors() REQUIRES(!monitor_list_lock_) REQUIRES(Locks::mutator_lock_); typedef std::list<Monitor*, TrackingAllocator<Monitor*, kAllocatorTagMonitorList>> Monitors; @@ -318,7 +321,7 @@ class MonitorList { // For use only by the JDWP implementation. class MonitorInfo { public: - explicit MonitorInfo(mirror::Object* o) EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); + explicit MonitorInfo(mirror::Object* o) REQUIRES(Locks::mutator_lock_); Thread* owner_; size_t entry_count_; diff --git a/runtime/monitor_pool.cc b/runtime/monitor_pool.cc index 4a364cab62..2832e32dd1 100644 --- a/runtime/monitor_pool.cc +++ b/runtime/monitor_pool.cc @@ -90,7 +90,7 @@ void MonitorPool::AllocateChunk() { Monitor* MonitorPool::CreateMonitorInPool(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // We are gonna allocate, so acquire the writer lock. MutexLock mu(self, *Locks::allocated_monitor_ids_lock_); diff --git a/runtime/monitor_pool.h b/runtime/monitor_pool.h index 4ab4e86ac4..240ca61641 100644 --- a/runtime/monitor_pool.h +++ b/runtime/monitor_pool.h @@ -43,7 +43,7 @@ class MonitorPool { } static Monitor* CreateMonitor(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { #ifndef __LP64__ Monitor* mon = new Monitor(self, owner, obj, hash_code); DCHECK_ALIGNED(mon, LockWord::kMonitorIdAlignment); @@ -110,10 +110,10 @@ class MonitorPool { // analysis. 
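// The MonitorList conversions just above (Add, SweepMonitorList,
// DisallowNewMonitors, ... now REQUIRES(!monitor_list_lock_)) also show why
// this change touches so many declarations at once: a negative requirement is
// only satisfied when the caller can prove it, so callers in turn need their
// own REQUIRES(!lock) clause and the annotation ripples outward through the
// call graph. Sketch of one level of that propagation, with assumed names:
class __attribute__((capability("mutex"))) ListLockSketch {
 public:
  void Lock() __attribute__((acquire_capability()));
  void Unlock() __attribute__((release_capability()));
};

ListLockSketch monitor_list_lock_sketch;

void AddMonitorSketch()
    __attribute__((requires_capability(!monitor_list_lock_sketch))) {
  monitor_list_lock_sketch.Lock();
  // ... append to the list ...
  monitor_list_lock_sketch.Unlock();
}

// With -Wthread-safety-negative, this caller must also advertise that it does
// not hold the lock; without that annotation, the call to AddMonitorSketch()
// is flagged.
void CreateAndRegisterMonitorSketch()
    __attribute__((requires_capability(!monitor_list_lock_sketch))) {
  AddMonitorSketch();
}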
MonitorPool() NO_THREAD_SAFETY_ANALYSIS; - void AllocateChunk() EXCLUSIVE_LOCKS_REQUIRED(Locks::allocated_monitor_ids_lock_); + void AllocateChunk() REQUIRES(Locks::allocated_monitor_ids_lock_); Monitor* CreateMonitorInPool(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void ReleaseMonitorToPool(Thread* self, Monitor* monitor); void ReleaseMonitorsToPool(Thread* self, MonitorList::Monitors* monitors); diff --git a/runtime/monitor_test.cc b/runtime/monitor_test.cc index 2a29c60a13..1be637c8a5 100644 --- a/runtime/monitor_test.cc +++ b/runtime/monitor_test.cc @@ -60,7 +60,7 @@ static const size_t kMaxHandles = 1000000; // Use arbitrary large amount for no static void FillHeap(Thread* self, ClassLinker* class_linker, std::unique_ptr<StackHandleScope<kMaxHandles>>* hsp, std::vector<MutableHandle<mirror::Object>>* handles) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { Runtime::Current()->GetHeap()->SetIdealFootprint(1 * GB); hsp->reset(new StackHandleScope<kMaxHandles>(self)); diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc index 4f97d20d6c..1b210bb4c3 100644 --- a/runtime/native/dalvik_system_DexFile.cc +++ b/runtime/native/dalvik_system_DexFile.cc @@ -17,6 +17,7 @@ #include "dalvik_system_DexFile.h" #include "base/logging.h" +#include "base/out.h" #include "base/stl_util.h" #include "base/stringprintf.h" #include "class_linker.h" @@ -164,7 +165,8 @@ static jobject DexFile_openDexFileNative( std::vector<std::unique_ptr<const DexFile>> dex_files; std::vector<std::string> error_msgs; - dex_files = linker->OpenDexFilesFromOat(sourceName.c_str(), outputName.c_str(), &error_msgs); + dex_files = + linker->OpenDexFilesFromOat(sourceName.c_str(), outputName.c_str(), outof(error_msgs)); if (!dex_files.empty()) { jlongArray array = ConvertNativeToJavaArray(env, dex_files); diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc index 5dd354d4d6..9ea339a6bd 100644 --- a/runtime/native/dalvik_system_VMRuntime.cc +++ b/runtime/native/dalvik_system_VMRuntime.cc @@ -16,7 +16,7 @@ #include "dalvik_system_VMRuntime.h" -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ extern "C" void android_set_application_target_sdk_version(uint32_t version); #endif #include <limits.h> @@ -196,7 +196,7 @@ static void VMRuntime_setTargetSdkVersionNative(JNIEnv*, jobject, jint target_sd // Note that targetSdkVersion may be 0, meaning "current". Runtime::Current()->SetTargetSdkVersion(target_sdk_version); -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ // This part is letting libc/dynamic linker know about current app's // target sdk version to enable compatibility workarounds. android_set_application_target_sdk_version(static_cast<uint32_t>(target_sdk_version)); @@ -262,7 +262,7 @@ class PreloadDexCachesStringsVisitor : public SingleRootVisitor { explicit PreloadDexCachesStringsVisitor(StringTable* table) : table_(table) { } void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { mirror::String* string = root->AsString(); table_->operator[](string->ToModifiedUtf8()) = string; } @@ -274,7 +274,7 @@ class PreloadDexCachesStringsVisitor : public SingleRootVisitor { // Based on ClassLinker::ResolveString. 
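// dalvik_system_DexFile.cc above switches the error-message out-parameter to
// the new base/out.h helper: call sites wrap the argument in outof(...) so the
// mutation is visible where the call is made. A much simplified sketch of such
// a wrapper; the interface shown here is an assumption, not a copy of the real
// out<T> in base/out.h.
#include <string>
#include <vector>

template <typename T>
class out {  // simplified; the real out<T> carries extra checking
 public:
  explicit out(T& param) : ptr_(&param) {}
  T& operator*() const { return *ptr_; }
  T* operator->() const { return ptr_; }
 private:
  T* ptr_;
};

template <typename T>
out<T> outof(T& param) {
  return out<T>(param);
}

// A callee takes out<...> instead of a raw pointer...
void CollectErrorsSketch(out<std::vector<std::string>> error_msgs) {
  error_msgs->push_back("example error");
}

// ...and the call site marks the out-parameter explicitly:
void CallerSketch() {
  std::vector<std::string> error_msgs;
  CollectErrorsSketch(outof(error_msgs));
}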
static void PreloadDexCachesResolveString( Handle<mirror::DexCache> dex_cache, uint32_t string_idx, StringTable& strings) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { mirror::String* string = dex_cache->GetResolvedString(string_idx); if (string != nullptr) { return; @@ -292,7 +292,7 @@ static void PreloadDexCachesResolveString( // Based on ClassLinker::ResolveType. static void PreloadDexCachesResolveType( Thread* self, mirror::DexCache* dex_cache, uint32_t type_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { mirror::Class* klass = dex_cache->GetResolvedType(type_idx); if (klass != nullptr) { return; @@ -321,7 +321,7 @@ static void PreloadDexCachesResolveType( // Based on ClassLinker::ResolveField. static void PreloadDexCachesResolveField(Handle<mirror::DexCache> dex_cache, uint32_t field_idx, bool is_static) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ArtField* field = dex_cache->GetResolvedField(field_idx, sizeof(void*)); if (field != nullptr) { return; @@ -349,7 +349,7 @@ static void PreloadDexCachesResolveField(Handle<mirror::DexCache> dex_cache, uin // Based on ClassLinker::ResolveMethod. static void PreloadDexCachesResolveMethod(Handle<mirror::DexCache> dex_cache, uint32_t method_idx, InvokeType invoke_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod* method = dex_cache->GetResolvedMethod(method_idx, sizeof(void*)); if (method != nullptr) { return; @@ -423,7 +423,7 @@ static void PreloadDexCachesStatsTotal(DexCacheStats* total) { } static void PreloadDexCachesStatsFilled(DexCacheStats* filled) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (!kPreloadDexCachesCollectStats) { return; } diff --git a/runtime/native/dalvik_system_VMStack.cc b/runtime/native/dalvik_system_VMStack.cc index ee62755ae4..541eeb13c9 100644 --- a/runtime/native/dalvik_system_VMStack.cc +++ b/runtime/native/dalvik_system_VMStack.cc @@ -29,7 +29,7 @@ namespace art { static jobject GetThreadStack(const ScopedFastNativeObjectAccess& soa, jobject peer) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { jobject trace = nullptr; if (soa.Decode<mirror::Object*>(peer) == soa.Self()->GetPeer()) { trace = soa.Self()->CreateInternalStackTrace<false>(soa); @@ -87,7 +87,7 @@ static jobject VMStack_getClosestUserClassLoader(JNIEnv* env, jclass) { : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames), class_loader(nullptr) {} - bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(class_loader == nullptr); mirror::Class* c = GetMethod()->GetDeclaringClass(); // c is null for runtime methods. 
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc index a41aed6f29..eddb2d16b2 100644 --- a/runtime/native/java_lang_Class.cc +++ b/runtime/native/java_lang_Class.cc @@ -41,7 +41,7 @@ namespace art { ALWAYS_INLINE static inline mirror::Class* DecodeClass( const ScopedFastNativeObjectAccess& soa, jobject java_class) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { mirror::Class* c = soa.Decode<mirror::Class*>(java_class); DCHECK(c != nullptr); DCHECK(c->IsClass()); @@ -108,7 +108,7 @@ static jobjectArray Class_getProxyInterfaces(JNIEnv* env, jobject javaThis) { static mirror::ObjectArray<mirror::Field>* GetDeclaredFields( Thread* self, mirror::Class* klass, bool public_only, bool force_resolve) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { StackHandleScope<1> hs(self); auto* ifields = klass->GetIFields(); auto* sfields = klass->GetSFields(); @@ -189,7 +189,7 @@ static jobjectArray Class_getPublicDeclaredFields(JNIEnv* env, jobject javaThis) // fast. ALWAYS_INLINE static inline ArtField* FindFieldByName( Thread* self ATTRIBUTE_UNUSED, mirror::String* name, ArtField* fields, size_t num_fields) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { size_t low = 0; size_t high = num_fields; const uint16_t* const data = name->GetValue(); @@ -218,7 +218,7 @@ ALWAYS_INLINE static inline ArtField* FindFieldByName( ALWAYS_INLINE static inline mirror::Field* GetDeclaredField( Thread* self, mirror::Class* c, mirror::String* name) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { auto* instance_fields = c->GetIFields(); auto* art_field = FindFieldByName(self, name, instance_fields, c->NumInstanceFields()); if (art_field != nullptr) { @@ -274,7 +274,7 @@ static jobject Class_getDeclaredConstructorInternal( } static ALWAYS_INLINE inline bool MethodMatchesConstructor(ArtMethod* m, bool public_only) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(m != nullptr); return (!public_only || m->IsPublic()) && !m->IsStatic() && m->IsConstructor(); } diff --git a/runtime/native/java_lang_Runtime.cc b/runtime/native/java_lang_Runtime.cc index abac8153b3..856a3e7d01 100644 --- a/runtime/native/java_lang_Runtime.cc +++ b/runtime/native/java_lang_Runtime.cc @@ -31,10 +31,10 @@ #include "verify_object-inl.h" #include <sstream> -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ // This function is provided by android linker. 
extern "C" void android_update_LD_LIBRARY_PATH(const char* ld_library_path); -#endif // HAVE_ANDROID_OS +#endif // __ANDROID__ namespace art { @@ -53,7 +53,7 @@ NO_RETURN static void Runtime_nativeExit(JNIEnv*, jclass, jint status) { } static void SetLdLibraryPath(JNIEnv* env, jstring javaLdLibraryPathJstr) { -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ if (javaLdLibraryPathJstr != nullptr) { ScopedUtfChars ldLibraryPath(env, javaLdLibraryPathJstr); if (ldLibraryPath.c_str() != nullptr) { diff --git a/runtime/native/java_lang_System.cc b/runtime/native/java_lang_System.cc index 97aae67178..d9863c579e 100644 --- a/runtime/native/java_lang_System.cc +++ b/runtime/native/java_lang_System.cc @@ -36,7 +36,7 @@ namespace art { */ static void ThrowArrayStoreException_NotAnArray(const char* identifier, mirror::Object* array) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { std::string actualType(PrettyTypeOf(array)); Thread* self = Thread::Current(); self->ThrowNewExceptionF("Ljava/lang/ArrayStoreException;", diff --git a/runtime/native/java_lang_VMClassLoader.cc b/runtime/native/java_lang_VMClassLoader.cc index 15156301c8..62a0b7653d 100644 --- a/runtime/native/java_lang_VMClassLoader.cc +++ b/runtime/native/java_lang_VMClassLoader.cc @@ -16,6 +16,7 @@ #include "java_lang_VMClassLoader.h" +#include "base/out.h" #include "class_linker.h" #include "jni_internal.h" #include "mirror/class_loader.h" @@ -45,7 +46,7 @@ static jclass VMClassLoader_findLoadedClass(JNIEnv* env, jclass, jobject javaLoa // Try the common case. StackHandleScope<1> hs(soa.Self()); cl->FindClassInPathClassLoader(soa, soa.Self(), descriptor.c_str(), descriptor_hash, - hs.NewHandle(loader), &c); + hs.NewHandle(loader), outof(c)); if (c != nullptr) { return soa.AddLocalReference<jclass>(c); } diff --git a/runtime/native/java_lang_reflect_Field.cc b/runtime/native/java_lang_reflect_Field.cc index ba898c6d2d..5bbb0dc45f 100644 --- a/runtime/native/java_lang_reflect_Field.cc +++ b/runtime/native/java_lang_reflect_Field.cc @@ -32,7 +32,7 @@ namespace art { template<bool kIsSet> ALWAYS_INLINE inline static bool VerifyFieldAccess(Thread* self, mirror::Field* field, mirror::Object* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (kIsSet && field->IsFinal()) { ThrowIllegalAccessException( StringPrintf("Cannot set %s field %s of class %s", @@ -60,7 +60,7 @@ ALWAYS_INLINE inline static bool VerifyFieldAccess(Thread* self, mirror::Field* template<bool kAllowReferences> ALWAYS_INLINE inline static bool GetFieldValue(mirror::Object* o, mirror::Field* f, Primitive::Type field_type, JValue* value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK_EQ(value->GetJ(), INT64_C(0)); MemberOffset offset(f->GetOffset()); const bool is_volatile = f->IsVolatile(); @@ -105,7 +105,7 @@ ALWAYS_INLINE inline static bool GetFieldValue(mirror::Object* o, mirror::Field* ALWAYS_INLINE inline static bool CheckReceiver(const ScopedFastNativeObjectAccess& soa, jobject j_rcvr, mirror::Field** f, mirror::Object** class_or_rcvr) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { soa.Self()->AssertThreadSuspensionIsAllowable(); mirror::Class* declaringClass = (*f)->GetDeclaringClass(); if ((*f)->IsStatic()) { @@ -232,7 +232,7 @@ static jshort Field_getShort(JNIEnv* env, jobject javaField, jobject javaObj) { ALWAYS_INLINE inline static void SetFieldValue(mirror::Object* o, mirror::Field* f, 
Primitive::Type field_type, bool allow_references, const JValue& new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(f->GetDeclaringClass()->IsInitialized()); MemberOffset offset(f->GetOffset()); const bool is_volatile = f->IsVolatile(); @@ -253,9 +253,9 @@ ALWAYS_INLINE inline static void SetFieldValue(mirror::Object* o, mirror::Field* break; case Primitive::kPrimChar: if (is_volatile) { - o->SetFieldBooleanVolatile<false>(offset, new_value.GetC()); + o->SetFieldCharVolatile<false>(offset, new_value.GetC()); } else { - o->SetFieldBoolean<false>(offset, new_value.GetC()); + o->SetFieldChar<false>(offset, new_value.GetC()); } break; case Primitive::kPrimInt: diff --git a/runtime/native/scoped_fast_native_object_access.h b/runtime/native/scoped_fast_native_object_access.h index 57b873bc22..c4a33dfd14 100644 --- a/runtime/native/scoped_fast_native_object_access.h +++ b/runtime/native/scoped_fast_native_object_access.h @@ -27,7 +27,7 @@ namespace art { class ScopedFastNativeObjectAccess : public ScopedObjectAccessAlreadyRunnable { public: explicit ScopedFastNativeObjectAccess(JNIEnv* env) - LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) + REQUIRES(!Locks::thread_suspend_count_lock_) SHARED_LOCK_FUNCTION(Locks::mutator_lock_) ALWAYS_INLINE : ScopedObjectAccessAlreadyRunnable(env) { Locks::mutator_lock_->AssertSharedHeld(Self()); diff --git a/runtime/nth_caller_visitor.h b/runtime/nth_caller_visitor.h index 7fe31300ab..2295cb4664 100644 --- a/runtime/nth_caller_visitor.h +++ b/runtime/nth_caller_visitor.h @@ -33,7 +33,7 @@ struct NthCallerVisitor : public StackVisitor { count(0), caller(nullptr) {} - bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod* m = GetMethod(); bool do_count = false; if (m == nullptr || m->IsRuntimeMethod()) { diff --git a/runtime/oat.h b/runtime/oat.h index ee2f3f60f3..29dd76ce5e 100644 --- a/runtime/oat.h +++ b/runtime/oat.h @@ -32,7 +32,7 @@ class InstructionSetFeatures; class PACKED(4) OatHeader { public: static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' }; - static constexpr uint8_t kOatVersion[] = { '0', '6', '7', '\0' }; + static constexpr uint8_t kOatVersion[] = { '0', '6', '8', '\0' }; static constexpr const char* kImageLocationKey = "image-location"; static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline"; diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc index 098fe619aa..a23d94d845 100644 --- a/runtime/oat_file.cc +++ b/runtime/oat_file.cc @@ -27,7 +27,7 @@ #include <sstream> // dlopen_ext support from bionic. 
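// Aside from the annotation renames, java_lang_reflect_Field.cc above carries
// a genuine behavioural fix: the kPrimChar case of SetFieldValue previously
// went through SetFieldBoolean/SetFieldBooleanVolatile. A Java char is an
// unsigned 16-bit value while a boolean field store writes a single byte, so
// Field.setChar() silently dropped the high byte. Tiny standalone illustration
// of that truncation (plain C++, not ART code):
#include <cstdint>
#include <cstdio>

int main() {
  uint16_t char_value = 0x1234;                            // a Java char value
  uint8_t byte_store = static_cast<uint8_t>(char_value);   // what an 8-bit store keeps
  uint16_t read_back = byte_store;                         // 0x0034: high byte lost
  std::printf("wrote 0x%04x, read back 0x%04x\n",
              static_cast<unsigned>(char_value), static_cast<unsigned>(read_back));
  return 0;
}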
-#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ #include "android/dlext.h" #endif @@ -229,7 +229,7 @@ bool OatFile::Dlopen(const std::string& elf_filename, uint8_t* requested_base, *error_msg = StringPrintf("Failed to find absolute path for '%s'", elf_filename.c_str()); return false; } -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ android_dlextinfo extinfo; extinfo.flags = ANDROID_DLEXT_FORCE_LOAD | ANDROID_DLEXT_FORCE_FIXED_VADDR; dlopen_handle_ = android_dlopen_ext(absolute_path.get(), RTLD_NOW, &extinfo); diff --git a/runtime/oat_file.h b/runtime/oat_file.h index 7c4ef8be88..27f8677f03 100644 --- a/runtime/oat_file.h +++ b/runtime/oat_file.h @@ -215,7 +215,7 @@ class OatFile FINAL { const OatDexFile* GetOatDexFile(const char* dex_location, const uint32_t* const dex_location_checksum, bool exception_if_not_found = true) const - LOCKS_EXCLUDED(secondary_lookup_lock_); + REQUIRES(!secondary_lookup_lock_); const std::vector<const OatDexFile*>& GetOatDexFiles() const { return oat_dex_files_storage_; diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc index d133fa3c46..65263d0f30 100644 --- a/runtime/oat_file_assistant_test.cc +++ b/runtime/oat_file_assistant_test.cc @@ -26,6 +26,7 @@ #include <gtest/gtest.h> #include "art_field-inl.h" +#include "base/out.h" #include "class_linker-inl.h" #include "common_runtime_test.h" #include "compiler_callbacks.h" @@ -38,6 +39,28 @@ namespace art { +// Some tests very occasionally fail: we expect to have an unrelocated non-pic +// odex file that is reported as needing relocation, but it is reported +// instead as being up to date (b/22599792). +// +// This function adds extra checks for diagnosing why the given oat file is +// reported up to date, when it should be non-pic needing relocation. +// These extra diagnostics checks should be removed once b/22599792 has been +// resolved. +static void DiagnoseFlakyTestFailure(const OatFile& oat_file) { + Runtime* runtime = Runtime::Current(); + const gc::space::ImageSpace* image_space = runtime->GetHeap()->GetImageSpace(); + ASSERT_TRUE(image_space != nullptr); + const ImageHeader& image_header = image_space->GetImageHeader(); + const OatHeader& oat_header = oat_file.GetOatHeader(); + EXPECT_FALSE(oat_file.IsPic()); + EXPECT_EQ(image_header.GetOatChecksum(), oat_header.GetImageFileLocationOatChecksum()); + EXPECT_NE(reinterpret_cast<uintptr_t>(image_header.GetOatDataBegin()), + oat_header.GetImageFileLocationOatDataBegin()); + EXPECT_NE(image_header.GetPatchDelta(), oat_header.GetImagePatchDelta()); +} + + class OatFileAssistantTest : public CommonRuntimeTest { public: virtual void SetUp() { @@ -186,6 +209,7 @@ class OatFileAssistantTest : public CommonRuntimeTest { // Generate an odex file for the purposes of test. // If pic is true, generates a PIC odex. + // The generated odex file will be un-relocated. void GenerateOdexForTest(const std::string& dex_location, const std::string& odex_location, bool pic = false) { @@ -210,6 +234,16 @@ class OatFileAssistantTest : public CommonRuntimeTest { std::string error_msg; ASSERT_TRUE(OatFileAssistant::Dex2Oat(args, &error_msg)) << error_msg; setenv("ANDROID_DATA", android_data_.c_str(), 1); + + // Verify the odex file was generated as expected. 
+ std::unique_ptr<OatFile> odex_file(OatFile::Open( + odex_location.c_str(), odex_location.c_str(), nullptr, nullptr, + false, dex_location.c_str(), &error_msg)); + ASSERT_TRUE(odex_file.get() != nullptr) << error_msg; + + if (!pic) { + DiagnoseFlakyTestFailure(*odex_file); + } } void GeneratePicOdexForTest(const std::string& dex_location, @@ -446,27 +480,6 @@ TEST_F(OatFileAssistantTest, OatOutOfDate) { EXPECT_TRUE(oat_file_assistant.HasOriginalDexFiles()); } -// Some tests very occasionally fail: we expect to have an unrelocated non-pic -// odex file that is reported as needing relocation, but it is reported -// instead as being up to date (b/22599792). -// -// This function adds extra checks for diagnosing why the given oat file is -// reported up to date, when it should be non-pic needing relocation. -// These extra diagnostics checks should be removed once b/22599792 has been -// resolved. -static void DiagnoseFlakyTestFailure(const OatFile& oat_file) { - Runtime* runtime = Runtime::Current(); - const gc::space::ImageSpace* image_space = runtime->GetHeap()->GetImageSpace(); - ASSERT_TRUE(image_space != nullptr); - const ImageHeader& image_header = image_space->GetImageHeader(); - const OatHeader& oat_header = oat_file.GetOatHeader(); - EXPECT_FALSE(oat_file.IsPic()); - EXPECT_EQ(image_header.GetOatChecksum(), oat_header.GetImageFileLocationOatChecksum()); - EXPECT_NE(reinterpret_cast<uintptr_t>(image_header.GetOatDataBegin()), - oat_header.GetImageFileLocationOatDataBegin()); - EXPECT_NE(image_header.GetPatchDelta(), oat_header.GetImagePatchDelta()); -} - // Case: We have a DEX file and an ODEX file, but no OAT file. // Expect: The status is kPatchOatNeeded. TEST_F(OatFileAssistantTest, DexOdexNoOat) { @@ -946,7 +959,9 @@ class RaceGenerateTask : public Task { ClassLinker* linker = Runtime::Current()->GetClassLinker(); std::vector<std::unique_ptr<const DexFile>> dex_files; std::vector<std::string> error_msgs; - dex_files = linker->OpenDexFilesFromOat(dex_location_.c_str(), oat_location_.c_str(), &error_msgs); + dex_files = linker->OpenDexFilesFromOat(dex_location_.c_str(), + oat_location_.c_str(), + outof(error_msgs)); CHECK(!dex_files.empty()) << Join(error_msgs, '\n'); CHECK(dex_files[0]->GetOatDexFile() != nullptr) << dex_files[0]->GetLocation(); loaded_oat_file_ = dex_files[0]->GetOatDexFile()->GetOatFile(); diff --git a/runtime/object_lock.h b/runtime/object_lock.h index acddc03e29..eb7cbd85d3 100644 --- a/runtime/object_lock.h +++ b/runtime/object_lock.h @@ -28,15 +28,15 @@ class Thread; template <typename T> class ObjectLock { public: - ObjectLock(Thread* self, Handle<T> object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ObjectLock(Thread* self, Handle<T> object) SHARED_REQUIRES(Locks::mutator_lock_); - ~ObjectLock() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ~ObjectLock() SHARED_REQUIRES(Locks::mutator_lock_); - void WaitIgnoringInterrupts() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void WaitIgnoringInterrupts() SHARED_REQUIRES(Locks::mutator_lock_); - void Notify() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void Notify() SHARED_REQUIRES(Locks::mutator_lock_); - void NotifyAll() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void NotifyAll() SHARED_REQUIRES(Locks::mutator_lock_); private: Thread* const self_; diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc index 77723545a2..25b5e49b3d 100644 --- a/runtime/parsed_options.cc +++ b/runtime/parsed_options.cc @@ -263,6 +263,9 @@ std::unique_ptr<RuntimeParser> ParsedOptions::MakeParser(bool 
ignore_unrecognize .Define("--cpu-abilist=_") .WithType<std::string>() .IntoKey(M::CpuAbiList) + .Define("-Xfingerprint:_") + .WithType<std::string>() + .IntoKey(M::Fingerprint) .Define({"-Xexperimental-lambdas", "-Xnoexperimental-lambdas"}) .WithType<bool>() .WithValues({true, false}) diff --git a/runtime/prebuilt_tools_test.cc b/runtime/prebuilt_tools_test.cc index 53bc87665a..a7f7bcd134 100644 --- a/runtime/prebuilt_tools_test.cc +++ b/runtime/prebuilt_tools_test.cc @@ -23,7 +23,7 @@ namespace art { // Run the tests only on host. -#ifndef HAVE_ANDROID_OS +#ifndef __ANDROID__ class PrebuiltToolsTest : public CommonRuntimeTest { }; @@ -61,6 +61,6 @@ TEST_F(PrebuiltToolsTest, CheckTargetTools) { } } -#endif // HAVE_ANDROID_OS +#endif // __ANDROID__ } // namespace art diff --git a/runtime/profiler.cc b/runtime/profiler.cc index 87b0d43451..3db32657c8 100644 --- a/runtime/profiler.cc +++ b/runtime/profiler.cc @@ -59,13 +59,13 @@ class BoundedStackVisitor : public StackVisitor { public: BoundedStackVisitor(std::vector<std::pair<ArtMethod*, uint32_t>>* stack, Thread* thread, uint32_t max_depth) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames), stack_(stack), max_depth_(max_depth), depth_(0) {} - bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod* m = GetMethod(); if (m->IsRuntimeMethod()) { return true; @@ -88,7 +88,7 @@ class BoundedStackVisitor : public StackVisitor { // This is called from either a thread list traversal or from a checkpoint. Regardless // of which caller, the mutator lock must be held. -static void GetSample(Thread* thread, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +static void GetSample(Thread* thread, void* arg) SHARED_REQUIRES(Locks::mutator_lock_) { BackgroundMethodSamplingProfiler* profiler = reinterpret_cast<BackgroundMethodSamplingProfiler*>(arg); const ProfilerOptions profile_options = profiler->GetProfilerOptions(); diff --git a/runtime/profiler.h b/runtime/profiler.h index 7611487da2..30babe358d 100644 --- a/runtime/profiler.h +++ b/runtime/profiler.h @@ -104,8 +104,8 @@ class ProfileSampleResults { explicit ProfileSampleResults(Mutex& lock); ~ProfileSampleResults(); - void Put(ArtMethod* method); - void PutStack(const std::vector<InstructionLocation>& stack_dump); + void Put(ArtMethod* method) REQUIRES(!lock_); + void PutStack(const std::vector<InstructionLocation>& stack_dump) REQUIRES(!lock_); uint32_t Write(std::ostream &os, ProfileDataType type); void ReadPrevious(int fd, ProfileDataType type); void Clear(); @@ -168,17 +168,19 @@ class BackgroundMethodSamplingProfiler { // Start a profile thread with the user-supplied arguments. // Returns true if the profile was started or if it was already running. Returns false otherwise. 
static bool Start(const std::string& output_filename, const ProfilerOptions& options) - LOCKS_EXCLUDED(Locks::mutator_lock_, - Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_, - Locks::profiler_lock_); - - static void Stop() LOCKS_EXCLUDED(Locks::profiler_lock_, wait_lock_); - static void Shutdown() LOCKS_EXCLUDED(Locks::profiler_lock_); - - void RecordMethod(ArtMethod *method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void RecordStack(const std::vector<InstructionLocation>& stack) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool ProcessMethod(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_, + !Locks::profiler_lock_); + + // NO_THREAD_SAFETY_ANALYSIS for static function calling into member function with excludes lock. + static void Stop() REQUIRES(!Locks::profiler_lock_, !wait_lock_, !Locks::profiler_lock_) + NO_THREAD_SAFETY_ANALYSIS; + // NO_THREAD_SAFETY_ANALYSIS for static function calling into member function with excludes lock. + static void Shutdown() REQUIRES(!Locks::profiler_lock_) NO_THREAD_SAFETY_ANALYSIS; + + void RecordMethod(ArtMethod *method) SHARED_REQUIRES(Locks::mutator_lock_); + void RecordStack(const std::vector<InstructionLocation>& stack) + SHARED_REQUIRES(Locks::mutator_lock_); + bool ProcessMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_); const ProfilerOptions& GetProfilerOptions() const { return options_; } Barrier& GetBarrier() { @@ -190,13 +192,15 @@ class BackgroundMethodSamplingProfiler { const std::string& output_filename, const ProfilerOptions& options); // The sampling interval in microseconds is passed as an argument. - static void* RunProfilerThread(void* arg) LOCKS_EXCLUDED(Locks::profiler_lock_); + // NO_THREAD_SAFETY_ANALYSIS for static function calling into member function with excludes lock. 
+ static void* RunProfilerThread(void* arg) REQUIRES(!Locks::profiler_lock_) + NO_THREAD_SAFETY_ANALYSIS; - uint32_t WriteProfile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + uint32_t WriteProfile() SHARED_REQUIRES(Locks::mutator_lock_); void CleanProfile(); - uint32_t DumpProfile(std::ostream& os) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static bool ShuttingDown(Thread* self) LOCKS_EXCLUDED(Locks::profiler_lock_); + uint32_t DumpProfile(std::ostream& os) SHARED_REQUIRES(Locks::mutator_lock_); + static bool ShuttingDown(Thread* self) REQUIRES(!Locks::profiler_lock_); static BackgroundMethodSamplingProfiler* profiler_ GUARDED_BY(Locks::profiler_lock_); diff --git a/runtime/proxy_test.cc b/runtime/proxy_test.cc index f40c0f1130..c33b126a76 100644 --- a/runtime/proxy_test.cc +++ b/runtime/proxy_test.cc @@ -34,7 +34,7 @@ class ProxyTest : public CommonCompilerTest { mirror::Class* GenerateProxyClass(ScopedObjectAccess& soa, jobject jclass_loader, const char* className, const std::vector<mirror::Class*>& interfaces) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { mirror::Class* javaLangObject = class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;"); CHECK(javaLangObject != nullptr); diff --git a/runtime/quick/inline_method_analyser.h b/runtime/quick/inline_method_analyser.h index 0d39e22b34..79c50e8635 100644 --- a/runtime/quick/inline_method_analyser.h +++ b/runtime/quick/inline_method_analyser.h @@ -56,6 +56,7 @@ enum InlineMethodOpcode : uint16_t { kIntrinsicReferenceGetReferent, kIntrinsicCharAt, kIntrinsicCompareTo, + kIntrinsicEquals, // String equals kIntrinsicGetCharsNoCheck, kIntrinsicIsEmptyOrLength, kIntrinsicIndexOf, @@ -157,7 +158,7 @@ class InlineMethodAnalyser { * @return true if the method is a candidate for inlining, false otherwise. */ static bool AnalyseMethodCode(verifier::MethodVerifier* verifier, InlineMethod* method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static constexpr bool IsInstructionIGet(Instruction::Code opcode) { return Instruction::IGET <= opcode && opcode <= Instruction::IGET_SHORT; @@ -182,16 +183,16 @@ class InlineMethodAnalyser { static bool AnalyseReturnMethod(const DexFile::CodeItem* code_item, InlineMethod* result); static bool AnalyseConstMethod(const DexFile::CodeItem* code_item, InlineMethod* result); static bool AnalyseIGetMethod(verifier::MethodVerifier* verifier, InlineMethod* result) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static bool AnalyseIPutMethod(verifier::MethodVerifier* verifier, InlineMethod* result) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Can we fast path instance field access in a verified accessor? // If yes, computes field's offset and volatility and whether the method is static or not. 
static bool ComputeSpecialAccessorInfo(uint32_t field_idx, bool is_put, verifier::MethodVerifier* verifier, InlineIGetIPutData* result) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); }; } // namespace art diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc index 02baad758f..d1a4081125 100644 --- a/runtime/quick_exception_handler.cc +++ b/runtime/quick_exception_handler.cc @@ -45,14 +45,14 @@ class CatchBlockStackVisitor FINAL : public StackVisitor { public: CatchBlockStackVisitor(Thread* self, Context* context, Handle<mirror::Throwable>* exception, QuickExceptionHandler* exception_handler) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : StackVisitor(self, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames), self_(self), exception_(exception), exception_handler_(exception_handler) { } - bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod* method = GetMethod(); exception_handler_->SetHandlerFrameDepth(GetFrameDepth()); if (method == nullptr) { @@ -83,7 +83,7 @@ class CatchBlockStackVisitor FINAL : public StackVisitor { private: bool HandleTryItems(ArtMethod* method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { uint32_t dex_pc = DexFile::kDexNoIndex; if (!method->IsNative()) { dex_pc = GetDexPc(); @@ -159,7 +159,7 @@ void QuickExceptionHandler::FindCatch(mirror::Throwable* exception) { class DeoptimizeStackVisitor FINAL : public StackVisitor { public: DeoptimizeStackVisitor(Thread* self, Context* context, QuickExceptionHandler* exception_handler) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : StackVisitor(self, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames), self_(self), exception_handler_(exception_handler), @@ -167,7 +167,7 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor { stacked_shadow_frame_pushed_(false) { } - bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { exception_handler_->SetHandlerFrameDepth(GetFrameDepth()); ArtMethod* method = GetMethod(); if (method == nullptr) { @@ -196,7 +196,7 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor { return static_cast<VRegKind>(kinds.at(reg * 2)); } - bool HandleDeoptimization(ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool HandleDeoptimization(ArtMethod* m) SHARED_REQUIRES(Locks::mutator_lock_) { const DexFile::CodeItem* code_item = m->GetCodeItem(); CHECK(code_item != nullptr); uint16_t num_regs = code_item->registers_size_; @@ -350,14 +350,14 @@ void QuickExceptionHandler::DeoptimizeStack() { class InstrumentationStackVisitor : public StackVisitor { public: InstrumentationStackVisitor(Thread* self, size_t frame_depth) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : StackVisitor(self, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames), frame_depth_(frame_depth), instrumentation_frames_to_pop_(0) { CHECK_NE(frame_depth_, kInvalidFrameDepth); } - bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) { size_t current_frame_depth = GetFrameDepth(); if (current_frame_depth < frame_depth_) { CHECK(GetMethod() != nullptr); diff --git a/runtime/quick_exception_handler.h 
b/runtime/quick_exception_handler.h index 8d7cd12216..ce9085d70a 100644 --- a/runtime/quick_exception_handler.h +++ b/runtime/quick_exception_handler.h @@ -36,17 +36,17 @@ class ShadowFrame; class QuickExceptionHandler { public: QuickExceptionHandler(Thread* self, bool is_deoptimization) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); NO_RETURN ~QuickExceptionHandler() { LOG(FATAL) << "UNREACHABLE"; // Expected to take long jump. UNREACHABLE(); } - void FindCatch(mirror::Throwable* exception) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void DeoptimizeStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void UpdateInstrumentationStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - NO_RETURN void DoLongJump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void FindCatch(mirror::Throwable* exception) SHARED_REQUIRES(Locks::mutator_lock_); + void DeoptimizeStack() SHARED_REQUIRES(Locks::mutator_lock_); + void UpdateInstrumentationStack() SHARED_REQUIRES(Locks::mutator_lock_); + NO_RETURN void DoLongJump() SHARED_REQUIRES(Locks::mutator_lock_); void SetHandlerQuickFrame(ArtMethod** handler_quick_frame) { handler_quick_frame_ = handler_quick_frame; diff --git a/runtime/read_barrier.h b/runtime/read_barrier.h index 55cef6826a..e7ad7316bd 100644 --- a/runtime/read_barrier.h +++ b/runtime/read_barrier.h @@ -49,7 +49,7 @@ class ReadBarrier { bool kMaybeDuringStartup = false> ALWAYS_INLINE static MirrorType* Barrier( mirror::Object* obj, MemberOffset offset, mirror::HeapReference<MirrorType>* ref_addr) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // It's up to the implementation whether the given root gets updated // whereas the return value must be an updated reference. @@ -57,7 +57,7 @@ class ReadBarrier { bool kMaybeDuringStartup = false> ALWAYS_INLINE static MirrorType* BarrierForRoot(MirrorType** root, GcRootSource* gc_root_source = nullptr) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // It's up to the implementation whether the given root gets updated // whereas the return value must be an updated reference. @@ -65,24 +65,24 @@ class ReadBarrier { bool kMaybeDuringStartup = false> ALWAYS_INLINE static MirrorType* BarrierForRoot(mirror::CompressedReference<MirrorType>* root, GcRootSource* gc_root_source = nullptr) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static bool IsDuringStartup(); // Without the holder object. static void AssertToSpaceInvariant(mirror::Object* ref) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { AssertToSpaceInvariant(nullptr, MemberOffset(0), ref); } // With the holder object. static void AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset, mirror::Object* ref) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // With GcRootSource. 
static void AssertToSpaceInvariant(GcRootSource* gc_root_source, mirror::Object* ref) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - static mirror::Object* Mark(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static mirror::Object* Mark(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_); static mirror::Object* WhitePtr() { return reinterpret_cast<mirror::Object*>(white_ptr_); @@ -96,7 +96,7 @@ class ReadBarrier { ALWAYS_INLINE static bool HasGrayReadBarrierPointer(mirror::Object* obj, uintptr_t* out_rb_ptr_high_bits) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Note: These couldn't be constexpr pointers as reinterpret_cast isn't compatible with them. static constexpr uintptr_t white_ptr_ = 0x0; // Not marked. diff --git a/runtime/read_barrier_c.h b/runtime/read_barrier_c.h index 4f408dd5c1..710c21f03e 100644 --- a/runtime/read_barrier_c.h +++ b/runtime/read_barrier_c.h @@ -47,9 +47,4 @@ #error "Only one of Baker or Brooks can be enabled at a time." #endif -// A placeholder marker to indicate places to add read barriers in the -// assembly code. This is a development time aid and to be removed -// after read barriers are added. -#define THIS_LOAD_REQUIRES_READ_BARRIER - #endif // ART_RUNTIME_READ_BARRIER_C_H_ diff --git a/runtime/reference_table.cc b/runtime/reference_table.cc index a31d8ac5ba..49b6a38b01 100644 --- a/runtime/reference_table.cc +++ b/runtime/reference_table.cc @@ -62,7 +62,7 @@ void ReferenceTable::Remove(mirror::Object* obj) { // If "obj" is an array, return the number of elements in the array. // Otherwise, return zero. -static size_t GetElementCount(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +static size_t GetElementCount(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) { // We assume the special cleared value isn't an array in the if statement below. DCHECK(!Runtime::Current()->GetClearedJniWeakGlobal()->IsArrayInstance()); if (obj == nullptr || !obj->IsArrayInstance()) { @@ -78,7 +78,7 @@ static size_t GetElementCount(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks:: // or equivalent to the original. 
static void DumpSummaryLine(std::ostream& os, mirror::Object* obj, size_t element_count, int identical, int equiv) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (obj == nullptr) { os << " null reference (count=" << equiv << ")\n"; return; diff --git a/runtime/reference_table.h b/runtime/reference_table.h index 94f16b66de..f90ccd1e51 100644 --- a/runtime/reference_table.h +++ b/runtime/reference_table.h @@ -41,22 +41,22 @@ class ReferenceTable { ReferenceTable(const char* name, size_t initial_size, size_t max_size); ~ReferenceTable(); - void Add(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void Add(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_); - void Remove(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void Remove(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_); size_t Size() const; - void Dump(std::ostream& os) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void Dump(std::ostream& os) SHARED_REQUIRES(Locks::mutator_lock_); void VisitRoots(RootVisitor* visitor, const RootInfo& root_info) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); private: typedef std::vector<GcRoot<mirror::Object>, TrackingAllocator<GcRoot<mirror::Object>, kAllocatorTagReferenceTable>> Table; static void Dump(std::ostream& os, Table& entries) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); friend class IndirectReferenceTable; // For Dump. std::string name_; diff --git a/runtime/reflection.cc b/runtime/reflection.cc index 11522d9914..ee2e2c569a 100644 --- a/runtime/reflection.cc +++ b/runtime/reflection.cc @@ -72,7 +72,7 @@ class ArgArray { num_bytes_ += 4; } - void Append(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void Append(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) { Append(StackReference<mirror::Object>::FromMirrorPtr(obj).AsVRegValue()); } @@ -96,7 +96,7 @@ class ArgArray { void BuildArgArrayFromVarArgs(const ScopedObjectAccessAlreadyRunnable& soa, mirror::Object* receiver, va_list ap) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // Set receiver if non-null (method is not static) if (receiver != nullptr) { Append(receiver); @@ -132,7 +132,7 @@ class ArgArray { void BuildArgArrayFromJValues(const ScopedObjectAccessAlreadyRunnable& soa, mirror::Object* receiver, jvalue* args) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // Set receiver if non-null (method is not static) if (receiver != nullptr) { Append(receiver); @@ -171,7 +171,7 @@ class ArgArray { } void BuildArgArrayFromFrame(ShadowFrame* shadow_frame, uint32_t arg_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // Set receiver if non-null (method is not static) size_t cur_arg = arg_offset; if (!shadow_frame->GetMethod()->IsStatic()) { @@ -206,7 +206,7 @@ class ArgArray { static void ThrowIllegalPrimitiveArgumentException(const char* expected, const char* found_descriptor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ThrowIllegalArgumentException( StringPrintf("Invalid primitive conversion from %s to %s", expected, PrettyDescriptor(found_descriptor).c_str()).c_str()); @@ -214,7 +214,7 @@ class ArgArray { bool BuildArgArrayFromObjectArray(mirror::Object* receiver, mirror::ObjectArray<mirror::Object>* args, ArtMethod* m) - 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { const DexFile::TypeList* classes = m->GetParameterTypeList(); // Set receiver if non-null (method is not static) if (receiver != nullptr) { @@ -343,7 +343,7 @@ class ArgArray { }; static void CheckMethodArguments(JavaVMExt* vm, ArtMethod* m, uint32_t* args) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { const DexFile::TypeList* params = m->GetParameterTypeList(); if (params == nullptr) { return; // No arguments so nothing to check. @@ -418,7 +418,7 @@ static void CheckMethodArguments(JavaVMExt* vm, ArtMethod* m, uint32_t* args) } static ArtMethod* FindVirtualMethod(mirror::Object* receiver, ArtMethod* method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(method, sizeof(void*)); } @@ -426,7 +426,7 @@ static ArtMethod* FindVirtualMethod(mirror::Object* receiver, ArtMethod* method) static void InvokeWithArgArray(const ScopedObjectAccessAlreadyRunnable& soa, ArtMethod* method, ArgArray* arg_array, JValue* result, const char* shorty) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { uint32_t* args = arg_array->GetArray(); if (UNLIKELY(soa.Env()->check_jni)) { CheckMethodArguments(soa.Vm(), method->GetInterfaceMethodIfProxy(sizeof(void*)), args); @@ -436,7 +436,7 @@ static void InvokeWithArgArray(const ScopedObjectAccessAlreadyRunnable& soa, JValue InvokeWithVarArgs(const ScopedObjectAccessAlreadyRunnable& soa, jobject obj, jmethodID mid, va_list args) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // We want to make sure that the stack is not within a small distance from the // protected region in case we are calling into a leaf function whose stack // check has been elided. 
@@ -730,7 +730,7 @@ mirror::Object* BoxPrimitive(Primitive::Type src_class, const JValue& value) { } static std::string UnboxingFailureKind(ArtField* f) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (f != nullptr) { return "field " + PrettyField(f, false); } @@ -740,7 +740,7 @@ static std::string UnboxingFailureKind(ArtField* f) static bool UnboxPrimitive(mirror::Object* o, mirror::Class* dst_class, ArtField* f, JValue* unboxed_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { bool unbox_for_result = (f == nullptr); if (!dst_class->IsPrimitive()) { if (UNLIKELY(o != nullptr && !o->InstanceOf(dst_class))) { diff --git a/runtime/reflection.h b/runtime/reflection.h index 825a7213ce..d9c38c1064 100644 --- a/runtime/reflection.h +++ b/runtime/reflection.h @@ -33,60 +33,60 @@ class ScopedObjectAccessAlreadyRunnable; class ShadowFrame; mirror::Object* BoxPrimitive(Primitive::Type src_class, const JValue& value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool UnboxPrimitiveForField(mirror::Object* o, mirror::Class* dst_class, ArtField* f, JValue* unboxed_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool UnboxPrimitiveForResult(mirror::Object* o, mirror::Class* dst_class, JValue* unboxed_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ALWAYS_INLINE bool ConvertPrimitiveValue(bool unbox_for_result, Primitive::Type src_class, Primitive::Type dst_class, const JValue& src, JValue* dst) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); JValue InvokeWithVarArgs(const ScopedObjectAccessAlreadyRunnable& soa, jobject obj, jmethodID mid, va_list args) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); JValue InvokeWithJValues(const ScopedObjectAccessAlreadyRunnable& soa, jobject obj, jmethodID mid, jvalue* args) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); JValue InvokeVirtualOrInterfaceWithJValues(const ScopedObjectAccessAlreadyRunnable& soa, jobject obj, jmethodID mid, jvalue* args) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); JValue InvokeVirtualOrInterfaceWithVarArgs(const ScopedObjectAccessAlreadyRunnable& soa, jobject obj, jmethodID mid, va_list args) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // num_frames is number of frames we look up for access check. jobject InvokeMethod(const ScopedObjectAccessAlreadyRunnable& soa, jobject method, jobject receiver, jobject args, size_t num_frames = 1) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ALWAYS_INLINE bool VerifyObjectIsClass(mirror::Object* o, mirror::Class* c) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool VerifyAccess(Thread* self, mirror::Object* obj, mirror::Class* declaring_class, uint32_t access_flags, mirror::Class** calling_class, size_t num_frames) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // This version takes a known calling class. 
bool VerifyAccess(Thread* self, mirror::Object* obj, mirror::Class* declaring_class, uint32_t access_flags, mirror::Class* calling_class) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Get the calling class by using a stack visitor, may return null for unattached native threads. mirror::Class* GetCallingClass(Thread* self, size_t num_frames) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void InvalidReceiverError(mirror::Object* o, mirror::Class* c) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void UpdateReference(Thread* self, jobject obj, mirror::Object* result) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); } // namespace art diff --git a/runtime/reflection_test.cc b/runtime/reflection_test.cc index 9707fb8e42..bd89be5d17 100644 --- a/runtime/reflection_test.cc +++ b/runtime/reflection_test.cc @@ -85,7 +85,7 @@ class ReflectionTest : public CommonCompilerTest { mirror::Object** receiver, bool is_static, const char* method_name, const char* method_signature) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { const char* class_name = is_static ? "StaticLeafMethods" : "NonStaticLeafMethods"; jobject jclass_loader(LoadDex(class_name)); Thread* self = Thread::Current(); diff --git a/runtime/runtime-inl.h b/runtime/runtime-inl.h index 68d5ad2f6e..380e72b5dd 100644 --- a/runtime/runtime-inl.h +++ b/runtime/runtime-inl.h @@ -66,13 +66,13 @@ inline ArtMethod* Runtime::GetImtUnimplementedMethod() { } inline ArtMethod* Runtime::GetCalleeSaveMethod(CalleeSaveType type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(HasCalleeSaveMethod(type)); return GetCalleeSaveMethodUnchecked(type); } inline ArtMethod* Runtime::GetCalleeSaveMethodUnchecked(CalleeSaveType type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return reinterpret_cast<ArtMethod*>(callee_save_methods_[type]); } diff --git a/runtime/runtime.cc b/runtime/runtime.cc index cc8b215049..1914124600 100644 --- a/runtime/runtime.cc +++ b/runtime/runtime.cc @@ -590,7 +590,7 @@ bool Runtime::Start() { return true; } -void Runtime::EndThreadBirth() EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_) { +void Runtime::EndThreadBirth() REQUIRES(Locks::runtime_shutdown_lock_) { DCHECK_GT(threads_being_born_, 0U); threads_being_born_--; if (shutting_down_started_ && threads_being_born_ == 0) { @@ -607,14 +607,14 @@ bool Runtime::InitZygote() { // See storage config details at http://source.android.com/tech/storage/ // Create private mount namespace shared by all children if (unshare(CLONE_NEWNS) == -1) { - PLOG(WARNING) << "Failed to unshare()"; + PLOG(ERROR) << "Failed to unshare()"; return false; } // Mark rootfs as being a slave so that changes from default // namespace only flow into our children. 
if (mount("rootfs", "/", nullptr, (MS_SLAVE | MS_REC), nullptr) == -1) { - PLOG(WARNING) << "Failed to mount() rootfs as MS_SLAVE"; + PLOG(ERROR) << "Failed to mount() rootfs as MS_SLAVE"; return false; } @@ -625,7 +625,7 @@ bool Runtime::InitZygote() { if (target_base != nullptr) { if (mount("tmpfs", target_base, "tmpfs", MS_NOSUID | MS_NODEV, "uid=0,gid=1028,mode=0751") == -1) { - LOG(WARNING) << "Failed to mount tmpfs to " << target_base; + PLOG(ERROR) << "Failed to mount tmpfs to " << target_base; return false; } } @@ -852,6 +852,8 @@ bool Runtime::Init(const RuntimeOptions& raw_options, bool ignore_unrecognized) Split(runtime_options.GetOrDefault(Opt::CpuAbiList), ',', &cpu_abilist_); + fingerprint_ = runtime_options.ReleaseOrDefault(Opt::Fingerprint); + if (runtime_options.GetOrDefault(Opt::Interpret)) { GetInstrumentation()->ForceInterpretOnly(); } diff --git a/runtime/runtime.h b/runtime/runtime.h index 55adaf1276..4577b75397 100644 --- a/runtime/runtime.h +++ b/runtime/runtime.h @@ -184,19 +184,19 @@ class Runtime { bool Start() UNLOCK_FUNCTION(Locks::mutator_lock_); bool IsShuttingDown(Thread* self); - bool IsShuttingDownLocked() const EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_) { + bool IsShuttingDownLocked() const REQUIRES(Locks::runtime_shutdown_lock_) { return shutting_down_; } - size_t NumberOfThreadsBeingBorn() const EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_) { + size_t NumberOfThreadsBeingBorn() const REQUIRES(Locks::runtime_shutdown_lock_) { return threads_being_born_; } - void StartThreadBirth() EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_) { + void StartThreadBirth() REQUIRES(Locks::runtime_shutdown_lock_) { threads_being_born_++; } - void EndThreadBirth() EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_); + void EndThreadBirth() REQUIRES(Locks::runtime_shutdown_lock_); bool IsStarted() const { return started_; @@ -212,7 +212,7 @@ class Runtime { // Aborts semi-cleanly. Used in the implementation of LOG(FATAL), which most // callers should prefer. - NO_RETURN static void Abort() LOCKS_EXCLUDED(Locks::abort_lock_); + NO_RETURN static void Abort() REQUIRES(!Locks::abort_lock_); // Returns the "main" ThreadGroup, used when attaching user threads. jobject GetMainThreadGroup() const; @@ -230,7 +230,7 @@ class Runtime { void CallExitHook(jint status); // Detaches the current native thread from the runtime. - void DetachCurrentThread() LOCKS_EXCLUDED(Locks::mutator_lock_); + void DetachCurrentThread() REQUIRES(!Locks::mutator_lock_); void DumpForSigQuit(std::ostream& os); void DumpLockHolders(std::ostream& os); @@ -279,15 +279,15 @@ class Runtime { } // Is the given object the special object used to mark a cleared JNI weak global? - bool IsClearedJniWeakGlobal(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsClearedJniWeakGlobal(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_); // Get the special object used to mark a cleared JNI weak global. 
- mirror::Object* GetClearedJniWeakGlobal() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::Object* GetClearedJniWeakGlobal() SHARED_REQUIRES(Locks::mutator_lock_); - mirror::Throwable* GetPreAllocatedOutOfMemoryError() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::Throwable* GetPreAllocatedOutOfMemoryError() SHARED_REQUIRES(Locks::mutator_lock_); mirror::Throwable* GetPreAllocatedNoClassDefFoundError() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); const std::vector<std::string>& GetProperties() const { return properties_; @@ -301,77 +301,77 @@ class Runtime { return "2.1.0"; } - void DisallowNewSystemWeaks() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void AllowNewSystemWeaks() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void EnsureNewSystemWeaksDisallowed() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void BroadcastForNewSystemWeaks() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void DisallowNewSystemWeaks() SHARED_REQUIRES(Locks::mutator_lock_); + void AllowNewSystemWeaks() SHARED_REQUIRES(Locks::mutator_lock_); + void EnsureNewSystemWeaksDisallowed() SHARED_REQUIRES(Locks::mutator_lock_); + void BroadcastForNewSystemWeaks() SHARED_REQUIRES(Locks::mutator_lock_); // Visit all the roots. If only_dirty is true then non-dirty roots won't be visited. If // clean_dirty is true then dirty roots will be marked as non-dirty after visiting. void VisitRoots(RootVisitor* visitor, VisitRootFlags flags = kVisitRootFlagAllRoots) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Visit image roots, only used for hprof since the GC uses the image space mod union table // instead. - void VisitImageRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void VisitImageRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_); // Visit all of the roots we can do safely do concurrently. void VisitConcurrentRoots(RootVisitor* visitor, VisitRootFlags flags = kVisitRootFlagAllRoots) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Visit all of the non thread roots, we can do this with mutators unpaused. void VisitNonThreadRoots(RootVisitor* visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void VisitTransactionRoots(RootVisitor* visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Visit all of the thread roots. - void VisitThreadRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void VisitThreadRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_); // Flip thread roots from from-space refs to to-space refs. size_t FlipThreadRoots(Closure* thread_flip_visitor, Closure* flip_callback, gc::collector::GarbageCollector* collector) - LOCKS_EXCLUDED(Locks::mutator_lock_); + REQUIRES(!Locks::mutator_lock_); // Visit all other roots which must be done with mutators suspended. void VisitNonConcurrentRoots(RootVisitor* visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Sweep system weaks, the system weak is deleted if the visitor return null. Otherwise, the // system weak is updated to be the visitor's returned value. 
void SweepSystemWeaks(IsMarkedVisitor* visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Constant roots are the roots which never change after the runtime is initialized, they only // need to be visited once per GC cycle. void VisitConstantRoots(RootVisitor* visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Returns a special method that calls into a trampoline for runtime method resolution - ArtMethod* GetResolutionMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ArtMethod* GetResolutionMethod() SHARED_REQUIRES(Locks::mutator_lock_); bool HasResolutionMethod() const { return resolution_method_ != nullptr; } - void SetResolutionMethod(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetResolutionMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_); - ArtMethod* CreateResolutionMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ArtMethod* CreateResolutionMethod() SHARED_REQUIRES(Locks::mutator_lock_); // Returns a special method that calls into a trampoline for runtime imt conflicts. - ArtMethod* GetImtConflictMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - ArtMethod* GetImtUnimplementedMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ArtMethod* GetImtConflictMethod() SHARED_REQUIRES(Locks::mutator_lock_); + ArtMethod* GetImtUnimplementedMethod() SHARED_REQUIRES(Locks::mutator_lock_); bool HasImtConflictMethod() const { return imt_conflict_method_ != nullptr; } - void SetImtConflictMethod(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void SetImtUnimplementedMethod(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetImtConflictMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_); + void SetImtUnimplementedMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_); - ArtMethod* CreateImtConflictMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ArtMethod* CreateImtConflictMethod() SHARED_REQUIRES(Locks::mutator_lock_); // Returns a special method that describes all callee saves being spilled to the stack. 
enum CalleeSaveType { @@ -386,17 +386,17 @@ class Runtime { } ArtMethod* GetCalleeSaveMethod(CalleeSaveType type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ArtMethod* GetCalleeSaveMethodUnchecked(CalleeSaveType type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); QuickMethodFrameInfo GetCalleeSaveMethodFrameInfo(CalleeSaveType type) const { return callee_save_method_frame_infos_[type]; } QuickMethodFrameInfo GetRuntimeMethodFrameInfo(ArtMethod* method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static size_t GetCalleeSaveMethodOffset(CalleeSaveType type) { return OFFSETOF_MEMBER(Runtime, callee_save_methods_[type]); @@ -410,7 +410,7 @@ class Runtime { void SetCalleeSaveMethod(ArtMethod* method, CalleeSaveType type); - ArtMethod* CreateCalleeSaveMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ArtMethod* CreateCalleeSaveMethod() SHARED_REQUIRES(Locks::mutator_lock_); int32_t GetStat(int kind); @@ -424,8 +424,8 @@ class Runtime { void ResetStats(int kinds); - void SetStatsEnabled(bool new_state) LOCKS_EXCLUDED(Locks::instrument_entrypoints_lock_, - Locks::mutator_lock_); + void SetStatsEnabled(bool new_state) + REQUIRES(!Locks::instrument_entrypoints_lock_, !Locks::mutator_lock_); enum class NativeBridgeAction { // private kUnload, @@ -463,9 +463,9 @@ class Runtime { bool IsTransactionAborted() const; void AbortTransactionAndThrowAbortError(Thread* self, const std::string& abort_message) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void ThrowTransactionAbortError(Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void RecordWriteFieldBoolean(mirror::Object* obj, MemberOffset field_offset, uint8_t value, bool is_volatile) const; @@ -482,17 +482,17 @@ class Runtime { void RecordWriteFieldReference(mirror::Object* obj, MemberOffset field_offset, mirror::Object* value, bool is_volatile) const; void RecordWriteArray(mirror::Array* array, size_t index, uint64_t value) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void RecordStrongStringInsertion(mirror::String* s) const - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_); + REQUIRES(Locks::intern_table_lock_); void RecordWeakStringInsertion(mirror::String* s) const - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_); + REQUIRES(Locks::intern_table_lock_); void RecordStrongStringRemoval(mirror::String* s) const - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_); + REQUIRES(Locks::intern_table_lock_); void RecordWeakStringRemoval(mirror::String* s) const - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_); + REQUIRES(Locks::intern_table_lock_); - void SetFaultMessage(const std::string& message); + void SetFaultMessage(const std::string& message) REQUIRES(!fault_message_lock_); // Only read by the signal handler, NO_THREAD_SAFETY_ANALYSIS to prevent lock order violations // with the unexpected_signal_lock_. const std::string& GetFaultMessage() NO_THREAD_SAFETY_ANALYSIS { @@ -563,6 +563,11 @@ class Runtime { bool IsDebuggable() const; + // Returns the build fingerprint, if set. Otherwise an empty string is returned. 
+ std::string GetFingerprint() { + return fingerprint_; + } + private: static void InitPlatformSignalHandlers(); @@ -572,7 +577,7 @@ class Runtime { bool Init(const RuntimeOptions& options, bool ignore_unrecognized) SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_); - void InitNativeMethods() LOCKS_EXCLUDED(Locks::mutator_lock_); + void InitNativeMethods() REQUIRES(!Locks::mutator_lock_); void InitThreadGroups(Thread* self); void RegisterRuntimeNativeMethods(JNIEnv* env); @@ -757,6 +762,9 @@ class Runtime { MethodRefToStringInitRegMap method_ref_string_init_reg_map_; + // Contains the build fingerprint, if given as a parameter. + std::string fingerprint_; + DISALLOW_COPY_AND_ASSIGN(Runtime); }; std::ostream& operator<<(std::ostream& os, const Runtime::CalleeSaveType& rhs); diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def index 9922c5f993..02ed3a2553 100644 --- a/runtime/runtime_options.def +++ b/runtime/runtime_options.def @@ -112,6 +112,7 @@ RUNTIME_OPTIONS_KEY (std::string, NativeBridge) RUNTIME_OPTIONS_KEY (unsigned int, ZygoteMaxFailedBoots, 10) RUNTIME_OPTIONS_KEY (Unit, NoDexFileFallback) RUNTIME_OPTIONS_KEY (std::string, CpuAbiList) +RUNTIME_OPTIONS_KEY (std::string, Fingerprint) RUNTIME_OPTIONS_KEY (bool, ExperimentalLambdas, false) // -X[no]experimental-lambdas // Not parse-able from command line, but can be provided explicitly. diff --git a/runtime/scoped_thread_state_change.h b/runtime/scoped_thread_state_change.h index 1cc2df65ba..b90aa0ec0e 100644 --- a/runtime/scoped_thread_state_change.h +++ b/runtime/scoped_thread_state_change.h @@ -34,7 +34,7 @@ namespace art { class ScopedThreadStateChange { public: ScopedThreadStateChange(Thread* self, ThreadState new_thread_state) - LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) ALWAYS_INLINE + REQUIRES(!Locks::thread_suspend_count_lock_) ALWAYS_INLINE : self_(self), thread_state_(new_thread_state), expected_has_no_thread_(false) { if (UNLIKELY(self_ == nullptr)) { // Value chosen arbitrarily and won't be used in the destructor since thread_ == null. @@ -59,7 +59,7 @@ class ScopedThreadStateChange { } } - ~ScopedThreadStateChange() LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) ALWAYS_INLINE { + ~ScopedThreadStateChange() REQUIRES(!Locks::thread_suspend_count_lock_) ALWAYS_INLINE { if (UNLIKELY(self_ == nullptr)) { if (!expected_has_no_thread_) { Runtime* runtime = Runtime::Current(); @@ -130,7 +130,7 @@ class ScopedObjectAccessAlreadyRunnable { * it's best if we don't grab a mutex. */ template<typename T> - T AddLocalReference(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + T AddLocalReference(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_) { Locks::mutator_lock_->AssertSharedHeld(Self()); DCHECK(IsRunnable()); // Don't work with raw objects in non-runnable states. DCHECK_NE(obj, Runtime::Current()->GetClearedJniWeakGlobal()); @@ -139,32 +139,32 @@ class ScopedObjectAccessAlreadyRunnable { template<typename T> T Decode(jobject obj) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { Locks::mutator_lock_->AssertSharedHeld(Self()); DCHECK(IsRunnable()); // Don't work with raw objects in non-runnable states. return down_cast<T>(Self()->DecodeJObject(obj)); } ArtField* DecodeField(jfieldID fid) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { Locks::mutator_lock_->AssertSharedHeld(Self()); DCHECK(IsRunnable()); // Don't work with raw objects in non-runnable states. 
return reinterpret_cast<ArtField*>(fid); } - jfieldID EncodeField(ArtField* field) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + jfieldID EncodeField(ArtField* field) const SHARED_REQUIRES(Locks::mutator_lock_) { Locks::mutator_lock_->AssertSharedHeld(Self()); DCHECK(IsRunnable()); // Don't work with raw objects in non-runnable states. return reinterpret_cast<jfieldID>(field); } - ArtMethod* DecodeMethod(jmethodID mid) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ArtMethod* DecodeMethod(jmethodID mid) const SHARED_REQUIRES(Locks::mutator_lock_) { Locks::mutator_lock_->AssertSharedHeld(Self()); DCHECK(IsRunnable()); // Don't work with raw objects in non-runnable states. return reinterpret_cast<ArtMethod*>(mid); } - jmethodID EncodeMethod(ArtMethod* method) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + jmethodID EncodeMethod(ArtMethod* method) const SHARED_REQUIRES(Locks::mutator_lock_) { Locks::mutator_lock_->AssertSharedHeld(Self()); DCHECK(IsRunnable()); // Don't work with raw objects in non-runnable states. return reinterpret_cast<jmethodID>(method); @@ -176,12 +176,12 @@ class ScopedObjectAccessAlreadyRunnable { protected: explicit ScopedObjectAccessAlreadyRunnable(JNIEnv* env) - LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) ALWAYS_INLINE + REQUIRES(!Locks::thread_suspend_count_lock_) ALWAYS_INLINE : self_(ThreadForEnv(env)), env_(down_cast<JNIEnvExt*>(env)), vm_(env_->vm) { } explicit ScopedObjectAccessAlreadyRunnable(Thread* self) - LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) ALWAYS_INLINE + REQUIRES(!Locks::thread_suspend_count_lock_) ALWAYS_INLINE : self_(self), env_(down_cast<JNIEnvExt*>(self->GetJniEnv())), vm_(env_ != nullptr ? env_->vm : nullptr) { } @@ -220,14 +220,14 @@ class ScopedObjectAccessAlreadyRunnable { class ScopedObjectAccessUnchecked : public ScopedObjectAccessAlreadyRunnable { public: explicit ScopedObjectAccessUnchecked(JNIEnv* env) - LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) ALWAYS_INLINE + REQUIRES(!Locks::thread_suspend_count_lock_) ALWAYS_INLINE : ScopedObjectAccessAlreadyRunnable(env), tsc_(Self(), kRunnable) { Self()->VerifyStack(); Locks::mutator_lock_->AssertSharedHeld(Self()); } explicit ScopedObjectAccessUnchecked(Thread* self) - LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) ALWAYS_INLINE + REQUIRES(!Locks::thread_suspend_count_lock_) ALWAYS_INLINE : ScopedObjectAccessAlreadyRunnable(self), tsc_(self, kRunnable) { Self()->VerifyStack(); Locks::mutator_lock_->AssertSharedHeld(Self()); @@ -250,13 +250,13 @@ class ScopedObjectAccessUnchecked : public ScopedObjectAccessAlreadyRunnable { class ScopedObjectAccess : public ScopedObjectAccessUnchecked { public: explicit ScopedObjectAccess(JNIEnv* env) - LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) + REQUIRES(!Locks::thread_suspend_count_lock_) SHARED_LOCK_FUNCTION(Locks::mutator_lock_) ALWAYS_INLINE : ScopedObjectAccessUnchecked(env) { } explicit ScopedObjectAccess(Thread* self) - LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) + REQUIRES(!Locks::thread_suspend_count_lock_) SHARED_LOCK_FUNCTION(Locks::mutator_lock_) ALWAYS_INLINE : ScopedObjectAccessUnchecked(self) { } diff --git a/runtime/signal_catcher.cc b/runtime/signal_catcher.cc index 9f8c55c980..6cb795061d 100644 --- a/runtime/signal_catcher.cc +++ b/runtime/signal_catcher.cc @@ -133,8 +133,11 @@ void SignalCatcher::HandleSigQuit() { DumpCmdLine(os); - // Note: The string "ABI:" is chosen to match the format used by debuggerd. 
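The signal_catcher.cc hunk ending here and continuing below replaces the bare ABI line in the SIGQUIT header with a pair of quoted, debuggerd-style lines, fed from the new Fingerprint runtime option stored on Runtime. With no fingerprint configured the code falls back to 'unknown', so on a 64-bit ARM device a non-debug build would begin its SIGQUIT dump roughly as follows (the ABI value is illustrative and depends on the device):

Build fingerprint: 'unknown'
ABI: 'arm64'
Build type: optimized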
- os << "ABI: " << GetInstructionSetString(runtime->GetInstructionSet()) << "\n"; + // Note: The strings "Build fingerprint:" and "ABI:" are chosen to match the format used by + // debuggerd. This allows, for example, the stack tool to work. + std::string fingerprint = runtime->GetFingerprint(); + os << "Build fingerprint: '" << (fingerprint.empty() ? "unknown" : fingerprint) << "'\n"; + os << "ABI: '" << GetInstructionSetString(runtime->GetInstructionSet()) << "'\n"; os << "Build type: " << (kIsDebugBuild ? "debug" : "optimized") << "\n"; diff --git a/runtime/signal_catcher.h b/runtime/signal_catcher.h index 43bbef48ca..de6a212df4 100644 --- a/runtime/signal_catcher.h +++ b/runtime/signal_catcher.h @@ -35,19 +35,19 @@ class SignalCatcher { explicit SignalCatcher(const std::string& stack_trace_file); ~SignalCatcher(); - void HandleSigQuit() LOCKS_EXCLUDED(Locks::mutator_lock_, - Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_); + void HandleSigQuit() REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, + !Locks::thread_suspend_count_lock_); private: - static void* Run(void* arg); + // NO_THREAD_SAFETY_ANALYSIS for static function calling into member function with excludes lock. + static void* Run(void* arg) NO_THREAD_SAFETY_ANALYSIS; void HandleSigUsr1(); void Output(const std::string& s); - void SetHaltFlag(bool new_value); - bool ShouldHalt(); - int WaitForSignal(Thread* self, SignalSet& signals); + void SetHaltFlag(bool new_value) REQUIRES(!lock_); + bool ShouldHalt() REQUIRES(!lock_); + int WaitForSignal(Thread* self, SignalSet& signals) REQUIRES(!lock_); std::string stack_trace_file_; diff --git a/runtime/stack.cc b/runtime/stack.cc index fede91c94f..2916eaaf5e 100644 --- a/runtime/stack.cc +++ b/runtime/stack.cc @@ -19,6 +19,7 @@ #include "arch/context.h" #include "art_method-inl.h" #include "base/hex_dump.h" +#include "base/out.h" #include "entrypoints/entrypoint_utils-inl.h" #include "entrypoints/runtime_asm_entrypoints.h" #include "gc_map.h" @@ -150,7 +151,7 @@ uint32_t StackVisitor::GetDexPc(bool abort_on_failure) const { } extern "C" mirror::Object* artQuickGetProxyThisObject(ArtMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); mirror::Object* StackVisitor::GetThisObject() const { DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), sizeof(void*)); @@ -180,7 +181,7 @@ mirror::Object* StackVisitor::GetThisObject() const { } else { uint16_t reg = code_item->registers_size_ - code_item->ins_size_; uint32_t value = 0; - bool success = GetVReg(m, reg, kReferenceVReg, &value); + bool success = GetVReg(m, reg, kReferenceVReg, outof(value)); // We currently always guarantee the `this` object is live throughout the method. CHECK(success) << "Failed to read the this object in " << PrettyMethod(m); return reinterpret_cast<mirror::Object*>(value); @@ -375,8 +376,8 @@ bool StackVisitor::GetVRegPairFromQuickCode(ArtMethod* m, uint16_t vreg, VRegKin QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer); uint32_t vmap_offset_lo, vmap_offset_hi; // TODO: IsInContext stops before spotting floating point registers. - if (vmap_table.IsInContext(vreg, kind_lo, &vmap_offset_lo) && - vmap_table.IsInContext(vreg + 1, kind_hi, &vmap_offset_hi)) { + if (vmap_table.IsInContext(vreg, kind_lo, outof(vmap_offset_lo)) && + vmap_table.IsInContext(vreg + 1, kind_hi, outof(vmap_offset_hi))) { bool is_float = (kind_lo == kDoubleLoVReg); uint32_t spill_mask = is_float ? 
frame_info.FpSpillMask() : frame_info.CoreSpillMask(); uint32_t reg_lo = vmap_table.ComputeRegister(spill_mask, vmap_offset_lo, kind_lo); @@ -399,8 +400,8 @@ bool StackVisitor::GetVRegPairFromOptimizedCode(ArtMethod* m, uint16_t vreg, uint64_t* val) const { uint32_t low_32bits; uint32_t high_32bits; - bool success = GetVRegFromOptimizedCode(m, vreg, kind_lo, &low_32bits); - success &= GetVRegFromOptimizedCode(m, vreg + 1, kind_hi, &high_32bits); + bool success = GetVRegFromOptimizedCode(m, vreg, kind_lo, outof(low_32bits)); + success &= GetVRegFromOptimizedCode(m, vreg + 1, kind_hi, outof(high_32bits)); if (success) { *val = (static_cast<uint64_t>(high_32bits) << 32) | static_cast<uint64_t>(low_32bits); } @@ -452,7 +453,7 @@ bool StackVisitor::SetVRegFromQuickCode(ArtMethod* m, uint16_t vreg, uint32_t ne QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer); uint32_t vmap_offset; // TODO: IsInContext stops before spotting floating point registers. - if (vmap_table.IsInContext(vreg, kind, &vmap_offset)) { + if (vmap_table.IsInContext(vreg, kind, outof(vmap_offset))) { bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg); uint32_t spill_mask = is_float ? frame_info.FpSpillMask() : frame_info.CoreSpillMask(); uint32_t reg = vmap_table.ComputeRegister(spill_mask, vmap_offset, kind); @@ -532,8 +533,8 @@ bool StackVisitor::SetVRegPairFromQuickCode( QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer); uint32_t vmap_offset_lo, vmap_offset_hi; // TODO: IsInContext stops before spotting floating point registers. - if (vmap_table.IsInContext(vreg, kind_lo, &vmap_offset_lo) && - vmap_table.IsInContext(vreg + 1, kind_hi, &vmap_offset_hi)) { + if (vmap_table.IsInContext(vreg, kind_lo, outof(vmap_offset_lo)) && + vmap_table.IsInContext(vreg + 1, kind_hi, outof(vmap_offset_hi))) { bool is_float = (kind_lo == kDoubleLoVReg); uint32_t spill_mask = is_float ? frame_info.FpSpillMask() : frame_info.CoreSpillMask(); uint32_t reg_lo = vmap_table.ComputeRegister(spill_mask, vmap_offset_lo, kind_lo); @@ -655,7 +656,7 @@ bool StackVisitor::GetNextMethodAndDexPc(ArtMethod** next_method, uint32_t* next next_dex_pc_(0) { } - bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { if (found_frame_) { ArtMethod* method = GetMethod(); if (method != nullptr && !method->IsRuntimeMethod()) { @@ -688,7 +689,7 @@ void StackVisitor::DescribeStack(Thread* thread) { explicit DescribeStackVisitor(Thread* thread_in) : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {} - bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { LOG(INFO) << "Frame Id=" << GetFrameId() << " " << DescribeLocation(); return true; } diff --git a/runtime/stack.h b/runtime/stack.h index d60714f7a3..8023de1222 100644 --- a/runtime/stack.h +++ b/runtime/stack.h @@ -155,7 +155,7 @@ class ShadowFrame { // If this returns non-null then this does not mean the vreg is currently a reference // on non-moving collectors. Check that the raw reg with GetVReg is equal to this if not certain. 
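Several stack.cc call sites in this hunk now pass outof(value) where they previously passed &value, and stack.cc gains an include of base/out.h, whose contents are not part of this diff. A minimal sketch of the kind of out-parameter wrapper that suggests; apart from the outof name taken from the call sites, the class layout and the pointer conversion below are guesses, not the real runtime/base/out.h:

#include <cstdint>

// Hypothetical wrapper; the real runtime/base/out.h may look quite different.
template <typename T>
class out {
 public:
  explicit out(T& param) : param_(&param) {}
  T& operator*() const { return *param_; }
  // Implicit conversion so callees that still take a plain T* (as the GetVReg
  // declarations in stack.h do) accept outof(value) unchanged; an assumption.
  operator T*() const { return param_; }
 private:
  T* param_;
};

// Call-site helper: outof(local) reads as "local is written by the callee".
template <typename T>
out<T> outof(T& param) {
  return out<T>(param);
}

// Example callee keeping a plain pointer out-parameter (illustrative only).
bool ReadCounter(int source, uint32_t* val) {
  if (source < 0) {
    return false;  // Nothing is written on failure.
  }
  *val = static_cast<uint32_t>(source);
  return true;
}

// Usage mirroring the pattern at the call sites:
//   uint32_t value = 0;
//   bool ok = ReadCounter(7, outof(value));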
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - mirror::Object* GetVRegReference(size_t i) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Object* GetVRegReference(size_t i) const SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK_LT(i, NumberOfVRegs()); mirror::Object* ref; if (HasReferenceArray()) { @@ -229,7 +229,7 @@ class ShadowFrame { } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - void SetVRegReference(size_t i, mirror::Object* val) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetVRegReference(size_t i, mirror::Object* val) SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK_LT(i, NumberOfVRegs()); if (kVerifyFlags & kVerifyWrites) { VerifyObject(val); @@ -244,14 +244,14 @@ class ShadowFrame { } } - ArtMethod* GetMethod() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ArtMethod* GetMethod() const SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(method_ != nullptr); return method_; } - mirror::Object* GetThisObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::Object* GetThisObject() const SHARED_REQUIRES(Locks::mutator_lock_); - mirror::Object* GetThisObject(uint16_t num_ins) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::Object* GetThisObject(uint16_t num_ins) const SHARED_REQUIRES(Locks::mutator_lock_); bool Contains(StackReference<mirror::Object>* shadow_frame_entry_obj) const { if (HasReferenceArray()) { @@ -333,7 +333,7 @@ class JavaFrameRootInfo : public RootInfo { : RootInfo(kRootJavaFrame, thread_id), stack_visitor_(stack_visitor), vreg_(vreg) { } virtual void Describe(std::ostream& os) const OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); private: const StackVisitor* const stack_visitor_; @@ -410,7 +410,7 @@ class PACKED(4) ManagedStack { return OFFSETOF_MEMBER(ManagedStack, top_shadow_frame_); } - size_t NumJniShadowFrameReferences() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + size_t NumJniShadowFrameReferences() const SHARED_REQUIRES(Locks::mutator_lock_); bool ShadowFramesContain(StackReference<mirror::Object>* shadow_frame_entry) const; @@ -431,31 +431,31 @@ class StackVisitor { protected: StackVisitor(Thread* thread, Context* context, StackWalkKind walk_kind) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); public: virtual ~StackVisitor() {} // Return 'true' if we should continue to visit more frames, 'false' to stop. 
- virtual bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0; + virtual bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) = 0; void WalkStack(bool include_transitions = false) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - ArtMethod* GetMethod() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ArtMethod* GetMethod() const SHARED_REQUIRES(Locks::mutator_lock_); bool IsShadowFrame() const { return cur_shadow_frame_ != nullptr; } - uint32_t GetDexPc(bool abort_on_failure = true) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + uint32_t GetDexPc(bool abort_on_failure = true) const SHARED_REQUIRES(Locks::mutator_lock_); - mirror::Object* GetThisObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::Object* GetThisObject() const SHARED_REQUIRES(Locks::mutator_lock_); - size_t GetNativePcOffset() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + size_t GetNativePcOffset() const SHARED_REQUIRES(Locks::mutator_lock_); uintptr_t* CalleeSaveAddress(int num, size_t frame_size) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // Callee saves are held at the top of the frame DCHECK(GetMethod() != nullptr); uint8_t* save_addr = @@ -467,46 +467,46 @@ class StackVisitor { } // Returns the height of the stack in the managed stack frames, including transitions. - size_t GetFrameHeight() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + size_t GetFrameHeight() SHARED_REQUIRES(Locks::mutator_lock_) { return GetNumFrames() - cur_depth_ - 1; } // Returns a frame ID for JDWP use, starting from 1. - size_t GetFrameId() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + size_t GetFrameId() SHARED_REQUIRES(Locks::mutator_lock_) { return GetFrameHeight() + 1; } - size_t GetNumFrames() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + size_t GetNumFrames() SHARED_REQUIRES(Locks::mutator_lock_) { if (num_frames_ == 0) { num_frames_ = ComputeNumFrames(thread_, walk_kind_); } return num_frames_; } - size_t GetFrameDepth() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + size_t GetFrameDepth() SHARED_REQUIRES(Locks::mutator_lock_) { return cur_depth_; } // Get the method and dex pc immediately after the one that's currently being visited. 
bool GetNextMethodAndDexPc(ArtMethod** next_method, uint32_t* next_dex_pc) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool IsReferenceVReg(ArtMethod* m, uint16_t vreg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool GetVReg(ArtMethod* m, uint16_t vreg, VRegKind kind, uint32_t* val) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool GetVRegPair(ArtMethod* m, uint16_t vreg, VRegKind kind_lo, VRegKind kind_hi, uint64_t* val) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool SetVReg(ArtMethod* m, uint16_t vreg, uint32_t new_value, VRegKind kind) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool SetVRegPair(ArtMethod* m, uint16_t vreg, uint64_t new_value, VRegKind kind_lo, VRegKind kind_hi) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); uintptr_t* GetGPRAddress(uint32_t reg) const; @@ -522,9 +522,9 @@ class StackVisitor { return reinterpret_cast<uint32_t*>(vreg_addr); } - uintptr_t GetReturnPc() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + uintptr_t GetReturnPc() const SHARED_REQUIRES(Locks::mutator_lock_); - void SetReturnPc(uintptr_t new_ret_pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetReturnPc(uintptr_t new_ret_pc) SHARED_REQUIRES(Locks::mutator_lock_); /* * Return sp-relative offset for a Dalvik virtual register, compiler @@ -606,17 +606,17 @@ class StackVisitor { return reinterpret_cast<HandleScope*>(reinterpret_cast<uintptr_t>(sp) + pointer_size); } - std::string DescribeLocation() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string DescribeLocation() const SHARED_REQUIRES(Locks::mutator_lock_); static size_t ComputeNumFrames(Thread* thread, StackWalkKind walk_kind) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - static void DescribeStack(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static void DescribeStack(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_); private: // Private constructor known in the case that num_frames_ has already been computed. StackVisitor(Thread* thread, Context* context, StackWalkKind walk_kind, size_t num_frames) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool IsAccessibleRegister(uint32_t reg, bool is_float) const { return is_float ? 
IsAccessibleFPR(reg) : IsAccessibleGPR(reg); @@ -644,40 +644,40 @@ class StackVisitor { bool GetVRegFromQuickCode(ArtMethod* m, uint16_t vreg, VRegKind kind, uint32_t* val) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKind kind, uint32_t* val) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool GetRegisterIfAccessible(uint32_t reg, VRegKind kind, uint32_t* val) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool GetVRegPairFromQuickCode(ArtMethod* m, uint16_t vreg, VRegKind kind_lo, VRegKind kind_hi, uint64_t* val) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool GetVRegPairFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKind kind_lo, VRegKind kind_hi, uint64_t* val) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool GetRegisterPairIfAccessible(uint32_t reg_lo, uint32_t reg_hi, VRegKind kind_lo, uint64_t* val) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool SetVRegFromQuickCode(ArtMethod* m, uint16_t vreg, uint32_t new_value, VRegKind kind) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool SetRegisterIfAccessible(uint32_t reg, uint32_t new_value, VRegKind kind) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool SetVRegPairFromQuickCode(ArtMethod* m, uint16_t vreg, uint64_t new_value, VRegKind kind_lo, VRegKind kind_hi) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool SetRegisterPairIfAccessible(uint32_t reg_lo, uint32_t reg_hi, uint64_t new_value, bool is_float) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - void SanityCheckFrame() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SanityCheckFrame() const SHARED_REQUIRES(Locks::mutator_lock_); - InlineInfo GetCurrentInlineInfo() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + InlineInfo GetCurrentInlineInfo() const SHARED_REQUIRES(Locks::mutator_lock_); Thread* const thread_; const StackWalkKind walk_kind_; diff --git a/runtime/thread.cc b/runtime/thread.cc index a2edfa3155..b3efad0742 100644 --- a/runtime/thread.cc +++ b/runtime/thread.cc @@ -1159,7 +1159,7 @@ void Thread::DumpState(std::ostream& os) const { struct StackDumpVisitor : public StackVisitor { StackDumpVisitor(std::ostream& os_in, Thread* thread_in, Context* context, bool can_allocate_in) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : StackVisitor(thread_in, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames), os(os_in), thread(thread_in), @@ -1175,7 +1175,7 @@ struct StackDumpVisitor : public StackVisitor { } } - bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod* m = GetMethod(); if (m->IsRuntimeMethod()) { return true; @@ -1223,7 +1223,7 @@ struct StackDumpVisitor : public StackVisitor { } static void DumpLockedObject(mirror::Object* o, void* context) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { std::ostream& os = *reinterpret_cast<std::ostream*>(context); os << " - locked "; if (o == nullptr) { @@ -1255,7 +1255,7 @@ struct StackDumpVisitor : public StackVisitor { }; 
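The visitors being annotated throughout these hunks (CatchBlockStackVisitor, DeoptimizeStackVisitor, StackDumpVisitor, CountStackDepthVisitor, and so on) share one shape: subclass StackVisitor, override VisitFrame(), return true to keep walking or false to stop early, and let WalkStack() drive the traversal. A generic sketch of that walker pattern, deliberately detached from ART's own types (all names below are illustrative):

#include <cstddef>
#include <string>
#include <vector>

// Generic walker sketch: returning false from VisitFrame() stops the walk,
// which is how the ART visitors bail out once they have what they need.
struct Frame {
  std::string method;
  size_t dex_pc;
};

class FrameVisitor {
 public:
  virtual ~FrameVisitor() {}
  virtual bool VisitFrame(const Frame& frame) = 0;  // false => stop walking.
};

void WalkStack(const std::vector<Frame>& stack, FrameVisitor* visitor) {
  for (const Frame& frame : stack) {
    if (!visitor->VisitFrame(frame)) {
      return;  // Early exit requested by the visitor.
    }
  }
}

// Counts frames up to a limit, in the spirit of CountStackDepthVisitor.
class CountingVisitor : public FrameVisitor {
 public:
  explicit CountingVisitor(size_t limit) : limit_(limit) {}
  bool VisitFrame(const Frame& /*frame*/) override {
    ++count_;
    return count_ < limit_;
  }
  size_t count() const { return count_; }
 private:
  size_t limit_;
  size_t count_ = 0;
};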
static bool ShouldShowNativeStack(const Thread* thread) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ThreadState state = thread->GetState(); // In native code somewhere in the VM (one of the kWaitingFor* states)? That's interesting. @@ -1760,11 +1760,11 @@ void Thread::SetClassLoaderOverride(jobject class_loader_override) { class CountStackDepthVisitor : public StackVisitor { public: explicit CountStackDepthVisitor(Thread* thread) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames), depth_(0), skip_depth_(0), skipping_(true) {} - bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) { // We want to skip frames up to and including the exception's constructor. // Note we also skip the frame if it doesn't have a method (namely the callee // save frame) @@ -1808,29 +1808,26 @@ class BuildInternalStackTraceVisitor : public StackVisitor { trace_(nullptr), pointer_size_(Runtime::Current()->GetClassLinker()->GetImagePointerSize()) {} - bool Init(int depth) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool Init(int depth) SHARED_REQUIRES(Locks::mutator_lock_) ACQUIRE(Roles::uninterruptible_) { // Allocate method trace with format [method pointers][pcs]. auto* cl = Runtime::Current()->GetClassLinker(); trace_ = cl->AllocPointerArray(self_, depth * 2); + const char* last_no_suspend_cause = + self_->StartAssertNoThreadSuspension("Building internal stack trace"); if (trace_ == nullptr) { self_->AssertPendingOOMException(); return false; } // If We are called from native, use non-transactional mode. - const char* last_no_suspend_cause = - self_->StartAssertNoThreadSuspension("Building internal stack trace"); CHECK(last_no_suspend_cause == nullptr) << last_no_suspend_cause; return true; } - virtual ~BuildInternalStackTraceVisitor() { - if (trace_ != nullptr) { - self_->EndAssertNoThreadSuspension(nullptr); - } + virtual ~BuildInternalStackTraceVisitor() RELEASE(Roles::uninterruptible_) { + self_->EndAssertNoThreadSuspension(nullptr); } - bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) { if (trace_ == nullptr) { return true; // We're probably trying to fillInStackTrace for an OutOfMemoryError. } @@ -2012,7 +2009,7 @@ void Thread::ThrowNewException(const char* exception_class_descriptor, } static mirror::ClassLoader* GetCurrentClassLoader(Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod* method = self->GetCurrentMethod(nullptr); return method != nullptr ? 
method->GetDeclaringClass()->GetClassLoader() @@ -2142,7 +2139,7 @@ void Thread::DumpFromGdb() const { std::string str(ss.str()); // log to stderr for debugging command line processes std::cerr << str; -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ // log to logcat for debugging frameworks processes LOG(INFO) << str; #endif @@ -2307,6 +2304,7 @@ void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset) { QUICK_ENTRY_POINT_INFO(pNewStringFromStringBuffer) QUICK_ENTRY_POINT_INFO(pNewStringFromStringBuilder) QUICK_ENTRY_POINT_INFO(pReadBarrierJni) + QUICK_ENTRY_POINT_INFO(pReadBarrierSlow) #undef QUICK_ENTRY_POINT_INFO os << offset; @@ -2345,13 +2343,13 @@ Context* Thread::GetLongJumpContext() { // so we don't abort in a special situation (thinlocked monitor) when dumping the Java stack. struct CurrentMethodVisitor FINAL : public StackVisitor { CurrentMethodVisitor(Thread* thread, Context* context, bool abort_on_error) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames), this_object_(nullptr), method_(nullptr), dex_pc_(0), abort_on_error_(abort_on_error) {} - bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod* m = GetMethod(); if (m->IsRuntimeMethod()) { // Continue if this is a runtime method. @@ -2391,13 +2389,13 @@ template <typename RootVisitor> class ReferenceMapVisitor : public StackVisitor { public: ReferenceMapVisitor(Thread* thread, Context* context, RootVisitor& visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) // We are visiting the references in compiled frames, so we do not need // to know the inlined frames. 
: StackVisitor(thread, context, StackVisitor::StackWalkKind::kSkipInlinedFrames), visitor_(visitor) {} - bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) { if (false) { LOG(INFO) << "Visiting stack roots in " << PrettyMethod(GetMethod()) << StringPrintf("@ PC:%04x", GetDexPc()); @@ -2411,7 +2409,7 @@ class ReferenceMapVisitor : public StackVisitor { return true; } - void VisitShadowFrame(ShadowFrame* shadow_frame) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void VisitShadowFrame(ShadowFrame* shadow_frame) SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod* m = shadow_frame->GetMethod(); DCHECK(m != nullptr); size_t num_regs = shadow_frame->NumberOfVRegs(); @@ -2453,7 +2451,7 @@ class ReferenceMapVisitor : public StackVisitor { } private: - void VisitQuickFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void VisitQuickFrame() SHARED_REQUIRES(Locks::mutator_lock_) { auto* cur_quick_frame = GetCurrentQuickFrame(); DCHECK(cur_quick_frame != nullptr); auto* m = *cur_quick_frame; @@ -2557,7 +2555,7 @@ class RootCallbackVisitor { RootCallbackVisitor(RootVisitor* visitor, uint32_t tid) : visitor_(visitor), tid_(tid) {} void operator()(mirror::Object** obj, size_t vreg, const StackVisitor* stack_visitor) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { visitor_->VisitRoot(obj, JavaFrameRootInfo(tid_, stack_visitor, vreg)); } @@ -2620,7 +2618,7 @@ void Thread::VisitRoots(RootVisitor* visitor) { class VerifyRootVisitor : public SingleRootVisitor { public: void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { VerifyObject(root); } }; diff --git a/runtime/thread.h b/runtime/thread.h index cf87f22ad0..e4ad7f36db 100644 --- a/runtime/thread.h +++ b/runtime/thread.h @@ -162,20 +162,18 @@ class Thread { static Thread* Current(); // On a runnable thread, check for pending thread suspension request and handle if pending. - void AllowThreadSuspension() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void AllowThreadSuspension() SHARED_REQUIRES(Locks::mutator_lock_); // Process pending thread suspension request and handle if pending. - void CheckSuspend() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void CheckSuspend() SHARED_REQUIRES(Locks::mutator_lock_); static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts, mirror::Object* thread_peer) - EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_) - LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts, jobject thread) - EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_) - LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Translates 172 to pAllocArrayFromCode and so on. template<size_t size_of_pointers> @@ -186,18 +184,18 @@ class Thread { // Dumps the detailed thread state and the thread stack (used for SIGQUIT). 
void Dump(std::ostream& os) const - LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::thread_suspend_count_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); void DumpJavaStack(std::ostream& os) const - LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::thread_suspend_count_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Dumps the SIGQUIT per-thread header. 'thread' can be null for a non-attached thread, in which // case we use 'tid' to identify the thread, and we'll include as much information as we can. static void DumpState(std::ostream& os, const Thread* thread, pid_t tid) - LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::thread_suspend_count_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); ThreadState GetState() const { DCHECK_GE(tls32_.state_and_flags.as_struct.state, kTerminated); @@ -207,11 +205,11 @@ class Thread { ThreadState SetState(ThreadState new_state); - int GetSuspendCount() const EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_) { + int GetSuspendCount() const REQUIRES(Locks::thread_suspend_count_lock_) { return tls32_.suspend_count; } - int GetDebugSuspendCount() const EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_) { + int GetDebugSuspendCount() const REQUIRES(Locks::thread_suspend_count_lock_) { return tls32_.debug_suspend_count; } @@ -223,10 +221,10 @@ class Thread { } bool ModifySuspendCount(Thread* self, int delta, AtomicInteger* suspend_barrier, bool for_debugger) - EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_); + REQUIRES(Locks::thread_suspend_count_lock_); bool RequestCheckpoint(Closure* function) - EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_); + REQUIRES(Locks::thread_suspend_count_lock_); void SetFlipFunction(Closure* function); Closure* GetFlipFunction(); @@ -243,24 +241,25 @@ class Thread { // Called when thread detected that the thread_suspend_count_ was non-zero. Gives up share of // mutator_lock_ and waits until it is resumed and thread_suspend_count_ is zero. void FullSuspendCheck() - LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::thread_suspend_count_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Transition from non-runnable to runnable state acquiring share on mutator_lock_. ThreadState TransitionFromSuspendedToRunnable() - LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) + REQUIRES(!Locks::thread_suspend_count_lock_) SHARED_LOCK_FUNCTION(Locks::mutator_lock_) ALWAYS_INLINE; // Transition from runnable into a state where mutator privileges are denied. Releases share of // mutator lock. void TransitionFromRunnableToSuspended(ThreadState new_state) - LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) + REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_) UNLOCK_FUNCTION(Locks::mutator_lock_) ALWAYS_INLINE; // Once called thread suspension will cause an assertion failure. - const char* StartAssertNoThreadSuspension(const char* cause) { + const char* StartAssertNoThreadSuspension(const char* cause) ACQUIRE(Roles::uninterruptible_) { + Roles::uninterruptible_.Acquire(); // No-op. if (kIsDebugBuild) { CHECK(cause != nullptr); const char* previous_cause = tlsPtr_.last_no_thread_suspension_cause; @@ -273,13 +272,14 @@ class Thread { } // End region where no thread suspension is expected. 
- void EndAssertNoThreadSuspension(const char* old_cause) { + void EndAssertNoThreadSuspension(const char* old_cause) RELEASE(Roles::uninterruptible_) { if (kIsDebugBuild) { CHECK(old_cause != nullptr || tls32_.no_thread_suspension == 1); CHECK_GT(tls32_.no_thread_suspension, 0U); tls32_.no_thread_suspension--; tlsPtr_.last_no_thread_suspension_cause = old_cause; } + Roles::uninterruptible_.Release(); // No-op. } void AssertThreadSuspensionIsAllowable(bool check_locks = true) const; @@ -290,7 +290,7 @@ class Thread { size_t NumberOfHeldMutexes() const; - bool HoldsLock(mirror::Object*) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool HoldsLock(mirror::Object*) const SHARED_REQUIRES(Locks::mutator_lock_); /* * Changes the priority of this thread to match that of the java.lang.Thread object. @@ -318,19 +318,19 @@ class Thread { // Returns the java.lang.Thread's name, or null if this Thread* doesn't have a peer. mirror::String* GetThreadName(const ScopedObjectAccessAlreadyRunnable& ts) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Sets 'name' to the java.lang.Thread's name. This requires no transition to managed code, // allocation, or locking. void GetThreadName(std::string& name) const; // Sets the thread's name. - void SetThreadName(const char* name) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetThreadName(const char* name) SHARED_REQUIRES(Locks::mutator_lock_); // Returns the thread-specific CPU-time clock in microseconds or -1 if unavailable. uint64_t GetCpuMicroTime() const; - mirror::Object* GetPeer() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Object* GetPeer() const SHARED_REQUIRES(Locks::mutator_lock_) { CHECK(tlsPtr_.jpeer == nullptr); return tlsPtr_.opeer; } @@ -349,28 +349,28 @@ class Thread { return tlsPtr_.exception != nullptr; } - mirror::Throwable* GetException() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Throwable* GetException() const SHARED_REQUIRES(Locks::mutator_lock_) { return tlsPtr_.exception; } void AssertPendingException() const; - void AssertPendingOOMException() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void AssertPendingOOMException() const SHARED_REQUIRES(Locks::mutator_lock_); void AssertNoPendingException() const; void AssertNoPendingExceptionForNewException(const char* msg) const; void SetException(mirror::Throwable* new_exception) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { CHECK(new_exception != nullptr); // TODO: DCHECK(!IsExceptionPending()); tlsPtr_.exception = new_exception; } - void ClearException() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void ClearException() SHARED_REQUIRES(Locks::mutator_lock_) { tlsPtr_.exception = nullptr; } // Find catch block and perform long jump to appropriate exception handle - NO_RETURN void QuickDeliverException() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + NO_RETURN void QuickDeliverException() SHARED_REQUIRES(Locks::mutator_lock_); Context* GetLongJumpContext(); void ReleaseLongJumpContext(Context* context) { @@ -392,12 +392,12 @@ class Thread { // Get the current method and dex pc. If there are errors in retrieving the dex pc, this will // abort the runtime iff abort_on_error is true. 
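StartAssertNoThreadSuspension()/EndAssertNoThreadSuspension() now acquire and release Roles::uninterruptible_, a capability that exists purely for the analysis: the Acquire()/Release() calls are explicit no-ops. The sketch below uses made-up Fake* names to show the shape of that pattern, i.e. how an abstract role modeled as a capability lets the checker track regions where suspension is forbidden.

// Sketch with illustrative names; ART's Roles::uninterruptible_ follows this shape.
class __attribute__((capability("role"))) FakeRole {
 public:
  void Acquire() __attribute__((acquire_capability())) {}  // No-op at runtime.
  void Release() __attribute__((release_capability())) {}  // No-op at runtime.
};

struct FakeRoles {
  static FakeRole uninterruptible_;
};
FakeRole FakeRoles::uninterruptible_;

class FakeThread {
 public:
  // A function annotated as acquiring the role must be seen by the analysis to have
  // acquired it before returning; the no-op Acquire() call is what satisfies that,
  // mirroring the "Roles::uninterruptible_.Acquire(); // No-op." line in the hunk above.
  void StartAssertNoThreadSuspension()
      __attribute__((acquire_capability(FakeRoles::uninterruptible_))) {
    FakeRoles::uninterruptible_.Acquire();
  }
  void EndAssertNoThreadSuspension()
      __attribute__((release_capability(FakeRoles::uninterruptible_))) {
    FakeRoles::uninterruptible_.Release();
  }
  // Anything that may suspend the thread can then be annotated
  // REQUIRES(!Roles::uninterruptible_), as ThrowNewException() is further down.
};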
ArtMethod* GetCurrentMethod(uint32_t* dex_pc, bool abort_on_error = true) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Returns whether the given exception was thrown by the current Java method being executed // (Note that this includes native Java methods). bool IsExceptionThrownByCurrentMethod(mirror::Throwable* exception) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void SetTopOfStack(ArtMethod** top_method) { tlsPtr_.managed_stack.SetTopQuickFrame(top_method); @@ -414,23 +414,24 @@ class Thread { // If 'msg' is null, no detail message is set. void ThrowNewException(const char* exception_class_descriptor, const char* msg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); // If 'msg' is null, no detail message is set. An exception must be pending, and will be // used as the new exception's cause. void ThrowNewWrappedException(const char* exception_class_descriptor, const char* msg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); void ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...) __attribute__((format(printf, 3, 4))) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); void ThrowNewExceptionV(const char* exception_class_descriptor, const char* fmt, va_list ap) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); // OutOfMemoryError is special, because we need to pre-allocate an instance. // Only the GC should call this. - void ThrowOutOfMemoryError(const char* msg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void ThrowOutOfMemoryError(const char* msg) SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Roles::uninterruptible_); static void Startup(); static void FinishStartup(); @@ -442,50 +443,49 @@ class Thread { } // Convert a jobject into a Object* - mirror::Object* DecodeJObject(jobject obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::Object* DecodeJObject(jobject obj) const SHARED_REQUIRES(Locks::mutator_lock_); - mirror::Object* GetMonitorEnterObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Object* GetMonitorEnterObject() const SHARED_REQUIRES(Locks::mutator_lock_) { return tlsPtr_.monitor_enter_object; } - void SetMonitorEnterObject(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetMonitorEnterObject(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) { tlsPtr_.monitor_enter_object = obj; } // Implements java.lang.Thread.interrupted. - bool Interrupted() LOCKS_EXCLUDED(wait_mutex_); + bool Interrupted() REQUIRES(!*wait_mutex_); // Implements java.lang.Thread.isInterrupted. 
- bool IsInterrupted() LOCKS_EXCLUDED(wait_mutex_); - bool IsInterruptedLocked() EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) { + bool IsInterrupted() REQUIRES(!*wait_mutex_); + bool IsInterruptedLocked() REQUIRES(wait_mutex_) { return interrupted_; } - void Interrupt(Thread* self) LOCKS_EXCLUDED(wait_mutex_); - void SetInterruptedLocked(bool i) EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) { + void Interrupt(Thread* self) REQUIRES(!*wait_mutex_); + void SetInterruptedLocked(bool i) REQUIRES(wait_mutex_) { interrupted_ = i; } - void Notify() LOCKS_EXCLUDED(wait_mutex_); + void Notify() REQUIRES(!*wait_mutex_); private: - void NotifyLocked(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_); + void NotifyLocked(Thread* self) REQUIRES(wait_mutex_); public: Mutex* GetWaitMutex() const LOCK_RETURNED(wait_mutex_) { return wait_mutex_; } - ConditionVariable* GetWaitConditionVariable() const EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) { + ConditionVariable* GetWaitConditionVariable() const REQUIRES(wait_mutex_) { return wait_cond_; } - Monitor* GetWaitMonitor() const EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) { + Monitor* GetWaitMonitor() const REQUIRES(wait_mutex_) { return wait_monitor_; } - void SetWaitMonitor(Monitor* mon) EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) { + void SetWaitMonitor(Monitor* mon) REQUIRES(wait_mutex_) { wait_monitor_ = mon; } - // Waiter link-list support. Thread* GetWaitNext() const { return tlsPtr_.wait_next; @@ -505,7 +505,7 @@ class Thread { // and space efficient to compute than the StackTraceElement[]. template<bool kTransactionActive> jobject CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Convert an internal stack trace representation (returned by CreateInternalStackTrace) to a // StackTraceElement[]. If output_array is null, a new array is created, otherwise as many @@ -514,11 +514,11 @@ class Thread { static jobjectArray InternalStackTraceToStackTraceElementArray( const ScopedObjectAccessAlreadyRunnable& soa, jobject internal, jobjectArray output_array = nullptr, int* stack_depth = nullptr) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_); - ALWAYS_INLINE void VerifyStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ALWAYS_INLINE void VerifyStack() SHARED_REQUIRES(Locks::mutator_lock_); // // Offsets of various members of native Thread class, used by compiled code. @@ -649,7 +649,7 @@ class Thread { } // Set the stack end to that to be used during a stack overflow - void SetStackEndForStackOverflow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetStackEndForStackOverflow() SHARED_REQUIRES(Locks::mutator_lock_); // Set the stack end to that to be used during regular execution void ResetDefaultStackEnd() { @@ -712,7 +712,7 @@ class Thread { } // Number of references allocated in JNI ShadowFrames on this thread. - size_t NumJniShadowFrameReferences() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + size_t NumJniShadowFrameReferences() const SHARED_REQUIRES(Locks::mutator_lock_) { return tlsPtr_.managed_stack.NumJniShadowFrameReferences(); } @@ -720,7 +720,7 @@ class Thread { size_t NumHandleReferences(); // Number of references allocated in handle scopes & JNI shadow frames on this thread. 
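The wait_mutex_ annotations above show both directions of the new contract on a pointer member: Interrupted() must be entered without the mutex held and dereferences the Mutex* member in the annotation (!*wait_mutex_), while the *Locked() accessors must be entered with it held. A self-contained sketch of that pairing, with illustrative Fake* types standing in for ART's Mutex and Thread:

// Illustrative types only; the annotation pattern mirrors Thread::Interrupted() above.
class __attribute__((capability("mutex"))) FakeMutex {
 public:
  void Lock() __attribute__((acquire_capability())) {}
  void Unlock() __attribute__((release_capability())) {}
};

class FakeWaiter {
 public:
  // Caller must NOT already hold the wait mutex; Clang checks this transitively
  // when -Wthread-safety-negative is enabled.
  bool Interrupted() __attribute__((requires_capability(!*wait_mutex_))) {
    wait_mutex_->Lock();
    const bool result = IsInterruptedLocked();
    wait_mutex_->Unlock();
    return result;
  }
  // Caller must already hold it; note the dereference because the member is a pointer.
  bool IsInterruptedLocked() const __attribute__((requires_capability(*wait_mutex_))) {
    return interrupted_;
  }

 private:
  FakeMutex* const wait_mutex_ = new FakeMutex;  // Sketch only: never freed.
  bool interrupted_ __attribute__((guarded_by(*wait_mutex_))) = false;
};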
- size_t NumStackReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + size_t NumStackReferences() SHARED_REQUIRES(Locks::mutator_lock_) { return NumHandleReferences() + NumJniShadowFrameReferences(); } @@ -728,7 +728,7 @@ class Thread { bool HandleScopeContains(jobject obj) const; void HandleScopeVisitRoots(RootVisitor* visitor, uint32_t thread_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); HandleScope* GetTopHandleScope() { return tlsPtr_.top_handle_scope; @@ -867,10 +867,10 @@ class Thread { void RunCheckpointFunction(); bool PassActiveSuspendBarriers(Thread* self) - LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_); + REQUIRES(!Locks::thread_suspend_count_lock_); void ClearSuspendBarrier(AtomicInteger* target) - EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_); + REQUIRES(Locks::thread_suspend_count_lock_); bool ReadFlag(ThreadFlag flag) const { return (tls32_.state_and_flags.as_struct.flags & flag) != 0; @@ -920,7 +920,7 @@ class Thread { // Push an object onto the allocation stack. bool PushOnThreadLocalAllocationStack(mirror::Object* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Set the thread local allocation pointers to the given pointers. void SetThreadLocalAllocationStack(StackReference<mirror::Object>* start, @@ -974,8 +974,7 @@ class Thread { private: explicit Thread(bool daemon); - ~Thread() LOCKS_EXCLUDED(Locks::mutator_lock_, - Locks::thread_suspend_count_lock_); + ~Thread() REQUIRES(!Locks::mutator_lock_, !Locks::thread_suspend_count_lock_); void Destroy(); void CreatePeer(const char* name, bool as_daemon, jobject thread_group); @@ -983,7 +982,7 @@ class Thread { template<bool kTransactionActive> void InitPeer(ScopedObjectAccess& soa, jboolean thread_is_daemon, jobject thread_group, jobject thread_name, jint thread_priority) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Avoid use, callers should use SetState. Used only by SignalCatcher::HandleSigQuit, ~Thread and // Dbg::Disconnected. @@ -998,23 +997,23 @@ class Thread { return old_state; } - void VerifyStackImpl() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void VerifyStackImpl() SHARED_REQUIRES(Locks::mutator_lock_); - void DumpState(std::ostream& os) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void DumpState(std::ostream& os) const SHARED_REQUIRES(Locks::mutator_lock_); void DumpStack(std::ostream& os) const - LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::thread_suspend_count_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Out-of-line conveniences for debugging in gdb. static Thread* CurrentFromGdb(); // Like Thread::Current. // Like Thread::Dump(std::cerr). - void DumpFromGdb() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void DumpFromGdb() const SHARED_REQUIRES(Locks::mutator_lock_); static void* CreateCallback(void* arg); void HandleUncaughtExceptions(ScopedObjectAccess& soa) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void RemoveFromThreadGroup(ScopedObjectAccess& soa) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); + void RemoveFromThreadGroup(ScopedObjectAccess& soa) SHARED_REQUIRES(Locks::mutator_lock_); // Initialize a thread. // @@ -1024,7 +1023,7 @@ class Thread { // create a JNIEnvExt on its own (and potentially fail at that stage, indicated by a return value // of false). 
bool Init(ThreadList*, JavaVMExt*, JNIEnvExt* jni_env_ext = nullptr) - EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_); + REQUIRES(Locks::runtime_shutdown_lock_); void InitCardTable(); void InitCpu(); void CleanupCpu(); @@ -1346,12 +1345,12 @@ class Thread { DISALLOW_COPY_AND_ASSIGN(Thread); }; -class ScopedAssertNoThreadSuspension { +class SCOPED_CAPABILITY ScopedAssertNoThreadSuspension { public: - ScopedAssertNoThreadSuspension(Thread* self, const char* cause) + ScopedAssertNoThreadSuspension(Thread* self, const char* cause) ACQUIRE(Roles::uninterruptible_) : self_(self), old_cause_(self->StartAssertNoThreadSuspension(cause)) { } - ~ScopedAssertNoThreadSuspension() { + ~ScopedAssertNoThreadSuspension() RELEASE(Roles::uninterruptible_) { self_->EndAssertNoThreadSuspension(old_cause_); } Thread* Self() { diff --git a/runtime/thread_linux.cc b/runtime/thread_linux.cc index 9d54eba347..9563b994f8 100644 --- a/runtime/thread_linux.cc +++ b/runtime/thread_linux.cc @@ -44,7 +44,7 @@ static constexpr int kHostAltSigStackSize = void Thread::SetUpAlternateSignalStack() { // Create and set an alternate signal stack. -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ LOG(FATAL) << "Invalid use of alternate signal stack on Android"; #endif stack_t ss; diff --git a/runtime/thread_list.h b/runtime/thread_list.h index edd1e05d4a..4c50181891 100644 --- a/runtime/thread_list.h +++ b/runtime/thread_list.h @@ -46,27 +46,25 @@ class ThreadList { ~ThreadList(); void DumpForSigQuit(std::ostream& os) - LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::mutator_lock_); + REQUIRES(!Locks::thread_list_lock_, !Locks::mutator_lock_); // For thread suspend timeout dumps. void Dump(std::ostream& os) - LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::thread_suspend_count_lock_); + REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_); pid_t GetLockOwner(); // For SignalCatcher. // Thread suspension support. void ResumeAll() UNLOCK_FUNCTION(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_); + REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_); void Resume(Thread* thread, bool for_debugger = false) - LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_); + REQUIRES(!Locks::thread_suspend_count_lock_); // Suspends all threads and gets exclusive access to the mutator_lock_. // If long suspend is true, then other people who try to suspend will never timeout. Long suspend // is currenly used for hprof since large heaps take a long time. void SuspendAll(const char* cause, bool long_suspend = false) EXCLUSIVE_LOCK_FUNCTION(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_); + REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_); // Suspend a thread using a peer, typically used by the debugger. Returns the thread on success, @@ -76,18 +74,16 @@ class ThreadList { // is set to true. Thread* SuspendThreadByPeer(jobject peer, bool request_suspension, bool debug_suspension, bool* timed_out) - LOCKS_EXCLUDED(Locks::mutator_lock_, - Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_); + REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, + !Locks::thread_suspend_count_lock_); // Suspend a thread using its thread id, typically used by lock/monitor inflation. Returns the // thread on success else null. The thread id is used to identify the thread to avoid races with // the thread terminating. 
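ScopedAssertNoThreadSuspension is now a SCOPED_CAPABILITY, so the analysis treats construction as acquiring Roles::uninterruptible_ and destruction as releasing it, and the RAII object carries the annotation for the whole scope. Continuing the FakeRole/FakeThread sketch from a few hunks above (names remain illustrative):

// Requires the FakeRole/FakeRoles/FakeThread sketch shown earlier in this section.
class __attribute__((scoped_lockable)) FakeScopedNoSuspend {
 public:
  explicit FakeScopedNoSuspend(FakeThread* self)
      __attribute__((acquire_capability(FakeRoles::uninterruptible_)))
      : self_(self) {
    self_->StartAssertNoThreadSuspension();  // Analysis: role acquired here.
  }
  ~FakeScopedNoSuspend()
      __attribute__((release_capability(FakeRoles::uninterruptible_))) {
    self_->EndAssertNoThreadSuspension();    // Analysis: role released here.
  }

 private:
  FakeThread* const self_;
};

// Usage: inside such a scope, calling anything marked REQUIRES(!Roles::uninterruptible_)
// (for example an allocation path that may suspend) is reported at compile time.
void FakeNoSuspendRegion(FakeThread* self)
    __attribute__((requires_capability(!FakeRoles::uninterruptible_))) {
  FakeScopedNoSuspend no_suspend(self);
  // ... work that must not suspend ...
}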
Note that as thread ids are recycled this may not suspend the expected // thread, that may be terminating. If the suspension times out then *timeout is set to true. Thread* SuspendThreadByThreadId(uint32_t thread_id, bool debug_suspension, bool* timed_out) - LOCKS_EXCLUDED(Locks::mutator_lock_, - Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_); + REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, + !Locks::thread_suspend_count_lock_); // Find an already suspended thread (or self) by its id. Thread* FindThreadByThreadId(uint32_t thin_lock_id); @@ -95,87 +91,78 @@ class ThreadList { // Run a checkpoint on threads, running threads are not suspended but run the checkpoint inside // of the suspend check. Returns how many checkpoints we should expect to run. size_t RunCheckpoint(Closure* checkpoint_function) - LOCKS_EXCLUDED(Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_); + REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_); size_t RunCheckpointOnRunnableThreads(Closure* checkpoint_function) - LOCKS_EXCLUDED(Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_); + REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_); // Flip thread roots from from-space refs to to-space refs. Used by // the concurrent copying collector. size_t FlipThreadRoots(Closure* thread_flip_visitor, Closure* flip_callback, gc::collector::GarbageCollector* collector) - LOCKS_EXCLUDED(Locks::mutator_lock_, - Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_); + REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, + !Locks::thread_suspend_count_lock_); // Suspends all threads void SuspendAllForDebugger() - LOCKS_EXCLUDED(Locks::mutator_lock_, - Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_); + REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, + !Locks::thread_suspend_count_lock_); void SuspendSelfForDebugger() - LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_); + REQUIRES(!Locks::thread_suspend_count_lock_); // Resume all threads void ResumeAllForDebugger() - LOCKS_EXCLUDED(Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_); + REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_); void UndoDebuggerSuspensions() - LOCKS_EXCLUDED(Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_); + REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_); // Iterates over all the threads. void ForEach(void (*callback)(Thread*, void*), void* context) - EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_); + REQUIRES(Locks::thread_list_lock_); // Add/remove current thread from list. void Register(Thread* self) - EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_) - LOCKS_EXCLUDED(Locks::mutator_lock_, Locks::thread_list_lock_); - void Unregister(Thread* self) LOCKS_EXCLUDED(Locks::mutator_lock_, Locks::thread_list_lock_); + REQUIRES(Locks::runtime_shutdown_lock_, !Locks::mutator_lock_, !Locks::thread_list_lock_, + !Locks::thread_suspend_count_lock_); + void Unregister(Thread* self) REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, + !Locks::thread_suspend_count_lock_); void VisitRoots(RootVisitor* visitor) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Return a copy of the thread list. 
- std::list<Thread*> GetList() EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_) { + std::list<Thread*> GetList() REQUIRES(Locks::thread_list_lock_) { return list_; } void DumpNativeStacks(std::ostream& os) - LOCKS_EXCLUDED(Locks::thread_list_lock_); + REQUIRES(!Locks::thread_list_lock_); private: uint32_t AllocThreadId(Thread* self); - void ReleaseThreadId(Thread* self, uint32_t id) LOCKS_EXCLUDED(Locks::allocated_thread_ids_lock_); + void ReleaseThreadId(Thread* self, uint32_t id) REQUIRES(!Locks::allocated_thread_ids_lock_); - bool Contains(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_); - bool Contains(pid_t tid) EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_); + bool Contains(Thread* thread) REQUIRES(Locks::thread_list_lock_); + bool Contains(pid_t tid) REQUIRES(Locks::thread_list_lock_); size_t RunCheckpoint(Closure* checkpoint_function, bool includeSuspended) - LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::thread_suspend_count_lock_); + REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_); void DumpUnattachedThreads(std::ostream& os) - LOCKS_EXCLUDED(Locks::thread_list_lock_); + REQUIRES(!Locks::thread_list_lock_); void SuspendAllDaemonThreads() - LOCKS_EXCLUDED(Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_); + REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_); void WaitForOtherNonDaemonThreadsToExit() - LOCKS_EXCLUDED(Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_); + REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_); void SuspendAllInternal(Thread* self, Thread* ignore1, Thread* ignore2 = nullptr, bool debug_suspend = false) - LOCKS_EXCLUDED(Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_); + REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_); void AssertThreadsAreSuspended(Thread* self, Thread* ignore1, Thread* ignore2 = nullptr) - LOCKS_EXCLUDED(Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_); + REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_); std::bitset<kMaxThreadId> allocated_ids_ GUARDED_BY(Locks::allocated_thread_ids_lock_); diff --git a/runtime/thread_pool.h b/runtime/thread_pool.h index 88700e6f72..1ca0a210cc 100644 --- a/runtime/thread_pool.h +++ b/runtime/thread_pool.h @@ -61,7 +61,7 @@ class ThreadPoolWorker { protected: ThreadPoolWorker(ThreadPool* thread_pool, const std::string& name, size_t stack_size); - static void* Callback(void* arg) LOCKS_EXCLUDED(Locks::mutator_lock_); + static void* Callback(void* arg) REQUIRES(!Locks::mutator_lock_); virtual void Run(); ThreadPool* const thread_pool_; @@ -82,22 +82,22 @@ class ThreadPool { } // Broadcast to the workers and tell them to empty out the work queue. - void StartWorkers(Thread* self); + void StartWorkers(Thread* self) REQUIRES(!task_queue_lock_); // Do not allow workers to grab any new tasks. - void StopWorkers(Thread* self); + void StopWorkers(Thread* self) REQUIRES(!task_queue_lock_); // Add a new task, the first available started worker will process it. Does not delete the task // after running it, it is the caller's responsibility. - void AddTask(Thread* self, Task* task); + void AddTask(Thread* self, Task* task) REQUIRES(!task_queue_lock_); explicit ThreadPool(const char* name, size_t num_threads); virtual ~ThreadPool(); // Wait for all tasks currently on queue to get completed. 
- void Wait(Thread* self, bool do_work, bool may_hold_locks); + void Wait(Thread* self, bool do_work, bool may_hold_locks) REQUIRES(!task_queue_lock_); - size_t GetTaskCount(Thread* self); + size_t GetTaskCount(Thread* self) REQUIRES(!task_queue_lock_); // Returns the total amount of workers waited for tasks. uint64_t GetWaitTime() const { @@ -106,18 +106,18 @@ class ThreadPool { // Provides a way to bound the maximum number of worker threads, threads must be less the the // thread count of the thread pool. - void SetMaxActiveWorkers(size_t threads); + void SetMaxActiveWorkers(size_t threads) REQUIRES(!task_queue_lock_); protected: // get a task to run, blocks if there are no tasks left - virtual Task* GetTask(Thread* self); + virtual Task* GetTask(Thread* self) REQUIRES(!task_queue_lock_); // Try to get a task, returning null if there is none available. - Task* TryGetTask(Thread* self); - Task* TryGetTaskLocked() EXCLUSIVE_LOCKS_REQUIRED(task_queue_lock_); + Task* TryGetTask(Thread* self) REQUIRES(!task_queue_lock_); + Task* TryGetTaskLocked() REQUIRES(task_queue_lock_); // Are we shutting down? - bool IsShuttingDown() const EXCLUSIVE_LOCKS_REQUIRED(task_queue_lock_) { + bool IsShuttingDown() const REQUIRES(task_queue_lock_) { return shutting_down_; } diff --git a/runtime/trace.cc b/runtime/trace.cc index 487baedba4..439343068c 100644 --- a/runtime/trace.cc +++ b/runtime/trace.cc @@ -57,7 +57,7 @@ class BuildStackTraceVisitor : public StackVisitor { : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames), method_trace_(Trace::AllocStackTrace()) {} - bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod* m = GetMethod(); // Ignore runtime frames (in particular callee save). if (!m->IsRuntimeMethod()) { @@ -218,7 +218,7 @@ static void Append8LE(uint8_t* buf, uint64_t val) { *buf++ = static_cast<uint8_t>(val >> 56); } -static void GetSample(Thread* thread, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +static void GetSample(Thread* thread, void* arg) SHARED_REQUIRES(Locks::mutator_lock_) { BuildStackTraceVisitor build_trace_visitor(thread); build_trace_visitor.WalkStack(); std::vector<ArtMethod*>* stack_trace = build_trace_visitor.GetStackTrace(); @@ -636,7 +636,7 @@ void Trace::DumpBuf(uint8_t* buf, size_t buf_size, TraceClockSource clock_source static void GetVisitedMethodsFromBitSets( const std::map<const DexFile*, DexIndexBitSet*>& seen_methods, - std::set<ArtMethod*>* visited_methods) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + std::set<ArtMethod*>* visited_methods) SHARED_REQUIRES(Locks::mutator_lock_) { ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); for (auto& e : seen_methods) { DexIndexBitSet* bit_set = e.second; @@ -749,7 +749,7 @@ void Trace::DexPcMoved(Thread* thread, mirror::Object* this_object, void Trace::FieldRead(Thread* thread, mirror::Object* this_object, ArtMethod* method, uint32_t dex_pc, ArtField* field) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { UNUSED(thread, this_object, method, dex_pc, field); // We're not recorded to listen to this kind of event, so complain. 
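The ThreadPool hunk applies the same contract in layered form: public entry points such as AddTask() and Wait() declare that the caller must not hold task_queue_lock_ and take it themselves, private helpers such as TryGetTaskLocked() and IsShuttingDown() declare that it must already be held, and the queue state stays GUARDED_BY the lock. A compact sketch of that layering with made-up types:

#include <deque>

// Illustrative sketch of the public/locked-helper layering; names and types are made up.
class __attribute__((capability("mutex"))) FakeQueueMutex {
 public:
  void Lock() __attribute__((acquire_capability())) {}
  void Unlock() __attribute__((release_capability())) {}
};

class FakeThreadPool {
 public:
  // Public API: must be entered without the queue lock; takes it internally.
  void AddTask(int task) __attribute__((requires_capability(!task_queue_lock_))) {
    task_queue_lock_.Lock();
    if (!IsShuttingDownLocked()) {
      tasks_.push_back(task);
    }
    task_queue_lock_.Unlock();
  }

 private:
  // Private helper: caller must already hold the queue lock.
  bool IsShuttingDownLocked() const
      __attribute__((requires_capability(task_queue_lock_))) {
    return shutting_down_;
  }

  FakeQueueMutex task_queue_lock_;
  std::deque<int> tasks_ __attribute__((guarded_by(task_queue_lock_)));
  bool shutting_down_ __attribute__((guarded_by(task_queue_lock_))) = false;
};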
LOG(ERROR) << "Unexpected field read event in tracing " << PrettyMethod(method) << " " << dex_pc; @@ -758,7 +758,7 @@ void Trace::FieldRead(Thread* thread, mirror::Object* this_object, void Trace::FieldWritten(Thread* thread, mirror::Object* this_object, ArtMethod* method, uint32_t dex_pc, ArtField* field, const JValue& field_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { UNUSED(thread, this_object, method, dex_pc, field, field_value); // We're not recorded to listen to this kind of event, so complain. LOG(ERROR) << "Unexpected field write event in tracing " << PrettyMethod(method) << " " << dex_pc; @@ -793,14 +793,14 @@ void Trace::MethodUnwind(Thread* thread, mirror::Object* this_object ATTRIBUTE_U } void Trace::ExceptionCaught(Thread* thread, mirror::Throwable* exception_object) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { UNUSED(thread, exception_object); LOG(ERROR) << "Unexpected exception caught event in tracing"; } void Trace::BackwardBranch(Thread* /*thread*/, ArtMethod* method, int32_t /*dex_pc_offset*/) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { LOG(ERROR) << "Unexpected backward branch event in tracing" << PrettyMethod(method); } diff --git a/runtime/trace.h b/runtime/trace.h index 69e6acc899..04be3ddeab 100644 --- a/runtime/trace.h +++ b/runtime/trace.h @@ -114,28 +114,20 @@ class Trace FINAL : public instrumentation::InstrumentationListener { static void Start(const char* trace_filename, int trace_fd, size_t buffer_size, int flags, TraceOutputMode output_mode, TraceMode trace_mode, int interval_us) - LOCKS_EXCLUDED(Locks::mutator_lock_, - Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_, - Locks::trace_lock_); - static void Pause() LOCKS_EXCLUDED(Locks::trace_lock_, Locks::thread_list_lock_); - static void Resume() LOCKS_EXCLUDED(Locks::trace_lock_); + REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_, + !Locks::trace_lock_); + static void Pause() REQUIRES(!Locks::trace_lock_, !Locks::thread_list_lock_); + static void Resume() REQUIRES(!Locks::trace_lock_); // Stop tracing. This will finish the trace and write it to file/send it via DDMS. static void Stop() - LOCKS_EXCLUDED(Locks::mutator_lock_, - Locks::thread_list_lock_, - Locks::trace_lock_); + REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::trace_lock_); // Abort tracing. This will just stop tracing and *not* write/send the collected data. 
static void Abort() - LOCKS_EXCLUDED(Locks::mutator_lock_, - Locks::thread_list_lock_, - Locks::trace_lock_); + REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::trace_lock_); static void Shutdown() - LOCKS_EXCLUDED(Locks::mutator_lock_, - Locks::thread_list_lock_, - Locks::trace_lock_); - static TracingMode GetMethodTracingMode() LOCKS_EXCLUDED(Locks::trace_lock_); + REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::trace_lock_); + static TracingMode GetMethodTracingMode() REQUIRES(!Locks::trace_lock_); bool UseWallClock(); bool UseThreadCpuClock(); @@ -143,33 +135,37 @@ class Trace FINAL : public instrumentation::InstrumentationListener { uint32_t GetClockOverheadNanoSeconds(); void CompareAndUpdateStackTrace(Thread* thread, std::vector<ArtMethod*>* stack_trace) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_); // InstrumentationListener implementation. void MethodEntered(Thread* thread, mirror::Object* this_object, ArtMethod* method, uint32_t dex_pc) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE; + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_) + OVERRIDE; void MethodExited(Thread* thread, mirror::Object* this_object, ArtMethod* method, uint32_t dex_pc, const JValue& return_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE; + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_) + OVERRIDE; void MethodUnwind(Thread* thread, mirror::Object* this_object, ArtMethod* method, uint32_t dex_pc) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE; + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_) + OVERRIDE; void DexPcMoved(Thread* thread, mirror::Object* this_object, ArtMethod* method, uint32_t new_dex_pc) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE; + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_) + OVERRIDE; void FieldRead(Thread* thread, mirror::Object* this_object, ArtMethod* method, uint32_t dex_pc, ArtField* field) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE; + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE; void FieldWritten(Thread* thread, mirror::Object* this_object, ArtMethod* method, uint32_t dex_pc, ArtField* field, const JValue& field_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE; + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE; void ExceptionCaught(Thread* thread, mirror::Throwable* exception_object) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE; + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE; void BackwardBranch(Thread* thread, ArtMethod* method, int32_t dex_pc_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE; + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE; // Reuse an old stack trace if it exists, otherwise allocate a new one. static std::vector<ArtMethod*>* AllocStackTrace(); // Clear and store an old stack trace for later use. @@ -177,57 +173,61 @@ class Trace FINAL : public instrumentation::InstrumentationListener { // Save id and name of a thread before it exits. 
static void StoreExitingThreadInfo(Thread* thread); - static TraceOutputMode GetOutputMode() LOCKS_EXCLUDED(Locks::trace_lock_); - static TraceMode GetMode() LOCKS_EXCLUDED(Locks::trace_lock_); - static size_t GetBufferSize() LOCKS_EXCLUDED(Locks::trace_lock_); + static TraceOutputMode GetOutputMode() REQUIRES(!Locks::trace_lock_); + static TraceMode GetMode() REQUIRES(!Locks::trace_lock_); + static size_t GetBufferSize() REQUIRES(!Locks::trace_lock_); private: Trace(File* trace_file, const char* trace_name, size_t buffer_size, int flags, TraceOutputMode output_mode, TraceMode trace_mode); // The sampling interval in microseconds is passed as an argument. - static void* RunSamplingThread(void* arg) LOCKS_EXCLUDED(Locks::trace_lock_); + static void* RunSamplingThread(void* arg) REQUIRES(!Locks::trace_lock_); static void StopTracing(bool finish_tracing, bool flush_file) - LOCKS_EXCLUDED(Locks::mutator_lock_, - Locks::thread_list_lock_, - Locks::trace_lock_); - void FinishTracing() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::trace_lock_) + // There is an annoying issue with static functions that create a new object and call into + // that object that causes them to not be able to tell that we don't currently hold the lock. + // This causes the negative annotations to incorrectly have a false positive. TODO: Figure out + // how to annotate this. + NO_THREAD_SAFETY_ANALYSIS; + void FinishTracing() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_); void ReadClocks(Thread* thread, uint32_t* thread_clock_diff, uint32_t* wall_clock_diff); void LogMethodTraceEvent(Thread* thread, ArtMethod* method, instrumentation::Instrumentation::InstrumentationEvent event, uint32_t thread_clock_diff, uint32_t wall_clock_diff) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_); // Methods to output traced methods and threads. - void GetVisitedMethods(size_t end_offset, std::set<ArtMethod*>* visited_methods); + void GetVisitedMethods(size_t end_offset, std::set<ArtMethod*>* visited_methods) + REQUIRES(!*unique_methods_lock_); void DumpMethodList(std::ostream& os, const std::set<ArtMethod*>& visited_methods) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void DumpThreadList(std::ostream& os) LOCKS_EXCLUDED(Locks::thread_list_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_); + void DumpThreadList(std::ostream& os) REQUIRES(!Locks::thread_list_lock_); // Methods to register seen entitites in streaming mode. The methods return true if the entity // is newly discovered. bool RegisterMethod(ArtMethod* method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(streaming_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(streaming_lock_); bool RegisterThread(Thread* thread) - EXCLUSIVE_LOCKS_REQUIRED(streaming_lock_); + REQUIRES(streaming_lock_); // Copy a temporary buffer to the main buffer. Used for streaming. Exposed here for lock // annotation. 
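The NO_THREAD_SAFETY_ANALYSIS escape hatch added to StopTracing() works around the limitation described in the in-line comment: a static function that obtains a Trace object cannot name that object's per-instance locks in its own annotations, so it cannot prove the negative requirements of the methods it calls. A sketch of the shape of that false positive, with illustrative names:

// Sketch of the problem described in the comment above; all names are made up.
// The per-instance lock_ of an object obtained inside the static caller cannot appear
// in the caller's own annotations, so the negative requirement on Finish() cannot be
// proven there and the analysis is switched off for the caller instead.
class __attribute__((capability("mutex"))) FakeTraceMutex {
 public:
  void Lock() __attribute__((acquire_capability())) {}
  void Unlock() __attribute__((release_capability())) {}
};

class FakeTrace {
 public:
  void Finish() __attribute__((requires_capability(!lock_))) {
    lock_.Lock();
    // ... flush buffered records ...
    lock_.Unlock();
  }

  static void StopTracing() __attribute__((no_thread_safety_analysis)) {
    FakeTrace trace;
    // Without the opt-out above, this call is flagged: the analysis cannot see that
    // the brand-new object's lock_ is not held by the caller.
    trace.Finish();
  }

 private:
  FakeTraceMutex lock_;
};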
void WriteToBuf(const uint8_t* src, size_t src_size) - EXCLUSIVE_LOCKS_REQUIRED(streaming_lock_); + REQUIRES(streaming_lock_); - uint32_t EncodeTraceMethod(ArtMethod* method) LOCKS_EXCLUDED(unique_methods_lock_); + uint32_t EncodeTraceMethod(ArtMethod* method) REQUIRES(!*unique_methods_lock_); uint32_t EncodeTraceMethodAndAction(ArtMethod* method, TraceAction action) - LOCKS_EXCLUDED(unique_methods_lock_); - ArtMethod* DecodeTraceMethod(uint32_t tmid) LOCKS_EXCLUDED(unique_methods_lock_); - std::string GetMethodLine(ArtMethod* method) LOCKS_EXCLUDED(unique_methods_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!*unique_methods_lock_); + ArtMethod* DecodeTraceMethod(uint32_t tmid) REQUIRES(!*unique_methods_lock_); + std::string GetMethodLine(ArtMethod* method) REQUIRES(!*unique_methods_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); void DumpBuf(uint8_t* buf, size_t buf_size, TraceClockSource clock_source) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_); // Singleton instance of the Trace or null when no method tracing is active. static Trace* volatile the_trace_ GUARDED_BY(Locks::trace_lock_); diff --git a/runtime/transaction.h b/runtime/transaction.h index 030478c7ad..8ff0614574 100644 --- a/runtime/transaction.h +++ b/runtime/transaction.h @@ -46,63 +46,63 @@ class Transaction FINAL { ~Transaction(); void Abort(const std::string& abort_message) - LOCKS_EXCLUDED(log_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!log_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); void ThrowAbortError(Thread* self, const std::string* abort_message) - LOCKS_EXCLUDED(log_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool IsAborted() LOCKS_EXCLUDED(log_lock_); + REQUIRES(!log_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); + bool IsAborted() REQUIRES(!log_lock_); // Record object field changes. void RecordWriteFieldBoolean(mirror::Object* obj, MemberOffset field_offset, uint8_t value, bool is_volatile) - LOCKS_EXCLUDED(log_lock_); + REQUIRES(!log_lock_); void RecordWriteFieldByte(mirror::Object* obj, MemberOffset field_offset, int8_t value, bool is_volatile) - LOCKS_EXCLUDED(log_lock_); + REQUIRES(!log_lock_); void RecordWriteFieldChar(mirror::Object* obj, MemberOffset field_offset, uint16_t value, bool is_volatile) - LOCKS_EXCLUDED(log_lock_); + REQUIRES(!log_lock_); void RecordWriteFieldShort(mirror::Object* obj, MemberOffset field_offset, int16_t value, bool is_volatile) - LOCKS_EXCLUDED(log_lock_); + REQUIRES(!log_lock_); void RecordWriteField32(mirror::Object* obj, MemberOffset field_offset, uint32_t value, bool is_volatile) - LOCKS_EXCLUDED(log_lock_); + REQUIRES(!log_lock_); void RecordWriteField64(mirror::Object* obj, MemberOffset field_offset, uint64_t value, bool is_volatile) - LOCKS_EXCLUDED(log_lock_); + REQUIRES(!log_lock_); void RecordWriteFieldReference(mirror::Object* obj, MemberOffset field_offset, mirror::Object* value, bool is_volatile) - LOCKS_EXCLUDED(log_lock_); + REQUIRES(!log_lock_); // Record array change. void RecordWriteArray(mirror::Array* array, size_t index, uint64_t value) - LOCKS_EXCLUDED(log_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!log_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Record intern string table changes. 
void RecordStrongStringInsertion(mirror::String* s) - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_) - LOCKS_EXCLUDED(log_lock_); + REQUIRES(Locks::intern_table_lock_) + REQUIRES(!log_lock_); void RecordWeakStringInsertion(mirror::String* s) - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_) - LOCKS_EXCLUDED(log_lock_); + REQUIRES(Locks::intern_table_lock_) + REQUIRES(!log_lock_); void RecordStrongStringRemoval(mirror::String* s) - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_) - LOCKS_EXCLUDED(log_lock_); + REQUIRES(Locks::intern_table_lock_) + REQUIRES(!log_lock_); void RecordWeakStringRemoval(mirror::String* s) - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_) - LOCKS_EXCLUDED(log_lock_); + REQUIRES(Locks::intern_table_lock_) + REQUIRES(!log_lock_); // Abort transaction by undoing all recorded changes. void Rollback() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(log_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!log_lock_); void VisitRoots(RootVisitor* visitor) - LOCKS_EXCLUDED(log_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!log_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); private: class ObjectLog : public ValueObject { @@ -115,8 +115,8 @@ class Transaction FINAL { void Log64BitsValue(MemberOffset offset, uint64_t value, bool is_volatile); void LogReferenceValue(MemberOffset offset, mirror::Object* obj, bool is_volatile); - void Undo(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void Undo(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_); + void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_); size_t Size() const { return field_values_.size(); @@ -141,7 +141,7 @@ class Transaction FINAL { void LogValue(FieldValueKind kind, MemberOffset offset, uint64_t value, bool is_volatile); void UndoFieldWrite(mirror::Object* obj, MemberOffset field_offset, - const FieldValue& field_value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const FieldValue& field_value) SHARED_REQUIRES(Locks::mutator_lock_); // Maps field's offset to its value. std::map<uint32_t, FieldValue> field_values_; @@ -151,7 +151,7 @@ class Transaction FINAL { public: void LogValue(size_t index, uint64_t value); - void Undo(mirror::Array* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void Undo(mirror::Array* obj) SHARED_REQUIRES(Locks::mutator_lock_); size_t Size() const { return array_values_.size(); @@ -159,7 +159,7 @@ class Transaction FINAL { private: void UndoArrayWrite(mirror::Array* array, Primitive::Type array_type, size_t index, - uint64_t value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + uint64_t value) SHARED_REQUIRES(Locks::mutator_lock_); // Maps index to value. // TODO use JValue instead ? 
@@ -182,9 +182,9 @@ class Transaction FINAL { } void Undo(InternTable* intern_table) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_); - void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::intern_table_lock_); + void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_); private: mirror::String* str_; @@ -193,31 +193,31 @@ class Transaction FINAL { }; void LogInternedString(const InternStringLog& log) - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_) - LOCKS_EXCLUDED(log_lock_); + REQUIRES(Locks::intern_table_lock_) + REQUIRES(!log_lock_); void UndoObjectModifications() - EXCLUSIVE_LOCKS_REQUIRED(log_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(log_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); void UndoArrayModifications() - EXCLUSIVE_LOCKS_REQUIRED(log_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(log_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); void UndoInternStringTableModifications() - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_) - EXCLUSIVE_LOCKS_REQUIRED(log_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::intern_table_lock_) + REQUIRES(log_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); void VisitObjectLogs(RootVisitor* visitor) - EXCLUSIVE_LOCKS_REQUIRED(log_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(log_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); void VisitArrayLogs(RootVisitor* visitor) - EXCLUSIVE_LOCKS_REQUIRED(log_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(log_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); void VisitStringLogs(RootVisitor* visitor) - EXCLUSIVE_LOCKS_REQUIRED(log_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(log_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); - const std::string& GetAbortMessage() LOCKS_EXCLUDED(log_lock_); + const std::string& GetAbortMessage() REQUIRES(!log_lock_); Mutex log_lock_ ACQUIRED_AFTER(Locks::intern_table_lock_); std::map<mirror::Object*, ObjectLog> object_logs_ GUARDED_BY(log_lock_); diff --git a/runtime/utf.h b/runtime/utf.h index 7f05248c29..1193d29c7d 100644 --- a/runtime/utf.h +++ b/runtime/utf.h @@ -77,7 +77,7 @@ void ConvertUtf16ToModifiedUtf8(char* utf8_out, const uint16_t* utf16_in, size_t * The java.lang.String hashCode() algorithm. */ int32_t ComputeUtf16Hash(mirror::CharArray* chars, int32_t offset, size_t char_count) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); int32_t ComputeUtf16Hash(const uint16_t* chars, size_t char_count); // Compute a hash code of a modified UTF-8 string. Not the standard java hash since it returns a diff --git a/runtime/utils.cc b/runtime/utils.cc index 194d9fe853..20512f9765 100644 --- a/runtime/utils.cc +++ b/runtime/utils.cc @@ -1130,9 +1130,13 @@ void DumpNativeStack(std::ostream& os, pid_t tid, const char* prefix, os << prefix << StringPrintf("#%02zu pc ", it->num); bool try_addr2line = false; if (!BacktraceMap::IsValid(it->map)) { - os << StringPrintf("%08" PRIxPTR " ???", it->pc); + os << StringPrintf(Is64BitInstructionSet(kRuntimeISA) ? "%016" PRIxPTR " ???" + : "%08" PRIxPTR " ???", + it->pc); } else { - os << StringPrintf("%08" PRIxPTR " ", BacktraceMap::GetRelativePc(it->map, it->pc)); + os << StringPrintf(Is64BitInstructionSet(kRuntimeISA) ? 
"%016" PRIxPTR " " + : "%08" PRIxPTR " ", + BacktraceMap::GetRelativePc(it->map, it->pc)); os << it->map.name; os << " ("; if (!it->func_name.empty()) { diff --git a/runtime/utils.h b/runtime/utils.h index 1ef98e70d5..4fa5f5a539 100644 --- a/runtime/utils.h +++ b/runtime/utils.h @@ -111,22 +111,22 @@ bool EndsWith(const std::string& s, const char* suffix); // "[[I" would be "int[][]", "[Ljava/lang/String;" would be // "java.lang.String[]", and so forth. std::string PrettyDescriptor(mirror::String* descriptor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); std::string PrettyDescriptor(const char* descriptor); std::string PrettyDescriptor(mirror::Class* klass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); std::string PrettyDescriptor(Primitive::Type type); // Returns a human-readable signature for 'f'. Something like "a.b.C.f" or // "int a.b.C.f" (depending on the value of 'with_type'). std::string PrettyField(ArtField* f, bool with_type = true) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); std::string PrettyField(uint32_t field_idx, const DexFile& dex_file, bool with_type = true); // Returns a human-readable signature for 'm'. Something like "a.b.C.m" or // "a.b.C.m(II)V" (depending on the value of 'with_signature'). std::string PrettyMethod(ArtMethod* m, bool with_signature = true) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); std::string PrettyMethod(uint32_t method_idx, const DexFile& dex_file, bool with_signature = true); // Returns a human-readable form of the name of the *class* of the given object. @@ -134,7 +134,7 @@ std::string PrettyMethod(uint32_t method_idx, const DexFile& dex_file, bool with // be "java.lang.String". Given an array of int, the output would be "int[]". // Given String.class, the output would be "java.lang.Class<java.lang.String>". std::string PrettyTypeOf(mirror::Object* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Returns a human-readable form of the type at an index in the specified dex file. // Example outputs: char[], java.lang.String. @@ -143,11 +143,11 @@ std::string PrettyType(uint32_t type_idx, const DexFile& dex_file); // Returns a human-readable form of the name of the given class. // Given String.class, the output would be "java.lang.Class<java.lang.String>". std::string PrettyClass(mirror::Class* c) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Returns a human-readable form of the name of the given class with its class loader. std::string PrettyClassAndClassLoader(mirror::Class* c) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Returns a human-readable version of the Java part of the access flags, e.g., "private static " // (note the trailing whitespace). @@ -182,10 +182,10 @@ bool IsValidMemberName(const char* s); // Returns the JNI native function name for the non-overloaded method 'm'. std::string JniShortName(ArtMethod* m) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Returns the JNI native function name for the overloaded method 'm'. 
std::string JniLongName(ArtMethod* m) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool ReadFileToString(const std::string& file_name, std::string* result); bool PrintFileToLog(const std::string& file_name, LogSeverity level); diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc index 8c950a0610..0181e5b7cc 100644 --- a/runtime/verifier/method_verifier.cc +++ b/runtime/verifier/method_verifier.cc @@ -2874,6 +2874,13 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) { } } } + // Handle this like a RETURN_VOID now. Code is duplicated to separate standard from + // quickened opcodes (otherwise this could be a fall-through). + if (!IsConstructor()) { + if (!GetMethodReturnType().IsConflict()) { + Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "return-void not expected"; + } + } break; // Note: the following instructions encode offsets derived from class linking. // As such they use Class*/Field*/AbstractMethod* as these offsets only have @@ -3590,7 +3597,7 @@ class MethodParamListDescriptorIterator { ++pos_; } - const char* GetDescriptor() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + const char* GetDescriptor() SHARED_REQUIRES(Locks::mutator_lock_) { return res_method_->GetTypeDescriptorFromTypeIdx(params_->GetTypeItem(pos_).type_idx_); } diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h index a2835f56b9..3b59bba506 100644 --- a/runtime/verifier/method_verifier.h +++ b/runtime/verifier/method_verifier.h @@ -150,13 +150,13 @@ class MethodVerifier { /* Verify a class. Returns "kNoFailure" on success. */ static FailureKind VerifyClass(Thread* self, mirror::Class* klass, bool allow_soft_failures, std::string* error) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static FailureKind VerifyClass(Thread* self, const DexFile* dex_file, Handle<mirror::DexCache> dex_cache, Handle<mirror::ClassLoader> class_loader, const DexFile::ClassDef* class_def, bool allow_soft_failures, std::string* error) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static MethodVerifier* VerifyMethodAndDump(Thread* self, VariableIndentationOutputStream* vios, @@ -167,10 +167,10 @@ class MethodVerifier { const DexFile::ClassDef* class_def, const DexFile::CodeItem* code_item, ArtMethod* method, uint32_t method_access_flags) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static FailureKind VerifyMethod(ArtMethod* method, bool allow_soft_failures, - std::string* error) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string* error) SHARED_REQUIRES(Locks::mutator_lock_); uint8_t EncodePcToReferenceMapData() const; @@ -193,29 +193,29 @@ class MethodVerifier { // Dump the state of the verifier, namely each instruction, what flags are set on it, register // information - void Dump(std::ostream& os) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void Dump(VariableIndentationOutputStream* vios) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void Dump(std::ostream& os) SHARED_REQUIRES(Locks::mutator_lock_); + void Dump(VariableIndentationOutputStream* vios) SHARED_REQUIRES(Locks::mutator_lock_); // Fills 'monitor_enter_dex_pcs' with the dex pcs of the monitor-enter instructions corresponding // to the locks held at 'dex_pc' in method 'm'. 
static void FindLocksAtDexPc(ArtMethod* m, uint32_t dex_pc, std::vector<uint32_t>* monitor_enter_dex_pcs) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Returns the accessed field corresponding to the quick instruction's field // offset at 'dex_pc' in method 'm'. static ArtField* FindAccessedFieldAtDexPc(ArtMethod* m, uint32_t dex_pc) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Returns the invoked method corresponding to the quick instruction's vtable // index at 'dex_pc' in method 'm'. static ArtMethod* FindInvokedMethodAtDexPc(ArtMethod* m, uint32_t dex_pc) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static SafeMap<uint32_t, std::set<uint32_t>> FindStringInitMap(ArtMethod* m) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - static void Init() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static void Init() SHARED_REQUIRES(Locks::mutator_lock_); static void Shutdown(); bool CanLoadClasses() const { @@ -228,7 +228,7 @@ class MethodVerifier { ArtMethod* method, uint32_t access_flags, bool can_load_classes, bool allow_soft_failures, bool need_precise_constants, bool allow_thread_suspension) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : MethodVerifier(self, dex_file, dex_cache, class_loader, class_def, code_item, method_idx, method, access_flags, can_load_classes, allow_soft_failures, need_precise_constants, false, allow_thread_suspension) {} @@ -237,22 +237,22 @@ class MethodVerifier { // Run verification on the method. Returns true if verification completes and false if the input // has an irrecoverable corruption. - bool Verify() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool Verify() SHARED_REQUIRES(Locks::mutator_lock_); // Describe VRegs at the given dex pc. std::vector<int32_t> DescribeVRegs(uint32_t dex_pc); static void VisitStaticRoots(RootVisitor* visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void VisitRoots(RootVisitor* visitor, const RootInfo& roots) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Accessors used by the compiler via CompilerCallback const DexFile::CodeItem* CodeItem() const; RegisterLine* GetRegLine(uint32_t dex_pc); const InstructionFlags& GetInstructionFlags(size_t index) const; - mirror::ClassLoader* GetClassLoader() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - mirror::DexCache* GetDexCache() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::ClassLoader* GetClassLoader() SHARED_REQUIRES(Locks::mutator_lock_); + mirror::DexCache* GetDexCache() SHARED_REQUIRES(Locks::mutator_lock_); MethodReference GetMethodReference() const; uint32_t GetAccessFlags() const; bool HasCheckCasts() const; @@ -263,15 +263,15 @@ class MethodVerifier { } const RegType& ResolveCheckedClass(uint32_t class_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Returns the method of a quick invoke or null if it cannot be found. ArtMethod* GetQuickInvokedMethod(const Instruction* inst, RegisterLine* reg_line, bool is_range, bool allow_failure) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Returns the access field of a quick field access (iget/iput-quick) or null // if it cannot be found. 
ArtField* GetQuickFieldAccess(const Instruction* inst, RegisterLine* reg_line) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Is the method being verified a constructor? bool IsConstructor() const { @@ -295,7 +295,7 @@ class MethodVerifier { ArtMethod* method, uint32_t access_flags, bool can_load_classes, bool allow_soft_failures, bool need_precise_constants, bool verify_to_dump, bool allow_thread_suspension) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Adds the given string to the beginning of the last failure message. void PrependToLastFailMessage(std::string); @@ -321,18 +321,18 @@ class MethodVerifier { const DexFile::CodeItem* code_item, ArtMethod* method, uint32_t method_access_flags, bool allow_soft_failures, bool need_precise_constants) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - void FindLocksAtDexPc() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void FindLocksAtDexPc() SHARED_REQUIRES(Locks::mutator_lock_); ArtField* FindAccessedFieldAtDexPc(uint32_t dex_pc) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ArtMethod* FindInvokedMethodAtDexPc(uint32_t dex_pc) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); SafeMap<uint32_t, std::set<uint32_t>>& FindStringInitMap() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); /* * Compute the width of the instruction at each address in the instruction stream, and store it in @@ -360,7 +360,7 @@ class MethodVerifier { * Returns "false" if something in the exception table looks fishy, but we're expecting the * exception table to be somewhat sane. */ - bool ScanTryCatchBlocks() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool ScanTryCatchBlocks() SHARED_REQUIRES(Locks::mutator_lock_); /* * Perform static verification on all instructions in a method. @@ -466,11 +466,11 @@ class MethodVerifier { bool* selfOkay); /* Perform detailed code-flow analysis on a single method. */ - bool VerifyCodeFlow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool VerifyCodeFlow() SHARED_REQUIRES(Locks::mutator_lock_); // Set the register types for the first instruction in the method based on the method signature. // This has the side-effect of validating the signature. - bool SetTypesFromSignature() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool SetTypesFromSignature() SHARED_REQUIRES(Locks::mutator_lock_); /* * Perform code flow on a method. @@ -518,7 +518,7 @@ class MethodVerifier { * reordering by specifying that you can't execute the new-instance instruction if a register * contains an uninitialized instance created by that same instruction. */ - bool CodeFlowVerifyMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool CodeFlowVerifyMethod() SHARED_REQUIRES(Locks::mutator_lock_); /* * Perform verification for a single instruction. @@ -530,33 +530,33 @@ class MethodVerifier { * addresses. Does not set or clear any other flags in "insn_flags_". */ bool CodeFlowVerifyInstruction(uint32_t* start_guess) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Perform verification of a new array instruction void VerifyNewArray(const Instruction* inst, bool is_filled, bool is_range) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Helper to perform verification on puts of primitive type. 
void VerifyPrimitivePut(const RegType& target_type, const RegType& insn_type, - const uint32_t vregA) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const uint32_t vregA) SHARED_REQUIRES(Locks::mutator_lock_); // Perform verification of an aget instruction. The destination register's type will be set to // be that of component type of the array unless the array type is unknown, in which case a // bottom type inferred from the type of instruction is used. is_primitive is false for an // aget-object. void VerifyAGet(const Instruction* inst, const RegType& insn_type, - bool is_primitive) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool is_primitive) SHARED_REQUIRES(Locks::mutator_lock_); // Perform verification of an aput instruction. void VerifyAPut(const Instruction* inst, const RegType& insn_type, - bool is_primitive) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool is_primitive) SHARED_REQUIRES(Locks::mutator_lock_); // Lookup instance field and fail for resolution violations ArtField* GetInstanceField(const RegType& obj_type, int field_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Lookup static field and fail for resolution violations - ArtField* GetStaticField(int field_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ArtField* GetStaticField(int field_idx) SHARED_REQUIRES(Locks::mutator_lock_); // Perform verification of an iget/sget/iput/sput instruction. enum class FieldAccessType { // private @@ -566,16 +566,16 @@ class MethodVerifier { template <FieldAccessType kAccType> void VerifyISFieldAccess(const Instruction* inst, const RegType& insn_type, bool is_primitive, bool is_static) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template <FieldAccessType kAccType> void VerifyQuickFieldAccess(const Instruction* inst, const RegType& insn_type, bool is_primitive) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Resolves a class based on an index and performs access checks to ensure the referrer can // access the resolved class. const RegType& ResolveClassAndCheckAccess(uint32_t class_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); /* * For the "move-exception" instruction at "work_insn_idx_", which must be at an exception handler @@ -583,7 +583,7 @@ class MethodVerifier { * exception handler can be found or if the Join of exception types fails. */ const RegType& GetCaughtExceptionType() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); /* * Resolves a method based on an index and performs access checks to ensure @@ -591,7 +591,7 @@ class MethodVerifier { * Does not throw exceptions. */ ArtMethod* ResolveMethodAndCheckAccess(uint32_t method_idx, MethodType method_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); /* * Verify the arguments to a method. We're executing in "method", making @@ -618,22 +618,22 @@ class MethodVerifier { ArtMethod* VerifyInvocationArgs(const Instruction* inst, MethodType method_type, bool is_range, bool is_super) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Similar checks to the above, but on the proto. Will be used when the method cannot be // resolved. 
void VerifyInvocationArgsUnresolvedMethod(const Instruction* inst, MethodType method_type, bool is_range) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template <class T> ArtMethod* VerifyInvocationArgsFromIterator(T* it, const Instruction* inst, MethodType method_type, bool is_range, ArtMethod* res_method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ArtMethod* VerifyInvokeVirtualQuickArgs(const Instruction* inst, bool is_range) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); /* * Verify that the target instruction is not "move-exception". It's important that the only way @@ -665,18 +665,18 @@ class MethodVerifier { * Returns "false" if an error is encountered. */ bool UpdateRegisters(uint32_t next_insn, RegisterLine* merge_line, bool update_merge_line) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Return the register type for the method. - const RegType& GetMethodReturnType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const RegType& GetMethodReturnType() SHARED_REQUIRES(Locks::mutator_lock_); // Get a type representing the declaring class of the method. - const RegType& GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const RegType& GetDeclaringClass() SHARED_REQUIRES(Locks::mutator_lock_); InstructionFlags* CurrentInsnFlags(); const RegType& DetermineCat1Constant(int32_t value, bool precise) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Try to create a register type from the given class. In case a precise type is requested, but // the class is not instantiable, a soft error (of type NO_CLASS) will be enqueued and a @@ -684,7 +684,7 @@ class MethodVerifier { // Note: we reuse NO_CLASS as this will throw an exception at runtime, when the failing class is // actually touched. const RegType& FromClass(const char* descriptor, mirror::Class* klass, bool precise) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // The thread we're verifying on. 
Thread* const self_; diff --git a/runtime/verifier/method_verifier_test.cc b/runtime/verifier/method_verifier_test.cc index 3994536cca..2ab6b4aaab 100644 --- a/runtime/verifier/method_verifier_test.cc +++ b/runtime/verifier/method_verifier_test.cc @@ -30,7 +30,7 @@ namespace verifier { class MethodVerifierTest : public CommonRuntimeTest { protected: void VerifyClass(const std::string& descriptor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ASSERT_TRUE(descriptor != nullptr); Thread* self = Thread::Current(); mirror::Class* klass = class_linker_->FindSystemClass(self, descriptor.c_str()); @@ -42,7 +42,7 @@ class MethodVerifierTest : public CommonRuntimeTest { } void VerifyDexFile(const DexFile& dex) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // Verify all the classes defined in this file for (size_t i = 0; i < dex.NumClassDefs(); i++) { const DexFile::ClassDef& class_def = dex.GetClassDef(i); diff --git a/runtime/verifier/reg_type.cc b/runtime/verifier/reg_type.cc index 6e23234182..7fe8bb93ff 100644 --- a/runtime/verifier/reg_type.cc +++ b/runtime/verifier/reg_type.cc @@ -46,19 +46,19 @@ const DoubleHiType* DoubleHiType::instance_ = nullptr; const IntegerType* IntegerType::instance_ = nullptr; PrimitiveType::PrimitiveType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : RegType(klass, descriptor, cache_id) { CHECK(klass != nullptr); CHECK(!descriptor.empty()); } Cat1Type::Cat1Type(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : PrimitiveType(klass, descriptor, cache_id) { } Cat2Type::Cat2Type(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : PrimitiveType(klass, descriptor, cache_id) { } @@ -280,7 +280,7 @@ void BooleanType::Destroy() { } } -std::string UndefinedType::Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +std::string UndefinedType::Dump() const SHARED_REQUIRES(Locks::mutator_lock_) { return "Undefined"; } @@ -302,7 +302,9 @@ void UndefinedType::Destroy() { PreciseReferenceType::PreciseReferenceType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id) : RegType(klass, descriptor, cache_id) { - DCHECK(klass->IsInstantiable()); + // Note: no check for IsInstantiable() here. We may produce this in case an InstantiationError + // would be thrown at runtime, but we need to continue verification and *not* create a + // hard failure or abort. 
} std::string UnresolvedMergedType::Dump() const { @@ -538,7 +540,7 @@ const RegType& RegType::GetSuperClass(RegTypeCache* cache) const { } } -bool RegType::IsObjectArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +bool RegType::IsObjectArrayTypes() const SHARED_REQUIRES(Locks::mutator_lock_) { if (IsUnresolvedTypes() && !IsUnresolvedMergedReference() && !IsUnresolvedSuperClass()) { // Primitive arrays will always resolve DCHECK(descriptor_[1] == 'L' || descriptor_[1] == '['); @@ -551,11 +553,11 @@ bool RegType::IsObjectArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lo } } -bool RegType::IsJavaLangObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +bool RegType::IsJavaLangObject() const SHARED_REQUIRES(Locks::mutator_lock_) { return IsReference() && GetClass()->IsObjectClass(); } -bool RegType::IsArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +bool RegType::IsArrayTypes() const SHARED_REQUIRES(Locks::mutator_lock_) { if (IsUnresolvedTypes() && !IsUnresolvedMergedReference() && !IsUnresolvedSuperClass()) { return descriptor_[0] == '['; } else if (HasClass()) { diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h index d08c937a64..4893088832 100644 --- a/runtime/verifier/reg_type.h +++ b/runtime/verifier/reg_type.h @@ -112,7 +112,7 @@ class RegType { } // The high half that corresponds to this low half const RegType& HighHalf(RegTypeCache* cache) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool IsConstantBoolean() const; virtual bool IsConstantChar() const { return false; } @@ -165,20 +165,20 @@ class RegType { return result; } virtual bool HasClassVirtual() const { return false; } - bool IsJavaLangObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool IsArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool IsObjectArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsJavaLangObject() const SHARED_REQUIRES(Locks::mutator_lock_); + bool IsArrayTypes() const SHARED_REQUIRES(Locks::mutator_lock_); + bool IsObjectArrayTypes() const SHARED_REQUIRES(Locks::mutator_lock_); Primitive::Type GetPrimitiveType() const; bool IsJavaLangObjectArray() const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool IsInstantiableTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); + bool IsInstantiableTypes() const SHARED_REQUIRES(Locks::mutator_lock_); const std::string& GetDescriptor() const { DCHECK(HasClass() || (IsUnresolvedTypes() && !IsUnresolvedMergedReference() && !IsUnresolvedSuperClass())); return descriptor_; } - mirror::Class* GetClass() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Class* GetClass() const SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(!IsUnresolvedReference()); DCHECK(!klass_.IsNull()) << Dump(); DCHECK(HasClass()); @@ -186,25 +186,25 @@ class RegType { } uint16_t GetId() const { return cache_id_; } const RegType& GetSuperClass(RegTypeCache* cache) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); virtual std::string Dump() const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0; + SHARED_REQUIRES(Locks::mutator_lock_) = 0; // Can this type access other? bool CanAccess(const RegType& other) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Can this type access a member with the given properties? 
bool CanAccessMember(mirror::Class* klass, uint32_t access_flags) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Can this type be assigned by src? // Note: Object and interface types may always be assigned to one another, see // comment on // ClassJoin. bool IsAssignableFrom(const RegType& src) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Can this array type potentially be assigned by src. // This function is necessary as array types are valid even if their components types are not, @@ -215,13 +215,13 @@ class RegType { // (both are reference types). bool CanAssignArray(const RegType& src, RegTypeCache& reg_types, Handle<mirror::ClassLoader> class_loader, bool* soft_error) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Can this type be assigned by src? Variant of IsAssignableFrom that doesn't // allow assignment to // an interface from an Object. bool IsStrictlyAssignableFrom(const RegType& src) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Are these RegTypes the same? bool Equals(const RegType& other) const { return GetId() == other.GetId(); } @@ -229,7 +229,7 @@ class RegType { // Compute the merge of this register from one edge (path) with incoming_type // from another. const RegType& Merge(const RegType& incoming_type, RegTypeCache* reg_types) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); /* * A basic Join operation on classes. For a pair of types S and T the Join, @@ -258,23 +258,23 @@ class RegType { * [1] Java bytecode verification: algorithms and formalizations, Xavier Leroy */ static mirror::Class* ClassJoin(mirror::Class* s, mirror::Class* t) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); virtual ~RegType() {} void VisitRoots(RootVisitor* visitor, const RootInfo& root_info) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); protected: RegType(mirror::Class* klass, const std::string& descriptor, - uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_) : descriptor_(descriptor), klass_(klass), cache_id_(cache_id) { if (kIsDebugBuild) { CheckInvariants(); } } - void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void CheckInvariants() const SHARED_REQUIRES(Locks::mutator_lock_); const std::string descriptor_; mutable GcRoot<mirror::Class> @@ -285,7 +285,7 @@ class RegType { private: static bool AssignableFrom(const RegType& lhs, const RegType& rhs, bool strict) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); DISALLOW_COPY_AND_ASSIGN(RegType); }; @@ -295,7 +295,7 @@ class ConflictType FINAL : public RegType { public: bool IsConflict() const OVERRIDE { return true; } - std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); // Get the singleton Conflict instance. static const ConflictType* GetInstance() PURE; @@ -304,14 +304,14 @@ class ConflictType FINAL : public RegType { static const ConflictType* CreateInstance(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Destroy the singleton instance. 
static void Destroy(); private: ConflictType(mirror::Class* klass, const std::string& descriptor, - uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_) : RegType(klass, descriptor, cache_id) {} static const ConflictType* instance_; @@ -324,7 +324,7 @@ class UndefinedType FINAL : public RegType { public: bool IsUndefined() const OVERRIDE { return true; } - std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); // Get the singleton Undefined instance. static const UndefinedType* GetInstance() PURE; @@ -333,14 +333,14 @@ class UndefinedType FINAL : public RegType { static const UndefinedType* CreateInstance(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Destroy the singleton instance. static void Destroy(); private: UndefinedType(mirror::Class* klass, const std::string& descriptor, - uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_) : RegType(klass, descriptor, cache_id) {} static const UndefinedType* instance_; @@ -349,7 +349,7 @@ class UndefinedType FINAL : public RegType { class PrimitiveType : public RegType { public: PrimitiveType(mirror::Class* klass, const std::string& descriptor, - uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_); bool HasClassVirtual() const OVERRIDE { return true; } }; @@ -357,23 +357,23 @@ class PrimitiveType : public RegType { class Cat1Type : public PrimitiveType { public: Cat1Type(mirror::Class* klass, const std::string& descriptor, - uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_); }; class IntegerType : public Cat1Type { public: bool IsInteger() const OVERRIDE { return true; } - std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); static const IntegerType* CreateInstance(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static const IntegerType* GetInstance() PURE; static void Destroy(); private: IntegerType(mirror::Class* klass, const std::string& descriptor, - uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_) : Cat1Type(klass, descriptor, cache_id) {} static const IntegerType* instance_; }; @@ -381,17 +381,17 @@ class IntegerType : public Cat1Type { class BooleanType FINAL : public Cat1Type { public: bool IsBoolean() const OVERRIDE { return true; } - std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); static const BooleanType* CreateInstance(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static const BooleanType* GetInstance() PURE; static void Destroy(); private: BooleanType(mirror::Class* klass, const std::string& descriptor, - uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_) : Cat1Type(klass, 
descriptor, cache_id) {} static const BooleanType* instance_; @@ -400,17 +400,17 @@ class BooleanType FINAL : public Cat1Type { class ByteType FINAL : public Cat1Type { public: bool IsByte() const OVERRIDE { return true; } - std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); static const ByteType* CreateInstance(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static const ByteType* GetInstance() PURE; static void Destroy(); private: ByteType(mirror::Class* klass, const std::string& descriptor, - uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_) : Cat1Type(klass, descriptor, cache_id) {} static const ByteType* instance_; }; @@ -418,17 +418,17 @@ class ByteType FINAL : public Cat1Type { class ShortType FINAL : public Cat1Type { public: bool IsShort() const OVERRIDE { return true; } - std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); static const ShortType* CreateInstance(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static const ShortType* GetInstance() PURE; static void Destroy(); private: ShortType(mirror::Class* klass, const std::string& descriptor, - uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_) : Cat1Type(klass, descriptor, cache_id) {} static const ShortType* instance_; }; @@ -436,17 +436,17 @@ class ShortType FINAL : public Cat1Type { class CharType FINAL : public Cat1Type { public: bool IsChar() const OVERRIDE { return true; } - std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); static const CharType* CreateInstance(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static const CharType* GetInstance() PURE; static void Destroy(); private: CharType(mirror::Class* klass, const std::string& descriptor, - uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_) : Cat1Type(klass, descriptor, cache_id) {} static const CharType* instance_; }; @@ -454,17 +454,17 @@ class CharType FINAL : public Cat1Type { class FloatType FINAL : public Cat1Type { public: bool IsFloat() const OVERRIDE { return true; } - std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); static const FloatType* CreateInstance(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static const FloatType* GetInstance() PURE; static void Destroy(); private: FloatType(mirror::Class* klass, const std::string& descriptor, - uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_) : Cat1Type(klass, descriptor, cache_id) {} static const FloatType* instance_; }; @@ -472,86 +472,86 @@ class FloatType FINAL : public Cat1Type { 
class Cat2Type : public PrimitiveType { public: Cat2Type(mirror::Class* klass, const std::string& descriptor, - uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_); }; class LongLoType FINAL : public Cat2Type { public: - std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); bool IsLongLo() const OVERRIDE { return true; } bool IsLong() const OVERRIDE { return true; } static const LongLoType* CreateInstance(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static const LongLoType* GetInstance() PURE; static void Destroy(); private: LongLoType(mirror::Class* klass, const std::string& descriptor, - uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_) : Cat2Type(klass, descriptor, cache_id) {} static const LongLoType* instance_; }; class LongHiType FINAL : public Cat2Type { public: - std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); bool IsLongHi() const OVERRIDE { return true; } static const LongHiType* CreateInstance(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static const LongHiType* GetInstance() PURE; static void Destroy(); private: LongHiType(mirror::Class* klass, const std::string& descriptor, - uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_) : Cat2Type(klass, descriptor, cache_id) {} static const LongHiType* instance_; }; class DoubleLoType FINAL : public Cat2Type { public: - std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); bool IsDoubleLo() const OVERRIDE { return true; } bool IsDouble() const OVERRIDE { return true; } static const DoubleLoType* CreateInstance(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static const DoubleLoType* GetInstance() PURE; static void Destroy(); private: DoubleLoType(mirror::Class* klass, const std::string& descriptor, - uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_) : Cat2Type(klass, descriptor, cache_id) {} static const DoubleLoType* instance_; }; class DoubleHiType FINAL : public Cat2Type { public: - std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); virtual bool IsDoubleHi() const OVERRIDE { return true; } static const DoubleHiType* CreateInstance(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static const DoubleHiType* GetInstance() PURE; static void Destroy(); private: DoubleHiType(mirror::Class* klass, const std::string& descriptor, - uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_) : Cat2Type(klass, descriptor, cache_id) {} static const 
DoubleHiType* instance_; }; class ConstantType : public RegType { public: - ConstantType(uint32_t constant, uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + ConstantType(uint32_t constant, uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_) : RegType(nullptr, "", cache_id), constant_(constant) { } @@ -609,58 +609,58 @@ class ConstantType : public RegType { class PreciseConstType FINAL : public ConstantType { public: PreciseConstType(uint32_t constant, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : ConstantType(constant, cache_id) {} bool IsPreciseConstant() const OVERRIDE { return true; } - std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); }; class PreciseConstLoType FINAL : public ConstantType { public: PreciseConstLoType(uint32_t constant, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : ConstantType(constant, cache_id) {} bool IsPreciseConstantLo() const OVERRIDE { return true; } - std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); }; class PreciseConstHiType FINAL : public ConstantType { public: PreciseConstHiType(uint32_t constant, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : ConstantType(constant, cache_id) {} bool IsPreciseConstantHi() const OVERRIDE { return true; } - std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); }; class ImpreciseConstType FINAL : public ConstantType { public: ImpreciseConstType(uint32_t constat, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : ConstantType(constat, cache_id) { } bool IsImpreciseConstant() const OVERRIDE { return true; } - std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); }; class ImpreciseConstLoType FINAL : public ConstantType { public: ImpreciseConstLoType(uint32_t constant, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : ConstantType(constant, cache_id) {} bool IsImpreciseConstantLo() const OVERRIDE { return true; } - std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); }; class ImpreciseConstHiType FINAL : public ConstantType { public: ImpreciseConstHiType(uint32_t constant, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : ConstantType(constant, cache_id) {} bool IsImpreciseConstantHi() const OVERRIDE { return true; } - std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); }; // Common parent of all uninitialized types. 
Uninitialized types are created by @@ -690,14 +690,14 @@ class UninitializedReferenceType FINAL : public UninitializedType { UninitializedReferenceType(mirror::Class* klass, const std::string& descriptor, uint32_t allocation_pc, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : UninitializedType(klass, descriptor, allocation_pc, cache_id) {} bool IsUninitializedReference() const OVERRIDE { return true; } bool HasClassVirtual() const OVERRIDE { return true; } - std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); }; // Similar to UnresolvedReferenceType but not yet having been passed to a @@ -706,7 +706,7 @@ class UnresolvedUninitializedRefType FINAL : public UninitializedType { public: UnresolvedUninitializedRefType(const std::string& descriptor, uint32_t allocation_pc, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : UninitializedType(nullptr, descriptor, allocation_pc, cache_id) { if (kIsDebugBuild) { CheckInvariants(); @@ -717,10 +717,10 @@ class UnresolvedUninitializedRefType FINAL : public UninitializedType { bool IsUnresolvedTypes() const OVERRIDE { return true; } - std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); private: - void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void CheckInvariants() const SHARED_REQUIRES(Locks::mutator_lock_); }; // Similar to UninitializedReferenceType but special case for the this argument @@ -730,7 +730,7 @@ class UninitializedThisReferenceType FINAL : public UninitializedType { UninitializedThisReferenceType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : UninitializedType(klass, descriptor, 0, cache_id) { if (kIsDebugBuild) { CheckInvariants(); @@ -741,17 +741,17 @@ class UninitializedThisReferenceType FINAL : public UninitializedType { bool HasClassVirtual() const OVERRIDE { return true; } - std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); private: - void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void CheckInvariants() const SHARED_REQUIRES(Locks::mutator_lock_); }; class UnresolvedUninitializedThisRefType FINAL : public UninitializedType { public: UnresolvedUninitializedThisRefType(const std::string& descriptor, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : UninitializedType(nullptr, descriptor, 0, cache_id) { if (kIsDebugBuild) { CheckInvariants(); @@ -762,10 +762,10 @@ class UnresolvedUninitializedThisRefType FINAL : public UninitializedType { bool IsUnresolvedTypes() const OVERRIDE { return true; } - std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); private: - void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void CheckInvariants() const SHARED_REQUIRES(Locks::mutator_lock_); }; // A type of register holding a reference to an Object of type GetClass or a @@ -773,7 +773,7 @@ class UnresolvedUninitializedThisRefType FINAL : public UninitializedType { class ReferenceType FINAL 
: public RegType { public: ReferenceType(mirror::Class* klass, const std::string& descriptor, - uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_) : RegType(klass, descriptor, cache_id) {} bool IsReference() const OVERRIDE { return true; } @@ -782,7 +782,7 @@ class ReferenceType FINAL : public RegType { bool HasClassVirtual() const OVERRIDE { return true; } - std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); }; // A type of register holding a reference to an Object of type GetClass and only @@ -792,7 +792,7 @@ class PreciseReferenceType FINAL : public RegType { public: PreciseReferenceType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool IsPreciseReference() const OVERRIDE { return true; } @@ -800,14 +800,14 @@ class PreciseReferenceType FINAL : public RegType { bool HasClassVirtual() const OVERRIDE { return true; } - std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); }; // Common parent of unresolved types. class UnresolvedType : public RegType { public: UnresolvedType(const std::string& descriptor, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : RegType(nullptr, descriptor, cache_id) {} bool IsNonZeroReferenceTypes() const OVERRIDE; @@ -819,7 +819,7 @@ class UnresolvedType : public RegType { class UnresolvedReferenceType FINAL : public UnresolvedType { public: UnresolvedReferenceType(const std::string& descriptor, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : UnresolvedType(descriptor, cache_id) { if (kIsDebugBuild) { CheckInvariants(); @@ -830,10 +830,10 @@ class UnresolvedReferenceType FINAL : public UnresolvedType { bool IsUnresolvedTypes() const OVERRIDE { return true; } - std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); private: - void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void CheckInvariants() const SHARED_REQUIRES(Locks::mutator_lock_); }; // Type representing the super-class of an unresolved type. 
@@ -841,7 +841,7 @@ class UnresolvedSuperClass FINAL : public UnresolvedType { public: UnresolvedSuperClass(uint16_t child_id, RegTypeCache* reg_type_cache, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : UnresolvedType("", cache_id), unresolved_child_id_(child_id), reg_type_cache_(reg_type_cache) { @@ -859,10 +859,10 @@ class UnresolvedSuperClass FINAL : public UnresolvedType { return static_cast<uint16_t>(unresolved_child_id_ & 0xFFFF); } - std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); private: - void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void CheckInvariants() const SHARED_REQUIRES(Locks::mutator_lock_); const uint16_t unresolved_child_id_; const RegTypeCache* const reg_type_cache_; @@ -875,7 +875,7 @@ class UnresolvedMergedType FINAL : public UnresolvedType { public: UnresolvedMergedType(uint16_t left_id, uint16_t right_id, const RegTypeCache* reg_type_cache, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : UnresolvedType("", cache_id), reg_type_cache_(reg_type_cache), merged_types_(left_id, right_id) { @@ -897,17 +897,17 @@ class UnresolvedMergedType FINAL : public UnresolvedType { bool IsUnresolvedTypes() const OVERRIDE { return true; } - std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); private: - void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void CheckInvariants() const SHARED_REQUIRES(Locks::mutator_lock_); const RegTypeCache* const reg_type_cache_; const std::pair<uint16_t, uint16_t> merged_types_; }; std::ostream& operator<<(std::ostream& os, const RegType& rhs) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); } // namespace verifier } // namespace art diff --git a/runtime/verifier/reg_type_cache.cc b/runtime/verifier/reg_type_cache.cc index b371d7e391..4469e64130 100644 --- a/runtime/verifier/reg_type_cache.cc +++ b/runtime/verifier/reg_type_cache.cc @@ -31,7 +31,7 @@ uint16_t RegTypeCache::primitive_count_ = 0; const PreciseConstType* RegTypeCache::small_precise_constants_[kMaxSmallConstant - kMinSmallConstant + 1]; static bool MatchingPrecisionForClass(const RegType* entry, bool precise) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (entry->IsPreciseReference() == precise) { // We were or weren't looking for a precise reference and we found what we need. return true; @@ -427,9 +427,18 @@ const RegType& RegTypeCache::FromUninitialized(const RegType& uninit_type) { } } entry = new ReferenceType(klass, "", entries_.size()); - } else if (klass->IsInstantiable()) { + } else if (!klass->IsPrimitive()) { // We're uninitialized because of allocation, look or create a precise type as allocations // may only create objects of that type. + // Note: we do not check whether the given klass is actually instantiable (besides being + // primitive), that is, we allow interfaces and abstract classes here. The reasoning is + // twofold: + // 1) The "new-instance" instruction to generate the uninitialized type will already + // queue an instantiation error. This is a soft error that must be thrown at runtime, + // and could potentially change if the class is resolved differently at runtime. 
+ // 2) Checking whether the klass is instantiable and using conflict may produce a hard + // error when the value is used, which leads to a VerifyError, which is not the + // correct semantics. for (size_t i = primitive_count_; i < entries_.size(); i++) { const RegType* cur_entry = entries_[i]; if (cur_entry->IsPreciseReference() && cur_entry->GetClass() == klass) { diff --git a/runtime/verifier/reg_type_cache.h b/runtime/verifier/reg_type_cache.h index 4b3105c3da..8319de6b28 100644 --- a/runtime/verifier/reg_type_cache.h +++ b/runtime/verifier/reg_type_cache.h @@ -42,7 +42,7 @@ class RegTypeCache { public: explicit RegTypeCache(bool can_load_classes); ~RegTypeCache(); - static void Init() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + static void Init() SHARED_REQUIRES(Locks::mutator_lock_) { if (!RegTypeCache::primitive_initialized_) { CHECK_EQ(RegTypeCache::primitive_count_, 0); CreatePrimitiveAndSmallConstantTypes(); @@ -53,110 +53,110 @@ class RegTypeCache { static void ShutDown(); const art::verifier::RegType& GetFromId(uint16_t id) const; const RegType& From(mirror::ClassLoader* loader, const char* descriptor, bool precise) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); const RegType& FromClass(const char* descriptor, mirror::Class* klass, bool precise) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); const ConstantType& FromCat1Const(int32_t value, bool precise) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); const ConstantType& FromCat2ConstLo(int32_t value, bool precise) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); const ConstantType& FromCat2ConstHi(int32_t value, bool precise) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); const RegType& FromDescriptor(mirror::ClassLoader* loader, const char* descriptor, bool precise) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); const RegType& FromUnresolvedMerge(const RegType& left, const RegType& right) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); const RegType& FromUnresolvedSuperClass(const RegType& child) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - const ConstantType& Zero() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_); + const ConstantType& Zero() SHARED_REQUIRES(Locks::mutator_lock_) { return FromCat1Const(0, true); } - const ConstantType& One() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + const ConstantType& One() SHARED_REQUIRES(Locks::mutator_lock_) { return FromCat1Const(1, true); } size_t GetCacheSize() { return entries_.size(); } - const BooleanType& Boolean() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + const BooleanType& Boolean() SHARED_REQUIRES(Locks::mutator_lock_) { return *BooleanType::GetInstance(); } - const ByteType& Byte() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + const ByteType& Byte() SHARED_REQUIRES(Locks::mutator_lock_) { return *ByteType::GetInstance(); } - const CharType& Char() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + const CharType& Char() SHARED_REQUIRES(Locks::mutator_lock_) { return *CharType::GetInstance(); } - const ShortType& Short() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + const ShortType& Short() SHARED_REQUIRES(Locks::mutator_lock_) { return *ShortType::GetInstance(); } - const IntegerType& Integer() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + 
const IntegerType& Integer() SHARED_REQUIRES(Locks::mutator_lock_) { return *IntegerType::GetInstance(); } - const FloatType& Float() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + const FloatType& Float() SHARED_REQUIRES(Locks::mutator_lock_) { return *FloatType::GetInstance(); } - const LongLoType& LongLo() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + const LongLoType& LongLo() SHARED_REQUIRES(Locks::mutator_lock_) { return *LongLoType::GetInstance(); } - const LongHiType& LongHi() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + const LongHiType& LongHi() SHARED_REQUIRES(Locks::mutator_lock_) { return *LongHiType::GetInstance(); } - const DoubleLoType& DoubleLo() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + const DoubleLoType& DoubleLo() SHARED_REQUIRES(Locks::mutator_lock_) { return *DoubleLoType::GetInstance(); } - const DoubleHiType& DoubleHi() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + const DoubleHiType& DoubleHi() SHARED_REQUIRES(Locks::mutator_lock_) { return *DoubleHiType::GetInstance(); } - const UndefinedType& Undefined() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + const UndefinedType& Undefined() SHARED_REQUIRES(Locks::mutator_lock_) { return *UndefinedType::GetInstance(); } const ConflictType& Conflict() { return *ConflictType::GetInstance(); } - const PreciseReferenceType& JavaLangClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - const PreciseReferenceType& JavaLangString() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - const RegType& JavaLangThrowable(bool precise) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - const RegType& JavaLangObject(bool precise) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const PreciseReferenceType& JavaLangClass() SHARED_REQUIRES(Locks::mutator_lock_); + const PreciseReferenceType& JavaLangString() SHARED_REQUIRES(Locks::mutator_lock_); + const RegType& JavaLangThrowable(bool precise) SHARED_REQUIRES(Locks::mutator_lock_); + const RegType& JavaLangObject(bool precise) SHARED_REQUIRES(Locks::mutator_lock_); const UninitializedType& Uninitialized(const RegType& type, uint32_t allocation_pc) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Create an uninitialized 'this' argument for the given type. 
const UninitializedType& UninitializedThisArgument(const RegType& type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); const RegType& FromUninitialized(const RegType& uninit_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - const ImpreciseConstType& ByteConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - const ImpreciseConstType& CharConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - const ImpreciseConstType& ShortConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - const ImpreciseConstType& IntConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - const ImpreciseConstType& PosByteConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - const ImpreciseConstType& PosShortConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); + const ImpreciseConstType& ByteConstant() SHARED_REQUIRES(Locks::mutator_lock_); + const ImpreciseConstType& CharConstant() SHARED_REQUIRES(Locks::mutator_lock_); + const ImpreciseConstType& ShortConstant() SHARED_REQUIRES(Locks::mutator_lock_); + const ImpreciseConstType& IntConstant() SHARED_REQUIRES(Locks::mutator_lock_); + const ImpreciseConstType& PosByteConstant() SHARED_REQUIRES(Locks::mutator_lock_); + const ImpreciseConstType& PosShortConstant() SHARED_REQUIRES(Locks::mutator_lock_); const RegType& GetComponentType(const RegType& array, mirror::ClassLoader* loader) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void Dump(std::ostream& os) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); + void Dump(std::ostream& os) SHARED_REQUIRES(Locks::mutator_lock_); const RegType& RegTypeFromPrimitiveType(Primitive::Type) const; void VisitRoots(RootVisitor* visitor, const RootInfo& root_info) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void VisitStaticRoots(RootVisitor* visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); private: - void FillPrimitiveAndSmallConstantTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void FillPrimitiveAndSmallConstantTypes() SHARED_REQUIRES(Locks::mutator_lock_); mirror::Class* ResolveClass(const char* descriptor, mirror::ClassLoader* loader) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool MatchDescriptor(size_t idx, const StringPiece& descriptor, bool precise) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); const ConstantType& FromCat1NonSmallConstant(int32_t value, bool precise) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void AddEntry(RegType* new_entry); template <class Type> static const Type* CreatePrimitiveTypeInstance(const std::string& descriptor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static void CreatePrimitiveAndSmallConstantTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); + static void CreatePrimitiveAndSmallConstantTypes() SHARED_REQUIRES(Locks::mutator_lock_); // A quick look up for popular small constants. static constexpr int32_t kMinSmallConstant = -1; diff --git a/runtime/verifier/register_line.h b/runtime/verifier/register_line.h index 0de0d9ce0f..4fb3a2c99a 100644 --- a/runtime/verifier/register_line.h +++ b/runtime/verifier/register_line.h @@ -60,54 +60,54 @@ class RegisterLine { // Implement category-1 "move" instructions. Copy a 32-bit value from "vsrc" to "vdst". 
void CopyRegister1(MethodVerifier* verifier, uint32_t vdst, uint32_t vsrc, TypeCategory cat) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Implement category-2 "move" instructions. Copy a 64-bit value from "vsrc" to "vdst". This // copies both halves of the register. void CopyRegister2(MethodVerifier* verifier, uint32_t vdst, uint32_t vsrc) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Implement "move-result". Copy the category-1 value from the result register to another // register, and reset the result register. void CopyResultRegister1(MethodVerifier* verifier, uint32_t vdst, bool is_reference) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Implement "move-result-wide". Copy the category-2 value from the result register to another // register, and reset the result register. void CopyResultRegister2(MethodVerifier* verifier, uint32_t vdst) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Set the invisible result register to unknown - void SetResultTypeToUnknown(MethodVerifier* verifier) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetResultTypeToUnknown(MethodVerifier* verifier) SHARED_REQUIRES(Locks::mutator_lock_); // Set the type of register N, verifying that the register is valid. If "newType" is the "Lo" // part of a 64-bit value, register N+1 will be set to "newType+1". // The register index was validated during the static pass, so we don't need to check it here. ALWAYS_INLINE bool SetRegisterType(MethodVerifier* verifier, uint32_t vdst, const RegType& new_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool SetRegisterTypeWide(MethodVerifier* verifier, uint32_t vdst, const RegType& new_type1, const RegType& new_type2) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); /* Set the type of the "result" register. */ void SetResultRegisterType(MethodVerifier* verifier, const RegType& new_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void SetResultRegisterTypeWide(const RegType& new_type1, const RegType& new_type2) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Get the type of register vsrc. const RegType& GetRegisterType(MethodVerifier* verifier, uint32_t vsrc) const; ALWAYS_INLINE bool VerifyRegisterType(MethodVerifier* verifier, uint32_t vsrc, const RegType& check_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool VerifyRegisterTypeWide(MethodVerifier* verifier, uint32_t vsrc, const RegType& check_type1, const RegType& check_type2) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void CopyFromLine(const RegisterLine* src) { DCHECK_EQ(num_regs_, src->num_regs_); @@ -116,7 +116,7 @@ class RegisterLine { reg_to_lock_depths_ = src->reg_to_lock_depths_; } - std::string Dump(MethodVerifier* verifier) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump(MethodVerifier* verifier) const SHARED_REQUIRES(Locks::mutator_lock_); void FillWithGarbage() { memset(&line_, 0xf1, num_regs_ * sizeof(uint16_t)); @@ -131,7 +131,7 @@ class RegisterLine { * the new ones at the same time). 
*/ void MarkUninitRefsAsInvalid(MethodVerifier* verifier, const RegType& uninit_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); /* * Update all registers holding "uninit_type" to instead hold the corresponding initialized @@ -140,7 +140,7 @@ class RegisterLine { */ void MarkRefsAsInitialized(MethodVerifier* verifier, const RegType& uninit_type, uint32_t this_reg, uint32_t dex_pc) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); /* * Update all registers to be Conflict except vsrc. @@ -194,7 +194,7 @@ class RegisterLine { */ const RegType& GetInvocationThis(MethodVerifier* verifier, const Instruction* inst, bool is_range, bool allow_failure = false) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); /* * Verify types for a simple two-register instruction (e.g. "neg-int"). @@ -202,22 +202,22 @@ class RegisterLine { */ void CheckUnaryOp(MethodVerifier* verifier, const Instruction* inst, const RegType& dst_type, const RegType& src_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void CheckUnaryOpWide(MethodVerifier* verifier, const Instruction* inst, const RegType& dst_type1, const RegType& dst_type2, const RegType& src_type1, const RegType& src_type2) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void CheckUnaryOpToWide(MethodVerifier* verifier, const Instruction* inst, const RegType& dst_type1, const RegType& dst_type2, const RegType& src_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void CheckUnaryOpFromWide(MethodVerifier* verifier, const Instruction* inst, const RegType& dst_type, const RegType& src_type1, const RegType& src_type2) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); /* * Verify types for a simple three-register instruction (e.g. "add-int"). @@ -227,18 +227,18 @@ class RegisterLine { void CheckBinaryOp(MethodVerifier* verifier, const Instruction* inst, const RegType& dst_type, const RegType& src_type1, const RegType& src_type2, bool check_boolean_op) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void CheckBinaryOpWide(MethodVerifier* verifier, const Instruction* inst, const RegType& dst_type1, const RegType& dst_type2, const RegType& src_type1_1, const RegType& src_type1_2, const RegType& src_type2_1, const RegType& src_type2_2) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void CheckBinaryOpWideShift(MethodVerifier* verifier, const Instruction* inst, const RegType& long_lo_type, const RegType& long_hi_type, const RegType& int_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); /* * Verify types for a binary "2addr" operation. 
"src_type1"/"src_type2" @@ -248,18 +248,18 @@ class RegisterLine { const RegType& dst_type, const RegType& src_type1, const RegType& src_type2, bool check_boolean_op) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void CheckBinaryOp2addrWide(MethodVerifier* verifier, const Instruction* inst, const RegType& dst_type1, const RegType& dst_type2, const RegType& src_type1_1, const RegType& src_type1_2, const RegType& src_type2_1, const RegType& src_type2_2) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void CheckBinaryOp2addrWideShift(MethodVerifier* verifier, const Instruction* inst, const RegType& long_lo_type, const RegType& long_hi_type, const RegType& int_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); /* * Verify types for A two-register instruction with a literal constant (e.g. "add-int/lit8"). @@ -270,15 +270,15 @@ class RegisterLine { void CheckLiteralOp(MethodVerifier* verifier, const Instruction* inst, const RegType& dst_type, const RegType& src_type, bool check_boolean_op, bool is_lit16) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Verify/push monitor onto the monitor stack, locking the value in reg_idx at location insn_idx. void PushMonitor(MethodVerifier* verifier, uint32_t reg_idx, int32_t insn_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Verify/pop monitor from monitor stack ensuring that we believe the monitor is locked void PopMonitor(MethodVerifier* verifier, uint32_t reg_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Stack of currently held monitors and where they were locked size_t MonitorStackDepth() const { @@ -290,7 +290,7 @@ class RegisterLine { bool VerifyMonitorStackEmpty(MethodVerifier* verifier) const; bool MergeRegisters(MethodVerifier* verifier, const RegisterLine* incoming_line) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); size_t GetMaxNonZeroReferenceReg(MethodVerifier* verifier, size_t max_ref_reg) const; diff --git a/runtime/well_known_classes.h b/runtime/well_known_classes.h index 66b9abece7..6dd8168cbf 100644 --- a/runtime/well_known_classes.h +++ b/runtime/well_known_classes.h @@ -38,7 +38,7 @@ struct WellKnownClasses { static jmethodID StringInitToStringFactoryMethodID(jmethodID string_init); static mirror::Class* ToClass(jclass global_jclass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static jclass com_android_dex_Dex; static jclass dalvik_system_DexFile; diff --git a/sigchainlib/sigchain.cc b/sigchainlib/sigchain.cc index 1391d147a7..c984b17c2c 100644 --- a/sigchainlib/sigchain.cc +++ b/sigchainlib/sigchain.cc @@ -14,7 +14,7 @@ * limitations under the License. */ -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ #include <android/log.h> #else #include <stdarg.h> @@ -103,7 +103,7 @@ static void log(const char* format, ...) { va_list ap; va_start(ap, format); vsnprintf(buf, sizeof(buf), format, ap); -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ __android_log_write(ANDROID_LOG_ERROR, "libsigchain", buf); #else std::cout << buf << "\n"; @@ -337,14 +337,16 @@ extern "C" void SetSpecialSignalHandlerFn(int signal, SpecialSignalHandlerFn fn) // In case the chain isn't claimed, claim it for ourself so we can ensure the managed handler // goes first. 
if (!user_sigactions[signal].IsClaimed()) { - struct sigaction tmp; - tmp.sa_sigaction = sigchainlib_managed_handler_sigaction; - sigemptyset(&tmp.sa_mask); - tmp.sa_flags = SA_SIGINFO | SA_ONSTACK; + struct sigaction act, old_act; + act.sa_sigaction = sigchainlib_managed_handler_sigaction; + sigemptyset(&act.sa_mask); + act.sa_flags = SA_SIGINFO | SA_ONSTACK; #if !defined(__APPLE__) && !defined(__mips__) - tmp.sa_restorer = nullptr; + act.sa_restorer = nullptr; #endif - user_sigactions[signal].Claim(tmp); + if (sigaction(signal, &act, &old_act) != -1) { + user_sigactions[signal].Claim(old_act); + } } } diff --git a/sigchainlib/sigchain_dummy.cc b/sigchainlib/sigchain_dummy.cc index 8495a5417f..dfe0c6f981 100644 --- a/sigchainlib/sigchain_dummy.cc +++ b/sigchainlib/sigchain_dummy.cc @@ -17,7 +17,7 @@ #include <stdio.h> #include <stdlib.h> -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ #include <android/log.h> #else #include <stdarg.h> @@ -38,7 +38,7 @@ static void log(const char* format, ...) { va_list ap; va_start(ap, format); vsnprintf(buf, sizeof(buf), format, ap); -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ __android_log_write(ANDROID_LOG_ERROR, "libsigchain", buf); #else std::cout << buf << "\n"; diff --git a/test/004-ReferenceMap/stack_walk_refmap_jni.cc b/test/004-ReferenceMap/stack_walk_refmap_jni.cc index e626e48be9..767e1de68f 100644 --- a/test/004-ReferenceMap/stack_walk_refmap_jni.cc +++ b/test/004-ReferenceMap/stack_walk_refmap_jni.cc @@ -29,10 +29,10 @@ namespace art { } while (false); struct ReferenceMap2Visitor : public CheckReferenceMapVisitor { - explicit ReferenceMap2Visitor(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + explicit ReferenceMap2Visitor(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_) : CheckReferenceMapVisitor(thread) {} - bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) { if (CheckReferenceMapVisitor::VisitFrame()) { return true; } diff --git a/test/004-StackWalk/stack_walk_jni.cc b/test/004-StackWalk/stack_walk_jni.cc index 6b155149a4..3a5854ba96 100644 --- a/test/004-StackWalk/stack_walk_jni.cc +++ b/test/004-StackWalk/stack_walk_jni.cc @@ -29,10 +29,10 @@ static int gJava_StackWalk_refmap_calls = 0; class TestReferenceMapVisitor : public CheckReferenceMapVisitor { public: - explicit TestReferenceMapVisitor(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + explicit TestReferenceMapVisitor(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_) : CheckReferenceMapVisitor(thread) {} - bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) { if (CheckReferenceMapVisitor::VisitFrame()) { return true; } diff --git a/test/046-reflect/expected.txt b/test/046-reflect/expected.txt index fa053fb92d..d657d44e61 100644 --- a/test/046-reflect/expected.txt +++ b/test/046-reflect/expected.txt @@ -24,7 +24,7 @@ Method name is myMethod SuperTarget constructor ()V Target constructor ()V Before, float is 3.1415925 -myMethod: hi there 3.1415925 Q ! +myMethod: hi there 3.1415925 ✔ ! 
Result of invoke: 7 Calling no-arg void-return method myNoargMethod ()V diff --git a/test/046-reflect/src/Main.java b/test/046-reflect/src/Main.java index 0d8e576086..0c90109c69 100644 --- a/test/046-reflect/src/Main.java +++ b/test/046-reflect/src/Main.java @@ -147,7 +147,7 @@ public class Main { Object[] argList = new Object[] { new String[] { "hi there" }, new Float(3.1415926f), - new Character('Q') + new Character('\u2714') }; System.out.println("Before, float is " + ((Float)argList[1]).floatValue()); diff --git a/test/051-thread/thread_test.cc b/test/051-thread/thread_test.cc index 2b8e675cc6..4215207c97 100644 --- a/test/051-thread/thread_test.cc +++ b/test/051-thread/thread_test.cc @@ -28,7 +28,7 @@ extern "C" JNIEXPORT jint JNICALL Java_Main_getNativePriority(JNIEnv* env, extern "C" JNIEXPORT jboolean JNICALL Java_Main_supportsThreadPriorities( JNIEnv* env ATTRIBUTE_UNUSED, jclass clazz ATTRIBUTE_UNUSED) { -#if defined(HAVE_ANDROID_OS) +#if defined(__ANDROID__) return JNI_TRUE; #else return JNI_FALSE; diff --git a/test/100-reflect2/expected.txt b/test/100-reflect2/expected.txt index 7db61a1023..c932761c3b 100644 --- a/test/100-reflect2/expected.txt +++ b/test/100-reflect2/expected.txt @@ -1,6 +1,6 @@ true 8 -x +✔ 3.141592653589793 3.14 32 diff --git a/test/100-reflect2/src/Main.java b/test/100-reflect2/src/Main.java index 72e14b15f3..bf3a574c99 100644 --- a/test/100-reflect2/src/Main.java +++ b/test/100-reflect2/src/Main.java @@ -20,7 +20,7 @@ import java.util.*; class Main { private static boolean z = true; private static byte b = 8; - private static char c = 'x'; + private static char c = '\u2714'; private static double d = Math.PI; private static float f = 3.14f; private static int i = 32; @@ -144,7 +144,7 @@ class Main { /* private static boolean z = true; private static byte b = 8; - private static char c = 'x'; + private static char c = '\u2714'; private static double d = Math.PI; private static float f = 3.14f; private static int i = 32; @@ -263,7 +263,7 @@ class Main { show(ctor.newInstance((Object[]) null)); ctor = String.class.getConstructor(char[].class, int.class, int.class); - show(ctor.newInstance(new char[] { 'x', 'y', 'z', '!' }, 1, 2)); + show(ctor.newInstance(new char[] { '\u2714', 'y', 'z', '!' 
}, 1, 2)); } private static void testPackagePrivateConstructor() { diff --git a/test/109-suspend-check/src/Main.java b/test/109-suspend-check/src/Main.java index 8046d751ed..3c3353b4db 100644 --- a/test/109-suspend-check/src/Main.java +++ b/test/109-suspend-check/src/Main.java @@ -32,6 +32,8 @@ public class Main { new InfiniteWhileLoopWithSpecialPutOrNop(new SpecialMethods2()), new InfiniteWhileLoopWithSpecialConstOrIGet(new SpecialMethods1()), new InfiniteWhileLoopWithSpecialConstOrIGet(new SpecialMethods2()), + new InfiniteWhileLoopWithSpecialConstOrIGetInTryCatch(new SpecialMethods1()), + new InfiniteWhileLoopWithSpecialConstOrIGetInTryCatch(new SpecialMethods2()), }; doWhileLoopWithLong.start(); for (SimpleLoopThread loop : simpleLoops) { @@ -135,6 +137,21 @@ class InfiniteWhileLoopWithSpecialConstOrIGet extends SimpleLoopThread { } } +class InfiniteWhileLoopWithSpecialConstOrIGetInTryCatch extends SimpleLoopThread { + private SpecialMethodInterface smi; + public InfiniteWhileLoopWithSpecialConstOrIGetInTryCatch(SpecialMethodInterface smi) { + this.smi = smi; + } + public void run() { + try { + long i = 0L; + while (keepGoing) { + i += smi.ConstOrIGet(); + } + } catch (Throwable ignored) { } + } +} + class InfiniteWhileLoopWithIntrinsic extends SimpleLoopThread { private String[] strings = { "a", "b", "c", "d" }; private int sum = 0; diff --git a/test/115-native-bridge/expected.txt b/test/115-native-bridge/expected.txt index 464d2c887e..372ecd0484 100644 --- a/test/115-native-bridge/expected.txt +++ b/test/115-native-bridge/expected.txt @@ -61,3 +61,4 @@ Getting trampoline for Java_Main_testNewStringObject with shorty V. trampoline_Java_Main_testNewStringObject called! Getting trampoline for Java_Main_testSignal with shorty I. NB signal handler with signal 11. +NB signal handler with signal 4. diff --git a/test/115-native-bridge/nativebridge.cc b/test/115-native-bridge/nativebridge.cc index c8141a7fb8..a6a6e08e26 100644 --- a/test/115-native-bridge/nativebridge.cc +++ b/test/115-native-bridge/nativebridge.cc @@ -200,8 +200,9 @@ static jint trampoline_Java_Main_testSignal(JNIEnv*, jclass) { #if !defined(__APPLE__) && !defined(__mips__) tmp.sa_restorer = nullptr; #endif - sigaction(SIGSEGV, &tmp, nullptr); + // Test segv + sigaction(SIGSEGV, &tmp, nullptr); #if defined(__arm__) || defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) // On supported architectures we cause a real SEGV. *go_away_compiler = 'a'; @@ -209,6 +210,11 @@ static jint trampoline_Java_Main_testSignal(JNIEnv*, jclass) { // On other architectures we simulate SEGV. kill(getpid(), SIGSEGV); #endif + + // Test sigill + sigaction(SIGILL, &tmp, nullptr); + kill(getpid(), SIGILL); + return 1234; } @@ -385,27 +391,29 @@ extern "C" bool nb_is_compatible(uint32_t bridge_version ATTRIBUTE_UNUSED) { // 004-SignalTest. static bool nb_signalhandler(int sig, siginfo_t* info ATTRIBUTE_UNUSED, void* context) { printf("NB signal handler with signal %d.\n", sig); + if (sig == SIGSEGV) { #if defined(__arm__) - struct ucontext *uc = reinterpret_cast<struct ucontext*>(context); - struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext); - sc->arm_pc += 2; // Skip instruction causing segv. + struct ucontext *uc = reinterpret_cast<struct ucontext*>(context); + struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext); + sc->arm_pc += 2; // Skip instruction causing segv & sigill. 
#elif defined(__aarch64__) - struct ucontext *uc = reinterpret_cast<struct ucontext*>(context); - struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext); - sc->pc += 4; // Skip instruction causing segv. + struct ucontext *uc = reinterpret_cast<struct ucontext*>(context); + struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext); + sc->pc += 4; // Skip instruction causing segv & sigill. #elif defined(__i386__) || defined(__x86_64__) - struct ucontext *uc = reinterpret_cast<struct ucontext*>(context); - uc->CTX_EIP += 3; + struct ucontext *uc = reinterpret_cast<struct ucontext*>(context); + uc->CTX_EIP += 3; #else - UNUSED(context); + UNUSED(context); #endif + } // We handled this... return true; } static ::android::NativeBridgeSignalHandlerFn native_bridge_get_signal_handler(int signal) { - // Only test segfault handler. - if (signal == SIGSEGV) { + // Test segv for already claimed signal, and sigill for not claimed signal + if ((signal == SIGSEGV) || (signal == SIGILL)) { return &nb_signalhandler; } return nullptr; diff --git a/test/441-checker-inliner/src/Main.java b/test/441-checker-inliner/src/Main.java index 4db116a128..c108a900e2 100644 --- a/test/441-checker-inliner/src/Main.java +++ b/test/441-checker-inliner/src/Main.java @@ -157,6 +157,31 @@ public class Main { return x; } + /// CHECK-START: int Main.returnAbs(int) intrinsics_recognition (before) + /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect + /// CHECK-DAG: Return [<<Result>>] + + /// CHECK-START: int Main.returnAbs(int) intrinsics_recognition (after) + /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect intrinsic:MathAbsInt + /// CHECK-DAG: Return [<<Result>>] + + private static int returnAbs(int i) { + return Math.abs(i); + } + + /// CHECK-START: int Main.InlinedIntrinsicsAreStillIntrinsic() inliner (before) + /// CHECK-DAG: <<ConstMinus1:i\d+>> IntConstant -1 + /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect + /// CHECK-DAG: Return [<<Result>>] + + /// CHECK-START: int Main.InlinedIntrinsicsAreStillIntrinsic() inliner (after) + /// CHECK-DAG: <<ConstMinus1:i\d+>> IntConstant -1 + /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect intrinsic:MathAbsInt + /// CHECK-DAG: Return [<<Result>>] + + public static int InlinedIntrinsicsAreStillIntrinsic() { + return returnAbs(-1); + } private static void returnVoid() { return; @@ -238,5 +263,13 @@ public class Main { if (InlineWithControlFlow(false) != 2) { throw new Error(); } + + if (InlinedIntrinsicsAreStillIntrinsic() != 1) { + throw new Error(); + } + + if (returnAbs(-1) != 1) { + throw new Error(); + } } } diff --git a/test/449-checker-bce/expected.txt b/test/449-checker-bce/expected.txt index e69de29bb2..e114c50371 100644 --- a/test/449-checker-bce/expected.txt +++ b/test/449-checker-bce/expected.txt @@ -0,0 +1 @@ +java.lang.ArrayIndexOutOfBoundsException: length=5; index=82 diff --git a/test/449-checker-bce/src/Main.java b/test/449-checker-bce/src/Main.java index c4f7ddbeaf..a746664160 100644 --- a/test/449-checker-bce/src/Main.java +++ b/test/449-checker-bce/src/Main.java @@ -1101,6 +1101,28 @@ public class Main { } + public void testExceptionMessage() { + short[] B1 = new short[5]; + int[] B2 = new int[5]; + Exception err = null; + try { + testExceptionMessage1(B1, B2, null, -1, 6); + } catch (Exception e) { + err = e; + } + System.out.println(err); + } + + void testExceptionMessage1(short[] a1, int[] a2, long a3[], int start, int finish) { + int j = finish + 77; + // Bug: 22665511 + // A deoptimization will be 
triggered here right before the loop. Need to make + // sure the value of j is preserved for the interpreter. + for (int i = start; i <= finish; i++) { + a2[j - 1] = a1[i + 1]; + } + } + // Make sure this method is compiled with optimizing. /// CHECK-START: void Main.main(java.lang.String[]) register (after) /// CHECK: ParallelMove @@ -1141,6 +1163,7 @@ public class Main { }; testUnknownBounds(); + new Main().testExceptionMessage(); } } diff --git a/test/450-checker-types/src/Main.java b/test/450-checker-types/src/Main.java index 4e14b90fe3..251a53e456 100644 --- a/test/450-checker-types/src/Main.java +++ b/test/450-checker-types/src/Main.java @@ -483,6 +483,24 @@ public class Main { s.$noinline$f(); } + /// CHECK-START: java.lang.String Main.checkcastPreserveNullCheck(java.lang.Object) reference_type_propagation_after_inlining (after) + /// CHECK: <<This:l\d+>> ParameterValue + /// CHECK: <<Param:l\d+>> ParameterValue + /// CHECK: <<Clazz:l\d+>> LoadClass + /// CHECK: CheckCast [<<Param>>,<<Clazz>>] + /// CHECK: BoundType [<<Param>>] can_be_null:true + + /// CHECK-START: java.lang.String Main.checkcastPreserveNullCheck(java.lang.Object) instruction_simplifier_after_types (after) + /// CHECK: <<This:l\d+>> ParameterValue + /// CHECK: <<Param:l\d+>> ParameterValue + /// CHECK: <<Clazz:l\d+>> LoadClass + /// CHECK: CheckCast [<<Param>>,<<Clazz>>] + /// CHECK: <<Bound:l\d+>> BoundType [<<Param>>] + /// CHECK: NullCheck [<<Bound>>] + public String checkcastPreserveNullCheck(Object a) { + return ((SubclassA)a).toString(); + } + public static void main(String[] args) { } } diff --git a/test/454-get-vreg/get_vreg_jni.cc b/test/454-get-vreg/get_vreg_jni.cc index 33bdc200db..9facfdb076 100644 --- a/test/454-get-vreg/get_vreg_jni.cc +++ b/test/454-get-vreg/get_vreg_jni.cc @@ -28,12 +28,12 @@ namespace { class TestVisitor : public StackVisitor { public: TestVisitor(Thread* thread, Context* context, mirror::Object* this_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames), this_value_(this_value), found_method_index_(0) {} - bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod* m = GetMethod(); std::string m_name(m->GetName()); diff --git a/test/455-set-vreg/set_vreg_jni.cc b/test/455-set-vreg/set_vreg_jni.cc index 754118935c..21149f67e8 100644 --- a/test/455-set-vreg/set_vreg_jni.cc +++ b/test/455-set-vreg/set_vreg_jni.cc @@ -28,11 +28,11 @@ namespace { class TestVisitor : public StackVisitor { public: TestVisitor(Thread* thread, Context* context, mirror::Object* this_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames), this_value_(this_value) {} - bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod* m = GetMethod(); std::string m_name(m->GetName()); diff --git a/test/457-regs/regs_jni.cc b/test/457-regs/regs_jni.cc index 96f0e52995..c21168b81e 100644 --- a/test/457-regs/regs_jni.cc +++ b/test/457-regs/regs_jni.cc @@ -28,10 +28,10 @@ namespace { class TestVisitor : public StackVisitor { public: TestVisitor(Thread* thread, Context* context) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : StackVisitor(thread, context, 
StackVisitor::StackWalkKind::kIncludeInlinedFrames) {} - bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod* m = GetMethod(); std::string m_name(m->GetName()); diff --git a/test/461-get-reference-vreg/get_reference_vreg_jni.cc b/test/461-get-reference-vreg/get_reference_vreg_jni.cc index 23fe43d906..8108c97f77 100644 --- a/test/461-get-reference-vreg/get_reference_vreg_jni.cc +++ b/test/461-get-reference-vreg/get_reference_vreg_jni.cc @@ -28,12 +28,12 @@ namespace { class TestVisitor : public StackVisitor { public: TestVisitor(Thread* thread, Context* context, mirror::Object* this_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames), this_value_(this_value), found_method_index_(0) {} - bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod* m = GetMethod(); std::string m_name(m->GetName()); diff --git a/test/466-get-live-vreg/get_live_vreg_jni.cc b/test/466-get-live-vreg/get_live_vreg_jni.cc index c4f415b3f9..9b32fc397b 100644 --- a/test/466-get-live-vreg/get_live_vreg_jni.cc +++ b/test/466-get-live-vreg/get_live_vreg_jni.cc @@ -27,10 +27,10 @@ namespace { class TestVisitor : public StackVisitor { public: - TestVisitor(Thread* thread, Context* context) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + TestVisitor(Thread* thread, Context* context) SHARED_REQUIRES(Locks::mutator_lock_) : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {} - bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod* m = GetMethod(); std::string m_name(m->GetName()); diff --git a/test/484-checker-register-hints/src/Main.java b/test/484-checker-register-hints/src/Main.java index 3715ca2b14..6e68f7c91e 100644 --- a/test/484-checker-register-hints/src/Main.java +++ b/test/484-checker-register-hints/src/Main.java @@ -16,6 +16,14 @@ public class Main { + static class Foo { + int field0; + int field1; + int field2; + int field3; + int field4; + }; + /// CHECK-START: void Main.test1(boolean, int, int, int, int, int) register (after) /// CHECK: name "B0" /// CHECK-NOT: ParallelMove @@ -25,7 +33,7 @@ public class Main { /// CHECK-NOT: ParallelMove /// CHECK: name "B3" /// CHECK-NOT: end_block - /// CHECK: ArraySet + /// CHECK: InstanceFieldSet // We could check here that there is a parallel move, but it's only valid // for some architectures (for example x86), as other architectures may // not do move at all. @@ -36,19 +44,19 @@ public class Main { int e = live1; int f = live2; int g = live3; + int j = live0; if (z) { } else { // Create enough live instructions to force spilling on x86. 
int h = live4; int i = live5; - array[2] = e + i + h; - array[3] = f + i + h; - array[4] = g + i + h; - array[0] = h; - array[1] = i + h; - + foo.field2 = e + i + h; + foo.field3 = f + i + h; + foo.field4 = g + i + h; + foo.field0 = h; + foo.field1 = i + h; } - live1 = e + f + g; + live1 = e + f + g + j; } /// CHECK-START: void Main.test2(boolean, int, int, int, int, int) register (after) @@ -60,7 +68,7 @@ public class Main { /// CHECK-NOT: ParallelMove /// CHECK: name "B3" /// CHECK-NOT: end_block - /// CHECK: ArraySet + /// CHECK: InstanceFieldSet // We could check here that there is a parallel move, but it's only valid // for some architectures (for example x86), as other architectures may // not do move at all. @@ -71,18 +79,19 @@ public class Main { int e = live1; int f = live2; int g = live3; + int j = live0; if (z) { if (y) { int h = live4; int i = live5; - array[2] = e + i + h; - array[3] = f + i + h; - array[4] = g + i + h; - array[0] = h; - array[1] = i + h; + foo.field2 = e + i + h; + foo.field3 = f + i + h; + foo.field4 = g + i + h; + foo.field0 = h; + foo.field1 = i + h; } } - live1 = e + f + g; + live1 = e + f + g + j; } /// CHECK-START: void Main.test3(boolean, int, int, int, int, int) register (after) @@ -94,7 +103,7 @@ public class Main { /// CHECK-NOT: ParallelMove /// CHECK: name "B6" /// CHECK-NOT: end_block - /// CHECK: ArraySet + /// CHECK: InstanceFieldSet // We could check here that there is a parallel move, but it's only valid // for some architectures (for example x86), as other architectures may // not do move at all. @@ -107,6 +116,7 @@ public class Main { int e = live1; int f = live2; int g = live3; + int j = live0; if (z) { live1 = e; } else { @@ -115,24 +125,25 @@ public class Main { } else { int h = live4; int i = live5; - array[2] = e + i + h; - array[3] = f + i + h; - array[4] = g + i + h; - array[0] = h; - array[1] = i + h; + foo.field2 = e + i + h; + foo.field3 = f + i + h; + foo.field4 = g + i + h; + foo.field0 = h; + foo.field1 = i + h; } } - live1 = e + f + g; + live1 = e + f + g + j; } public static void main(String[] args) { } static boolean y; + static int live0; static int live1; static int live2; static int live3; static int live4; static int live5; - static int[] array; + static Foo foo; } diff --git a/test/525-arrays-and-fields/expected.txt b/test/525-checker-arrays-and-fields/expected.txt index e69de29bb2..e69de29bb2 100644 --- a/test/525-arrays-and-fields/expected.txt +++ b/test/525-checker-arrays-and-fields/expected.txt diff --git a/test/525-arrays-and-fields/info.txt b/test/525-checker-arrays-and-fields/info.txt index 3e16abf204..3e16abf204 100644 --- a/test/525-arrays-and-fields/info.txt +++ b/test/525-checker-arrays-and-fields/info.txt diff --git a/test/525-arrays-and-fields/src/Main.java b/test/525-checker-arrays-and-fields/src/Main.java index cb1e4afeab..a635a5157f 100644 --- a/test/525-arrays-and-fields/src/Main.java +++ b/test/525-checker-arrays-and-fields/src/Main.java @@ -80,56 +80,129 @@ public class Main { // // Loops on static arrays with invariant static field references. + // The checker is used to ensure hoisting occurred. 
// + /// CHECK-START: void Main.SInvLoopZ() licm (before) + /// CHECK-DAG: StaticFieldGet loop:{{B\d+}} + /// CHECK-DAG: StaticFieldGet loop:{{B\d+}} + + /// CHECK-START: void Main.SInvLoopZ() licm (after) + /// CHECK-DAG: StaticFieldGet loop:none + /// CHECK-DAG: StaticFieldGet loop:none + private static void SInvLoopZ() { for (int i = 0; i < sArrZ.length; i++) { sArrZ[i] = sZ; } } + /// CHECK-START: void Main.SInvLoopB() licm (before) + /// CHECK-DAG: StaticFieldGet loop:{{B\d+}} + /// CHECK-DAG: StaticFieldGet loop:{{B\d+}} + + /// CHECK-START: void Main.SInvLoopB() licm (after) + /// CHECK-DAG: StaticFieldGet loop:none + /// CHECK-DAG: StaticFieldGet loop:none + private static void SInvLoopB() { for (int i = 0; i < sArrB.length; i++) { sArrB[i] = sB; } } + /// CHECK-START: void Main.SInvLoopC() licm (before) + /// CHECK-DAG: StaticFieldGet loop:{{B\d+}} + /// CHECK-DAG: StaticFieldGet loop:{{B\d+}} + + /// CHECK-START: void Main.SInvLoopC() licm (after) + /// CHECK-DAG: StaticFieldGet loop:none + /// CHECK-DAG: StaticFieldGet loop:none + private static void SInvLoopC() { for (int i = 0; i < sArrC.length; i++) { sArrC[i] = sC; } } + /// CHECK-START: void Main.SInvLoopS() licm (before) + /// CHECK-DAG: StaticFieldGet loop:{{B\d+}} + /// CHECK-DAG: StaticFieldGet loop:{{B\d+}} + + /// CHECK-START: void Main.SInvLoopS() licm (after) + /// CHECK-DAG: StaticFieldGet loop:none + /// CHECK-DAG: StaticFieldGet loop:none + private static void SInvLoopS() { for (int i = 0; i < sArrS.length; i++) { sArrS[i] = sS; } } + /// CHECK-START: void Main.SInvLoopI() licm (before) + /// CHECK-DAG: StaticFieldGet loop:{{B\d+}} + /// CHECK-DAG: StaticFieldGet loop:{{B\d+}} + + /// CHECK-START: void Main.SInvLoopI() licm (after) + /// CHECK-DAG: StaticFieldGet loop:none + /// CHECK-DAG: StaticFieldGet loop:none + private static void SInvLoopI() { for (int i = 0; i < sArrI.length; i++) { sArrI[i] = sI; } } + /// CHECK-START: void Main.SInvLoopJ() licm (before) + /// CHECK-DAG: StaticFieldGet loop:{{B\d+}} + /// CHECK-DAG: StaticFieldGet loop:{{B\d+}} + + /// CHECK-START: void Main.SInvLoopJ() licm (after) + /// CHECK-DAG: StaticFieldGet loop:none + /// CHECK-DAG: StaticFieldGet loop:none + private static void SInvLoopJ() { for (int i = 0; i < sArrJ.length; i++) { sArrJ[i] = sJ; } } + /// CHECK-START: void Main.SInvLoopF() licm (before) + /// CHECK-DAG: StaticFieldGet loop:{{B\d+}} + /// CHECK-DAG: StaticFieldGet loop:{{B\d+}} + + /// CHECK-START: void Main.SInvLoopF() licm (after) + /// CHECK-DAG: StaticFieldGet loop:none + /// CHECK-DAG: StaticFieldGet loop:none + private static void SInvLoopF() { for (int i = 0; i < sArrF.length; i++) { sArrF[i] = sF; } } + /// CHECK-START: void Main.SInvLoopD() licm (before) + /// CHECK-DAG: StaticFieldGet loop:{{B\d+}} + /// CHECK-DAG: StaticFieldGet loop:{{B\d+}} + + /// CHECK-START: void Main.SInvLoopD() licm (after) + /// CHECK-DAG: StaticFieldGet loop:none + /// CHECK-DAG: StaticFieldGet loop:none + private static void SInvLoopD() { for (int i = 0; i < sArrD.length; i++) { sArrD[i] = sD; } } + /// CHECK-START: void Main.SInvLoopL() licm (before) + /// CHECK-DAG: StaticFieldGet loop:{{B\d+}} + /// CHECK-DAG: StaticFieldGet loop:{{B\d+}} + + /// CHECK-START: void Main.SInvLoopL() licm (after) + /// CHECK-DAG: StaticFieldGet loop:none + /// CHECK-DAG: StaticFieldGet loop:none + private static void SInvLoopL() { for (int i = 0; i < sArrL.length; i++) { sArrL[i] = sL; @@ -138,6 +211,7 @@ public class Main { // // Loops on static arrays with variant static field references. 
+ // Incorrect hoisting is detected by incorrect outcome. // private static void SVarLoopZ() { @@ -214,56 +288,130 @@ public class Main { // // Loops on static arrays with a cross-over reference. + // Incorrect hoisting is detected by incorrect outcome. + // In addition, the checker is used to detect no hoisting. // + /// CHECK-START: void Main.SCrossOverLoopZ() licm (before) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + + /// CHECK-START: void Main.SCrossOverLoopZ() licm (after) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + private static void SCrossOverLoopZ() { for (int i = 0; i < sArrZ.length; i++) { sArrZ[i] = !sArrZ[20]; } } + /// CHECK-START: void Main.SCrossOverLoopB() licm (before) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + + /// CHECK-START: void Main.SCrossOverLoopB() licm (after) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + private static void SCrossOverLoopB() { for (int i = 0; i < sArrB.length; i++) { sArrB[i] = (byte)(sArrB[20] + 2); } } + /// CHECK-START: void Main.SCrossOverLoopC() licm (before) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + + /// CHECK-START: void Main.SCrossOverLoopC() licm (after) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + private static void SCrossOverLoopC() { for (int i = 0; i < sArrC.length; i++) { sArrC[i] = (char)(sArrC[20] + 2); } } + /// CHECK-START: void Main.SCrossOverLoopS() licm (before) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + + /// CHECK-START: void Main.SCrossOverLoopS() licm (after) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + private static void SCrossOverLoopS() { for (int i = 0; i < sArrS.length; i++) { sArrS[i] = (short)(sArrS[20] + 2); } } + /// CHECK-START: void Main.SCrossOverLoopI() licm (before) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + + /// CHECK-START: void Main.SCrossOverLoopI() licm (after) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + private static void SCrossOverLoopI() { for (int i = 0; i < sArrI.length; i++) { sArrI[i] = sArrI[20] + 2; } } + /// CHECK-START: void Main.SCrossOverLoopJ() licm (before) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + + /// CHECK-START: void Main.SCrossOverLoopJ() licm (after) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + private static void SCrossOverLoopJ() { for (int i = 0; i < sArrJ.length; i++) { sArrJ[i] = sArrJ[20] + 2; } } + /// CHECK-START: void Main.SCrossOverLoopF() licm (before) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + + /// CHECK-START: void Main.SCrossOverLoopF() licm (after) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + private static void SCrossOverLoopF() { for (int i = 0; i < sArrF.length; i++) { sArrF[i] = sArrF[20] + 2; } } + /// CHECK-START: void Main.SCrossOverLoopD() licm (before) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + + /// CHECK-START: void Main.SCrossOverLoopD() licm (after) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + private static void SCrossOverLoopD() { for (int i = 0; i < sArrD.length; i++) { sArrD[i] = sArrD[20] + 2; } } + /// CHECK-START: void Main.SCrossOverLoopL() 
licm (before) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + + /// CHECK-START: void Main.SCrossOverLoopL() licm (after) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + private static void SCrossOverLoopL() { for (int i = 0; i < sArrL.length; i++) { sArrL[i] = (sArrL[20] == anObject) ? anotherObject : anObject; @@ -272,56 +420,129 @@ public class Main { // // Loops on instance arrays with invariant instance field references. + // The checker is used to ensure hoisting occurred. // + /// CHECK-START: void Main.InvLoopZ() licm (before) + /// CHECK-DAG: InstanceFieldGet loop:{{B\d+}} + /// CHECK-DAG: InstanceFieldGet loop:{{B\d+}} + + /// CHECK-START: void Main.InvLoopZ() licm (after) + /// CHECK-DAG: InstanceFieldGet loop:none + /// CHECK-DAG: InstanceFieldGet loop:none + private void InvLoopZ() { for (int i = 0; i < mArrZ.length; i++) { mArrZ[i] = mZ; } } + /// CHECK-START: void Main.InvLoopB() licm (before) + /// CHECK-DAG: InstanceFieldGet loop:{{B\d+}} + /// CHECK-DAG: InstanceFieldGet loop:{{B\d+}} + + /// CHECK-START: void Main.InvLoopB() licm (after) + /// CHECK-DAG: InstanceFieldGet loop:none + /// CHECK-DAG: InstanceFieldGet loop:none + private void InvLoopB() { for (int i = 0; i < mArrB.length; i++) { mArrB[i] = mB; } } + /// CHECK-START: void Main.InvLoopC() licm (before) + /// CHECK-DAG: InstanceFieldGet loop:{{B\d+}} + /// CHECK-DAG: InstanceFieldGet loop:{{B\d+}} + + /// CHECK-START: void Main.InvLoopC() licm (after) + /// CHECK-DAG: InstanceFieldGet loop:none + /// CHECK-DAG: InstanceFieldGet loop:none + private void InvLoopC() { for (int i = 0; i < mArrC.length; i++) { mArrC[i] = mC; } } + /// CHECK-START: void Main.InvLoopS() licm (before) + /// CHECK-DAG: InstanceFieldGet loop:{{B\d+}} + /// CHECK-DAG: InstanceFieldGet loop:{{B\d+}} + + /// CHECK-START: void Main.InvLoopS() licm (after) + /// CHECK-DAG: InstanceFieldGet loop:none + /// CHECK-DAG: InstanceFieldGet loop:none + private void InvLoopS() { for (int i = 0; i < mArrS.length; i++) { mArrS[i] = mS; } } + /// CHECK-START: void Main.InvLoopI() licm (before) + /// CHECK-DAG: InstanceFieldGet loop:{{B\d+}} + /// CHECK-DAG: InstanceFieldGet loop:{{B\d+}} + + /// CHECK-START: void Main.InvLoopI() licm (after) + /// CHECK-DAG: InstanceFieldGet loop:none + /// CHECK-DAG: InstanceFieldGet loop:none + private void InvLoopI() { for (int i = 0; i < mArrI.length; i++) { mArrI[i] = mI; } } + /// CHECK-START: void Main.InvLoopJ() licm (before) + /// CHECK-DAG: InstanceFieldGet loop:{{B\d+}} + /// CHECK-DAG: InstanceFieldGet loop:{{B\d+}} + + /// CHECK-START: void Main.InvLoopJ() licm (after) + /// CHECK-DAG: InstanceFieldGet loop:none + /// CHECK-DAG: InstanceFieldGet loop:none + private void InvLoopJ() { for (int i = 0; i < mArrJ.length; i++) { mArrJ[i] = mJ; } } + /// CHECK-START: void Main.InvLoopF() licm (before) + /// CHECK-DAG: InstanceFieldGet loop:{{B\d+}} + /// CHECK-DAG: InstanceFieldGet loop:{{B\d+}} + + /// CHECK-START: void Main.InvLoopF() licm (after) + /// CHECK-DAG: InstanceFieldGet loop:none + /// CHECK-DAG: InstanceFieldGet loop:none + private void InvLoopF() { for (int i = 0; i < mArrF.length; i++) { mArrF[i] = mF; } } + /// CHECK-START: void Main.InvLoopD() licm (before) + /// CHECK-DAG: InstanceFieldGet loop:{{B\d+}} + /// CHECK-DAG: InstanceFieldGet loop:{{B\d+}} + + /// CHECK-START: void Main.InvLoopD() licm (after) + /// CHECK-DAG: InstanceFieldGet loop:none + /// CHECK-DAG: InstanceFieldGet loop:none + private void InvLoopD() { for (int i 
= 0; i < mArrD.length; i++) { mArrD[i] = mD; } } + /// CHECK-START: void Main.InvLoopL() licm (before) + /// CHECK-DAG: InstanceFieldGet loop:{{B\d+}} + /// CHECK-DAG: InstanceFieldGet loop:{{B\d+}} + + /// CHECK-START: void Main.InvLoopL() licm (after) + /// CHECK-DAG: InstanceFieldGet loop:none + /// CHECK-DAG: InstanceFieldGet loop:none + private void InvLoopL() { for (int i = 0; i < mArrL.length; i++) { mArrL[i] = mL; @@ -330,6 +551,7 @@ public class Main { // // Loops on instance arrays with variant instance field references. + // Incorrect hoisting is detected by incorrect outcome. // private void VarLoopZ() { @@ -406,56 +628,130 @@ public class Main { // // Loops on instance arrays with a cross-over reference. + // Incorrect hoisting is detected by incorrect outcome. + // In addition, the checker is used to detect no hoisting. // + /// CHECK-START: void Main.CrossOverLoopZ() licm (before) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + + /// CHECK-START: void Main.CrossOverLoopZ() licm (after) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + private void CrossOverLoopZ() { for (int i = 0; i < mArrZ.length; i++) { mArrZ[i] = !mArrZ[20]; } } + /// CHECK-START: void Main.CrossOverLoopB() licm (before) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + + /// CHECK-START: void Main.CrossOverLoopB() licm (after) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + private void CrossOverLoopB() { for (int i = 0; i < mArrB.length; i++) { mArrB[i] = (byte)(mArrB[20] + 2); } } + /// CHECK-START: void Main.CrossOverLoopC() licm (before) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + + /// CHECK-START: void Main.CrossOverLoopC() licm (after) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + private void CrossOverLoopC() { for (int i = 0; i < mArrC.length; i++) { mArrC[i] = (char)(mArrC[20] + 2); } } + /// CHECK-START: void Main.CrossOverLoopS() licm (before) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + + /// CHECK-START: void Main.CrossOverLoopS() licm (after) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + private void CrossOverLoopS() { for (int i = 0; i < mArrS.length; i++) { mArrS[i] = (short)(mArrS[20] + 2); } } + /// CHECK-START: void Main.CrossOverLoopI() licm (before) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + + /// CHECK-START: void Main.CrossOverLoopI() licm (after) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + private void CrossOverLoopI() { for (int i = 0; i < mArrI.length; i++) { mArrI[i] = mArrI[20] + 2; } } + /// CHECK-START: void Main.CrossOverLoopJ() licm (before) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + + /// CHECK-START: void Main.CrossOverLoopJ() licm (after) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + private void CrossOverLoopJ() { for (int i = 0; i < mArrJ.length; i++) { mArrJ[i] = mArrJ[20] + 2; } } + /// CHECK-START: void Main.CrossOverLoopF() licm (before) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + + /// CHECK-START: void Main.CrossOverLoopF() licm (after) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + private void CrossOverLoopF() { for (int i = 0; i < mArrF.length; i++) { mArrF[i] = 
mArrF[20] + 2; } } + /// CHECK-START: void Main.CrossOverLoopD() licm (before) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + + /// CHECK-START: void Main.CrossOverLoopD() licm (after) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + private void CrossOverLoopD() { for (int i = 0; i < mArrD.length; i++) { mArrD[i] = mArrD[20] + 2; } } + /// CHECK-START: void Main.CrossOverLoopL() licm (before) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + + /// CHECK-START: void Main.CrossOverLoopL() licm (after) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + private void CrossOverLoopL() { for (int i = 0; i < mArrL.length; i++) { mArrL[i] = (mArrL[20] == anObject) ? anotherObject : anObject; diff --git a/test/800-smali/expected.txt b/test/800-smali/expected.txt index fd9fcafbff..728ccea256 100644 --- a/test/800-smali/expected.txt +++ b/test/800-smali/expected.txt @@ -36,4 +36,5 @@ b/22411633 (2) b/22411633 (3) b/22411633 (4) b/22411633 (5) +b/22777307 Done! diff --git a/test/800-smali/smali/b_22777307.smali b/test/800-smali/smali/b_22777307.smali new file mode 100644 index 0000000000..6de3c703b5 --- /dev/null +++ b/test/800-smali/smali/b_22777307.smali @@ -0,0 +1,18 @@ +.class public LB22777307; +.super Ljava/lang/Object; + +# A static field. That way we can use the reference. +.field private static sTest:Ljava/lang/Object; + +.method public static run()V +.registers 2 + # This is a broken new-instance. It needs to throw at runtime, though. This test is here to + # ensure we won't produce a VerifyError. + # Cloneable was chosen because it's an already existing interface. + new-instance v0, Ljava/lang/Cloneable; + invoke-direct {v0}, Ljava/lang/Cloneable;-><init>()V + sput-object v0, LB22777307;->sTest:Ljava/lang/Object; + + return-void + +.end method diff --git a/test/800-smali/src/Main.java b/test/800-smali/src/Main.java index 8da2af4e84..438e21481c 100644 --- a/test/800-smali/src/Main.java +++ b/test/800-smali/src/Main.java @@ -119,6 +119,8 @@ public class Main { new VerifyError(), null)); testCases.add(new TestCase("b/22411633 (5)", "B22411633_5", "run", new Object[] { false }, null, null)); + testCases.add(new TestCase("b/22777307", "B22777307", "run", null, new InstantiationError(), + null)); } public void runTests() { diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk index 3d5c483a3b..3698bc8248 100644 --- a/test/Android.run-test.mk +++ b/test/Android.run-test.mk @@ -113,7 +113,7 @@ ifeq ($(ART_TEST_DEFAULT_COMPILER),true) COMPILER_TYPES += default endif ifeq ($(ART_TEST_INTERPRETER_ACCESS_CHECKS),true) - COMPILER_TYPES += interpreter-access-checks + COMPILER_TYPES += interp-ac endif ifeq ($(ART_TEST_INTERPRETER),true) COMPILER_TYPES += interpreter @@ -277,9 +277,9 @@ TEST_ART_BROKEN_INTERPRETER_ACCESS_CHECK_TESTS := \ 506-verify-aput \ 800-smali -ifneq (,$(filter interpreter-access-checks,$(COMPILER_TYPES))) +ifneq (,$(filter interp-ac,$(COMPILER_TYPES))) ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \ - interpreter-access-checks,$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \ + interp-ac,$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \ $(IMAGE_TYPES), $(PICTEST_TYPES), $(DEBUGGABLE_TYPES), $(TEST_ART_BROKEN_INTERPRETER_ACCESS_CHECK_TESTS), $(ALL_ADDRESS_SIZES)) endif @@ -629,7 +629,7 @@ endif # Create a rule to build and run a tests following the form: # test-art-{1: 
host or target}-run-test-{2: debug ndebug}-{3: prebuild no-prebuild no-dex2oat}- -# {4: interpreter default optimizing jit interpreter-access-checks}- +# {4: interpreter default optimizing jit interp-ac}- # {5: relocate nrelocate relocate-npatchoat}- # {6: trace or ntrace}-{7: gcstress gcverify cms}-{8: forcecopy checkjni jni}- # {9: no-image image picimage}-{10: pictest npictest}- @@ -700,7 +700,7 @@ define define-test-art-run-test ifeq ($(4),interpreter) test_groups += ART_RUN_TEST_$$(uc_host_or_target)_INTERPRETER_RULES run_test_options += --interpreter - else ifeq ($(4),interpreter-access-checks) + else ifeq ($(4),interp-ac) test_groups += ART_RUN_TEST_$$(uc_host_or_target)_INTERPRETER_ACCESS_CHECKS_RULES run_test_options += --interpreter --verify-soft-fail else diff --git a/test/run-test b/test/run-test index eabbab32a9..934329f4dc 100755 --- a/test/run-test +++ b/test/run-test @@ -264,7 +264,7 @@ while true; do shift elif [ "x$1" = "x--verify-soft-fail" ]; then run_args="${run_args} --verify-soft-fail" - image_suffix="-interpreter-access-checks" + image_suffix="-interp-ac" shift elif [ "x$1" = "x--no-optimize" ]; then run_args="${run_args} --no-optimize" @@ -621,9 +621,13 @@ if [[ "$TEST_NAME" =~ ^[0-9]+-checker- ]]; then USE_JACK="false" if [ "$runtime" = "art" -a "$image_suffix" = "-optimizing" -a "$target_mode" = "no" -a "$debuggable" = "no" ]; then - run_checker="yes" - run_args="${run_args} -Xcompiler-option --dump-cfg=$tmp_dir/$cfg_output \ - -Xcompiler-option -j1" + # In no-prebuild mode, the compiler is only invoked if both dex2oat and + # patchoat are available. Disable Checker otherwise (b/22552692). + if [ "$prebuild_mode" = "yes" ] || [ "$have_patchoat" = "yes" -a "$have_dex2oat" = "yes" ]; then + run_checker="yes" + run_args="${run_args} -Xcompiler-option --dump-cfg=$tmp_dir/$cfg_output \ + -Xcompiler-option -j1" + fi fi fi diff --git a/tools/checker/checker.py b/tools/checker/checker.py index ed630e3d12..4e516deee0 100755 --- a/tools/checker/checker.py +++ b/tools/checker/checker.py @@ -62,7 +62,7 @@ def DumpPass(outputFilename, passName): def FindCheckerFiles(path): """ Returns a list of files to scan for check annotations in the given path. Path to a file is returned as a single-element list, directories are - recursively traversed and all '.java' files returned. + recursively traversed and all '.java' and '.smali' files returned. """ if not path: Logger.fail("No source path provided") diff --git a/tools/checker/file_format/checker/parser.py b/tools/checker/file_format/checker/parser.py index 33735cbea0..f354395171 100644 --- a/tools/checker/file_format/checker/parser.py +++ b/tools/checker/file_format/checker/parser.py @@ -76,7 +76,7 @@ def __processLine(line, lineNo, prefix, fileName): if notLine is not None: return (notLine, TestAssertion.Variant.Not, lineNo), None - Logger.fail("Checker assertion could not be parsed", fileName, lineNo) + Logger.fail("Checker assertion could not be parsed: '" + line + "'", fileName, lineNo) def __isMatchAtStart(match): """ Tests if the given Match occurred at the beginning of the line. 
""" diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt index 992a8a6ea1..d58f034f93 100644 --- a/tools/libcore_failures.txt +++ b/tools/libcore_failures.txt @@ -150,5 +150,12 @@ result: EXEC_FAILED, modes: [device], names: ["org.apache.harmony.tests.java.lang.ClassTest#test_forNameLjava_lang_String"] +}, +{ + description: "TimeZoneTest.testAllDisplayNames times out, needs investigation", + result: EXEC_FAILED, + modes: [device], + names: ["libcore.java.util.TimeZoneTest.testAllDisplayNames"], + bug: 22786792 } ] |