author     2016-10-11 16:52:17 -0700
committer  2016-10-12 13:30:54 -0700
commit     e8a3c576301fd531d5f73a65fc8b84a63619d580 (patch)
tree       a00e488f153505ee890c28873308d373e6ab231b
parent     e8317d90c61dde07c12e404a2bc1fabf584905c1 (diff)
Replace StackHandleScopeCollection with VariableSizedHandleScope
VariableSizedHandleScope's internal handle scopes are not pushed
directly on the thread, so it is safe to intermix one with other
types of handle scopes.
Added test.
Test: clean-oat-host && test-art-host
Change-Id: Id2fd1155788428f394d49615d337d9134824c8f0
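
For illustration, a minimal sketch of how the new scope is meant to be used,
modeled on the test added in runtime/handle_scope_test.cc (the class lookup
and AllocObject calls are just placeholders for code that needs handles):

  ScopedObjectAccess soa(Thread::Current());
  ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
  // Handles are created on demand; when the current internal
  // FixedSizeHandleScope runs out of slots, a new one is chained in.
  VariableSizedHandleScope hs(soa.Self());
  Handle<mirror::Class> klass =
      hs.NewHandle(class_linker->FindSystemClass(soa.Self(), "Ljava/lang/Object;"));
  // The internal scopes are not pushed on the thread, so a regular
  // StackHandleScope can be nested while hs is still live.
  StackHandleScope<1> inner(soa.Self());
  Handle<mirror::Object> obj = inner.NewHandle(klass->AllocObject(soa.Self()));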
-rw-r--r--  compiler/image_writer.cc                                |   2
-rw-r--r--  compiler/jni/jni_compiler_test.cc                       |   2
-rw-r--r--  compiler/optimizing/builder.h                           |   4
-rw-r--r--  compiler/optimizing/inliner.h                           |   4
-rw-r--r--  compiler/optimizing/nodes.cc                            |   2
-rw-r--r--  compiler/optimizing/nodes.h                             |   2
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc              |  14
-rw-r--r--  compiler/optimizing/optimizing_unit_test.h              |   2
-rw-r--r--  compiler/optimizing/reference_type_propagation.cc       |   4
-rw-r--r--  compiler/optimizing/reference_type_propagation.h        |   6
-rw-r--r--  compiler/optimizing/reference_type_propagation_test.cc  |   8
-rw-r--r--  compiler/optimizing/ssa_builder.h                       |   4
-rw-r--r--  runtime/debugger.cc                                     |   2
-rw-r--r--  runtime/handle_scope-inl.h                              | 141
-rw-r--r--  runtime/handle_scope.h                                  | 187
-rw-r--r--  runtime/handle_scope_test.cc                            |  85
-rw-r--r--  runtime/native/dalvik_system_VMDebug.cc                 |   4
-rw-r--r--  runtime/thread.cc                                       |  12
-rw-r--r--  runtime/thread.h                                        |  10
19 files changed, 337 insertions, 158 deletions
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 8ae04a1e49..13c73dcf42 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -1389,7 +1389,7 @@ void ImageWriter::ProcessWorkStack(WorkStack* work_stack) {

 void ImageWriter::CalculateNewObjectOffsets() {
   Thread* const self = Thread::Current();
-  StackHandleScopeCollection handles(self);
+  VariableSizedHandleScope handles(self);
   std::vector<Handle<ObjectArray<Object>>> image_roots;
   for (size_t i = 0, size = oat_filenames_.size(); i != size; ++i) {
     image_roots.push_back(handles.NewHandle(CreateImageRoots(i)));
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
index 0cf5f18c4b..afb8fce8d7 100644
--- a/compiler/jni/jni_compiler_test.cc
+++ b/compiler/jni/jni_compiler_test.cc
@@ -530,7 +530,7 @@ struct ScopedCheckHandleScope {
         << "invocations have finished (as before they were invoked).";
   }

-  HandleScope* const handle_scope_;
+  BaseHandleScope* const handle_scope_;
 };

 static void expectNumStackReferences(size_t val1, size_t val2) {
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index 580ef72767..f896f1199e 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -43,7 +43,7 @@ class HGraphBuilder : public ValueObject {
                 OptimizingCompilerStats* compiler_stats,
                 const uint8_t* interpreter_metadata,
                 Handle<mirror::DexCache> dex_cache,
-                StackHandleScopeCollection* handles)
+                VariableSizedHandleScope* handles)
       : graph_(graph),
         dex_file_(dex_file),
         code_item_(code_item),
@@ -68,7 +68,7 @@ class HGraphBuilder : public ValueObject {
   // Only for unit testing.
   HGraphBuilder(HGraph* graph,
                 const DexFile::CodeItem& code_item,
-                StackHandleScopeCollection* handles,
+                VariableSizedHandleScope* handles,
                 Primitive::Type return_type = Primitive::kPrimInt)
       : graph_(graph),
         dex_file_(nullptr),
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index 486626b1fe..a1dcd58a84 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -38,7 +38,7 @@ class HInliner : public HOptimization {
            const DexCompilationUnit& outer_compilation_unit,
            const DexCompilationUnit& caller_compilation_unit,
            CompilerDriver* compiler_driver,
-           StackHandleScopeCollection* handles,
+           VariableSizedHandleScope* handles,
            OptimizingCompilerStats* stats,
            size_t total_number_of_dex_registers,
            size_t depth)
@@ -197,7 +197,7 @@ class HInliner : public HOptimization {
   const size_t total_number_of_dex_registers_;
   const size_t depth_;
   size_t number_of_inlined_instructions_;
-  StackHandleScopeCollection* const handles_;
+  VariableSizedHandleScope* const handles_;

   DISALLOW_COPY_AND_ASSIGN(HInliner);
 };
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 874c1edf35..1e69966b98 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -35,7 +35,7 @@ namespace art {
 // double).
 static constexpr bool kEnableFloatingPointStaticEvaluation = (FLT_EVAL_METHOD == 0);

-void HGraph::InitializeInexactObjectRTI(StackHandleScopeCollection* handles) {
+void HGraph::InitializeInexactObjectRTI(VariableSizedHandleScope* handles) {
   ScopedObjectAccess soa(Thread::Current());
   // Create the inexact Object reference type and store it in the HGraph.
   ClassLinker* linker = Runtime::Current()->GetClassLinker();
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 348f99d6df..daefc3c3aa 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -336,7 +336,7 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
   }

   // Acquires and stores RTI of inexact Object to be used when creating HNullConstant.
-  void InitializeInexactObjectRTI(StackHandleScopeCollection* handles);
+  void InitializeInexactObjectRTI(VariableSizedHandleScope* handles);

   ArenaAllocator* GetArena() const { return arena_; }
   const ArenaVector<HBasicBlock*>& GetBlocks() const { return blocks_; }
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index d6f8307ac2..4370a84bd2 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -319,7 +319,7 @@ class OptimizingCompiler FINAL : public Compiler {
                          CompilerDriver* driver,
                          const DexCompilationUnit& dex_compilation_unit,
                          PassObserver* pass_observer,
-                         StackHandleScopeCollection* handles) const;
+                         VariableSizedHandleScope* handles) const;

   void RunOptimizations(HOptimization* optimizations[],
                         size_t length,
@@ -358,7 +358,7 @@ class OptimizingCompiler FINAL : public Compiler {
                         CompilerDriver* driver,
                         const DexCompilationUnit& dex_compilation_unit,
                         PassObserver* pass_observer,
-                        StackHandleScopeCollection* handles) const;
+                        VariableSizedHandleScope* handles) const;

   void RunArchOptimizations(InstructionSet instruction_set,
                             HGraph* graph,
@@ -442,7 +442,7 @@ static HOptimization* BuildOptimization(
     CodeGenerator* codegen,
     CompilerDriver* driver,
     const DexCompilationUnit& dex_compilation_unit,
-    StackHandleScopeCollection* handles,
+    VariableSizedHandleScope* handles,
     SideEffectsAnalysis* most_recent_side_effects,
     HInductionVarAnalysis* most_recent_induction) {
   std::string opt_name = ConvertPassNameToOptimizationName(pass_name);
@@ -524,7 +524,7 @@ static ArenaVector<HOptimization*> BuildOptimizations(
     CodeGenerator* codegen,
     CompilerDriver* driver,
     const DexCompilationUnit& dex_compilation_unit,
-    StackHandleScopeCollection* handles) {
+    VariableSizedHandleScope* handles) {
   // Few HOptimizations constructors require SideEffectsAnalysis or HInductionVarAnalysis
   // instances. This method assumes that each of them expects the nearest instance preceeding it
   // in the pass name list.
@@ -570,7 +570,7 @@ void OptimizingCompiler::MaybeRunInliner(HGraph* graph,
                                          CompilerDriver* driver,
                                          const DexCompilationUnit& dex_compilation_unit,
                                          PassObserver* pass_observer,
-                                         StackHandleScopeCollection* handles) const {
+                                         VariableSizedHandleScope* handles) const {
   OptimizingCompilerStats* stats = compilation_stats_.get();
   const CompilerOptions& compiler_options = driver->GetCompilerOptions();
   bool should_inline = (compiler_options.GetInlineDepthLimit() > 0)
@@ -707,7 +707,7 @@ void OptimizingCompiler::RunOptimizations(HGraph* graph,
                                           CompilerDriver* driver,
                                           const DexCompilationUnit& dex_compilation_unit,
                                           PassObserver* pass_observer,
-                                          StackHandleScopeCollection* handles) const {
+                                          VariableSizedHandleScope* handles) const {
   OptimizingCompilerStats* stats = compilation_stats_.get();
   ArenaAllocator* arena = graph->GetArena();
   if (driver->GetCompilerOptions().GetPassesToRun() != nullptr) {
@@ -949,7 +949,7 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* arena,

   {
     ScopedObjectAccess soa(Thread::Current());
-    StackHandleScopeCollection handles(soa.Self());
+    VariableSizedHandleScope handles(soa.Self());
     // Do not hold `mutator_lock_` between optimizations.
     ScopedThreadSuspension sts(soa.Self(), kNative);
diff --git a/compiler/optimizing/optimizing_unit_test.h b/compiler/optimizing/optimizing_unit_test.h
index 2a23c92f1f..58d90176cd 100644
--- a/compiler/optimizing/optimizing_unit_test.h
+++ b/compiler/optimizing/optimizing_unit_test.h
@@ -90,7 +90,7 @@ inline HGraph* CreateCFG(ArenaAllocator* allocator,

   {
     ScopedObjectAccess soa(Thread::Current());
-    StackHandleScopeCollection handles(soa.Self());
+    VariableSizedHandleScope handles(soa.Self());
     HGraphBuilder builder(graph, *item, &handles, return_type);
     bool graph_built = (builder.BuildGraph() == kAnalysisSuccess);
     return graph_built ? graph : nullptr;
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index 45a3ce411e..83698adba4 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -35,7 +35,7 @@ static inline mirror::DexCache* FindDexCacheWithHint(Thread* self,
   }
 }

-static inline ReferenceTypeInfo::TypeHandle GetRootHandle(StackHandleScopeCollection* handles,
+static inline ReferenceTypeInfo::TypeHandle GetRootHandle(VariableSizedHandleScope* handles,
                                                           ClassLinker::ClassRoot class_root,
                                                           ReferenceTypeInfo::TypeHandle* cache) {
   if (!ReferenceTypeInfo::IsValidHandle(*cache)) {
@@ -109,7 +109,7 @@ class ReferenceTypePropagation::RTPVisitor : public HGraphDelegateVisitor {

 ReferenceTypePropagation::ReferenceTypePropagation(HGraph* graph,
                                                    Handle<mirror::DexCache> hint_dex_cache,
-                                                   StackHandleScopeCollection* handles,
+                                                   VariableSizedHandleScope* handles,
                                                    bool is_first_run,
                                                    const char* name)
     : HOptimization(graph, name),
diff --git a/compiler/optimizing/reference_type_propagation.h b/compiler/optimizing/reference_type_propagation.h
index 61428b2a45..4663471729 100644
--- a/compiler/optimizing/reference_type_propagation.h
+++ b/compiler/optimizing/reference_type_propagation.h
@@ -34,7 +34,7 @@ class ReferenceTypePropagation : public HOptimization {
  public:
   ReferenceTypePropagation(HGraph* graph,
                            Handle<mirror::DexCache> hint_dex_cache,
-                           StackHandleScopeCollection* handles,
+                           VariableSizedHandleScope* handles,
                            bool is_first_run,
                            const char* name = kReferenceTypePropagationPassName);

@@ -56,7 +56,7 @@ class ReferenceTypePropagation : public HOptimization {
  private:
   class HandleCache {
    public:
-    explicit HandleCache(StackHandleScopeCollection* handles) : handles_(handles) { }
+    explicit HandleCache(VariableSizedHandleScope* handles) : handles_(handles) { }

     template <typename T>
     MutableHandle<T> NewHandle(T* object) REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -74,7 +74,7 @@ class ReferenceTypePropagation : public HOptimization {
     ReferenceTypeInfo::TypeHandle GetThrowableClassHandle();

    private:
-    StackHandleScopeCollection* handles_;
+    VariableSizedHandleScope* handles_;

     ReferenceTypeInfo::TypeHandle object_class_handle_;
     ReferenceTypeInfo::TypeHandle class_class_handle_;
diff --git a/compiler/optimizing/reference_type_propagation_test.cc b/compiler/optimizing/reference_type_propagation_test.cc
index 75a4eac538..b061c871b0 100644
--- a/compiler/optimizing/reference_type_propagation_test.cc
+++ b/compiler/optimizing/reference_type_propagation_test.cc
@@ -35,7 +35,7 @@ class ReferenceTypePropagationTest : public CommonCompilerTest {

   ~ReferenceTypePropagationTest() { }

-  void SetupPropagation(StackHandleScopeCollection* handles) {
+  void SetupPropagation(VariableSizedHandleScope* handles) {
     graph_->InitializeInexactObjectRTI(handles);
     propagation_ = new (&allocator_) ReferenceTypePropagation(graph_,
                                                               Handle<mirror::DexCache>(),
@@ -79,7 +79,7 @@ class ReferenceTypePropagationTest : public CommonCompilerTest {

 TEST_F(ReferenceTypePropagationTest, ProperSetup) {
   ScopedObjectAccess soa(Thread::Current());
-  StackHandleScopeCollection handles(soa.Self());
+  VariableSizedHandleScope handles(soa.Self());
   SetupPropagation(&handles);

   EXPECT_TRUE(propagation_ != nullptr);
@@ -88,7 +88,7 @@ TEST_F(ReferenceTypePropagationTest, ProperSetup) {

 TEST_F(ReferenceTypePropagationTest, MergeInvalidTypes) {
   ScopedObjectAccess soa(Thread::Current());
-  StackHandleScopeCollection handles(soa.Self());
+  VariableSizedHandleScope handles(soa.Self());
   SetupPropagation(&handles);

   // Two invalid types.
@@ -120,7 +120,7 @@ TEST_F(ReferenceTypePropagationTest, MergeInvalidTypes) {

 TEST_F(ReferenceTypePropagationTest, MergeValidTypes) {
   ScopedObjectAccess soa(Thread::Current());
-  StackHandleScopeCollection handles(soa.Self());
+  VariableSizedHandleScope handles(soa.Self());
   SetupPropagation(&handles);

   // Same types.
diff --git a/compiler/optimizing/ssa_builder.h b/compiler/optimizing/ssa_builder.h
index d7360adef8..45dac54115 100644
--- a/compiler/optimizing/ssa_builder.h
+++ b/compiler/optimizing/ssa_builder.h
@@ -49,7 +49,7 @@ class SsaBuilder : public ValueObject {
  public:
   SsaBuilder(HGraph* graph,
              Handle<mirror::DexCache> dex_cache,
-             StackHandleScopeCollection* handles)
+             VariableSizedHandleScope* handles)
       : graph_(graph),
         dex_cache_(dex_cache),
         handles_(handles),
@@ -116,7 +116,7 @@ class SsaBuilder : public ValueObject {
   HGraph* graph_;
   Handle<mirror::DexCache> dex_cache_;
-  StackHandleScopeCollection* const handles_;
+  VariableSizedHandleScope* const handles_;

   // True if types of ambiguous ArrayGets have been resolved.
   bool agets_fixed_;
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index d8a6ba9f7f..502ce4b76d 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -892,7 +892,7 @@ JDWP::JdwpError Dbg::GetInstanceCounts(const std::vector<JDWP::RefTypeId>& class
                                        std::vector<uint64_t>* counts) {
   gc::Heap* heap = Runtime::Current()->GetHeap();
   heap->CollectGarbage(false);
-  StackHandleScopeCollection hs(Thread::Current());
+  VariableSizedHandleScope hs(Thread::Current());
   std::vector<Handle<mirror::Class>> classes;
   counts->clear();
   for (size_t i = 0; i < class_ids.size(); ++i) {
diff --git a/runtime/handle_scope-inl.h b/runtime/handle_scope-inl.h
index cceb0072a9..b212d095cb 100644
--- a/runtime/handle_scope-inl.h
+++ b/runtime/handle_scope-inl.h
@@ -28,24 +28,30 @@ namespace art {

 template<size_t kNumReferences>
-inline StackHandleScope<kNumReferences>::StackHandleScope(Thread* self, mirror::Object* fill_value)
-    : HandleScope(self->GetTopHandleScope(), kNumReferences), self_(self), pos_(0) {
-  DCHECK_EQ(self, Thread::Current());
+inline FixedSizeHandleScope<kNumReferences>::FixedSizeHandleScope(BaseHandleScope* link,
+                                                                  mirror::Object* fill_value)
+    : HandleScope(link, kNumReferences) {
   if (kDebugLocking) {
     Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
   }
-  static_assert(kNumReferences >= 1, "StackHandleScope must contain at least 1 reference");
-  // TODO: Figure out how to use a compile assert.
-  CHECK_EQ(&storage_[0], GetReferences());
+  static_assert(kNumReferences >= 1, "FixedSizeHandleScope must contain at least 1 reference");
+  DCHECK_EQ(&storage_[0], GetReferences());  // TODO: Figure out how to use a compile assert.
   for (size_t i = 0; i < kNumReferences; ++i) {
     SetReference(i, fill_value);
   }
+}
+
+template<size_t kNumReferences>
+inline StackHandleScope<kNumReferences>::StackHandleScope(Thread* self, mirror::Object* fill_value)
+    : FixedSizeHandleScope<kNumReferences>(self->GetTopHandleScope(), fill_value),
+      self_(self) {
+  DCHECK_EQ(self, Thread::Current());
   self_->PushHandleScope(this);
 }

 template<size_t kNumReferences>
 inline StackHandleScope<kNumReferences>::~StackHandleScope() {
-  HandleScope* top_handle_scope = self_->PopHandleScope();
+  BaseHandleScope* top_handle_scope = self_->PopHandleScope();
   DCHECK_EQ(top_handle_scope, this);
   if (kDebugLocking) {
     Locks::mutator_lock_->AssertSharedHeld(self_);
@@ -66,7 +72,7 @@ inline size_t HandleScope::SizeOf(PointerSize pointer_size, uint32_t num_referen
 }

 inline mirror::Object* HandleScope::GetReference(size_t i) const {
-  DCHECK_LT(i, number_of_references_);
+  DCHECK_LT(i, NumberOfReferences());
   if (kDebugLocking) {
     Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
   }
@@ -74,12 +80,12 @@ inline mirror::Object* HandleScope::GetReference(size_t i) const {
 }

 inline Handle<mirror::Object> HandleScope::GetHandle(size_t i) {
-  DCHECK_LT(i, number_of_references_);
+  DCHECK_LT(i, NumberOfReferences());
   return Handle<mirror::Object>(&GetReferences()[i]);
 }

 inline MutableHandle<mirror::Object> HandleScope::GetMutableHandle(size_t i) {
-  DCHECK_LT(i, number_of_references_);
+  DCHECK_LT(i, NumberOfReferences());
   return MutableHandle<mirror::Object>(&GetReferences()[i]);
 }

@@ -87,7 +93,7 @@ inline void HandleScope::SetReference(size_t i, mirror::Object* object) {
   if (kDebugLocking) {
     Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
   }
-  DCHECK_LT(i, number_of_references_);
+  DCHECK_LT(i, NumberOfReferences());
   GetReferences()[i].Assign(object);
 }

@@ -95,13 +101,13 @@ inline bool HandleScope::Contains(StackReference<mirror::Object>* handle_scope_e
   // A HandleScope should always contain something. One created by the
   // jni_compiler should have a jobject/jclass as a native method is
   // passed in a this pointer or a class
-  DCHECK_GT(number_of_references_, 0U);
+  DCHECK_GT(NumberOfReferences(), 0U);
   return &GetReferences()[0] <= handle_scope_entry &&
       handle_scope_entry <= &GetReferences()[number_of_references_ - 1];
 }

 template<size_t kNumReferences> template<class T>
-inline MutableHandle<T> StackHandleScope<kNumReferences>::NewHandle(T* object) {
+inline MutableHandle<T> FixedSizeHandleScope<kNumReferences>::NewHandle(T* object) {
   SetReference(pos_, object);
   MutableHandle<T> h(GetHandle<T>(pos_));
   pos_++;
@@ -109,24 +115,24 @@ inline MutableHandle<T> StackHandleScope<kNumReferences>::NewHandle(T* object) {
 }

 template<size_t kNumReferences> template<class MirrorType, bool kPoison>
-inline MutableHandle<MirrorType> StackHandleScope<kNumReferences>::NewHandle(
+inline MutableHandle<MirrorType> FixedSizeHandleScope<kNumReferences>::NewHandle(
     ObjPtr<MirrorType, kPoison> object) {
   return NewHandle(object.Ptr());
 }

 template<size_t kNumReferences> template<class T>
-inline HandleWrapper<T> StackHandleScope<kNumReferences>::NewHandleWrapper(T** object) {
+inline HandleWrapper<T> FixedSizeHandleScope<kNumReferences>::NewHandleWrapper(T** object) {
   return HandleWrapper<T>(object, NewHandle(*object));
 }

 template<size_t kNumReferences> template<class T>
-inline HandleWrapperObjPtr<T> StackHandleScope<kNumReferences>::NewHandleWrapper(
+inline HandleWrapperObjPtr<T> FixedSizeHandleScope<kNumReferences>::NewHandleWrapper(
     ObjPtr<T>* object) {
   return HandleWrapperObjPtr<T>(object, NewHandle(*object));
 }

 template<size_t kNumReferences>
-inline void StackHandleScope<kNumReferences>::SetReference(size_t i, mirror::Object* object) {
+inline void FixedSizeHandleScope<kNumReferences>::SetReference(size_t i, mirror::Object* object) {
   if (kDebugLocking) {
     Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
   }
@@ -135,12 +141,111 @@ inline void StackHandleScope<kNumReferences>::SetReference(size_t i, mirror::Obj
   GetReferences()[i].Assign(object);
 }

+// Number of references contained within this handle scope.
+inline uint32_t BaseHandleScope::NumberOfReferences() const {
+  return LIKELY(!IsVariableSized())
+      ? AsHandleScope()->NumberOfReferences()
+      : AsVariableSized()->NumberOfReferences();
+}
+
+inline bool BaseHandleScope::Contains(StackReference<mirror::Object>* handle_scope_entry) const {
+  return LIKELY(!IsVariableSized())
+      ? AsHandleScope()->Contains(handle_scope_entry)
+      : AsVariableSized()->Contains(handle_scope_entry);
+}
+
+template <typename Visitor>
+inline void BaseHandleScope::VisitRoots(Visitor& visitor) {
+  if (LIKELY(!IsVariableSized())) {
+    AsHandleScope()->VisitRoots(visitor);
+  } else {
+    AsVariableSized()->VisitRoots(visitor);
+  }
+}
+
+inline VariableSizedHandleScope* BaseHandleScope::AsVariableSized() {
+  DCHECK(IsVariableSized());
+  return down_cast<VariableSizedHandleScope*>(this);
+}
+
+inline HandleScope* BaseHandleScope::AsHandleScope() {
+  DCHECK(!IsVariableSized());
+  return down_cast<HandleScope*>(this);
+}
+
+inline const VariableSizedHandleScope* BaseHandleScope::AsVariableSized() const {
+  DCHECK(IsVariableSized());
+  return down_cast<const VariableSizedHandleScope*>(this);
+}
+
+inline const HandleScope* BaseHandleScope::AsHandleScope() const {
+  DCHECK(!IsVariableSized());
+  return down_cast<const HandleScope*>(this);
+}
+
+template<class T>
+MutableHandle<T> VariableSizedHandleScope::NewHandle(T* object) {
+  if (current_scope_->RemainingSlots() == 0) {
+    current_scope_ = new LocalScopeType(current_scope_);
+  }
+  return current_scope_->NewHandle(object);
+}
+
 template<class MirrorType, bool kPoison>
-inline MutableHandle<MirrorType> StackHandleScopeCollection::NewHandle(
+inline MutableHandle<MirrorType> VariableSizedHandleScope::NewHandle(
     ObjPtr<MirrorType, kPoison> ptr) {
   return NewHandle(ptr.Ptr());
 }

+inline VariableSizedHandleScope::VariableSizedHandleScope(Thread* const self)
+    : BaseHandleScope(self->GetTopHandleScope()),
+      self_(self) {
+  current_scope_ = new LocalScopeType(/*link*/ nullptr);
+  self_->PushHandleScope(this);
+}
+
+inline VariableSizedHandleScope::~VariableSizedHandleScope() {
+  BaseHandleScope* top_handle_scope = self_->PopHandleScope();
+  DCHECK_EQ(top_handle_scope, this);
+  while (current_scope_ != nullptr) {
+    LocalScopeType* next = reinterpret_cast<LocalScopeType*>(current_scope_->GetLink());
+    delete current_scope_;
+    current_scope_ = next;
+  }
+}
+
+inline uint32_t VariableSizedHandleScope::NumberOfReferences() const {
+  uint32_t sum = 0;
+  const LocalScopeType* cur = current_scope_;
+  while (cur != nullptr) {
+    sum += cur->NumberOfReferences();
+    cur = reinterpret_cast<const LocalScopeType*>(cur->GetLink());
+  }
+  return sum;
+}
+
+inline bool VariableSizedHandleScope::Contains(StackReference<mirror::Object>* handle_scope_entry)
+    const {
+  const LocalScopeType* cur = current_scope_;
+  while (cur != nullptr) {
+    if (cur->Contains(handle_scope_entry)) {
+      return true;
+    }
+    cur = reinterpret_cast<const LocalScopeType*>(cur->GetLink());
+  }
+  return false;
+}
+
+template <typename Visitor>
+inline void VariableSizedHandleScope::VisitRoots(Visitor& visitor) {
+  LocalScopeType* cur = current_scope_;
+  while (cur != nullptr) {
+    cur->VisitRoots(visitor);
+    cur = reinterpret_cast<LocalScopeType*>(cur->GetLink());
+  }
+}
+
 }  // namespace art

 #endif  // ART_RUNTIME_HANDLE_SCOPE_INL_H_
diff --git a/runtime/handle_scope.h b/runtime/handle_scope.h
index fc729a547b..8a0aba6121 100644
--- a/runtime/handle_scope.h
+++ b/runtime/handle_scope.h
@@ -29,26 +29,69 @@ namespace art {

+class HandleScope;
 template<class MirrorType, bool kPoison> class ObjPtr;
+class Thread;
+class VariableSizedHandleScope;

 namespace mirror {
 class Object;
 }

-class Thread;
+// Basic handle scope, tracked by a list. May be variable sized.
+class PACKED(4) BaseHandleScope {
+ public:
+  bool IsVariableSized() const {
+    return number_of_references_ == kNumReferencesVariableSized;
+  }
+
+  // Number of references contained within this handle scope.
+  ALWAYS_INLINE uint32_t NumberOfReferences() const;
+
+  ALWAYS_INLINE bool Contains(StackReference<mirror::Object>* handle_scope_entry) const;
+
+  template <typename Visitor>
+  ALWAYS_INLINE void VisitRoots(Visitor& visitor) REQUIRES_SHARED(Locks::mutator_lock_);
+
+  // Link to previous BaseHandleScope or null.
+  BaseHandleScope* GetLink() const {
+    return link_;
+  }
+
+  ALWAYS_INLINE VariableSizedHandleScope* AsVariableSized();
+  ALWAYS_INLINE HandleScope* AsHandleScope();
+  ALWAYS_INLINE const VariableSizedHandleScope* AsVariableSized() const;
+  ALWAYS_INLINE const HandleScope* AsHandleScope() const;
+
+ protected:
+  BaseHandleScope(BaseHandleScope* link, uint32_t num_references)
+      : link_(link),
+        number_of_references_(num_references) {}
+
+  // Variable sized constructor.
+  BaseHandleScope(BaseHandleScope* link)
+      : link_(link),
+        number_of_references_(kNumReferencesVariableSized) {}
+
+  static constexpr int32_t kNumReferencesVariableSized = -1;
+
+  // Link-list of handle scopes. The root is held by a Thread.
+  BaseHandleScope* const link_;
+
+  // Number of handlerized references. -1 for variable sized handle scopes.
+  const int32_t number_of_references_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(BaseHandleScope);
+};

 // HandleScopes are scoped objects containing a number of Handles. They are used to allocate
 // handles, for these handles (and the objects contained within them) to be visible/roots for the
 // GC. It is most common to stack allocate HandleScopes using StackHandleScope.
-class PACKED(4) HandleScope {
+class PACKED(4) HandleScope : public BaseHandleScope {
  public:
   ~HandleScope() {}

-  // Number of references contained within this handle scope.
-  uint32_t NumberOfReferences() const {
-    return number_of_references_;
-  }
-
   // We have versions with and without explicit pointer size of the following. The first two are
   // used at runtime, so OFFSETOF_MEMBER computes the right offsets automatically. The last one
   // takes the pointer size explicitly so that at compile time we can cross-compile correctly.
@@ -59,11 +102,6 @@ class PACKED(4) HandleScope {
   // Returns the size of a HandleScope containing num_references handles.
   static size_t SizeOf(PointerSize pointer_size, uint32_t num_references);

-  // Link to previous HandleScope or null.
-  HandleScope* GetLink() const {
-    return link_;
-  }
-
   ALWAYS_INLINE mirror::Object* GetReference(size_t i) const
       REQUIRES_SHARED(Locks::mutator_lock_);

@@ -93,11 +131,26 @@ class PACKED(4) HandleScope {
   }

   // Placement new creation.
-  static HandleScope* Create(void* storage, HandleScope* link, uint32_t num_references)
+  static HandleScope* Create(void* storage, BaseHandleScope* link, uint32_t num_references)
       WARN_UNUSED {
     return new (storage) HandleScope(link, num_references);
   }

+  // Number of references contained within this handle scope.
+  ALWAYS_INLINE uint32_t NumberOfReferences() const {
+    DCHECK_GE(number_of_references_, 0);
+    return static_cast<uint32_t>(number_of_references_);
+  }
+
+  template <typename Visitor>
+  void VisitRoots(Visitor& visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+    for (size_t i = 0, count = NumberOfReferences(); i < count; ++i) {
+      // GetReference returns a pointer to the stack reference within the handle scope. If this
+      // needs to be updated, it will be done by the root visitor.
+      visitor.VisitRootIfNonNull(GetHandle(i).GetReference());
+    }
+  }
+
  protected:
   // Return backing storage used for references.
   ALWAYS_INLINE StackReference<mirror::Object>* GetReferences() const {
@@ -105,20 +158,11 @@ class PACKED(4) HandleScope {
     return reinterpret_cast<StackReference<mirror::Object>*>(address);
   }

-  explicit HandleScope(size_t number_of_references) :
-      link_(nullptr), number_of_references_(number_of_references) {
-  }
+  explicit HandleScope(size_t number_of_references) : HandleScope(nullptr, number_of_references) {}

   // Semi-hidden constructor. Construction expected by generated code and StackHandleScope.
-  HandleScope(HandleScope* link, uint32_t num_references) :
-      link_(link), number_of_references_(num_references) {
-  }
-
-  // Link-list of handle scopes. The root is held by a Thread.
-  HandleScope* const link_;
-
-  // Number of handlerized references.
-  const uint32_t number_of_references_;
+  HandleScope(BaseHandleScope* link, uint32_t num_references)
+      : BaseHandleScope(link, num_references) {}

   // Storage for references.
   // StackReference<mirror::Object> references_[number_of_references_]
@@ -165,14 +209,10 @@ class HandleWrapperObjPtr : public MutableHandle<T> {
   ObjPtr<T>* const obj_;
 };

-
-// Scoped handle storage of a fixed size that is usually stack allocated.
+// Fixed size handle scope that is not necessarily linked in the thread.
 template<size_t kNumReferences>
-class PACKED(4) StackHandleScope FINAL : public HandleScope {
+class PACKED(4) FixedSizeHandleScope : public HandleScope {
  public:
-  explicit ALWAYS_INLINE StackHandleScope(Thread* self, mirror::Object* fill_value = nullptr);
-  ALWAYS_INLINE ~StackHandleScope();
-
   template<class T>
   ALWAYS_INLINE MutableHandle<T> NewHandle(T* object) REQUIRES_SHARED(Locks::mutator_lock_);

@@ -191,11 +231,15 @@ class PACKED(4) StackHandleScope FINAL : public HandleScope {
   ALWAYS_INLINE void SetReference(size_t i, mirror::Object* object)
       REQUIRES_SHARED(Locks::mutator_lock_);

-  Thread* Self() const {
-    return self_;
+  size_t RemainingSlots() const {
+    return kNumReferences - pos_;
   }

  private:
+  explicit ALWAYS_INLINE FixedSizeHandleScope(BaseHandleScope* link,
+                                              mirror::Object* fill_value = nullptr);
+  ALWAYS_INLINE ~FixedSizeHandleScope() {}
+
   template<class T>
   ALWAYS_INLINE MutableHandle<T> GetHandle(size_t i) REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK_LT(i, kNumReferences);
@@ -205,66 +249,65 @@ class PACKED(4) StackHandleScope FINAL : public HandleScope {
   // Reference storage needs to be first as expected by the HandleScope layout.
   StackReference<mirror::Object> storage_[kNumReferences];

-  // The thread that the stack handle scope is a linked list upon. The stack handle scope will
-  // push and pop itself from this thread.
-  Thread* const self_;
-
   // Position new handles will be created.
-  size_t pos_;
+  size_t pos_ = 0;

   template<size_t kNumRefs> friend class StackHandleScope;
+  friend class VariableSizedHandleScope;
 };

-// Utility class to manage a collection (stack) of StackHandleScope. All the managed
-// scope handle have the same fixed sized.
-// Calls to NewHandle will create a new handle inside the top StackHandleScope.
-// When the handle scope becomes full a new one is created and push on top of the
-// previous.
-//
-// NB:
-// - it is not safe to use the *same* StackHandleScopeCollection intermix with
-//   other StackHandleScopes.
-// - this is a an easy way around implementing a full ZoneHandleScope to manage an
-//   arbitrary number of handles.
-class StackHandleScopeCollection {
+// Scoped handle storage of a fixed size that is stack allocated.
+template<size_t kNumReferences>
+class PACKED(4) StackHandleScope FINAL : public FixedSizeHandleScope<kNumReferences> {
  public:
-  explicit StackHandleScopeCollection(Thread* const self) :
-      self_(self),
-      current_scope_num_refs_(0) {
-  }
+  explicit ALWAYS_INLINE StackHandleScope(Thread* self, mirror::Object* fill_value = nullptr);
+  ALWAYS_INLINE ~StackHandleScope();

-  ~StackHandleScopeCollection() {
-    while (!scopes_.empty()) {
-      delete scopes_.top();
-      scopes_.pop();
-    }
+  Thread* Self() const {
+    return self_;
   }

+ private:
+  // The thread that the stack handle scope is a linked list upon. The stack handle scope will
+  // push and pop itself from this thread.
+  Thread* const self_;
+};
+
+// Utility class to manage a variable sized handle scope by having a list of fixed size handle
+// scopes.
+// Calls to NewHandle will create a new handle inside the current FixedSizeHandleScope.
+// When the current handle scope becomes full a new one is created and put at the front of the
+// list.
+class VariableSizedHandleScope : public BaseHandleScope {
+ public:
+  explicit VariableSizedHandleScope(Thread* const self);
+  ~VariableSizedHandleScope();
+
   template<class T>
-  MutableHandle<T> NewHandle(T* object) REQUIRES_SHARED(Locks::mutator_lock_) {
-    if (scopes_.empty() || current_scope_num_refs_ >= kNumReferencesPerScope) {
-      StackHandleScope<kNumReferencesPerScope>* scope =
-          new StackHandleScope<kNumReferencesPerScope>(self_);
-      scopes_.push(scope);
-      current_scope_num_refs_ = 0;
-    }
-    current_scope_num_refs_++;
-    return scopes_.top()->NewHandle(object);
-  }
+  MutableHandle<T> NewHandle(T* object) REQUIRES_SHARED(Locks::mutator_lock_);

   template<class MirrorType, bool kPoison>
   MutableHandle<MirrorType> NewHandle(ObjPtr<MirrorType, kPoison> ptr)
       REQUIRES_SHARED(Locks::mutator_lock_);

+  // Number of references contained within this handle scope.
+  ALWAYS_INLINE uint32_t NumberOfReferences() const;
+
+  ALWAYS_INLINE bool Contains(StackReference<mirror::Object>* handle_scope_entry) const;
+
+  template <typename Visitor>
+  void VisitRoots(Visitor& visitor) REQUIRES_SHARED(Locks::mutator_lock_);
+
  private:
   static constexpr size_t kNumReferencesPerScope = 4;

   Thread* const self_;

-  std::stack<StackHandleScope<kNumReferencesPerScope>*> scopes_;
-  size_t current_scope_num_refs_;
+  // Linked list of fixed size handle scopes.
+  using LocalScopeType = FixedSizeHandleScope<kNumReferencesPerScope>;
+  LocalScopeType* current_scope_;

-  DISALLOW_COPY_AND_ASSIGN(StackHandleScopeCollection);
+  DISALLOW_COPY_AND_ASSIGN(VariableSizedHandleScope);
 };

 }  // namespace art
diff --git a/runtime/handle_scope_test.cc b/runtime/handle_scope_test.cc
index c269a37f8d..92063c4ba8 100644
--- a/runtime/handle_scope_test.cc
+++ b/runtime/handle_scope_test.cc
@@ -15,6 +15,7 @@
  */

 #include "base/enums.h"
+#include "common_runtime_test.h"
 #include "gtest/gtest.h"
 #include "handle_scope-inl.h"
 #include "scoped_thread_state_change-inl.h"
@@ -22,51 +23,85 @@

 namespace art {

-// Handle scope with a fixed size which is allocated on the stack.
-template<size_t kNumReferences>
-class NoThreadStackHandleScope : public HandleScope {
- public:
-  explicit NoThreadStackHandleScope(HandleScope* link) : HandleScope(link, kNumReferences) {
-  }
-  ~NoThreadStackHandleScope() {
-  }
-
- private:
-  // references_storage_ needs to be first so that it matches the address of references_
-  StackReference<mirror::Object> references_storage_[kNumReferences];
-};
+class HandleScopeTest : public CommonRuntimeTest {};

 // Test the offsets computed for members of HandleScope. Because of cross-compiling
 // it is impossible the use OFFSETOF_MEMBER, so we do some reasonable computations ourselves. This
 // test checks whether we do the right thing.
-TEST(HandleScopeTest, Offsets) NO_THREAD_SAFETY_ANALYSIS {
+TEST_F(HandleScopeTest, Offsets) {
+  ScopedObjectAccess soa(Thread::Current());
+  ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
   // As the members of HandleScope are private, we cannot use OFFSETOF_MEMBER
   // here. So do the inverse: set some data, and access it through pointers created from the offsets.
-  NoThreadStackHandleScope<0x9ABC> test_table(reinterpret_cast<HandleScope*>(0x5678));
-  test_table.SetReference(0, reinterpret_cast<mirror::Object*>(0x1234));
+  StackHandleScope<0x1> hs0(soa.Self());
+  static const size_t kNumReferences = 0x9ABC;
+  StackHandleScope<kNumReferences> test_table(soa.Self());
+  ObjPtr<mirror::Class> c = class_linker->FindSystemClass(soa.Self(), "Ljava/lang/Object;");
+  test_table.SetReference(0, c.Ptr());
   uint8_t* table_base_ptr = reinterpret_cast<uint8_t*>(&test_table);

   {
-    uintptr_t* link_ptr = reinterpret_cast<uintptr_t*>(table_base_ptr +
+    BaseHandleScope** link_ptr = reinterpret_cast<BaseHandleScope**>(table_base_ptr +
         HandleScope::LinkOffset(kRuntimePointerSize));
-    EXPECT_EQ(*link_ptr, static_cast<size_t>(0x5678));
+    EXPECT_EQ(*link_ptr, &hs0);
   }

   {
     uint32_t* num_ptr = reinterpret_cast<uint32_t*>(table_base_ptr +
         HandleScope::NumberOfReferencesOffset(kRuntimePointerSize));
-    EXPECT_EQ(*num_ptr, static_cast<size_t>(0x9ABC));
+    EXPECT_EQ(*num_ptr, static_cast<size_t>(kNumReferences));
   }

   {
-    // Assume sizeof(StackReference<mirror::Object>) == sizeof(uint32_t)
-    // TODO: How can we make this assumption-less but still access directly and fully?
-    EXPECT_EQ(sizeof(StackReference<mirror::Object>), sizeof(uint32_t));
-
-    uint32_t* ref_ptr = reinterpret_cast<uint32_t*>(table_base_ptr +
+    auto* ref_ptr = reinterpret_cast<StackReference<mirror::Object>*>(table_base_ptr +
         HandleScope::ReferencesOffset(kRuntimePointerSize));
-    EXPECT_EQ(*ref_ptr, static_cast<uint32_t>(0x1234));
+    EXPECT_OBJ_PTR_EQ(ref_ptr->AsMirrorPtr(), c);
+  }
+}
+
+class CollectVisitor {
+ public:
+  void VisitRootIfNonNull(StackReference<mirror::Object>* ref) {
+    if (!ref->IsNull()) {
+      visited.insert(ref);
+    }
+    ++total_visited;
+  }
+
+  std::set<StackReference<mirror::Object>*> visited;
+  size_t total_visited = 0;  // including null.
+};
+
+// Test functionality of variable sized handle scopes.
+TEST_F(HandleScopeTest, VariableSized) {
+  ScopedObjectAccess soa(Thread::Current());
+  VariableSizedHandleScope hs(soa.Self());
+  ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
+  Handle<mirror::Class> c =
+      hs.NewHandle(class_linker->FindSystemClass(soa.Self(), "Ljava/lang/Object;"));
+  // Test nested scopes.
+  StackHandleScope<1> inner(soa.Self());
+  inner.NewHandle(c->AllocObject(soa.Self()));
+  // Add a bunch of handles and make sure callbacks work.
+  static const size_t kNumHandles = 100;
+  std::vector<Handle<mirror::Object>> handles;
+  for (size_t i = 0; i < kNumHandles; ++i) {
+    BaseHandleScope* base = &hs;
+    ObjPtr<mirror::Object> o = c->AllocObject(soa.Self());
+    handles.push_back(hs.NewHandle(o));
+    EXPECT_OBJ_PTR_EQ(o, handles.back().Get());
+    EXPECT_TRUE(hs.Contains(handles.back().GetReference()));
+    EXPECT_TRUE(base->Contains(handles.back().GetReference()));
+    EXPECT_EQ(hs.NumberOfReferences(), base->NumberOfReferences());
+  }
+  CollectVisitor visitor;
+  BaseHandleScope* base = &hs;
+  base->VisitRoots(visitor);
+  EXPECT_LE(visitor.visited.size(), base->NumberOfReferences());
+  EXPECT_EQ(visitor.total_visited, base->NumberOfReferences());
+  for (StackReference<mirror::Object>* ref : visitor.visited) {
+    EXPECT_TRUE(base->Contains(ref));
+  }
 }
diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc
index 31ce4c11cd..18529561cf 100644
--- a/runtime/native/dalvik_system_VMDebug.cc
+++ b/runtime/native/dalvik_system_VMDebug.cc
@@ -265,7 +265,7 @@ static jlong VMDebug_countInstancesOfClass(JNIEnv* env,
   if (c == nullptr) {
     return 0;
   }
-  StackHandleScopeCollection hs(soa.Self());
+  VariableSizedHandleScope hs(soa.Self());
   std::vector<Handle<mirror::Class>> classes {hs.NewHandle(c)};
   uint64_t count = 0;
   heap->CountInstances(classes, countAssignable, &count);
@@ -284,7 +284,7 @@ static jlongArray VMDebug_countInstancesOfClasses(JNIEnv* env,
   if (decoded_classes == nullptr) {
     return nullptr;
   }
-  StackHandleScopeCollection hs(soa.Self());
+  VariableSizedHandleScope hs(soa.Self());
   std::vector<Handle<mirror::Class>> classes;
   for (size_t i = 0, count = decoded_classes->GetLength(); i < count; ++i) {
     classes.push_back(hs.NewHandle(decoded_classes->Get(i)));
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 7f88035abc..45d3e348d6 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1826,7 +1826,7 @@ void Thread::RemoveFromThreadGroup(ScopedObjectAccess& soa) {

 size_t Thread::NumHandleReferences() {
   size_t count = 0;
-  for (HandleScope* cur = tlsPtr_.top_handle_scope; cur != nullptr; cur = cur->GetLink()) {
+  for (BaseHandleScope* cur = tlsPtr_.top_handle_scope; cur != nullptr; cur = cur->GetLink()) {
     count += cur->NumberOfReferences();
   }
   return count;
@@ -1835,7 +1835,7 @@ size_t Thread::NumHandleReferences() {
 bool Thread::HandleScopeContains(jobject obj) const {
   StackReference<mirror::Object>* hs_entry =
       reinterpret_cast<StackReference<mirror::Object>*>(obj);
-  for (HandleScope* cur = tlsPtr_.top_handle_scope; cur!= nullptr; cur = cur->GetLink()) {
+  for (BaseHandleScope* cur = tlsPtr_.top_handle_scope; cur!= nullptr; cur = cur->GetLink()) {
     if (cur->Contains(hs_entry)) {
       return true;
     }
@@ -1847,12 +1847,8 @@ bool Thread::HandleScopeContains(jobject obj) const {
 void Thread::HandleScopeVisitRoots(RootVisitor* visitor, uint32_t thread_id) {
   BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(
       visitor, RootInfo(kRootNativeStack, thread_id));
-  for (HandleScope* cur = tlsPtr_.top_handle_scope; cur; cur = cur->GetLink()) {
-    for (size_t j = 0, count = cur->NumberOfReferences(); j < count; ++j) {
-      // GetReference returns a pointer to the stack reference within the handle scope. If this
-      // needs to be updated, it will be done by the root visitor.
-      buffered_visitor.VisitRootIfNonNull(cur->GetHandle(j).GetReference());
-    }
+  for (BaseHandleScope* cur = tlsPtr_.top_handle_scope; cur; cur = cur->GetLink()) {
+    cur->VisitRoots(buffered_visitor);
   }
 }
diff --git a/runtime/thread.h b/runtime/thread.h
index 20b4cc144b..376a69c6c0 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -799,17 +799,17 @@ class Thread {
   void HandleScopeVisitRoots(RootVisitor* visitor, uint32_t thread_id)
       REQUIRES_SHARED(Locks::mutator_lock_);

-  HandleScope* GetTopHandleScope() {
+  BaseHandleScope* GetTopHandleScope() {
     return tlsPtr_.top_handle_scope;
   }

-  void PushHandleScope(HandleScope* handle_scope) {
+  void PushHandleScope(BaseHandleScope* handle_scope) {
     DCHECK_EQ(handle_scope->GetLink(), tlsPtr_.top_handle_scope);
     tlsPtr_.top_handle_scope = handle_scope;
   }

-  HandleScope* PopHandleScope() {
-    HandleScope* handle_scope = tlsPtr_.top_handle_scope;
+  BaseHandleScope* PopHandleScope() {
+    BaseHandleScope* handle_scope = tlsPtr_.top_handle_scope;
     DCHECK(handle_scope != nullptr);
     tlsPtr_.top_handle_scope = tlsPtr_.top_handle_scope->GetLink();
     return handle_scope;
@@ -1446,7 +1446,7 @@ class Thread {
     mirror::Object* monitor_enter_object;

     // Top of linked list of handle scopes or null for none.
-    HandleScope* top_handle_scope;
+    BaseHandleScope* top_handle_scope;

     // Needed to get the right ClassLoader in JNI_OnLoad, but also
     // useful for testing.