author     2023-11-10 09:45:51 +0100
committer  2023-11-13 07:59:25 +0000
commit     49c100b4b785c92464b283cf8ff65e187e8c7546 (patch)
tree       d4bd69dcc2950900f599ac86ab2fd37a673d5975
parent     4076d337451ab2f423c85d398b9443a4fcc819eb (diff)
Clean up `HandleScope`.
Implement `Size()` and rename `NumberOfReferences()` to
`Capacity()`. Replace `Capacity()` with `Size()` where
appropriate, for example to avoid visiting unused slots.
Remove some obsolete dead code and move some functions
from `FixedSizeHandleScope<.>` to `HandleScope`.
Test: m test-art-host-gtest
Test: testrunner.py --host --optimizing
Change-Id: I1e49fb6144f77692ff22c6fbd0578d995b5a01f1
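
For orientation, the following is a minimal, self-contained sketch of the `Size()`/`Capacity()` split described above. It is not ART code; `ToyHandleScope` and its members are invented for illustration. `Capacity()` is the fixed number of slots, `Size()` is how many slots have been populated so far, and visitors should walk only `Size()` entries.

#include <array>
#include <cassert>
#include <cstddef>

// Illustrative only: a fixed-capacity scope with a size counter.
template <std::size_t kCapacity>
class ToyHandleScope {
 public:
  // Stores an object pointer in the next free slot, loosely like NewHandle().
  std::size_t NewHandle(void* object) {
    assert(size_ < kCapacity);  // Mirrors DCHECK_LT(Size(), Capacity()).
    slots_[size_] = object;
    return size_++;
  }
  std::size_t Size() const { return size_; }                     // Slots in use.
  static constexpr std::size_t Capacity() { return kCapacity; }  // Total slots, immutable.

 private:
  std::array<void*, kCapacity> slots_{};
  std::size_t size_ = 0;  // Replaces the old "remaining slots" bookkeeping.
};

int main() {
  ToyHandleScope<4> scope;
  int a = 1;
  int b = 2;
  scope.NewHandle(&a);
  scope.NewHandle(&b);
  // A visitor should walk only Size() entries, not Capacity(), to skip unused slots.
  assert(scope.Size() == 2);
  assert(scope.Capacity() == 4);
  return 0;
}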
-rw-r--r--  compiler/optimizing/inliner.cc                  18
-rw-r--r--  runtime/handle_scope-inl.h                     161
-rw-r--r--  runtime/handle_scope.h                         134
-rw-r--r--  runtime/handle_scope_test.cc                    14
-rw-r--r--  runtime/interpreter/unstarted_runtime_test.cc    2
-rw-r--r--  runtime/jit/jit_code_cache.cc                    6
-rw-r--r--  runtime/string_builder_append.cc                 3
7 files changed, 162 insertions, 176 deletions
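
As a reading aid for the `VariableSizedHandleScope` changes in runtime/handle_scope-inl.h below, here is a hypothetical standalone sketch (invented types, not ART code) of computing the size and capacity of a scope built from a linked list of fixed-capacity blocks, where only the newest block may be partially filled.

#include <cassert>
#include <cstdint>

// Illustrative only: a chain of fixed-capacity blocks, newest block first.
struct Block {
  static constexpr uint32_t kCapacity = 4;  // Stand-in for kNumReferencesPerScope.
  uint32_t size = 0;                        // Slots used in this block.
  const Block* link = nullptr;              // Next (older) block in the chain.
};

// Size of the whole chain: the newest block may be partially filled,
// every older block is full.
uint32_t ChainSize(const Block* newest) {
  assert(newest != nullptr);
  uint32_t sum = newest->size;
  for (const Block* cur = newest->link; cur != nullptr; cur = cur->link) {
    assert(cur->size == Block::kCapacity);  // Older blocks are always full.
    sum += Block::kCapacity;
  }
  return sum;
}

// Capacity of the whole chain: every block contributes its full slot count.
uint32_t ChainCapacity(const Block* newest) {
  uint32_t sum = 0;
  for (const Block* cur = newest; cur != nullptr; cur = cur->link) {
    sum += Block::kCapacity;
  }
  return sum;
}

int main() {
  Block oldest;                    // A full block lower in the chain.
  oldest.size = Block::kCapacity;
  Block newest;                    // The partially filled block on top.
  newest.size = 1;
  newest.link = &oldest;
  assert(ChainSize(&newest) == 5);
  assert(ChainCapacity(&newest) == 8);
  return 0;
}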
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 91be79f8ec..9d80db0de0 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -336,8 +336,8 @@ static dex::TypeIndex FindClassIndexIn(ObjPtr<mirror::Class> cls,
 HInliner::InlineCacheType HInliner::GetInlineCacheType(
     const StackHandleScope<InlineCache::kIndividualCacheSize>& classes) {
-  DCHECK_EQ(classes.NumberOfReferences(), InlineCache::kIndividualCacheSize);
-  uint8_t number_of_types = InlineCache::kIndividualCacheSize - classes.RemainingSlots();
+  DCHECK_EQ(classes.Capacity(), InlineCache::kIndividualCacheSize);
+  uint8_t number_of_types = classes.Size();
   if (number_of_types == 0) {
     return kInlineCacheUninitialized;
   } else if (number_of_types == 1) {
@@ -669,8 +669,8 @@ HInliner::InlineCacheType HInliner::GetInlineCacheJIT(
 HInliner::InlineCacheType HInliner::GetInlineCacheAOT(
     HInvoke* invoke_instruction,
     /*out*/StackHandleScope<InlineCache::kIndividualCacheSize>* classes) {
-  DCHECK_EQ(classes->NumberOfReferences(), InlineCache::kIndividualCacheSize);
-  DCHECK_EQ(classes->RemainingSlots(), InlineCache::kIndividualCacheSize);
+  DCHECK_EQ(classes->Capacity(), InlineCache::kIndividualCacheSize);
+  DCHECK_EQ(classes->Size(), 0u);
   const ProfileCompilationInfo* pci = codegen_->GetCompilerOptions().GetProfileCompilationInfo();
   if (pci == nullptr) {
@@ -716,7 +716,7 @@ HInliner::InlineCacheType HInliner::GetInlineCacheAOT(
         << descriptor;
     return kInlineCacheMissingTypes;
   }
-  DCHECK_NE(classes->RemainingSlots(), 0u);
+  DCHECK_LT(classes->Size(), classes->Capacity());
   classes->NewHandle(clazz);
 }
@@ -967,8 +967,8 @@ bool HInliner::TryInlinePolymorphicCall(
   bool all_targets_inlined = true;
   bool one_target_inlined = false;
-  DCHECK_EQ(classes.NumberOfReferences(), InlineCache::kIndividualCacheSize);
-  uint8_t number_of_types = InlineCache::kIndividualCacheSize - classes.RemainingSlots();
+  DCHECK_EQ(classes.Capacity(), InlineCache::kIndividualCacheSize);
+  uint8_t number_of_types = classes.Size();
   for (size_t i = 0; i != number_of_types; ++i) {
     DCHECK(classes.GetReference(i) != nullptr);
     Handle<mirror::Class> handle =
@@ -1154,8 +1154,8 @@ bool HInliner::TryInlinePolymorphicCallToSameTarget(
   // Check whether we are actually calling the same method among
   // the different types seen.
-  DCHECK_EQ(classes.NumberOfReferences(), InlineCache::kIndividualCacheSize);
-  uint8_t number_of_types = InlineCache::kIndividualCacheSize - classes.RemainingSlots();
+  DCHECK_EQ(classes.Capacity(), InlineCache::kIndividualCacheSize);
+  uint8_t number_of_types = classes.Size();
   for (size_t i = 0; i != number_of_types; ++i) {
     DCHECK(classes.GetReference(i) != nullptr);
     ArtMethod* new_method = nullptr;
diff --git a/runtime/handle_scope-inl.h b/runtime/handle_scope-inl.h
index 60a82a29ae..1874237174 100644
--- a/runtime/handle_scope-inl.h
+++ b/runtime/handle_scope-inl.h
@@ -30,23 +30,24 @@ namespace art {
 template<size_t kNumReferences>
-inline FixedSizeHandleScope<kNumReferences>::FixedSizeHandleScope(BaseHandleScope* link,
-                                                                  ObjPtr<mirror::Object> fill_value)
+inline FixedSizeHandleScope<kNumReferences>::FixedSizeHandleScope(BaseHandleScope* link)
     : HandleScope(link, kNumReferences) {
   if (kDebugLocking) {
     Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
   }
   static_assert(kNumReferences >= 1, "FixedSizeHandleScope must contain at least 1 reference");
   DCHECK_EQ(&storage_[0], GetReferences());  // TODO: Figure out how to use a compile assert.
-  for (size_t i = 0; i < kNumReferences; ++i) {
-    SetReference(i, fill_value);
+  if (kIsDebugBuild) {
+    // Fill storage with "DEAD HAndleSCope", mapping H->"4" and S->"5".
+    for (size_t i = 0; i < kNumReferences; ++i) {
+      GetReferences()[i].Assign(reinterpret_cast32<mirror::Object*>(0xdead4a5c));
+    }
   }
 }

 template<size_t kNumReferences>
-inline StackHandleScope<kNumReferences>::StackHandleScope(Thread* self,
-                                                          ObjPtr<mirror::Object> fill_value)
-    : FixedSizeHandleScope<kNumReferences>(self->GetTopHandleScope(), fill_value),
+inline StackHandleScope<kNumReferences>::StackHandleScope(Thread* self)
+    : FixedSizeHandleScope<kNumReferences>(self->GetTopHandleScope()),
       self_(self) {
   DCHECK_EQ(self, Thread::Current());
   if (kDebugLocking) {
@@ -64,113 +65,96 @@ inline StackHandleScope<kNumReferences>::~StackHandleScope() {
   DCHECK_EQ(top_handle_scope, this);
 }

-inline size_t HandleScope::SizeOf(uint32_t num_references) {
-  size_t header_size = sizeof(HandleScope);
-  size_t data_size = sizeof(StackReference<mirror::Object>) * num_references;
-  return header_size + data_size;
-}
-
-inline size_t HandleScope::SizeOf(PointerSize pointer_size, uint32_t num_references) {
-  // Assume that the layout is packed.
-  size_t header_size = ReferencesOffset(pointer_size);
-  size_t data_size = sizeof(StackReference<mirror::Object>) * num_references;
-  return header_size + data_size;
-}
-
 inline ObjPtr<mirror::Object> HandleScope::GetReference(size_t i) const {
-  DCHECK_LT(i, NumberOfReferences());
+  DCHECK_LT(i, Size());
   if (kDebugLocking) {
     Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
   }
   return GetReferences()[i].AsMirrorPtr();
 }

-inline Handle<mirror::Object> HandleScope::GetHandle(size_t i) {
-  DCHECK_LT(i, NumberOfReferences());
-  return Handle<mirror::Object>(&GetReferences()[i]);
+template<class T>
+inline Handle<T> HandleScope::GetHandle(size_t i) {
+  DCHECK_LT(i, Size());
+  return Handle<T>(&GetReferences()[i]);
 }

-inline MutableHandle<mirror::Object> HandleScope::GetMutableHandle(size_t i) {
-  DCHECK_LT(i, NumberOfReferences());
-  return MutableHandle<mirror::Object>(&GetReferences()[i]);
+template<class T>
+inline MutableHandle<T> HandleScope::GetMutableHandle(size_t i) {
+  DCHECK_LT(i, Size());
+  return MutableHandle<T>(&GetReferences()[i]);
 }

 inline void HandleScope::SetReference(size_t i, ObjPtr<mirror::Object> object) {
   if (kDebugLocking) {
     Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
   }
-  DCHECK_LT(i, NumberOfReferences());
+  DCHECK_LT(i, Size());
+  VerifyObject(object);
   GetReferences()[i].Assign(object);
 }

+template<class T>
+inline MutableHandle<T> HandleScope::NewHandle(T* object) {
+  return NewHandle(ObjPtr<T>(object));
+}
+
+template<class MirrorType>
+inline MutableHandle<MirrorType> HandleScope::NewHandle(
+    ObjPtr<MirrorType> object) {
+  DCHECK_LT(Size(), Capacity());
+  size_t pos = size_;
+  ++size_;
+  SetReference(pos, object);
+  MutableHandle<MirrorType> h(GetMutableHandle<MirrorType>(pos));
+  return h;
+}
+
+template<class T>
+inline HandleWrapper<T> HandleScope::NewHandleWrapper(T** object) {
+  return HandleWrapper<T>(object, NewHandle(*object));
+}
+
+template<class T>
+inline HandleWrapperObjPtr<T> HandleScope::NewHandleWrapper(
+    ObjPtr<T>* object) {
+  return HandleWrapperObjPtr<T>(object, NewHandle(*object));
+}
+
 inline bool HandleScope::Contains(StackReference<mirror::Object>* handle_scope_entry) const {
-  // A HandleScope should always contain something. One created by the
-  // jni_compiler should have a jobject/jclass as a native method is
-  // passed in a this pointer or a class
-  DCHECK_GT(NumberOfReferences(), 0U);
-  return &GetReferences()[0] <= handle_scope_entry &&
-      handle_scope_entry <= &GetReferences()[number_of_references_ - 1];
+  return GetReferences() <= handle_scope_entry && handle_scope_entry < GetReferences() + size_;
 }

 template <typename Visitor>
 inline void HandleScope::VisitRoots(Visitor& visitor) {
-  for (size_t i = 0, count = NumberOfReferences(); i < count; ++i) {
+  for (size_t i = 0, size = Size(); i < size; ++i) {
     // GetReference returns a pointer to the stack reference within the handle scope. If this
     // needs to be updated, it will be done by the root visitor.
-    visitor.VisitRootIfNonNull(GetHandle(i).GetReference());
+    visitor.VisitRootIfNonNull(GetHandle<mirror::Object>(i).GetReference());
   }
 }

 template <typename Visitor>
 inline void HandleScope::VisitHandles(Visitor& visitor) {
-  for (size_t i = 0, count = NumberOfReferences(); i < count; ++i) {
-    if (GetHandle(i) != nullptr) {
-      visitor.Visit(GetHandle(i));
+  for (size_t i = 0, size = Size(); i < size; ++i) {
+    if (GetHandle<mirror::Object>(i) != nullptr) {
+      visitor.Visit(GetHandle<mirror::Object>(i));
     }
   }
 }

-template<size_t kNumReferences> template<class T>
-inline MutableHandle<T> FixedSizeHandleScope<kNumReferences>::NewHandle(T* object) {
-  return NewHandle(ObjPtr<T>(object));
-}
-
-template<size_t kNumReferences> template<class MirrorType>
-inline MutableHandle<MirrorType> FixedSizeHandleScope<kNumReferences>::NewHandle(
-    ObjPtr<MirrorType> object) {
-  SetReference(pos_, object);
-  MutableHandle<MirrorType> h(GetHandle<MirrorType>(pos_));
-  ++pos_;
-  return h;
-}
-
-template<size_t kNumReferences> template<class T>
-inline HandleWrapper<T> FixedSizeHandleScope<kNumReferences>::NewHandleWrapper(T** object) {
-  return HandleWrapper<T>(object, NewHandle(*object));
-}
-
-template<size_t kNumReferences> template<class T>
-inline HandleWrapperObjPtr<T> FixedSizeHandleScope<kNumReferences>::NewHandleWrapper(
-    ObjPtr<T>* object) {
-  return HandleWrapperObjPtr<T>(object, NewHandle(*object));
-}
-
-template<size_t kNumReferences>
-inline void FixedSizeHandleScope<kNumReferences>::SetReference(size_t i,
-                                                               ObjPtr<mirror::Object> object) {
-  if (kDebugLocking) {
-    Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
-  }
-  DCHECK_LT(i, kNumReferences);
-  VerifyObject(object);
-  GetReferences()[i].Assign(object);
+// The current size of this handle scope.
+inline uint32_t BaseHandleScope::Size() const {
+  return LIKELY(!IsVariableSized())
+      ? AsHandleScope()->Size()
+      : AsVariableSized()->Size();
 }

-// Number of references contained within this handle scope.
-inline uint32_t BaseHandleScope::NumberOfReferences() const {
+// The current capacity of this handle scope.
+inline uint32_t BaseHandleScope::Capacity() const {
   return LIKELY(!IsVariableSized())
-      ? AsHandleScope()->NumberOfReferences()
-      : AsVariableSized()->NumberOfReferences();
+      ? AsHandleScope()->Capacity()
+      : AsVariableSized()->Capacity();
 }

 inline bool BaseHandleScope::Contains(StackReference<mirror::Object>* handle_scope_entry) const {
@@ -224,7 +208,8 @@ inline MutableHandle<T> VariableSizedHandleScope::NewHandle(T* object) {
 template<class MirrorType>
 inline MutableHandle<MirrorType> VariableSizedHandleScope::NewHandle(ObjPtr<MirrorType> ptr) {
-  if (current_scope_->RemainingSlots() == 0) {
+  DCHECK_EQ(current_scope_->Capacity(), kNumReferencesPerScope);
+  if (current_scope_->Size() == kNumReferencesPerScope) {
     current_scope_ = new LocalScopeType(current_scope_);
   }
   return current_scope_->NewHandle(ptr);
@@ -256,11 +241,27 @@ inline VariableSizedHandleScope::~VariableSizedHandleScope() {
   }
 }

-inline uint32_t VariableSizedHandleScope::NumberOfReferences() const {
+inline uint32_t VariableSizedHandleScope::Size() const {
+  const LocalScopeType* cur = current_scope_;
+  DCHECK(cur != nullptr);
+  // The linked list of local scopes starts from the latest which may not be fully filled.
+  uint32_t sum = cur->Size();
+  cur = reinterpret_cast<const LocalScopeType*>(cur->GetLink());
+  while (cur != nullptr) {
+    // All other local scopes are fully filled.
+    DCHECK_EQ(cur->Size(), kNumReferencesPerScope);
+    sum += kNumReferencesPerScope;
+    cur = reinterpret_cast<const LocalScopeType*>(cur->GetLink());
+  }
+  return sum;
+}
+
+inline uint32_t VariableSizedHandleScope::Capacity() const {
   uint32_t sum = 0;
   const LocalScopeType* cur = current_scope_;
   while (cur != nullptr) {
-    sum += cur->NumberOfReferences();
+    DCHECK_EQ(cur->Capacity(), kNumReferencesPerScope);
+    sum += kNumReferencesPerScope;
     cur = reinterpret_cast<const LocalScopeType*>(cur->GetLink());
   }
   return sum;
diff --git a/runtime/handle_scope.h b/runtime/handle_scope.h
index 0c50312203..4cf6e5e6fe 100644
--- a/runtime/handle_scope.h
+++ b/runtime/handle_scope.h
@@ -45,11 +45,15 @@ class Object;
 class PACKED(4) BaseHandleScope {
  public:
   bool IsVariableSized() const {
-    return number_of_references_ == kNumReferencesVariableSized;
+    return capacity_ == kNumReferencesVariableSized;
   }

-  // Number of references contained within this handle scope.
-  ALWAYS_INLINE uint32_t NumberOfReferences() const;
+  // The current size of this handle scope.
+  ALWAYS_INLINE uint32_t Size() const;
+
+  // The current capacity of this handle scope.
+  // It can change (increase) only for a `VariableSizedHandleScope`.
+  ALWAYS_INLINE uint32_t Capacity() const;

   ALWAYS_INLINE bool Contains(StackReference<mirror::Object>* handle_scope_entry) const;

@@ -70,14 +74,14 @@ class PACKED(4) BaseHandleScope {
   ALWAYS_INLINE const HandleScope* AsHandleScope() const;

  protected:
-  BaseHandleScope(BaseHandleScope* link, uint32_t num_references)
+  BaseHandleScope(BaseHandleScope* link, uint32_t capacity)
       : link_(link),
-        number_of_references_(num_references) {}
+        capacity_(capacity) {}

   // Variable sized constructor.
   explicit BaseHandleScope(BaseHandleScope* link)
       : link_(link),
-        number_of_references_(kNumReferencesVariableSized) {}
+        capacity_(kNumReferencesVariableSized) {}

   static constexpr int32_t kNumReferencesVariableSized = -1;

@@ -85,7 +89,7 @@ class PACKED(4) BaseHandleScope {
   BaseHandleScope* const link_;

   // Number of handlerized references. -1 for variable sized handle scopes.
-  const int32_t number_of_references_;
+  const int32_t capacity_;

  private:
   DISALLOW_COPY_AND_ASSIGN(BaseHandleScope);
@@ -98,52 +102,57 @@ class PACKED(4) HandleScope : public BaseHandleScope {
  public:
   ~HandleScope() {}

-  // We have versions with and without explicit pointer size of the following. The first two are
-  // used at runtime, so OFFSETOF_MEMBER computes the right offsets automatically. The last one
-  // takes the pointer size explicitly so that at compile time we can cross-compile correctly.
+  ALWAYS_INLINE ObjPtr<mirror::Object> GetReference(size_t i) const
+      REQUIRES_SHARED(Locks::mutator_lock_);

-  // Returns the size of a HandleScope containing num_references handles.
-  static size_t SizeOf(uint32_t num_references);
+  template<class T>
+  ALWAYS_INLINE Handle<T> GetHandle(size_t i) REQUIRES_SHARED(Locks::mutator_lock_);

-  // Returns the size of a HandleScope containing num_references handles.
-  static size_t SizeOf(PointerSize pointer_size, uint32_t num_references);
+  template<class T>
+  ALWAYS_INLINE MutableHandle<T> GetMutableHandle(size_t i) REQUIRES_SHARED(Locks::mutator_lock_);

-  ALWAYS_INLINE ObjPtr<mirror::Object> GetReference(size_t i) const
+  ALWAYS_INLINE void SetReference(size_t i, ObjPtr<mirror::Object> object)
       REQUIRES_SHARED(Locks::mutator_lock_);

-  ALWAYS_INLINE Handle<mirror::Object> GetHandle(size_t i);
+  template<class T>
+  ALWAYS_INLINE MutableHandle<T> NewHandle(T* object) REQUIRES_SHARED(Locks::mutator_lock_);

-  ALWAYS_INLINE MutableHandle<mirror::Object> GetMutableHandle(size_t i)
+  template<class T>
+  ALWAYS_INLINE HandleWrapper<T> NewHandleWrapper(T** object)
       REQUIRES_SHARED(Locks::mutator_lock_);

-  ALWAYS_INLINE void SetReference(size_t i, ObjPtr<mirror::Object> object)
+  template<class T>
+  ALWAYS_INLINE HandleWrapperObjPtr<T> NewHandleWrapper(ObjPtr<T>* object)
       REQUIRES_SHARED(Locks::mutator_lock_);

+  template<class MirrorType>
+  ALWAYS_INLINE MutableHandle<MirrorType> NewHandle(ObjPtr<MirrorType> object)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
   ALWAYS_INLINE bool Contains(StackReference<mirror::Object>* handle_scope_entry) const;

   // Offset of link within HandleScope, used by generated code.
   static constexpr size_t LinkOffset([[maybe_unused]] PointerSize pointer_size) { return 0; }

   // Offset of length within handle scope, used by generated code.
-  static constexpr size_t NumberOfReferencesOffset(PointerSize pointer_size) {
+  static constexpr size_t CapacityOffset(PointerSize pointer_size) {
     return static_cast<size_t>(pointer_size);
   }

   // Offset of link within handle scope, used by generated code.
   static constexpr size_t ReferencesOffset(PointerSize pointer_size) {
-    return NumberOfReferencesOffset(pointer_size) + sizeof(number_of_references_);
+    return CapacityOffset(pointer_size) + sizeof(capacity_) + sizeof(size_);
   }

-  // Placement new creation.
-  static HandleScope* Create(void* storage, BaseHandleScope* link, uint32_t num_references)
-      WARN_UNUSED {
-    return new (storage) HandleScope(link, num_references);
+  // The current size of this handle scope.
+  ALWAYS_INLINE uint32_t Size() const {
+    return size_;
   }

-  // Number of references contained within this handle scope.
-  ALWAYS_INLINE uint32_t NumberOfReferences() const {
-    DCHECK_GE(number_of_references_, 0);
-    return static_cast<uint32_t>(number_of_references_);
+  // The capacity of this handle scope, immutable.
+  ALWAYS_INLINE uint32_t Capacity() const {
+    DCHECK_GT(capacity_, 0);
+    return static_cast<uint32_t>(capacity_);
   }

   template <typename Visitor>
@@ -159,14 +168,20 @@ class PACKED(4) HandleScope : public BaseHandleScope {
     return reinterpret_cast<StackReference<mirror::Object>*>(address);
   }

-  explicit HandleScope(size_t number_of_references) : HandleScope(nullptr, number_of_references) {}
+  explicit HandleScope(size_t capacity) : HandleScope(nullptr, capacity) {}

-  // Semi-hidden constructor. Construction expected by generated code and StackHandleScope.
-  HandleScope(BaseHandleScope* link, uint32_t num_references)
-      : BaseHandleScope(link, num_references) {}
+  HandleScope(BaseHandleScope* link, uint32_t capacity)
+      : BaseHandleScope(link, capacity) {
+    // Handle scope should be created only if we have a code path that stores something in it.
+    // We may not take that code path and the handle scope may remain empty.
+    DCHECK_NE(capacity, 0u);
+  }
+
+  // Position new handles will be created.
+  uint32_t size_ = 0;

-  // Storage for references.
-  // StackReference<mirror::Object> references_[number_of_references_]
+  // Storage for references is in derived classes.
+  // StackReference<mirror::Object> references_[capacity_]

  private:
   DISALLOW_COPY_AND_ASSIGN(HandleScope);
@@ -175,47 +190,14 @@ class PACKED(4) HandleScope : public BaseHandleScope {
 // Fixed size handle scope that is not necessarily linked in the thread.
 template<size_t kNumReferences>
 class PACKED(4) FixedSizeHandleScope : public HandleScope {
- public:
-  template<class T>
-  ALWAYS_INLINE MutableHandle<T> NewHandle(T* object) REQUIRES_SHARED(Locks::mutator_lock_);
-
-  template<class T>
-  ALWAYS_INLINE HandleWrapper<T> NewHandleWrapper(T** object)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  template<class T>
-  ALWAYS_INLINE HandleWrapperObjPtr<T> NewHandleWrapper(ObjPtr<T>* object)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  template<class MirrorType>
-  ALWAYS_INLINE MutableHandle<MirrorType> NewHandle(ObjPtr<MirrorType> object)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  ALWAYS_INLINE void SetReference(size_t i, ObjPtr<mirror::Object> object)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  size_t RemainingSlots() const {
-    return kNumReferences - pos_;
-  }
-
  private:
-  explicit ALWAYS_INLINE FixedSizeHandleScope(BaseHandleScope* link,
-                                              ObjPtr<mirror::Object> fill_value = nullptr)
+  explicit ALWAYS_INLINE FixedSizeHandleScope(BaseHandleScope* link)
       REQUIRES_SHARED(Locks::mutator_lock_);
   ALWAYS_INLINE ~FixedSizeHandleScope() REQUIRES_SHARED(Locks::mutator_lock_) {}

-  template<class T>
-  ALWAYS_INLINE MutableHandle<T> GetHandle(size_t i) REQUIRES_SHARED(Locks::mutator_lock_) {
-    DCHECK_LT(i, kNumReferences);
-    return MutableHandle<T>(&GetReferences()[i]);
-  }
-
-  // Reference storage needs to be first as expected by the HandleScope layout.
+  // Reference storage.
   StackReference<mirror::Object> storage_[kNumReferences];

-  // Position new handles will be created.
-  uint32_t pos_ = 0;
-
   template<size_t kNumRefs> friend class StackHandleScope;
   friend class VariableSizedHandleScope;
 };
@@ -224,8 +206,7 @@ class PACKED(4) FixedSizeHandleScope : public HandleScope {
 template<size_t kNumReferences>
 class PACKED(4) StackHandleScope final : public FixedSizeHandleScope<kNumReferences> {
  public:
-  explicit ALWAYS_INLINE StackHandleScope(Thread* self,
-                                          ObjPtr<mirror::Object> fill_value = nullptr)
+  explicit ALWAYS_INLINE StackHandleScope(Thread* self)
       REQUIRES_SHARED(Locks::mutator_lock_);

   ALWAYS_INLINE ~StackHandleScope() REQUIRES_SHARED(Locks::mutator_lock_);
@@ -257,8 +238,11 @@ class VariableSizedHandleScope : public BaseHandleScope {
   MutableHandle<MirrorType> NewHandle(ObjPtr<MirrorType> ptr)
       REQUIRES_SHARED(Locks::mutator_lock_);

-  // Number of references contained within this handle scope.
-  ALWAYS_INLINE uint32_t NumberOfReferences() const;
+  // The current size of this handle scope.
+  ALWAYS_INLINE uint32_t Size() const;
+
+  // The current capacity of this handle scope.
+  ALWAYS_INLINE uint32_t Capacity() const;

   ALWAYS_INLINE bool Contains(StackReference<mirror::Object>* handle_scope_entry) const;

@@ -273,8 +257,8 @@ class VariableSizedHandleScope : public BaseHandleScope {
   static constexpr size_t kSizeOfReferencesPerScope =
       kLocalScopeSize
           - /* BaseHandleScope::link_ */ sizeof(BaseHandleScope*)
-          - /* BaseHandleScope::number_of_references_ */ sizeof(int32_t)
-          - /* FixedSizeHandleScope<>::pos_ */ sizeof(uint32_t);
+          - /* BaseHandleScope::capacity_ */ sizeof(int32_t)
+          - /* HandleScope<>::size_ */ sizeof(uint32_t);
   static constexpr size_t kNumReferencesPerScope =
       kSizeOfReferencesPerScope / sizeof(StackReference<mirror::Object>);
diff --git a/runtime/handle_scope_test.cc b/runtime/handle_scope_test.cc
index 9207303750..fe85f25d96 100644
--- a/runtime/handle_scope_test.cc
+++ b/runtime/handle_scope_test.cc
@@ -57,7 +57,7 @@ TEST_F(HandleScopeTest, Offsets) {
   static const size_t kNumReferences = 0x9ABC;
   StackHandleScope<kNumReferences> test_table(soa.Self());
   ObjPtr<mirror::Class> c = class_linker->FindSystemClass(soa.Self(), "Ljava/lang/Object;");
-  test_table.SetReference(0, c.Ptr());
+  test_table.NewHandle(c);

   uint8_t* table_base_ptr = reinterpret_cast<uint8_t*>(&test_table);

@@ -68,8 +68,8 @@ TEST_F(HandleScopeTest, Offsets) {
   }

   {
-    uint32_t* num_ptr = reinterpret_cast<uint32_t*>(table_base_ptr +
-        HandleScope::NumberOfReferencesOffset(kRuntimePointerSize));
+    uint32_t* num_ptr = reinterpret_cast<uint32_t*>(
+        table_base_ptr + HandleScope::CapacityOffset(kRuntimePointerSize));
     EXPECT_EQ(*num_ptr, static_cast<size_t>(kNumReferences));
   }

@@ -113,13 +113,15 @@ TEST_F(HandleScopeTest, VariableSized) {
     EXPECT_OBJ_PTR_EQ(o, handles.back().Get());
     EXPECT_TRUE(hs.Contains(handles.back().GetReference()));
     EXPECT_TRUE(base->Contains(handles.back().GetReference()));
-    EXPECT_EQ(hs.NumberOfReferences(), base->NumberOfReferences());
+    EXPECT_EQ(hs.Capacity(), base->Capacity());
   }

+  // Add one null handle.
+  hs.NewHandle<mirror::Object>(nullptr);
   CollectVisitor visitor;
   BaseHandleScope* base = &hs;
   base->VisitRoots(visitor);
-  EXPECT_LE(visitor.visited.size(), base->NumberOfReferences());
-  EXPECT_EQ(visitor.total_visited, base->NumberOfReferences());
+  EXPECT_EQ(visitor.visited.size() + /* null handle */ 1u, base->Size());
+  EXPECT_EQ(visitor.total_visited, base->Size());
   for (StackReference<mirror::Object>* ref : visitor.visited) {
     EXPECT_TRUE(base->Contains(ref));
   }
diff --git a/runtime/interpreter/unstarted_runtime_test.cc b/runtime/interpreter/unstarted_runtime_test.cc
index 3227ef7d76..daa1a825de 100644
--- a/runtime/interpreter/unstarted_runtime_test.cc
+++ b/runtime/interpreter/unstarted_runtime_test.cc
@@ -125,7 +125,7 @@ class UnstartedRuntimeTest : public CommonRuntimeTest {
                            const StackHandleScope<3>& data)
       REQUIRES_SHARED(Locks::mutator_lock_) {
     CHECK_EQ(array->GetLength(), 3);
-    CHECK_EQ(data.NumberOfReferences(), 3U);
+    CHECK_EQ(data.Size(), 3U);
     for (size_t i = 0; i < 3; ++i) {
       EXPECT_OBJ_PTR_EQ(data.GetReference(i), array->Get(static_cast<int32_t>(i))) << i;
     }
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 3aaca9acff..697ea94a85 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -616,15 +616,15 @@ void JitCodeCache::CopyInlineCacheInto(
     const InlineCache& ic,
     /*out*/StackHandleScope<InlineCache::kIndividualCacheSize>* classes) {
   static_assert(arraysize(ic.classes_) == InlineCache::kIndividualCacheSize);
-  DCHECK_EQ(classes->NumberOfReferences(), InlineCache::kIndividualCacheSize);
-  DCHECK_EQ(classes->RemainingSlots(), InlineCache::kIndividualCacheSize);
+  DCHECK_EQ(classes->Capacity(), InlineCache::kIndividualCacheSize);
+  DCHECK_EQ(classes->Size(), 0u);
   WaitUntilInlineCacheAccessible(Thread::Current());
   // Note that we don't need to lock `lock_` here, the compiler calling
   // this method has already ensured the inline cache will not be deleted.
   for (const GcRoot<mirror::Class>& root : ic.classes_) {
     mirror::Class* object = root.Read();
     if (object != nullptr) {
-      DCHECK_NE(classes->RemainingSlots(), 0u);
+      DCHECK_LT(classes->Size(), classes->Capacity());
       classes->NewHandle(object);
     }
   }
diff --git a/runtime/string_builder_append.cc b/runtime/string_builder_append.cc
index 2071733a1e..9803fe3880 100644
--- a/runtime/string_builder_append.cc
+++ b/runtime/string_builder_append.cc
@@ -396,7 +396,6 @@ inline int32_t StringBuilderAppend::Builder::CalculateLengthWithFlag() {
         UNREACHABLE();
     }
     ++current_arg;
-    DCHECK_LE(hs_.NumberOfReferences(), kMaxArgs);
   }

   if (UNLIKELY(has_fp_args)) {
@@ -430,6 +429,7 @@ inline void StringBuilderAppend::Builder::StoreData(ObjPtr<mirror::String> new_s
     DCHECK_LE(f & kArgMask, static_cast<uint32_t>(Argument::kLast));
     switch (static_cast<Argument>(f & kArgMask)) {
       case Argument::kString: {
+        DCHECK_LT(handle_index, hs_.Size());
         ObjPtr<mirror::String> str =
             ObjPtr<mirror::String>::DownCast(hs_.GetReference(handle_index));
         ++handle_index;
@@ -485,7 +485,6 @@ inline void StringBuilderAppend::Builder::StoreData(ObjPtr<mirror::String> new_s
         UNREACHABLE();
     }
     ++current_arg;
-    DCHECK_LE(handle_index, hs_.NumberOfReferences());
     DCHECK_LE(fp_arg_index, std::size(converted_fp_args_));
   }
   DCHECK_EQ(RemainingSpace(new_string, data), 0u) << std::hex << format_;
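
Finally, a hypothetical standalone sketch (invented names, not ART code) of the debug-only poison fill added to the `FixedSizeHandleScope` constructor above: in debug builds every slot starts out holding a recognizable dead pattern instead of a default value, so a read of an unused slot is easy to spot.

#include <cassert>
#include <cstddef>
#include <cstdint>

#ifdef NDEBUG
constexpr bool kIsDebugBuild = false;
#else
constexpr bool kIsDebugBuild = true;
#endif

// A recognizable poison value ("DEAD HAndleSCope" with H->4 and S->5, as in the diff).
constexpr uint32_t kDeadHandleScope = 0xdead4a5c;

template <std::size_t kNumSlots>
class PoisonedScope {
 public:
  PoisonedScope() {
    if (kIsDebugBuild) {
      // Debug builds only: mark every unused slot with the poison pattern.
      for (std::size_t i = 0; i < kNumSlots; ++i) {
        slots_[i] = kDeadHandleScope;
      }
    }
  }
  uint32_t Raw(std::size_t i) const { return slots_[i]; }

 private:
  uint32_t slots_[kNumSlots] = {};  // Zero-initialized in release builds.
};

int main() {
  PoisonedScope<2> scope;
  // In a debug build an untouched slot holds the poison value, not a usable reference.
  assert(!kIsDebugBuild || scope.Raw(0) == kDeadHandleScope);
  return 0;
}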