| field | value |
|---|---|
| author | 2023-08-24 01:37:12 +0000 |
| committer | 2023-08-24 03:06:07 +0000 |
| commit | f08a30d5de4f4d97367161397fa509d1c86f3034 (patch) |
| tree | 21a18939a8eae2e3a0e6dc85920a8c6b0d8ad350 |
| parent | 001a141186bdf4efcc95390e468783b9766bea2b (diff) |
Revert "Implement clamp-growth-limit for userfaultfd GC"
This reverts commit 9e5a197ee85b35e0e95fa48316f1a87e1d9232e5.
Reason for revert: Fix regression test failure on 'test_mapping_presubmit_rvc'
Bug: 297281725
Change-Id: I8ea8896ebb2a98b664fe1c6678b3eb38039ac570
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | runtime/gc/accounting/bitmap.cc | 6 |
| -rw-r--r-- | runtime/gc/accounting/bitmap.h | 16 |
| -rw-r--r-- | runtime/gc/accounting/space_bitmap.h | 4 |
| -rw-r--r-- | runtime/gc/collector/mark_compact.cc | 79 |
| -rw-r--r-- | runtime/gc/collector/mark_compact.h | 22 |
| -rw-r--r-- | runtime/gc/heap.cc | 8 |
| -rw-r--r-- | runtime/gc/space/bump_pointer_space-walk-inl.h | 2 |
| -rw-r--r-- | runtime/gc/space/bump_pointer_space.cc | 58 |
| -rw-r--r-- | runtime/gc/space/bump_pointer_space.h | 44 |
| -rw-r--r-- | runtime/gc/space/large_object_space.cc | 21 |
| -rw-r--r-- | runtime/gc/space/large_object_space.h | 4 |
11 files changed, 62 insertions(+), 202 deletions(-)
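For context on what is being reverted: `BumpPointerSpace::ClampGrowthLimit()` shrank the moving space's growth limit when an app forks from the zygote, clamping no further than the bytes already allocated, and then shrank the backing mapping to match. Below is a minimal, self-contained sketch of that clamping rule; `Mapping` is a hypothetical stand-in for ART's `MemMap`, and all names here are illustrative, not ART's actual API.

```cpp
#include <cassert>
#include <cstddef>
#include <iostream>

// Hypothetical stand-in for ART's MemMap: a reservation whose usable size can
// be shrunk (the real MemMap::SetSize() releases the trailing pages).
class Mapping {
 public:
  explicit Mapping(size_t capacity) : capacity_(capacity) {}
  size_t Size() const { return capacity_; }
  void SetSize(size_t new_size) {
    assert(new_size <= capacity_);  // Clamping only ever shrinks the mapping.
    capacity_ = new_size;
  }

 private:
  size_t capacity_;
};

// Sketch of the rule in the reverted BumpPointerSpace::ClampGrowthLimit():
// shrink to new_capacity if possible, but never below the bytes already in
// use, then resize the backing mapping. Returns the capacity achieved.
size_t ClampGrowthLimit(Mapping& map, size_t allocated_bytes, size_t new_capacity) {
  if (new_capacity < allocated_bytes) {
    // Equivalent to the reverted `clamp_size > free_capacity` adjustment:
    // clamp as much as possible without cutting into live allocations.
    new_capacity = allocated_bytes;
  }
  if (new_capacity < map.Size()) {
    map.SetSize(new_capacity);
  }
  return new_capacity;
}

int main() {
  Mapping moving_space(64 << 20);  // 64 MiB reservation.
  size_t used = 20 << 20;          // 20 MiB already allocated.
  // Requesting 16 MiB yields 20 MiB: the clamp is limited by current usage.
  std::cout << ClampGrowthLimit(moving_space, used, 16 << 20) << " bytes\n";
}
```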
diff --git a/runtime/gc/accounting/bitmap.cc b/runtime/gc/accounting/bitmap.cc
index 4e4109d1ac..bd10958496 100644
--- a/runtime/gc/accounting/bitmap.cc
+++ b/runtime/gc/accounting/bitmap.cc
@@ -33,12 +33,12 @@ Bitmap* Bitmap::CreateFromMemMap(MemMap&& mem_map, size_t num_bits) {
   return new Bitmap(std::move(mem_map), num_bits);
 }
 
-Bitmap::Bitmap(MemMap&& mem_map, size_t num_bits)
+Bitmap::Bitmap(MemMap&& mem_map, size_t bitmap_size)
     : mem_map_(std::move(mem_map)),
       bitmap_begin_(reinterpret_cast<uintptr_t*>(mem_map_.Begin())),
-      bitmap_numbits_(num_bits) {
+      bitmap_size_(bitmap_size) {
   CHECK(bitmap_begin_ != nullptr);
-  CHECK_NE(num_bits, 0U);
+  CHECK_NE(bitmap_size, 0U);
 }
 
 Bitmap::~Bitmap() {
diff --git a/runtime/gc/accounting/bitmap.h b/runtime/gc/accounting/bitmap.h
index f413243094..06398d6b10 100644
--- a/runtime/gc/accounting/bitmap.h
+++ b/runtime/gc/accounting/bitmap.h
@@ -19,12 +19,10 @@
 
 #include <limits.h>
 #include <stdint.h>
-
 #include <memory>
 #include <set>
 #include <vector>
 
-#include "base/bit_utils.h"
 #include "base/locks.h"
 #include "base/mem_map.h"
 #include "runtime_globals.h"
@@ -88,7 +86,9 @@ class Bitmap {
   }
 
   // Size of our bitmap in bits.
-  size_t BitmapSize() const { return bitmap_numbits_; }
+  size_t BitmapSize() const {
+    return bitmap_size_;
+  }
 
   // Check that a bit index is valid with a DCHECK.
   ALWAYS_INLINE void CheckValidBitIndex(size_t bit_index) const {
@@ -118,7 +118,7 @@ class Bitmap {
   uintptr_t* const bitmap_begin_;
 
   // Number of bits in the bitmap.
-  size_t bitmap_numbits_;
+  const size_t bitmap_size_;
 
  private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(Bitmap);
@@ -133,14 +133,6 @@ class MemoryRangeBitmap : public Bitmap {
   static MemoryRangeBitmap* CreateFromMemMap(
       MemMap&& mem_map, uintptr_t cover_begin, size_t num_bits);
 
-  void SetBitmapSize(size_t bytes) {
-    CHECK_ALIGNED(bytes, kAlignment);
-    bitmap_numbits_ = bytes / kAlignment;
-    size_t rounded_size =
-        RoundUp(bitmap_numbits_, kBitsPerBitmapWord) / kBitsPerBitmapWord * sizeof(uintptr_t);
-    mem_map_.SetSize(rounded_size);
-  }
-
   // Beginning of the memory range that the bitmap covers.
   ALWAYS_INLINE uintptr_t CoverBegin() const {
     return cover_begin_;
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index 326901c715..dbbea3a462 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -181,9 +181,9 @@ class SpaceBitmap {
   }
 
   void SetHeapSize(size_t bytes) {
+    // TODO: Un-map the end of the mem map.
     heap_limit_ = heap_begin_ + bytes;
-    bitmap_size_ = ComputeBitmapSize(bytes);
-    mem_map_.SetSize(bitmap_size_);
+    bitmap_size_ = OffsetToIndex(bytes) * sizeof(intptr_t);
     CHECK_EQ(HeapSize(), bytes);
   }
 
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index 0e0d26fbb9..78a364bc64 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -378,24 +378,6 @@ static bool IsSigbusFeatureAvailable() {
   return (gUffdFeatures & kUffdFeaturesForSigbus) == kUffdFeaturesForSigbus;
 }
 
-size_t MarkCompact::InitializeInfoMap(uint8_t* p, size_t moving_space_sz) {
-  size_t nr_moving_pages = moving_space_sz / kPageSize;
-
-  chunk_info_vec_ = reinterpret_cast<uint32_t*>(p);
-  vector_length_ = moving_space_sz / kOffsetChunkSize;
-  size_t total = vector_length_ * sizeof(uint32_t);
-
-  first_objs_non_moving_space_ = reinterpret_cast<ObjReference*>(p + total);
-  total += heap_->GetNonMovingSpace()->Capacity() / kPageSize * sizeof(ObjReference);
-
-  first_objs_moving_space_ = reinterpret_cast<ObjReference*>(p + total);
-  total += nr_moving_pages * sizeof(ObjReference);
-
-  pre_compact_offset_moving_space_ = reinterpret_cast<uint32_t*>(p + total);
-  total += nr_moving_pages * sizeof(uint32_t);
-  return total;
-}
-
 MarkCompact::MarkCompact(Heap* heap)
     : GarbageCollector(heap, "concurrent mark compact"),
       gc_barrier_(0),
@@ -413,8 +395,7 @@ MarkCompact::MarkCompact(Heap* heap)
       uffd_minor_fault_supported_(false),
       use_uffd_sigbus_(IsSigbusFeatureAvailable()),
       minor_fault_initialized_(false),
-      map_linear_alloc_shared_(false),
-      clamp_info_map_status_(ClampInfoStatus::kClampInfoNotDone) {
+      map_linear_alloc_shared_(false) {
   if (kIsDebugBuild) {
     updated_roots_.reset(new std::unordered_set<void*>());
   }
@@ -452,8 +433,18 @@ MarkCompact::MarkCompact(Heap* heap)
   if (UNLIKELY(!info_map_.IsValid())) {
     LOG(FATAL) << "Failed to allocate concurrent mark-compact chunk-info vector: " << err_msg;
   } else {
-    size_t total = InitializeInfoMap(info_map_.Begin(), moving_space_size);
-    DCHECK_EQ(total, info_map_.Size());
+    uint8_t* p = info_map_.Begin();
+    chunk_info_vec_ = reinterpret_cast<uint32_t*>(p);
+    vector_length_ = chunk_info_vec_size;
+
+    p += chunk_info_vec_size * sizeof(uint32_t);
+    first_objs_non_moving_space_ = reinterpret_cast<ObjReference*>(p);
+
+    p += nr_non_moving_pages * sizeof(ObjReference);
+    first_objs_moving_space_ = reinterpret_cast<ObjReference*>(p);
+
+    p += nr_moving_pages * sizeof(ObjReference);
+    pre_compact_offset_moving_space_ = reinterpret_cast<uint32_t*>(p);
   }
 
   size_t moving_space_alignment = BestPageTableAlignment(moving_space_size);
@@ -581,49 +572,6 @@ void MarkCompact::AddLinearAllocSpaceData(uint8_t* begin, size_t len) {
                                      is_shared);
 }
 
-void MarkCompact::ClampGrowthLimit(size_t new_capacity) {
-  // From-space is the same size as moving-space in virtual memory.
-  // However, if it's in >4GB address space then we don't need to do it
-  // synchronously.
-#if defined(__LP64__)
-  constexpr bool kClampFromSpace = kObjPtrPoisoning;
-#else
-  constexpr bool kClampFromSpace = true;
-#endif
-  size_t old_capacity = bump_pointer_space_->Capacity();
-  new_capacity = bump_pointer_space_->ClampGrowthLimit(new_capacity);
-  if (new_capacity < old_capacity) {
-    CHECK(from_space_map_.IsValid());
-    if (kClampFromSpace) {
-      from_space_map_.SetSize(new_capacity);
-    }
-    // NOTE: We usually don't use shadow_to_space_map_ and therefore the condition will
-    // mostly be false.
-    if (shadow_to_space_map_.IsValid() && shadow_to_space_map_.Size() > new_capacity) {
-      shadow_to_space_map_.SetSize(new_capacity);
-    }
-    clamp_info_map_status_ = ClampInfoStatus::kClampInfoPending;
-  }
-}
-
-void MarkCompact::MaybeClampGcStructures() {
-  size_t moving_space_size = bump_pointer_space_->Capacity();
-  DCHECK(thread_running_gc_ != nullptr);
-  if (UNLIKELY(clamp_info_map_status_ == ClampInfoStatus::kClampInfoPending)) {
-    CHECK(from_space_map_.IsValid());
-    if (from_space_map_.Size() > moving_space_size) {
-      from_space_map_.SetSize(moving_space_size);
-    }
-    // Bitmaps and other data structures
-    live_words_bitmap_->SetBitmapSize(moving_space_size);
-    size_t set_size = InitializeInfoMap(info_map_.Begin(), moving_space_size);
-    CHECK_LT(set_size, info_map_.Size());
-    info_map_.SetSize(set_size);
-
-    clamp_info_map_status_ = ClampInfoStatus::kClampInfoFinished;
-  }
-}
-
 void MarkCompact::PrepareCardTableForMarking(bool clear_alloc_space_cards) {
   TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
   accounting::CardTable* const card_table = heap_->GetCardTable();
@@ -3961,7 +3909,6 @@ void MarkCompact::MarkingPhase() {
   TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
   DCHECK_EQ(thread_running_gc_, Thread::Current());
   WriterMutexLock mu(thread_running_gc_, *Locks::heap_bitmap_lock_);
-  MaybeClampGcStructures();
   PrepareCardTableForMarking(/*clear_alloc_space_cards*/ true);
   MarkZygoteLargeObjects();
   MarkRoots(
diff --git a/runtime/gc/collector/mark_compact.h b/runtime/gc/collector/mark_compact.h
index 97709b8cd4..3f16d06825 100644
--- a/runtime/gc/collector/mark_compact.h
+++ b/runtime/gc/collector/mark_compact.h
@@ -76,7 +76,6 @@ class MarkCompact final : public GarbageCollector {
 
   void RunPhases() override REQUIRES(!Locks::mutator_lock_, !lock_);
 
-  void ClampGrowthLimit(size_t new_capacity) REQUIRES(Locks::heap_bitmap_lock_);
   // Updated before (or in) pre-compaction pause and is accessed only in the
   // pause or during concurrent compaction. The flag is reset in next GC cycle's
   // InitializePhase(). Therefore, it's safe to update without any memory ordering.
@@ -167,13 +166,6 @@ class MarkCompact final : public GarbageCollector {
     kProcessedAndMapped = 6  // Processed and mapped. For SIGBUS.
   };
 
-  // Different heap clamping states.
-  enum class ClampInfoStatus : uint8_t {
-    kClampInfoNotDone,
-    kClampInfoPending,
-    kClampInfoFinished
-  };
-
  private:
   using ObjReference = mirror::CompressedReference<mirror::Object>;
   // Number of bits (live-words) covered by a single chunk-info (below)
@@ -199,7 +191,6 @@ class MarkCompact final : public GarbageCollector {
     static constexpr uint32_t kBitmapWordsPerVectorWord =
             kBitsPerVectorWord / Bitmap::kBitsPerBitmapWord;
     static_assert(IsPowerOfTwo(kBitmapWordsPerVectorWord));
-    using MemRangeBitmap::SetBitmapSize;
     static LiveWordsBitmap* Create(uintptr_t begin, uintptr_t end);
 
     // Return offset (within the indexed chunk-info) of the nth live word.
@@ -535,14 +526,6 @@ class MarkCompact final : public GarbageCollector {
                             uint8_t* shadow_page,
                             Atomic<PageState>& state,
                             bool page_touched);
-  // Called for clamping of 'info_map_' and other GC data structures, which are
-  // small and/or in >4GB address space. There is no real benefit of clamping
-  // them synchronously during app forking. It clamps only if clamp_info_map_status_
-  // is set to kClampInfoPending, which is done by ClampGrowthLimit().
-  void MaybeClampGcStructures() REQUIRES(Locks::heap_bitmap_lock_);
-  // Initialize all the info-map related fields of this GC. Returns total size
-  // of all the structures in info-map.
-  size_t InitializeInfoMap(uint8_t* p, size_t moving_space_sz);
 
   // For checkpoints
   Barrier gc_barrier_;
@@ -778,10 +761,6 @@ class MarkCompact final : public GarbageCollector {
   // non-zygote processes during first GC, which sets up everyting for using
   // minor-fault from next GC.
   bool map_linear_alloc_shared_;
-  // Clamping statue of `info_map_`. Initialized with 'NotDone'. Once heap is
-  // clamped but info_map_ is delayed, we set it to 'Pending'. Once 'info_map_'
-  // is also clamped, then we set it to 'Finished'.
-  ClampInfoStatus clamp_info_map_status_;
 
   class FlipCallback;
   class ThreadFlipVisitor;
@@ -802,7 +781,6 @@ class MarkCompact final : public GarbageCollector {
 };
 
 std::ostream& operator<<(std::ostream& os, MarkCompact::PageState value);
-std::ostream& operator<<(std::ostream& os, MarkCompact::ClampInfoStatus value);
 
 }  // namespace collector
 }  // namespace gc
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index bcc613851c..99b9f47165 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -3826,18 +3826,10 @@ void Heap::ClampGrowthLimit() {
       malloc_space->ClampGrowthLimit();
     }
   }
-  if (large_object_space_ != nullptr) {
-    large_object_space_->ClampGrowthLimit(capacity_);
-  }
   if (collector_type_ == kCollectorTypeCC) {
     DCHECK(region_space_ != nullptr);
     // Twice the capacity as CC needs extra space for evacuating objects.
     region_space_->ClampGrowthLimit(2 * capacity_);
-  } else if (collector_type_ == kCollectorTypeCMC) {
-    DCHECK(gUseUserfaultfd);
-    DCHECK_NE(mark_compact_, nullptr);
-    DCHECK_NE(bump_pointer_space_, nullptr);
-    mark_compact_->ClampGrowthLimit(capacity_);
   }
   // This space isn't added for performance reasons.
   if (main_space_backup_.get() != nullptr) {
diff --git a/runtime/gc/space/bump_pointer_space-walk-inl.h b/runtime/gc/space/bump_pointer_space-walk-inl.h
index 89e42bcf27..a978f62c61 100644
--- a/runtime/gc/space/bump_pointer_space-walk-inl.h
+++ b/runtime/gc/space/bump_pointer_space-walk-inl.h
@@ -49,7 +49,7 @@ inline void BumpPointerSpace::Walk(Visitor&& visitor) {
   };
 
   {
-    MutexLock mu(Thread::Current(), lock_);
+    MutexLock mu(Thread::Current(), block_lock_);
     // If we have 0 blocks then we need to update the main header since we have bump pointer style
     // allocation into an unbounded region (actually bounded by Capacity()).
     if (block_sizes_.empty()) {
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index c357a877e2..7753f73ca4 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -45,12 +45,15 @@ BumpPointerSpace* BumpPointerSpace::CreateFromMemMap(const std::string& name, MemMap&& mem_map) {
 }
 
 BumpPointerSpace::BumpPointerSpace(const std::string& name, uint8_t* begin, uint8_t* limit)
-    : ContinuousMemMapAllocSpace(
-          name, MemMap::Invalid(), begin, begin, limit, kGcRetentionPolicyAlwaysCollect),
+    : ContinuousMemMapAllocSpace(name,
+                                 MemMap::Invalid(),
+                                 begin,
+                                 begin,
+                                 limit,
+                                 kGcRetentionPolicyAlwaysCollect),
       growth_end_(limit),
-      objects_allocated_(0),
-      bytes_allocated_(0),
-      lock_("Bump-pointer space lock"),
+      objects_allocated_(0), bytes_allocated_(0),
+      block_lock_("Block lock"),
       main_block_size_(0) {
   // This constructor gets called only from Heap::PreZygoteFork(), which
   // doesn't require a mark_bitmap.
@@ -64,9 +67,8 @@ BumpPointerSpace::BumpPointerSpace(const std::string& name, MemMap&& mem_map)
                                  mem_map.End(),
                                  kGcRetentionPolicyAlwaysCollect),
       growth_end_(mem_map_.End()),
-      objects_allocated_(0),
-      bytes_allocated_(0),
-      lock_("Bump-pointer space lock", kBumpPointerSpaceBlockLock),
+      objects_allocated_(0), bytes_allocated_(0),
+      block_lock_("Block lock", kBumpPointerSpaceBlockLock),
       main_block_size_(0) {
   mark_bitmap_ =
       accounting::ContinuousSpaceBitmap::Create("bump-pointer space live bitmap",
@@ -85,34 +87,14 @@ void BumpPointerSpace::Clear() {
   SetEnd(Begin());
   objects_allocated_.store(0, std::memory_order_relaxed);
   bytes_allocated_.store(0, std::memory_order_relaxed);
+  growth_end_ = Limit();
   {
-    MutexLock mu(Thread::Current(), lock_);
-    growth_end_ = Limit();
+    MutexLock mu(Thread::Current(), block_lock_);
     block_sizes_.clear();
     main_block_size_ = 0;
   }
 }
 
-size_t BumpPointerSpace::ClampGrowthLimit(size_t new_capacity) {
-  CHECK(gUseUserfaultfd);
-  MutexLock mu(Thread::Current(), lock_);
-  CHECK_EQ(growth_end_, Limit());
-  uint8_t* end = End();
-  CHECK_LE(end, growth_end_);
-  size_t free_capacity = growth_end_ - end;
-  size_t clamp_size = Capacity() - new_capacity;
-  if (clamp_size > free_capacity) {
-    new_capacity += clamp_size - free_capacity;
-  }
-  SetLimit(Begin() + new_capacity);
-  growth_end_ = Limit();
-  GetMemMap()->SetSize(new_capacity);
-  if (GetMarkBitmap()->HeapBegin() != 0) {
-    GetMarkBitmap()->SetHeapSize(new_capacity);
-  }
-  return new_capacity;
-}
-
 void BumpPointerSpace::Dump(std::ostream& os) const {
   os << GetName() << " "
      << reinterpret_cast<void*>(Begin()) << "-" << reinterpret_cast<void*>(End()) << " - "
@@ -120,7 +102,7 @@ void BumpPointerSpace::Dump(std::ostream& os) const {
 }
 
 size_t BumpPointerSpace::RevokeThreadLocalBuffers(Thread* thread) {
-  MutexLock mu(Thread::Current(), lock_);
+  MutexLock mu(Thread::Current(), block_lock_);
   RevokeThreadLocalBuffersLocked(thread);
   return 0U;
 }
@@ -139,7 +121,7 @@ size_t BumpPointerSpace::RevokeAllThreadLocalBuffers() {
 
 void BumpPointerSpace::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
   if (kIsDebugBuild) {
-    MutexLock mu(Thread::Current(), lock_);
+    MutexLock mu(Thread::Current(), block_lock_);
     DCHECK(!thread->HasTlab());
   }
 }
@@ -187,7 +169,7 @@ uint64_t BumpPointerSpace::GetBytesAllocated() {
   MutexLock mu(self, *Locks::runtime_shutdown_lock_);
   MutexLock mu2(self, *Locks::thread_list_lock_);
   std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
-  MutexLock mu3(Thread::Current(), lock_);
+  MutexLock mu3(Thread::Current(), block_lock_);
   // If we don't have any blocks, we don't have any thread local buffers. This check is required
   // since there can exist multiple bump pointer spaces which exist at the same time.
   if (!block_sizes_.empty()) {
@@ -205,7 +187,7 @@ uint64_t BumpPointerSpace::GetObjectsAllocated() {
   MutexLock mu(self, *Locks::runtime_shutdown_lock_);
   MutexLock mu2(self, *Locks::thread_list_lock_);
   std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
-  MutexLock mu3(Thread::Current(), lock_);
+  MutexLock mu3(Thread::Current(), block_lock_);
   // If we don't have any blocks, we don't have any thread local buffers. This check is required
   // since there can exist multiple bump pointer spaces which exist at the same time.
   if (!block_sizes_.empty()) {
@@ -223,7 +205,7 @@ void BumpPointerSpace::RevokeThreadLocalBuffersLocked(Thread* thread) {
 }
 
 bool BumpPointerSpace::AllocNewTlab(Thread* self, size_t bytes) {
-  MutexLock mu(Thread::Current(), lock_);
+  MutexLock mu(Thread::Current(), block_lock_);
   RevokeThreadLocalBuffersLocked(self);
   uint8_t* start = AllocBlock(bytes);
   if (start == nullptr) {
@@ -263,7 +245,7 @@ uint8_t* BumpPointerSpace::AlignEnd(Thread* self, size_t alignment) {
   end_.store(aligned_end, std::memory_order_relaxed);
   // If we have blocks after the main one. Then just add the diff to the last
   // block.
-  MutexLock mu(self, lock_);
+  MutexLock mu(self, block_lock_);
   if (!block_sizes_.empty()) {
     block_sizes_.back() += diff;
   }
@@ -273,7 +255,7 @@ uint8_t* BumpPointerSpace::AlignEnd(Thread* self, size_t alignment) {
 
 std::vector<size_t>* BumpPointerSpace::GetBlockSizes(Thread* self, size_t* main_block_size) {
   std::vector<size_t>* block_sizes = nullptr;
-  MutexLock mu(self, lock_);
+  MutexLock mu(self, block_lock_);
   if (!block_sizes_.empty()) {
     block_sizes = new std::vector<size_t>(block_sizes_.begin(), block_sizes_.end());
   } else {
@@ -286,7 +268,7 @@ std::vector<size_t>* BumpPointerSpace::GetBlockSizes(Thread* self, size_t* main_block_size) {
 
 void BumpPointerSpace::SetBlockSizes(Thread* self,
                                      const size_t main_block_size,
                                      const size_t first_valid_idx) {
-  MutexLock mu(self, lock_);
+  MutexLock mu(self, block_lock_);
   main_block_size_ = main_block_size;
   if (!block_sizes_.empty()) {
     block_sizes_.erase(block_sizes_.begin(), block_sizes_.begin() + first_valid_idx);
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index c2cac13a1f..bba171109d 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -88,14 +88,6 @@ class BumpPointerSpace final : public ContinuousMemMapAllocSpace {
     growth_end_ = Limit();
   }
 
-  // Attempts to clamp the space limit to 'new_capacity'. If not possible, then
-  // clamps to whatever possible. Returns the new capacity. 'lock_' is used to
-  // ensure that TLAB allocations, which are the only ones which may be happening
-  // concurrently with this function are synchronized. The other Alloc* functions
-  // are either used in single-threaded mode, or when used in multi-threaded mode,
-  // then the space is used by GCs (like SS) which don't have clamping implemented.
-  size_t ClampGrowthLimit(size_t new_capacity) REQUIRES(!lock_);
-
   // Override capacity so that we only return the possibly limited capacity
   size_t Capacity() const override {
     return growth_end_ - begin_;
@@ -111,21 +103,21 @@ class BumpPointerSpace final : public ContinuousMemMapAllocSpace {
   }
 
   // Reset the space to empty.
-  void Clear() override REQUIRES(!lock_);
+  void Clear() override REQUIRES(!block_lock_);
 
   void Dump(std::ostream& os) const override;
 
-  size_t RevokeThreadLocalBuffers(Thread* thread) override REQUIRES(!lock_);
+  size_t RevokeThreadLocalBuffers(Thread* thread) override REQUIRES(!block_lock_);
   size_t RevokeAllThreadLocalBuffers() override
-      REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !lock_);
-  void AssertThreadLocalBuffersAreRevoked(Thread* thread) REQUIRES(!lock_);
+      REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !block_lock_);
+  void AssertThreadLocalBuffersAreRevoked(Thread* thread) REQUIRES(!block_lock_);
   void AssertAllThreadLocalBuffersAreRevoked()
-      REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !lock_);
+      REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !block_lock_);
   uint64_t GetBytesAllocated() override REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!*Locks::runtime_shutdown_lock_, !*Locks::thread_list_lock_, !lock_);
+      REQUIRES(!*Locks::runtime_shutdown_lock_, !*Locks::thread_list_lock_, !block_lock_);
   uint64_t GetObjectsAllocated() override REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!*Locks::runtime_shutdown_lock_, !*Locks::thread_list_lock_, !lock_);
+      REQUIRES(!*Locks::runtime_shutdown_lock_, !*Locks::thread_list_lock_, !block_lock_);
   // Return the pre-determined allocated object count. This could be beneficial
   // when we know that all the TLABs are revoked.
   int32_t GetAccumulatedObjectsAllocated() REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -143,7 +135,7 @@ class BumpPointerSpace final : public ContinuousMemMapAllocSpace {
   BumpPointerSpace(const std::string& name, uint8_t* begin, uint8_t* limit);
 
   // Allocate a new TLAB, returns false if the allocation failed.
-  bool AllocNewTlab(Thread* self, size_t bytes) REQUIRES(!lock_);
+  bool AllocNewTlab(Thread* self, size_t bytes) REQUIRES(!block_lock_);
 
   BumpPointerSpace* AsBumpPointerSpace() override {
     return this;
@@ -151,7 +143,9 @@ class BumpPointerSpace final : public ContinuousMemMapAllocSpace {
 
   // Go through all of the blocks and visit the continuous objects.
   template <typename Visitor>
-  ALWAYS_INLINE void Walk(Visitor&& visitor) REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!lock_);
+  ALWAYS_INLINE void Walk(Visitor&& visitor)
+      REQUIRES_SHARED(Locks::mutator_lock_)
+      REQUIRES(!block_lock_);
 
   accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() override;
 
@@ -171,27 +165,27 @@ class BumpPointerSpace final : public ContinuousMemMapAllocSpace {
   BumpPointerSpace(const std::string& name, MemMap&& mem_map);
 
   // Allocate a raw block of bytes.
-  uint8_t* AllocBlock(size_t bytes) REQUIRES(lock_);
-  void RevokeThreadLocalBuffersLocked(Thread* thread) REQUIRES(lock_);
+  uint8_t* AllocBlock(size_t bytes) REQUIRES(block_lock_);
+  void RevokeThreadLocalBuffersLocked(Thread* thread) REQUIRES(block_lock_);
 
   // The main block is an unbounded block where objects go when there are no other blocks. This
   // enables us to maintain tightly packed objects when you are not using thread local buffers for
   // allocation. The main block starts at the space Begin().
-  void UpdateMainBlock() REQUIRES(lock_);
+  void UpdateMainBlock() REQUIRES(block_lock_);
 
   uint8_t* growth_end_;
   AtomicInteger objects_allocated_;  // Accumulated from revoked thread local regions.
   AtomicInteger bytes_allocated_;  // Accumulated from revoked thread local regions.
-  Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+  Mutex block_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
   // The objects at the start of the space are stored in the main block.
-  size_t main_block_size_ GUARDED_BY(lock_);
+  size_t main_block_size_ GUARDED_BY(block_lock_);
   // List of block sizes (in bytes) after the main-block. Needed for Walk().
   // If empty then the space has only one long continuous block. Each TLAB
   // allocation has one entry in this deque.
   // Keeping block-sizes off-heap simplifies sliding compaction algorithms.
   // The compaction algorithm should ideally compact all objects into the main
   // block, thereby enabling erasing corresponding entries from here.
-  std::deque<size_t> block_sizes_ GUARDED_BY(lock_);
+  std::deque<size_t> block_sizes_ GUARDED_BY(block_lock_);
 
  private:
   // Return the object which comes after obj, while ensuring alignment.
@@ -200,7 +194,7 @@ class BumpPointerSpace final : public ContinuousMemMapAllocSpace {
 
   // Return a vector of block sizes on the space. Required by MarkCompact GC for
   // walking black objects allocated after marking phase.
-  std::vector<size_t>* GetBlockSizes(Thread* self, size_t* main_block_size) REQUIRES(!lock_);
+  std::vector<size_t>* GetBlockSizes(Thread* self, size_t* main_block_size) REQUIRES(!block_lock_);
 
   // Once the MarkCompact decides the post-compact layout of the space in the
   // pre-compaction pause, it calls this function to update the block sizes. It is
@@ -208,7 +202,7 @@ class BumpPointerSpace final : public ContinuousMemMapAllocSpace {
   // into itself, and the index of first unconsumed block. This works as all the
   // block sizes are ordered. Also updates 'end_' to reflect the change.
   void SetBlockSizes(Thread* self, const size_t main_block_size, const size_t first_valid_idx)
-      REQUIRES(!lock_, Locks::mutator_lock_);
+      REQUIRES(!block_lock_, Locks::mutator_lock_);
 
   // Align end to the given alignment. This is done in MarkCompact GC when
   // mutators are suspended so that upcoming TLAB allocations start with a new
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 88543a0507..80ed9b356d 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -390,27 +390,6 @@ FreeListSpace::FreeListSpace(const std::string& name,
   allocation_info_ = reinterpret_cast<AllocationInfo*>(allocation_info_map_.Begin());
 }
 
-void FreeListSpace::ClampGrowthLimit(size_t new_capacity) {
-  MutexLock mu(Thread::Current(), lock_);
-  new_capacity = RoundUp(new_capacity, kAlignment);
-  CHECK_LE(new_capacity, Size());
-  size_t diff = Size() - new_capacity;
-  // If we don't have enough free-bytes at the end to clamp, then do the best
-  // that we can.
-  if (diff > free_end_) {
-    new_capacity = Size() - free_end_;
-    diff = free_end_;
-  }
-
-  size_t alloc_info_size = sizeof(AllocationInfo) * (new_capacity / kAlignment);
-  allocation_info_map_.SetSize(alloc_info_size);
-  mem_map_.SetSize(new_capacity);
-  // We don't need to change anything in 'free_blocks_' as the free block at
-  // the end of the space isn't in there.
-  free_end_ -= diff;
-  end_ -= diff;
-}
-
 FreeListSpace::~FreeListSpace() {}
 
 void FreeListSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) {
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index 7611784080..d94f467f6f 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -115,8 +115,6 @@ class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
   // GetRangeAtomic returns Begin() and End() atomically, that is, it never returns Begin() and
   // End() from different allocations.
   virtual std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const = 0;
-  // Clamp the space size to the given capacity.
-  virtual void ClampGrowthLimit(size_t capacity) = 0;
 
  protected:
   explicit LargeObjectSpace(const std::string& name, uint8_t* begin, uint8_t* end,
@@ -166,7 +164,6 @@ class LargeObjectMapSpace : public LargeObjectSpace {
   bool Contains(const mirror::Object* obj) const override NO_THREAD_SAFETY_ANALYSIS;
   void ForEachMemMap(std::function<void(const MemMap&)> func) const override REQUIRES(!lock_);
   std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const override REQUIRES(!lock_);
-  void ClampGrowthLimit(size_t capacity ATTRIBUTE_UNUSED) override {}
 
  protected:
   struct LargeObject {
@@ -202,7 +199,6 @@ class FreeListSpace final : public LargeObjectSpace {
   void Dump(std::ostream& os) const override REQUIRES(!lock_);
   void ForEachMemMap(std::function<void(const MemMap&)> func) const override REQUIRES(!lock_);
   std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const override REQUIRES(!lock_);
-  void ClampGrowthLimit(size_t capacity) override REQUIRES(!lock_);
 
  protected:
   FreeListSpace(const std::string& name, MemMap&& mem_map, uint8_t* begin, uint8_t* end);
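Beyond the space clamping itself, the revert also removes a deferred-clamp handshake between `MarkCompact::ClampGrowthLimit()` (run at fork time) and `MaybeClampGcStructures()` (run at the start of the next GC cycle). Below is a rough, self-contained sketch of that two-phase protocol with hypothetical names; the real code tracked the transition in `clamp_info_map_status_` and clamped `from_space_map_`, `live_words_bitmap_`, and `info_map_`.

```cpp
#include <cstdint>
#include <iostream>

// Simplified version of the reverted ClampInfoStatus state machine.
enum class ClampInfoStatus : uint8_t { kNotDone, kPending, kFinished };

struct CollectorSketch {
  ClampInfoStatus status = ClampInfoStatus::kNotDone;

  // Phase 1, at fork time: shrink only the large, address-space-critical
  // mappings now, and mark the small GC metadata for deferred clamping.
  void ClampGrowthLimit() {
    // ... shrink moving-space / from-space mappings here ...
    status = ClampInfoStatus::kPending;
  }

  // Phase 2, at the start of the next GC cycle: finish the cheap metadata
  // clamping off the fork-time critical path.
  void MaybeClampGcStructures() {
    if (status == ClampInfoStatus::kPending) {
      // ... shrink bitmaps and the chunk-info map here ...
      status = ClampInfoStatus::kFinished;
    }
  }
};

int main() {
  CollectorSketch gc;
  gc.ClampGrowthLimit();        // Zygote fork: clamp big mappings immediately.
  gc.MaybeClampGcStructures();  // Next GC: clamp the remaining metadata.
  std::cout << (gc.status == ClampInfoStatus::kFinished ? "finished" : "pending") << "\n";
}
```

The split existed because, per the removed comments, there is no real benefit to clamping the small GC metadata synchronously during app forking, so that work was deferred to the GC thread.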