Implement clamp-growth-limit for userfaultfd GC
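
A minimal standalone sketch of the deferred-clamping flow (GcModel and its
fields are illustrative stand-ins, not ART types): the heap clamps the spaces
eagerly when the growth limit is clamped and only marks the GC's info map as
pending; the next GC pause finishes the job via MaybeClampGcStructures().

    #include <cstddef>
    #include <cstdio>

    // Hypothetical model: NotDone -> Pending (set at clamp time) -> Finished
    // (set in the next marking pause).
    enum class ClampInfoStatus { kClampInfoNotDone, kClampInfoPending, kClampInfoFinished };

    struct GcModel {
      size_t moving_space_capacity = 256u << 20;
      size_t info_map_size = 4u << 20;
      ClampInfoStatus status = ClampInfoStatus::kClampInfoNotDone;

      void ClampGrowthLimit(size_t new_capacity) {
        if (new_capacity < moving_space_capacity) {
          moving_space_capacity = new_capacity;          // clamped eagerly
          status = ClampInfoStatus::kClampInfoPending;   // info map deferred
        }
      }

      void MaybeClampGcStructures() {  // called at the start of marking
        if (status == ClampInfoStatus::kClampInfoPending) {
          info_map_size /= 2;          // stand-in for re-laying-out info_map_
          status = ClampInfoStatus::kClampInfoFinished;
        }
      }
    };

    int main() {
      GcModel gc;
      gc.ClampGrowthLimit(128u << 20);
      gc.MaybeClampGcStructures();
      std::printf("capacity=%zu info_map=%zu\n", gc.moving_space_capacity, gc.info_map_size);
    }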
Test: manual
Bug: 160737021
Change-Id: I1e6140ecbe8312528c6960cb2b190aaa99236caa
diff --git a/runtime/gc/accounting/bitmap.cc b/runtime/gc/accounting/bitmap.cc
index bd10958..4e4109d 100644
--- a/runtime/gc/accounting/bitmap.cc
+++ b/runtime/gc/accounting/bitmap.cc
@@ -33,12 +33,12 @@
return new Bitmap(std::move(mem_map), num_bits);
}
-Bitmap::Bitmap(MemMap&& mem_map, size_t bitmap_size)
+Bitmap::Bitmap(MemMap&& mem_map, size_t num_bits)
: mem_map_(std::move(mem_map)),
bitmap_begin_(reinterpret_cast<uintptr_t*>(mem_map_.Begin())),
- bitmap_size_(bitmap_size) {
+ bitmap_numbits_(num_bits) {
CHECK(bitmap_begin_ != nullptr);
- CHECK_NE(bitmap_size, 0U);
+ CHECK_NE(num_bits, 0U);
}
Bitmap::~Bitmap() {
diff --git a/runtime/gc/accounting/bitmap.h b/runtime/gc/accounting/bitmap.h
index 06398d6..f413243 100644
--- a/runtime/gc/accounting/bitmap.h
+++ b/runtime/gc/accounting/bitmap.h
@@ -19,10 +19,12 @@
#include <limits.h>
#include <stdint.h>
+
#include <memory>
#include <set>
#include <vector>
+#include "base/bit_utils.h"
#include "base/locks.h"
#include "base/mem_map.h"
#include "runtime_globals.h"
@@ -86,9 +88,7 @@
}
// Size of our bitmap in bits.
- size_t BitmapSize() const {
- return bitmap_size_;
- }
+ size_t BitmapSize() const { return bitmap_numbits_; }
// Check that a bit index is valid with a DCHECK.
ALWAYS_INLINE void CheckValidBitIndex(size_t bit_index) const {
@@ -118,7 +118,7 @@
uintptr_t* const bitmap_begin_;
// Number of bits in the bitmap.
- const size_t bitmap_size_;
+ size_t bitmap_numbits_;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(Bitmap);
@@ -133,6 +133,14 @@
static MemoryRangeBitmap* CreateFromMemMap(
MemMap&& mem_map, uintptr_t cover_begin, size_t num_bits);
+ void SetBitmapSize(size_t bytes) {
+ CHECK_ALIGNED(bytes, kAlignment);
+ bitmap_numbits_ = bytes / kAlignment;
+ size_t rounded_size =
+ RoundUp(bitmap_numbits_, kBitsPerBitmapWord) / kBitsPerBitmapWord * sizeof(uintptr_t);
+ mem_map_.SetSize(rounded_size);
+ }
+
// Beginning of the memory range that the bitmap covers.
ALWAYS_INLINE uintptr_t CoverBegin() const {
return cover_begin_;
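
For reference, a standalone sketch of the size arithmetic behind SetBitmapSize()
(the kAlignment value below is an assumed placeholder): the covered byte range
determines the bit count, which is rounded up to whole bitmap words to get the
number of bytes the backing MemMap must retain.

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    constexpr size_t kAlignment = 8;                              // assumed bytes covered per bit
    constexpr size_t kBitsPerBitmapWord = sizeof(uintptr_t) * 8;  // bits per bitmap word

    constexpr size_t RoundUp(size_t x, size_t n) { return ((x + n - 1) / n) * n; }

    // Same computation as MemoryRangeBitmap::SetBitmapSize(): bytes -> bits ->
    // whole words -> bytes of backing storage.
    size_t BitmapBackingBytes(size_t covered_bytes) {
      size_t num_bits = covered_bytes / kAlignment;
      return RoundUp(num_bits, kBitsPerBitmapWord) / kBitsPerBitmapWord * sizeof(uintptr_t);
    }

    int main() {
      std::printf("%zu backing bytes for 1 MiB covered\n", BitmapBackingBytes(1u << 20));
    }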
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index dbbea3a..326901c 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -181,9 +181,9 @@
}
void SetHeapSize(size_t bytes) {
- // TODO: Un-map the end of the mem map.
heap_limit_ = heap_begin_ + bytes;
- bitmap_size_ = OffsetToIndex(bytes) * sizeof(intptr_t);
+ bitmap_size_ = ComputeBitmapSize(bytes);
+ mem_map_.SetSize(bitmap_size_);
CHECK_EQ(HeapSize(), bytes);
}
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index 172acb1..aee5b29 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -378,6 +378,24 @@
return (gUffdFeatures & kUffdFeaturesForSigbus) == kUffdFeaturesForSigbus;
}
+size_t MarkCompact::InitializeInfoMap(uint8_t* p, size_t moving_space_sz) {
+ size_t nr_moving_pages = moving_space_sz / kPageSize;
+
+ chunk_info_vec_ = reinterpret_cast<uint32_t*>(p);
+ vector_length_ = moving_space_sz / kOffsetChunkSize;
+ size_t total = vector_length_ * sizeof(uint32_t);
+
+ first_objs_non_moving_space_ = reinterpret_cast<ObjReference*>(p + total);
+ total += heap_->GetNonMovingSpace()->Capacity() / kPageSize * sizeof(ObjReference);
+
+ first_objs_moving_space_ = reinterpret_cast<ObjReference*>(p + total);
+ total += nr_moving_pages * sizeof(ObjReference);
+
+ pre_compact_offset_moving_space_ = reinterpret_cast<uint32_t*>(p + total);
+ total += nr_moving_pages * sizeof(uint32_t);
+ return total;
+}
+
MarkCompact::MarkCompact(Heap* heap)
: GarbageCollector(heap, "concurrent mark compact"),
gc_barrier_(0),
@@ -395,7 +413,8 @@
uffd_minor_fault_supported_(false),
use_uffd_sigbus_(IsSigbusFeatureAvailable()),
minor_fault_initialized_(false),
- map_linear_alloc_shared_(false) {
+ map_linear_alloc_shared_(false),
+ clamp_info_map_status_(ClampInfoStatus::kClampInfoNotDone) {
if (kIsDebugBuild) {
updated_roots_.reset(new std::unordered_set<void*>());
}
@@ -433,18 +452,8 @@
if (UNLIKELY(!info_map_.IsValid())) {
LOG(FATAL) << "Failed to allocate concurrent mark-compact chunk-info vector: " << err_msg;
} else {
- uint8_t* p = info_map_.Begin();
- chunk_info_vec_ = reinterpret_cast<uint32_t*>(p);
- vector_length_ = chunk_info_vec_size;
-
- p += chunk_info_vec_size * sizeof(uint32_t);
- first_objs_non_moving_space_ = reinterpret_cast<ObjReference*>(p);
-
- p += nr_non_moving_pages * sizeof(ObjReference);
- first_objs_moving_space_ = reinterpret_cast<ObjReference*>(p);
-
- p += nr_moving_pages * sizeof(ObjReference);
- pre_compact_offset_moving_space_ = reinterpret_cast<uint32_t*>(p);
+ size_t total = InitializeInfoMap(info_map_.Begin(), moving_space_size);
+ DCHECK_EQ(total, info_map_.Size());
}
size_t moving_space_alignment = BestPageTableAlignment(moving_space_size);
@@ -572,6 +581,49 @@
is_shared);
}
+void MarkCompact::ClampGrowthLimit(size_t new_capacity) {
+ // From-space is the same size as the moving space in virtual memory.
+ // However, if it is mapped in the address space above 4GB, then we don't
+ // need to clamp it synchronously.
+#if defined(__LP64__)
+ constexpr bool kClampFromSpace = kObjPtrPoisoning;
+#else
+ constexpr bool kClampFromSpace = true;
+#endif
+ size_t old_capacity = bump_pointer_space_->Capacity();
+ new_capacity = bump_pointer_space_->ClampGrowthLimit(new_capacity);
+ if (new_capacity < old_capacity) {
+ CHECK(from_space_map_.IsValid());
+ if (kClampFromSpace) {
+ from_space_map_.SetSize(new_capacity);
+ }
+ // NOTE: We usually don't use shadow_to_space_map_, so this condition will
+ // mostly be false.
+ if (shadow_to_space_map_.IsValid() && shadow_to_space_map_.Size() > new_capacity) {
+ shadow_to_space_map_.SetSize(new_capacity);
+ }
+ clamp_info_map_status_ = ClampInfoStatus::kClampInfoPending;
+ }
+}
+
+void MarkCompact::MaybeClampGcStructures() {
+ size_t moving_space_size = bump_pointer_space_->Capacity();
+ DCHECK(thread_running_gc_ != nullptr);
+ if (UNLIKELY(clamp_info_map_status_ == ClampInfoStatus::kClampInfoPending)) {
+ CHECK(from_space_map_.IsValid());
+ if (from_space_map_.Size() > moving_space_size) {
+ from_space_map_.SetSize(moving_space_size);
+ }
+ // Clamp the bitmaps and other GC data structures.
+ live_words_bitmap_->SetBitmapSize(moving_space_size);
+ size_t set_size = InitializeInfoMap(info_map_.Begin(), moving_space_size);
+ CHECK_LT(set_size, info_map_.Size());
+ info_map_.SetSize(set_size);
+
+ clamp_info_map_status_ = ClampInfoStatus::kClampInfoFinished;
+ }
+}
+
void MarkCompact::PrepareCardTableForMarking(bool clear_alloc_space_cards) {
TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
accounting::CardTable* const card_table = heap_->GetCardTable();
@@ -3915,6 +3967,7 @@
TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
DCHECK_EQ(thread_running_gc_, Thread::Current());
WriterMutexLock mu(thread_running_gc_, *Locks::heap_bitmap_lock_);
+ MaybeClampGcStructures();
PrepareCardTableForMarking(/*clear_alloc_space_cards*/ true);
MarkZygoteLargeObjects();
MarkRoots(
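
A standalone sketch of the layout that InitializeInfoMap() carves out of
info_map_ (kPageSize, kOffsetChunkSize and the sizes below are assumed
placeholders; only the arithmetic mirrors the function): a chunk-info vector,
first-object arrays for the non-moving and moving spaces, and per-page
pre-compact offsets, laid out back to back.

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    constexpr size_t kPageSize = 4096;
    constexpr size_t kOffsetChunkSize = 256;                 // assumed bytes per chunk-info entry
    constexpr size_t kObjReferenceSize = sizeof(uint32_t);   // compressed reference

    size_t InfoMapBytes(size_t moving_space_sz, size_t non_moving_capacity) {
      size_t nr_moving_pages = moving_space_sz / kPageSize;
      size_t total = (moving_space_sz / kOffsetChunkSize) * sizeof(uint32_t);  // chunk_info_vec_
      total += non_moving_capacity / kPageSize * kObjReferenceSize;            // first_objs_non_moving_space_
      total += nr_moving_pages * kObjReferenceSize;                            // first_objs_moving_space_
      total += nr_moving_pages * sizeof(uint32_t);                             // pre_compact_offset_moving_space_
      return total;
    }

    int main() {
      // With a clamped moving space, the same layout is recomputed over a smaller size.
      std::printf("%zu bytes\n", InfoMapBytes(/*moving*/ 256u << 20, /*non-moving*/ 64u << 20));
    }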
diff --git a/runtime/gc/collector/mark_compact.h b/runtime/gc/collector/mark_compact.h
index 3f16d06..97709b8 100644
--- a/runtime/gc/collector/mark_compact.h
+++ b/runtime/gc/collector/mark_compact.h
@@ -76,6 +76,7 @@
void RunPhases() override REQUIRES(!Locks::mutator_lock_, !lock_);
+ void ClampGrowthLimit(size_t new_capacity) REQUIRES(Locks::heap_bitmap_lock_);
// Updated before (or in) pre-compaction pause and is accessed only in the
// pause or during concurrent compaction. The flag is reset in next GC cycle's
// InitializePhase(). Therefore, it's safe to update without any memory ordering.
@@ -166,6 +167,13 @@
kProcessedAndMapped = 6 // Processed and mapped. For SIGBUS.
};
+ // Different heap clamping states.
+ enum class ClampInfoStatus : uint8_t {
+ kClampInfoNotDone,
+ kClampInfoPending,
+ kClampInfoFinished
+ };
+
private:
using ObjReference = mirror::CompressedReference<mirror::Object>;
// Number of bits (live-words) covered by a single chunk-info (below)
@@ -191,6 +199,7 @@
static constexpr uint32_t kBitmapWordsPerVectorWord =
kBitsPerVectorWord / Bitmap::kBitsPerBitmapWord;
static_assert(IsPowerOfTwo(kBitmapWordsPerVectorWord));
+ using MemRangeBitmap::SetBitmapSize;
static LiveWordsBitmap* Create(uintptr_t begin, uintptr_t end);
// Return offset (within the indexed chunk-info) of the nth live word.
@@ -526,6 +535,14 @@
uint8_t* shadow_page,
Atomic<PageState>& state,
bool page_touched);
+ // Clamps 'info_map_' and the other GC data structures, which are small and/or
+ // mapped above the 4GB boundary, so there is no real benefit to clamping them
+ // synchronously during app forking. Clamping happens only if
+ // clamp_info_map_status_ is kClampInfoPending, which ClampGrowthLimit() sets.
+ void MaybeClampGcStructures() REQUIRES(Locks::heap_bitmap_lock_);
+ // Initializes all the info-map related fields of this GC and returns the
+ // total size of the structures laid out in the info-map.
+ size_t InitializeInfoMap(uint8_t* p, size_t moving_space_sz);
// For checkpoints
Barrier gc_barrier_;
@@ -761,6 +778,10 @@
// non-zygote processes during first GC, which sets up everyting for using
// minor-fault from next GC.
bool map_linear_alloc_shared_;
+ // Clamping status of `info_map_`. Initialized to 'NotDone'. Once the heap is
+ // clamped but clamping of info_map_ is deferred, we set it to 'Pending'. Once
+ // 'info_map_' is also clamped, we set it to 'Finished'.
+ ClampInfoStatus clamp_info_map_status_;
class FlipCallback;
class ThreadFlipVisitor;
@@ -781,6 +802,7 @@
};
std::ostream& operator<<(std::ostream& os, MarkCompact::PageState value);
+std::ostream& operator<<(std::ostream& os, MarkCompact::ClampInfoStatus value);
} // namespace collector
} // namespace gc
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 8d603ee..a5cfb42 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -3817,10 +3817,18 @@
malloc_space->ClampGrowthLimit();
}
}
+ if (large_object_space_ != nullptr) {
+ large_object_space_->ClampGrowthLimit(capacity_);
+ }
if (collector_type_ == kCollectorTypeCC) {
DCHECK(region_space_ != nullptr);
// Twice the capacity as CC needs extra space for evacuating objects.
region_space_->ClampGrowthLimit(2 * capacity_);
+ } else if (collector_type_ == kCollectorTypeCMC) {
+ DCHECK(gUseUserfaultfd);
+ DCHECK_NE(mark_compact_, nullptr);
+ DCHECK_NE(bump_pointer_space_, nullptr);
+ mark_compact_->ClampGrowthLimit(capacity_);
}
// This space isn't added for performance reasons.
if (main_space_backup_.get() != nullptr) {
diff --git a/runtime/gc/space/bump_pointer_space-walk-inl.h b/runtime/gc/space/bump_pointer_space-walk-inl.h
index a978f62..89e42bc 100644
--- a/runtime/gc/space/bump_pointer_space-walk-inl.h
+++ b/runtime/gc/space/bump_pointer_space-walk-inl.h
@@ -49,7 +49,7 @@
};
{
- MutexLock mu(Thread::Current(), block_lock_);
+ MutexLock mu(Thread::Current(), lock_);
// If we have 0 blocks then we need to update the main header since we have bump pointer style
// allocation into an unbounded region (actually bounded by Capacity()).
if (block_sizes_.empty()) {
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index 7753f73..c357a87 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -45,15 +45,12 @@
}
BumpPointerSpace::BumpPointerSpace(const std::string& name, uint8_t* begin, uint8_t* limit)
- : ContinuousMemMapAllocSpace(name,
- MemMap::Invalid(),
- begin,
- begin,
- limit,
- kGcRetentionPolicyAlwaysCollect),
+ : ContinuousMemMapAllocSpace(
+ name, MemMap::Invalid(), begin, begin, limit, kGcRetentionPolicyAlwaysCollect),
growth_end_(limit),
- objects_allocated_(0), bytes_allocated_(0),
- block_lock_("Block lock"),
+ objects_allocated_(0),
+ bytes_allocated_(0),
+ lock_("Bump-pointer space lock"),
main_block_size_(0) {
// This constructor gets called only from Heap::PreZygoteFork(), which
// doesn't require a mark_bitmap.
@@ -67,8 +64,9 @@
mem_map.End(),
kGcRetentionPolicyAlwaysCollect),
growth_end_(mem_map_.End()),
- objects_allocated_(0), bytes_allocated_(0),
- block_lock_("Block lock", kBumpPointerSpaceBlockLock),
+ objects_allocated_(0),
+ bytes_allocated_(0),
+ lock_("Bump-pointer space lock", kBumpPointerSpaceBlockLock),
main_block_size_(0) {
mark_bitmap_ =
accounting::ContinuousSpaceBitmap::Create("bump-pointer space live bitmap",
@@ -87,14 +85,34 @@
SetEnd(Begin());
objects_allocated_.store(0, std::memory_order_relaxed);
bytes_allocated_.store(0, std::memory_order_relaxed);
- growth_end_ = Limit();
{
- MutexLock mu(Thread::Current(), block_lock_);
+ MutexLock mu(Thread::Current(), lock_);
+ growth_end_ = Limit();
block_sizes_.clear();
main_block_size_ = 0;
}
}
+size_t BumpPointerSpace::ClampGrowthLimit(size_t new_capacity) {
+ CHECK(gUseUserfaultfd);
+ MutexLock mu(Thread::Current(), lock_);
+ CHECK_EQ(growth_end_, Limit());
+ uint8_t* end = End();
+ CHECK_LE(end, growth_end_);
+ size_t free_capacity = growth_end_ - end;
+ size_t clamp_size = Capacity() - new_capacity;
+ if (clamp_size > free_capacity) {
+ new_capacity += clamp_size - free_capacity;
+ }
+ SetLimit(Begin() + new_capacity);
+ growth_end_ = Limit();
+ GetMemMap()->SetSize(new_capacity);
+ if (GetMarkBitmap()->HeapBegin() != 0) {
+ GetMarkBitmap()->SetHeapSize(new_capacity);
+ }
+ return new_capacity;
+}
+
void BumpPointerSpace::Dump(std::ostream& os) const {
os << GetName() << " "
<< reinterpret_cast<void*>(Begin()) << "-" << reinterpret_cast<void*>(End()) << " - "
@@ -102,7 +120,7 @@
}
size_t BumpPointerSpace::RevokeThreadLocalBuffers(Thread* thread) {
- MutexLock mu(Thread::Current(), block_lock_);
+ MutexLock mu(Thread::Current(), lock_);
RevokeThreadLocalBuffersLocked(thread);
return 0U;
}
@@ -121,7 +139,7 @@
void BumpPointerSpace::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
if (kIsDebugBuild) {
- MutexLock mu(Thread::Current(), block_lock_);
+ MutexLock mu(Thread::Current(), lock_);
DCHECK(!thread->HasTlab());
}
}
@@ -169,7 +187,7 @@
MutexLock mu(self, *Locks::runtime_shutdown_lock_);
MutexLock mu2(self, *Locks::thread_list_lock_);
std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
- MutexLock mu3(Thread::Current(), block_lock_);
+ MutexLock mu3(Thread::Current(), lock_);
// If we don't have any blocks, we don't have any thread local buffers. This check is required
// since there can exist multiple bump pointer spaces which exist at the same time.
if (!block_sizes_.empty()) {
@@ -187,7 +205,7 @@
MutexLock mu(self, *Locks::runtime_shutdown_lock_);
MutexLock mu2(self, *Locks::thread_list_lock_);
std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
- MutexLock mu3(Thread::Current(), block_lock_);
+ MutexLock mu3(Thread::Current(), lock_);
// If we don't have any blocks, we don't have any thread local buffers. This check is required
// since there can exist multiple bump pointer spaces which exist at the same time.
if (!block_sizes_.empty()) {
@@ -205,7 +223,7 @@
}
bool BumpPointerSpace::AllocNewTlab(Thread* self, size_t bytes) {
- MutexLock mu(Thread::Current(), block_lock_);
+ MutexLock mu(Thread::Current(), lock_);
RevokeThreadLocalBuffersLocked(self);
uint8_t* start = AllocBlock(bytes);
if (start == nullptr) {
@@ -245,7 +263,7 @@
end_.store(aligned_end, std::memory_order_relaxed);
// If we have blocks after the main one. Then just add the diff to the last
// block.
- MutexLock mu(self, block_lock_);
+ MutexLock mu(self, lock_);
if (!block_sizes_.empty()) {
block_sizes_.back() += diff;
}
@@ -255,7 +273,7 @@
std::vector<size_t>* BumpPointerSpace::GetBlockSizes(Thread* self, size_t* main_block_size) {
std::vector<size_t>* block_sizes = nullptr;
- MutexLock mu(self, block_lock_);
+ MutexLock mu(self, lock_);
if (!block_sizes_.empty()) {
block_sizes = new std::vector<size_t>(block_sizes_.begin(), block_sizes_.end());
} else {
@@ -268,7 +286,7 @@
void BumpPointerSpace::SetBlockSizes(Thread* self,
const size_t main_block_size,
const size_t first_valid_idx) {
- MutexLock mu(self, block_lock_);
+ MutexLock mu(self, lock_);
main_block_size_ = main_block_size;
if (!block_sizes_.empty()) {
block_sizes_.erase(block_sizes_.begin(), block_sizes_.begin() + first_valid_idx);
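
A worked standalone sketch of the clamp arithmetic in
BumpPointerSpace::ClampGrowthLimit(): only the unused tail of the space can be
released, so a request below the currently used size is raised to it.

    #include <cstddef>
    #include <cstdio>

    size_t ClampedCapacity(size_t capacity, size_t used, size_t requested) {
      size_t free_capacity = capacity - used;      // growth_end_ - End()
      size_t clamp_size = capacity - requested;
      if (clamp_size > free_capacity) {
        requested += clamp_size - free_capacity;   // give back only the free tail
      }
      return requested;
    }

    int main() {
      // 256 MiB space with 200 MiB in use: requesting 128 MiB yields 200 MiB.
      std::printf("%zu MiB\n", ClampedCapacity(256, 200, 128));
    }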
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index bba1711..c2cac13 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -88,6 +88,14 @@
growth_end_ = Limit();
}
+ // Attempts to clamp the space limit to 'new_capacity'. If that is not possible,
+ // clamps as much as it can. Returns the new capacity. 'lock_' is used to
+ // synchronize with TLAB allocations, which are the only allocations that may
+ // happen concurrently with this function. The other Alloc* functions are either
+ // used in single-threaded mode or, when used in multi-threaded mode, the space
+ // is used by GCs (like SS) which don't implement clamping.
+ size_t ClampGrowthLimit(size_t new_capacity) REQUIRES(!lock_);
+
// Override capacity so that we only return the possibly limited capacity
size_t Capacity() const override {
return growth_end_ - begin_;
@@ -103,21 +111,21 @@
}
// Reset the space to empty.
- void Clear() override REQUIRES(!block_lock_);
+ void Clear() override REQUIRES(!lock_);
void Dump(std::ostream& os) const override;
- size_t RevokeThreadLocalBuffers(Thread* thread) override REQUIRES(!block_lock_);
+ size_t RevokeThreadLocalBuffers(Thread* thread) override REQUIRES(!lock_);
size_t RevokeAllThreadLocalBuffers() override
- REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !block_lock_);
- void AssertThreadLocalBuffersAreRevoked(Thread* thread) REQUIRES(!block_lock_);
+ REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !lock_);
+ void AssertThreadLocalBuffersAreRevoked(Thread* thread) REQUIRES(!lock_);
void AssertAllThreadLocalBuffersAreRevoked()
- REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !block_lock_);
+ REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !lock_);
uint64_t GetBytesAllocated() override REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!*Locks::runtime_shutdown_lock_, !*Locks::thread_list_lock_, !block_lock_);
+ REQUIRES(!*Locks::runtime_shutdown_lock_, !*Locks::thread_list_lock_, !lock_);
uint64_t GetObjectsAllocated() override REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!*Locks::runtime_shutdown_lock_, !*Locks::thread_list_lock_, !block_lock_);
+ REQUIRES(!*Locks::runtime_shutdown_lock_, !*Locks::thread_list_lock_, !lock_);
// Return the pre-determined allocated object count. This could be beneficial
// when we know that all the TLABs are revoked.
int32_t GetAccumulatedObjectsAllocated() REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -135,7 +143,7 @@
BumpPointerSpace(const std::string& name, uint8_t* begin, uint8_t* limit);
// Allocate a new TLAB, returns false if the allocation failed.
- bool AllocNewTlab(Thread* self, size_t bytes) REQUIRES(!block_lock_);
+ bool AllocNewTlab(Thread* self, size_t bytes) REQUIRES(!lock_);
BumpPointerSpace* AsBumpPointerSpace() override {
return this;
@@ -143,9 +151,7 @@
// Go through all of the blocks and visit the continuous objects.
template <typename Visitor>
- ALWAYS_INLINE void Walk(Visitor&& visitor)
- REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!block_lock_);
+ ALWAYS_INLINE void Walk(Visitor&& visitor) REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!lock_);
accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() override;
@@ -165,27 +171,27 @@
BumpPointerSpace(const std::string& name, MemMap&& mem_map);
// Allocate a raw block of bytes.
- uint8_t* AllocBlock(size_t bytes) REQUIRES(block_lock_);
- void RevokeThreadLocalBuffersLocked(Thread* thread) REQUIRES(block_lock_);
+ uint8_t* AllocBlock(size_t bytes) REQUIRES(lock_);
+ void RevokeThreadLocalBuffersLocked(Thread* thread) REQUIRES(lock_);
// The main block is an unbounded block where objects go when there are no other blocks. This
// enables us to maintain tightly packed objects when you are not using thread local buffers for
// allocation. The main block starts at the space Begin().
- void UpdateMainBlock() REQUIRES(block_lock_);
+ void UpdateMainBlock() REQUIRES(lock_);
uint8_t* growth_end_;
AtomicInteger objects_allocated_; // Accumulated from revoked thread local regions.
AtomicInteger bytes_allocated_; // Accumulated from revoked thread local regions.
- Mutex block_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
// The objects at the start of the space are stored in the main block.
- size_t main_block_size_ GUARDED_BY(block_lock_);
+ size_t main_block_size_ GUARDED_BY(lock_);
// List of block sizes (in bytes) after the main-block. Needed for Walk().
// If empty then the space has only one long continuous block. Each TLAB
// allocation has one entry in this deque.
// Keeping block-sizes off-heap simplifies sliding compaction algorithms.
// The compaction algorithm should ideally compact all objects into the main
// block, thereby enabling erasing corresponding entries from here.
- std::deque<size_t> block_sizes_ GUARDED_BY(block_lock_);
+ std::deque<size_t> block_sizes_ GUARDED_BY(lock_);
private:
// Return the object which comes after obj, while ensuring alignment.
@@ -194,7 +200,7 @@
// Return a vector of block sizes on the space. Required by MarkCompact GC for
// walking black objects allocated after marking phase.
- std::vector<size_t>* GetBlockSizes(Thread* self, size_t* main_block_size) REQUIRES(!block_lock_);
+ std::vector<size_t>* GetBlockSizes(Thread* self, size_t* main_block_size) REQUIRES(!lock_);
// Once the MarkCompact decides the post-compact layout of the space in the
// pre-compaction pause, it calls this function to update the block sizes. It is
@@ -202,7 +208,7 @@
// into itself, and the index of first unconsumed block. This works as all the
// block sizes are ordered. Also updates 'end_' to reflect the change.
void SetBlockSizes(Thread* self, const size_t main_block_size, const size_t first_valid_idx)
- REQUIRES(!block_lock_, Locks::mutator_lock_);
+ REQUIRES(!lock_, Locks::mutator_lock_);
// Align end to the given alignment. This is done in MarkCompact GC when
// mutators are suspended so that upcoming TLAB allocations start with a new
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 80ed9b3..88543a0 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -390,6 +390,27 @@
allocation_info_ = reinterpret_cast<AllocationInfo*>(allocation_info_map_.Begin());
}
+void FreeListSpace::ClampGrowthLimit(size_t new_capacity) {
+ MutexLock mu(Thread::Current(), lock_);
+ new_capacity = RoundUp(new_capacity, kAlignment);
+ CHECK_LE(new_capacity, Size());
+ size_t diff = Size() - new_capacity;
+ // If there aren't enough free bytes at the end to clamp by the requested
+ // amount, then clamp as much as possible.
+ if (diff > free_end_) {
+ new_capacity = Size() - free_end_;
+ diff = free_end_;
+ }
+
+ size_t alloc_info_size = sizeof(AllocationInfo) * (new_capacity / kAlignment);
+ allocation_info_map_.SetSize(alloc_info_size);
+ mem_map_.SetSize(new_capacity);
+ // We don't need to change anything in 'free_blocks_' as the free block at
+ // the end of the space isn't in there.
+ free_end_ -= diff;
+ end_ -= diff;
+}
+
FreeListSpace::~FreeListSpace() {}
void FreeListSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) {
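
Similarly, a standalone sketch of the arithmetic in
FreeListSpace::ClampGrowthLimit() (kAlignment below is an assumed placeholder):
the space can shrink only by the free region at its end (free_end_), so a
larger request is reduced to exactly that amount.

    #include <cstddef>
    #include <cstdio>

    constexpr size_t kAlignment = 4096;  // assumed
    constexpr size_t RoundUp(size_t x, size_t n) { return ((x + n - 1) / n) * n; }

    size_t ClampedLosCapacity(size_t size, size_t free_end, size_t requested) {
      requested = RoundUp(requested, kAlignment);
      if (size - requested > free_end) {
        requested = size - free_end;     // only the trailing free block can go
      }
      return requested;
    }

    int main() {
      // 64 MiB space with an 8 MiB free tail: requesting 32 MiB yields 56 MiB.
      std::printf("%zu bytes\n", ClampedLosCapacity(64u << 20, 8u << 20, 32u << 20));
    }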
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index d94f467..7611784 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -115,6 +115,8 @@
// GetRangeAtomic returns Begin() and End() atomically, that is, it never returns Begin() and
// End() from different allocations.
virtual std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const = 0;
+ // Clamp the space size to the given capacity.
+ virtual void ClampGrowthLimit(size_t capacity) = 0;
protected:
explicit LargeObjectSpace(const std::string& name, uint8_t* begin, uint8_t* end,
@@ -164,6 +166,7 @@
bool Contains(const mirror::Object* obj) const override NO_THREAD_SAFETY_ANALYSIS;
void ForEachMemMap(std::function<void(const MemMap&)> func) const override REQUIRES(!lock_);
std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const override REQUIRES(!lock_);
+ void ClampGrowthLimit(size_t capacity ATTRIBUTE_UNUSED) override {}
protected:
struct LargeObject {
@@ -199,6 +202,7 @@
void Dump(std::ostream& os) const override REQUIRES(!lock_);
void ForEachMemMap(std::function<void(const MemMap&)> func) const override REQUIRES(!lock_);
std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const override REQUIRES(!lock_);
+ void ClampGrowthLimit(size_t capacity) override REQUIRES(!lock_);
protected:
FreeListSpace(const std::string& name, MemMap&& mem_map, uint8_t* begin, uint8_t* end);