author Nicolas Geoffray <ngeoffray@google.com> 2023-06-21 16:47:24 +0000
committer Nicolas Geoffray <ngeoffray@google.com> 2023-06-23 08:11:05 +0000
commit 69c3178c5736895110fea453bd1daf57a77b1374 (patch)
tree deaee379fd8c073adefb97cdc1c92083cabbe817
parent 6d30df970f1ad209a87708fa03e8f746102fa42d (diff)
Reland "Use memset/madv_free instead of dontneed in foreground state."
This reverts commit 68fedbb0f33bb1e89012b63adc48d51470477dc6.

Reason for revert: use mincore to find out whether a page is resident. If it is, we can memset it and madvise it with MADV_FREE; if not, we should madvise it with MADV_DONTNEED.

Test: test.py
Change-Id: Ie31015ccdbfbb3fabce19a8a598108729206daaf
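
For illustration, a minimal standalone sketch of the strategy described above (not the ART implementation; it assumes Linux with MADV_FREE available, a page-aligned address, and a length that is a multiple of the page size). The actual change below additionally batches contiguous runs of pages that share the same residency state:

#include <sys/mman.h>
#include <unistd.h>
#include <cstddef>
#include <cstring>
#include <vector>

// Hypothetical helper, not ART code: zero `length` bytes at the page-aligned
// address `begin` without forcing the kernel to drop pages that are resident.
void ZeroPagesLazily(unsigned char* begin, size_t length) {
  const size_t page_size = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  std::vector<unsigned char> vec(length / page_size);
  if (mincore(begin, length, vec.data()) != 0) {
    // Residency unknown: fall back to the old eager behavior.
    madvise(begin, length, MADV_DONTNEED);
    return;
  }
  for (size_t i = 0; i < vec.size(); ++i) {
    unsigned char* page = begin + i * page_size;
    if ((vec[i] & 0x1) != 0) {
      // Resident page: zero it in place and let the kernel reclaim it lazily.
      std::memset(page, 0, page_size);
      madvise(page, page_size, MADV_FREE);
    } else {
      // Non-resident page: MADV_DONTNEED discards any swapped-out contents,
      // so future reads return zeroes without faulting the page in now.
      madvise(page, page_size, MADV_DONTNEED);
    }
  }
}
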
-rw-r--r--  libartbase/base/mem_map.cc                   93
-rw-r--r--  libartbase/base/mem_map.h                    12
-rw-r--r--  runtime/base/gc_visited_arena_pool.cc         2
-rw-r--r--  runtime/gc/accounting/card_table.cc           2
-rw-r--r--  runtime/gc/accounting/space_bitmap.cc         8
-rw-r--r--  runtime/gc/accounting/space_bitmap.h          4
-rw-r--r--  runtime/gc/collector/concurrent_copying.cc   25
-rw-r--r--  runtime/gc/collector/garbage_collector.cc    16
-rw-r--r--  runtime/gc/collector/garbage_collector.h      2
-rw-r--r--  runtime/gc/collector/mark_compact.cc          8
-rw-r--r--  runtime/gc/heap.cc                            6
-rw-r--r--  runtime/gc/heap.h                             2
-rw-r--r--  runtime/gc/space/region_space.cc             24
-rw-r--r--  runtime/gc/space/region_space.h               5
14 files changed, 154 insertions, 55 deletions
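
Also for illustration, a hedged sketch of the release policy implemented by the new GarbageCollector::ShouldEagerlyReleaseMemoryToOS() in the garbage_collector.cc hunk below (the struct and function here are made up for the example, not ART code):

// Illustrative only: mirrors the decision logic added in garbage_collector.cc.
struct ProcessInfo {
  bool is_zygote;          // the zygote always releases memory eagerly
  bool explicit_gc;        // explicit GCs release whatever they can
  bool jank_perceptible;   // roughly: the app is in the foreground
};

bool ShouldReleaseEagerly(const ProcessInfo& p) {
  if (p.is_zygote) return true;
  if (p.explicit_gc) return true;
  // Foreground apps keep resident pages (memset + MADV_FREE); background apps
  // hand them back immediately (MADV_DONTNEED).
  return !p.jank_perceptible;
}
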
diff --git a/libartbase/base/mem_map.cc b/libartbase/base/mem_map.cc
index 04c11ede63..e291c083d5 100644
--- a/libartbase/base/mem_map.cc
+++ b/libartbase/base/mem_map.cc
@@ -839,20 +839,9 @@ void MemMap::ReleaseReservedMemory(size_t byte_count) {
}
}
-void MemMap::MadviseDontNeedAndZero() {
- if (base_begin_ != nullptr || base_size_ != 0) {
- if (!kMadviseZeroes) {
- memset(base_begin_, 0, base_size_);
- }
-#ifdef _WIN32
- // It is benign not to madvise away the pages here.
- PLOG(WARNING) << "MemMap::MadviseDontNeedAndZero does not madvise on Windows.";
-#else
- int result = madvise(base_begin_, base_size_, MADV_DONTNEED);
- if (result == -1) {
- PLOG(WARNING) << "madvise failed";
- }
-#endif
+void MemMap::FillWithZero(bool release_eagerly) {
+ if (base_begin_ != nullptr && base_size_ != 0) {
+ ZeroMemory(base_begin_, base_size_, release_eagerly);
}
}
@@ -1246,7 +1235,28 @@ void MemMap::TryReadable() {
}
}
-void ZeroAndReleasePages(void* address, size_t length) {
+static inline void RawClearMemory(uint8_t* begin, uint8_t* end) {
+ std::fill(begin, end, 0);
+}
+
+#ifndef _WIN32
+static inline void ClearMemory(uint8_t* page_begin, size_t size, bool resident) {
+ DCHECK(IsAligned<kPageSize>(page_begin));
+ DCHECK(IsAligned<kPageSize>(page_begin + size));
+ if (resident) {
+ RawClearMemory(page_begin, page_begin + size);
+#ifdef MADV_FREE
+    int res = madvise(page_begin, size, MADV_FREE);
+    CHECK_EQ(res, 0) << "madvise failed";
+#endif // MADV_FREE
+ } else {
+    int res = madvise(page_begin, size, MADV_DONTNEED);
+    CHECK_EQ(res, 0) << "madvise failed";
+ }
+}
+#endif // _WIN32
+
+void ZeroMemory(void* address, size_t length, bool release_eagerly) {
if (length == 0) {
return;
}
@@ -1256,20 +1266,51 @@ void ZeroAndReleasePages(void* address, size_t length) {
uint8_t* const page_end = AlignDown(mem_end, kPageSize);
if (!kMadviseZeroes || page_begin >= page_end) {
// No possible area to madvise.
- std::fill(mem_begin, mem_end, 0);
- } else {
- // Spans one or more pages.
- DCHECK_LE(mem_begin, page_begin);
- DCHECK_LE(page_begin, page_end);
- DCHECK_LE(page_end, mem_end);
- std::fill(mem_begin, page_begin, 0);
+ RawClearMemory(mem_begin, mem_end);
+ return;
+ }
+ // Spans one or more pages.
+ DCHECK_LE(mem_begin, page_begin);
+ DCHECK_LE(page_begin, page_end);
+ DCHECK_LE(page_end, mem_end);
#ifdef _WIN32
- LOG(WARNING) << "ZeroAndReleasePages does not madvise on Windows.";
+ UNUSED(release_eagerly);
+ LOG(WARNING) << "ZeroMemory does not madvise on Windows.";
+ RawClearMemory(mem_begin, mem_end);
#else
- CHECK_NE(madvise(page_begin, page_end - page_begin, MADV_DONTNEED), -1) << "madvise failed";
-#endif
- std::fill(page_end, mem_end, 0);
+ RawClearMemory(mem_begin, page_begin);
+ RawClearMemory(page_end, mem_end);
+ if (!release_eagerly) {
+ size_t vec_len = (page_end - page_begin) / kPageSize;
+ std::unique_ptr<unsigned char[]> vec(new unsigned char[vec_len]);
+ if (mincore(page_begin, page_end - page_begin, vec.get()) == 0) {
+ uint8_t* current_page = page_begin;
+ size_t current_size = kPageSize;
+ uint32_t old_state = vec[0] & 0x1;
+ for (size_t i = 1; i < vec_len; ++i) {
+ uint32_t new_state = vec[i] & 0x1;
+ if (old_state == new_state) {
+ current_size += kPageSize;
+ } else {
+ ClearMemory(current_page, current_size, old_state);
+ current_page = current_page + current_size;
+ current_size = kPageSize;
+ old_state = new_state;
+ }
+ }
+ ClearMemory(current_page, current_size, old_state);
+ return;
+ }
+ static bool logged_about_mincore = false;
+ if (!logged_about_mincore) {
+ PLOG(WARNING) << "mincore failed, falling back to madvise MADV_DONTNEED";
+ logged_about_mincore = true;
+ }
+ // mincore failed, fall through to MADV_DONTNEED.
}
+  int res = madvise(page_begin, page_end - page_begin, MADV_DONTNEED);
+  CHECK_NE(res, -1) << "madvise failed";
+#endif
}
void MemMap::AlignBy(size_t alignment, bool align_both_ends) {
diff --git a/libartbase/base/mem_map.h b/libartbase/base/mem_map.h
index 98fb69d87c..db85f08c08 100644
--- a/libartbase/base/mem_map.h
+++ b/libartbase/base/mem_map.h
@@ -242,7 +242,10 @@ class MemMap {
bool Protect(int prot);
- void MadviseDontNeedAndZero();
+ void FillWithZero(bool release_eagerly);
+ void MadviseDontNeedAndZero() {
+ FillWithZero(/* release_eagerly= */ true);
+ }
int MadviseDontFork();
int GetProtect() const {
@@ -437,8 +440,11 @@ inline void swap(MemMap& lhs, MemMap& rhs) {
std::ostream& operator<<(std::ostream& os, const MemMap& mem_map);
-// Zero and release pages if possible, no requirements on alignments.
-void ZeroAndReleasePages(void* address, size_t length);
+// Zero memory and maybe release it to the OS, if possible; no alignment requirements.
+void ZeroMemory(void* address, size_t length, bool release_eagerly);
+inline void ZeroAndReleaseMemory(void* address, size_t length) {
+ ZeroMemory(address, length, /* release_eagerly= */ true);
+}
} // namespace art
diff --git a/runtime/base/gc_visited_arena_pool.cc b/runtime/base/gc_visited_arena_pool.cc
index 52b3829401..039157a30c 100644
--- a/runtime/base/gc_visited_arena_pool.cc
+++ b/runtime/base/gc_visited_arena_pool.cc
@@ -52,7 +52,7 @@ void TrackedArena::Release() {
// MADV_REMOVE fails if invoked on anonymous mapping, which could happen
// if the arena is released before userfaultfd-GC starts using memfd. So
// use MADV_DONTNEED.
- ZeroAndReleasePages(Begin(), Size());
+ ZeroAndReleaseMemory(Begin(), Size());
}
std::fill_n(first_obj_array_.get(), Size() / kPageSize, nullptr);
bytes_allocated_ = 0;
diff --git a/runtime/gc/accounting/card_table.cc b/runtime/gc/accounting/card_table.cc
index b8b328c795..248a6f1982 100644
--- a/runtime/gc/accounting/card_table.cc
+++ b/runtime/gc/accounting/card_table.cc
@@ -106,7 +106,7 @@ void CardTable::ClearCardRange(uint8_t* start, uint8_t* end) {
static_assert(kCardClean == 0, "kCardClean must be 0");
uint8_t* start_card = CardFromAddr(start);
uint8_t* end_card = CardFromAddr(end);
- ZeroAndReleasePages(start_card, end_card - start_card);
+ ZeroAndReleaseMemory(start_card, end_card - start_card);
}
bool CardTable::AddrIsInCardTable(const void* addr) const {
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index a0458d2ae1..d7fc5aeffd 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -148,9 +148,11 @@ std::string SpaceBitmap<kAlignment>::DumpMemAround(mirror::Object* obj) const {
}
template<size_t kAlignment>
-void SpaceBitmap<kAlignment>::Clear() {
+void SpaceBitmap<kAlignment>::Clear(bool release_eagerly) {
if (bitmap_begin_ != nullptr) {
- mem_map_.MadviseDontNeedAndZero();
+ // We currently always eagerly release the memory to the OS.
+ static constexpr bool kAlwaysEagerlyReleaseBitmapMemory = true;
+ mem_map_.FillWithZero(kAlwaysEagerlyReleaseBitmapMemory || release_eagerly);
}
}
@@ -170,7 +172,7 @@ void SpaceBitmap<kAlignment>::ClearRange(const mirror::Object* begin, const mirr
// Bitmap word boundaries.
const uintptr_t start_index = OffsetToIndex(begin_offset);
const uintptr_t end_index = OffsetToIndex(end_offset);
- ZeroAndReleasePages(reinterpret_cast<uint8_t*>(&bitmap_begin_[start_index]),
+ ZeroAndReleaseMemory(reinterpret_cast<uint8_t*>(&bitmap_begin_[start_index]),
(end_index - start_index) * sizeof(*bitmap_begin_));
}
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index e3189331c4..dbbea3a462 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -102,7 +102,9 @@ class SpaceBitmap {
bool AtomicTestAndSet(const mirror::Object* obj);
// Fill the bitmap with zeroes. Returns the bitmap's memory to the system as a side-effect.
- void Clear();
+ // If `release_eagerly` is true, this method will also try to give back the
+ // memory to the OS eagerly.
+ void Clear(bool release_eagerly = true);
// Clear a range covered by the bitmap using madvise if possible.
void ClearRange(const mirror::Object* begin, const mirror::Object* end);
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 18a4edcbef..e6c4d9951d 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -397,7 +397,7 @@ void ConcurrentCopying::BindBitmaps() {
// It is OK to clear the bitmap with mutators running since the only place it is read is
// VisitObjects which has exclusion with CC.
region_space_bitmap_ = region_space_->GetMarkBitmap();
- region_space_bitmap_->Clear();
+ region_space_bitmap_->Clear(ShouldEagerlyReleaseMemoryToOS());
}
}
}
@@ -467,7 +467,7 @@ void ConcurrentCopying::InitializePhase() {
LOG(INFO) << "GC end of InitializePhase";
}
if (use_generational_cc_ && !young_gen_) {
- region_space_bitmap_->Clear();
+ region_space_bitmap_->Clear(ShouldEagerlyReleaseMemoryToOS());
}
mark_stack_mode_.store(ConcurrentCopying::kMarkStackModeThreadLocal, std::memory_order_relaxed);
// Mark all of the zygote large objects without graying them.
@@ -2808,14 +2808,26 @@ void ConcurrentCopying::ReclaimPhase() {
// Cleared bytes and objects, populated by the call to RegionSpace::ClearFromSpace below.
uint64_t cleared_bytes;
uint64_t cleared_objects;
+ bool should_eagerly_release_memory = ShouldEagerlyReleaseMemoryToOS();
{
TimingLogger::ScopedTiming split4("ClearFromSpace", GetTimings());
- region_space_->ClearFromSpace(&cleared_bytes, &cleared_objects, /*clear_bitmap*/ !young_gen_);
+ region_space_->ClearFromSpace(&cleared_bytes,
+ &cleared_objects,
+ /*clear_bitmap*/ !young_gen_,
+ should_eagerly_release_memory);
// `cleared_bytes` and `cleared_objects` may be greater than the from space equivalents since
// RegionSpace::ClearFromSpace may clear empty unevac regions.
CHECK_GE(cleared_bytes, from_bytes);
CHECK_GE(cleared_objects, from_objects);
}
+
+ // If we need to release available memory to the OS, go over all free
+ // regions which the kernel might still cache.
+ if (should_eagerly_release_memory) {
+ TimingLogger::ScopedTiming split4("Release free regions", GetTimings());
+ region_space_->ReleaseFreeRegions();
+ }
+
// freed_bytes could conceivably be negative if we fall back to nonmoving space and have to
// pad to a larger size.
int64_t freed_bytes = (int64_t)cleared_bytes - (int64_t)to_bytes;
@@ -3775,6 +3787,7 @@ void ConcurrentCopying::FinishPhase() {
CHECK(revoked_mark_stacks_.empty());
CHECK_EQ(pooled_mark_stacks_.size(), kMarkStackPoolSize);
}
+ bool should_eagerly_release_memory = ShouldEagerlyReleaseMemoryToOS();
// kVerifyNoMissingCardMarks relies on the region space cards not being cleared to avoid false
// positives.
if (!kVerifyNoMissingCardMarks && !use_generational_cc_) {
@@ -3782,8 +3795,8 @@ void ConcurrentCopying::FinishPhase() {
// We do not currently use the region space cards at all, madvise them away to save ram.
heap_->GetCardTable()->ClearCardRange(region_space_->Begin(), region_space_->Limit());
} else if (use_generational_cc_ && !young_gen_) {
- region_space_inter_region_bitmap_.Clear();
- non_moving_space_inter_region_bitmap_.Clear();
+ region_space_inter_region_bitmap_.Clear(should_eagerly_release_memory);
+ non_moving_space_inter_region_bitmap_.Clear(should_eagerly_release_memory);
}
{
MutexLock mu(self, skipped_blocks_lock_);
@@ -3793,7 +3806,7 @@ void ConcurrentCopying::FinishPhase() {
ReaderMutexLock mu(self, *Locks::mutator_lock_);
{
WriterMutexLock mu2(self, *Locks::heap_bitmap_lock_);
- heap_->ClearMarkedObjects();
+ heap_->ClearMarkedObjects(should_eagerly_release_memory);
}
if (kUseBakerReadBarrier && kFilterModUnionCards) {
TimingLogger::ScopedTiming split("FilterModUnionCards", GetTimings());
diff --git a/runtime/gc/collector/garbage_collector.cc b/runtime/gc/collector/garbage_collector.cc
index 03a432dbf4..10eb4a02db 100644
--- a/runtime/gc/collector/garbage_collector.cc
+++ b/runtime/gc/collector/garbage_collector.cc
@@ -315,6 +315,22 @@ const Iteration* GarbageCollector::GetCurrentIteration() const {
return heap_->GetCurrentGcIteration();
}
+bool GarbageCollector::ShouldEagerlyReleaseMemoryToOS() const {
+ Runtime* runtime = Runtime::Current();
+  // The zygote isn't a memory-heavy process, so always release memory to the OS right away.
+ if (runtime->IsZygote()) {
+ return true;
+ }
+ if (GetCurrentIteration()->GetGcCause() == kGcCauseExplicit) {
+ // Our behavior with explicit GCs is to always release any available memory.
+ return true;
+ }
+  // Hold on to the memory if the app is in the foreground. If it is in the background or
+ // goes into the background (see invocation with cause kGcCauseCollectorTransition),
+ // release the memory.
+ return !runtime->InJankPerceptibleProcessState();
+}
+
void GarbageCollector::RecordFree(const ObjectBytePair& freed) {
GetCurrentIteration()->freed_.Add(freed);
heap_->RecordFree(freed.objects, freed.bytes);
diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h
index 948a868bd2..694d7a0e2b 100644
--- a/runtime/gc/collector/garbage_collector.h
+++ b/runtime/gc/collector/garbage_collector.h
@@ -143,6 +143,8 @@ class GarbageCollector : public RootVisitor, public IsMarkedVisitor, public Mark
return is_transaction_active_;
}
+ bool ShouldEagerlyReleaseMemoryToOS() const;
+
protected:
// Run all of the GC phases.
virtual void RunPhases() = 0;
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index cd5a45b21c..c3c08b12b8 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -3456,7 +3456,7 @@ void MarkCompact::ProcessLinearAlloc() {
// processing any pages in this arena, then we can madvise the shadow size.
// Otherwise, we will double the memory use for linear-alloc.
if (!minor_fault_initialized_ && !others_processing) {
- ZeroAndReleasePages(arena_begin + diff, arena_size);
+ ZeroAndReleaseMemory(arena_begin + diff, arena_size);
}
}
}
@@ -3582,7 +3582,7 @@ void MarkCompact::CompactionPhase() {
// We will only iterate once if gKernelHasFaultRetry is true.
do {
// madvise the page so that we can get userfaults on it.
- ZeroAndReleasePages(conc_compaction_termination_page_, kPageSize);
+ ZeroAndReleaseMemory(conc_compaction_termination_page_, kPageSize);
// The following load triggers 'special' userfaults. When received by the
// thread-pool workers, they will exit out of the compaction task. This fault
// happens because we madvised the page.
@@ -4233,8 +4233,8 @@ void MarkCompact::FinishPhase() {
if (use_uffd_sigbus_ || !minor_fault_initialized_ || !shadow_to_space_map_.IsValid() ||
shadow_to_space_map_.Size() < (moving_first_objs_count_ + black_page_count_) * kPageSize) {
size_t adjustment = use_uffd_sigbus_ ? 0 : kPageSize;
- ZeroAndReleasePages(compaction_buffers_map_.Begin() + adjustment,
- compaction_buffers_map_.Size() - adjustment);
+ ZeroAndReleaseMemory(compaction_buffers_map_.Begin() + adjustment,
+ compaction_buffers_map_.Size() - adjustment);
} else if (shadow_to_space_map_.Size() == bump_pointer_space_->Capacity()) {
// Now that we are going to use minor-faults from next GC cycle, we can
// unmap the buffers used by worker threads.
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 381271fded..25397662fe 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -4245,16 +4245,16 @@ void Heap::RemoveRememberedSet(space::Space* space) {
CHECK(remembered_sets_.find(space) == remembered_sets_.end());
}
-void Heap::ClearMarkedObjects() {
+void Heap::ClearMarkedObjects(bool release_eagerly) {
// Clear all of the spaces' mark bitmaps.
for (const auto& space : GetContinuousSpaces()) {
if (space->GetLiveBitmap() != nullptr && !space->HasBoundBitmaps()) {
- space->GetMarkBitmap()->Clear();
+ space->GetMarkBitmap()->Clear(release_eagerly);
}
}
// Clear the marked objects in the discontinous space object sets.
for (const auto& space : GetDiscontinuousSpaces()) {
- space->GetMarkBitmap()->Clear();
+ space->GetMarkBitmap()->Clear(release_eagerly);
}
}
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 3f9cb74060..3e7aaf3199 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -389,7 +389,7 @@ class Heap {
// Clear all of the mark bits, doesn't clear bitmaps which have the same live bits as mark bits.
// Mutator lock is required for GetContinuousSpaces.
- void ClearMarkedObjects()
+ void ClearMarkedObjects(bool release_eagerly = true)
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index 60141d656b..9a66f8a800 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -393,16 +393,30 @@ void RegionSpace::SetFromSpace(accounting::ReadBarrierTable* rb_table,
evac_region_ = &full_region_;
}
-static void ZeroAndProtectRegion(uint8_t* begin, uint8_t* end) {
- ZeroAndReleasePages(begin, end - begin);
+static void ZeroAndProtectRegion(uint8_t* begin, uint8_t* end, bool release_eagerly) {
+ ZeroMemory(begin, end - begin, release_eagerly);
if (kProtectClearedRegions) {
CheckedCall(mprotect, __FUNCTION__, begin, end - begin, PROT_NONE);
}
}
+void RegionSpace::ReleaseFreeRegions() {
+ MutexLock mu(Thread::Current(), region_lock_);
+ for (size_t i = 0u; i < num_regions_; ++i) {
+ if (regions_[i].IsFree()) {
+ uint8_t* begin = regions_[i].Begin();
+ DCHECK_ALIGNED(begin, kPageSize);
+ DCHECK_ALIGNED(regions_[i].End(), kPageSize);
+      int res = madvise(begin, regions_[i].End() - begin, MADV_DONTNEED);
+      CHECK_NE(res, -1) << "madvise failed";
+ }
+ }
+}
+
void RegionSpace::ClearFromSpace(/* out */ uint64_t* cleared_bytes,
/* out */ uint64_t* cleared_objects,
- const bool clear_bitmap) {
+ const bool clear_bitmap,
+ const bool release_eagerly) {
DCHECK(cleared_bytes != nullptr);
DCHECK(cleared_objects != nullptr);
*cleared_bytes = 0;
@@ -483,7 +497,7 @@ void RegionSpace::ClearFromSpace(/* out */ uint64_t* cleared_bytes,
// Madvise the memory ranges.
uint64_t start_time = NanoTime();
for (const auto &iter : madvise_list) {
- ZeroAndProtectRegion(iter.first, iter.second);
+ ZeroAndProtectRegion(iter.first, iter.second, release_eagerly);
}
madvise_time_ += NanoTime() - start_time;
@@ -1012,7 +1026,7 @@ void RegionSpace::Region::Clear(bool zero_and_release_pages) {
alloc_time_ = 0;
live_bytes_ = static_cast<size_t>(-1);
if (zero_and_release_pages) {
- ZeroAndProtectRegion(begin_, end_);
+ ZeroAndProtectRegion(begin_, end_, /* release_eagerly= */ true);
}
is_newly_allocated_ = false;
is_a_tlab_ = false;
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index 27b9e9c367..98f2060df1 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -313,7 +313,8 @@ class RegionSpace final : public ContinuousMemMapAllocSpace {
size_t ToSpaceSize() REQUIRES(!region_lock_);
void ClearFromSpace(/* out */ uint64_t* cleared_bytes,
/* out */ uint64_t* cleared_objects,
- const bool clear_bitmap)
+ const bool clear_bitmap,
+ const bool release_eagerly)
REQUIRES(!region_lock_);
void AddLiveBytes(mirror::Object* ref, size_t alloc_size) {
@@ -384,6 +385,8 @@ class RegionSpace final : public ContinuousMemMapAllocSpace {
return madvise_time_;
}
+ void ReleaseFreeRegions();
+
private:
RegionSpace(const std::string& name, MemMap&& mem_map, bool use_generational_cc);