-rw-r--r--  runtime/gc/collector/mark_compact.cc            | 450
-rw-r--r--  runtime/gc/collector/mark_compact.h             |  27
-rw-r--r--  runtime/gc/space/bump_pointer_space-walk-inl.h  |  43
-rw-r--r--  runtime/gc/space/bump_pointer_space.cc          |   6
-rw-r--r--  runtime/gc/space/bump_pointer_space.h           |   5
5 files changed, 169 insertions(+), 362 deletions(-)
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index 209c9e32e5..e6ba007e5e 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -113,12 +113,13 @@ static uint64_t gUffdFeatures = 0;
static constexpr uint64_t kUffdFeaturesForMinorFault =
UFFD_FEATURE_MISSING_SHMEM | UFFD_FEATURE_MINOR_SHMEM;
static constexpr uint64_t kUffdFeaturesForSigbus = UFFD_FEATURE_SIGBUS;
-// A region which is more than kBlackDenseRegionThreshold percent live doesn't
-// need to be compacted as it is too densely packed.
-static constexpr uint kBlackDenseRegionThreshold = 85U;
+
// We consider SIGBUS feature necessary to enable this GC as it's superior to
-// threading-based implementation for janks. We may want minor-fault in future
-// to be available for making jit-code-cache updation concurrent, which uses shmem.
+// the threading-based implementation for janks. However, since the latter is
+// already implemented, we allow choosing either of the two at boot time in
+// the constructor below, for testing purposes.
+// We may want minor-fault to be available in the future for making
+// jit-code-cache updates concurrent, as that cache uses shmem.
bool KernelSupportsUffd() {
#ifdef __linux__
if (gHaveMremapDontunmap) {
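For reference, probing which userfaultfd features the kernel supports amounts to
issuing UFFDIO_API with features=0 and reading the reply. A minimal sketch (not
the ART implementation; error handling trimmed):

    #include <cstdint>
    #include <fcntl.h>
    #include <linux/userfaultfd.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static uint64_t ProbeUffdFeatures() {
      int fd = syscall(SYS_userfaultfd, O_CLOEXEC | O_NONBLOCK);
      if (fd < 0) {
        return 0;  // no userfaultfd support (or the syscall is filtered)
      }
      struct uffdio_api api = {};
      api.api = UFFD_API;
      api.features = 0;  // request nothing; kernel replies with supported bits
      uint64_t features = (ioctl(fd, UFFDIO_API, &api) == 0) ? api.features : 0;
      close(fd);
      return features;  // test e.g. (features & UFFD_FEATURE_SIGBUS) != 0
    }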
@@ -446,7 +447,6 @@ MarkCompact::MarkCompact(Heap* heap)
moving_space_bitmap_(bump_pointer_space_->GetMarkBitmap()),
moving_space_begin_(bump_pointer_space_->Begin()),
moving_space_end_(bump_pointer_space_->Limit()),
- black_dense_end_(moving_space_begin_),
uffd_(kFdUnused),
sigbus_in_progress_count_{kSigbusCounterCompactionDoneMask, kSigbusCounterCompactionDoneMask},
compacting_(false),
@@ -703,14 +703,10 @@ void MarkCompact::InitializePhase() {
freed_objects_ = 0;
// The first buffer is used by gc-thread.
compaction_buffer_counter_.store(1, std::memory_order_relaxed);
+ from_space_slide_diff_ = from_space_begin_ - bump_pointer_space_->Begin();
black_allocations_begin_ = bump_pointer_space_->Limit();
- DCHECK_EQ(moving_space_begin_, bump_pointer_space_->Begin());
- from_space_slide_diff_ = from_space_begin_ - moving_space_begin_;
+ CHECK_EQ(moving_space_begin_, bump_pointer_space_->Begin());
moving_space_end_ = bump_pointer_space_->Limit();
- if (black_dense_end_ > moving_space_begin_) {
- moving_space_bitmap_->Clear();
- }
- black_dense_end_ = moving_space_begin_;
// TODO: Would it suffice to read it once in the constructor, which is called
// in zygote process?
pointer_size_ = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
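The slide diff computed in this hunk is the constant that translates any
moving-space (to-space) address into its from-space mirror. A one-line sketch
of the arithmetic (names illustrative):

    #include <cstddef>
    #include <cstdint>

    // from_space_slide_diff_ = from_space_begin - moving_space_begin, so a
    // to-space address maps to its from-space copy with a single addition.
    inline uint8_t* ToFromSpaceAddr(uint8_t* to_addr,
                                    uint8_t* moving_space_begin,
                                    uint8_t* from_space_begin) {
      ptrdiff_t slide = from_space_begin - moving_space_begin;
      return to_addr + slide;
    }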
@@ -767,48 +763,47 @@ void MarkCompact::RunPhases() {
bump_pointer_space_->AssertAllThreadLocalBuffersAreRevoked();
}
}
- bool perform_compaction;
{
ReaderMutexLock mu(self, *Locks::mutator_lock_);
ReclaimPhase();
- perform_compaction = PrepareForCompaction();
+ PrepareForCompaction();
}
- if (perform_compaction) {
+ {
// Compaction pause
ThreadFlipVisitor visitor(this);
FlipCallback callback(this);
runtime->GetThreadList()->FlipThreadRoots(
&visitor, &callback, this, GetHeap()->GetGcPauseListener());
+ }
- if (IsValidFd(uffd_)) {
- ReaderMutexLock mu(self, *Locks::mutator_lock_);
- CompactionPhase();
- }
+ if (IsValidFd(uffd_)) {
+ ReaderMutexLock mu(self, *Locks::mutator_lock_);
+ CompactionPhase();
}
+
FinishPhase();
thread_running_gc_ = nullptr;
}
-void MarkCompact::InitMovingSpaceFirstObjects(size_t vec_len, size_t to_space_page_idx) {
+void MarkCompact::InitMovingSpaceFirstObjects(const size_t vec_len) {
+ // Start by finding the first live word.
+ size_t to_space_page_idx = 0;
uint32_t offset_in_chunk_word;
uint32_t offset;
mirror::Object* obj;
const uintptr_t heap_begin = moving_space_bitmap_->HeapBegin();
- // Find the first live word.
- size_t chunk_idx = to_space_page_idx * (gPageSize / kOffsetChunkSize);
- DCHECK_LT(chunk_idx, vec_len);
+ size_t chunk_idx;
// Find the first live word in the space
- for (; chunk_info_vec_[chunk_idx] == 0; chunk_idx++) {
+ for (chunk_idx = 0; chunk_info_vec_[chunk_idx] == 0; chunk_idx++) {
if (chunk_idx >= vec_len) {
// We don't have any live data on the moving-space.
- moving_first_objs_count_ = to_space_page_idx;
return;
}
}
DCHECK_LT(chunk_idx, vec_len);
- // Use live-words bitmap to find the first live word
+ // Use live-words bitmap to find the first live word
offset_in_chunk_word = live_words_bitmap_->FindNthLiveWordOffset(chunk_idx, /*n*/ 0);
offset = chunk_idx * kBitsPerVectorWord + offset_in_chunk_word;
DCHECK(live_words_bitmap_->Test(offset)) << "offset=" << offset
@@ -817,7 +812,8 @@ void MarkCompact::InitMovingSpaceFirstObjects(size_t vec_len, size_t to_space_pa
<< " offset_in_word=" << offset_in_chunk_word
<< " word=" << std::hex
<< live_words_bitmap_->GetWord(chunk_idx);
- obj = moving_space_bitmap_->FindPrecedingObject(heap_begin + offset * kAlignment);
+ // The first object doesn't require using FindPrecedingObject().
+ obj = reinterpret_cast<mirror::Object*>(heap_begin + offset * kAlignment);
// TODO: add a check to validate the object.
pre_compact_offset_moving_space_[to_space_page_idx] = offset;
@@ -866,10 +862,10 @@ void MarkCompact::InitMovingSpaceFirstObjects(size_t vec_len, size_t to_space_pa
}
}
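InitMovingSpaceFirstObjects() locates the first live word by skipping empty
chunk-info entries and then consulting the live-words bitmap. A hedged sketch
of that computation, assuming one bitmap word per chunk and LSB-first bit
numbering (ART's helpers may differ in both respects):

    #include <cstddef>
    #include <cstdint>

    // One live-words bit covers kAlignment bytes; chunk_info[i] holds the
    // live-byte count of chunk i.
    uintptr_t FirstLiveAddr(const uint32_t* chunk_info, size_t vec_len,
                            const uint64_t* live_words, uintptr_t heap_begin,
                            size_t kAlignment, size_t kBitsPerVectorWord) {
      size_t chunk_idx = 0;
      while (chunk_idx < vec_len && chunk_info[chunk_idx] == 0) {
        chunk_idx++;  // skip chunks with no live data
      }
      if (chunk_idx == vec_len) {
        return 0;  // nothing live in the space
      }
      uint32_t bit = __builtin_ctzll(live_words[chunk_idx]);  // first set bit
      return heap_begin + (chunk_idx * kBitsPerVectorWord + bit) * kAlignment;
    }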
-size_t MarkCompact::InitNonMovingFirstObjects(uintptr_t begin,
- uintptr_t end,
- accounting::ContinuousSpaceBitmap* bitmap,
- ObjReference* first_objs_arr) {
+void MarkCompact::InitNonMovingSpaceFirstObjects() {
+ accounting::ContinuousSpaceBitmap* bitmap = non_moving_space_->GetLiveBitmap();
+ uintptr_t begin = reinterpret_cast<uintptr_t>(non_moving_space_->Begin());
+ const uintptr_t end = reinterpret_cast<uintptr_t>(non_moving_space_->End());
mirror::Object* prev_obj;
size_t page_idx;
{
@@ -881,11 +877,11 @@ size_t MarkCompact::InitNonMovingFirstObjects(uintptr_t begin,
obj = o;
});
if (obj == nullptr) {
- // There are no live objects in the space
- return 0;
+ // There are no live objects in the non-moving space
+ return;
}
page_idx = DivideByPageSize(reinterpret_cast<uintptr_t>(obj) - begin);
- first_objs_arr[page_idx++].Assign(obj);
+ first_objs_non_moving_space_[page_idx++].Assign(obj);
prev_obj = obj;
}
// TODO: check obj is valid
@@ -900,7 +896,13 @@ size_t MarkCompact::InitNonMovingFirstObjects(uintptr_t begin,
// overlaps with this page as well.
if (prev_obj != nullptr && prev_obj_end > begin) {
DCHECK_LT(prev_obj, reinterpret_cast<mirror::Object*>(begin));
- first_objs_arr[page_idx].Assign(prev_obj);
+ first_objs_non_moving_space_[page_idx].Assign(prev_obj);
+ mirror::Class* klass = prev_obj->GetClass<kVerifyNone, kWithoutReadBarrier>();
+ if (HasAddress(klass)) {
+ LOG(WARNING) << "found inter-page object " << prev_obj
+ << " in non-moving space with klass " << klass
+ << " in moving space";
+ }
} else {
prev_obj_end = 0;
// It's sufficient to only search for previous object in the preceding page.
@@ -913,13 +915,21 @@ size_t MarkCompact::InitNonMovingFirstObjects(uintptr_t begin,
+ RoundUp(prev_obj->SizeOf<kDefaultVerifyFlags>(), kAlignment);
}
if (prev_obj_end > begin) {
- first_objs_arr[page_idx].Assign(prev_obj);
+ mirror::Class* klass = prev_obj->GetClass<kVerifyNone, kWithoutReadBarrier>();
+ if (HasAddress(klass)) {
+ LOG(WARNING) << "found inter-page object " << prev_obj
+ << " in non-moving space with klass " << klass
+ << " in moving space";
+ }
+ first_objs_non_moving_space_[page_idx].Assign(prev_obj);
} else {
// Find the first live object in this page
bitmap->VisitMarkedRange</*kVisitOnce*/ true>(
- begin, begin + gPageSize, [first_objs_arr, page_idx](mirror::Object* obj) {
- first_objs_arr[page_idx].Assign(obj);
- });
+ begin,
+ begin + gPageSize,
+ [this, page_idx] (mirror::Object* obj) {
+ first_objs_non_moving_space_[page_idx].Assign(obj);
+ });
}
// An empty entry indicates that the page has no live objects and hence
// can be skipped.
@@ -927,23 +937,20 @@ size_t MarkCompact::InitNonMovingFirstObjects(uintptr_t begin,
begin += gPageSize;
page_idx++;
}
- return page_idx;
+ non_moving_first_objs_count_ = page_idx;
}
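The loop above fills a first-object table indexed by page: either the previous
object spills into the page, or the first marked object starting within the
page is taken (an empty entry means the page has no live objects and is
skippable). A minimal sketch of that rule, with hypothetical helper names:

    #include <cstddef>
    #include <cstdint>

    struct Obj;                                          // stand-in for mirror::Object
    size_t ObjSize(Obj* obj);                            // hypothetical size query
    Obj* FirstMarkedIn(uintptr_t begin, uintptr_t end);  // hypothetical bitmap scan

    Obj* FirstObjForPage(Obj* prev_obj, uintptr_t page_begin, size_t page_size) {
      if (prev_obj != nullptr &&
          reinterpret_cast<uintptr_t>(prev_obj) + ObjSize(prev_obj) > page_begin) {
        return prev_obj;  // previous object overlaps this page's start
      }
      // May return null: the page has no live objects and can be skipped.
      return FirstMarkedIn(page_begin, page_begin + page_size);
    }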
-bool MarkCompact::PrepareForCompaction() {
+void MarkCompact::PrepareForCompaction() {
TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
- size_t chunk_info_per_page = gPageSize / kOffsetChunkSize;
- size_t vector_len = (black_allocations_begin_ - moving_space_begin_) / kOffsetChunkSize;
+ uint8_t* space_begin = bump_pointer_space_->Begin();
+ size_t vector_len = (black_allocations_begin_ - space_begin) / kOffsetChunkSize;
DCHECK_LE(vector_len, vector_length_);
- DCHECK_ALIGNED_PARAM(vector_length_, chunk_info_per_page);
- if (UNLIKELY(vector_len == 0)) {
- // Nothing to compact.
- return false;
- }
for (size_t i = 0; i < vector_len; i++) {
DCHECK_LE(chunk_info_vec_[i], kOffsetChunkSize);
DCHECK_EQ(chunk_info_vec_[i], live_words_bitmap_->LiveBytesInBitmapWord(i));
}
+ InitMovingSpaceFirstObjects(vector_len);
+ InitNonMovingSpaceFirstObjects();
// TODO: We can do a lot of neat tricks with this offset vector to tune the
// compaction as we wish. Originally, the compaction algorithm slides all
@@ -967,106 +974,29 @@ bool MarkCompact::PrepareForCompaction() {
// of the corresponding chunk. For old-to-new address computation we need
// every element to reflect total live-bytes till the corresponding chunk.
- size_t black_dense_idx = 0;
- GcCause gc_cause = GetCurrentIteration()->GetGcCause();
- if (gc_cause != kGcCauseExplicit && gc_cause != kGcCauseCollectorTransition &&
- !GetCurrentIteration()->GetClearSoftReferences()) {
- size_t live_bytes = 0, total_bytes = 0;
- size_t aligned_vec_len = RoundUp(vector_len, chunk_info_per_page);
- size_t num_pages = aligned_vec_len / chunk_info_per_page;
- size_t threshold_passing_marker = 0; // In number of pages
- std::vector<uint32_t> pages_live_bytes;
- pages_live_bytes.reserve(num_pages);
- // Identify the largest chunk towards the beginning of moving space which
- // passes the black-dense threshold.
- for (size_t i = 0; i < aligned_vec_len; i += chunk_info_per_page) {
- uint32_t page_live_bytes = 0;
- for (size_t j = 0; j < chunk_info_per_page; j++) {
- page_live_bytes += chunk_info_vec_[i + j];
- total_bytes += kOffsetChunkSize;
- }
- live_bytes += page_live_bytes;
- pages_live_bytes.push_back(page_live_bytes);
- if (live_bytes * 100U >= total_bytes * kBlackDenseRegionThreshold) {
- threshold_passing_marker = pages_live_bytes.size();
- }
- }
- DCHECK_EQ(pages_live_bytes.size(), num_pages);
- // Eliminate the pages at the end of the chunk which are lower than the threshold.
- if (threshold_passing_marker > 0) {
- auto iter = std::find_if(
- pages_live_bytes.rbegin() + (num_pages - threshold_passing_marker),
- pages_live_bytes.rend(),
- [](uint32_t bytes) { return bytes * 100U < gPageSize * kBlackDenseRegionThreshold; });
- black_dense_idx = (pages_live_bytes.rend() - iter) * chunk_info_per_page;
- }
- black_dense_end_ = moving_space_begin_ + black_dense_idx * kOffsetChunkSize;
- DCHECK_ALIGNED_PARAM(black_dense_end_, gPageSize);
-
- // Adjust for class allocated after black_dense_end_ while its object(s)
- // are earlier. This is required as we update the references in the
- // black-dense region in-place. And if the class pointer of some first
- // object for a page, which started in some preceding page, is already
- // updated, then we will read wrong class data like ref-offset bitmap.
- for (auto iter = class_after_obj_map_.rbegin();
- iter != class_after_obj_map_.rend() &&
- reinterpret_cast<uint8_t*>(iter->first.AsMirrorPtr()) >= black_dense_end_;
- iter++) {
- black_dense_end_ =
- std::min(black_dense_end_, reinterpret_cast<uint8_t*>(iter->second.AsMirrorPtr()));
- black_dense_end_ = AlignDown(black_dense_end_, gPageSize);
- }
- black_dense_idx = (black_dense_end_ - moving_space_begin_) / kOffsetChunkSize;
- DCHECK_LE(black_dense_idx, vector_len);
- if (black_dense_idx == vector_len) {
- // There is nothing to compact.
- return false;
- }
- InitNonMovingFirstObjects(reinterpret_cast<uintptr_t>(moving_space_begin_),
- reinterpret_cast<uintptr_t>(black_dense_end_),
- moving_space_bitmap_,
- first_objs_moving_space_);
- }
-
- InitMovingSpaceFirstObjects(vector_len, black_dense_idx / chunk_info_per_page);
- non_moving_first_objs_count_ =
- InitNonMovingFirstObjects(reinterpret_cast<uintptr_t>(non_moving_space_->Begin()),
- reinterpret_cast<uintptr_t>(non_moving_space_->End()),
- non_moving_space_->GetLiveBitmap(),
- first_objs_non_moving_space_);
+ // Live-bytes count is required to compute post_compact_end_ below.
+ uint32_t total;
// Update the vector one past the heap usage as it is required for black
// allocated objects' post-compact address computation.
- uint32_t total_bytes;
if (vector_len < vector_length_) {
vector_len++;
- total_bytes = 0;
+ total = 0;
} else {
// Fetch the value stored in the last element before it gets overwritten by
// std::exclusive_scan().
- total_bytes = chunk_info_vec_[vector_len - 1];
- }
- std::exclusive_scan(chunk_info_vec_ + black_dense_idx,
- chunk_info_vec_ + vector_len,
- chunk_info_vec_ + black_dense_idx,
- black_dense_idx * kOffsetChunkSize);
- total_bytes += chunk_info_vec_[vector_len - 1];
- post_compact_end_ = AlignUp(moving_space_begin_ + total_bytes, gPageSize);
- CHECK_EQ(post_compact_end_, moving_space_begin_ + moving_first_objs_count_ * gPageSize)
- << "moving_first_objs_count_:" << moving_first_objs_count_
- << " black_dense_idx:" << black_dense_idx << " vector_len:" << vector_len
- << " total_bytes:" << total_bytes
- << " black_dense_end:" << reinterpret_cast<void*>(black_dense_end_)
- << " chunk_info_per_page:" << chunk_info_per_page;
- black_objs_slide_diff_ = black_allocations_begin_ - post_compact_end_;
- // We shouldn't be consuming more space after compaction than pre-compaction.
- CHECK_GE(black_objs_slide_diff_, 0);
- if (black_objs_slide_diff_ == 0) {
- return false;
+ total = chunk_info_vec_[vector_len - 1];
}
+ std::exclusive_scan(chunk_info_vec_, chunk_info_vec_ + vector_len, chunk_info_vec_, 0);
+ total += chunk_info_vec_[vector_len - 1];
+
for (size_t i = vector_len; i < vector_length_; i++) {
DCHECK_EQ(chunk_info_vec_[i], 0u);
}
-
+ post_compact_end_ = AlignUp(space_begin + total, gPageSize);
+ CHECK_EQ(post_compact_end_, space_begin + moving_first_objs_count_ * gPageSize);
+ black_objs_slide_diff_ = black_allocations_begin_ - post_compact_end_;
+ // We shouldn't be consuming more space after compaction than pre-compaction.
+ CHECK_GE(black_objs_slide_diff_, 0);
// How do we handle compaction of heap portion used for allocations after the
// marking-pause?
// All allocations after the marking-pause are considered black (reachable)
@@ -1081,7 +1011,6 @@ bool MarkCompact::PrepareForCompaction() {
if (!uffd_initialized_) {
CreateUserfaultfd(/*post_fork=*/false);
}
- return true;
}
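The std::exclusive_scan in this hunk turns per-chunk live-byte counts into the
cumulative live bytes preceding each chunk, which is exactly the old-to-new
offset table the comment above describes. A self-contained illustration:

    #include <cstdint>
    #include <numeric>
    #include <vector>

    int main() {
      // Analogue of chunk_info_vec_: live bytes in each chunk.
      std::vector<uint32_t> chunks = {96, 0, 128, 64};
      uint32_t last = chunks.back();  // saved before the scan overwrites it
      std::exclusive_scan(chunks.begin(), chunks.end(), chunks.begin(), 0u);
      // chunks is now {0, 96, 96, 224}: live bytes preceding each chunk, i.e.
      // the post-compact offset at which each chunk's live data will land.
      uint32_t total = last + chunks.back();  // 288 live bytes overall
      return total == 288 ? 0 : 1;
    }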
class MarkCompact::VerifyRootMarkedVisitor : public SingleRootVisitor {
@@ -1262,7 +1191,7 @@ class MarkCompact::RefsUpdateVisitor {
uint8_t* begin,
uint8_t* end)
: collector_(collector),
- moving_space_begin_(collector->black_dense_end_),
+ moving_space_begin_(collector->moving_space_begin_),
moving_space_end_(collector->moving_space_end_),
obj_(obj),
begin_(begin),
@@ -1965,33 +1894,25 @@ bool MarkCompact::FreeFromSpacePages(size_t cur_page_idx, int mode, size_t end_i
}
} else {
DCHECK_GE(pre_compact_offset_moving_space_[idx], 0u);
- idx_addr = moving_space_begin_ + idx * gPageSize;
- if (idx_addr >= black_dense_end_) {
- idx_addr = moving_space_begin_ + pre_compact_offset_moving_space_[idx] * kAlignment;
- }
+ idx_addr = bump_pointer_space_->Begin() + pre_compact_offset_moving_space_[idx] * kAlignment;
reclaim_begin = idx_addr;
DCHECK_LE(reclaim_begin, black_allocations_begin_);
mirror::Object* first_obj = first_objs_moving_space_[idx].AsMirrorPtr();
- if (first_obj != nullptr) {
- if (reinterpret_cast<uint8_t*>(first_obj) < reclaim_begin) {
- DCHECK_LT(idx, moving_first_objs_count_);
- mirror::Object* obj = first_obj;
- for (size_t i = idx + 1; i < moving_first_objs_count_; i++) {
- obj = first_objs_moving_space_[i].AsMirrorPtr();
- if (obj == nullptr) {
- reclaim_begin = moving_space_begin_ + i * gPageSize;
- break;
- } else if (first_obj != obj) {
- DCHECK_LT(first_obj, obj);
- DCHECK_LT(reclaim_begin, reinterpret_cast<uint8_t*>(obj));
- reclaim_begin = reinterpret_cast<uint8_t*>(obj);
- break;
- }
- }
- if (obj == first_obj) {
- reclaim_begin = black_allocations_begin_;
+ if (reinterpret_cast<uint8_t*>(first_obj) < reclaim_begin) {
+ DCHECK_LT(idx, moving_first_objs_count_);
+ mirror::Object* obj = first_obj;
+ for (size_t i = idx + 1; i < moving_first_objs_count_; i++) {
+ obj = first_objs_moving_space_[i].AsMirrorPtr();
+ if (first_obj != obj) {
+ DCHECK_LT(first_obj, obj);
+ DCHECK_LT(reclaim_begin, reinterpret_cast<uint8_t*>(obj));
+ reclaim_begin = reinterpret_cast<uint8_t*>(obj);
+ break;
}
}
+ if (obj == first_obj) {
+ reclaim_begin = black_allocations_begin_;
+ }
}
reclaim_begin = AlignUp(reclaim_begin, gPageSize);
}
@@ -2052,7 +1973,8 @@ bool MarkCompact::FreeFromSpacePages(size_t cur_page_idx, int mode, size_t end_i
cur_reclaimable_page_ = addr;
}
}
- last_reclaimable_page_ = std::min(reclaim_begin, last_reclaimable_page_);
+ CHECK_LE(reclaim_begin, last_reclaimable_page_);
+ last_reclaimable_page_ = reclaim_begin;
last_checked_reclaim_page_idx_ = idx;
return all_mapped;
}
@@ -2072,8 +1994,7 @@ void MarkCompact::CompactMovingSpace(uint8_t* page) {
TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
size_t page_status_arr_len = moving_first_objs_count_ + black_page_count_;
size_t idx = page_status_arr_len;
- size_t black_dense_end_idx = (black_dense_end_ - moving_space_begin_) / gPageSize;
- uint8_t* to_space_end = moving_space_begin_ + page_status_arr_len * gPageSize;
+ uint8_t* to_space_end = bump_pointer_space_->Begin() + page_status_arr_len * gPageSize;
uint8_t* pre_compact_page = black_allocations_begin_ + (black_page_count_ * gPageSize);
DCHECK(IsAlignedParam(pre_compact_page, gPageSize));
@@ -2120,7 +2041,7 @@ void MarkCompact::CompactMovingSpace(uint8_t* page) {
// Reserved page to be used if we can't find any reclaimable page for processing.
uint8_t* reserve_page = page;
size_t end_idx_for_mapping = idx;
- while (idx > black_dense_end_idx) {
+ while (idx > 0) {
idx--;
to_space_end -= gPageSize;
if (kMode == kFallbackMode) {
@@ -2156,33 +2077,6 @@ void MarkCompact::CompactMovingSpace(uint8_t* page) {
end_idx_for_mapping = idx;
}
}
- while (idx > 0) {
- idx--;
- to_space_end -= gPageSize;
- mirror::Object* first_obj = first_objs_moving_space_[idx].AsMirrorPtr();
- if (first_obj != nullptr) {
- DoPageCompactionWithStateChange<kMode>(
- idx,
- to_space_end,
- to_space_end + from_space_slide_diff_,
- /*map_immediately=*/false,
- [&]() REQUIRES_SHARED(Locks::mutator_lock_) {
- UpdateNonMovingPage(
- first_obj, to_space_end, from_space_slide_diff_, moving_space_bitmap_);
- });
- } else {
- // The page has no reachable object on it. Just declare it mapped.
- // Mutators shouldn't step on this page, which is asserted in sigbus
- // handler.
- DCHECK_EQ(moving_pages_status_[idx].load(std::memory_order_relaxed),
- static_cast<uint8_t>(PageState::kUnprocessed));
- moving_pages_status_[idx].store(static_cast<uint8_t>(PageState::kProcessedAndMapped),
- std::memory_order_release);
- }
- if (FreeFromSpacePages(idx, kMode, end_idx_for_mapping)) {
- end_idx_for_mapping = idx;
- }
- }
// map one last time to finish anything left.
if (kMode == kCopyMode && end_idx_for_mapping > 0) {
MapMovingSpacePages(idx,
@@ -2206,13 +2100,12 @@ size_t MarkCompact::MapMovingSpacePages(size_t start_idx,
size_t map_count = 0;
uint32_t cur_state = moving_pages_status_[arr_idx].load(std::memory_order_acquire);
// Find a contiguous range that can be mapped with single ioctl.
- for (uint32_t i = arr_idx, from_page = cur_state & ~kPageStateMask; i < arr_len;
- i++, map_count++, from_page += gPageSize) {
+ for (size_t i = arr_idx; i < arr_len; i++, map_count++) {
uint32_t s = moving_pages_status_[i].load(std::memory_order_acquire);
- uint32_t cur_from_page = s & ~kPageStateMask;
- if (GetPageStateFromWord(s) != PageState::kProcessed || cur_from_page != from_page) {
+ if (GetPageStateFromWord(s) != PageState::kProcessed) {
break;
}
+ DCHECK_EQ((cur_state & ~kPageStateMask) + (i - arr_idx) * gPageSize, s & ~kPageStateMask);
}
if (map_count == 0) {
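The loop above relies on each status word packing a PageState in its low bits
and the source (from-space) page in the remaining bits, which is what the
DCHECK asserts for consecutive entries. A hedged sketch of that packing (the
mask width here is illustrative, not ART's actual value):

    #include <cstdint>

    constexpr uint32_t kPageStateMask = (1u << 8) - 1;  // low bits: state (assumed width)

    inline uint32_t PackStatus(uint32_t from_page_offset, uint8_t state) {
      return (from_page_offset & ~kPageStateMask) | state;
    }
    inline uint8_t StateOf(uint32_t status) { return status & kPageStateMask; }
    inline uint32_t FromPageOf(uint32_t status) { return status & ~kPageStateMask; }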
@@ -2269,10 +2162,7 @@ size_t MarkCompact::MapMovingSpacePages(size_t start_idx,
return arr_len - start_idx;
}
-void MarkCompact::UpdateNonMovingPage(mirror::Object* first,
- uint8_t* page,
- ptrdiff_t from_space_diff,
- accounting::ContinuousSpaceBitmap* bitmap) {
+void MarkCompact::UpdateNonMovingPage(mirror::Object* first, uint8_t* page) {
DCHECK_LT(reinterpret_cast<uint8_t*>(first), page + gPageSize);
// For every object found in the page, visit the previous object. This ensures
// that we can visit without checking page-end boundary.
@@ -2281,43 +2171,41 @@ void MarkCompact::UpdateNonMovingPage(mirror::Object* first,
// TODO: Set kVisitNativeRoots to false once we implement concurrent
// compaction
mirror::Object* curr_obj = first;
- uint8_t* from_page = page + from_space_diff;
- uint8_t* from_page_end = from_page + gPageSize;
- bitmap->VisitMarkedRange(
- reinterpret_cast<uintptr_t>(first) + mirror::kObjectHeaderSize,
- reinterpret_cast<uintptr_t>(page + gPageSize),
- [&](mirror::Object* next_obj) {
- mirror::Object* from_obj = reinterpret_cast<mirror::Object*>(
- reinterpret_cast<uint8_t*>(curr_obj) + from_space_diff);
- if (reinterpret_cast<uint8_t*>(curr_obj) < page) {
- RefsUpdateVisitor</*kCheckBegin*/ true, /*kCheckEnd*/ false> visitor(
- this, from_obj, from_page, from_page_end);
- MemberOffset begin_offset(page - reinterpret_cast<uint8_t*>(curr_obj));
- // Native roots shouldn't be visited as they are done when this
- // object's beginning was visited in the preceding page.
- from_obj->VisitRefsForCompaction</*kFetchObjSize*/ false, /*kVisitNativeRoots*/ false>(
- visitor, begin_offset, MemberOffset(-1));
- } else {
- RefsUpdateVisitor</*kCheckBegin*/ false, /*kCheckEnd*/ false> visitor(
- this, from_obj, from_page, from_page_end);
- from_obj->VisitRefsForCompaction</*kFetchObjSize*/ false>(
- visitor, MemberOffset(0), MemberOffset(-1));
- }
- curr_obj = next_obj;
- });
+ non_moving_space_bitmap_->VisitMarkedRange(
+ reinterpret_cast<uintptr_t>(first) + mirror::kObjectHeaderSize,
+ reinterpret_cast<uintptr_t>(page + gPageSize),
+ [&](mirror::Object* next_obj) {
+ // TODO: Once non-moving space update becomes concurrent, we'll
+ // require fetching the from-space address of 'curr_obj' and then call
+ // visitor on that.
+ if (reinterpret_cast<uint8_t*>(curr_obj) < page) {
+ RefsUpdateVisitor</*kCheckBegin*/true, /*kCheckEnd*/false>
+ visitor(this, curr_obj, page, page + gPageSize);
+ MemberOffset begin_offset(page - reinterpret_cast<uint8_t*>(curr_obj));
+ // Native roots shouldn't be visited as they are done when this
+ // object's beginning was visited in the preceding page.
+ curr_obj->VisitRefsForCompaction</*kFetchObjSize*/false, /*kVisitNativeRoots*/false>(
+ visitor, begin_offset, MemberOffset(-1));
+ } else {
+ RefsUpdateVisitor</*kCheckBegin*/false, /*kCheckEnd*/false>
+ visitor(this, curr_obj, page, page + gPageSize);
+ curr_obj->VisitRefsForCompaction</*kFetchObjSize*/false>(visitor,
+ MemberOffset(0),
+ MemberOffset(-1));
+ }
+ curr_obj = next_obj;
+ });
- mirror::Object* from_obj =
- reinterpret_cast<mirror::Object*>(reinterpret_cast<uint8_t*>(curr_obj) + from_space_diff);
MemberOffset end_offset(page + gPageSize - reinterpret_cast<uint8_t*>(curr_obj));
if (reinterpret_cast<uint8_t*>(curr_obj) < page) {
- RefsUpdateVisitor</*kCheckBegin*/ true, /*kCheckEnd*/ true> visitor(
- this, from_obj, from_page, from_page_end);
- from_obj->VisitRefsForCompaction</*kFetchObjSize*/ false, /*kVisitNativeRoots*/ false>(
- visitor, MemberOffset(page - reinterpret_cast<uint8_t*>(curr_obj)), end_offset);
+ RefsUpdateVisitor</*kCheckBegin*/true, /*kCheckEnd*/true>
+ visitor(this, curr_obj, page, page + gPageSize);
+ curr_obj->VisitRefsForCompaction</*kFetchObjSize*/false, /*kVisitNativeRoots*/false>(
+ visitor, MemberOffset(page - reinterpret_cast<uint8_t*>(curr_obj)), end_offset);
} else {
- RefsUpdateVisitor</*kCheckBegin*/ false, /*kCheckEnd*/ true> visitor(
- this, from_obj, from_page, from_page_end);
- from_obj->VisitRefsForCompaction</*kFetchObjSize*/ false>(visitor, MemberOffset(0), end_offset);
+ RefsUpdateVisitor</*kCheckBegin*/false, /*kCheckEnd*/true>
+ visitor(this, curr_obj, page, page + gPageSize);
+ curr_obj->VisitRefsForCompaction</*kFetchObjSize*/false>(visitor, MemberOffset(0), end_offset);
}
}
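Both branches of UpdateNonMovingPage() implement the same clamping rule: an
object that starts before the page is visited from the page-start offset
onward, and the trailing object only up to page end, so each reference is
updated exactly once across pages. A compact sketch of the offset arithmetic:

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <utility>

    // Returns the [begin, end) member-offset range of the object spanning
    // [obj_begin, obj_begin + obj_size) to visit when updating the page
    // [page, page + page_size) (illustrative helper, not ART code).
    std::pair<size_t, size_t> OffsetsToVisit(uintptr_t obj_begin, size_t obj_size,
                                             uintptr_t page, size_t page_size) {
      size_t begin_off = page > obj_begin ? page - obj_begin : 0;
      size_t end_off = std::min(obj_size, page + page_size - obj_begin);
      return {begin_off, end_off};
    }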
@@ -2335,7 +2223,7 @@ void MarkCompact::UpdateNonMovingSpace() {
page -= gPageSize;
// null means there are no objects on the page to update references.
if (obj != nullptr) {
- UpdateNonMovingPage(obj, page, /*from_space_diff=*/0, non_moving_space_bitmap_);
+ UpdateNonMovingPage(obj, page);
}
}
}
@@ -2554,7 +2442,7 @@ class MarkCompact::ClassLoaderRootsUpdater : public ClassLoaderVisitor {
public:
explicit ClassLoaderRootsUpdater(MarkCompact* collector)
: collector_(collector),
- moving_space_begin_(collector->black_dense_end_),
+ moving_space_begin_(collector->moving_space_begin_),
moving_space_end_(collector->moving_space_end_) {}
void Visit(ObjPtr<mirror::ClassLoader> class_loader) override
@@ -2589,7 +2477,7 @@ class MarkCompact::LinearAllocPageUpdater {
public:
explicit LinearAllocPageUpdater(MarkCompact* collector)
: collector_(collector),
- moving_space_begin_(collector->black_dense_end_),
+ moving_space_begin_(collector->moving_space_begin_),
moving_space_end_(collector->moving_space_end_),
last_page_touched_(false) {}
@@ -3123,7 +3011,6 @@ void MarkCompact::ConcurrentlyProcessMovingPage(uint8_t* fault_page,
DCHECK_LT(page_idx, moving_first_objs_count_ + black_page_count_);
mirror::Object* first_obj = first_objs_moving_space_[page_idx].AsMirrorPtr();
if (first_obj == nullptr) {
- DCHECK_GT(fault_page, post_compact_end_);
// Install zero-page in the entire remaining tlab to avoid multiple ioctl invocations.
uint8_t* end = AlignDown(self->GetTlabEnd(), gPageSize);
if (fault_page < self->GetTlabStart() || fault_page >= end) {
@@ -3171,42 +3058,37 @@ void MarkCompact::ConcurrentlyProcessMovingPage(uint8_t* fault_page,
raw_state,
static_cast<uint8_t>(PageState::kMutatorProcessing),
std::memory_order_acquire)) {
- if (fault_page < black_dense_end_) {
- UpdateNonMovingPage(first_obj, fault_page, from_space_slide_diff_, moving_space_bitmap_);
- buf = fault_page + from_space_slide_diff_;
- } else {
- if (UNLIKELY(buf == nullptr)) {
- uint16_t idx = compaction_buffer_counter_.fetch_add(1, std::memory_order_relaxed);
- // The buffer-map is one page bigger as the first buffer is used by GC-thread.
- CHECK_LE(idx, kMutatorCompactionBufferCount);
- buf = compaction_buffers_map_.Begin() + idx * gPageSize;
- DCHECK(compaction_buffers_map_.HasAddress(buf));
- self->SetThreadLocalGcBuffer(buf);
- }
+ if (UNLIKELY(buf == nullptr)) {
+ uint16_t idx = compaction_buffer_counter_.fetch_add(1, std::memory_order_relaxed);
+ // The buffer-map is one page bigger as the first buffer is used by GC-thread.
+ CHECK_LE(idx, kMutatorCompactionBufferCount);
+ buf = compaction_buffers_map_.Begin() + idx * gPageSize;
+ DCHECK(compaction_buffers_map_.HasAddress(buf));
+ self->SetThreadLocalGcBuffer(buf);
+ }
- if (fault_page < post_compact_end_) {
- // The page has to be compacted.
- CompactPage(first_obj,
- pre_compact_offset_moving_space_[page_idx],
- buf,
- /*needs_memset_zero=*/true);
- } else {
- DCHECK_NE(first_obj, nullptr);
- DCHECK_GT(pre_compact_offset_moving_space_[page_idx], 0u);
- uint8_t* pre_compact_page = black_allocations_begin_ + (fault_page - post_compact_end_);
- uint32_t first_chunk_size = black_alloc_pages_first_chunk_size_[page_idx];
- mirror::Object* next_page_first_obj = nullptr;
- if (page_idx + 1 < moving_first_objs_count_ + black_page_count_) {
- next_page_first_obj = first_objs_moving_space_[page_idx + 1].AsMirrorPtr();
- }
- DCHECK(IsAlignedParam(pre_compact_page, gPageSize));
- SlideBlackPage(first_obj,
- next_page_first_obj,
- first_chunk_size,
- pre_compact_page,
- buf,
- /*needs_memset_zero=*/true);
+ if (fault_page < post_compact_end_) {
+ // The page has to be compacted.
+ CompactPage(first_obj,
+ pre_compact_offset_moving_space_[page_idx],
+ buf,
+ /*needs_memset_zero=*/true);
+ } else {
+ DCHECK_NE(first_obj, nullptr);
+ DCHECK_GT(pre_compact_offset_moving_space_[page_idx], 0u);
+ uint8_t* pre_compact_page = black_allocations_begin_ + (fault_page - post_compact_end_);
+ uint32_t first_chunk_size = black_alloc_pages_first_chunk_size_[page_idx];
+ mirror::Object* next_page_first_obj = nullptr;
+ if (page_idx + 1 < moving_first_objs_count_ + black_page_count_) {
+ next_page_first_obj = first_objs_moving_space_[page_idx + 1].AsMirrorPtr();
}
+ DCHECK(IsAlignedParam(pre_compact_page, gPageSize));
+ SlideBlackPage(first_obj,
+ next_page_first_obj,
+ first_chunk_size,
+ pre_compact_page,
+ buf,
+ /*needs_memset_zero=*/true);
}
// Nobody else would simultaneously modify this page's state so an
// atomic store is sufficient. Use 'release' order to guarantee that
@@ -4125,7 +4007,7 @@ void MarkCompact::VisitRoots(mirror::Object*** roots,
size_t count,
const RootInfo& info) {
if (compacting_) {
- uint8_t* moving_space_begin = black_dense_end_;
+ uint8_t* moving_space_begin = moving_space_begin_;
uint8_t* moving_space_end = moving_space_end_;
for (size_t i = 0; i < count; ++i) {
UpdateRoot(roots[i], moving_space_begin, moving_space_end, info);
@@ -4142,7 +4024,7 @@ void MarkCompact::VisitRoots(mirror::CompressedReference<mirror::Object>** roots
const RootInfo& info) {
// TODO: do we need to check if the root is null or not?
if (compacting_) {
- uint8_t* moving_space_begin = black_dense_end_;
+ uint8_t* moving_space_begin = moving_space_begin_;
uint8_t* moving_space_end = moving_space_end_;
for (size_t i = 0; i < count; ++i) {
UpdateRoot(roots[i], moving_space_begin, moving_space_end, info);
@@ -4160,12 +4042,8 @@ mirror::Object* MarkCompact::IsMarked(mirror::Object* obj) {
if (compacting_) {
if (is_black) {
return PostCompactBlackObjAddr(obj);
- } else if (moving_space_bitmap_->Test(obj)) {
- if (reinterpret_cast<uint8_t*>(obj) < black_dense_end_) {
- return obj;
- } else {
- return PostCompactOldObjAddr(obj);
- }
+ } else if (live_words_bitmap_->Test(obj)) {
+ return PostCompactOldObjAddr(obj);
} else {
return nullptr;
}
@@ -4225,15 +4103,11 @@ void MarkCompact::FinishPhase() {
ZeroAndReleaseMemory(compaction_buffers_map_.Begin(), compaction_buffers_map_.Size());
info_map_.MadviseDontNeedAndZero();
live_words_bitmap_->ClearBitmap();
- if (moving_space_begin_ == black_dense_end_) {
- moving_space_bitmap_->Clear();
- } else {
- DCHECK_LT(moving_space_begin_, black_dense_end_);
- DCHECK_LE(black_dense_end_, moving_space_end_);
- moving_space_bitmap_->ClearRange(reinterpret_cast<mirror::Object*>(black_dense_end_),
- reinterpret_cast<mirror::Object*>(moving_space_end_));
- }
- bump_pointer_space_->SetBlackDenseRegionSize(black_dense_end_ - moving_space_begin_);
+ // TODO: We can clear this bitmap right before compaction pause. But in that
+ // case we need to ensure that we don't assert on this bitmap afterwards.
+ // Also, we would still need to clear it here again as we may have to use the
+ // bitmap for black-allocations (see UpdateMovingSpaceBlackAllocations()).
+ moving_space_bitmap_->Clear();
if (UNLIKELY(is_zygote && IsValidFd(uffd_))) {
// This unregisters all ranges as a side-effect.
diff --git a/runtime/gc/collector/mark_compact.h b/runtime/gc/collector/mark_compact.h
index dd9fefb2a9..56a6a17196 100644
--- a/runtime/gc/collector/mark_compact.h
+++ b/runtime/gc/collector/mark_compact.h
@@ -330,10 +330,8 @@ class MarkCompact final : public GarbageCollector {
// on the fly.
void CompactionPause() REQUIRES(Locks::mutator_lock_);
// Compute offsets (in chunk_info_vec_) and other data structures required
- // during concurrent compaction. Also determines a black-dense region at the
- // beginning of the moving space which is not compacted. Returns false if
- // performing compaction isn't required.
- bool PrepareForCompaction() REQUIRES_SHARED(Locks::mutator_lock_);
+ // during concurrent compaction.
+ void PrepareForCompaction() REQUIRES_SHARED(Locks::mutator_lock_);
// Copy gPageSize live bytes starting from 'offset' (within the moving space),
// which must be within 'obj', into the gPageSize sized memory pointed by 'addr'.
@@ -357,30 +355,22 @@ class MarkCompact final : public GarbageCollector {
CompactionFn func)
REQUIRES_SHARED(Locks::mutator_lock_);
- // Update all the objects in the given non-moving page. 'first' object
+ // Update all the objects in the given non-moving space page. 'first' object
// could have started in some preceding page.
- void UpdateNonMovingPage(mirror::Object* first,
- uint8_t* page,
- ptrdiff_t from_space_diff,
- accounting::ContinuousSpaceBitmap* bitmap)
+ void UpdateNonMovingPage(mirror::Object* first, uint8_t* page)
REQUIRES_SHARED(Locks::mutator_lock_);
// Update all the references in the non-moving space.
void UpdateNonMovingSpace() REQUIRES_SHARED(Locks::mutator_lock_);
// For all the pages in non-moving space, find the first object that overlaps
// with the pages' start address, and store in first_objs_non_moving_space_ array.
- size_t InitNonMovingFirstObjects(uintptr_t begin,
- uintptr_t end,
- accounting::ContinuousSpaceBitmap* bitmap,
- ObjReference* first_objs_arr)
- REQUIRES_SHARED(Locks::mutator_lock_);
+ void InitNonMovingSpaceFirstObjects() REQUIRES_SHARED(Locks::mutator_lock_);
// In addition to the first-objects for every post-compact moving space page,
// also find offsets within those objects from where the contents should be
// copied to the page. The offsets are relative to the moving-space's
// beginning. Store the computed first-object and offset in first_objs_moving_space_
// and pre_compact_offset_moving_space_ respectively.
- void InitMovingSpaceFirstObjects(size_t vec_len, size_t to_space_page_idx)
- REQUIRES_SHARED(Locks::mutator_lock_);
+ void InitMovingSpaceFirstObjects(const size_t vec_len) REQUIRES_SHARED(Locks::mutator_lock_);
// Gather the info related to black allocations from bump-pointer space to
// enable concurrent sliding of these pages.
@@ -741,11 +731,6 @@ class MarkCompact final : public GarbageCollector {
// clamped.
uint8_t* const moving_space_begin_;
uint8_t* moving_space_end_;
- // Set to moving_space_begin_ if compacting the entire moving space.
- // Otherwise, set to a page-aligned address such that [moving_space_begin_,
- // black_dense_end_) is considered to be densely populated with reachable
- // objects and hence is not compacted.
- uint8_t* black_dense_end_;
// moving-space's end pointer at the marking pause. All allocations beyond
// this will be considered black in the current GC cycle. Aligned up to page
// size.
diff --git a/runtime/gc/space/bump_pointer_space-walk-inl.h b/runtime/gc/space/bump_pointer_space-walk-inl.h
index 38e02d5829..86af0451de 100644
--- a/runtime/gc/space/bump_pointer_space-walk-inl.h
+++ b/runtime/gc/space/bump_pointer_space-walk-inl.h
@@ -34,7 +34,6 @@ inline void BumpPointerSpace::Walk(Visitor&& visitor) {
uint8_t* pos = Begin();
uint8_t* end = End();
uint8_t* main_end = pos;
- size_t black_dense_size;
std::unique_ptr<std::vector<size_t>> block_sizes_copy;
// Internal indirection w/ NO_THREAD_SAFETY_ANALYSIS. Optimally, we'd like to have an annotation
// like
@@ -65,30 +64,6 @@ inline void BumpPointerSpace::Walk(Visitor&& visitor) {
} else {
block_sizes_copy.reset(new std::vector<size_t>(block_sizes_.begin(), block_sizes_.end()));
}
-
- black_dense_size = black_dense_region_size_;
- }
-
- // black_dense_region_size_ will be non-zero only in case of moving-space of CMC GC.
- if (black_dense_size > 0) {
- // Objects are not packed in this case, and therefore the bitmap is needed
- // to walk this part of the space.
- // Remember the last object visited using bitmap to be able to fetch its size.
- mirror::Object* last_obj = nullptr;
- auto return_obj_visit = [&](mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS {
- visitor(obj);
- last_obj = obj;
- };
- GetMarkBitmap()->VisitMarkedRange(reinterpret_cast<uintptr_t>(pos),
- reinterpret_cast<uintptr_t>(pos + black_dense_size),
- return_obj_visit);
- pos += black_dense_size;
- if (last_obj != nullptr) {
- // If the last object visited using bitmap was large enough to go past the
- // black-dense region, then we need to adjust for that to be able to visit
- // objects one after the other below.
- pos = std::max(pos, reinterpret_cast<uint8_t*>(GetNextObject(last_obj)));
- }
}
// Walk all of the objects in the main block first.
while (pos < main_end) {
@@ -106,23 +81,7 @@ inline void BumpPointerSpace::Walk(Visitor&& visitor) {
}
// Walk the other blocks (currently only TLABs).
if (block_sizes_copy != nullptr) {
- size_t iter = 0;
- size_t num_blks = block_sizes_copy->size();
- // Skip blocks which are already visited above as part of black-dense region.
- for (uint8_t* ptr = main_end; iter < num_blks; iter++) {
- size_t block_size = (*block_sizes_copy)[iter];
- ptr += block_size;
- if (ptr > pos) {
- // Adjust block-size in case 'pos' is in the middle of the block.
- if (static_cast<ssize_t>(block_size) > ptr - pos) {
- (*block_sizes_copy)[iter] -= ptr - pos;
- }
- break;
- }
- }
-
- for (; iter < num_blks; iter++) {
- size_t block_size = (*block_sizes_copy)[iter];
+ for (size_t block_size : *block_sizes_copy) {
mirror::Object* obj = reinterpret_cast<mirror::Object*>(pos);
const mirror::Object* end_obj = reinterpret_cast<const mirror::Object*>(pos + block_size);
CHECK_LE(reinterpret_cast<const uint8_t*>(end_obj), End());
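With the black-dense bitmap walk removed, every block is packed, so the walk
reduces to advancing object by object through each block. A sketch under that
assumption, with hypothetical helpers standing in for ART's object queries:

    #include <cstddef>
    #include <cstdint>

    struct Obj;                      // stand-in for mirror::Object
    bool IsAllocated(Obj* obj);      // hypothetical: false once klass is null
    size_t AlignedSizeOf(Obj* obj);  // hypothetical: size rounded up to kAlignment

    template <typename Visitor>
    void WalkBlock(uint8_t* pos, size_t block_size, Visitor&& visitor) {
      uint8_t* end = pos + block_size;
      while (pos < end) {
        Obj* obj = reinterpret_cast<Obj*>(pos);
        if (!IsAllocated(obj)) {
          break;  // rest of the block (e.g. TLAB tail) is unused
        }
        visitor(obj);
        pos += AlignedSizeOf(obj);
      }
    }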
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index ad9c68da3e..17e08357e8 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -303,12 +303,6 @@ void BumpPointerSpace::SetBlockSizes(Thread* self,
end_.store(Begin() + size, std::memory_order_relaxed);
}
-void BumpPointerSpace::SetBlackDenseRegionSize(size_t size) {
- DCHECK_ALIGNED_PARAM(size, gPageSize);
- MutexLock mu(Thread::Current(), lock_);
- black_dense_region_size_ = size;
-}
-
} // namespace space
} // namespace gc
} // namespace art
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index 7dad977c44..77beb476e6 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -193,9 +193,6 @@ class EXPORT BumpPointerSpace final : public ContinuousMemMapAllocSpace {
// The compaction algorithm should ideally compact all objects into the main
// block, thereby enabling erasing corresponding entries from here.
std::deque<size_t> block_sizes_ GUARDED_BY(lock_);
- // Size of the black-dense region that is to be walked using mark-bitmap and
- // not object-by-object.
- size_t black_dense_region_size_ GUARDED_BY(lock_) = 0;
private:
// Return the object which comes after obj, while ensuring alignment.
@@ -218,8 +215,6 @@ class EXPORT BumpPointerSpace final : public ContinuousMemMapAllocSpace {
// mutators are suspended so that upcoming TLAB allocations start with a new
// page. Adjusts the heap's bytes_allocated accordingly. Returns the aligned end.
uint8_t* AlignEnd(Thread* self, size_t alignment, Heap* heap) REQUIRES(Locks::mutator_lock_);
- // Called only by CMC GC at the end of GC.
- void SetBlackDenseRegionSize(size_t size) REQUIRES(!lock_);
friend class collector::MarkSweep;
friend class collector::MarkCompact;