-rw-r--r--  runtime/gc/collector/mark_compact.cc | 158
-rw-r--r--  runtime/gc/collector/mark_compact.h  |   4
2 files changed, 123 insertions, 39 deletions
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index 821281134e..4d220e331e 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -865,7 +865,6 @@ void MarkCompact::RunPhases() {
ReclaimPhase();
perform_compaction = PrepareForCompaction();
}
-
if (perform_compaction) {
// Compaction pause
ThreadFlipVisitor visitor(this);
@@ -880,11 +879,10 @@ void MarkCompact::RunPhases() {
} else {
if (use_generational_) {
DCHECK_IMPLIES(post_compact_end_ != nullptr, post_compact_end_ == black_allocations_begin_);
- mid_gen_end_ = black_allocations_begin_;
}
post_compact_end_ = black_allocations_begin_;
}
- FinishPhase();
+ FinishPhase(perform_compaction);
GetHeap()->PostGcVerification(this);
thread_running_gc_ = nullptr;
}
@@ -1044,9 +1042,9 @@ size_t MarkCompact::InitNonMovingFirstObjects(uintptr_t begin,
//
// 1. During marking-phase, 'mid_gen_end_' segregates young and mid generations.
// Before starting compaction, in PrepareForCompaction(), we set it to the
-// corresponding post-compact addresses, aligned down to page-size. Therefore,
-// some object's beginning portion maybe in old-gen, while the rest is in mid-gen.
-// Aligning down is essential as mid_gen_end_ becomes old_gen_end_ at the end of
+// corresponding post-compact addresses, aligned up to page-size. Therefore,
+// some object's beginning portion may be in mid-gen, while the rest is in young-gen.
+// Aligning up is essential as mid_gen_end_ becomes old_gen_end_ at the end of
// GC cycle, and the latter has to be page-aligned as old-gen pages are
// processed differently (no compaction).
//
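A minimal, self-contained sketch of the page-rounding above (illustrative names and a 4 KiB page size assumed, not ART's actual gPageSize configuration), showing why aligning the mid-gen boundary up keeps a boundary-straddling object on the mid/old-gen side:

#include <cstdint>

// Illustration only: page-rounding helpers analogous to ART's AlignUp/AlignDown.
constexpr uintptr_t kPageSize = 4096;
constexpr uintptr_t AlignUpToPage(uintptr_t addr) {
  return (addr + kPageSize - 1) & ~(kPageSize - 1);
}
constexpr uintptr_t AlignDownToPage(uintptr_t addr) {
  return addr & ~(kPageSize - 1);
}

// Suppose the first post-compact object that should start young-gen lands
// mid-page. Aligning up moves the boundary past that object's start, so an
// object already treated as promoted is never de-promoted back to young-gen.
constexpr uintptr_t kFirstYoungObj = 0x70001010;
static_assert(AlignUpToPage(kFirstYoungObj) == 0x70002000, "boundary lands after the object start");
// Aligning down (the previous behaviour) would pull the boundary back below
// the object, leaving part of it on the young-gen side.
static_assert(AlignDownToPage(kFirstYoungObj) == 0x70001000, "boundary lands before the object start");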
@@ -1072,6 +1070,7 @@ bool MarkCompact::PrepareForCompaction() {
DCHECK_ALIGNED_PARAM(vector_length_, chunk_info_per_page);
if (UNLIKELY(vector_len == 0)) {
// Nothing to compact. Entire heap is empty.
+ black_dense_end_ = mid_gen_end_ = moving_space_begin_;
return false;
}
for (size_t i = 0; i < vector_len; i++) {
@@ -1113,6 +1112,7 @@ bool MarkCompact::PrepareForCompaction() {
if (black_dense_idx == vector_len) {
// There is nothing live in young-gen.
DCHECK_EQ(old_gen_end_, black_allocations_begin_);
+ mid_gen_end_ = black_allocations_begin_;
return false;
}
InitNonMovingFirstObjects(reinterpret_cast<uintptr_t>(moving_space_begin_),
@@ -1170,6 +1170,7 @@ bool MarkCompact::PrepareForCompaction() {
DCHECK_LE(black_dense_idx, vector_len);
if (black_dense_idx == vector_len) {
// There is nothing to compact. All the in-use pages are completely full.
+ mid_gen_end_ = black_allocations_begin_;
return false;
}
InitNonMovingFirstObjects(reinterpret_cast<uintptr_t>(moving_space_begin_),
@@ -1184,7 +1185,7 @@ bool MarkCompact::PrepareForCompaction() {
non_moving_first_objs_count_ =
InitNonMovingFirstObjects(reinterpret_cast<uintptr_t>(non_moving_space_->Begin()),
reinterpret_cast<uintptr_t>(non_moving_space_->End()),
- non_moving_space_->GetLiveBitmap(),
+ non_moving_space_bitmap_,
first_objs_non_moving_space_);
// Update the vector one past the heap usage as it is required for black
// allocated objects' post-compact address computation.
@@ -1217,12 +1218,12 @@ bool MarkCompact::PrepareForCompaction() {
}
if (black_objs_slide_diff_ == 0) {
// Regardless of the gc-type, there are no pages to be compacted.
- black_dense_end_ = black_allocations_begin_;
+ mid_gen_end_ = black_dense_end_;
return false;
}
if (use_generational_) {
// Current value of mid_gen_end_ represents end of 'pre-compacted' mid-gen,
- // which was done at of previous GC. Compute, 'post-compacted' end of
+ // which was done at the end of the previous GC. Compute the 'post-compacted' end of
// mid-gen, which will be consumed by old-gen at the end of this GC cycle.
DCHECK_NE(mid_gen_end_, nullptr);
mirror::Object* first_obj = nullptr;
@@ -1244,19 +1245,23 @@ bool MarkCompact::PrepareForCompaction() {
}
// It's important to page-align mid-gen boundary. However, that means
// there could be an object overlapping that boundary. We will deal with
- // the consequences of that at different places. Aligning down is
- // preferred (over aligning up) to avoid pre-mature promotion of young
- // objects to old-gen.
- mid_gen_end_ = AlignDown(reinterpret_cast<uint8_t*>(first_obj), gPageSize);
+ // the consequences of that at different places. Aligning up is important
+ // to ensure that we don't de-promote an object from old-gen back to
+ // young-gen. Otherwise, we may skip dirtying the card for such an object if
+ // it contains native-roots to young-gen.
+ mid_gen_end_ = AlignUp(reinterpret_cast<uint8_t*>(first_obj), gPageSize);
} else {
// Young-gen is empty.
mid_gen_end_ = post_compact_end_;
}
- }
- if (old_gen_end_ < mid_gen_end_) {
- mid_to_old_promo_bit_vec_.reset(new BitVector((mid_gen_end_ - old_gen_end_) / kObjectAlignment,
- /*expandable=*/false,
- Allocator::GetCallocAllocator()));
+ DCHECK_LE(mid_gen_end_, post_compact_end_);
+ // We need this temporary bitmap only when running in generational mode.
+ if (old_gen_end_ < mid_gen_end_) {
+ mid_to_old_promo_bit_vec_.reset(
+ new BitVector((mid_gen_end_ - old_gen_end_) / kObjectAlignment,
+ /*expandable=*/false,
+ Allocator::GetCallocAllocator()));
+ }
}
// How do we handle compaction of heap portion used for allocations after the
// marking-pause?
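The bit vector allocated above reserves one bit per possible object slot in [old_gen_end_, mid_gen_end_). A rough sketch of that mapping, using std::vector<bool> in place of art::BitVector and assuming 8-byte object alignment (names here are illustrative, not the collector's API):

#include <cstdint>
#include <vector>

constexpr uintptr_t kObjAlign = 8;  // assumed object alignment

class PromoBits {
 public:
  PromoBits(uintptr_t old_gen_end, uintptr_t mid_gen_end)
      : base_(old_gen_end), bits_((mid_gen_end - old_gen_end) / kObjAlign, false) {}

  // 'addr' must satisfy old_gen_end <= addr < mid_gen_end.
  void SetPromoted(uintptr_t addr) { bits_[(addr - base_) / kObjAlign] = true; }
  bool IsPromoted(uintptr_t addr) const { return bits_[(addr - base_) / kObjAlign]; }

 private:
  uintptr_t base_;
  std::vector<bool> bits_;
};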
@@ -1458,6 +1463,9 @@ void MarkCompact::ReclaimPhase() {
// Unbind the live and mark bitmaps.
GetHeap()->UnBindBitmaps();
}
+ // After sweeping and unbinding, we will need to use the non-moving space's
+ // live-bitmap instead of the mark-bitmap.
+ non_moving_space_bitmap_ = non_moving_space_->GetLiveBitmap();
}
// We want to avoid checking for every reference if it's within the page or
@@ -3180,7 +3188,6 @@ void MarkCompact::UpdateClassTableClasses(Runtime* runtime, bool immune_class_ta
void MarkCompact::CompactionPause() {
TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
Runtime* runtime = Runtime::Current();
- non_moving_space_bitmap_ = non_moving_space_->GetLiveBitmap();
if (kIsDebugBuild) {
DCHECK_EQ(thread_running_gc_, Thread::Current());
// TODO(Simulator): Test that this should not operate on the simulated stack when the simulator
@@ -4420,16 +4427,11 @@ class MarkCompact::RefFieldsVisitor {
public:
ALWAYS_INLINE RefFieldsVisitor(MarkCompact* const mark_compact, mirror::Object* obj)
: mark_compact_(mark_compact),
- // Conceptually we only need to check for references in [mid_gen_end_,
- // moving_space_end_) range, but we enlarge (towards the beginning) it
- // because later in PrepareForCompaction() we will align-down the
- // mid-gen boundary, which means we may not promote some mid-gen objects
- // on the first page to old-gen.
- young_gen_begin_(mark_compact->old_gen_end_),
+ young_gen_begin_(mark_compact->mid_gen_end_),
young_gen_end_(mark_compact->moving_space_end_),
dirty_card_(false),
check_refs_to_young_gen_(mark_compact->use_generational_ &&
- (reinterpret_cast<uint8_t*>(obj) < mark_compact->mid_gen_end_ ||
+ (reinterpret_cast<uint8_t*>(obj) < young_gen_begin_ ||
reinterpret_cast<uint8_t*>(obj) >= young_gen_end_)) {}
bool ShouldDirtyCard() const { return dirty_card_; }
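The effect of the change above, expressed as a standalone predicate (a sketch with illustrative names; young_gen_begin now equals mid_gen_end_ rather than old_gen_end_): an object living outside the young range gets its card dirtied only if one of its references points into that range.

#include <cstdint>

// Sketch of the generational card-dirtying test performed by RefFieldsVisitor.
struct YoungRefPredicate {
  uintptr_t young_gen_begin;  // mid_gen_end_ after this change
  uintptr_t young_gen_end;    // moving_space_end_

  // Only objects that will not themselves be scanned as part of young-gen
  // (old/mid-gen or non-moving objects) need card tracking at all.
  bool ObjNeedsCheck(uintptr_t obj) const {
    return obj < young_gen_begin || obj >= young_gen_end;
  }
  // A card is dirtied when such an object holds a reference into young-gen.
  bool RefIsYoung(uintptr_t ref) const {
    return ref >= young_gen_begin && ref < young_gen_end;
  }
};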
@@ -4766,20 +4768,63 @@ void MarkCompact::DelayReferenceReferent(ObjPtr<mirror::Class> klass,
heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, ref, this);
}
-void MarkCompact::FinishPhase() {
+class MarkCompact::DetectOldToMidRefVisitor {
+ public:
+ explicit DetectOldToMidRefVisitor(mirror::Object* begin, mirror::Object* end)
+ : mid_gen_begin_(begin), mid_gen_end_(end), dirty_card_(false) {}
+
+ void ClearDirtyCard() { dirty_card_ = false; }
+ bool GetDirtyCard() const { return dirty_card_; }
+
+ ALWAYS_INLINE void operator()(mirror::Object* obj,
+ MemberOffset offset,
+ [[maybe_unused]] bool is_static) const
+ REQUIRES(Locks::heap_bitmap_lock_) REQUIRES_SHARED(Locks::mutator_lock_) {
+ CheckReference(obj->GetFieldObject<mirror::Object>(offset));
+ }
+
+ ALWAYS_INLINE void operator()([[maybe_unused]] ObjPtr<mirror::Class> klass,
+ ObjPtr<mirror::Reference> ref) const
+ REQUIRES(Locks::heap_bitmap_lock_) REQUIRES_SHARED(Locks::mutator_lock_) {
+ CheckReference(ref.Ptr());
+ }
+
+ // Native roots are already covered during marking.
+ void VisitRootIfNonNull([[maybe_unused]] mirror::CompressedReference<mirror::Object>* root) const
+ REQUIRES(Locks::heap_bitmap_lock_) REQUIRES_SHARED(Locks::mutator_lock_) {
+ UNIMPLEMENTED(FATAL);
+ }
+
+ void VisitRoot([[maybe_unused]] mirror::CompressedReference<mirror::Object>* root) const
+ REQUIRES(Locks::heap_bitmap_lock_) REQUIRES_SHARED(Locks::mutator_lock_) {
+ UNIMPLEMENTED(FATAL);
+ }
+
+ private:
+ void CheckReference(mirror::Object* ref) const {
+ dirty_card_ |= ref >= mid_gen_begin_ && ref < mid_gen_end_;
+ }
+
+ mirror::Object* mid_gen_begin_;
+ mirror::Object* mid_gen_end_;
+ mutable bool dirty_card_;
+};
+
+void MarkCompact::FinishPhase(bool performed_compaction) {
TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
GetCurrentIteration()->SetScannedBytes(bytes_scanned_);
bool is_zygote = Runtime::Current()->IsZygote();
compacting_ = false;
marking_done_ = false;
-
uint8_t* mark_bitmap_clear_end = black_dense_end_;
+ // Whether compaction is performed or not, we always set post_compact_end_
+ // before reaching here.
+ CHECK_NE(post_compact_end_, nullptr);
if (use_generational_) {
{
ReaderMutexLock mu(thread_running_gc_, *Locks::mutator_lock_);
// We need to retain class-after-object map for old-gen as that won't
// be created in next young-gc.
- //
// Find the first class which is getting promoted to old-gen.
auto iter = class_after_obj_map_.lower_bound(
ObjReference::FromMirrorPtr(reinterpret_cast<mirror::Object*>(old_gen_end_)));
@@ -4844,6 +4889,14 @@ void MarkCompact::FinishPhase() {
mid_to_old_promo_bit_vec_->CopyTo(&bitmap_begin[index],
mid_to_old_promo_bit_vec_->GetSizeOf());
mid_to_old_promo_bit_vec_.reset(nullptr);
+ } else if (!performed_compaction) {
+ // We typically only retain the mark-bitmap for the old-generation as the
+ // objects following it are expected to be contiguous. However, when
+ // compaction is not performed, we may have decided to tolerate few holes
+ // here and there. So we have to retain the bitmap for the entire
+ // 'compacted' portion of the heap, which is up to mid-gen-end.
+ DCHECK_LE(old_gen_end_, post_compact_end_);
+ mark_bitmap_clear_end = post_compact_end_;
}
// Promote all mid-gen objects to old-gen and young-gen objects to mid-gen
// for next GC cycle.
@@ -4861,6 +4914,10 @@ void MarkCompact::FinishPhase() {
}
} else {
class_after_obj_map_.clear();
+ if (!performed_compaction) {
+ DCHECK_LE(old_gen_end_, post_compact_end_);
+ mark_bitmap_clear_end = post_compact_end_;
+ }
}
// Black-dense region, which requires bitmap for object-walk, could be larger
// than old-gen. Therefore, until next GC retain the bitmap for entire
@@ -4902,16 +4959,41 @@ void MarkCompact::FinishPhase() {
WriterMutexLock mu2(thread_running_gc_, *Locks::heap_bitmap_lock_);
heap_->ClearMarkedObjects();
if (use_generational_) {
- // Clear the bits set temporarily for black allocations in non-moving
- // space in UpdateNonMovingSpaceBlackAllocations() so that objects are
- // considered for GC in next cycle.
- accounting::ObjectStack* stack = heap_->GetAllocationStack();
- const StackReference<mirror::Object>* limit = stack->End();
- for (StackReference<mirror::Object>* it = stack->Begin(); it != limit; ++it) {
- mirror::Object* obj = it->AsMirrorPtr();
- if (obj != nullptr && non_moving_space_bitmap_->HasAddress(obj)) {
- non_moving_space_bitmap_->Clear(obj);
+ if (performed_compaction) {
+ // Clear the bits set temporarily for black allocations in non-moving
+ // space in UpdateNonMovingSpaceBlackAllocations(), which is called when
+ // we perform compaction, so that objects are considered for GC in the next cycle.
+ accounting::ObjectStack* stack = heap_->GetAllocationStack();
+ const StackReference<mirror::Object>* limit = stack->End();
+ for (StackReference<mirror::Object>* it = stack->Begin(); it != limit; ++it) {
+ mirror::Object* obj = it->AsMirrorPtr();
+ if (obj != nullptr && non_moving_space_bitmap_->HasAddress(obj)) {
+ non_moving_space_bitmap_->Clear(obj);
+ }
}
+ } else {
+ // Since we didn't perform compaction, we need to identify old objects
+ // referring to the mid-gen.
+ DetectOldToMidRefVisitor visitor(reinterpret_cast<mirror::Object*>(old_gen_end_),
+ reinterpret_cast<mirror::Object*>(mid_gen_end_));
+ accounting::CardTable* card_table = heap_->GetCardTable();
+ auto obj_visitor = [card_table, &visitor](mirror::Object* obj) {
+ uint8_t* card = card_table->CardFromAddr(obj);
+ if (*card == accounting::CardTable::kCardDirty) {
+ return;
+ }
+ visitor.ClearDirtyCard();
+ // Native-roots are captured during marking and the corresponding cards are already
+ // dirtied above.
+ obj->VisitReferences</*kVisitNativeRoots=*/false>(visitor, visitor);
+ if (visitor.GetDirtyCard()) {
+ *card = accounting::CardTable::kCardDirty;
+ }
+ };
+ moving_space_bitmap_->VisitMarkedRange(reinterpret_cast<uintptr_t>(moving_space_begin_),
+ reinterpret_cast<uintptr_t>(old_gen_end_),
+ obj_visitor);
+ non_moving_space_bitmap_->VisitAllMarked(obj_visitor);
}
}
}
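To make the intent of the no-compaction path concrete: old-gen objects that still reference the newly promoted mid-gen must end up on dirty cards, or the next young GC would never re-scan them. Below is a minimal sketch of that card-dirtying pass; the card geometry and container types are placeholders, not ART's accounting::CardTable.

#include <cstddef>
#include <cstdint>
#include <vector>

constexpr size_t kCardSize = 256;   // assumed card size, illustration only
constexpr uint8_t kCardClean = 0x00;
constexpr uint8_t kCardDirty = 0x70;

struct MiniCardTable {
  uintptr_t heap_begin;
  std::vector<uint8_t> cards;

  MiniCardTable(uintptr_t begin, size_t heap_bytes)
      : heap_begin(begin), cards(heap_bytes / kCardSize, kCardClean) {}

  uint8_t& CardFor(uintptr_t addr) { return cards[(addr - heap_begin) / kCardSize]; }
};

// Mirror of what DetectOldToMidRefVisitor does per marked object: skip objects
// whose card is already dirty, otherwise dirty the card iff some reference
// field points into [mid_begin, mid_end).
inline void DirtyCardIfRefersToMidGen(MiniCardTable& ct,
                                      uintptr_t obj,
                                      const std::vector<uintptr_t>& ref_fields,
                                      uintptr_t mid_begin,
                                      uintptr_t mid_end) {
  uint8_t& card = ct.CardFor(obj);
  if (card == kCardDirty) {
    return;  // already dirty; the next young GC will scan this card anyway
  }
  for (uintptr_t ref : ref_fields) {
    if (ref >= mid_begin && ref < mid_end) {
      card = kCardDirty;
      return;
    }
  }
}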
diff --git a/runtime/gc/collector/mark_compact.h b/runtime/gc/collector/mark_compact.h
index 0f15f52b58..8392bfec96 100644
--- a/runtime/gc/collector/mark_compact.h
+++ b/runtime/gc/collector/mark_compact.h
@@ -330,7 +330,8 @@ class MarkCompact final : public GarbageCollector {
// mirror::Class.
bool IsValidObject(mirror::Object* obj) const REQUIRES_SHARED(Locks::mutator_lock_);
void InitializePhase();
- void FinishPhase() REQUIRES(!Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !lock_);
+ void FinishPhase(bool performed_compaction)
+ REQUIRES(!Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !lock_);
void MarkingPhase() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_);
void CompactionPhase() REQUIRES_SHARED(Locks::mutator_lock_);
@@ -945,6 +946,7 @@ class MarkCompact final : public GarbageCollector {
class ClassLoaderRootsUpdater;
class LinearAllocPageUpdater;
class ImmuneSpaceUpdateObjVisitor;
+ class DetectOldToMidRefVisitor;
DISALLOW_IMPLICIT_CONSTRUCTORS(MarkCompact);
};