2-phase full-heap CC
Introduce a phase before enabling the read barrier during full-heap GC
cycles:
1) To compute the latest per-region live-bytes info for an accurate
"should be evacuated" decision for every region.
2) To mark most of the live objects before enabling the read barrier so that
graying them can be avoided, thereby reducing the number of read-barrier
slow-path invocations.
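
Roughly, the resulting ordering in ConcurrentCopying::RunPhases() is
(simplified sketch; locking, verification and other steps elided, see the
diff below):

  InitializePhase();
  if (kEnableGenerationalConcurrentCopyingCollection
      && !young_gen_ && !force_evacuate_all_) {
    MarkingPhase();   // new phase: compute per-region live bytes and mark
                      // most live objects before the read barrier is enabled
  }
  FlipThreadRoots();  // pause; mutators start using the read barrier
  CopyingPhase();     // previously named MarkingPhase()
  ReclaimPhase();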
Test: art/test/testrunner/testrunner.py --target
Bug: 112720851
Test: art/test/testrunner/testrunner.py --64
Change-Id: Ief4e92dba27aded153e600eeffc072b66d5917b5
diff --git a/runtime/gc/collector/concurrent_copying-inl.h b/runtime/gc/collector/concurrent_copying-inl.h
index 3160422..1014c0e 100644
--- a/runtime/gc/collector/concurrent_copying-inl.h
+++ b/runtime/gc/collector/concurrent_copying-inl.h
@@ -37,14 +37,15 @@
mirror::Object* ref,
accounting::ContinuousSpaceBitmap* bitmap) {
if (kEnableGenerationalConcurrentCopyingCollection
- && young_gen_
&& !done_scanning_.load(std::memory_order_acquire)) {
- // Everything in the unevac space should be marked for generational CC except for large objects.
- DCHECK(region_space_bitmap_->Test(ref) || region_space_->IsLargeObject(ref)) << ref << " "
+ // Everything in the unevac space should be marked for young generation CC,
+ // except for large objects.
+ DCHECK(!young_gen_ || region_space_bitmap_->Test(ref) || region_space_->IsLargeObject(ref))
+ << ref << " "
<< ref->GetClass<kVerifyNone, kWithoutReadBarrier>()->PrettyClass();
- // Since the mark bitmap is still filled in from last GC, we can not use that or else the
- // mutator may see references to the from space. Instead, use the baker pointer itself as
- // the mark bit.
+ // Since the mark bitmap is still filled in from the last GC (or from the marking phase of
+ // 2-phase CC), we cannot use that or else the mutator may see references to the from space.
+ // Instead, use the baker pointer itself as the mark bit.
if (ref->AtomicSetReadBarrierState(ReadBarrier::NonGrayState(), ReadBarrier::GrayState())) {
// TODO: We don't actually need to scan this object later, we just need to clear the gray
// bit.
@@ -244,7 +245,7 @@
DCHECK(region_space_->IsInUnevacFromSpace(from_ref));
if (kUseBakerReadBarrier && from_ref->GetReadBarrierStateAcquire() == ReadBarrier::GrayState()) {
return true;
- } else if (!(kEnableGenerationalConcurrentCopyingCollection && young_gen_)
+ } else if (!kEnableGenerationalConcurrentCopyingCollection
|| done_scanning_.load(std::memory_order_acquire)) {
// If the card table scanning is not finished yet, then only read-barrier
// state should be checked. Checking the mark bitmap is unreliable as there
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 53aa9ba..861f0d3 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -188,6 +188,11 @@
{
ReaderMutexLock mu(self, *Locks::mutator_lock_);
InitializePhase();
+ // In case of forced evacuation, all regions are evacuated and hence there is
+ // no need to compute live_bytes.
+ if (kEnableGenerationalConcurrentCopyingCollection && !young_gen_ && !force_evacuate_all_) {
+ MarkingPhase();
+ }
}
if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects) {
// Switch to read barrier mark entrypoints before we gray the objects. This is required in case
@@ -201,7 +206,7 @@
FlipThreadRoots();
{
ReaderMutexLock mu(self, *Locks::mutator_lock_);
- MarkingPhase();
+ CopyingPhase();
}
// Verify no from space refs. This causes a pause.
if (kEnableNoFromSpaceRefsVerification) {
@@ -299,12 +304,22 @@
DCHECK_EQ(space->GetGcRetentionPolicy(), space::kGcRetentionPolicyAlwaysCollect);
space->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
}
- // Age all of the cards for the region space so that we know which evac regions to scan.
- Runtime::Current()->GetHeap()->GetCardTable()->ModifyCardsAtomic(
- space->Begin(),
- space->End(),
- AgeCardVisitor(),
- VoidFunctor());
+ if (young_gen_) {
+ // Age all of the cards for the region space so that we know which evac regions to scan.
+ heap_->GetCardTable()->ModifyCardsAtomic(space->Begin(),
+ space->End(),
+ AgeCardVisitor(),
+ VoidFunctor());
+ } else {
+ // In a full-heap GC cycle, the card table for the region space and the
+ // non-moving space can be cleared, because this cycle only needs to
+ // capture writes that happen during its own marking phase, in order to
+ // catch objects that would otherwise skip marking due to heap mutation.
+ // Furthermore, if the next GC is a young-gen cycle, then it only needs
+ // writes to be captured after the thread-flip of this GC cycle, as that
+ // is when the young generation for the next cycle starts getting populated.
+ heap_->GetCardTable()->ClearCardRange(space->Begin(), space->Limit());
+ }
} else {
if (space == region_space_) {
// It is OK to clear the bitmap with mutators running since the only place it is read is
@@ -381,6 +396,7 @@
if (kEnableGenerationalConcurrentCopyingCollection && !young_gen_) {
region_space_bitmap_->Clear();
}
+ mark_stack_mode_.store(ConcurrentCopying::kMarkStackModeThreadLocal, std::memory_order_relaxed);
// Mark all of the zygote large objects without graying them.
MarkZygoteLargeObjects();
}
@@ -471,7 +487,7 @@
TimingLogger::ScopedTiming split("(Paused)FlipCallback", cc->GetTimings());
// Note: self is not necessarily equal to thread since thread may be suspended.
Thread* self = Thread::Current();
- if (kVerifyNoMissingCardMarks) {
+ if (kVerifyNoMissingCardMarks && cc->young_gen_) {
cc->VerifyNoMissingCardMarks();
}
CHECK_EQ(thread, self);
@@ -485,9 +501,11 @@
}
{
TimingLogger::ScopedTiming split2("(Paused)SetFromSpace", cc->GetTimings());
- // Only change live bytes for full CC.
+ // Only change live bytes for 1-phase full heap CC.
cc->region_space_->SetFromSpace(
- cc->rb_table_, evac_mode, /*clear_live_bytes=*/ !cc->young_gen_);
+ cc->rb_table_,
+ evac_mode,
+ /*clear_live_bytes=*/ !kEnableGenerationalConcurrentCopyingCollection);
}
cc->SwapStacks();
if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
@@ -496,9 +514,7 @@
cc->from_space_num_bytes_at_first_pause_ = cc->region_space_->GetBytesAllocated();
}
cc->is_marking_ = true;
- cc->mark_stack_mode_.store(ConcurrentCopying::kMarkStackModeThreadLocal,
- std::memory_order_relaxed);
- if (kIsDebugBuild && !cc->young_gen_) {
+ if (kIsDebugBuild && !kEnableGenerationalConcurrentCopyingCollection) {
cc->region_space_->AssertAllRegionLiveBytesZeroOrCleared();
}
if (UNLIKELY(Runtime::Current()->IsActiveTransaction())) {
@@ -860,6 +876,460 @@
ConcurrentCopying* const collector_;
};
+template <bool kAtomicTestAndSet>
+class ConcurrentCopying::CaptureRootsForMarkingVisitor : public RootVisitor {
+ public:
+ explicit CaptureRootsForMarkingVisitor(ConcurrentCopying* cc, Thread* self)
+ : collector_(cc), self_(self) {}
+
+ void VisitRoots(mirror::Object*** roots,
+ size_t count,
+ const RootInfo& info ATTRIBUTE_UNUSED) override
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ for (size_t i = 0; i < count; ++i) {
+ mirror::Object** root = roots[i];
+ mirror::Object* ref = *root;
+ if (ref != nullptr && !collector_->TestAndSetMarkBitForRef<kAtomicTestAndSet>(ref)) {
+ collector_->PushOntoMarkStack(self_, ref);
+ }
+ }
+ }
+
+ void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
+ size_t count,
+ const RootInfo& info ATTRIBUTE_UNUSED) override
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ for (size_t i = 0; i < count; ++i) {
+ mirror::CompressedReference<mirror::Object>* const root = roots[i];
+ if (!root->IsNull()) {
+ mirror::Object* ref = root->AsMirrorPtr();
+ if (!collector_->TestAndSetMarkBitForRef<kAtomicTestAndSet>(ref)) {
+ collector_->PushOntoMarkStack(self_, ref);
+ }
+ }
+ }
+ }
+
+ private:
+ ConcurrentCopying* const collector_;
+ Thread* const self_;
+};
+
+class ConcurrentCopying::RevokeThreadLocalMarkStackCheckpoint : public Closure {
+ public:
+ RevokeThreadLocalMarkStackCheckpoint(ConcurrentCopying* concurrent_copying,
+ bool disable_weak_ref_access)
+ : concurrent_copying_(concurrent_copying),
+ disable_weak_ref_access_(disable_weak_ref_access) {
+ }
+
+ void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
+ // Note: self is not necessarily equal to thread since thread may be suspended.
+ Thread* const self = Thread::Current();
+ CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
+ << thread->GetState() << " thread " << thread << " self " << self;
+ // Revoke thread local mark stacks.
+ accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
+ if (tl_mark_stack != nullptr) {
+ MutexLock mu(self, concurrent_copying_->mark_stack_lock_);
+ concurrent_copying_->revoked_mark_stacks_.push_back(tl_mark_stack);
+ thread->SetThreadLocalMarkStack(nullptr);
+ }
+ // Disable weak ref access.
+ if (disable_weak_ref_access_) {
+ thread->SetWeakRefAccessEnabled(false);
+ }
+ // If thread is a running mutator, then act on behalf of the garbage collector.
+ // See the code in ThreadList::RunCheckpoint.
+ concurrent_copying_->GetBarrier().Pass(self);
+ }
+
+ protected:
+ ConcurrentCopying* const concurrent_copying_;
+
+ private:
+ const bool disable_weak_ref_access_;
+};
+
+class ConcurrentCopying::CaptureThreadRootsForMarkingAndCheckpoint :
+ public RevokeThreadLocalMarkStackCheckpoint {
+ public:
+ explicit CaptureThreadRootsForMarkingAndCheckpoint(ConcurrentCopying* cc) :
+ RevokeThreadLocalMarkStackCheckpoint(cc, /* disable_weak_ref_access */ false) {}
+
+ void Run(Thread* thread) override
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ Thread* const self = Thread::Current();
+ ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
+ // We can use the non-CAS VisitRoots functions below because we update thread-local GC roots
+ // only.
+ CaptureRootsForMarkingVisitor</*kAtomicTestAndSet*/ true> visitor(concurrent_copying_, self);
+ thread->VisitRoots(&visitor, kVisitRootFlagAllRoots);
+ // Barrier handling is done in the base class' Run() below.
+ RevokeThreadLocalMarkStackCheckpoint::Run(thread);
+ }
+};
+
+void ConcurrentCopying::CaptureThreadRootsForMarking() {
+ TimingLogger::ScopedTiming split("CaptureThreadRootsForMarking", GetTimings());
+ if (kVerboseMode) {
+ LOG(INFO) << "time=" << region_space_->Time();
+ region_space_->DumpNonFreeRegions(LOG_STREAM(INFO));
+ }
+ Thread* const self = Thread::Current();
+ CaptureThreadRootsForMarkingAndCheckpoint check_point(this);
+ ThreadList* thread_list = Runtime::Current()->GetThreadList();
+ gc_barrier_->Init(self, 0);
+ size_t barrier_count = thread_list->RunCheckpoint(&check_point, /* callback */ nullptr);
+ // If there are no threads to wait for, which implies that all the checkpoint functions have
+ // finished, then there is no need to release the mutator lock.
+ if (barrier_count == 0) {
+ return;
+ }
+ Locks::mutator_lock_->SharedUnlock(self);
+ {
+ ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
+ gc_barrier_->Increment(self, barrier_count);
+ }
+ Locks::mutator_lock_->SharedLock(self);
+ if (kVerboseMode) {
+ LOG(INFO) << "time=" << region_space_->Time();
+ region_space_->DumpNonFreeRegions(LOG_STREAM(INFO));
+ LOG(INFO) << "GC end of CaptureThreadRootsForMarking";
+ }
+}
+
+// Used to scan ref fields of an object.
+template <bool kHandleInterRegionRefs>
+class ConcurrentCopying::ComputeLiveBytesAndMarkRefFieldsVisitor {
+ public:
+ explicit ComputeLiveBytesAndMarkRefFieldsVisitor(ConcurrentCopying* collector,
+ size_t obj_region_idx)
+ : collector_(collector),
+ obj_region_idx_(obj_region_idx),
+ contains_inter_region_idx_(false) {}
+
+ void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
+ ALWAYS_INLINE
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
+ DCHECK_EQ(collector_->RegionSpace()->RegionIdxForRef(obj), obj_region_idx_);
+ DCHECK(kHandleInterRegionRefs || collector_->immune_spaces_.ContainsObject(obj));
+ CheckReference(obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(offset));
+ }
+
+ void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
+ REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
+ DCHECK(klass->IsTypeOfReferenceClass());
+ // If the referent is not null, then we must re-visit the object during the
+ // copying phase to enqueue it for delayed processing and to set its
+ // read-barrier state to gray, so that a call to GetReferent() triggers
+ // the read barrier. We use the same data structure that is used to remember
+ // objects with inter-region refs for this purpose too.
+ if (kHandleInterRegionRefs
+ && !contains_inter_region_idx_
+ && ref->AsReference()->GetReferent<kWithoutReadBarrier>() != nullptr) {
+ contains_inter_region_idx_ = true;
+ }
+ }
+
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+ ALWAYS_INLINE
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (!root->IsNull()) {
+ VisitRoot(root);
+ }
+ }
+
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ ALWAYS_INLINE
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ CheckReference(root->AsMirrorPtr());
+ }
+
+ bool ContainsInterRegionRefs() const ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
+ return contains_inter_region_idx_;
+ }
+
+ private:
+ void CheckReference(mirror::Object* ref) const
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (ref == nullptr) {
+ // Nothing to do.
+ return;
+ }
+ if (!collector_->TestAndSetMarkBitForRef(ref)) {
+ collector_->PushOntoLocalMarkStack(ref);
+ }
+ if (kHandleInterRegionRefs && !contains_inter_region_idx_) {
+ size_t ref_region_idx = collector_->RegionSpace()->RegionIdxForRef(ref);
+ // If a region-space object refers to an outside object, we will have a
+ // mismatch of region idx, but the object need not be re-visited in
+ // copying phase.
+ if (ref_region_idx != static_cast<size_t>(-1) && obj_region_idx_ != ref_region_idx) {
+ contains_inter_region_idx_ = true;
+ }
+ }
+ }
+
+ ConcurrentCopying* const collector_;
+ const size_t obj_region_idx_;
+ mutable bool contains_inter_region_idx_;
+};
+
+void ConcurrentCopying::AddLiveBytesAndScanRef(mirror::Object* ref) {
+ DCHECK(ref != nullptr);
+ DCHECK(!immune_spaces_.ContainsObject(ref));
+ DCHECK(TestMarkBitmapForRef(ref));
+ size_t obj_region_idx = static_cast<size_t>(-1);
+ if (LIKELY(region_space_->HasAddress(ref))) {
+ obj_region_idx = region_space_->RegionIdxForRefUnchecked(ref);
+ // Add live bytes to the corresponding region
+ if (!region_space_->IsRegionNewlyAllocated(obj_region_idx)) {
+ // Newly allocated regions are always chosen for evacuation, so there is no need
+ // to update live_bytes_.
+ size_t obj_size = ref->SizeOf<kDefaultVerifyFlags>();
+ size_t alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
+ region_space_->AddLiveBytes(ref, alloc_size);
+ }
+ }
+ ComputeLiveBytesAndMarkRefFieldsVisitor</*kHandleInterRegionRefs*/ true>
+ visitor(this, obj_region_idx);
+ ref->VisitReferences</*kVisitNativeRoots=*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
+ visitor, visitor);
+ // Mark the corresponding card dirty if the object contains any
+ // inter-region reference.
+ if (visitor.ContainsInterRegionRefs()) {
+ heap_->GetCardTable()->MarkCard(ref);
+ }
+}
+
+template <bool kAtomic>
+bool ConcurrentCopying::TestAndSetMarkBitForRef(mirror::Object* ref) {
+ accounting::ContinuousSpaceBitmap* bitmap = nullptr;
+ accounting::LargeObjectBitmap* los_bitmap = nullptr;
+ if (LIKELY(region_space_->HasAddress(ref))) {
+ bitmap = region_space_bitmap_;
+ } else if (heap_->GetNonMovingSpace()->HasAddress(ref)) {
+ bitmap = heap_->GetNonMovingSpace()->GetMarkBitmap();
+ } else if (immune_spaces_.ContainsObject(ref)) {
+ // References to immune space objects are always live.
+ DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(ref)->Test(ref));
+ return true;
+ } else {
+ // Should be a large object. Must be page aligned and the LOS must exist.
+ if (kIsDebugBuild
+ && (!IsAligned<kPageSize>(ref) || heap_->GetLargeObjectsSpace() == nullptr)) {
+ // It must be heap corruption. Remove memory protection and dump data.
+ region_space_->Unprotect();
+ heap_->GetVerification()->LogHeapCorruption(/* obj */ nullptr,
+ MemberOffset(0),
+ ref,
+ /* fatal */ true);
+ }
+ los_bitmap = heap_->GetLargeObjectsSpace()->GetMarkBitmap();
+ }
+ if (kAtomic) {
+ return (bitmap != nullptr) ? bitmap->AtomicTestAndSet(ref) : los_bitmap->AtomicTestAndSet(ref);
+ } else {
+ return (bitmap != nullptr) ? bitmap->Set(ref) : los_bitmap->Set(ref);
+ }
+}
+
+bool ConcurrentCopying::TestMarkBitmapForRef(mirror::Object* ref) {
+ if (LIKELY(region_space_->HasAddress(ref))) {
+ return region_space_bitmap_->Test(ref);
+ } else if (heap_->GetNonMovingSpace()->HasAddress(ref)) {
+ return heap_->GetNonMovingSpace()->GetMarkBitmap()->Test(ref);
+ } else if (immune_spaces_.ContainsObject(ref)) {
+ // References to immune space objects are always live.
+ DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(ref)->Test(ref));
+ return true;
+ } else {
+ // Should be a large object. Must be page aligned and the LOS must exist.
+ if (kIsDebugBuild
+ && (!IsAligned<kPageSize>(ref) || heap_->GetLargeObjectsSpace() == nullptr)) {
+ // It must be heap corruption. Remove memory protection and dump data.
+ region_space_->Unprotect();
+ heap_->GetVerification()->LogHeapCorruption(/* obj */ nullptr,
+ MemberOffset(0),
+ ref,
+ /* fatal */ true);
+ }
+ return heap_->GetLargeObjectsSpace()->GetMarkBitmap()->Test(ref);
+ }
+}
+
+void ConcurrentCopying::PushOntoLocalMarkStack(mirror::Object* ref) {
+ if (kIsDebugBuild) {
+ Thread* self = Thread::Current();
+ DCHECK_EQ(thread_running_gc_, self);
+ DCHECK(self->GetThreadLocalMarkStack() == nullptr);
+ }
+ DCHECK_EQ(mark_stack_mode_.load(std::memory_order_relaxed), kMarkStackModeThreadLocal);
+ gc_mark_stack_->PushBack(ref);
+}
+
+void ConcurrentCopying::ProcessMarkStackForMarkingAndComputeLiveBytes() {
+ // Process thread-local mark stacks containing thread roots.
+ ProcessThreadLocalMarkStacks(/* disable_weak_ref_access */ false,
+ /* checkpoint_callback */ nullptr,
+ [this] (mirror::Object* ref)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ AddLiveBytesAndScanRef(ref);
+ });
+
+ while (!gc_mark_stack_->IsEmpty()) {
+ mirror::Object* ref = gc_mark_stack_->PopBack();
+ AddLiveBytesAndScanRef(ref);
+ }
+}
+
+class ConcurrentCopying::ImmuneSpaceCaptureRefsVisitor {
+ public:
+ explicit ImmuneSpaceCaptureRefsVisitor(ConcurrentCopying* cc) : collector_(cc) {}
+
+ ALWAYS_INLINE void operator()(mirror::Object* obj) const REQUIRES_SHARED(Locks::mutator_lock_) {
+ ComputeLiveBytesAndMarkRefFieldsVisitor</*kHandleInterRegionRefs*/ false>
+ visitor(collector_, /*obj_region_idx*/ static_cast<size_t>(-1));
+ obj->VisitReferences</*kVisitNativeRoots=*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
+ visitor, visitor);
+ }
+
+ static void Callback(mirror::Object* obj, void* arg) REQUIRES_SHARED(Locks::mutator_lock_) {
+ reinterpret_cast<ImmuneSpaceCaptureRefsVisitor*>(arg)->operator()(obj);
+ }
+
+ private:
+ ConcurrentCopying* const collector_;
+};
+
+/* Invariants for two-phase CC
+ * ===========================
+ * A) Definitions
+ * ---------------
+ * 1) Black: marked in bitmap, rb_state is non-gray, and not in mark stack
+ * 2) Black-clean: marked in bitmap, and corresponding card is clean/aged
+ * 3) Black-dirty: marked in bitmap, and corresponding card is dirty
+ * 4) Gray: marked in bitmap, and exists in mark stack
+ * 5) Gray-dirty: marked in bitmap, rb_state is gray, corresponding card is
+ * dirty, and exists in mark stack
+ * 6) White: unmarked in bitmap, rb_state is non-gray, and not in mark stack
+ *
+ * B) Before marking phase
+ * -----------------------
+ * 1) All objects are white
+ * 2) Cards are either clean or aged (cannot be asserted without a STW pause)
+ * 3) Mark bitmap is cleared
+ * 4) Mark stack is empty
+ *
+ * C) During marking phase
+ * ------------------------
+ * 1) If a black object holds an inter-region or white reference, then its
+ * corresponding card is dirty. In other words, it changes from being
+ * black-clean to black-dirty
+ * 2) No black-clean object points to a white object
+ *
+ * D) After marking phase
+ * -----------------------
+ * 1) There are no gray objects
+ * 2) All newly allocated objects are in from space
+ * 3) No white object can be reachable, directly or otherwise, from a
+ * black-clean object
+ *
+ * E) During copying phase
+ * ------------------------
+ * 1) Mutators cannot observe white and black-dirty objects
+ * 2) New allocations are in to-space (newly allocated regions are part of to-space)
+ * 3) An object in mark stack must have its rb_state = Gray
+ *
+ * F) During card table scan
+ * --------------------------
+ * 1) Referents corresponding to root references are gray or in to-space
+ * 2) Every path from an object that is read or written by a mutator during
+ * this period to a black-dirty object goes through some gray object.
+ * Mutators preserve this by graying black objects as needed during this
+ * period. This ensures that a mutator never encounters a black-dirty object.
+ *
+ * G) After card table scan
+ * ------------------------
+ * 1) There are no black-dirty objects
+ * 2) Referents corresponding to root references are gray, black-clean or in
+ * to-space
+ *
+ * H) After copying phase
+ * -----------------------
+ * 1) Mark stack is empty
+ * 2) No references into evacuated from-space
+ * 3) No reference to an object which is unmarked and is also not in newly
+ * allocated region. In other words, no reference to white objects.
+*/
+
+void ConcurrentCopying::MarkingPhase() {
+ TimingLogger::ScopedTiming split("MarkingPhase", GetTimings());
+ if (kVerboseMode) {
+ LOG(INFO) << "GC MarkingPhase";
+ }
+ accounting::CardTable* const card_table = heap_->GetCardTable();
+ Thread* const self = Thread::Current();
+ // Clear live_bytes_ of every non-free region, except the ones that are newly
+ // allocated.
+ region_space_->SetAllRegionLiveBytesZero();
+ if (kIsDebugBuild) {
+ region_space_->AssertAllRegionLiveBytesZeroOrCleared();
+ }
+ // Scan immune spaces
+ {
+ TimingLogger::ScopedTiming split2("ScanImmuneSpaces", GetTimings());
+ for (auto& space : immune_spaces_.GetSpaces()) {
+ DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
+ accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
+ accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
+ ImmuneSpaceCaptureRefsVisitor visitor(this);
+ if (table != nullptr) {
+ table->VisitObjects(ImmuneSpaceCaptureRefsVisitor::Callback, &visitor);
+ } else {
+ WriterMutexLock rmu(Thread::Current(), *Locks::heap_bitmap_lock_);
+ card_table->Scan<false>(
+ live_bitmap,
+ space->Begin(),
+ space->Limit(),
+ visitor,
+ accounting::CardTable::kCardDirty - 1);
+ }
+ }
+ }
+ // Scan runtime roots
+ {
+ TimingLogger::ScopedTiming split2("VisitConcurrentRoots", GetTimings());
+ CaptureRootsForMarkingVisitor visitor(this, self);
+ Runtime::Current()->VisitConcurrentRoots(&visitor, kVisitRootFlagAllRoots);
+ }
+ {
+ // TODO: don't visit the transaction roots if it's not active.
+ TimingLogger::ScopedTiming split2("VisitNonThreadRoots", GetTimings());
+ CaptureRootsForMarkingVisitor visitor(this, self);
+ Runtime::Current()->VisitNonThreadRoots(&visitor);
+ }
+ // Capture thread roots
+ CaptureThreadRootsForMarking();
+ // Process mark stack
+ ProcessMarkStackForMarkingAndComputeLiveBytes();
+
+ // Age the cards.
+ for (space::ContinuousSpace* space : GetHeap()->GetContinuousSpaces()) {
+ if (space->IsImageSpace() || space->IsZygoteSpace()) {
+ // Image and zygote spaces are already handled since we gray the objects in the pause.
+ continue;
+ }
+ card_table->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(), VoidFunctor());
+ }
+
+ if (kVerboseMode) {
+ LOG(INFO) << "GC end of MarkingPhase";
+ }
+}
+
template <bool kNoUnEvac>
void ConcurrentCopying::ScanDirtyObject(mirror::Object* obj) {
Scan<kNoUnEvac>(obj);
@@ -876,12 +1346,13 @@
}
// Concurrently mark roots that are guarded by read barriers and process the mark stack.
-void ConcurrentCopying::MarkingPhase() {
- TimingLogger::ScopedTiming split("MarkingPhase", GetTimings());
+void ConcurrentCopying::CopyingPhase() {
+ TimingLogger::ScopedTiming split("CopyingPhase", GetTimings());
if (kVerboseMode) {
- LOG(INFO) << "GC MarkingPhase";
+ LOG(INFO) << "GC CopyingPhase";
}
Thread* self = Thread::Current();
+ accounting::CardTable* const card_table = heap_->GetCardTable();
if (kIsDebugBuild) {
MutexLock mu(self, *Locks::thread_list_lock_);
CHECK(weak_ref_access_enabled_);
@@ -894,7 +1365,7 @@
if (kUseBakerReadBarrier) {
gc_grays_immune_objects_ = false;
}
- if (kEnableGenerationalConcurrentCopyingCollection && young_gen_) {
+ if (kEnableGenerationalConcurrentCopyingCollection) {
if (kVerboseMode) {
LOG(INFO) << "GC ScanCardsForSpace";
}
@@ -912,34 +1383,45 @@
continue;
}
// Scan all of the objects on dirty cards in unevac from space, and non moving space. These
- // are from previous GCs and may reference things in the from space.
+ // are from previous GCs (or from marking phase of 2-phase full GC) and may reference things
+ // in the from space.
//
// Note that we do not need to process the large-object space (the only discontinuous space)
// as it contains only large string objects and large primitive array objects, that have no
// reference to other objects, except their class. There is no need to scan these large
// objects, as the String class and the primitive array classes are expected to never move
- // during a minor (young-generation) collection:
+ // during a collection:
// - In the case where we run with a boot image, these classes are part of the image space,
// which is an immune space.
// - In the case where we run without a boot image, these classes are allocated in the
// non-moving space (see art::ClassLinker::InitWithoutImage).
- Runtime::Current()->GetHeap()->GetCardTable()->Scan<false>(
+ card_table->Scan<false>(
space->GetMarkBitmap(),
space->Begin(),
space->End(),
[this, space](mirror::Object* obj)
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_) {
- // Don't push or gray unevac refs.
- if (kIsDebugBuild && space == region_space_) {
- // We may get unevac large objects.
- if (!region_space_->IsInUnevacFromSpace(obj)) {
- CHECK(region_space_bitmap_->Test(obj));
- region_space_->DumpRegionForObject(LOG_STREAM(FATAL_WITHOUT_ABORT), obj);
- LOG(FATAL) << "Scanning " << obj << " not in unevac space";
+ // TODO: This code may be refactored to avoid scanning object while
+ // done_scanning_ is false by setting rb_state to gray, and pushing the
+ // object on mark stack. However, it will also require clearing the
+ // corresponding mark-bit and, for region space objects,
+ // subtracting the object's size from the corresponding region's
+ // live_bytes.
+ if (young_gen_) {
+ // Don't push or gray unevac refs.
+ if (kIsDebugBuild && space == region_space_) {
+ // We may get unevac large objects.
+ if (!region_space_->IsInUnevacFromSpace(obj)) {
+ CHECK(region_space_bitmap_->Test(obj));
+ region_space_->DumpRegionForObject(LOG_STREAM(FATAL_WITHOUT_ABORT), obj);
+ LOG(FATAL) << "Scanning " << obj << " not in unevac space";
+ }
}
+ ScanDirtyObject</*kNoUnEvac*/ true>(obj);
+ } else if (space != region_space_ || region_space_->IsInUnevacFromSpace(obj)) {
+ ScanDirtyObject</*kNoUnEvac*/ false>(obj);
}
- ScanDirtyObject</*kNoUnEvac*/ true>(obj);
},
accounting::CardTable::kCardDirty - 1);
}
@@ -962,10 +1444,13 @@
if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects && table != nullptr) {
table->VisitObjects(ImmuneSpaceScanObjVisitor::Callback, &visitor);
} else {
- // TODO: Scan only the aged cards.
- live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
- reinterpret_cast<uintptr_t>(space->Limit()),
- visitor);
+ WriterMutexLock rmu(Thread::Current(), *Locks::heap_bitmap_lock_);
+ card_table->Scan<false>(
+ live_bitmap,
+ space->Begin(),
+ space->Limit(),
+ visitor,
+ accounting::CardTable::kCardDirty - 1);
}
}
}
@@ -1074,7 +1559,7 @@
CHECK(weak_ref_access_enabled_);
}
if (kVerboseMode) {
- LOG(INFO) << "GC end of MarkingPhase";
+ LOG(INFO) << "GC end of CopyingPhase";
}
}
@@ -1434,40 +1919,6 @@
ConcurrentCopying* const collector_;
};
-class ConcurrentCopying::RevokeThreadLocalMarkStackCheckpoint : public Closure {
- public:
- RevokeThreadLocalMarkStackCheckpoint(ConcurrentCopying* concurrent_copying,
- bool disable_weak_ref_access)
- : concurrent_copying_(concurrent_copying),
- disable_weak_ref_access_(disable_weak_ref_access) {
- }
-
- void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
- // Note: self is not necessarily equal to thread since thread may be suspended.
- Thread* self = Thread::Current();
- CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
- << thread->GetState() << " thread " << thread << " self " << self;
- // Revoke thread local mark stacks.
- accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
- if (tl_mark_stack != nullptr) {
- MutexLock mu(self, concurrent_copying_->mark_stack_lock_);
- concurrent_copying_->revoked_mark_stacks_.push_back(tl_mark_stack);
- thread->SetThreadLocalMarkStack(nullptr);
- }
- // Disable weak ref access.
- if (disable_weak_ref_access_) {
- thread->SetWeakRefAccessEnabled(false);
- }
- // If thread is a running mutator, then act on behalf of the garbage collector.
- // See the code in ThreadList::RunCheckpoint.
- concurrent_copying_->GetBarrier().Pass(self);
- }
-
- private:
- ConcurrentCopying* const concurrent_copying_;
- const bool disable_weak_ref_access_;
-};
-
void ConcurrentCopying::RevokeThreadLocalMarkStacks(bool disable_weak_ref_access,
Closure* checkpoint_callback) {
Thread* self = Thread::Current();
@@ -1525,7 +1976,11 @@
if (mark_stack_mode == kMarkStackModeThreadLocal) {
// Process the thread-local mark stacks and the GC mark stack.
count += ProcessThreadLocalMarkStacks(/* disable_weak_ref_access= */ false,
- /* checkpoint_callback= */ nullptr);
+ /* checkpoint_callback= */ nullptr,
+ [this] (mirror::Object* ref)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ ProcessMarkStackRef(ref);
+ });
while (!gc_mark_stack_->IsEmpty()) {
mirror::Object* to_ref = gc_mark_stack_->PopBack();
ProcessMarkStackRef(to_ref);
@@ -1581,8 +2036,10 @@
return count == 0;
}
+template <typename Processor>
size_t ConcurrentCopying::ProcessThreadLocalMarkStacks(bool disable_weak_ref_access,
- Closure* checkpoint_callback) {
+ Closure* checkpoint_callback,
+ const Processor& processor) {
// Run a checkpoint to collect all thread local mark stacks and iterate over them all.
RevokeThreadLocalMarkStacks(disable_weak_ref_access, checkpoint_callback);
size_t count = 0;
@@ -1596,7 +2053,7 @@
for (accounting::AtomicStack<mirror::Object>* mark_stack : mark_stacks) {
for (StackReference<mirror::Object>* p = mark_stack->Begin(); p != mark_stack->End(); ++p) {
mirror::Object* to_ref = p->AsMirrorPtr();
- ProcessMarkStackRef(to_ref);
+ processor(to_ref);
++count;
}
{
@@ -1647,6 +2104,12 @@
perform_scan = true;
// Only add to the live bytes if the object was not already marked and we are not the young
// GC.
+ // Why add live bytes even in the 2-phase GC?
+ // We need to ensure that if an unevac region has any live objects, then
+ // its live_bytes is non-zero. Otherwise, ClearFromSpace() will clear the
+ // region. Since we may skip live objects during the marking phase of the
+ // 2-phase GC, we have to take care of such objects here.
add_to_live_bytes = true;
}
break;
@@ -1788,7 +2251,12 @@
DisableWeakRefAccessCallback dwrac(this);
// Process the thread local mark stacks one last time after switching to the shared mark stack
// mode and disable weak ref accesses.
- ProcessThreadLocalMarkStacks(/* disable_weak_ref_access= */ true, &dwrac);
+ ProcessThreadLocalMarkStacks(/* disable_weak_ref_access= */ true,
+ &dwrac,
+ [this] (mirror::Object* ref)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ ProcessMarkStackRef(ref);
+ });
if (kVerboseMode) {
LOG(INFO) << "Switched to shared mark stack mode and disabled weak ref access";
}
@@ -2054,7 +2522,7 @@
uint64_t cleared_objects;
{
TimingLogger::ScopedTiming split4("ClearFromSpace", GetTimings());
- region_space_->ClearFromSpace(&cleared_bytes, &cleared_objects);
+ region_space_->ClearFromSpace(&cleared_bytes, &cleared_objects, /*clear_bitmap*/ !young_gen_);
// `cleared_bytes` and `cleared_objects` may be greater than the from space equivalents since
// RegionSpace::ClearFromSpace may clear empty unevac regions.
CHECK_GE(cleared_bytes, from_bytes);
@@ -2363,7 +2831,7 @@
DCHECK(!immune_spaces_.ContainsObject(from_ref)) << "ref=" << from_ref;
if (kUseBakerReadBarrier && from_ref->GetReadBarrierStateAcquire() == ReadBarrier::GrayState()) {
return true;
- } else if (!(kEnableGenerationalConcurrentCopyingCollection && young_gen_)
+ } else if (!kEnableGenerationalConcurrentCopyingCollection
|| done_scanning_.load(std::memory_order_acquire)) {
// Read the comment in IsMarkedInUnevacFromSpace()
accounting::ContinuousSpaceBitmap* mark_bitmap = heap_->GetNonMovingSpace()->GetMarkBitmap();
@@ -2954,7 +3422,7 @@
los_bitmap = heap_->GetLargeObjectsSpace()->GetMarkBitmap();
DCHECK(los_bitmap->HasAddress(ref));
}
- if (kEnableGenerationalConcurrentCopyingCollection && young_gen_) {
+ if (kEnableGenerationalConcurrentCopyingCollection) {
// The sticky-bit CC collector is only compatible with Baker-style read barriers.
DCHECK(kUseBakerReadBarrier);
// Not done scanning, use AtomicSetReadBarrierPointer.
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index e251fbc..4442ad5 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -79,6 +79,8 @@
void InitializePhase() REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !immune_gray_stack_lock_);
void MarkingPhase() REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!mark_stack_lock_);
+ void CopyingPhase() REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
void ReclaimPhase() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
void FinishPhase() REQUIRES(!mark_stack_lock_,
@@ -205,7 +207,10 @@
void VerifyNoMissingCardMarks()
REQUIRES(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
- size_t ProcessThreadLocalMarkStacks(bool disable_weak_ref_access, Closure* checkpoint_callback)
+ template <typename Processor>
+ size_t ProcessThreadLocalMarkStacks(bool disable_weak_ref_access,
+ Closure* checkpoint_callback,
+ const Processor& processor)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
void RevokeThreadLocalMarkStacks(bool disable_weak_ref_access, Closure* checkpoint_callback)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -302,6 +307,15 @@
// Set the read barrier mark entrypoints to non-null.
void ActivateReadBarrierEntrypoints();
+ void CaptureThreadRootsForMarking() REQUIRES_SHARED(Locks::mutator_lock_);
+ void AddLiveBytesAndScanRef(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
+ bool TestMarkBitmapForRef(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
+ template <bool kAtomic = false>
+ bool TestAndSetMarkBitForRef(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
+ void PushOntoLocalMarkStack(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
+ void ProcessMarkStackForMarkingAndComputeLiveBytes() REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!mark_stack_lock_);
+
space::RegionSpace* region_space_; // The underlying region space.
std::unique_ptr<Barrier> gc_barrier_;
std::unique_ptr<accounting::ObjectStack> gc_mark_stack_;
@@ -382,7 +396,7 @@
// Generational "sticky", only trace through dirty objects in region space.
const bool young_gen_;
// If true, the GC thread is done scanning marked objects on dirty and aged
- // card (see ConcurrentCopying::MarkingPhase).
+ // card (see ConcurrentCopying::CopyingPhase).
Atomic<bool> done_scanning_;
// The skipped blocks are memory blocks/chucks that were copies of
@@ -448,6 +462,10 @@
class VerifyNoFromSpaceRefsFieldVisitor;
class VerifyNoFromSpaceRefsVisitor;
class VerifyNoMissingCardMarkVisitor;
+ class ImmuneSpaceCaptureRefsVisitor;
+ template <bool kAtomicTestAndSet = false> class CaptureRootsForMarkingVisitor;
+ class CaptureThreadRootsForMarkingAndCheckpoint;
+ template <bool kHandleInterRegionRefs> class ComputeLiveBytesAndMarkRefFieldsVisitor;
DISALLOW_IMPLICIT_CONSTRUCTORS(ConcurrentCopying);
};
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
index 5ff1270..dbec4ea 100644
--- a/runtime/gc/space/region_space-inl.h
+++ b/runtime/gc/space/region_space-inl.h
@@ -205,9 +205,10 @@
continue;
}
if (r->IsLarge()) {
- // Avoid visiting dead large objects since they may contain dangling pointers to the
- // from-space.
- DCHECK_GT(r->LiveBytes(), 0u) << "Visiting dead large object";
+ // We may visit a large object with live_bytes = 0 here. However, it is
+ // safe: such an object cannot contain dangling pointers, because its regions
+ // (and the regions corresponding to dead referents) cannot be reused for new
+ // allocations without first clearing the regions' live_bytes and state.
mirror::Object* obj = reinterpret_cast<mirror::Object*>(r->Begin());
DCHECK(obj->GetClass() != nullptr);
visitor(obj);
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index 21cae93..98b140e 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -319,6 +319,7 @@
state == RegionState::kRegionStateLarge) &&
type == RegionType::kRegionTypeToSpace);
bool should_evacuate = r->ShouldBeEvacuated(evac_mode);
+ bool is_newly_allocated = r->IsNewlyAllocated();
if (should_evacuate) {
r->SetAsFromSpace();
DCHECK(r->IsInFromSpace());
@@ -329,6 +330,17 @@
if (UNLIKELY(state == RegionState::kRegionStateLarge &&
type == RegionType::kRegionTypeToSpace)) {
prev_large_evacuated = should_evacuate;
+ // In 2-phase full-heap GC, this function is called after marking is
+ // done. So it is possible that some newly allocated large object is
+ // marked but its live_bytes is still -1. We need to clear its mark-bit;
+ // otherwise the live_bytes will not be updated in
+ // ConcurrentCopying::ProcessMarkStackRef(), which would break the logic.
+ if (kEnableGenerationalConcurrentCopyingCollection
+ && !should_evacuate
+ && is_newly_allocated) {
+ GetMarkBitmap()->Clear(reinterpret_cast<mirror::Object*>(r->Begin()));
+ }
num_expected_large_tails = RoundUp(r->BytesAllocated(), kRegionSize) / kRegionSize - 1;
DCHECK_GT(num_expected_large_tails, 0U);
}
@@ -367,7 +379,8 @@
}
void RegionSpace::ClearFromSpace(/* out */ uint64_t* cleared_bytes,
- /* out */ uint64_t* cleared_objects) {
+ /* out */ uint64_t* cleared_objects,
+ const bool clear_bitmap) {
DCHECK(cleared_bytes != nullptr);
DCHECK(cleared_objects != nullptr);
*cleared_bytes = 0;
@@ -395,13 +408,18 @@
// (see b/62194020).
uint8_t* clear_block_begin = nullptr;
uint8_t* clear_block_end = nullptr;
- auto clear_region = [&clear_block_begin, &clear_block_end](Region* r) {
+ auto clear_region = [this, &clear_block_begin, &clear_block_end, clear_bitmap](Region* r) {
r->Clear(/*zero_and_release_pages=*/false);
if (clear_block_end != r->Begin()) {
// Region `r` is not adjacent to the current clear block; zero and release
// pages within the current block and restart a new clear block at the
// beginning of region `r`.
ZeroAndProtectRegion(clear_block_begin, clear_block_end);
+ if (clear_bitmap) {
+ GetLiveBitmap()->ClearRange(
+ reinterpret_cast<mirror::Object*>(clear_block_begin),
+ reinterpret_cast<mirror::Object*>(clear_block_end));
+ }
clear_block_begin = r->Begin();
}
// Add region `r` to the clear block.
@@ -426,20 +444,23 @@
// It is also better to clear these regions now instead of at the end of the next GC to
// save RAM. If we don't clear the regions here, they will be cleared next GC by the normal
// live percent evacuation logic.
+ *cleared_bytes += r->BytesAllocated();
+ *cleared_objects += r->ObjectsAllocated();
+ clear_region(r);
size_t free_regions = 1;
// Also release RAM for large tails.
while (i + free_regions < num_regions_ && regions_[i + free_regions].IsLargeTail()) {
- DCHECK(r->IsLarge());
clear_region(&regions_[i + free_regions]);
++free_regions;
}
- *cleared_bytes += r->BytesAllocated();
- *cleared_objects += r->ObjectsAllocated();
num_non_free_regions_ -= free_regions;
- clear_region(r);
- GetLiveBitmap()->ClearRange(
- reinterpret_cast<mirror::Object*>(r->Begin()),
- reinterpret_cast<mirror::Object*>(r->Begin() + free_regions * kRegionSize));
+ // When clear_bitmap is true, clearing of the bitmap is taken care of in
+ // clear_region().
+ if (!clear_bitmap) {
+ GetLiveBitmap()->ClearRange(
+ reinterpret_cast<mirror::Object*>(r->Begin()),
+ reinterpret_cast<mirror::Object*>(r->Begin() + free_regions * kRegionSize));
+ }
continue;
}
r->SetUnevacFromSpaceAsToSpace();
@@ -519,6 +540,11 @@
}
// Clear pages for the last block since clearing happens when a new block opens.
ZeroAndReleasePages(clear_block_begin, clear_block_end - clear_block_begin);
+ if (clear_bitmap) {
+ GetLiveBitmap()->ClearRange(
+ reinterpret_cast<mirror::Object*>(clear_block_begin),
+ reinterpret_cast<mirror::Object*>(clear_block_end));
+ }
// Update non_free_region_index_limit_.
SetNonFreeRegionLimit(new_non_free_region_index_limit);
evac_region_ = nullptr;
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index 8810f8c..0d5ebcc 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -228,6 +228,11 @@
return false;
}
+ bool IsRegionNewlyAllocated(size_t idx) const NO_THREAD_SAFETY_ANALYSIS {
+ DCHECK_LT(idx, num_regions_);
+ return regions_[idx].IsNewlyAllocated();
+ }
+
bool IsInNewlyAllocatedRegion(mirror::Object* ref) {
if (HasAddress(ref)) {
Region* r = RefToRegionUnlocked(ref);
@@ -291,7 +296,9 @@
size_t FromSpaceSize() REQUIRES(!region_lock_);
size_t UnevacFromSpaceSize() REQUIRES(!region_lock_);
size_t ToSpaceSize() REQUIRES(!region_lock_);
- void ClearFromSpace(/* out */ uint64_t* cleared_bytes, /* out */ uint64_t* cleared_objects)
+ void ClearFromSpace(/* out */ uint64_t* cleared_bytes,
+ /* out */ uint64_t* cleared_objects,
+ const bool clear_bitmap)
REQUIRES(!region_lock_);
void AddLiveBytes(mirror::Object* ref, size_t alloc_size) {
@@ -310,6 +317,40 @@
}
}
+ void SetAllRegionLiveBytesZero() REQUIRES(!region_lock_) {
+ MutexLock mu(Thread::Current(), region_lock_);
+ const size_t iter_limit = kUseTableLookupReadBarrier
+ ? num_regions_
+ : std::min(num_regions_, non_free_region_index_limit_);
+ for (size_t i = 0; i < iter_limit; ++i) {
+ Region* r = &regions_[i];
+ // Newly allocated regions don't need up-to-date live_bytes_ for deciding
+ // whether to be evacuated or not. See Region::ShouldBeEvacuated().
+ if (!r->IsFree() && !r->IsNewlyAllocated()) {
+ r->ZeroLiveBytes();
+ }
+ }
+ }
+
+ size_t RegionIdxForRefUnchecked(mirror::Object* ref) const NO_THREAD_SAFETY_ANALYSIS {
+ DCHECK(HasAddress(ref));
+ uintptr_t offset = reinterpret_cast<uintptr_t>(ref) - reinterpret_cast<uintptr_t>(Begin());
+ size_t reg_idx = offset / kRegionSize;
+ DCHECK_LT(reg_idx, num_regions_);
+ Region* reg = &regions_[reg_idx];
+ DCHECK_EQ(reg->Idx(), reg_idx);
+ DCHECK(reg->Contains(ref));
+ return reg_idx;
+ }
+ // Return -1 as region index for references outside this region space.
+ size_t RegionIdxForRef(mirror::Object* ref) const NO_THREAD_SAFETY_ANALYSIS {
+ if (HasAddress(ref)) {
+ return RegionIdxForRefUnchecked(ref);
+ } else {
+ return static_cast<size_t>(-1);
+ }
+ }
+
void RecordAlloc(mirror::Object* ref) REQUIRES(!region_lock_);
bool AllocNewTlab(Thread* self, size_t min_bytes) REQUIRES(!region_lock_);
@@ -515,11 +556,10 @@
ALWAYS_INLINE bool ShouldBeEvacuated(EvacMode evac_mode);
void AddLiveBytes(size_t live_bytes) {
- DCHECK(IsInUnevacFromSpace());
+ DCHECK(kEnableGenerationalConcurrentCopyingCollection || IsInUnevacFromSpace());
DCHECK(!IsLargeTail());
DCHECK_NE(live_bytes_, static_cast<size_t>(-1));
- // For large allocations, we always consider all bytes in the
- // regions live.
+ // For large allocations, we always consider all bytes in the regions live.
live_bytes_ += IsLarge() ? Top() - begin_ : live_bytes;
DCHECK_LE(live_bytes_, BytesAllocated());
}