author Hiroshi Yamauchi <yamauchi@google.com> 2015-12-22 11:09:18 -0800
committer Hiroshi Yamauchi <yamauchi@google.com> 2016-05-23 10:50:36 -0700
commit 8e67465aa57ee58425be8812c8dba2f7f59cdc2e (patch)
tree b9ec80a0978d3a4d42a38efe1cee0e9a01696b47
parent 20eef176101924d5047895214bad4e73b8ae35ec (diff)
Avoid the need for the black color for the baker-style read barrier.
We used to set marked-through non-moving objects to black to distinguish between an unmarked object and a marked-through object (both would be white without black). This was to avoid a rare case where a marked-through (white) object would be incorrectly set to gray a second time (and left gray) after it had been marked through (white/unmarked -> gray/marked -> white/marked-through -> gray/incorrect). If an object is left gray, it breaks the invariant that all objects are white when the GC isn't running. It also required an extra pass over non-moving objects after the marking phase to change them from black back to white.

To avoid the need for the black color, we use a 'false gray' stack: such rare cases are detected, the affected objects are registered on the stack, and they are changed back to white at the end of the marking phase. This saves some GC time because we avoid the gray-to-black CAS per non-moving object as well as the extra pass over non-moving objects.

Ritzperf EAAC (N6):
Avg GC time: 232 -> 183 ms (-21%)
Total GC time: 15.3 -> 14.1 s (-7.7%)

Bug: 12687968
Change-Id: Idb29c3dcb745b094bcf6abc4db646dac9cbd1f71
-rw-r--r--  runtime/gc/collector/concurrent_copying-inl.h |  57
-rw-r--r--  runtime/gc/collector/concurrent_copying.cc    | 266
-rw-r--r--  runtime/gc/collector/concurrent_copying.h     |  11
-rw-r--r--  runtime/gc/reference_queue.cc                 |  28
-rw-r--r--  runtime/gc/space/region_space.cc              |  11
-rw-r--r--  runtime/gc/space/region_space.h               |  11
6 files changed, 156 insertions, 228 deletions
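Conceptually, the false-gray handling introduced by this change can be modeled with the small standalone sketch below. Everything in it is invented for illustration (Obj, CasWhiteToGray, FalseGrayTracker, the bare std::atomic<bool> mark bit); the actual implementation is MarkUnevacFromSpaceRegionOrImmuneSpace() and ProcessFalseGrayStack() in the diff that follows.

#include <atomic>
#include <mutex>
#include <vector>

// Two-color object state: White = unmarked or marked-through, Gray = queued for scanning.
enum class Color { White, Gray };

struct Obj {
  std::atomic<Color> rb_color{Color::White};
  // Analogous to AtomicSetReadBarrierPointer(WhitePtr(), GrayPtr()); true only if we won the CAS.
  bool CasWhiteToGray() {
    Color expected = Color::White;
    return rb_color.compare_exchange_strong(expected, Color::Gray);
  }
};

class FalseGrayTracker {
 public:
  // Mark a non-moving/unevac-region object: gray it, then test-and-set its mark bit.
  void Mark(Obj* obj, std::atomic<bool>& mark_bit) {
    bool cas_success = obj->CasWhiteToGray();  // May race with the GC thread.
    if (mark_bit.exchange(true)) {
      // Already marked through by the GC. If our gray is still visible, it is a "false gray":
      // remember the object so it can be whitened once marking is done.
      if (cas_success && obj->rb_color.load() == Color::Gray) {
        std::lock_guard<std::mutex> lock(lock_);
        false_gray_stack_.push_back(obj);
      }
    } else {
      // Newly marked: leave it gray; the real code pushes it onto the mark stack for scanning.
    }
  }

  // At the end of the marking phase, change every registered false-gray object back to white.
  void ProcessFalseGrayStack() {
    std::lock_guard<std::mutex> lock(lock_);
    for (Obj* obj : false_gray_stack_) {
      Color expected = Color::Gray;
      obj->rb_color.compare_exchange_strong(expected, Color::White);  // Skips objects already white.
    }
    false_gray_stack_.clear();
  }

 private:
  std::mutex lock_;
  std::vector<Obj*> false_gray_stack_;
};

The whitening is deferred to a single pass over the false-gray stack at the end of marking, which is what lets the collector drop the black state, the per-object gray-to-black CAS, and the extra pass over non-moving objects described in the commit message above.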
diff --git a/runtime/gc/collector/concurrent_copying-inl.h b/runtime/gc/collector/concurrent_copying-inl.h
index 26f5ad3df5..64fa4344d6 100644
--- a/runtime/gc/collector/concurrent_copying-inl.h
+++ b/runtime/gc/collector/concurrent_copying-inl.h
@@ -28,6 +28,47 @@ namespace art {
namespace gc {
namespace collector {
+inline mirror::Object* ConcurrentCopying::MarkUnevacFromSpaceRegionOrImmuneSpace(
+ mirror::Object* ref, accounting::ContinuousSpaceBitmap* bitmap) {
+ // For the Baker-style RB, in a rare case, we could incorrectly change the object from white
+ // to gray even though the object has already been marked through. This happens if a mutator
+ // thread gets preempted before the AtomicSetReadBarrierPointer below, GC marks through the
+ // object (changes it from white to gray and back to white), and the thread runs and
+ // incorrectly changes it from white to gray. We need to detect such "false gray" cases and
+ // change the objects back to white at the end of marking.
+ if (kUseBakerReadBarrier) {
+ // Test the bitmap first to reduce the chance of false gray cases.
+ if (bitmap->Test(ref)) {
+ return ref;
+ }
+ }
+ // This may or may not succeed, which is ok because the object may already be gray.
+ bool cas_success = false;
+ if (kUseBakerReadBarrier) {
+ cas_success = ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(),
+ ReadBarrier::GrayPtr());
+ }
+ if (bitmap->AtomicTestAndSet(ref)) {
+ // Already marked.
+ if (kUseBakerReadBarrier &&
+ cas_success &&
+ // The object could be white here if a thread gets preempted after a success at the
+ // above AtomicSetReadBarrierPointer, GC has marked through it, and the thread runs up
+ // to this point.
+ ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
+ // Register a "false-gray" object to change it from gray to white at the end of marking.
+ PushOntoFalseGrayStack(ref);
+ }
+ } else {
+ // Newly marked.
+ if (kUseBakerReadBarrier) {
+ DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::GrayPtr());
+ }
+ PushOntoMarkStack(ref);
+ }
+ return ref;
+}
+
inline mirror::Object* ConcurrentCopying::Mark(mirror::Object* from_ref) {
if (from_ref == nullptr) {
return nullptr;
@@ -68,21 +109,7 @@ inline mirror::Object* ConcurrentCopying::Mark(mirror::Object* from_ref) {
return to_ref;
}
case space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace: {
- // This may or may not succeed, which is ok.
- if (kUseBakerReadBarrier) {
- from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
- }
- mirror::Object* to_ref = from_ref;
- if (region_space_bitmap_->AtomicTestAndSet(from_ref)) {
- // Already marked.
- } else {
- // Newly marked.
- if (kUseBakerReadBarrier) {
- DCHECK_EQ(to_ref->GetReadBarrierPointer(), ReadBarrier::GrayPtr());
- }
- PushOntoMarkStack(to_ref);
- }
- return to_ref;
+ return MarkUnevacFromSpaceRegionOrImmuneSpace(from_ref, region_space_bitmap_);
}
case space::RegionSpace::RegionType::kRegionTypeNone:
return MarkNonMoving(from_ref);
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index d393f0b1d2..3f8f6284c0 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -165,6 +165,10 @@ void ConcurrentCopying::InitializePhase() {
<< reinterpret_cast<void*>(region_space_->Limit());
}
CheckEmptyMarkStack();
+ if (kIsDebugBuild) {
+ MutexLock mu(Thread::Current(), mark_stack_lock_);
+ CHECK(false_gray_stack_.empty());
+ }
immune_spaces_.Reset();
bytes_moved_.StoreRelaxed(0);
objects_moved_.StoreRelaxed(0);
@@ -247,6 +251,9 @@ class FlipCallback : public Closure {
}
cc->is_marking_ = true;
cc->mark_stack_mode_.StoreRelaxed(ConcurrentCopying::kMarkStackModeThreadLocal);
+ if (kIsDebugBuild) {
+ cc->region_space_->AssertAllRegionLiveBytesZeroOrCleared();
+ }
if (UNLIKELY(Runtime::Current()->IsActiveTransaction())) {
CHECK(Runtime::Current()->IsAotCompiler());
TimingLogger::ScopedTiming split2("(Paused)VisitTransactionRoots", cc->GetTimings());
@@ -314,17 +321,7 @@ class ConcurrentCopyingImmuneSpaceObjVisitor {
DCHECK(collector_->heap_->GetMarkBitmap()->Test(obj))
<< "Immune space object must be already marked";
}
- // This may or may not succeed, which is ok.
- if (kUseBakerReadBarrier) {
- obj->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
- }
- if (cc_bitmap->AtomicTestAndSet(obj)) {
- // Already marked. Do nothing.
- } else {
- // Newly marked. Set the gray bit and push it onto the mark stack.
- CHECK(!kUseBakerReadBarrier || obj->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
- collector_->PushOntoMarkStack(obj);
- }
+ collector_->MarkUnevacFromSpaceRegionOrImmuneSpace(obj, cc_bitmap);
}
private:
@@ -459,6 +456,9 @@ void ConcurrentCopying::MarkingPhase() {
Runtime::Current()->GetClassLinker()->CleanupClassLoaders();
// Marking is done. Disable marking.
DisableMarking();
+ if (kUseBakerReadBarrier) {
+ ProcessFalseGrayStack();
+ }
CheckEmptyMarkStack();
}
@@ -548,6 +548,32 @@ void ConcurrentCopying::DisableMarking() {
mark_stack_mode_.StoreSequentiallyConsistent(kMarkStackModeOff);
}
+void ConcurrentCopying::PushOntoFalseGrayStack(mirror::Object* ref) {
+ CHECK(kUseBakerReadBarrier);
+ DCHECK(ref != nullptr);
+ MutexLock mu(Thread::Current(), mark_stack_lock_);
+ false_gray_stack_.push_back(ref);
+}
+
+void ConcurrentCopying::ProcessFalseGrayStack() {
+ CHECK(kUseBakerReadBarrier);
+ // Change the objects on the false gray stack from gray to white.
+ MutexLock mu(Thread::Current(), mark_stack_lock_);
+ for (mirror::Object* obj : false_gray_stack_) {
+ DCHECK(IsMarked(obj));
+ // The object could be white here if a thread got preempted after a success at the
+ // AtomicSetReadBarrierPointer in Mark(), GC started marking through it (but not finished so
+ // still gray), and the thread ran to register it onto the false gray stack.
+ if (obj->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
+ bool success = obj->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
+ ReadBarrier::WhitePtr());
+ DCHECK(success);
+ }
+ }
+ false_gray_stack_.clear();
+}
+
+
void ConcurrentCopying::IssueEmptyCheckpoint() {
Thread* self = Thread::Current();
EmptyCheckpoint check_point(this);
@@ -655,8 +681,8 @@ accounting::ObjectStack* ConcurrentCopying::GetLiveStack() {
return heap_->live_stack_.get();
}
-// The following visitors are that used to verify that there's no
-// references to the from-space left after marking.
+// The following visitors are used to verify that there are no references to the from-space left
+// after marking.
class ConcurrentCopyingVerifyNoFromSpaceRefsVisitor : public SingleRootVisitor {
public:
explicit ConcurrentCopyingVerifyNoFromSpaceRefsVisitor(ConcurrentCopying* collector)
@@ -670,20 +696,9 @@ class ConcurrentCopyingVerifyNoFromSpaceRefsVisitor : public SingleRootVisitor {
}
collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
if (kUseBakerReadBarrier) {
- if (collector_->RegionSpace()->IsInToSpace(ref)) {
- CHECK(ref->GetReadBarrierPointer() == nullptr)
- << "To-space ref " << ref << " " << PrettyTypeOf(ref)
- << " has non-white rb_ptr " << ref->GetReadBarrierPointer();
- } else {
- CHECK(ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr() ||
- (ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr() &&
- collector_->IsOnAllocStack(ref)))
- << "Non-moving/unevac from space ref " << ref << " " << PrettyTypeOf(ref)
- << " has non-black rb_ptr " << ref->GetReadBarrierPointer()
- << " but isn't on the alloc stack (and has white rb_ptr)."
- << " Is it in the non-moving space="
- << (collector_->GetHeap()->GetNonMovingSpace()->HasAddress(ref));
- }
+ CHECK(ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr())
+ << "Ref " << ref << " " << PrettyTypeOf(ref)
+ << " has non-white rb_ptr " << ref->GetReadBarrierPointer();
}
}
@@ -749,18 +764,8 @@ class ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor {
ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor visitor(collector);
obj->VisitReferences(visitor, visitor);
if (kUseBakerReadBarrier) {
- if (collector->RegionSpace()->IsInToSpace(obj)) {
- CHECK(obj->GetReadBarrierPointer() == nullptr)
- << "obj=" << obj << " non-white rb_ptr " << obj->GetReadBarrierPointer();
- } else {
- CHECK(obj->GetReadBarrierPointer() == ReadBarrier::BlackPtr() ||
- (obj->GetReadBarrierPointer() == ReadBarrier::WhitePtr() &&
- collector->IsOnAllocStack(obj)))
- << "Non-moving space/unevac from space ref " << obj << " " << PrettyTypeOf(obj)
- << " has non-black rb_ptr " << obj->GetReadBarrierPointer()
- << " but isn't on the alloc stack (and has white rb_ptr). Is it in the non-moving space="
- << (collector->GetHeap()->GetNonMovingSpace()->HasAddress(obj));
- }
+ CHECK(obj->GetReadBarrierPointer() == ReadBarrier::WhitePtr())
+ << "obj=" << obj << " non-white rb_ptr " << obj->GetReadBarrierPointer();
}
}
@@ -1069,7 +1074,6 @@ inline void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
}
// Scan ref fields.
Scan(to_ref);
- // Mark the gray ref as white or black.
if (kUseBakerReadBarrier) {
DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
<< " " << to_ref << " " << to_ref->GetReadBarrierPointer()
@@ -1079,41 +1083,34 @@ inline void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
if (UNLIKELY((to_ref->GetClass<kVerifyNone, kWithoutReadBarrier>()->IsTypeOfReferenceClass() &&
to_ref->AsReference()->GetReferent<kWithoutReadBarrier>() != nullptr &&
!IsInToSpace(to_ref->AsReference()->GetReferent<kWithoutReadBarrier>())))) {
- // Leave this Reference gray in the queue so that GetReferent() will trigger a read barrier. We
- // will change it to black or white later in ReferenceQueue::DequeuePendingReference().
+ // Leave this reference gray in the queue so that GetReferent() will trigger a read barrier. We
+ // will change it to white later in ReferenceQueue::DequeuePendingReference().
DCHECK(to_ref->AsReference()->GetPendingNext() != nullptr) << "Left unenqueued ref gray " << to_ref;
} else {
- // We may occasionally leave a Reference black or white in the queue if its referent happens to
- // be concurrently marked after the Scan() call above has enqueued the Reference, in which case
- // the above IsInToSpace() evaluates to true and we change the color from gray to black or white
- // here in this else block.
+ // We may occasionally leave a reference white in the queue if its referent happens to be
+ // concurrently marked after the Scan() call above has enqueued the Reference, in which case the
+ // above IsInToSpace() evaluates to true and we change the color from gray to white here in this
+ // else block.
if (kUseBakerReadBarrier) {
- if (region_space_->IsInToSpace(to_ref)) {
- // If to-space, change from gray to white.
- bool success = to_ref->AtomicSetReadBarrierPointer</*kCasRelease*/true>(
- ReadBarrier::GrayPtr(),
- ReadBarrier::WhitePtr());
- DCHECK(success) << "Must succeed as we won the race.";
- DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
- } else {
- // If non-moving space/unevac from space, change from gray
- // to black. We can't change gray to white because it's not
- // safe to use CAS if two threads change values in opposite
- // directions (A->B and B->A). So, we change it to black to
- // indicate non-moving objects that have been marked
- // through. Note we'd need to change from black to white
- // later (concurrently).
- bool success = to_ref->AtomicSetReadBarrierPointer</*kCasRelease*/true>(
- ReadBarrier::GrayPtr(),
- ReadBarrier::BlackPtr());
- DCHECK(success) << "Must succeed as we won the race.";
- DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
- }
+ bool success = to_ref->AtomicSetReadBarrierPointer</*kCasRelease*/true>(
+ ReadBarrier::GrayPtr(),
+ ReadBarrier::WhitePtr());
+ DCHECK(success) << "Must succeed as we won the race.";
}
}
#else
DCHECK(!kUseBakerReadBarrier);
#endif
+
+ if (region_space_->IsInUnevacFromSpace(to_ref)) {
+ // Add to the live bytes per unevacuated from space. Note this code is always run by the
+ // GC-running thread (no synchronization required).
+ DCHECK(region_space_bitmap_->Test(to_ref));
+ // Disable the read barrier in SizeOf for performance, which is safe.
+ size_t obj_size = to_ref->SizeOf<kDefaultVerifyFlags, kWithoutReadBarrier>();
+ size_t alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
+ region_space_->AddLiveBytes(to_ref, alloc_size);
+ }
if (ReadBarrier::kEnableToSpaceInvariantChecks || kIsDebugBuild) {
ConcurrentCopyingAssertToSpaceInvariantObjectVisitor visitor(this);
visitor(to_ref);
@@ -1226,61 +1223,6 @@ void ConcurrentCopying::SweepLargeObjects(bool swap_bitmaps) {
RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps));
}
-class ConcurrentCopyingClearBlackPtrsVisitor {
- public:
- explicit ConcurrentCopyingClearBlackPtrsVisitor(ConcurrentCopying* cc)
- : collector_(cc) {}
- void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_)
- SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
- DCHECK(obj != nullptr);
- DCHECK(collector_->heap_->GetMarkBitmap()->Test(obj)) << obj;
- DCHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::BlackPtr()) << obj;
- obj->AtomicSetReadBarrierPointer(ReadBarrier::BlackPtr(), ReadBarrier::WhitePtr());
- DCHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << obj;
- }
-
- private:
- ConcurrentCopying* const collector_;
-};
-
-// Clear the black ptrs in non-moving objects back to white.
-void ConcurrentCopying::ClearBlackPtrs() {
- CHECK(kUseBakerReadBarrier);
- TimingLogger::ScopedTiming split("ClearBlackPtrs", GetTimings());
- ConcurrentCopyingClearBlackPtrsVisitor visitor(this);
- for (auto& space : heap_->GetContinuousSpaces()) {
- if (space == region_space_) {
- continue;
- }
- accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
- if (kVerboseMode) {
- LOG(INFO) << "ClearBlackPtrs: " << *space << " bitmap: " << *mark_bitmap;
- }
- mark_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
- reinterpret_cast<uintptr_t>(space->Limit()),
- visitor);
- }
- space::LargeObjectSpace* large_object_space = heap_->GetLargeObjectsSpace();
- large_object_space->GetMarkBitmap()->VisitMarkedRange(
- reinterpret_cast<uintptr_t>(large_object_space->Begin()),
- reinterpret_cast<uintptr_t>(large_object_space->End()),
- visitor);
- // Objects on the allocation stack?
- if (ReadBarrier::kEnableReadBarrierInvariantChecks || kIsDebugBuild) {
- size_t count = GetAllocationStack()->Size();
- auto* it = GetAllocationStack()->Begin();
- auto* end = GetAllocationStack()->End();
- for (size_t i = 0; i < count; ++i, ++it) {
- CHECK_LT(it, end);
- mirror::Object* obj = it->AsMirrorPtr();
- if (obj != nullptr) {
- // Must have been cleared above.
- CHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << obj;
- }
- }
- }
-}
-
void ConcurrentCopying::ReclaimPhase() {
TimingLogger::ScopedTiming split("ReclaimPhase", GetTimings());
if (kVerboseMode) {
@@ -1338,20 +1280,12 @@ void ConcurrentCopying::ReclaimPhase() {
}
{
- TimingLogger::ScopedTiming split3("ComputeUnevacFromSpaceLiveRatio", GetTimings());
- ComputeUnevacFromSpaceLiveRatio();
- }
-
- {
TimingLogger::ScopedTiming split4("ClearFromSpace", GetTimings());
region_space_->ClearFromSpace();
}
{
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
- if (kUseBakerReadBarrier) {
- ClearBlackPtrs();
- }
Sweep(false);
SwapBitmaps();
heap_->UnBindBitmaps();
@@ -1373,39 +1307,6 @@ void ConcurrentCopying::ReclaimPhase() {
}
}
-class ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor {
- public:
- explicit ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor(ConcurrentCopying* cc)
- : collector_(cc) {}
- void operator()(mirror::Object* ref) const SHARED_REQUIRES(Locks::mutator_lock_)
- SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
- DCHECK(ref != nullptr);
- DCHECK(collector_->region_space_bitmap_->Test(ref)) << ref;
- DCHECK(collector_->region_space_->IsInUnevacFromSpace(ref)) << ref;
- if (kUseBakerReadBarrier) {
- DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::BlackPtr()) << ref;
- // Clear the black ptr.
- ref->AtomicSetReadBarrierPointer(ReadBarrier::BlackPtr(), ReadBarrier::WhitePtr());
- DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << ref;
- }
- size_t obj_size = ref->SizeOf();
- size_t alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
- collector_->region_space_->AddLiveBytes(ref, alloc_size);
- }
-
- private:
- ConcurrentCopying* const collector_;
-};
-
-// Compute how much live objects are left in regions.
-void ConcurrentCopying::ComputeUnevacFromSpaceLiveRatio() {
- region_space_->AssertAllRegionLiveBytesZeroOrCleared();
- ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor visitor(this);
- region_space_bitmap_->VisitMarkedRange(reinterpret_cast<uintptr_t>(region_space_->Begin()),
- reinterpret_cast<uintptr_t>(region_space_->Limit()),
- visitor);
-}
-
// Assert the to-space invariant.
void ConcurrentCopying::AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset,
mirror::Object* ref) {
@@ -1999,19 +1900,7 @@ mirror::Object* ConcurrentCopying::MarkNonMoving(mirror::Object* ref) {
DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(ref)->Test(ref))
<< "Immune space object must be already marked";
}
- // This may or may not succeed, which is ok.
- if (kUseBakerReadBarrier) {
- ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
- }
- if (cc_bitmap->AtomicTestAndSet(ref)) {
- // Already marked.
- } else {
- // Newly marked.
- if (kUseBakerReadBarrier) {
- DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::GrayPtr());
- }
- PushOntoMarkStack(ref);
- }
+ MarkUnevacFromSpaceRegionOrImmuneSpace(ref, cc_bitmap);
} else {
// Use the mark bitmap.
accounting::ContinuousSpaceBitmap* mark_bitmap =
@@ -2024,13 +1913,13 @@ mirror::Object* ConcurrentCopying::MarkNonMoving(mirror::Object* ref) {
// Already marked.
if (kUseBakerReadBarrier) {
DCHECK(ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
- ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
+ ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
}
} else if (is_los && los_bitmap->Test(ref)) {
// Already marked in LOS.
if (kUseBakerReadBarrier) {
DCHECK(ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
- ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
+ ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
}
} else {
// Not marked.
@@ -2046,15 +1935,34 @@ mirror::Object* ConcurrentCopying::MarkNonMoving(mirror::Object* ref) {
DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::WhitePtr());
}
} else {
+ // For the baker-style RB, we need to handle 'false-gray' cases. See the
+ // kRegionTypeUnevacFromSpace-case comment in Mark().
+ if (kUseBakerReadBarrier) {
+ // Test the bitmap first to reduce the chance of false gray cases.
+ if ((!is_los && mark_bitmap->Test(ref)) ||
+ (is_los && los_bitmap->Test(ref))) {
+ return ref;
+ }
+ }
// Not marked or on the allocation stack. Try to mark it.
// This may or may not succeed, which is ok.
+ bool cas_success = false;
if (kUseBakerReadBarrier) {
- ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
+ cas_success = ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(),
+ ReadBarrier::GrayPtr());
}
if (!is_los && mark_bitmap->AtomicTestAndSet(ref)) {
// Already marked.
+ if (kUseBakerReadBarrier && cas_success &&
+ ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
+ PushOntoFalseGrayStack(ref);
+ }
} else if (is_los && los_bitmap->AtomicTestAndSet(ref)) {
// Already marked in LOS.
+ if (kUseBakerReadBarrier && cas_success &&
+ ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
+ PushOntoFalseGrayStack(ref);
+ }
} else {
// Newly marked.
if (kUseBakerReadBarrier) {
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index 76315fe7cc..e9ff618ff3 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -160,8 +160,6 @@ class ConcurrentCopying : public GarbageCollector {
SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
void SweepLargeObjects(bool swap_bitmaps)
SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
- void ClearBlackPtrs()
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
void FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size)
SHARED_REQUIRES(Locks::mutator_lock_);
mirror::Object* AllocateInSkippedBlock(size_t alloc_size)
@@ -185,10 +183,19 @@ class ConcurrentCopying : public GarbageCollector {
void ExpandGcMarkStack() SHARED_REQUIRES(Locks::mutator_lock_);
mirror::Object* MarkNonMoving(mirror::Object* from_ref) SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
+ ALWAYS_INLINE mirror::Object* MarkUnevacFromSpaceRegionOrImmuneSpace(mirror::Object* from_ref,
+ accounting::SpaceBitmap<kObjectAlignment>* bitmap)
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
+ void PushOntoFalseGrayStack(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!mark_stack_lock_);
+ void ProcessFalseGrayStack() SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!mark_stack_lock_);
space::RegionSpace* region_space_; // The underlying region space.
std::unique_ptr<Barrier> gc_barrier_;
std::unique_ptr<accounting::ObjectStack> gc_mark_stack_;
+ std::vector<mirror::Object*> false_gray_stack_ GUARDED_BY(mark_stack_lock_);
Mutex mark_stack_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
std::vector<accounting::ObjectStack*> revoked_mark_stacks_
GUARDED_BY(mark_stack_lock_);
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
index 03ab9a1a73..6088a43ab1 100644
--- a/runtime/gc/reference_queue.cc
+++ b/runtime/gc/reference_queue.cc
@@ -68,31 +68,19 @@ mirror::Reference* ReferenceQueue::DequeuePendingReference() {
Heap* heap = Runtime::Current()->GetHeap();
if (kUseBakerOrBrooksReadBarrier && heap->CurrentCollectorType() == kCollectorTypeCC &&
heap->ConcurrentCopyingCollector()->IsActive()) {
- // Change the gray ptr we left in ConcurrentCopying::ProcessMarkStackRef() to black or white.
+ // Change the gray ptr we left in ConcurrentCopying::ProcessMarkStackRef() to white.
// We check IsActive() above because we don't want to do this when the zygote compaction
// collector (SemiSpace) is running.
CHECK(ref != nullptr);
collector::ConcurrentCopying* concurrent_copying = heap->ConcurrentCopyingCollector();
- const bool is_moving = concurrent_copying->RegionSpace()->IsInToSpace(ref);
- if (ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
- if (is_moving) {
- ref->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(), ReadBarrier::WhitePtr());
- CHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::WhitePtr());
- } else {
- ref->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(), ReadBarrier::BlackPtr());
- CHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::BlackPtr());
- }
+ mirror::Object* rb_ptr = ref->GetReadBarrierPointer();
+ if (rb_ptr == ReadBarrier::GrayPtr()) {
+ ref->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(), ReadBarrier::WhitePtr());
+ CHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::WhitePtr());
} else {
- // In ConcurrentCopying::ProcessMarkStackRef() we may leave a black or white Reference in the
- // queue and find it here, which is OK. Check that the color makes sense depending on whether
- // the Reference is moving or not and that the referent has been marked.
- if (is_moving) {
- CHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::WhitePtr())
- << "ref=" << ref << " rb_ptr=" << ref->GetReadBarrierPointer();
- } else {
- CHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::BlackPtr())
- << "ref=" << ref << " rb_ptr=" << ref->GetReadBarrierPointer();
- }
+ // In ConcurrentCopying::ProcessMarkStackRef() we may leave a white reference in the queue and
+ // find it here, which is OK.
+ CHECK_EQ(rb_ptr, ReadBarrier::WhitePtr()) << "ref=" << ref << " rb_ptr=" << rb_ptr;
mirror::Object* referent = ref->GetReferent<kWithoutReadBarrier>();
// The referent could be null if it's cleared by a mutator (Reference.clear()).
if (referent != nullptr) {
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index 9a2d0c6d37..5d710bf49b 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -216,17 +216,6 @@ void RegionSpace::ClearFromSpace() {
evac_region_ = nullptr;
}
-void RegionSpace::AssertAllRegionLiveBytesZeroOrCleared() {
- if (kIsDebugBuild) {
- MutexLock mu(Thread::Current(), region_lock_);
- for (size_t i = 0; i < num_regions_; ++i) {
- Region* r = &regions_[i];
- size_t live_bytes = r->LiveBytes();
- CHECK(live_bytes == 0U || live_bytes == static_cast<size_t>(-1)) << live_bytes;
- }
- }
-}
-
void RegionSpace::LogFragmentationAllocFailure(std::ostream& os,
size_t /* failed_alloc_bytes */) {
size_t max_contiguous_allocation = 0;
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index 14e800595c..4e8dfe820d 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -215,7 +215,16 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
reg->AddLiveBytes(alloc_size);
}
- void AssertAllRegionLiveBytesZeroOrCleared() REQUIRES(!region_lock_);
+ void AssertAllRegionLiveBytesZeroOrCleared() REQUIRES(!region_lock_) {
+ if (kIsDebugBuild) {
+ MutexLock mu(Thread::Current(), region_lock_);
+ for (size_t i = 0; i < num_regions_; ++i) {
+ Region* r = &regions_[i];
+ size_t live_bytes = r->LiveBytes();
+ CHECK(live_bytes == 0U || live_bytes == static_cast<size_t>(-1)) << live_bytes;
+ }
+ }
+ }
void RecordAlloc(mirror::Object* ref) REQUIRES(!region_lock_);
bool AllocNewTlab(Thread* self) REQUIRES(!region_lock_);