 runtime/gc/collector/concurrent_copying-inl.h | 4 ++--
 runtime/gc/collector/concurrent_copying.cc    | 8 ++++----
 runtime/gc/collector/concurrent_copying.h     | 2 ++
 3 files changed, 8 insertions(+), 6 deletions(-)
diff --git a/runtime/gc/collector/concurrent_copying-inl.h b/runtime/gc/collector/concurrent_copying-inl.h
index 5a042ccbb8..3095f9f679 100644
--- a/runtime/gc/collector/concurrent_copying-inl.h
+++ b/runtime/gc/collector/concurrent_copying-inl.h
@@ -38,7 +38,7 @@ inline mirror::Object* ConcurrentCopying::MarkUnevacFromSpaceRegion(
accounting::ContinuousSpaceBitmap* bitmap) {
if (kEnableGenerationalConcurrentCopyingCollection
&& young_gen_
- && !done_scanning_.load(std::memory_order_relaxed)) {
+ && !done_scanning_.load(std::memory_order_acquire)) {
// Everything in the unevac space should be marked for generational CC except for large objects.
DCHECK(region_space_bitmap_->Test(ref) || region_space_->IsLargeObject(ref)) << ref << " "
<< ref->GetClass<kVerifyNone, kWithoutReadBarrier>()->PrettyClass();
@@ -244,7 +244,7 @@ inline bool ConcurrentCopying::IsMarkedInUnevacFromSpace(mirror::Object* from_re
DCHECK(region_space_->IsInUnevacFromSpace(from_ref));
if (kEnableGenerationalConcurrentCopyingCollection
&& young_gen_
- && !done_scanning_.load(std::memory_order_relaxed)) {
+ && !done_scanning_.load(std::memory_order_acquire)) {
return from_ref->GetReadBarrierStateAcquire() == ReadBarrier::GrayState();
}
if (kUseBakerReadBarrier && from_ref->GetReadBarrierStateAcquire() == ReadBarrier::GrayState()) {
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index a9ba54a4ff..53fd1f42cf 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -367,7 +367,7 @@ void ConcurrentCopying::InitializePhase() {
}
}
if (kEnableGenerationalConcurrentCopyingCollection) {
- done_scanning_.store(false, std::memory_order_relaxed);
+ done_scanning_.store(false, std::memory_order_release);
}
BindBitmaps();
if (kVerboseMode) {
@@ -878,7 +878,7 @@ void ConcurrentCopying::MarkingPhase() {
}
TimingLogger::ScopedTiming split2("ScanCardsForSpace", GetTimings());
WriterMutexLock rmu(Thread::Current(), *Locks::heap_bitmap_lock_);
- CHECK_EQ(done_scanning_.load(std::memory_order_relaxed), false);
+ CHECK(!done_scanning_.load(std::memory_order_relaxed));
if (kIsDebugBuild) {
// Leave some time for mutators to race ahead to try and find races between the GC card
// scanning and mutators reading references.
@@ -912,7 +912,7 @@ void ConcurrentCopying::MarkingPhase() {
accounting::CardTable::kCardDirty - 1);
}
// Done scanning unevac space.
- done_scanning_.store(true, std::memory_order_seq_cst);
+ done_scanning_.store(true, std::memory_order_release);
if (kVerboseMode) {
LOG(INFO) << "GC end of ScanCardsForSpace";
}
@@ -2893,7 +2893,7 @@ mirror::Object* ConcurrentCopying::MarkNonMoving(Thread* const self,
// The sticky-bit CC collector is only compatible with Baker-style read barriers.
DCHECK(kUseBakerReadBarrier);
// Not done scanning, use AtomicSetReadBarrierPointer.
- if (!done_scanning_) {
+ if (!done_scanning_.load(std::memory_order_acquire)) {
// Since the mark bitmap is still filled in from last GC, we can not use that or else the
// mutator may see references to the from space. Instead, use the Baker pointer itself as
// the mark bit.
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index e1ff97a941..0ebe6f0c25 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -360,6 +360,8 @@ class ConcurrentCopying : public GarbageCollector {
// Generational "sticky", only trace through dirty objects in region space.
const bool young_gen_;
+// If true, the GC thread is done scanning marked objects on dirty and aged
+// cards (see ConcurrentCopying::MarkingPhase).
Atomic<bool> done_scanning_;
// The skipped blocks are memory blocks/chunks that were copies of
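
The memory-ordering change in this patch is the standard release/acquire publication idiom: the GC thread completes card scanning, then announces it with a release store to done_scanning_, and readers use acquire loads so that everything written during scanning is visible once they observe true. Below is a minimal standalone sketch of that idiom; the names (done_scanning, scanned, ScannerThread, ReaderThread) are illustrative only and are not ART code.

// Sketch of the release/acquire publication pattern adopted for done_scanning_.
#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

std::atomic<bool> done_scanning{false};  // stands in for done_scanning_
std::vector<int> scanned;                // stands in for the data produced by scanning

void ScannerThread() {
  for (int i = 0; i < 1000; ++i) {
    scanned.push_back(i);  // plain (non-atomic) writes performed while scanning
  }
  // Release: all writes above happen-before any acquire load that observes true.
  done_scanning.store(true, std::memory_order_release);
}

void ReaderThread() {
  // Acquire: once this returns true, the reader is guaranteed to see the fully
  // populated vector; a relaxed load would not provide that guarantee.
  while (!done_scanning.load(std::memory_order_acquire)) {
  }
  std::printf("scanned %zu entries\n", scanned.size());
}

int main() {
  std::thread scanner(ScannerThread);
  std::thread reader(ReaderThread);
  scanner.join();
  reader.join();
  return 0;
}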