Diffstat (limited to 'runtime/gc')
-rw-r--r--  runtime/gc/allocation_record.cc             |  4
-rw-r--r--  runtime/gc/allocation_record.h              |  1
-rw-r--r--  runtime/gc/collector/concurrent_copying.cc  | 32
-rw-r--r--  runtime/gc/collector/mark_sweep.cc          |  3
-rw-r--r--  runtime/gc/collector/mark_sweep.h           |  2
-rw-r--r--  runtime/gc/collector/sticky_mark_sweep.cc   | 13
-rw-r--r--  runtime/gc/collector/sticky_mark_sweep.h    |  6
-rw-r--r--  runtime/gc/heap.cc                          |  1
-rw-r--r--  runtime/gc/heap.h                           |  1
-rw-r--r--  runtime/gc/reference_processor.cc           |  7
-rw-r--r--  runtime/gc/system_weak.h                    | 14
-rw-r--r--  runtime/gc/system_weak_test.cc              | 10
12 files changed, 53 insertions(+), 41 deletions(-)
diff --git a/runtime/gc/allocation_record.cc b/runtime/gc/allocation_record.cc
index d921900933..e18a955251 100644
--- a/runtime/gc/allocation_record.cc
+++ b/runtime/gc/allocation_record.cc
@@ -181,7 +181,6 @@ void AllocRecordObjectMap::DisallowNewAllocationRecords() {
}
void AllocRecordObjectMap::BroadcastForNewAllocationRecords() {
- CHECK(kUseReadBarrier);
new_record_condition_.Broadcast(Thread::Current());
}
@@ -291,6 +290,9 @@ void AllocRecordObjectMap::RecordAllocation(Thread* self,
// Wait for GC's sweeping to complete and allow new records
while (UNLIKELY((!kUseReadBarrier && !allow_new_record_) ||
(kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
+ // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
+ // presence of threads blocking for weak ref access.
+ self->CheckEmptyCheckpoint();
new_record_condition_.WaitHoldingLocks(self);
}
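The hunk above introduces the pattern this change applies to every weak-ref wait loop below: before parking on the condition variable, the blocking thread first answers any pending empty checkpoint via self->CheckEmptyCheckpoint(), so a collector that is counting checkpoint responses never waits forever on a thread that is itself blocked for weak-ref access. A minimal standalone sketch of that shape in plain C++, with hypothetical names (WeakRefGate and its members are illustrative, not ART API):

#include <condition_variable>
#include <functional>
#include <mutex>

class WeakRefGate {
 public:
  // Mutator side: block until weak-ref access is re-enabled, but keep answering
  // empty-checkpoint requests while parked (this sketch's stand-in for
  // Thread::CheckEmptyCheckpoint()).
  void WaitForWeakRefAccess(const std::function<void()>& check_empty_checkpoint) {
    std::unique_lock<std::mutex> lock(mu_);
    while (!weak_ref_access_enabled_) {
      check_empty_checkpoint();  // run before blocking, as in the hunk above
      cv_.wait(lock);
    }
  }

  // Collector side: either re-enable access, or just wake the waiters so they
  // run a pending checkpoint (the broadcast_for_checkpoint case later in this diff).
  void Broadcast(bool enable_access) {
    std::lock_guard<std::mutex> lock(mu_);
    if (enable_access) {
      weak_ref_access_enabled_ = true;
    }
    cv_.notify_all();
  }

 private:
  std::mutex mu_;
  std::condition_variable cv_;
  bool weak_ref_access_enabled_ = false;
};

In the real code the condition/lock pair already exists (new_record_condition_ and alloc_tracker_lock_ here); the only new piece is the CheckEmptyCheckpoint() call before waiting.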
diff --git a/runtime/gc/allocation_record.h b/runtime/gc/allocation_record.h
index c8b2b89702..90cff6a8c4 100644
--- a/runtime/gc/allocation_record.h
+++ b/runtime/gc/allocation_record.h
@@ -261,7 +261,6 @@ class AllocRecordObjectMap {
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::alloc_tracker_lock_);
void BroadcastForNewAllocationRecords()
- REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::alloc_tracker_lock_);
// TODO: Is there a better way to hide the entries_'s type?
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 2e72adadd6..8353b26a9a 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -514,26 +514,6 @@ void ConcurrentCopying::RecordLiveStackFreezeSize(Thread* self) {
live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
}
-class EmptyCheckpoint : public Closure {
- public:
- explicit EmptyCheckpoint(ConcurrentCopying* concurrent_copying)
- : concurrent_copying_(concurrent_copying) {
- }
-
- virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
- // Note: self is not necessarily equal to thread since thread may be suspended.
- Thread* self = Thread::Current();
- CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
- << thread->GetState() << " thread " << thread << " self " << self;
- // If thread is a running mutator, then act on behalf of the garbage collector.
- // See the code in ThreadList::RunCheckpoint.
- concurrent_copying_->GetBarrier().Pass(self);
- }
-
- private:
- ConcurrentCopying* const concurrent_copying_;
-};
-
// Used to visit objects in the immune spaces.
inline void ConcurrentCopying::ScanImmuneObject(mirror::Object* obj) {
DCHECK(obj != nullptr);
@@ -835,10 +815,10 @@ void ConcurrentCopying::ProcessFalseGrayStack() {
void ConcurrentCopying::IssueEmptyCheckpoint() {
Thread* self = Thread::Current();
- EmptyCheckpoint check_point(this);
ThreadList* thread_list = Runtime::Current()->GetThreadList();
- gc_barrier_->Init(self, 0);
- size_t barrier_count = thread_list->RunCheckpoint(&check_point);
+ Barrier* barrier = thread_list->EmptyCheckpointBarrier();
+ barrier->Init(self, 0);
+ size_t barrier_count = thread_list->RunEmptyCheckpoint();
// If there are no threads to wait for, which implies that all the checkpoint functions have
// finished, then there is no need to release the mutator lock.
if (barrier_count == 0) {
@@ -848,7 +828,7 @@ void ConcurrentCopying::IssueEmptyCheckpoint() {
Locks::mutator_lock_->SharedUnlock(self);
{
ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
- gc_barrier_->Increment(self, barrier_count);
+ barrier->Increment(self, barrier_count);
}
Locks::mutator_lock_->SharedLock(self);
}
@@ -1253,6 +1233,10 @@ bool ConcurrentCopying::ProcessMarkStackOnce() {
}
gc_mark_stack_->Reset();
} else if (mark_stack_mode == kMarkStackModeShared) {
+ // Do an empty checkpoint to avoid a race with a mutator preempted in the middle of a read
+ // barrier but before pushing onto the mark stack. b/32508093. Note the weak ref access is
+ // disabled at this point.
+ IssueEmptyCheckpoint();
// Process the shared GC mark stack with a lock.
{
MutexLock mu(self, mark_stack_lock_);
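Context for the IssueEmptyCheckpoint() and ProcessMarkStackOnce() hunks above: the per-collector EmptyCheckpoint closure and gc_barrier_ are replaced by a barrier owned by the ThreadList (EmptyCheckpointBarrier(), drained via RunEmptyCheckpoint()), so that threads parked in weak-ref wait loops can also pass it from CheckEmptyCheckpoint(). A standalone sketch of the counting handshake, with hypothetical names, and simplified relative to ART's Barrier (which is initialized to zero and later bumped with Increment(self, barrier_count)):

#include <condition_variable>
#include <cstddef>
#include <mutex>

class EmptyCheckpointCounter {
 public:
  // Requester: record how many threads were asked to respond.
  void Init(size_t count) {
    std::lock_guard<std::mutex> lock(mu_);
    pending_ = count;
  }

  // Each checkpointed thread calls this once, either from a regular checkpoint
  // or from a weak-ref wait loop before it blocks.
  void Pass() {
    std::lock_guard<std::mutex> lock(mu_);
    if (pending_ > 0 && --pending_ == 0) {
      cv_.notify_all();
    }
  }

  // Requester: block until every asked thread has passed.
  void WaitUntilDrained() {
    std::unique_lock<std::mutex> lock(mu_);
    cv_.wait(lock, [this] { return pending_ == 0; });
  }

 private:
  std::mutex mu_;
  std::condition_variable cv_;
  size_t pending_ = 0;
};

As the hunk shows, a barrier_count of zero means every thread has already responded, which is why the mutator lock is not released in that case.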
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 7b73e43ad2..673a97e82d 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -608,8 +608,7 @@ void MarkSweep::MarkNonThreadRoots() {
void MarkSweep::MarkConcurrentRoots(VisitRootFlags flags) {
TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
// Visit all runtime roots and clear dirty flags.
- Runtime::Current()->VisitConcurrentRoots(
- this, static_cast<VisitRootFlags>(flags | kVisitRootFlagNonMoving));
+ Runtime::Current()->VisitConcurrentRoots(this, flags);
}
class MarkSweep::DelayReferenceReferentVisitor {
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index 19c2e9aaba..a94cb279cf 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -98,7 +98,7 @@ class MarkSweep : public GarbageCollector {
REQUIRES(!mark_stack_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
- void MarkConcurrentRoots(VisitRootFlags flags)
+ virtual void MarkConcurrentRoots(VisitRootFlags flags)
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/gc/collector/sticky_mark_sweep.cc b/runtime/gc/collector/sticky_mark_sweep.cc
index bb7e854ea1..a2dbe3f7a0 100644
--- a/runtime/gc/collector/sticky_mark_sweep.cc
+++ b/runtime/gc/collector/sticky_mark_sweep.cc
@@ -56,6 +56,19 @@ void StickyMarkSweep::MarkReachableObjects() {
RecursiveMarkDirtyObjects(false, accounting::CardTable::kCardDirty - 1);
}
+void StickyMarkSweep::MarkConcurrentRoots(VisitRootFlags flags) {
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
+ // Visit all runtime roots and clear dirty flags, including class loader roots. This is done to
+ // prevent incorrect class unloading, since the GC does not card mark when storing the class
+ // during object allocation. Doing that for each allocation would be slow.
+ // Because the card is not dirty, the object may not get scanned, which can cause class
+ // unloading to occur even though the class and class loader are reachable through the
+ // object's class.
+ Runtime::Current()->VisitConcurrentRoots(
+ this,
+ static_cast<VisitRootFlags>(flags | kVisitRootFlagClassLoader));
+}
+
void StickyMarkSweep::Sweep(bool swap_bitmaps ATTRIBUTE_UNUSED) {
SweepArray(GetHeap()->GetLiveStack(), false);
}
diff --git a/runtime/gc/collector/sticky_mark_sweep.h b/runtime/gc/collector/sticky_mark_sweep.h
index 100ca64ee4..45f912f63a 100644
--- a/runtime/gc/collector/sticky_mark_sweep.h
+++ b/runtime/gc/collector/sticky_mark_sweep.h
@@ -33,6 +33,12 @@ class StickyMarkSweep FINAL : public PartialMarkSweep {
StickyMarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix = "");
~StickyMarkSweep() {}
+ virtual void MarkConcurrentRoots(VisitRootFlags flags)
+ OVERRIDE
+ REQUIRES(Locks::heap_bitmap_lock_)
+ REQUIRES(!mark_stack_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
protected:
// Bind the live bits to the mark bits of bitmaps for all spaces; all spaces other than the
// alloc space will be marked as immune.
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 19760afed7..ddc38526bd 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -4065,7 +4065,6 @@ void Heap::DisallowNewAllocationRecords() const {
}
void Heap::BroadcastForNewAllocationRecords() const {
- CHECK(kUseReadBarrier);
// Always broadcast without checking IsAllocTrackingEnabled() because IsAllocTrackingEnabled() may
// be set to false while some threads are waiting for system weak access in
// AllocRecordObjectMap::RecordAllocation() and we may fail to wake them up. b/27467554.
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index e8eb69e35c..0c671d269d 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -797,7 +797,6 @@ class Heap {
REQUIRES(!Locks::alloc_tracker_lock_);
void BroadcastForNewAllocationRecords() const
- REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::alloc_tracker_lock_);
void DisableGCForShutdown() REQUIRES(!*gc_complete_lock_);
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index 798ecd3d87..2cde7d5731 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -55,7 +55,6 @@ void ReferenceProcessor::DisableSlowPath(Thread* self) {
}
void ReferenceProcessor::BroadcastForSlowPath(Thread* self) {
- CHECK(kUseReadBarrier);
MutexLock mu(self, *Locks::reference_processor_lock_);
condition_.Broadcast(self);
}
@@ -99,6 +98,9 @@ ObjPtr<mirror::Object> ReferenceProcessor::GetReferent(Thread* self,
}
}
}
+ // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
+ // presence of threads blocking for weak ref access.
+ self->CheckEmptyCheckpoint();
condition_.WaitHoldingLocks(self);
}
return reference->GetReferent();
@@ -270,6 +272,9 @@ bool ReferenceProcessor::MakeCircularListIfUnenqueued(
// Wait until we are done processing the reference.
while ((!kUseReadBarrier && SlowPathEnabled()) ||
(kUseReadBarrier && !self->GetWeakRefAccessEnabled())) {
+ // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
+ // presence of threads blocking for weak ref access.
+ self->CheckEmptyCheckpoint();
condition_.WaitHoldingLocks(self);
}
// At this point, since the sentinel of the reference is live, it is guaranteed to not be
diff --git a/runtime/gc/system_weak.h b/runtime/gc/system_weak.h
index 887059b57f..e5cddfc6f9 100644
--- a/runtime/gc/system_weak.h
+++ b/runtime/gc/system_weak.h
@@ -30,7 +30,8 @@ class AbstractSystemWeakHolder {
virtual void Allow() REQUIRES_SHARED(Locks::mutator_lock_) = 0;
virtual void Disallow() REQUIRES_SHARED(Locks::mutator_lock_) = 0;
- virtual void Broadcast() REQUIRES_SHARED(Locks::mutator_lock_) = 0;
+ // See Runtime::BroadcastForNewSystemWeaks for the broadcast_for_checkpoint definition.
+ virtual void Broadcast(bool broadcast_for_checkpoint) = 0;
virtual void Sweep(IsMarkedVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_) = 0;
};
@@ -61,10 +62,8 @@ class SystemWeakHolder : public AbstractSystemWeakHolder {
allow_new_system_weak_ = false;
}
- void Broadcast() OVERRIDE
- REQUIRES_SHARED(Locks::mutator_lock_)
+ void Broadcast(bool broadcast_for_checkpoint ATTRIBUTE_UNUSED) OVERRIDE
REQUIRES(!allow_disallow_lock_) {
- CHECK(kUseReadBarrier);
MutexLock mu(Thread::Current(), allow_disallow_lock_);
new_weak_condition_.Broadcast(Thread::Current());
}
@@ -75,10 +74,15 @@ class SystemWeakHolder : public AbstractSystemWeakHolder {
}
protected:
- void Wait(Thread* self) REQUIRES_SHARED(allow_disallow_lock_) {
+ void Wait(Thread* self)
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(allow_disallow_lock_) {
// Wait for GC's sweeping to complete and allow new records
while (UNLIKELY((!kUseReadBarrier && !allow_new_system_weak_) ||
(kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
+ // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
+ // presence of threads blocking for weak ref access.
+ self->CheckEmptyCheckpoint();
new_weak_condition_.WaitHoldingLocks(self);
}
}
diff --git a/runtime/gc/system_weak_test.cc b/runtime/gc/system_weak_test.cc
index af8a444903..9b601c0753 100644
--- a/runtime/gc/system_weak_test.cc
+++ b/runtime/gc/system_weak_test.cc
@@ -58,12 +58,14 @@ struct CountingSystemWeakHolder : public SystemWeakHolder {
disallow_count_++;
}
- void Broadcast() OVERRIDE
- REQUIRES_SHARED(Locks::mutator_lock_)
+ void Broadcast(bool broadcast_for_checkpoint) OVERRIDE
REQUIRES(!allow_disallow_lock_) {
- SystemWeakHolder::Broadcast();
+ SystemWeakHolder::Broadcast(broadcast_for_checkpoint);
- allow_count_++;
+ if (!broadcast_for_checkpoint) {
+ // Don't count the broadcasts for running checkpoints.
+ allow_count_++;
+ }
}
void Sweep(IsMarkedVisitor* visitor) OVERRIDE