Some fixes for the CC collector.
- Remove a DCHECK in DisableMarkingCheckpoint, which caused
  occasional spurious failures.
- Check the thread-local GetWeakRefAccessEnabled() flag on the boxed
  lambdas' weak access path (see the sketch below).
- Add missing BroadcastForNewAllocationRecords and
BroadcastForNewWeakBoxedLambdas. The lack of the former caused
occasional deadlocks in the ddmc test.
- Remove the 'ensure system weaks disallowed' calls, which weren't
  useful and were effectively dead code.
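
For reference, a minimal, hypothetical sketch of the gating pattern these
changes extend to BoxTable and AllocRecordObjectMap (WeakTable and Thread
below are stand-ins, not the real runtime classes): with the read barrier
(CC) configuration, weak access is gated by a per-thread flag, so the GC
must broadcast on the table's condition variable after re-enabling that
flag, otherwise readers already queued in BlockUntilWeaksAllowed() never
wake up (the ddmc-style deadlock). Non-read-barrier collectors keep using
the global allow flag via Disallow/Allow.

    #include <condition_variable>
    #include <mutex>

    constexpr bool kUseReadBarrier = true;   // Build-time collector choice.

    struct Thread {
      bool weak_ref_access_enabled = true;   // Stand-in for GetWeakRefAccessEnabled().
    };

    class WeakTable {
     public:
      void BlockUntilWeaksAllowed(Thread* self) {
        std::unique_lock<std::mutex> lock(lock_);
        // CC checks the caller's thread-local flag; CMS checks the global flag.
        cond_.wait(lock, [this, self] {
          return kUseReadBarrier ? self->weak_ref_access_enabled : allow_new_weaks_;
        });
      }
      void DisallowNewWeaks() {              // CMS only, during the pause.
        std::lock_guard<std::mutex> lock(lock_);
        allow_new_weaks_ = false;
      }
      void AllowNewWeaks() {                 // CMS only, after the pause.
        std::lock_guard<std::mutex> lock(lock_);
        allow_new_weaks_ = true;
        cond_.notify_all();
      }
      // CC only: the GC re-enables each thread's weak_ref_access_enabled
      // (via a checkpoint in the real runtime) and then broadcasts here so
      // queued readers re-evaluate their flag and proceed.
      void BroadcastForNewWeaks() {
        std::lock_guard<std::mutex> lock(lock_);
        cond_.notify_all();
      }

     private:
      std::mutex lock_;
      std::condition_variable cond_;
      bool allow_new_weaks_ = true;          // Global flag used when !kUseReadBarrier.
    };
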
Bug: 12687968
Change-Id: I33850c8d12e6e1a3aed1c2bb18eba263cbab76e8
diff --git a/runtime/gc/allocation_record.cc b/runtime/gc/allocation_record.cc
index 16c9354..369e408 100644
--- a/runtime/gc/allocation_record.cc
+++ b/runtime/gc/allocation_record.cc
@@ -167,14 +167,21 @@
}
void AllocRecordObjectMap::AllowNewAllocationRecords() {
+ CHECK(!kUseReadBarrier);
allow_new_record_ = true;
new_record_condition_.Broadcast(Thread::Current());
}
void AllocRecordObjectMap::DisallowNewAllocationRecords() {
+ CHECK(!kUseReadBarrier);
allow_new_record_ = false;
}
+void AllocRecordObjectMap::BroadcastForNewAllocationRecords() {
+ CHECK(kUseReadBarrier);
+ new_record_condition_.Broadcast(Thread::Current());
+}
+
struct AllocRecordStackVisitor : public StackVisitor {
AllocRecordStackVisitor(Thread* thread, AllocRecordStackTrace* trace_in, size_t max)
SHARED_REQUIRES(Locks::mutator_lock_)
diff --git a/runtime/gc/allocation_record.h b/runtime/gc/allocation_record.h
index 0a4f532..ffdfd31 100644
--- a/runtime/gc/allocation_record.h
+++ b/runtime/gc/allocation_record.h
@@ -277,6 +277,9 @@
void AllowNewAllocationRecords()
SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(Locks::alloc_tracker_lock_);
+ void BroadcastForNewAllocationRecords()
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(Locks::alloc_tracker_lock_);
// TODO: Is there a better way to hide the entries_'s type?
EntryList::iterator Begin()
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index a5bc60a..57af959 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -499,7 +499,8 @@
DCHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
<< thread->GetState() << " thread " << thread << " self " << self;
// Disable the thread-local is_gc_marking flag.
- DCHECK(thread->GetIsGcMarking());
+ // Note: a thread that has just started right before this checkpoint may already have this
+ // flag set to false, which is ok.
thread->SetIsGcMarking(false);
// If thread is a running mutator, then act on behalf of the garbage collector.
// See the code in ThreadList::RunCheckpoint.
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 4bc44d3..961b80f 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -3844,6 +3844,7 @@
}
void Heap::AllowNewAllocationRecords() const {
+ CHECK(!kUseReadBarrier);
if (IsAllocTrackingEnabled()) {
MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
if (IsAllocTrackingEnabled()) {
@@ -3853,6 +3854,7 @@
}
void Heap::DisallowNewAllocationRecords() const {
+ CHECK(!kUseReadBarrier);
if (IsAllocTrackingEnabled()) {
MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
if (IsAllocTrackingEnabled()) {
@@ -3861,6 +3863,16 @@
}
}
+void Heap::BroadcastForNewAllocationRecords() const {
+ CHECK(kUseReadBarrier);
+ if (IsAllocTrackingEnabled()) {
+ MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
+ if (IsAllocTrackingEnabled()) {
+ GetAllocationRecords()->BroadcastForNewAllocationRecords();
+ }
+ }
+}
+
// Based on debug malloc logic from libc/bionic/debug_stacktrace.cpp.
class StackCrawlState {
public:
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 8bffe5e..d0d0be3 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -753,6 +753,10 @@
SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!Locks::alloc_tracker_lock_);
+ void BroadcastForNewAllocationRecords() const
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!Locks::alloc_tracker_lock_);
+
void DisableGCForShutdown() REQUIRES(!*gc_complete_lock_);
// Create a new alloc space and compact default alloc space to it.
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index 2be570a..0235067 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -212,13 +212,6 @@
return nullptr;
}
-void InternTable::EnsureNewWeakInternsDisallowed() {
- // Lock and unlock once to ensure that no threads are still in the
- // middle of adding new interns.
- MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
- CHECK_EQ(weak_root_state_, gc::kWeakRootStateNoReadsOrWrites);
-}
-
void InternTable::BroadcastForNewInterns() {
CHECK(kUseReadBarrier);
Thread* self = Thread::Current();
@@ -460,6 +453,7 @@
}
void InternTable::ChangeWeakRootStateLocked(gc::WeakRootState new_state) {
+ CHECK(!kUseReadBarrier);
weak_root_state_ = new_state;
if (new_state != gc::kWeakRootStateNoReadsOrWrites) {
weak_intern_condition_.Broadcast(Thread::Current());
diff --git a/runtime/intern_table.h b/runtime/intern_table.h
index ae9f7a7..24c5af9 100644
--- a/runtime/intern_table.h
+++ b/runtime/intern_table.h
@@ -96,11 +96,7 @@
void DumpForSigQuit(std::ostream& os) const REQUIRES(!Locks::intern_table_lock_);
- void DisallowNewInterns() SHARED_REQUIRES(Locks::mutator_lock_);
- void AllowNewInterns() SHARED_REQUIRES(Locks::mutator_lock_);
- void EnsureNewInternsDisallowed() SHARED_REQUIRES(Locks::mutator_lock_);
void BroadcastForNewInterns() SHARED_REQUIRES(Locks::mutator_lock_);
- void EnsureNewWeakInternsDisallowed() SHARED_REQUIRES(Locks::mutator_lock_);
// Adds all of the resolved image strings from the image space into the intern table. The
// advantage of doing this is preventing expensive DexFile::FindStringId calls.
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index 8060e3d..b1c5cf0 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -541,6 +541,7 @@
}
void JavaVMExt::DisallowNewWeakGlobals() {
+ CHECK(!kUseReadBarrier);
Thread* const self = Thread::Current();
MutexLock mu(self, weak_globals_lock_);
// DisallowNewWeakGlobals is only called by CMS during the pause. It is required to have the
@@ -551,19 +552,13 @@
}
void JavaVMExt::AllowNewWeakGlobals() {
+ CHECK(!kUseReadBarrier);
Thread* self = Thread::Current();
MutexLock mu(self, weak_globals_lock_);
allow_accessing_weak_globals_.StoreSequentiallyConsistent(true);
weak_globals_add_condition_.Broadcast(self);
}
-void JavaVMExt::EnsureNewWeakGlobalsDisallowed() {
- // Lock and unlock once to ensure that no threads are still in the
- // middle of adding new weak globals.
- MutexLock mu(Thread::Current(), weak_globals_lock_);
- CHECK(!allow_accessing_weak_globals_.LoadSequentiallyConsistent());
-}
-
void JavaVMExt::BroadcastForNewWeakGlobals() {
CHECK(kUseReadBarrier);
Thread* self = Thread::Current();
diff --git a/runtime/java_vm_ext.h b/runtime/java_vm_ext.h
index d68a85f..87430c8 100644
--- a/runtime/java_vm_ext.h
+++ b/runtime/java_vm_ext.h
@@ -108,8 +108,6 @@
void DisallowNewWeakGlobals() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_);
void AllowNewWeakGlobals() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_);
- void EnsureNewWeakGlobalsDisallowed() SHARED_REQUIRES(Locks::mutator_lock_)
- REQUIRES(!weak_globals_lock_);
void BroadcastForNewWeakGlobals() SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!weak_globals_lock_);
diff --git a/runtime/lambda/box_table.cc b/runtime/lambda/box_table.cc
index 64a6076..26575fd 100644
--- a/runtime/lambda/box_table.cc
+++ b/runtime/lambda/box_table.cc
@@ -139,7 +139,8 @@
void BoxTable::BlockUntilWeaksAllowed() {
Thread* self = Thread::Current();
- while (UNLIKELY(allow_new_weaks_ == false)) {
+ while (UNLIKELY((!kUseReadBarrier && !allow_new_weaks_) ||
+ (kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
new_weaks_condition_.WaitHoldingLocks(self); // wait while holding mutator lock
}
}
@@ -184,6 +185,7 @@
}
void BoxTable::DisallowNewWeakBoxedLambdas() {
+ CHECK(!kUseReadBarrier);
Thread* self = Thread::Current();
MutexLock mu(self, *Locks::lambda_table_lock_);
@@ -191,6 +193,7 @@
}
void BoxTable::AllowNewWeakBoxedLambdas() {
+ CHECK(!kUseReadBarrier);
Thread* self = Thread::Current();
MutexLock mu(self, *Locks::lambda_table_lock_);
@@ -198,10 +201,11 @@
new_weaks_condition_.Broadcast(self);
}
-void BoxTable::EnsureNewWeakBoxedLambdasDisallowed() {
+void BoxTable::BroadcastForNewWeakBoxedLambdas() {
+ CHECK(kUseReadBarrier);
Thread* self = Thread::Current();
MutexLock mu(self, *Locks::lambda_table_lock_);
- CHECK_NE(allow_new_weaks_, false);
+ new_weaks_condition_.Broadcast(self);
}
bool BoxTable::EqualsFn::operator()(const ClosureType& lhs, const ClosureType& rhs) const {
diff --git a/runtime/lambda/box_table.h b/runtime/lambda/box_table.h
index 312d811..9ffda66 100644
--- a/runtime/lambda/box_table.h
+++ b/runtime/lambda/box_table.h
@@ -67,8 +67,8 @@
void AllowNewWeakBoxedLambdas()
REQUIRES(!Locks::lambda_table_lock_);
- // GC callback: Verify that the state is now blocking anyone from touching the map.
- void EnsureNewWeakBoxedLambdasDisallowed()
+ // GC callback: Unblock any readers who have been queued waiting to touch the map.
+ void BroadcastForNewWeakBoxedLambdas()
REQUIRES(!Locks::lambda_table_lock_);
BoxTable();
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index da6ee25..95fcb67 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -1126,24 +1126,19 @@
}
void MonitorList::DisallowNewMonitors() {
+ CHECK(!kUseReadBarrier);
MutexLock mu(Thread::Current(), monitor_list_lock_);
allow_new_monitors_ = false;
}
void MonitorList::AllowNewMonitors() {
+ CHECK(!kUseReadBarrier);
Thread* self = Thread::Current();
MutexLock mu(self, monitor_list_lock_);
allow_new_monitors_ = true;
monitor_add_condition_.Broadcast(self);
}
-void MonitorList::EnsureNewMonitorsDisallowed() {
- // Lock and unlock once to ensure that no threads are still in the
- // middle of adding new monitors.
- MutexLock mu(Thread::Current(), monitor_list_lock_);
- CHECK(!allow_new_monitors_);
-}
-
void MonitorList::BroadcastForNewMonitors() {
CHECK(kUseReadBarrier);
Thread* self = Thread::Current();
diff --git a/runtime/monitor.h b/runtime/monitor.h
index 346e866..2e7dbc1 100644
--- a/runtime/monitor.h
+++ b/runtime/monitor.h
@@ -295,7 +295,6 @@
REQUIRES(!monitor_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
void DisallowNewMonitors() REQUIRES(!monitor_list_lock_);
void AllowNewMonitors() REQUIRES(!monitor_list_lock_);
- void EnsureNewMonitorsDisallowed() REQUIRES(!monitor_list_lock_);
void BroadcastForNewMonitors() REQUIRES(!monitor_list_lock_);
// Returns how many monitors were deflated.
size_t DeflateMonitors() REQUIRES(!monitor_list_lock_) REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 25bb827..4797564 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -1515,6 +1515,7 @@
}
void Runtime::DisallowNewSystemWeaks() {
+ CHECK(!kUseReadBarrier);
monitor_list_->DisallowNewMonitors();
intern_table_->ChangeWeakRootState(gc::kWeakRootStateNoReadsOrWrites);
java_vm_->DisallowNewWeakGlobals();
@@ -1523,6 +1524,7 @@
}
void Runtime::AllowNewSystemWeaks() {
+ CHECK(!kUseReadBarrier);
monitor_list_->AllowNewMonitors();
intern_table_->ChangeWeakRootState(gc::kWeakRootStateNormal); // TODO: Do this in the sweeping.
java_vm_->AllowNewWeakGlobals();
@@ -1530,20 +1532,15 @@
lambda_box_table_->AllowNewWeakBoxedLambdas();
}
-void Runtime::EnsureNewSystemWeaksDisallowed() {
- // Lock and unlock the system weak locks once to ensure that no
- // threads are still in the middle of adding new system weaks.
- monitor_list_->EnsureNewMonitorsDisallowed();
- intern_table_->EnsureNewWeakInternsDisallowed();
- java_vm_->EnsureNewWeakGlobalsDisallowed();
- lambda_box_table_->EnsureNewWeakBoxedLambdasDisallowed();
-}
-
void Runtime::BroadcastForNewSystemWeaks() {
+ // This is used for the read barrier case that uses the thread-local
+ // Thread::GetWeakRefAccessEnabled() flag.
CHECK(kUseReadBarrier);
monitor_list_->BroadcastForNewMonitors();
intern_table_->BroadcastForNewInterns();
java_vm_->BroadcastForNewWeakGlobals();
+ heap_->BroadcastForNewAllocationRecords();
+ lambda_box_table_->BroadcastForNewWeakBoxedLambdas();
}
void Runtime::SetInstructionSet(InstructionSet instruction_set) {
diff --git a/runtime/runtime.h b/runtime/runtime.h
index bd21db1..a35eac1 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -303,7 +303,6 @@
void DisallowNewSystemWeaks() SHARED_REQUIRES(Locks::mutator_lock_);
void AllowNewSystemWeaks() SHARED_REQUIRES(Locks::mutator_lock_);
- void EnsureNewSystemWeaksDisallowed() SHARED_REQUIRES(Locks::mutator_lock_);
void BroadcastForNewSystemWeaks() SHARED_REQUIRES(Locks::mutator_lock_);
// Visit all the roots. If only_dirty is true then non-dirty roots won't be visited. If