author Hiroshi Yamauchi <yamauchi@google.com> 2015-09-02 16:16:58 -0700
committer Hiroshi Yamauchi <yamauchi@google.com> 2015-09-03 10:45:21 -0700
commit fdbd13c7af91a042eda753e436eeebf0e1937250 (patch)
tree c1fb370c9a4a30b9e589802c9c75dcc4919fc6e9
parent fe3879e6011f629d0dd6b04fab00b9496bd4ea08 (diff)
Some fixes for the CC collector.
- Remove a DCHECK in DisableMarkingCheckpoint, which caused occasional (false) failures. - Check the thread-local GetWeakRefAccessEnabled in boxed lambdas weak access. - Add missing BroadcastForNewAllocationRecords and BroadcastForNewWeakBoxedLambdas. The lack of the former caused occasional deadlocks in the ddmc test. - Remove the 'ensure system weaks disallowed' calls, which weren't useful and dead. Bug: 12687968 Change-Id: I33850c8d12e6e1a3aed1c2bb18eba263cbab76e8
-rw-r--r--  runtime/gc/allocation_record.cc  7
-rw-r--r--  runtime/gc/allocation_record.h  3
-rw-r--r--  runtime/gc/collector/concurrent_copying.cc  3
-rw-r--r--  runtime/gc/heap.cc  12
-rw-r--r--  runtime/gc/heap.h  4
-rw-r--r--  runtime/intern_table.cc  8
-rw-r--r--  runtime/intern_table.h  4
-rw-r--r--  runtime/java_vm_ext.cc  9
-rw-r--r--  runtime/java_vm_ext.h  2
-rw-r--r--  runtime/lambda/box_table.cc  10
-rw-r--r--  runtime/lambda/box_table.h  4
-rw-r--r--  runtime/monitor.cc  9
-rw-r--r--  runtime/monitor.h  1
-rw-r--r--  runtime/runtime.cc  15
-rw-r--r--  runtime/runtime.h  1
15 files changed, 48 insertions, 44 deletions
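All of the file changes below serve one wait/broadcast protocol: a mutator that wants to read or add an entry in a weak table (monitors, interned strings, weak globals, boxed lambdas, allocation records) blocks on a condition variable while weak-reference access is disabled, and the collector must wake it once access is re-enabled. Under the concurrent copying (read barrier) collector the gate is the thread-local Thread::GetWeakRefAccessEnabled() flag rather than a table-wide allow flag, so re-enabling access has to be followed by an explicit broadcast; the broadcasts added here are what fix the ddmc deadlocks. Below is a minimal sketch of that pattern using standard C++ primitives instead of ART's Mutex/ConditionVariable; the class and member names are invented for illustration.

// Minimal sketch (not ART source) of the weak-access wait/broadcast pattern.
#include <atomic>
#include <condition_variable>
#include <mutex>

class WeakTableSketch {
 public:
  // Mutator side: block until weak-reference access is allowed again. With
  // the read barrier (CC) collector the gate is the caller's thread-local
  // flag; otherwise it is the table-wide allow_new_weaks_ flag.
  void BlockUntilWeaksAllowed(const std::atomic<bool>& thread_weak_access_enabled) {
    std::unique_lock<std::mutex> lock(lock_);
    cond_.wait(lock, [&] {
      return kUseReadBarrier ? thread_weak_access_enabled.load()
                             : allow_new_weaks_;
    });
  }

  // CMS-style pause: flip the table-wide flag; Allow wakes any waiters.
  void DisallowNewWeaks() {
    std::lock_guard<std::mutex> guard(lock_);
    allow_new_weaks_ = false;
  }
  void AllowNewWeaks() {
    std::lock_guard<std::mutex> guard(lock_);
    allow_new_weaks_ = true;
    cond_.notify_all();
  }

  // CC-style: the per-thread flags are flipped elsewhere; this only wakes
  // readers already queued on the condition variable. Without it, a queued
  // reader sleeps forever, which is the deadlock the commit message mentions.
  void BroadcastForNewWeaks() {
    std::lock_guard<std::mutex> guard(lock_);
    cond_.notify_all();
  }

 private:
  static constexpr bool kUseReadBarrier = true;  // assumed value for the sketch
  std::mutex lock_;
  std::condition_variable cond_;
  bool allow_new_weaks_ = true;
};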
diff --git a/runtime/gc/allocation_record.cc b/runtime/gc/allocation_record.cc
index 16c9354137..369e4083a2 100644
--- a/runtime/gc/allocation_record.cc
+++ b/runtime/gc/allocation_record.cc
@@ -167,14 +167,21 @@ void AllocRecordObjectMap::SweepAllocationRecords(IsMarkedVisitor* visitor) {
}
void AllocRecordObjectMap::AllowNewAllocationRecords() {
+ CHECK(!kUseReadBarrier);
allow_new_record_ = true;
new_record_condition_.Broadcast(Thread::Current());
}
void AllocRecordObjectMap::DisallowNewAllocationRecords() {
+ CHECK(!kUseReadBarrier);
allow_new_record_ = false;
}
+void AllocRecordObjectMap::BroadcastForNewAllocationRecords() {
+ CHECK(kUseReadBarrier);
+ new_record_condition_.Broadcast(Thread::Current());
+}
+
struct AllocRecordStackVisitor : public StackVisitor {
AllocRecordStackVisitor(Thread* thread, AllocRecordStackTrace* trace_in, size_t max)
SHARED_REQUIRES(Locks::mutator_lock_)
diff --git a/runtime/gc/allocation_record.h b/runtime/gc/allocation_record.h
index 0a4f53226d..ffdfd31ce6 100644
--- a/runtime/gc/allocation_record.h
+++ b/runtime/gc/allocation_record.h
@@ -277,6 +277,9 @@ class AllocRecordObjectMap {
void AllowNewAllocationRecords()
SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(Locks::alloc_tracker_lock_);
+ void BroadcastForNewAllocationRecords()
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(Locks::alloc_tracker_lock_);
// TODO: Is there a better way to hide the entries_'s type?
EntryList::iterator Begin()
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index a5bc60a912..57af95940d 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -499,7 +499,8 @@ class DisableMarkingCheckpoint : public Closure {
DCHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
<< thread->GetState() << " thread " << thread << " self " << self;
// Disable the thread-local is_gc_marking flag.
- DCHECK(thread->GetIsGcMarking());
+ // Note that a thread that has just started right before this checkpoint may already have
+ // this flag set to false, which is ok.
thread->SetIsGcMarking(false);
// If thread is a running mutator, then act on behalf of the garbage collector.
// See the code in ThreadList::RunCheckpoint.
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 4bc44d3fd9..961b80f4ae 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -3844,6 +3844,7 @@ void Heap::SweepAllocationRecords(IsMarkedVisitor* visitor) const {
}
void Heap::AllowNewAllocationRecords() const {
+ CHECK(!kUseReadBarrier);
if (IsAllocTrackingEnabled()) {
MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
if (IsAllocTrackingEnabled()) {
@@ -3853,6 +3854,7 @@ void Heap::AllowNewAllocationRecords() const {
}
void Heap::DisallowNewAllocationRecords() const {
+ CHECK(!kUseReadBarrier);
if (IsAllocTrackingEnabled()) {
MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
if (IsAllocTrackingEnabled()) {
@@ -3861,6 +3863,16 @@ void Heap::DisallowNewAllocationRecords() const {
}
}
+void Heap::BroadcastForNewAllocationRecords() const {
+ CHECK(kUseReadBarrier);
+ if (IsAllocTrackingEnabled()) {
+ MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
+ if (IsAllocTrackingEnabled()) {
+ GetAllocationRecords()->BroadcastForNewAllocationRecords();
+ }
+ }
+}
+
// Based on debug malloc logic from libc/bionic/debug_stacktrace.cpp.
class StackCrawlState {
public:
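The new Heap::BroadcastForNewAllocationRecords() above reuses the check / lock / re-check idiom of the neighbouring Allow/Disallow methods, so the common case, allocation tracking disabled, never takes alloc_tracker_lock_. Here is a standalone sketch of that idiom, with a std::mutex and an atomic flag standing in for Locks::alloc_tracker_lock_ and IsAllocTrackingEnabled(); the names are invented.

// Sketch of the check / lock / re-check idiom (not ART source).
#include <atomic>
#include <mutex>

class AllocTrackingSketch {
 public:
  void BroadcastForNewAllocationRecords() {
    if (!tracking_enabled_.load()) {
      return;  // Fast path: tracking is off, no lock is taken.
    }
    std::lock_guard<std::mutex> guard(tracker_lock_);
    // Re-check under the lock: tracking may have been disabled between the
    // unlocked test above and acquiring the lock.
    if (tracking_enabled_.load()) {
      // ...broadcast on the allocation-record condition variable here...
    }
  }

 private:
  std::atomic<bool> tracking_enabled_{false};
  std::mutex tracker_lock_;
};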
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 8bffe5e6e9..d0d0be3826 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -753,6 +753,10 @@ class Heap {
SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!Locks::alloc_tracker_lock_);
+ void BroadcastForNewAllocationRecords() const
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!Locks::alloc_tracker_lock_);
+
void DisableGCForShutdown() REQUIRES(!*gc_complete_lock_);
// Create a new alloc space and compact default alloc space to it.
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index 2be570ac85..02350679e9 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -212,13 +212,6 @@ mirror::String* InternTable::LookupStringFromImage(mirror::String* s) {
return nullptr;
}
-void InternTable::EnsureNewWeakInternsDisallowed() {
- // Lock and unlock once to ensure that no threads are still in the
- // middle of adding new interns.
- MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
- CHECK_EQ(weak_root_state_, gc::kWeakRootStateNoReadsOrWrites);
-}
-
void InternTable::BroadcastForNewInterns() {
CHECK(kUseReadBarrier);
Thread* self = Thread::Current();
@@ -460,6 +453,7 @@ void InternTable::ChangeWeakRootState(gc::WeakRootState new_state) {
}
void InternTable::ChangeWeakRootStateLocked(gc::WeakRootState new_state) {
+ CHECK(!kUseReadBarrier);
weak_root_state_ = new_state;
if (new_state != gc::kWeakRootStateNoReadsOrWrites) {
weak_intern_condition_.Broadcast(Thread::Current());
diff --git a/runtime/intern_table.h b/runtime/intern_table.h
index ae9f7a7acd..24c5af938c 100644
--- a/runtime/intern_table.h
+++ b/runtime/intern_table.h
@@ -96,11 +96,7 @@ class InternTable {
void DumpForSigQuit(std::ostream& os) const REQUIRES(!Locks::intern_table_lock_);
- void DisallowNewInterns() SHARED_REQUIRES(Locks::mutator_lock_);
- void AllowNewInterns() SHARED_REQUIRES(Locks::mutator_lock_);
- void EnsureNewInternsDisallowed() SHARED_REQUIRES(Locks::mutator_lock_);
void BroadcastForNewInterns() SHARED_REQUIRES(Locks::mutator_lock_);
- void EnsureNewWeakInternsDisallowed() SHARED_REQUIRES(Locks::mutator_lock_);
// Adds all of the resolved image strings from the image space into the intern table. The
// advantage of doing this is preventing expensive DexFile::FindStringId calls.
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index 8060e3dff0..b1c5cf0465 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -541,6 +541,7 @@ void JavaVMExt::DumpForSigQuit(std::ostream& os) {
}
void JavaVMExt::DisallowNewWeakGlobals() {
+ CHECK(!kUseReadBarrier);
Thread* const self = Thread::Current();
MutexLock mu(self, weak_globals_lock_);
// DisallowNewWeakGlobals is only called by CMS during the pause. It is required to have the
@@ -551,19 +552,13 @@ void JavaVMExt::DisallowNewWeakGlobals() {
}
void JavaVMExt::AllowNewWeakGlobals() {
+ CHECK(!kUseReadBarrier);
Thread* self = Thread::Current();
MutexLock mu(self, weak_globals_lock_);
allow_accessing_weak_globals_.StoreSequentiallyConsistent(true);
weak_globals_add_condition_.Broadcast(self);
}
-void JavaVMExt::EnsureNewWeakGlobalsDisallowed() {
- // Lock and unlock once to ensure that no threads are still in the
- // middle of adding new weak globals.
- MutexLock mu(Thread::Current(), weak_globals_lock_);
- CHECK(!allow_accessing_weak_globals_.LoadSequentiallyConsistent());
-}
-
void JavaVMExt::BroadcastForNewWeakGlobals() {
CHECK(kUseReadBarrier);
Thread* self = Thread::Current();
diff --git a/runtime/java_vm_ext.h b/runtime/java_vm_ext.h
index d68a85f230..87430c800b 100644
--- a/runtime/java_vm_ext.h
+++ b/runtime/java_vm_ext.h
@@ -108,8 +108,6 @@ class JavaVMExt : public JavaVM {
void DisallowNewWeakGlobals() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_);
void AllowNewWeakGlobals() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_);
- void EnsureNewWeakGlobalsDisallowed() SHARED_REQUIRES(Locks::mutator_lock_)
- REQUIRES(!weak_globals_lock_);
void BroadcastForNewWeakGlobals() SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!weak_globals_lock_);
diff --git a/runtime/lambda/box_table.cc b/runtime/lambda/box_table.cc
index 64a6076aea..26575fd995 100644
--- a/runtime/lambda/box_table.cc
+++ b/runtime/lambda/box_table.cc
@@ -139,7 +139,8 @@ BoxTable::ValueType BoxTable::FindBoxedLambda(const ClosureType& closure) const
void BoxTable::BlockUntilWeaksAllowed() {
Thread* self = Thread::Current();
- while (UNLIKELY(allow_new_weaks_ == false)) {
+ while (UNLIKELY((!kUseReadBarrier && !allow_new_weaks_) ||
+ (kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
new_weaks_condition_.WaitHoldingLocks(self); // wait while holding mutator lock
}
}
@@ -184,6 +185,7 @@ void BoxTable::SweepWeakBoxedLambdas(IsMarkedVisitor* visitor) {
}
void BoxTable::DisallowNewWeakBoxedLambdas() {
+ CHECK(!kUseReadBarrier);
Thread* self = Thread::Current();
MutexLock mu(self, *Locks::lambda_table_lock_);
@@ -191,6 +193,7 @@ void BoxTable::DisallowNewWeakBoxedLambdas() {
}
void BoxTable::AllowNewWeakBoxedLambdas() {
+ CHECK(!kUseReadBarrier);
Thread* self = Thread::Current();
MutexLock mu(self, *Locks::lambda_table_lock_);
@@ -198,10 +201,11 @@ void BoxTable::AllowNewWeakBoxedLambdas() {
new_weaks_condition_.Broadcast(self);
}
-void BoxTable::EnsureNewWeakBoxedLambdasDisallowed() {
+void BoxTable::BroadcastForNewWeakBoxedLambdas() {
+ CHECK(kUseReadBarrier);
Thread* self = Thread::Current();
MutexLock mu(self, *Locks::lambda_table_lock_);
- CHECK_NE(allow_new_weaks_, false);
+ new_weaks_condition_.Broadcast(self);
}
bool BoxTable::EqualsFn::operator()(const ClosureType& lhs, const ClosureType& rhs) const {
diff --git a/runtime/lambda/box_table.h b/runtime/lambda/box_table.h
index 312d811b9b..9ffda6658f 100644
--- a/runtime/lambda/box_table.h
+++ b/runtime/lambda/box_table.h
@@ -67,8 +67,8 @@ class BoxTable FINAL {
void AllowNewWeakBoxedLambdas()
REQUIRES(!Locks::lambda_table_lock_);
- // GC callback: Verify that the state is now blocking anyone from touching the map.
- void EnsureNewWeakBoxedLambdasDisallowed()
+ // GC callback: Unblock any readers who have been queued waiting to touch the map.
+ void BroadcastForNewWeakBoxedLambdas()
REQUIRES(!Locks::lambda_table_lock_);
BoxTable();
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index da6ee259b5..95fcb67ea1 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -1126,24 +1126,19 @@ MonitorList::~MonitorList() {
}
void MonitorList::DisallowNewMonitors() {
+ CHECK(!kUseReadBarrier);
MutexLock mu(Thread::Current(), monitor_list_lock_);
allow_new_monitors_ = false;
}
void MonitorList::AllowNewMonitors() {
+ CHECK(!kUseReadBarrier);
Thread* self = Thread::Current();
MutexLock mu(self, monitor_list_lock_);
allow_new_monitors_ = true;
monitor_add_condition_.Broadcast(self);
}
-void MonitorList::EnsureNewMonitorsDisallowed() {
- // Lock and unlock once to ensure that no threads are still in the
- // middle of adding new monitors.
- MutexLock mu(Thread::Current(), monitor_list_lock_);
- CHECK(!allow_new_monitors_);
-}
-
void MonitorList::BroadcastForNewMonitors() {
CHECK(kUseReadBarrier);
Thread* self = Thread::Current();
diff --git a/runtime/monitor.h b/runtime/monitor.h
index 346e8662b2..2e7dbc1d5e 100644
--- a/runtime/monitor.h
+++ b/runtime/monitor.h
@@ -295,7 +295,6 @@ class MonitorList {
REQUIRES(!monitor_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
void DisallowNewMonitors() REQUIRES(!monitor_list_lock_);
void AllowNewMonitors() REQUIRES(!monitor_list_lock_);
- void EnsureNewMonitorsDisallowed() REQUIRES(!monitor_list_lock_);
void BroadcastForNewMonitors() REQUIRES(!monitor_list_lock_);
// Returns how many monitors were deflated.
size_t DeflateMonitors() REQUIRES(!monitor_list_lock_) REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 25bb827b58..4797564237 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -1515,6 +1515,7 @@ ArtMethod* Runtime::CreateCalleeSaveMethod() {
}
void Runtime::DisallowNewSystemWeaks() {
+ CHECK(!kUseReadBarrier);
monitor_list_->DisallowNewMonitors();
intern_table_->ChangeWeakRootState(gc::kWeakRootStateNoReadsOrWrites);
java_vm_->DisallowNewWeakGlobals();
@@ -1523,6 +1524,7 @@ void Runtime::DisallowNewSystemWeaks() {
}
void Runtime::AllowNewSystemWeaks() {
+ CHECK(!kUseReadBarrier);
monitor_list_->AllowNewMonitors();
intern_table_->ChangeWeakRootState(gc::kWeakRootStateNormal); // TODO: Do this in the sweeping.
java_vm_->AllowNewWeakGlobals();
@@ -1530,20 +1532,15 @@ void Runtime::AllowNewSystemWeaks() {
lambda_box_table_->AllowNewWeakBoxedLambdas();
}
-void Runtime::EnsureNewSystemWeaksDisallowed() {
- // Lock and unlock the system weak locks once to ensure that no
- // threads are still in the middle of adding new system weaks.
- monitor_list_->EnsureNewMonitorsDisallowed();
- intern_table_->EnsureNewWeakInternsDisallowed();
- java_vm_->EnsureNewWeakGlobalsDisallowed();
- lambda_box_table_->EnsureNewWeakBoxedLambdasDisallowed();
-}
-
void Runtime::BroadcastForNewSystemWeaks() {
+ // This is used for the read barrier case that uses the thread-local
+ // Thread::GetWeakRefAccessEnabled() flag.
CHECK(kUseReadBarrier);
monitor_list_->BroadcastForNewMonitors();
intern_table_->BroadcastForNewInterns();
java_vm_->BroadcastForNewWeakGlobals();
+ heap_->BroadcastForNewAllocationRecords();
+ lambda_box_table_->BroadcastForNewWeakBoxedLambdas();
}
void Runtime::SetInstructionSet(InstructionSet instruction_set) {
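The comment added to Runtime::BroadcastForNewSystemWeaks() above makes the split explicit: the Disallow/Allow pair is the non-read-barrier (CMS-style) path, while the broadcast is the read-barrier (CC) path that relies on the thread-local Thread::GetWeakRefAccessEnabled() flag. A hedged sketch of how a collector might drive these hooks follows; only the hook names come from runtime.h, the driver functions and call sites are assumptions.

// Hypothetical drivers, for illustration only.
struct RuntimeWeakHooks {
  virtual void DisallowNewSystemWeaks() = 0;      // CHECK(!kUseReadBarrier)
  virtual void AllowNewSystemWeaks() = 0;         // CHECK(!kUseReadBarrier)
  virtual void BroadcastForNewSystemWeaks() = 0;  // CHECK(kUseReadBarrier)
  virtual ~RuntimeWeakHooks() {}
};

// CMS-style pause: block new system weaks, sweep them, then unblock
// (AllowNewSystemWeaks itself broadcasts to any queued readers).
void CmsPauseSketch(RuntimeWeakHooks& runtime) {
  runtime.DisallowNewSystemWeaks();
  // ...sweep system weaks...
  runtime.AllowNewSystemWeaks();
}

// CC-style: weak-reference access is gated by per-thread flags. After the
// collector flips those flags back on it must broadcast, otherwise readers
// that queued while access was disabled (e.g. the allocation-record reader
// behind the ddmc deadlock) are never woken.
void CcReenableWeakRefAccessSketch(RuntimeWeakHooks& runtime) {
  // ...run a checkpoint that re-enables each thread's weak-ref access flag...
  runtime.BroadcastForNewSystemWeaks();
}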
diff --git a/runtime/runtime.h b/runtime/runtime.h
index bd21db16c4..a35eac1af8 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -303,7 +303,6 @@ class Runtime {
void DisallowNewSystemWeaks() SHARED_REQUIRES(Locks::mutator_lock_);
void AllowNewSystemWeaks() SHARED_REQUIRES(Locks::mutator_lock_);
- void EnsureNewSystemWeaksDisallowed() SHARED_REQUIRES(Locks::mutator_lock_);
void BroadcastForNewSystemWeaks() SHARED_REQUIRES(Locks::mutator_lock_);
// Visit all the roots. If only_dirty is true then non-dirty roots won't be visited. If