summaryrefslogtreecommitdiff
path: root/runtime/lambda
diff options
context:
space:
mode:
author Hiroshi Yamauchi <yamauchi@google.com> 2015-09-02 16:16:58 -0700
committer Hiroshi Yamauchi <yamauchi@google.com> 2015-09-03 10:45:21 -0700
commit fdbd13c7af91a042eda753e436eeebf0e1937250 (patch)
tree c1fb370c9a4a30b9e589802c9c75dcc4919fc6e9 /runtime/lambda
parent fe3879e6011f629d0dd6b04fab00b9496bd4ea08 (diff)
Some fixes for the CC collector.
- Remove a DCHECK in DisableMarkingCheckpoint, which caused occasional (false) failures. - Check the thread-local GetWeakRefAccessEnabled in boxed lambdas weak access. - Add missing BroadcastForNewAllocationRecords and BroadcastForNewWeakBoxedLambdas. The lack of the former caused occasional deadlocks in the ddmc test. - Remove the 'ensure system weaks disallowed' calls, which weren't useful and were dead code. Bug: 12687968 Change-Id: I33850c8d12e6e1a3aed1c2bb18eba263cbab76e8
Diffstat (limited to 'runtime/lambda')
-rw-r--r--  runtime/lambda/box_table.cc  10
-rw-r--r--  runtime/lambda/box_table.h   4
2 files changed, 9 insertions(+), 5 deletions(-)
diff --git a/runtime/lambda/box_table.cc b/runtime/lambda/box_table.cc
index 64a6076aea..26575fd995 100644
--- a/runtime/lambda/box_table.cc
+++ b/runtime/lambda/box_table.cc
@@ -139,7 +139,8 @@ BoxTable::ValueType BoxTable::FindBoxedLambda(const ClosureType& closure) const
void BoxTable::BlockUntilWeaksAllowed() {
Thread* self = Thread::Current();
- while (UNLIKELY(allow_new_weaks_ == false)) {
+ while (UNLIKELY((!kUseReadBarrier && !allow_new_weaks_) ||
+ (kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
new_weaks_condition_.WaitHoldingLocks(self); // wait while holding mutator lock
}
}
@@ -184,6 +185,7 @@ void BoxTable::SweepWeakBoxedLambdas(IsMarkedVisitor* visitor) {
}
void BoxTable::DisallowNewWeakBoxedLambdas() {
+ CHECK(!kUseReadBarrier);
Thread* self = Thread::Current();
MutexLock mu(self, *Locks::lambda_table_lock_);
@@ -191,6 +193,7 @@ void BoxTable::DisallowNewWeakBoxedLambdas() {
}
void BoxTable::AllowNewWeakBoxedLambdas() {
+ CHECK(!kUseReadBarrier);
Thread* self = Thread::Current();
MutexLock mu(self, *Locks::lambda_table_lock_);
@@ -198,10 +201,11 @@ void BoxTable::AllowNewWeakBoxedLambdas() {
new_weaks_condition_.Broadcast(self);
}
-void BoxTable::EnsureNewWeakBoxedLambdasDisallowed() {
+void BoxTable::BroadcastForNewWeakBoxedLambdas() {
+ CHECK(kUseReadBarrier);
Thread* self = Thread::Current();
MutexLock mu(self, *Locks::lambda_table_lock_);
- CHECK_NE(allow_new_weaks_, false);
+ new_weaks_condition_.Broadcast(self);
}
bool BoxTable::EqualsFn::operator()(const ClosureType& lhs, const ClosureType& rhs) const {
diff --git a/runtime/lambda/box_table.h b/runtime/lambda/box_table.h
index 312d811b9b..9ffda6658f 100644
--- a/runtime/lambda/box_table.h
+++ b/runtime/lambda/box_table.h
@@ -67,8 +67,8 @@ class BoxTable FINAL {
void AllowNewWeakBoxedLambdas()
REQUIRES(!Locks::lambda_table_lock_);
- // GC callback: Verify that the state is now blocking anyone from touching the map.
- void EnsureNewWeakBoxedLambdasDisallowed()
+ // GC callback: Unblock any readers who have been queued waiting to touch the map.
+ void BroadcastForNewWeakBoxedLambdas()
REQUIRES(!Locks::lambda_table_lock_);
BoxTable();