path: root/runtime/base/mutex.cc
author Hans Boehm <hboehm@google.com> 2022-10-13 02:47:24 +0000
committer Hans Boehm <hboehm@google.com> 2022-10-13 02:47:24 +0000
commit ebd76406bf5fa74185998bc29f0f27c20fa2e683 (patch)
tree beec1accc089bbbc432a140dfebe28bb7278a909 /runtime/base/mutex.cc
parent fd20a745227aa7cae7a08728bb29e5bfce64ea87 (diff)
Revert "Revert^2 "Thread suspension cleanup and deadlock fix""
This reverts commit fd20a745227aa7cae7a08728bb29e5bfce64ea87.

Reason for revert: Lots of libartd failures due to new checkpoint lock level check.

Change-Id: I0cf88ff893f8743a9a830a49489807d0921199a3
Diffstat (limited to 'runtime/base/mutex.cc')
-rw-r--r--  runtime/base/mutex.cc  13
1 file changed, 4 insertions, 9 deletions
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 2d6f178487..01d7e73774 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -245,12 +245,11 @@ void BaseMutex::DumpAll(std::ostream& os) {
 }
 
 void BaseMutex::CheckSafeToWait(Thread* self) {
-  if (!kDebugLocking) {
-    return;
-  }
   if (self == nullptr) {
     CheckUnattachedThread(level_);
-  } else {
+    return;
+  }
+  if (kDebugLocking) {
     CHECK(self->GetHeldMutex(level_) == this || level_ == kMonitorLock)
         << "Waiting on unacquired mutex: " << name_;
     bool bad_mutexes_held = false;
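
To make the direction of the revert easier to follow, here is a sketch of CheckSafeToWait as it reads once this hunk is applied, reassembled from the context and '+' lines above; the remaining held-mutex checks are elided:

void BaseMutex::CheckSafeToWait(Thread* self) {
  if (self == nullptr) {
    // Threads without an attached Thread object are validated separately.
    CheckUnattachedThread(level_);
    return;
  }
  if (kDebugLocking) {
    // Debug builds insist that the caller already holds this mutex
    // (or that this is the monitor lock) before waiting on it.
    CHECK(self->GetHeldMutex(level_) == this || level_ == kMonitorLock)
        << "Waiting on unacquired mutex: " << name_;
    bool bad_mutexes_held = false;
    // ... level checks on the other held mutexes elided ...
  }
}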
@@ -566,7 +565,6 @@ bool Mutex::IsDumpFrequent(Thread* thread, uint64_t try_times) {
   }
 }
 
-template<bool check>
 bool Mutex::ExclusiveTryLock(Thread* self) {
   DCHECK(self == nullptr || self == Thread::Current());
   if (kDebugLocking && !recursive_) {
@@ -597,7 +595,7 @@ bool Mutex::ExclusiveTryLock(Thread* self) {
 #endif
     DCHECK_EQ(GetExclusiveOwnerTid(), 0);
     exclusive_owner_.store(SafeGetTid(self), std::memory_order_relaxed);
-    RegisterAsLocked(self, check);
+    RegisterAsLocked(self);
   }
   recursion_count_++;
   if (kDebugLocking) {
@@ -608,9 +606,6 @@ bool Mutex::ExclusiveTryLock(Thread* self) {
   return true;
 }
 
-template bool Mutex::ExclusiveTryLock<false>(Thread* self);
-template bool Mutex::ExclusiveTryLock<true>(Thread* self);
-
 bool Mutex::ExclusiveTryLockWithSpinning(Thread* self) {
   // Spin a small number of times, since this affects our ability to respond to suspension
   // requests. We spin repeatedly only if the mutex repeatedly becomes available and unavailable
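
Taken together, the last three hunks undo the templatization of ExclusiveTryLock: the reverted change had added a bool check template parameter that was forwarded to RegisterAsLocked, along with explicit instantiations for both values. A sketch of the two shapes, with everything except the signatures and the RegisterAsLocked call elided:

// Form removed by this revert (sketch, body elided):
template<bool check>
bool Mutex::ExclusiveTryLock(Thread* self) {
  // ...
  RegisterAsLocked(self, check);  // template flag forwarded to lock registration
  // ...
}
template bool Mutex::ExclusiveTryLock<false>(Thread* self);
template bool Mutex::ExclusiveTryLock<true>(Thread* self);

// Form restored by this revert (sketch, body elided):
bool Mutex::ExclusiveTryLock(Thread* self) {
  // ...
  RegisterAsLocked(self);
  // ...
}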