summaryrefslogtreecommitdiff
path: root/runtime/base/mutex.cc
diff options
context:
space:
mode:
Diffstat (limited to 'runtime/base/mutex.cc')
-rw-r--r-- runtime/base/mutex.cc 13
1 file changed, 4 insertions(+), 9 deletions(-)
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 2d6f178487..01d7e73774 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -245,12 +245,11 @@ void BaseMutex::DumpAll(std::ostream& os) {
}
void BaseMutex::CheckSafeToWait(Thread* self) {
- if (!kDebugLocking) {
- return;
- }
if (self == nullptr) {
CheckUnattachedThread(level_);
- } else {
+ return;
+ }
+ if (kDebugLocking) {
CHECK(self->GetHeldMutex(level_) == this || level_ == kMonitorLock)
<< "Waiting on unacquired mutex: " << name_;
bool bad_mutexes_held = false;
@@ -566,7 +565,6 @@ bool Mutex::IsDumpFrequent(Thread* thread, uint64_t try_times) {
}
}
-template<bool check>
bool Mutex::ExclusiveTryLock(Thread* self) {
DCHECK(self == nullptr || self == Thread::Current());
if (kDebugLocking && !recursive_) {
@@ -597,7 +595,7 @@ bool Mutex::ExclusiveTryLock(Thread* self) {
#endif
DCHECK_EQ(GetExclusiveOwnerTid(), 0);
exclusive_owner_.store(SafeGetTid(self), std::memory_order_relaxed);
- RegisterAsLocked(self, check);
+ RegisterAsLocked(self);
}
recursion_count_++;
if (kDebugLocking) {
@@ -608,9 +606,6 @@ bool Mutex::ExclusiveTryLock(Thread* self) {
return true;
}
-template bool Mutex::ExclusiveTryLock<false>(Thread* self);
-template bool Mutex::ExclusiveTryLock<true>(Thread* self);
-
bool Mutex::ExclusiveTryLockWithSpinning(Thread* self) {
// Spin a small number of times, since this affects our ability to respond to suspension
// requests. We spin repeatedly only if the mutex repeatedly becomes available and unavailable