Implement monitors.
Change-Id: Ifc7a801f9cbcdfbc1e1af5c905261dfadaa60f45
diff --git a/build/Android.common.mk b/build/Android.common.mk
index 1ab48f6..deb1df0 100644
--- a/build/Android.common.mk
+++ b/build/Android.common.mk
@@ -130,6 +130,7 @@
src/stub_arm.cc \
src/stub_x86.cc \
src/sun_misc_Unsafe.cc \
+ src/sync.cc \
src/thread.cc \
src/thread_list.cc \
src/utf.cc \
diff --git a/src/class_linker.cc b/src/class_linker.cc
index 232390a..b9c6ed8 100644
--- a/src/class_linker.cc
+++ b/src/class_linker.cc
@@ -16,10 +16,10 @@
#include "heap.h"
#include "intern_table.h"
#include "logging.h"
-#include "monitor.h"
#include "object.h"
#include "runtime.h"
#include "space.h"
+#include "sync.h"
#include "thread.h"
#include "utils.h"
@@ -56,6 +56,35 @@
"[Ljava/lang/StackTraceElement;",
};
+class ObjectLock {
+ public:
+ explicit ObjectLock(Object* object) : self_(Thread::Current()), obj_(object) {
+ CHECK(object != NULL);
+ obj_->MonitorEnter(self_);
+ }
+
+ ~ObjectLock() {
+ obj_->MonitorExit(self_);
+ }
+
+ void Wait() {
+ Monitor::Wait(self_, obj_, 0, 0, false);
+ }
+
+ void Notify() {
+ obj_->Notify();
+ }
+
+ void NotifyAll() {
+ obj_->NotifyAll();
+ }
+
+ private:
+ Thread* self_;
+ Object* obj_;
+ DISALLOW_COPY_AND_ASSIGN(ObjectLock);
+};
+
ClassLinker* ClassLinker::Create(const std::vector<const DexFile*>& boot_class_path,
const std::vector<const DexFile*>& class_path,
InternTable* intern_table, bool image) {
@@ -1387,9 +1416,10 @@
Class* super_class = klass->GetSuperClass();
if (super_class->GetStatus() != Class::kStatusInitialized) {
CHECK(!super_class->IsInterface());
- klass->MonitorExit();
+ Thread* self = Thread::Current();
+ klass->MonitorExit(self);
bool super_initialized = InitializeClass(super_class);
- klass->MonitorEnter();
+ klass->MonitorEnter(self);
// TODO: check for a pending exception
if (!super_initialized) {
klass->SetStatus(Class::kStatusError);
@@ -1407,10 +1437,11 @@
return true;
}
- c->MonitorExit();
+ Thread* self = Thread::Current();
+ c->MonitorEnter(self);
InitializeClass(c);
- c->MonitorEnter();
- return !Thread::Current()->IsExceptionPending();
+ c->MonitorExit(self);
+ return !self->IsExceptionPending();
}
StaticStorageBase* ClassLinker::InitializeStaticStorageFromCode(uint32_t type_idx,
diff --git a/src/compiler/Dalvik.h b/src/compiler/Dalvik.h
index f4f94e2..b2c62bf 100644
--- a/src/compiler/Dalvik.h
+++ b/src/compiler/Dalvik.h
@@ -30,6 +30,7 @@
#include "class_linker.h"
#include "compiler.h"
#include "dex_cache.h"
+#include "sync.h"
#include "utils.h"
// From Common.h
diff --git a/src/compiler/codegen/arm/Thumb2/Gen.cc b/src/compiler/codegen/arm/Thumb2/Gen.cc
index 2f63085..fbdfb2e 100644
--- a/src/compiler/codegen/arm/Thumb2/Gen.cc
+++ b/src/compiler/codegen/arm/Thumb2/Gen.cc
@@ -922,7 +922,7 @@
ArmLIR* hopBranch;
oatFlushAllRegs(cUnit);
- assert(art::Monitor::kLwShapeThin == 0);
+ DCHECK_EQ(LW_SHAPE_THIN, 0);
loadValueDirectFixed(cUnit, rlSrc, r1); // Get obj
oatLockCallTemps(cUnit); // Prepare for explicit register usage
genNullCheck(cUnit, rlSrc.sRegLow, r1, mir);
@@ -930,12 +930,10 @@
newLIR3(cUnit, kThumb2Ldrex, r2, r1,
Object::MonitorOffset().Int32Value() >> 2); // Get object->lock
// Align owner
- opRegImm(cUnit, kOpLsl, r3, art::Monitor::kLwLockOwnerShift);
+ opRegImm(cUnit, kOpLsl, r3, LW_LOCK_OWNER_SHIFT);
// Is lock unheld on lock or held by us (==threadId) on unlock?
- newLIR4(cUnit, kThumb2Bfi, r3, r2, 0, art::Monitor::kLwLockOwnerShift
- - 1);
- newLIR3(cUnit, kThumb2Bfc, r2, art::Monitor::kLwHashStateShift,
- art::Monitor::kLwLockOwnerShift - 1);
+ newLIR4(cUnit, kThumb2Bfi, r3, r2, 0, LW_LOCK_OWNER_SHIFT - 1);
+ newLIR3(cUnit, kThumb2Bfc, r2, LW_HASH_STATE_SHIFT, LW_LOCK_OWNER_SHIFT - 1);
hopBranch = newLIR2(cUnit, kThumb2Cbnz, r2, 0);
newLIR4(cUnit, kThumb2Strex, r2, r3, r1,
Object::MonitorOffset().Int32Value() >> 2);
@@ -972,7 +970,7 @@
ArmLIR* hopTarget;
ArmLIR* hopBranch;
- assert(art::Monitor::kLwShapeThin == 0);
+ DCHECK_EQ(LW_SHAPE_THIN, 0);
oatFlushAllRegs(cUnit);
loadValueDirectFixed(cUnit, rlSrc, r1); // Get obj
oatLockCallTemps(cUnit); // Prepare for explicit register usage
@@ -980,12 +978,10 @@
loadWordDisp(cUnit, r1, Object::MonitorOffset().Int32Value(), r2); // Get lock
loadWordDisp(cUnit, rSELF, Thread::IdOffset().Int32Value(), r3);
// Is lock unheld on lock or held by us (==threadId) on unlock?
- opRegRegImm(cUnit, kOpAnd, r12, r2, (art::Monitor::kLwHashStateMask <<
- art::Monitor::kLwHashStateShift));
+ opRegRegImm(cUnit, kOpAnd, r12, r2, (LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT));
// Align owner
- opRegImm(cUnit, kOpLsl, r3, art::Monitor::kLwLockOwnerShift);
- newLIR3(cUnit, kThumb2Bfc, r2, art::Monitor::kLwHashStateShift,
- art::Monitor::kLwLockOwnerShift - 1);
+ opRegImm(cUnit, kOpLsl, r3, LW_LOCK_OWNER_SHIFT);
+ newLIR3(cUnit, kThumb2Bfc, r2, LW_HASH_STATE_SHIFT, LW_LOCK_OWNER_SHIFT - 1);
opRegReg(cUnit, kOpSub, r2, r3);
hopBranch = opCondBranch(cUnit, kArmCondNe);
oatGenMemBarrier(cUnit, kSY);
diff --git a/src/compiler_test.cc b/src/compiler_test.cc
index 5207561..712a3a0 100644
--- a/src/compiler_test.cc
+++ b/src/compiler_test.cc
@@ -358,9 +358,4 @@
AssertStaticIntMethod(20664, LoadDex("Invoke"), "Invoke", "test0", "(I)I", 912);
}
-TEST_F(CompilerTest, DISABLED_LARGE_SystemMethodsTest) {
- CompileAll(NULL); // This test calls a bunch of stuff from libcore.
- AssertStaticIntMethod(123, LoadDex("SystemMethods"), "SystemMethods", "test5", "()I");
-}
-
} // namespace art
diff --git a/src/image_test.cc b/src/image_test.cc
index 8f3f0b7..7ecdf3c 100644
--- a/src/image_test.cc
+++ b/src/image_test.cc
@@ -86,7 +86,7 @@
EXPECT_TRUE(klass != NULL) << descriptor;
EXPECT_LT(boot_base, reinterpret_cast<byte*>(klass)) << descriptor;
EXPECT_LT(reinterpret_cast<byte*>(klass), boot_limit) << descriptor;
- EXPECT_TRUE(klass->GetMonitor() == NULL); // address should have been removed from monitor
+ EXPECT_EQ(0, *klass->GetRawLockWordAddress()); // address should have been removed from monitor
}
}
diff --git a/src/image_writer.h b/src/image_writer.h
index e91be01..c1480ff 100644
--- a/src/image_writer.h
+++ b/src/image_writer.h
@@ -31,7 +31,7 @@
// we use the lock word to store the offset of the object in the image
void AssignImageOffset(Object* object) {
DCHECK(object != NULL);
- DCHECK(object->GetMonitor() == NULL); // should be no lock
+ DCHECK(object->monitor_ == 0); // should be no lock
SetImageOffset(object, image_top_);
image_top_ += RoundUp(object->SizeOf(), 8); // 64-bit alignment
DCHECK_LT(image_top_, image_->GetLength());
@@ -39,25 +39,25 @@
static void SetImageOffset(Object* object, size_t offset) {
DCHECK(object != NULL);
// should be no lock (but it might be forward referenced interned string)
- DCHECK(object->GetMonitor() == NULL || object->IsString());
+ DCHECK(object->monitor_ == 0 || object->IsString());
DCHECK_NE(0U, offset);
- object->SetMonitor(reinterpret_cast<Monitor*>(offset));
+ object->monitor_ = offset;
}
static size_t IsImageOffsetAssigned(const Object* object) {
DCHECK(object != NULL);
- size_t offset = reinterpret_cast<size_t>(object->GetMonitor());
+ size_t offset = object->monitor_;
return offset != 0U;
}
static size_t GetImageOffset(const Object* object) {
DCHECK(object != NULL);
- size_t offset = reinterpret_cast<size_t>(object->GetMonitor());
+ size_t offset = object->monitor_;
DCHECK_NE(0U, offset);
return offset;
}
static void ResetImageOffset(Object* object) {
DCHECK(object != NULL);
- DCHECK(object->GetMonitor() != NULL); // should be an offset
- object->SetMonitor(reinterpret_cast<Monitor*>(0));
+ DCHECK(object->monitor_ != 0); // should be an offset
+ object->monitor_ = 0;
}
bool InSourceSpace(const Object* object) {
diff --git a/src/java_lang_Thread.cc b/src/java_lang_Thread.cc
index 286a211..d2f1349 100644
--- a/src/java_lang_Thread.cc
+++ b/src/java_lang_Thread.cc
@@ -53,31 +53,26 @@
jboolean Thread_nativeHoldsLock(JNIEnv* env, jobject javaThread, jobject javaObject) {
ThreadListLock lock;
- //Thread* thread = Thread::FromManagedThread(env, javaThread);
- //Object* object = dvmDecodeIndirectRef(env, javaObject);
- //if (object == NULL) {
- //dvmThrowNullPointerException("object == null");
- //return JNI_FALSE;
- //}
- //Thread* thread = Thread::FromManagedThread(env, javaThread);
- //int result = dvmHoldsLock(thread, object);
- //return result;
- UNIMPLEMENTED(FATAL);
- return JNI_FALSE;
+ Object* object = Decode<Object*>(env, javaObject);
+ if (object == NULL) {
+ Thread::Current()->ThrowNewException("Ljava/lang/NullPointerException;", "object == null");
+ return JNI_FALSE;
+ }
+ Thread* thread = Thread::FromManagedThread(env, javaThread);
+ return thread->HoldsLock(object);
}
void Thread_nativeInterrupt(JNIEnv* env, jobject javaThread) {
ThreadListLock lock;
- UNIMPLEMENTED(FATAL);
- //Thread* thread = Thread::FromManagedThread(env, javaThread);
- //if (thread != NULL) {
- //dvmThreadInterrupt(thread);
- //}
+ Thread* thread = Thread::FromManagedThread(env, javaThread);
+ if (thread != NULL) {
+ thread->Interrupt();
+ }
}
void Thread_nativeSetName(JNIEnv* env, jobject javaThread, jstring javaName) {
ThreadListLock lock;
- UNIMPLEMENTED(WARNING);
+ // TODO: needed for debugging (DDMS) support.
//Thread* thread = Thread::FromManagedThread(env, javaThread);
//StringObject* nameStr = (StringObject*) dvmDecodeIndirectRef(env, javaName);
//int threadId = (thread != NULL) ? thread->threadId : -1;
diff --git a/src/jni_internal.cc b/src/jni_internal.cc
index b9b1ab0..5b4bf21 100644
--- a/src/jni_internal.cc
+++ b/src/jni_internal.cc
@@ -464,9 +464,9 @@
handle_(handle),
class_loader_(class_loader),
jni_on_load_lock_("JNI_OnLoad lock"),
+ jni_on_load_cond_("JNI_OnLoad"),
jni_on_load_thread_id_(Thread::Current()->GetThinLockId()),
jni_on_load_result_(kPending) {
- CHECK_PTHREAD_CALL(pthread_cond_init, (&jni_on_load_cond_, NULL), "jni_on_load_cond_");
}
Object* GetClassLoader() {
@@ -498,7 +498,7 @@
<< "JNI_OnLoad...]";
}
ScopedThreadStateChange tsc(self, Thread::kVmWait);
- CHECK_PTHREAD_CALL(pthread_cond_wait, (&jni_on_load_cond_, jni_on_load_lock_.GetImpl()), "JNI_OnLoad");
+ jni_on_load_cond_.Wait(jni_on_load_lock_);
}
bool okay = (jni_on_load_result_ == kOkay);
@@ -515,7 +515,7 @@
// Broadcast a wakeup to anybody sleeping on the condition variable.
MutexLock mu(jni_on_load_lock_);
- CHECK_PTHREAD_CALL(pthread_cond_broadcast, (&jni_on_load_cond_), "JNI_OnLoad");
+ jni_on_load_cond_.Broadcast();
}
void* FindSymbol(const std::string& symbol_name) {
@@ -541,7 +541,7 @@
// Guards remaining items.
Mutex jni_on_load_lock_;
// Wait for JNI_OnLoad in other thread.
- pthread_cond_t jni_on_load_cond_;
+ ConditionVariable jni_on_load_cond_;
// Recursive invocation guard.
uint32_t jni_on_load_thread_id_;
// Result of earlier JNI_OnLoad call.
@@ -2186,13 +2186,13 @@
static jint MonitorEnter(JNIEnv* env, jobject java_object) {
ScopedJniThreadState ts(env);
- Decode<Object*>(ts, java_object)->MonitorEnter();
+ Decode<Object*>(ts, java_object)->MonitorEnter(ts.Self());
return ts.Self()->IsExceptionPending() ? JNI_ERR : JNI_OK;
}
static jint MonitorExit(JNIEnv* env, jobject java_object) {
ScopedJniThreadState ts(env);
- Decode<Object*>(ts, java_object)->MonitorExit();
+ Decode<Object*>(ts, java_object)->MonitorExit(ts.Self());
return ts.Self()->IsExceptionPending() ? JNI_ERR : JNI_OK;
}
diff --git a/src/monitor.h b/src/monitor.h
deleted file mode 100644
index b82c463..0000000
--- a/src/monitor.h
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2011 Google Inc. All Rights Reserved.
-
-#ifndef ART_SRC_MONITOR_H_
-#define ART_SRC_MONITOR_H_
-
-#include "logging.h"
-#include "macros.h"
-
-namespace art {
-
-class Monitor {
- public:
-
- // Lock constants used by compiler
- static const uint32_t kLwLockOwnerShift = 3;
- static const uint32_t kLwHashStateShift = 1;
- static const uint32_t kLwHashStateMask = 0x3;
- static const uint32_t kLwShapeThin = 0;
-
- void Enter() {
- }
-
- void Exit() {
- }
-
- void Notify() {
- }
-
- void NotifyAll() {
- }
-
- void Wait() {
- }
-
- void Wait(int64_t timeout) {
- Wait(timeout, 0);
- }
-
- void Wait(int64_t timeout, int32_t nanos) {
- }
-
- private:
- DISALLOW_COPY_AND_ASSIGN(Monitor);
-
-};
-
-class MonitorLock {
- public:
-
- MonitorLock(Monitor* monitor) : monitor_(monitor) {
- CHECK(monitor != NULL);
- monitor_->Enter();
- }
-
- ~MonitorLock() {
- monitor_->Exit();
- }
-
- void Wait(int64_t millis = 0) {
- monitor_->Wait(millis);
- }
-
- void Notify() {
- monitor_->Notify();
- }
-
- void NotifyAll() {
- monitor_->NotifyAll();
- }
-
- private:
- Monitor* const monitor_;
- DISALLOW_COPY_AND_ASSIGN(MonitorLock);
-};
-
-} // namespace art
-
-#endif // ART_SRC_MONITOR_H_
diff --git a/src/mutex.cc b/src/mutex.cc
index fcd16ee..27bb627 100644
--- a/src/mutex.cc
+++ b/src/mutex.cc
@@ -79,4 +79,37 @@
return art::GetTid();
}
+ConditionVariable::ConditionVariable(const std::string& name) : name_(name) {
+ CHECK_MUTEX_CALL(pthread_cond_init, (&cond_, NULL));
+}
+
+ConditionVariable::~ConditionVariable() {
+ CHECK_MUTEX_CALL(pthread_cond_destroy, (&cond_));
+}
+
+void ConditionVariable::Broadcast() {
+ CHECK_MUTEX_CALL(pthread_cond_broadcast, (&cond_));
+}
+
+void ConditionVariable::Signal() {
+ CHECK_MUTEX_CALL(pthread_cond_signal, (&cond_));
+}
+
+void ConditionVariable::Wait(Mutex& mutex) {
+ CHECK_MUTEX_CALL(pthread_cond_wait, (&cond_, mutex.GetImpl()));
+}
+
+void ConditionVariable::TimedWait(Mutex& mutex, const timespec& ts) {
+#ifdef HAVE_TIMEDWAIT_MONOTONIC
+#define TIMEDWAIT pthread_cond_timedwait_monotonic
+#else
+#define TIMEDWAIT pthread_cond_timedwait
+#endif
+ int rc = TIMEDWAIT(&cond_, mutex.GetImpl(), &ts);
+ if (rc != 0 && rc != ETIMEDOUT) {
+ errno = rc;
+ PLOG(FATAL) << "TimedWait failed for " << name_;
+ }
+}
+
} // namespace
diff --git a/src/mutex.h b/src/mutex.h
index cbfd5a0..43194e9 100644
--- a/src/mutex.h
+++ b/src/mutex.h
@@ -82,6 +82,22 @@
DISALLOW_COPY_AND_ASSIGN(MutexLock);
};
+class ConditionVariable {
+ public:
+ explicit ConditionVariable(const std::string& name);
+ ~ConditionVariable();
+
+ void Broadcast();
+ void Signal();
+ void Wait(Mutex& mutex);
+ void TimedWait(Mutex& mutex, const timespec& ts);
+
+ private:
+ pthread_cond_t cond_;
+ std::string name_;
+ DISALLOW_COPY_AND_ASSIGN(ConditionVariable);
+};
+
} // namespace art
#endif // ART_SRC_MUTEX_H_
diff --git a/src/object.cc b/src/object.cc
index b5e66d9..1e0d5c6 100644
--- a/src/object.cc
+++ b/src/object.cc
@@ -17,6 +17,7 @@
#include "dex_cache.h"
#include "dex_file.h"
#include "runtime.h"
+#include "sync.h"
namespace art {
@@ -25,6 +26,30 @@
return GetClass() == GetClass()->GetDescriptor()->GetClass();
}
+uint32_t Object::GetLockOwner() {
+ return Monitor::GetLockOwner(monitor_);
+}
+
+void Object::MonitorEnter(Thread* thread) {
+ Monitor::MonitorEnter(thread, this);
+}
+
+void Object::MonitorExit(Thread* thread) {
+ Monitor::MonitorExit(thread, this);
+}
+
+void Object::Notify() {
+ Monitor::Notify(Thread::Current(), this);
+}
+
+void Object::NotifyAll() {
+ Monitor::NotifyAll(Thread::Current(), this);
+}
+
+void Object::Wait(int64_t ms, int32_t ns) {
+ Monitor::Wait(Thread::Current(), this, ms, ns, true);
+}
+
// TODO: get global references for these
Class* Field::java_lang_reflect_Field_ = NULL;
diff --git a/src/object.h b/src/object.h
index 5ecd995..50da121 100644
--- a/src/object.h
+++ b/src/object.h
@@ -27,8 +27,6 @@
#include "heap.h"
#include "logging.h"
#include "macros.h"
-#include "monitor.h"
-#include "monitor.h"
#include "offsets.h"
#include "runtime.h"
#include "stringpiece.h"
@@ -204,8 +202,7 @@
}
Class* GetClass() const {
- return
- GetFieldObject<Class*>(OFFSET_OF_OBJECT_MEMBER(Object, klass_), false);
+ return GetFieldObject<Class*>(OFFSET_OF_OBJECT_MEMBER(Object, klass_), false);
}
void SetClass(Class* new_klass);
@@ -223,45 +220,25 @@
return OFFSET_OF_OBJECT_MEMBER(Object, monitor_);
}
- Monitor* GetMonitor() const {
- return GetFieldPtr<Monitor*>(
- OFFSET_OF_OBJECT_MEMBER(Object, monitor_), false);
+ volatile int32_t* GetRawLockWordAddress() {
+ byte* raw_addr = reinterpret_cast<byte*>(this) + OFFSET_OF_OBJECT_MEMBER(Object, monitor_).Int32Value();
+ int32_t* word_addr = reinterpret_cast<int32_t*>(raw_addr);
+ return const_cast<volatile int32_t*>(word_addr);
}
- void SetMonitor(Monitor* monitor) {
- // TODO: threading - compare-and-set
- SetFieldPtr(OFFSET_OF_OBJECT_MEMBER(Object, monitor_), monitor, false);
- }
+ uint32_t GetLockOwner();
- void MonitorEnter(Thread* thread = NULL) {
- // TODO: use thread to get lock id
- GetMonitor()->Enter();
- }
+ void MonitorEnter(Thread* thread);
- void MonitorExit(Thread* thread = NULL) {
- // TODO: use thread to get lock id
- GetMonitor()->Exit();
- }
+ void MonitorExit(Thread* thread);
- void Notify() {
- GetMonitor()->Notify();
- }
+ void Notify();
- void NotifyAll() {
- GetMonitor()->NotifyAll();
- }
+ void NotifyAll();
- void Wait() {
- GetMonitor()->Wait();
- }
+ void Wait(int64_t timeout);
- void Wait(int64_t timeout) {
- GetMonitor()->Wait(timeout);
- }
-
- void Wait(int64_t timeout, int32_t nanos) {
- GetMonitor()->Wait(timeout, nanos);
- }
+ void Wait(int64_t timeout, int32_t nanos);
bool IsClass() const;
@@ -433,40 +410,13 @@
private:
Class* klass_;
- Monitor* monitor_;
+ uint32_t monitor_;
- friend struct ObjectOffsets; // for verifying offset information
+ friend class ImageWriter; // for abusing monitor_ directly
+ friend class ObjectOffsets; // for verifying offset information
DISALLOW_IMPLICIT_CONSTRUCTORS(Object);
};
-class ObjectLock {
- public:
- explicit ObjectLock(Object* object) : obj_(object) {
- CHECK(object != NULL);
- obj_->MonitorEnter();
- }
-
- ~ObjectLock() {
- obj_->MonitorExit();
- }
-
- void Wait(int64_t millis = 0) {
- return obj_->Wait(millis);
- }
-
- void Notify() {
- obj_->Notify();
- }
-
- void NotifyAll() {
- obj_->NotifyAll();
- }
-
- private:
- Object* obj_;
- DISALLOW_COPY_AND_ASSIGN(ObjectLock);
-};
-
// C++ mirror of java.lang.reflect.AccessibleObject
class MANAGED AccessibleObject : public Object {
private:
@@ -2047,7 +1997,7 @@
// Total class size; used when allocating storage on gc heap.
size_t class_size_;
- // threadId, used to check for recursive <clinit> invocation
+ // tid used to check for recursive <clinit> invocation
pid_t clinit_thread_id_;
// number of instance fields that are object refs
diff --git a/src/signal_catcher.cc b/src/signal_catcher.cc
index d72002f..ecd48cd 100644
--- a/src/signal_catcher.cc
+++ b/src/signal_catcher.cc
@@ -30,18 +30,17 @@
namespace art {
-SignalCatcher::SignalCatcher() : lock_("SignalCatcher lock"), thread_(NULL) {
+SignalCatcher::SignalCatcher()
+ : lock_("SignalCatcher lock"), cond_("SignalCatcher::cond_"), thread_(NULL) {
SetHaltFlag(false);
// Create a raw pthread; its start routine will attach to the runtime.
CHECK_PTHREAD_CALL(pthread_create, (&pthread_, NULL, &Run, this), "signal catcher thread");
- CHECK_PTHREAD_CALL(pthread_cond_init, (&cond_, NULL), "SignalCatcher::cond_");
MutexLock mu(lock_);
while (thread_ == NULL) {
- CHECK_PTHREAD_CALL(pthread_cond_wait, (&cond_, lock_.GetImpl()), __FUNCTION__);
+ cond_.Wait(lock_);
}
- CHECK_PTHREAD_CALL(pthread_cond_destroy, (&cond_), "SignalCatcher::cond_");
}
SignalCatcher::~SignalCatcher() {
@@ -123,7 +122,7 @@
{
MutexLock mu(signal_catcher->lock_);
signal_catcher->thread_ = Thread::Current();
- CHECK_PTHREAD_CALL(pthread_cond_broadcast, (&signal_catcher->cond_), __FUNCTION__);
+ signal_catcher->cond_.Broadcast();
}
// Set up mask with signals we want to handle.
diff --git a/src/signal_catcher.h b/src/signal_catcher.h
index 16cf9e4..123b38f 100644
--- a/src/signal_catcher.h
+++ b/src/signal_catcher.h
@@ -45,7 +45,7 @@
mutable Mutex lock_;
bool halt_;
- pthread_cond_t cond_;
+ ConditionVariable cond_;
pthread_t pthread_;
Thread* thread_;
};
diff --git a/src/sync.cc b/src/sync.cc
new file mode 100644
index 0000000..b07ec35
--- /dev/null
+++ b/src/sync.cc
@@ -0,0 +1,872 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "sync.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <pthread.h>
+#include <stdlib.h>
+#include <sys/time.h>
+#include <time.h>
+#include <unistd.h>
+
+#include "mutex.h"
+#include "object.h"
+#include "thread.h"
+
+namespace art {
+
+/*
+ * Every Object has a monitor associated with it, but not every Object is
+ * actually locked. Even the ones that are locked do not need a
+ * full-fledged monitor until a) there is actual contention or b) wait()
+ * is called on the Object.
+ *
+ * For Android, we have implemented a scheme similar to the one described
+ * in Bacon et al.'s "Thin locks: featherweight synchronization for Java"
+ * (ACM 1998). Things are even easier for us, though, because we have
+ * a full 32 bits to work with.
+ *
+ * The two states of an Object's lock are referred to as "thin" and
+ * "fat". A lock may transition from the "thin" state to the "fat"
+ * state and this transition is referred to as inflation. Once a lock
+ * has been inflated it remains in the "fat" state indefinitely.
+ *
+ * The lock value itself is stored in Object.lock. The LSB of the
+ * lock encodes its state. When cleared, the lock is in the "thin"
+ * state and its bits are formatted as follows:
+ *
+ * [31 ---- 19] [18 ---- 3] [2 ---- 1] [0]
+ * lock count thread id hash state 0
+ *
+ * When set, the lock is in the "fat" state and its bits are formatted
+ * as follows:
+ *
+ * [31 ---- 3] [2 ---- 1] [0]
+ * pointer hash state 1
+ *
+ * For an in-depth description of the mechanics of thin-vs-fat locking,
+ * read the paper referred to above.
+ */
+
+/*
+ * Monitors provide:
+ * - mutually exclusive access to resources
+ * - a way for multiple threads to wait for notification
+ *
+ * In effect, they fill the role of both mutexes and condition variables.
+ *
+ * Only one thread can own the monitor at any time. There may be several
+ * threads waiting on it (the wait call unlocks it). One or more waiting
+ * threads may be getting interrupted or notified at any given time.
+ *
+ * TODO: the various members of monitor are not SMP-safe.
+ */
+Monitor::Monitor(Object* obj)
+ : owner_(NULL),
+ lock_count_(0),
+ obj_(obj),
+ wait_set_(NULL),
+ lock_("a monitor lock"),
+ next_(NULL),
+ owner_filename_(NULL),
+ owner_line_number_(0) {
+}
+
+Monitor::~Monitor() {
+ DCHECK(obj_ != NULL);
+ DCHECK_EQ(LW_SHAPE(*obj_->GetRawLockWordAddress()), LW_SHAPE_FAT);
+
+#ifndef NDEBUG
+ /* This lock is associated with an object
+ * that's being swept. The only possible way
+ * anyone could be holding this lock would be
+ * if some JNI code locked but didn't unlock
+ * the object, in which case we've got some bad
+ * native code somewhere.
+ */
+ DCHECK(lock_.TryLock());
+ lock_.Unlock();
+#endif
+}
+
+/*
+ * Links a thread into a monitor's wait set. The monitor lock must be
+ * held by the caller of this routine.
+ */
+void Monitor::AppendToWaitSet(Thread* thread) {
+ DCHECK(owner_ == Thread::Current());
+ DCHECK(thread != NULL);
+ DCHECK(thread->wait_next_ == NULL);
+ if (wait_set_ == NULL) {
+ wait_set_ = thread;
+ return;
+ }
+
+ // push_back.
+ Thread* t = wait_set_;
+ while (t->wait_next_ != NULL) {
+ t = t->wait_next_;
+ }
+ t->wait_next_ = thread;
+}
+
+/*
+ * Unlinks a thread from a monitor's wait set. The monitor lock must
+ * be held by the caller of this routine.
+ */
+void Monitor::RemoveFromWaitSet(Thread *thread) {
+ DCHECK(owner_ == Thread::Current());
+ DCHECK(thread != NULL);
+ if (wait_set_ == NULL) {
+ return;
+ }
+ if (wait_set_ == thread) {
+ wait_set_ = thread->wait_next_;
+ thread->wait_next_ = NULL;
+ return;
+ }
+
+ Thread* t = wait_set_;
+ while (t->wait_next_ != NULL) {
+ if (t->wait_next_ == thread) {
+ t->wait_next_ = thread->wait_next_;
+ thread->wait_next_ = NULL;
+ return;
+ }
+ t = t->wait_next_;
+ }
+}
+
+// Global list of all monitors. Used for cleanup.
+static Monitor* gMonitorList = NULL;
+
+void Monitor::FreeMonitorList() {
+ Monitor* m = gMonitorList;
+ while (m != NULL) {
+ Monitor* next = m->next_;
+ delete m;
+ m = next;
+ }
+}
+
+/*
+ * Frees monitor objects belonging to unmarked objects.
+ */
+static void SweepMonitorList(Monitor** mon, bool (isUnmarkedObject)(void*)) {
+ UNIMPLEMENTED(FATAL);
+#if 0
+ Monitor handle;
+ Monitor *curr;
+
+ assert(mon != NULL);
+ assert(isUnmarkedObject != NULL);
+ Monitor* prev = &handle;
+ prev->next = curr = *mon;
+ while (curr != NULL) {
+ Object* obj = curr->obj;
+ if ((*isUnmarkedObject)(obj) != 0) {
+ prev->next = curr->next;
+ delete curr;
+ curr = prev->next;
+ } else {
+ prev = curr;
+ curr = curr->next;
+ }
+ }
+ *mon = handle.next;
+#endif
+}
+
+void Monitor::SweepMonitorList(bool (isUnmarkedObject)(void*)) {
+ ::art::SweepMonitorList(&gMonitorList, isUnmarkedObject);
+}
+
+/*
+static char *logWriteInt(char *dst, int value) {
+ *dst++ = EVENT_TYPE_INT;
+ set4LE((uint8_t *)dst, value);
+ return dst + 4;
+}
+
+static char *logWriteString(char *dst, const char *value, size_t len) {
+ *dst++ = EVENT_TYPE_STRING;
+ len = len < 32 ? len : 32;
+ set4LE((uint8_t *)dst, len);
+ dst += 4;
+ memcpy(dst, value, len);
+ return dst + len;
+}
+
+#define EVENT_LOG_TAG_dvm_lock_sample 20003
+
+static void logContentionEvent(Thread *self, uint32_t waitMs, uint32_t samplePercent,
+ const char *ownerFileName, uint32_t ownerLineNumber)
+{
+ const StackSaveArea *saveArea;
+ const Method *meth;
+ uint32_t relativePc;
+ char eventBuffer[174];
+ const char *fileName;
+ char procName[33];
+ char *cp;
+ size_t len;
+ int fd;
+
+ saveArea = SAVEAREA_FROM_FP(self->interpSave.curFrame);
+ meth = saveArea->method;
+ cp = eventBuffer;
+
+ // Emit the event list length, 1 byte.
+ *cp++ = 9;
+
+ // Emit the process name, <= 37 bytes.
+ fd = open("/proc/self/cmdline", O_RDONLY);
+ memset(procName, 0, sizeof(procName));
+ read(fd, procName, sizeof(procName) - 1);
+ close(fd);
+ len = strlen(procName);
+ cp = logWriteString(cp, procName, len);
+
+ // Emit the sensitive thread ("main thread") status, 5 bytes.
+ bool isSensitive = false;
+ if (gDvm.isSensitiveThreadHook != NULL) {
+ isSensitive = gDvm.isSensitiveThreadHook();
+ }
+ cp = logWriteInt(cp, isSensitive);
+
+ // Emit self thread name string, <= 37 bytes.
+ std::string selfName = dvmGetThreadName(self);
+ cp = logWriteString(cp, selfName.c_str(), selfName.size());
+
+ // Emit the wait time, 5 bytes.
+ cp = logWriteInt(cp, waitMs);
+
+ // Emit the source code file name, <= 37 bytes.
+ fileName = dvmGetMethodSourceFile(meth);
+ if (fileName == NULL) fileName = "";
+ cp = logWriteString(cp, fileName, strlen(fileName));
+
+ // Emit the source code line number, 5 bytes.
+ relativePc = saveArea->xtra.currentPc - saveArea->method->insns;
+ cp = logWriteInt(cp, dvmLineNumFromPC(meth, relativePc));
+
+ // Emit the lock owner source code file name, <= 37 bytes.
+ if (ownerFileName == NULL) {
+ ownerFileName = "";
+ } else if (strcmp(fileName, ownerFileName) == 0) {
+ // Common case, so save on log space.
+ ownerFileName = "-";
+ }
+ cp = logWriteString(cp, ownerFileName, strlen(ownerFileName));
+
+ // Emit the source code line number, 5 bytes.
+ cp = logWriteInt(cp, ownerLineNumber);
+
+ // Emit the sample percentage, 5 bytes.
+ cp = logWriteInt(cp, samplePercent);
+
+ assert((size_t)(cp - eventBuffer) <= sizeof(eventBuffer));
+ android_btWriteLog(EVENT_LOG_TAG_dvm_lock_sample,
+ EVENT_TYPE_LIST,
+ eventBuffer,
+ (size_t)(cp - eventBuffer));
+}
+*/
+
+void Monitor::Lock(Thread* self) {
+// uint32_t waitThreshold, samplePercent;
+// uint64_t waitStart, waitEnd, waitMs;
+
+ if (owner_ == self) {
+ lock_count_++;
+ return;
+ }
+ if (!lock_.TryLock()) {
+ {
+ ScopedThreadStateChange tsc(self, Thread::kBlocked);
+// waitThreshold = gDvm.lockProfThreshold;
+// if (waitThreshold) {
+// waitStart = dvmGetRelativeTimeUsec();
+// }
+// const char* currentOwnerFileName = mon->ownerFileName;
+// uint32_t currentOwnerLineNumber = mon->ownerLineNumber;
+
+ lock_.Lock();
+// if (waitThreshold) {
+// waitEnd = dvmGetRelativeTimeUsec();
+// }
+ }
+// if (waitThreshold) {
+// waitMs = (waitEnd - waitStart) / 1000;
+// if (waitMs >= waitThreshold) {
+// samplePercent = 100;
+// } else {
+// samplePercent = 100 * waitMs / waitThreshold;
+// }
+// if (samplePercent != 0 && ((uint32_t)rand() % 100 < samplePercent)) {
+// logContentionEvent(self, waitMs, samplePercent, currentOwnerFileName, currentOwnerLineNumber);
+// }
+// }
+ }
+ owner_ = self;
+ DCHECK_EQ(lock_count_, 0);
+
+ // When debugging, save the current monitor holder for future
+ // acquisition failures to use in sampled logging.
+// if (gDvm.lockProfThreshold > 0) {
+// const StackSaveArea *saveArea;
+// const Method *meth;
+// mon->ownerLineNumber = 0;
+// if (self->interpSave.curFrame == NULL) {
+// mon->ownerFileName = "no_frame";
+// } else if ((saveArea = SAVEAREA_FROM_FP(self->interpSave.curFrame)) == NULL) {
+// mon->ownerFileName = "no_save_area";
+// } else if ((meth = saveArea->method) == NULL) {
+// mon->ownerFileName = "no_method";
+// } else {
+// uint32_t relativePc = saveArea->xtra.currentPc - saveArea->method->insns;
+// mon->ownerFileName = (char*) dvmGetMethodSourceFile(meth);
+// if (mon->ownerFileName == NULL) {
+// mon->ownerFileName = "no_method_file";
+// } else {
+// mon->ownerLineNumber = dvmLineNumFromPC(meth, relativePc);
+// }
+// }
+// }
+}
+
+void ThrowIllegalMonitorStateException(const char* msg) {
+ Thread::Current()->ThrowNewException("Ljava/lang/IllegalMonitorStateException;", "%s", msg);
+}
+
+bool Monitor::Unlock(Thread* self) {
+ DCHECK(self != NULL);
+ if (owner_ == self) {
+ // We own the monitor, so nobody else can be in here.
+ if (lock_count_ == 0) {
+ owner_ = NULL;
+ owner_filename_ = "unlocked";
+ owner_line_number_ = 0;
+ lock_.Unlock();
+ } else {
+ --lock_count_;
+ }
+ } else {
+ // We don't own this, so we're not allowed to unlock it.
+ // The JNI spec says that we should throw IllegalMonitorStateException
+ // in this case.
+ ThrowIllegalMonitorStateException("unlock of unowned monitor");
+ return false;
+ }
+ return true;
+}
+
+/*
+ * Converts the given relative waiting time into an absolute time.
+ */
+void ToAbsoluteTime(int64_t ms, int32_t ns, struct timespec *ts) {
+ int64_t endSec;
+
+#ifdef HAVE_TIMEDWAIT_MONOTONIC
+ clock_gettime(CLOCK_MONOTONIC, ts);
+#else
+ {
+ struct timeval tv;
+ gettimeofday(&tv, NULL);
+ ts->tv_sec = tv.tv_sec;
+ ts->tv_nsec = tv.tv_usec * 1000;
+ }
+#endif
+ endSec = ts->tv_sec + ms / 1000;
+ if (endSec >= 0x7fffffff) {
+ LOG(INFO) << "Note: end time exceeds epoch";
+ endSec = 0x7ffffffe;
+ }
+ ts->tv_sec = endSec;
+ ts->tv_nsec = (ts->tv_nsec + (ms % 1000) * 1000000) + ns;
+
+ // Catch rollover.
+ if (ts->tv_nsec >= 1000000000L) {
+ ts->tv_sec++;
+ ts->tv_nsec -= 1000000000L;
+ }
+}
+
+int dvmRelativeCondWait(pthread_cond_t* cond, pthread_mutex_t* mutex, int64_t ms, int32_t ns) {
+ struct timespec ts;
+ ToAbsoluteTime(ms, ns, &ts);
+#if defined(HAVE_TIMEDWAIT_MONOTONIC)
+ int rc = pthread_cond_timedwait_monotonic(cond, mutex, &ts);
+#else
+ int rc = pthread_cond_timedwait(cond, mutex, &ts);
+#endif
+ DCHECK(rc == 0 || rc == ETIMEDOUT);
+ return rc;
+}
+
+/*
+ * Wait on a monitor until timeout, interrupt, or notification. Used for
+ * Object.wait() and (somewhat indirectly) Thread.sleep() and Thread.join().
+ *
+ * If another thread calls Thread.interrupt(), we throw InterruptedException
+ * and return immediately if one of the following are true:
+ * - blocked in wait(), wait(long), or wait(long, int) methods of Object
+ * - blocked in join(), join(long), or join(long, int) methods of Thread
+ * - blocked in sleep(long), or sleep(long, int) methods of Thread
+ * Otherwise, we set the "interrupted" flag.
+ *
+ * Checks to make sure that "ns" is in the range 0-999999
+ * (i.e. fractions of a millisecond) and throws the appropriate
+ * exception if it isn't.
+ *
+ * The spec allows "spurious wakeups", and recommends that all code using
+ * Object.wait() do so in a loop. This appears to derive from concerns
+ * about pthread_cond_wait() on multiprocessor systems. Some commentary
+ * on the web casts doubt on whether these can/should occur.
+ *
+ * Since we're allowed to wake up "early", we clamp extremely long durations
+ * to return at the end of the 32-bit time epoch.
+ */
+void Monitor::Wait(Thread* self, int64_t ms, int32_t ns, bool interruptShouldThrow) {
+ DCHECK(self != NULL);
+
+ // Make sure that we hold the lock.
+ if (owner_ != self) {
+ ThrowIllegalMonitorStateException("object not locked by thread before wait()");
+ return;
+ }
+
+ // Enforce the timeout range.
+ if (ms < 0 || ns < 0 || ns > 999999) {
+ Thread::Current()->ThrowNewException("Ljava/lang/IllegalArgumentException;",
+ "timeout arguments out of range: ms=%lld ns=%d", ms, ns);
+ return;
+ }
+
+ // Compute absolute wakeup time, if necessary.
+ struct timespec ts;
+ bool timed = false;
+ if (ms != 0 || ns != 0) {
+ ToAbsoluteTime(ms, ns, &ts);
+ timed = true;
+ }
+ // NOTE(review): ToAbsoluteTime() bases "ts" on CLOCK_MONOTONIC when
+ // HAVE_TIMEDWAIT_MONOTONIC is defined; confirm that
+ // ConditionVariable::TimedWait() waits against the same clock.
+
+ /*
+ * Add ourselves to the set of threads waiting on this monitor, and
+ * release our hold. We need to let it go even if we're a few levels
+ * deep in a recursive lock, and we need to restore that later.
+ *
+ * We append to the wait set ahead of clearing the count and owner
+ * fields so the subroutine can check that the calling thread owns
+ * the monitor. Aside from that, the order of member updates is
+ * not order sensitive as we hold the pthread mutex.
+ */
+ AppendToWaitSet(self);
+ int prevLockCount = lock_count_;
+ lock_count_ = 0;
+ owner_ = NULL;
+ const char* savedFileName = owner_filename_;
+ owner_filename_ = NULL;
+ uint32_t savedLineNumber = owner_line_number_;
+ owner_line_number_ = 0;
+
+ /*
+ * Update thread status. If the GC wakes up, it'll ignore us, knowing
+ * that we won't touch any references in this state, and we'll check
+ * our suspend mode before we transition out.
+ */
+ if (timed) {
+ self->SetState(Thread::kTimedWaiting);
+ } else {
+ self->SetState(Thread::kWaiting);
+ }
+
+ self->wait_mutex_.Lock();
+
+ /*
+ * Set wait_monitor_ to the monitor object we will be waiting on.
+ * When wait_monitor_ is non-NULL a notifying or interrupting thread
+ * must signal the thread's wait_cond_ to wake it up.
+ */
+ DCHECK(self->wait_monitor_ == NULL);
+ self->wait_monitor_ = this;
+
+ /*
+ * Handle the case where the thread was interrupted before we called
+ * wait(). Note we still hold lock_ here, so the restore code at
+ * "done" below is correct for this early-out path.
+ */
+ bool wasInterrupted = false;
+ if (self->interrupted_) {
+ wasInterrupted = true;
+ self->wait_monitor_ = NULL;
+ self->wait_mutex_.Unlock();
+ goto done;
+ }
+
+ /*
+ * Release the monitor lock and wait for a notification or
+ * a timeout to occur.
+ */
+ lock_.Unlock();
+
+ if (!timed) {
+ self->wait_cond_.Wait(self->wait_mutex_);
+ } else {
+ self->wait_cond_.TimedWait(self->wait_mutex_, ts);
+ }
+ if (self->interrupted_) {
+ wasInterrupted = true;
+ }
+
+ self->interrupted_ = false;
+ self->wait_monitor_ = NULL;
+ self->wait_mutex_.Unlock();
+
+ // Reacquire the monitor lock.
+ Lock(self);
+
+done:
+ /*
+ * We remove our thread from wait set after restoring the count
+ * and owner fields so the subroutine can check that the calling
+ * thread owns the monitor. Aside from that, the order of member
+ * updates is not order sensitive as we hold the pthread mutex.
+ */
+ owner_ = self;
+ lock_count_ = prevLockCount;
+ owner_filename_ = savedFileName;
+ owner_line_number_ = savedLineNumber;
+ RemoveFromWaitSet(self);
+
+ /* set self->status back to Thread::kRunnable, and self-suspend if needed */
+ self->SetState(Thread::kRunnable);
+
+ if (wasInterrupted) {
+ /*
+ * We were interrupted while waiting, or somebody interrupted an
+ * un-interruptible thread earlier and we're bailing out immediately.
+ *
+ * The doc sayeth: "The interrupted status of the current thread is
+ * cleared when this exception is thrown."
+ */
+ self->interrupted_ = false;
+ if (interruptShouldThrow) {
+ Thread::Current()->ThrowNewException("Ljava/lang/InterruptedException;", "%s", "");
+ }
+ }
+}
+
+// Implements Object.notify(): wakes at most one thread from this
+// monitor's wait set. The caller must own the monitor.
+void Monitor::Notify(Thread* self) {
+ DCHECK(self != NULL);
+
+ // Make sure that we hold the lock.
+ if (owner_ != self) {
+ ThrowIllegalMonitorStateException("object not locked by thread before notify()");
+ return;
+ }
+ // Signal the first waiting thread in the wait set.
+ while (wait_set_ != NULL) {
+ Thread* thread = wait_set_;
+ wait_set_ = thread->wait_next_;
+ thread->wait_next_ = NULL;
+
+ // Check to see if the thread is still waiting. Threads that are no
+ // longer waiting (wait_monitor_ == NULL) are simply dropped from the
+ // wait set and the scan continues until a live waiter is signalled.
+ MutexLock mu(thread->wait_mutex_);
+ if (thread->wait_monitor_ != NULL) {
+ thread->wait_cond_.Signal();
+ return;
+ }
+ }
+}
+
+// Implements Object.notifyAll(): wakes every thread in this monitor's
+// wait set. The caller must own the monitor.
+void Monitor::NotifyAll(Thread* self) {
+ DCHECK(self != NULL);
+
+ // Make sure that we hold the lock.
+ if (owner_ != self) {
+ ThrowIllegalMonitorStateException("object not locked by thread before notifyAll()");
+ return;
+ }
+ // Signal all threads in the wait set. Thread::Notify() takes the
+ // thread's wait_mutex_ and signals its wait_cond_ only if the thread
+ // is still in a monitor wait.
+ while (wait_set_ != NULL) {
+ Thread* thread = wait_set_;
+ wait_set_ = thread->wait_next_;
+ thread->wait_next_ = NULL;
+ thread->Notify();
+ }
+}
+
+/*
+ * Changes the shape of a monitor from thin to fat, preserving the
+ * internal lock state. The calling thread must own the lock.
+ */
+void Monitor::Inflate(Thread* self, Object* obj) {
+ DCHECK(self != NULL);
+ DCHECK(obj != NULL);
+ DCHECK_EQ(LW_SHAPE(*obj->GetRawLockWordAddress()), LW_SHAPE_THIN);
+ DCHECK_EQ(LW_LOCK_OWNER(*obj->GetRawLockWordAddress()), static_cast<int32_t>(self->thin_lock_id_));
+
+ // Allocate and acquire a new monitor.
+ Monitor* m = new Monitor(obj);
+ // Lock-free push of the new monitor onto the head of the global list.
+ // NOTE(review): the pointer<->int32_t casts here assume 32-bit pointers.
+ do {
+ m->next_ = gMonitorList;
+ } while (android_atomic_release_cas((int32_t)m->next_, (int32_t)m, (int32_t*)(void*)&gMonitorList) != 0);
+ m->Lock(self);
+ // Propagate the lock state: carry the recursion count over, keep the
+ // hash state bits, and install the monitor pointer with the fat shape bit.
+ uint32_t thin = *obj->GetRawLockWordAddress();
+ m->lock_count_ = LW_LOCK_COUNT(thin);
+ thin &= LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT;
+ thin |= reinterpret_cast<uint32_t>(m) | LW_SHAPE_FAT;
+ // Publish the updated lock word.
+ android_atomic_release_store(thin, obj->GetRawLockWordAddress());
+}
+
+/*
+ * Acquires the monitor for "obj" on behalf of "self". A thin lock is
+ * recursively re-entered, CAS-acquired when unowned, or spun on (with
+ * exponential-backoff sleeps) when contended; contention and recursion
+ * count overflow both cause inflation to a fat Monitor.
+ */
+void Monitor::MonitorEnter(Thread* self, Object* obj) {
+ volatile int32_t* thinp = obj->GetRawLockWordAddress();
+ struct timespec tm;
+ long sleepDelayNs;
+ long minSleepDelayNs = 1000000; /* 1 millisecond */
+ long maxSleepDelayNs = 1000000000; /* 1 second */
+ uint32_t thin, newThin, threadId;
+
+ // NOTE(review): the rest of this file uses DCHECK; these plain asserts
+ // look like an oversight from the port.
+ assert(self != NULL);
+ assert(obj != NULL);
+ threadId = self->thin_lock_id_;
+retry:
+ thin = *thinp;
+ if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
+ /*
+ * The lock is a thin lock. The owner field is used to
+ * determine the acquire method, ordered by cost.
+ */
+ if (LW_LOCK_OWNER(thin) == threadId) {
+ /*
+ * The calling thread owns the lock. Increment the
+ * value of the recursion count field.
+ */
+ *thinp += 1 << LW_LOCK_COUNT_SHIFT;
+ if (LW_LOCK_COUNT(*thinp) == LW_LOCK_COUNT_MASK) {
+ /*
+ * The reacquisition limit has been reached. Inflate
+ * the lock so the next acquire will not overflow the
+ * recursion count field.
+ */
+ Inflate(self, obj);
+ }
+ } else if (LW_LOCK_OWNER(thin) == 0) {
+ /*
+ * The lock is unowned. Install the thread id of the
+ * calling thread into the owner field. This is the
+ * common case. In performance critical code the JIT
+ * will have tried this before calling out to the VM.
+ */
+ newThin = thin | (threadId << LW_LOCK_OWNER_SHIFT);
+ if (android_atomic_acquire_cas(thin, newThin, thinp) != 0) {
+ // The acquire failed. Try again.
+ goto retry;
+ }
+ } else {
+ // NOTE(review): this logs unconditionally on every contended
+ // acquire; consider making these spin-trace logs verbose-only.
+ LOG(INFO) << StringPrintf("(%d) spin on lock %p: %#x (%#x) %#x", threadId, thinp, 0, *thinp, thin);
+ // The lock is owned by another thread. Notify the VM that we are about to wait.
+ Thread::State oldStatus = self->SetState(Thread::kBlocked);
+ // Spin until the thin lock is released or inflated.
+ sleepDelayNs = 0;
+ for (;;) {
+ thin = *thinp;
+ // Check the shape of the lock word. Another thread
+ // may have inflated the lock while we were waiting.
+ if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
+ if (LW_LOCK_OWNER(thin) == 0) {
+ // The lock has been released. Install the thread id of the
+ // calling thread into the owner field.
+ newThin = thin | (threadId << LW_LOCK_OWNER_SHIFT);
+ if (android_atomic_acquire_cas(thin, newThin, thinp) == 0) {
+ // The acquire succeed. Break out of the loop and proceed to inflate the lock.
+ break;
+ }
+ } else {
+ // The lock has not been released. Yield so the owning thread can run.
+ if (sleepDelayNs == 0) {
+ sched_yield();
+ sleepDelayNs = minSleepDelayNs;
+ } else {
+ tm.tv_sec = 0;
+ tm.tv_nsec = sleepDelayNs;
+ nanosleep(&tm, NULL);
+ // Prepare the next delay value. Wrap to avoid once a second polls for eternity.
+ if (sleepDelayNs < maxSleepDelayNs / 2) {
+ sleepDelayNs *= 2;
+ } else {
+ sleepDelayNs = minSleepDelayNs;
+ }
+ }
+ }
+ } else {
+ // The thin lock was inflated by another thread. Let the VM know we are no longer
+ // waiting and try again.
+ LOG(INFO) << "(" << threadId << ") lock " << (void*) thinp << " surprise-fattened";
+ self->SetState(oldStatus);
+ goto retry;
+ }
+ }
+ LOG(INFO) << StringPrintf("(%d) spin on lock done %p: %#x (%#x) %#x", threadId, thinp, 0, *thinp, thin);
+ // We have acquired the thin lock. Let the VM know that we are no longer waiting.
+ self->SetState(oldStatus);
+ // Fatten the lock.
+ Inflate(self, obj);
+ LOG(INFO) << StringPrintf("(%d) lock %p fattened", threadId, thinp);
+ }
+ } else {
+ // The lock is a fat lock.
+ DCHECK(LW_MONITOR(*thinp) != NULL);
+ LW_MONITOR(*thinp)->Lock(self);
+ }
+}
+
+/*
+ * Releases one level of "self"'s lock on "obj". Returns false (with
+ * IllegalMonitorStateException pending) if the calling thread does not
+ * own the lock, or if the fat monitor's Unlock() reports an error.
+ */
+bool Monitor::MonitorExit(Thread* self, Object* obj) {
+ volatile int32_t* thinp = obj->GetRawLockWordAddress();
+
+ DCHECK(self != NULL);
+ DCHECK_EQ(self->GetState(), Thread::kRunnable);
+ DCHECK(obj != NULL);
+
+ /*
+ * Cache the lock word as its value can change while we are
+ * examining its state.
+ */
+ uint32_t thin = *thinp;
+ if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
+ /*
+ * The lock is thin. We must ensure that the lock is owned
+ * by the given thread before unlocking it.
+ */
+ if (LW_LOCK_OWNER(thin) == self->thin_lock_id_) {
+ /*
+ * We are the lock owner. It is safe to update the lock
+ * without CAS as lock ownership guards the lock itself.
+ */
+ if (LW_LOCK_COUNT(thin) == 0) {
+ /*
+ * The lock was not recursively acquired, the common
+ * case. Unlock by clearing all bits except for the
+ * hash state.
+ */
+ thin &= (LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT);
+ android_atomic_release_store(thin, thinp);
+ } else {
+ /*
+ * The object was recursively acquired. Decrement the
+ * lock recursion count field.
+ */
+ *thinp -= 1 << LW_LOCK_COUNT_SHIFT;
+ }
+ } else {
+ /*
+ * We do not own the lock. The JVM spec requires that we
+ * throw an exception in this case.
+ */
+ ThrowIllegalMonitorStateException("unlock of unowned monitor");
+ return false;
+ }
+ } else {
+ /*
+ * The lock is fat. We must check to see if Unlock has
+ * raised any exceptions before continuing.
+ */
+ DCHECK(LW_MONITOR(*thinp) != NULL);
+ if (!LW_MONITOR(*thinp)->Unlock(self)) {
+ // An exception has been raised. Do not fall through.
+ return false;
+ }
+ }
+ return true;
+}
+
+/*
+ * Object.wait(). Also called for class init.
+ *
+ * Waiting requires a fat monitor, so a thin lock is inflated first and
+ * the call is then delegated to the instance Wait() above.
+ */
+void Monitor::Wait(Thread* self, Object *obj, int64_t ms, int32_t ns, bool interruptShouldThrow) {
+ volatile int32_t* thinp = obj->GetRawLockWordAddress();
+
+ // If the lock is still thin, we need to fatten it.
+ uint32_t thin = *thinp;
+ if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
+ // Make sure that 'self' holds the lock.
+ if (LW_LOCK_OWNER(thin) != self->thin_lock_id_) {
+ ThrowIllegalMonitorStateException("object not locked by thread before wait()");
+ return;
+ }
+
+ /* This thread holds the lock. We need to fatten the lock
+ * so 'self' can block on it. Don't update the object lock
+ * field yet, because 'self' needs to acquire the lock before
+ * any other thread gets a chance.
+ */
+ Inflate(self, obj);
+ LOG(INFO) << StringPrintf("(%d) lock %p fattened by wait()", self->thin_lock_id_, thinp);
+ }
+ // Inflate() locks the new monitor as 'self' before publishing it, so
+ // the owner check inside Wait() below succeeds.
+ LW_MONITOR(*thinp)->Wait(self, ms, ns, interruptShouldThrow);
+}
+
+/*
+ * Object.notify(). A thin lock cannot have waiters (waiting inflates
+ * the lock), so after the ownership check the thin case is a no-op.
+ */
+void Monitor::Notify(Thread* self, Object *obj) {
+ uint32_t thin = *obj->GetRawLockWordAddress();
+
+ // If the lock is still thin, there aren't any waiters;
+ // waiting on an object forces lock fattening.
+ if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
+ // Make sure that 'self' holds the lock.
+ if (LW_LOCK_OWNER(thin) != self->thin_lock_id_) {
+ ThrowIllegalMonitorStateException("object not locked by thread before notify()");
+ return;
+ }
+ // no-op; there are no waiters to notify.
+ } else {
+ // It's a fat lock.
+ LW_MONITOR(thin)->Notify(self);
+ }
+}
+
+/*
+ * Object.notifyAll(). A thin lock cannot have waiters (waiting inflates
+ * the lock), so after the ownership check the thin case is a no-op.
+ */
+void Monitor::NotifyAll(Thread* self, Object *obj) {
+ uint32_t thin = *obj->GetRawLockWordAddress();
+
+ // If the lock is still thin, there aren't any waiters;
+ // waiting on an object forces lock fattening.
+ if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
+ // Make sure that 'self' holds the lock.
+ if (LW_LOCK_OWNER(thin) != self->thin_lock_id_) {
+ ThrowIllegalMonitorStateException("object not locked by thread before notifyAll()");
+ return;
+ }
+ // no-op; there are no waiters to notify.
+ } else {
+ // It's a fat lock.
+ LW_MONITOR(thin)->NotifyAll(self);
+ }
+}
+
+// Extracts the owning thread's thin lock id from a raw lock word, or
+// returns 0 when the lock is unowned.
+uint32_t Monitor::GetLockOwner(uint32_t raw_lock_word) {
+ // Thin locks carry the owner's thin lock id in the word itself.
+ if (LW_SHAPE(raw_lock_word) == LW_SHAPE_THIN) {
+ return LW_LOCK_OWNER(raw_lock_word);
+ }
+ // Fat locks delegate to the monitor; a NULL owner means "unowned".
+ Thread* owner = LW_MONITOR(raw_lock_word)->owner_;
+ return (owner != NULL) ? owner->GetThinLockId() : 0;
+}
+
+} // namespace art
diff --git a/src/sync.h b/src/sync.h
new file mode 100644
index 0000000..f19237e
--- /dev/null
+++ b/src/sync.h
@@ -0,0 +1,138 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_SYNC_H_
+#define ART_SRC_SYNC_H_
+
+#include <pthread.h>
+#include <stdint.h>
+
+#include "mutex.h"
+
+namespace art {
+
+/*
+ * Monitor shape field. Used to distinguish thin locks from fat locks.
+ */
+#define LW_SHAPE_THIN 0
+#define LW_SHAPE_FAT 1
+#define LW_SHAPE_MASK 0x1
+#define LW_SHAPE(x) ((x) & LW_SHAPE_MASK)
+
+/*
+ * Hash state field. Used to signify that an object has had its
+ * identity hash code exposed or relocated.
+ */
+#define LW_HASH_STATE_UNHASHED 0
+#define LW_HASH_STATE_HASHED 1
+#define LW_HASH_STATE_HASHED_AND_MOVED 3
+#define LW_HASH_STATE_MASK 0x3
+#define LW_HASH_STATE_SHIFT 1
+#define LW_HASH_STATE(x) (((x) >> LW_HASH_STATE_SHIFT) & LW_HASH_STATE_MASK)
+
+/*
+ * Monitor accessor. Extracts a monitor structure pointer from a fat
+ * lock. Performs no error checking.
+ */
+#define LW_MONITOR(x) \
+ ((Monitor*)((x) & ~((LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT) | LW_SHAPE_MASK)))
+
+/*
+ * Lock owner field. Contains the thread id of the thread currently
+ * holding the lock.
+ */
+#define LW_LOCK_OWNER_MASK 0xffff
+#define LW_LOCK_OWNER_SHIFT 3
+#define LW_LOCK_OWNER(x) (((x) >> LW_LOCK_OWNER_SHIFT) & LW_LOCK_OWNER_MASK)
+
+/*
+ * Lock recursion count field. Contains a count of the number of times
+ * a lock has been recursively acquired.
+ */
+#define LW_LOCK_COUNT_MASK 0x1fff
+#define LW_LOCK_COUNT_SHIFT 19
+#define LW_LOCK_COUNT(x) (((x) >> LW_LOCK_COUNT_SHIFT) & LW_LOCK_COUNT_MASK)
+
+struct Object;
+struct Thread;
+
+class Monitor {
+ public:
+ ~Monitor();
+
+ // Returns the thin lock id of the thread owning the given lock word,
+ // or 0 if it is unowned.
+ static uint32_t GetLockOwner(uint32_t raw_lock_word);
+
+ // monitor-enter/monitor-exit entry points; thin locks are inflated to
+ // fat Monitors on contention or recursion-count overflow.
+ static void MonitorEnter(Thread* thread, Object* obj);
+ static bool MonitorExit(Thread* thread, Object* obj);
+
+ // Object.notify()/notifyAll()/wait() entry points.
+ static void Notify(Thread* self, Object* obj);
+ static void NotifyAll(Thread* self, Object* obj);
+ static void Wait(Thread* self, Object* obj, int64_t ms, int32_t ns, bool interruptShouldThrow);
+
+ static void SweepMonitorList(bool (isUnmarkedObject)(void*));
+
+ static void FreeMonitorList();
+
+ private:
+ Monitor(Object* obj);
+
+ // Wait-set management; the caller must own the monitor.
+ void AppendToWaitSet(Thread* thread);
+ void RemoveFromWaitSet(Thread* thread);
+
+ static void Inflate(Thread* self, Object* obj);
+
+ void Lock(Thread* self);
+ bool Unlock(Thread* thread);
+
+ void Notify(Thread* self);
+ void NotifyAll(Thread* self);
+
+ void Wait(Thread* self, int64_t msec, int32_t nsec, bool interruptShouldThrow);
+
+ /* Which thread currently owns the lock? */
+ Thread* owner_;
+
+ /* Owner's recursive lock depth */
+ int lock_count_;
+
+ /* What object are we part of (for debugging). */
+ Object* obj_;
+
+ /* Threads currently waiting on this monitor. */
+ Thread* wait_set_;
+
+ /* The underlying mutex; held while the monitor is owned. */
+ Mutex lock_;
+
+ /* Next monitor in the global monitor list (see Inflate()). */
+ Monitor* next_;
+
+ /*
+ * Who last acquired this monitor, when lock sampling is enabled.
+ * Even when enabled, ownerFileName may be NULL.
+ */
+ const char* owner_filename_;
+ uint32_t owner_line_number_;
+
+ friend class Object;
+};
+
+/*
+ * Relative timed wait on condition
+ */
+int dvmRelativeCondWait(pthread_cond_t* cond, pthread_mutex_t* mutex, int64_t msec, int32_t nsec);
+
+} // namespace art
+
+#endif // ART_SRC_SYNC_H_
diff --git a/src/thread.cc b/src/thread.cc
index 0da46b6..ed82989 100644
--- a/src/thread.cc
+++ b/src/thread.cc
@@ -831,6 +831,7 @@
Thread::Thread()
: peer_(NULL),
wait_mutex_("Thread wait mutex"),
+ wait_cond_("Thread wait condition variable"),
wait_monitor_(NULL),
interrupted_(false),
stack_end_(NULL),
@@ -846,7 +847,7 @@
void MonitorExitVisitor(const Object* object, void*) {
Object* entered_monitor = const_cast<Object*>(object);
- entered_monitor->MonitorExit();
+ entered_monitor->MonitorExit(Thread::Current());
}
Thread::~Thread() {
@@ -875,11 +876,21 @@
// Thread.join() is implemented as an Object.wait() on the Thread.lock
// object. Signal anyone who is waiting.
- //Object* lock = dvmGetFieldObject(self->threadObj, gDvm.offJavaLangThread_lock);
- //dvmLockObject(self, lock);
- //dvmObjectNotifyAll(self, lock);
- //dvmUnlockObject(self, lock);
- //lock = NULL;
+ if (peer_ != NULL) {
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ Class* java_lang_Thread_class = class_linker->FindSystemClass("Ljava/lang/Thread;");
+ Class* java_lang_ThreadLock_class = class_linker->FindSystemClass("Ljava/lang/ThreadLock;");
+ Field* lock_field = java_lang_Thread_class->FindDeclaredInstanceField("lock", java_lang_ThreadLock_class);
+
+ Thread* self = Thread::Current();
+ Object* lock = lock_field->GetObject(peer_);
+ // This conditional is only needed for tests, where Thread.lock won't have been set.
+ if (lock != NULL) {
+ lock->MonitorEnter(self);
+ lock->NotifyAll();
+ lock->MonitorExit(self);
+ }
+ }
delete jni_env_;
jni_env_ = NULL;
@@ -1233,6 +1244,13 @@
return result;
}
+// Returns true if this thread owns the given object's monitor lock.
+// A NULL object is never held. (Assumes thin lock ids are non-zero, so
+// an unlocked object — owner id 0 — never matches; TODO: confirm.)
+bool Thread::HoldsLock(Object* object) {
+ if (object == NULL) {
+ return false;
+ }
+ return object->GetLockOwner() == thin_lock_id_;
+}
+
void Thread::VisitRoots(Heap::RootVisitor* visitor, void* arg) const {
if (exception_ != NULL) {
visitor(exception_, arg);
diff --git a/src/thread.h b/src/thread.h
index 2e7b615..6c76aeb 100644
--- a/src/thread.h
+++ b/src/thread.h
@@ -267,6 +267,8 @@
void WaitUntilSuspended();
+ bool HoldsLock(Object*);
+
/*
* Changes the priority of this thread to match that of the java.lang.Thread object.
*
@@ -400,6 +402,20 @@
return interrupted_;
}
+ // Sets this thread's interrupted flag and, if it is blocked in a
+ // monitor wait, signals its wait condition so it wakes up promptly.
+ // A second interrupt before the flag is consumed is a no-op.
+ void Interrupt() {
+ MutexLock mu(wait_mutex_);
+ if (interrupted_) {
+ return;
+ }
+ interrupted_ = true;
+ NotifyLocked();
+ }
+
+ // Wakes this thread if it is currently blocked in a monitor wait;
+ // used by Monitor::NotifyAll(). Takes wait_mutex_ before signalling.
+ void Notify() {
+ MutexLock mu(wait_mutex_);
+ NotifyLocked();
+ }
+
void RegisterExceptionEntryPoint(void (*handler)(Method**)) {
exception_entry_point_ = handler;
}
@@ -517,6 +533,12 @@
void InitFunctionPointers();
void InitStackHwm();
+ // Signals wait_cond_ if this thread is in a monitor wait. The caller
+ // must hold wait_mutex_, which guards wait_monitor_.
+ void NotifyLocked() {
+ if (wait_monitor_ != NULL) {
+ wait_cond_.Signal();
+ }
+ }
+
static void ThreadExitCallback(void* arg);
void WalkStack(StackVisitor* visitor) const;
@@ -541,10 +563,15 @@
// Guards the 'interrupted_' and 'wait_monitor_' members.
mutable Mutex wait_mutex_;
+ ConditionVariable wait_cond_;
// Pointer to the monitor lock we're currently waiting on (or NULL), guarded by wait_mutex_.
Monitor* wait_monitor_;
// Thread "interrupted" status; stays raised until queried or thrown, guarded by wait_mutex_.
bool interrupted_;
+ // The next thread in the wait set this thread is part of.
+ Thread* wait_next_;
+
+ friend class Monitor;
// FIXME: placeholder for the gc cardTable
uint32_t card_table_;
diff --git a/src/thread_list.cc b/src/thread_list.cc
index 63cbf40..7b743ee 100644
--- a/src/thread_list.cc
+++ b/src/thread_list.cc
@@ -20,9 +20,9 @@
ThreadList::ThreadList()
: thread_list_lock_("thread list lock"),
- thread_suspend_count_lock_("thread suspend count lock") {
- CHECK_PTHREAD_CALL(pthread_cond_init, (&thread_start_cond_, NULL), "thread_start_cond_");
- CHECK_PTHREAD_CALL(pthread_cond_init, (&thread_suspend_count_cond_, NULL), "thread_suspend_count_cond_");
+ thread_start_cond_("thread_start_cond_"),
+ thread_suspend_count_lock_("thread suspend count lock"),
+ thread_suspend_count_cond_("thread_suspend_count_cond_") {
}
ThreadList::~ThreadList() {
@@ -30,9 +30,6 @@
Runtime::Current()->DetachCurrentThread();
}
- CHECK_PTHREAD_CALL(pthread_cond_destroy, (&thread_start_cond_), "thread_start_cond_");
- CHECK_PTHREAD_CALL(pthread_cond_destroy, (&thread_suspend_count_cond_), "thread_suspend_count_cond_");
-
// All threads should have exited and unregistered when we
// reach this point. This means that all daemon threads had been
// shutdown cleanly.
@@ -71,7 +68,7 @@
* and re-acquiring the lock provides the memory barriers we
* need for correct behavior on SMP.
*/
- CHECK_PTHREAD_CALL(pthread_cond_wait, (&thread_suspend_count_cond_, thread_suspend_count_lock_.GetImpl()), __FUNCTION__);
+ thread_suspend_count_cond_.Wait(thread_suspend_count_lock_);
}
CHECK_EQ(thread->suspend_count_, 0);
}
@@ -152,7 +149,7 @@
{
//LOG(INFO) << *self << " ResumeAll waking others";
MutexLock mu(thread_suspend_count_lock_);
- CHECK_PTHREAD_CALL(pthread_cond_broadcast, (&thread_suspend_count_cond_), "thread_suspend_count_cond_");
+ thread_suspend_count_cond_.Broadcast();
}
//LOG(INFO) << *self << " ResumeAll complete";
@@ -212,7 +209,7 @@
// We wait for the child to tell us that it's in the thread list.
while (child->GetState() != Thread::kStarting) {
- CHECK_PTHREAD_CALL(pthread_cond_wait, (&thread_start_cond_, thread_list_lock_.GetImpl()), __FUNCTION__);
+ thread_start_cond_.Wait(thread_list_lock_);
}
}
@@ -222,7 +219,7 @@
// Tell the child that it's safe: it will see any future suspend request.
child->SetState(Thread::kVmWait);
- CHECK_PTHREAD_CALL(pthread_cond_broadcast, (&thread_start_cond_), __FUNCTION__);
+ thread_start_cond_.Broadcast();
}
void ThreadList::WaitForGo() {
@@ -233,12 +230,12 @@
// Tell our parent that we're in the thread list.
self->SetState(Thread::kStarting);
- CHECK_PTHREAD_CALL(pthread_cond_broadcast, (&thread_start_cond_), __FUNCTION__);
+ thread_start_cond_.Broadcast();
// Wait until our parent tells us there's no suspend still pending
// from before we were on the thread list.
while (self->GetState() != Thread::kVmWait) {
- CHECK_PTHREAD_CALL(pthread_cond_wait, (&thread_start_cond_, thread_list_lock_.GetImpl()), __FUNCTION__);
+ thread_start_cond_.Wait(thread_list_lock_);
}
// Enter the runnable state. We know that any pending suspend will affect us now.
diff --git a/src/thread_list.h b/src/thread_list.h
index 5630b29..aa1415a 100644
--- a/src/thread_list.h
+++ b/src/thread_list.h
@@ -59,12 +59,12 @@
std::bitset<kMaxThreadId> allocated_ids_;
std::list<Thread*> list_;
- pthread_cond_t thread_start_cond_;
+ ConditionVariable thread_start_cond_;
// This lock guards every thread's suspend_count_ field...
mutable Mutex thread_suspend_count_lock_;
// ...and is used in conjunction with this condition variable.
- pthread_cond_t thread_suspend_count_cond_;
+ ConditionVariable thread_suspend_count_cond_;
friend class Thread;
friend class ThreadListLock;