| | |
|---|---|
| author | 2012-03-01 18:46:05 -0800 |
| committer | 2012-03-01 21:15:36 -0800 |
| commit | ffb465f23d9549dd591e6aa62e9250523cb00233 (patch) |
| tree | 2108ba79a4ec8031faa56ef0806f93bc2217c237 |
| parent | 71ac99485e79ad7eb1ba3ea2d404d53bb5784c13 (diff) |
libcorkscrew native stacks, mutex ranking, and better ScopedThreadListLock.
This change uses libcorkscrew to show native stacks for threads in kNative
and (unlike dalvikvm) also for threads in kVmWait; working on the runtime
directly, I've found it somewhat useful to be able to see _which_ internal
resource we're waiting on. We can always take that back out (or make it
oatexecd-only) if it turns out to be too noisy/confusing for app developers.
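(For reference: the new Thread::DumpNativeStack in thread_android.cc, in the diff
below, follows the usual libcorkscrew unwind/symbolize/format sequence. Here is a
minimal standalone sketch of that flow; the helper name, the fixed depth of 32,
and the fprintf output are illustrative choices rather than part of this patch.)

```cpp
#include <corkscrew/backtrace.h>

#include <cstddef>
#include <cstdio>
#include <sys/types.h>

// Hypothetical helper (not in the patch): unwind one thread's native stack
// and print each frame.
void PrintNativeStack(pid_t tid) {
  static const size_t kMaxDepth = 32;  // arbitrary cap, mirroring the patch
  backtrace_frame_t frames[kMaxDepth];
  // Walk the target thread's native stack, skipping 0 frames, up to kMaxDepth.
  ssize_t frame_count = unwind_backtrace_thread(tid, frames, 0, kMaxDepth);
  if (frame_count <= 0) {
    return;  // unwind failed, or there was nothing to report
  }
  // Resolve each frame's PC to symbol information...
  backtrace_symbol_t symbols[kMaxDepth];
  get_backtrace_symbols(frames, frame_count, symbols);
  // ...and format each frame as one printable line.
  for (size_t i = 0; i < static_cast<size_t>(frame_count); ++i) {
    char line[MAX_BACKTRACE_LINE_LENGTH];
    format_backtrace_line(i, &frames[i], &symbols[i], line, sizeof(line));
    fprintf(stderr, "  %s\n", line);
  }
  free_backtrace_symbols(symbols, frame_count);
}
```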
This change also lets us rank mutexes and enforce -- in oatexecd -- that you
take locks in a specific order.
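(The enforcement machinery, in the mutex.h/mutex.cc/thread.cc hunks below, is
small: every Mutex carries a MutexRank, every Thread counts how many mutexes it
holds at each rank, and acquiring a mutex is only legal while no mutex of a
higher rank is held. Here is a standalone sketch of the same check; the
thread_local counter is an illustration, since the patch keeps this array on
Thread itself.)

```cpp
#include <cassert>
#include <cstdint>

// Ranks as in the patch: lower-ranked mutexes must be taken first.
enum MutexRank {
  kHeapLock = 0,
  kThreadListLock = 1,
  kThreadSuspendCountLock = 2,
  kMaxMutexRank = kThreadSuspendCountLock,
};

// Per-thread count of held mutexes at each rank (the patch stores this
// array on Thread; thread_local keeps the sketch self-contained).
thread_local uint32_t g_held_mutexes[kMaxMutexRank + 1];

void CheckRank(MutexRank rank, bool is_locking) {
  if (is_locking) {
    // First acquisition at this rank: no higher-ranked mutex may be held.
    if (g_held_mutexes[rank] == 0) {
      for (int i = kMaxMutexRank; i > rank; --i) {
        assert(g_held_mutexes[i] == 0 && "lock order violation");
      }
    }
    ++g_held_mutexes[rank];  // recursive acquisition at the same rank is fine
  } else {
    assert(g_held_mutexes[rank] > 0);
    --g_held_mutexes[rank];
  }
}
```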
Both of these helped me test the third novelty: removing the heap locking from
ScopedThreadListLock. I've manually inspected all the callers and added a
ScopedHeapLock where I think one is necessary. In manual testing, this makes
jdb a lot less prone to locking us up. There still seems to be a problem with
the JDWP VirtualMachine.Resume command, but I'll look at that separately. This
is a big enough and potentially disruptive enough change already.
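(As a usage example of the CheckRank sketch above: the ranks encode "heap lock
before thread list lock before suspend count lock", which is exactly the
ordering callers must now follow by hand when they pair a ScopedHeapLock with a
ScopedThreadListLock.)

```cpp
int main() {
  CheckRank(kHeapLock, true);        // ok: nothing else is held
  CheckRank(kThreadListLock, true);  // ok: only a lower rank is held
  CheckRank(kThreadListLock, false);
  CheckRank(kHeapLock, false);

  CheckRank(kThreadListLock, true);
  CheckRank(kHeapLock, true);  // asserts: a higher-ranked mutex is already held
  return 0;
}
```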
Change-Id: Iad974358919d0e00674662dc8a69cc65878cfb5c
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | build/Android.libart.mk | 7 |
| -rw-r--r-- | src/dalvik_system_VMStack.cc | 1 |
| -rw-r--r-- | src/debugger.cc | 2 |
| -rw-r--r-- | src/heap.cc | 16 |
| -rw-r--r-- | src/hprof/hprof.cc | 2 |
| -rw-r--r-- | src/monitor.cc | 53 |
| -rw-r--r-- | src/mutex.cc | 31 |
| -rw-r--r-- | src/mutex.h | 17 |
| -rw-r--r-- | src/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc | 1 |
| -rw-r--r-- | src/thread.cc | 31 |
| -rw-r--r-- | src/thread.h | 11 |
| -rw-r--r-- | src/thread_android.cc | 25 |
| -rw-r--r-- | src/thread_linux.cc | 4 |
| -rw-r--r-- | src/thread_list.cc | 35 |
| -rw-r--r-- | src/thread_list.h | 1 |
| -rw-r--r-- | src/trace.cc | 23 |
16 files changed, 180 insertions, 80 deletions
```diff
diff --git a/build/Android.libart.mk b/build/Android.libart.mk
index f6aac09370..9bfb15e43f 100644
--- a/build/Android.libart.mk
+++ b/build/Android.libart.mk
@@ -128,10 +128,13 @@ define build-libart
   endif
   LOCAL_SHARED_LIBRARIES := liblog libnativehelper
   ifeq ($$(art_target_or_host),target)
-    LOCAL_SHARED_LIBRARIES += libcutils libstlport libz libdl libdynamic_annotations
+    LOCAL_SHARED_LIBRARIES += libcutils libstlport libz libdl
+    LOCAL_SHARED_LIBRARIES += libdynamic_annotations # tsan support
+    LOCAL_SHARED_LIBRARIES += libcorkscrew # native stack trace support
   else # host
     LOCAL_STATIC_LIBRARIES += libcutils
-    LOCAL_SHARED_LIBRARIES += libz-host libdynamic_annotations-host
+    LOCAL_SHARED_LIBRARIES += libz-host
+    LOCAL_SHARED_LIBRARIES += libdynamic_annotations-host # tsan support
     LOCAL_LDLIBS := -ldl -lpthread
     ifeq ($(HOST_OS),linux)
       LOCAL_LDLIBS += -lrt
diff --git a/src/dalvik_system_VMStack.cc b/src/dalvik_system_VMStack.cc
index fd2af393e9..74258473be 100644
--- a/src/dalvik_system_VMStack.cc
+++ b/src/dalvik_system_VMStack.cc
@@ -26,6 +26,7 @@ namespace art {
 namespace {
 
 static jobject GetThreadStack(JNIEnv* env, jobject javaThread) {
+  ScopedHeapLock heap_lock;
   ScopedThreadListLock thread_list_lock;
   Thread* thread = Thread::FromManagedThread(env, javaThread);
   return (thread != NULL) ? GetThreadStack(env, thread) : NULL;
diff --git a/src/debugger.cc b/src/debugger.cc
index d2372e3c87..95e884914d 100644
--- a/src/debugger.cc
+++ b/src/debugger.cc
@@ -1301,7 +1301,7 @@ bool Dbg::GetThreadName(JDWP::ObjectId threadId, std::string& name) {
   if (thread == NULL) {
     return false;
   }
-  name = thread->GetThreadName()->ToModifiedUtf8();
+  thread->GetThreadName(name);
   return true;
 }
diff --git a/src/heap.cc b/src/heap.cc
index ad20f9cb0f..e1532142ba 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -83,7 +83,7 @@ static void UpdateFirstAndLastSpace(Space** first_space, Space** last_space, Spa
   }
 }
 
-bool GenerateImage(const std::string image_file_name) {
+static bool GenerateImage(const std::string image_file_name) {
   const std::string boot_class_path_string(Runtime::Current()->GetBootClassPathString());
   std::vector<std::string> boot_class_path;
   Split(boot_class_path_string, ':', boot_class_path);
@@ -257,7 +257,7 @@ void Heap::Init(size_t initial_size, size_t growth_limit, size_t capacity,
   // It's still to early to take a lock because there are no threads yet,
   // but we can create the heap lock now. We don't create it earlier to
   // make it clear that you can't use locks during heap initialization.
-  lock_ = new Mutex("Heap lock");
+  lock_ = new Mutex("Heap lock", kHeapLock);
 
   Heap::EnableObjectValidation();
 
@@ -280,7 +280,7 @@ void Heap::Destroy() {
 
 Object* Heap::AllocObject(Class* klass, size_t byte_count) {
   {
-    ScopedHeapLock lock;
+    ScopedHeapLock heap_lock;
     DCHECK(klass == NULL || (klass->IsClassClass() && byte_count >= sizeof(Class)) ||
            (klass->IsVariableSize() || klass->GetObjectSize() == byte_count) ||
            strlen(ClassHelper(klass).GetDescriptor()) == 0);
@@ -323,7 +323,7 @@ void Heap::VerifyObject(const Object* obj) {
   if (!verify_objects_) {
     return;
   }
-  ScopedHeapLock lock;
+  ScopedHeapLock heap_lock;
   Heap::VerifyObjectLocked(obj);
 }
 #endif
@@ -366,7 +366,7 @@ void Heap::VerificationCallback(Object* obj, void* arg) {
 }
 
 void Heap::VerifyHeap() {
-  ScopedHeapLock lock;
+  ScopedHeapLock heap_lock;
   live_bitmap_->Walk(Heap::VerificationCallback, NULL);
 }
 
@@ -553,14 +553,14 @@ class InstanceCounter {
 };
 
 int64_t Heap::CountInstances(Class* c, bool count_assignable) {
-  ScopedHeapLock lock;
+  ScopedHeapLock heap_lock;
   InstanceCounter counter(c, count_assignable);
   live_bitmap_->Walk(InstanceCounter::Callback, &counter);
   return counter.GetCount();
 }
 
 void Heap::CollectGarbage(bool clear_soft_references) {
-  ScopedHeapLock lock;
+  ScopedHeapLock heap_lock;
   CollectGarbageInternal(clear_soft_references);
 }
 
@@ -685,7 +685,7 @@ void Heap::GrowForUtilization() {
 }
 
 void Heap::ClearGrowthLimit() {
-  ScopedHeapLock lock;
+  ScopedHeapLock heap_lock;
   WaitForConcurrentGcToComplete();
   alloc_space_->ClearGrowthLimit();
 }
diff --git a/src/hprof/hprof.cc b/src/hprof/hprof.cc
index be37207c30..4d5cae10a3 100644
--- a/src/hprof/hprof.cc
+++ b/src/hprof/hprof.cc
@@ -757,7 +757,7 @@ void HprofBitmapCallback(Object *obj, void *arg) {
  */
 int DumpHeap(const char* fileName, int fd, bool directToDdms) {
   CHECK(fileName != NULL);
-  ScopedHeapLock lock;
+  ScopedHeapLock heap_lock;
   ScopedThreadStateChange tsc(Thread::Current(), Thread::kRunnable);
   ThreadList* thread_list = Runtime::Current()->GetThreadList();
diff --git a/src/monitor.cc b/src/monitor.cc
index e3c98bba67..7261bc451d 100644
--- a/src/monitor.cc
+++ b/src/monitor.cc
@@ -249,49 +249,60 @@ static std::string ThreadToString(Thread* thread) {
   return oss.str();
 }
 
-void Monitor::FailedUnlock(Object* obj, Thread* expected_owner, Thread* found_owner,
-                           Monitor* mon) {
-  // Acquire thread list lock so threads won't disappear from under us
-  ScopedThreadListLock thread_list_lock;
-  // Re-read owner now that we hold lock
-  Thread* current_owner = mon != NULL ? mon->owner_ : NULL;
+void Monitor::FailedUnlock(Object* o, Thread* expected_owner, Thread* found_owner,
+                           Monitor* monitor) {
+  Thread* current_owner = NULL;
+  std::string current_owner_string;
+  std::string expected_owner_string;
+  std::string found_owner_string;
+  {
+    // TODO: isn't this too late to prevent threads from disappearing?
+    // Acquire thread list lock so threads won't disappear from under us.
+    ScopedThreadListLock thread_list_lock;
+    // Re-read owner now that we hold lock.
+    current_owner = (monitor != NULL) ? monitor->owner_ : NULL;
+    // Get short descriptions of the threads involved.
+    current_owner_string = ThreadToString(current_owner);
+    expected_owner_string = ThreadToString(expected_owner);
+    found_owner_string = ThreadToString(found_owner);
+  }
   if (current_owner == NULL) {
     if (found_owner == NULL) {
       ThrowIllegalMonitorStateExceptionF("unlock of unowned monitor on object of type '%s'"
                                          " on thread '%s'",
-                                         PrettyTypeOf(obj).c_str(),
-                                         ThreadToString(expected_owner).c_str());
+                                         PrettyTypeOf(o).c_str(),
+                                         expected_owner_string.c_str());
     } else {
       // Race: the original read found an owner but now there is none
       ThrowIllegalMonitorStateExceptionF("unlock of monitor owned by '%s' on object of type '%s'"
                                          " (where now the monitor appears unowned) on thread '%s'",
-                                         ThreadToString(found_owner).c_str(),
-                                         PrettyTypeOf(obj).c_str(),
-                                         ThreadToString(expected_owner).c_str());
+                                         found_owner_string.c_str(),
+                                         PrettyTypeOf(o).c_str(),
+                                         expected_owner_string.c_str());
     }
   } else {
     if (found_owner == NULL) {
       // Race: originally there was no owner, there is now
       ThrowIllegalMonitorStateExceptionF("unlock of monitor owned by '%s' on object of type '%s'"
                                          " (originally believed to be unowned) on thread '%s'",
-                                         ThreadToString(current_owner).c_str(),
-                                         PrettyTypeOf(obj).c_str(),
-                                         ThreadToString(expected_owner).c_str());
+                                         current_owner_string.c_str(),
+                                         PrettyTypeOf(o).c_str(),
+                                         expected_owner_string.c_str());
     } else {
       if (found_owner != current_owner) {
         // Race: originally found and current owner have changed
         ThrowIllegalMonitorStateExceptionF("unlock of monitor originally owned by '%s' (now"
                                            " owned by '%s') on object of type '%s' on thread '%s'",
-                                           ThreadToString(found_owner).c_str(),
-                                           ThreadToString(current_owner).c_str(),
-                                           PrettyTypeOf(obj).c_str(),
-                                           ThreadToString(expected_owner).c_str());
+                                           found_owner_string.c_str(),
+                                           current_owner_string.c_str(),
+                                           PrettyTypeOf(o).c_str(),
+                                           expected_owner_string.c_str());
       } else {
         ThrowIllegalMonitorStateExceptionF("unlock of monitor owned by '%s' on object of type '%s'"
                                            " on thread '%s",
-                                           ThreadToString(current_owner).c_str(),
-                                           PrettyTypeOf(obj).c_str(),
-                                           ThreadToString(expected_owner).c_str());
+                                           current_owner_string.c_str(),
+                                           PrettyTypeOf(o).c_str(),
+                                           expected_owner_string.c_str());
       }
     }
   }
diff --git a/src/mutex.cc b/src/mutex.cc
index 5d207f67d9..963ac99e9b 100644
--- a/src/mutex.cc
+++ b/src/mutex.cc
@@ -18,16 +18,28 @@
 
 #include <errno.h>
 
-#include "heap.h"  // for VERIFY_OBJECT_ENABLED
 #include "logging.h"
-#include "utils.h"
 #include "runtime.h"
+#include "thread.h"
+#include "utils.h"
 
 #define CHECK_MUTEX_CALL(call, args) CHECK_PTHREAD_CALL(call, args, name_)
 
 namespace art {
 
-Mutex::Mutex(const char* name) : name_(name) {
+static inline void CheckRank(MutexRank rank, bool is_locking) {
+#ifndef NDEBUG
+  if (rank == -1) {
+    return;
+  }
+  Thread* self = Thread::Current();
+  if (self != NULL) {
+    self->CheckRank(rank, is_locking);
+  }
+#endif
+}
+
+Mutex::Mutex(const char* name, MutexRank rank) : name_(name), rank_(rank) {
   // Like Java, we use recursive mutexes.
   pthread_mutexattr_t attributes;
   CHECK_MUTEX_CALL(pthread_mutexattr_init, (&attributes));
@@ -47,6 +59,7 @@ Mutex::~Mutex() {
 
 void Mutex::Lock() {
   CHECK_MUTEX_CALL(pthread_mutex_lock, (&mutex_));
+  CheckRank(rank_, true);
   AssertHeld();
 }
 
@@ -59,6 +72,7 @@ bool Mutex::TryLock() {
     errno = result;
     PLOG(FATAL) << "pthread_mutex_trylock failed for " << name_;
   }
+  CheckRank(rank_, true);
   AssertHeld();
   return true;
 }
@@ -66,6 +80,7 @@ bool Mutex::TryLock() {
 void Mutex::Unlock() {
   AssertHeld();
   CHECK_MUTEX_CALL(pthread_mutex_unlock, (&mutex_));
+  CheckRank(rank_, false);
 }
 
 pid_t Mutex::GetOwner() {
@@ -152,4 +167,14 @@ void ConditionVariable::TimedWait(Mutex& mutex, const timespec& ts) {
   }
 }
 
+std::ostream& operator<<(std::ostream& os, const MutexRank& rhs) {
+  switch (rhs) {
+  case kHeapLock: os << "HeapLock"; break;
+  case kThreadListLock: os << "ThreadListLock"; break;
+  case kThreadSuspendCountLock: os << "ThreadSuspendCountLock"; break;
+  default: os << "MutexRank[" << static_cast<int>(rhs) << "]"; break;
+  }
+  return os;
+}
+
 }  // namespace
diff --git a/src/mutex.h b/src/mutex.h
index f4658fb420..b4774d74ba 100644
--- a/src/mutex.h
+++ b/src/mutex.h
@@ -19,6 +19,8 @@
 
 #include <pthread.h>
 #include <stdint.h>
+
+#include <iosfwd>
 #include <string>
 
 #include "logging.h"
@@ -26,9 +28,18 @@ namespace art {
 
+enum MutexRank {
+  kNoMutexRank = -1,
+  kHeapLock = 0,
+  kThreadListLock = 1,
+  kThreadSuspendCountLock = 2,
+  kMaxMutexRank = kThreadSuspendCountLock,
+};
+std::ostream& operator<<(std::ostream& os, const MutexRank& rhs);
+
 class Mutex {
  public:
-  explicit Mutex(const char* name);
+  explicit Mutex(const char* name, MutexRank rank = kNoMutexRank);
   ~Mutex();
 
   void Lock();
@@ -70,9 +81,9 @@ class Mutex {
 
   uint32_t GetDepth();
 
-  std::string name_;
-
   pthread_mutex_t mutex_;
+  std::string name_;
+  MutexRank rank_;
 
   DISALLOW_COPY_AND_ASSIGN(Mutex);
 };
diff --git a/src/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc b/src/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
index 71655a5e57..51983ead0b 100644
--- a/src/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
+++ b/src/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
@@ -63,6 +63,7 @@ static Thread* FindThreadByThinLockId(uint32_t thin_lock_id) {
  * NULL on failure, e.g. if the threadId couldn't be found.
  */
 static jobjectArray DdmVmInternal_getStackTraceById(JNIEnv* env, jclass, jint thin_lock_id) {
+  ScopedHeapLock heap_lock;
   ScopedThreadListLock thread_list_lock;
   Thread* thread = FindThreadByThinLockId(static_cast<uint32_t>(thin_lock_id));
   if (thread == NULL) {
diff --git a/src/thread.cc b/src/thread.cc
index 74107d583f..1c521c1583 100644
--- a/src/thread.cc
+++ b/src/thread.cc
@@ -404,6 +404,7 @@ void Thread::Dump(std::ostream& os, bool full) const {
   }
   os << GetState() << ",Thread*=" << this
+     << ",peer=" << peer_
      << ",\"" << *name_ << "\""
      << "]";
 }
@@ -438,6 +439,10 @@ String* Thread::GetThreadName() const {
   return (peer_ != NULL) ? reinterpret_cast<String*>(gThread_name->GetObject(peer_)) : NULL;
 }
 
+void Thread::GetThreadName(std::string& name) const {
+  name.assign(*name_);
+}
+
 void Thread::DumpState(std::ostream& os) const {
   std::string group_name;
   int priority;
@@ -586,6 +591,10 @@ struct StackDumpVisitor : public Thread::StackVisitor {
 };
 
 void Thread::DumpStack(std::ostream& os) const {
+  // If we're currently in native code, dump that stack before dumping the managed stack.
+  if (GetState() == Thread::kNative || GetState() == Thread::kVmWait) {
+    DumpNativeStack(os);
+  }
   StackDumpVisitor dumper(os, this);
   WalkStack(&dumper);
 }
@@ -681,7 +690,7 @@ bool Thread::IsSuspended() {
 static void ReportThreadSuspendTimeout(Thread* waiting_thread) {
   Runtime* runtime = Runtime::Current();
   std::ostringstream ss;
-  ss << "Thread suspend timeout; waiting thread=" << *waiting_thread << "\n";
+  ss << "Thread suspend timeout waiting for thread " << *waiting_thread << "\n";
   runtime->DumpLockHolders(ss);
   ss << "\n";
   runtime->GetThreadList()->DumpLocked(ss);
@@ -841,6 +850,7 @@ Thread::Thread()
       trace_stack_(new std::vector<TraceStackFrame>),
       name_(new std::string("<native thread without managed peer>")) {
   CHECK_EQ((sizeof(Thread) % 4), 0U) << sizeof(Thread);
+  memset(&held_mutexes_[0], 0, sizeof(held_mutexes_));
 }
 
 void MonitorExitVisitor(const Object* object, void*) {
@@ -1664,4 +1674,23 @@ std::ostream& operator<<(std::ostream& os, const Thread& thread) {
   return os;
 }
 
+void Thread::CheckRank(MutexRank rank, bool is_locking) {
+  if (is_locking) {
+    if (held_mutexes_[rank] == 0) {
+      bool bad_mutexes_held = false;
+      for (int i = kMaxMutexRank; i > rank; --i) {
+        if (held_mutexes_[i] != 0) {
+          LOG(ERROR) << "holding " << static_cast<MutexRank>(i) << " while " << (is_locking ? "locking" : "unlocking") << " " << rank;
+          bad_mutexes_held = true;
+        }
+      }
+      CHECK(!bad_mutexes_held);
+    }
+    ++held_mutexes_[rank];
+  } else {
+    CHECK_GT(held_mutexes_[rank], 0U);
+    --held_mutexes_[rank];
+  }
+}
+
 }  // namespace art
diff --git a/src/thread.h b/src/thread.h
index 7963091f68..02fa87c722 100644
--- a/src/thread.h
+++ b/src/thread.h
@@ -161,9 +161,13 @@ class PACKED Thread {
     return tid_;
   }
 
-  // Returns the java.lang.Thread's name, or NULL.
+  // Returns the java.lang.Thread's name, or NULL if this Thread* doesn't have a peer.
   String* GetThreadName() const;
 
+  // Sets 'name' to the java.lang.Thread's name. This requires no transition to managed code,
+  // allocation, or locking.
+  void GetThreadName(std::string& name) const;
+
   // Sets the thread's name.
   void SetThreadName(const char* name);
@@ -418,6 +422,8 @@ class PACKED Thread {
     return frame;
   }
 
+  void CheckRank(MutexRank rank, bool is_locking);
+
  private:
   Thread();
   ~Thread();
@@ -428,6 +434,7 @@ class PACKED Thread {
   void DumpState(std::ostream& os) const;
   void DumpStack(std::ostream& os) const;
+  void DumpNativeStack(std::ostream& os) const;
 
   // Out-of-line conveniences for debugging in gdb.
   static Thread* CurrentFromGdb();  // Like Thread::Current.
@@ -563,6 +570,8 @@ class PACKED Thread {
   // A cached copy of the java.lang.Thread's name.
   std::string* name_;
 
+  uint32_t held_mutexes_[kMaxMutexRank + 1];
+
  public:
   // Runtime support function pointers
   void (*pDebugMe)(Method*, uint32_t);
diff --git a/src/thread_android.cc b/src/thread_android.cc
index 295a509296..6f803335ad 100644
--- a/src/thread_android.cc
+++ b/src/thread_android.cc
@@ -21,6 +21,7 @@
 #include <limits.h>
 #include <errno.h>
 
+#include <corkscrew/backtrace.h>
 #include <cutils/sched_policy.h>
 #include <utils/threads.h>
 
@@ -88,4 +89,28 @@ int Thread::GetNativePriority() {
   return managed_priority;
 }
 
+void Thread::DumpNativeStack(std::ostream& os) const {
+  const size_t MAX_DEPTH = 32;
+  UniquePtr<backtrace_frame_t[]> backtrace(new backtrace_frame_t[MAX_DEPTH]);
+  ssize_t frame_count = unwind_backtrace_thread(GetTid(), backtrace.get(), 0, MAX_DEPTH);
+  if (frame_count == -1) {
+    os << "  (unwind_backtrace_thread failed for thread " << GetTid() << ".)";
+    return;
+  } else if (frame_count == 0) {
+    return;
+  }
+
+  UniquePtr<backtrace_symbol_t[]> backtrace_symbols(new backtrace_symbol_t[frame_count]);
+  get_backtrace_symbols(backtrace.get(), frame_count, backtrace_symbols.get());
+
+  for (size_t i = 0; i < static_cast<size_t>(frame_count); ++i) {
+    char line[MAX_BACKTRACE_LINE_LENGTH];
+    format_backtrace_line(i, &backtrace[i], &backtrace_symbols[i],
+                          line, MAX_BACKTRACE_LINE_LENGTH);
+    os << "  " << line << "\n";
+  }
+
+  free_backtrace_symbols(backtrace_symbols.get(), frame_count);
+}
+
 }  // namespace art
diff --git a/src/thread_linux.cc b/src/thread_linux.cc
index 2bbbb25bb3..9578228e27 100644
--- a/src/thread_linux.cc
+++ b/src/thread_linux.cc
@@ -18,6 +18,10 @@
 
 namespace art {
 
+void Thread::DumpNativeStack(std::ostream& os) const {
+  // TODO: use glibc backtrace(3).
+}
+
 void Thread::SetNativePriority(int) {
   // Do nothing.
 }
diff --git a/src/thread_list.cc b/src/thread_list.cc
index f344e674f9..6ce34ec782 100644
--- a/src/thread_list.cc
+++ b/src/thread_list.cc
@@ -23,28 +23,14 @@ namespace art {
 
 ScopedThreadListLock::ScopedThreadListLock() {
-  // Self may be null during shutdown.
-  Thread* self = Thread::Current();
-
-  // We insist that anyone taking the thread list lock already has the heap lock,
-  // because pretty much any time someone takes the thread list lock, they may
-  // end up needing the heap lock (even removing a thread from the thread list calls
-  // back into managed code to remove the thread from its ThreadGroup, and that allocates
-  // an iterator).
-  // TODO: this makes the distinction between the two locks pretty pointless.
-  heap_lock_held_ = (self != NULL);
-  if (heap_lock_held_) {
-    Heap::Lock();
-  }
-
   // Avoid deadlock between two threads trying to SuspendAll
   // simultaneously by going to kVmWait if the lock cannot be
   // immediately acquired.
-  // TODO: is this needed if we took the heap lock? taking the heap lock will have done this,
-  // and the other thread will now be in kVmWait waiting for the heap lock.
   ThreadList* thread_list = Runtime::Current()->GetThreadList();
   if (!thread_list->thread_list_lock_.TryLock()) {
+    Thread* self = Thread::Current();
     if (self == NULL) {
+      // Self may be null during shutdown, but in that case there's no point going to kVmWait.
       thread_list->thread_list_lock_.Lock();
     } else {
       ScopedThreadStateChange tsc(self, Thread::kVmWait);
@@ -55,16 +41,13 @@ ScopedThreadListLock::ScopedThreadListLock() {
 
 ScopedThreadListLock::~ScopedThreadListLock() {
   Runtime::Current()->GetThreadList()->thread_list_lock_.Unlock();
-  if (heap_lock_held_) {
-    Heap::Unlock();
-  }
 }
 
 ThreadList::ThreadList()
-    : thread_list_lock_("thread list lock"),
+    : thread_list_lock_("thread list lock", kThreadListLock),
       thread_start_cond_("thread_start_cond_"),
       thread_exit_cond_("thread_exit_cond_"),
-      thread_suspend_count_lock_("thread suspend count lock"),
+      thread_suspend_count_lock_("thread suspend count lock", kThreadSuspendCountLock),
       thread_suspend_count_cond_("thread_suspend_count_cond_") {
   VLOG(threads) << "Default stack size: " << Runtime::Current()->GetDefaultStackSize() / KB << "KiB";
 }
@@ -424,16 +407,13 @@ void ThreadList::SignalGo(Thread* child) {
   CHECK(child != self);
 
   {
-    // We don't use ScopedThreadListLock here because we don't want to
-    // hold the heap lock while waiting because it can lead to deadlock.
-    thread_list_lock_.Lock();
+    ScopedThreadListLock thread_list_lock;
 
     VLOG(threads) << *self << " waiting for child " << *child << " to be in thread list...";
 
     // We wait for the child to tell us that it's in the thread list.
     while (child->GetState() != Thread::kStarting) {
       thread_start_cond_.Wait(thread_list_lock_);
     }
-    thread_list_lock_.Unlock();
   }
 
   // If we switch out of runnable and then back in, we know there's no pending suspend.
@@ -452,9 +432,7 @@ void ThreadList::WaitForGo() {
   DCHECK(Contains(self));
 
   {
-    // We don't use ScopedThreadListLock here because we don't want to
-    // hold the heap lock while waiting because it can lead to deadlock.
-    thread_list_lock_.Lock();
+    ScopedThreadListLock thread_list_lock;
 
     // Tell our parent that we're in the thread list.
     VLOG(threads) << *self << " telling parent that we're now in thread list...";
@@ -467,7 +445,6 @@
     while (self->GetState() != Thread::kVmWait) {
       thread_start_cond_.Wait(thread_list_lock_);
     }
-    thread_list_lock_.Unlock();
   }
 
   // Enter the runnable state. We know that any pending suspend will affect us now.
diff --git a/src/thread_list.h b/src/thread_list.h
index 02357fce7f..4bd94993d0 100644
--- a/src/thread_list.h
+++ b/src/thread_list.h
@@ -93,7 +93,6 @@ class ScopedThreadListLock {
   ~ScopedThreadListLock();
 
  private:
-  bool heap_lock_held_;
   DISALLOW_COPY_AND_ASSIGN(ScopedThreadListLock);
 };
diff --git a/src/trace.cc b/src/trace.cc
index f5d118d817..b481b744fa 100644
--- a/src/trace.cc
+++ b/src/trace.cc
@@ -45,17 +45,17 @@ static inline uint32_t TraceMethodCombine(uint32_t method, uint8_t traceEvent) {
   return (method | traceEvent);
 }
 
-bool UseThreadCpuClock() {
+static bool UseThreadCpuClock() {
   // TODO: Allow control over which clock is used
   return true;
 }
 
-bool UseWallClock() {
+static bool UseWallClock() {
   // TODO: Allow control over which clock is used
   return true;
 }
 
-void MeasureClockOverhead() {
+static void MeasureClockOverhead() {
   if (UseThreadCpuClock()) {
     ThreadCpuMicroTime();
   }
@@ -64,7 +64,7 @@
   }
 }
 
-uint32_t GetClockOverhead() {
+static uint32_t GetClockOverhead() {
   uint64_t start = ThreadCpuMicroTime();
 
   for (int i = 4000; i > 0; i--) {
@@ -82,19 +82,22 @@
   return uint32_t (elapsed / 32);
 }
 
-void Append2LE(uint8_t* buf, uint16_t val) {
+// TODO: put this somewhere with the big-endian equivalent used by JDWP.
+static void Append2LE(uint8_t* buf, uint16_t val) {
   *buf++ = (uint8_t) val;
   *buf++ = (uint8_t) (val >> 8);
 }
 
-void Append4LE(uint8_t* buf, uint32_t val) {
+// TODO: put this somewhere with the big-endian equivalent used by JDWP.
+static void Append4LE(uint8_t* buf, uint32_t val) {
   *buf++ = (uint8_t) val;
   *buf++ = (uint8_t) (val >> 8);
   *buf++ = (uint8_t) (val >> 16);
   *buf++ = (uint8_t) (val >> 24);
 }
 
-void Append8LE(uint8_t* buf, uint64_t val) {
+// TODO: put this somewhere with the big-endian equivalent used by JDWP.
+static void Append8LE(uint8_t* buf, uint64_t val) {
   *buf++ = (uint8_t) val;
   *buf++ = (uint8_t) (val >> 8);
   *buf++ = (uint8_t) (val >> 16);
@@ -391,8 +394,10 @@ void Trace::DumpMethodList(std::ostream& os) {
 }
 
 static void DumpThread(Thread* t, void* arg) {
-  std::ostream* os = reinterpret_cast<std::ostream*>(arg);
-  *os << StringPrintf("%d\t%s\n", t->GetTid(), t->GetThreadName()->ToModifiedUtf8().c_str());
+  std::ostream& os = *reinterpret_cast<std::ostream*>(arg);
+  std::string name;
+  t->GetThreadName(name);
+  os << t->GetTid() << "\t" << name << "\n";
 }
 
 void Trace::DumpThreadList(std::ostream& os) {
```