-rw-r--r--  build/Android.gtest.mk      |   1
-rw-r--r--  runtime/debugger.cc         | 190
-rw-r--r--  runtime/debugger.h          |  40
-rw-r--r--  runtime/jdwp/jdwp_event.cc  |  20
-rw-r--r--  runtime/monitor.cc          |   7
-rw-r--r--  runtime/monitor_test.cc     | 380
-rw-r--r--  runtime/runtime.cc          |   1
7 files changed, 528 insertions(+), 111 deletions(-)
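The debugger.cc/debugger.h changes below replace the raw mirror::ArtMethod* held by Breakpoint and DeoptimizationRequest with a jmethodID that is encoded and decoded through ScopedObjectAccessUnchecked on each access; because the debugger no longer holds raw method pointers, Dbg::VisitRoots and its call from Runtime::VisitConcurrentRoots can be dropped. A minimal standalone sketch of that pattern follows — Registry, Method and the simplified Breakpoint are hypothetical stand-ins for illustration only, not ART types:

    // Illustrative sketch only: Registry/Method are hypothetical stand-ins for
    // ART's jmethodID encode/decode machinery, not real ART classes.
    #include <cstdint>
    #include <unordered_map>

    struct Method { const char* name; };

    // Hands out stable ids; holders keep the id, not the pointer.
    class Registry {
     public:
      uint64_t Encode(Method* m) {
        uint64_t id = next_id_++;
        table_[id] = m;
        return id;
      }
      Method* Decode(uint64_t id) const {
        auto it = table_.find(id);
        return it == table_.end() ? nullptr : it->second;
      }
     private:
      uint64_t next_id_ = 1;
      std::unordered_map<uint64_t, Method*> table_;
    };

    // Mirrors the shape of the new Breakpoint class in debugger.cc: only an id
    // and a dex pc are stored, and the method is re-resolved on every access,
    // so the breakpoint itself never has to be visited as a GC root.
    class Breakpoint {
     public:
      Breakpoint(Registry* reg, Method* m, uint32_t dex_pc)
          : registry_(reg), method_id_(reg->Encode(m)), dex_pc_(dex_pc) {}
      Method* GetMethod() const { return registry_->Decode(method_id_); }
      uint32_t DexPc() const { return dex_pc_; }
     private:
      Registry* registry_;
      uint64_t method_id_;
      uint32_t dex_pc_;
    };

    int main() {
      Registry reg;
      Method foo{"foo"};
      Breakpoint bp(&reg, &foo, 0x2a);
      return bp.GetMethod() == &foo ? 0 : 1;  // resolves back to the same method
    }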
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 183ec37cd6..10cd1cc65a 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -110,6 +110,7 @@ RUNTIME_GTEST_COMMON_SRC_FILES := \
   runtime/mem_map_test.cc \
   runtime/mirror/dex_cache_test.cc \
   runtime/mirror/object_test.cc \
+  runtime/monitor_test.cc \
   runtime/parsed_options_test.cc \
   runtime/reference_table_test.cc \
   runtime/thread_pool_test.cc \
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 11415b49e0..6161aff647 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -170,27 +170,47 @@ class AllocRecord {
   AllocRecordStackTraceElement stack_[kMaxAllocRecordStackDepth];  // Unused entries have NULL method.
 };
 
-struct Breakpoint {
-  // The location of this breakpoint.
-  mirror::ArtMethod* method;
-  uint32_t dex_pc;
+class Breakpoint {
+ public:
+  Breakpoint(mirror::ArtMethod* method, uint32_t dex_pc, bool need_full_deoptimization)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : method_(nullptr), dex_pc_(dex_pc), need_full_deoptimization_(need_full_deoptimization) {
+    ScopedObjectAccessUnchecked soa(Thread::Current());
+    method_ = soa.EncodeMethod(method);
+  }
 
-  // Indicates whether breakpoint needs full deoptimization or selective deoptimization.
-  bool need_full_deoptimization;
+  Breakpoint(const Breakpoint& other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : method_(nullptr), dex_pc_(other.dex_pc_),
+        need_full_deoptimization_(other.need_full_deoptimization_) {
+    ScopedObjectAccessUnchecked soa(Thread::Current());
+    method_ = soa.EncodeMethod(other.Method());
+  }
 
-  Breakpoint(mirror::ArtMethod* method, uint32_t dex_pc, bool need_full_deoptimization)
-    : method(method), dex_pc(dex_pc), need_full_deoptimization(need_full_deoptimization) {}
+  mirror::ArtMethod* Method() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    ScopedObjectAccessUnchecked soa(Thread::Current());
+    return soa.DecodeMethod(method_);
+  }
 
-  void VisitRoots(RootCallback* callback, void* arg) {
-    if (method != nullptr) {
-      callback(reinterpret_cast<mirror::Object**>(&method), arg, 0, kRootDebugger);
-    }
+  uint32_t DexPc() const {
+    return dex_pc_;
+  }
+
+  bool NeedFullDeoptimization() const {
+    return need_full_deoptimization_;
   }
+
+ private:
+  // The location of this breakpoint.
+  jmethodID method_;
+  uint32_t dex_pc_;
+
+  // Indicates whether breakpoint needs full deoptimization or selective deoptimization.
+  bool need_full_deoptimization_;
 };
 
-static std::ostream& operator<<(std::ostream& os, const Breakpoint& rhs)
+static std::ostream& operator<<(std::ostream& os, Breakpoint& rhs)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  os << StringPrintf("Breakpoint[%s @%#x]", PrettyMethod(rhs.method).c_str(), rhs.dex_pc);
+  os << StringPrintf("Breakpoint[%s @%#x]", PrettyMethod(rhs.Method()).c_str(), rhs.DexPc());
   return os;
 }
 
@@ -349,18 +369,12 @@ void SingleStepControl::Clear() {
   dex_pcs.clear();
 }
 
-void DeoptimizationRequest::VisitRoots(RootCallback* callback, void* arg) {
-  if (method != nullptr) {
-    callback(reinterpret_cast<mirror::Object**>(&method), arg, 0, kRootDebugger);
-  }
-}
-
 static bool IsBreakpoint(const mirror::ArtMethod* m, uint32_t dex_pc)
     LOCKS_EXCLUDED(Locks::breakpoint_lock_)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   MutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
   for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) {
-    if (gBreakpoints[i].method == m && gBreakpoints[i].dex_pc == dex_pc) {
+    if (gBreakpoints[i].DexPc() == dex_pc && gBreakpoints[i].Method() == m) {
       VLOG(jdwp) << "Hit breakpoint #" << i << ": " << gBreakpoints[i];
       return true;
     }
@@ -641,21 +655,6 @@ void Dbg::StartJdwp() {
   }
 }
 
-void Dbg::VisitRoots(RootCallback* callback, void* arg) {
-  {
-    MutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
-    for (Breakpoint& bp : gBreakpoints) {
-      bp.VisitRoots(callback, arg);
-    }
-  }
-  if (deoptimization_lock_ != nullptr) {  // only true if the debugger is started.
-    MutexLock mu(Thread::Current(), *deoptimization_lock_);
-    for (DeoptimizationRequest& req : deoptimization_requests_) {
-      req.VisitRoots(callback, arg);
-    }
-  }
-}
-
 void Dbg::StopJdwp() {
   // Prevent the JDWP thread from processing JDWP incoming packets after we close the connection.
   Disposed();
@@ -2844,22 +2843,22 @@ size_t* Dbg::GetReferenceCounterForEvent(uint32_t instrumentation_event) {
 // Process request while all mutator threads are suspended.
 void Dbg::ProcessDeoptimizationRequest(const DeoptimizationRequest& request) {
   instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
-  switch (request.kind) {
+  switch (request.GetKind()) {
     case DeoptimizationRequest::kNothing:
       LOG(WARNING) << "Ignoring empty deoptimization request.";
       break;
     case DeoptimizationRequest::kRegisterForEvent:
       VLOG(jdwp) << StringPrintf("Add debugger as listener for instrumentation event 0x%x",
-                                 request.instrumentation_event);
-      instrumentation->AddListener(&gDebugInstrumentationListener, request.instrumentation_event);
-      instrumentation_events_ |= request.instrumentation_event;
+                                 request.InstrumentationEvent());
+      instrumentation->AddListener(&gDebugInstrumentationListener, request.InstrumentationEvent());
+      instrumentation_events_ |= request.InstrumentationEvent();
       break;
     case DeoptimizationRequest::kUnregisterForEvent:
       VLOG(jdwp) << StringPrintf("Remove debugger as listener for instrumentation event 0x%x",
-                                 request.instrumentation_event);
+                                 request.InstrumentationEvent());
       instrumentation->RemoveListener(&gDebugInstrumentationListener,
-                                      request.instrumentation_event);
-      instrumentation_events_ &= ~request.instrumentation_event;
+                                      request.InstrumentationEvent());
+      instrumentation_events_ &= ~request.InstrumentationEvent();
       break;
     case DeoptimizationRequest::kFullDeoptimization:
       VLOG(jdwp) << "Deoptimize the world ...";
@@ -2872,17 +2871,17 @@ void Dbg::ProcessDeoptimizationRequest(const DeoptimizationRequest& request) {
       VLOG(jdwp) << "Undeoptimize the world DONE";
       break;
     case DeoptimizationRequest::kSelectiveDeoptimization:
-      VLOG(jdwp) << "Deoptimize method " << PrettyMethod(request.method) << " ...";
-      instrumentation->Deoptimize(request.method);
-      VLOG(jdwp) << "Deoptimize method " << PrettyMethod(request.method) << " DONE";
+      VLOG(jdwp) << "Deoptimize method " << PrettyMethod(request.Method()) << " ...";
+      instrumentation->Deoptimize(request.Method());
+      VLOG(jdwp) << "Deoptimize method " << PrettyMethod(request.Method()) << " DONE";
       break;
     case DeoptimizationRequest::kSelectiveUndeoptimization:
-      VLOG(jdwp) << "Undeoptimize method " << PrettyMethod(request.method) << " ...";
-      instrumentation->Undeoptimize(request.method);
-      VLOG(jdwp) << "Undeoptimize method " << PrettyMethod(request.method) << " DONE";
+      VLOG(jdwp) << "Undeoptimize method " << PrettyMethod(request.Method()) << " ...";
+      instrumentation->Undeoptimize(request.Method());
+      VLOG(jdwp) << "Undeoptimize method " << PrettyMethod(request.Method()) << " DONE";
       break;
     default:
-      LOG(FATAL) << "Unsupported deoptimization request kind " << request.kind;
+      LOG(FATAL) << "Unsupported deoptimization request kind " << request.GetKind();
       break;
   }
 }
@@ -2899,8 +2898,8 @@ void Dbg::ProcessDelayedFullUndeoptimizations() {
   MutexLock mu(Thread::Current(), *deoptimization_lock_);
   while (delayed_full_undeoptimization_count_ > 0) {
     DeoptimizationRequest req;
-    req.kind = DeoptimizationRequest::kFullUndeoptimization;
-    req.method = nullptr;
+    req.SetKind(DeoptimizationRequest::kFullUndeoptimization);
+    req.SetMethod(nullptr);
     RequestDeoptimizationLocked(req);
     --delayed_full_undeoptimization_count_;
   }
@@ -2909,7 +2908,7 @@
 }
 
 void Dbg::RequestDeoptimization(const DeoptimizationRequest& req) {
-  if (req.kind == DeoptimizationRequest::kNothing) {
+  if (req.GetKind() == DeoptimizationRequest::kNothing) {
     // Nothing to do.
     return;
   }
@@ -2918,35 +2917,35 @@ void Dbg::RequestDeoptimization(const DeoptimizationRequest& req) {
 }
 
 void Dbg::RequestDeoptimizationLocked(const DeoptimizationRequest& req) {
-  switch (req.kind) {
+  switch (req.GetKind()) {
     case DeoptimizationRequest::kRegisterForEvent: {
-      DCHECK_NE(req.instrumentation_event, 0u);
-      size_t* counter = GetReferenceCounterForEvent(req.instrumentation_event);
+      DCHECK_NE(req.InstrumentationEvent(), 0u);
+      size_t* counter = GetReferenceCounterForEvent(req.InstrumentationEvent());
       CHECK(counter != nullptr) << StringPrintf("No counter for instrumentation event 0x%x",
-                                                req.instrumentation_event);
+                                                req.InstrumentationEvent());
       if (*counter == 0) {
         VLOG(jdwp) << StringPrintf("Queue request #%zd to start listening to instrumentation event 0x%x",
-                                   deoptimization_requests_.size(), req.instrumentation_event);
+                                   deoptimization_requests_.size(), req.InstrumentationEvent());
         deoptimization_requests_.push_back(req);
       }
       *counter = *counter + 1;
       break;
     }
     case DeoptimizationRequest::kUnregisterForEvent: {
-      DCHECK_NE(req.instrumentation_event, 0u);
-      size_t* counter = GetReferenceCounterForEvent(req.instrumentation_event);
+      DCHECK_NE(req.InstrumentationEvent(), 0u);
+      size_t* counter = GetReferenceCounterForEvent(req.InstrumentationEvent());
       CHECK(counter != nullptr) << StringPrintf("No counter for instrumentation event 0x%x",
-                                                req.instrumentation_event);
+                                                req.InstrumentationEvent());
       *counter = *counter - 1;
       if (*counter == 0) {
         VLOG(jdwp) << StringPrintf("Queue request #%zd to stop listening to instrumentation event 0x%x",
-                                   deoptimization_requests_.size(), req.instrumentation_event);
+                                   deoptimization_requests_.size(), req.InstrumentationEvent());
         deoptimization_requests_.push_back(req);
       }
       break;
     }
     case DeoptimizationRequest::kFullDeoptimization: {
-      DCHECK(req.method == nullptr);
+      DCHECK(req.Method() == nullptr);
       if (full_deoptimization_event_count_ == 0) {
         VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
                    << " for full deoptimization";
@@ -2956,7 +2955,7 @@ void Dbg::RequestDeoptimizationLocked(const DeoptimizationRequest& req) {
       break;
     }
     case DeoptimizationRequest::kFullUndeoptimization: {
-      DCHECK(req.method == nullptr);
+      DCHECK(req.Method() == nullptr);
       DCHECK_GT(full_deoptimization_event_count_, 0U);
       --full_deoptimization_event_count_;
       if (full_deoptimization_event_count_ == 0) {
@@ -2967,21 +2966,21 @@ void Dbg::RequestDeoptimizationLocked(const DeoptimizationRequest& req) {
       break;
    }
     case DeoptimizationRequest::kSelectiveDeoptimization: {
-      DCHECK(req.method != nullptr);
+      DCHECK(req.Method() != nullptr);
       VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
-                 << " for deoptimization of " << PrettyMethod(req.method);
+                 << " for deoptimization of " << PrettyMethod(req.Method());
       deoptimization_requests_.push_back(req);
       break;
     }
     case DeoptimizationRequest::kSelectiveUndeoptimization: {
-      DCHECK(req.method != nullptr);
+      DCHECK(req.Method() != nullptr);
       VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
-                 << " for undeoptimization of " << PrettyMethod(req.method);
+                 << " for undeoptimization of " << PrettyMethod(req.Method());
       deoptimization_requests_.push_back(req);
       break;
     }
     default: {
-      LOG(FATAL) << "Unknown deoptimization request kind " << req.kind;
+      LOG(FATAL) << "Unknown deoptimization request kind " << req.GetKind();
       break;
     }
   }
@@ -3005,7 +3004,7 @@ void Dbg::ManageDeoptimization() {
   {
     MutexLock mu(self, *deoptimization_lock_);
     size_t req_index = 0;
-    for (const DeoptimizationRequest& request : deoptimization_requests_) {
+    for (DeoptimizationRequest& request : deoptimization_requests_) {
       VLOG(jdwp) << "Process deoptimization request #" << req_index++;
       ProcessDeoptimizationRequest(request);
     }
@@ -3036,9 +3035,9 @@ static bool IsMethodPossiblyInlined(Thread* self, mirror::ArtMethod* m)
 }
 
 static const Breakpoint* FindFirstBreakpointForMethod(mirror::ArtMethod* m)
-    EXCLUSIVE_LOCKS_REQUIRED(Locks::breakpoint_lock_) {
-  for (const Breakpoint& breakpoint : gBreakpoints) {
-    if (breakpoint.method == m) {
+    EXCLUSIVE_LOCKS_REQUIRED(Locks::breakpoint_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  for (Breakpoint& breakpoint : gBreakpoints) {
+    if (breakpoint.Method() == m) {
       return &breakpoint;
     }
   }
@@ -3050,7 +3049,7 @@ static void SanityCheckExistingBreakpoints(mirror::ArtMethod* m, bool need_full_
     EXCLUSIVE_LOCKS_REQUIRED(Locks::breakpoint_lock_) {
   if (kIsDebugBuild) {
     for (const Breakpoint& breakpoint : gBreakpoints) {
-      CHECK_EQ(need_full_deoptimization, breakpoint.need_full_deoptimization);
+      CHECK_EQ(need_full_deoptimization, breakpoint.NeedFullDeoptimization());
     }
     if (need_full_deoptimization) {
       // We should have deoptimized everything but not "selectively" deoptimized this method.
@@ -3080,18 +3079,18 @@ void Dbg::WatchLocation(const JDWP::JdwpLocation* location, DeoptimizationReques
     // inlined, we deoptimize everything; otherwise we deoptimize only this method.
     need_full_deoptimization = IsMethodPossiblyInlined(self, m);
     if (need_full_deoptimization) {
-      req->kind = DeoptimizationRequest::kFullDeoptimization;
-      req->method = nullptr;
+      req->SetKind(DeoptimizationRequest::kFullDeoptimization);
+      req->SetMethod(nullptr);
     } else {
-      req->kind = DeoptimizationRequest::kSelectiveDeoptimization;
-      req->method = m;
+      req->SetKind(DeoptimizationRequest::kSelectiveDeoptimization);
+      req->SetMethod(m);
     }
   } else {
     // There is at least one breakpoint for this method: we don't need to deoptimize.
-    req->kind = DeoptimizationRequest::kNothing;
-    req->method = nullptr;
+    req->SetKind(DeoptimizationRequest::kNothing);
+    req->SetMethod(nullptr);
 
-    need_full_deoptimization = existing_breakpoint->need_full_deoptimization;
+    need_full_deoptimization = existing_breakpoint->NeedFullDeoptimization();
     SanityCheckExistingBreakpoints(m, need_full_deoptimization);
   }
 
@@ -3103,15 +3102,14 @@ void Dbg::WatchLocation(const JDWP::JdwpLocation* location, DeoptimizationReques
 // Uninstalls a breakpoint at the specified location. Also indicates through the deoptimization
 // request if we need to undeoptimize.
 void Dbg::UnwatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequest* req) {
+  MutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
   mirror::ArtMethod* m = FromMethodId(location->method_id);
   DCHECK(m != nullptr) << "No method for method id " << location->method_id;
-
-  MutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
   bool need_full_deoptimization = false;
   for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) {
-    if (gBreakpoints[i].method == m && gBreakpoints[i].dex_pc == location->dex_pc) {
+    if (gBreakpoints[i].DexPc() == location->dex_pc && gBreakpoints[i].Method() == m) {
       VLOG(jdwp) << "Removed breakpoint #" << i << ": " << gBreakpoints[i];
-      need_full_deoptimization = gBreakpoints[i].need_full_deoptimization;
+      need_full_deoptimization = gBreakpoints[i].NeedFullDeoptimization();
       DCHECK_NE(need_full_deoptimization, Runtime::Current()->GetInstrumentation()->IsDeoptimized(m));
       gBreakpoints.erase(gBreakpoints.begin() + i);
       break;
@@ -3122,17 +3120,17 @@ void Dbg::UnwatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequ
     // There is no more breakpoint on this method: we need to undeoptimize.
     if (need_full_deoptimization) {
       // This method required full deoptimization: we need to undeoptimize everything.
-      req->kind = DeoptimizationRequest::kFullUndeoptimization;
-      req->method = nullptr;
+      req->SetKind(DeoptimizationRequest::kFullUndeoptimization);
+      req->SetMethod(nullptr);
     } else {
       // This method required selective deoptimization: we need to undeoptimize only that method.
-      req->kind = DeoptimizationRequest::kSelectiveUndeoptimization;
-      req->method = m;
+      req->SetKind(DeoptimizationRequest::kSelectiveUndeoptimization);
+      req->SetMethod(m);
     }
   } else {
     // There is at least one breakpoint for this method: we don't need to undeoptimize.
-    req->kind = DeoptimizationRequest::kNothing;
-    req->method = nullptr;
+    req->SetKind(DeoptimizationRequest::kNothing);
+    req->SetMethod(nullptr);
     SanityCheckExistingBreakpoints(m, need_full_deoptimization);
   }
 }
@@ -4590,4 +4588,14 @@ jbyteArray Dbg::GetRecentAllocations() {
   return result;
 }
 
+mirror::ArtMethod* DeoptimizationRequest::Method() const {
+  ScopedObjectAccessUnchecked soa(Thread::Current());
+  return soa.DecodeMethod(method_);
+}
+
+void DeoptimizationRequest::SetMethod(mirror::ArtMethod* m) {
+  ScopedObjectAccessUnchecked soa(Thread::Current());
+  method_ = soa.EncodeMethod(m);
+}
+
 }  // namespace art
diff --git a/runtime/debugger.h b/runtime/debugger.h
index 2589638461..1d3668c1f6 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -131,7 +131,8 @@ struct SingleStepControl {
 };
 
 // TODO rename to InstrumentationRequest.
-struct DeoptimizationRequest {
+class DeoptimizationRequest {
+ public:
   enum Kind {
     kNothing,                   // no action.
     kRegisterForEvent,          // start listening for instrumentation event.
@@ -142,21 +143,48 @@ struct DeoptimizationRequest {
     kSelectiveUndeoptimization  // undeoptimize one method.
   };
 
-  DeoptimizationRequest() : kind(kNothing), instrumentation_event(0), method(nullptr) {}
+  DeoptimizationRequest() : kind_(kNothing), instrumentation_event_(0), method_(nullptr) {}
+
+  DeoptimizationRequest(const DeoptimizationRequest& other)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : kind_(other.kind_), instrumentation_event_(other.instrumentation_event_) {
+    // Create a new JNI global reference for the method.
+    SetMethod(other.Method());
+  }
+
+  mirror::ArtMethod* Method() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  void SetMethod(mirror::ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void VisitRoots(RootCallback* callback, void* arg);
+  // Name 'Kind()' would collide with the above enum name.
+  Kind GetKind() const {
+    return kind_;
+  }
 
-  Kind kind;
+  void SetKind(Kind kind) {
+    kind_ = kind;
+  }
+
+  uint32_t InstrumentationEvent() const {
+    return instrumentation_event_;
+  }
+
+  void SetInstrumentationEvent(uint32_t instrumentation_event) {
+    instrumentation_event_ = instrumentation_event;
+  }
+
+ private:
+  Kind kind_;
 
   // TODO we could use a union to hold the instrumentation_event and the method since they
   // respectively have sense only for kRegisterForEvent/kUnregisterForEvent and
   // kSelectiveDeoptimization/kSelectiveUndeoptimization.
 
   // Event to start or stop listening to. Only for kRegisterForEvent and kUnregisterForEvent.
-  uint32_t instrumentation_event;
+  uint32_t instrumentation_event_;
 
   // Method for selective deoptimization.
-  mirror::ArtMethod* method;
+  jmethodID method_;
 };
 
 class Dbg {
diff --git a/runtime/jdwp/jdwp_event.cc b/runtime/jdwp/jdwp_event.cc
index 86c84e8b0f..36fbed4ea2 100644
--- a/runtime/jdwp/jdwp_event.cc
+++ b/runtime/jdwp/jdwp_event.cc
@@ -192,17 +192,17 @@ JdwpError JdwpState::RegisterEvent(JdwpEvent* pEvent) {
       }
     }
     if (NeedsFullDeoptimization(pEvent->eventKind)) {
-      CHECK_EQ(req.kind, DeoptimizationRequest::kNothing);
-      CHECK(req.method == nullptr);
-      req.kind = DeoptimizationRequest::kFullDeoptimization;
+      CHECK_EQ(req.GetKind(), DeoptimizationRequest::kNothing);
+      CHECK(req.Method() == nullptr);
+      req.SetKind(DeoptimizationRequest::kFullDeoptimization);
     }
     Dbg::RequestDeoptimization(req);
   }
   uint32_t instrumentation_event = GetInstrumentationEventFor(pEvent->eventKind);
   if (instrumentation_event != 0) {
     DeoptimizationRequest req;
-    req.kind = DeoptimizationRequest::kRegisterForEvent;
-    req.instrumentation_event = instrumentation_event;
+    req.SetKind(DeoptimizationRequest::kRegisterForEvent);
+    req.SetInstrumentationEvent(instrumentation_event);
     Dbg::RequestDeoptimization(req);
   }
 
@@ -274,17 +274,17 @@ void JdwpState::UnregisterEvent(JdwpEvent* pEvent) {
       // deoptimization and only the last single-step will trigger a full undeoptimization.
       Dbg::DelayFullUndeoptimization();
     } else if (NeedsFullDeoptimization(pEvent->eventKind)) {
-      CHECK_EQ(req.kind, DeoptimizationRequest::kNothing);
-      CHECK(req.method == nullptr);
-      req.kind = DeoptimizationRequest::kFullUndeoptimization;
+      CHECK_EQ(req.GetKind(), DeoptimizationRequest::kNothing);
+      CHECK(req.Method() == nullptr);
+      req.SetKind(DeoptimizationRequest::kFullUndeoptimization);
     }
     Dbg::RequestDeoptimization(req);
   }
   uint32_t instrumentation_event = GetInstrumentationEventFor(pEvent->eventKind);
   if (instrumentation_event != 0) {
     DeoptimizationRequest req;
-    req.kind = DeoptimizationRequest::kUnregisterForEvent;
-    req.instrumentation_event = instrumentation_event;
+    req.SetKind(DeoptimizationRequest::kUnregisterForEvent);
+    req.SetInstrumentationEvent(instrumentation_event);
     Dbg::RequestDeoptimization(req);
   }
 
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 999a9e504b..eb62a694e0 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -401,8 +401,8 @@ void Monitor::Wait(Thread* self, int64_t ms, int32_t ns,
   // Make sure that we hold the lock.
   if (owner_ != self) {
-    ThrowIllegalMonitorStateExceptionF("object not locked by thread before wait()");
     monitor_lock_.Unlock(self);
+    ThrowIllegalMonitorStateExceptionF("object not locked by thread before wait()");
     return;
   }
 
@@ -414,10 +414,10 @@ void Monitor::Wait(Thread* self, int64_t ms, int32_t ns,
 
   // Enforce the timeout range.
   if (ms < 0 || ns < 0 || ns > 999999) {
+    monitor_lock_.Unlock(self);
     ThrowLocation throw_location = self->GetCurrentLocationForThrow();
     self->ThrowNewExceptionF(throw_location, "Ljava/lang/IllegalArgumentException;",
                              "timeout arguments out of range: ms=%" PRId64 " ns=%d", ms, ns);
-    monitor_lock_.Unlock(self);
     return;
   }
 
@@ -512,6 +512,8 @@ void Monitor::Wait(Thread* self, int64_t ms, int32_t ns,
   --num_waiters_;
   RemoveFromWaitSet(self);
 
+  monitor_lock_.Unlock(self);
+
   if (was_interrupted) {
     /*
      * We were interrupted while waiting, or somebody interrupted an
@@ -529,7 +531,6 @@ void Monitor::Wait(Thread* self, int64_t ms, int32_t ns,
       self->ThrowNewException(throw_location, "Ljava/lang/InterruptedException;", NULL);
     }
   }
-  monitor_lock_.Unlock(self);
 }
 
 void Monitor::Notify(Thread* self) {
diff --git a/runtime/monitor_test.cc b/runtime/monitor_test.cc
new file mode 100644
index 0000000000..bdba494e14
--- /dev/null
+++ b/runtime/monitor_test.cc
@@ -0,0 +1,380 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "barrier.h"
+#include "monitor.h"
+
+#include <string>
+
+#include "atomic.h"
+#include "common_runtime_test.h"
+#include "handle_scope-inl.h"
+#include "mirror/class-inl.h"
+#include "mirror/string-inl.h"  // Strings are easiest to allocate
+#include "thread_pool.h"
+#include "utils.h"
+
+namespace art {
+
+class MonitorTest : public CommonRuntimeTest {
+ protected:
+  void SetUpRuntimeOptions(Runtime::Options *options) OVERRIDE {
+    // Use a smaller heap
+    for (std::pair<std::string, const void*>& pair : *options) {
+      if (pair.first.find("-Xmx") == 0) {
+        pair.first = "-Xmx4M";  // Smallest we can go.
+      }
+    }
+    options->push_back(std::make_pair("-Xint", nullptr));
+  }
+ public:
+  std::unique_ptr<Monitor> monitor_;
+  Handle<mirror::String> object_;
+  Handle<mirror::String> second_object_;
+  Handle<mirror::String> watchdog_object_;
+  // One exception test is for waiting on another Thread's lock. This is used to race-free &
+  // loop-free pass
+  Thread* thread_;
+  std::unique_ptr<Barrier> barrier_;
+  std::unique_ptr<Barrier> complete_barrier_;
+  bool completed_;
+};
+
+// Fill the heap.
+static const size_t kMaxHandles = 1000000;  // Use arbitrary large amount for now.
+static void FillHeap(Thread* self, ClassLinker* class_linker,
+                     std::unique_ptr<StackHandleScope<kMaxHandles>>* hsp,
+                     std::vector<Handle<mirror::Object>>* handles)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  Runtime::Current()->GetHeap()->SetIdealFootprint(1 * GB);
+
+  hsp->reset(new StackHandleScope<kMaxHandles>(self));
+  // Class java.lang.Object.
+  Handle<mirror::Class> c((*hsp)->NewHandle(class_linker->FindSystemClass(self,
+                                                                          "Ljava/lang/Object;")));
+  // Array helps to fill memory faster.
+  Handle<mirror::Class> ca((*hsp)->NewHandle(class_linker->FindSystemClass(self,
+                                                                           "[Ljava/lang/Object;")));
+
+  // Start allocating with 128K
+  size_t length = 128 * KB / 4;
+  while (length > 10) {
+    Handle<mirror::Object> h((*hsp)->NewHandle<mirror::Object>(
+        mirror::ObjectArray<mirror::Object>::Alloc(self, ca.Get(), length / 4)));
+    if (self->IsExceptionPending() || h.Get() == nullptr) {
+      self->ClearException();
+
+      // Try a smaller length
+      length = length / 8;
+      // Use at most half the reported free space.
+      size_t mem = Runtime::Current()->GetHeap()->GetFreeMemory();
+      if (length * 8 > mem) {
+        length = mem / 8;
+      }
+    } else {
+      handles->push_back(h);
+    }
+  }
+
+  // Allocate simple objects till it fails.
+  while (!self->IsExceptionPending()) {
+    Handle<mirror::Object> h = (*hsp)->NewHandle<mirror::Object>(c->AllocObject(self));
+    if (!self->IsExceptionPending() && h.Get() != nullptr) {
+      handles->push_back(h);
+    }
+  }
+  self->ClearException();
+}
+
+// Check that an exception can be thrown correctly.
+// This test is potentially racy, but the timeout is long enough that it should work.
+
+class CreateTask : public Task {
+ public:
+  explicit CreateTask(MonitorTest* monitor_test, uint64_t initial_sleep, int64_t millis,
+                      bool expected) :
+      monitor_test_(monitor_test), initial_sleep_(initial_sleep), millis_(millis),
+      expected_(expected) {}
+
+  void Run(Thread* self) {
+    {
+      ScopedObjectAccess soa(self);
+
+      monitor_test_->thread_ = self;        // Pass the Thread.
+      monitor_test_->object_.Get()->MonitorEnter(self);  // Lock the object. This should transition
+      LockWord lock_after = monitor_test_->object_.Get()->GetLockWord(false);  // it to thinLocked.
+      LockWord::LockState new_state = lock_after.GetState();
+
+      // Cannot use ASSERT only, as analysis thinks we'll keep holding the mutex.
+      if (LockWord::LockState::kThinLocked != new_state) {
+        monitor_test_->object_.Get()->MonitorExit(self);         // To appease analysis.
+        ASSERT_EQ(LockWord::LockState::kThinLocked, new_state);  // To fail the test.
+        return;
+      }
+
+      // Force a fat lock by running identity hashcode to fill up lock word.
+      monitor_test_->object_.Get()->IdentityHashCode();
+      LockWord lock_after2 = monitor_test_->object_.Get()->GetLockWord(false);
+      LockWord::LockState new_state2 = lock_after2.GetState();
+
+      // Cannot use ASSERT only, as analysis thinks we'll keep holding the mutex.
+      if (LockWord::LockState::kFatLocked != new_state2) {
+        monitor_test_->object_.Get()->MonitorExit(self);         // To appease analysis.
+        ASSERT_EQ(LockWord::LockState::kFatLocked, new_state2);  // To fail the test.
+        return;
+      }
+    }  // Need to drop the mutator lock to use the barrier.
+
+    monitor_test_->barrier_->Wait(self);  // Let the other thread know we're done.
+
+    {
+      ScopedObjectAccess soa(self);
+
+      // Give the other task a chance to do its thing.
+      NanoSleep(initial_sleep_ * 1000 * 1000);
+
+      // Now try to Wait on the Monitor.
+      Monitor::Wait(self, monitor_test_->object_.Get(), millis_, 0, true,
+                    ThreadState::kTimedWaiting);
+
+      // Check the exception status against what we expect.
+      EXPECT_EQ(expected_, self->IsExceptionPending());
+      if (expected_) {
+        self->ClearException();
+      }
+    }
+
+    monitor_test_->complete_barrier_->Wait(self);  // Wait for test completion.
+
+    {
+      ScopedObjectAccess soa(self);
+      monitor_test_->object_.Get()->MonitorExit(self);  // Release the object. Appeases analysis.
+    }
+  }
+
+  void Finalize() {
+    delete this;
+  }
+
+ private:
+  MonitorTest* monitor_test_;
+  uint64_t initial_sleep_;
+  int64_t millis_;
+  bool expected_;
+};
+
+
+class UseTask : public Task {
+ public:
+  UseTask(MonitorTest* monitor_test, uint64_t initial_sleep, int64_t millis, bool expected) :
+      monitor_test_(monitor_test), initial_sleep_(initial_sleep), millis_(millis),
+      expected_(expected) {}
+
+  void Run(Thread* self) {
+    monitor_test_->barrier_->Wait(self);  // Wait for the other thread to set up the monitor.
+
+    {
+      ScopedObjectAccess soa(self);
+
+      // Give the other task a chance to do its thing.
+      NanoSleep(initial_sleep_ * 1000 * 1000);
+
+      Monitor::Wait(self, monitor_test_->object_.Get(), millis_, 0, true,
+                    ThreadState::kTimedWaiting);
+
+      // Check the exception status against what we expect.
+      EXPECT_EQ(expected_, self->IsExceptionPending());
+      if (expected_) {
+        self->ClearException();
+      }
+    }
+
+    monitor_test_->complete_barrier_->Wait(self);  // Wait for test completion.
+  }
+
+  void Finalize() {
+    delete this;
+  }
+
+ private:
+  MonitorTest* monitor_test_;
+  uint64_t initial_sleep_;
+  int64_t millis_;
+  bool expected_;
+};
+
+class InterruptTask : public Task {
+ public:
+  InterruptTask(MonitorTest* monitor_test, uint64_t initial_sleep, uint64_t millis) :
+      monitor_test_(monitor_test), initial_sleep_(initial_sleep), millis_(millis) {}
+
+  void Run(Thread* self) {
+    monitor_test_->barrier_->Wait(self);  // Wait for the other thread to set up the monitor.
+
+    {
+      ScopedObjectAccess soa(self);
+
+      // Give the other task a chance to do its thing.
+      NanoSleep(initial_sleep_ * 1000 * 1000);
+
+      // Interrupt the other thread.
+      monitor_test_->thread_->Interrupt(self);
+
+      // Give it some more time to get to the exception code.
+      NanoSleep(millis_ * 1000 * 1000);
+
+      // Now try to Wait.
+      Monitor::Wait(self, monitor_test_->object_.Get(), 10, 0, true,
+                    ThreadState::kTimedWaiting);
+
+      // No check here, as depending on scheduling we may or may not fail.
+      if (self->IsExceptionPending()) {
+        self->ClearException();
+      }
+    }
+
+    monitor_test_->complete_barrier_->Wait(self);  // Wait for test completion.
+  }
+
+  void Finalize() {
+    delete this;
+  }
+
+ private:
+  MonitorTest* monitor_test_;
+  uint64_t initial_sleep_;
+  uint64_t millis_;
+};
+
+class WatchdogTask : public Task {
+ public:
+  explicit WatchdogTask(MonitorTest* monitor_test) : monitor_test_(monitor_test) {}
+
+  void Run(Thread* self) {
+    ScopedObjectAccess soa(self);
+
+    monitor_test_->watchdog_object_.Get()->MonitorEnter(self);        // Lock the object.
+
+    monitor_test_->watchdog_object_.Get()->Wait(self, 30 * 1000, 0);  // Wait for 30s, or being
+                                                                      // woken up.
+
+    monitor_test_->watchdog_object_.Get()->MonitorExit(self);         // Release the lock.
+
+    if (!monitor_test_->completed_) {
+      LOG(FATAL) << "Watchdog timeout!";
+    }
+  }
+
+  void Finalize() {
+    delete this;
+  }
+
+ private:
+  MonitorTest* monitor_test_;
+};
+
+static void CommonWaitSetup(MonitorTest* test, ClassLinker* class_linker, uint64_t create_sleep,
+                            int64_t c_millis, bool c_expected, bool interrupt, uint64_t use_sleep,
+                            int64_t u_millis, bool u_expected, const char* pool_name) {
+  // First create the object we lock. String is easiest.
+  StackHandleScope<3> hs(Thread::Current());
+  {
+    ScopedObjectAccess soa(Thread::Current());
+    test->object_ = hs.NewHandle(mirror::String::AllocFromModifiedUtf8(Thread::Current(),
+                                                                       "hello, world!"));
+    test->watchdog_object_ = hs.NewHandle(mirror::String::AllocFromModifiedUtf8(Thread::Current(),
+                                                                                "hello, world!"));
+  }
+
+  // Create the barrier used to synchronize.
+  test->barrier_ = std::unique_ptr<Barrier>(new Barrier(2));
+  test->complete_barrier_ = std::unique_ptr<Barrier>(new Barrier(3));
+  test->completed_ = false;
+
+  // Fill the heap.
+  std::unique_ptr<StackHandleScope<kMaxHandles>> hsp;
+  std::vector<Handle<mirror::Object>> handles;
+  {
+    Thread* self = Thread::Current();
+    ScopedObjectAccess soa(self);
+
+    // Our job: Fill the heap, then try Wait.
+    FillHeap(self, class_linker, &hsp, &handles);
+
+    // Now release everything.
+    auto it = handles.begin();
+    auto end = handles.end();
+
+    for ( ; it != end; ++it) {
+      it->Assign(nullptr);
+    }
+  }  // Need to drop the mutator lock to allow barriers.
+
+  Thread* self = Thread::Current();
+  ThreadPool thread_pool(pool_name, 3);
+  thread_pool.AddTask(self, new CreateTask(test, create_sleep, c_millis, c_expected));
+  if (interrupt) {
+    thread_pool.AddTask(self, new InterruptTask(test, use_sleep, static_cast<uint64_t>(u_millis)));
+  } else {
+    thread_pool.AddTask(self, new UseTask(test, use_sleep, u_millis, u_expected));
+  }
+  thread_pool.AddTask(self, new WatchdogTask(test));
+  thread_pool.StartWorkers(self);
+
+  // Wait on completion barrier.
+  test->complete_barrier_->Wait(Thread::Current());
+  test->completed_ = true;
+
+  // Wake the watchdog.
+  {
+    Thread* self = Thread::Current();
+    ScopedObjectAccess soa(self);
+
+    test->watchdog_object_.Get()->MonitorEnter(self);  // Lock the object.
+    test->watchdog_object_.Get()->NotifyAll(self);     // Wake up waiting parties.
+    test->watchdog_object_.Get()->MonitorExit(self);   // Release the lock.
+  }
+
+  thread_pool.StopWorkers(self);
+}
+
+
+// First test: throwing an exception when trying to wait in Monitor with another thread.
+TEST_F(MonitorTest, CheckExceptionsWait1) {
+  // Make the CreateTask wait 10ms, the UseTask wait 10ms.
+  // => The use task will get the lock first and get to self == owner check.
+  CommonWaitSetup(this, class_linker_, 10, 50, false, false, 2, 50, true,
+                  "Monitor test thread pool 1");
+}
+
+// Second test: throwing an exception for invalid wait time.
+TEST_F(MonitorTest, CheckExceptionsWait2) {
+  // Make the CreateTask wait 0ms, the UseTask wait 10ms.
+  // => The create task will get the lock first and get to ms >= 0
+  CommonWaitSetup(this, class_linker_, 0, -1, true, false, 10, 50, true,
+                  "Monitor test thread pool 2");
+}
+
+// Third test: throwing an interrupted-exception.
+TEST_F(MonitorTest, CheckExceptionsWait3) {
+  // Make the CreateTask wait 0ms, then Wait for a long time. Make the InterruptTask wait 10ms,
+  // after which it will interrupt the create task and then wait another 10ms.
+  // => The create task will get to the interrupted-exception throw.
+  CommonWaitSetup(this, class_linker_, 0, 500, true, true, 10, 50, true,
+                  "Monitor test thread pool 3");
+}
+
+}  // namespace art
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 53ddcca469..3b14aaa767 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -930,7 +930,6 @@ void Runtime::VisitConstantRoots(RootCallback* callback, void* arg) {
 void Runtime::VisitConcurrentRoots(RootCallback* callback, void* arg, VisitRootFlags flags) {
   intern_table_->VisitRoots(callback, arg, flags);
   class_linker_->VisitRoots(callback, arg, flags);
-  Dbg::VisitRoots(callback, arg);
   if ((flags & kVisitRootFlagNewRoots) == 0) {
     // Guaranteed to have no new roots in the constant roots.
     VisitConstantRoots(callback, arg);
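The monitor.cc hunks above move monitor_lock_.Unlock(self) so it runs before any exception is raised (not-owner, out-of-range timeout, interruption) instead of after, and monitor_test.cc exercises exactly those paths with the heap nearly full. A minimal standalone sketch of that ordering, using std::mutex and a C++ exception as stand-ins for ART's monitor_lock_ and managed exceptions (an analogy, not ART code):

    // Release the internal lock *before* reporting the error, so the error path
    // (which may block or allocate) never runs while the lock is held.
    #include <cstdint>
    #include <mutex>
    #include <stdexcept>

    class Monitor {
     public:
      void Wait(int64_t ms, int32_t ns) {
        lock_.lock();
        // ... ownership checks would go here ...
        if (ms < 0 || ns < 0 || ns > 999999) {
          lock_.unlock();  // drop the lock first,
          throw std::invalid_argument("timeout arguments out of range");  // then report
        }
        // ... actual waiting elided ...
        lock_.unlock();
      }
     private:
      std::mutex lock_;
    };

    int main() {
      Monitor m;
      try {
        m.Wait(-1, 0);
      } catch (const std::invalid_argument&) {
        // The monitor's internal lock is already free here, so this handler
        // could safely re-enter the monitor without self-deadlocking.
      }
      return 0;
    }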