| field | value |
|---|---|
| author | 2015-10-06 10:32:38 -0700 |
| committer | 2015-10-06 13:24:20 -0700 |
| commit | a6b1ead81603513fd40b77fd72f06d8cb1f35276 (patch) |
| tree | 3ba5add50b6b0034024e65c6de3aea1004ec5161 |
| parent | bcb71a2ce5bcb516f76fc9fe838b61b0c48e1210 (diff) |
Mark breakpoint roots
These roots are used to prevent unloading of classes whose methods have breakpoints set.
Bug: 22720414
Change-Id: I9aee8bcbfdf253607e89dfc55a50ba3f11d99206
| mode | file | changes |
|---|---|---|
| -rw-r--r-- | runtime/debugger.cc | 11 |
| -rw-r--r-- | runtime/debugger.h | 1 |
| -rw-r--r-- | runtime/gc/collector/concurrent_copying.cc | 7 |
| -rw-r--r-- | runtime/runtime.cc | 1 |

4 files changed, 18 insertions, 2 deletions
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index b19381d879..a4f95b6d2f 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -612,7 +612,7 @@ void Dbg::Disconnected() {
     // Since we're going to disable deoptimization, we clear the deoptimization requests queue.
     // This prevents us from having any pending deoptimization request when the debugger attaches
     // to us again while no event has been requested yet.
-    MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
+    MutexLock mu(self, *Locks::deoptimization_lock_);
     deoptimization_requests_.clear();
     full_deoptimization_event_count_ = 0U;
   }
@@ -5043,4 +5043,13 @@ void DeoptimizationRequest::SetMethod(ArtMethod* m) {
   method_ = soa.EncodeMethod(m);
 }
 
+void Dbg::VisitRoots(RootVisitor* visitor) {
+  // Visit breakpoint roots, used to prevent unloading of methods with breakpoints.
+  ReaderMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
+  BufferedRootVisitor<128> root_visitor(visitor, RootInfo(kRootVMInternal));
+  for (Breakpoint& breakpoint : gBreakpoints) {
+    breakpoint.Method()->VisitRoots(root_visitor, sizeof(void*));
+  }
+}
+
 }  // namespace art
diff --git a/runtime/debugger.h b/runtime/debugger.h
index b3617e4bbb..e908304977 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -646,6 +646,7 @@ class Dbg {
   static void DdmSendChunkV(uint32_t type, const iovec* iov, int iov_count)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
+  // Visit breakpoint roots, used to prevent unloading of methods with breakpoints.
   static void VisitRoots(RootVisitor* visitor)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 0a7a69f37e..d2d12af6b4 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -18,6 +18,7 @@
 
 #include "art_field-inl.h"
 #include "base/stl_util.h"
+#include "debugger.h"
 #include "gc/accounting/heap_bitmap-inl.h"
 #include "gc/accounting/space_bitmap-inl.h"
 #include "gc/reference_processor.h"
@@ -385,6 +386,10 @@ void ConcurrentCopying::MarkingPhase() {
     TimingLogger::ScopedTiming split5("VisitNonThreadRoots", GetTimings());
     Runtime::Current()->VisitNonThreadRoots(this);
   }
+  {
+    TimingLogger::ScopedTiming split6("Dbg::VisitRoots", GetTimings());
+    Dbg::VisitRoots(this);
+  }
   Runtime::Current()->GetHeap()->VisitAllocationRecords(this);
 
   // Immune spaces.
@@ -401,7 +406,7 @@ void ConcurrentCopying::MarkingPhase() {
 
   Thread* self = Thread::Current();
   {
-    TimingLogger::ScopedTiming split6("ProcessMarkStack", GetTimings());
+    TimingLogger::ScopedTiming split7("ProcessMarkStack", GetTimings());
     // We transition through three mark stack modes (thread-local, shared, GC-exclusive). The
     // primary reasons are the fact that we need to use a checkpoint to process thread-local mark
     // stacks, but after we disable weak refs accesses, we can't use a checkpoint due to a deadlock
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 1f447d076b..9fb21a8425 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -1426,6 +1426,7 @@ void Runtime::VisitConcurrentRoots(RootVisitor* visitor, VisitRootFlags flags) {
     // Guaranteed to have no new roots in the constant roots.
     VisitConstantRoots(visitor);
   }
+  Dbg::VisitRoots(visitor);
 }
 
 void Runtime::VisitTransactionRoots(RootVisitor* visitor) {
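For readers unfamiliar with the pattern used in `Dbg::VisitRoots` above, here is a minimal, self-contained C++ sketch of the buffered root-visiting idea: each installed breakpoint reports its method's class as a GC root, so that class stays reachable and is never unloaded while the breakpoint exists. All names in the sketch (`RootVisitor`, `BufferedRootVisitor`, `VisitBreakpointRoots`, `Method`, `Breakpoint`) are simplified stand-ins, not ART's real types; in particular, ART's visitor receives the addresses of root slots so a moving collector can update them, which the sketch leaves out.

```cpp
// Minimal sketch (not ART code): all types below are simplified stand-ins.
#include <array>
#include <cstddef>
#include <iostream>
#include <mutex>
#include <vector>

struct Object {};                            // stand-in for a managed heap object (e.g. a class)
struct Method { Object* declaring_class; };  // a breakpointed method pins its declaring class

// Interface the GC hands to root-reporting code while marking.
struct RootVisitor {
  virtual ~RootVisitor() = default;
  virtual void VisitRoots(Object* const* roots, size_t count) = 0;
};

// Collects roots and reports them to the underlying visitor in batches,
// so each root costs an array store instead of a virtual call.
template <size_t kBufferSize>
class BufferedRootVisitor {
 public:
  explicit BufferedRootVisitor(RootVisitor* visitor) : visitor_(visitor) {}
  ~BufferedRootVisitor() { Flush(); }

  void VisitRoot(Object* root) {
    buffer_[count_++] = root;
    if (count_ == kBufferSize) {
      Flush();
    }
  }

  void Flush() {
    if (count_ != 0) {
      visitor_->VisitRoots(buffer_.data(), count_);
      count_ = 0;
    }
  }

 private:
  RootVisitor* visitor_;
  std::array<Object*, kBufferSize> buffer_{};
  size_t count_ = 0;
};

// Debugger-installed breakpoints, guarded by a lock (the patch uses breakpoint_lock_).
struct Breakpoint { Method* method; };
std::mutex g_breakpoint_lock;
std::vector<Breakpoint> g_breakpoints;

// Analogue of Dbg::VisitRoots: report each breakpoint's method (here, its
// declaring class) as a root so the GC never unloads that class.
void VisitBreakpointRoots(RootVisitor* visitor) {
  std::lock_guard<std::mutex> lock(g_breakpoint_lock);
  BufferedRootVisitor<128> buffered(visitor);
  for (const Breakpoint& bp : g_breakpoints) {
    buffered.VisitRoot(bp.method->declaring_class);
  }
  // The destructor flushes any remaining buffered roots.
}

// Toy "collector" that just counts the roots it was handed.
struct CountingVisitor : RootVisitor {
  size_t seen = 0;
  void VisitRoots(Object* const* /*roots*/, size_t count) override { seen += count; }
};

int main() {
  Object string_class;
  Method to_string{&string_class};
  g_breakpoints.push_back(Breakpoint{&to_string});

  CountingVisitor gc;
  VisitBreakpointRoots(&gc);
  std::cout << "breakpoint roots reported: " << gc.seen << "\n";  // prints 1
}
```

Batching the roots and flushing them in groups keeps the per-breakpoint cost to an array store rather than a call into the collector, which is presumably why the patch uses `BufferedRootVisitor<128>` instead of reporting each method's roots individually.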