Register debugger for interesting instrumentation events only
This avoids the overhead of reporting events (like method entry/exit, field
read/write, ...) from the interpreter when they are not requested on the JDWP
side. It also avoids burning JDWP ids for objects and classes before we find
out we do not need to report the event.
When we register a JDWP event (like a breakpoint), we add the debugger as
a listener for the corresponding instrumentation event (like kDexPcMoved).
Conversely, when a JDWP event is cleared, we remove the debugger as a
listener for the corresponding instrumentation event. To ensure we add or
remove the debugger as a listener only once per instrumentation event, we
use reference counting.
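As a minimal sketch of the reference counting idea (RequestListener/ReleaseListener
are illustrative helper names, not the CL's; gDebugInstrumentationListener stands for
the debugger's InstrumentationListener instance; the actual update happens when the
queued requests described below are processed):

  // Increment the counter for an instrumentation event; add the debugger as a
  // listener only on the 0 -> 1 transition. ReleaseListener is the mirror image.
  void Dbg::RequestListener(uint32_t instrumentation_event) {
    size_t* counter = GetReferenceCounterForEvent(instrumentation_event);
    if (*counter == 0) {
      instrumentation_events_ |= instrumentation_event;
      Runtime::Current()->GetInstrumentation()->AddListener(&gDebugInstrumentationListener,
                                                            instrumentation_event);
    }
    ++*counter;
  }

  void Dbg::ReleaseListener(uint32_t instrumentation_event) {
    size_t* counter = GetReferenceCounterForEvent(instrumentation_event);
    CHECK_GT(*counter, 0U);
    --*counter;
    if (*counter == 0) {
      // Last JDWP event needing this instrumentation event: stop listening.
      instrumentation_events_ &= ~instrumentation_event;
      Runtime::Current()->GetInstrumentation()->RemoveListener(&gDebugInstrumentationListener,
                                                               instrumentation_event);
    }
  }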
Like deoptimization, we can update instrumentation listeners only when all
mutator threads are suspended. To add or remove the debugger as a listener,
we extend the deoptimization request support into a more general mechanism
for instrumentation requests.
We add the kRegisterForEvent and kUnregisterForEvent request kinds to add and
remove, respectively, the debugger as a listener for a given instrumentation
event.
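For example (a sketch only; the exact queuing and processing code in the CL may
differ, kDexPcMoved is assumed to be the instrumentation event kind for dex pc
changes, and RequestListener/ReleaseListener are the hypothetical helpers from the
sketch above):

  // Queue a request when a breakpoint is set, so we do not update the
  // instrumentation while holding the event list lock.
  Thread* self = Thread::Current();
  DeoptimizationRequest req;
  req.kind = DeoptimizationRequest::kRegisterForEvent;
  req.instrumentation_event = instrumentation::Instrumentation::kDexPcMoved;
  {
    MutexLock mu(self, *deoptimization_lock_);
    deoptimization_requests_.push_back(req);
  }

  // Later, with all mutator threads suspended, each queued request is applied:
  switch (req.kind) {
    case DeoptimizationRequest::kRegisterForEvent:
      RequestListener(req.instrumentation_event);
      break;
    case DeoptimizationRequest::kUnregisterForEvent:
      ReleaseListener(req.instrumentation_event);
      break;
    default:
      break;  // Deoptimization cases elided.
  }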
Note: we will rename the related classes, methods, etc. later to avoid
polluting this code review.
This CL also fixes Instrumentation::IsActive to take field read/write events
into account.
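That fix amounts to also checking the field access listener flags; roughly
(member names such as have_field_read_listeners_ are assumed from
instrumentation.h and may not match exactly):

  // IsActive() must also report true when only field read/write listeners are
  // installed, otherwise the interpreter would skip reporting those events.
  bool Instrumentation::IsActive() const {
    return have_dex_pc_listeners_ || have_method_entry_listeners_ ||
        have_method_exit_listeners_ || have_field_read_listeners_ ||
        have_field_write_listeners_ || have_exception_caught_listeners_ ||
        have_method_unwind_listeners_;
  }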
Bug: 14401699
Bug: 14826953
Change-Id: Ic896469e82a8589de419ebea4b9dc3116925f3ab
diff --git a/runtime/debugger.h b/runtime/debugger.h
index bef708c..9311f49 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -129,21 +129,31 @@
DISALLOW_COPY_AND_ASSIGN(SingleStepControl);
};
+// TODO rename to InstrumentationRequest.
struct DeoptimizationRequest {
enum Kind {
kNothing, // no action.
+ kRegisterForEvent, // start listening for instrumentation event.
+ kUnregisterForEvent, // stop listening for instrumentation event.
kFullDeoptimization, // deoptimize everything.
kFullUndeoptimization, // undeoptimize everything.
kSelectiveDeoptimization, // deoptimize one method.
kSelectiveUndeoptimization // undeoptimize one method.
};
- DeoptimizationRequest() : kind(kNothing), method(nullptr) {}
+ DeoptimizationRequest() : kind(kNothing), instrumentation_event(0), method(nullptr) {}
void VisitRoots(RootCallback* callback, void* arg);
Kind kind;
+ // TODO we could use a union to hold the instrumentation_event and the method since they
+ // are only meaningful for kRegisterForEvent/kUnregisterForEvent and
+ // kSelectiveDeoptimization/kSelectiveUndeoptimization respectively.
+
+ // Event to start or stop listening to. Only for kRegisterForEvent and kUnregisterForEvent.
+ uint32_t instrumentation_event;
+
// Method for selective deoptimization.
mirror::ArtMethod* method;
};
@@ -579,11 +589,13 @@
static size_t alloc_record_count_ GUARDED_BY(alloc_tracker_lock_);
// Guards deoptimization requests.
+ // TODO rename to instrumentation_update_lock.
static Mutex* deoptimization_lock_ ACQUIRED_AFTER(Locks::breakpoint_lock_);
// Deoptimization requests to be processed each time the event list is updated. This is used when
// registering and unregistering events so we do not deoptimize while holding the event list
// lock.
+ // TODO rename to instrumentation_requests.
static std::vector<DeoptimizationRequest> deoptimization_requests_ GUARDED_BY(deoptimization_lock_);
// Count the number of events requiring full deoptimization. When the counter is > 0, everything
@@ -596,6 +608,19 @@
// session.
static size_t delayed_full_undeoptimization_count_ GUARDED_BY(deoptimization_lock_);
+ static size_t* GetReferenceCounterForEvent(uint32_t instrumentation_event);
+
+ // Instrumentation event reference counters.
+ // TODO we could use an array instead of having all these dedicated counters. Instrumentation
+ // events are bits of a mask so we could convert them to an array index.
+ static size_t dex_pc_change_event_ref_count_ GUARDED_BY(deoptimization_lock_);
+ static size_t method_enter_event_ref_count_ GUARDED_BY(deoptimization_lock_);
+ static size_t method_exit_event_ref_count_ GUARDED_BY(deoptimization_lock_);
+ static size_t field_read_event_ref_count_ GUARDED_BY(deoptimization_lock_);
+ static size_t field_write_event_ref_count_ GUARDED_BY(deoptimization_lock_);
+ static size_t exception_catch_event_ref_count_ GUARDED_BY(deoptimization_lock_);
+ static uint32_t instrumentation_events_ GUARDED_BY(Locks::mutator_lock_);
+
DISALLOW_COPY_AND_ASSIGN(Dbg);
};
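For reference, GetReferenceCounterForEvent can map each event bit to its dedicated
counter; a sketch (event kind names are taken from instrumentation.h and are
assumptions here; the array-based variant from the TODO would replace the switch):

  size_t* Dbg::GetReferenceCounterForEvent(uint32_t instrumentation_event) {
    switch (instrumentation_event) {
      case instrumentation::Instrumentation::kDexPcMoved:
        return &dex_pc_change_event_ref_count_;
      case instrumentation::Instrumentation::kMethodEntered:
        return &method_enter_event_ref_count_;
      case instrumentation::Instrumentation::kMethodExited:
        return &method_exit_event_ref_count_;
      case instrumentation::Instrumentation::kFieldRead:
        return &field_read_event_ref_count_;
      case instrumentation::Instrumentation::kFieldWritten:
        return &field_write_event_ref_count_;
      case instrumentation::Instrumentation::kExceptionCaught:
        return &exception_catch_event_ref_count_;
      default:
        return nullptr;  // Event the debugger never listens for.
    }
  }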