path: root/runtime/debugger.cc
Diffstat (limited to 'runtime/debugger.cc')
-rw-r--r--  runtime/debugger.cc  |  111
1 file changed, 84 insertions, 27 deletions
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 21419973e0..8280c7c9a6 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -63,6 +63,9 @@ struct AllocRecordStackTraceElement {
mirror::ArtMethod* method;
uint32_t dex_pc;
+ AllocRecordStackTraceElement() : method(nullptr), dex_pc(0) {
+ }
+
int32_t LineNumber() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return MethodHelper(method).GetLineNumFromDexPC(dex_pc);
}
@@ -81,6 +84,20 @@ struct AllocRecord {
}
return depth;
}
+
+ void UpdateObjectPointers(RootVisitor* visitor, void* arg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (type != nullptr) {
+ type = down_cast<mirror::Class*>(visitor(type, arg));
+ }
+ for (size_t stack_frame = 0; stack_frame < kMaxAllocRecordStackDepth; ++stack_frame) {
+ mirror::ArtMethod*& m = stack[stack_frame].method;
+ if (m == nullptr) {
+ break;
+ }
+ m = down_cast<mirror::ArtMethod*>(visitor(m, arg));
+ }
+ }
};
struct Breakpoint {
@@ -101,7 +118,7 @@ class DebugInstrumentationListener : public instrumentation::InstrumentationList
virtual ~DebugInstrumentationListener() {}
virtual void MethodEntered(Thread* thread, mirror::Object* this_object,
- const mirror::ArtMethod* method, uint32_t dex_pc)
+ mirror::ArtMethod* method, uint32_t dex_pc)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (method->IsNative()) {
// TODO: post location events is a suspension point and native method entry stubs aren't.
@@ -111,7 +128,7 @@ class DebugInstrumentationListener : public instrumentation::InstrumentationList
}
virtual void MethodExited(Thread* thread, mirror::Object* this_object,
- const mirror::ArtMethod* method,
+ mirror::ArtMethod* method,
uint32_t dex_pc, const JValue& return_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (method->IsNative()) {
@@ -122,7 +139,7 @@ class DebugInstrumentationListener : public instrumentation::InstrumentationList
}
virtual void MethodUnwind(Thread* thread, mirror::Object* this_object,
- const mirror::ArtMethod* method, uint32_t dex_pc)
+ mirror::ArtMethod* method, uint32_t dex_pc)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// We're not recorded to listen to this kind of event, so complain.
LOG(ERROR) << "Unexpected method unwind event in debugger " << PrettyMethod(method)
@@ -130,7 +147,7 @@ class DebugInstrumentationListener : public instrumentation::InstrumentationList
}
virtual void DexPcMoved(Thread* thread, mirror::Object* this_object,
- const mirror::ArtMethod* method, uint32_t new_dex_pc)
+ mirror::ArtMethod* method, uint32_t new_dex_pc)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Dbg::UpdateDebugger(thread, this_object, method, new_dex_pc);
}
@@ -303,7 +320,7 @@ static JDWP::JdwpTag TagFromClass(mirror::Class* c)
*
* Null objects are tagged JT_OBJECT.
*/
-static JDWP::JdwpTag TagFromObject(const mirror::Object* o)
+static JDWP::JdwpTag TagFromObject(mirror::Object* o)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return (o == NULL) ? JDWP::JT_OBJECT : TagFromClass(o->GetClass());
}
@@ -464,6 +481,8 @@ void Dbg::StartJdwp() {
}
void Dbg::StopJdwp() {
+ // Prevent the JDWP thread from processing JDWP incoming packets after we close the connection.
+ Disposed();
delete gJdwpState;
gJdwpState = NULL;
delete gRegistry;
@@ -773,6 +792,8 @@ JDWP::JdwpError Dbg::GetContendedMonitor(JDWP::ObjectId thread_id, JDWP::ObjectI
JDWP::JdwpError Dbg::GetInstanceCounts(const std::vector<JDWP::RefTypeId>& class_ids,
std::vector<uint64_t>& counts)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ gc::Heap* heap = Runtime::Current()->GetHeap();
+ heap->CollectGarbage(false);
std::vector<mirror::Class*> classes;
counts.clear();
for (size_t i = 0; i < class_ids.size(); ++i) {
@@ -784,19 +805,20 @@ JDWP::JdwpError Dbg::GetInstanceCounts(const std::vector<JDWP::RefTypeId>& class
classes.push_back(c);
counts.push_back(0);
}
-
- Runtime::Current()->GetHeap()->CountInstances(classes, false, &counts[0]);
+ heap->CountInstances(classes, false, &counts[0]);
return JDWP::ERR_NONE;
}
JDWP::JdwpError Dbg::GetInstances(JDWP::RefTypeId class_id, int32_t max_count, std::vector<JDWP::ObjectId>& instances)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ gc::Heap* heap = Runtime::Current()->GetHeap();
+ // We only want reachable instances, so do a GC.
+ heap->CollectGarbage(false);
JDWP::JdwpError status;
mirror::Class* c = DecodeClass(class_id, status);
- if (c == NULL) {
+ if (c == nullptr) {
return status;
}
-
std::vector<mirror::Object*> raw_instances;
Runtime::Current()->GetHeap()->GetInstances(c, max_count, raw_instances);
for (size_t i = 0; i < raw_instances.size(); ++i) {
@@ -808,13 +830,14 @@ JDWP::JdwpError Dbg::GetInstances(JDWP::RefTypeId class_id, int32_t max_count, s
JDWP::JdwpError Dbg::GetReferringObjects(JDWP::ObjectId object_id, int32_t max_count,
std::vector<JDWP::ObjectId>& referring_objects)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ gc::Heap* heap = Runtime::Current()->GetHeap();
+ heap->CollectGarbage(false);
mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
if (o == NULL || o == ObjectRegistry::kInvalidObject) {
return JDWP::ERR_INVALID_OBJECT;
}
-
std::vector<mirror::Object*> raw_instances;
- Runtime::Current()->GetHeap()->GetReferringObjects(o, max_count, raw_instances);
+ heap->GetReferringObjects(o, max_count, raw_instances);
for (size_t i = 0; i < raw_instances.size(); ++i) {
referring_objects.push_back(gRegistry->Add(raw_instances[i]));
}
@@ -1054,16 +1077,16 @@ JDWP::JdwpError Dbg::OutputArray(JDWP::ObjectId array_id, int offset, int count,
size_t width = GetTagWidth(tag);
uint8_t* dst = expandBufAddSpace(pReply, count * width);
if (width == 8) {
- const uint64_t* src8 = reinterpret_cast<uint64_t*>(a->GetRawData(sizeof(uint64_t)));
+ const uint64_t* src8 = reinterpret_cast<uint64_t*>(a->GetRawData(sizeof(uint64_t), 0));
for (int i = 0; i < count; ++i) JDWP::Write8BE(&dst, src8[offset + i]);
} else if (width == 4) {
- const uint32_t* src4 = reinterpret_cast<uint32_t*>(a->GetRawData(sizeof(uint32_t)));
+ const uint32_t* src4 = reinterpret_cast<uint32_t*>(a->GetRawData(sizeof(uint32_t), 0));
for (int i = 0; i < count; ++i) JDWP::Write4BE(&dst, src4[offset + i]);
} else if (width == 2) {
- const uint16_t* src2 = reinterpret_cast<uint16_t*>(a->GetRawData(sizeof(uint16_t)));
+ const uint16_t* src2 = reinterpret_cast<uint16_t*>(a->GetRawData(sizeof(uint16_t), 0));
for (int i = 0; i < count; ++i) JDWP::Write2BE(&dst, src2[offset + i]);
} else {
- const uint8_t* src = reinterpret_cast<uint8_t*>(a->GetRawData(sizeof(uint8_t)));
+ const uint8_t* src = reinterpret_cast<uint8_t*>(a->GetRawData(sizeof(uint8_t), 0));
memcpy(dst, &src[offset * width], count * width);
}
} else {
@@ -1079,10 +1102,13 @@ JDWP::JdwpError Dbg::OutputArray(JDWP::ObjectId array_id, int offset, int count,
return JDWP::ERR_NONE;
}
-template <typename T> void CopyArrayData(mirror::Array* a, JDWP::Request& src, int offset, int count) {
+template <typename T>
+static void CopyArrayData(mirror::Array* a, JDWP::Request& src, int offset, int count)
+ NO_THREAD_SAFETY_ANALYSIS {
+ // TODO: fix when annotalysis correctly handles non-member functions.
DCHECK(a->GetClass()->IsPrimitiveArray());
- T* dst = &(reinterpret_cast<T*>(a->GetRawData(sizeof(T)))[offset * sizeof(T)]);
+ T* dst = reinterpret_cast<T*>(a->GetRawData(sizeof(T), offset));
for (int i = 0; i < count; ++i) {
*dst++ = src.ReadValue(sizeof(T));
}
@@ -1926,7 +1952,7 @@ JDWP::JdwpError Dbg::GetThreadFrames(JDWP::ObjectId thread_id, size_t start_fram
JDWP::FrameId frame_id(GetFrameId());
JDWP::JdwpLocation location;
SetLocation(location, GetMethod(), GetDexPc());
- VLOG(jdwp) << StringPrintf(" Frame %3zd: id=%3lld ", depth_, frame_id) << location;
+ VLOG(jdwp) << StringPrintf(" Frame %3zd: id=%3" PRIu64 " ", depth_, frame_id) << location;
expandBufAdd8BE(buf_, frame_id);
expandBufAddLocation(buf_, location);
}
@@ -2283,7 +2309,7 @@ void Dbg::SetLocalValue(JDWP::ObjectId thread_id, JDWP::FrameId frame_id, int sl
visitor.WalkStack();
}
-void Dbg::PostLocationEvent(const mirror::ArtMethod* m, int dex_pc, mirror::Object* this_object,
+void Dbg::PostLocationEvent(mirror::ArtMethod* m, int dex_pc, mirror::Object* this_object,
int event_flags, const JValue* return_value) {
mirror::Class* c = m->GetDeclaringClass();
@@ -2338,7 +2364,7 @@ void Dbg::PostClassPrepare(mirror::Class* c) {
}
void Dbg::UpdateDebugger(Thread* thread, mirror::Object* this_object,
- const mirror::ArtMethod* m, uint32_t dex_pc) {
+ mirror::ArtMethod* m, uint32_t dex_pc) {
if (!IsDebuggerActive() || dex_pc == static_cast<uint32_t>(-2) /* fake method exit */) {
return;
}
@@ -2630,7 +2656,7 @@ JDWP::JdwpError Dbg::ConfigureStep(JDWP::ObjectId thread_id, JDWP::JdwpStepSize
if (!m->IsRuntimeMethod()) {
++single_step_control_->stack_depth;
if (single_step_control_->method == NULL) {
- const mirror::DexCache* dex_cache = m->GetDeclaringClass()->GetDexCache();
+ mirror::DexCache* dex_cache = m->GetDeclaringClass()->GetDexCache();
single_step_control_->method = m;
*line_number_ = -1;
if (dex_cache != NULL) {
@@ -2699,7 +2725,7 @@ JDWP::JdwpError Dbg::ConfigureStep(JDWP::ObjectId thread_id, JDWP::JdwpStepSize
uint32_t last_pc;
};
single_step_control->dex_pcs.clear();
- const mirror::ArtMethod* m = single_step_control->method;
+ mirror::ArtMethod* m = single_step_control->method;
if (!m->IsNative()) {
DebugCallbackContext context(single_step_control, line_number);
MethodHelper mh(m);
@@ -3062,7 +3088,7 @@ bool Dbg::DdmHandlePacket(JDWP::Request& request, uint8_t** pReplyBuf, int* pRep
// Run through and find all chunks. [Currently just find the first.]
ScopedByteArrayRO contents(env, dataArray.get());
if (length != request_length) {
- LOG(WARNING) << StringPrintf("bad chunk found (len=%u pktLen=%d)", length, request_length);
+ LOG(WARNING) << StringPrintf("bad chunk found (len=%u pktLen=%zd)", length, request_length);
return false;
}
@@ -3454,7 +3480,7 @@ class HeapChunkContext {
Flush();
}
}
- const mirror::Object* obj = reinterpret_cast<const mirror::Object*>(start);
+ mirror::Object* obj = reinterpret_cast<mirror::Object*>(start);
// Determine the type of this chunk.
// OLD-TODO: if context.merge, see if this chunk is different from the last chunk.
@@ -3497,8 +3523,8 @@ class HeapChunkContext {
*p_++ = length - 1;
}
- uint8_t ExamineObject(const mirror::Object* o, bool is_native_heap)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+ uint8_t ExamineObject(mirror::Object* o, bool is_native_heap)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
if (o == NULL) {
return HPSG_STATE(SOLIDITY_FREE, 0);
}
@@ -3751,7 +3777,7 @@ void Dbg::DumpRecentAllocations() {
<< PrettyClass(record->type);
for (size_t stack_frame = 0; stack_frame < kMaxAllocRecordStackDepth; ++stack_frame) {
- const mirror::ArtMethod* m = record->stack[stack_frame].method;
+ mirror::ArtMethod* m = record->stack[stack_frame].method;
if (m == NULL) {
break;
}
@@ -3767,6 +3793,37 @@ void Dbg::DumpRecentAllocations() {
}
}
+void Dbg::UpdateObjectPointers(RootVisitor* visitor, void* arg) {
+ {
+ MutexLock mu(Thread::Current(), gAllocTrackerLock);
+ if (recent_allocation_records_ != nullptr) {
+ size_t i = HeadIndex();
+ size_t count = gAllocRecordCount;
+ while (count--) {
+ AllocRecord* record = &recent_allocation_records_[i];
+ DCHECK(record != nullptr);
+ record->UpdateObjectPointers(visitor, arg);
+ i = (i + 1) & (gAllocRecordMax - 1);
+ }
+ }
+ }
+ if (gRegistry != nullptr) {
+ gRegistry->UpdateObjectPointers(visitor, arg);
+ }
+}
+
+void Dbg::AllowNewObjectRegistryObjects() {
+ if (gRegistry != nullptr) {
+ gRegistry->AllowNewObjects();
+ }
+}
+
+void Dbg::DisallowNewObjectRegistryObjects() {
+ if (gRegistry != nullptr) {
+ gRegistry->DisallowNewObjects();
+ }
+}
+
class StringTable {
public:
StringTable() {