path: root/runtime/debugger.cc
Diffstat (limited to 'runtime/debugger.cc')
-rw-r--r--  runtime/debugger.cc  800
1 file changed, 359 insertions(+), 441 deletions(-)
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 24615e2a66..5918c10515 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -29,9 +29,11 @@
#include "dex_file-inl.h"
#include "dex_instruction.h"
#include "gc/accounting/card_table-inl.h"
+#include "gc/allocation_record.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "handle_scope.h"
+#include "jdwp/jdwp_priv.h"
#include "jdwp/object_registry.h"
#include "mirror/class.h"
#include "mirror/class-inl.h"
@@ -61,127 +63,30 @@ namespace art {
// The key identifying the debugger to update instrumentation.
static constexpr const char* kDbgInstrumentationKey = "Debugger";
-static const size_t kMaxAllocRecordStackDepth = 16; // Max 255.
-static const size_t kDefaultNumAllocRecords = 64*1024; // Must be a power of 2. 2BE can hold 64k-1.
-
-// Limit alloc_record_count to the 2BE value that is the limit of the current protocol.
+// Limit alloc_record_count to the 2BE value (64k-1) that is the limit of the current protocol.
static uint16_t CappedAllocRecordCount(size_t alloc_record_count) {
- if (alloc_record_count > 0xffff) {
- return 0xffff;
- }
- return alloc_record_count;
-}
-
-class AllocRecordStackTraceElement {
- public:
- AllocRecordStackTraceElement() : method_(nullptr), dex_pc_(0) {
- }
-
- int32_t LineNumber() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- ArtMethod* method = Method();
- DCHECK(method != nullptr);
- return method->GetLineNumFromDexPC(DexPc());
- }
-
- ArtMethod* Method() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- ScopedObjectAccessUnchecked soa(Thread::Current());
- return soa.DecodeMethod(method_);
- }
-
- void SetMethod(ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- ScopedObjectAccessUnchecked soa(Thread::Current());
- method_ = soa.EncodeMethod(m);
- }
-
- uint32_t DexPc() const {
- return dex_pc_;
- }
-
- void SetDexPc(uint32_t pc) {
- dex_pc_ = pc;
- }
-
- private:
- jmethodID method_;
- uint32_t dex_pc_;
-};
-
-jobject Dbg::TypeCache::Add(mirror::Class* t) {
- ScopedObjectAccessUnchecked soa(Thread::Current());
- JNIEnv* const env = soa.Env();
- ScopedLocalRef<jobject> local_ref(soa.Env(), soa.AddLocalReference<jobject>(t));
- const int32_t hash_code = soa.Decode<mirror::Class*>(local_ref.get())->IdentityHashCode();
- auto range = objects_.equal_range(hash_code);
- for (auto it = range.first; it != range.second; ++it) {
- if (soa.Decode<mirror::Class*>(it->second) == soa.Decode<mirror::Class*>(local_ref.get())) {
- // Found a matching weak global, return it.
- return it->second;
+ size_t cap = 0xffff;
+#ifdef HAVE_ANDROID_OS
+ // Check whether there's a system property overriding the number of recent records.
+ const char* propertyName = "dalvik.vm.recentAllocMax";
+ char recentAllocMaxString[PROPERTY_VALUE_MAX];
+ if (property_get(propertyName, recentAllocMaxString, "") > 0) {
+ char* end;
+ size_t value = strtoul(recentAllocMaxString, &end, 10);
+ if (*end != '\0') {
+ LOG(ERROR) << "Ignoring " << propertyName << " '" << recentAllocMaxString
+ << "' --- invalid";
+ } else {
+ cap = value;
}
}
- const jobject weak_global = env->NewWeakGlobalRef(local_ref.get());
- objects_.insert(std::make_pair(hash_code, weak_global));
- return weak_global;
-}
-
-void Dbg::TypeCache::Clear() {
- JavaVMExt* vm = Runtime::Current()->GetJavaVM();
- Thread* self = Thread::Current();
- for (const auto& p : objects_) {
- vm->DeleteWeakGlobalRef(self, p.second);
+#endif
+ if (alloc_record_count > cap) {
+ return cap;
}
- objects_.clear();
+ return alloc_record_count;
}
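// Hedged illustration (hypothetical helper, not part of this diff) of why the cap is a
// 16-bit value: the DDMS reply stores the record count in a 2-byte big-endian field, so
// anything above 0xffff must be clipped before serialization. Assumes only <vector> and
// <cstdint>; it mirrors the Append2BE usage in GetRecentAllocations() later in this file.
static void AppendCappedAllocCount(std::vector<uint8_t>& bytes, size_t alloc_record_count) {
  const uint16_t capped = CappedAllocRecordCount(alloc_record_count);
  bytes.push_back(static_cast<uint8_t>(capped >> 8));    // high byte first (big-endian)
  bytes.push_back(static_cast<uint8_t>(capped & 0xff));  // low byte
}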
-class AllocRecord {
- public:
- AllocRecord() : type_(nullptr), byte_count_(0), thin_lock_id_(0) {}
-
- mirror::Class* Type() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return down_cast<mirror::Class*>(Thread::Current()->DecodeJObject(type_));
- }
-
- void SetType(mirror::Class* t) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_,
- Locks::alloc_tracker_lock_) {
- type_ = Dbg::type_cache_.Add(t);
- }
-
- size_t GetDepth() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- size_t depth = 0;
- while (depth < kMaxAllocRecordStackDepth && stack_[depth].Method() != nullptr) {
- ++depth;
- }
- return depth;
- }
-
- size_t ByteCount() const {
- return byte_count_;
- }
-
- void SetByteCount(size_t count) {
- byte_count_ = count;
- }
-
- uint16_t ThinLockId() const {
- return thin_lock_id_;
- }
-
- void SetThinLockId(uint16_t id) {
- thin_lock_id_ = id;
- }
-
- AllocRecordStackTraceElement* StackElement(size_t index) {
- DCHECK_LT(index, kMaxAllocRecordStackDepth);
- return &stack_[index];
- }
-
- private:
- jobject type_; // This is a weak global.
- size_t byte_count_;
- uint16_t thin_lock_id_;
- // Unused entries have null method.
- AllocRecordStackTraceElement stack_[kMaxAllocRecordStackDepth];
-};
-
class Breakpoint {
public:
Breakpoint(ArtMethod* method, uint32_t dex_pc,
@@ -382,13 +287,6 @@ bool Dbg::gDebuggerActive = false;
bool Dbg::gDisposed = false;
ObjectRegistry* Dbg::gRegistry = nullptr;
-// Recent allocation tracking.
-AllocRecord* Dbg::recent_allocation_records_ = nullptr; // TODO: CircularBuffer<AllocRecord>
-size_t Dbg::alloc_record_max_ = 0;
-size_t Dbg::alloc_record_head_ = 0;
-size_t Dbg::alloc_record_count_ = 0;
-Dbg::TypeCache Dbg::type_cache_;
-
// Deoptimization support.
std::vector<DeoptimizationRequest> Dbg::deoptimization_requests_;
size_t Dbg::full_deoptimization_event_count_ = 0;
@@ -1761,6 +1659,51 @@ JDWP::JdwpTag Dbg::GetStaticFieldBasicTag(JDWP::FieldId field_id) {
return BasicTagFromDescriptor(FromFieldId(field_id)->GetTypeDescriptor());
}
+static JValue GetArtFieldValue(ArtField* f, mirror::Object* o)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Primitive::Type fieldType = f->GetTypeAsPrimitiveType();
+ JValue field_value;
+ switch (fieldType) {
+ case Primitive::kPrimBoolean:
+ field_value.SetZ(f->GetBoolean(o));
+ return field_value;
+
+ case Primitive::kPrimByte:
+ field_value.SetB(f->GetByte(o));
+ return field_value;
+
+ case Primitive::kPrimChar:
+ field_value.SetC(f->GetChar(o));
+ return field_value;
+
+ case Primitive::kPrimShort:
+ field_value.SetS(f->GetShort(o));
+ return field_value;
+
+ case Primitive::kPrimInt:
+ case Primitive::kPrimFloat:
+ // Int and Float must be treated as 32-bit values in JDWP.
+ field_value.SetI(f->GetInt(o));
+ return field_value;
+
+ case Primitive::kPrimLong:
+ case Primitive::kPrimDouble:
+ // Long and Double must be treated as 64-bit values in JDWP.
+ field_value.SetJ(f->GetLong(o));
+ return field_value;
+
+ case Primitive::kPrimNot:
+ field_value.SetL(f->GetObject(o));
+ return field_value;
+
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Attempt to read from field of type 'void'";
+ UNREACHABLE();
+ }
+ LOG(FATAL) << "Attempt to read from field of unknown type";
+ UNREACHABLE();
+}
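// Hedged illustration of "treated as 32-bit values in JDWP": the raw field bits travel
// untyped, and the tag (JT_INT vs JT_FLOAT) tells the debugger how to decode them.
// Hypothetical helper for illustration only, assuming just <cstring> and <cstdint>:
static uint32_t FloatBitsForJdwp(float value) {
  uint32_t bits;
  memcpy(&bits, &value, sizeof(bits));  // the same 32 bits GetInt() reads out of the field slot
  return bits;
}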
+
static JDWP::JdwpError GetFieldValueImpl(JDWP::RefTypeId ref_type_id, JDWP::ObjectId object_id,
JDWP::FieldId field_id, JDWP::ExpandBuf* pReply,
bool is_static)
@@ -1795,27 +1738,17 @@ static JDWP::JdwpError GetFieldValueImpl(JDWP::RefTypeId ref_type_id, JDWP::Obje
}
} else {
if (f->IsStatic()) {
- LOG(WARNING) << "Ignoring non-nullptr receiver for ObjectReference.SetValues on static field "
- << PrettyField(f);
+ LOG(WARNING) << "Ignoring non-nullptr receiver for ObjectReference.GetValues"
+ << " on static field " << PrettyField(f);
}
}
if (f->IsStatic()) {
o = f->GetDeclaringClass();
}
+ JValue field_value(GetArtFieldValue(f, o));
JDWP::JdwpTag tag = BasicTagFromDescriptor(f->GetTypeDescriptor());
- JValue field_value;
- if (tag == JDWP::JT_VOID) {
- LOG(FATAL) << "Unknown tag: " << tag;
- } else if (!IsPrimitiveTag(tag)) {
- field_value.SetL(f->GetObject(o));
- } else if (tag == JDWP::JT_DOUBLE || tag == JDWP::JT_LONG) {
- field_value.SetJ(f->Get64(o));
- } else {
- field_value.SetI(f->Get32(o));
- }
Dbg::OutputJValue(tag, &field_value, pReply);
-
return JDWP::ERR_NONE;
}
@@ -1829,6 +1762,76 @@ JDWP::JdwpError Dbg::GetStaticFieldValue(JDWP::RefTypeId ref_type_id, JDWP::Fiel
return GetFieldValueImpl(ref_type_id, 0, field_id, pReply, true);
}
+static JDWP::JdwpError SetArtFieldValue(ArtField* f, mirror::Object* o, uint64_t value, int width)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Primitive::Type fieldType = f->GetTypeAsPrimitiveType();
+ // Debugging only happens at runtime so we know we are not running in a transaction.
+ static constexpr bool kNoTransactionMode = false;
+ switch (fieldType) {
+ case Primitive::kPrimBoolean:
+ CHECK_EQ(width, 1);
+ f->SetBoolean<kNoTransactionMode>(o, static_cast<uint8_t>(value));
+ return JDWP::ERR_NONE;
+
+ case Primitive::kPrimByte:
+ CHECK_EQ(width, 1);
+ f->SetByte<kNoTransactionMode>(o, static_cast<uint8_t>(value));
+ return JDWP::ERR_NONE;
+
+ case Primitive::kPrimChar:
+ CHECK_EQ(width, 2);
+ f->SetChar<kNoTransactionMode>(o, static_cast<uint16_t>(value));
+ return JDWP::ERR_NONE;
+
+ case Primitive::kPrimShort:
+ CHECK_EQ(width, 2);
+ f->SetShort<kNoTransactionMode>(o, static_cast<int16_t>(value));
+ return JDWP::ERR_NONE;
+
+ case Primitive::kPrimInt:
+ case Primitive::kPrimFloat:
+ CHECK_EQ(width, 4);
+ // Int and Float must be treated as 32-bit values in JDWP.
+ f->SetInt<kNoTransactionMode>(o, static_cast<int32_t>(value));
+ return JDWP::ERR_NONE;
+
+ case Primitive::kPrimLong:
+ case Primitive::kPrimDouble:
+ CHECK_EQ(width, 8);
+ // Long and Double must be treated as 64-bit values in JDWP.
+ f->SetLong<kNoTransactionMode>(o, value);
+ return JDWP::ERR_NONE;
+
+ case Primitive::kPrimNot: {
+ JDWP::JdwpError error;
+ mirror::Object* v = Dbg::GetObjectRegistry()->Get<mirror::Object*>(value, &error);
+ if (error != JDWP::ERR_NONE) {
+ return JDWP::ERR_INVALID_OBJECT;
+ }
+ if (v != nullptr) {
+ mirror::Class* field_type;
+ {
+ StackHandleScope<2> hs(Thread::Current());
+ HandleWrapper<mirror::Object> h_v(hs.NewHandleWrapper(&v));
+ HandleWrapper<mirror::Object> h_o(hs.NewHandleWrapper(&o));
+ field_type = f->GetType<true>();
+ }
+ if (!field_type->IsAssignableFrom(v->GetClass())) {
+ return JDWP::ERR_INVALID_OBJECT;
+ }
+ }
+ f->SetObject<kNoTransactionMode>(o, v);
+ return JDWP::ERR_NONE;
+ }
+
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Attempt to write to field of type 'void'";
+ UNREACHABLE();
+ }
+ LOG(FATAL) << "Attempt to write to field of unknown type";
+ UNREACHABLE();
+}
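// Hedged sketch of where the width checks above come from: the debugger supplies values
// using the standard JDWP widths (1 byte for boolean/byte, 2 for char/short, 4 for
// int/float, 8 for long/double); references are passed separately as ObjectIds.
// Hypothetical helper for illustration only:
static int ExpectedJdwpValueWidth(Primitive::Type type) {
  switch (type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:   return 1;
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:  return 2;
    case Primitive::kPrimInt:
    case Primitive::kPrimFloat:  return 4;
    case Primitive::kPrimLong:
    case Primitive::kPrimDouble: return 8;
    default:                     return 0;  // kPrimNot/kPrimVoid: no fixed primitive width
  }
}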
+
static JDWP::JdwpError SetFieldValueImpl(JDWP::ObjectId object_id, JDWP::FieldId field_id,
uint64_t value, int width, bool is_static)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -1847,47 +1850,14 @@ static JDWP::JdwpError SetFieldValueImpl(JDWP::ObjectId object_id, JDWP::FieldId
}
} else {
if (f->IsStatic()) {
- LOG(WARNING) << "Ignoring non-nullptr receiver for ObjectReference.SetValues on static field " << PrettyField(f);
+ LOG(WARNING) << "Ignoring non-nullptr receiver for ObjectReference.SetValues"
+ << " on static field " << PrettyField(f);
}
}
if (f->IsStatic()) {
o = f->GetDeclaringClass();
}
-
- JDWP::JdwpTag tag = BasicTagFromDescriptor(f->GetTypeDescriptor());
-
- if (IsPrimitiveTag(tag)) {
- if (tag == JDWP::JT_DOUBLE || tag == JDWP::JT_LONG) {
- CHECK_EQ(width, 8);
- // Debugging can't use transactional mode (runtime only).
- f->Set64<false>(o, value);
- } else {
- CHECK_LE(width, 4);
- // Debugging can't use transactional mode (runtime only).
- f->Set32<false>(o, value);
- }
- } else {
- mirror::Object* v = Dbg::GetObjectRegistry()->Get<mirror::Object*>(value, &error);
- if (error != JDWP::ERR_NONE) {
- return JDWP::ERR_INVALID_OBJECT;
- }
- if (v != nullptr) {
- mirror::Class* field_type;
- {
- StackHandleScope<2> hs(Thread::Current());
- HandleWrapper<mirror::Object> h_v(hs.NewHandleWrapper(&v));
- HandleWrapper<mirror::Object> h_o(hs.NewHandleWrapper(&o));
- field_type = f->GetType<true>();
- }
- if (!field_type->IsAssignableFrom(v->GetClass())) {
- return JDWP::ERR_INVALID_OBJECT;
- }
- }
- // Debugging can't use transactional mode (runtime only).
- f->SetObject<false>(o, v);
- }
-
- return JDWP::ERR_NONE;
+ return SetArtFieldValue(f, o, value, width);
}
JDWP::JdwpError Dbg::SetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id, uint64_t value,
@@ -3763,17 +3733,16 @@ static char JdwpTagToShortyChar(JDWP::JdwpTag tag) {
}
}
-JDWP::JdwpError Dbg::InvokeMethod(JDWP::ObjectId thread_id, JDWP::ObjectId object_id,
- JDWP::RefTypeId class_id, JDWP::MethodId method_id,
- uint32_t arg_count, uint64_t* arg_values,
- JDWP::JdwpTag* arg_types, uint32_t options,
- JDWP::JdwpTag* pResultTag, uint64_t* pResultValue,
- JDWP::ObjectId* pExceptionId) {
- ThreadList* thread_list = Runtime::Current()->GetThreadList();
+JDWP::JdwpError Dbg::PrepareInvokeMethod(uint32_t request_id, JDWP::ObjectId thread_id,
+ JDWP::ObjectId object_id, JDWP::RefTypeId class_id,
+ JDWP::MethodId method_id, uint32_t arg_count,
+ uint64_t arg_values[], JDWP::JdwpTag* arg_types,
+ uint32_t options) {
+ Thread* const self = Thread::Current();
+ CHECK_EQ(self, GetDebugThread()) << "This must be called by the JDWP thread";
+ ThreadList* thread_list = Runtime::Current()->GetThreadList();
Thread* targetThread = nullptr;
- std::unique_ptr<DebugInvokeReq> req;
- Thread* self = Thread::Current();
{
ScopedObjectAccessUnchecked soa(self);
JDWP::JdwpError error;
@@ -3883,99 +3852,82 @@ JDWP::JdwpError Dbg::InvokeMethod(JDWP::ObjectId thread_id, JDWP::ObjectId objec
}
// Allocates a DebugInvokeReq.
- req.reset(new (std::nothrow) DebugInvokeReq(receiver, c, m, options, arg_values, arg_count));
- if (req.get() == nullptr) {
+ DebugInvokeReq* req = new (std::nothrow) DebugInvokeReq(request_id, thread_id, receiver, c, m,
+ options, arg_values, arg_count);
+ if (req == nullptr) {
LOG(ERROR) << "Failed to allocate DebugInvokeReq";
return JDWP::ERR_OUT_OF_MEMORY;
}
- // Attach the DebugInvokeReq to the target thread so it executes the method when
- // it is resumed. Once the invocation completes, it will detach it and signal us
- // before suspending itself.
- targetThread->SetDebugInvokeReq(req.get());
+ // Attaches the DebugInvokeReq to the target thread so it executes the method when
+ // it is resumed. Once the invocation completes, the target thread will delete it before
+ // suspending itself (see ThreadList::SuspendSelfForDebugger).
+ targetThread->SetDebugInvokeReq(req);
}
// The fact that we've released the thread list lock is a bit risky --- if the thread goes
- // away we're sitting high and dry -- but we must release this before the ResumeAllThreads
- // call, and it's unwise to hold it during WaitForSuspend.
-
- {
- /*
- * We change our (JDWP thread) status, which should be THREAD_RUNNING,
- * so we can suspend for a GC if the invoke request causes us to
- * run out of memory. It's also a good idea to change it before locking
- * the invokeReq mutex, although that should never be held for long.
- */
- self->TransitionFromRunnableToSuspended(kWaitingForDebuggerSend);
-
- VLOG(jdwp) << " Transferring control to event thread";
- {
- MutexLock mu(self, req->lock);
-
- if ((options & JDWP::INVOKE_SINGLE_THREADED) == 0) {
- VLOG(jdwp) << " Resuming all threads";
- thread_list->UndoDebuggerSuspensions();
- } else {
- VLOG(jdwp) << " Resuming event thread only";
- thread_list->Resume(targetThread, true);
- }
-
- // The target thread is resumed but needs the JDWP token we're holding.
- // We release it now and will acquire it again when the invocation is
- // complete and the target thread suspends itself.
- gJdwpState->ReleaseJdwpTokenForCommand();
-
- // Wait for the request to finish executing.
- while (targetThread->GetInvokeReq() != nullptr) {
- req->cond.Wait(self);
- }
- }
- VLOG(jdwp) << " Control has returned from event thread";
-
- /* wait for thread to re-suspend itself */
- SuspendThread(thread_id, false /* request_suspension */);
-
- // Now the thread is suspended again, we can re-acquire the JDWP token.
- gJdwpState->AcquireJdwpTokenForCommand();
-
- self->TransitionFromSuspendedToRunnable();
- }
+ // away we're sitting high and dry -- but we must release this before the UndoDebuggerSuspensions
+ // call.
- /*
- * Suspend the threads. We waited for the target thread to suspend
- * itself, so all we need to do is suspend the others.
- *
- * The SuspendAllForDebugger() call will double-suspend the event thread,
- * so we want to resume the target thread once to keep the books straight.
- */
if ((options & JDWP::INVOKE_SINGLE_THREADED) == 0) {
- self->TransitionFromRunnableToSuspended(kWaitingForDebuggerSuspension);
- VLOG(jdwp) << " Suspending all threads";
- thread_list->SuspendAllForDebugger();
- self->TransitionFromSuspendedToRunnable();
- VLOG(jdwp) << " Resuming event thread to balance the count";
+ VLOG(jdwp) << " Resuming all threads";
+ thread_list->UndoDebuggerSuspensions();
+ } else {
+ VLOG(jdwp) << " Resuming event thread only";
thread_list->Resume(targetThread, true);
}
- // Copy the result.
- *pResultTag = req->result_tag;
- *pResultValue = req->result_value;
- *pExceptionId = req->exception;
- return req->error;
+ return JDWP::ERR_NONE;
}
void Dbg::ExecuteMethod(DebugInvokeReq* pReq) {
- ScopedObjectAccess soa(Thread::Current());
+ Thread* const self = Thread::Current();
+ CHECK_NE(self, GetDebugThread()) << "This must be called by the event thread";
+
+ ScopedObjectAccess soa(self);
// We can be called while an exception is pending. We need
// to preserve that across the method invocation.
- StackHandleScope<3> hs(soa.Self());
- auto old_exception = hs.NewHandle<mirror::Throwable>(soa.Self()->GetException());
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::Throwable> old_exception = hs.NewHandle(soa.Self()->GetException());
soa.Self()->ClearException();
+ // Execute the method, then send the reply to the debugger.
+ ExecuteMethodWithoutPendingException(soa, pReq);
+
+ // If an exception was pending before the invoke, restore it now.
+ if (old_exception.Get() != nullptr) {
+ soa.Self()->SetException(old_exception.Get());
+ }
+}
+
+// Helper function: write a variable-width value into the output buffer.
+static void WriteValue(JDWP::ExpandBuf* pReply, int width, uint64_t value) {
+ switch (width) {
+ case 1:
+ expandBufAdd1(pReply, value);
+ break;
+ case 2:
+ expandBufAdd2BE(pReply, value);
+ break;
+ case 4:
+ expandBufAdd4BE(pReply, value);
+ break;
+ case 8:
+ expandBufAdd8BE(pReply, value);
+ break;
+ default:
+ LOG(FATAL) << width;
+ UNREACHABLE();
+ }
+}
+
+void Dbg::ExecuteMethodWithoutPendingException(ScopedObjectAccess& soa, DebugInvokeReq* pReq) {
+ soa.Self()->AssertNoPendingException();
+
// Translate the method through the vtable, unless the debugger wants to suppress it.
- auto* m = pReq->method;
- auto image_pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ ArtMethod* m = pReq->method;
+ size_t image_pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
if ((pReq->options & JDWP::INVOKE_NONVIRTUAL) == 0 && pReq->receiver.Read() != nullptr) {
ArtMethod* actual_method =
pReq->klass.Read()->FindVirtualMethodForVirtualOrInterface(m, image_pointer_size);
@@ -3992,39 +3944,133 @@ void Dbg::ExecuteMethod(DebugInvokeReq* pReq) {
CHECK_EQ(sizeof(jvalue), sizeof(uint64_t));
+ // Invoke the method.
ScopedLocalRef<jobject> ref(soa.Env(), soa.AddLocalReference<jobject>(pReq->receiver.Read()));
JValue result = InvokeWithJValues(soa, ref.get(), soa.EncodeMethod(m),
- reinterpret_cast<jvalue*>(pReq->arg_values));
+ reinterpret_cast<jvalue*>(pReq->arg_values.get()));
- pReq->result_tag = BasicTagFromDescriptor(m->GetShorty());
- const bool is_object_result = (pReq->result_tag == JDWP::JT_OBJECT);
+ // Prepare JDWP ids for the reply.
+ JDWP::JdwpTag result_tag = BasicTagFromDescriptor(m->GetShorty());
+ const bool is_object_result = (result_tag == JDWP::JT_OBJECT);
+ StackHandleScope<2> hs(soa.Self());
Handle<mirror::Object> object_result = hs.NewHandle(is_object_result ? result.GetL() : nullptr);
Handle<mirror::Throwable> exception = hs.NewHandle(soa.Self()->GetException());
soa.Self()->ClearException();
- pReq->exception = gRegistry->Add(exception);
- if (pReq->exception != 0) {
+
+ if (!IsDebuggerActive()) {
+ // The debugger detached: we must not re-suspend threads. We also don't need to fill the reply
+ // because it won't be sent either.
+ return;
+ }
+
+ JDWP::ObjectId exceptionObjectId = gRegistry->Add(exception);
+ uint64_t result_value = 0;
+ if (exceptionObjectId != 0) {
VLOG(jdwp) << " JDWP invocation returning with exception=" << exception.Get()
<< " " << exception->Dump();
- pReq->result_value = 0;
+ result_value = 0;
} else if (is_object_result) {
- /* if no exception thrown, examine object result more closely */
+ /* if no exception was thrown, examine object result more closely */
JDWP::JdwpTag new_tag = TagFromObject(soa, object_result.Get());
- if (new_tag != pReq->result_tag) {
- VLOG(jdwp) << " JDWP promoted result from " << pReq->result_tag << " to " << new_tag;
- pReq->result_tag = new_tag;
+ if (new_tag != result_tag) {
+ VLOG(jdwp) << " JDWP promoted result from " << result_tag << " to " << new_tag;
+ result_tag = new_tag;
}
// Register the object in the registry and reference its ObjectId. This ensures
// GC safety and prevents from accessing stale reference if the object is moved.
- pReq->result_value = gRegistry->Add(object_result.Get());
+ result_value = gRegistry->Add(object_result.Get());
} else {
// Primitive result.
- DCHECK(IsPrimitiveTag(pReq->result_tag));
- pReq->result_value = result.GetJ();
+ DCHECK(IsPrimitiveTag(result_tag));
+ result_value = result.GetJ();
+ }
+ const bool is_constructor = m->IsConstructor() && !m->IsStatic();
+ if (is_constructor) {
+ // If we invoked a constructor (which actually returns void), return the receiver,
+ // unless we threw, in which case we return null.
+ result_tag = JDWP::JT_OBJECT;
+ if (exceptionObjectId == 0) {
+ // TODO we could keep the receiver ObjectId in the DebugInvokeReq to avoid looking into the
+ // object registry.
+ result_value = GetObjectRegistry()->Add(pReq->receiver.Read());
+ } else {
+ result_value = 0;
+ }
}
- if (old_exception.Get() != nullptr) {
- soa.Self()->SetException(old_exception.Get());
+ // Suspend other threads if the invoke is not single-threaded.
+ if ((pReq->options & JDWP::INVOKE_SINGLE_THREADED) == 0) {
+ soa.Self()->TransitionFromRunnableToSuspended(kWaitingForDebuggerSuspension);
+ VLOG(jdwp) << " Suspending all threads";
+ Runtime::Current()->GetThreadList()->SuspendAllForDebugger();
+ soa.Self()->TransitionFromSuspendedToRunnable();
+ }
+
+ VLOG(jdwp) << " --> returned " << result_tag
+ << StringPrintf(" %#" PRIx64 " (except=%#" PRIx64 ")", result_value,
+ exceptionObjectId);
+
+ // Show detailed debug output.
+ if (result_tag == JDWP::JT_STRING && exceptionObjectId == 0) {
+ if (result_value != 0) {
+ if (VLOG_IS_ON(jdwp)) {
+ std::string result_string;
+ JDWP::JdwpError error = Dbg::StringToUtf8(result_value, &result_string);
+ CHECK_EQ(error, JDWP::ERR_NONE);
+ VLOG(jdwp) << " string '" << result_string << "'";
+ }
+ } else {
+ VLOG(jdwp) << " string (null)";
+ }
+ }
+
+ // Attach the reply to DebugInvokeReq so it can be sent to the debugger when the event thread
+ // is ready to suspend.
+ BuildInvokeReply(pReq->reply, pReq->request_id, result_tag, result_value, exceptionObjectId);
+}
+
+void Dbg::BuildInvokeReply(JDWP::ExpandBuf* pReply, uint32_t request_id, JDWP::JdwpTag result_tag,
+ uint64_t result_value, JDWP::ObjectId exception) {
+ // Make room for the JDWP header since we do not know the size of the reply yet.
+ JDWP::expandBufAddSpace(pReply, kJDWPHeaderLen);
+
+ size_t width = GetTagWidth(result_tag);
+ JDWP::expandBufAdd1(pReply, result_tag);
+ if (width != 0) {
+ WriteValue(pReply, width, result_value);
+ }
+ JDWP::expandBufAdd1(pReply, JDWP::JT_OBJECT);
+ JDWP::expandBufAddObjectId(pReply, exception);
+
+ // Now that we know the size, we can complete the JDWP header.
+ uint8_t* buf = expandBufGetBuffer(pReply);
+ JDWP::Set4BE(buf + kJDWPHeaderSizeOffset, expandBufGetLength(pReply));
+ JDWP::Set4BE(buf + kJDWPHeaderIdOffset, request_id);
+ JDWP::Set1(buf + kJDWPHeaderFlagsOffset, kJDWPFlagReply); // flags
+ JDWP::Set2BE(buf + kJDWPHeaderErrorCodeOffset, JDWP::ERR_NONE);
+}
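// Hedged sketch of the reply framing patched in above, assuming the standard JDWP packet
// layout (the kJDWPHeader* constants come from jdwp_priv.h): a fixed header followed by
// the invoke payload (result tag and value, then the exception tag and ObjectId).
struct JdwpReplyHeaderLayout {  // hypothetical, for illustration only; on the wire it is packed big-endian
  uint32_t length;      // total packet size, including this header
  uint32_t id;          // echoes the request id so the debugger can match the reply
  uint8_t flags;        // kJDWPFlagReply (0x80) marks a reply packet
  uint16_t error_code;  // JDWP::ERR_NONE when the invoke itself completed
};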
+
+void Dbg::FinishInvokeMethod(DebugInvokeReq* pReq) {
+ CHECK_NE(Thread::Current(), GetDebugThread()) << "This must be called by the event thread";
+
+ JDWP::ExpandBuf* const pReply = pReq->reply;
+ CHECK(pReply != nullptr) << "No reply attached to DebugInvokeReq";
+
+ // We need to prevent other threads (including the JDWP thread) from interacting with the
+ // debugger while we send the reply but are not yet suspended. The JDWP token will be released
+ // just before we suspend ourselves again (see ThreadList::SuspendSelfForDebugger).
+ gJdwpState->AcquireJdwpTokenForEvent(pReq->thread_id);
+
+ // Send the reply unless the debugger detached before the completion of the method.
+ if (IsDebuggerActive()) {
+ const size_t replyDataLength = expandBufGetLength(pReply) - kJDWPHeaderLen;
+ VLOG(jdwp) << StringPrintf("REPLY INVOKE id=0x%06x (length=%zu)",
+ pReq->request_id, replyDataLength);
+
+ gJdwpState->SendRequest(pReply);
+ } else {
+ VLOG(jdwp) << "Not sending invoke reply because debugger detached";
}
}
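// Hedged summary of the split invoke flow after this change (method names as defined above):
//   JDWP thread:  PrepareInvokeMethod() attaches a DebugInvokeReq to the target thread,
//                 then resumes it (or all threads) and returns to the event loop.
//   event thread: ExecuteMethod() runs the method and fills req->reply via BuildInvokeReply();
//                 FinishInvokeMethod() acquires the JDWP token and sends the reply; the request
//                 is deleted when the thread suspends itself again
//                 (see ThreadList::SuspendSelfForDebugger).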
@@ -4665,177 +4711,41 @@ void Dbg::DdmSendHeapSegments(bool native) {
Dbg::DdmSendChunk(native ? CHUNK_TYPE("NHEN") : CHUNK_TYPE("HPEN"), sizeof(heap_id), heap_id);
}
-static size_t GetAllocTrackerMax() {
-#ifdef HAVE_ANDROID_OS
- // Check whether there's a system property overriding the number of records.
- const char* propertyName = "dalvik.vm.allocTrackerMax";
- char allocRecordMaxString[PROPERTY_VALUE_MAX];
- if (property_get(propertyName, allocRecordMaxString, "") > 0) {
- char* end;
- size_t value = strtoul(allocRecordMaxString, &end, 10);
- if (*end != '\0') {
- LOG(ERROR) << "Ignoring " << propertyName << " '" << allocRecordMaxString
- << "' --- invalid";
- return kDefaultNumAllocRecords;
- }
- if (!IsPowerOfTwo(value)) {
- LOG(ERROR) << "Ignoring " << propertyName << " '" << allocRecordMaxString
- << "' --- not power of two";
- return kDefaultNumAllocRecords;
- }
- return value;
- }
-#endif
- return kDefaultNumAllocRecords;
-}
-
void Dbg::SetAllocTrackingEnabled(bool enable) {
- Thread* self = Thread::Current();
- if (enable) {
- {
- MutexLock mu(self, *Locks::alloc_tracker_lock_);
- if (recent_allocation_records_ != nullptr) {
- return; // Already enabled, bail.
- }
- alloc_record_max_ = GetAllocTrackerMax();
- LOG(INFO) << "Enabling alloc tracker (" << alloc_record_max_ << " entries of "
- << kMaxAllocRecordStackDepth << " frames, taking "
- << PrettySize(sizeof(AllocRecord) * alloc_record_max_) << ")";
- DCHECK_EQ(alloc_record_head_, 0U);
- DCHECK_EQ(alloc_record_count_, 0U);
- recent_allocation_records_ = new AllocRecord[alloc_record_max_];
- CHECK(recent_allocation_records_ != nullptr);
- }
- Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
- } else {
- {
- ScopedObjectAccess soa(self); // For type_cache_.Clear();
- MutexLock mu(self, *Locks::alloc_tracker_lock_);
- if (recent_allocation_records_ == nullptr) {
- return; // Already disabled, bail.
- }
- LOG(INFO) << "Disabling alloc tracker";
- delete[] recent_allocation_records_;
- recent_allocation_records_ = nullptr;
- alloc_record_head_ = 0;
- alloc_record_count_ = 0;
- type_cache_.Clear();
- }
- // If an allocation comes in before we uninstrument, we will safely drop it on the floor.
- Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints();
- }
-}
-
-struct AllocRecordStackVisitor : public StackVisitor {
- AllocRecordStackVisitor(Thread* thread, AllocRecord* record_in)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
- record(record_in),
- depth(0) {}
-
- // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
- // annotalysis.
- bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
- if (depth >= kMaxAllocRecordStackDepth) {
- return false;
- }
- ArtMethod* m = GetMethod();
- if (!m->IsRuntimeMethod()) {
- record->StackElement(depth)->SetMethod(m);
- record->StackElement(depth)->SetDexPc(GetDexPc());
- ++depth;
- }
- return true;
- }
-
- ~AllocRecordStackVisitor() {
- // Clear out any unused stack trace elements.
- for (; depth < kMaxAllocRecordStackDepth; ++depth) {
- record->StackElement(depth)->SetMethod(nullptr);
- record->StackElement(depth)->SetDexPc(0);
- }
- }
-
- AllocRecord* record;
- size_t depth;
-};
-
-void Dbg::RecordAllocation(Thread* self, mirror::Class* type, size_t byte_count) {
- MutexLock mu(self, *Locks::alloc_tracker_lock_);
- if (recent_allocation_records_ == nullptr) {
- // In the process of shutting down recording, bail.
- return;
- }
-
- // Advance and clip.
- if (++alloc_record_head_ == alloc_record_max_) {
- alloc_record_head_ = 0;
- }
-
- // Fill in the basics.
- AllocRecord* record = &recent_allocation_records_[alloc_record_head_];
- record->SetType(type);
- record->SetByteCount(byte_count);
- record->SetThinLockId(self->GetThreadId());
-
- // Fill in the stack trace.
- AllocRecordStackVisitor visitor(self, record);
- visitor.WalkStack();
-
- if (alloc_record_count_ < alloc_record_max_) {
- ++alloc_record_count_;
- }
-}
-
-// Returns the index of the head element.
-//
-// We point at the most-recently-written record, so if alloc_record_count_ is 1
-// we want to use the current element. Take "head+1" and subtract count
-// from it.
-//
-// We need to handle underflow in our circular buffer, so we add
-// alloc_record_max_ and then mask it back down.
-size_t Dbg::HeadIndex() {
- return (Dbg::alloc_record_head_ + 1 + Dbg::alloc_record_max_ - Dbg::alloc_record_count_) &
- (Dbg::alloc_record_max_ - 1);
+ gc::AllocRecordObjectMap::SetAllocTrackingEnabled(enable);
}
void Dbg::DumpRecentAllocations() {
ScopedObjectAccess soa(Thread::Current());
MutexLock mu(soa.Self(), *Locks::alloc_tracker_lock_);
- if (recent_allocation_records_ == nullptr) {
+ if (!Runtime::Current()->GetHeap()->IsAllocTrackingEnabled()) {
LOG(INFO) << "Not recording tracked allocations";
return;
}
+ gc::AllocRecordObjectMap* records = Runtime::Current()->GetHeap()->GetAllocationRecords();
+ CHECK(records != nullptr);
- // "i" is the head of the list. We want to start at the end of the
- // list and move forward to the tail.
- size_t i = HeadIndex();
- const uint16_t capped_count = CappedAllocRecordCount(Dbg::alloc_record_count_);
+ const uint16_t capped_count = CappedAllocRecordCount(records->Size());
uint16_t count = capped_count;
- LOG(INFO) << "Tracked allocations, (head=" << alloc_record_head_ << " count=" << count << ")";
- while (count--) {
- AllocRecord* record = &recent_allocation_records_[i];
+ LOG(INFO) << "Tracked allocations, (count=" << count << ")";
+ for (auto it = records->RBegin(), end = records->REnd();
+ count > 0 && it != end; count--, it++) {
+ const gc::AllocRecord* record = it->second;
- LOG(INFO) << StringPrintf(" Thread %-2d %6zd bytes ", record->ThinLockId(), record->ByteCount())
- << PrettyClass(record->Type());
+ LOG(INFO) << StringPrintf(" Thread %-2d %6zd bytes ", record->GetTid(), record->ByteCount())
+ << PrettyClass(it->first.Read()->GetClass());
- for (size_t stack_frame = 0; stack_frame < kMaxAllocRecordStackDepth; ++stack_frame) {
- AllocRecordStackTraceElement* stack_element = record->StackElement(stack_frame);
- ArtMethod* m = stack_element->Method();
- if (m == nullptr) {
- break;
- }
- LOG(INFO) << " " << PrettyMethod(m) << " line " << stack_element->LineNumber();
+ for (size_t stack_frame = 0, depth = record->GetDepth(); stack_frame < depth; ++stack_frame) {
+ const gc::AllocRecordStackTraceElement& stack_element = record->StackElement(stack_frame);
+ ArtMethod* m = stack_element.GetMethod();
+ LOG(INFO) << " " << PrettyMethod(m) << " line " << stack_element.ComputeLineNumber();
}
// pause periodically to help logcat catch up
if ((count % 5) == 0) {
usleep(40000);
}
-
- i = (i + 1) & (alloc_record_max_ - 1);
}
}
@@ -4937,6 +4847,15 @@ jbyteArray Dbg::GetRecentAllocations() {
std::vector<uint8_t> bytes;
{
MutexLock mu(self, *Locks::alloc_tracker_lock_);
+ gc::AllocRecordObjectMap* records = Runtime::Current()->GetHeap()->GetAllocationRecords();
+ // In case this method is called when the allocation tracker is disabled,
+ // we should still send some data back.
+ gc::AllocRecordObjectMap dummy;
+ if (records == nullptr) {
+ CHECK(!Runtime::Current()->GetHeap()->IsAllocTrackingEnabled());
+ records = &dummy;
+ }
+
//
// Part 1: generate string tables.
//
@@ -4944,26 +4863,23 @@ jbyteArray Dbg::GetRecentAllocations() {
StringTable method_names;
StringTable filenames;
- const uint16_t capped_count = CappedAllocRecordCount(Dbg::alloc_record_count_);
+ const uint16_t capped_count = CappedAllocRecordCount(records->Size());
uint16_t count = capped_count;
- size_t idx = HeadIndex();
- while (count--) {
- AllocRecord* record = &recent_allocation_records_[idx];
+ for (auto it = records->RBegin(), end = records->REnd();
+ count > 0 && it != end; count--, it++) {
+ const gc::AllocRecord* record = it->second;
std::string temp;
- class_names.Add(record->Type()->GetDescriptor(&temp));
- for (size_t i = 0; i < kMaxAllocRecordStackDepth; i++) {
- ArtMethod* m = record->StackElement(i)->Method();
- if (m != nullptr) {
- class_names.Add(m->GetDeclaringClassDescriptor());
- method_names.Add(m->GetName());
- filenames.Add(GetMethodSourceFile(m));
- }
+ class_names.Add(it->first.Read()->GetClass()->GetDescriptor(&temp));
+ for (size_t i = 0, depth = record->GetDepth(); i < depth; i++) {
+ ArtMethod* m = record->StackElement(i).GetMethod();
+ class_names.Add(m->GetDeclaringClassDescriptor());
+ method_names.Add(m->GetName());
+ filenames.Add(GetMethodSourceFile(m));
}
-
- idx = (idx + 1) & (alloc_record_max_ - 1);
}
- LOG(INFO) << "allocation records: " << capped_count;
+ LOG(INFO) << "recent allocation records: " << capped_count;
+ LOG(INFO) << "allocation records all objects: " << records->Size();
//
// Part 2: Generate the output and store it in the buffer.
@@ -4991,20 +4907,23 @@ jbyteArray Dbg::GetRecentAllocations() {
JDWP::Append2BE(bytes, method_names.Size());
JDWP::Append2BE(bytes, filenames.Size());
- idx = HeadIndex();
std::string temp;
- for (count = capped_count; count != 0; --count) {
+ count = capped_count;
+ // The last "count" allocation records in "records" are the most recent "count" allocations.
+ // Iterate in reverse to get them; the most recent allocation is sent first.
+ for (auto it = records->RBegin(), end = records->REnd();
+ count > 0 && it != end; count--, it++) {
// For each entry:
// (4b) total allocation size
// (2b) thread id
// (2b) allocated object's class name index
// (1b) stack depth
- AllocRecord* record = &recent_allocation_records_[idx];
+ const gc::AllocRecord* record = it->second;
size_t stack_depth = record->GetDepth();
size_t allocated_object_class_name_index =
- class_names.IndexOf(record->Type()->GetDescriptor(&temp));
+ class_names.IndexOf(it->first.Read()->GetClass()->GetDescriptor(&temp));
JDWP::Append4BE(bytes, record->ByteCount());
- JDWP::Append2BE(bytes, record->ThinLockId());
+ JDWP::Append2BE(bytes, static_cast<uint16_t>(record->GetTid()));
JDWP::Append2BE(bytes, allocated_object_class_name_index);
JDWP::Append1BE(bytes, stack_depth);
@@ -5014,16 +4933,15 @@ jbyteArray Dbg::GetRecentAllocations() {
// (2b) method name
// (2b) method source file
// (2b) line number, clipped to 32767; -2 if native; -1 if no source
- ArtMethod* m = record->StackElement(stack_frame)->Method();
+ ArtMethod* m = record->StackElement(stack_frame).GetMethod();
size_t class_name_index = class_names.IndexOf(m->GetDeclaringClassDescriptor());
size_t method_name_index = method_names.IndexOf(m->GetName());
size_t file_name_index = filenames.IndexOf(GetMethodSourceFile(m));
JDWP::Append2BE(bytes, class_name_index);
JDWP::Append2BE(bytes, method_name_index);
JDWP::Append2BE(bytes, file_name_index);
- JDWP::Append2BE(bytes, record->StackElement(stack_frame)->LineNumber());
+ JDWP::Append2BE(bytes, record->StackElement(stack_frame).ComputeLineNumber());
}
- idx = (idx + 1) & (alloc_record_max_ - 1);
}
// (xb) class name strings