Diffstat (limited to 'runtime/thread.cc')
-rw-r--r--  runtime/thread.cc  306
1 file changed, 107 insertions, 199 deletions
diff --git a/runtime/thread.cc b/runtime/thread.cc
index e8326ea982..9751076235 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -108,6 +108,12 @@ void Thread::InitTlsEntryPoints() {
&quick_entrypoints_);
}
+void ResetQuickAllocEntryPoints(QuickEntryPoints* qpoints);
+
+void Thread::ResetQuickAllocEntryPointsForThread() {
+ ResetQuickAllocEntryPoints(&quick_entrypoints_);
+}
+
void Thread::SetDeoptimizationShadowFrame(ShadowFrame* sf) {
deoptimization_shadow_frame_ = sf;
}
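
Note: this first hunk gives each thread a hook for repointing its own allocation entrypoints when allocation instrumentation is toggled, rather than rebuilding a global table. As a rough illustration of the idea only (not ART's actual QuickEntryPoints layout; every name below is hypothetical), a per-thread table of function pointers can be reset to either the fast or the instrumented allocator:

#include <cstddef>
#include <new>

// Hypothetical stand-in for a thread-local quick entrypoint table.
struct SketchQuickEntryPoints {
  void* (*alloc_object)(size_t size);
};

static void* AllocFast(size_t size) {
  return ::operator new(size);  // plain allocation path
}

static void* AllocInstrumented(size_t size) {
  // An instrumented path would record the allocation before servicing it.
  return ::operator new(size);
}

// Repoint one thread's table at whichever allocator is currently selected,
// mirroring what a per-thread ResetQuickAllocEntryPoints-style hook enables.
static void ResetAllocEntryPoints(SketchQuickEntryPoints* qpoints, bool instrumented) {
  qpoints->alloc_object = instrumented ? AllocInstrumented : AllocFast;
}
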
@@ -305,7 +311,7 @@ void Thread::Init(ThreadList* thread_list, JavaVMExt* java_vm) {
CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, this), "attach self");
DCHECK_EQ(Thread::Current(), this);
- thin_lock_id_ = thread_list->AllocThreadId(this);
+ thin_lock_thread_id_ = thread_list->AllocThreadId(this);
InitStackHwm();
jni_env_ = new JNIEnvExt(this, java_vm);
@@ -470,9 +476,9 @@ void Thread::InitStackHwm() {
void Thread::ShortDump(std::ostream& os) const {
os << "Thread[";
- if (GetThinLockId() != 0) {
+ if (GetThreadId() != 0) {
// If we're in kStarting, we won't have a thin lock id or tid yet.
- os << GetThinLockId()
+ os << GetThreadId()
<< ",tid=" << GetTid() << ',';
}
os << GetState()
@@ -568,18 +574,32 @@ void Thread::RunCheckpointFunction() {
ATRACE_BEGIN("Checkpoint function");
checkpoint_function_->Run(this);
ATRACE_END();
+ checkpoint_function_ = NULL;
+ AtomicClearFlag(kCheckpointRequest);
}
bool Thread::RequestCheckpoint(Closure* function) {
- CHECK(!ReadFlag(kCheckpointRequest)) << "Already have a pending checkpoint request";
- checkpoint_function_ = function;
union StateAndFlags old_state_and_flags = state_and_flags_;
+ if (old_state_and_flags.as_struct.state != kRunnable) {
+ return false; // Fail, thread is suspended and so can't run a checkpoint.
+ }
+ if ((old_state_and_flags.as_struct.flags & kCheckpointRequest) != 0) {
+ return false; // Fail, already a checkpoint pending.
+ }
+ CHECK(checkpoint_function_ == NULL);
+ checkpoint_function_ = function;
+ // Checkpoint function installed; now install the flag bit.
// We must be runnable to request a checkpoint.
old_state_and_flags.as_struct.state = kRunnable;
union StateAndFlags new_state_and_flags = old_state_and_flags;
new_state_and_flags.as_struct.flags |= kCheckpointRequest;
int succeeded = android_atomic_cmpxchg(old_state_and_flags.as_int, new_state_and_flags.as_int,
&state_and_flags_.as_int);
+ if (UNLIKELY(succeeded != 0)) {
+ // The thread changed state before the checkpoint was installed.
+ CHECK(checkpoint_function_ == function);
+ checkpoint_function_ = NULL;
+ }
return succeeded == 0;
}
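
Note: the rewritten RequestCheckpoint stores the closure first and only then publishes kCheckpointRequest with a compare-and-swap on the packed state-and-flags word; if the target thread leaves the runnable state in the meantime, the CAS fails and the closure is rolled back, so the caller knows the checkpoint will not run. A minimal sketch of that install-then-CAS-then-rollback pattern, using std::atomic instead of ART's StateAndFlags union and android_atomic_cmpxchg (all names hypothetical):

#include <atomic>
#include <cstdint>

enum SketchState : uint32_t { kSketchRunnable = 0, kSketchSuspended = 1 };
constexpr uint32_t kSketchCheckpointRequest = 1u << 16;

struct SketchThread {
  // State in the low bits, request flags in the high bits, packed into one
  // word so state and flags can be examined and swapped atomically.
  std::atomic<uint32_t> state_and_flags{kSketchRunnable};
  void (*checkpoint_function)(SketchThread* self) = nullptr;

  bool RequestCheckpoint(void (*function)(SketchThread*)) {
    uint32_t old_val = state_and_flags.load(std::memory_order_relaxed);
    if ((old_val & 0xffffu) != kSketchRunnable) {
      return false;  // Only a runnable thread will poll the flag and run the closure.
    }
    if ((old_val & kSketchCheckpointRequest) != 0) {
      return false;  // A checkpoint is already pending.
    }
    checkpoint_function = function;  // Install the closure first...
    uint32_t new_val = old_val | kSketchCheckpointRequest;  // ...then publish the flag.
    if (!state_and_flags.compare_exchange_strong(old_val, new_val)) {
      checkpoint_function = nullptr;  // The thread changed state first; roll back.
      return false;
    }
    return true;
  }
};
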
@@ -594,88 +614,6 @@ void Thread::FullSuspendCheck() {
VLOG(threads) << this << " self-reviving";
}
-Thread* Thread::SuspendForDebugger(jobject peer, bool request_suspension, bool* timed_out) {
- static const useconds_t kTimeoutUs = 30 * 1000000; // 30s.
- useconds_t total_delay_us = 0;
- useconds_t delay_us = 0;
- bool did_suspend_request = false;
- *timed_out = false;
- while (true) {
- Thread* thread;
- {
- ScopedObjectAccess soa(Thread::Current());
- Thread* self = soa.Self();
- MutexLock mu(self, *Locks::thread_list_lock_);
- thread = Thread::FromManagedThread(soa, peer);
- if (thread == NULL) {
- JNIEnv* env = self->GetJniEnv();
- ScopedLocalRef<jstring> scoped_name_string(env,
- (jstring)env->GetObjectField(peer,
- WellKnownClasses::java_lang_Thread_name));
- ScopedUtfChars scoped_name_chars(env, scoped_name_string.get());
- if (scoped_name_chars.c_str() == NULL) {
- LOG(WARNING) << "No such thread for suspend: " << peer;
- env->ExceptionClear();
- } else {
- LOG(WARNING) << "No such thread for suspend: " << peer << ":" << scoped_name_chars.c_str();
- }
-
- return NULL;
- }
- {
- MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_);
- if (request_suspension) {
- thread->ModifySuspendCount(soa.Self(), +1, true /* for_debugger */);
- request_suspension = false;
- did_suspend_request = true;
- }
- // IsSuspended on the current thread will fail as the current thread is changed into
- // Runnable above. As the suspend count is now raised if this is the current thread
- // it will self suspend on transition to Runnable, making it hard to work with. It's simpler
- // to just explicitly handle the current thread in the callers to this code.
- CHECK_NE(thread, soa.Self()) << "Attempt to suspend the current thread for the debugger";
- // If thread is suspended (perhaps it was already not Runnable but didn't have a suspend
- // count, or else we've waited and it has self suspended) or is the current thread, we're
- // done.
- if (thread->IsSuspended()) {
- return thread;
- }
- if (total_delay_us >= kTimeoutUs) {
- LOG(ERROR) << "Thread suspension timed out: " << peer;
- if (did_suspend_request) {
- thread->ModifySuspendCount(soa.Self(), -1, true /* for_debugger */);
- }
- *timed_out = true;
- return NULL;
- }
- }
- // Release locks and come out of runnable state.
- }
- for (int i = kLockLevelCount - 1; i >= 0; --i) {
- BaseMutex* held_mutex = Thread::Current()->GetHeldMutex(static_cast<LockLevel>(i));
- if (held_mutex != NULL) {
- LOG(FATAL) << "Holding " << held_mutex->GetName()
- << " while sleeping for thread suspension";
- }
- }
- {
- useconds_t new_delay_us = delay_us * 2;
- CHECK_GE(new_delay_us, delay_us);
- if (new_delay_us < 500000) { // Don't allow sleeping to be more than 0.5s.
- delay_us = new_delay_us;
- }
- }
- if (delay_us == 0) {
- sched_yield();
- // Default to 1 milliseconds (note that this gets multiplied by 2 before the first sleep).
- delay_us = 500;
- } else {
- usleep(delay_us);
- total_delay_us += delay_us;
- }
- }
-}
-
void Thread::DumpState(std::ostream& os, const Thread* thread, pid_t tid) {
std::string group_name;
int priority;
@@ -712,7 +650,7 @@ void Thread::DumpState(std::ostream& os, const Thread* thread, pid_t tid) {
os << " daemon";
}
os << " prio=" << priority
- << " tid=" << thread->GetThinLockId()
+ << " tid=" << thread->GetThreadId()
<< " " << thread->GetState();
if (thread->IsStillStarting()) {
os << " (still starting up)";
@@ -962,9 +900,9 @@ Thread::Thread(bool daemon)
jpeer_(NULL),
stack_begin_(NULL),
stack_size_(0),
+ thin_lock_thread_id_(0),
stack_trace_sample_(NULL),
trace_clock_base_(0),
- thin_lock_id_(0),
tid_(0),
wait_mutex_(new Mutex("a thread wait mutex")),
wait_cond_(new ConditionVariable("a thread wait condition variable", *wait_mutex_)),
@@ -1012,9 +950,10 @@ void Thread::AssertNoPendingException() const {
}
}
-static void MonitorExitVisitor(const mirror::Object* object, void* arg) NO_THREAD_SAFETY_ANALYSIS {
+static mirror::Object* MonitorExitVisitor(mirror::Object* object, void* arg)
+ NO_THREAD_SAFETY_ANALYSIS {
Thread* self = reinterpret_cast<Thread*>(arg);
- mirror::Object* entered_monitor = const_cast<mirror::Object*>(object);
+ mirror::Object* entered_monitor = object;
if (self->HoldsLock(entered_monitor)) {
LOG(WARNING) << "Calling MonitorExit on object "
<< object << " (" << PrettyTypeOf(object) << ")"
@@ -1022,6 +961,7 @@ static void MonitorExitVisitor(const mirror::Object* object, void* arg) NO_THREAD_SAFETY_ANALYSIS
<< *Thread::Current() << " which is detaching";
entered_monitor->MonitorExit(self);
}
+ return object;
}
void Thread::Destroy() {
@@ -1151,8 +1091,12 @@ void Thread::SirtVisitRoots(RootVisitor* visitor, void* arg) {
size_t num_refs = cur->NumberOfReferences();
for (size_t j = 0; j < num_refs; j++) {
mirror::Object* object = cur->GetReference(j);
- if (object != NULL) {
- visitor(object, arg);
+ if (object != nullptr) {
+ const mirror::Object* new_obj = visitor(object, arg);
+ DCHECK(new_obj != nullptr);
+ if (new_obj != object) {
+ cur->SetReference(j, const_cast<mirror::Object*>(new_obj));
+ }
}
}
}
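
Note: this hunk, together with the parallel changes to the shadow-frame, quick-frame and thread-field visitors below, reflects a root visitor that now returns the object's (possibly new) address instead of returning void. A moving collector can therefore forward a root, and whoever owns the slot writes the updated pointer back. A minimal sketch of that write-back pattern, with hypothetical names standing in for mirror::Object and ART's RootVisitor:

#include <cstddef>

struct SketchObject {};  // stand-in for mirror::Object

// The visitor may return a different address if the collector moved the object.
using SketchRootVisitor = SketchObject* (*)(SketchObject* root, void* arg);

// Visit every slot of a reference table, storing back any forwarded addresses.
static void VisitRootSlots(SketchObject** slots, size_t count,
                           SketchRootVisitor visitor, void* arg) {
  for (size_t i = 0; i < count; ++i) {
    SketchObject* ref = slots[i];
    if (ref != nullptr) {
      SketchObject* new_ref = visitor(ref, arg);
      if (new_ref != ref) {
        slots[i] = new_ref;  // The object moved; update the slot in place.
      }
    }
  }
}
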
@@ -1381,24 +1325,23 @@ jobjectArray Thread::InternalStackTraceToStackTraceElementArray(JNIEnv* env, job
// Transition into runnable state to work on Object*/Array*
ScopedObjectAccess soa(env);
// Decode the internal stack trace into the depth, method trace and PC trace
- mirror::ObjectArray<mirror::Object>* method_trace =
- soa.Decode<mirror::ObjectArray<mirror::Object>*>(internal);
- int32_t depth = method_trace->GetLength() - 1;
- mirror::IntArray* pc_trace = down_cast<mirror::IntArray*>(method_trace->Get(depth));
+ int32_t depth = soa.Decode<mirror::ObjectArray<mirror::Object>*>(internal)->GetLength() - 1;
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
jobjectArray result;
- mirror::ObjectArray<mirror::StackTraceElement>* java_traces;
+
if (output_array != NULL) {
// Reuse the array we were given.
result = output_array;
- java_traces = soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>*>(output_array);
// ...adjusting the number of frames we'll write to not exceed the array length.
- depth = std::min(depth, java_traces->GetLength());
+ const int32_t traces_length =
+ soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>*>(result)->GetLength();
+ depth = std::min(depth, traces_length);
} else {
// Create java_trace array and place in local reference table
- java_traces = class_linker->AllocStackTraceElementArray(soa.Self(), depth);
+ mirror::ObjectArray<mirror::StackTraceElement>* java_traces =
+ class_linker->AllocStackTraceElementArray(soa.Self(), depth);
if (java_traces == NULL) {
return NULL;
}
@@ -1411,9 +1354,12 @@ jobjectArray Thread::InternalStackTraceToStackTraceElementArray(JNIEnv* env, job
MethodHelper mh;
for (int32_t i = 0; i < depth; ++i) {
+ mirror::ObjectArray<mirror::Object>* method_trace =
+ soa.Decode<mirror::ObjectArray<mirror::Object>*>(internal);
// Prepare parameters for StackTraceElement(String cls, String method, String file, int line)
mirror::ArtMethod* method = down_cast<mirror::ArtMethod*>(method_trace->Get(i));
mh.ChangeMethod(method);
+ mirror::IntArray* pc_trace = down_cast<mirror::IntArray*>(method_trace->Get(depth));
uint32_t dex_pc = pc_trace->Get(i);
int32_t line_number = mh.GetLineNumFromDexPC(dex_pc);
// Allocate element, potentially triggering GC
@@ -1436,8 +1382,9 @@ jobjectArray Thread::InternalStackTraceToStackTraceElementArray(JNIEnv* env, job
return NULL;
}
const char* source_file = mh.GetDeclaringClassSourceFile();
- SirtRef<mirror::String> source_name_object(soa.Self(), mirror::String::AllocFromModifiedUtf8(soa.Self(),
- source_file));
+ SirtRef<mirror::String> source_name_object(soa.Self(),
+ mirror::String::AllocFromModifiedUtf8(soa.Self(),
+ source_file));
mirror::StackTraceElement* obj = mirror::StackTraceElement::Alloc(soa.Self(),
class_name_object.get(),
method_name_object.get(),
@@ -1446,13 +1393,7 @@ jobjectArray Thread::InternalStackTraceToStackTraceElementArray(JNIEnv* env, job
if (obj == NULL) {
return NULL;
}
-#ifdef MOVING_GARBAGE_COLLECTOR
- // Re-read after potential GC
- java_traces = Decode<ObjectArray<Object>*>(soa.Env(), result);
- method_trace = down_cast<ObjectArray<Object>*>(Decode<Object*>(soa.Env(), internal));
- pc_trace = down_cast<IntArray*>(method_trace->Get(depth));
-#endif
- java_traces->Set(i, obj);
+ soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>*>(result)->Set(i, obj);
}
return result;
}
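
Note: the stack-trace hunks above stop caching decoded method_trace, pc_trace and java_traces pointers across calls that can allocate, and instead re-decode them from their JNI references on every use; the old MOVING_GARBAGE_COLLECTOR re-read block becomes unnecessary because no raw pointer is ever held across a point where a moving collection could run. A toy sketch of that discipline, with a hypothetical handle table playing the role of the JNI indirect reference table:

#include <cstddef>
#include <vector>

struct SketchObject { int payload = 0; };

// A stable handle table: a moving collector would update entries in place,
// so decoding through a handle always yields the object's current address.
struct SketchHandleTable {
  std::vector<SketchObject*> entries;
  SketchObject* Decode(size_t handle) const { return entries[handle]; }
};

// Any allocation is a point where objects may be moved (simplified here).
static SketchObject* AllocateMayMove(SketchHandleTable& table) {
  (void)table;  // a real collector could relocate objects and fix up entries here
  return new SketchObject();
}

static void BuildElements(SketchHandleTable& table, size_t trace_handle, size_t depth) {
  for (size_t i = 0; i < depth; ++i) {
    // Re-decode each iteration instead of caching a raw pointer across the
    // allocation below; a cached pointer could be stale after objects move.
    SketchObject* trace = table.Decode(trace_handle);
    (void)trace;  // ... read the i-th frame out of trace ...
    SketchObject* element = AllocateMayMove(table);
    delete element;  // sketch only; a real runtime would store it into the result array
  }
}
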
@@ -1614,6 +1555,7 @@ static const EntryPointInfo gThreadEntryPointInfo[] = {
INTERPRETER_ENTRY_POINT_INFO(pInterpreterToInterpreterBridge),
INTERPRETER_ENTRY_POINT_INFO(pInterpreterToCompiledCodeBridge),
JNI_ENTRY_POINT_INFO(pDlsymLookup),
+ PORTABLE_ENTRY_POINT_INFO(pPortableImtConflictTrampoline),
PORTABLE_ENTRY_POINT_INFO(pPortableResolutionTrampoline),
PORTABLE_ENTRY_POINT_INFO(pPortableToInterpreterBridge),
QUICK_ENTRY_POINT_INFO(pAllocArray),
@@ -1623,7 +1565,6 @@ static const EntryPointInfo gThreadEntryPointInfo[] = {
QUICK_ENTRY_POINT_INFO(pCheckAndAllocArray),
QUICK_ENTRY_POINT_INFO(pCheckAndAllocArrayWithAccessCheck),
QUICK_ENTRY_POINT_INFO(pInstanceofNonTrivial),
- QUICK_ENTRY_POINT_INFO(pCanPutArrayElement),
QUICK_ENTRY_POINT_INFO(pCheckCast),
QUICK_ENTRY_POINT_INFO(pInitializeStaticStorage),
QUICK_ENTRY_POINT_INFO(pInitializeTypeAndVerifyAccess),
@@ -1641,6 +1582,9 @@ static const EntryPointInfo gThreadEntryPointInfo[] = {
QUICK_ENTRY_POINT_INFO(pGet64Static),
QUICK_ENTRY_POINT_INFO(pGetObjInstance),
QUICK_ENTRY_POINT_INFO(pGetObjStatic),
+ QUICK_ENTRY_POINT_INFO(pAputObjectWithNullAndBoundCheck),
+ QUICK_ENTRY_POINT_INFO(pAputObjectWithBoundCheck),
+ QUICK_ENTRY_POINT_INFO(pAputObject),
QUICK_ENTRY_POINT_INFO(pHandleFillArrayData),
QUICK_ENTRY_POINT_INFO(pJniMethodStart),
QUICK_ENTRY_POINT_INFO(pJniMethodStartSynchronized),
@@ -1665,7 +1609,7 @@ static const EntryPointInfo gThreadEntryPointInfo[] = {
QUICK_ENTRY_POINT_INFO(pD2l),
QUICK_ENTRY_POINT_INFO(pF2l),
QUICK_ENTRY_POINT_INFO(pLdiv),
- QUICK_ENTRY_POINT_INFO(pLdivmod),
+ QUICK_ENTRY_POINT_INFO(pLmod),
QUICK_ENTRY_POINT_INFO(pLmul),
QUICK_ENTRY_POINT_INFO(pShlLong),
QUICK_ENTRY_POINT_INFO(pShrLong),
@@ -1674,10 +1618,10 @@ static const EntryPointInfo gThreadEntryPointInfo[] = {
QUICK_ENTRY_POINT_INFO(pMemcmp16),
QUICK_ENTRY_POINT_INFO(pStringCompareTo),
QUICK_ENTRY_POINT_INFO(pMemcpy),
+ QUICK_ENTRY_POINT_INFO(pQuickImtConflictTrampoline),
QUICK_ENTRY_POINT_INFO(pQuickResolutionTrampoline),
QUICK_ENTRY_POINT_INFO(pQuickToInterpreterBridge),
QUICK_ENTRY_POINT_INFO(pInvokeDirectTrampolineWithAccessCheck),
- QUICK_ENTRY_POINT_INFO(pInvokeInterfaceTrampoline),
QUICK_ENTRY_POINT_INFO(pInvokeInterfaceTrampolineWithAccessCheck),
QUICK_ENTRY_POINT_INFO(pInvokeStaticTrampolineWithAccessCheck),
QUICK_ENTRY_POINT_INFO(pInvokeSuperTrampolineWithAccessCheck),
@@ -1709,7 +1653,7 @@ void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset, size_t size_of_
DO_THREAD_OFFSET(self_);
DO_THREAD_OFFSET(stack_end_);
DO_THREAD_OFFSET(suspend_count_);
- DO_THREAD_OFFSET(thin_lock_id_);
+ DO_THREAD_OFFSET(thin_lock_thread_id_);
// DO_THREAD_OFFSET(top_of_managed_stack_);
// DO_THREAD_OFFSET(top_of_managed_stack_pc_);
DO_THREAD_OFFSET(top_sirt_);
@@ -1992,7 +1936,7 @@ bool Thread::HoldsLock(mirror::Object* object) {
if (object == NULL) {
return false;
}
- return object->GetThinLockId() == thin_lock_id_;
+ return object->GetLockOwnerThreadId() == thin_lock_thread_id_;
}
// RootVisitor parameters are: (const Object* obj, size_t vreg, const StackVisitor* visitor).
@@ -2016,8 +1960,11 @@ class ReferenceMapVisitor : public StackVisitor {
// SIRT for JNI or References for interpreter.
for (size_t reg = 0; reg < num_regs; ++reg) {
mirror::Object* ref = shadow_frame->GetVRegReference(reg);
- if (ref != NULL) {
- visitor_(ref, reg, this);
+ if (ref != nullptr) {
+ mirror::Object* new_ref = visitor_(ref, reg, this);
+ if (new_ref != ref) {
+ shadow_frame->SetVRegReference(reg, new_ref);
+ }
}
}
} else {
@@ -2037,8 +1984,11 @@ class ReferenceMapVisitor : public StackVisitor {
for (size_t reg = 0; reg < num_regs; ++reg) {
if (TestBitmap(reg, reg_bitmap)) {
mirror::Object* ref = shadow_frame->GetVRegReference(reg);
- if (ref != NULL) {
- visitor_(ref, reg, this);
+ if (ref != nullptr) {
+ mirror::Object* new_ref = visitor_(ref, reg, this);
+ if (new_ref != ref) {
+ shadow_frame->SetVRegReference(reg, new_ref);
+ }
}
}
}
@@ -2069,19 +2019,25 @@ class ReferenceMapVisitor : public StackVisitor {
// Does this register hold a reference?
if (TestBitmap(reg, reg_bitmap)) {
uint32_t vmap_offset;
- mirror::Object* ref;
if (vmap_table.IsInContext(reg, kReferenceVReg, &vmap_offset)) {
- uintptr_t val = GetGPR(vmap_table.ComputeRegister(core_spills, vmap_offset,
- kReferenceVReg));
- ref = reinterpret_cast<mirror::Object*>(val);
+ int vmap_reg = vmap_table.ComputeRegister(core_spills, vmap_offset, kReferenceVReg);
+ mirror::Object* ref = reinterpret_cast<mirror::Object*>(GetGPR(vmap_reg));
+ if (ref != nullptr) {
+ mirror::Object* new_ref = visitor_(ref, reg, this);
+ if (ref != new_ref) {
+ SetGPR(vmap_reg, reinterpret_cast<uintptr_t>(new_ref));
+ }
+ }
} else {
- ref = reinterpret_cast<mirror::Object*>(GetVReg(cur_quick_frame, code_item,
- core_spills, fp_spills, frame_size,
- reg));
- }
-
- if (ref != NULL) {
- visitor_(ref, reg, this);
+ uint32_t* reg_addr =
+ GetVRegAddr(cur_quick_frame, code_item, core_spills, fp_spills, frame_size, reg);
+ mirror::Object* ref = reinterpret_cast<mirror::Object*>(*reg_addr);
+ if (ref != nullptr) {
+ mirror::Object* new_ref = visitor_(ref, reg, this);
+ if (ref != new_ref) {
+ *reg_addr = reinterpret_cast<uint32_t>(new_ref);
+ }
+ }
}
}
}
@@ -2107,8 +2063,8 @@ class RootCallbackVisitor {
public:
RootCallbackVisitor(RootVisitor* visitor, void* arg) : visitor_(visitor), arg_(arg) {}
- void operator()(const mirror::Object* obj, size_t, const StackVisitor*) const {
- visitor_(obj, arg_);
+ mirror::Object* operator()(mirror::Object* obj, size_t, const StackVisitor*) const {
+ return visitor_(obj, arg_);
}
private:
@@ -2132,67 +2088,17 @@ class VerifyCallbackVisitor {
void* const arg_;
};
-struct VerifyRootWrapperArg {
- VerifyRootVisitor* visitor;
- void* arg;
-};
-
-static void VerifyRootWrapperCallback(const mirror::Object* root, void* arg) {
- VerifyRootWrapperArg* wrapperArg = reinterpret_cast<VerifyRootWrapperArg*>(arg);
- wrapperArg->visitor(root, wrapperArg->arg, 0, NULL);
-}
-
-void Thread::VerifyRoots(VerifyRootVisitor* visitor, void* arg) {
- // We need to map from a RootVisitor to VerifyRootVisitor, so pass in nulls for arguments we
- // don't have.
- VerifyRootWrapperArg wrapperArg;
- wrapperArg.arg = arg;
- wrapperArg.visitor = visitor;
-
- if (opeer_ != NULL) {
- VerifyRootWrapperCallback(opeer_, &wrapperArg);
- }
- if (exception_ != NULL) {
- VerifyRootWrapperCallback(exception_, &wrapperArg);
- }
- throw_location_.VisitRoots(VerifyRootWrapperCallback, &wrapperArg);
- if (class_loader_override_ != NULL) {
- VerifyRootWrapperCallback(class_loader_override_, &wrapperArg);
- }
- jni_env_->locals.VisitRoots(VerifyRootWrapperCallback, &wrapperArg);
- jni_env_->monitors.VisitRoots(VerifyRootWrapperCallback, &wrapperArg);
-
- SirtVisitRoots(VerifyRootWrapperCallback, &wrapperArg);
-
- // Visit roots on this thread's stack
- Context* context = GetLongJumpContext();
- VerifyCallbackVisitor visitorToCallback(visitor, arg);
- ReferenceMapVisitor<VerifyCallbackVisitor> mapper(this, context, visitorToCallback);
- mapper.WalkStack();
- ReleaseLongJumpContext(context);
-
- std::deque<instrumentation::InstrumentationStackFrame>* instrumentation_stack = GetInstrumentationStack();
- typedef std::deque<instrumentation::InstrumentationStackFrame>::const_iterator It;
- for (It it = instrumentation_stack->begin(), end = instrumentation_stack->end(); it != end; ++it) {
- mirror::Object* this_object = (*it).this_object_;
- if (this_object != NULL) {
- VerifyRootWrapperCallback(this_object, &wrapperArg);
- }
- mirror::ArtMethod* method = (*it).method_;
- VerifyRootWrapperCallback(method, &wrapperArg);
- }
-}
-
void Thread::VisitRoots(RootVisitor* visitor, void* arg) {
- if (opeer_ != NULL) {
- visitor(opeer_, arg);
+ if (opeer_ != nullptr) {
+ opeer_ = visitor(opeer_, arg);
}
- if (exception_ != NULL) {
- visitor(exception_, arg);
+ if (exception_ != nullptr) {
+ exception_ = reinterpret_cast<mirror::Throwable*>(visitor(exception_, arg));
}
throw_location_.VisitRoots(visitor, arg);
- if (class_loader_override_ != NULL) {
- visitor(class_loader_override_, arg);
+ if (class_loader_override_ != nullptr) {
+ class_loader_override_ = reinterpret_cast<mirror::ClassLoader*>(
+ visitor(class_loader_override_, arg));
}
jni_env_->locals.VisitRoots(visitor, arg);
jni_env_->monitors.VisitRoots(visitor, arg);
@@ -2206,24 +2112,26 @@ void Thread::VisitRoots(RootVisitor* visitor, void* arg) {
mapper.WalkStack();
ReleaseLongJumpContext(context);
- for (const instrumentation::InstrumentationStackFrame& frame : *GetInstrumentationStack()) {
- mirror::Object* this_object = frame.this_object_;
- if (this_object != NULL) {
- visitor(this_object, arg);
+ for (instrumentation::InstrumentationStackFrame& frame : *GetInstrumentationStack()) {
+ if (frame.this_object_ != nullptr) {
+ frame.this_object_ = visitor(frame.this_object_, arg);
+ DCHECK(frame.this_object_ != nullptr);
}
- mirror::ArtMethod* method = frame.method_;
- visitor(method, arg);
+ frame.method_ = reinterpret_cast<mirror::ArtMethod*>(visitor(frame.method_, arg));
+ DCHECK(frame.method_ != nullptr);
}
}
-static void VerifyObject(const mirror::Object* root, void* arg) {
- gc::Heap* heap = reinterpret_cast<gc::Heap*>(arg);
- heap->VerifyObject(root);
+static mirror::Object* VerifyRoot(mirror::Object* root, void* arg) {
+ DCHECK(root != nullptr);
+ DCHECK(arg != nullptr);
+ reinterpret_cast<gc::Heap*>(arg)->VerifyObject(root);
+ return root;
}
void Thread::VerifyStackImpl() {
UniquePtr<Context> context(Context::Create());
- RootCallbackVisitor visitorToCallback(VerifyObject, Runtime::Current()->GetHeap());
+ RootCallbackVisitor visitorToCallback(VerifyRoot, Runtime::Current()->GetHeap());
ReferenceMapVisitor<RootCallbackVisitor> mapper(this, context.get(), visitorToCallback);
mapper.WalkStack();
}