path: root/runtime/thread.cc
Diffstat (limited to 'runtime/thread.cc')
-rw-r--r--  runtime/thread.cc  845
1 file changed, 524 insertions(+), 321 deletions(-)
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 78ba26dec0..6b1934c86e 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -23,12 +23,6 @@
#include <sys/resource.h>
#include <sys/time.h>
-#if __has_feature(hwaddress_sanitizer)
-#include <sanitizer/hwasan_interface.h>
-#else
-#define __hwasan_tag_pointer(p, t) (p)
-#endif
-
#include <algorithm>
#include <atomic>
#include <bitset>
@@ -41,6 +35,8 @@
#include "android-base/stringprintf.h"
#include "android-base/strings.h"
+#include "unwindstack/AndroidUnwinder.h"
+
#include "arch/context-inl.h"
#include "arch/context.h"
#include "art_field-inl.h"
@@ -75,6 +71,7 @@
#include "handle_scope-inl.h"
#include "indirect_reference_table-inl.h"
#include "instrumentation.h"
+#include "intern_table.h"
#include "interpreter/interpreter.h"
#include "interpreter/shadow_frame-inl.h"
#include "java_frame_root_info.h"
@@ -84,6 +81,7 @@
#include "mirror/class_loader.h"
#include "mirror/object_array-alloc-inl.h"
#include "mirror/object_array-inl.h"
+#include "mirror/stack_frame_info.h"
#include "mirror/stack_trace_element.h"
#include "monitor.h"
#include "monitor_objects_stack_visitor.h"
@@ -110,9 +108,10 @@
#include "stack_map.h"
#include "thread-inl.h"
#include "thread_list.h"
+#include "trace.h"
#include "verifier/method_verifier.h"
#include "verify_object.h"
-#include "well_known_classes.h"
+#include "well_known_classes-inl.h"
#if ART_USE_FUTEXES
#include "linux/futex.h"
@@ -125,12 +124,15 @@
#pragma clang diagnostic push
#pragma clang diagnostic error "-Wconversion"
+extern "C" __attribute__((weak)) void* __hwasan_tag_pointer(const volatile void* p,
+ unsigned char tag);
+
namespace art {
using android::base::StringAppendV;
using android::base::StringPrintf;
-extern "C" NO_RETURN void artDeoptimize(Thread* self);
+extern "C" NO_RETURN void artDeoptimize(Thread* self, bool skip_method_exit_callbacks);
bool Thread::is_started_ = false;
pthread_key_t Thread::pthread_key_self_;
@@ -166,7 +168,7 @@ void InitEntryPoints(JniEntryPoints* jpoints,
void UpdateReadBarrierEntrypoints(QuickEntryPoints* qpoints, bool is_active);
void Thread::SetIsGcMarkingAndUpdateEntrypoints(bool is_marking) {
- CHECK(kUseReadBarrier);
+ CHECK(gUseReadBarrier);
tls32_.is_gc_marking = is_marking;
UpdateReadBarrierEntrypoints(&tlsPtr_.quick_entrypoints, /* is_active= */ is_marking);
}
@@ -210,7 +212,9 @@ class DeoptimizationContextRecord {
JValue GetReturnValue() const { return ret_val_; }
bool IsReference() const { return is_reference_; }
bool GetFromCode() const { return from_code_; }
- ObjPtr<mirror::Throwable> GetPendingException() const { return pending_exception_; }
+ ObjPtr<mirror::Throwable> GetPendingException() const REQUIRES_SHARED(Locks::mutator_lock_) {
+ return pending_exception_;
+ }
DeoptimizationContextRecord* GetLink() const { return link_; }
mirror::Object** GetReturnValueAsGCRoot() {
DCHECK(is_reference_);
@@ -272,6 +276,7 @@ void Thread::PushDeoptimizationContext(const JValue& return_value,
ObjPtr<mirror::Throwable> exception,
bool from_code,
DeoptimizationMethodType method_type) {
+ DCHECK(exception != Thread::GetDeoptimizationException());
DeoptimizationContextRecord* record = new DeoptimizationContextRecord(
return_value,
is_reference,
@@ -433,15 +438,18 @@ void Thread::PushStackedShadowFrame(ShadowFrame* sf, StackedShadowFrameType type
tlsPtr_.stacked_shadow_frame_record = record;
}
-ShadowFrame* Thread::PopStackedShadowFrame(StackedShadowFrameType type, bool must_be_present) {
+ShadowFrame* Thread::MaybePopDeoptimizedStackedShadowFrame() {
StackedShadowFrameRecord* record = tlsPtr_.stacked_shadow_frame_record;
- if (must_be_present) {
- DCHECK(record != nullptr);
- } else {
- if (record == nullptr || record->GetType() != type) {
- return nullptr;
- }
+ if (record == nullptr ||
+ record->GetType() != StackedShadowFrameType::kDeoptimizationShadowFrame) {
+ return nullptr;
}
+ return PopStackedShadowFrame();
+}
+
+ShadowFrame* Thread::PopStackedShadowFrame() {
+ StackedShadowFrameRecord* record = tlsPtr_.stacked_shadow_frame_record;
+ DCHECK_NE(record, nullptr);
tlsPtr_.stacked_shadow_frame_record = record->GetLink();
ShadowFrame* shadow_frame = record->GetShadowFrame();
delete record;
@@ -531,7 +539,7 @@ ShadowFrame* Thread::FindOrCreateDebuggerShadowFrame(size_t frame_id,
return shadow_frame;
}
VLOG(deopt) << "Create pre-deopted ShadowFrame for " << ArtMethod::PrettyMethod(method);
- shadow_frame = ShadowFrame::CreateDeoptimizedFrame(num_vregs, nullptr, method, dex_pc);
+ shadow_frame = ShadowFrame::CreateDeoptimizedFrame(num_vregs, method, dex_pc);
FrameIdToShadowFrame* record = FrameIdToShadowFrame::Create(frame_id,
shadow_frame,
tlsPtr_.frame_id_to_shadow_frame,
@@ -601,6 +609,10 @@ void Thread::DeleteJPeer(JNIEnv* env) {
env->DeleteGlobalRef(old_jpeer);
}
+void* Thread::CreateCallbackWithUffdGc(void* arg) {
+ return Thread::CreateCallback(arg);
+}
+
void* Thread::CreateCallback(void* arg) {
Thread* self = reinterpret_cast<Thread*>(arg);
Runtime* runtime = Runtime::Current();
@@ -634,7 +646,7 @@ void* Thread::CreateCallback(void* arg) {
self->DeleteJPeer(self->GetJniEnv());
self->SetThreadName(self->GetThreadName()->ToModifiedUtf8().c_str());
- ArtField* priorityField = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_priority);
+ ArtField* priorityField = WellKnownClasses::java_lang_Thread_priority;
self->SetNativePriority(priorityField->GetInt(self->tlsPtr_.opeer));
runtime->GetRuntimeCallbacks()->ThreadStart(self);
@@ -642,8 +654,7 @@ void* Thread::CreateCallback(void* arg) {
// Unpark ourselves if the java peer was unparked before it started (see
// b/28845097#comment49 for more information)
- ArtField* unparkedField = jni::DecodeArtField(
- WellKnownClasses::java_lang_Thread_unparkedBeforeStart);
+ ArtField* unparkedField = WellKnownClasses::java_lang_Thread_unparkedBeforeStart;
bool should_unpark = false;
{
// Hold the lock here, so that if another thread calls unpark before the thread starts
@@ -657,19 +668,17 @@ void* Thread::CreateCallback(void* arg) {
}
// Invoke the 'run' method of our java.lang.Thread.
ObjPtr<mirror::Object> receiver = self->tlsPtr_.opeer;
- jmethodID mid = WellKnownClasses::java_lang_Thread_run;
- ScopedLocalRef<jobject> ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
- InvokeVirtualOrInterfaceWithJValues(soa, ref.get(), mid, nullptr);
+ WellKnownClasses::java_lang_Thread_run->InvokeVirtual<'V'>(self, receiver);
}
// Detach and delete self.
- Runtime::Current()->GetThreadList()->Unregister(self);
+ Runtime::Current()->GetThreadList()->Unregister(self, /* should_run_callbacks= */ true);
return nullptr;
}
Thread* Thread::FromManagedThread(const ScopedObjectAccessAlreadyRunnable& soa,
ObjPtr<mirror::Object> thread_peer) {
- ArtField* f = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_nativePeer);
+ ArtField* f = WellKnownClasses::java_lang_Thread_nativePeer;
Thread* result = reinterpret_cast64<Thread*>(f->GetLong(thread_peer));
// Check that if we have a result it is either suspended or we hold the thread_list_lock_
// to stop it from going away.
@@ -786,7 +795,7 @@ void Thread::InstallImplicitProtection() {
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wframe-larger-than="
NO_INLINE
- static void Touch(uintptr_t target) {
+ __attribute__((no_sanitize("memtag"))) static void Touch(uintptr_t target) {
volatile size_t zero = 0;
// Use a large local volatile array to ensure a large frame size. Do not use anything close
// to a full page for ASAN. It would be nice to ensure the frame size is at most a page, but
@@ -803,7 +812,8 @@ void Thread::InstallImplicitProtection() {
volatile char space[kPageSize - (kAsanMultiplier * 256)] __attribute__((uninitialized));
char sink ATTRIBUTE_UNUSED = space[zero]; // NOLINT
// Remove tag from the pointer. Nop in non-hwasan builds.
- uintptr_t addr = reinterpret_cast<uintptr_t>(__hwasan_tag_pointer(space, 0));
+ uintptr_t addr = reinterpret_cast<uintptr_t>(
+ __hwasan_tag_pointer != nullptr ? __hwasan_tag_pointer(space, 0) : space);
if (addr >= target + kPageSize) {
Touch(target);
}
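
(Illustrative sketch, not part of the patch.) The hunks above replace the hwasan_interface.h include with a weak declaration of __hwasan_tag_pointer and a null check at the call site, so the same binary works whether or not the HWASan runtime is linked in. A minimal standalone example of that weak-symbol fallback pattern, compilable with clang or gcc on Linux:

#include <cstdio>

// Resolves to the sanitizer's implementation when linked, or to nullptr otherwise.
extern "C" __attribute__((weak)) void* __hwasan_tag_pointer(const volatile void* p,
                                                            unsigned char tag);

static void* StripTag(void* p) {
  // Call the hook only if the HWASan runtime actually provided it.
  return __hwasan_tag_pointer != nullptr ? __hwasan_tag_pointer(p, 0) : p;
}

int main() {
  int local = 0;
  std::printf("untagged address: %p\n", StripTag(&local));
  return 0;
}
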
@@ -827,6 +837,22 @@ void Thread::InstallImplicitProtection() {
madvise(pregion, unwanted_size, MADV_DONTNEED);
}
+template <bool kSupportTransaction>
+static void SetNativePeer(ObjPtr<mirror::Object> java_peer, Thread* thread)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ ArtField* field = WellKnownClasses::java_lang_Thread_nativePeer;
+ if (kSupportTransaction && Runtime::Current()->IsActiveTransaction()) {
+ field->SetLong</*kTransactionActive=*/ true>(java_peer, reinterpret_cast<jlong>(thread));
+ } else {
+ field->SetLong</*kTransactionActive=*/ false>(java_peer, reinterpret_cast<jlong>(thread));
+ }
+}
+
+static void SetNativePeer(JNIEnv* env, jobject java_peer, Thread* thread) {
+ ScopedObjectAccess soa(env);
+ SetNativePeer</*kSupportTransaction=*/ false>(soa.Decode<mirror::Object>(java_peer), thread);
+}
+
void Thread::CreateNativeThread(JNIEnv* env, jobject java_peer, size_t stack_size, bool is_daemon) {
CHECK(java_peer != nullptr);
Thread* self = static_cast<JNIEnvExt*>(env)->GetSelf();
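
(Illustrative sketch, not part of the patch; Record, SetValue and the free IsActiveTransaction are hypothetical stand-ins.) The SetNativePeer helpers added above take a compile-time kSupportTransaction flag so that call sites which can never run inside a transaction skip the runtime check entirely. The same dispatch pattern in isolation:

#include <cstdio>

static bool IsActiveTransaction() { return false; }  // stand-in for Runtime::IsActiveTransaction()

struct Record {
  long value = 0;
  template <bool kTransactionActive>
  void SetValue(long v) {
    if (kTransactionActive) {
      std::printf("recording old value %ld for rollback\n", value);
    }
    value = v;
  }
};

template <bool kSupportTransaction>
void SetRecord(Record* rec, long v) {
  if (kSupportTransaction && IsActiveTransaction()) {
    rec->SetValue</*kTransactionActive=*/ true>(v);
  } else {
    rec->SetValue</*kTransactionActive=*/ false>(v);
  }
}

int main() {
  Record r;
  SetRecord</*kSupportTransaction=*/ false>(&r, 42);  // transaction check compiled out
  SetRecord</*kSupportTransaction=*/ true>(&r, 43);   // transaction check done at runtime
  std::printf("value = %ld\n", r.value);
  return 0;
}
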
@@ -834,7 +860,7 @@ void Thread::CreateNativeThread(JNIEnv* env, jobject java_peer, size_t stack_siz
if (VLOG_IS_ON(threads)) {
ScopedObjectAccess soa(env);
- ArtField* f = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_name);
+ ArtField* f = WellKnownClasses::java_lang_Thread_name;
ObjPtr<mirror::String> java_name =
f->GetObject(soa.Decode<mirror::Object>(java_peer))->AsString();
std::string thread_name;
@@ -873,8 +899,7 @@ void Thread::CreateNativeThread(JNIEnv* env, jobject java_peer, size_t stack_siz
// Thread.start is synchronized, so we know that nativePeer is 0, and know that we're not racing
// to assign it.
- env->SetLongField(java_peer, WellKnownClasses::java_lang_Thread_nativePeer,
- reinterpret_cast<jlong>(child_thread));
+ SetNativePeer(env, java_peer, child_thread);
// Try to allocate a JNIEnvExt for the thread. We do this here as we might be out of memory and
// do not have a good way to report this on the child's side.
@@ -893,7 +918,8 @@ void Thread::CreateNativeThread(JNIEnv* env, jobject java_peer, size_t stack_siz
CHECK_PTHREAD_CALL(pthread_attr_setstacksize, (&attr, stack_size), stack_size);
pthread_create_result = pthread_create(&new_pthread,
&attr,
- Thread::CreateCallback,
+ gUseUserfaultfd ? Thread::CreateCallbackWithUffdGc
+ : Thread::CreateCallback,
child_thread);
CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attr), "new thread");
@@ -918,7 +944,7 @@ void Thread::CreateNativeThread(JNIEnv* env, jobject java_peer, size_t stack_siz
delete child_thread;
child_thread = nullptr;
// TODO: remove from thread group?
- env->SetLongField(java_peer, WellKnownClasses::java_lang_Thread_nativePeer, 0);
+ SetNativePeer(env, java_peer, nullptr);
{
std::string msg(child_jni_env_ext.get() == nullptr ?
StringPrintf("Could not allocate JNI Env: %s", error_msg.c_str()) :
@@ -982,7 +1008,10 @@ bool Thread::Init(ThreadList* thread_list, JavaVMExt* java_vm, JNIEnvExt* jni_en
}
template <typename PeerAction>
-Thread* Thread::Attach(const char* thread_name, bool as_daemon, PeerAction peer_action) {
+Thread* Thread::Attach(const char* thread_name,
+ bool as_daemon,
+ PeerAction peer_action,
+ bool should_run_callbacks) {
Runtime* runtime = Runtime::Current();
ScopedTrace trace("Thread::Attach");
if (runtime == nullptr) {
@@ -1017,7 +1046,7 @@ Thread* Thread::Attach(const char* thread_name, bool as_daemon, PeerAction peer_
// Run the action that is acting on the peer.
if (!peer_action(self)) {
- runtime->GetThreadList()->Unregister(self);
+ runtime->GetThreadList()->Unregister(self, should_run_callbacks);
// Unregister deletes self, no need to do this here.
return nullptr;
}
@@ -1032,7 +1061,7 @@ Thread* Thread::Attach(const char* thread_name, bool as_daemon, PeerAction peer_
self->Dump(LOG_STREAM(INFO));
}
- {
+ if (should_run_callbacks) {
ScopedObjectAccess soa(self);
runtime->GetRuntimeCallbacks()->ThreadStart(self);
}
@@ -1043,7 +1072,8 @@ Thread* Thread::Attach(const char* thread_name, bool as_daemon, PeerAction peer_
Thread* Thread::Attach(const char* thread_name,
bool as_daemon,
jobject thread_group,
- bool create_peer) {
+ bool create_peer,
+ bool should_run_callbacks) {
auto create_peer_action = [&](Thread* self) {
// If we're the main thread, ClassLinker won't be created until after we're attached,
// so that thread needs a two-stage attach. Regular threads don't need this hack.
@@ -1076,67 +1106,58 @@ Thread* Thread::Attach(const char* thread_name,
}
return true;
};
- return Attach(thread_name, as_daemon, create_peer_action);
+ return Attach(thread_name, as_daemon, create_peer_action, should_run_callbacks);
}
Thread* Thread::Attach(const char* thread_name, bool as_daemon, jobject thread_peer) {
auto set_peer_action = [&](Thread* self) {
// Install the given peer.
- {
- DCHECK(self == Thread::Current());
- ScopedObjectAccess soa(self);
- self->tlsPtr_.opeer = soa.Decode<mirror::Object>(thread_peer).Ptr();
- }
- self->GetJniEnv()->SetLongField(thread_peer,
- WellKnownClasses::java_lang_Thread_nativePeer,
- reinterpret_cast64<jlong>(self));
+ DCHECK(self == Thread::Current());
+ ScopedObjectAccess soa(self);
+ ObjPtr<mirror::Object> peer = soa.Decode<mirror::Object>(thread_peer);
+ self->tlsPtr_.opeer = peer.Ptr();
+ SetNativePeer</*kSupportTransaction=*/ false>(peer, self);
return true;
};
- return Attach(thread_name, as_daemon, set_peer_action);
+ return Attach(thread_name, as_daemon, set_peer_action, /* should_run_callbacks= */ true);
}
void Thread::CreatePeer(const char* name, bool as_daemon, jobject thread_group) {
Runtime* runtime = Runtime::Current();
CHECK(runtime->IsStarted());
- JNIEnv* env = tlsPtr_.jni_env;
+ Thread* self = this;
+ DCHECK_EQ(self, Thread::Current());
- if (thread_group == nullptr) {
- thread_group = runtime->GetMainThreadGroup();
- }
- ScopedLocalRef<jobject> thread_name(env, env->NewStringUTF(name));
+ ScopedObjectAccess soa(self);
+ StackHandleScope<4u> hs(self);
+ DCHECK(WellKnownClasses::java_lang_ThreadGroup->IsInitialized());
+ Handle<mirror::Object> thr_group = hs.NewHandle(soa.Decode<mirror::Object>(
+ thread_group != nullptr ? thread_group : runtime->GetMainThreadGroup()));
+ Handle<mirror::String> thread_name = hs.NewHandle(
+ name != nullptr ? mirror::String::AllocFromModifiedUtf8(self, name) : nullptr);
// Add missing null check in case of OOM b/18297817
- if (name != nullptr && thread_name.get() == nullptr) {
- CHECK(IsExceptionPending());
+ if (name != nullptr && UNLIKELY(thread_name == nullptr)) {
+ CHECK(self->IsExceptionPending());
return;
}
jint thread_priority = GetNativePriority();
- jboolean thread_is_daemon = as_daemon;
- ScopedLocalRef<jobject> peer(env, env->AllocObject(WellKnownClasses::java_lang_Thread));
- if (peer.get() == nullptr) {
+ DCHECK(WellKnownClasses::java_lang_Thread->IsInitialized());
+ Handle<mirror::Object> peer =
+ hs.NewHandle(WellKnownClasses::java_lang_Thread->AllocObject(self));
+ if (UNLIKELY(peer == nullptr)) {
CHECK(IsExceptionPending());
return;
}
- {
- ScopedObjectAccess soa(this);
- tlsPtr_.opeer = soa.Decode<mirror::Object>(peer.get()).Ptr();
- }
- env->CallNonvirtualVoidMethod(peer.get(),
- WellKnownClasses::java_lang_Thread,
- WellKnownClasses::java_lang_Thread_init,
- thread_group, thread_name.get(), thread_priority, thread_is_daemon);
- if (IsExceptionPending()) {
+ tlsPtr_.opeer = peer.Get();
+ WellKnownClasses::java_lang_Thread_init->InvokeInstance<'V', 'L', 'L', 'I', 'Z'>(
+ self, peer.Get(), thr_group.Get(), thread_name.Get(), thread_priority, as_daemon);
+ if (self->IsExceptionPending()) {
return;
}
- Thread* self = this;
- DCHECK_EQ(self, Thread::Current());
- env->SetLongField(peer.get(),
- WellKnownClasses::java_lang_Thread_nativePeer,
- reinterpret_cast64<jlong>(self));
+ SetNativePeer</*kSupportTransaction=*/ false>(peer.Get(), self);
- ScopedObjectAccess soa(self);
- StackHandleScope<1> hs(self);
MutableHandle<mirror::String> peer_thread_name(hs.NewHandle(GetThreadName()));
if (peer_thread_name == nullptr) {
// The Thread constructor should have set the Thread.name to a
@@ -1144,18 +1165,16 @@ void Thread::CreatePeer(const char* name, bool as_daemon, jobject thread_group)
// available (in the compiler, in tests), we manually assign the
// fields the constructor should have set.
if (runtime->IsActiveTransaction()) {
- InitPeer<true>(soa,
- tlsPtr_.opeer,
- thread_is_daemon,
- thread_group,
- thread_name.get(),
+ InitPeer<true>(tlsPtr_.opeer,
+ as_daemon,
+ thr_group.Get(),
+ thread_name.Get(),
thread_priority);
} else {
- InitPeer<false>(soa,
- tlsPtr_.opeer,
- thread_is_daemon,
- thread_group,
- thread_name.get(),
+ InitPeer<false>(tlsPtr_.opeer,
+ as_daemon,
+ thr_group.Get(),
+ thread_name.Get(),
thread_priority);
}
peer_thread_name.Assign(GetThreadName());
@@ -1166,27 +1185,32 @@ void Thread::CreatePeer(const char* name, bool as_daemon, jobject thread_group)
}
}
-jobject Thread::CreateCompileTimePeer(JNIEnv* env,
- const char* name,
- bool as_daemon,
- jobject thread_group) {
+ObjPtr<mirror::Object> Thread::CreateCompileTimePeer(const char* name,
+ bool as_daemon,
+ jobject thread_group) {
Runtime* runtime = Runtime::Current();
CHECK(!runtime->IsStarted());
+ Thread* self = this;
+ DCHECK_EQ(self, Thread::Current());
- if (thread_group == nullptr) {
- thread_group = runtime->GetMainThreadGroup();
- }
- ScopedLocalRef<jobject> thread_name(env, env->NewStringUTF(name));
+ ScopedObjectAccessUnchecked soa(self);
+ StackHandleScope<3u> hs(self);
+ DCHECK(WellKnownClasses::java_lang_ThreadGroup->IsInitialized());
+ Handle<mirror::Object> thr_group = hs.NewHandle(soa.Decode<mirror::Object>(
+ thread_group != nullptr ? thread_group : runtime->GetMainThreadGroup()));
+ Handle<mirror::String> thread_name = hs.NewHandle(
+ name != nullptr ? mirror::String::AllocFromModifiedUtf8(self, name) : nullptr);
// Add missing null check in case of OOM b/18297817
- if (name != nullptr && thread_name.get() == nullptr) {
- CHECK(Thread::Current()->IsExceptionPending());
+ if (name != nullptr && UNLIKELY(thread_name == nullptr)) {
+ CHECK(self->IsExceptionPending());
return nullptr;
}
jint thread_priority = kNormThreadPriority; // Always normalize to NORM priority.
- jboolean thread_is_daemon = as_daemon;
- ScopedLocalRef<jobject> peer(env, env->AllocObject(WellKnownClasses::java_lang_Thread));
- if (peer.get() == nullptr) {
+ DCHECK(WellKnownClasses::java_lang_Thread->IsInitialized());
+ Handle<mirror::Object> peer = hs.NewHandle(
+ WellKnownClasses::java_lang_Thread->AllocObject(self));
+ if (peer == nullptr) {
CHECK(Thread::Current()->IsExceptionPending());
return nullptr;
}
@@ -1197,41 +1221,34 @@ jobject Thread::CreateCompileTimePeer(JNIEnv* env,
// non-null value. However, because we can run without code
// available (in the compiler, in tests), we manually assign the
// fields the constructor should have set.
- ScopedObjectAccessUnchecked soa(Thread::Current());
if (runtime->IsActiveTransaction()) {
- InitPeer<true>(soa,
- soa.Decode<mirror::Object>(peer.get()),
- thread_is_daemon,
- thread_group,
- thread_name.get(),
+ InitPeer<true>(peer.Get(),
+ as_daemon,
+ thr_group.Get(),
+ thread_name.Get(),
thread_priority);
} else {
- InitPeer<false>(soa,
- soa.Decode<mirror::Object>(peer.get()),
- thread_is_daemon,
- thread_group,
- thread_name.get(),
+ InitPeer<false>(peer.Get(),
+ as_daemon,
+ thr_group.Get(),
+ thread_name.Get(),
thread_priority);
}
- return peer.release();
+ return peer.Get();
}
template<bool kTransactionActive>
-void Thread::InitPeer(ScopedObjectAccessAlreadyRunnable& soa,
- ObjPtr<mirror::Object> peer,
- jboolean thread_is_daemon,
- jobject thread_group,
- jobject thread_name,
+void Thread::InitPeer(ObjPtr<mirror::Object> peer,
+ bool as_daemon,
+ ObjPtr<mirror::Object> thread_group,
+ ObjPtr<mirror::String> thread_name,
jint thread_priority) {
- jni::DecodeArtField(WellKnownClasses::java_lang_Thread_daemon)->
- SetBoolean<kTransactionActive>(peer, thread_is_daemon);
- jni::DecodeArtField(WellKnownClasses::java_lang_Thread_group)->
- SetObject<kTransactionActive>(peer, soa.Decode<mirror::Object>(thread_group));
- jni::DecodeArtField(WellKnownClasses::java_lang_Thread_name)->
- SetObject<kTransactionActive>(peer, soa.Decode<mirror::Object>(thread_name));
- jni::DecodeArtField(WellKnownClasses::java_lang_Thread_priority)->
- SetInt<kTransactionActive>(peer, thread_priority);
+ WellKnownClasses::java_lang_Thread_daemon->SetBoolean<kTransactionActive>(peer,
+ static_cast<uint8_t>(as_daemon ? 1u : 0u));
+ WellKnownClasses::java_lang_Thread_group->SetObject<kTransactionActive>(peer, thread_group);
+ WellKnownClasses::java_lang_Thread_name->SetObject<kTransactionActive>(peer, thread_name);
+ WellKnownClasses::java_lang_Thread_priority->SetInt<kTransactionActive>(peer, thread_priority);
}
void Thread::SetCachedThreadName(const char* name) {
@@ -1390,18 +1407,26 @@ void Thread::ShortDump(std::ostream& os) const {
tls32_.num_name_readers.fetch_sub(1 /* at least memory_order_release */);
}
-void Thread::Dump(std::ostream& os, bool dump_native_stack, BacktraceMap* backtrace_map,
- bool force_dump_stack) const {
+Thread::DumpOrder Thread::Dump(std::ostream& os,
+ bool dump_native_stack,
+ bool force_dump_stack) const {
+ DumpState(os);
+ return DumpStack(os, dump_native_stack, force_dump_stack);
+}
+
+Thread::DumpOrder Thread::Dump(std::ostream& os,
+ unwindstack::AndroidLocalUnwinder& unwinder,
+ bool dump_native_stack,
+ bool force_dump_stack) const {
DumpState(os);
- DumpStack(os, dump_native_stack, backtrace_map, force_dump_stack);
+ return DumpStack(os, unwinder, dump_native_stack, force_dump_stack);
}
ObjPtr<mirror::String> Thread::GetThreadName() const {
- ArtField* f = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_name);
if (tlsPtr_.opeer == nullptr) {
return nullptr;
}
- ObjPtr<mirror::Object> name = f->GetObject(tlsPtr_.opeer);
+ ObjPtr<mirror::Object> name = WellKnownClasses::java_lang_Thread_name->GetObject(tlsPtr_.opeer);
return name == nullptr ? nullptr : name->AsString();
}
@@ -1473,7 +1498,7 @@ bool Thread::ModifySuspendCountInternal(Thread* self,
return false;
}
- if (kUseReadBarrier && delta > 0 && this != self && tlsPtr_.flip_function != nullptr) {
+ if (delta > 0 && this != self && tlsPtr_.flip_function != nullptr) {
// Force retry of a suspend request if it's in the middle of a thread flip to avoid a
// deadlock. b/31683379.
return false;
@@ -1778,6 +1803,17 @@ bool Thread::RequestSynchronousCheckpoint(Closure* function, ThreadState suspend
sched_yield();
}
}
+ // Ensure that the flip function for this thread, if pending, is finished *before*
+ // the checkpoint function is run. Otherwise, we may end up with both `to' and 'from'
+ // space references on the stack, confusing the GC's thread-flip logic. The caller is
+ // runnable so can't have a pending flip function.
+ DCHECK_EQ(self->GetState(), ThreadState::kRunnable);
+ DCHECK(
+ !self->GetStateAndFlags(std::memory_order_relaxed).IsAnyOfFlagsSet(FlipFunctionFlags()));
+ EnsureFlipFunctionStarted(self);
+ while (GetStateAndFlags(std::memory_order_acquire).IsAnyOfFlagsSet(FlipFunctionFlags())) {
+ sched_yield();
+ }
function->Run(this);
}
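
(Illustrative sketch, not part of the patch.) The lines added to RequestSynchronousCheckpoint above start the target thread's pending flip function and then spin with sched_yield() until its in-progress flags clear, so the checkpoint never sees a mix of from-space and to-space references. The waiting pattern on its own, with a plain std::atomic standing in for ART's packed state-and-flags word:

#include <atomic>
#include <thread>
#include <sched.h>

std::atomic<bool> flip_in_progress{true};

void WaitForFlipToFinish() {
  while (flip_in_progress.load(std::memory_order_acquire)) {
    sched_yield();  // let the thread running the flip function make progress
  }
}

int main() {
  std::thread flipper([] { flip_in_progress.store(false, std::memory_order_release); });
  WaitForFlipToFinish();
  flipper.join();
  return 0;
}
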
@@ -1939,21 +1975,18 @@ void Thread::DumpState(std::ostream& os, const Thread* thread, pid_t tid) {
// cause ScopedObjectAccessUnchecked to deadlock.
if (gAborting == 0 && self != nullptr && thread != nullptr && thread->tlsPtr_.opeer != nullptr) {
ScopedObjectAccessUnchecked soa(self);
- priority = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_priority)
- ->GetInt(thread->tlsPtr_.opeer);
- is_daemon = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_daemon)
- ->GetBoolean(thread->tlsPtr_.opeer);
+ priority = WellKnownClasses::java_lang_Thread_priority->GetInt(thread->tlsPtr_.opeer);
+ is_daemon = WellKnownClasses::java_lang_Thread_daemon->GetBoolean(thread->tlsPtr_.opeer);
ObjPtr<mirror::Object> thread_group =
- jni::DecodeArtField(WellKnownClasses::java_lang_Thread_group)
- ->GetObject(thread->tlsPtr_.opeer);
+ WellKnownClasses::java_lang_Thread_group->GetObject(thread->tlsPtr_.opeer);
if (thread_group != nullptr) {
- ArtField* group_name_field =
- jni::DecodeArtField(WellKnownClasses::java_lang_ThreadGroup_name);
- ObjPtr<mirror::String> group_name_string =
- group_name_field->GetObject(thread_group)->AsString();
- group_name = (group_name_string != nullptr) ? group_name_string->ToModifiedUtf8() : "<null>";
+ ObjPtr<mirror::Object> group_name_object =
+ WellKnownClasses::java_lang_ThreadGroup_name->GetObject(thread_group);
+ group_name = (group_name_object != nullptr)
+ ? group_name_object->AsString()->ToModifiedUtf8()
+ : "<null>";
}
} else if (thread != nullptr) {
priority = thread->GetNativePriority();
@@ -1980,6 +2013,9 @@ void Thread::DumpState(std::ostream& os, const Thread* thread, pid_t tid) {
if (thread->IsStillStarting()) {
os << " (still starting up)";
}
+ if (thread->tls32_.disable_thread_flip_count != 0) {
+ os << " DisableFlipCount = " << thread->tls32_.disable_thread_flip_count;
+ }
os << "\n";
} else {
os << '"' << ::art::GetThreadName(tid) << '"'
@@ -2192,11 +2228,13 @@ struct StackDumpVisitor : public MonitorObjectsStackVisitor {
UNREACHABLE();
}
PrintObject(obj, msg, owner_tid);
+ num_blocked++;
}
void VisitLockedObject(ObjPtr<mirror::Object> obj)
override
REQUIRES_SHARED(Locks::mutator_lock_) {
PrintObject(obj, " - locked ", ThreadList::kInvalidThreadId);
+ num_locked++;
}
void PrintObject(ObjPtr<mirror::Object> obj,
@@ -2230,6 +2268,8 @@ struct StackDumpVisitor : public MonitorObjectsStackVisitor {
ArtMethod* last_method;
int last_line_number;
size_t repetition_count;
+ size_t num_blocked = 0;
+ size_t num_locked = 0;
};
static bool ShouldShowNativeStack(const Thread* thread)
@@ -2261,7 +2301,9 @@ static bool ShouldShowNativeStack(const Thread* thread)
return current_method != nullptr && current_method->IsNative();
}
-void Thread::DumpJavaStack(std::ostream& os, bool check_suspended, bool dump_locks) const {
+Thread::DumpOrder Thread::DumpJavaStack(std::ostream& os,
+ bool check_suspended,
+ bool dump_locks) const {
// Dumping the Java stack involves the verifier for locks. The verifier operates under the
// assumption that there is no exception pending on entry. Thus, stash any pending exception.
// Thread::Current() instead of this in case a thread is dumping the stack of another suspended
@@ -2272,12 +2314,28 @@ void Thread::DumpJavaStack(std::ostream& os, bool check_suspended, bool dump_loc
StackDumpVisitor dumper(os, const_cast<Thread*>(this), context.get(),
!tls32_.throwing_OutOfMemoryError, check_suspended, dump_locks);
dumper.WalkStack();
+ if (IsJitSensitiveThread()) {
+ return DumpOrder::kMain;
+ } else if (dumper.num_blocked > 0) {
+ return DumpOrder::kBlocked;
+ } else if (dumper.num_locked > 0) {
+ return DumpOrder::kLocked;
+ } else {
+ return DumpOrder::kDefault;
+ }
+}
+
+Thread::DumpOrder Thread::DumpStack(std::ostream& os,
+ bool dump_native_stack,
+ bool force_dump_stack) const {
+ unwindstack::AndroidLocalUnwinder unwinder;
+ return DumpStack(os, unwinder, dump_native_stack, force_dump_stack);
}
-void Thread::DumpStack(std::ostream& os,
- bool dump_native_stack,
- BacktraceMap* backtrace_map,
- bool force_dump_stack) const {
+Thread::DumpOrder Thread::DumpStack(std::ostream& os,
+ unwindstack::AndroidLocalUnwinder& unwinder,
+ bool dump_native_stack,
+ bool force_dump_stack) const {
// TODO: we call this code when dying but may not have suspended the thread ourself. The
// IsSuspended check is therefore racy with the use for dumping (normally we inhibit
// the race with the thread_suspend_count_lock_).
@@ -2288,6 +2346,7 @@ void Thread::DumpStack(std::ostream& os,
// thread's stack in debug builds where we'll hit the not suspended check in the stack walk.
safe_to_dump = (safe_to_dump || dump_for_abort);
}
+ DumpOrder dump_order = DumpOrder::kDefault;
if (safe_to_dump || force_dump_stack) {
// If we're currently in native code, dump that stack before dumping the managed stack.
if (dump_native_stack && (dump_for_abort || force_dump_stack || ShouldShowNativeStack(this))) {
@@ -2295,14 +2354,15 @@ void Thread::DumpStack(std::ostream& os,
GetCurrentMethod(nullptr,
/*check_suspended=*/ !force_dump_stack,
/*abort_on_error=*/ !(dump_for_abort || force_dump_stack));
- DumpNativeStack(os, GetTid(), backtrace_map, " native: ", method);
+ DumpNativeStack(os, unwinder, GetTid(), " native: ", method);
}
- DumpJavaStack(os,
- /*check_suspended=*/ !force_dump_stack,
- /*dump_locks=*/ !force_dump_stack);
+ dump_order = DumpJavaStack(os,
+ /*check_suspended=*/ !force_dump_stack,
+ /*dump_locks=*/ !force_dump_stack);
} else {
os << "Not able to dump stack of thread that isn't suspended";
}
+ return dump_order;
}
void Thread::ThreadExitCallback(void* arg) {
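
(Illustrative sketch, not part of the patch; the enumerator order of Thread::DumpOrder is assumed from the priorities returned above, the real declaration lives in thread.h.) Dump(), DumpStack() and DumpJavaStack() now return a DumpOrder so a caller such as an ANR dumper can list the most relevant threads first: the main thread, then threads blocked on a monitor, then lock holders, then everything else. One way a caller could sort on it:

#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

enum class DumpOrder { kMain, kBlocked, kLocked, kDefault };  // assumed order, see note above

struct ThreadDump {
  DumpOrder order;
  std::string text;
};

int main() {
  std::vector<ThreadDump> dumps = {
      {DumpOrder::kDefault, "\"Worker-2\" runnable ..."},
      {DumpOrder::kBlocked, "\"Worker-1\" waiting to lock ..."},
      {DumpOrder::kMain, "\"main\" native ..."},
  };
  std::stable_sort(dumps.begin(), dumps.end(),
                   [](const ThreadDump& a, const ThreadDump& b) { return a.order < b.order; });
  for (const ThreadDump& d : dumps) {
    std::cout << d.text << '\n';
  }
  return 0;
}
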
@@ -2380,25 +2440,17 @@ void Thread::Shutdown() {
}
void Thread::NotifyThreadGroup(ScopedObjectAccessAlreadyRunnable& soa, jobject thread_group) {
- ScopedLocalRef<jobject> thread_jobject(
- soa.Env(), soa.Env()->AddLocalReference<jobject>(Thread::Current()->GetPeer()));
- ScopedLocalRef<jobject> thread_group_jobject_scoped(
- soa.Env(), nullptr);
- jobject thread_group_jobject = thread_group;
+ ObjPtr<mirror::Object> thread_object = soa.Self()->GetPeer();
+ ObjPtr<mirror::Object> thread_group_object = soa.Decode<mirror::Object>(thread_group);
if (thread_group == nullptr || kIsDebugBuild) {
// There is always a group set. Retrieve it.
- thread_group_jobject_scoped.reset(
- soa.Env()->GetObjectField(thread_jobject.get(),
- WellKnownClasses::java_lang_Thread_group));
- thread_group_jobject = thread_group_jobject_scoped.get();
+ thread_group_object = WellKnownClasses::java_lang_Thread_group->GetObject(thread_object);
if (kIsDebugBuild && thread_group != nullptr) {
- CHECK(soa.Env()->IsSameObject(thread_group, thread_group_jobject));
+ CHECK(thread_group_object == soa.Decode<mirror::Object>(thread_group));
}
}
- soa.Env()->CallNonvirtualVoidMethod(thread_group_jobject,
- WellKnownClasses::java_lang_ThreadGroup,
- WellKnownClasses::java_lang_ThreadGroup_add,
- thread_jobject.get());
+ WellKnownClasses::java_lang_ThreadGroup_add->InvokeVirtual<'V', 'L'>(
+ soa.Self(), thread_group_object, thread_object);
}
Thread::Thread(bool daemon)
@@ -2409,8 +2461,6 @@ Thread::Thread(bool daemon)
wait_cond_ = new ConditionVariable("a thread wait condition variable", *wait_mutex_);
tlsPtr_.mutator_lock = Locks::mutator_lock_;
DCHECK(tlsPtr_.mutator_lock != nullptr);
- tlsPtr_.instrumentation_stack =
- new std::map<uintptr_t, instrumentation::InstrumentationStackFrame>;
tlsPtr_.name.store(kThreadNameDuringStartup, std::memory_order_relaxed);
static_assert((sizeof(Thread) % 4) == 0U,
@@ -2458,8 +2508,7 @@ void Thread::AssertPendingException() const {
void Thread::AssertPendingOOMException() const {
AssertPendingException();
auto* e = GetException();
- CHECK_EQ(e->GetClass(), DecodeJObject(WellKnownClasses::java_lang_OutOfMemoryError)->AsClass())
- << e->Dump();
+ CHECK_EQ(e->GetClass(), WellKnownClasses::java_lang_OutOfMemoryError.Get()) << e->Dump();
}
void Thread::AssertNoPendingException() const {
@@ -2497,7 +2546,7 @@ class MonitorExitVisitor : public SingleRootVisitor {
Thread* const self_;
};
-void Thread::Destroy() {
+void Thread::Destroy(bool should_run_callbacks) {
Thread* self = this;
DCHECK_EQ(self, Thread::Current());
@@ -2522,27 +2571,25 @@ void Thread::Destroy() {
if (tlsPtr_.opeer != nullptr) {
ScopedObjectAccess soa(self);
+ if (UNLIKELY(self->GetMethodTraceBuffer() != nullptr)) {
+ Trace::FlushThreadBuffer(self);
+ self->ResetMethodTraceBuffer();
+ }
// We may need to call user-supplied managed code, do this before final clean-up.
- HandleUncaughtExceptions(soa);
- RemoveFromThreadGroup(soa);
+ HandleUncaughtExceptions();
+ RemoveFromThreadGroup();
Runtime* runtime = Runtime::Current();
- if (runtime != nullptr) {
+ if (runtime != nullptr && should_run_callbacks) {
runtime->GetRuntimeCallbacks()->ThreadDeath(self);
}
// this.nativePeer = 0;
- if (Runtime::Current()->IsActiveTransaction()) {
- jni::DecodeArtField(WellKnownClasses::java_lang_Thread_nativePeer)
- ->SetLong<true>(tlsPtr_.opeer, 0);
- } else {
- jni::DecodeArtField(WellKnownClasses::java_lang_Thread_nativePeer)
- ->SetLong<false>(tlsPtr_.opeer, 0);
- }
+ SetNativePeer</*kSupportTransaction=*/ true>(tlsPtr_.opeer, nullptr);
// Thread.join() is implemented as an Object.wait() on the Thread.lock object. Signal anyone
// who is waiting.
ObjPtr<mirror::Object> lock =
- jni::DecodeArtField(WellKnownClasses::java_lang_Thread_lock)->GetObject(tlsPtr_.opeer);
+ WellKnownClasses::java_lang_Thread_lock->GetObject(tlsPtr_.opeer);
// (This conditional is only needed for tests, where Thread.lock won't have been set.)
if (lock != nullptr) {
StackHandleScope<1> hs(self);
@@ -2559,7 +2606,7 @@ void Thread::Destroy() {
}
// Mark-stack revocation must be performed at the very end. No
// checkpoint/flip-function or read-barrier should be called after this.
- if (kUseReadBarrier) {
+ if (gUseReadBarrier) {
Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->RevokeThreadLocalMarkStack(this);
}
}
@@ -2600,47 +2647,47 @@ Thread::~Thread() {
CleanupCpu();
}
- delete tlsPtr_.instrumentation_stack;
SetCachedThreadName(nullptr); // Deallocate name.
delete tlsPtr_.deps_or_stack_trace_sample.stack_trace_sample;
+ if (tlsPtr_.method_trace_buffer != nullptr) {
+ delete[] tlsPtr_.method_trace_buffer;
+ }
+
Runtime::Current()->GetHeap()->AssertThreadLocalBuffersAreRevoked(this);
TearDownAlternateSignalStack();
}
-void Thread::HandleUncaughtExceptions(ScopedObjectAccessAlreadyRunnable& soa) {
- if (!IsExceptionPending()) {
+void Thread::HandleUncaughtExceptions() {
+ Thread* self = this;
+ DCHECK_EQ(self, Thread::Current());
+ if (!self->IsExceptionPending()) {
return;
}
- ScopedLocalRef<jobject> peer(tlsPtr_.jni_env, soa.AddLocalReference<jobject>(tlsPtr_.opeer));
- ScopedThreadStateChange tsc(this, ThreadState::kNative);
// Get and clear the exception.
- ScopedLocalRef<jthrowable> exception(tlsPtr_.jni_env, tlsPtr_.jni_env->ExceptionOccurred());
- tlsPtr_.jni_env->ExceptionClear();
+ ObjPtr<mirror::Object> exception = self->GetException();
+ self->ClearException();
// Call the Thread instance's dispatchUncaughtException(Throwable)
- tlsPtr_.jni_env->CallVoidMethod(peer.get(),
- WellKnownClasses::java_lang_Thread_dispatchUncaughtException,
- exception.get());
+ WellKnownClasses::java_lang_Thread_dispatchUncaughtException->InvokeFinal<'V', 'L'>(
+ self, tlsPtr_.opeer, exception);
// If the dispatchUncaughtException threw, clear that exception too.
- tlsPtr_.jni_env->ExceptionClear();
+ self->ClearException();
}
-void Thread::RemoveFromThreadGroup(ScopedObjectAccessAlreadyRunnable& soa) {
- // this.group.removeThread(this);
+void Thread::RemoveFromThreadGroup() {
+ Thread* self = this;
+ DCHECK_EQ(self, Thread::Current());
+ // this.group.threadTerminated(this);
// group can be null if we're in the compiler or a test.
- ObjPtr<mirror::Object> ogroup = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_group)
- ->GetObject(tlsPtr_.opeer);
- if (ogroup != nullptr) {
- ScopedLocalRef<jobject> group(soa.Env(), soa.AddLocalReference<jobject>(ogroup));
- ScopedLocalRef<jobject> peer(soa.Env(), soa.AddLocalReference<jobject>(tlsPtr_.opeer));
- ScopedThreadStateChange tsc(soa.Self(), ThreadState::kNative);
- tlsPtr_.jni_env->CallVoidMethod(group.get(),
- WellKnownClasses::java_lang_ThreadGroup_removeThread,
- peer.get());
+ ObjPtr<mirror::Object> group =
+ WellKnownClasses::java_lang_Thread_group->GetObject(tlsPtr_.opeer);
+ if (group != nullptr) {
+ WellKnownClasses::java_lang_ThreadGroup_threadTerminated->InvokeVirtual<'V', 'L'>(
+ self, group, tlsPtr_.opeer);
}
}
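
(Illustrative sketch, not part of the patch.) The rewritten HandleUncaughtExceptions above follows a take, clear, dispatch, clear sequence: grab the pending exception, clear it, invoke the peer's dispatchUncaughtException, then clear anything the handler itself threw. The same shape expressed with std::exception_ptr instead of managed throwables:

#include <exception>
#include <iostream>
#include <stdexcept>

static std::exception_ptr pending;

void DispatchUncaught(void (*handler)(std::exception_ptr)) {
  if (!pending) {
    return;
  }
  std::exception_ptr ex = pending;
  pending = nullptr;  // clear the pending exception before calling user code
  try {
    handler(ex);      // the user-supplied handler may itself throw
  } catch (...) {
    // If the handler threw, clear that exception too.
  }
}

int main() {
  pending = std::make_exception_ptr(std::runtime_error("boom"));
  DispatchUncaught([](std::exception_ptr ex) {
    try { std::rethrow_exception(ex); }
    catch (const std::exception& e) { std::cout << "uncaught: " << e.what() << '\n'; }
  });
  return 0;
}
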
@@ -2731,27 +2778,15 @@ void Thread::HandleScopeVisitRoots(RootVisitor* visitor, uint32_t thread_id) {
}
}
-ObjPtr<mirror::Object> Thread::DecodeJObject(jobject obj) const {
- if (obj == nullptr) {
- return nullptr;
- }
+ObjPtr<mirror::Object> Thread::DecodeGlobalJObject(jobject obj) const {
+ DCHECK(obj != nullptr);
IndirectRef ref = reinterpret_cast<IndirectRef>(obj);
IndirectRefKind kind = IndirectReferenceTable::GetIndirectRefKind(ref);
+ DCHECK_NE(kind, kJniTransition);
+ DCHECK_NE(kind, kLocal);
ObjPtr<mirror::Object> result;
bool expect_null = false;
- // The "kinds" below are sorted by the frequency we expect to encounter them.
- if (kind == kLocal) {
- IndirectReferenceTable& locals = tlsPtr_.jni_env->locals_;
- // Local references do not need a read barrier.
- result = locals.Get<kWithoutReadBarrier>(ref);
- } else if (kind == kJniTransitionOrInvalid) {
- // The `jclass` for a static method points to the CompressedReference<> in the
- // `ArtMethod::declaring_class_`. Other `jobject` arguments point to spilled stack
- // references but a StackReference<> is just a subclass of CompressedReference<>.
- DCHECK(IsJniTransitionReference(obj));
- result = reinterpret_cast<mirror::CompressedReference<mirror::Object>*>(obj)->AsMirrorPtr();
- VerifyObject(result);
- } else if (kind == kGlobal) {
+ if (kind == kGlobal) {
result = tlsPtr_.jni_env->vm_->DecodeGlobal(ref);
} else {
DCHECK_EQ(kind, kWeakGlobal);
@@ -2947,9 +2982,19 @@ class BuildInternalStackTraceVisitor : public StackVisitor {
methods_and_pcs->SetElementPtrSize</*kTransactionActive=*/ false, /*kCheckTransaction=*/ false>(
static_cast<uint32_t>(methods_and_pcs->GetLength()) / 2 + count_, dex_pc, pointer_size_);
// Save the declaring class of the method to ensure that the declaring classes of the methods
- // do not get unloaded while the stack trace is live.
+ // do not get unloaded while the stack trace is live. However, this does not work for copied
+ // methods because the declaring class of a copied method points to an interface class which
+ // may be in a different class loader. Instead, retrieve the class loader associated with the
+ // allocator that holds the copied method. This is much cheaper than finding the actual class.
+ ObjPtr<mirror::Object> keep_alive;
+ if (UNLIKELY(method->IsCopied())) {
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ keep_alive = class_linker->GetHoldingClassLoaderOfCopiedMethod(self_, method);
+ } else {
+ keep_alive = method->GetDeclaringClass();
+ }
trace_->Set</*kTransactionActive=*/ false, /*kCheckTransaction=*/ false>(
- static_cast<int32_t>(count_) + 1, method->GetDeclaringClass());
+ static_cast<int32_t>(count_) + 1, keep_alive);
++count_;
}
@@ -2967,11 +3012,14 @@ class BuildInternalStackTraceVisitor : public StackVisitor {
uint32_t skip_depth_;
// Current position down stack trace.
uint32_t count_ = 0;
- // An object array where the first element is a pointer array that contains the ArtMethod
- // pointers on the stack and dex PCs. The rest of the elements are the declaring class of
- // the ArtMethod pointers. trace_[i+1] contains the declaring class of the ArtMethod of the
- // i'th frame. We're initializing a newly allocated trace, so we do not need to record that
- // under a transaction. If the transaction is aborted, the whole trace shall be unreachable.
+ // An object array where the first element is a pointer array that contains the `ArtMethod`
+ // pointers on the stack and dex PCs. The rest of the elements are referencing objects
+ // that shall keep the methods alive, namely the declaring class of the `ArtMethod` for
+ // declared methods and the class loader for copied methods (because it's faster to find
+ // the class loader than the actual class that holds the copied method). The `trace_[i+1]`
+ // contains the declaring class or class loader of the `ArtMethod` of the i'th frame.
+ // We're initializing a newly allocated trace, so we do not need to record that under
+ // a transaction. If the transaction is aborted, the whole trace shall be unreachable.
mirror::ObjectArray<mirror::Object>* trace_ = nullptr;
// For cross compilation.
const PointerSize pointer_size_;
@@ -3138,6 +3186,148 @@ jobjectArray Thread::InternalStackTraceToStackTraceElementArray(
return result;
}
+[[nodiscard]] static ObjPtr<mirror::StackFrameInfo> InitStackFrameInfo(
+ const ScopedObjectAccessAlreadyRunnable& soa,
+ ClassLinker* class_linker,
+ Handle<mirror::StackFrameInfo> stackFrameInfo,
+ ArtMethod* method,
+ uint32_t dex_pc) REQUIRES_SHARED(Locks::mutator_lock_) {
+ StackHandleScope<4> hs(soa.Self());
+ int32_t line_number;
+ auto source_name_object(hs.NewHandle<mirror::String>(nullptr));
+ if (method->IsProxyMethod()) {
+ line_number = -1;
+ // source_name_object intentionally left null for proxy methods
+ } else {
+ line_number = method->GetLineNumFromDexPC(dex_pc);
+ if (line_number == -1) {
+ // Make the line_number field of StackFrameInfo hold the dex pc.
+ // source_name_object is intentionally left null if we failed to map the dex pc to
+ // a line number (most probably because there is no debug info). See b/30183883.
+ line_number = static_cast<int32_t>(dex_pc);
+ } else {
+ const char* source_file = method->GetDeclaringClassSourceFile();
+ if (source_file != nullptr) {
+ source_name_object.Assign(mirror::String::AllocFromModifiedUtf8(soa.Self(), source_file));
+ if (source_name_object == nullptr) {
+ soa.Self()->AssertPendingOOMException();
+ return nullptr;
+ }
+ }
+ }
+ }
+
+ Handle<mirror::Class> declaring_class_object(
+ hs.NewHandle<mirror::Class>(method->GetDeclaringClass()));
+
+ ArtMethod* interface_method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
+ const char* method_name = interface_method->GetName();
+ CHECK(method_name != nullptr);
+ Handle<mirror::String> method_name_object(
+ hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), method_name)));
+ if (method_name_object == nullptr) {
+ soa.Self()->AssertPendingOOMException();
+ return nullptr;
+ }
+
+ dex::ProtoIndex proto_idx =
+ method->GetDexFile()->GetIndexForProtoId(interface_method->GetPrototype());
+ Handle<mirror::MethodType> method_type_object(hs.NewHandle<mirror::MethodType>(
+ class_linker->ResolveMethodType(soa.Self(), proto_idx, interface_method)));
+ if (method_type_object == nullptr) {
+ soa.Self()->AssertPendingOOMException();
+ return nullptr;
+ }
+
+ stackFrameInfo->AssignFields(declaring_class_object,
+ method_type_object,
+ method_name_object,
+ source_name_object,
+ line_number,
+ static_cast<int32_t>(dex_pc));
+ return stackFrameInfo.Get();
+}
+
+constexpr jlong FILL_CLASS_REFS_ONLY = 0x2; // StackStreamFactory.FILL_CLASS_REFS_ONLY
+
+jint Thread::InternalStackTraceToStackFrameInfoArray(
+ const ScopedObjectAccessAlreadyRunnable& soa,
+ jlong mode, // See java.lang.StackStreamFactory for the mode flags
+ jobject internal,
+ jint startLevel,
+ jint batchSize,
+ jint startBufferIndex,
+ jobjectArray output_array) {
+ // Decode the internal stack trace into the depth, method trace and PC trace.
+ // Subtract one for the methods and PC trace.
+ int32_t depth = soa.Decode<mirror::Array>(internal)->GetLength() - 1;
+ DCHECK_GE(depth, 0);
+
+ StackHandleScope<6> hs(soa.Self());
+ Handle<mirror::ObjectArray<mirror::Object>> framesOrClasses =
+ hs.NewHandle(soa.Decode<mirror::ObjectArray<mirror::Object>>(output_array));
+
+ jint endBufferIndex = startBufferIndex;
+
+ if (startLevel < 0 || startLevel >= depth) {
+ return endBufferIndex;
+ }
+
+ int32_t bufferSize = framesOrClasses->GetLength();
+ if (startBufferIndex < 0 || startBufferIndex >= bufferSize) {
+ return endBufferIndex;
+ }
+
+ // The FILL_CLASS_REFS_ONLY flag is defined in AbstractStackWalker.fetchStackFrames() javadoc.
+ bool isClassArray = (mode & FILL_CLASS_REFS_ONLY) != 0;
+
+ Handle<mirror::ObjectArray<mirror::Object>> decoded_traces =
+ hs.NewHandle(soa.Decode<mirror::Object>(internal)->AsObjectArray<mirror::Object>());
+ // Methods and dex PC trace is element 0.
+ DCHECK(decoded_traces->Get(0)->IsIntArray() || decoded_traces->Get(0)->IsLongArray());
+ Handle<mirror::PointerArray> method_trace =
+ hs.NewHandle(ObjPtr<mirror::PointerArray>::DownCast(decoded_traces->Get(0)));
+
+ ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
+ Handle<mirror::Class> sfi_class =
+ hs.NewHandle(class_linker->FindSystemClass(soa.Self(), "Ljava/lang/StackFrameInfo;"));
+ DCHECK(sfi_class != nullptr);
+
+ MutableHandle<mirror::StackFrameInfo> frame = hs.NewHandle<mirror::StackFrameInfo>(nullptr);
+ MutableHandle<mirror::Class> clazz = hs.NewHandle<mirror::Class>(nullptr);
+ for (uint32_t i = static_cast<uint32_t>(startLevel); i < static_cast<uint32_t>(depth); ++i) {
+ if (endBufferIndex >= startBufferIndex + batchSize || endBufferIndex >= bufferSize) {
+ break;
+ }
+
+ ArtMethod* method = method_trace->GetElementPtrSize<ArtMethod*>(i, kRuntimePointerSize);
+ if (isClassArray) {
+ clazz.Assign(method->GetDeclaringClass());
+ framesOrClasses->Set(endBufferIndex, clazz.Get());
+ } else {
+ // Prepare parameters for fields in StackFrameInfo
+ uint32_t dex_pc = method_trace->GetElementPtrSize<uint32_t>(
+ i + static_cast<uint32_t>(method_trace->GetLength()) / 2, kRuntimePointerSize);
+
+ ObjPtr<mirror::Object> frameObject = framesOrClasses->Get(endBufferIndex);
+ // If libcore didn't allocate the object, we just stop here, but it's unlikely.
+ if (frameObject == nullptr || !frameObject->InstanceOf(sfi_class.Get())) {
+ break;
+ }
+ frame.Assign(ObjPtr<mirror::StackFrameInfo>::DownCast(frameObject));
+ frame.Assign(InitStackFrameInfo(soa, class_linker, frame, method, dex_pc));
+ // Break if InitStackFrameInfo fails to allocate objects or assign the fields.
+ if (frame == nullptr) {
+ break;
+ }
+ }
+
+ ++endBufferIndex;
+ }
+
+ return endBufferIndex;
+}
+
jobjectArray Thread::CreateAnnotatedStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const {
// This code allocates. Do not allow it to operate with a pending exception.
if (IsExceptionPending()) {
@@ -3574,6 +3764,7 @@ void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset) {
QUICK_ENTRY_POINT_INFO(pAputObject)
QUICK_ENTRY_POINT_INFO(pJniMethodStart)
QUICK_ENTRY_POINT_INFO(pJniMethodEnd)
+ QUICK_ENTRY_POINT_INFO(pJniMethodEntryHook)
QUICK_ENTRY_POINT_INFO(pJniDecodeReferenceResult)
QUICK_ENTRY_POINT_INFO(pJniLockObject)
QUICK_ENTRY_POINT_INFO(pJniUnlockObject)
@@ -3639,6 +3830,7 @@ void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset) {
QUICK_ENTRY_POINT_INFO(pA64Store)
QUICK_ENTRY_POINT_INFO(pNewEmptyString)
QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_B)
+ QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BB)
QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BI)
QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BII)
QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BIII)
@@ -3653,6 +3845,7 @@ void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset) {
QUICK_ENTRY_POINT_INFO(pNewStringFromString)
QUICK_ENTRY_POINT_INFO(pNewStringFromStringBuffer)
QUICK_ENTRY_POINT_INFO(pNewStringFromStringBuilder)
+ QUICK_ENTRY_POINT_INFO(pNewStringFromUtf16Bytes_BII)
QUICK_ENTRY_POINT_INFO(pJniReadBarrier)
QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg00)
QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg01)
@@ -3691,12 +3884,15 @@ void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset) {
os << offset;
}
-void Thread::QuickDeliverException() {
+void Thread::QuickDeliverException(bool skip_method_exit_callbacks) {
// Get exception from thread.
ObjPtr<mirror::Throwable> exception = GetException();
CHECK(exception != nullptr);
if (exception == GetDeoptimizationException()) {
- artDeoptimize(this);
+ // This wasn't a real exception, so just clear it here. If there was an actual exception it
+ // will be recorded in the DeoptimizationContext and it will be restored later.
+ ClearException();
+ artDeoptimize(this, skip_method_exit_callbacks);
UNREACHABLE();
}
@@ -3717,63 +3913,37 @@ void Thread::QuickDeliverException() {
// Note: we do this *after* reporting the exception to instrumentation in case it now requires
// deoptimization. It may happen if a debugger is attached and requests new events (single-step,
// breakpoint, ...) when the exception is reported.
- //
- // Note we need to check for both force_frame_pop and force_retry_instruction. The first is
- // expected to happen fairly regularly but the second can only happen if we are using
- // instrumentation trampolines (for example with DDMS tracing). That forces us to do deopt later
- // and see every frame being popped. We don't need to handle it any differently.
- ShadowFrame* cf;
- bool force_deopt = false;
- if (Runtime::Current()->AreNonStandardExitsEnabled() || kIsDebugBuild) {
- NthCallerVisitor visitor(this, 0, false);
- visitor.WalkStack();
- cf = visitor.GetCurrentShadowFrame();
- if (cf == nullptr) {
- cf = FindDebuggerShadowFrame(visitor.GetFrameId());
- }
- bool force_frame_pop = cf != nullptr && cf->GetForcePopFrame();
- bool force_retry_instr = cf != nullptr && cf->GetForceRetryInstruction();
- if (kIsDebugBuild && force_frame_pop) {
- DCHECK(Runtime::Current()->AreNonStandardExitsEnabled());
- NthCallerVisitor penultimate_visitor(this, 1, false);
- penultimate_visitor.WalkStack();
- ShadowFrame* penultimate_frame = penultimate_visitor.GetCurrentShadowFrame();
- if (penultimate_frame == nullptr) {
- penultimate_frame = FindDebuggerShadowFrame(penultimate_visitor.GetFrameId());
- }
- }
- if (force_retry_instr) {
- DCHECK(Runtime::Current()->AreNonStandardExitsEnabled());
- }
- force_deopt = force_frame_pop || force_retry_instr;
- }
- if (Dbg::IsForcedInterpreterNeededForException(this) || force_deopt || IsForceInterpreter()) {
+ // Frame pop can be requested on a method unwind callback which requires a deopt. We could
+ // potentially check after each unwind callback to see if a frame pop was requested and deopt if
+ // needed. Since this is a debug only feature and this path is only taken when an exception is
+ // thrown, it is not performance critical and we keep it simple by just deopting if method exit
+ // listeners are installed and frame pop feature is supported.
+ bool needs_deopt =
+ instrumentation->HasMethodExitListeners() && Runtime::Current()->AreNonStandardExitsEnabled();
+ if (Dbg::IsForcedInterpreterNeededForException(this) || IsForceInterpreter() || needs_deopt) {
NthCallerVisitor visitor(this, 0, false);
visitor.WalkStack();
- if (Runtime::Current()->IsAsyncDeoptimizeable(visitor.caller_pc)) {
- // method_type shouldn't matter due to exception handling.
- const DeoptimizationMethodType method_type = DeoptimizationMethodType::kDefault;
- // Save the exception into the deoptimization context so it can be restored
- // before entering the interpreter.
- if (force_deopt) {
- VLOG(deopt) << "Deopting " << cf->GetMethod()->PrettyMethod() << " for frame-pop";
- DCHECK(Runtime::Current()->AreNonStandardExitsEnabled());
- // Get rid of the exception since we are doing a framepop instead.
- LOG(WARNING) << "Suppressing pending exception for retry-instruction/frame-pop: "
- << exception->Dump();
- ClearException();
+ if (visitor.GetCurrentQuickFrame() != nullptr) {
+ if (Runtime::Current()->IsAsyncDeoptimizeable(visitor.GetOuterMethod(), visitor.caller_pc)) {
+ // method_type shouldn't matter due to exception handling.
+ const DeoptimizationMethodType method_type = DeoptimizationMethodType::kDefault;
+ // Save the exception into the deoptimization context so it can be restored
+ // before entering the interpreter.
+ PushDeoptimizationContext(
+ JValue(),
+ /* is_reference= */ false,
+ exception,
+ /* from_code= */ false,
+ method_type);
+ artDeoptimize(this, skip_method_exit_callbacks);
+ UNREACHABLE();
+ } else {
+ LOG(WARNING) << "Got a deoptimization request on un-deoptimizable method "
+ << visitor.caller->PrettyMethod();
}
- PushDeoptimizationContext(
- JValue(),
- /* is_reference= */ false,
- (force_deopt ? nullptr : exception),
- /* from_code= */ false,
- method_type);
- artDeoptimize(this);
- UNREACHABLE();
- } else if (visitor.caller != nullptr) {
- LOG(WARNING) << "Got a deoptimization request on un-deoptimizable method "
- << visitor.caller->PrettyMethod();
+ } else {
+ // This is either top of call stack, or shadow frame.
+ DCHECK(visitor.caller == nullptr || visitor.IsShadowFrame());
}
}
@@ -3781,7 +3951,7 @@ void Thread::QuickDeliverException() {
// resolution.
ClearException();
QuickExceptionHandler exception_handler(this, false);
- exception_handler.FindCatch(exception);
+ exception_handler.FindCatch(exception, skip_method_exit_callbacks);
if (exception_handler.GetClearException()) {
// Exception was cleared as part of delivery.
DCHECK(!IsExceptionPending());
@@ -3848,10 +4018,11 @@ class ReferenceMapVisitor : public StackVisitor {
public:
ReferenceMapVisitor(Thread* thread, Context* context, RootVisitor& visitor)
REQUIRES_SHARED(Locks::mutator_lock_)
- // We are visiting the references in compiled frames, so we do not need
- // to know the inlined frames.
+ // We are visiting the references in compiled frames, so we do not need
+ // to know the inlined frames.
: StackVisitor(thread, context, StackVisitor::StackWalkKind::kSkipInlinedFrames),
- visitor_(visitor) {}
+ visitor_(visitor),
+ visit_declaring_class_(!Runtime::Current()->GetHeap()->IsPerformingUffdCompaction()) {}
bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
if (false) {
@@ -3896,6 +4067,9 @@ class ReferenceMapVisitor : public StackVisitor {
void VisitDeclaringClass(ArtMethod* method)
REQUIRES_SHARED(Locks::mutator_lock_)
NO_THREAD_SAFETY_ANALYSIS {
+ if (!visit_declaring_class_) {
+ return;
+ }
ObjPtr<mirror::Class> klass = method->GetDeclaringClassUnchecked<kWithoutReadBarrier>();
// klass can be null for runtime methods.
if (klass != nullptr) {
@@ -3978,7 +4152,7 @@ class ReferenceMapVisitor : public StackVisitor {
// (PC shall be known thanks to the runtime frame for throwing SIOOBE).
// Note that JIT does not emit that intrinic implementation.
const void* pc = reinterpret_cast<const void*>(GetCurrentQuickFramePc());
- if (pc != 0u && Runtime::Current()->GetHeap()->IsInBootImageOatFile(pc)) {
+ if (pc != nullptr && Runtime::Current()->GetHeap()->IsInBootImageOatFile(pc)) {
return;
}
}
@@ -4189,6 +4363,7 @@ class ReferenceMapVisitor : public StackVisitor {
// Visitor for when we visit a root.
RootVisitor& visitor_;
+ bool visit_declaring_class_;
};
class RootCallbackVisitor {
@@ -4274,9 +4449,6 @@ void Thread::VisitRoots(RootVisitor* visitor) {
RootCallbackVisitor visitor_to_callback(visitor, thread_id);
ReferenceMapVisitor<RootCallbackVisitor, kPrecise> mapper(this, &context, visitor_to_callback);
mapper.template WalkStack<StackVisitor::CountTransitions::kNo>(false);
- for (auto& entry : *GetInstrumentationStack()) {
- visitor->VisitRootIfNonNull(&entry.second.this_object_, RootInfo(kRootVMInternal, thread_id));
- }
}
#pragma GCC diagnostic pop
@@ -4293,27 +4465,46 @@ static void SweepCacheEntry(IsMarkedVisitor* visitor, const Instruction* inst, s
case Opcode::INSTANCE_OF:
case Opcode::NEW_ARRAY:
case Opcode::CONST_CLASS: {
- mirror::Class* cls = reinterpret_cast<mirror::Class*>(*value);
- if (cls == nullptr || cls == Runtime::GetWeakClassSentinel()) {
- // Entry got deleted in a previous sweep.
+ mirror::Class* klass = reinterpret_cast<mirror::Class*>(*value);
+ if (klass == nullptr || klass == Runtime::GetWeakClassSentinel()) {
return;
}
- Runtime::ProcessWeakClass(
- reinterpret_cast<GcRoot<mirror::Class>*>(value),
- visitor,
- Runtime::GetWeakClassSentinel());
+ mirror::Class* new_klass = down_cast<mirror::Class*>(visitor->IsMarked(klass));
+ if (new_klass == nullptr) {
+ *value = reinterpret_cast<size_t>(Runtime::GetWeakClassSentinel());
+ } else if (new_klass != klass) {
+ *value = reinterpret_cast<size_t>(new_klass);
+ }
return;
}
case Opcode::CONST_STRING:
case Opcode::CONST_STRING_JUMBO: {
mirror::Object* object = reinterpret_cast<mirror::Object*>(*value);
+ if (object == nullptr) {
+ return;
+ }
mirror::Object* new_object = visitor->IsMarked(object);
// We know the string is marked because it's a strongly-interned string that
// is always alive (see b/117621117 for trying to make those strings weak).
- // The IsMarked implementation of the CMS collector returns
- // null for newly allocated objects, but we know those haven't moved. Therefore,
- // only update the entry if we get a different non-null string.
- if (new_object != nullptr && new_object != object) {
+ if (kIsDebugBuild && new_object == nullptr) {
+ // (b/275005060) Currently the problem is reported only on CC GC.
+ // Therefore we log it with more information. But since the failure rate
+ // is quite high, sampling it.
+ if (gUseReadBarrier) {
+ Runtime* runtime = Runtime::Current();
+ gc::collector::ConcurrentCopying* cc = runtime->GetHeap()->ConcurrentCopyingCollector();
+ CHECK_NE(cc, nullptr);
+ LOG(FATAL) << cc->DumpReferenceInfo(object, "string")
+ << " string interned: " << std::boolalpha
+ << runtime->GetInternTable()->LookupStrong(Thread::Current(),
+ down_cast<mirror::String*>(object))
+ << std::noboolalpha;
+ } else {
+ // Other GCs
+ LOG(FATAL) << __FUNCTION__
+ << ": IsMarked returned null for a strongly interned string: " << object;
+ }
+ } else if (new_object != object) {
*value = reinterpret_cast<size_t>(new_object);
}
return;
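
(Illustrative sketch, not part of the patch; Object, Visitor and kSentinel are stand-ins for mirror::Class, IsMarkedVisitor and Runtime::GetWeakClassSentinel().) The CONST_CLASS case above now updates the interpreter-cache slot in place: leave it alone if it already holds the sentinel, write the sentinel if the class died, or rewrite the slot if the collector moved the class. The sweep logic in isolation:

#include <cstdint>

struct Object {};

struct Visitor {
  // Returns nullptr if the object is dead, otherwise its (possibly moved) address.
  Object* (*is_marked)(Object*);
};

static Object kSentinel;

void SweepWeakSlot(Visitor* visitor, uintptr_t* slot) {
  Object* old_ref = reinterpret_cast<Object*>(*slot);
  if (old_ref == nullptr || old_ref == &kSentinel) {
    return;  // already cleared by a previous sweep
  }
  Object* new_ref = visitor->is_marked(old_ref);
  if (new_ref == nullptr) {
    *slot = reinterpret_cast<uintptr_t>(&kSentinel);  // referent died
  } else if (new_ref != old_ref) {
    *slot = reinterpret_cast<uintptr_t>(new_ref);     // referent moved
  }
}

int main() {
  Object live;
  uintptr_t slot = reinterpret_cast<uintptr_t>(&live);
  Visitor visitor{[](Object* o) { return o; }};  // pretend everything is marked in place
  SweepWeakSlot(&visitor, &slot);
  return slot == reinterpret_cast<uintptr_t>(&live) ? 0 : 1;
}
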
@@ -4327,16 +4518,12 @@ static void SweepCacheEntry(IsMarkedVisitor* visitor, const Instruction* inst, s
// New opcode is using the cache. We need to explicitly handle it in this method.
DCHECK(false) << "Unhandled opcode " << inst->Opcode();
}
-};
+}
-void Thread::SweepInterpreterCaches(IsMarkedVisitor* visitor) {
- MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
- Runtime::Current()->GetThreadList()->ForEach([visitor](Thread* thread) {
- Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
- for (InterpreterCache::Entry& entry : thread->GetInterpreterCache()->GetArray()) {
- SweepCacheEntry(visitor, reinterpret_cast<const Instruction*>(entry.first), &entry.second);
- }
- });
+void Thread::SweepInterpreterCache(IsMarkedVisitor* visitor) {
+ for (InterpreterCache::Entry& entry : GetInterpreterCache()->GetArray()) {
+ SweepCacheEntry(visitor, reinterpret_cast<const Instruction*>(entry.first), &entry.second);
+ }
}
// FIXME: clang-r433403 reports the below function exceeds frame size limit.
@@ -4425,6 +4612,15 @@ bool Thread::HasTlab() const {
return has_tlab;
}
+void Thread::AdjustTlab(size_t slide_bytes) {
+ if (HasTlab()) {
+ tlsPtr_.thread_local_start -= slide_bytes;
+ tlsPtr_.thread_local_pos -= slide_bytes;
+ tlsPtr_.thread_local_end -= slide_bytes;
+ tlsPtr_.thread_local_limit -= slide_bytes;
+ }
+}
+
std::ostream& operator<<(std::ostream& os, const Thread& thread) {
thread.ShortDump(os);
return os;
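
(Illustrative sketch, not part of the patch.) AdjustTlab above shifts all four TLAB cursors by the same delta when the userfaultfd compactor slides a thread's buffer to a lower address; moving start, pos, end and limit together is what preserves the bump-pointer invariants. A minimal bump buffer with the same adjustment:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>

struct BumpBuffer {
  uint8_t* start = nullptr;
  uint8_t* pos = nullptr;
  uint8_t* end = nullptr;

  void* Alloc(std::size_t bytes) {
    if (pos + bytes > end) {
      return nullptr;  // buffer exhausted
    }
    void* result = pos;
    pos += bytes;
    return result;
  }

  // Keep every cursor consistent after the backing memory moved down by slide_bytes.
  void Adjust(std::size_t slide_bytes) {
    start -= slide_bytes;
    pos -= slide_bytes;
    end -= slide_bytes;
  }
};

int main() {
  static uint8_t backing[128];
  BumpBuffer buffer{backing + 64, backing + 64, backing + 128};
  assert(buffer.Alloc(16) != nullptr);
  std::memmove(backing, backing + 64, 64);  // simulate the region sliding down
  buffer.Adjust(64);
  assert(buffer.start == backing && buffer.pos == backing + 16);
  return 0;
}
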
@@ -4435,9 +4631,11 @@ bool Thread::ProtectStack(bool fatal_on_error) {
VLOG(threads) << "Protecting stack at " << pregion;
if (mprotect(pregion, kStackOverflowProtectedSize, PROT_NONE) == -1) {
if (fatal_on_error) {
- LOG(FATAL) << "Unable to create protected region in stack for implicit overflow check. "
+ // b/249586057, LOG(FATAL) times out
+ LOG(ERROR) << "Unable to create protected region in stack for implicit overflow check. "
"Reason: "
<< strerror(errno) << " size: " << kStackOverflowProtectedSize;
+ exit(1);
}
return false;
}
@@ -4471,25 +4669,29 @@ size_t Thread::NumberOfHeldMutexes() const {
void Thread::DeoptimizeWithDeoptimizationException(JValue* result) {
DCHECK_EQ(GetException(), Thread::GetDeoptimizationException());
ClearException();
- ShadowFrame* shadow_frame =
- PopStackedShadowFrame(StackedShadowFrameType::kDeoptimizationShadowFrame);
ObjPtr<mirror::Throwable> pending_exception;
bool from_code = false;
DeoptimizationMethodType method_type;
PopDeoptimizationContext(result, &pending_exception, &from_code, &method_type);
SetTopOfStack(nullptr);
- SetTopOfShadowStack(shadow_frame);
// Restore the exception that was pending before deoptimization then interpret the
// deoptimized frames.
if (pending_exception != nullptr) {
SetException(pending_exception);
}
- interpreter::EnterInterpreterFromDeoptimize(this,
- shadow_frame,
- result,
- from_code,
- method_type);
+
+ ShadowFrame* shadow_frame = MaybePopDeoptimizedStackedShadowFrame();
+ // We may not have a shadow frame if we deoptimized at the return of the
+ // quick_to_interpreter_bridge which got directly called by art_quick_invoke_stub.
+ if (shadow_frame != nullptr) {
+ SetTopOfShadowStack(shadow_frame);
+ interpreter::EnterInterpreterFromDeoptimize(this,
+ shadow_frame,
+ result,
+ from_code,
+ method_type);
+ }
}
void Thread::SetAsyncException(ObjPtr<mirror::Throwable> new_exception) {
@@ -4534,7 +4736,7 @@ bool Thread::IsAotCompiler() {
mirror::Object* Thread::GetPeerFromOtherThread() const {
DCHECK(tlsPtr_.jpeer == nullptr);
mirror::Object* peer = tlsPtr_.opeer;
- if (kUseReadBarrier && Current()->GetIsGcMarking()) {
+ if (gUseReadBarrier && Current()->GetIsGcMarking()) {
// We may call Thread::Dump() in the middle of the CC thread flip and this thread's stack
// may have not been flipped yet and peer may be a from-space (stale) ref. So explicitly
// mark/forward it here.
@@ -4587,8 +4789,7 @@ bool Thread::IsSystemDaemon() const {
if (GetPeer() == nullptr) {
return false;
}
- return jni::DecodeArtField(
- WellKnownClasses::java_lang_Thread_systemDaemon)->GetBoolean(GetPeer());
+ return WellKnownClasses::java_lang_Thread_systemDaemon->GetBoolean(GetPeer());
}
std::string Thread::StateAndFlagsAsHexString() const {
@@ -4606,7 +4807,9 @@ void ScopedExceptionStorage::SuppressOldException(const char* message) {
CHECK(self_->IsExceptionPending()) << *self_;
ObjPtr<mirror::Throwable> old_suppressed(excp_.Get());
excp_.Assign(self_->GetException());
- LOG(WARNING) << message << "Suppressing old exception: " << old_suppressed->Dump();
+ if (old_suppressed != nullptr) {
+ LOG(WARNING) << message << "Suppressing old exception: " << old_suppressed->Dump();
+ }
self_->ClearException();
}