diff options
author | 2020-05-05 10:07:59 +0100 | |
---|---|---|
committer | 2020-05-05 13:34:31 +0000 | |
commit | d34b73b4ac478462acc03c4cd42ae7568c832eb8 (patch) | |
tree | 27f1c4599178ba57451c29d0156c232768711b6d | |
parent | 4a48775376a4c0b180a7d32ad2cdf00bd0dca140 (diff) |
Clean up internal stack trace construction.
Simplify the code by ignoring active transactions. Writing
to fields of a newly allocated object does not need to be
recorded, as aborting the transaction removes all references
to the new object, and it is unnecessary to roll back writes
to an unreachable object's fields.
Test: m test-art-host-gtest
Test: testrunner.py --host --optimizing
Test: aosp_taimen-userdebug boots.
Change-Id: Ia91d3274398b0ca0f5b0040dcf323921d915b657
-rw-r--r-- | compiler/exception_test.cc | 2 | ||||
-rw-r--r-- | compiler/jni/jni_compiler_test.cc | 2 | ||||
-rw-r--r-- | runtime/common_throws.cc | 2 | ||||
-rw-r--r-- | runtime/interpreter/unstarted_runtime.cc | 6 | ||||
-rw-r--r-- | runtime/mirror/array-inl.h | 20 | ||||
-rw-r--r-- | runtime/mirror/array.h | 7 | ||||
-rw-r--r-- | runtime/native/dalvik_system_VMStack.cc | 4 | ||||
-rw-r--r-- | runtime/native/java_lang_Throwable.cc | 2 | ||||
-rw-r--r-- | runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc | 4 | ||||
-rw-r--r-- | runtime/thread.cc | 39 | ||||
-rw-r--r-- | runtime/thread.h | 1 |
11 files changed, 40 insertions, 49 deletions
diff --git a/compiler/exception_test.cc b/compiler/exception_test.cc index 633e124d07..7d56da07fb 100644 --- a/compiler/exception_test.cc +++ b/compiler/exception_test.cc @@ -213,7 +213,7 @@ TEST_F(ExceptionTest, StackTraceElement) { // Set up thread to appear as if we called out of method_g_ at given pc dex. thread->SetTopOfStack(reinterpret_cast<ArtMethod**>(&fake_stack[0])); - jobject internal = thread->CreateInternalStackTrace<false>(soa); + jobject internal = thread->CreateInternalStackTrace(soa); ASSERT_TRUE(internal != nullptr); jobjectArray ste_array = Thread::InternalStackTraceToStackTraceElementArray(soa, internal); ASSERT_TRUE(ste_array != nullptr); diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc index 405c9ec689..2db139078f 100644 --- a/compiler/jni/jni_compiler_test.cc +++ b/compiler/jni/jni_compiler_test.cc @@ -1178,7 +1178,7 @@ jint Java_MyClassNatives_nativeUpCall(JNIEnv* env, jobject thisObj, jint i) { ScopedObjectAccess soa(env); // Build stack trace - jobject internal = Thread::Current()->CreateInternalStackTrace<false>(soa); + jobject internal = Thread::Current()->CreateInternalStackTrace(soa); jobjectArray ste_array = Thread::InternalStackTraceToStackTraceElementArray(soa, internal); ObjPtr<mirror::ObjectArray<mirror::StackTraceElement>> trace_array = soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>>(ste_array); diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc index 1c9cf1860b..5a7944c0fe 100644 --- a/runtime/common_throws.cc +++ b/runtime/common_throws.cc @@ -831,7 +831,7 @@ void ThrowStackOverflowError(Thread* self) { ScopedLocalRef<jobject> stack_state_val(env, nullptr); { ScopedObjectAccessUnchecked soa(env); // TODO: Is this necessary? 
- stack_state_val.reset(soa.Self()->CreateInternalStackTrace<false>(soa)); + stack_state_val.reset(soa.Self()->CreateInternalStackTrace(soa)); } if (stack_state_val != nullptr) { env->SetObjectField(exc.get(), diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc index 1bdffa3812..6b10b7fb44 100644 --- a/runtime/interpreter/unstarted_runtime.cc +++ b/runtime/interpreter/unstarted_runtime.cc @@ -1837,11 +1837,7 @@ void UnstartedRuntime::UnstartedJNIThrowableNativeFillInStackTrace( Thread* self, ArtMethod* method ATTRIBUTE_UNUSED, mirror::Object* receiver ATTRIBUTE_UNUSED, uint32_t* args ATTRIBUTE_UNUSED, JValue* result) { ScopedObjectAccessUnchecked soa(self); - if (Runtime::Current()->IsActiveTransaction()) { - result->SetL(soa.Decode<mirror::Object>(self->CreateInternalStackTrace<true>(soa))); - } else { - result->SetL(soa.Decode<mirror::Object>(self->CreateInternalStackTrace<false>(soa))); - } + result->SetL(soa.Decode<mirror::Object>(self->CreateInternalStackTrace(soa))); } void UnstartedRuntime::UnstartedJNIByteOrderIsLittleEndian( diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h index 34925f52e2..b75aa5d011 100644 --- a/runtime/mirror/array-inl.h +++ b/runtime/mirror/array-inl.h @@ -22,6 +22,7 @@ #include <android-base/logging.h> #include "base/bit_utils.h" +#include "base/casts.h" #include "class.h" #include "obj_ptr-inl.h" #include "runtime.h" @@ -251,23 +252,22 @@ inline T PointerArray::GetElementPtrSize(uint32_t idx, PointerSize ptr_size) { return GetElementPtrSize<T, PointerSize::k32, kVerifyFlags>(idx); } -template<bool kTransactionActive, bool kUnchecked> +template<bool kTransactionActive, bool kCheckTransaction, bool kUnchecked> inline void PointerArray::SetElementPtrSize(uint32_t idx, uint64_t element, PointerSize ptr_size) { if (ptr_size == PointerSize::k64) { (kUnchecked ? 
ObjPtr<LongArray>::DownCast(ObjPtr<Object>(this)) : AsLongArray())-> - SetWithoutChecks<kTransactionActive>(idx, element); + SetWithoutChecks<kTransactionActive, kCheckTransaction>(idx, element); } else { - DCHECK_LE(element, static_cast<uint64_t>(0xFFFFFFFFu)); + uint32_t element32 = dchecked_integral_cast<uint32_t>(element); (kUnchecked ? ObjPtr<IntArray>::DownCast(ObjPtr<Object>(this)) : AsIntArray()) - ->SetWithoutChecks<kTransactionActive>(idx, static_cast<uint32_t>(element)); + ->SetWithoutChecks<kTransactionActive, kCheckTransaction>(idx, element32); } } -template<bool kTransactionActive, bool kUnchecked, typename T> +template<bool kTransactionActive, bool kCheckTransaction, bool kUnchecked, typename T> inline void PointerArray::SetElementPtrSize(uint32_t idx, T* element, PointerSize ptr_size) { - SetElementPtrSize<kTransactionActive, kUnchecked>(idx, - reinterpret_cast<uintptr_t>(element), - ptr_size); + SetElementPtrSize<kTransactionActive, kCheckTransaction, kUnchecked>( + idx, reinterpret_cast<uintptr_t>(element), ptr_size); } template <VerifyObjectFlags kVerifyFlags, typename Visitor> @@ -278,7 +278,9 @@ inline void PointerArray::Fixup(ObjPtr<mirror::PointerArray> dest, void* ptr = GetElementPtrSize<void*, kVerifyFlags>(i, pointer_size); void* new_ptr = visitor(ptr); if (ptr != new_ptr) { - dest->SetElementPtrSize<false, true>(i, new_ptr, pointer_size); + dest->SetElementPtrSize</*kActiveTransaction=*/ false, + /*kCheckTransaction=*/ true, + /*kUnchecked=*/ true>(i, new_ptr, pointer_size); } } } diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h index 19f9a927e3..4c172f22f0 100644 --- a/runtime/mirror/array.h +++ b/runtime/mirror/array.h @@ -241,10 +241,13 @@ class PointerArray : public Array { static_cast<size_t>(ptr_size) * index); } - template<bool kTransactionActive = false, bool kUnchecked = false> + template<bool kTransactionActive = false, bool kCheckTransaction = true, bool kUnchecked = false> void SetElementPtrSize(uint32_t idx, 
uint64_t element, PointerSize ptr_size) REQUIRES_SHARED(Locks::mutator_lock_); - template<bool kTransactionActive = false, bool kUnchecked = false, typename T> + template<bool kTransactionActive = false, + bool kCheckTransaction = true, + bool kUnchecked = false, + typename T> void SetElementPtrSize(uint32_t idx, T* element, PointerSize ptr_size) REQUIRES_SHARED(Locks::mutator_lock_); diff --git a/runtime/native/dalvik_system_VMStack.cc b/runtime/native/dalvik_system_VMStack.cc index 32733a8409..9d2dfac069 100644 --- a/runtime/native/dalvik_system_VMStack.cc +++ b/runtime/native/dalvik_system_VMStack.cc @@ -84,7 +84,7 @@ static jint VMStack_fillStackTraceElements(JNIEnv* env, jclass, jobject javaThre ScopedFastNativeObjectAccess soa(env); auto fn = [](Thread* thread, const ScopedFastNativeObjectAccess& soaa) REQUIRES_SHARED(Locks::mutator_lock_) -> jobject { - return thread->CreateInternalStackTrace<false>(soaa); + return thread->CreateInternalStackTrace(soaa); }; jobject trace = GetThreadStack(soa, javaThread, fn); if (trace == nullptr) { @@ -151,7 +151,7 @@ static jobjectArray VMStack_getThreadStackTrace(JNIEnv* env, jclass, jobject jav ScopedFastNativeObjectAccess soa(env); auto fn = [](Thread* thread, const ScopedFastNativeObjectAccess& soaa) REQUIRES_SHARED(Locks::mutator_lock_) -> jobject { - return thread->CreateInternalStackTrace<false>(soaa); + return thread->CreateInternalStackTrace(soaa); }; jobject trace = GetThreadStack(soa, javaThread, fn); if (trace == nullptr) { diff --git a/runtime/native/java_lang_Throwable.cc b/runtime/native/java_lang_Throwable.cc index b5ef7d807b..b89e287481 100644 --- a/runtime/native/java_lang_Throwable.cc +++ b/runtime/native/java_lang_Throwable.cc @@ -27,7 +27,7 @@ namespace art { static jobject Throwable_nativeFillInStackTrace(JNIEnv* env, jclass) { ScopedFastNativeObjectAccess soa(env); - return soa.Self()->CreateInternalStackTrace<false>(soa); + return soa.Self()->CreateInternalStackTrace(soa); } static jobjectArray 
Throwable_nativeGetStackTrace(JNIEnv* env, jclass, jobject javaStackState) { diff --git a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc index d405735f19..b50825abe0 100644 --- a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc +++ b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc @@ -60,7 +60,7 @@ static jobjectArray DdmVmInternal_getStackTraceById(JNIEnv* env, jclass, jint th if (static_cast<uint32_t>(thin_lock_id) == self->GetThreadId()) { // No need to suspend ourself to build stacktrace. ScopedObjectAccess soa(env); - jobject internal_trace = self->CreateInternalStackTrace<false>(soa); + jobject internal_trace = self->CreateInternalStackTrace(soa); trace = Thread::InternalStackTraceToStackTraceElementArray(soa, internal_trace); } else { ThreadList* thread_list = Runtime::Current()->GetThreadList(); @@ -78,7 +78,7 @@ static jobjectArray DdmVmInternal_getStackTraceById(JNIEnv* env, jclass, jint th if (thread != nullptr) { { ScopedObjectAccess soa(env); - jobject internal_trace = thread->CreateInternalStackTrace<false>(soa); + jobject internal_trace = thread->CreateInternalStackTrace(soa); trace = Thread::InternalStackTraceToStackTraceElementArray(soa, internal_trace); } // Restart suspended thread. 
diff --git a/runtime/thread.cc b/runtime/thread.cc index 684fae4cf1..607ad9f7f8 100644 --- a/runtime/thread.cc +++ b/runtime/thread.cc @@ -2711,7 +2711,6 @@ class FetchStackTraceVisitor : public StackVisitor { DISALLOW_COPY_AND_ASSIGN(FetchStackTraceVisitor); }; -template<bool kTransactionActive> class BuildInternalStackTraceVisitor : public StackVisitor { public: BuildInternalStackTraceVisitor(Thread* self, Thread* thread, int skip_depth) @@ -2747,7 +2746,7 @@ class BuildInternalStackTraceVisitor : public StackVisitor { self_->AssertPendingOOMException(); return false; } - trace->Set(0, methods_and_pcs); + trace->Set</*kTransactionActive=*/ false, /*kCheckTransaction=*/ false>(0, methods_and_pcs); trace_ = trace.Get(); // If We are called from native, use non-transactional mode. CHECK(last_no_suspend_cause == nullptr) << last_no_suspend_cause; @@ -2775,15 +2774,15 @@ class BuildInternalStackTraceVisitor : public StackVisitor { } void AddFrame(ArtMethod* method, uint32_t dex_pc) REQUIRES_SHARED(Locks::mutator_lock_) { - ObjPtr<mirror::PointerArray> trace_methods_and_pcs = GetTraceMethodsAndPCs(); - trace_methods_and_pcs->SetElementPtrSize<kTransactionActive>(count_, method, pointer_size_); - trace_methods_and_pcs->SetElementPtrSize<kTransactionActive>( - trace_methods_and_pcs->GetLength() / 2 + count_, - dex_pc, - pointer_size_); + ObjPtr<mirror::PointerArray> methods_and_pcs = GetTraceMethodsAndPCs(); + methods_and_pcs->SetElementPtrSize</*kTransactionActive=*/ false, /*kCheckTransaction=*/ false>( + count_, method, pointer_size_); + methods_and_pcs->SetElementPtrSize</*kTransactionActive=*/ false, /*kCheckTransaction=*/ false>( + methods_and_pcs->GetLength() / 2 + count_, dex_pc, pointer_size_); // Save the declaring class of the method to ensure that the declaring classes of the methods // do not get unloaded while the stack trace is live. 
- trace_->Set(count_ + 1, method->GetDeclaringClass()); + trace_->Set</*kTransactionActive=*/ false, /*kCheckTransaction=*/ false>( + count_ + 1, method->GetDeclaringClass()); ++count_; } @@ -2802,9 +2801,10 @@ class BuildInternalStackTraceVisitor : public StackVisitor { // Current position down stack trace. uint32_t count_ = 0; // An object array where the first element is a pointer array that contains the ArtMethod - // pointers on the stack and dex PCs. The rest of the elements are the declaring - // class of the ArtMethod pointers. trace_[i+1] contains the declaring class of the ArtMethod of - // the i'th frame. + // pointers on the stack and dex PCs. The rest of the elements are the declaring class of + // the ArtMethod pointers. trace_[i+1] contains the declaring class of the ArtMethod of the + // i'th frame. We're initializing a newly allocated trace, so we do not need to record that + // under a transaction. If the transaction is aborted, the whole trace shall be unreachable. mirror::ObjectArray<mirror::Object>* trace_ = nullptr; // For cross compilation. const PointerSize pointer_size_; @@ -2812,7 +2812,6 @@ class BuildInternalStackTraceVisitor : public StackVisitor { DISALLOW_COPY_AND_ASSIGN(BuildInternalStackTraceVisitor); }; -template<bool kTransactionActive> jobject Thread::CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const { // Compute depth of stack, save frames if possible to avoid needing to recompute many. constexpr size_t kMaxSavedFrames = 256; @@ -2825,9 +2824,8 @@ jobject Thread::CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable const uint32_t skip_depth = count_visitor.GetSkipDepth(); // Build internal stack trace. 
- BuildInternalStackTraceVisitor<kTransactionActive> build_trace_visitor(soa.Self(), - const_cast<Thread*>(this), - skip_depth); + BuildInternalStackTraceVisitor build_trace_visitor( + soa.Self(), const_cast<Thread*>(this), skip_depth); if (!build_trace_visitor.Init(depth)) { return nullptr; // Allocation failed. } @@ -2853,10 +2851,6 @@ jobject Thread::CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable } return soa.AddLocalReference<jobject>(trace); } -template jobject Thread::CreateInternalStackTrace<false>( - const ScopedObjectAccessAlreadyRunnable& soa) const; -template jobject Thread::CreateInternalStackTrace<true>( - const ScopedObjectAccessAlreadyRunnable& soa) const; bool Thread::IsExceptionThrownByCurrentMethod(ObjPtr<mirror::Throwable> exception) const { // Only count the depth since we do not pass a stack frame array as an argument. @@ -3273,10 +3267,7 @@ void Thread::ThrowNewWrappedException(const char* exception_class_descriptor, if (cause.get() != nullptr) { exception->SetCause(DecodeJObject(cause.get())->AsThrowable()); } - ScopedLocalRef<jobject> trace(GetJniEnv(), - Runtime::Current()->IsActiveTransaction() - ? CreateInternalStackTrace<true>(soa) - : CreateInternalStackTrace<false>(soa)); + ScopedLocalRef<jobject> trace(GetJniEnv(), CreateInternalStackTrace(soa)); if (trace.get() != nullptr) { exception->SetStackState(DecodeJObject(trace.get()).Ptr()); } diff --git a/runtime/thread.h b/runtime/thread.h index faadd65b0d..5742689d37 100644 --- a/runtime/thread.h +++ b/runtime/thread.h @@ -638,7 +638,6 @@ class Thread { // Create the internal representation of a stack trace, that is more time // and space efficient to compute than the StackTraceElement[]. - template<bool kTransactionActive> jobject CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const REQUIRES_SHARED(Locks::mutator_lock_); |