Diffstat (limited to 'runtime/thread.cc')
| -rw-r--r-- | runtime/thread.cc | 268 |
1 file changed, 173 insertions(+), 95 deletions(-)
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 70ed7c8038..77b9f4f24c 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -35,6 +35,7 @@
 #include <list>
 #include <sstream>
 
+#include "android-base/file.h"
 #include "android-base/stringprintf.h"
 #include "android-base/strings.h"
 
@@ -88,6 +89,7 @@
 #include "native_stack_dump.h"
 #include "nativehelper/scoped_local_ref.h"
 #include "nativehelper/scoped_utf_chars.h"
+#include "nterp_helpers.h"
 #include "nth_caller_visitor.h"
 #include "oat_quick_method_header.h"
 #include "obj_ptr-inl.h"
@@ -97,6 +99,7 @@
 #include "quick_exception_handler.h"
 #include "read_barrier-inl.h"
 #include "reflection.h"
+#include "reflective_handle_scope-inl.h"
 #include "runtime-inl.h"
 #include "runtime.h"
 #include "runtime_callbacks.h"
@@ -130,6 +133,9 @@ ConditionVariable* Thread::resume_cond_ = nullptr;
 const size_t Thread::kStackOverflowImplicitCheckSize = GetStackOverflowReservedBytes(kRuntimeISA);
 bool (*Thread::is_sensitive_thread_hook_)() = nullptr;
 Thread* Thread::jit_sensitive_thread_ = nullptr;
+#ifndef __BIONIC__
+thread_local Thread* Thread::self_tls_ = nullptr;
+#endif
 
 static constexpr bool kVerifyImageObjectsMarked = kIsDebugBuild;
 
@@ -580,6 +586,14 @@ void Thread::InitAfterFork() {
   InitTid();
 }
 
+void Thread::DeleteJPeer(JNIEnv* env) {
+  // Make sure nothing can observe both opeer and jpeer set at the same time.
+  jobject old_jpeer = tlsPtr_.jpeer;
+  CHECK(old_jpeer != nullptr);
+  tlsPtr_.jpeer = nullptr;
+  env->DeleteGlobalRef(old_jpeer);
+}
+
 void* Thread::CreateCallback(void* arg) {
   Thread* self = reinterpret_cast<Thread*>(arg);
   Runtime* runtime = Runtime::Current();
@@ -609,8 +623,8 @@ void* Thread::CreateCallback(void* arg) {
     // Copy peer into self, deleting global reference when done.
     CHECK(self->tlsPtr_.jpeer != nullptr);
     self->tlsPtr_.opeer = soa.Decode<mirror::Object>(self->tlsPtr_.jpeer).Ptr();
-    self->GetJniEnv()->DeleteGlobalRef(self->tlsPtr_.jpeer);
-    self->tlsPtr_.jpeer = nullptr;
+    // Make sure nothing can observe both opeer and jpeer set at the same time.
+    self->DeleteJPeer(self->GetJniEnv());
     self->SetThreadName(self->GetThreadName()->ToModifiedUtf8().c_str());
 
     ArtField* priorityField = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_priority);
@@ -776,7 +790,9 @@ void Thread::InstallImplicitProtection() {
 #else
       1u;
 #endif
-  volatile char space[kPageSize - (kAsanMultiplier * 256)];
+  // Keep space uninitialized as it can overflow the stack otherwise (should Clang actually
+  // auto-initialize this local variable).
+  volatile char space[kPageSize - (kAsanMultiplier * 256)] __attribute__((uninitialized));
   char sink ATTRIBUTE_UNUSED = space[zero];  // NOLINT
   // Remove tag from the pointer. Nop in non-hwasan builds.
   uintptr_t addr = reinterpret_cast<uintptr_t>(__hwasan_tag_pointer(space, 0));
@@ -887,9 +903,9 @@ void Thread::CreateNativeThread(JNIEnv* env, jobject java_peer, size_t stack_siz
       MutexLock mu(self, *Locks::runtime_shutdown_lock_);
       runtime->EndThreadBirth();
     }
-    // Manually delete the global reference since Thread::Init will not have been run.
-    env->DeleteGlobalRef(child_thread->tlsPtr_.jpeer);
-    child_thread->tlsPtr_.jpeer = nullptr;
+    // Manually delete the global reference since Thread::Init will not have been run. Make sure
+    // nothing can observe both opeer and jpeer set at the same time.
+    child_thread->DeleteJPeer(env);
    delete child_thread;
    child_thread = nullptr;
    // TODO: remove from thread group?
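Note on the @@ -776,7 +790,9 @@ hunk above: __attribute__((uninitialized)) is
Clang's per-variable opt-out from the -ftrivial-auto-var-init mitigation. A
minimal standalone sketch of the same pattern (function name and buffer size
are ours, not ART's):

    // Build with: clang++ -c -ftrivial-auto-var-init=pattern probe.cc
    // Touch a stack page without paying for automatic variable
    // initialization: auto-init would write the entire buffer, which is
    // exactly the stack traffic InstallImplicitProtection must avoid.
    void TouchStackPage() {
      volatile char space[4096] __attribute__((uninitialized));
      space[0] = 0;  // A single probe store is all that is needed.
    }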
@@ -932,10 +948,11 @@ bool Thread::Init(ThreadList* thread_list, JavaVMExt* java_vm, JNIEnvExt* jni_en
     interpreter::InitInterpreterTls(this);
   }
 
-#ifdef ART_TARGET_ANDROID
+#ifdef __BIONIC__
   __get_tls()[TLS_SLOT_ART_THREAD_SELF] = this;
 #else
   CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, this), "attach self");
+  Thread::self_tls_ = this;
 #endif
   DCHECK_EQ(Thread::Current(), this);
 
@@ -1447,9 +1464,6 @@ bool Thread::ModifySuspendCountInternal(Thread* self,
 
   tls32_.suspend_count += delta;
   switch (reason) {
-    case SuspendReason::kForDebugger:
-      tls32_.debug_suspend_count += delta;
-      break;
     case SuspendReason::kForUserCode:
       tls32_.user_code_suspend_count += delta;
       break;
@@ -1773,7 +1787,8 @@ static std::string GetSchedulerGroupName(pid_t tid) {
   // 1:cpuacct,cpu:/
   // We want the third field from the line whose second field contains the "cpu" token.
   std::string cgroup_file;
-  if (!ReadFileToString(StringPrintf("/proc/self/task/%d/cgroup", tid), &cgroup_file)) {
+  if (!android::base::ReadFileToString(StringPrintf("/proc/self/task/%d/cgroup", tid),
+                                       &cgroup_file)) {
     return "";
   }
   std::vector<std::string> cgroup_lines;
@@ -1833,8 +1848,11 @@ void Thread::DumpState(std::ostream& os, const Thread* thread, pid_t tid) {
           group_name_field->GetObject(thread_group)->AsString();
       group_name = (group_name_string != nullptr) ? group_name_string->ToModifiedUtf8() : "<null>";
     }
+  } else if (thread != nullptr) {
+    priority = thread->GetNativePriority();
   } else {
-    priority = GetNativePriority();
+    PaletteStatus status = PaletteSchedGetPriority(tid, &priority);
+    CHECK(status == PaletteStatus::kOkay || status == PaletteStatus::kCheckErrno);
   }
 
   std::string scheduler_group_name(GetSchedulerGroupName(tid));
@@ -1906,7 +1924,8 @@ void Thread::DumpState(std::ostream& os, const Thread* thread, pid_t tid) {
 
   // Grab the scheduler stats for this thread.
   std::string scheduler_stats;
-  if (ReadFileToString(StringPrintf("/proc/self/task/%d/schedstat", tid), &scheduler_stats)
+  if (android::base::ReadFileToString(StringPrintf("/proc/self/task/%d/schedstat", tid),
+                                      &scheduler_stats)
       && !scheduler_stats.empty()) {
     scheduler_stats = android::base::Trim(scheduler_stats);  // Lose the trailing '\n'.
   } else {
@@ -2138,23 +2157,12 @@ void Thread::DumpJavaStack(std::ostream& os, bool check_suspended, bool dump_loc
   // assumption that there is no exception pending on entry. Thus, stash any pending exception.
   // Thread::Current() instead of this in case a thread is dumping the stack of another suspended
   // thread.
-  StackHandleScope<1> scope(Thread::Current());
-  Handle<mirror::Throwable> exc;
-  bool have_exception = false;
-  if (IsExceptionPending()) {
-    exc = scope.NewHandle(GetException());
-    const_cast<Thread*>(this)->ClearException();
-    have_exception = true;
-  }
+  ScopedExceptionStorage ses(Thread::Current());
 
   std::unique_ptr<Context> context(Context::Create());
   StackDumpVisitor dumper(os, const_cast<Thread*>(this), context.get(),
                           !tls32_.throwing_OutOfMemoryError, check_suspended, dump_locks);
   dumper.WalkStack();
-
-  if (have_exception) {
-    const_cast<Thread*>(this)->SetException(exc.Get());
-  }
 }
 
 void Thread::DumpStack(std::ostream& os,
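Note on the __BIONIC__ hunks above: keying the fast path on __BIONIC__ rather
than ART_TARGET_ANDROID means any Bionic-based build uses the reserved TLS
slot, while builds against other C libraries fall back to the pthread key plus
the new self_tls_ thread_local. A schematic of the two lookup paths
(simplified; it assumes ART's internal headers for Thread, __get_tls() and
TLS_SLOT_ART_THREAD_SELF, and is not ART's actual accessor):

    #ifdef __BIONIC__
    // Bionic reserves a TLS slot for ART, so the read is a single load.
    inline Thread* CurrentThread() {
      return reinterpret_cast<Thread*>(__get_tls()[TLS_SLOT_ART_THREAD_SELF]);
    }
    #else
    // Elsewhere, a C++11 thread_local mirrors the pthread key so the common
    // read can avoid pthread_getspecific(); the key itself is still set so
    // the thread-exit callback keeps firing (see ThreadExitCallback below).
    inline Thread* CurrentThread() { return Thread::self_tls_; }
    #endif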
@@ -2174,7 +2182,6 @@ void Thread::DumpStack(std::ostream& os,
   if (safe_to_dump || force_dump_stack) {
     // If we're currently in native code, dump that stack before dumping the managed stack.
     if (dump_native_stack && (dump_for_abort || force_dump_stack || ShouldShowNativeStack(this))) {
-      DumpKernelStack(os, GetTid(), "  kernel: ", false);
       ArtMethod* method =
           GetCurrentMethod(nullptr,
                            /*check_suspended=*/ !force_dump_stack,
@@ -2195,10 +2202,11 @@ void Thread::ThreadExitCallback(void* arg) {
     LOG(WARNING) << "Native thread exiting without having called DetachCurrentThread (maybe it's "
         "going to use a pthread_key_create destructor?): " << *self;
     CHECK(is_started_);
-#ifdef ART_TARGET_ANDROID
+#ifdef __BIONIC__
     __get_tls()[TLS_SLOT_ART_THREAD_SELF] = self;
 #else
     CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, self), "reattach self");
+    Thread::self_tls_ = self;
 #endif
     self->tls32_.thread_exit_check_count = 1;
   } else {
@@ -2229,6 +2237,9 @@ void Thread::Startup() {
   if (pthread_getspecific(pthread_key_self_) != nullptr) {
     LOG(FATAL) << "Newly-created pthread TLS slot is not nullptr";
   }
+#ifndef __BIONIC__
+  CHECK(Thread::self_tls_ == nullptr);
+#endif
 }
 
 void Thread::FinishStartup() {
@@ -2287,7 +2298,8 @@ Thread::Thread(bool daemon)
       is_runtime_thread_(false) {
   wait_mutex_ = new Mutex("a thread wait mutex", LockLevel::kThreadWaitLock);
   wait_cond_ = new ConditionVariable("a thread wait condition variable", *wait_mutex_);
-  tlsPtr_.instrumentation_stack = new std::deque<instrumentation::InstrumentationStackFrame>;
+  tlsPtr_.instrumentation_stack =
+      new std::map<uintptr_t, instrumentation::InstrumentationStackFrame>;
   tlsPtr_.name = new std::string(kThreadNameDuringStartup);
 
   static_assert((sizeof(Thread) % 4) == 0U,
@@ -2310,6 +2322,7 @@ Thread::Thread(bool daemon)
   tlsPtr_.thread_local_mark_stack = nullptr;
   tls32_.is_transitioning_to_runnable = false;
   tls32_.use_mterp = false;
+  ResetTlab();
 }
 
 void Thread::NotifyInTheadList() {
@@ -2436,9 +2449,11 @@ void Thread::Destroy() {
   {
     ScopedObjectAccess soa(self);
     Runtime::Current()->GetHeap()->RevokeThreadLocalBuffers(this);
-    if (kUseReadBarrier) {
-      Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->RevokeThreadLocalMarkStack(this);
-    }
+  }
+  // Mark-stack revocation must be performed at the very end. No
+  // checkpoint/flip-function or read-barrier should be called after this.
+  if (kUseReadBarrier) {
+    Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->RevokeThreadLocalMarkStack(this);
   }
 }
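Note on the Thread::Thread() hunk above: the instrumentation stack changes
from std::deque to std::map keyed by uintptr_t, so a stack walker holding only
a frame's address can locate its InstrumentationStackFrame by lookup instead
of a linear scan (the keying scheme is inferred from this diff alone). Toy
shape of that access pattern:

    #include <cstdint>
    #include <map>

    struct InstrumentationStackFrame { void* this_object_; /* ... */ };
    using InstrumentationStack =
        std::map<uintptr_t, InstrumentationStackFrame>;

    // O(log n) lookup by frame address; a deque would need a full scan.
    InstrumentationStackFrame* FindFrame(InstrumentationStack& stack,
                                         uintptr_t frame_addr) {
      auto it = stack.find(frame_addr);
      return it == stack.end() ? nullptr : &it->second;
    }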
@@ -2459,6 +2474,12 @@ Thread::~Thread() {
   CHECK(tlsPtr_.flip_function == nullptr);
   CHECK_EQ(tls32_.is_transitioning_to_runnable, false);
 
+  if (kUseReadBarrier) {
+    Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()
+        ->AssertNoThreadMarkStackMapping(this);
+    gc::accounting::AtomicStack<mirror::Object>* tl_mark_stack = GetThreadLocalMarkStack();
+    CHECK(tl_mark_stack == nullptr) << "mark-stack: " << tl_mark_stack;
+  }
   // Make sure we processed all deoptimization requests.
   CHECK(tlsPtr_.deoptimization_context_stack == nullptr) << "Missed deoptimization";
   CHECK(tlsPtr_.frame_id_to_shadow_frame == nullptr) <<
@@ -2478,9 +2499,6 @@ Thread::~Thread() {
     CleanupCpu();
   }
 
-  if (tlsPtr_.single_step_control != nullptr) {
-    delete tlsPtr_.single_step_control;
-  }
   delete tlsPtr_.instrumentation_stack;
   delete tlsPtr_.name;
   delete tlsPtr_.deps_or_stack_trace_sample.stack_trace_sample;
@@ -3281,7 +3299,7 @@ void Thread::ThrowNewWrappedException(const char* exception_class_descriptor,
       ++i;
     }
     ScopedLocalRef<jobject> ref(soa.Env(), soa.AddLocalReference<jobject>(exception.Get()));
-    InvokeWithJValues(soa, ref.get(), jni::EncodeArtMethod(exception_init_method), jv_args);
+    InvokeWithJValues(soa, ref.get(), exception_init_method, jv_args);
     if (LIKELY(!IsExceptionPending())) {
       SetException(exception.Get());
     }
@@ -3353,6 +3371,7 @@ void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset) {
     return; \
   }
   JNI_ENTRY_POINT_INFO(pDlsymLookup)
+  JNI_ENTRY_POINT_INFO(pDlsymLookupCritical)
 #undef JNI_ENTRY_POINT_INFO
 
 #define QUICK_ENTRY_POINT_INFO(x) \
@@ -3544,7 +3563,7 @@ void Thread::QuickDeliverException() {
     // Instrumentation may cause GC so keep the exception object safe.
     StackHandleScope<1> hs(this);
     HandleWrapperObjPtr<mirror::Throwable> h_exception(hs.NewHandleWrapper(&exception));
-    instrumentation->ExceptionThrownEvent(this, exception.Ptr());
+    instrumentation->ExceptionThrownEvent(this, exception);
   }
   // Does instrumentation need to deoptimize the stack or otherwise go to interpreter for something?
   // Note: we do this *after* reporting the exception to instrumentation in case it now requires
@@ -3556,8 +3575,8 @@ void Thread::QuickDeliverException() {
   // instrumentation trampolines (for example with DDMS tracing). That forces us to do deopt later
   // and see every frame being popped. We don't need to handle it any differently.
   ShadowFrame* cf;
-  bool force_deopt;
-  {
+  bool force_deopt = false;
+  if (Runtime::Current()->AreNonStandardExitsEnabled() || kIsDebugBuild) {
     NthCallerVisitor visitor(this, 0, false);
     visitor.WalkStack();
     cf = visitor.GetCurrentShadowFrame();
@@ -3567,16 +3586,16 @@ void Thread::QuickDeliverException() {
     bool force_frame_pop = cf != nullptr && cf->GetForcePopFrame();
     bool force_retry_instr = cf != nullptr && cf->GetForceRetryInstruction();
     if (kIsDebugBuild && force_frame_pop) {
+      DCHECK(Runtime::Current()->AreNonStandardExitsEnabled());
       NthCallerVisitor penultimate_visitor(this, 1, false);
       penultimate_visitor.WalkStack();
       ShadowFrame* penultimate_frame = penultimate_visitor.GetCurrentShadowFrame();
       if (penultimate_frame == nullptr) {
         penultimate_frame = FindDebuggerShadowFrame(penultimate_visitor.GetFrameId());
       }
-      DCHECK(penultimate_frame != nullptr &&
-             penultimate_frame->GetForceRetryInstruction())
-          << "Force pop frame without retry instruction found. penultimate frame is null: "
-          << (penultimate_frame == nullptr ? "true" : "false");
"true" : "false"); + } + if (force_retry_instr) { + DCHECK(Runtime::Current()->AreNonStandardExitsEnabled()); } force_deopt = force_frame_pop || force_retry_instr; } @@ -3604,7 +3623,7 @@ void Thread::QuickDeliverException() { method_type); artDeoptimize(this); UNREACHABLE(); - } else { + } else if (visitor.caller != nullptr) { LOG(WARNING) << "Got a deoptimization request on un-deoptimizable method " << visitor.caller->PrettyMethod(); } @@ -3694,6 +3713,8 @@ class ReferenceMapVisitor : public StackVisitor { ShadowFrame* shadow_frame = GetCurrentShadowFrame(); if (shadow_frame != nullptr) { VisitShadowFrame(shadow_frame); + } else if (GetCurrentOatQuickMethodHeader()->IsNterpMethodHeader()) { + VisitNterpFrame(); } else { VisitQuickFrame(); } @@ -3705,7 +3726,6 @@ class ReferenceMapVisitor : public StackVisitor { VisitDeclaringClass(m); DCHECK(m != nullptr); size_t num_regs = shadow_frame->NumberOfVRegs(); - DCHECK(m->IsNative() || shadow_frame->HasReferenceArray()); // handle scope for JNI or References for interpreter. for (size_t reg = 0; reg < num_regs; ++reg) { mirror::Object* ref = shadow_frame->GetVRegReference(reg); @@ -3756,13 +3776,39 @@ class ReferenceMapVisitor : public StackVisitor { } } mirror::Object* new_ref = klass.Ptr(); - visitor_(&new_ref, /* vreg= */ -1, this); + visitor_(&new_ref, /* vreg= */ JavaFrameRootInfo::kMethodDeclaringClass, this); if (new_ref != klass) { method->CASDeclaringClass(klass.Ptr(), new_ref->AsClass()); } } } + void VisitNterpFrame() REQUIRES_SHARED(Locks::mutator_lock_) { + ArtMethod** cur_quick_frame = GetCurrentQuickFrame(); + StackReference<mirror::Object>* vreg_ref_base = + reinterpret_cast<StackReference<mirror::Object>*>(NterpGetReferenceArray(cur_quick_frame)); + StackReference<mirror::Object>* vreg_int_base = + reinterpret_cast<StackReference<mirror::Object>*>(NterpGetRegistersArray(cur_quick_frame)); + CodeItemDataAccessor accessor((*cur_quick_frame)->DexInstructionData()); + const uint16_t num_regs = accessor.RegistersSize(); + // An nterp frame has two arrays: a dex register array and a reference array + // that shadows the dex register array but only containing references + // (non-reference dex registers have nulls). See nterp_helpers.cc. + for (size_t reg = 0; reg < num_regs; ++reg) { + StackReference<mirror::Object>* ref_addr = vreg_ref_base + reg; + mirror::Object* ref = ref_addr->AsMirrorPtr(); + if (ref != nullptr) { + mirror::Object* new_ref = ref; + visitor_(&new_ref, reg, this); + if (new_ref != ref) { + ref_addr->Assign(new_ref); + StackReference<mirror::Object>* int_addr = vreg_int_base + reg; + int_addr->Assign(new_ref); + } + } + } + } + template <typename T> ALWAYS_INLINE inline void VisitQuickFrameWithVregCallback() REQUIRES_SHARED(Locks::mutator_lock_) { @@ -3778,9 +3824,9 @@ class ReferenceMapVisitor : public StackVisitor { StackReference<mirror::Object>* vreg_base = reinterpret_cast<StackReference<mirror::Object>*>(cur_quick_frame); uintptr_t native_pc_offset = method_header->NativeQuickPcOffset(GetCurrentQuickFramePc()); - CodeInfo code_info(method_header, kPrecise - ? CodeInfo::DecodeFlags::AllTables // We will need dex register maps. - : CodeInfo::DecodeFlags::GcMasksOnly); + CodeInfo code_info = kPrecise + ? CodeInfo(method_header) // We will need dex register maps. 
+          : CodeInfo::DecodeGcMasksOnly(method_header);
       StackMap map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
       DCHECK(map.IsValid());
 
@@ -3829,7 +3875,7 @@ class ReferenceMapVisitor : public StackVisitor {
           mirror::Object* ref = ref_addr->AsMirrorPtr();
           if (ref != nullptr) {
             mirror::Object* new_ref = ref;
-            visitor_(&new_ref, /* vreg= */ -1, this);
+            visitor_(&new_ref, /* vreg= */ JavaFrameRootInfo::kProxyReferenceArgument, this);
             if (ref != new_ref) {
               ref_addr->Assign(new_ref);
             }
@@ -3860,7 +3906,7 @@ class ReferenceMapVisitor : public StackVisitor {
                                   size_t stack_index ATTRIBUTE_UNUSED,
                                   const StackVisitor* stack_visitor)
       REQUIRES_SHARED(Locks::mutator_lock_) {
-    visitor(ref, -1, stack_visitor);
+    visitor(ref, JavaFrameRootInfo::kImpreciseVreg, stack_visitor);
   }
 
   ALWAYS_INLINE
@@ -3868,7 +3914,7 @@ class ReferenceMapVisitor : public StackVisitor {
                                size_t register_index ATTRIBUTE_UNUSED,
                                const StackVisitor* stack_visitor)
       REQUIRES_SHARED(Locks::mutator_lock_) {
-    visitor(ref, -1, stack_visitor);
+    visitor(ref, JavaFrameRootInfo::kImpreciseVreg, stack_visitor);
   }
 
   RootVisitor& visitor;
@@ -3886,6 +3932,7 @@ class ReferenceMapVisitor : public StackVisitor {
         code_info(_code_info),
         dex_register_map(code_info.GetDexRegisterMapOf(map)),
         visitor(_visitor) {
+    DCHECK_EQ(dex_register_map.size(), number_of_dex_registers);
   }
 
   // TODO: If necessary, we should consider caching a reverse map instead of the linear
@@ -3905,8 +3952,8 @@ class ReferenceMapVisitor : public StackVisitor {
     }
 
     if (!found) {
-      // If nothing found, report with -1.
-      visitor(ref, -1, stack_visitor);
+      // If nothing found, report with unknown.
+      visitor(ref, JavaFrameRootInfo::kUnknownVreg, stack_visitor);
     }
   }
 
@@ -3955,6 +4002,14 @@ class RootCallbackVisitor {
   const uint32_t tid_;
 };
 
+void Thread::VisitReflectiveTargets(ReflectiveValueVisitor* visitor) {
+  for (BaseReflectiveHandleScope* brhs = GetTopReflectiveHandleScope();
+       brhs != nullptr;
+       brhs = brhs->GetLink()) {
+    brhs->VisitTargets(visitor);
+  }
+}
+
 template <bool kPrecise>
 void Thread::VisitRoots(RootVisitor* visitor) {
   const pid_t thread_id = GetThreadId();
@@ -3971,9 +4026,6 @@ void Thread::VisitRoots(RootVisitor* visitor) {
   tlsPtr_.jni_env->VisitJniLocalRoots(visitor, RootInfo(kRootJNILocal, thread_id));
   tlsPtr_.jni_env->VisitMonitorRoots(visitor, RootInfo(kRootJNIMonitor, thread_id));
   HandleScopeVisitRoots(visitor, thread_id);
-  if (tlsPtr_.debug_invoke_req != nullptr) {
-    tlsPtr_.debug_invoke_req->VisitRoots(visitor, RootInfo(kRootDebugger, thread_id));
-  }
   // Visit roots for deoptimization.
   if (tlsPtr_.stacked_shadow_frame_record != nullptr) {
     RootCallbackVisitor visitor_to_callback(visitor, thread_id);
@@ -4015,8 +4067,43 @@ void Thread::VisitRoots(RootVisitor* visitor) {
   RootCallbackVisitor visitor_to_callback(visitor, thread_id);
   ReferenceMapVisitor<RootCallbackVisitor, kPrecise> mapper(this, &context, visitor_to_callback);
   mapper.template WalkStack<StackVisitor::CountTransitions::kNo>(false);
-  for (instrumentation::InstrumentationStackFrame& frame : *GetInstrumentationStack()) {
-    visitor->VisitRootIfNonNull(&frame.this_object_, RootInfo(kRootVMInternal, thread_id));
+  for (auto& entry : *GetInstrumentationStack()) {
+    visitor->VisitRootIfNonNull(&entry.second.this_object_, RootInfo(kRootVMInternal, thread_id));
+  }
+}
+
+void Thread::SweepInterpreterCache(IsMarkedVisitor* visitor) {
+  for (InterpreterCache::Entry& entry : GetInterpreterCache()->GetArray()) {
+    const Instruction* inst = reinterpret_cast<const Instruction*>(entry.first);
+    if (inst != nullptr) {
+      if (inst->Opcode() == Instruction::NEW_INSTANCE ||
+          inst->Opcode() == Instruction::CHECK_CAST ||
+          inst->Opcode() == Instruction::INSTANCE_OF ||
+          inst->Opcode() == Instruction::NEW_ARRAY ||
+          inst->Opcode() == Instruction::CONST_CLASS) {
+        mirror::Class* cls = reinterpret_cast<mirror::Class*>(entry.second);
+        if (cls == nullptr || cls == Runtime::GetWeakClassSentinel()) {
+          // Entry got deleted in a previous sweep.
+          continue;
+        }
+        Runtime::ProcessWeakClass(
+            reinterpret_cast<GcRoot<mirror::Class>*>(&entry.second),
+            visitor,
+            Runtime::GetWeakClassSentinel());
+      } else if (inst->Opcode() == Instruction::CONST_STRING ||
+                 inst->Opcode() == Instruction::CONST_STRING_JUMBO) {
+        mirror::Object* object = reinterpret_cast<mirror::Object*>(entry.second);
+        mirror::Object* new_object = visitor->IsMarked(object);
+        // We know the string is marked because it's a strongly-interned string that
+        // is always alive (see b/117621117 for trying to make those strings weak).
+        // The IsMarked implementation of the CMS collector returns
+        // null for newly allocated objects, but we know those haven't moved. Therefore,
+        // only update the entry if we get a different non-null string.
+        if (new_object != nullptr && new_object != object) {
+          entry.second = reinterpret_cast<size_t>(new_object);
+        }
+      }
+    }
   }
 }
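Note on SweepInterpreterCache above: class entries are weak (swept to a
sentinel when the class dies), while string entries are only ever updated to
a forwarded address, never cleared, because strongly-interned strings stay
alive (b/117621117). The generic shape of such a weak-entry sweep, with toy
types standing in for ART's:

    struct Object {};
    struct IsMarkedVisitor {
      // Returns null if the object is dead, else the (possibly moved) object.
      virtual Object* IsMarked(Object* obj) = 0;
    };

    void SweepWeakEntry(Object** entry, IsMarkedVisitor* v, Object* sentinel) {
      Object* old = *entry;
      if (old == nullptr || old == sentinel) return;  // already swept
      Object* marked = v->IsMarked(old);
      if (marked == nullptr) {
        *entry = sentinel;  // referent died this cycle
      } else if (marked != old) {
        *entry = marked;    // referent moved; install the forwarded address
      }
    }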
@@ -4078,8 +4165,12 @@ void Thread::SetTlab(uint8_t* start, uint8_t* end, uint8_t* limit) {
   tlsPtr_.thread_local_objects = 0;
 }
 
+void Thread::ResetTlab() {
+  SetTlab(nullptr, nullptr, nullptr);
+}
+
 bool Thread::HasTlab() const {
-  bool has_tlab = tlsPtr_.thread_local_pos != nullptr;
+  const bool has_tlab = tlsPtr_.thread_local_pos != nullptr;
   if (has_tlab) {
     DCHECK(tlsPtr_.thread_local_start != nullptr && tlsPtr_.thread_local_end != nullptr);
   } else {
@@ -4113,37 +4204,6 @@ bool Thread::UnprotectStack() {
   return mprotect(pregion, kStackOverflowProtectedSize, PROT_READ|PROT_WRITE) == 0;
 }
 
-void Thread::ActivateSingleStepControl(SingleStepControl* ssc) {
-  CHECK(Dbg::IsDebuggerActive());
-  CHECK(GetSingleStepControl() == nullptr) << "Single step already active in thread " << *this;
-  CHECK(ssc != nullptr);
-  tlsPtr_.single_step_control = ssc;
-}
-
-void Thread::DeactivateSingleStepControl() {
-  CHECK(Dbg::IsDebuggerActive());
-  CHECK(GetSingleStepControl() != nullptr) << "Single step not active in thread " << *this;
-  SingleStepControl* ssc = GetSingleStepControl();
-  tlsPtr_.single_step_control = nullptr;
-  delete ssc;
-}
-
-void Thread::SetDebugInvokeReq(DebugInvokeReq* req) {
-  CHECK(Dbg::IsDebuggerActive());
-  CHECK(GetInvokeReq() == nullptr) << "Debug invoke req already active in thread " << *this;
-  CHECK(Thread::Current() != this) << "Debug invoke can't be dispatched by the thread itself";
-  CHECK(req != nullptr);
-  tlsPtr_.debug_invoke_req = req;
-}
-
-void Thread::ClearDebugInvokeReq() {
-  CHECK(GetInvokeReq() != nullptr) << "Debug invoke req not active in thread " << *this;
-  CHECK(Thread::Current() == this) << "Debug invoke must be finished by the thread itself";
-  DebugInvokeReq* req = tlsPtr_.debug_invoke_req;
-  tlsPtr_.debug_invoke_req = nullptr;
-  delete req;
-}
-
 void Thread::PushVerifier(verifier::MethodVerifier* verifier) {
   verifier->link_ = tlsPtr_.method_verifier;
   tlsPtr_.method_verifier = verifier;
@@ -4266,15 +4326,13 @@ void Thread::ReleaseLongJumpContextInternal() {
 }
 
 void Thread::SetNativePriority(int new_priority) {
-  // ART tests on JVM can reach this code path, use tid = 0 as shorthand for current thread.
-  PaletteStatus status = PaletteSchedSetPriority(0, new_priority);
+  PaletteStatus status = PaletteSchedSetPriority(GetTid(), new_priority);
   CHECK(status == PaletteStatus::kOkay || status == PaletteStatus::kCheckErrno);
 }
 
-int Thread::GetNativePriority() {
+int Thread::GetNativePriority() const {
   int priority = 0;
-  // ART tests on JVM can reach this code path, use tid = 0 as shorthand for current thread.
-  PaletteStatus status = PaletteSchedGetPriority(0, &priority);
+  PaletteStatus status = PaletteSchedGetPriority(GetTid(), &priority);
   CHECK(status == PaletteStatus::kOkay || status == PaletteStatus::kCheckErrno);
   return priority;
 }
@@ -4287,4 +4345,24 @@ bool Thread::IsSystemDaemon() const {
       WellKnownClasses::java_lang_Thread_systemDaemon)->GetBoolean(GetPeer());
 }
 
+ScopedExceptionStorage::ScopedExceptionStorage(art::Thread* self)
+    : self_(self), hs_(self_), excp_(hs_.NewHandle<art::mirror::Throwable>(self_->GetException())) {
+  self_->ClearException();
+}
+
+void ScopedExceptionStorage::SuppressOldException(const char* message) {
+  CHECK(self_->IsExceptionPending()) << *self_;
+  ObjPtr<mirror::Throwable> old_suppressed(excp_.Get());
+  excp_.Assign(self_->GetException());
+  LOG(WARNING) << message << "Suppressing old exception: " << old_suppressed->Dump();
+  self_->ClearException();
+}
+
+ScopedExceptionStorage::~ScopedExceptionStorage() {
+  CHECK(!self_->IsExceptionPending()) << *self_;
+  if (!excp_.IsNull()) {
+    self_->SetException(excp_.Get());
+  }
+}
+
 }  // namespace art
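Note on the ScopedExceptionStorage definitions above: the helper is the RAII
replacement for the hand-rolled stash/restore deleted from DumpJavaStack. A
hypothetical caller (assumes ART's headers; not code from this change):

    // Any pending exception is stashed on entry and re-installed on scope
    // exit, so the body can run code that requires a clean exception state.
    void WithCleanExceptionState(art::Thread* self) {
      art::ScopedExceptionStorage ses(self);
      // ... work that must not see a pending exception ...
      // If this work itself throws, call ses.SuppressOldException("...") to
      // log and drop the stashed exception; the destructor CHECKs that no
      // new exception is still pending before restoring the old one.
    }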