Diffstat (limited to 'runtime/thread.cc')
-rw-r--r--  runtime/thread.cc  191
1 file changed, 104 insertions(+), 87 deletions(-)
diff --git a/runtime/thread.cc b/runtime/thread.cc
index b3d14f0599..0fafbfa94d 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -32,7 +32,6 @@
 #include "arch/context.h"
 #include "base/mutex.h"
-#include "catch_finder.h"
 #include "class_linker.h"
 #include "class_linker-inl.h"
 #include "cutils/atomic.h"
@@ -54,6 +53,7 @@
 #include "mirror/stack_trace_element.h"
 #include "monitor.h"
 #include "object_utils.h"
+#include "quick_exception_handler.h"
 #include "reflection.h"
 #include "runtime.h"
 #include "scoped_thread_state_change.h"
@@ -876,7 +876,7 @@ struct StackDumpVisitor : public StackVisitor {
     if (o == nullptr) {
       os << "an unknown object";
     } else {
-      if ((o->GetLockWord().GetState() == LockWord::kThinLocked) &&
+      if ((o->GetLockWord(false).GetState() == LockWord::kThinLocked) &&
           Locks::mutator_lock_->IsExclusiveHeld(Thread::Current())) {
         // Getting the identity hashcode here would result in lock inflation and suspension of the
         // current thread, which isn't safe if this is the only runnable thread.
@@ -939,7 +939,7 @@ void Thread::DumpStack(std::ostream& os) const {
     if (dump_for_abort || ShouldShowNativeStack(this)) {
       DumpKernelStack(os, GetTid(), "  kernel: ", false);
       SirtRef<mirror::ArtMethod> method_ref(Thread::Current(), GetCurrentMethod(nullptr));
-      DumpNativeStack(os, GetTid(), "  native: ", false, method_ref.get());
+      DumpNativeStack(os, GetTid(), "  native: ", method_ref.get());
     }
     DumpJavaStack(os);
   } else {
@@ -1018,7 +1018,8 @@ Thread::Thread(bool daemon) : tls32_(daemon), wait_monitor_(nullptr), interrupte
   tls32_.state_and_flags.as_struct.flags = 0;
   tls32_.state_and_flags.as_struct.state = kNative;
   memset(&tlsPtr_.held_mutexes[0], 0, sizeof(tlsPtr_.held_mutexes));
-  memset(tlsPtr_.rosalloc_runs, 0, sizeof(tlsPtr_.rosalloc_runs));
+  std::fill(tlsPtr_.rosalloc_runs, tlsPtr_.rosalloc_runs + kRosAllocNumOfSizeBrackets,
+            gc::allocator::RosAlloc::GetDedicatedFullRun());
   for (uint32_t i = 0; i < kMaxCheckpoints; ++i) {
     tlsPtr_.checkpoint_functions[i] = nullptr;
   }
@@ -1248,10 +1249,6 @@ mirror::Object* Thread::DecodeJObject(jobject obj) const {
     // Read from SIRT.
     result = reinterpret_cast<StackReference<mirror::Object>*>(obj)->AsMirrorPtr();
     VerifyObject(result);
-  } else if (Runtime::Current()->GetJavaVM()->work_around_app_jni_bugs) {
-    // Assume an invalid local reference is actually a direct pointer.
-    result = reinterpret_cast<mirror::Object*>(obj);
-    VerifyObject(result);
   } else {
     result = kInvalidIndirectRefObject;
   }
@@ -1845,7 +1842,7 @@ void Thread::QuickDeliverException() {
   // Don't leave exception visible while we try to find the handler, which may cause class
   // resolution.
   ClearException();
-  bool is_deoptimization = (exception == reinterpret_cast<mirror::Throwable*>(-1));
+  bool is_deoptimization = (exception == GetDeoptimizationException());
   if (kDebugExceptionDelivery) {
     if (!is_deoptimization) {
       mirror::String* msg = exception->GetDetailMessage();
@@ -1856,10 +1853,14 @@ void Thread::QuickDeliverException() {
       DumpStack(LOG(INFO) << "Deoptimizing: ");
     }
   }
-  CatchFinder catch_finder(this, throw_location, exception, is_deoptimization);
-  catch_finder.FindCatch();
-  catch_finder.UpdateInstrumentationStack();
-  catch_finder.DoLongJump();
+  QuickExceptionHandler exception_handler(this, is_deoptimization);
+  if (is_deoptimization) {
+    exception_handler.DeoptimizeStack();
+  } else {
+    exception_handler.FindCatch(throw_location, exception);
+  }
+  exception_handler.UpdateInstrumentationStack();
+  exception_handler.DoLongJump();
   LOG(FATAL) << "UNREACHABLE";
 }
@@ -1931,92 +1932,102 @@ class ReferenceMapVisitor : public StackVisitor {
   bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     if (false) {
       LOG(INFO) << "Visiting stack roots in " << PrettyMethod(GetMethod())
-          << StringPrintf("@ PC:%04x", GetDexPc());
+                << StringPrintf("@ PC:%04x", GetDexPc());
     }
     ShadowFrame* shadow_frame = GetCurrentShadowFrame();
     if (shadow_frame != nullptr) {
-      mirror::ArtMethod* m = shadow_frame->GetMethod();
-      size_t num_regs = shadow_frame->NumberOfVRegs();
-      if (m->IsNative() || shadow_frame->HasReferenceArray()) {
-        // SIRT for JNI or References for interpreter.
-        for (size_t reg = 0; reg < num_regs; ++reg) {
-          mirror::Object* ref = shadow_frame->GetVRegReference(reg);
-          if (ref != nullptr) {
-            mirror::Object* new_ref = ref;
-            visitor_(&new_ref, reg, this);
-            if (new_ref != ref) {
-              shadow_frame->SetVRegReference(reg, new_ref);
-            }
-          }
-        }
-      } else {
-        // Java method.
-        // Portable path use DexGcMap and store in Method.native_gc_map_.
-        const uint8_t* gc_map = m->GetNativeGcMap();
-        CHECK(gc_map != nullptr) << PrettyMethod(m);
-        verifier::DexPcToReferenceMap dex_gc_map(gc_map);
-        uint32_t dex_pc = GetDexPc();
-        const uint8_t* reg_bitmap = dex_gc_map.FindBitMap(dex_pc);
-        DCHECK(reg_bitmap != nullptr);
-        num_regs = std::min(dex_gc_map.RegWidth() * 8, num_regs);
-        for (size_t reg = 0; reg < num_regs; ++reg) {
-          if (TestBitmap(reg, reg_bitmap)) {
-            mirror::Object* ref = shadow_frame->GetVRegReference(reg);
-            if (ref != nullptr) {
-              mirror::Object* new_ref = ref;
-              visitor_(&new_ref, reg, this);
-              if (new_ref != ref) {
-                shadow_frame->SetVRegReference(reg, new_ref);
-              }
-            }
-          }
-        }
-      }
-    } else {
-      mirror::ArtMethod* m = GetMethod();
-      // Process register map (which native and runtime methods don't have)
-      if (!m->IsNative() && !m->IsRuntimeMethod() && !m->IsProxyMethod()) {
-        const uint8_t* native_gc_map = m->GetNativeGcMap();
-        CHECK(native_gc_map != nullptr) << PrettyMethod(m);
-        mh_.ChangeMethod(m);
-        const DexFile::CodeItem* code_item = mh_.GetCodeItem();
-        DCHECK(code_item != nullptr) << PrettyMethod(m);  // Can't be nullptr or how would we compile its instructions?
-        NativePcOffsetToReferenceMap map(native_gc_map);
-        size_t num_regs = std::min(map.RegWidth() * 8,
-                                   static_cast<size_t>(code_item->registers_size_));
-        if (num_regs > 0) {
-          const uint8_t* reg_bitmap = map.FindBitMap(GetNativePcOffset());
-          DCHECK(reg_bitmap != nullptr);
-          const VmapTable vmap_table(m->GetVmapTable());
-          uint32_t core_spills = m->GetCoreSpillMask();
-          uint32_t fp_spills = m->GetFpSpillMask();
-          size_t frame_size = m->GetFrameSizeInBytes();
-          // For all dex registers in the bitmap
-          mirror::ArtMethod** cur_quick_frame = GetCurrentQuickFrame();
-          DCHECK(cur_quick_frame != nullptr);
-          for (size_t reg = 0; reg < num_regs; ++reg) {
-            // Does this register hold a reference?
-            if (TestBitmap(reg, reg_bitmap)) {
-              uint32_t vmap_offset;
-              if (vmap_table.IsInContext(reg, kReferenceVReg, &vmap_offset)) {
-                int vmap_reg = vmap_table.ComputeRegister(core_spills, vmap_offset, kReferenceVReg);
-                // This is sound as spilled GPRs will be word sized (ie 32 or 64bit).
-                mirror::Object** ref_addr = reinterpret_cast<mirror::Object**>(GetGPRAddress(vmap_reg));
-                if (*ref_addr != nullptr) {
-                  visitor_(ref_addr, reg, this);
-                }
-              } else {
-                StackReference<mirror::Object>* ref_addr =
-                    reinterpret_cast<StackReference<mirror::Object>*>(
-                        GetVRegAddr(cur_quick_frame, code_item, core_spills, fp_spills, frame_size,
-                                    reg));
-                mirror::Object* ref = ref_addr->AsMirrorPtr();
-                if (ref != nullptr) {
-                  mirror::Object* new_ref = ref;
-                  visitor_(&new_ref, reg, this);
-                  if (ref != new_ref) {
-                    ref_addr->Assign(new_ref);
+      VisitShadowFrame(shadow_frame);
+    } else {
+      VisitQuickFrame();
+    }
+    return true;
+  }
+
+  void VisitShadowFrame(ShadowFrame* shadow_frame) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    mirror::ArtMethod* m = shadow_frame->GetMethod();
+    size_t num_regs = shadow_frame->NumberOfVRegs();
+    if (m->IsNative() || shadow_frame->HasReferenceArray()) {
+      // SIRT for JNI or References for interpreter.
+      for (size_t reg = 0; reg < num_regs; ++reg) {
+        mirror::Object* ref = shadow_frame->GetVRegReference(reg);
+        if (ref != nullptr) {
+          mirror::Object* new_ref = ref;
+          visitor_(&new_ref, reg, this);
+          if (new_ref != ref) {
+            shadow_frame->SetVRegReference(reg, new_ref);
+          }
+        }
+      }
+    } else {
+      // Java method.
+      // Portable path use DexGcMap and store in Method.native_gc_map_.
+      const uint8_t* gc_map = m->GetNativeGcMap();
+      CHECK(gc_map != nullptr) << PrettyMethod(m);
+      verifier::DexPcToReferenceMap dex_gc_map(gc_map);
+      uint32_t dex_pc = shadow_frame->GetDexPC();
+      const uint8_t* reg_bitmap = dex_gc_map.FindBitMap(dex_pc);
+      DCHECK(reg_bitmap != nullptr);
+      num_regs = std::min(dex_gc_map.RegWidth() * 8, num_regs);
+      for (size_t reg = 0; reg < num_regs; ++reg) {
+        if (TestBitmap(reg, reg_bitmap)) {
+          mirror::Object* ref = shadow_frame->GetVRegReference(reg);
+          if (ref != nullptr) {
+            mirror::Object* new_ref = ref;
+            visitor_(&new_ref, reg, this);
+            if (new_ref != ref) {
+              shadow_frame->SetVRegReference(reg, new_ref);
+            }
+          }
+        }
+      }
+    }
+  }
+
+ private:
+  void VisitQuickFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    mirror::ArtMethod* m = GetMethod();
+    // Process register map (which native and runtime methods don't have)
+    if (!m->IsNative() && !m->IsRuntimeMethod() && !m->IsProxyMethod()) {
+      const uint8_t* native_gc_map = m->GetNativeGcMap();
+      CHECK(native_gc_map != nullptr) << PrettyMethod(m);
+      mh_.ChangeMethod(m);
+      const DexFile::CodeItem* code_item = mh_.GetCodeItem();
+      DCHECK(code_item != nullptr) << PrettyMethod(m);  // Can't be nullptr or how would we compile its instructions?
+      NativePcOffsetToReferenceMap map(native_gc_map);
+      size_t num_regs = std::min(map.RegWidth() * 8,
+                                 static_cast<size_t>(code_item->registers_size_));
+      if (num_regs > 0) {
+        const uint8_t* reg_bitmap = map.FindBitMap(GetNativePcOffset());
+        DCHECK(reg_bitmap != nullptr);
+        const VmapTable vmap_table(m->GetVmapTable());
+        uint32_t core_spills = m->GetCoreSpillMask();
+        uint32_t fp_spills = m->GetFpSpillMask();
+        size_t frame_size = m->GetFrameSizeInBytes();
+        // For all dex registers in the bitmap
+        mirror::ArtMethod** cur_quick_frame = GetCurrentQuickFrame();
+        DCHECK(cur_quick_frame != nullptr);
+        for (size_t reg = 0; reg < num_regs; ++reg) {
+          // Does this register hold a reference?
+          if (TestBitmap(reg, reg_bitmap)) {
+            uint32_t vmap_offset;
+            if (vmap_table.IsInContext(reg, kReferenceVReg, &vmap_offset)) {
+              int vmap_reg = vmap_table.ComputeRegister(core_spills, vmap_offset, kReferenceVReg);
+              // This is sound as spilled GPRs will be word sized (ie 32 or 64bit).
+              mirror::Object** ref_addr = reinterpret_cast<mirror::Object**>(GetGPRAddress(vmap_reg));
+              if (*ref_addr != nullptr) {
+                visitor_(ref_addr, reg, this);
+              }
+            } else {
+              StackReference<mirror::Object>* ref_addr =
+                  reinterpret_cast<StackReference<mirror::Object>*>(
+                      GetVRegAddr(cur_quick_frame, code_item, core_spills, fp_spills, frame_size,
+                                  reg));
+              mirror::Object* ref = ref_addr->AsMirrorPtr();
+              if (ref != nullptr) {
+                mirror::Object* new_ref = ref;
+                visitor_(&new_ref, reg, this);
+                if (ref != new_ref) {
+                  ref_addr->Assign(new_ref);
                 }
               }
             }
@@ -2024,10 +2035,8 @@ class ReferenceMapVisitor : public StackVisitor {
         }
       }
     }
-    return true;
   }
 
- private:
   static bool TestBitmap(size_t reg, const uint8_t* reg_vector) {
     return ((reg_vector[reg / kBitsPerByte] >> (reg % kBitsPerByte)) & 0x01) != 0;
   }
@@ -2064,7 +2073,7 @@ void Thread::VisitRoots(RootCallback* visitor, void* arg) {
   if (tlsPtr_.opeer != nullptr) {
     visitor(&tlsPtr_.opeer, arg, thread_id, kRootThreadObject);
   }
-  if (tlsPtr_.exception != nullptr) {
+  if (tlsPtr_.exception != nullptr && tlsPtr_.exception != GetDeoptimizationException()) {
     visitor(reinterpret_cast<mirror::Object**>(&tlsPtr_.exception), arg, thread_id, kRootNativeStack);
   }
   tlsPtr_.throw_location.VisitRoots(visitor, arg);
@@ -2084,6 +2093,14 @@ void Thread::VisitRoots(RootCallback* visitor, void* arg) {
   if (tlsPtr_.single_step_control != nullptr) {
     tlsPtr_.single_step_control->VisitRoots(visitor, arg, thread_id, kRootDebugger);
   }
+  if (tlsPtr_.deoptimization_shadow_frame != nullptr) {
+    RootCallbackVisitor visitorToCallback(visitor, arg, thread_id);
+    ReferenceMapVisitor<RootCallbackVisitor> mapper(this, nullptr, visitorToCallback);
+    for (ShadowFrame* shadow_frame = tlsPtr_.deoptimization_shadow_frame; shadow_frame != nullptr;
+         shadow_frame = shadow_frame->GetLink()) {
+      mapper.VisitShadowFrame(shadow_frame);
+    }
+  }
   // Visit roots on this thread's stack
   Context* context = GetLongJumpContext();
   RootCallbackVisitor visitorToCallback(visitor, arg, thread_id);
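Note on the Thread constructor hunk above: instead of zeroing the thread-local rosalloc run pointers with memset, every size bracket now starts out pointing at RosAlloc's shared "dedicated full run". A minimal sketch of the idea follows; Run, the bracket count, and the local GetDedicatedFullRun() are hypothetical stand-ins (the real ones live in runtime/gc/allocator/rosalloc.h), and the stated rationale, letting the allocation fast path treat "no run yet" the same as "current run is full" without a null check, is an inference from the change, not something this page states.

#include <algorithm>
#include <cstddef>

// Hypothetical stand-in for gc::allocator::RosAlloc::Run.
struct Run {};
constexpr std::size_t kRosAllocNumOfSizeBrackets = 34;  // assumed bracket count

// A single shared run that always looks "full": freshly created threads point
// every size bracket at it, so an allocation attempt immediately takes the
// normal "run is full, fetch a new run" path instead of testing for nullptr.
Run* GetDedicatedFullRun() {
  static Run dedicated_full_run;
  return &dedicated_full_run;
}

int main() {
  Run* rosalloc_runs[kRosAllocNumOfSizeBrackets];
  // Mirrors the patched initialization: std::fill with the sentinel run
  // replaces memset(rosalloc_runs, 0, sizeof(rosalloc_runs)).
  std::fill(rosalloc_runs, rosalloc_runs + kRosAllocNumOfSizeBrackets,
            GetDedicatedFullRun());
}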
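Note on the QuickDeliverException hunks: the CatchFinder object is renamed to QuickExceptionHandler, the magic reinterpret_cast<mirror::Throwable*>(-1) sentinel is hidden behind GetDeoptimizationException(), and the deoptimization path becomes an explicit branch. The sketch below mirrors only the shape of the new dispatch; Throwable, the handler's method bodies, and main() are simplified stand-ins, not ART's real API.

#include <cstdio>

struct Throwable {};  // stand-in for mirror::Throwable

// The sentinel is a reserved pointer value that is only ever compared,
// never dereferenced; it marks "deoptimize" rather than a real exception.
inline Throwable* GetDeoptimizationException() {
  return reinterpret_cast<Throwable*>(-1);
}

class QuickExceptionHandler {
 public:
  explicit QuickExceptionHandler(bool is_deoptimization)
      : is_deoptimization_(is_deoptimization) {}
  void DeoptimizeStack() { std::puts("build shadow frames for the interpreter"); }
  void FindCatch(Throwable* /*exception*/) { std::puts("walk frames for a catch handler"); }
  void UpdateInstrumentationStack() { std::puts("fix up instrumentation entries"); }
  void DoLongJump() { std::puts("long jump to handler or interpreter bridge"); }
 private:
  bool is_deoptimization_;
};

// Mirrors the new control flow in Thread::QuickDeliverException().
void QuickDeliverException(Throwable* exception) {
  bool is_deoptimization = (exception == GetDeoptimizationException());
  QuickExceptionHandler handler(is_deoptimization);
  if (is_deoptimization) {
    handler.DeoptimizeStack();
  } else {
    handler.FindCatch(exception);
  }
  handler.UpdateInstrumentationStack();
  handler.DoLongJump();
}

int main() {
  Throwable real;
  QuickDeliverException(&real);                         // ordinary catch search
  QuickDeliverException(GetDeoptimizationException());  // deoptimization path
}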
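Note on the ReferenceMapVisitor hunks: both VisitShadowFrame and VisitQuickFrame decide per dex register whether to invoke the visitor by testing one bit in a packed per-PC bitmap. The following self-contained illustration uses the same TestBitmap predicate shown in the diff; the bitmap contents are invented for the example.

#include <cstdint>
#include <cstdio>

constexpr size_t kBitsPerByte = 8;

// Identical logic to ReferenceMapVisitor::TestBitmap: bit `reg` of a packed,
// LSB-first bit vector says whether dex register v`reg` holds a reference.
static bool TestBitmap(size_t reg, const uint8_t* reg_vector) {
  return ((reg_vector[reg / kBitsPerByte] >> (reg % kBitsPerByte)) & 0x01) != 0;
}

int main() {
  // Example map for 16 registers: bits 1 and 3 of byte 0 and bit 4 of byte 1
  // are set, so v1, v3 and v12 would be visited as references.
  const uint8_t bitmap[] = {0x0A, 0x10};
  for (size_t reg = 0; reg < 16; ++reg) {
    if (TestBitmap(reg, bitmap)) {
      std::printf("v%zu holds a reference\n", reg);
    }
  }
}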