Diffstat (limited to 'runtime')
33 files changed, 286 insertions, 201 deletions
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index f092772239..4bd702d0aa 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -2780,7 +2780,7 @@ void ClassLinker::LoadClass(Thread* self, const DexFile& dex_file,
   klass->SetDexClassDefIndex(dex_file.GetIndexForClassDef(dex_class_def));
   klass->SetDexTypeIndex(dex_class_def.class_idx_);
-  klass->SetDexCacheStrings(klass->GetDexCache()->GetStrings());
+  CHECK(klass->GetDexCacheStrings() != nullptr);
 
   const uint8_t* class_data = dex_file.GetClassData(dex_class_def);
   if (class_data == nullptr) {
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index da2dfe11d0..c329fe6920 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -20,7 +20,6 @@
 #include "class_linker-inl.h"
 #include "dex_file-inl.h"
 #include "gc/accounting/card_table-inl.h"
-#include "method_helper-inl.h"
 #include "mirror/art_field-inl.h"
 #include "mirror/art_method-inl.h"
 #include "mirror/class-inl.h"
diff --git a/runtime/entrypoints/interpreter/interpreter_entrypoints.cc b/runtime/entrypoints/interpreter/interpreter_entrypoints.cc
index 908d3cd43c..3b47f245f7 100644
--- a/runtime/entrypoints/interpreter/interpreter_entrypoints.cc
+++ b/runtime/entrypoints/interpreter/interpreter_entrypoints.cc
@@ -24,9 +24,7 @@
 namespace art {
 
-// TODO: Make the MethodHelper here be compaction safe.
-extern "C" void artInterpreterToCompiledCodeBridge(Thread* self, MethodHelper* mh,
-                                                   const DexFile::CodeItem* code_item,
+extern "C" void artInterpreterToCompiledCodeBridge(Thread* self, const DexFile::CodeItem* code_item,
                                                    ShadowFrame* shadow_frame, JValue* result) {
   mirror::ArtMethod* method = shadow_frame->GetMethod();
   // Ensure static methods are initialized.
@@ -50,11 +48,11 @@ extern "C" void artInterpreterToCompiledCodeBridge(Thread* self, MethodHelper* m
   }
   uint16_t arg_offset = (code_item == NULL) ? 0 : code_item->registers_size_ - code_item->ins_size_;
   if (kUsePortableCompiler) {
-    InvokeWithShadowFrame(self, shadow_frame, arg_offset, mh, result);
+    InvokeWithShadowFrame(self, shadow_frame, arg_offset, result);
   } else {
     method->Invoke(self, shadow_frame->GetVRegArgs(arg_offset),
                    (shadow_frame->NumberOfVRegs() - arg_offset) * sizeof(uint32_t),
-                   result, mh->GetShorty());
+                   result, method->GetShorty());
   }
 }
diff --git a/runtime/entrypoints/interpreter/interpreter_entrypoints.h b/runtime/entrypoints/interpreter/interpreter_entrypoints.h
index 5d646e905f..09522149a7 100644
--- a/runtime/entrypoints/interpreter/interpreter_entrypoints.h
+++ b/runtime/entrypoints/interpreter/interpreter_entrypoints.h
@@ -27,17 +27,14 @@ namespace art {
 
 union JValue;
-class MethodHelper;
 class ShadowFrame;
 class Thread;
 
 // Pointers to functions that are called by interpreter trampolines via thread-local storage.
 struct PACKED(4) InterpreterEntryPoints {
-  void (*pInterpreterToInterpreterBridge)(Thread* self, MethodHelper* mh,
-                                          const DexFile::CodeItem* code_item,
+  void (*pInterpreterToInterpreterBridge)(Thread* self, const DexFile::CodeItem* code_item,
                                           ShadowFrame* shadow_frame, JValue* result);
-  void (*pInterpreterToCompiledCodeBridge)(Thread* self, MethodHelper* mh,
-                                           const DexFile::CodeItem* code_item,
+  void (*pInterpreterToCompiledCodeBridge)(Thread* self, const DexFile::CodeItem* code_item,
                                            ShadowFrame* shadow_frame, JValue* result);
 };
diff --git a/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc b/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc
index e7975f8923..0a5695660b 100644
--- a/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc
@@ -21,6 +21,7 @@
 #include "entrypoints/entrypoint_utils-inl.h"
 #include "entrypoints/runtime_asm_entrypoints.h"
 #include "interpreter/interpreter.h"
+#include "method_helper.h"
 #include "mirror/art_method-inl.h"
 #include "mirror/object-inl.h"
 #include "scoped_thread_state_change.h"
@@ -224,7 +225,7 @@ extern "C" uint64_t artPortableToInterpreterBridge(mirror::ArtMethod* method, Th
     }
   }
 
-  JValue result = interpreter::EnterInterpreterFromEntryPoint(self, &mh, code_item, shadow_frame);
+  JValue result = interpreter::EnterInterpreterFromEntryPoint(self, code_item, shadow_frame);
   // Pop transition.
   self->PopManagedStackFragment(fragment);
   return result.GetJ();
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 93dc62a094..e3eb707bdc 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -22,6 +22,7 @@
 #include "entrypoints/runtime_asm_entrypoints.h"
 #include "gc/accounting/card_table-inl.h"
 #include "interpreter/interpreter.h"
+#include "method_helper.h"
 #include "mirror/art_method-inl.h"
 #include "mirror/class-inl.h"
 #include "mirror/dex_cache-inl.h"
@@ -510,25 +511,25 @@ extern "C" uint64_t artQuickToInterpreterBridge(mirror::ArtMethod* method, Threa
   BuildQuickShadowFrameVisitor shadow_frame_builder(sp, method->IsStatic(), shorty, shorty_len,
                                                     shadow_frame, first_arg_reg);
   shadow_frame_builder.VisitArguments();
+  const bool needs_initialization =
+      method->IsStatic() && !method->GetDeclaringClass()->IsInitialized();
   // Push a transition back into managed code onto the linked list in thread.
   ManagedStack fragment;
   self->PushManagedStackFragment(&fragment);
   self->PushShadowFrame(shadow_frame);
   self->EndAssertNoThreadSuspension(old_cause);
 
-  StackHandleScope<1> hs(self);
-  MethodHelper mh(hs.NewHandle(method));
-  if (mh.Get()->IsStatic() && !mh.Get()->GetDeclaringClass()->IsInitialized()) {
+  if (needs_initialization) {
     // Ensure static method's class is initialized.
-    StackHandleScope<1> hs2(self);
-    Handle<mirror::Class> h_class(hs2.NewHandle(mh.Get()->GetDeclaringClass()));
+    StackHandleScope<1> hs(self);
+    Handle<mirror::Class> h_class(hs.NewHandle(shadow_frame->GetMethod()->GetDeclaringClass()));
     if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) {
-      DCHECK(Thread::Current()->IsExceptionPending()) << PrettyMethod(mh.Get());
+      DCHECK(Thread::Current()->IsExceptionPending()) << PrettyMethod(shadow_frame->GetMethod());
       self->PopManagedStackFragment(fragment);
       return 0;
     }
   }
 
-  JValue result = interpreter::EnterInterpreterFromEntryPoint(self, &mh, code_item, shadow_frame);
+  JValue result = interpreter::EnterInterpreterFromEntryPoint(self, code_item, shadow_frame);
   // Pop transition.
   self->PopManagedStackFragment(fragment);
   // No need to restore the args since the method has already been run by the interpreter.
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index 835485c351..94753d4461 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -177,6 +177,12 @@ void FaultManager::HandleFault(int sig, siginfo_t* info, void* context) {
 
   Thread* self = Thread::Current();
 
+  // If ART is not running, or the thread is not attached to ART, pass the
+  // signal on to the next handler in the chain.
+  if (self == nullptr || Runtime::Current() == nullptr || !Runtime::Current()->IsStarted()) {
+    InvokeUserSignalHandler(sig, info, context);
+    return;
+  }
   // Now set up the nested signal handler.
 
   // TODO: add SIGSEGV back to the nested signals when we can handle running out stack gracefully.
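The fault_handler.cc hunk above is the standard signal-chaining pattern: when the runtime cannot own the fault, the signal is forwarded to whatever handler was installed before ours. A minimal standalone sketch of that pattern follows; previous_action and RuntimeCanHandleFault are illustrative stand-ins, not ART's actual names or API.

#include <signal.h>

// Stand-in state: a real runtime records the previously installed handler
// when it calls sigaction() to install its own.
static struct sigaction previous_action;

// Illustrative stand-in for "ART is started and this thread is attached".
static bool RuntimeCanHandleFault() { return false; }

static void ChainingFaultHandler(int sig, siginfo_t* info, void* context) {
  if (!RuntimeCanHandleFault()) {
    // Same shape as the early return added to FaultManager::HandleFault:
    // defer to the next handler in the chain instead of touching runtime state.
    if ((previous_action.sa_flags & SA_SIGINFO) != 0) {
      previous_action.sa_sigaction(sig, info, context);
    } else if (previous_action.sa_handler != SIG_DFL &&
               previous_action.sa_handler != SIG_IGN) {
      previous_action.sa_handler(sig);
    }
    return;
  }
  // ... runtime-specific fault handling would go here ...
}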
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 3101c68599..9d2f6d1238 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -48,11 +48,20 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, mirror::Clas
   }
   // Need to check that we aren't the large object allocator since the large object allocation
   // code path includes this function. If we didn't check we would have an infinite loop.
+  mirror::Object* obj;
   if (kCheckLargeObject && UNLIKELY(ShouldAllocLargeObject(klass, byte_count))) {
-    return AllocLargeObject<kInstrumented, PreFenceVisitor>(self, klass, byte_count,
-                                                            pre_fence_visitor);
+    obj = AllocLargeObject<kInstrumented, PreFenceVisitor>(self, &klass, byte_count,
+                                                           pre_fence_visitor);
+    if (obj != nullptr) {
+      return obj;
+    } else {
+      // There should be an OOM exception; since we are retrying, clear it.
+      self->ClearException();
+    }
+    // If the large object allocation failed, try to use the normal spaces (main space,
+    // non moving space). This can happen if there is significant virtual address space
+    // fragmentation.
   }
-  mirror::Object* obj;
   AllocationTimer alloc_timer(this, &obj);
   size_t bytes_allocated;
   size_t usable_size;
@@ -171,10 +180,13 @@ inline void Heap::PushOnAllocationStack(Thread* self, mirror::Object** obj) {
 }
 
 template <bool kInstrumented, typename PreFenceVisitor>
-inline mirror::Object* Heap::AllocLargeObject(Thread* self, mirror::Class* klass,
+inline mirror::Object* Heap::AllocLargeObject(Thread* self, mirror::Class** klass,
                                               size_t byte_count,
                                               const PreFenceVisitor& pre_fence_visitor) {
-  return AllocObjectWithAllocator<kInstrumented, false, PreFenceVisitor>(self, klass, byte_count,
+  // Save and restore the class in case it moves.
+  StackHandleScope<1> hs(self);
+  auto klass_wrapper = hs.NewHandleWrapper(klass);
+  return AllocObjectWithAllocator<kInstrumented, false, PreFenceVisitor>(self, *klass, byte_count,
                                                                          kAllocatorTypeLOS,
                                                                          pre_fence_visitor);
 }
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 0cceaa4467..0fd0a9ff52 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -365,6 +365,7 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
   uint8_t* heap_end = continuous_spaces_.back()->Limit();
   size_t heap_capacity = heap_end - heap_begin;
   // Remove the main backup space since it slows down the GC to have unused extra spaces.
+  // TODO: Avoid needing to do this.
   if (main_space_backup_.get() != nullptr) {
     RemoveSpace(main_space_backup_.get());
   }
@@ -977,6 +978,22 @@ void Heap::DoPendingTransitionOrTrim() {
   Trim();
 }
 
+class TrimIndirectReferenceTableClosure : public Closure {
+ public:
+  explicit TrimIndirectReferenceTableClosure(Barrier* barrier) : barrier_(barrier) {
+  }
+  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
+    ATRACE_BEGIN("Trimming reference table");
+    thread->GetJniEnv()->locals.Trim();
+    ATRACE_END();
+    barrier_->Pass(Thread::Current());
+  }
+
+ private:
+  Barrier* const barrier_;
+};
+
 void Heap::Trim() {
   Thread* self = Thread::Current();
   {
@@ -998,6 +1015,19 @@ void Heap::Trim() {
     WaitForGcToCompleteLocked(kGcCauseTrim, self);
     collector_type_running_ = kCollectorTypeHeapTrim;
   }
+  // Trim reference tables.
+  {
+    ScopedObjectAccess soa(self);
+    JavaVMExt* vm = soa.Vm();
+    // Trim globals indirect reference table.
+    vm->TrimGlobals();
+    // Trim locals indirect reference tables.
+    Barrier barrier(0);
+    TrimIndirectReferenceTableClosure closure(&barrier);
+    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
+    size_t barrier_count = Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
+    barrier.Increment(self, barrier_count);
+  }
   uint64_t start_ns = NanoTime();
   // Trim the managed spaces.
   uint64_t total_alloc_space_allocated = 0;
@@ -1571,6 +1601,8 @@ HomogeneousSpaceCompactResult Heap::PerformHomogeneousSpaceCompact() {
   to_space->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
   const uint64_t space_size_before_compaction = from_space->Size();
   AddSpace(to_space);
+  // Make sure that we will have enough room to copy.
+  CHECK_GE(to_space->GetFootprintLimit(), from_space->GetFootprintLimit());
   Compact(to_space, from_space, kGcCauseHomogeneousSpaceCompact);
   // Leave as prot read so that we can still run ROSAlloc verification on this space.
   from_space->GetMemMap()->Protect(PROT_READ);
@@ -1689,8 +1721,8 @@ void Heap::TransitionCollector(CollectorType collector_type) {
       RemoveSpace(temp_space_);
      temp_space_ = nullptr;
      mem_map->Protect(PROT_READ | PROT_WRITE);
-      CreateMainMallocSpace(mem_map.get(), kDefaultInitialSize, mem_map->Size(),
-                            mem_map->Size());
+      CreateMainMallocSpace(mem_map.get(), kDefaultInitialSize,
+                            std::min(mem_map->Size(), growth_limit_), mem_map->Size());
      mem_map.release();
      // Compact to the main space from the bump pointer space, don't need to swap semispaces.
      AddSpace(main_space_);
@@ -1703,9 +1735,9 @@ void Heap::TransitionCollector(CollectorType collector_type) {
      if (kIsDebugBuild && kUseRosAlloc) {
        mem_map->Protect(PROT_READ | PROT_WRITE);
      }
-      main_space_backup_.reset(CreateMallocSpaceFromMemMap(mem_map.get(), kDefaultInitialSize,
-                                                           mem_map->Size(), mem_map->Size(),
-                                                           name, true));
+      main_space_backup_.reset(CreateMallocSpaceFromMemMap(
+          mem_map.get(), kDefaultInitialSize, std::min(mem_map->Size(), growth_limit_),
+          mem_map->Size(), name, true));
      if (kIsDebugBuild && kUseRosAlloc) {
        mem_map->Protect(PROT_NONE);
      }
@@ -1947,7 +1979,8 @@ void Heap::PreZygoteFork() {
    MemMap* mem_map = main_space_->ReleaseMemMap();
    RemoveSpace(main_space_);
    space::Space* old_main_space = main_space_;
-    CreateMainMallocSpace(mem_map, kDefaultInitialSize, mem_map->Size(), mem_map->Size());
+    CreateMainMallocSpace(mem_map, kDefaultInitialSize, std::min(mem_map->Size(), growth_limit_),
+                          mem_map->Size());
    delete old_main_space;
    AddSpace(main_space_);
  } else {
@@ -2959,7 +2992,18 @@ void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran) {
 
 void Heap::ClearGrowthLimit() {
   growth_limit_ = capacity_;
-  non_moving_space_->ClearGrowthLimit();
+  for (const auto& space : continuous_spaces_) {
+    if (space->IsMallocSpace()) {
+      gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
+      malloc_space->ClearGrowthLimit();
+      malloc_space->SetFootprintLimit(malloc_space->Capacity());
+    }
+  }
+  // This space isn't added for performance reasons.
+  if (main_space_backup_.get() != nullptr) {
+    main_space_backup_->ClearGrowthLimit();
+    main_space_backup_->SetFootprintLimit(main_space_backup_->Capacity());
+  }
 }
 
 void Heap::AddFinalizerReference(Thread* self, mirror::Object** object) {
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 69a573ef98..4e1a0ff242 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -654,7 +654,7 @@ class Heap {
 
   // We don't force this to be inlined since it is a slow path.
   template <bool kInstrumented, typename PreFenceVisitor>
-  mirror::Object* AllocLargeObject(Thread* self, mirror::Class* klass, size_t byte_count,
+  mirror::Object* AllocLargeObject(Thread* self, mirror::Class** klass, size_t byte_count,
                                    const PreFenceVisitor& pre_fence_visitor)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/globals.h b/runtime/globals.h
index 4d33196c98..3104229b17 100644
--- a/runtime/globals.h
+++ b/runtime/globals.h
@@ -64,6 +64,12 @@ static constexpr bool kUsePortableCompiler = true;
 static constexpr bool kUsePortableCompiler = false;
 #endif
 
+#if defined(ART_USE_OPTIMIZING_COMPILER)
+static constexpr bool kUseOptimizingCompiler = true;
+#else
+static constexpr bool kUseOptimizingCompiler = false;
+#endif
+
 // Garbage collector constants.
 static constexpr bool kMovingCollector = true && !kUsePortableCompiler;
 static constexpr bool kMarkCompactSupport = false && kMovingCollector;
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index 4d177a32d8..0d84a1ef98 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -162,13 +162,12 @@ bool IndirectReferenceTable::Remove(uint32_t cookie, IndirectRef iref) {
   DCHECK(table_ != NULL);
   DCHECK_GE(segment_state_.parts.numHoles, prevState.parts.numHoles);
 
-  int idx = ExtractIndex(iref);
-
   if (GetIndirectRefKind(iref) == kHandleScopeOrInvalid &&
       Thread::Current()->HandleScopeContains(reinterpret_cast<jobject>(iref))) {
     LOG(WARNING) << "Attempt to remove local handle scope entry from IRT, ignoring";
     return true;
   }
+  const int idx = ExtractIndex(iref);
 
   if (idx < bottomIndex) {
     // Wrong segment.
     LOG(WARNING) << "Attempt to remove index outside index area (" << idx
@@ -236,6 +235,13 @@ bool IndirectReferenceTable::Remove(uint32_t cookie, IndirectRef iref) {
   return true;
 }
 
+void IndirectReferenceTable::Trim() {
+  const size_t top_index = Capacity();
+  auto* release_start = AlignUp(reinterpret_cast<uint8_t*>(&table_[top_index]), kPageSize);
+  uint8_t* release_end = table_mem_map_->End();
+  madvise(release_start, release_end - release_start, MADV_DONTNEED);
+}
+
 void IndirectReferenceTable::VisitRoots(RootCallback* callback, void* arg, uint32_t tid,
                                         RootType root_type) {
   for (auto ref : *this) {
diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h
index 168f9f2764..fbd5714688 100644
--- a/runtime/indirect_reference_table.h
+++ b/runtime/indirect_reference_table.h
@@ -331,6 +331,9 @@ class IndirectReferenceTable {
     return Offset(OFFSETOF_MEMBER(IndirectReferenceTable, segment_state_));
   }
 
+  // Release pages past the end of the table that may have previously held references.
+  void Trim() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
  private:
   // Extract the table index from an indirect reference.
   static uint32_t ExtractIndex(IndirectRef iref) {
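The IndirectReferenceTable::Trim added above returns the pages behind the unused tail of the table to the kernel with madvise(MADV_DONTNEED). A minimal sketch of the same technique on a plain anonymous mapping; PageAlignUp stands in for ART's AlignUp helper and the table layout is simplified:

#include <sys/mman.h>
#include <unistd.h>
#include <cstdint>

// Round 'p' up to the next page boundary, like ART's AlignUp helper.
static uint8_t* PageAlignUp(uint8_t* p, size_t page_size) {
  uintptr_t v = reinterpret_cast<uintptr_t>(p);
  return reinterpret_cast<uint8_t*>((v + page_size - 1) & ~(page_size - 1));
}

// Return the pages past the last live entry to the kernel. The mapping stays
// valid; for an anonymous private mapping the freed range reads back as zeroes
// if it is ever touched again.
static void TrimTail(void* table_begin, size_t live_bytes, void* mapping_end) {
  const size_t page_size = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  uint8_t* live_end = static_cast<uint8_t*>(table_begin) + live_bytes;
  uint8_t* release_start = PageAlignUp(live_end, page_size);
  uint8_t* release_end = static_cast<uint8_t*>(mapping_end);
  if (release_start < release_end) {
    madvise(release_start, release_end - release_start, MADV_DONTNEED);
  }
}

Rounding the release start up to a page boundary is what keeps the last partially used page intact; only whole pages past the live region are discarded.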
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index b17f3039c6..b04a18b934 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -327,37 +327,31 @@ static constexpr InterpreterImplKind kInterpreterImplKind = kComputedGotoImplKin
 // Clang 3.4 fails to build the goto interpreter implementation.
 static constexpr InterpreterImplKind kInterpreterImplKind = kSwitchImpl;
 template<bool do_access_check, bool transaction_active>
-JValue ExecuteGotoImpl(Thread*, MethodHelper&, const DexFile::CodeItem*, ShadowFrame&, JValue) {
+JValue ExecuteGotoImpl(Thread*, const DexFile::CodeItem*, ShadowFrame&, JValue) {
   LOG(FATAL) << "UNREACHABLE";
   UNREACHABLE();
 }
 // Explicit definitions of ExecuteGotoImpl.
 template<> SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-JValue ExecuteGotoImpl<true, false>(Thread* self, MethodHelper& mh,
-                                    const DexFile::CodeItem* code_item,
+JValue ExecuteGotoImpl<true, false>(Thread* self, const DexFile::CodeItem* code_item,
                                     ShadowFrame& shadow_frame, JValue result_register);
 template<> SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-JValue ExecuteGotoImpl<false, false>(Thread* self, MethodHelper& mh,
-                                     const DexFile::CodeItem* code_item,
+JValue ExecuteGotoImpl<false, false>(Thread* self, const DexFile::CodeItem* code_item,
                                      ShadowFrame& shadow_frame, JValue result_register);
 template<> SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-JValue ExecuteGotoImpl<true, true>(Thread* self, MethodHelper& mh,
-                                   const DexFile::CodeItem* code_item,
-                                   ShadowFrame& shadow_frame, JValue result_register);
+JValue ExecuteGotoImpl<true, true>(Thread* self, const DexFile::CodeItem* code_item,
+                                   ShadowFrame& shadow_frame, JValue result_register);
 template<> SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-JValue ExecuteGotoImpl<false, true>(Thread* self, MethodHelper& mh,
-                                    const DexFile::CodeItem* code_item,
-                                    ShadowFrame& shadow_frame, JValue result_register);
+JValue ExecuteGotoImpl<false, true>(Thread* self, const DexFile::CodeItem* code_item,
+                                    ShadowFrame& shadow_frame, JValue result_register);
 #endif
 
-static JValue Execute(Thread* self, MethodHelper& mh, const DexFile::CodeItem* code_item,
-                      ShadowFrame& shadow_frame, JValue result_register)
+static JValue Execute(Thread* self, const DexFile::CodeItem* code_item, ShadowFrame& shadow_frame,
+                      JValue result_register)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-static inline JValue Execute(Thread* self, MethodHelper& mh, const DexFile::CodeItem* code_item,
+static inline JValue Execute(Thread* self, const DexFile::CodeItem* code_item,
                              ShadowFrame& shadow_frame, JValue result_register) {
-  DCHECK(shadow_frame.GetMethod() == mh.GetMethod() ||
-         shadow_frame.GetMethod()->GetDeclaringClass()->IsProxyClass());
   DCHECK(!shadow_frame.GetMethod()->IsAbstract());
   DCHECK(!shadow_frame.GetMethod()->IsNative());
   shadow_frame.GetMethod()->GetDeclaringClass()->AssertInitializedOrInitializingInThread(self);
@@ -367,32 +361,32 @@ static inline JValue Execute(Thread* self, MethodHelper& mh, const DexFile::Code
     // Enter the "without access check" interpreter.
     if (kInterpreterImplKind == kSwitchImpl) {
      if (transaction_active) {
-        return ExecuteSwitchImpl<false, true>(self, mh, code_item, shadow_frame, result_register);
+        return ExecuteSwitchImpl<false, true>(self, code_item, shadow_frame, result_register);
      } else {
-        return ExecuteSwitchImpl<false, false>(self, mh, code_item, shadow_frame, result_register);
+        return ExecuteSwitchImpl<false, false>(self, code_item, shadow_frame, result_register);
      }
    } else {
      DCHECK_EQ(kInterpreterImplKind, kComputedGotoImplKind);
      if (transaction_active) {
-        return ExecuteGotoImpl<false, true>(self, mh, code_item, shadow_frame, result_register);
+        return ExecuteGotoImpl<false, true>(self, code_item, shadow_frame, result_register);
      } else {
-        return ExecuteGotoImpl<false, false>(self, mh, code_item, shadow_frame, result_register);
+        return ExecuteGotoImpl<false, false>(self, code_item, shadow_frame, result_register);
      }
    }
  } else {
    // Enter the "with access check" interpreter.
    if (kInterpreterImplKind == kSwitchImpl) {
      if (transaction_active) {
-        return ExecuteSwitchImpl<true, true>(self, mh, code_item, shadow_frame, result_register);
+        return ExecuteSwitchImpl<true, true>(self, code_item, shadow_frame, result_register);
      } else {
-        return ExecuteSwitchImpl<true, false>(self, mh, code_item, shadow_frame, result_register);
+        return ExecuteSwitchImpl<true, false>(self, code_item, shadow_frame, result_register);
      }
    } else {
      DCHECK_EQ(kInterpreterImplKind, kComputedGotoImplKind);
      if (transaction_active) {
-        return ExecuteGotoImpl<true, true>(self, mh, code_item, shadow_frame, result_register);
+        return ExecuteGotoImpl<true, true>(self, code_item, shadow_frame, result_register);
      } else {
-        return ExecuteGotoImpl<true, false>(self, mh, code_item, shadow_frame, result_register);
+        return ExecuteGotoImpl<true, false>(self, code_item, shadow_frame, result_register);
      }
    }
  }
@@ -473,9 +467,7 @@ void EnterInterpreterFromInvoke(Thread* self, ArtMethod* method, Object* receive
    }
  }
  if (LIKELY(!method->IsNative())) {
-    StackHandleScope<1> hs(self);
-    MethodHelper mh(hs.NewHandle(method));
-    JValue r = Execute(self, mh, code_item, *shadow_frame, JValue());
+    JValue r = Execute(self, code_item, *shadow_frame, JValue());
    if (result != NULL) {
      *result = r;
    }
@@ -500,10 +492,8 @@ void EnterInterpreterFromDeoptimize(Thread* self, ShadowFrame* shadow_frame, JVa
  value.SetJ(ret_val->GetJ());  // Set value to last known result in case the shadow frame chain is empty.
  while (shadow_frame != NULL) {
    self->SetTopOfShadowStack(shadow_frame);
-    StackHandleScope<1> hs(self);
-    MethodHelper mh(hs.NewHandle(shadow_frame->GetMethod()));
-    const DexFile::CodeItem* code_item = mh.GetMethod()->GetCodeItem();
-    value = Execute(self, mh, code_item, *shadow_frame, value);
+    const DexFile::CodeItem* code_item = shadow_frame->GetMethod()->GetCodeItem();
+    value = Execute(self, code_item, *shadow_frame, value);
    ShadowFrame* old_frame = shadow_frame;
    shadow_frame = shadow_frame->GetLink();
    delete old_frame;
@@ -511,8 +501,7 @@ void EnterInterpreterFromDeoptimize(Thread* self, ShadowFrame* shadow_frame, JVa
  ret_val->SetJ(value.GetJ());
 }
 
-JValue EnterInterpreterFromEntryPoint(Thread* self, MethodHelper* mh,
-                                      const DexFile::CodeItem* code_item,
+JValue EnterInterpreterFromEntryPoint(Thread* self, const DexFile::CodeItem* code_item,
                                       ShadowFrame* shadow_frame) {
   DCHECK_EQ(self, Thread::Current());
   bool implicit_check = !Runtime::Current()->ExplicitStackOverflowChecks();
@@ -521,11 +510,10 @@ JValue EnterInterpreterFromEntryPoint(Thread* self, MethodHelper* mh,
     return JValue();
   }
 
-  return Execute(self, *mh, code_item, *shadow_frame, JValue());
+  return Execute(self, code_item, *shadow_frame, JValue());
 }
 
-extern "C" void artInterpreterToInterpreterBridge(Thread* self, MethodHelper* mh,
-                                                  const DexFile::CodeItem* code_item,
+extern "C" void artInterpreterToInterpreterBridge(Thread* self, const DexFile::CodeItem* code_item,
                                                   ShadowFrame* shadow_frame, JValue* result) {
   bool implicit_check = !Runtime::Current()->ExplicitStackOverflowChecks();
   if (UNLIKELY(__builtin_frame_address(0) < self->GetStackEndForInterpreter(implicit_check))) {
@@ -534,10 +522,10 @@ extern "C" void artInterpreterToInterpreterBridge(Thread* self, MethodHelper* mh
   }
 
   self->PushShadowFrame(shadow_frame);
-  DCHECK_EQ(shadow_frame->GetMethod(), mh->Get());
   // Ensure static methods are initialized.
-  if (mh->Get()->IsStatic()) {
-    mirror::Class* declaring_class = mh->Get()->GetDeclaringClass();
+  const bool is_static = shadow_frame->GetMethod()->IsStatic();
+  if (is_static) {
+    mirror::Class* declaring_class = shadow_frame->GetMethod()->GetDeclaringClass();
     if (UNLIKELY(!declaring_class->IsInitialized())) {
       StackHandleScope<1> hs(self);
       HandleWrapper<Class> h_declaring_class(hs.NewHandleWrapper(&declaring_class));
@@ -551,15 +539,15 @@ extern "C" void artInterpreterToInterpreterBridge(Thread* self, MethodHelper* mh
     }
   }
 
-  if (LIKELY(!mh->Get()->IsNative())) {
-    result->SetJ(Execute(self, *mh, code_item, *shadow_frame, JValue()).GetJ());
+  if (LIKELY(!shadow_frame->GetMethod()->IsNative())) {
+    result->SetJ(Execute(self, code_item, *shadow_frame, JValue()).GetJ());
   } else {
     // We don't expect to be asked to interpret native code (which is entered via a JNI compiler
     // generated stub) except during testing and image writing.
     CHECK(!Runtime::Current()->IsStarted());
-    Object* receiver = mh->Get()->IsStatic() ? nullptr : shadow_frame->GetVRegReference(0);
-    uint32_t* args = shadow_frame->GetVRegArgs(mh->Get()->IsStatic() ? 0 : 1);
-    UnstartedRuntimeJni(self, mh->Get(), receiver, args, result);
+    Object* receiver = is_static ? nullptr : shadow_frame->GetVRegReference(0);
+    uint32_t* args = shadow_frame->GetVRegArgs(is_static ? 0 : 1);
+    UnstartedRuntimeJni(self, shadow_frame->GetMethod(), receiver, args, result);
   }
 
   self->PopShadowFrame();
diff --git a/runtime/interpreter/interpreter.h b/runtime/interpreter/interpreter.h
index d327a71a4f..7d634b3d25 100644
--- a/runtime/interpreter/interpreter.h
+++ b/runtime/interpreter/interpreter.h
@@ -27,7 +27,6 @@ class Object;
 }  // namespace mirror
 
 union JValue;
-class MethodHelper;
 class ShadowFrame;
 class Thread;
 
@@ -42,21 +41,18 @@ extern void EnterInterpreterFromDeoptimize(Thread* self, ShadowFrame* shadow_fra
                                            JValue* ret_val)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-extern JValue EnterInterpreterFromEntryPoint(Thread* self, MethodHelper* mh,
-                                             const DexFile::CodeItem* code_item,
+extern JValue EnterInterpreterFromEntryPoint(Thread* self, const DexFile::CodeItem* code_item,
                                              ShadowFrame* shadow_frame)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
 }  // namespace interpreter
 
-extern "C" void artInterpreterToInterpreterBridge(Thread* self, MethodHelper* mh,
-                                                  const DexFile::CodeItem* code_item,
+extern "C" void artInterpreterToInterpreterBridge(Thread* self, const DexFile::CodeItem* code_item,
                                                   ShadowFrame* shadow_frame, JValue* result)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-extern "C" void artInterpreterToCompiledCodeBridge(Thread* self, MethodHelper* mh,
-                                                   const DexFile::CodeItem* code_item,
+extern "C" void artInterpreterToCompiledCodeBridge(Thread* self, const DexFile::CodeItem* code_item,
                                                    ShadowFrame* shadow_frame, JValue* result)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index eb80c307f6..5340bee5e6 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -505,14 +505,14 @@ uint32_t FindNextInstructionFollowingException(Thread* self,
   return found_dex_pc;
 }
 
-void UnexpectedOpcode(const Instruction* inst, MethodHelper& mh) {
-  LOG(FATAL) << "Unexpected instruction: " << inst->DumpString(mh.GetMethod()->GetDexFile());
-  exit(0);  // Unreachable, keep GCC happy.
+void UnexpectedOpcode(const Instruction* inst, const ShadowFrame& shadow_frame) {
+  LOG(FATAL) << "Unexpected instruction: "
+             << inst->DumpString(shadow_frame.GetMethod()->GetDexFile());
+  UNREACHABLE();
 }
 
-static void UnstartedRuntimeInvoke(Thread* self, MethodHelper* mh,
-                                   const DexFile::CodeItem* code_item, ShadowFrame* shadow_frame,
-                                   JValue* result, size_t arg_offset)
+static void UnstartedRuntimeInvoke(Thread* self, const DexFile::CodeItem* code_item,
+                                   ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
 // Assign register 'src_reg' from shadow_frame to register 'dest_reg' into new_shadow_frame.
@@ -540,30 +540,39 @@ void AbortTransaction(Thread* self, const char* fmt, ...) {
   va_end(args);
 }
 
+static mirror::Class* GetClassFromTypeIdx(mirror::ArtMethod* method, uint16_t type_idx)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  mirror::Class* type = method->GetDexCacheResolvedType(type_idx);
+  if (type == nullptr) {
+    type = Runtime::Current()->GetClassLinker()->ResolveType(type_idx, method);
+    CHECK(type != nullptr || Thread::Current()->IsExceptionPending());
+  }
+  return type;
+}
+
 template<bool is_range, bool do_assignability_check>
-bool DoCall(ArtMethod* method, Thread* self, ShadowFrame& shadow_frame,
+bool DoCall(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_frame,
             const Instruction* inst, uint16_t inst_data, JValue* result) {
   // Compute method information.
-  const DexFile::CodeItem* code_item = method->GetCodeItem();
+  const DexFile::CodeItem* code_item = called_method->GetCodeItem();
   const uint16_t num_ins = (is_range) ? inst->VRegA_3rc(inst_data) : inst->VRegA_35c(inst_data);
   uint16_t num_regs;
   if (LIKELY(code_item != NULL)) {
     num_regs = code_item->registers_size_;
     DCHECK_EQ(num_ins, code_item->ins_size_);
   } else {
-    DCHECK(method->IsNative() || method->IsProxyMethod());
+    DCHECK(called_method->IsNative() || called_method->IsProxyMethod());
     num_regs = num_ins;
   }
 
   // Allocate shadow frame on the stack.
   const char* old_cause = self->StartAssertNoThreadSuspension("DoCall");
   void* memory = alloca(ShadowFrame::ComputeSize(num_regs));
-  ShadowFrame* new_shadow_frame(ShadowFrame::Create(num_regs, &shadow_frame, method, 0, memory));
+  ShadowFrame* new_shadow_frame(ShadowFrame::Create(num_regs, &shadow_frame, called_method, 0,
+                                                    memory));
 
   // Initialize new shadow frame.
   const size_t first_dest_reg = num_regs - num_ins;
-  StackHandleScope<1> hs(self);
-  MethodHelper mh(hs.NewHandle(method));
   if (do_assignability_check) {
     // Slow path.
     // We might need to do class loading, which incurs a thread state change to kNative. So
@@ -573,11 +582,12 @@ bool DoCall(ArtMethod* method, Thread* self, ShadowFrame& shadow_frame,
 
     // We need to do runtime check on reference assignment. We need to load the shorty
     // to get the exact type of each reference argument.
-    const DexFile::TypeList* params = mh.Get()->GetParameterTypeList();
+    const DexFile::TypeList* params = new_shadow_frame->GetMethod()->GetParameterTypeList();
     uint32_t shorty_len = 0;
-    const char* shorty = mh.Get()->GetShorty(&shorty_len);
+    const char* shorty = new_shadow_frame->GetMethod()->GetShorty(&shorty_len);
 
-    // TODO: find a cleaner way to separate non-range and range information without duplicating code.
+    // TODO: find a cleaner way to separate non-range and range information without duplicating
+    // code.
     uint32_t arg[5];  // only used in invoke-XXX.
     uint32_t vregC;   // only used in invoke-XXX-range.
    if (is_range) {
@@ -589,7 +599,7 @@ bool DoCall(ArtMethod* method, Thread* self, ShadowFrame& shadow_frame,
    // Handle receiver apart since it's not part of the shorty.
    size_t dest_reg = first_dest_reg;
    size_t arg_offset = 0;
-    if (!mh.Get()->IsStatic()) {
+    if (!new_shadow_frame->GetMethod()->IsStatic()) {
      size_t receiver_reg = is_range ? vregC : arg[0];
      new_shadow_frame->SetVRegReference(dest_reg, shadow_frame.GetVRegReference(receiver_reg));
      ++dest_reg;
@@ -602,7 +612,8 @@ bool DoCall(ArtMethod* method, Thread* self, ShadowFrame& shadow_frame,
        case 'L': {
          Object* o = shadow_frame.GetVRegReference(src_reg);
          if (do_assignability_check && o != NULL) {
-            Class* arg_type = mh.GetClassFromTypeIdx(params->GetTypeItem(shorty_pos).type_idx_);
+            Class* arg_type = GetClassFromTypeIdx(new_shadow_frame->GetMethod(),
+                                                  params->GetTypeItem(shorty_pos).type_idx_);
            if (arg_type == NULL) {
              CHECK(self->IsExceptionPending());
              return false;
@@ -613,7 +624,7 @@ bool DoCall(ArtMethod* method, Thread* self, ShadowFrame& shadow_frame,
              self->ThrowNewExceptionF(self->GetCurrentLocationForThrow(),
                                       "Ljava/lang/VirtualMachineError;",
                                       "Invoking %s with bad arg %d, type '%s' not instance of '%s'",
-                                       mh.Get()->GetName(), shorty_pos,
+                                       new_shadow_frame->GetMethod()->GetName(), shorty_pos,
                                       o->GetClass()->GetDescriptor(&temp1),
                                       arg_type->GetDescriptor(&temp2));
              return false;
@@ -650,7 +661,8 @@ bool DoCall(ArtMethod* method, Thread* self, ShadowFrame& shadow_frame,
    uint16_t regList = inst->Fetch16(2);
    uint16_t count = num_ins;
    if (count == 5) {
-      AssignRegister(new_shadow_frame, shadow_frame, first_dest_reg + 4U, (inst_data >> 8) & 0x0f);
+      AssignRegister(new_shadow_frame, shadow_frame, first_dest_reg + 4U,
+                     (inst_data >> 8) & 0x0f);
      --count;
    }
    for (size_t arg_index = 0; arg_index < count; ++arg_index, regList >>= 4) {
@@ -662,17 +674,24 @@ bool DoCall(ArtMethod* method, Thread* self, ShadowFrame& shadow_frame,
 
   // Do the call now.
  if (LIKELY(Runtime::Current()->IsStarted())) {
-    if (kIsDebugBuild && mh.Get()->GetEntryPointFromInterpreter() == nullptr) {
-      LOG(FATAL) << "Attempt to invoke non-executable method: " << PrettyMethod(mh.Get());
+    if (kIsDebugBuild && new_shadow_frame->GetMethod()->GetEntryPointFromInterpreter() == nullptr) {
+      LOG(FATAL) << "Attempt to invoke non-executable method: "
+                 << PrettyMethod(new_shadow_frame->GetMethod());
+      UNREACHABLE();
    }
    if (kIsDebugBuild && Runtime::Current()->GetInstrumentation()->IsForcedInterpretOnly() &&
-        !mh.Get()->IsNative() && !mh.Get()->IsProxyMethod() &&
-        mh.Get()->GetEntryPointFromInterpreter() == artInterpreterToCompiledCodeBridge) {
-      LOG(FATAL) << "Attempt to call compiled code when -Xint: " << PrettyMethod(mh.Get());
+        !new_shadow_frame->GetMethod()->IsNative() &&
+        !new_shadow_frame->GetMethod()->IsProxyMethod() &&
+        new_shadow_frame->GetMethod()->GetEntryPointFromInterpreter()
+            == artInterpreterToCompiledCodeBridge) {
+      LOG(FATAL) << "Attempt to call compiled code when -Xint: "
+                 << PrettyMethod(new_shadow_frame->GetMethod());
+      UNREACHABLE();
    }
-    (mh.Get()->GetEntryPointFromInterpreter())(self, &mh, code_item, new_shadow_frame, result);
+    (new_shadow_frame->GetMethod()->GetEntryPointFromInterpreter())(self, code_item,
+                                                                    new_shadow_frame, result);
  } else {
-    UnstartedRuntimeInvoke(self, &mh, code_item, new_shadow_frame, result, first_dest_reg);
+    UnstartedRuntimeInvoke(self, code_item, new_shadow_frame, result, first_dest_reg);
  }
  return !self->IsExceptionPending();
 }
@@ -813,8 +832,8 @@ static void UnstartedRuntimeFindClass(Thread* self, Handle<mirror::String> class
   result->SetL(found);
 }
 
-static void UnstartedRuntimeInvoke(Thread* self, MethodHelper* mh,
-                                   const DexFile::CodeItem* code_item, ShadowFrame* shadow_frame,
+static void UnstartedRuntimeInvoke(Thread* self, const DexFile::CodeItem* code_item,
+                                   ShadowFrame* shadow_frame,
                                    JValue* result, size_t arg_offset) {
   // In a runtime that's not started we intercept certain methods to avoid complicated dependency
   // problems in core libraries.
@@ -934,7 +953,7 @@ static void UnstartedRuntimeInvoke(Thread* self, MethodHelper* mh,
    }
  } else {
    // Not special, continue with regular interpreter execution.
-    artInterpreterToInterpreterBridge(self, mh, code_item, shadow_frame, result);
+    artInterpreterToInterpreterBridge(self, code_item, shadow_frame, result);
  }
 }
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index f88d56a30a..ce7c1c3817 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -33,7 +33,6 @@
 #include "entrypoints/entrypoint_utils-inl.h"
 #include "gc/accounting/card_table-inl.h"
 #include "handle_scope-inl.h"
-#include "method_helper-inl.h"
 #include "nth_caller_visitor.h"
 #include "mirror/art_field-inl.h"
 #include "mirror/art_method.h"
@@ -70,13 +69,11 @@ namespace interpreter {
 
 // External references to both interpreter implementations.
 template<bool do_access_check, bool transaction_active>
-extern JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh,
-                                const DexFile::CodeItem* code_item,
+extern JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
                                 ShadowFrame& shadow_frame, JValue result_register);
 
 template<bool do_access_check, bool transaction_active>
-extern JValue ExecuteGotoImpl(Thread* self, MethodHelper& mh,
-                              const DexFile::CodeItem* code_item,
+extern JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item,
                               ShadowFrame& shadow_frame, JValue result_register);
 
 void ThrowNullPointerExceptionFromInterpreter(const ShadowFrame& shadow_frame)
@@ -100,7 +97,7 @@ void RecordArrayElementsInTransaction(mirror::Array* array, int32_t count)
 // DoInvokeVirtualQuick functions.
 // Returns true on success, otherwise throws an exception and returns false.
 template<bool is_range, bool do_assignability_check>
-bool DoCall(ArtMethod* method, Thread* self, ShadowFrame& shadow_frame,
+bool DoCall(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_frame,
             const Instruction* inst, uint16_t inst_data, JValue* result);
 
 // Handles invoke-XXX/range instructions.
@@ -112,19 +109,20 @@ static inline bool DoInvoke(Thread* self, ShadowFrame& shadow_frame, const Instr
   const uint32_t vregC = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c();
   Object* receiver = (type == kStatic) ? nullptr : shadow_frame.GetVRegReference(vregC);
   mirror::ArtMethod* sf_method = shadow_frame.GetMethod();
-  ArtMethod* const method = FindMethodFromCode<type, do_access_check>(
+  ArtMethod* const called_method = FindMethodFromCode<type, do_access_check>(
       method_idx, &receiver, &sf_method, self);
   // The shadow frame should already be pushed, so we don't need to update it.
-  if (UNLIKELY(method == nullptr)) {
+  if (UNLIKELY(called_method == nullptr)) {
     CHECK(self->IsExceptionPending());
     result->SetJ(0);
     return false;
-  } else if (UNLIKELY(method->IsAbstract())) {
-    ThrowAbstractMethodError(method);
+  } else if (UNLIKELY(called_method->IsAbstract())) {
+    ThrowAbstractMethodError(called_method);
     result->SetJ(0);
     return false;
   } else {
-    return DoCall<is_range, do_access_check>(method, self, shadow_frame, inst, inst_data, result);
+    return DoCall<is_range, do_access_check>(called_method, self, shadow_frame, inst, inst_data,
+                                             result);
   }
 }
 
@@ -144,18 +142,18 @@ static inline bool DoInvokeVirtualQuick(Thread* self, ShadowFrame& shadow_frame,
   }
   const uint32_t vtable_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
   CHECK(receiver->GetClass()->ShouldHaveEmbeddedImtAndVTable());
-  ArtMethod* const method = receiver->GetClass()->GetEmbeddedVTableEntry(vtable_idx);
-  if (UNLIKELY(method == nullptr)) {
+  ArtMethod* const called_method = receiver->GetClass()->GetEmbeddedVTableEntry(vtable_idx);
+  if (UNLIKELY(called_method == nullptr)) {
     CHECK(self->IsExceptionPending());
     result->SetJ(0);
     return false;
-  } else if (UNLIKELY(method->IsAbstract())) {
-    ThrowAbstractMethodError(method);
+  } else if (UNLIKELY(called_method->IsAbstract())) {
+    ThrowAbstractMethodError(called_method);
     result->SetJ(0);
     return false;
   } else {
     // No need to check since we've been quickened.
-    return DoCall<is_range, false>(method, self, shadow_frame, inst, inst_data, result);
+    return DoCall<is_range, false>(called_method, self, shadow_frame, inst, inst_data, result);
   }
 }
 
@@ -351,12 +349,12 @@ uint32_t FindNextInstructionFollowingException(Thread* self, ShadowFrame& shadow
                                                uint32_t dex_pc,
                                                const instrumentation::Instrumentation* instrumentation)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-void UnexpectedOpcode(const Instruction* inst, MethodHelper& mh)
+void UnexpectedOpcode(const Instruction* inst, const ShadowFrame& shadow_frame)
   __attribute__((cold, noreturn))
   SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
 static inline void TraceExecution(const ShadowFrame& shadow_frame, const Instruction* inst,
-                                  const uint32_t dex_pc, MethodHelper& mh)
+                                  const uint32_t dex_pc)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   constexpr bool kTracing = false;
   if (kTracing) {
@@ -364,7 +362,7 @@ static inline void TraceExecution(const ShadowFrame& shadow_frame, const Instruc
    std::ostringstream oss;
    oss << PrettyMethod(shadow_frame.GetMethod())
        << StringPrintf("\n0x%x: ", dex_pc)
-        << inst->DumpString(mh.GetMethod()->GetDexFile()) << "\n";
+        << inst->DumpString(shadow_frame.GetMethod()->GetDexFile()) << "\n";
    for (uint32_t i = 0; i < shadow_frame.NumberOfVRegs(); ++i) {
      uint32_t raw_value = shadow_frame.GetVReg(i);
      Object* ref_value = shadow_frame.GetVRegReference(i);
diff --git a/runtime/interpreter/interpreter_goto_table_impl.cc b/runtime/interpreter/interpreter_goto_table_impl.cc
index 6350c56cf9..c332a7b598 100644
--- a/runtime/interpreter/interpreter_goto_table_impl.cc
+++ b/runtime/interpreter/interpreter_goto_table_impl.cc
@@ -26,7 +26,6 @@ namespace interpreter {
 // - "inst_data" : the current instruction's first 16 bits.
 // - "dex_pc": the current pc.
 // - "shadow_frame": the current shadow frame.
-// - "mh": the current MethodHelper.
 // - "currentHandlersTable": the current table of pointer to each instruction handler.
 
 // Advance to the next instruction and updates interpreter state.
@@ -36,7 +35,7 @@ namespace interpreter {
    inst = inst->RelativeAt(disp); \
    dex_pc = static_cast<uint32_t>(static_cast<int32_t>(dex_pc) + disp); \
    shadow_frame.SetDexPC(dex_pc); \
-    TraceExecution(shadow_frame, inst, dex_pc, mh); \
+    TraceExecution(shadow_frame, inst, dex_pc); \
    inst_data = inst->Fetch16(0); \
    goto *currentHandlersTable[inst->Opcode(inst_data)]; \
  } while (false)
@@ -59,6 +58,7 @@ namespace interpreter {
  do { \
    if (kIsDebugBuild) { \
      LOG(FATAL) << "We should not be here !"; \
+      UNREACHABLE(); \
    } \
  } while (false)
@@ -111,8 +111,8 @@ namespace interpreter {
 */
 template<bool do_access_check, bool transaction_active>
-JValue ExecuteGotoImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem* code_item,
-                       ShadowFrame& shadow_frame, JValue result_register) {
+JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowFrame& shadow_frame,
+                       JValue result_register) {
  // Define handler tables:
  // - The main handler table contains execution handlers for each instruction.
  // - The alternative handler table contains prelude handlers which check for thread suspend and
@@ -2279,103 +2279,103 @@ JValue ExecuteGotoImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem*
  HANDLE_INSTRUCTION_END();
 
  HANDLE_INSTRUCTION_START(UNUSED_3E)
-    UnexpectedOpcode(inst, mh);
+    UnexpectedOpcode(inst, shadow_frame);
  HANDLE_INSTRUCTION_END();
 
  HANDLE_INSTRUCTION_START(UNUSED_3F)
-    UnexpectedOpcode(inst, mh);
+    UnexpectedOpcode(inst, shadow_frame);
  HANDLE_INSTRUCTION_END();
 
  HANDLE_INSTRUCTION_START(UNUSED_40)
-    UnexpectedOpcode(inst, mh);
+    UnexpectedOpcode(inst, shadow_frame);
  HANDLE_INSTRUCTION_END();
 
  HANDLE_INSTRUCTION_START(UNUSED_41)
-    UnexpectedOpcode(inst, mh);
+    UnexpectedOpcode(inst, shadow_frame);
  HANDLE_INSTRUCTION_END();
 
  HANDLE_INSTRUCTION_START(UNUSED_42)
-    UnexpectedOpcode(inst, mh);
+    UnexpectedOpcode(inst, shadow_frame);
  HANDLE_INSTRUCTION_END();
 
  HANDLE_INSTRUCTION_START(UNUSED_43)
-    UnexpectedOpcode(inst, mh);
+    UnexpectedOpcode(inst, shadow_frame);
  HANDLE_INSTRUCTION_END();
 
  HANDLE_INSTRUCTION_START(UNUSED_79)
-    UnexpectedOpcode(inst, mh);
+    UnexpectedOpcode(inst, shadow_frame);
  HANDLE_INSTRUCTION_END();
 
  HANDLE_INSTRUCTION_START(UNUSED_7A)
-    UnexpectedOpcode(inst, mh);
+    UnexpectedOpcode(inst, shadow_frame);
  HANDLE_INSTRUCTION_END();
 
  HANDLE_INSTRUCTION_START(UNUSED_EF)
-    UnexpectedOpcode(inst, mh);
+    UnexpectedOpcode(inst, shadow_frame);
  HANDLE_INSTRUCTION_END();
 
  HANDLE_INSTRUCTION_START(UNUSED_F0)
-    UnexpectedOpcode(inst, mh);
+    UnexpectedOpcode(inst, shadow_frame);
  HANDLE_INSTRUCTION_END();
 
  HANDLE_INSTRUCTION_START(UNUSED_F1)
-    UnexpectedOpcode(inst, mh);
+    UnexpectedOpcode(inst, shadow_frame);
  HANDLE_INSTRUCTION_END();
 
  HANDLE_INSTRUCTION_START(UNUSED_F2)
-    UnexpectedOpcode(inst, mh);
+    UnexpectedOpcode(inst, shadow_frame);
  HANDLE_INSTRUCTION_END();
 
  HANDLE_INSTRUCTION_START(UNUSED_F3)
-    UnexpectedOpcode(inst, mh);
+    UnexpectedOpcode(inst, shadow_frame);
  HANDLE_INSTRUCTION_END();
 
  HANDLE_INSTRUCTION_START(UNUSED_F4)
-    UnexpectedOpcode(inst, mh);
+    UnexpectedOpcode(inst, shadow_frame);
  HANDLE_INSTRUCTION_END();
 
  HANDLE_INSTRUCTION_START(UNUSED_F5)
-    UnexpectedOpcode(inst, mh);
+    UnexpectedOpcode(inst, shadow_frame);
  HANDLE_INSTRUCTION_END();
 
  HANDLE_INSTRUCTION_START(UNUSED_F6)
-    UnexpectedOpcode(inst, mh);
+    UnexpectedOpcode(inst, shadow_frame);
  HANDLE_INSTRUCTION_END();
 
  HANDLE_INSTRUCTION_START(UNUSED_F7)
-    UnexpectedOpcode(inst, mh);
+    UnexpectedOpcode(inst, shadow_frame);
  HANDLE_INSTRUCTION_END();
 
  HANDLE_INSTRUCTION_START(UNUSED_F8)
-    UnexpectedOpcode(inst, mh);
+    UnexpectedOpcode(inst, shadow_frame);
  HANDLE_INSTRUCTION_END();
 
  HANDLE_INSTRUCTION_START(UNUSED_F9)
-    UnexpectedOpcode(inst, mh);
+    UnexpectedOpcode(inst, shadow_frame);
  HANDLE_INSTRUCTION_END();
 
  HANDLE_INSTRUCTION_START(UNUSED_FA)
-    UnexpectedOpcode(inst, mh);
+    UnexpectedOpcode(inst, shadow_frame);
  HANDLE_INSTRUCTION_END();
 
  HANDLE_INSTRUCTION_START(UNUSED_FB)
-    UnexpectedOpcode(inst, mh);
+    UnexpectedOpcode(inst, shadow_frame);
  HANDLE_INSTRUCTION_END();
 
  HANDLE_INSTRUCTION_START(UNUSED_FC)
-    UnexpectedOpcode(inst, mh);
+    UnexpectedOpcode(inst, shadow_frame);
  HANDLE_INSTRUCTION_END();
 
  HANDLE_INSTRUCTION_START(UNUSED_FD)
-    UnexpectedOpcode(inst, mh);
+    UnexpectedOpcode(inst, shadow_frame);
  HANDLE_INSTRUCTION_END();
 
  HANDLE_INSTRUCTION_START(UNUSED_FE)
-    UnexpectedOpcode(inst, mh);
+    UnexpectedOpcode(inst, shadow_frame);
  HANDLE_INSTRUCTION_END();
 
  HANDLE_INSTRUCTION_START(UNUSED_FF)
-    UnexpectedOpcode(inst, mh);
+    UnexpectedOpcode(inst, shadow_frame);
  HANDLE_INSTRUCTION_END();
 
  exception_pending_label: {
@@ -2430,21 +2430,17 @@ JValue ExecuteGotoImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem*
 // Explicit definitions of ExecuteGotoImpl.
 template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) HOT_ATTR
-JValue ExecuteGotoImpl<true, false>(Thread* self, MethodHelper& mh,
-                                    const DexFile::CodeItem* code_item,
+JValue ExecuteGotoImpl<true, false>(Thread* self, const DexFile::CodeItem* code_item,
                                     ShadowFrame& shadow_frame, JValue result_register);
 template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) HOT_ATTR
-JValue ExecuteGotoImpl<false, false>(Thread* self, MethodHelper& mh,
-                                     const DexFile::CodeItem* code_item,
+JValue ExecuteGotoImpl<false, false>(Thread* self, const DexFile::CodeItem* code_item,
                                      ShadowFrame& shadow_frame, JValue result_register);
 template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-JValue ExecuteGotoImpl<true, true>(Thread* self, MethodHelper& mh,
-                                   const DexFile::CodeItem* code_item,
-                                   ShadowFrame& shadow_frame, JValue result_register);
+JValue ExecuteGotoImpl<true, true>(Thread* self, const DexFile::CodeItem* code_item,
+                                   ShadowFrame& shadow_frame, JValue result_register);
 template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-JValue ExecuteGotoImpl<false, true>(Thread* self, MethodHelper& mh,
-                                    const DexFile::CodeItem* code_item,
-                                    ShadowFrame& shadow_frame, JValue result_register);
+JValue ExecuteGotoImpl<false, true>(Thread* self, const DexFile::CodeItem* code_item,
+                                    ShadowFrame& shadow_frame, JValue result_register);
 
 }  // namespace interpreter
 }  // namespace art
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index 1b6f53e6c6..f9bbfa17b9 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -57,7 +57,7 @@ namespace interpreter {
   } while (false)
 
 template<bool do_access_check, bool transaction_active>
-JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem* code_item,
+JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
                          ShadowFrame& shadow_frame, JValue result_register) {
   bool do_assignability_check = do_access_check;
   if (UNLIKELY(!shadow_frame.HasReferenceArray())) {
@@ -82,7 +82,7 @@ JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem
   while (true) {
     dex_pc = inst->GetDexPc(insns);
     shadow_frame.SetDexPC(dex_pc);
-    TraceExecution(shadow_frame, inst, dex_pc, mh);
+    TraceExecution(shadow_frame, inst, dex_pc);
     inst_data = inst->Fetch16(0);
     switch (inst->Opcode(inst_data)) {
       case Instruction::NOP:
@@ -2140,27 +2140,23 @@ JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem
      case Instruction::UNUSED_EF ... Instruction::UNUSED_FF:
      case Instruction::UNUSED_79:
      case Instruction::UNUSED_7A:
-        UnexpectedOpcode(inst, mh);
+        UnexpectedOpcode(inst, shadow_frame);
    }
  }
 }  // NOLINT(readability/fn_size)
 
 // Explicit definitions of ExecuteSwitchImpl.
 template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) HOT_ATTR
-JValue ExecuteSwitchImpl<true, false>(Thread* self, MethodHelper& mh,
-                                      const DexFile::CodeItem* code_item,
+JValue ExecuteSwitchImpl<true, false>(Thread* self, const DexFile::CodeItem* code_item,
                                       ShadowFrame& shadow_frame, JValue result_register);
 template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) HOT_ATTR
-JValue ExecuteSwitchImpl<false, false>(Thread* self, MethodHelper& mh,
-                                       const DexFile::CodeItem* code_item,
+JValue ExecuteSwitchImpl<false, false>(Thread* self, const DexFile::CodeItem* code_item,
                                        ShadowFrame& shadow_frame, JValue result_register);
 template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-JValue ExecuteSwitchImpl<true, true>(Thread* self, MethodHelper& mh,
-                                     const DexFile::CodeItem* code_item,
+JValue ExecuteSwitchImpl<true, true>(Thread* self, const DexFile::CodeItem* code_item,
                                      ShadowFrame& shadow_frame, JValue result_register);
 template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-JValue ExecuteSwitchImpl<false, true>(Thread* self, MethodHelper& mh,
-                                      const DexFile::CodeItem* code_item,
+JValue ExecuteSwitchImpl<false, true>(Thread* self, const DexFile::CodeItem* code_item,
                                       ShadowFrame& shadow_frame, JValue result_register);
 
 }  // namespace interpreter
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index a5abce6ab1..5d04faccb5 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -756,6 +756,11 @@ void JavaVMExt::SweepJniWeakGlobals(IsMarkedCallback* callback, void* arg) {
   }
 }
 
+void JavaVMExt::TrimGlobals() {
+  WriterMutexLock mu(Thread::Current(), globals_lock_);
+  globals_.Trim();
+}
+
 void JavaVMExt::VisitRoots(RootCallback* callback, void* arg) {
   Thread* self = Thread::Current();
   {
diff --git a/runtime/java_vm_ext.h b/runtime/java_vm_ext.h
index 2957ba3fae..749b9fb6c0 100644
--- a/runtime/java_vm_ext.h
+++ b/runtime/java_vm_ext.h
@@ -131,6 +131,9 @@ class JavaVMExt : public JavaVM {
     return unchecked_functions_;
   }
 
+  void TrimGlobals() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      LOCKS_EXCLUDED(globals_lock_);
+
  private:
   Runtime* const runtime_;
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index 1dcfcabf9d..4797e696d8 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -566,7 +566,8 @@ class JNI {
     return soa.AddLocalReference<jobject>(decoded_obj);
   }
 
-  static void DeleteLocalRef(JNIEnv* env, jobject obj) {
+  static void DeleteLocalRef(JNIEnv* env, jobject obj)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     if (obj == nullptr) {
       return;
     }
diff --git a/runtime/mirror/art_method-inl.h b/runtime/mirror/art_method-inl.h
index 22d55e2a39..85ef4e6575 100644
--- a/runtime/mirror/art_method-inl.h
+++ b/runtime/mirror/art_method-inl.h
@@ -24,7 +24,6 @@
 #include "class_linker.h"
 #include "dex_cache.h"
 #include "dex_file.h"
-#include "method_helper.h"
 #include "object-inl.h"
 #include "object_array.h"
 #include "oat.h"
diff --git a/runtime/mirror/art_method.h b/runtime/mirror/art_method.h
index da494e0ec9..0466fe3857 100644
--- a/runtime/mirror/art_method.h
+++ b/runtime/mirror/art_method.h
@@ -32,15 +32,14 @@ namespace art {
 struct ArtMethodOffsets;
 struct ConstructorMethodOffsets;
 union JValue;
-class MethodHelper;
 class ScopedObjectAccessAlreadyRunnable;
 class StringPiece;
 class ShadowFrame;
 
 namespace mirror {
 
-typedef void (EntryPointFromInterpreter)(Thread* self, MethodHelper* mh,
-    const DexFile::CodeItem* code_item, ShadowFrame* shadow_frame, JValue* result);
+typedef void (EntryPointFromInterpreter)(Thread* self, const DexFile::CodeItem* code_item,
+                                         ShadowFrame* shadow_frame, JValue* result);
 
 #define ART_METHOD_HAS_PADDING_FIELD_ON_64_BIT
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 599f178cbf..1662ebfe82 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -575,6 +575,10 @@ inline Object* Class::Alloc(Thread* self, gc::AllocatorType allocator_type) {
                                                              allocator_type, VoidFunctor());
   if (add_finalizer && LIKELY(obj != nullptr)) {
     heap->AddFinalizerReference(self, &obj);
+    if (UNLIKELY(self->IsExceptionPending())) {
+      // Failed to allocate the finalizer reference, which means the whole allocation failed.
+      obj = nullptr;
+    }
   }
   return obj;
 }
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 566505911b..bd3bfbf9fe 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -149,6 +149,7 @@ void Class::SetStatus(Status new_status, Thread* self) {
 
 void Class::SetDexCache(DexCache* new_dex_cache) {
   SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, dex_cache_), new_dex_cache);
+  SetDexCacheStrings(new_dex_cache != nullptr ? new_dex_cache->GetStrings() : nullptr);
 }
 
 void Class::SetClassSize(uint32_t new_class_size) {
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index a77972efc5..812cfd30b1 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -29,6 +29,10 @@
 #include "read_barrier_option.h"
 #include "utils.h"
 
+#ifndef IMT_SIZE
+#error IMT_SIZE not defined
+#endif
+
 namespace art {
 
 struct ClassOffsets;
@@ -58,7 +62,7 @@ class MANAGED Class FINAL : public Object {
   // Interface method table size. Increasing this value reduces the chance of two interface methods
   // colliding in the interface method table but increases the size of classes that implement
   // (non-marker) interfaces.
-  static constexpr size_t kImtSize = 64;
+  static constexpr size_t kImtSize = IMT_SIZE;
 
   // imtable entry embedded in class object.
   struct MANAGED ImTableEntry {
@@ -654,6 +658,7 @@ class MANAGED Class FINAL : public Object {
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   DexCache* GetDexCache() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+  // Also updates the dex_cache_strings_ variable from new_dex_cache.
   void SetDexCache(DexCache* new_dex_cache) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   ALWAYS_INLINE ObjectArray<ArtMethod>* GetDirectMethods()
diff --git a/runtime/native_bridge_art_interface.cc b/runtime/native_bridge_art_interface.cc
index ffadfc61a7..1775468688 100644
--- a/runtime/native_bridge_art_interface.cc
+++ b/runtime/native_bridge_art_interface.cc
@@ -28,10 +28,8 @@ namespace art {
 
 static const char* GetMethodShorty(JNIEnv* env, jmethodID mid) {
   ScopedObjectAccess soa(env);
-  StackHandleScope<1> scope(soa.Self());
   mirror::ArtMethod* m = soa.DecodeMethod(mid);
-  MethodHelper mh(scope.NewHandle(m));
-  return mh.GetShorty();
+  return m->GetShorty();
 }
 
 static uint32_t GetNativeMethodCount(JNIEnv* env, jclass clazz) {
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index 44d1bc4ad1..07afcb660a 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -528,7 +528,7 @@ JValue InvokeVirtualOrInterfaceWithVarArgs(const ScopedObjectAccessAlreadyRunnab
 }
 
 void InvokeWithShadowFrame(Thread* self, ShadowFrame* shadow_frame, uint16_t arg_offset,
-                           MethodHelper* mh, JValue* result) {
+                           JValue* result) {
   // We want to make sure that the stack is not within a small distance from the
   // protected region in case we are calling into a leaf function whose stack
   // check has been elided.
@@ -536,11 +536,12 @@ void InvokeWithShadowFrame(Thread* self, ShadowFrame* shadow_frame, uint16_t arg
     ThrowStackOverflowError(self);
     return;
   }
-
-  ArgArray arg_array(mh->GetShorty(), mh->GetShortyLength());
+  uint32_t shorty_len;
+  const char* shorty = shadow_frame->GetMethod()->GetShorty(&shorty_len);
+  ArgArray arg_array(shorty, shorty_len);
   arg_array.BuildArgArrayFromFrame(shadow_frame, arg_offset);
   shadow_frame->GetMethod()->Invoke(self, arg_array.GetArray(), arg_array.GetNumBytes(), result,
-                                    mh->GetShorty());
+                                    shorty);
 }
 
 jobject InvokeMethod(const ScopedObjectAccessAlreadyRunnable& soa, jobject javaMethod,
diff --git a/runtime/reflection.h b/runtime/reflection.h
index f9a795194d..1764774c22 100644
--- a/runtime/reflection.h
+++ b/runtime/reflection.h
@@ -65,7 +65,7 @@ JValue InvokeVirtualOrInterfaceWithVarArgs(const ScopedObjectAccessAlreadyRunnab
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
 void InvokeWithShadowFrame(Thread* self, ShadowFrame* shadow_frame, uint16_t arg_offset,
-                           MethodHelper* mh, JValue* result)
+                           JValue* result)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
 jobject InvokeMethod(const ScopedObjectAccessAlreadyRunnable& soa, jobject method, jobject receiver,
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 5ff90d6392..beafcda8f2 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -168,7 +168,9 @@ class DumpCheckpoint FINAL : public Closure {
     const uint32_t kWaitTimeoutMs = 10000;
     bool timed_out = barrier_.Increment(self, threads_running_checkpoint, kWaitTimeoutMs);
     if (timed_out) {
-      LOG(kIsDebugBuild ? FATAL : ERROR) << "Unexpected time out during dump checkpoint.";
+      // Avoid a recursive abort.
+      LOG((kIsDebugBuild && (gAborting == 0)) ? FATAL : ERROR)
+          << "Unexpected time out during dump checkpoint.";
     }
   }
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 2cc50b3732..b5108443b0 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -735,7 +735,9 @@ void Trace::StoreExitingThreadInfo(Thread* thread) {
   if (the_trace_ != nullptr) {
     std::string name;
     thread->GetThreadName(name);
-    the_trace_->exited_threads_.Put(thread->GetTid(), name);
+    // The same thread/tid may be used multiple times. As SafeMap::Put does not allow overwriting
+    // a previous mapping, use SafeMap::Overwrite instead.
+    the_trace_->exited_threads_.Overwrite(thread->GetTid(), name);
   }
 }
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index a10c7cb895..f9098c769b 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -30,7 +30,6 @@
 #include "indenter.h"
 #include "intern_table.h"
 #include "leb128.h"
-#include "method_helper-inl.h"
 #include "mirror/art_field-inl.h"
 #include "mirror/art_method-inl.h"
 #include "mirror/class.h"
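Taken together, the signature changes running through these hunks all serve one simplification: an interpreter entry point no longer takes a MethodHelper, because everything the bridges used it for (shorty, dex file, declaring class) is reachable from the ShadowFrame's method. A toy model of the new shape, with stand-in types rather than ART's real ones:

#include <cstdint>

// Stand-in types; the real ones are art::mirror::ArtMethod, art::ShadowFrame, etc.
struct CodeItem {};
union JValue { uint64_t j; double d; };
struct Method {
  const char* shorty;
  const char* GetShorty() const { return shorty; }
};
struct ShadowFrame {
  Method* method;
  Method* GetMethod() const { return method; }
};
struct Thread {};

// The new entry-point shape: no MethodHelper parameter. The method, and through
// it the shorty and dex file, comes from shadow_frame->GetMethod(), so no extra
// handle has to be kept alive across the call.
typedef void (EntryPointFromInterpreter)(Thread* self, const CodeItem* code_item,
                                         ShadowFrame* shadow_frame, JValue* result);

void ExampleBridge(Thread* /*self*/, const CodeItem* /*code_item*/,
                   ShadowFrame* shadow_frame, JValue* result) {
  const char* shorty = shadow_frame->GetMethod()->GetShorty();  // was mh->GetShorty()
  result->j = (shorty != nullptr) ? 1u : 0u;
}

Dropping the handle also removes a StackHandleScope from every bridge call, which is why so many of the hunks above delete a StackHandleScope/MethodHelper pair.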