author | 2020-07-31 16:07:17 +0100
---|---
committer | 2020-08-03 09:58:44 +0000
commit | 1a277a6e5d5152b4fe4dd5717432ecf8941ec820 (patch)
tree | 1b7846a0ef94062ce9883a36104f32f829f0a735
parent | 89992b8bbdf2d6d3c3da6a029e184c5d59eb505c (diff)
Move mterp and switch interpreter to tiered JIT.
This is an intermediate step toward restricting ProfilingInfo usage to the
baseline compiler.
It also makes the system consistent: all interpreters now go through baseline
compilation before optimized compilation.
Bug: 112676029
Test: test.py
Change-Id: I8505ca46ede9095683ac3f5f86f0c70335bed633
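For orientation before the diff: the tiering flow this change makes uniform is a simple promotion policy. A method starts out interpreted, is queued for baseline compilation once its hotness count crosses the hot-method threshold, and only later gets an optimized compilation. The sketch below is a minimal, self-contained model of that policy, not ART code; TieredJit, MethodState, and the threshold values are invented for the example.

```cpp
// Illustrative model of a two-tier JIT promotion policy (not ART code).
// All names (TieredJit, MethodState, the thresholds) are hypothetical.
#include <cstdint>
#include <iostream>
#include <string>
#include <unordered_map>

enum class Tier { kInterpreter, kBaseline, kOptimized };

struct MethodState {
  uint32_t hotness = 0;
  Tier tier = Tier::kInterpreter;
};

class TieredJit {
 public:
  // Called on every invocation; models a hotness counter driving compilation
  // requests, the way sample counts feed the JIT thresholds.
  void NotifyInvocation(const std::string& method) {
    MethodState& state = methods_[method];
    ++state.hotness;
    if (state.tier == Tier::kInterpreter && state.hotness >= kBaselineThreshold) {
      state.tier = Tier::kBaseline;    // first tier: cheap, profiling-friendly code
    } else if (state.tier == Tier::kBaseline && state.hotness >= kOptimizedThreshold) {
      state.tier = Tier::kOptimized;   // second tier: fully optimized code
    }
  }

  Tier CurrentTier(const std::string& method) const {
    auto it = methods_.find(method);
    return it == methods_.end() ? Tier::kInterpreter : it->second.tier;
  }

 private:
  static constexpr uint32_t kBaselineThreshold = 1000;    // hypothetical value
  static constexpr uint32_t kOptimizedThreshold = 10000;  // hypothetical value
  std::unordered_map<std::string, MethodState> methods_;
};

int main() {
  TieredJit jit;
  for (int i = 0; i < 20000; ++i) {
    jit.NotifyInvocation("Foo.bar()");
  }
  std::cout << static_cast<int>(jit.CurrentTier("Foo.bar()")) << "\n";  // 2 == kOptimized
}
```

In the jit.cc hunk further down, the corresponding decision becomes unconditional: the hot-threshold path always enqueues a JitCompileTask with CompilationKind::kBaseline instead of choosing between baseline and optimized.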
-rw-r--r-- | runtime/instrumentation.cc | 10
-rw-r--r-- | runtime/interpreter/interpreter_common.cc | 4
-rw-r--r-- | runtime/interpreter/interpreter_common.h | 4
-rw-r--r-- | runtime/jit/jit.cc | 60
-rw-r--r-- | runtime/jit/jit.h | 12
-rw-r--r-- | runtime/jit/jit_code_cache.cc | 79
6 files changed, 19 insertions, 150 deletions
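The jit_code_cache.cc hunks at the end of the diff also simplify the collection policy: baseline hotness counters are reset when the next collection will be a full one, and any baseline-compiled method whose counter is still zero at collection time is moved back to the interpreter. A minimal sketch of that idea, assuming invented names (CodeCacheModel, MethodInfo, EntryPoint) rather than the real JitCodeCache API:

```cpp
// Minimal model of "reset baseline hotness, then demote cold baseline code".
// All names are hypothetical; this is not the ART JitCodeCache API.
#include <cstdint>
#include <iostream>
#include <string>
#include <unordered_map>

enum class EntryPoint { kInterpreter, kBaselineCode };

struct MethodInfo {
  uint32_t baseline_hotness = 0;
  EntryPoint entry_point = EntryPoint::kInterpreter;
};

class CodeCacheModel {
 public:
  void OnBaselineCompiled(const std::string& method) {
    methods_[method].entry_point = EntryPoint::kBaselineCode;
  }

  // Baseline code bumps its own hotness counter whenever it is entered.
  void OnBaselineEntry(const std::string& method) {
    ++methods_[method].baseline_hotness;
  }

  // Before a full collection: start polling liveness by zeroing the counters.
  void PrepareFullCollection() {
    for (auto& entry : methods_) {
      entry.second.baseline_hotness = 0;
    }
  }

  // At collection time: baseline methods that were never re-entered since the
  // reset go back to the interpreter, so their code can be collected.
  void Collect() {
    for (auto& entry : methods_) {
      MethodInfo& info = entry.second;
      if (info.entry_point == EntryPoint::kBaselineCode && info.baseline_hotness == 0) {
        info.entry_point = EntryPoint::kInterpreter;
      }
    }
  }

  bool RunsBaseline(const std::string& method) const {
    auto it = methods_.find(method);
    return it != methods_.end() && it->second.entry_point == EntryPoint::kBaselineCode;
  }

 private:
  std::unordered_map<std::string, MethodInfo> methods_;
};

int main() {
  CodeCacheModel cache;
  cache.OnBaselineCompiled("Foo.hot()");
  cache.OnBaselineCompiled("Foo.cold()");
  cache.PrepareFullCollection();
  cache.OnBaselineEntry("Foo.hot()");  // only the hot method is re-entered
  cache.Collect();
  std::cout << cache.RunsBaseline("Foo.hot()") << " "    // 1: still baseline
            << cache.RunsBaseline("Foo.cold()") << "\n";  // 0: demoted
}
```

This mirrors the SetBaselineHotnessCount(0) loop in GarbageCollectCache and the GetBaselineHotnessCount() == 0 check in DoCollection shown below.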
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 23355b1ce8..1838984644 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -928,16 +928,6 @@ void Instrumentation::UpdateMethodsCodeImpl(ArtMethod* method, const void* quick
                  // Proxy.<init> correctly in all cases.
                  method != jni::DecodeArtMethod(WellKnownClasses::java_lang_reflect_Proxy_init)) {
         new_quick_code = GetQuickInstrumentationEntryPoint();
-        if (!method->IsNative() && Runtime::Current()->GetJit() != nullptr) {
-          // Native methods use trampoline entrypoints during interpreter tracing.
-          DCHECK(!Runtime::Current()->GetJit()->GetCodeCache()->GetGarbageCollectCodeUnsafe());
-          ProfilingInfo* profiling_info = method->GetProfilingInfo(kRuntimePointerSize);
-          // Tracing will look at the saved entry point in the profiling info to know the actual
-          // entrypoint, so we store it here.
-          if (profiling_info != nullptr) {
-            profiling_info->SetSavedEntryPoint(quick_code);
-          }
-        }
       } else {
         new_quick_code = quick_code;
       }
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index a3f2e3ef02..4e88eda162 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -85,10 +85,6 @@ bool UseFastInterpreterToInterpreterInvoke(ArtMethod* method) {
   if (method->IsStatic() && !method->GetDeclaringClass()->IsVisiblyInitialized()) {
     return false;
   }
-  ProfilingInfo* profiling_info = method->GetProfilingInfo(kRuntimePointerSize);
-  if ((profiling_info != nullptr) && (profiling_info->GetSavedEntryPoint() != nullptr)) {
-    return false;
-  }
   return true;
 }
 
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 6c272f313f..ad6ceae1cc 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -299,10 +299,6 @@ static ALWAYS_INLINE bool DoInvoke(Thread* self,
   }
 
   jit::Jit* jit = Runtime::Current()->GetJit();
-  if (jit != nullptr && (type == kVirtual || type == kInterface)) {
-    jit->InvokeVirtualOrInterface(receiver, sf_method, shadow_frame.GetDexPC(), called_method);
-  }
-
   if (is_mterp && !is_range && called_method->IsIntrinsic()) {
     if (MterpHandleIntrinsic(&shadow_frame, called_method, inst, inst_data,
                              shadow_frame.GetResultRegister())) {
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index e2df149b75..8899fa4db6 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -1534,45 +1534,14 @@ bool Jit::MaybeCompileMethod(Thread* self,
   DCHECK_GE(PriorityThreadWeight(), 1);
   DCHECK_LE(PriorityThreadWeight(), HotMethodThreshold());
 
-  if (old_count < WarmMethodThreshold() && new_count >= WarmMethodThreshold()) {
-    // Note: Native method have no "warm" state or profiling info.
-    if (!method->IsNative() &&
-        (method->GetProfilingInfo(kRuntimePointerSize) == nullptr) &&
-        code_cache_->CanAllocateProfilingInfo() &&
-        !options_->UseTieredJitCompilation()) {
-      bool success = ProfilingInfo::Create(self, method, /* retry_allocation= */ false);
-      if (success) {
-        VLOG(jit) << "Start profiling " << method->PrettyMethod();
-      }
-
-      if (thread_pool_ == nullptr) {
-        // Calling ProfilingInfo::Create might put us in a suspended state, which could
-        // lead to the thread pool being deleted when we are shutting down.
-        return false;
-      }
-
-      if (!success) {
-        // We failed allocating. Instead of doing the collection on the Java thread, we push
-        // an allocation to a compiler thread, that will do the collection.
-        thread_pool_->AddTask(
-            self,
-            new JitCompileTask(method,
-                               JitCompileTask::TaskKind::kAllocateProfile,
-                               CompilationKind::kOptimized));  // Arbitrary compilation kind.
-      }
-    }
-  }
   if (UseJitCompilation()) {
     if (old_count < HotMethodThreshold() && new_count >= HotMethodThreshold()) {
       if (!code_cache_->ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
         DCHECK(thread_pool_ != nullptr);
-        CompilationKind compilation_kind =
-            (options_->UseTieredJitCompilation() || options_->UseBaselineCompiler())
-                ? CompilationKind::kBaseline
-                : CompilationKind::kOptimized;
         thread_pool_->AddTask(
             self,
-            new JitCompileTask(method, JitCompileTask::TaskKind::kCompile, compilation_kind));
+            new JitCompileTask(
+                method, JitCompileTask::TaskKind::kCompile, CompilationKind::kBaseline));
       }
     }
     if (old_count < OSRMethodThreshold() && new_count >= OSRMethodThreshold()) {
@@ -1643,30 +1612,7 @@ void Jit::MethodEntered(Thread* thread, ArtMethod* method) {
     return;
   }
 
-  ProfilingInfo* profiling_info = method->GetProfilingInfo(kRuntimePointerSize);
-  // Update the entrypoint if the ProfilingInfo has one. The interpreter will call it
-  // instead of interpreting the method. We don't update it for instrumentation as the entrypoint
-  // must remain the instrumentation entrypoint.
-  if ((profiling_info != nullptr) &&
-      (profiling_info->GetSavedEntryPoint() != nullptr) &&
-      (method->GetEntryPointFromQuickCompiledCode() != GetQuickInstrumentationEntryPoint())) {
-    Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
-        method, profiling_info->GetSavedEntryPoint());
-  } else {
-    AddSamples(thread, method, 1, /* with_backedges= */false);
-  }
-}
-
-void Jit::InvokeVirtualOrInterface(ObjPtr<mirror::Object> this_object,
-                                   ArtMethod* caller,
-                                   uint32_t dex_pc,
-                                   ArtMethod* callee ATTRIBUTE_UNUSED) {
-  ScopedAssertNoThreadSuspension ants(__FUNCTION__);
-  DCHECK(this_object != nullptr);
-  ProfilingInfo* info = caller->GetProfilingInfo(kRuntimePointerSize);
-  if (info != nullptr) {
-    info->AddInvokeInfo(dex_pc, this_object->GetClass());
-  }
+  AddSamples(thread, method, 1, /* with_backedges= */false);
 }
 
 void Jit::WaitForCompilationToFinish(Thread* self) {
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index 2af58e2187..5aa53ff8fe 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -121,12 +121,6 @@ class JitOptions {
     return use_tiered_jit_compilation_;
   }
 
-  bool CanCompileBaseline() const {
-    return use_tiered_jit_compilation_ ||
-        use_baseline_compiler_ ||
-        interpreter::IsNterpSupported();
-  }
-
   void SetUseJitCompilation(bool b) {
     use_jit_compilation_ = b;
   }
@@ -312,12 +306,6 @@ class Jit {
                   bool with_backedges)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  void InvokeVirtualOrInterface(ObjPtr<mirror::Object> this_object,
-                                ArtMethod* caller,
-                                uint32_t dex_pc,
-                                ArtMethod* callee)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
   void NotifyInterpreterToCompiledCodeTransition(Thread* self, ArtMethod* caller)
       REQUIRES_SHARED(Locks::mutator_lock_) {
     if (!IgnoreSamplesForMethod(caller)) {
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 59f1576e7c..a1a96d2ff8 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -1191,25 +1191,8 @@ void JitCodeCache::GarbageCollectCache(Thread* self) {
 
     // Start polling the liveness of compiled code to prepare for the next full collection.
     if (next_collection_will_be_full) {
-      if (Runtime::Current()->GetJITOptions()->CanCompileBaseline()) {
-        for (ProfilingInfo* info : profiling_infos_) {
-          info->SetBaselineHotnessCount(0);
-        }
-      } else {
-        // Save the entry point of methods we have compiled, and update the entry
-        // point of those methods to the interpreter. If the method is invoked, the
-        // interpreter will update its entry point to the compiled code and call it.
-        for (ProfilingInfo* info : profiling_infos_) {
-          const void* entry_point = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
-          if (!IsInZygoteDataSpace(info) && ContainsPc(entry_point)) {
-            info->SetSavedEntryPoint(entry_point);
-            // Don't call Instrumentation::UpdateMethodsCode(), as it can check the declaring
-            // class of the method. We may be concurrently running a GC which makes accessing
-            // the class unsafe. We know it is OK to bypass the instrumentation as we've just
-            // checked that the current entry point is JIT compiled code.
-            info->GetMethod()->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
-          }
-        }
+      for (ProfilingInfo* info : profiling_infos_) {
+        info->SetBaselineHotnessCount(0);
       }
 
       // Change entry points of native methods back to the GenericJNI entrypoint.
@@ -1349,54 +1332,24 @@ void JitCodeCache::DoCollection(Thread* self, bool collect_profiling_info) {
   {
     MutexLock mu(self, *Locks::jit_lock_);
 
-    if (Runtime::Current()->GetJITOptions()->CanCompileBaseline()) {
-      // Update to interpreter the methods that have baseline entrypoints and whose baseline
-      // hotness count is zero.
-      // Note that these methods may be in thread stack or concurrently revived
-      // between. That's OK, as the thread executing it will mark it.
-      for (ProfilingInfo* info : profiling_infos_) {
-        if (info->GetBaselineHotnessCount() == 0) {
-          const void* entry_point = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
-          if (ContainsPc(entry_point)) {
-            OatQuickMethodHeader* method_header =
-                OatQuickMethodHeader::FromEntryPoint(entry_point);
-            if (CodeInfo::IsBaseline(method_header->GetOptimizedCodeInfoPtr())) {
-              info->GetMethod()->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
-            }
-          }
-        }
-      }
-      // TODO: collect profiling info
-      // TODO: collect optimized code?
-    } else {
-      if (collect_profiling_info) {
-        // Clear the profiling info of methods that do not have compiled code as entrypoint.
-        // Also remove the saved entry point from the ProfilingInfo objects.
-        for (ProfilingInfo* info : profiling_infos_) {
-          const void* ptr = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
-          if (!ContainsPc(ptr) &&
-              !IsMethodBeingCompiled(info->GetMethod()) &&
-              !info->IsInUseByCompiler() &&
-              !IsInZygoteDataSpace(info)) {
-            info->GetMethod()->SetProfilingInfo(nullptr);
-          }
-
-          if (info->GetSavedEntryPoint() != nullptr) {
-            info->SetSavedEntryPoint(nullptr);
-            // We are going to move this method back to interpreter. Clear the counter now to
-            // give it a chance to be hot again.
-            ClearMethodCounter(info->GetMethod(), /*was_warm=*/ true);
+    // Update to interpreter the methods that have baseline entrypoints and whose baseline
+    // hotness count is zero.
+    // Note that these methods may be in thread stack or concurrently revived
+    // between. That's OK, as the thread executing it will mark it.
+    for (ProfilingInfo* info : profiling_infos_) {
+      if (info->GetBaselineHotnessCount() == 0) {
+        const void* entry_point = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
+        if (ContainsPc(entry_point)) {
+          OatQuickMethodHeader* method_header =
+              OatQuickMethodHeader::FromEntryPoint(entry_point);
+          if (CodeInfo::IsBaseline(method_header->GetOptimizedCodeInfoPtr())) {
+            info->GetMethod()->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
           }
         }
-      } else if (kIsDebugBuild) {
-        // Check that the profiling infos do not have a dangling entry point.
-        for (ProfilingInfo* info : profiling_infos_) {
-          DCHECK(!Runtime::Current()->IsZygote());
-          const void* entry_point = info->GetSavedEntryPoint();
-          DCHECK(entry_point == nullptr || IsInZygoteExecSpace(entry_point));
-        }
       }
     }
+    // TODO: collect profiling info
+    // TODO: collect optimized code
 
     // Mark compiled code that are entrypoints of ArtMethods. Compiled code that is not
     // an entry point is either: