From e872656585952f993eb84633a66e0aedcbdf52ac Mon Sep 17 00:00:00 2001
From: Nicolas Geoffray
Date: Wed, 7 Feb 2024 11:53:09 +0000
Subject: Only compile optimized if it is useful.

If profiling doesn't benefit the method, switch a baseline compilation
into optimized.

Reduces the number of JIT compilations on the Sheets benchmark from
~3100 (2250 baseline, 850 optimized) to ~2750 (2250 baseline, 500
optimized).

Test: test.py
Change-Id: I94760481d130d2dc168152daa94429baf201f66e
---
 compiler/optimizing/optimizing_compiler.cc | 7 +++++++
 1 file changed, 7 insertions(+)

(limited to 'compiler/optimizing/optimizing_compiler.cc')

diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index a1c4130bc1..65e8e51712 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -905,6 +905,7 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
   }
 
   if (compilation_kind == CompilationKind::kBaseline && compiler_options.ProfileBranches()) {
+    graph->SetUsefulOptimizing();
     // Branch profiling currently doesn't support running optimizations.
     RunRequiredPasses(graph, codegen.get(), dex_compilation_unit, &pass_observer);
   } else {
@@ -917,6 +918,7 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
   // this method already, do it now.
   if (jit != nullptr &&
       compilation_kind == CompilationKind::kBaseline &&
+      graph->IsUsefulOptimizing() &&
       graph->GetProfilingInfo() == nullptr) {
     ProfilingInfoBuilder(
         graph, codegen->GetCompilerOptions(), codegen.get(), compilation_stats_.get()).Run();
@@ -1448,6 +1450,11 @@ bool OptimizingCompiler::JitCompile(Thread* self,
     debug_info = GenerateJitDebugInfo(info);
   }
 
+  if (compilation_kind == CompilationKind::kBaseline &&
+      !codegen->GetGraph()->IsUsefulOptimizing()) {
+    compilation_kind = CompilationKind::kOptimized;
+  }
+
   if (!code_cache->Commit(self,
                           region,
                           method,
--
cgit v1.2.3-59-g8ed1b
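
Note (not part of the patch): the sketch below is a minimal, standalone illustration of the
decision the change adds to JitCompile. It is not ART code; the Graph struct, the
useful_optimizing field, and ChooseFinalCompilationKind are hypothetical stand-ins. Only the
rule itself comes from the diff above: a baseline compilation whose profiling would not be
useful is committed as an optimized compilation, so the JIT never recompiles that method.

    // Illustrative sketch only; names are hypothetical, not the real ART API.
    #include <cassert>

    enum class CompilationKind { kBaseline, kOptimized };

    struct Graph {
      // Stand-in for HGraph::IsUsefulOptimizing(): true when the method would
      // actually benefit from a later profile-guided optimizing compilation.
      bool useful_optimizing = false;
      bool IsUsefulOptimizing() const { return useful_optimizing; }
    };

    // If a baseline compilation gathered nothing worth acting on, record the
    // result as optimized so no second compilation of the method is scheduled.
    CompilationKind ChooseFinalCompilationKind(CompilationKind requested, const Graph& graph) {
      if (requested == CompilationKind::kBaseline && !graph.IsUsefulOptimizing()) {
        return CompilationKind::kOptimized;
      }
      return requested;
    }

    int main() {
      Graph no_benefit;                                 // profiling would not help this method
      Graph benefits{/*useful_optimizing=*/ true};      // profiling is worth keeping

      assert(ChooseFinalCompilationKind(CompilationKind::kBaseline, no_benefit) ==
             CompilationKind::kOptimized);              // baseline result promoted to optimized
      assert(ChooseFinalCompilationKind(CompilationKind::kBaseline, benefits) ==
             CompilationKind::kBaseline);               // keep baseline; optimized may follow
      assert(ChooseFinalCompilationKind(CompilationKind::kOptimized, no_benefit) ==
             CompilationKind::kOptimized);
      return 0;
    }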