author      2023-12-13 17:27:59 +0000
committer   2023-12-14 16:45:27 +0000
commit      d4faa43efb17e75fc99930f798103b29553a6a5e (patch)
tree        6aa3c6728a7a7687fb5953e8def0954bb1156811 /compiler/optimizing/optimizing_compiler.cc
parent      d0a15c3de2867b2f566831307da1cd51b5957a62 (diff)
Move the construction of ProfilingInfo in the compiler.
This reduces the number of inline caches needed when the compiler can
statically determine that the call target is fixed.
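
As a rough editorial illustration of that point, here is a minimal standalone
sketch (hypothetical types and names, not ART's ProfilingInfoBuilder or HGraph
API): only virtual or interface call sites whose target cannot be proven at
compile time need an inline cache slot, so resolving more targets statically
shrinks the ProfilingInfo.

    // Standalone sketch (hypothetical model, not ART code): count how many
    // inline cache slots a method would need.
    #include <cstddef>
    #include <iostream>
    #include <vector>

    struct Invoke {
      bool is_virtual_or_interface;  // static/direct calls never need a cache
      bool target_known_statically;  // e.g. final method or exact receiver type
    };

    std::size_t CountInlineCacheSlots(const std::vector<Invoke>& invokes) {
      std::size_t slots = 0;
      for (const Invoke& invoke : invokes) {
        if (invoke.is_virtual_or_interface && !invoke.target_known_statically) {
          ++slots;  // only truly polymorphic call sites get a slot
        }
      }
      return slots;
    }

    int main() {
      std::vector<Invoke> invokes = {
          {true, false},   // virtual call, unknown target -> needs a slot
          {true, true},    // virtual call to a final method -> no slot
          {false, false},  // static/direct call -> no slot
      };
      std::cout << "inline cache slots: " << CountInlineCacheSlots(invokes) << "\n";
      return 0;
    }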
The removal of some inline cache profiling also improves the performance
of Compose scrolling. Jank data collected over 20 seconds, averaged across
50 runs:
- For Go Mokey:
- Before: ~485 frames drawn / ~17.02% janky frames
- After: ~525 frames drawn / ~14.64% janky frames
- For Pixel 8 Pro:
- Before: ~2433 frames drawn / 1.02% janky frames
- After: ~2443 frames drawn / 0.91% janky frames
Test: test.py
Change-Id: Ide4fab058d55b65b66dcf10e835f05877e71b7fc
Diffstat (limited to 'compiler/optimizing/optimizing_compiler.cc')
-rw-r--r--   compiler/optimizing/optimizing_compiler.cc   17
1 file changed, 15 insertions(+), 2 deletions(-)
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 0069a20a26..2886e731b5 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -53,6 +53,7 @@
 #include "oat_quick_method_header.h"
 #include "optimizing/write_barrier_elimination.h"
 #include "prepare_for_register_allocation.h"
+#include "profiling_info_builder.h"
 #include "reference_type_propagation.h"
 #include "register_allocator_linear_scan.h"
 #include "select_generator.h"
@@ -835,8 +836,6 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
   jit::Jit* jit = Runtime::Current()->GetJit();
   if (jit != nullptr) {
     ProfilingInfo* info = jit->GetCodeCache()->GetProfilingInfo(method, Thread::Current());
-    DCHECK_IMPLIES(compilation_kind == CompilationKind::kBaseline, info != nullptr)
-        << "Compiling a method baseline should always have a ProfilingInfo";
     graph->SetProfilingInfo(info);
   }
@@ -920,6 +919,20 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
                     &pass_observer,
                     regalloc_strategy,
                     compilation_stats_.get());
+  // If we are compiling baseline and we haven't created a profiling info for
+  // this method already, do it now.
+  if (jit != nullptr &&
+      compilation_kind == CompilationKind::kBaseline &&
+      graph->GetProfilingInfo() == nullptr) {
+    ProfilingInfoBuilder(graph, codegen->GetCompilerOptions(), compilation_stats_.get()).Run();
+    // We expect a profiling info to be created and attached to the graph.
+    // However, we may have run out of memory trying to create it, so in this
+    // case just abort the compilation.
+    if (graph->GetProfilingInfo() == nullptr) {
+      MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit);
+      return nullptr;
+    }
+  }
   codegen->Compile();
   pass_observer.DumpDisassembly();
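
To make the control flow added by the second hunk easier to follow, here is a
minimal standalone model of it (all types, names, and the simulated allocation
failure are hypothetical stand-ins, not ART's actual classes): when a baseline
compilation reaches codegen without a ProfilingInfo, one is built from the
graph, and the compilation bails out if the allocation fails.

    // Standalone sketch (hypothetical types, not ART code) of the new
    // baseline-compilation check introduced by this change.
    #include <cstdint>
    #include <memory>
    #include <vector>

    struct ProfilingInfo {
      std::vector<uint32_t> inline_cache_dex_pcs;  // one entry per cached call site
    };

    struct Graph {
      std::vector<uint32_t> polymorphic_invoke_dex_pcs;
      std::shared_ptr<ProfilingInfo> profiling_info;
    };

    // Models the builder pass: size the ProfilingInfo to the call sites that
    // actually need inline caches and attach it to the graph. Leaves the graph
    // untouched when the (simulated) allocation fails.
    void BuildProfilingInfo(Graph& graph, bool simulate_oom) {
      if (simulate_oom) {
        return;  // out of memory: graph.profiling_info stays null
      }
      auto info = std::make_shared<ProfilingInfo>();
      info->inline_cache_dex_pcs = graph.polymorphic_invoke_dex_pcs;
      graph.profiling_info = std::move(info);
    }

    // Mirrors the added hunk: build the info if missing, abort on failure
    // (where the real code records kJitOutOfMemoryForCommit and returns null).
    bool CompileBaseline(Graph& graph, bool simulate_oom) {
      if (graph.profiling_info == nullptr) {
        BuildProfilingInfo(graph, simulate_oom);
        if (graph.profiling_info == nullptr) {
          return false;  // abort the compilation
        }
      }
      // ... code generation would run here ...
      return true;
    }

    int main() {
      Graph graph{{0x10, 0x2a}, nullptr};
      return CompileBaseline(graph, /*simulate_oom=*/false) ? 0 : 1;
    }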