path: root/compiler/optimizing/optimizing_compiler.cc
author Nicolas Geoffray <ngeoffray@google.com> 2023-12-04 12:05:00 +0000
committer Nicolas Geoffray <ngeoffray@google.com> 2024-01-03 14:48:12 +0000
commit 41c5dde40d1c75d36a7f984c8d72ec65fbff3111 (patch)
tree 7fda2cbd5c320582b3f7b65a394d33bf9d7fed58 /compiler/optimizing/optimizing_compiler.cc
parent c4aff3d48d3040b9b5a42b9f807e7d2671e138c0 (diff)
Run optimizations with baseline compilation.
And introduce inlined inline caches, which customize an inline cache
for the top-level method being compiled.

Reduces jank on compose view scrolling for 20 seconds:
- For Go Mokey:
  - Before: ~525 frames drawn / ~14.64% janky frames
  - After: ~891 frames drawn / ~4.74% janky frames
- For Pixel 8 pro:
  - Before: ~2443 frames drawn / ~0.91% janky frames
  - After: ~2447 frames drawn / ~0.65% janky frames

Bug: 313040662
Test: test.py
Change-Id: Ibaa746c6bd3c665b18ec9cd29cb477cf21023467
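To make the control-flow change below easier to follow, here is a minimal stand-alone sketch (not ART source): CompilationKind, CompilerOptions, SelectPasses, and the printed pass names are simplified stand-ins for the types and calls touched in TryCompile.

// Minimal sketch, not ART source: stand-in types modeling the new decision
// point in OptimizingCompiler::TryCompile. Baseline compilations now go
// through the full optimization pipeline unless branch profiling is enabled.
#include <iostream>

enum class CompilationKind { kBaseline, kOptimized };  // stand-in

struct CompilerOptions {  // stand-in
  bool profile_branches = false;
  bool ProfileBranches() const { return profile_branches; }
};

void SelectPasses(CompilationKind kind, const CompilerOptions& options) {
  if (kind == CompilationKind::kBaseline && options.ProfileBranches()) {
    // Branch profiling currently doesn't support running optimizations,
    // so only the architecture-specific required passes are run.
    std::cout << "RunRequiredPasses\n";
  } else {
    // Baseline without branch profiling now shares the optimizing path,
    // followed by write barrier elimination.
    std::cout << "RunOptimizations + WriteBarrierElimination\n";
  }
  // Register allocation now happens after (possible) profiling info creation.
  std::cout << "AllocateRegisters\n";
}

int main() {
  CompilerOptions with_branch_profiling;
  with_branch_profiling.profile_branches = true;
  SelectPasses(CompilationKind::kBaseline, with_branch_profiling);  // required passes only
  SelectPasses(CompilationKind::kBaseline, CompilerOptions{});      // full optimizations
  return 0;
}

The diff also moves the register allocation step so that it runs after the baseline profiling info has been created, as shown in the second hunk below.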
Diffstat (limited to 'compiler/optimizing/optimizing_compiler.cc')
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc  36
1 file changed, 19 insertions, 17 deletions
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index d458462226..5868f22ab8 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -368,10 +368,10 @@ class OptimizingCompiler final : public Compiler {
                        const DexCompilationUnit& dex_compilation_unit,
                        PassObserver* pass_observer) const;
-  bool RunBaselineOptimizations(HGraph* graph,
-                                CodeGenerator* codegen,
-                                const DexCompilationUnit& dex_compilation_unit,
-                                PassObserver* pass_observer) const;
+  bool RunRequiredPasses(HGraph* graph,
+                         CodeGenerator* codegen,
+                         const DexCompilationUnit& dex_compilation_unit,
+                         PassObserver* pass_observer) const;
  std::vector<uint8_t> GenerateJitDebugInfo(const debug::MethodDebugInfo& method_debug_info);
@@ -444,10 +444,10 @@ static bool IsInstructionSetSupported(InstructionSet instruction_set) {
         instruction_set == InstructionSet::kX86_64;
}
-bool OptimizingCompiler::RunBaselineOptimizations(HGraph* graph,
-                                                  CodeGenerator* codegen,
-                                                  const DexCompilationUnit& dex_compilation_unit,
-                                                  PassObserver* pass_observer) const {
+bool OptimizingCompiler::RunRequiredPasses(HGraph* graph,
+                                           CodeGenerator* codegen,
+                                           const DexCompilationUnit& dex_compilation_unit,
+                                           PassObserver* pass_observer) const {
  switch (codegen->GetCompilerOptions().GetInstructionSet()) {
#if defined(ART_ENABLE_CODEGEN_arm)
    case InstructionSet::kThumb2:
@@ -904,21 +904,15 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
    }
  }
-  if (compilation_kind == CompilationKind::kBaseline) {
-    RunBaselineOptimizations(graph, codegen.get(), dex_compilation_unit, &pass_observer);
+  if (compilation_kind == CompilationKind::kBaseline && compiler_options.ProfileBranches()) {
+    // Branch profiling currently doesn't support running optimizations.
+    RunRequiredPasses(graph, codegen.get(), dex_compilation_unit, &pass_observer);
  } else {
    RunOptimizations(graph, codegen.get(), dex_compilation_unit, &pass_observer);
    PassScope scope(WriteBarrierElimination::kWBEPassName, &pass_observer);
    WriteBarrierElimination(graph, compilation_stats_.get()).Run();
  }
-  RegisterAllocator::Strategy regalloc_strategy =
-      compiler_options.GetRegisterAllocationStrategy();
-  AllocateRegisters(graph,
-                    codegen.get(),
-                    &pass_observer,
-                    regalloc_strategy,
-                    compilation_stats_.get());
  // If we are compiling baseline and we haven't created a profiling info for
  // this method already, do it now.
  if (jit != nullptr &&
@@ -935,6 +929,14 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
    }
  }
+  RegisterAllocator::Strategy regalloc_strategy =
+      compiler_options.GetRegisterAllocationStrategy();
+  AllocateRegisters(graph,
+                    codegen.get(),
+                    &pass_observer,
+                    regalloc_strategy,
+                    compilation_stats_.get());
+
  codegen->Compile();
  pass_observer.DumpDisassembly();