author     2024-01-29 14:24:31 +0000
committer  2024-01-30 17:22:07 +0000
commit     33e9f1a70d5f58639b524f40bf39a8e233c04ba8 (patch)
tree       f78d2949cac297400c0854ef57a4190f77d28e1b /compiler/optimizing/optimizing_compiler.cc
parent     516020a3fbfe3db43f7faf0ac3daf5a45dbeeb6b (diff)
Reland^2 "Run optimizations with baseline compilation."
This reverts commit 3dccb13f4e92db37a13359e126c5ddc12cb674b5.
Also includes the fix for incrementing hotness that got reverted:
aosp/2906378
Bug: 313040662
Reduces jank on Compose view scrolling for 4 iterations:
- For Go Mokey:
  - Before: ~698 frames drawn / ~13.87% janky frames
  - After: ~937 frames drawn / ~5.52% janky frames
- For Pixel 8 Pro:
  - Before: ~2440 frames drawn / ~0.90% janky frames
  - After: ~2450 frames drawn / ~0.55% janky frames
Reason for revert: Reduce inlining threshold for baseline.
Change-Id: Iee5cd4c3ceb7715caf9299b56551aae6f0259769
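For scale, the percentages quoted in the commit message can be converted into rough absolute janky-frame counts. The snippet below is a back-of-the-envelope estimate derived only from the numbers above, not additional measurement data:

```cpp
// Rough estimate (not from the CL): absolute janky frames per 4 scroll iterations.
#include <cstdio>

int main() {
  // Go device: ~34% more frames drawn, roughly half the janky frames.
  std::printf("Go before:        ~%.0f janky frames\n", 698 * 0.1387);   // ~97
  std::printf("Go after:         ~%.0f janky frames\n", 937 * 0.0552);   // ~52
  // Pixel 8 Pro: similar frame count, janky frames roughly cut by 40%.
  std::printf("Pixel 8 Pro before: ~%.0f janky frames\n", 2440 * 0.0090); // ~22
  std::printf("Pixel 8 Pro after:  ~%.0f janky frames\n", 2450 * 0.0055); // ~13
}
```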
Diffstat (limited to 'compiler/optimizing/optimizing_compiler.cc')
-rw-r--r--   compiler/optimizing/optimizing_compiler.cc   36
1 file changed, 19 insertions, 17 deletions
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 2f795e6e70..70d9013f7d 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -368,10 +368,10 @@ class OptimizingCompiler final : public Compiler {
                          const DexCompilationUnit& dex_compilation_unit,
                          PassObserver* pass_observer) const;
 
-  bool RunBaselineOptimizations(HGraph* graph,
-                                CodeGenerator* codegen,
-                                const DexCompilationUnit& dex_compilation_unit,
-                                PassObserver* pass_observer) const;
+  bool RunRequiredPasses(HGraph* graph,
+                         CodeGenerator* codegen,
+                         const DexCompilationUnit& dex_compilation_unit,
+                         PassObserver* pass_observer) const;
 
   std::vector<uint8_t> GenerateJitDebugInfo(const debug::MethodDebugInfo& method_debug_info);
 
@@ -444,10 +444,10 @@ static bool IsInstructionSetSupported(InstructionSet instruction_set) {
          instruction_set == InstructionSet::kX86_64;
 }
 
-bool OptimizingCompiler::RunBaselineOptimizations(HGraph* graph,
-                                                  CodeGenerator* codegen,
-                                                  const DexCompilationUnit& dex_compilation_unit,
-                                                  PassObserver* pass_observer) const {
+bool OptimizingCompiler::RunRequiredPasses(HGraph* graph,
+                                           CodeGenerator* codegen,
+                                           const DexCompilationUnit& dex_compilation_unit,
+                                           PassObserver* pass_observer) const {
   switch (codegen->GetCompilerOptions().GetInstructionSet()) {
 #if defined(ART_ENABLE_CODEGEN_arm)
     case InstructionSet::kThumb2:
@@ -904,21 +904,15 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
     }
   }
 
-  if (compilation_kind == CompilationKind::kBaseline) {
-    RunBaselineOptimizations(graph, codegen.get(), dex_compilation_unit, &pass_observer);
+  if (compilation_kind == CompilationKind::kBaseline && compiler_options.ProfileBranches()) {
+    // Branch profiling currently doesn't support running optimizations.
+    RunRequiredPasses(graph, codegen.get(), dex_compilation_unit, &pass_observer);
   } else {
     RunOptimizations(graph, codegen.get(), dex_compilation_unit, &pass_observer);
     PassScope scope(WriteBarrierElimination::kWBEPassName, &pass_observer);
     WriteBarrierElimination(graph, compilation_stats_.get()).Run();
   }
 
-  RegisterAllocator::Strategy regalloc_strategy =
-      compiler_options.GetRegisterAllocationStrategy();
-  AllocateRegisters(graph,
-                    codegen.get(),
-                    &pass_observer,
-                    regalloc_strategy,
-                    compilation_stats_.get());
   // If we are compiling baseline and we haven't created a profiling info for
   // this method already, do it now.
   if (jit != nullptr &&
@@ -935,6 +929,14 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
     }
   }
 
+  RegisterAllocator::Strategy regalloc_strategy =
+      compiler_options.GetRegisterAllocationStrategy();
+  AllocateRegisters(graph,
+                    codegen.get(),
+                    &pass_observer,
+                    regalloc_strategy,
+                    compilation_stats_.get());
+
   codegen->Compile();
   pass_observer.DumpDisassembly();
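Putting the hunks together, the pass ordering in TryCompile() after this change looks roughly like the sketch below. This is a simplified, self-contained paraphrase of the diff above, with stub types standing in for the real ART classes (HGraph, CodeGenerator, JIT, PassObserver, etc.), not the actual implementation:

```cpp
// Simplified sketch of the new TryCompile() ordering; all names are stand-ins.
#include <cstdio>

enum class CompilationKind { kBaseline, kOptimized };

struct CompilerOptions {
  bool profile_branches = false;
  bool ProfileBranches() const { return profile_branches; }
};

// Stand-ins for the real pass entry points in optimizing_compiler.cc.
static void RunRequiredPasses()       { std::puts("required passes only"); }
static void RunOptimizations()        { std::puts("full optimization pipeline"); }
static void EliminateWriteBarriers()  { std::puts("write barrier elimination"); }
static void CreateProfilingInfo()     { std::puts("create baseline profiling info"); }
static void AllocateRegisters()       { std::puts("register allocation"); }

static void TryCompile(CompilationKind kind, const CompilerOptions& opts, bool jit) {
  if (kind == CompilationKind::kBaseline && opts.ProfileBranches()) {
    // Branch profiling currently doesn't support running optimizations.
    RunRequiredPasses();
  } else {
    // Baseline compilation now also goes through the optimization pipeline.
    RunOptimizations();
    EliminateWriteBarriers();
  }
  if (jit && kind == CompilationKind::kBaseline) {
    // Profiling info is created before register allocation in the new ordering.
    CreateProfilingInfo();
  }
  AllocateRegisters();
  std::puts("codegen->Compile()");
}

int main() {
  TryCompile(CompilationKind::kBaseline, CompilerOptions{}, /*jit=*/true);
}
```

In short, the two behavioral differences from the previous code are: baseline compilations now run the full optimizations unless branch profiling is enabled, and register allocation is deferred until after the baseline profiling info has been created.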