author Nicolas Geoffray <ngeoffray@google.com> 2024-02-09 11:30:36 +0000
committer Nicolas Geoffray <ngeoffray@google.com> 2024-02-13 17:57:29 +0000
commit 0d5dbf73461556afcd6e1a7e1109199d0fc16887 (patch)
tree d5904ca2d1cb0682efe283f3219495c2a10361ed /compiler
parent 008db94e90e07bc4d262c80ef2975681299b32fa (diff)
Reland "Only compile optimized if it is useful."
This reverts commit 35a1479ab434257e9db629fda5f4ca96bfbef3fc.

Reason for revert: Disable failing test on debuggable

Change-Id: Icd012ac9e3b37c1187adf5e915ba7c1ffc415805
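In short, the change threads one bit through baseline compilation: the graph is marked "useful optimizing" only when a later optimized compile could actually do better (an inline cache is worth collecting, inlining was cut short by baseline-only limits, or branch profiling disabled optimizations). If the bit never gets set, the JIT commits the baseline output as optimized so the method is not compiled twice. A minimal sketch of that decision, using hypothetical stand-ins (Graph, FinalKind) rather than the real HGraph/JIT driver types:

    // Sketch only: simplified stand-ins, not the ART API.
    struct Graph {
      // Set by passes that hit a baseline-only limit (e.g. the inliner).
      bool useful_optimizing = false;
      void SetUsefulOptimizing() { useful_optimizing = true; }
      bool IsUsefulOptimizing() const { return useful_optimizing; }
    };

    enum class CompilationKind { kBaseline, kOptimized };

    CompilationKind FinalKind(const Graph& graph, CompilationKind requested) {
      // If baseline compilation never found a reason to re-optimize,
      // commit the code as optimized so the method is compiled only once.
      if (requested == CompilationKind::kBaseline && !graph.IsUsefulOptimizing()) {
        return CompilationKind::kOptimized;
      }
      return requested;
    }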
Diffstat (limited to 'compiler')
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc     4
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc  4
-rw-r--r--  compiler/optimizing/code_generator_riscv64.cc   4
-rw-r--r--  compiler/optimizing/code_generator_x86.cc       6
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc    4
-rw-r--r--  compiler/optimizing/inliner.cc                  14
-rw-r--r--  compiler/optimizing/nodes.h                     8
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc      7
-rw-r--r--  compiler/optimizing/profiling_info_builder.cc   7
9 files changed, 48 insertions, 10 deletions
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 5ba26b4754..e22b24ef2f 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1306,7 +1306,9 @@ void CodeGeneratorARM64::MaybeIncrementHotness(HSuspendCheck* suspend_check, boo
__ Bind(&done);
}
- if (GetGraph()->IsCompilingBaseline() && !Runtime::Current()->IsAotCompiler()) {
+ if (GetGraph()->IsCompilingBaseline() &&
+ GetGraph()->IsUsefulOptimizing() &&
+ !Runtime::Current()->IsAotCompiler()) {
ProfilingInfo* info = GetGraph()->GetProfilingInfo();
DCHECK(info != nullptr);
DCHECK(!HasEmptyFrame());
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 85f61f5303..75fae4e859 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -2302,7 +2302,9 @@ void CodeGeneratorARMVIXL::MaybeIncrementHotness(HSuspendCheck* suspend_check,
}
}
- if (GetGraph()->IsCompilingBaseline() && !Runtime::Current()->IsAotCompiler()) {
+ if (GetGraph()->IsCompilingBaseline() &&
+ GetGraph()->IsUsefulOptimizing() &&
+ !Runtime::Current()->IsAotCompiler()) {
ProfilingInfo* info = GetGraph()->GetProfilingInfo();
DCHECK(info != nullptr);
DCHECK(!HasEmptyFrame());
diff --git a/compiler/optimizing/code_generator_riscv64.cc b/compiler/optimizing/code_generator_riscv64.cc
index ed57683e0a..93bd35b618 100644
--- a/compiler/optimizing/code_generator_riscv64.cc
+++ b/compiler/optimizing/code_generator_riscv64.cc
@@ -5763,7 +5763,9 @@ void CodeGeneratorRISCV64::MaybeIncrementHotness(HSuspendCheck* suspend_check,
__ Bind(&done);
}
- if (GetGraph()->IsCompilingBaseline() && !Runtime::Current()->IsAotCompiler()) {
+ if (GetGraph()->IsCompilingBaseline() &&
+ GetGraph()->IsUsefulOptimizing() &&
+ !Runtime::Current()->IsAotCompiler()) {
ProfilingInfo* info = GetGraph()->GetProfilingInfo();
DCHECK(info != nullptr);
DCHECK(!HasEmptyFrame());
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index a61dca3022..21d3492e8a 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1357,9 +1357,9 @@ void CodeGeneratorX86::MaybeIncrementHotness(HSuspendCheck* suspend_check, bool
}
}
- if (GetGraph()->IsCompilingBaseline() && !Runtime::Current()->IsAotCompiler()) {
- // Note the slow path doesn't save SIMD registers, so if we were to
- // call it on loop back edge, we would need to fix this.
+ if (GetGraph()->IsCompilingBaseline() &&
+ GetGraph()->IsUsefulOptimizing() &&
+ !Runtime::Current()->IsAotCompiler()) {
ProfilingInfo* info = GetGraph()->GetProfilingInfo();
DCHECK(info != nullptr);
uint32_t address = reinterpret_cast32<uint32_t>(info) +
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index db4062b00d..af6c6255e5 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1788,7 +1788,9 @@ void CodeGeneratorX86_64::MaybeIncrementHotness(HSuspendCheck* suspend_check, bo
__ Bind(&overflow);
}
- if (GetGraph()->IsCompilingBaseline() && !Runtime::Current()->IsAotCompiler()) {
+ if (GetGraph()->IsCompilingBaseline() &&
+ GetGraph()->IsUsefulOptimizing() &&
+ !Runtime::Current()->IsAotCompiler()) {
ProfilingInfo* info = GetGraph()->GetProfilingInfo();
DCHECK(info != nullptr);
CHECK(!HasEmptyFrame());
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index fd3e787fc8..d7ca17b646 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -541,6 +541,7 @@ bool HInliner::TryInline(HInvoke* invoke_instruction) {
<< " statically resolve the target";
// For baseline compilation, we will collect inline caches, so we should not
// try to inline using them.
+ outermost_graph_->SetUsefulOptimizing();
return false;
}
@@ -1552,9 +1553,7 @@ bool HInliner::IsInliningEncouraged(const HInvoke* invoke_instruction,
return false;
}
- size_t inline_max_code_units = graph_->IsCompilingBaseline()
- ? CompilerOptions::kBaselineInlineMaxCodeUnits
- : codegen_->GetCompilerOptions().GetInlineMaxCodeUnits();
+ size_t inline_max_code_units = codegen_->GetCompilerOptions().GetInlineMaxCodeUnits();
if (accessor.InsnsSizeInCodeUnits() > inline_max_code_units) {
LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedCodeItem)
<< "Method " << method->PrettyMethod()
@@ -1565,6 +1564,14 @@ bool HInliner::IsInliningEncouraged(const HInvoke* invoke_instruction,
return false;
}
+ if (graph_->IsCompilingBaseline() &&
+ accessor.InsnsSizeInCodeUnits() > CompilerOptions::kBaselineInlineMaxCodeUnits) {
+ LOG_FAIL_NO_STAT() << "Reached baseline maximum code unit for inlining "
+ << method->PrettyMethod();
+ outermost_graph_->SetUsefulOptimizing();
+ return false;
+ }
+
if (invoke_instruction->GetBlock()->GetLastInstruction()->IsThrow()) {
LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedEndsWithThrow)
<< "Method " << method->PrettyMethod()
@@ -2129,6 +2136,7 @@ bool HInliner::CanInlineBody(const HGraph* callee_graph,
if (depth_ + 1 > maximum_inlining_depth_for_baseline) {
LOG_FAIL_NO_STAT() << "Reached maximum depth for inlining in baseline compilation: "
<< depth_ << " for " << callee_graph->GetArtMethod()->PrettyMethod();
+ outermost_graph_->SetUsefulOptimizing();
return false;
}
}
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 9c96937ff7..4d6b909629 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -425,6 +425,7 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
cached_current_method_(nullptr),
art_method_(nullptr),
compilation_kind_(compilation_kind),
+ useful_optimizing_(false),
cha_single_implementation_list_(allocator->Adapter(kArenaAllocCHA)) {
blocks_.reserve(kDefaultNumberOfBlocks);
}
@@ -742,6 +743,9 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
void SetNumberOfCHAGuards(uint32_t num) { number_of_cha_guards_ = num; }
void IncrementNumberOfCHAGuards() { number_of_cha_guards_++; }
+ void SetUsefulOptimizing() { useful_optimizing_ = true; }
+ bool IsUsefulOptimizing() const { return useful_optimizing_; }
+
private:
void RemoveDeadBlocksInstructionsAsUsersAndDisconnect(const ArenaBitVector& visited) const;
void RemoveDeadBlocks(const ArenaBitVector& visited);
@@ -897,6 +901,10 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
// directly jump to.
const CompilationKind compilation_kind_;
+ // Whether after compiling baseline it is still useful re-optimizing this
+ // method.
+ bool useful_optimizing_;
+
// List of methods that are assumed to have single implementation.
ArenaSet<ArtMethod*> cha_single_implementation_list_;
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index a1c4130bc1..65e8e51712 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -905,6 +905,7 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
}
if (compilation_kind == CompilationKind::kBaseline && compiler_options.ProfileBranches()) {
+ graph->SetUsefulOptimizing();
// Branch profiling currently doesn't support running optimizations.
RunRequiredPasses(graph, codegen.get(), dex_compilation_unit, &pass_observer);
} else {
@@ -917,6 +918,7 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
// this method already, do it now.
if (jit != nullptr &&
compilation_kind == CompilationKind::kBaseline &&
+ graph->IsUsefulOptimizing() &&
graph->GetProfilingInfo() == nullptr) {
ProfilingInfoBuilder(
graph, codegen->GetCompilerOptions(), codegen.get(), compilation_stats_.get()).Run();
@@ -1448,6 +1450,11 @@ bool OptimizingCompiler::JitCompile(Thread* self,
debug_info = GenerateJitDebugInfo(info);
}
+ if (compilation_kind == CompilationKind::kBaseline &&
+ !codegen->GetGraph()->IsUsefulOptimizing()) {
+ compilation_kind = CompilationKind::kOptimized;
+ }
+
if (!code_cache->Commit(self,
region,
method,
diff --git a/compiler/optimizing/profiling_info_builder.cc b/compiler/optimizing/profiling_info_builder.cc
index 905d8d0f2f..7faf2bf5be 100644
--- a/compiler/optimizing/profiling_info_builder.cc
+++ b/compiler/optimizing/profiling_info_builder.cc
@@ -28,6 +28,7 @@
namespace art HIDDEN {
void ProfilingInfoBuilder::Run() {
+ DCHECK(GetGraph()->IsUsefulOptimizing());
DCHECK_EQ(GetGraph()->GetProfilingInfo(), nullptr);
// Order does not matter.
for (HBasicBlock* block : GetGraph()->GetReversePostOrder()) {
@@ -119,6 +120,12 @@ bool ProfilingInfoBuilder::IsInlineCacheUseful(HInvoke* invoke, CodeGenerator* c
return false;
}
}
+
+ if (!codegen->GetGraph()->IsUsefulOptimizing()) {
+ // Earlier pass knew what the calling target was. No need for an inline
+ // cache.
+ return false;
+ }
return true;
}