author Nicolas Geoffray <ngeoffray@google.com> 2024-02-08 21:20:51 +0000
committer Nicolas Geoffray <ngeoffray@google.com> 2024-02-09 09:16:58 +0000
commit 35a1479ab434257e9db629fda5f4ca96bfbef3fc (patch)
tree 3993d38d279623e86deae3d5b288b3b5c7c5288a /compiler
parent ced2fc97ec95f2924059544f13d6ff353973d552 (diff)
Revert "Only compile optimized if it is useful."
This reverts commit e872656585952f993eb84633a66e0aedcbdf52ac.

Reason for revert: Test failures

Change-Id: I05aadb695b87f661063ff87f63eb68048d16e050
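For context, a minimal sketch of the mechanism this change reverts, reconstructed from the hunks below; the CompilationKind enum and the PromoteIfNotUseful() helper are illustrative stand-ins, not the actual ART declarations. HGraph carried a useful_optimizing_ flag that passes such as the inliner set when they bailed out for a reason an optimized recompilation could overcome, and JitCompile consulted it to decide whether a baseline compilation should be committed as optimized code directly:

enum class CompilationKind { kBaseline, kOptimized };

class HGraph {
 public:
  // Called by passes (e.g. the inliner) when they hit a baseline-only
  // limit, meaning a later optimized compile could still improve the code.
  void SetUsefulOptimizing() { useful_optimizing_ = true; }
  bool IsUsefulOptimizing() const { return useful_optimizing_; }

 private:
  // Whether, after compiling baseline, it is still useful to re-optimize
  // this method.
  bool useful_optimizing_ = false;
};

// Hypothetical wrapper for the check that sat inline in
// OptimizingCompiler::JitCompile: a baseline graph that never flagged
// itself as worth re-optimizing was committed as optimized code instead.
CompilationKind PromoteIfNotUseful(const HGraph& graph, CompilationKind kind) {
  if (kind == CompilationKind::kBaseline && !graph.IsUsefulOptimizing()) {
    return CompilationKind::kOptimized;
  }
  return kind;
}

The revert removes the flag, its setters, and this promotion, as shown in the diff below.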
Diffstat (limited to 'compiler')
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc    |  4
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc |  4
-rw-r--r--  compiler/optimizing/code_generator_riscv64.cc  |  4
-rw-r--r--  compiler/optimizing/code_generator_x86.cc      |  6
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc   |  4
-rw-r--r--  compiler/optimizing/inliner.cc                 | 14
-rw-r--r--  compiler/optimizing/nodes.h                    |  8
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc     |  7
-rw-r--r--  compiler/optimizing/profiling_info_builder.cc  |  7
9 files changed, 10 insertions(+), 48 deletions(-)
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index e22b24ef2f..5ba26b4754 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1306,9 +1306,7 @@ void CodeGeneratorARM64::MaybeIncrementHotness(HSuspendCheck* suspend_check, boo
__ Bind(&done);
}
- if (GetGraph()->IsCompilingBaseline() &&
- GetGraph()->IsUsefulOptimizing() &&
- !Runtime::Current()->IsAotCompiler()) {
+ if (GetGraph()->IsCompilingBaseline() && !Runtime::Current()->IsAotCompiler()) {
ProfilingInfo* info = GetGraph()->GetProfilingInfo();
DCHECK(info != nullptr);
DCHECK(!HasEmptyFrame());
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 75fae4e859..85f61f5303 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -2302,9 +2302,7 @@ void CodeGeneratorARMVIXL::MaybeIncrementHotness(HSuspendCheck* suspend_check,
}
}
- if (GetGraph()->IsCompilingBaseline() &&
- GetGraph()->IsUsefulOptimizing() &&
- !Runtime::Current()->IsAotCompiler()) {
+ if (GetGraph()->IsCompilingBaseline() && !Runtime::Current()->IsAotCompiler()) {
ProfilingInfo* info = GetGraph()->GetProfilingInfo();
DCHECK(info != nullptr);
DCHECK(!HasEmptyFrame());
diff --git a/compiler/optimizing/code_generator_riscv64.cc b/compiler/optimizing/code_generator_riscv64.cc
index 93bd35b618..ed57683e0a 100644
--- a/compiler/optimizing/code_generator_riscv64.cc
+++ b/compiler/optimizing/code_generator_riscv64.cc
@@ -5763,9 +5763,7 @@ void CodeGeneratorRISCV64::MaybeIncrementHotness(HSuspendCheck* suspend_check,
__ Bind(&done);
}
- if (GetGraph()->IsCompilingBaseline() &&
- GetGraph()->IsUsefulOptimizing() &&
- !Runtime::Current()->IsAotCompiler()) {
+ if (GetGraph()->IsCompilingBaseline() && !Runtime::Current()->IsAotCompiler()) {
ProfilingInfo* info = GetGraph()->GetProfilingInfo();
DCHECK(info != nullptr);
DCHECK(!HasEmptyFrame());
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 21d3492e8a..a61dca3022 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1357,9 +1357,9 @@ void CodeGeneratorX86::MaybeIncrementHotness(HSuspendCheck* suspend_check, bool
}
}
- if (GetGraph()->IsCompilingBaseline() &&
- GetGraph()->IsUsefulOptimizing() &&
- !Runtime::Current()->IsAotCompiler()) {
+ if (GetGraph()->IsCompilingBaseline() && !Runtime::Current()->IsAotCompiler()) {
+ // Note that the slow path doesn't save SIMD registers, so if we were to
+ // call it on a loop back edge, we would need to fix this.
ProfilingInfo* info = GetGraph()->GetProfilingInfo();
DCHECK(info != nullptr);
uint32_t address = reinterpret_cast32<uint32_t>(info) +
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index af6c6255e5..db4062b00d 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1788,9 +1788,7 @@ void CodeGeneratorX86_64::MaybeIncrementHotness(HSuspendCheck* suspend_check, bo
__ Bind(&overflow);
}
- if (GetGraph()->IsCompilingBaseline() &&
- GetGraph()->IsUsefulOptimizing() &&
- !Runtime::Current()->IsAotCompiler()) {
+ if (GetGraph()->IsCompilingBaseline() && !Runtime::Current()->IsAotCompiler()) {
ProfilingInfo* info = GetGraph()->GetProfilingInfo();
DCHECK(info != nullptr);
CHECK(!HasEmptyFrame());
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index d7ca17b646..fd3e787fc8 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -541,7 +541,6 @@ bool HInliner::TryInline(HInvoke* invoke_instruction) {
<< " statically resolve the target";
// For baseline compilation, we will collect inline caches, so we should not
// try to inline using them.
- outermost_graph_->SetUsefulOptimizing();
return false;
}
@@ -1553,7 +1552,9 @@ bool HInliner::IsInliningEncouraged(const HInvoke* invoke_instruction,
return false;
}
- size_t inline_max_code_units = codegen_->GetCompilerOptions().GetInlineMaxCodeUnits();
+ size_t inline_max_code_units = graph_->IsCompilingBaseline()
+ ? CompilerOptions::kBaselineInlineMaxCodeUnits
+ : codegen_->GetCompilerOptions().GetInlineMaxCodeUnits();
if (accessor.InsnsSizeInCodeUnits() > inline_max_code_units) {
LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedCodeItem)
<< "Method " << method->PrettyMethod()
@@ -1564,14 +1565,6 @@ bool HInliner::IsInliningEncouraged(const HInvoke* invoke_instruction,
return false;
}
- if (graph_->IsCompilingBaseline() &&
- accessor.InsnsSizeInCodeUnits() > CompilerOptions::kBaselineInlineMaxCodeUnits) {
- LOG_FAIL_NO_STAT() << "Reached baseline maximum code unit for inlining "
- << method->PrettyMethod();
- outermost_graph_->SetUsefulOptimizing();
- return false;
- }
-
if (invoke_instruction->GetBlock()->GetLastInstruction()->IsThrow()) {
LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedEndsWithThrow)
<< "Method " << method->PrettyMethod()
@@ -2136,7 +2129,6 @@ bool HInliner::CanInlineBody(const HGraph* callee_graph,
if (depth_ + 1 > maximum_inlining_depth_for_baseline) {
LOG_FAIL_NO_STAT() << "Reached maximum depth for inlining in baseline compilation: "
<< depth_ << " for " << callee_graph->GetArtMethod()->PrettyMethod();
- outermost_graph_->SetUsefulOptimizing();
return false;
}
}
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 4d6b909629..9c96937ff7 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -425,7 +425,6 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
cached_current_method_(nullptr),
art_method_(nullptr),
compilation_kind_(compilation_kind),
- useful_optimizing_(false),
cha_single_implementation_list_(allocator->Adapter(kArenaAllocCHA)) {
blocks_.reserve(kDefaultNumberOfBlocks);
}
@@ -743,9 +742,6 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
void SetNumberOfCHAGuards(uint32_t num) { number_of_cha_guards_ = num; }
void IncrementNumberOfCHAGuards() { number_of_cha_guards_++; }
- void SetUsefulOptimizing() { useful_optimizing_ = true; }
- bool IsUsefulOptimizing() const { return useful_optimizing_; }
-
private:
void RemoveDeadBlocksInstructionsAsUsersAndDisconnect(const ArenaBitVector& visited) const;
void RemoveDeadBlocks(const ArenaBitVector& visited);
@@ -901,10 +897,6 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
// directly jump to.
const CompilationKind compilation_kind_;
- // Whether after compiling baseline it is still useful re-optimizing this
- // method.
- bool useful_optimizing_;
-
// List of methods that are assumed to have single implementation.
ArenaSet<ArtMethod*> cha_single_implementation_list_;
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 65e8e51712..a1c4130bc1 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -905,7 +905,6 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
}
if (compilation_kind == CompilationKind::kBaseline && compiler_options.ProfileBranches()) {
- graph->SetUsefulOptimizing();
// Branch profiling currently doesn't support running optimizations.
RunRequiredPasses(graph, codegen.get(), dex_compilation_unit, &pass_observer);
} else {
@@ -918,7 +917,6 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
// this method already, do it now.
if (jit != nullptr &&
compilation_kind == CompilationKind::kBaseline &&
- graph->IsUsefulOptimizing() &&
graph->GetProfilingInfo() == nullptr) {
ProfilingInfoBuilder(
graph, codegen->GetCompilerOptions(), codegen.get(), compilation_stats_.get()).Run();
@@ -1450,11 +1448,6 @@ bool OptimizingCompiler::JitCompile(Thread* self,
debug_info = GenerateJitDebugInfo(info);
}
- if (compilation_kind == CompilationKind::kBaseline &&
- !codegen->GetGraph()->IsUsefulOptimizing()) {
- compilation_kind = CompilationKind::kOptimized;
- }
-
if (!code_cache->Commit(self,
region,
method,
diff --git a/compiler/optimizing/profiling_info_builder.cc b/compiler/optimizing/profiling_info_builder.cc
index 7faf2bf5be..905d8d0f2f 100644
--- a/compiler/optimizing/profiling_info_builder.cc
+++ b/compiler/optimizing/profiling_info_builder.cc
@@ -28,7 +28,6 @@
namespace art HIDDEN {
void ProfilingInfoBuilder::Run() {
- DCHECK(GetGraph()->IsUsefulOptimizing());
DCHECK_EQ(GetGraph()->GetProfilingInfo(), nullptr);
// Order does not matter.
for (HBasicBlock* block : GetGraph()->GetReversePostOrder()) {
@@ -120,12 +119,6 @@ bool ProfilingInfoBuilder::IsInlineCacheUseful(HInvoke* invoke, CodeGenerator* c
return false;
}
}
-
- if (!codegen->GetGraph()->IsUsefulOptimizing()) {
- // Earlier pass knew what the calling target was. No need for an inline
- // cache.
- return false;
- }
return true;
}