Introduce a 'baseline' variant for the compiler.
Implemented as a stripped-down version of the optimizing compiler that
runs no optimization passes, apart from the architecture-specific fixup
passes the code generators require.
Adjust the code so that it still meets the expectations of the code
generators.
Bug: 111397239
Test: test.py --baseline
Change-Id: I4328283825f9a890616e7496ed4c1e77d6bcc5dd
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 46754fe..a95ddff 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -298,6 +298,7 @@
bool JitCompile(Thread* self,
jit::JitCodeCache* code_cache,
ArtMethod* method,
+ bool baseline,
bool osr,
jit::JitLogger* jit_logger)
override
@@ -383,6 +384,7 @@
CodeVectorAllocator* code_allocator,
const DexCompilationUnit& dex_compilation_unit,
ArtMethod* method,
+ bool baseline,
bool osr,
VariableSizedHandleScope* handles) const;
@@ -399,6 +401,12 @@
PassObserver* pass_observer,
VariableSizedHandleScope* handles) const;
+ bool RunBaselineOptimizations(HGraph* graph,
+ CodeGenerator* codegen,
+ const DexCompilationUnit& dex_compilation_unit,
+ PassObserver* pass_observer,
+ VariableSizedHandleScope* handles) const;
+
void GenerateJitDebugInfo(ArtMethod* method,
const debug::MethodDebugInfo& method_debug_info)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -457,6 +465,48 @@
|| instruction_set == InstructionSet::kX86_64;
}
+bool OptimizingCompiler::RunBaselineOptimizations(HGraph* graph,
+ CodeGenerator* codegen,
+ const DexCompilationUnit& dex_compilation_unit,
+ PassObserver* pass_observer,
+ VariableSizedHandleScope* handles) const {
+ switch (codegen->GetCompilerOptions().GetInstructionSet()) {
+#ifdef ART_ENABLE_CODEGEN_mips
+ case InstructionSet::kMips: {
+ OptimizationDef mips_optimizations[] = {
+ OptDef(OptimizationPass::kPcRelativeFixupsMips)
+ };
+ return RunOptimizations(graph,
+ codegen,
+ dex_compilation_unit,
+ pass_observer,
+ handles,
+ mips_optimizations);
+ }
+#endif
+#ifdef ART_ENABLE_CODEGEN_x86
+ case InstructionSet::kX86: {
+ OptimizationDef x86_optimizations[] = {
+ OptDef(OptimizationPass::kPcRelativeFixupsX86),
+ };
+ return RunOptimizations(graph,
+ codegen,
+ dex_compilation_unit,
+ pass_observer,
+ handles,
+ x86_optimizations);
+ }
+#endif
+ default:
+ UNUSED(graph);
+ UNUSED(codegen);
+ UNUSED(dex_compilation_unit);
+ UNUSED(pass_observer);
+ UNUSED(handles);
+ return false;
+ }
+}
+
bool OptimizingCompiler::RunArchOptimizations(HGraph* graph,
CodeGenerator* codegen,
const DexCompilationUnit& dex_compilation_unit,
@@ -738,6 +788,7 @@
CodeVectorAllocator* code_allocator,
const DexCompilationUnit& dex_compilation_unit,
ArtMethod* method,
+ bool baseline,
bool osr,
VariableSizedHandleScope* handles) const {
MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kAttemptBytecodeCompilation);
@@ -860,11 +911,11 @@
}
}
- RunOptimizations(graph,
- codegen.get(),
- dex_compilation_unit,
- &pass_observer,
- handles);
+ if (baseline) {
+ RunBaselineOptimizations(graph, codegen.get(), dex_compilation_unit, &pass_observer, handles);
+ } else {
+ RunOptimizations(graph, codegen.get(), dex_compilation_unit, &pass_observer, handles);
+ }
RegisterAllocator::Strategy regalloc_strategy =
compiler_options.GetRegisterAllocationStrategy();
@@ -1041,7 +1092,8 @@
&code_allocator,
dex_compilation_unit,
method,
- /* osr */ false,
+ compiler_driver->GetCompilerOptions().IsBaseline(),
+ /* osr= */ false,
&handles));
}
}
@@ -1201,6 +1253,7 @@
bool OptimizingCompiler::JitCompile(Thread* self,
jit::JitCodeCache* code_cache,
ArtMethod* method,
+ bool baseline,
bool osr,
jit::JitLogger* jit_logger) {
StackHandleScope<3> hs(self);
@@ -1315,6 +1368,7 @@
&code_allocator,
dex_compilation_unit,
method,
+ baseline,
osr,
&handles));
if (codegen.get() == nullptr) {