Introduce a 'baseline' variant for the compiler.

Implemented as a stripped-down version of the optimizing compiler
that runs no optimizations. Adjust existing code so that the code
generators' expectations still hold.
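
For illustration, the new flag is parsed alongside the other compiler
options, so it should be accepted wherever those are, e.g. (paths
hypothetical):

  dex2oat --baseline --dex-file=/data/local/tmp/app.apk \
    --oat-file=/data/local/tmp/app.odex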
Bug: 111397239
Test: test.py --baseline
Change-Id: I4328283825f9a890616e7496ed4c1e77d6bcc5dd
diff --git a/compiler/compiler.h b/compiler/compiler.h
index ef3d87f..8c07773 100644
--- a/compiler/compiler.h
+++ b/compiler/compiler.h
@@ -71,6 +71,7 @@
virtual bool JitCompile(Thread* self ATTRIBUTE_UNUSED,
jit::JitCodeCache* code_cache ATTRIBUTE_UNUSED,
ArtMethod* method ATTRIBUTE_UNUSED,
+ bool baseline ATTRIBUTE_UNUSED,
bool osr ATTRIBUTE_UNUSED,
jit::JitLogger* jit_logger ATTRIBUTE_UNUSED)
REQUIRES_SHARED(Locks::mutator_lock_) {
diff --git a/compiler/driver/compiler_options.cc b/compiler/driver/compiler_options.cc
index 8cc6cf1..3ab9afc 100644
--- a/compiler/driver/compiler_options.cc
+++ b/compiler/driver/compiler_options.cc
@@ -47,6 +47,7 @@
boot_image_(false),
core_image_(false),
app_image_(false),
+ baseline_(false),
debuggable_(false),
generate_debug_info_(kDefaultGenerateDebugInfo),
generate_mini_debug_info_(kDefaultGenerateMiniDebugInfo),
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index 34aceba..e9cbf74 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -193,6 +193,10 @@
return boot_image_;
}
+ bool IsBaseline() const {
+ return baseline_;
+ }
+
// Are we compiling a core image (small boot image only used for ART testing)?
bool IsCoreImage() const {
// Ensure that `core_image_` => `boot_image_`.
@@ -346,6 +350,7 @@
bool boot_image_;
bool core_image_;
bool app_image_;
+ bool baseline_;
bool debuggable_;
bool generate_debug_info_;
bool generate_mini_debug_info_;
diff --git a/compiler/driver/compiler_options_map-inl.h b/compiler/driver/compiler_options_map-inl.h
index 32fc887..9914d81 100644
--- a/compiler/driver/compiler_options_map-inl.h
+++ b/compiler/driver/compiler_options_map-inl.h
@@ -58,6 +58,9 @@
if (map.Exists(Base::Debuggable)) {
options->debuggable_ = true;
}
+ if (map.Exists(Base::Baseline)) {
+ options->baseline_ = true;
+ }
map.AssignIfExists(Base::TopKProfileThreshold, &options->top_k_profile_threshold_);
map.AssignIfExists(Base::AbortOnHardVerifierFailure, &options->abort_on_hard_verifier_failure_);
map.AssignIfExists(Base::AbortOnSoftVerifierFailure, &options->abort_on_soft_verifier_failure_);
@@ -159,6 +162,9 @@
.Define("--debuggable")
.IntoKey(Map::Debuggable)
+ .Define("--baseline")
+ .IntoKey(Map::Baseline)
+
.Define("--top-k-profile-threshold=_")
.template WithType<double>().WithRange(0.0, 100.0)
.IntoKey(Map::TopKProfileThreshold)
diff --git a/compiler/driver/compiler_options_map.def b/compiler/driver/compiler_options_map.def
index 529d43f..238cd46 100644
--- a/compiler/driver/compiler_options_map.def
+++ b/compiler/driver/compiler_options_map.def
@@ -48,6 +48,7 @@
COMPILER_OPTIONS_KEY (bool, GenerateMiniDebugInfo)
COMPILER_OPTIONS_KEY (bool, GenerateBuildID)
COMPILER_OPTIONS_KEY (Unit, Debuggable)
+COMPILER_OPTIONS_KEY (Unit, Baseline)
COMPILER_OPTIONS_KEY (double, TopKProfileThreshold)
COMPILER_OPTIONS_KEY (bool, AbortOnHardVerifierFailure)
COMPILER_OPTIONS_KEY (bool, AbortOnSoftVerifierFailure)
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 3fc559e..bc8641a 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -188,7 +188,7 @@
TimingLogger::ScopedTiming t2("Compiling", &logger);
JitCodeCache* const code_cache = runtime->GetJit()->GetCodeCache();
success = compiler_driver_->GetCompiler()->JitCompile(
- self, code_cache, method, osr, jit_logger_.get());
+ self, code_cache, method, /* baseline= */ false, osr, jit_logger_.get());
}
// Trim maps to reduce memory usage.
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 17d9736..130ccf3 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -4971,7 +4971,7 @@
__ Rrx(o_l, low);
}
} else {
- DCHECK(2 <= shift_value && shift_value < 32) << shift_value;
+ DCHECK(0 <= shift_value && shift_value < 32) << shift_value;
if (op->IsShl()) {
__ Lsl(o_h, high, shift_value);
__ Orr(o_h, o_h, Operand(low, ShiftType::LSR, 32 - shift_value));
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index ad50bb8..2b6ae20 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -1181,8 +1181,7 @@
HInstruction* input = instruction->GetInput();
DataType::Type input_type = input->GetType();
DataType::Type result_type = instruction->GetResultType();
- if (DataType::IsTypeConversionImplicit(input_type, result_type)) {
- // Remove the implicit conversion; this includes conversion to the same type.
+ if (instruction->IsImplicitConversion()) {
instruction->ReplaceWith(input);
instruction->GetBlock()->RemoveInstruction(instruction);
RecordSimplification();
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index 59012fa..8245453 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -240,11 +240,14 @@
// Defines a list of unreached intrinsics: that is, method calls that are recognized as
// an intrinsic, and then always converted into HIR instructions before they reach any
-// architecture-specific intrinsics code generator.
+// architecture-specific intrinsics code generator. This only applies to non-baseline
+// compilation.
#define UNREACHABLE_INTRINSIC(Arch, Name) \
void IntrinsicLocationsBuilder ## Arch::Visit ## Name(HInvoke* invoke) { \
- LOG(FATAL) << "Unreachable: intrinsic " << invoke->GetIntrinsic() \
- << " should have been converted to HIR"; \
+ if (!codegen_->GetCompilerOptions().IsBaseline()) { \
+ LOG(FATAL) << "Unreachable: intrinsic " << invoke->GetIntrinsic() \
+ << " should have been converted to HIR"; \
+ } \
} \
void IntrinsicCodeGenerator ## Arch::Visit ## Name(HInvoke* invoke) { \
LOG(FATAL) << "Unreachable: intrinsic " << invoke->GetIntrinsic() \
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 7921061..97b50d3 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -5660,6 +5660,10 @@
bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
return true;
}
+ // Return whether the conversion is implicit. This includes conversion to the same type.
+ bool IsImplicitConversion() const {
+ return DataType::IsTypeConversionImplicit(GetInputType(), GetResultType());
+ }
// Try to statically evaluate the conversion and return a HConstant
// containing the result. If the input cannot be converted, return nullptr.
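
For reference, a simplified sketch of the rule behind IsImplicitConversion()
(the real check is DataType::IsTypeConversionImplicit; the types below are
illustrative stand-ins, not ART's actual definitions):

  // A conversion is implicit when it needs no code: conversion to the same
  // type, or a widening that the in-register representation (narrow integral
  // values kept sign-/zero-extended to 32 bits) already guarantees.
  enum class Type { kBool, kInt8, kUint8, kInt16, kUint16, kInt32, kInt64 };

  bool IsNarrowIntegral(Type t) {
    return t == Type::kBool || t == Type::kInt8 || t == Type::kUint8 ||
           t == Type::kInt16 || t == Type::kUint16;
  }

  bool IsTypeConversionImplicit(Type input, Type result) {
    if (input == result) {
      return true;  // Same-type conversion is always a no-op.
    }
    return result == Type::kInt32 && IsNarrowIntegral(input);
  }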
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 46754fe..a95ddff 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -298,6 +298,7 @@
bool JitCompile(Thread* self,
jit::JitCodeCache* code_cache,
ArtMethod* method,
+ bool baseline,
bool osr,
jit::JitLogger* jit_logger)
override
@@ -383,6 +384,7 @@
CodeVectorAllocator* code_allocator,
const DexCompilationUnit& dex_compilation_unit,
ArtMethod* method,
+ bool baseline,
bool osr,
VariableSizedHandleScope* handles) const;
@@ -399,6 +401,12 @@
PassObserver* pass_observer,
VariableSizedHandleScope* handles) const;
+ bool RunBaselineOptimizations(HGraph* graph,
+ CodeGenerator* codegen,
+ const DexCompilationUnit& dex_compilation_unit,
+ PassObserver* pass_observer,
+ VariableSizedHandleScope* handles) const;
+
void GenerateJitDebugInfo(ArtMethod* method,
const debug::MethodDebugInfo& method_debug_info)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -457,6 +465,48 @@
|| instruction_set == InstructionSet::kX86_64;
}
+bool OptimizingCompiler::RunBaselineOptimizations(HGraph* graph,
+ CodeGenerator* codegen,
+ const DexCompilationUnit& dex_compilation_unit,
+ PassObserver* pass_observer,
+ VariableSizedHandleScope* handles) const {
+ switch (codegen->GetCompilerOptions().GetInstructionSet()) {
+#ifdef ART_ENABLE_CODEGEN_mips
+ case InstructionSet::kMips: {
+ OptimizationDef mips_optimizations[] = {
+ OptDef(OptimizationPass::kPcRelativeFixupsMips)
+ };
+ return RunOptimizations(graph,
+ codegen,
+ dex_compilation_unit,
+ pass_observer,
+ handles,
+ mips_optimizations);
+ }
+#endif
+#ifdef ART_ENABLE_CODEGEN_x86
+ case InstructionSet::kX86: {
+ OptimizationDef x86_optimizations[] = {
+ OptDef(OptimizationPass::kPcRelativeFixupsX86),
+ };
+ return RunOptimizations(graph,
+ codegen,
+ dex_compilation_unit,
+ pass_observer,
+ handles,
+ x86_optimizations);
+ }
+#endif
+ default:
+ UNUSED(graph);
+ UNUSED(codegen);
+ UNUSED(dex_compilation_unit);
+ UNUSED(pass_observer);
+ UNUSED(handles);
+ return false;
+ }
+}
+
bool OptimizingCompiler::RunArchOptimizations(HGraph* graph,
CodeGenerator* codegen,
const DexCompilationUnit& dex_compilation_unit,
@@ -738,6 +788,7 @@
CodeVectorAllocator* code_allocator,
const DexCompilationUnit& dex_compilation_unit,
ArtMethod* method,
+ bool baseline,
bool osr,
VariableSizedHandleScope* handles) const {
MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kAttemptBytecodeCompilation);
@@ -860,11 +911,11 @@
}
}
- RunOptimizations(graph,
- codegen.get(),
- dex_compilation_unit,
- &pass_observer,
- handles);
+ if (baseline) {
+ RunBaselineOptimizations(graph, codegen.get(), dex_compilation_unit, &pass_observer, handles);
+ } else {
+ RunOptimizations(graph, codegen.get(), dex_compilation_unit, &pass_observer, handles);
+ }
RegisterAllocator::Strategy regalloc_strategy =
compiler_options.GetRegisterAllocationStrategy();
@@ -1041,7 +1092,8 @@
&code_allocator,
dex_compilation_unit,
method,
- /* osr */ false,
+ compiler_driver->GetCompilerOptions().IsBaseline(),
+ /* osr= */ false,
&handles));
}
}
@@ -1201,6 +1253,7 @@
bool OptimizingCompiler::JitCompile(Thread* self,
jit::JitCodeCache* code_cache,
ArtMethod* method,
+ bool baseline,
bool osr,
jit::JitLogger* jit_logger) {
StackHandleScope<3> hs(self);
@@ -1315,6 +1368,7 @@
&code_allocator,
dex_compilation_unit,
method,
+ baseline,
osr,
&handles));
if (codegen.get() == nullptr) {
diff --git a/compiler/optimizing/prepare_for_register_allocation.cc b/compiler/optimizing/prepare_for_register_allocation.cc
index fc81740..12db8a0 100644
--- a/compiler/optimizing/prepare_for_register_allocation.cc
+++ b/compiler/optimizing/prepare_for_register_allocation.cc
@@ -304,4 +304,13 @@
return true;
}
+void PrepareForRegisterAllocation::VisitTypeConversion(HTypeConversion* instruction) {
+ // For simplicity, our code generators don't handle implicit type conversion, so ensure
+ // there are none before hitting codegen.
+ if (instruction->IsImplicitConversion()) {
+ instruction->ReplaceWith(instruction->GetInput());
+ instruction->GetBlock()->RemoveInstruction(instruction);
+ }
+}
+
} // namespace art
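
For intuition: with baseline skipping the instruction simplifier, a no-op
widening such as Int16 -> Int32 can now survive until this pass. A minimal,
hypothetical graph model of the replace-and-remove step above (simplified,
not ART's real data structures):

  #include <algorithm>
  #include <vector>

  struct Node {
    Node* input = nullptr;              // Single input, for simplicity.
    std::vector<Node*> uses;
    bool is_implicit_conversion = false;
  };

  // Mirrors ReplaceWith(GetInput()) + RemoveInstruction(): redirect every
  // use of `conv` to its input, then unlink `conv` from its block.
  void RemoveImplicitConversion(Node* conv, std::vector<Node*>& block) {
    if (!conv->is_implicit_conversion) {
      return;
    }
    for (Node* use : conv->uses) {
      use->input = conv->input;
    }
    block.erase(std::remove(block.begin(), block.end(), conv), block.end());
  }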
diff --git a/compiler/optimizing/prepare_for_register_allocation.h b/compiler/optimizing/prepare_for_register_allocation.h
index a8ab256..e0bb76e 100644
--- a/compiler/optimizing/prepare_for_register_allocation.h
+++ b/compiler/optimizing/prepare_for_register_allocation.h
@@ -55,6 +55,7 @@
void VisitConstructorFence(HConstructorFence* constructor_fence) override;
void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) override;
void VisitDeoptimize(HDeoptimize* deoptimize) override;
+ void VisitTypeConversion(HTypeConversion* instruction) override;
bool CanMoveClinitCheck(HInstruction* input, HInstruction* user) const;
bool CanEmitConditionAt(HCondition* condition, HInstruction* user) const;