Introduce a CompilationKind enum for the compilation kind, replacing the separate `bool baseline` / `bool osr` parameters (kBaseline / kOptimized / osr), and fold the explicit baseline compiler-option override into the JIT compile path.
Test: test.py
Change-Id: I5329e50a6b4521933b6b171c8c0fbc618c3f67cd
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 8d4aa9f..bae402e 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -298,8 +298,7 @@
jit::JitCodeCache* code_cache,
jit::JitMemoryRegion* region,
ArtMethod* method,
- bool baseline,
- bool osr,
+ CompilationKind compilation_kind,
jit::JitLogger* jit_logger)
override
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -379,8 +378,7 @@
CodeVectorAllocator* code_allocator,
const DexCompilationUnit& dex_compilation_unit,
ArtMethod* method,
- bool baseline,
- bool osr,
+ CompilationKind compilation_kind,
VariableSizedHandleScope* handles) const;
CodeGenerator* TryCompileIntrinsic(ArenaAllocator* allocator,
@@ -717,8 +715,7 @@
CodeVectorAllocator* code_allocator,
const DexCompilationUnit& dex_compilation_unit,
ArtMethod* method,
- bool baseline,
- bool osr,
+ CompilationKind compilation_kind,
VariableSizedHandleScope* handles) const {
MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kAttemptBytecodeCompilation);
const CompilerOptions& compiler_options = GetCompilerOptions();
@@ -787,8 +784,7 @@
kInvalidInvokeType,
dead_reference_safe,
compiler_options.GetDebuggable(),
- /* osr= */ osr,
- /* baseline= */ baseline);
+ compilation_kind);
if (method != nullptr) {
graph->SetArtMethod(method);
@@ -861,7 +857,7 @@
}
}
- if (baseline) {
+ if (compilation_kind == CompilationKind::kBaseline) {
RunBaselineOptimizations(graph, codegen.get(), dex_compilation_unit, &pass_observer);
} else {
RunOptimizations(graph, codegen.get(), dex_compilation_unit, &pass_observer);
@@ -914,7 +910,7 @@
kInvalidInvokeType,
/* dead_reference_safe= */ true, // Intrinsics don't affect dead reference safety.
compiler_options.GetDebuggable(),
- /* osr= */ false);
+ CompilationKind::kOptimized);
DCHECK(Runtime::Current()->IsAotCompiler());
DCHECK(method != nullptr);
@@ -1047,8 +1043,9 @@
&code_allocator,
dex_compilation_unit,
method,
- compiler_options.IsBaseline(),
- /* osr= */ false,
+ compiler_options.IsBaseline()
+ ? CompilationKind::kBaseline
+ : CompilationKind::kOptimized,
&handles));
}
}
@@ -1194,10 +1191,14 @@
jit::JitCodeCache* code_cache,
jit::JitMemoryRegion* region,
ArtMethod* method,
- bool baseline,
- bool osr,
+ CompilationKind compilation_kind,
jit::JitLogger* jit_logger) {
const CompilerOptions& compiler_options = GetCompilerOptions();
+ // If the baseline flag was explicitly passed, change the compilation kind
+ // from optimized to baseline.
+ if (compiler_options.IsBaseline() && compilation_kind == CompilationKind::kOptimized) {
+ compilation_kind = CompilationKind::kBaseline;
+ }
DCHECK(compiler_options.IsJitCompiler());
DCHECK_EQ(compiler_options.IsJitCompilerForSharedCode(), code_cache->IsSharedRegion(*region));
StackHandleScope<3> hs(self);
@@ -1275,7 +1276,7 @@
ArrayRef<const uint8_t>(stack_map),
debug_info,
/* is_full_debug_info= */ compiler_options.GetGenerateDebugInfo(),
- osr,
+ compilation_kind,
/* has_should_deoptimize_flag= */ false,
cha_single_implementation_list)) {
code_cache->Free(self, region, reserved_code.data(), reserved_data.data());
@@ -1316,8 +1317,7 @@
&code_allocator,
dex_compilation_unit,
method,
- baseline || compiler_options.IsBaseline(),
- osr,
+ compilation_kind,
&handles));
if (codegen.get() == nullptr) {
return false;
@@ -1384,7 +1384,7 @@
ArrayRef<const uint8_t>(stack_map),
debug_info,
/* is_full_debug_info= */ compiler_options.GetGenerateDebugInfo(),
- osr,
+ compilation_kind,
codegen->GetGraph()->HasShouldDeoptimizeFlag(),
codegen->GetGraph()->GetCHASingleImplementationList())) {
code_cache->Free(self, region, reserved_code.data(), reserved_data.data());