author     2018-10-09 08:45:24 +0100
committer  2018-10-10 11:10:08 +0100
commit     acc56ac6a33a23abbf4a9a62528e352ea28c5407
tree       e1dd4569fd5b49879ec8dfd1b5ef5204a1084407 /compiler/optimizing
parent     4ca6cc7ddcbd568b365c428371ea9675799775af
Introduce a 'baseline' variant for the compiler.
Implemented as a stripped-down version of the optimizing compiler
that runs no optimizations. Adjust code so that it still meets the
expectations of the code generators.

Bug: 111397239
Test: test.py --baseline
Change-Id: I4328283825f9a890616e7496ed4c1e77d6bcc5dd
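
As context for the diff below, here is a minimal standalone sketch (not ART
code) of the dispatch this patch introduces: a single baseline flag selects
between the full optimization pipeline and the few architecture-specific
fixups that remain required for correctness. All names in the sketch (Graph,
RunPass, the pass strings) are illustrative stand-ins for ART's HGraph,
OptimizationDef, and pass machinery.

#include <iostream>
#include <string>
#include <vector>

// Stand-in for ART's HGraph; records which passes ran.
struct Graph { std::vector<std::string> applied_passes; };

void RunPass(Graph& graph, const std::string& name) {
  graph.applied_passes.push_back(name);
}

// Models RunOptimizations: the full pipeline (pass names illustrative).
void RunOptimizations(Graph& graph) {
  for (const char* pass : {"instruction_simplifier", "gvn", "inliner", "licm"}) {
    RunPass(graph, pass);
  }
}

// Models RunBaselineOptimizations: only correctness-mandatory fixups,
// e.g. the pc-relative fixups the patch keeps for x86 and MIPS.
void RunBaselineOptimizations(Graph& graph) {
  RunPass(graph, "pc_relative_fixups_x86");
}

// Models the TryCompile change: one flag picks the pipeline.
void Compile(Graph& graph, bool baseline) {
  if (baseline) {
    RunBaselineOptimizations(graph);
  } else {
    RunOptimizations(graph);
  }
}

int main() {
  Graph g;
  Compile(g, /* baseline= */ true);
  for (const auto& pass : g.applied_passes) {
    std::cout << pass << '\n';
  }
  return 0;
}

Routing both modes through the same entry point, as TryCompile does in the
patch, lets the code generators and register allocator run unchanged in
baseline and optimizing compilation.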
Diffstat (limited to 'compiler/optimizing')
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc          |  2
-rw-r--r--  compiler/optimizing/instruction_simplifier.cc           |  3
-rw-r--r--  compiler/optimizing/intrinsics.h                        |  9
-rw-r--r--  compiler/optimizing/nodes.h                             |  4
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc              | 66
-rw-r--r--  compiler/optimizing/prepare_for_register_allocation.cc  |  9
-rw-r--r--  compiler/optimizing/prepare_for_register_allocation.h   |  1
7 files changed, 82 insertions, 12 deletions
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 17d973653a..130ccf3126 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -4971,7 +4971,7 @@ void InstructionCodeGeneratorARMVIXL::HandleShift(HBinaryOperation* op) {
           __ Rrx(o_l, low);
         }
       } else {
-        DCHECK(2 <= shift_value && shift_value < 32) << shift_value;
+        DCHECK(0 <= shift_value && shift_value < 32) << shift_value;
         if (op->IsShl()) {
           __ Lsl(o_h, high, shift_value);
           __ Orr(o_h, o_h, Operand(low, ShiftType::LSR, 32 - shift_value));
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index ad50bb877e..2b6ae2019f 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -1181,8 +1181,7 @@ void InstructionSimplifierVisitor::VisitTypeConversion(HTypeConversion* instruct
   HInstruction* input = instruction->GetInput();
   DataType::Type input_type = input->GetType();
   DataType::Type result_type = instruction->GetResultType();
-  if (DataType::IsTypeConversionImplicit(input_type, result_type)) {
-    // Remove the implicit conversion; this includes conversion to the same type.
+  if (instruction->IsImplicitConversion()) {
     instruction->ReplaceWith(input);
     instruction->GetBlock()->RemoveInstruction(instruction);
     RecordSimplification();
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index 59012faea7..8245453ab5 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -240,11 +240,14 @@ void IntrinsicCodeGenerator ## Arch::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNU
 
 // Defines a list of unreached intrinsics: that is, method calls that are recognized as
 // an intrinsic, and then always converted into HIR instructions before they reach any
-// architecture-specific intrinsics code generator.
+// architecture-specific intrinsics code generator. This only applies to non-baseline
+// compilation.
 #define UNREACHABLE_INTRINSIC(Arch, Name)                                \
 void IntrinsicLocationsBuilder ## Arch::Visit ## Name(HInvoke* invoke) { \
-  LOG(FATAL) << "Unreachable: intrinsic " << invoke->GetIntrinsic()      \
-             << " should have been converted to HIR";                    \
+  if (!codegen_->GetCompilerOptions().IsBaseline()) {                    \
+    LOG(FATAL) << "Unreachable: intrinsic " << invoke->GetIntrinsic()    \
+               << " should have been converted to HIR";                  \
+  }                                                                      \
 }                                                                        \
 void IntrinsicCodeGenerator ## Arch::Visit ## Name(HInvoke* invoke) {    \
   LOG(FATAL) << "Unreachable: intrinsic " << invoke->GetIntrinsic()      \
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 7921061326..97b50d36da 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -5660,6 +5660,10 @@ class HTypeConversion final : public HExpression<1> {
   bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
     return true;
   }
+  // Return whether the conversion is implicit. This includes conversion to the same type.
+  bool IsImplicitConversion() const {
+    return DataType::IsTypeConversionImplicit(GetInputType(), GetResultType());
+  }
 
   // Try to statically evaluate the conversion and return a HConstant
   // containing the result. If the input cannot be converted, return nullptr.
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 46754fe33f..a95ddff188 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -298,6 +298,7 @@ class OptimizingCompiler final : public Compiler {
   bool JitCompile(Thread* self,
                   jit::JitCodeCache* code_cache,
                   ArtMethod* method,
+                  bool baseline,
                   bool osr,
                   jit::JitLogger* jit_logger)
       override
@@ -383,6 +384,7 @@ class OptimizingCompiler final : public Compiler {
                             CodeVectorAllocator* code_allocator,
                             const DexCompilationUnit& dex_compilation_unit,
                             ArtMethod* method,
+                            bool baseline,
                             bool osr,
                             VariableSizedHandleScope* handles) const;
 
@@ -399,6 +401,12 @@ class OptimizingCompiler final : public Compiler {
                            PassObserver* pass_observer,
                            VariableSizedHandleScope* handles) const;
 
+  bool RunBaselineOptimizations(HGraph* graph,
+                                CodeGenerator* codegen,
+                                const DexCompilationUnit& dex_compilation_unit,
+                                PassObserver* pass_observer,
+                                VariableSizedHandleScope* handles) const;
+
   void GenerateJitDebugInfo(ArtMethod* method,
                             const debug::MethodDebugInfo& method_debug_info)
       REQUIRES_SHARED(Locks::mutator_lock_);
@@ -457,6 +465,48 @@ static bool IsInstructionSetSupported(InstructionSet instruction_set) {
       || instruction_set == InstructionSet::kX86_64;
 }
 
+bool OptimizingCompiler::RunBaselineOptimizations(HGraph* graph,
+                                                  CodeGenerator* codegen,
+                                                  const DexCompilationUnit& dex_compilation_unit,
+                                                  PassObserver* pass_observer,
+                                                  VariableSizedHandleScope* handles) const {
+  switch (codegen->GetCompilerOptions().GetInstructionSet()) {
+#ifdef ART_ENABLE_CODEGEN_mips
+    case InstructionSet::kMips: {
+      OptimizationDef mips_optimizations[] = {
+        OptDef(OptimizationPass::kPcRelativeFixupsMips)
+      };
+      return RunOptimizations(graph,
+                              codegen,
+                              dex_compilation_unit,
+                              pass_observer,
+                              handles,
+                              mips_optimizations);
+    }
+#endif
+#ifdef ART_ENABLE_CODEGEN_x86
+    case InstructionSet::kX86: {
+      OptimizationDef x86_optimizations[] = {
+        OptDef(OptimizationPass::kPcRelativeFixupsX86),
+      };
+      return RunOptimizations(graph,
+                              codegen,
+                              dex_compilation_unit,
+                              pass_observer,
+                              handles,
+                              x86_optimizations);
+    }
+#endif
+    default:
+      UNUSED(graph);
+      UNUSED(codegen);
+      UNUSED(dex_compilation_unit);
+      UNUSED(pass_observer);
+      UNUSED(handles);
+      return false;
+  }
+}
+
 bool OptimizingCompiler::RunArchOptimizations(HGraph* graph,
                                               CodeGenerator* codegen,
                                               const DexCompilationUnit& dex_compilation_unit,
@@ -738,6 +788,7 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
                                               CodeVectorAllocator* code_allocator,
                                               const DexCompilationUnit& dex_compilation_unit,
                                               ArtMethod* method,
+                                              bool baseline,
                                               bool osr,
                                               VariableSizedHandleScope* handles) const {
   MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kAttemptBytecodeCompilation);
@@ -860,11 +911,11 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
     }
   }
 
-  RunOptimizations(graph,
-                   codegen.get(),
-                   dex_compilation_unit,
-                   &pass_observer,
-                   handles);
+  if (baseline) {
+    RunBaselineOptimizations(graph, codegen.get(), dex_compilation_unit, &pass_observer, handles);
+  } else {
+    RunOptimizations(graph, codegen.get(), dex_compilation_unit, &pass_observer, handles);
+  }
 
   RegisterAllocator::Strategy regalloc_strategy =
       compiler_options.GetRegisterAllocationStrategy();
@@ -1041,7 +1092,8 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
                       &code_allocator,
                       dex_compilation_unit,
                       method,
-                      /* osr */ false,
+                      compiler_driver->GetCompilerOptions().IsBaseline(),
+                      /* osr= */ false,
                       &handles));
     }
   }
@@ -1201,6 +1253,7 @@ bool CanEncodeInlinedMethodInStackMap(const DexFile& caller_dex_file, ArtMethod*
 bool OptimizingCompiler::JitCompile(Thread* self,
                                     jit::JitCodeCache* code_cache,
                                     ArtMethod* method,
+                                    bool baseline,
                                     bool osr,
                                     jit::JitLogger* jit_logger) {
   StackHandleScope<3> hs(self);
@@ -1315,6 +1368,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
                       &code_allocator,
                       dex_compilation_unit,
                       method,
+                      baseline,
                       osr,
                       &handles));
   if (codegen.get() == nullptr) {
diff --git a/compiler/optimizing/prepare_for_register_allocation.cc b/compiler/optimizing/prepare_for_register_allocation.cc
index fc81740013..12db8a06de 100644
--- a/compiler/optimizing/prepare_for_register_allocation.cc
+++ b/compiler/optimizing/prepare_for_register_allocation.cc
@@ -304,4 +304,13 @@ bool PrepareForRegisterAllocation::CanMoveClinitCheck(HInstruction* input,
   return true;
 }
 
+void PrepareForRegisterAllocation::VisitTypeConversion(HTypeConversion* instruction) {
+  // For simplicity, our code generators don't handle implicit type conversion, so ensure
+  // there are none before hitting codegen.
+  if (instruction->IsImplicitConversion()) {
+    instruction->ReplaceWith(instruction->GetInput());
+    instruction->GetBlock()->RemoveInstruction(instruction);
+  }
+}
+
 }  // namespace art
diff --git a/compiler/optimizing/prepare_for_register_allocation.h b/compiler/optimizing/prepare_for_register_allocation.h
index a8ab256e27..e0bb76eb22 100644
--- a/compiler/optimizing/prepare_for_register_allocation.h
+++ b/compiler/optimizing/prepare_for_register_allocation.h
@@ -55,6 +55,7 @@ class PrepareForRegisterAllocation : public HGraphDelegateVisitor {
   void VisitConstructorFence(HConstructorFence* constructor_fence) override;
   void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) override;
   void VisitDeoptimize(HDeoptimize* deoptimize) override;
+  void VisitTypeConversion(HTypeConversion* instruction) override;
 
   bool CanMoveClinitCheck(HInstruction* input, HInstruction* user) const;
   bool CanEmitConditionAt(HCondition* condition, HInstruction* user) const;
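
Why PrepareForRegisterAllocation gains VisitTypeConversion: the baseline
variant skips InstructionSimplifier, so implicit type conversions that the
simplifier used to remove can now reach the later phases; the new visitor
restores the invariant that the code generators never see them. Below is a
minimal standalone sketch of that invariant; the enum, the
IsTypeConversionImplicit rule, and the Conversion struct are simplified
stand-ins for ART's DataType and HTypeConversion (the real implicit-conversion
rule also covers integral widenings, not just same-type conversions).

#include <cassert>

enum class Type { kInt8, kInt16, kInt32, kInt64 };

// Stand-in for DataType::IsTypeConversionImplicit: a conversion is implicit
// when it cannot change the value. Simplified here to same-type only.
bool IsTypeConversionImplicit(Type input, Type result) {
  return input == result;
}

// Stand-in for an HTypeConversion node.
struct Conversion {
  Type input;
  Type result;
  bool removed = false;
};

// Models VisitTypeConversion: drop implicit conversions before codegen,
// where `removed = true` stands in for ReplaceWith + RemoveInstruction.
void VisitTypeConversion(Conversion& conversion) {
  if (IsTypeConversionImplicit(conversion.input, conversion.result)) {
    conversion.removed = true;
  }
}

int main() {
  Conversion same{Type::kInt32, Type::kInt32};       // implicit: removed
  Conversion narrowing{Type::kInt64, Type::kInt32};  // real work: kept
  VisitTypeConversion(same);
  VisitTypeConversion(narrowing);
  assert(same.removed);
  assert(!narrowing.removed);
  return 0;
}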