diff options
| author | 2018-10-10 12:23:26 +0000 | |
|---|---|---|
| committer | 2018-10-10 12:23:26 +0000 | |
| commit | dc3b4670b170b39a8bd6498d4de69c1513af1db2 (patch) | |
| tree | e1dd4569fd5b49879ec8dfd1b5ef5204a1084407 /compiler | |
| parent | 4ca6cc7ddcbd568b365c428371ea9675799775af (diff) | |
| parent | acc56ac6a33a23abbf4a9a62528e352ea28c5407 (diff) | |
Merge "Introduce a 'baseline' variant for the compiler."
Diffstat (limited to 'compiler')
| -rw-r--r-- | compiler/compiler.h | 1 | ||||
| -rw-r--r-- | compiler/driver/compiler_options.cc | 1 | ||||
| -rw-r--r-- | compiler/driver/compiler_options.h | 5 | ||||
| -rw-r--r-- | compiler/driver/compiler_options_map-inl.h | 6 | ||||
| -rw-r--r-- | compiler/driver/compiler_options_map.def | 1 | ||||
| -rw-r--r-- | compiler/jit/jit_compiler.cc | 2 | ||||
| -rw-r--r-- | compiler/optimizing/code_generator_arm_vixl.cc | 2 | ||||
| -rw-r--r-- | compiler/optimizing/instruction_simplifier.cc | 3 | ||||
| -rw-r--r-- | compiler/optimizing/intrinsics.h | 9 | ||||
| -rw-r--r-- | compiler/optimizing/nodes.h | 4 | ||||
| -rw-r--r-- | compiler/optimizing/optimizing_compiler.cc | 66 | ||||
| -rw-r--r-- | compiler/optimizing/prepare_for_register_allocation.cc | 9 | ||||
| -rw-r--r-- | compiler/optimizing/prepare_for_register_allocation.h | 1 |
13 files changed, 97 insertions(+), 13 deletions(-)
diff --git a/compiler/compiler.h b/compiler/compiler.h index ef3d87f02b..8c07773f4c 100644 --- a/compiler/compiler.h +++ b/compiler/compiler.h @@ -71,6 +71,7 @@ class Compiler { virtual bool JitCompile(Thread* self ATTRIBUTE_UNUSED, jit::JitCodeCache* code_cache ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED, + bool baseline ATTRIBUTE_UNUSED, bool osr ATTRIBUTE_UNUSED, jit::JitLogger* jit_logger ATTRIBUTE_UNUSED) REQUIRES_SHARED(Locks::mutator_lock_) { diff --git a/compiler/driver/compiler_options.cc b/compiler/driver/compiler_options.cc index 8cc6cf10f0..3ab9afc5d6 100644 --- a/compiler/driver/compiler_options.cc +++ b/compiler/driver/compiler_options.cc @@ -47,6 +47,7 @@ CompilerOptions::CompilerOptions() boot_image_(false), core_image_(false), app_image_(false), + baseline_(false), debuggable_(false), generate_debug_info_(kDefaultGenerateDebugInfo), generate_mini_debug_info_(kDefaultGenerateMiniDebugInfo), diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h index 34aceba1c4..e9cbf74428 100644 --- a/compiler/driver/compiler_options.h +++ b/compiler/driver/compiler_options.h @@ -193,6 +193,10 @@ class CompilerOptions final { return boot_image_; } + bool IsBaseline() const { + return baseline_; + } + // Are we compiling a core image (small boot image only used for ART testing)? bool IsCoreImage() const { // Ensure that `core_image_` => `boot_image_`. 
@@ -346,6 +350,7 @@ class CompilerOptions final { bool boot_image_; bool core_image_; bool app_image_; + bool baseline_; bool debuggable_; bool generate_debug_info_; bool generate_mini_debug_info_; diff --git a/compiler/driver/compiler_options_map-inl.h b/compiler/driver/compiler_options_map-inl.h index 32fc887b8e..9914d81122 100644 --- a/compiler/driver/compiler_options_map-inl.h +++ b/compiler/driver/compiler_options_map-inl.h @@ -58,6 +58,9 @@ inline bool ReadCompilerOptions(Base& map, CompilerOptions* options, std::string if (map.Exists(Base::Debuggable)) { options->debuggable_ = true; } + if (map.Exists(Base::Baseline)) { + options->baseline_ = true; + } map.AssignIfExists(Base::TopKProfileThreshold, &options->top_k_profile_threshold_); map.AssignIfExists(Base::AbortOnHardVerifierFailure, &options->abort_on_hard_verifier_failure_); map.AssignIfExists(Base::AbortOnSoftVerifierFailure, &options->abort_on_soft_verifier_failure_); @@ -159,6 +162,9 @@ inline void AddCompilerOptionsArgumentParserOptions(Builder& b) { .Define("--debuggable") .IntoKey(Map::Debuggable) + .Define("--baseline") + .IntoKey(Map::Baseline) + .Define("--top-k-profile-threshold=_") .template WithType<double>().WithRange(0.0, 100.0) .IntoKey(Map::TopKProfileThreshold) diff --git a/compiler/driver/compiler_options_map.def b/compiler/driver/compiler_options_map.def index 529d43fc72..238cd465df 100644 --- a/compiler/driver/compiler_options_map.def +++ b/compiler/driver/compiler_options_map.def @@ -48,6 +48,7 @@ COMPILER_OPTIONS_KEY (bool, GenerateDebugInfo) COMPILER_OPTIONS_KEY (bool, GenerateMiniDebugInfo) COMPILER_OPTIONS_KEY (bool, GenerateBuildID) COMPILER_OPTIONS_KEY (Unit, Debuggable) +COMPILER_OPTIONS_KEY (Unit, Baseline) COMPILER_OPTIONS_KEY (double, TopKProfileThreshold) COMPILER_OPTIONS_KEY (bool, AbortOnHardVerifierFailure) COMPILER_OPTIONS_KEY (bool, AbortOnSoftVerifierFailure) diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc index 3fc559e13b..bc8641a114 100644 
--- a/compiler/jit/jit_compiler.cc +++ b/compiler/jit/jit_compiler.cc @@ -188,7 +188,7 @@ bool JitCompiler::CompileMethod(Thread* self, ArtMethod* method, bool osr) { TimingLogger::ScopedTiming t2("Compiling", &logger); JitCodeCache* const code_cache = runtime->GetJit()->GetCodeCache(); success = compiler_driver_->GetCompiler()->JitCompile( - self, code_cache, method, osr, jit_logger_.get()); + self, code_cache, method, /* baseline= */ false, osr, jit_logger_.get()); } // Trim maps to reduce memory usage. diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc index 17d973653a..130ccf3126 100644 --- a/compiler/optimizing/code_generator_arm_vixl.cc +++ b/compiler/optimizing/code_generator_arm_vixl.cc @@ -4971,7 +4971,7 @@ void InstructionCodeGeneratorARMVIXL::HandleShift(HBinaryOperation* op) { __ Rrx(o_l, low); } } else { - DCHECK(2 <= shift_value && shift_value < 32) << shift_value; + DCHECK(0 <= shift_value && shift_value < 32) << shift_value; if (op->IsShl()) { __ Lsl(o_h, high, shift_value); __ Orr(o_h, o_h, Operand(low, ShiftType::LSR, 32 - shift_value)); diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc index ad50bb877e..2b6ae2019f 100644 --- a/compiler/optimizing/instruction_simplifier.cc +++ b/compiler/optimizing/instruction_simplifier.cc @@ -1181,8 +1181,7 @@ void InstructionSimplifierVisitor::VisitTypeConversion(HTypeConversion* instruct HInstruction* input = instruction->GetInput(); DataType::Type input_type = input->GetType(); DataType::Type result_type = instruction->GetResultType(); - if (DataType::IsTypeConversionImplicit(input_type, result_type)) { - // Remove the implicit conversion; this includes conversion to the same type. 
+ if (instruction->IsImplicitConversion()) { instruction->ReplaceWith(input); instruction->GetBlock()->RemoveInstruction(instruction); RecordSimplification(); diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h index 59012faea7..8245453ab5 100644 --- a/compiler/optimizing/intrinsics.h +++ b/compiler/optimizing/intrinsics.h @@ -240,11 +240,14 @@ void IntrinsicCodeGenerator ## Arch::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNU // Defines a list of unreached intrinsics: that is, method calls that are recognized as // an intrinsic, and then always converted into HIR instructions before they reach any -// architecture-specific intrinsics code generator. +// architecture-specific intrinsics code generator. This only applies to non-baseline +// compilation. #define UNREACHABLE_INTRINSIC(Arch, Name) \ void IntrinsicLocationsBuilder ## Arch::Visit ## Name(HInvoke* invoke) { \ - LOG(FATAL) << "Unreachable: intrinsic " << invoke->GetIntrinsic() \ - << " should have been converted to HIR"; \ + if (!codegen_->GetCompilerOptions().IsBaseline()) { \ + LOG(FATAL) << "Unreachable: intrinsic " << invoke->GetIntrinsic() \ + << " should have been converted to HIR"; \ + } \ } \ void IntrinsicCodeGenerator ## Arch::Visit ## Name(HInvoke* invoke) { \ LOG(FATAL) << "Unreachable: intrinsic " << invoke->GetIntrinsic() \ diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h index 7921061326..97b50d36da 100644 --- a/compiler/optimizing/nodes.h +++ b/compiler/optimizing/nodes.h @@ -5660,6 +5660,10 @@ class HTypeConversion final : public HExpression<1> { bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override { return true; } + // Return whether the conversion is implicit. This includes conversion to the same type. 
+ bool IsImplicitConversion() const { + return DataType::IsTypeConversionImplicit(GetInputType(), GetResultType()); + } // Try to statically evaluate the conversion and return a HConstant // containing the result. If the input cannot be converted, return nullptr. diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc index 46754fe33f..a95ddff188 100644 --- a/compiler/optimizing/optimizing_compiler.cc +++ b/compiler/optimizing/optimizing_compiler.cc @@ -298,6 +298,7 @@ class OptimizingCompiler final : public Compiler { bool JitCompile(Thread* self, jit::JitCodeCache* code_cache, ArtMethod* method, + bool baseline, bool osr, jit::JitLogger* jit_logger) override @@ -383,6 +384,7 @@ class OptimizingCompiler final : public Compiler { CodeVectorAllocator* code_allocator, const DexCompilationUnit& dex_compilation_unit, ArtMethod* method, + bool baseline, bool osr, VariableSizedHandleScope* handles) const; @@ -399,6 +401,12 @@ class OptimizingCompiler final : public Compiler { PassObserver* pass_observer, VariableSizedHandleScope* handles) const; + bool RunBaselineOptimizations(HGraph* graph, + CodeGenerator* codegen, + const DexCompilationUnit& dex_compilation_unit, + PassObserver* pass_observer, + VariableSizedHandleScope* handles) const; + void GenerateJitDebugInfo(ArtMethod* method, const debug::MethodDebugInfo& method_debug_info) REQUIRES_SHARED(Locks::mutator_lock_); @@ -457,6 +465,48 @@ static bool IsInstructionSetSupported(InstructionSet instruction_set) { || instruction_set == InstructionSet::kX86_64; } +bool OptimizingCompiler::RunBaselineOptimizations(HGraph* graph, + CodeGenerator* codegen, + const DexCompilationUnit& dex_compilation_unit, + PassObserver* pass_observer, + VariableSizedHandleScope* handles) const { + switch (codegen->GetCompilerOptions().GetInstructionSet()) { +#ifdef ART_ENABLE_CODEGEN_mips + case InstructionSet::kMips: { + OptimizationDef mips_optimizations[] = { + 
OptDef(OptimizationPass::kPcRelativeFixupsMips) + }; + return RunOptimizations(graph, + codegen, + dex_compilation_unit, + pass_observer, + handles, + mips_optimizations); + } +#endif +#ifdef ART_ENABLE_CODEGEN_x86 + case InstructionSet::kX86: { + OptimizationDef x86_optimizations[] = { + OptDef(OptimizationPass::kPcRelativeFixupsX86), + }; + return RunOptimizations(graph, + codegen, + dex_compilation_unit, + pass_observer, + handles, + x86_optimizations); + } +#endif + default: + UNUSED(graph); + UNUSED(codegen); + UNUSED(dex_compilation_unit); + UNUSED(pass_observer); + UNUSED(handles); + return false; + } +} + bool OptimizingCompiler::RunArchOptimizations(HGraph* graph, CodeGenerator* codegen, const DexCompilationUnit& dex_compilation_unit, @@ -738,6 +788,7 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator, CodeVectorAllocator* code_allocator, const DexCompilationUnit& dex_compilation_unit, ArtMethod* method, + bool baseline, bool osr, VariableSizedHandleScope* handles) const { MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kAttemptBytecodeCompilation); @@ -860,11 +911,11 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator, } } - RunOptimizations(graph, - codegen.get(), - dex_compilation_unit, - &pass_observer, - handles); + if (baseline) { + RunBaselineOptimizations(graph, codegen.get(), dex_compilation_unit, &pass_observer, handles); + } else { + RunOptimizations(graph, codegen.get(), dex_compilation_unit, &pass_observer, handles); + } RegisterAllocator::Strategy regalloc_strategy = compiler_options.GetRegisterAllocationStrategy(); @@ -1041,7 +1092,8 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item, &code_allocator, dex_compilation_unit, method, - /* osr */ false, + compiler_driver->GetCompilerOptions().IsBaseline(), + /* osr= */ false, &handles)); } } @@ -1201,6 +1253,7 @@ bool CanEncodeInlinedMethodInStackMap(const DexFile& caller_dex_file, ArtMethod* bool 
OptimizingCompiler::JitCompile(Thread* self, jit::JitCodeCache* code_cache, ArtMethod* method, + bool baseline, bool osr, jit::JitLogger* jit_logger) { StackHandleScope<3> hs(self); @@ -1315,6 +1368,7 @@ bool OptimizingCompiler::JitCompile(Thread* self, &code_allocator, dex_compilation_unit, method, + baseline, osr, &handles)); if (codegen.get() == nullptr) { diff --git a/compiler/optimizing/prepare_for_register_allocation.cc b/compiler/optimizing/prepare_for_register_allocation.cc index fc81740013..12db8a06de 100644 --- a/compiler/optimizing/prepare_for_register_allocation.cc +++ b/compiler/optimizing/prepare_for_register_allocation.cc @@ -304,4 +304,13 @@ bool PrepareForRegisterAllocation::CanMoveClinitCheck(HInstruction* input, return true; } +void PrepareForRegisterAllocation::VisitTypeConversion(HTypeConversion* instruction) { + // For simplicity, our code generators don't handle implicit type conversion, so ensure + // there are none before hitting codegen. + if (instruction->IsImplicitConversion()) { + instruction->ReplaceWith(instruction->GetInput()); + instruction->GetBlock()->RemoveInstruction(instruction); + } +} + } // namespace art diff --git a/compiler/optimizing/prepare_for_register_allocation.h b/compiler/optimizing/prepare_for_register_allocation.h index a8ab256e27..e0bb76eb22 100644 --- a/compiler/optimizing/prepare_for_register_allocation.h +++ b/compiler/optimizing/prepare_for_register_allocation.h @@ -55,6 +55,7 @@ class PrepareForRegisterAllocation : public HGraphDelegateVisitor { void VisitConstructorFence(HConstructorFence* constructor_fence) override; void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) override; void VisitDeoptimize(HDeoptimize* deoptimize) override; + void VisitTypeConversion(HTypeConversion* instruction) override; bool CanMoveClinitCheck(HInstruction* input, HInstruction* user) const; bool CanEmitConditionAt(HCondition* condition, HInstruction* user) const; |