Diffstat (limited to 'compiler/optimizing')
 compiler/optimizing/code_generator.cc          |  42
 compiler/optimizing/code_generator.h           |   9
 compiler/optimizing/code_generator_arm.cc      |   9
 compiler/optimizing/code_generator_arm64.cc    |   9
 compiler/optimizing/code_generator_mips.cc     | 121
 compiler/optimizing/code_generator_mips.h      |  19
 compiler/optimizing/code_generator_mips64.cc   |   9
 compiler/optimizing/code_generator_x86.cc      |  14
 compiler/optimizing/code_generator_x86_64.cc   |  14
 compiler/optimizing/codegen_test.cc            | 227
 compiler/optimizing/nodes.h                    |   6
 compiler/optimizing/nodes_mips.h               |  35
 compiler/optimizing/pc_relative_fixups_mips.cc |  19
 13 files changed, 357 insertions(+), 176 deletions(-)
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 2087888f4e..6732670ffc 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -1081,13 +1081,6 @@ void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slo
   }
 }
 
-bool CodeGenerator::IsImplicitNullCheckAllowed(HNullCheck* null_check) const {
-  return compiler_options_.GetImplicitNullChecks() &&
-         // Null checks which might throw into a catch block need to save live
-         // registers and therefore cannot be done implicitly.
-         !null_check->CanThrowIntoCatchBlock();
-}
-
 bool CodeGenerator::CanMoveNullCheckToUser(HNullCheck* null_check) {
   HInstruction* first_next_not_move = null_check->GetNextDisregardingMoves();
 
@@ -1096,6 +1089,10 @@ bool CodeGenerator::CanMoveNullCheckToUser(HNullCheck* null_check) {
 }
 
 void CodeGenerator::MaybeRecordImplicitNullCheck(HInstruction* instr) {
+  if (!compiler_options_.GetImplicitNullChecks()) {
+    return;
+  }
+
   // If we are from a static path don't record the pc as we can't throw NPE.
   // NB: having the checks here makes the code much less verbose in the arch
   // specific code generators.
@@ -1114,16 +1111,35 @@ void CodeGenerator::MaybeRecordImplicitNullCheck(HInstruction* instr) {
   // and needs to record the pc.
   if (first_prev_not_move != nullptr && first_prev_not_move->IsNullCheck()) {
     HNullCheck* null_check = first_prev_not_move->AsNullCheck();
-    if (IsImplicitNullCheckAllowed(null_check)) {
-      // TODO: The parallel moves modify the environment. Their changes need to be
-      // reverted otherwise the stack maps at the throw point will not be correct.
-      RecordPcInfo(null_check, null_check->GetDexPc());
-    }
+    // TODO: The parallel moves modify the environment. Their changes need to be
+    // reverted otherwise the stack maps at the throw point will not be correct.
+    RecordPcInfo(null_check, null_check->GetDexPc());
   }
 }
 
+LocationSummary* CodeGenerator::CreateNullCheckLocations(HNullCheck* null_check) {
+  // Note: Using kNoCall allows the method to be treated as leaf (and eliminate the
+  // HSuspendCheck from entry block). However, it will still get a valid stack frame
+  // because the HNullCheck needs an environment.
+  LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
+  // When throwing from a try block, we may need to retrieve dalvik registers from
+  // physical registers and we also need to set up stack mask for GC. This is
+  // implicitly achieved by passing kCallOnSlowPath to the LocationSummary.
+  bool can_throw_into_catch_block = null_check->CanThrowIntoCatchBlock();
+  if (can_throw_into_catch_block) {
+    call_kind = LocationSummary::kCallOnSlowPath;
+  }
+  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(null_check, call_kind);
+  if (can_throw_into_catch_block && compiler_options_.GetImplicitNullChecks()) {
+    locations->SetCustomSlowPathCallerSaves(RegisterSet());  // No caller-save registers.
+  }
+  locations->SetInAt(0, Location::RequiresRegister());
+  DCHECK(!null_check->HasUses());
+  return locations;
+}
+
 void CodeGenerator::GenerateNullCheck(HNullCheck* instruction) {
-  if (IsImplicitNullCheckAllowed(instruction)) {
+  if (compiler_options_.GetImplicitNullChecks()) {
     MaybeRecordStat(kImplicitNullCheckGenerated);
     GenerateImplicitNullCheck(instruction);
   } else {
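The helper introduced above replaces per-backend copies of the same logic. As a quick reference, this is what it produces for each combination of inputs (an illustrative sketch derived from the hunk above, not part of the patch):

    // Outcomes of CreateNullCheckLocations:
    //
    //   CanThrowIntoCatchBlock()   implicit null checks   resulting LocationSummary
    //   false                      on or off              kNoCall (method may remain a leaf)
    //   true                       off                    kCallOnSlowPath
    //   true                       on                     kCallOnSlowPath with an empty custom
    //                                                     caller-save set (RegisterSet())
    //
    // In all cases the checked reference is constrained with
    // SetInAt(0, Location::RequiresRegister()); the x86 and x86-64 backends relax
    // this for explicit checks further down in this change.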
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 0c60a98139..b4d4b9b760 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -313,6 +313,7 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
 
   bool CanMoveNullCheckToUser(HNullCheck* null_check);
   void MaybeRecordImplicitNullCheck(HInstruction* instruction);
+  LocationSummary* CreateNullCheckLocations(HNullCheck* null_check);
   void GenerateNullCheck(HNullCheck* null_check);
   virtual void GenerateImplicitNullCheck(HNullCheck* null_check) = 0;
   virtual void GenerateExplicitNullCheck(HNullCheck* null_check) = 0;
@@ -322,12 +323,6 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
   // TODO: Replace with a catch-entering instruction that records the environment.
   void RecordCatchBlockInfo();
 
-  // Returns true if implicit null checks are allowed in the compiler options
-  // and if the null check is not inside a try block. We currently cannot do
-  // implicit null checks in that case because we need the NullCheckSlowPath to
-  // save live registers, which may be needed by the runtime to set catch phis.
-  bool IsImplicitNullCheckAllowed(HNullCheck* null_check) const;
-
   // TODO: Avoid creating the `std::unique_ptr` here.
   void AddSlowPath(SlowPathCode* slow_path) {
     slow_paths_.push_back(std::unique_ptr<SlowPathCode>(slow_path));
@@ -713,6 +708,8 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
   bool is_leaf_;
 
   // Whether an instruction in the graph accesses the current method.
+  // TODO: Rename: this actually indicates that some instruction in the method
+  // needs the environment including a valid stack frame.
   bool requires_current_method_;
 
   friend class OptimizingCFITest;
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 3cc2598f8f..40c2b9c1ec 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -4251,14 +4251,7 @@ void InstructionCodeGeneratorARM::VisitUnresolvedStaticFieldSet(
 }
 
 void LocationsBuilderARM::VisitNullCheck(HNullCheck* instruction) {
-  LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
-      ? LocationSummary::kCallOnSlowPath
-      : LocationSummary::kNoCall;
-  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
-  locations->SetInAt(0, Location::RequiresRegister());
-  if (instruction->HasUses()) {
-    locations->SetOut(Location::SameAsFirstInput());
-  }
+  codegen_->CreateNullCheckLocations(instruction);
 }
 
 void CodeGeneratorARM::GenerateImplicitNullCheck(HNullCheck* instruction) {
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 179bf76f5b..c00ab56a4c 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -4384,14 +4384,7 @@ void InstructionCodeGeneratorARM64::VisitBooleanNot(HBooleanNot* instruction) {
 }
 
 void LocationsBuilderARM64::VisitNullCheck(HNullCheck* instruction) {
-  LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
-      ? LocationSummary::kCallOnSlowPath
-      : LocationSummary::kNoCall;
-  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
-  locations->SetInAt(0, Location::RequiresRegister());
-  if (instruction->HasUses()) {
-    locations->SetOut(Location::SameAsFirstInput());
-  }
+  codegen_->CreateNullCheckLocations(instruction);
 }
 
 void CodeGeneratorARM64::GenerateImplicitNullCheck(HNullCheck* instruction) {
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index f07f8a0d91..b767aa5ef2 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -5075,14 +5075,7 @@ void InstructionCodeGeneratorMIPS::VisitBooleanNot(HBooleanNot* instruction) {
 }
 
 void LocationsBuilderMIPS::VisitNullCheck(HNullCheck* instruction) {
-  LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
-      ? LocationSummary::kCallOnSlowPath
-      : LocationSummary::kNoCall;
-  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
-  locations->SetInAt(0, Location::RequiresRegister());
-  if (instruction->HasUses()) {
-    locations->SetOut(Location::SameAsFirstInput());
-  }
+  codegen_->CreateNullCheckLocations(instruction);
 }
 
 void CodeGeneratorMIPS::GenerateImplicitNullCheck(HNullCheck* instruction) {
@@ -5824,13 +5817,11 @@ void LocationsBuilderMIPS::VisitPackedSwitch(HPackedSwitch* switch_instr) {
   locations->SetInAt(0, Location::RequiresRegister());
 }
 
-void InstructionCodeGeneratorMIPS::VisitPackedSwitch(HPackedSwitch* switch_instr) {
-  int32_t lower_bound = switch_instr->GetStartValue();
-  int32_t num_entries = switch_instr->GetNumEntries();
-  LocationSummary* locations = switch_instr->GetLocations();
-  Register value_reg = locations->InAt(0).AsRegister<Register>();
-  HBasicBlock* default_block = switch_instr->GetDefaultBlock();
-
+void InstructionCodeGeneratorMIPS::GenPackedSwitchWithCompares(Register value_reg,
+                                                               int32_t lower_bound,
+                                                               uint32_t num_entries,
+                                                               HBasicBlock* switch_block,
+                                                               HBasicBlock* default_block) {
   // Create a set of compare/jumps.
   Register temp_reg = TMP;
   __ Addiu32(temp_reg, value_reg, -lower_bound);
@@ -5839,7 +5830,7 @@ void InstructionCodeGeneratorMIPS::VisitPackedSwitch(HPackedSwitch* switch_instr
   // this case, index >= num_entries must be true. So that we can save one branch instruction.
   __ Bltz(temp_reg, codegen_->GetLabelOf(default_block));
 
-  const ArenaVector<HBasicBlock*>& successors = switch_instr->GetBlock()->GetSuccessors();
+  const ArenaVector<HBasicBlock*>& successors = switch_block->GetSuccessors();
   // Jump to successors[0] if value == lower_bound.
   __ Beqz(temp_reg, codegen_->GetLabelOf(successors[0]));
   int32_t last_index = 0;
@@ -5857,11 +5848,107 @@ void InstructionCodeGeneratorMIPS::VisitPackedSwitch(HPackedSwitch* switch_instr
   }
 
   // And the default for any other value.
-  if (!codegen_->GoesToNextBlock(switch_instr->GetBlock(), default_block)) {
+  if (!codegen_->GoesToNextBlock(switch_block, default_block)) {
     __ B(codegen_->GetLabelOf(default_block));
   }
 }
 
+void InstructionCodeGeneratorMIPS::GenTableBasedPackedSwitch(Register value_reg,
+                                                             Register constant_area,
+                                                             int32_t lower_bound,
+                                                             uint32_t num_entries,
+                                                             HBasicBlock* switch_block,
+                                                             HBasicBlock* default_block) {
+  // Create a jump table.
+  std::vector<MipsLabel*> labels(num_entries);
+  const ArenaVector<HBasicBlock*>& successors = switch_block->GetSuccessors();
+  for (uint32_t i = 0; i < num_entries; i++) {
+    labels[i] = codegen_->GetLabelOf(successors[i]);
+  }
+  JumpTable* table = __ CreateJumpTable(std::move(labels));
+
+  // Is the value in range?
+  __ Addiu32(TMP, value_reg, -lower_bound);
+  if (IsInt<16>(static_cast<int32_t>(num_entries))) {
+    __ Sltiu(AT, TMP, num_entries);
+    __ Beqz(AT, codegen_->GetLabelOf(default_block));
+  } else {
+    __ LoadConst32(AT, num_entries);
+    __ Bgeu(TMP, AT, codegen_->GetLabelOf(default_block));
+  }
+
+  // We are in the range of the table.
+  // Load the target address from the jump table, indexing by the value.
+  __ LoadLabelAddress(AT, constant_area, table->GetLabel());
+  __ Sll(TMP, TMP, 2);
+  __ Addu(TMP, TMP, AT);
+  __ Lw(TMP, TMP, 0);
+  // Compute the absolute target address by adding the table start address
+  // (the table contains offsets to targets relative to its start).
+  __ Addu(TMP, TMP, AT);
+  // And jump.
+  __ Jr(TMP);
+  __ NopIfNoReordering();
+}
+
+void InstructionCodeGeneratorMIPS::VisitPackedSwitch(HPackedSwitch* switch_instr) {
+  int32_t lower_bound = switch_instr->GetStartValue();
+  uint32_t num_entries = switch_instr->GetNumEntries();
+  LocationSummary* locations = switch_instr->GetLocations();
+  Register value_reg = locations->InAt(0).AsRegister<Register>();
+  HBasicBlock* switch_block = switch_instr->GetBlock();
+  HBasicBlock* default_block = switch_instr->GetDefaultBlock();
+
+  if (codegen_->GetInstructionSetFeatures().IsR6() &&
+      num_entries > kPackedSwitchJumpTableThreshold) {
+    // R6 uses PC-relative addressing to access the jump table.
+    // R2, OTOH, requires an HMipsComputeBaseMethodAddress input to access
+    // the jump table and it is implemented by changing HPackedSwitch to
+    // HMipsPackedSwitch, which bears HMipsComputeBaseMethodAddress.
+    // See VisitMipsPackedSwitch() for the table-based implementation on R2.
+    GenTableBasedPackedSwitch(value_reg,
+                              ZERO,
+                              lower_bound,
+                              num_entries,
+                              switch_block,
+                              default_block);
+  } else {
+    GenPackedSwitchWithCompares(value_reg,
+                                lower_bound,
+                                num_entries,
+                                switch_block,
+                                default_block);
+  }
+}
+
+void LocationsBuilderMIPS::VisitMipsPackedSwitch(HMipsPackedSwitch* switch_instr) {
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
+  locations->SetInAt(0, Location::RequiresRegister());
+  // Constant area pointer (HMipsComputeBaseMethodAddress).
+  locations->SetInAt(1, Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorMIPS::VisitMipsPackedSwitch(HMipsPackedSwitch* switch_instr) {
+  int32_t lower_bound = switch_instr->GetStartValue();
+  uint32_t num_entries = switch_instr->GetNumEntries();
+  LocationSummary* locations = switch_instr->GetLocations();
+  Register value_reg = locations->InAt(0).AsRegister<Register>();
+  Register constant_area = locations->InAt(1).AsRegister<Register>();
+  HBasicBlock* switch_block = switch_instr->GetBlock();
+  HBasicBlock* default_block = switch_instr->GetDefaultBlock();
+
+  // This is an R2-only path. HPackedSwitch has been changed to
+  // HMipsPackedSwitch, which bears HMipsComputeBaseMethodAddress
+  // required to address the jump table relative to PC.
+  GenTableBasedPackedSwitch(value_reg,
+                            constant_area,
+                            lower_bound,
+                            num_entries,
+                            switch_block,
+                            default_block);
+}
+
 void LocationsBuilderMIPS::VisitMipsComputeBaseMethodAddress(
     HMipsComputeBaseMethodAddress* insn) {
   LocationSummary* locations =
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index 003998129e..956a466f9b 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -218,6 +218,14 @@ class InstructionCodeGeneratorMIPS : public InstructionCodeGenerator {
 
   MipsAssembler* GetAssembler() const { return assembler_; }
 
+  // Compare-and-jump packed switch generates approx. 3 + 2.5 * N 32-bit
+  // instructions for N cases.
+  // Table-based packed switch generates approx. 11 32-bit instructions
+  // and N 32-bit data words for N cases.
+  // At N = 6 they come out as 18 and 17 32-bit words respectively.
+  // We switch to the table-based method starting with 7 cases.
+  static constexpr uint32_t kPackedSwitchJumpTableThreshold = 6;
+
  private:
   void GenerateClassInitializationCheck(SlowPathCodeMIPS* slow_path, Register class_reg);
   void GenerateMemoryBarrier(MemBarrierKind kind);
@@ -262,6 +270,17 @@ class InstructionCodeGeneratorMIPS : public InstructionCodeGenerator {
   void GenerateDivRemIntegral(HBinaryOperation* instruction);
   void HandleGoto(HInstruction* got, HBasicBlock* successor);
   auto GetImplicitNullChecker(HInstruction* instruction);
+  void GenPackedSwitchWithCompares(Register value_reg,
+                                   int32_t lower_bound,
+                                   uint32_t num_entries,
+                                   HBasicBlock* switch_block,
+                                   HBasicBlock* default_block);
+  void GenTableBasedPackedSwitch(Register value_reg,
+                                 Register constant_area,
+                                 int32_t lower_bound,
+                                 uint32_t num_entries,
+                                 HBasicBlock* switch_block,
+                                 HBasicBlock* default_block);
 
   MipsAssembler* const assembler_;
   CodeGeneratorMIPS* const codegen_;
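The comment above states the cost model behind the new threshold; the break-even arithmetic is easy to check mechanically. A standalone sketch (not part of the patch; counts are doubled so the 2.5-words-per-case term stays in integers):

    #include <cstdint>

    // Twice the approximate code size, in 32-bit words, of an N-case packed switch.
    constexpr uint32_t CompareChainWordsX2(uint32_t n) { return 2 * 3 + 5 * n; }  // 3 + 2.5 * N
    constexpr uint32_t JumpTableWordsX2(uint32_t n) { return 2 * (11 + n); }      // 11 + N

    static_assert(CompareChainWordsX2(6) == 2 * 18, "18 words at N = 6, as the comment says");
    static_assert(JumpTableWordsX2(6) == 2 * 17, "17 words at N = 6, as the comment says");
    static_assert(JumpTableWordsX2(7) < CompareChainWordsX2(7),
                  "from N = 7 on, the table is strictly smaller");

At N = 6 the two strategies are within one word of each other, so the code generator keeps the compare chain up to the threshold of 6 and switches to the table only when `num_entries` exceeds it.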
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 664d498b18..4d87523206 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -3461,14 +3461,7 @@ void InstructionCodeGeneratorMIPS64::VisitBooleanNot(HBooleanNot* instruction) {
 }
 
 void LocationsBuilderMIPS64::VisitNullCheck(HNullCheck* instruction) {
-  LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
-      ? LocationSummary::kCallOnSlowPath
-      : LocationSummary::kNoCall;
-  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
-  locations->SetInAt(0, Location::RequiresRegister());
-  if (instruction->HasUses()) {
-    locations->SetOut(Location::SameAsFirstInput());
-  }
+  codegen_->CreateNullCheckLocations(instruction);
 }
 
 void CodeGeneratorMIPS64::GenerateImplicitNullCheck(HNullCheck* instruction) {
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index e18b366411..28db29cb58 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -4950,16 +4950,10 @@ void InstructionCodeGeneratorX86::VisitUnresolvedStaticFieldSet(
 }
 
 void LocationsBuilderX86::VisitNullCheck(HNullCheck* instruction) {
-  LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
-      ? LocationSummary::kCallOnSlowPath
-      : LocationSummary::kNoCall;
-  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
-  Location loc = codegen_->IsImplicitNullCheckAllowed(instruction)
-      ? Location::RequiresRegister()
-      : Location::Any();
-  locations->SetInAt(0, loc);
-  if (instruction->HasUses()) {
-    locations->SetOut(Location::SameAsFirstInput());
+  LocationSummary* locations = codegen_->CreateNullCheckLocations(instruction);
+  if (!codegen_->GetCompilerOptions().GetImplicitNullChecks()) {
+    // Explicit null checks can use any location.
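The x86 hunk above (and the identical x86-64 hunk below) keeps one degree of freedom the shared helper does not grant: when only explicit checks are generated, the input may live anywhere, because the compare can address a stack slot directly. A hypothetical outline of the shape such an explicit check then takes (the patch does not touch GenerateExplicitNullCheck; the slow-path setup and the constant-input case are elided here):

    Location obj = instruction->GetLocations()->InAt(0);
    if (obj.IsRegister()) {
      // Null is all-zero bits, so a register self-test sets the flags on null.
      __ testl(obj.AsRegister<Register>(), obj.AsRegister<Register>());
    } else if (obj.IsStackSlot()) {
      // With Location::Any() the reference may have been spilled; compare in place.
      __ cmpl(Address(ESP, obj.GetStackIndex()), Immediate(0));
    }
    __ j(kEqual, slow_path->GetEntryLabel());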
+    locations->SetInAt(0, Location::Any());
   }
 }
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 15307fe50c..88d98fc1e1 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -4459,16 +4459,10 @@ void InstructionCodeGeneratorX86_64::VisitUnresolvedStaticFieldSet(
 }
 
 void LocationsBuilderX86_64::VisitNullCheck(HNullCheck* instruction) {
-  LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
-      ? LocationSummary::kCallOnSlowPath
-      : LocationSummary::kNoCall;
-  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
-  Location loc = codegen_->IsImplicitNullCheckAllowed(instruction)
-      ? Location::RequiresRegister()
-      : Location::Any();
-  locations->SetInAt(0, loc);
-  if (instruction->HasUses()) {
-    locations->SetOut(Location::SameAsFirstInput());
+  LocationSummary* locations = codegen_->CreateNullCheckLocations(instruction);
+  if (!codegen_->GetCompilerOptions().GetImplicitNullChecks()) {
+    // Explicit null checks can use any location.
+    locations->SetInAt(0, Location::Any());
   }
 }
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index d00a7867e8..d9347f604e 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -74,6 +74,24 @@
 
 namespace art {
 
+typedef CodeGenerator* (*CreateCodegenFn)(HGraph*, const CompilerOptions&);
+
+class CodegenTargetConfig {
+ public:
+  CodegenTargetConfig(InstructionSet isa, CreateCodegenFn create_codegen)
+      : isa_(isa), create_codegen_(create_codegen) {
+  }
+  InstructionSet GetInstructionSet() const { return isa_; }
+  CodeGenerator* CreateCodeGenerator(HGraph* graph, const CompilerOptions& compiler_options) {
+    return create_codegen_(graph, compiler_options);
+  }
+
+ private:
+  CodegenTargetConfig() {}
+  InstructionSet isa_;
+  CreateCodegenFn create_codegen_;
+};
+
 #ifdef ART_ENABLE_CODEGEN_arm
 // Provide our own codegen, that ensures the C calling conventions
 // are preserved. Currently, ART and C do not match as R4 is caller-save
@@ -222,12 +240,7 @@ static void Run(const InternalCodeAllocator& allocator,
   VerifyGeneratedCode(target_isa, f, has_result, expected);
 }
 
-template <typename Expected>
-static void RunCode(CodeGenerator* codegen,
-                    HGraph* graph,
-                    std::function<void(HGraph*)> hook_before_codegen,
-                    bool has_result,
-                    Expected expected) {
+static void ValidateGraph(HGraph* graph) {
   GraphChecker graph_checker(graph);
   graph_checker.Run();
   if (!graph_checker.IsValid()) {
@@ -236,92 +249,129 @@ static void RunCode(CodeGenerator* codegen,
     }
   }
   ASSERT_TRUE(graph_checker.IsValid());
+}
 
+template <typename Expected>
+static void RunCodeNoCheck(CodeGenerator* codegen,
+                           HGraph* graph,
+                           std::function<void(HGraph*)> hook_before_codegen,
+                           bool has_result,
+                           Expected expected) {
   SsaLivenessAnalysis liveness(graph, codegen);
-
   PrepareForRegisterAllocation(graph).Run();
   liveness.Analyze();
   RegisterAllocator::Create(graph->GetArena(), codegen, liveness)->AllocateRegisters();
   hook_before_codegen(graph);
-
   InternalCodeAllocator allocator;
   codegen->Compile(&allocator);
   Run(allocator, *codegen, has_result, expected);
 }
 
 template <typename Expected>
-static void RunCode(InstructionSet target_isa,
+static void RunCode(CodeGenerator* codegen,
+                    HGraph* graph,
+                    std::function<void(HGraph*)> hook_before_codegen,
+                    bool has_result,
+                    Expected expected) {
+  ValidateGraph(graph);
+  RunCodeNoCheck(codegen, graph, hook_before_codegen, has_result, expected);
+}
+
+template <typename Expected>
+static void RunCode(CodegenTargetConfig target_config,
                     HGraph* graph,
                     std::function<void(HGraph*)> hook_before_codegen,
                     bool has_result,
                     Expected expected) {
   CompilerOptions compiler_options;
+  CodeGenerator* codegen = target_config.CreateCodeGenerator(graph, compiler_options);
+  RunCode(codegen, graph, hook_before_codegen, has_result, expected);
+}
+
 #ifdef ART_ENABLE_CODEGEN_arm
-  if (target_isa == kArm || target_isa == kThumb2) {
-    std::unique_ptr<const ArmInstructionSetFeatures> features_arm(
-        ArmInstructionSetFeatures::FromCppDefines());
-    TestCodeGeneratorARM codegenARM(graph, *features_arm.get(), compiler_options);
-    RunCode(&codegenARM, graph, hook_before_codegen, has_result, expected);
-  }
+CodeGenerator* create_codegen_arm(HGraph* graph, const CompilerOptions& compiler_options) {
+  std::unique_ptr<const ArmInstructionSetFeatures> features_arm(
+      ArmInstructionSetFeatures::FromCppDefines());
+  return new (graph->GetArena()) TestCodeGeneratorARM(graph,
+                                                      *features_arm.get(),
+                                                      compiler_options);
+}
 #endif
 
 #ifdef ART_ENABLE_CODEGEN_arm64
-  if (target_isa == kArm64) {
-    std::unique_ptr<const Arm64InstructionSetFeatures> features_arm64(
-        Arm64InstructionSetFeatures::FromCppDefines());
-    arm64::CodeGeneratorARM64 codegenARM64(graph, *features_arm64.get(), compiler_options);
-    RunCode(&codegenARM64, graph, hook_before_codegen, has_result, expected);
-  }
+CodeGenerator* create_codegen_arm64(HGraph* graph, const CompilerOptions& compiler_options) {
+  std::unique_ptr<const Arm64InstructionSetFeatures> features_arm64(
+      Arm64InstructionSetFeatures::FromCppDefines());
+  return new (graph->GetArena()) arm64::CodeGeneratorARM64(graph,
+                                                           *features_arm64.get(),
+                                                           compiler_options);
+}
 #endif
 
 #ifdef ART_ENABLE_CODEGEN_x86
-  if (target_isa == kX86) {
-    std::unique_ptr<const X86InstructionSetFeatures> features_x86(
-        X86InstructionSetFeatures::FromCppDefines());
-    TestCodeGeneratorX86 codegenX86(graph, *features_x86.get(), compiler_options);
-    RunCode(&codegenX86, graph, hook_before_codegen, has_result, expected);
-  }
+CodeGenerator* create_codegen_x86(HGraph* graph, const CompilerOptions& compiler_options) {
+  std::unique_ptr<const X86InstructionSetFeatures> features_x86(
+      X86InstructionSetFeatures::FromCppDefines());
+  return new (graph->GetArena()) TestCodeGeneratorX86(graph, *features_x86.get(), compiler_options);
+}
 #endif
 
 #ifdef ART_ENABLE_CODEGEN_x86_64
-  if (target_isa == kX86_64) {
-    std::unique_ptr<const X86_64InstructionSetFeatures> features_x86_64(
-        X86_64InstructionSetFeatures::FromCppDefines());
-    x86_64::CodeGeneratorX86_64 codegenX86_64(graph, *features_x86_64.get(), compiler_options);
-    RunCode(&codegenX86_64, graph, hook_before_codegen, has_result, expected);
-  }
+CodeGenerator* create_codegen_x86_64(HGraph* graph, const CompilerOptions& compiler_options) {
+  std::unique_ptr<const X86_64InstructionSetFeatures> features_x86_64(
+      X86_64InstructionSetFeatures::FromCppDefines());
+  return new (graph->GetArena())
+      x86_64::CodeGeneratorX86_64(graph, *features_x86_64.get(), compiler_options);
+}
 #endif
 
 #ifdef ART_ENABLE_CODEGEN_mips
-  if (target_isa == kMips) {
-    std::unique_ptr<const MipsInstructionSetFeatures> features_mips(
-        MipsInstructionSetFeatures::FromCppDefines());
-    mips::CodeGeneratorMIPS codegenMIPS(graph, *features_mips.get(), compiler_options);
-    RunCode(&codegenMIPS, graph, hook_before_codegen, has_result, expected);
-  }
+CodeGenerator* create_codegen_mips(HGraph* graph, const CompilerOptions& compiler_options) {
+  std::unique_ptr<const MipsInstructionSetFeatures> features_mips(
+      MipsInstructionSetFeatures::FromCppDefines());
+  return new (graph->GetArena())
+      mips::CodeGeneratorMIPS(graph, *features_mips.get(), compiler_options);
+}
 #endif
 
 #ifdef ART_ENABLE_CODEGEN_mips64
-  if (target_isa == kMips64) {
-    std::unique_ptr<const Mips64InstructionSetFeatures> features_mips64(
-        Mips64InstructionSetFeatures::FromCppDefines());
-    mips64::CodeGeneratorMIPS64 codegenMIPS64(graph, *features_mips64.get(), compiler_options);
-    RunCode(&codegenMIPS64, graph, hook_before_codegen, has_result, expected);
-  }
-#endif
+CodeGenerator* create_codegen_mips64(HGraph* graph, const CompilerOptions& compiler_options) {
+  std::unique_ptr<const Mips64InstructionSetFeatures> features_mips64(
+      Mips64InstructionSetFeatures::FromCppDefines());
+  return new (graph->GetArena())
+      mips64::CodeGeneratorMIPS64(graph, *features_mips64.get(), compiler_options);
 }
+#endif
 
-static ::std::vector<InstructionSet> GetTargetISAs() {
-  ::std::vector<InstructionSet> v;
-  // Add all ISAs that are executable on hardware or on simulator.
-  const ::std::vector<InstructionSet> executable_isa_candidates = {
-    kArm,
-    kArm64,
-    kThumb2,
-    kX86,
-    kX86_64,
-    kMips,
-    kMips64
+// Return all combinations of ISA and code generator that are executable on
+// hardware, or on simulator, and that we'd like to test.
+static ::std::vector<CodegenTargetConfig> GetTargetConfigs() {
+  ::std::vector<CodegenTargetConfig> v;
+  ::std::vector<CodegenTargetConfig> test_config_candidates = {
+#ifdef ART_ENABLE_CODEGEN_arm
+    CodegenTargetConfig(kArm, create_codegen_arm),
+    CodegenTargetConfig(kThumb2, create_codegen_arm),
+#endif
+#ifdef ART_ENABLE_CODEGEN_arm64
+    CodegenTargetConfig(kArm64, create_codegen_arm64),
+#endif
+#ifdef ART_ENABLE_CODEGEN_x86
+    CodegenTargetConfig(kX86, create_codegen_x86),
+#endif
+#ifdef ART_ENABLE_CODEGEN_x86_64
+    CodegenTargetConfig(kX86_64, create_codegen_x86_64),
+#endif
+#ifdef ART_ENABLE_CODEGEN_mips
+    CodegenTargetConfig(kMips, create_codegen_mips),
+#endif
+#ifdef ART_ENABLE_CODEGEN_mips64
+    CodegenTargetConfig(kMips64, create_codegen_mips64)
+#endif
   };
 
-  for (auto target_isa : executable_isa_candidates) {
-    if (CanExecute(target_isa)) {
-      v.push_back(target_isa);
+  for (auto test_config : test_config_candidates) {
+    if (CanExecute(test_config.GetInstructionSet())) {
+      v.push_back(test_config);
     }
   }
@@ -331,26 +381,26 @@ static ::std::vector<InstructionSet> GetTargetISAs() {
 static void TestCode(const uint16_t* data,
                      bool has_result = false,
                      int32_t expected = 0) {
-  for (InstructionSet target_isa : GetTargetISAs()) {
+  for (CodegenTargetConfig target_config : GetTargetConfigs()) {
     ArenaPool pool;
     ArenaAllocator arena(&pool);
     HGraph* graph = CreateCFG(&arena, data);
     // Remove suspend checks, they cannot be executed in this context.
     RemoveSuspendChecks(graph);
-    RunCode(target_isa, graph, [](HGraph*) {}, has_result, expected);
+    RunCode(target_config, graph, [](HGraph*) {}, has_result, expected);
   }
 }
 
 static void TestCodeLong(const uint16_t* data,
                          bool has_result,
                          int64_t expected) {
-  for (InstructionSet target_isa : GetTargetISAs()) {
+  for (CodegenTargetConfig target_config : GetTargetConfigs()) {
     ArenaPool pool;
     ArenaAllocator arena(&pool);
     HGraph* graph = CreateCFG(&arena, data, Primitive::kPrimLong);
     // Remove suspend checks, they cannot be executed in this context.
     RemoveSuspendChecks(graph);
-    RunCode(target_isa, graph, [](HGraph*) {}, has_result, expected);
+    RunCode(target_config, graph, [](HGraph*) {}, has_result, expected);
   }
 }
 
@@ -667,7 +717,7 @@ TEST_F(CodegenTest, ReturnMulIntLit16) {
 }
 
 TEST_F(CodegenTest, NonMaterializedCondition) {
-  for (InstructionSet target_isa : GetTargetISAs()) {
+  for (CodegenTargetConfig target_config : GetTargetConfigs()) {
     ArenaPool pool;
     ArenaAllocator allocator(&pool);
 
@@ -715,12 +765,12 @@ TEST_F(CodegenTest, NonMaterializedCondition) {
       block->InsertInstructionBefore(move, block->GetLastInstruction());
     };
 
-    RunCode(target_isa, graph, hook_before_codegen, true, 0);
+    RunCode(target_config, graph, hook_before_codegen, true, 0);
   }
 }
 
 TEST_F(CodegenTest, MaterializedCondition1) {
-  for (InstructionSet target_isa : GetTargetISAs()) {
+  for (CodegenTargetConfig target_config : GetTargetConfigs()) {
     // Check that condition are materialized correctly. A materialized condition
     // should yield `1` if it evaluated to true, and `0` otherwise.
     // We force the materialization of comparisons for different combinations of
@@ -762,13 +812,13 @@ TEST_F(CodegenTest, MaterializedCondition1) {
       HParallelMove* move = new (graph_in->GetArena()) HParallelMove(graph_in->GetArena());
       block->InsertInstructionBefore(move, block->GetLastInstruction());
     };
-      RunCode(target_isa, graph, hook_before_codegen, true, lhs[i] < rhs[i]);
+      RunCode(target_config, graph, hook_before_codegen, true, lhs[i] < rhs[i]);
     }
   }
 }
 
 TEST_F(CodegenTest, MaterializedCondition2) {
-  for (InstructionSet target_isa : GetTargetISAs()) {
+  for (CodegenTargetConfig target_config : GetTargetConfigs()) {
     // Check that HIf correctly interprets a materialized condition.
     // We force the materialization of comparisons for different combinations of
     // inputs. An HIf takes the materialized combination as input and returns a
@@ -830,7 +880,7 @@ TEST_F(CodegenTest, MaterializedCondition2) {
       HParallelMove* move = new (graph_in->GetArena()) HParallelMove(graph_in->GetArena());
      block->InsertInstructionBefore(move, block->GetLastInstruction());
     };
-      RunCode(target_isa, graph, hook_before_codegen, true, lhs[i] < rhs[i]);
+      RunCode(target_config, graph, hook_before_codegen, true, lhs[i] < rhs[i]);
     }
   }
 }
@@ -859,7 +909,7 @@ static void TestComparison(IfCondition condition,
                            int64_t i,
                            int64_t j,
                            Primitive::Type type,
-                           const InstructionSet target_isa) {
+                           const CodegenTargetConfig target_config) {
   ArenaPool pool;
   ArenaAllocator allocator(&pool);
   HGraph* graph = CreateGraph(&allocator);
@@ -941,23 +991,16 @@ static void TestComparison(IfCondition condition,
   block->AddInstruction(new (&allocator) HReturn(comparison));
 
   graph->BuildDominatorTree();
-  RunCode(target_isa, graph, [](HGraph*) {}, true, expected_result);
+  RunCode(target_config, graph, [](HGraph*) {}, true, expected_result);
 }
 
 TEST_F(CodegenTest, ComparisonsInt) {
-  for (InstructionSet target_isa : GetTargetISAs()) {
+  for (CodegenTargetConfig target_config : GetTargetConfigs()) {
     for (int64_t i = -1; i <= 1; i++) {
       for (int64_t j = -1; j <= 1; j++) {
-        TestComparison(kCondEQ, i, j, Primitive::kPrimInt, target_isa);
-        TestComparison(kCondNE, i, j, Primitive::kPrimInt, target_isa);
-        TestComparison(kCondLT, i, j, Primitive::kPrimInt, target_isa);
-        TestComparison(kCondLE, i, j, Primitive::kPrimInt, target_isa);
-        TestComparison(kCondGT, i, j, Primitive::kPrimInt, target_isa);
-        TestComparison(kCondGE, i, j, Primitive::kPrimInt, target_isa);
-        TestComparison(kCondB, i, j, Primitive::kPrimInt, target_isa);
-        TestComparison(kCondBE, i, j, Primitive::kPrimInt, target_isa);
-        TestComparison(kCondA, i, j, Primitive::kPrimInt, target_isa);
-        TestComparison(kCondAE, i, j, Primitive::kPrimInt, target_isa);
+        for (int cond = kCondFirst; cond <= kCondLast; cond++) {
+          TestComparison(static_cast<IfCondition>(cond), i, j, Primitive::kPrimInt, target_config);
+        }
       }
     }
   }
@@ -969,23 +1012,17 @@ TEST_F(CodegenTest, ComparisonsLong) {
     return;
   }
 
-  for (InstructionSet target_isa : GetTargetISAs()) {
-    if (target_isa == kMips || target_isa == kMips64) {
+  for (CodegenTargetConfig target_config : GetTargetConfigs()) {
+    if ((target_config.GetInstructionSet() == kMips) ||
+        (target_config.GetInstructionSet() == kMips64)) {
      continue;
    }
 
     for (int64_t i = -1; i <= 1; i++) {
       for (int64_t j = -1; j <= 1; j++) {
-        TestComparison(kCondEQ, i, j, Primitive::kPrimLong, target_isa);
-        TestComparison(kCondNE, i, j, Primitive::kPrimLong, target_isa);
-        TestComparison(kCondLT, i, j, Primitive::kPrimLong, target_isa);
-        TestComparison(kCondLE, i, j, Primitive::kPrimLong, target_isa);
-        TestComparison(kCondGT, i, j, Primitive::kPrimLong, target_isa);
-        TestComparison(kCondGE, i, j, Primitive::kPrimLong, target_isa);
-        TestComparison(kCondB, i, j, Primitive::kPrimLong, target_isa);
-        TestComparison(kCondBE, i, j, Primitive::kPrimLong, target_isa);
-        TestComparison(kCondA, i, j, Primitive::kPrimLong, target_isa);
-        TestComparison(kCondAE, i, j, Primitive::kPrimLong, target_isa);
+        for (int cond = kCondFirst; cond <= kCondLast; cond++) {
+          TestComparison(static_cast<IfCondition>(cond), i, j, Primitive::kPrimLong, target_config);
+        }
       }
     }
   }
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 149a71d1b9..119b62a16b 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -109,6 +109,9 @@ enum IfCondition {
   kCondBE,  // <=
   kCondA,   // >
   kCondAE,  // >=
+  // First and last aliases.
+  kCondFirst = kCondEQ,
+  kCondLast = kCondAE,
 };
 
 enum GraphAnalysisResult {
@@ -1311,7 +1314,8 @@ class HLoopInformationOutwardIterator : public ValueObject {
 #else
 #define FOR_EACH_CONCRETE_INSTRUCTION_MIPS(M) \
   M(MipsComputeBaseMethodAddress, Instruction) \
-  M(MipsDexCacheArraysBase, Instruction)
+  M(MipsDexCacheArraysBase, Instruction) \
+  M(MipsPackedSwitch, Instruction)
 #endif
 
 #define FOR_EACH_CONCRETE_INSTRUCTION_MIPS64(M)
diff --git a/compiler/optimizing/nodes_mips.h b/compiler/optimizing/nodes_mips.h
index de77245e17..36431c1fb9 100644
--- a/compiler/optimizing/nodes_mips.h
+++ b/compiler/optimizing/nodes_mips.h
@@ -66,6 +66,41 @@ class HMipsDexCacheArraysBase : public HExpression<0> {
   DISALLOW_COPY_AND_ASSIGN(HMipsDexCacheArraysBase);
 };
 
+// Mips version of HPackedSwitch that holds a pointer to the base method address.
+class HMipsPackedSwitch FINAL : public HTemplateInstruction<2> {
+ public:
+  HMipsPackedSwitch(int32_t start_value,
+                    int32_t num_entries,
+                    HInstruction* input,
+                    HMipsComputeBaseMethodAddress* method_base,
+                    uint32_t dex_pc)
+    : HTemplateInstruction(SideEffects::None(), dex_pc),
+      start_value_(start_value),
+      num_entries_(num_entries) {
+    SetRawInputAt(0, input);
+    SetRawInputAt(1, method_base);
+  }
+
+  bool IsControlFlow() const OVERRIDE { return true; }
+
+  int32_t GetStartValue() const { return start_value_; }
+
+  int32_t GetNumEntries() const { return num_entries_; }
+
+  HBasicBlock* GetDefaultBlock() const {
+    // Last entry is the default block.
+    return GetBlock()->GetSuccessors()[num_entries_];
+  }
+
+  DECLARE_INSTRUCTION(MipsPackedSwitch);
+
+ private:
+  const int32_t start_value_;
+  const int32_t num_entries_;
+
+  DISALLOW_COPY_AND_ASSIGN(HMipsPackedSwitch);
+};
+
 }  // namespace art
 
 #endif  // ART_COMPILER_OPTIMIZING_NODES_MIPS_H_
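GetDefaultBlock() above leans on the successor layout that HMipsPackedSwitch inherits from the generic HPackedSwitch it replaces; spelled out for reference (a recap of the convention, not new code):

    // For num_entries_ == N, the owning block has N + 1 successors:
    //   successors[0] .. successors[N - 1]  -> targets for start_value_ .. start_value_ + N - 1
    //   successors[N]                       -> the default block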
diff --git a/compiler/optimizing/pc_relative_fixups_mips.cc b/compiler/optimizing/pc_relative_fixups_mips.cc
index c6d297df4f..6006e6cf5d 100644
--- a/compiler/optimizing/pc_relative_fixups_mips.cc
+++ b/compiler/optimizing/pc_relative_fixups_mips.cc
@@ -92,6 +92,25 @@ class PCRelativeHandlerVisitor : public HGraphVisitor {
     }
   }
 
+  void VisitPackedSwitch(HPackedSwitch* switch_insn) OVERRIDE {
+    if (switch_insn->GetNumEntries() <=
+        InstructionCodeGeneratorMIPS::kPackedSwitchJumpTableThreshold) {
+      return;
+    }
+    // We need to replace the HPackedSwitch with a HMipsPackedSwitch in order to
+    // address the constant area.
+    InitializePCRelativeBasePointer();
+    HGraph* graph = GetGraph();
+    HBasicBlock* block = switch_insn->GetBlock();
+    HMipsPackedSwitch* mips_switch = new (graph->GetArena()) HMipsPackedSwitch(
+        switch_insn->GetStartValue(),
+        switch_insn->GetNumEntries(),
+        switch_insn->InputAt(0),
+        base_,
+        switch_insn->GetDexPc());
+    block->ReplaceAndRemoveInstructionWith(switch_insn, mips_switch);
+  }
+
   void HandleInvoke(HInvoke* invoke) {
     // If this is an invoke-static/-direct with PC-relative dex cache array
     // addressing, we need the PC-relative address base.
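Putting the fixup pass and the two code-generator paths together, the packed-switch lowering in this change comes out as follows (a summary, assuming the fixup pass is applied only for R2 cores, as the comments in the code generator indicate):

    // num_entries <= 6          -> compare/jump chain, on R2 and R6 alike
    // num_entries >  6 on R6    -> HPackedSwitch kept; GenTableBasedPackedSwitch
    //                              addresses the jump table PC-relatively
    //                              (constant_area passed as ZERO)
    // num_entries >  6 on R2    -> HPackedSwitch rewritten here to HMipsPackedSwitch,
    //                              whose second input (HMipsComputeBaseMethodAddress)
    //                              supplies the base for addressing the table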