Diffstat (limited to 'compiler')
23 files changed, 402 insertions, 135 deletions
diff --git a/compiler/dex/bb_optimizations.h b/compiler/dex/bb_optimizations.h
index eb897f00e3..d1d5ad9715 100644
--- a/compiler/dex/bb_optimizations.h
+++ b/compiler/dex/bb_optimizations.h
@@ -71,26 +71,28 @@ class CacheMethodLoweringInfo : public PassME {
 };
 
 /**
- * @class CallInlining
- * @brief Perform method inlining pass.
+ * @class SpecialMethodInliner
+ * @brief Performs method inlining pass on special kinds of methods.
+ * @details Special methods are methods that fall in one of the following categories:
+ * empty, instance getter, instance setter, argument return, and constant return.
  */
-class CallInlining : public PassME {
+class SpecialMethodInliner : public PassME {
  public:
-  CallInlining() : PassME("CallInlining") {
+  SpecialMethodInliner() : PassME("SpecialMethodInliner") {
   }
 
   bool Gate(const PassDataHolder* data) const {
     DCHECK(data != nullptr);
     CompilationUnit* cUnit = down_cast<const PassMEDataHolder*>(data)->c_unit;
     DCHECK(cUnit != nullptr);
-    return cUnit->mir_graph->InlineCallsGate();
+    return cUnit->mir_graph->InlineSpecialMethodsGate();
   }
 
   void Start(PassDataHolder* data) const {
     DCHECK(data != nullptr);
     CompilationUnit* cUnit = down_cast<PassMEDataHolder*>(data)->c_unit;
     DCHECK(cUnit != nullptr);
-    cUnit->mir_graph->InlineCallsStart();
+    cUnit->mir_graph->InlineSpecialMethodsStart();
   }
 
   bool Worker(const PassDataHolder* data) const {
@@ -100,7 +102,7 @@ class CallInlining : public PassME {
     DCHECK(cUnit != nullptr);
     BasicBlock* bb = pass_me_data_holder->bb;
     DCHECK(bb != nullptr);
-    cUnit->mir_graph->InlineCalls(bb);
+    cUnit->mir_graph->InlineSpecialMethods(bb);
     // No need of repeating, so just return false.
     return false;
   }
@@ -109,7 +111,7 @@ class CallInlining : public PassME {
     DCHECK(data != nullptr);
     CompilationUnit* cUnit = down_cast<PassMEDataHolder*>(data)->c_unit;
     DCHECK(cUnit != nullptr);
-    cUnit->mir_graph->InlineCallsEnd();
+    cUnit->mir_graph->InlineSpecialMethodsEnd();
   }
 };
diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc
index b16cf14b02..711743d69b 100644
--- a/compiler/dex/frontend.cc
+++ b/compiler/dex/frontend.cc
@@ -97,14 +97,6 @@ static constexpr uint32_t kDisabledOptimizationsPerISA[] = {
   // 2 = kArm64. TODO(Arm64): enable optimizations once backend is mature enough.
   (1 << kLoadStoreElimination) |
   (1 << kLoadHoisting) |
-  (1 << kSuppressLoads) |
-  (1 << kClassInitCheckElimination) |
-  (1 << kTrackLiveTemps) |
-  (1 << kSafeOptimizations) |
-  (1 << kBBOpt) |
-  (1 << kMatch) |
-  (1 << kPromoteCompilerTemps) |
-  (1 << kSuppressExceptionEdges) |
   0,
   // 3 = kThumb2.
   0,
@@ -582,7 +574,7 @@ static bool CanCompileMethod(uint32_t method_idx, const DexFile& dex_file,
     // Check if we support the byte code.
     if (std::find(unsupport_list, unsupport_list + unsupport_list_size, opcode)
         != unsupport_list + unsupport_list_size) {
-      if (!cu.mir_graph->IsPseudoMirOp(opcode)) {
+      if (!MIR::DecodedInstruction::IsPseudoMirOp(opcode)) {
        VLOG(compiler) << "Unsupported dalvik byte code : "
                       << mir->dalvikInsn.opcode;
      } else {
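The "special" methods named in the new @details comment are trivial bodies the quick compiler can substitute at the call site. As an illustration only (hypothetical Java sources, not part of this change), the five categories look like:

    // empty:           void f() {}
    // instance getter: int f() { return this.x; }      // iget + return
    // instance setter: void f(int v) { this.x = v; }   // iput + return-void
    // argument return: int f(int a) { return a; }      // return of an argument
    // constant return: int f() { return 42; }          // const + return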
diff --git a/compiler/dex/mir_analysis.cc b/compiler/dex/mir_analysis.cc
index e372206228..3de448344a 100644
--- a/compiler/dex/mir_analysis.cc
+++ b/compiler/dex/mir_analysis.cc
@@ -902,7 +902,7 @@ void MIRGraph::AnalyzeBlock(BasicBlock* bb, MethodStats* stats) {
   while (!done) {
     tbb->visited = true;
     for (MIR* mir = tbb->first_mir_insn; mir != NULL; mir = mir->next) {
-      if (IsPseudoMirOp(mir->dalvikInsn.opcode)) {
+      if (MIR::DecodedInstruction::IsPseudoMirOp(mir->dalvikInsn.opcode)) {
         // Skip any MIR pseudo-op.
         continue;
       }
diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc
index bc99a272a6..b82c5c7f00 100644
--- a/compiler/dex/mir_dataflow.cc
+++ b/compiler/dex/mir_dataflow.cc
@@ -909,6 +909,16 @@ void MIRGraph::HandleDef(ArenaBitVector* def_v, int dalvik_reg_id) {
   def_v->SetBit(dalvik_reg_id);
 }
 
+void MIRGraph::HandleExtended(ArenaBitVector* use_v, ArenaBitVector* def_v,
+                              ArenaBitVector* live_in_v,
+                              const MIR::DecodedInstruction& d_insn) {
+  switch (static_cast<int>(d_insn.opcode)) {
+    default:
+      LOG(ERROR) << "Unexpected Extended Opcode " << d_insn.opcode;
+      break;
+  }
+}
+
 /*
  * Find out live-in variables for natural loops. Variables that are live-in in
  * the main loop body are considered to be defined in the entry block.
@@ -966,6 +976,9 @@ bool MIRGraph::FindLocalLiveIn(BasicBlock* bb) {
         HandleDef(def_v, d_insn->vA+1);
       }
     }
+    if (df_attributes & DF_FORMAT_EXTENDED) {
+      HandleExtended(use_v, def_v, live_in_v, mir->dalvikInsn);
+    }
   }
   return true;
 }
@@ -1048,6 +1061,14 @@ void MIRGraph::DataFlowSSAFormat3RC(MIR* mir) {
   }
 }
 
+void MIRGraph::DataFlowSSAFormatExtended(MIR* mir) {
+  switch (static_cast<int>(mir->dalvikInsn.opcode)) {
+    default:
+      LOG(ERROR) << "Missing case for extended MIR: " << mir->dalvikInsn.opcode;
+      break;
+  }
+}
+
 /* Entry function to convert a block into SSA representation */
 bool MIRGraph::DoSSAConversion(BasicBlock* bb) {
   MIR* mir;
@@ -1063,7 +1084,7 @@ bool MIRGraph::DoSSAConversion(BasicBlock* bb) {
     uint64_t df_attributes = GetDataFlowAttributes(mir);
 
     // If not a pseudo-op, note non-leaf or can throw
-    if (!IsPseudoMirOp(mir->dalvikInsn.opcode)) {
+    if (!MIR::DecodedInstruction::IsPseudoMirOp(mir->dalvikInsn.opcode)) {
       int flags = Instruction::FlagsOf(mir->dalvikInsn.opcode);
 
       if ((flags & Instruction::kInvoke) != 0 && (mir->optimization_flags & MIR_INLINED) == 0) {
@@ -1083,6 +1104,11 @@ bool MIRGraph::DoSSAConversion(BasicBlock* bb) {
       continue;
     }
 
+    if (df_attributes & DF_FORMAT_EXTENDED) {
+      DataFlowSSAFormatExtended(mir);
+      continue;
+    }
+
     if (df_attributes & DF_HAS_USES) {
       if (df_attributes & DF_UA) {
         num_uses++;
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index 4fbace26e7..1c8a9b5079 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -193,14 +193,16 @@ BasicBlock* MIRGraph::SplitBlock(DexOffset code_offset,
     bottom_block->successor_block_list_type = orig_block->successor_block_list_type;
     bottom_block->successor_blocks = orig_block->successor_blocks;
     orig_block->successor_block_list_type = kNotUsed;
-    orig_block->successor_blocks = NULL;
+    orig_block->successor_blocks = nullptr;
     GrowableArray<SuccessorBlockInfo*>::Iterator iterator(bottom_block->successor_blocks);
     while (true) {
       SuccessorBlockInfo* successor_block_info = iterator.Next();
-      if (successor_block_info == NULL) break;
+      if (successor_block_info == nullptr) break;
       BasicBlock* bb = GetBasicBlock(successor_block_info->block);
-      bb->predecessors->Delete(orig_block->id);
-      bb->predecessors->Insert(bottom_block->id);
+      if (bb != nullptr) {
+        bb->predecessors->Delete(orig_block->id);
+        bb->predecessors->Insert(bottom_block->id);
+      }
     }
   }
 
@@ -222,7 +224,7 @@ BasicBlock* MIRGraph::SplitBlock(DexOffset code_offset,
   DCHECK(insn == bottom_block->first_mir_insn);
   DCHECK_EQ(insn->offset, bottom_block->start_offset);
   DCHECK(static_cast<int>(insn->dalvikInsn.opcode) == kMirOpCheck ||
-         !IsPseudoMirOp(insn->dalvikInsn.opcode));
+         !MIR::DecodedInstruction::IsPseudoMirOp(insn->dalvikInsn.opcode));
   DCHECK_EQ(dex_pc_to_block_map_.Get(insn->offset), orig_block->id);
   MIR* p = insn;
   dex_pc_to_block_map_.Put(p->offset, bottom_block->id);
@@ -237,7 +239,7 @@ BasicBlock* MIRGraph::SplitBlock(DexOffset code_offset,
      * CHECK and work portions. Since the 2nd half of a split operation is always
      * the first in a BasicBlock, we can't hit it here.
      */
-    if ((opcode == kMirOpCheck) || !IsPseudoMirOp(opcode)) {
+    if ((opcode == kMirOpCheck) || !MIR::DecodedInstruction::IsPseudoMirOp(opcode)) {
      DCHECK_EQ(dex_pc_to_block_map_.Get(p->offset), orig_block->id);
       dex_pc_to_block_map_.Put(p->offset, bottom_block->id);
     }
@@ -861,11 +863,17 @@ uint64_t MIRGraph::GetDataFlowAttributes(MIR* mir) {
 /* Dump the CFG into a DOT graph */
 void MIRGraph::DumpCFG(const char* dir_prefix, bool all_blocks, const char *suffix) {
   FILE* file;
+  static AtomicInteger cnt(0);
+
+  // Increment counter to get a unique file number.
+  cnt++;
+
   std::string fname(PrettyMethod(cu_->method_idx, *cu_->dex_file));
   ReplaceSpecialChars(fname);
-  fname = StringPrintf("%s%s%x%s.dot", dir_prefix, fname.c_str(),
+  fname = StringPrintf("%s%s%x%s_%d.dot", dir_prefix, fname.c_str(),
                        GetBasicBlock(GetEntryBlock()->fall_through)->start_offset,
-                       suffix == nullptr ? "" : suffix);
+                       suffix == nullptr ? "" : suffix,
+                       cnt.LoadRelaxed());
   file = fopen(fname.c_str(), "w");
   if (file == NULL) {
     return;
@@ -882,6 +890,7 @@ void MIRGraph::DumpCFG(const char* dir_prefix, bool all_blocks, const char *suff
     BasicBlock* bb = GetBasicBlock(block_idx);
     if (bb == NULL) continue;
     if (bb->block_type == kDead) continue;
+    if (bb->hidden) continue;
     if (bb->block_type == kEntryBlock) {
       fprintf(file, "  entry_%d [shape=Mdiamond];\n", bb->id);
     } else if (bb->block_type == kExitBlock) {
@@ -916,7 +925,8 @@ void MIRGraph::DumpCFG(const char* dir_prefix, bool all_blocks, const char *suff
         } else {
           fprintf(file, "    {%04x %s %s %s %s\\l}%s\\\n", mir->offset,
                   mir->ssa_rep ? GetDalvikDisassembly(mir) :
-                  !IsPseudoMirOp(opcode) ? Instruction::Name(mir->dalvikInsn.opcode) :
+                  !MIR::DecodedInstruction::IsPseudoMirOp(opcode) ?
+                    Instruction::Name(mir->dalvikInsn.opcode) :
                   extended_mir_op_names_[opcode - kMirOpFirst],
                   (mir->optimization_flags & MIR_IGNORE_RANGE_CHECK) != 0 ? " no_rangecheck" : " ",
                   (mir->optimization_flags & MIR_IGNORE_NULL_CHECK) != 0 ? " no_nullcheck" : " ",
@@ -1222,7 +1232,7 @@ char* MIRGraph::GetDalvikDisassembly(const MIR* mir) {
     nop = true;
   }
 
-  if (IsPseudoMirOp(opcode)) {
+  if (MIR::DecodedInstruction::IsPseudoMirOp(opcode)) {
     str.append(extended_mir_op_names_[opcode - kMirOpFirst]);
   } else {
     dalvik_format = Instruction::FormatOf(insn.opcode);
@@ -1693,11 +1703,13 @@ BasicBlock* ChildBlockIterator::Next() {
   // We visited both taken and fallthrough. Now check if we have successors we need to visit.
   if (have_successors_ == true) {
     // Get information about next successor block.
-    SuccessorBlockInfo* successor_block_info = successor_iter_.Next();
-
-    // If we don't have anymore successors, return nullptr.
-    if (successor_block_info != nullptr) {
-      return mir_graph_->GetBasicBlock(successor_block_info->block);
+    for (SuccessorBlockInfo* successor_block_info = successor_iter_.Next();
+         successor_block_info != nullptr;
+         successor_block_info = successor_iter_.Next()) {
+      // If block was replaced by zero block, take next one.
+      if (successor_block_info->block != NullBasicBlockId) {
+        return mir_graph_->GetBasicBlock(successor_block_info->block);
+      }
     }
   }
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index d09732891c..1556a19da7 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -80,6 +80,7 @@ enum DataFlowAttributePos {
   kSetsConst,
   kFormat35c,
   kFormat3rc,
+  kFormatExtended,   // Extended format for extended MIRs.
   kNullCheckSrc0,    // Null check of uses[0].
   kNullCheckSrc1,    // Null check of uses[1].
   kNullCheckSrc2,    // Null check of uses[2].
@@ -118,6 +119,7 @@ enum DataFlowAttributePos {
 #define DF_SETS_CONST           (UINT64_C(1) << kSetsConst)
 #define DF_FORMAT_35C           (UINT64_C(1) << kFormat35c)
 #define DF_FORMAT_3RC           (UINT64_C(1) << kFormat3rc)
+#define DF_FORMAT_EXTENDED      (UINT64_C(1) << kFormatExtended)
 #define DF_NULL_CHK_0           (UINT64_C(1) << kNullCheckSrc0)
 #define DF_NULL_CHK_1           (UINT64_C(1) << kNullCheckSrc1)
 #define DF_NULL_CHK_2           (UINT64_C(1) << kNullCheckSrc2)
@@ -284,34 +286,46 @@ struct MIR {
      */
     bool GetConstant(int64_t* ptr_value, bool* wide) const;
 
+    static bool IsPseudoMirOp(Instruction::Code opcode) {
+      return static_cast<int>(opcode) >= static_cast<int>(kMirOpFirst);
+    }
+
+    static bool IsPseudoMirOp(int opcode) {
+      return opcode >= static_cast<int>(kMirOpFirst);
+    }
+
+    bool IsInvoke() const {
+      return !IsPseudoMirOp(opcode) && ((Instruction::FlagsOf(opcode) & Instruction::kInvoke) == Instruction::kInvoke);
+    }
+
     bool IsStore() const {
-      return ((Instruction::FlagsOf(opcode) & Instruction::kStore) == Instruction::kStore);
+      return !IsPseudoMirOp(opcode) && ((Instruction::FlagsOf(opcode) & Instruction::kStore) == Instruction::kStore);
     }
 
     bool IsLoad() const {
-      return ((Instruction::FlagsOf(opcode) & Instruction::kLoad) == Instruction::kLoad);
+      return !IsPseudoMirOp(opcode) && ((Instruction::FlagsOf(opcode) & Instruction::kLoad) == Instruction::kLoad);
     }
 
     bool IsConditionalBranch() const {
-      return (Instruction::FlagsOf(opcode) == (Instruction::kContinue | Instruction::kBranch));
+      return !IsPseudoMirOp(opcode) && (Instruction::FlagsOf(opcode) == (Instruction::kContinue | Instruction::kBranch));
     }
 
     /**
      * @brief Is the register C component of the decoded instruction a constant?
      */
     bool IsCFieldOrConstant() const {
-      return ((Instruction::FlagsOf(opcode) & Instruction::kRegCFieldOrConstant) == Instruction::kRegCFieldOrConstant);
+      return !IsPseudoMirOp(opcode) && ((Instruction::FlagsOf(opcode) & Instruction::kRegCFieldOrConstant) == Instruction::kRegCFieldOrConstant);
     }
 
     /**
      * @brief Is the register B component of the decoded instruction a constant?
      */
     bool IsBFieldOrConstant() const {
-      return ((Instruction::FlagsOf(opcode) & Instruction::kRegBFieldOrConstant) == Instruction::kRegBFieldOrConstant);
+      return !IsPseudoMirOp(opcode) && ((Instruction::FlagsOf(opcode) & Instruction::kRegBFieldOrConstant) == Instruction::kRegBFieldOrConstant);
     }
 
     bool IsCast() const {
-      return ((Instruction::FlagsOf(opcode) & Instruction::kCast) == Instruction::kCast);
+      return !IsPseudoMirOp(opcode) && ((Instruction::FlagsOf(opcode) & Instruction::kCast) == Instruction::kCast);
     }
 
     /**
@@ -321,11 +335,11 @@ struct MIR {
      * when crossing such an instruction.
      */
     bool Clobbers() const {
-      return ((Instruction::FlagsOf(opcode) & Instruction::kClobber) == Instruction::kClobber);
+      return !IsPseudoMirOp(opcode) && ((Instruction::FlagsOf(opcode) & Instruction::kClobber) == Instruction::kClobber);
     }
 
     bool IsLinear() const {
-      return (Instruction::FlagsOf(opcode) & (Instruction::kAdd | Instruction::kSubtract)) != 0;
+      return !IsPseudoMirOp(opcode) && (Instruction::FlagsOf(opcode) & (Instruction::kAdd | Instruction::kSubtract)) != 0;
     }
   } dalvikInsn;
 
@@ -877,14 +891,6 @@ class MIRGraph {
     return backward_branches_ + forward_branches_;
   }
 
-  static bool IsPseudoMirOp(Instruction::Code opcode) {
-    return static_cast<int>(opcode) >= static_cast<int>(kMirOpFirst);
-  }
-
-  static bool IsPseudoMirOp(int opcode) {
-    return opcode >= static_cast<int>(kMirOpFirst);
-  }
-
   // Is this vreg in the in set?
   bool IsInVReg(int vreg) {
     return (vreg >= cu_->num_regs);
@@ -956,10 +962,10 @@ class MIRGraph {
   void ComputeTopologicalSortOrder();
   BasicBlock* CreateNewBB(BBType block_type);
 
-  bool InlineCallsGate();
-  void InlineCallsStart();
-  void InlineCalls(BasicBlock* bb);
-  void InlineCallsEnd();
+  bool InlineSpecialMethodsGate();
+  void InlineSpecialMethodsStart();
+  void InlineSpecialMethods(BasicBlock* bb);
+  void InlineSpecialMethodsEnd();
 
   /**
    * @brief Perform the initial preparation for the Method Uses.
@@ -1059,6 +1065,9 @@ class MIRGraph {
   void HandleLiveInUse(ArenaBitVector* use_v, ArenaBitVector* def_v,
                        ArenaBitVector* live_in_v, int dalvik_reg_id);
   void HandleDef(ArenaBitVector* def_v, int dalvik_reg_id);
+  void HandleExtended(ArenaBitVector* use_v, ArenaBitVector* def_v,
+                      ArenaBitVector* live_in_v,
+                      const MIR::DecodedInstruction& d_insn);
   bool DoSSAConversion(BasicBlock* bb);
   bool InvokeUsesMethodStar(MIR* mir);
   int ParseInsn(const uint16_t* code_ptr, MIR::DecodedInstruction* decoded_instruction);
@@ -1080,6 +1089,7 @@ class MIRGraph {
   void HandleSSAUse(int* uses, int dalvik_reg, int reg_index);
   void DataFlowSSAFormat35C(MIR* mir);
   void DataFlowSSAFormat3RC(MIR* mir);
+  void DataFlowSSAFormatExtended(MIR* mir);
   bool FindLocalLiveIn(BasicBlock* bb);
   bool VerifyPredInfo(BasicBlock* bb);
   BasicBlock* NeedsVisit(BasicBlock* bb);
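All the guards added to MIR::DecodedInstruction above follow one pattern: Instruction::FlagsOf() indexes a table that only covers real Dalvik opcodes, so extended MIR opcodes (kMirOpFirst and up) must short-circuit to false before the table is consulted. A standalone C++ model of the idea (names and values simplified, not ART's real tables):

    #include <cstdint>
    #include <cassert>

    constexpr int kLastDalvikOpcode = 0xff;
    constexpr int kMirOpFirst = 0x100;      // first pseudo (extended) opcode

    uint32_t FlagsOf(int opcode) {
      assert(opcode <= kLastDalvikOpcode);  // table lookup is only valid here
      return opcode == 0x6e ? 1u : 0u;      // pretend 0x6e carries an invoke flag
    }

    bool IsPseudoMirOp(int opcode) { return opcode >= kMirOpFirst; }

    // Guarded predicate: pseudo ops never reach FlagsOf().
    bool IsInvoke(int opcode) {
      return !IsPseudoMirOp(opcode) && (FlagsOf(opcode) & 1u) != 0u;
    }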
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index dc1057f277..869c48f66c 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -137,7 +137,7 @@ MIR* MIRGraph::FindMoveResult(BasicBlock* bb, MIR* mir) {
       break;
     }
     // Keep going if pseudo op, otherwise terminate
-    if (IsPseudoMirOp(mir->dalvikInsn.opcode)) {
+    if (MIR::DecodedInstruction::IsPseudoMirOp(mir->dalvikInsn.opcode)) {
       mir = AdvanceMIR(&tbb, mir);
     } else {
       mir = NULL;
@@ -877,7 +877,7 @@ bool MIRGraph::EliminateNullChecksAndInferTypes(BasicBlock* bb) {
       struct BasicBlock* next_bb = GetBasicBlock(bb->fall_through);
       for (MIR* tmir = next_bb->first_mir_insn; tmir != NULL; tmir = tmir->next) {
-        if (IsPseudoMirOp(tmir->dalvikInsn.opcode)) {
+        if (MIR::DecodedInstruction::IsPseudoMirOp(tmir->dalvikInsn.opcode)) {
           continue;
         }
         // First non-pseudo should be MOVE_RESULT_OBJECT
@@ -1220,7 +1220,7 @@ void MIRGraph::ComputeInlineIFieldLoweringInfo(uint16_t field_idx, MIR* invoke,
   iget_or_iput->meta.ifield_lowering_info = field_info_index;
 }
 
-bool MIRGraph::InlineCallsGate() {
+bool MIRGraph::InlineSpecialMethodsGate() {
   if ((cu_->disable_opt & (1 << kSuppressMethodInlining)) != 0 ||
       method_lowering_infos_.Size() == 0u) {
     return false;
@@ -1232,7 +1232,7 @@ bool MIRGraph::InlineCallsGate() {
   return true;
 }
 
-void MIRGraph::InlineCallsStart() {
+void MIRGraph::InlineSpecialMethodsStart() {
   // Prepare for inlining getters/setters. Since we're inlining at most 1 IGET/IPUT from
   // each INVOKE, we can index the data by the MIR::meta::method_lowering_info index.
 
@@ -1246,12 +1246,12 @@ void MIRGraph::InlineCallsStart() {
       temp_bit_vector_size_ * sizeof(*temp_insn_data_), kArenaAllocGrowableArray));
 }
 
-void MIRGraph::InlineCalls(BasicBlock* bb) {
+void MIRGraph::InlineSpecialMethods(BasicBlock* bb) {
   if (bb->block_type != kDalvikByteCode) {
     return;
   }
   for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
-    if (IsPseudoMirOp(mir->dalvikInsn.opcode)) {
+    if (MIR::DecodedInstruction::IsPseudoMirOp(mir->dalvikInsn.opcode)) {
       continue;
     }
     if (!(Instruction::FlagsOf(mir->dalvikInsn.opcode) & Instruction::kInvoke)) {
@@ -1270,17 +1270,17 @@ void MIRGraph::InlineCalls(BasicBlock* bb) {
     MethodReference target = method_info.GetTargetMethod();
     if (cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(target.dex_file)
             ->GenInline(this, bb, mir, target.dex_method_index)) {
-      if (cu_->verbose) {
-        LOG(INFO) << "In \"" << PrettyMethod(cu_->method_idx, *cu_->dex_file)
-            << "\" @0x" << std::hex << mir->offset
-            << " inlined " << method_info.GetInvokeType() << " (" << sharp_type << ") call to \""
-            << PrettyMethod(target.dex_method_index, *target.dex_file) << "\"";
+      if (cu_->verbose || cu_->print_pass) {
+        LOG(INFO) << "SpecialMethodInliner: Inlined " << method_info.GetInvokeType() << " ("
+            << sharp_type << ") call to \"" << PrettyMethod(target.dex_method_index, *target.dex_file)
+            << "\" from \"" << PrettyMethod(cu_->method_idx, *cu_->dex_file)
+            << "\" @0x" << std::hex << mir->offset;
       }
     }
   }
 }
 
-void MIRGraph::InlineCallsEnd() {
+void MIRGraph::InlineSpecialMethodsEnd() {
   DCHECK(temp_insn_data_ != nullptr);
   temp_insn_data_ = nullptr;
   DCHECK(temp_bit_vector_ != nullptr);
diff --git a/compiler/dex/pass_driver_me_opts.cc b/compiler/dex/pass_driver_me_opts.cc
index 4c9bed65dc..c72a4a667e 100644
--- a/compiler/dex/pass_driver_me_opts.cc
+++ b/compiler/dex/pass_driver_me_opts.cc
@@ -35,7 +35,7 @@ template<>
 const Pass* const PassDriver<PassDriverMEOpts>::g_passes[] = {
   GetPassInstance<CacheFieldLoweringInfo>(),
   GetPassInstance<CacheMethodLoweringInfo>(),
-  GetPassInstance<CallInlining>(),
+  GetPassInstance<SpecialMethodInliner>(),
   GetPassInstance<CodeLayout>(),
   GetPassInstance<NullCheckEliminationAndTypeInference>(),
   GetPassInstance<ClassInitCheckElimination>(),
diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc
index e8f5cb9f09..3ee3e2e61d 100644
--- a/compiler/dex/quick/arm64/int_arm64.cc
+++ b/compiler/dex/quick/arm64/int_arm64.cc
@@ -91,17 +91,121 @@ void Arm64Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
   RegLocation rl_dest = mir_graph_->GetDest(mir);
   RegisterClass src_reg_class = rl_src.ref ? kRefReg : kCoreReg;
   RegisterClass result_reg_class = rl_dest.ref ? kRefReg : kCoreReg;
+
   rl_src = LoadValue(rl_src, src_reg_class);
+  // rl_src may be aliased with rl_result/rl_dest, so do compare early.
+  OpRegImm(kOpCmp, rl_src.reg, 0);
+
   ArmConditionCode code = ArmConditionEncoding(mir->meta.ccode);
-  RegLocation rl_true = mir_graph_->reg_location_[mir->ssa_rep->uses[1]];
-  RegLocation rl_false = mir_graph_->reg_location_[mir->ssa_rep->uses[2]];
-  rl_true = LoadValue(rl_true, result_reg_class);
-  rl_false = LoadValue(rl_false, result_reg_class);
-  rl_result = EvalLoc(rl_dest, result_reg_class, true);
-  OpRegImm(kOpCmp, rl_src.reg, 0);
-  NewLIR4(kA64Csel4rrrc, rl_result.reg.GetReg(), rl_true.reg.GetReg(),
-          rl_false.reg.GetReg(), code);
+
+  // The kMirOpSelect has two variants, one for constants and one for moves.
+  bool is_wide = rl_dest.ref || rl_dest.wide;
+
+  if (mir->ssa_rep->num_uses == 1) {
+    uint32_t true_val = mir->dalvikInsn.vB;
+    uint32_t false_val = mir->dalvikInsn.vC;
+
+    int opcode;             // The opcode.
+    int left_op, right_op;  // The operands.
+    bool rl_result_evaled = false;
+
+    // Check some simple cases.
+    // TODO: Improve this.
+    int zero_reg = (is_wide ? rs_xzr : rs_wzr).GetReg();
+
+    if ((true_val == 0 && false_val == 1) || (true_val == 1 && false_val == 0)) {
+      // CSInc cheap based on wzr.
+      if (true_val == 1) {
+        // Negate.
+        code = ArmConditionEncoding(NegateComparison(mir->meta.ccode));
+      }
+
+      left_op = right_op = zero_reg;
+      opcode = is_wide ? WIDE(kA64Csinc4rrrc) : kA64Csinc4rrrc;
+    } else if ((true_val == 0 && false_val == 0xFFFFFFFF) ||
+               (true_val == 0xFFFFFFFF && false_val == 0)) {
+      // CSneg cheap based on wzr.
+      if (true_val == 0xFFFFFFFF) {
+        // Negate.
+        code = ArmConditionEncoding(NegateComparison(mir->meta.ccode));
+      }
+
+      left_op = right_op = zero_reg;
+      opcode = is_wide ? WIDE(kA64Csneg4rrrc) : kA64Csneg4rrrc;
+    } else if (true_val == 0 || false_val == 0) {
+      // Csel half cheap based on wzr.
+      rl_result = EvalLoc(rl_dest, result_reg_class, true);
+      rl_result_evaled = true;
+      if (false_val == 0) {
+        // Negate.
+        code = ArmConditionEncoding(NegateComparison(mir->meta.ccode));
+      }
+      LoadConstantNoClobber(rl_result.reg, true_val == 0 ? false_val : true_val);
+      left_op = zero_reg;
+      right_op = rl_result.reg.GetReg();
+      opcode = is_wide ? WIDE(kA64Csel4rrrc) : kA64Csel4rrrc;
+    } else if (true_val == 1 || false_val == 1) {
+      // CSInc half cheap based on wzr.
+      rl_result = EvalLoc(rl_dest, result_reg_class, true);
+      rl_result_evaled = true;
+      if (true_val == 1) {
+        // Negate.
+        code = ArmConditionEncoding(NegateComparison(mir->meta.ccode));
+      }
+      LoadConstantNoClobber(rl_result.reg, true_val == 1 ? false_val : true_val);
+      left_op = rl_result.reg.GetReg();
+      right_op = zero_reg;
+      opcode = is_wide ? WIDE(kA64Csinc4rrrc) : kA64Csinc4rrrc;
+    } else if (true_val == 0xFFFFFFFF || false_val == 0xFFFFFFFF) {
+      // CSneg half cheap based on wzr.
+      rl_result = EvalLoc(rl_dest, result_reg_class, true);
+      rl_result_evaled = true;
+      if (true_val == 0xFFFFFFFF) {
+        // Negate.
+        code = ArmConditionEncoding(NegateComparison(mir->meta.ccode));
+      }
+      LoadConstantNoClobber(rl_result.reg, true_val == 0xFFFFFFFF ? false_val : true_val);
+      left_op = rl_result.reg.GetReg();
+      right_op = zero_reg;
+      opcode = is_wide ? WIDE(kA64Csneg4rrrc) : kA64Csneg4rrrc;
+    } else {
+      // Csel. The rest. Use rl_result and a temp.
+      // TODO: To minimize the constants being loaded, check whether one can be inexpensively
+      //       loaded as n - 1 or ~n.
+      rl_result = EvalLoc(rl_dest, result_reg_class, true);
+      rl_result_evaled = true;
+      LoadConstantNoClobber(rl_result.reg, true_val);
+      RegStorage t_reg2 = AllocTypedTemp(false, result_reg_class);
+      if (rl_dest.wide) {
+        if (t_reg2.Is32Bit()) {
+          t_reg2 = As64BitReg(t_reg2);
+        }
+      }
+      LoadConstantNoClobber(t_reg2, false_val);
+
+      // Use csel.
+      left_op = rl_result.reg.GetReg();
+      right_op = t_reg2.GetReg();
+      opcode = is_wide ? WIDE(kA64Csel4rrrc) : kA64Csel4rrrc;
+    }
+
+    if (!rl_result_evaled) {
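For reference, the special cases in GenSelect lean on the A64 conditional-select family, whose semantics (from the ARMv8 reference manual, modeled here as C++ one-liners) explain why the 0/1 and 0/-1 selects need no constant loads at all:

    #include <cstdint>

    // cond is the condition being tested.
    uint32_t csel(bool cond, uint32_t n, uint32_t m)  { return cond ? n : m; }
    uint32_t csinc(bool cond, uint32_t n, uint32_t m) { return cond ? n : m + 1; }
    uint32_t csneg(bool cond, uint32_t n, uint32_t m) { return cond ? n : -m; }

    // With both sources wired to the zero register, a single csinc yields
    // (cond ? 0 : 1) with no temp and no immediates - the cheapest path above.
    uint32_t select01(bool cond) { return csinc(cond, 0u, 0u); }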
+      rl_result = EvalLoc(rl_dest, result_reg_class, true);
+    }
+
+    NewLIR4(opcode, rl_result.reg.GetReg(), left_op, right_op, code);
+  } else {
+    RegLocation rl_true = mir_graph_->reg_location_[mir->ssa_rep->uses[1]];
+    RegLocation rl_false = mir_graph_->reg_location_[mir->ssa_rep->uses[2]];
+
+    rl_true = LoadValue(rl_true, result_reg_class);
+    rl_false = LoadValue(rl_false, result_reg_class);
+    rl_result = EvalLoc(rl_dest, result_reg_class, true);
+
+    int opcode = is_wide ? WIDE(kA64Csel4rrrc) : kA64Csel4rrrc;
+    NewLIR4(opcode, rl_result.reg.GetReg(),
+            rl_true.reg.GetReg(), rl_false.reg.GetReg(), code);
+  }
   StoreValue(rl_dest, rl_result);
 }
 
@@ -110,7 +214,6 @@ void Arm64Mir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
   RegLocation rl_src2 = mir_graph_->GetSrcWide(mir, 2);
   LIR* taken = &block_label_list_[bb->taken];
   LIR* not_taken = &block_label_list_[bb->fall_through];
-  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
   // Normalize such that if either operand is constant, src2 will be constant.
   ConditionCode ccode = mir->meta.ccode;
   if (rl_src1.is_const) {
@@ -118,16 +221,22 @@ void Arm64Mir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
     ccode = FlipComparisonOrder(ccode);
   }
 
+  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+
   if (rl_src2.is_const) {
-    rl_src2 = UpdateLocWide(rl_src2);
+    // TODO: Optimize for rl_src1.is_const? (Does happen in the boot image at the moment.)
+
     int64_t val = mir_graph_->ConstantValueWide(rl_src2);
     // Special handling using cbz & cbnz.
     if (val == 0 && (ccode == kCondEq || ccode == kCondNe)) {
       OpCmpImmBranch(ccode, rl_src1.reg, 0, taken);
       OpCmpImmBranch(NegateComparison(ccode), rl_src1.reg, 0, not_taken);
       return;
+    }
+
+    // Only handle Imm if src2 is not already in a register.
-    } else if (rl_src2.location != kLocPhysReg) {
+    rl_src2 = UpdateLocWide(rl_src2);
+    if (rl_src2.location != kLocPhysReg) {
       OpRegImm64(kOpCmp, rl_src1.reg, val);
       OpCondBranch(ccode, taken);
       OpCondBranch(NegateComparison(ccode), not_taken);
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 5870d22208..048aca3735 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -1046,9 +1046,19 @@ CompiledMethod* Mir2Lir::GetCompiledMethod() {
     }
     // Push a marker to take place of lr.
     vmap_encoder.PushBackUnsigned(VmapTable::kAdjustedFpMarker);
-    // fp regs already sorted.
-    for (uint32_t i = 0; i < fp_vmap_table_.size(); i++) {
-      vmap_encoder.PushBackUnsigned(fp_vmap_table_[i] + VmapTable::kEntryAdjustment);
+    if (cu_->instruction_set == kThumb2) {
+      // fp regs already sorted.
+      for (uint32_t i = 0; i < fp_vmap_table_.size(); i++) {
+        vmap_encoder.PushBackUnsigned(fp_vmap_table_[i] + VmapTable::kEntryAdjustment);
+      }
+    } else {
+      // For other platforms regs may have been inserted out of order - sort first.
+      std::sort(fp_vmap_table_.begin(), fp_vmap_table_.end());
+      for (size_t i = 0; i < fp_vmap_table_.size(); ++i) {
+        // Copy, stripping out the phys register sort key.
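The mask in the sorted branch above strips a sort key that was packed into the high bits of each fp_vmap_table_ entry, keeping only the virtual-register number. A model of the bit math, assuming (as the hunk suggests) VREG_NUM_WIDTH is 16:

    #include <cstdint>

    constexpr int VREG_NUM_WIDTH = 16;  // assumed width of the vreg field

    // ~(-1 << 16) == 0x0000FFFF: keep the low 16 bits (the encoded vreg
    // entry) and drop the physical-register sort key above them.
    constexpr uint32_t StripSortKey(uint32_t packed) {
      return packed & ~(~0u << VREG_NUM_WIDTH);
    }
    static_assert(StripSortKey(0x000A0005u) == 0x0005u, "sort key removed");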
+        vmap_encoder.PushBackUnsigned(
+            ~(-1 << VREG_NUM_WIDTH) & (fp_vmap_table_[i] + VmapTable::kEntryAdjustment));
+      }
     }
   } else {
     DCHECK_EQ(POPCOUNT(core_spill_mask_), 0);
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index 6191e4b0a1..45dd7f08a6 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -96,7 +96,7 @@ MIR* AllocReplacementMIR(MIRGraph* mir_graph, MIR* invoke, MIR* move_return) {
 
 uint32_t GetInvokeReg(MIR* invoke, uint32_t arg) {
   DCHECK_LT(arg, invoke->dalvikInsn.vA);
-  DCHECK(!MIRGraph::IsPseudoMirOp(invoke->dalvikInsn.opcode));
+  DCHECK(!MIR::DecodedInstruction::IsPseudoMirOp(invoke->dalvikInsn.opcode));
   if (Instruction::FormatOf(invoke->dalvikInsn.opcode) == Instruction::k3rc) {
     return invoke->dalvikInsn.vC + arg;  // Non-range invoke.
   } else {
@@ -107,7 +107,7 @@ uint32_t GetInvokeReg(MIR* invoke, uint32_t arg) {
 
 bool WideArgIsInConsecutiveDalvikRegs(MIR* invoke, uint32_t arg) {
   DCHECK_LT(arg + 1, invoke->dalvikInsn.vA);
-  DCHECK(!MIRGraph::IsPseudoMirOp(invoke->dalvikInsn.opcode));
+  DCHECK(!MIR::DecodedInstruction::IsPseudoMirOp(invoke->dalvikInsn.opcode));
   return Instruction::FormatOf(invoke->dalvikInsn.opcode) == Instruction::k3rc ||
       invoke->dalvikInsn.arg[arg + 1u] == invoke->dalvikInsn.arg[arg] + 1u;
 }
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index caadc0ad89..07c615f342 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -1185,7 +1185,7 @@ bool Mir2Lir::MethodBlockCodeGen(BasicBlock* bb) {
       work_half->meta.throw_insn = mir;
     }
 
-    if (MIRGraph::IsPseudoMirOp(opcode)) {
+    if (MIR::DecodedInstruction::IsPseudoMirOp(opcode)) {
       HandleExtendedMethodMIR(bb, mir);
       continue;
     }
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 48855012c3..87509b636c 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -531,7 +531,7 @@ class Mir2Lir : public Backend {
       LIRSlowPath(Mir2Lir* m2l, const DexOffset dexpc, LIR* fromfast,
                   LIR* cont = nullptr) :
           m2l_(m2l), cu_(m2l->cu_), current_dex_pc_(dexpc), fromfast_(fromfast), cont_(cont) {
-        m2l->StartSlowPath(cont);
+        m2l->StartSlowPath(this);
       }
       virtual ~LIRSlowPath() {}
       virtual void Compile() = 0;
@@ -705,17 +705,17 @@ class Mir2Lir : public Backend {
     int AssignLiteralOffset(CodeOffset offset);
     int AssignSwitchTablesOffset(CodeOffset offset);
     int AssignFillArrayDataOffset(CodeOffset offset);
-    LIR* InsertCaseLabel(DexOffset vaddr, int keyVal);
+    virtual LIR* InsertCaseLabel(DexOffset vaddr, int keyVal);
     void MarkPackedCaseLabels(Mir2Lir::SwitchTable* tab_rec);
     void MarkSparseCaseLabels(Mir2Lir::SwitchTable* tab_rec);
 
-    virtual void StartSlowPath(LIR *label) {}
+    virtual void StartSlowPath(LIRSlowPath* slowpath) {}
     virtual void BeginInvoke(CallInfo* info) {}
     virtual void EndInvoke(CallInfo* info) {}
 
     // Handle bookkeeping to convert a wide RegLocation to a narrow RegLocation.  No code generated.
-    RegLocation NarrowRegLoc(RegLocation loc);
+    virtual RegLocation NarrowRegLoc(RegLocation loc);
 
     // Shared by all targets - implemented in local_optimizations.cc
     void ConvertMemOpIntoMove(LIR* orig_lir, RegStorage dest, RegStorage src);
@@ -763,7 +763,7 @@ class Mir2Lir : public Backend {
     virtual bool IsTemp(RegStorage reg);
     bool IsPromoted(RegStorage reg);
     bool IsDirty(RegStorage reg);
-    void LockTemp(RegStorage reg);
+    virtual void LockTemp(RegStorage reg);
     void ResetDef(RegStorage reg);
     void NullifyRange(RegStorage reg, int s_reg);
     void MarkDef(RegLocation rl, LIR *start, LIR *finish);
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index 9000514856..8e2a1e3532 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -234,8 +234,7 @@ void X86Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
   NewLIR0(kPseudoMethodEntry);
   /* Spill core callee saves */
   SpillCoreRegs();
-  /* NOTE: promotion of FP regs currently unsupported, thus no FP spill */
-  DCHECK_EQ(num_fp_spills_, 0);
+  SpillFPRegs();
   if (!skip_overflow_check) {
     class StackOverflowSlowPath : public LIRSlowPath {
      public:
@@ -309,6 +308,7 @@ void X86Mir2Lir::GenExitSequence() {
 
   NewLIR0(kPseudoMethodExit);
   UnSpillCoreRegs();
+  UnSpillFPRegs();
   /* Remove frame except for return address */
   stack_increment_ = OpRegImm(kOpAdd, rs_rX86_SP,
                               frame_size_ - GetInstructionSetPointerSize(cu_->instruction_set));
   NewLIR0(kX86Ret);
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index ff7b30eeec..b0c54e86e9 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -319,6 +319,8 @@ class X86Mir2Lir : public Mir2Lir {
   void OpRegThreadMem(OpKind op, RegStorage r_dest, ThreadOffset<8> thread_offset);
   void SpillCoreRegs();
   void UnSpillCoreRegs();
+  void UnSpillFPRegs();
+  void SpillFPRegs();
   static const X86EncodingMap EncodingMap[kX86Last];
   bool InexpensiveConstantInt(int32_t value);
   bool InexpensiveConstantFloat(int32_t value);
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index e81f505f2f..1ebbbbd5ee 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -52,6 +52,13 @@ static constexpr RegStorage dp_regs_arr_64[] = {
     rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
     rs_dr8, rs_dr9, rs_dr10, rs_dr11, rs_dr12, rs_dr13, rs_dr14, rs_dr15
 };
+static constexpr RegStorage xp_regs_arr_32[] = {
+    rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7,
+};
+static constexpr RegStorage xp_regs_arr_64[] = {
+    rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7,
+    rs_xr8, rs_xr9, rs_xr10, rs_xr11, rs_xr12, rs_xr13, rs_xr14, rs_xr15
+};
 static constexpr RegStorage reserved_regs_arr_32[] = {rs_rX86_SP_32};
 static constexpr RegStorage reserved_regs_arr_64[] = {rs_rX86_SP_32};
 static constexpr RegStorage reserved_regs_arr_64q[] = {rs_rX86_SP_64};
@@ -60,6 +67,24 @@ static constexpr RegStorage core_temps_arr_64[] = {
     rs_rAX, rs_rCX, rs_rDX, rs_rSI, rs_rDI,
     rs_r8, rs_r9, rs_r10, rs_r11
 };
+
+// How to add register to be available for promotion:
+// 1) Remove register from array defining temp
+// 2) Update ClobberCallerSave
+// 3) Update JNI compiler ABI:
+// 3.1) add reg in JniCallingConvention method
+// 3.2) update CoreSpillMask/FpSpillMask
+// 4) Update entrypoints
+// 4.1) Update constants in asm_support_x86_64.h for new frame size
+// 4.2) Remove entry in SmashCallerSaves
+// 4.3) Update jni_entrypoints to spill/unspill new callee save reg
+// 4.4) Update quick_entrypoints to spill/unspill new callee save reg
+// 5) Update runtime ABI
+// 5.1) Update quick_method_frame_info with new required spills
+// 5.2) Update QuickArgumentVisitor with new offsets to gprs and xmms
+// Note that you cannot use register corresponding to incoming args
+// according to ABI and QCG needs one additional XMM temp for
+// bulk copy in preparation to call.
 static constexpr RegStorage core_temps_arr_64q[] = {
     rs_r0q, rs_r1q, rs_r2q, rs_r6q, rs_r7q,
     rs_r8q, rs_r9q, rs_r10q, rs_r11q
@@ -69,14 +94,14 @@ static constexpr RegStorage sp_temps_arr_32[] = {
 };
 static constexpr RegStorage sp_temps_arr_64[] = {
     rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
-    rs_fr8, rs_fr9, rs_fr10, rs_fr11, rs_fr12, rs_fr13, rs_fr14, rs_fr15
+    rs_fr8, rs_fr9, rs_fr10, rs_fr11
 };
 static constexpr RegStorage dp_temps_arr_32[] = {
     rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
 };
 static constexpr RegStorage dp_temps_arr_64[] = {
     rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
-    rs_dr8, rs_dr9, rs_dr10, rs_dr11, rs_dr12, rs_dr13, rs_dr14, rs_dr15
+    rs_dr8, rs_dr9, rs_dr10, rs_dr11
 };
 
 static constexpr RegStorage xp_temps_arr_32[] = {
@@ -84,7 +109,7 @@ static constexpr RegStorage xp_temps_arr_32[] = {
 };
 static constexpr RegStorage xp_temps_arr_64[] = {
     rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7,
-    rs_xr8, rs_xr9, rs_xr10, rs_xr11, rs_xr12, rs_xr13, rs_xr14, rs_xr15
+    rs_xr8, rs_xr9, rs_xr10, rs_xr11
 };
 
 static constexpr ArrayRef<const RegStorage> empty_pool;
@@ -95,6 +120,8 @@ static constexpr ArrayRef<const RegStorage> sp_regs_32(sp_regs_arr_32);
 static constexpr ArrayRef<const RegStorage> sp_regs_64(sp_regs_arr_64);
 static constexpr ArrayRef<const RegStorage> dp_regs_32(dp_regs_arr_32);
 static constexpr ArrayRef<const RegStorage> dp_regs_64(dp_regs_arr_64);
+static constexpr ArrayRef<const RegStorage> xp_regs_32(xp_regs_arr_32);
+static constexpr ArrayRef<const RegStorage> xp_regs_64(xp_regs_arr_64);
 static constexpr ArrayRef<const RegStorage> reserved_regs_32(reserved_regs_arr_32);
 static constexpr ArrayRef<const RegStorage> reserved_regs_64(reserved_regs_arr_64);
 static constexpr ArrayRef<const RegStorage> reserved_regs_64q(reserved_regs_arr_64q);
@@ -437,21 +464,13 @@ bool X86Mir2Lir::IsByteRegister(RegStorage reg) {
 
 /* Clobber all regs that might be used by an external C call */
 void X86Mir2Lir::ClobberCallerSave() {
-  Clobber(rs_rAX);
-  Clobber(rs_rCX);
-  Clobber(rs_rDX);
-  Clobber(rs_rBX);
-
-  Clobber(rs_fr0);
-  Clobber(rs_fr1);
-  Clobber(rs_fr2);
-  Clobber(rs_fr3);
-  Clobber(rs_fr4);
-  Clobber(rs_fr5);
-  Clobber(rs_fr6);
-  Clobber(rs_fr7);
-
   if (cu_->target64) {
+    Clobber(rs_rAX);
+    Clobber(rs_rCX);
+    Clobber(rs_rDX);
+    Clobber(rs_rSI);
+    Clobber(rs_rDI);
+
     Clobber(rs_r8);
     Clobber(rs_r9);
     Clobber(rs_r10);
@@ -461,11 +480,21 @@ void X86Mir2Lir::ClobberCallerSave() {
     Clobber(rs_fr9);
     Clobber(rs_fr10);
     Clobber(rs_fr11);
-    Clobber(rs_fr12);
-    Clobber(rs_fr13);
-    Clobber(rs_fr14);
-    Clobber(rs_fr15);
+  } else {
+    Clobber(rs_rAX);
+    Clobber(rs_rCX);
+    Clobber(rs_rDX);
+    Clobber(rs_rBX);
   }
+
+  Clobber(rs_fr0);
+  Clobber(rs_fr1);
+  Clobber(rs_fr2);
+  Clobber(rs_fr3);
+  Clobber(rs_fr4);
+  Clobber(rs_fr5);
+  Clobber(rs_fr6);
+  Clobber(rs_fr7);
 }
 
 RegLocation X86Mir2Lir::GetReturnWideAlt() {
@@ -599,11 +628,15 @@ void X86Mir2Lir::CompilerInitializeRegAlloc() {
   // Target-specific adjustments.
 
   // Add in XMM registers.
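The new SpillFPRegs/UnSpillFPRegs routines place the FP save area directly below the core save area at the top of the frame, which the shared offset expression encodes. A small model of that arithmetic (x86-64 pointer size 8; the numbers are purely illustrative):

    #include <cstddef>

    size_t FirstFpSpillOffset(size_t frame_size, size_t num_core, size_t num_fp) {
      // Core spills occupy the top of the frame; FP doubles sit directly below.
      return frame_size - 8 * (num_core + num_fp);
    }
    // e.g. frame_size 96 with 3 core + 4 FP spills: the FP area spans offsets
    // 40..72 and the core area 72..96, each loop iteration advancing by 8.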
-  const ArrayRef<const RegStorage> *xp_temps = cu_->target64 ? &xp_temps_64 : &xp_temps_32;
-  for (RegStorage reg : *xp_temps) {
+  const ArrayRef<const RegStorage> *xp_regs = cu_->target64 ? &xp_regs_64 : &xp_regs_32;
+  for (RegStorage reg : *xp_regs) {
     RegisterInfo* info = new (arena_) RegisterInfo(reg, GetRegMaskCommon(reg));
     reginfo_map_.Put(reg.GetReg(), info);
-    info->SetIsTemp(true);
+  }
+  const ArrayRef<const RegStorage> *xp_temps = cu_->target64 ? &xp_temps_64 : &xp_temps_32;
+  for (RegStorage reg : *xp_temps) {
+    RegisterInfo* xp_reg_info = GetRegInfo(reg);
+    xp_reg_info->SetIsTemp(true);
   }
 
   // Alias single precision xmm to double xmms.
@@ -665,9 +698,11 @@ void X86Mir2Lir::SpillCoreRegs() {
   // Spill mask not including fake return address register
   uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum());
   int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_);
+  OpSize size = cu_->target64 ? k64 : k32;
   for (int reg = 0; mask; mask >>= 1, reg++) {
     if (mask & 0x1) {
-      StoreWordDisp(rs_rX86_SP, offset, RegStorage::Solo32(reg));
+      StoreBaseDisp(rs_rX86_SP, offset, cu_->target64 ? RegStorage::Solo64(reg) : RegStorage::Solo32(reg),
+                    size, kNotVolatile);
       offset += GetInstructionSetPointerSize(cu_->instruction_set);
     }
   }
@@ -680,14 +715,46 @@ void X86Mir2Lir::UnSpillCoreRegs() {
   // Spill mask not including fake return address register
   uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum());
   int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_);
+  OpSize size = cu_->target64 ? k64 : k32;
   for (int reg = 0; mask; mask >>= 1, reg++) {
     if (mask & 0x1) {
-      LoadWordDisp(rs_rX86_SP, offset, RegStorage::Solo32(reg));
+      LoadBaseDisp(rs_rX86_SP, offset, cu_->target64 ?
+          RegStorage::Solo64(reg) : RegStorage::Solo32(reg),
+                   size, kNotVolatile);
       offset += GetInstructionSetPointerSize(cu_->instruction_set);
     }
   }
 }
 
+void X86Mir2Lir::SpillFPRegs() {
+  if (num_fp_spills_ == 0) {
+    return;
+  }
+  uint32_t mask = fp_spill_mask_;
+  int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * (num_fp_spills_ + num_core_spills_));
+  for (int reg = 0; mask; mask >>= 1, reg++) {
+    if (mask & 0x1) {
+      StoreBaseDisp(rs_rX86_SP, offset, RegStorage::FloatSolo64(reg),
+                    k64, kNotVolatile);
+      offset += sizeof(double);
+    }
+  }
+}
+
+void X86Mir2Lir::UnSpillFPRegs() {
+  if (num_fp_spills_ == 0) {
+    return;
+  }
+  uint32_t mask = fp_spill_mask_;
+  int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * (num_fp_spills_ + num_core_spills_));
+  for (int reg = 0; mask; mask >>= 1, reg++) {
+    if (mask & 0x1) {
+      LoadBaseDisp(rs_rX86_SP, offset, RegStorage::FloatSolo64(reg),
+                   k64, kNotVolatile);
+      offset += sizeof(double);
+    }
+  }
+}
+
 bool X86Mir2Lir::IsUnconditionalBranch(LIR* lir) {
   return (lir->opcode == kX86Jmp8 || lir->opcode == kX86Jmp32);
 }
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index 657160ffd1..5c7c91b5b5 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -917,7 +917,7 @@ void X86Mir2Lir::AnalyzeBB(BasicBlock * bb) {
   for (MIR *mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
     int opcode = mir->dalvikInsn.opcode;
-    if (MIRGraph::IsPseudoMirOp(opcode)) {
+    if (MIR::DecodedInstruction::IsPseudoMirOp(opcode)) {
       AnalyzeExtendedMIR(opcode, bb, mir);
     } else {
       AnalyzeMIR(opcode, bb, mir);
diff --git a/compiler/dex/quick/x86/x86_lir.h b/compiler/dex/quick/x86/x86_lir.h
index 2789923bb9..56573810ca 100644
--- a/compiler/dex/quick/x86/x86_lir.h
+++ b/compiler/dex/quick/x86/x86_lir.h
@@ -66,7 +66,9 @@ namespace art {
  * XMM6: caller | caller, arg7 | caller, scratch | caller, arg7, scratch
  * XMM7: caller | caller, arg8 | caller, scratch | caller, arg8, scratch
  * ---  x86-64/x32 registers
- * XMM8 .. 15: caller save available as scratch registers for ART.
+ * XMM8 .. 11: caller save available as scratch registers for ART.
+ * XMM12 .. 15: callee save available as promoted registers for ART.
+ * This change (XMM12..15) is for QCG only, for others they are caller save.
  *
  * X87 is a necessary evil outside of ART code for x86:
  * ST0:  x86 float/double native return value, caller save
diff --git a/compiler/dex/vreg_analysis.cc b/compiler/dex/vreg_analysis.cc
index db383c4d0b..892b30284f 100644
--- a/compiler/dex/vreg_analysis.cc
+++ b/compiler/dex/vreg_analysis.cc
@@ -251,7 +251,8 @@ bool MIRGraph::InferTypeAndSize(BasicBlock* bb, MIR* mir, bool changed) {
 
   // Special-case handling for format 35c/3rc invokes
   Instruction::Code opcode = mir->dalvikInsn.opcode;
-  int flags = IsPseudoMirOp(opcode) ? 0 : Instruction::FlagsOf(mir->dalvikInsn.opcode);
+  int flags = MIR::DecodedInstruction::IsPseudoMirOp(opcode) ?
+      0 : Instruction::FlagsOf(mir->dalvikInsn.opcode);
   if ((flags & Instruction::kInvoke) &&
       (attrs & (DF_FORMAT_35C | DF_FORMAT_3RC))) {
     DCHECK_EQ(next, 0);
diff --git a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
index 5febed24fe..525f05c522 100644
--- a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
+++ b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
@@ -130,6 +130,10 @@ X86_64JniCallingConvention::X86_64JniCallingConvention(bool is_static, bool is_s
   callee_save_regs_.push_back(X86_64ManagedRegister::FromCpuRegister(R13));
   callee_save_regs_.push_back(X86_64ManagedRegister::FromCpuRegister(R14));
   callee_save_regs_.push_back(X86_64ManagedRegister::FromCpuRegister(R15));
+  callee_save_regs_.push_back(X86_64ManagedRegister::FromXmmRegister(XMM12));
+  callee_save_regs_.push_back(X86_64ManagedRegister::FromXmmRegister(XMM13));
+  callee_save_regs_.push_back(X86_64ManagedRegister::FromXmmRegister(XMM14));
+  callee_save_regs_.push_back(X86_64ManagedRegister::FromXmmRegister(XMM15));
 }
 
 uint32_t X86_64JniCallingConvention::CoreSpillMask() const {
@@ -137,6 +141,10 @@ uint32_t X86_64JniCallingConvention::CoreSpillMask() const {
       1 << kNumberOfCpuRegisters;
 }
 
+uint32_t X86_64JniCallingConvention::FpSpillMask() const {
+  return 1 << XMM12 | 1 << XMM13 | 1 << XMM14 | 1 << XMM15;
+}
+
 size_t X86_64JniCallingConvention::FrameSize() {
   // Method*, return address and callee save area size, local reference segment state
   size_t frame_data_size = sizeof(StackReference<mirror::ArtMethod>) +
diff --git a/compiler/jni/quick/x86_64/calling_convention_x86_64.h b/compiler/jni/quick/x86_64/calling_convention_x86_64.h
index 1ba5353289..7a90c6e94e 100644
--- a/compiler/jni/quick/x86_64/calling_convention_x86_64.h
+++ b/compiler/jni/quick/x86_64/calling_convention_x86_64.h
@@ -61,9 +61,7 @@ class X86_64JniCallingConvention FINAL : public JniCallingConvention {
   }
   ManagedRegister ReturnScratchRegister() const OVERRIDE;
   uint32_t CoreSpillMask() const OVERRIDE;
-  uint32_t FpSpillMask() const OVERRIDE {
-    return 0;
-  }
+  uint32_t FpSpillMask() const OVERRIDE;
   bool IsCurrentParamInRegister() OVERRIDE;
   bool IsCurrentParamOnStack() OVERRIDE;
   ManagedRegister CurrentParamRegister() OVERRIDE;
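The new FpSpillMask sets one bit per callee-save XMM register. Assuming the XMM12..XMM15 enumerators carry their hardware numbers 12..15 (as ART's FloatRegister enum suggests), the mask works out to:

    #include <cstdint>

    constexpr uint32_t kFpSpillMask =
        (1u << 12) | (1u << 13) | (1u << 14) | (1u << 15);
    static_assert(kFpSpillMask == 0xF000u, "four callee-save XMMs, bits 12-15");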
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index 4d5d613015..78738d8934 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -1671,16 +1671,31 @@ void X86_64Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
                                  const std::vector<ManagedRegister>& spill_regs,
                                  const ManagedRegisterEntrySpills& entry_spills) {
   CHECK_ALIGNED(frame_size, kStackAlignment);
+  int gpr_count = 0;
   for (int i = spill_regs.size() - 1; i >= 0; --i) {
-    pushq(spill_regs.at(i).AsX86_64().AsCpuRegister());
+    x86_64::X86_64ManagedRegister spill = spill_regs.at(i).AsX86_64();
+    if (spill.IsCpuRegister()) {
+      pushq(spill.AsCpuRegister());
+      gpr_count++;
+    }
   }
   // return address then method on stack
-  addq(CpuRegister(RSP), Immediate(-static_cast<int64_t>(frame_size) + (spill_regs.size() * kFramePointerSize) +
-                                   sizeof(StackReference<mirror::ArtMethod>) /*method*/ +
-                                   kFramePointerSize /*return address*/));
+  int64_t rest_of_frame = static_cast<int64_t>(frame_size)
+                          - (gpr_count * kFramePointerSize)
+                          - kFramePointerSize /*return address*/;
+  subq(CpuRegister(RSP), Immediate(rest_of_frame));
+  // spill xmms
+  int64_t offset = rest_of_frame;
+  for (int i = spill_regs.size() - 1; i >= 0; --i) {
+    x86_64::X86_64ManagedRegister spill = spill_regs.at(i).AsX86_64();
+    if (spill.IsXmmRegister()) {
+      offset -= sizeof(double);
+      movsd(Address(CpuRegister(RSP), offset), spill.AsXmmRegister());
+    }
+  }
 
   DCHECK_EQ(4U, sizeof(StackReference<mirror::ArtMethod>));
-  subq(CpuRegister(RSP), Immediate(4));
+
   movl(Address(CpuRegister(RSP), 0), method_reg.AsX86_64().AsCpuRegister());
 
   for (size_t i = 0; i < entry_spills.size(); ++i) {
@@ -1707,9 +1722,24 @@ void X86_64Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
 void X86_64Assembler::RemoveFrame(size_t frame_size,
                                   const std::vector<ManagedRegister>& spill_regs) {
   CHECK_ALIGNED(frame_size, kStackAlignment);
-  addq(CpuRegister(RSP), Immediate(static_cast<int64_t>(frame_size) - (spill_regs.size() * kFramePointerSize) - kFramePointerSize));
+  int gpr_count = 0;
+  // unspill xmms
+  int64_t offset = static_cast<int64_t>(frame_size) - (spill_regs.size() * kFramePointerSize) - 2 * kFramePointerSize;
   for (size_t i = 0; i < spill_regs.size(); ++i) {
-    popq(spill_regs.at(i).AsX86_64().AsCpuRegister());
+    x86_64::X86_64ManagedRegister spill = spill_regs.at(i).AsX86_64();
+    if (spill.IsXmmRegister()) {
+      offset += sizeof(double);
+      movsd(spill.AsXmmRegister(), Address(CpuRegister(RSP), offset));
+    } else {
+      gpr_count++;
+    }
+  }
+  addq(CpuRegister(RSP), Immediate(static_cast<int64_t>(frame_size) - (gpr_count * kFramePointerSize) - kFramePointerSize));
+  for (size_t i = 0; i < spill_regs.size(); ++i) {
+    x86_64::X86_64ManagedRegister spill = spill_regs.at(i).AsX86_64();
+    if (spill.IsCpuRegister()) {
+      popq(spill.AsCpuRegister());
+    }
   }
   ret();
 }
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc
index f7bad8b057..dc1758ffdf 100644
--- a/compiler/utils/x86_64/assembler_x86_64_test.cc
+++ b/compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -246,11 +246,9 @@ std::string buildframe_test_fn(x86_64::X86_64Assembler* assembler) {
   str << "pushq %rsi\n";
   str << "pushq %r10\n";
   // 2) Move down the stack pointer.
-  ssize_t displacement = -static_cast<ssize_t>(frame_size) + spill_regs.size() * 8 +
-      sizeof(StackReference<mirror::ArtMethod>) + 8;
-  str << "addq $" << displacement << ", %rsp\n";
-  // 3) Make space for method reference, and store it.
-  str << "subq $4, %rsp\n";
+  ssize_t displacement = static_cast<ssize_t>(frame_size) - (spill_regs.size() * 8 + 8);
+  str << "subq $" << displacement << ", %rsp\n";
+  // 3) Store method reference.
   str << "movl %edi, (%rsp)\n";
   // 4) Entry spills.
   str << "movq %rax, " << frame_size + 0 << "(%rsp)\n";
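Taken together, BuildFrame now produces the frame shape sketched below (a reading of the hunks above, with a hypothetical mix of two GPR and two XMM entries in spill_regs; offsets are relative to RSP after the subq):

    // higher addresses
    //   [return address]              <- pushed by the call instruction
    //   [GPR spill][GPR spill]        <- pushq loop (gpr_count entries)
    //   ---- one subq opens rest_of_frame bytes ----
    //   [XMM spill][XMM spill]        <- movsd stores just below the GPR area
    //   [locals / outgoing args ...]
    //   [StackReference<ArtMethod>]   <- movl %edi, (%rsp) at offset 0
    // lower addresses (RSP)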