Diffstat (limited to 'compiler')
47 files changed, 1286 insertions, 491 deletions
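The bulk of this change replaces the UNUSED(x) statement inside function bodies with the ATTRIBUTE_UNUSED attribute on the parameter declaration itself. Below is a minimal illustrative sketch of the pattern, modeled on the Pass::Gate hunk further down; it assumes ATTRIBUTE_UNUSED is the macro from ART's base/macros.h, presumably expanding to the compiler's unused-parameter attribute (e.g. __attribute__((unused))).

// Before: a no-op UNUSED(x) in the body silences the -Wunused-parameter warning.
virtual bool Gate(const PassDataHolder* data) const {
  UNUSED(data);
  // Base class says yes.
  return true;
}

// After: the parameter carries the attribute, so the body needs no placeholder code.
virtual bool Gate(const PassDataHolder* data ATTRIBUTE_UNUSED) const {
  // Base class says yes.
  return true;
}

The attribute form keeps the intent visible in the signature and lets entirely-unused overrides have empty bodies, which is what the hunks below do file by file.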
diff --git a/compiler/compiler.h b/compiler/compiler.h index 9b4dbe02e2..8788dc1950 100644 --- a/compiler/compiler.h +++ b/compiler/compiler.h @@ -77,9 +77,8 @@ class Compiler { * information. * @note This is used for backtrace information in generated code. */ - virtual std::vector<uint8_t>* GetCallFrameInformationInitialization(const CompilerDriver& driver) - const { - UNUSED(driver); + virtual std::vector<uint8_t>* GetCallFrameInformationInitialization( + const CompilerDriver& driver ATTRIBUTE_UNUSED) const { return nullptr; } diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc index 4efe4af896..b0972d98d4 100644 --- a/compiler/dex/mir_graph.cc +++ b/compiler/dex/mir_graph.cc @@ -511,9 +511,8 @@ BasicBlock* MIRGraph::ProcessCanBranch(BasicBlock* cur_block, MIR* insn, DexOffs /* Process instructions with the kSwitch flag */ BasicBlock* MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset, - int width, int flags, + int width, int flags ATTRIBUTE_UNUSED, ScopedArenaVector<uint16_t>* dex_pc_to_block_map) { - UNUSED(flags); const uint16_t* switch_data = reinterpret_cast<const uint16_t*>(GetCurrentInsns() + cur_offset + static_cast<int32_t>(insn->dalvikInsn.vB)); @@ -592,11 +591,15 @@ BasicBlock* MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffs } /* Process instructions with the kThrow flag */ -BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset, - int width, int flags, ArenaBitVector* try_block_addr, - const uint16_t* code_ptr, const uint16_t* code_end, +BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block, + MIR* insn, + DexOffset cur_offset, + int width, + int flags ATTRIBUTE_UNUSED, + ArenaBitVector* try_block_addr, + const uint16_t* code_ptr, + const uint16_t* code_end, ScopedArenaVector<uint16_t>* dex_pc_to_block_map) { - UNUSED(flags); bool in_try_block = try_block_addr->IsBitSet(cur_offset); bool is_throw = (insn->dalvikInsn.opcode == Instruction::THROW); diff --git a/compiler/dex/pass.h b/compiler/dex/pass.h index 0def056f4f..16414efada 100644 --- a/compiler/dex/pass.h +++ b/compiler/dex/pass.h @@ -53,10 +53,7 @@ class Pass { * @param data the PassDataHolder. * @return whether or not to execute the pass. */ - virtual bool Gate(const PassDataHolder* data) const { - // Unused parameter. - UNUSED(data); - + virtual bool Gate(const PassDataHolder* data ATTRIBUTE_UNUSED) const { // Base class says yes. return true; } @@ -64,17 +61,13 @@ class Pass { /** * @brief Start of the pass: called before the Worker function. */ - virtual void Start(PassDataHolder* data) const { - // Unused parameter. - UNUSED(data); + virtual void Start(PassDataHolder* data ATTRIBUTE_UNUSED) const { } /** * @brief End of the pass: called after the WalkBasicBlocks function. */ - virtual void End(PassDataHolder* data) const { - // Unused parameter. - UNUSED(data); + virtual void End(PassDataHolder* data ATTRIBUTE_UNUSED) const { } /** diff --git a/compiler/dex/pass_driver.h b/compiler/dex/pass_driver.h index 8762b53af4..34a6f630f1 100644 --- a/compiler/dex/pass_driver.h +++ b/compiler/dex/pass_driver.h @@ -125,8 +125,7 @@ class PassDriver { * @brief Dispatch a patch. * Gives the ability to add logic when running the patch. */ - virtual void DispatchPass(const Pass* pass) { - UNUSED(pass); + virtual void DispatchPass(const Pass* pass ATTRIBUTE_UNUSED) { } /** @brief List of passes: provides the order to execute the passes. 
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc index db76cc6f53..b2bd6faca2 100644 --- a/compiler/dex/quick/arm/int_arm.cc +++ b/compiler/dex/quick/arm/int_arm.cc @@ -216,8 +216,7 @@ void ArmMir2Lir::GenFusedLongCmpImmBranch(BasicBlock* bb, RegLocation rl_src1, void ArmMir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code, int32_t true_val, int32_t false_val, RegStorage rs_dest, - RegisterClass dest_reg_class) { - UNUSED(dest_reg_class); + RegisterClass dest_reg_class ATTRIBUTE_UNUSED) { // TODO: Generalize the IT below to accept more than one-instruction loads. DCHECK(InexpensiveConstantInt(true_val)); DCHECK(InexpensiveConstantInt(false_val)); @@ -239,8 +238,7 @@ void ArmMir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, Condi OpEndIT(it); } -void ArmMir2Lir::GenSelect(BasicBlock* bb, MIR* mir) { - UNUSED(bb); +void ArmMir2Lir::GenSelect(BasicBlock* bb ATTRIBUTE_UNUSED, MIR* mir) { RegLocation rl_result; RegLocation rl_src = mir_graph_->GetSrc(mir, 0); RegLocation rl_dest = mir_graph_->GetDest(mir); @@ -516,9 +514,8 @@ static const MagicTable magic_table[] = { }; // Integer division by constant via reciprocal multiply (Hacker's Delight, 10-4) -bool ArmMir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, +bool ArmMir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode ATTRIBUTE_UNUSED, bool is_div, RegLocation rl_src, RegLocation rl_dest, int lit) { - UNUSED(dalvik_opcode); if ((lit < 0) || (lit >= static_cast<int>(sizeof(magic_table)/sizeof(magic_table[0])))) { return false; } @@ -728,16 +725,19 @@ bool ArmMir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) return true; } -RegLocation ArmMir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1, - RegLocation rl_src2, bool is_div, int flags) { - UNUSED(rl_dest, rl_src1, rl_src2, is_div, flags); +RegLocation ArmMir2Lir::GenDivRem(RegLocation rl_dest ATTRIBUTE_UNUSED, + RegLocation rl_src1 ATTRIBUTE_UNUSED, + RegLocation rl_src2 ATTRIBUTE_UNUSED, + bool is_div ATTRIBUTE_UNUSED, + int flags ATTRIBUTE_UNUSED) { LOG(FATAL) << "Unexpected use of GenDivRem for Arm"; UNREACHABLE(); } -RegLocation ArmMir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, - bool is_div) { - UNUSED(rl_dest, rl_src1, lit, is_div); +RegLocation ArmMir2Lir::GenDivRemLit(RegLocation rl_dest ATTRIBUTE_UNUSED, + RegLocation rl_src1 ATTRIBUTE_UNUSED, + int lit ATTRIBUTE_UNUSED, + bool is_div ATTRIBUTE_UNUSED) { LOG(FATAL) << "Unexpected use of GenDivRemLit for Arm"; UNREACHABLE(); } @@ -1160,9 +1160,8 @@ void ArmMir2Lir::GenMaddMsubInt(RegLocation rl_dest, RegLocation rl_src1, RegLoc } void ArmMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src, - RegLocation rl_result, int lit, + RegLocation rl_result, int lit ATTRIBUTE_UNUSED, int first_bit, int second_bit) { - UNUSED(lit); OpRegRegRegShift(kOpAdd, rl_result.reg, rl_src.reg, rl_src.reg, EncodeShift(kArmLsl, second_bit - first_bit)); if (first_bit != 0) { @@ -1257,9 +1256,8 @@ void ArmMir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) { StoreValueWide(rl_dest, rl_result); } -void ArmMir2Lir::GenMulLong(Instruction::Code opcode, RegLocation rl_dest, +void ArmMir2Lir::GenMulLong(Instruction::Code opcode ATTRIBUTE_UNUSED, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) { - UNUSED(opcode); /* * tmp1 = src1.hi * src2.lo; // src1.hi is no longer needed * dest = src1.lo * src2.lo; @@ -1564,8 +1562,7 @@ void ArmMir2Lir::GenArrayPut(int opt_flags, 
OpSize size, RegLocation rl_array, void ArmMir2Lir::GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src, RegLocation rl_shift, - int flags) { - UNUSED(flags); + int flags ATTRIBUTE_UNUSED) { rl_src = LoadValueWide(rl_src, kCoreReg); // Per spec, we only care about low 6 bits of shift amount. int shift_amount = mir_graph_->ConstantValue(rl_shift) & 0x3f; diff --git a/compiler/dex/quick/arm/target_arm.cc b/compiler/dex/quick/arm/target_arm.cc index 5f27338e6b..355485e03b 100644 --- a/compiler/dex/quick/arm/target_arm.cc +++ b/compiler/dex/quick/arm/target_arm.cc @@ -987,8 +987,7 @@ int ArmMir2Lir::GenDalvikArgsBulkCopy(CallInfo* info, int first, int count) { return count; } -void ArmMir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) { - UNUSED(bb); +void ArmMir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb ATTRIBUTE_UNUSED, MIR* mir) { DCHECK(MIR::DecodedInstruction::IsPseudoMirOp(mir->dalvikInsn.opcode)); RegLocation rl_src[3]; RegLocation rl_dest = mir_graph_->GetBadLoc(); diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc index 062f7aff66..c31f46b8fe 100644 --- a/compiler/dex/quick/arm/utility_arm.cc +++ b/compiler/dex/quick/arm/utility_arm.cc @@ -419,20 +419,26 @@ LIR* ArmMir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) return OpRegRegShift(op, r_dest_src1, r_src2, 0); } -LIR* ArmMir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type) { - UNUSED(r_dest, r_base, offset, move_type); +LIR* ArmMir2Lir::OpMovRegMem(RegStorage r_dest ATTRIBUTE_UNUSED, + RegStorage r_base ATTRIBUTE_UNUSED, + int offset ATTRIBUTE_UNUSED, + MoveType move_type ATTRIBUTE_UNUSED) { UNIMPLEMENTED(FATAL); UNREACHABLE(); } -LIR* ArmMir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) { - UNUSED(r_base, offset, r_src, move_type); +LIR* ArmMir2Lir::OpMovMemReg(RegStorage r_base ATTRIBUTE_UNUSED, + int offset ATTRIBUTE_UNUSED, + RegStorage r_src ATTRIBUTE_UNUSED, + MoveType move_type ATTRIBUTE_UNUSED) { UNIMPLEMENTED(FATAL); UNREACHABLE(); } -LIR* ArmMir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) { - UNUSED(op, cc, r_dest, r_src); +LIR* ArmMir2Lir::OpCondRegReg(OpKind op ATTRIBUTE_UNUSED, + ConditionCode cc ATTRIBUTE_UNUSED, + RegStorage r_dest ATTRIBUTE_UNUSED, + RegStorage r_src ATTRIBUTE_UNUSED) { LOG(FATAL) << "Unexpected use of OpCondRegReg for Arm"; UNREACHABLE(); } @@ -1243,14 +1249,17 @@ LIR* ArmMir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) { return res; } -LIR* ArmMir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) { - UNUSED(op, r_base, disp); +LIR* ArmMir2Lir::OpMem(OpKind op ATTRIBUTE_UNUSED, + RegStorage r_base ATTRIBUTE_UNUSED, + int disp ATTRIBUTE_UNUSED) { LOG(FATAL) << "Unexpected use of OpMem for Arm"; UNREACHABLE(); } -LIR* ArmMir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) { - UNUSED(trampoline); // The address of the trampoline is already loaded into r_tgt. +LIR* ArmMir2Lir::InvokeTrampoline(OpKind op, + RegStorage r_tgt, + // The address of the trampoline is already loaded into r_tgt. 
+ QuickEntrypointEnum trampoline ATTRIBUTE_UNUSED) { return OpReg(op, r_tgt); } diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc index 31cf6675af..d92dea21c2 100644 --- a/compiler/dex/quick/arm64/int_arm64.cc +++ b/compiler/dex/quick/arm64/int_arm64.cc @@ -37,14 +37,12 @@ LIR* Arm64Mir2Lir::OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage s return OpCondBranch(cond, target); } -LIR* Arm64Mir2Lir::OpIT(ConditionCode ccode, const char* guide) { - UNUSED(ccode, guide); +LIR* Arm64Mir2Lir::OpIT(ConditionCode ccode ATTRIBUTE_UNUSED, const char* guide ATTRIBUTE_UNUSED) { LOG(FATAL) << "Unexpected use of OpIT for Arm64"; UNREACHABLE(); } -void Arm64Mir2Lir::OpEndIT(LIR* it) { - UNUSED(it); +void Arm64Mir2Lir::OpEndIT(LIR* it ATTRIBUTE_UNUSED) { LOG(FATAL) << "Unexpected use of OpEndIT for Arm64"; } @@ -188,8 +186,7 @@ void Arm64Mir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, Con GenSelect(true_val, false_val, code, rs_dest, dest_reg_class); } -void Arm64Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) { - UNUSED(bb); +void Arm64Mir2Lir::GenSelect(BasicBlock* bb ATTRIBUTE_UNUSED, MIR* mir) { RegLocation rl_src = mir_graph_->GetSrc(mir, 0); rl_src = LoadValue(rl_src, rl_src.ref ? kRefReg : kCoreReg); // rl_src may be aliased with rl_result/rl_dest, so do compare early. @@ -413,9 +410,11 @@ static const MagicTable magic_table[] = { }; // Integer division by constant via reciprocal multiply (Hacker's Delight, 10-4) -bool Arm64Mir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, - RegLocation rl_src, RegLocation rl_dest, int lit) { - UNUSED(dalvik_opcode); +bool Arm64Mir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode ATTRIBUTE_UNUSED, + bool is_div, + RegLocation rl_src, + RegLocation rl_dest, + int lit) { if ((lit < 0) || (lit >= static_cast<int>(arraysize(magic_table)))) { return false; } @@ -457,9 +456,11 @@ bool Arm64Mir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_d return true; } -bool Arm64Mir2Lir::SmallLiteralDivRem64(Instruction::Code dalvik_opcode, bool is_div, - RegLocation rl_src, RegLocation rl_dest, int64_t lit) { - UNUSED(dalvik_opcode); +bool Arm64Mir2Lir::SmallLiteralDivRem64(Instruction::Code dalvik_opcode ATTRIBUTE_UNUSED, + bool is_div, + RegLocation rl_src, + RegLocation rl_dest, + int64_t lit) { if ((lit < 0) || (lit >= static_cast<int>(arraysize(magic_table)))) { return false; } @@ -599,15 +600,17 @@ bool Arm64Mir2Lir::HandleEasyDivRem64(Instruction::Code dalvik_opcode, bool is_d return true; } -bool Arm64Mir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) { - UNUSED(rl_src, rl_dest, lit); +bool Arm64Mir2Lir::EasyMultiply(RegLocation rl_src ATTRIBUTE_UNUSED, + RegLocation rl_dest ATTRIBUTE_UNUSED, + int lit ATTRIBUTE_UNUSED) { LOG(FATAL) << "Unexpected use of EasyMultiply for Arm64"; UNREACHABLE(); } -RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, - bool is_div) { - UNUSED(rl_dest, rl_src1, lit, is_div); +RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest ATTRIBUTE_UNUSED, + RegLocation rl_src1 ATTRIBUTE_UNUSED, + int lit ATTRIBUTE_UNUSED, + bool is_div ATTRIBUTE_UNUSED) { LOG(FATAL) << "Unexpected use of GenDivRemLit for Arm64"; UNREACHABLE(); } @@ -626,9 +629,11 @@ RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg1, int return rl_result; } -RegLocation Arm64Mir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1, - RegLocation rl_src2, bool is_div, int flags) { - 
UNUSED(rl_dest, rl_src1, rl_src2, is_div, flags); +RegLocation Arm64Mir2Lir::GenDivRem(RegLocation rl_dest ATTRIBUTE_UNUSED, + RegLocation rl_src1 ATTRIBUTE_UNUSED, + RegLocation rl_src2 ATTRIBUTE_UNUSED, + bool is_div ATTRIBUTE_UNUSED, + int flags ATTRIBUTE_UNUSED) { LOG(FATAL) << "Unexpected use of GenDivRem for Arm64"; UNREACHABLE(); } @@ -963,14 +968,12 @@ void Arm64Mir2Lir::OpPcRelDexCacheArrayLoad(const DexFile* dex_file, int offset, dex_cache_access_insns_.push_back(ldr); } -LIR* Arm64Mir2Lir::OpVldm(RegStorage r_base, int count) { - UNUSED(r_base, count); +LIR* Arm64Mir2Lir::OpVldm(RegStorage r_base ATTRIBUTE_UNUSED, int count ATTRIBUTE_UNUSED) { LOG(FATAL) << "Unexpected use of OpVldm for Arm64"; UNREACHABLE(); } -LIR* Arm64Mir2Lir::OpVstm(RegStorage r_base, int count) { - UNUSED(r_base, count); +LIR* Arm64Mir2Lir::OpVstm(RegStorage r_base ATTRIBUTE_UNUSED, int count ATTRIBUTE_UNUSED) { LOG(FATAL) << "Unexpected use of OpVstm for Arm64"; UNREACHABLE(); } diff --git a/compiler/dex/quick/arm64/target_arm64.cc b/compiler/dex/quick/arm64/target_arm64.cc index 6efa11e1fd..691bfd9edd 100644 --- a/compiler/dex/quick/arm64/target_arm64.cc +++ b/compiler/dex/quick/arm64/target_arm64.cc @@ -881,8 +881,7 @@ int Arm64Mir2Lir::GenDalvikArgsBulkCopy(CallInfo* /*info*/, int /*first*/, int c return count; } -void Arm64Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) { - UNUSED(bb); +void Arm64Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb ATTRIBUTE_UNUSED, MIR* mir) { DCHECK(MIR::DecodedInstruction::IsPseudoMirOp(mir->dalvikInsn.opcode)); RegLocation rl_src[3]; RegLocation rl_dest = mir_graph_->GetBadLoc(); diff --git a/compiler/dex/quick/arm64/utility_arm64.cc b/compiler/dex/quick/arm64/utility_arm64.cc index 483231f931..58769ea9cc 100644 --- a/compiler/dex/quick/arm64/utility_arm64.cc +++ b/compiler/dex/quick/arm64/utility_arm64.cc @@ -672,22 +672,26 @@ LIR* Arm64Mir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2 } } -LIR* Arm64Mir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, - MoveType move_type) { - UNUSED(r_dest, r_base, offset, move_type); +LIR* Arm64Mir2Lir::OpMovRegMem(RegStorage r_dest ATTRIBUTE_UNUSED, + RegStorage r_base ATTRIBUTE_UNUSED, + int offset ATTRIBUTE_UNUSED, + MoveType move_type ATTRIBUTE_UNUSED) { UNIMPLEMENTED(FATAL); UNREACHABLE(); } -LIR* Arm64Mir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, - MoveType move_type) { - UNUSED(r_base, offset, r_src, move_type); +LIR* Arm64Mir2Lir::OpMovMemReg(RegStorage r_base ATTRIBUTE_UNUSED, + int offset ATTRIBUTE_UNUSED, + RegStorage r_src ATTRIBUTE_UNUSED, + MoveType move_type ATTRIBUTE_UNUSED) { UNIMPLEMENTED(FATAL); return nullptr; } -LIR* Arm64Mir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) { - UNUSED(op, cc, r_dest, r_src); +LIR* Arm64Mir2Lir::OpCondRegReg(OpKind op ATTRIBUTE_UNUSED, + ConditionCode cc ATTRIBUTE_UNUSED, + RegStorage r_dest ATTRIBUTE_UNUSED, + RegStorage r_src ATTRIBUTE_UNUSED) { LOG(FATAL) << "Unexpected use of OpCondRegReg for Arm64"; UNREACHABLE(); } @@ -1381,14 +1385,15 @@ LIR* Arm64Mir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage return store; } -LIR* Arm64Mir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) { - UNUSED(r_dest, r_src); +LIR* Arm64Mir2Lir::OpFpRegCopy(RegStorage r_dest ATTRIBUTE_UNUSED, + RegStorage r_src ATTRIBUTE_UNUSED) { LOG(FATAL) << "Unexpected use of OpFpRegCopy for Arm64"; UNREACHABLE(); } -LIR* Arm64Mir2Lir::OpMem(OpKind op, 
RegStorage r_base, int disp) { - UNUSED(op, r_base, disp); +LIR* Arm64Mir2Lir::OpMem(OpKind op ATTRIBUTE_UNUSED, + RegStorage r_base ATTRIBUTE_UNUSED, + int disp ATTRIBUTE_UNUSED) { LOG(FATAL) << "Unexpected use of OpMem for Arm64"; UNREACHABLE(); } diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc index dbcc868657..cde99b3fae 100644 --- a/compiler/dex/quick/codegen_util.cc +++ b/compiler/dex/quick/codegen_util.cc @@ -992,8 +992,7 @@ void Mir2Lir::DumpPackedSwitchTable(const uint16_t* table) { } /* Set up special LIR to mark a Dalvik byte-code instruction start for pretty printing */ -void Mir2Lir::MarkBoundary(DexOffset offset, const char* inst_str) { - UNUSED(offset); +void Mir2Lir::MarkBoundary(DexOffset offset ATTRIBUTE_UNUSED, const char* inst_str) { // NOTE: only used for debug listings. NewLIR1(kPseudoDalvikByteCodeBoundary, WrapPointer(ArenaStrdup(inst_str))); } @@ -1358,8 +1357,8 @@ RegLocation Mir2Lir::NarrowRegLoc(RegLocation loc) { return loc; } -void Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) { - UNUSED(bb, mir); +void Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb ATTRIBUTE_UNUSED, + MIR* mir ATTRIBUTE_UNUSED) { LOG(FATAL) << "Unknown MIR opcode not supported on this architecture"; UNREACHABLE(); } diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc index 2a1d64425b..2b60a51e22 100644 --- a/compiler/dex/quick/gen_common.cc +++ b/compiler/dex/quick/gen_common.cc @@ -2102,15 +2102,15 @@ void Mir2Lir::GenSuspendTestAndBranch(int opt_flags, LIR* target) { } /* Call out to helper assembly routine that will null check obj and then lock it. */ -void Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) { - UNUSED(opt_flags); // TODO: avoid null check with specialized non-null helper. +void Mir2Lir::GenMonitorEnter(int opt_flags ATTRIBUTE_UNUSED, RegLocation rl_src) { + // TODO: avoid null check with specialized non-null helper. FlushAllRegs(); CallRuntimeHelperRegLocation(kQuickLockObject, rl_src, true); } /* Call out to helper assembly routine that will null check obj and then unlock it. */ -void Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) { - UNUSED(opt_flags); // TODO: avoid null check with specialized non-null helper. +void Mir2Lir::GenMonitorExit(int opt_flags ATTRIBUTE_UNUSED, RegLocation rl_src) { + // TODO: avoid null check with specialized non-null helper. FlushAllRegs(); CallRuntimeHelperRegLocation(kQuickUnlockObject, rl_src, true); } diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc index 3c5c2fe010..422d82ffa2 100755 --- a/compiler/dex/quick/gen_invoke.cc +++ b/compiler/dex/quick/gen_invoke.cc @@ -521,10 +521,9 @@ static bool CommonCallCodeLoadCodePointerIntoInvokeTgt(const RegStorage* alt_fro * kArg1 here rather than the standard GenDalvikArgs. 
*/ static int NextVCallInsn(CompilationUnit* cu, CallInfo* info, - int state, const MethodReference& target_method, + int state, const MethodReference& target_method ATTRIBUTE_UNUSED, uint32_t method_idx, uintptr_t, uintptr_t, InvokeType) { - UNUSED(target_method); Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get()); /* * This is the fast path in which the target virtual method is @@ -607,10 +606,12 @@ static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state, return state + 1; } -static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info, - QuickEntrypointEnum trampoline, int state, - const MethodReference& target_method, uint32_t method_idx) { - UNUSED(info, method_idx); +static int NextInvokeInsnSP(CompilationUnit* cu, + CallInfo* info ATTRIBUTE_UNUSED, + QuickEntrypointEnum trampoline, + int state, + const MethodReference& target_method, + uint32_t method_idx ATTRIBUTE_UNUSED) { Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get()); /* @@ -1266,35 +1267,31 @@ bool Mir2Lir::GenInlinedAbsLong(CallInfo* info) { return true; } -bool Mir2Lir::GenInlinedReverseBits(CallInfo* info, OpSize size) { +bool Mir2Lir::GenInlinedReverseBits(CallInfo* info ATTRIBUTE_UNUSED, OpSize size ATTRIBUTE_UNUSED) { // Currently implemented only for ARM64. - UNUSED(info, size); return false; } -bool Mir2Lir::GenInlinedMinMaxFP(CallInfo* info, bool is_min, bool is_double) { +bool Mir2Lir::GenInlinedMinMaxFP(CallInfo* info ATTRIBUTE_UNUSED, + bool is_min ATTRIBUTE_UNUSED, + bool is_double ATTRIBUTE_UNUSED) { // Currently implemented only for ARM64. - UNUSED(info, is_min, is_double); return false; } -bool Mir2Lir::GenInlinedCeil(CallInfo* info) { - UNUSED(info); +bool Mir2Lir::GenInlinedCeil(CallInfo* info ATTRIBUTE_UNUSED) { return false; } -bool Mir2Lir::GenInlinedFloor(CallInfo* info) { - UNUSED(info); +bool Mir2Lir::GenInlinedFloor(CallInfo* info ATTRIBUTE_UNUSED) { return false; } -bool Mir2Lir::GenInlinedRint(CallInfo* info) { - UNUSED(info); +bool Mir2Lir::GenInlinedRint(CallInfo* info ATTRIBUTE_UNUSED) { return false; } -bool Mir2Lir::GenInlinedRound(CallInfo* info, bool is_double) { - UNUSED(info, is_double); +bool Mir2Lir::GenInlinedRound(CallInfo* info ATTRIBUTE_UNUSED, bool is_double ATTRIBUTE_UNUSED) { return false; } @@ -1328,8 +1325,7 @@ bool Mir2Lir::GenInlinedDoubleCvt(CallInfo* info) { return true; } -bool Mir2Lir::GenInlinedArrayCopyCharArray(CallInfo* info) { - UNUSED(info); +bool Mir2Lir::GenInlinedArrayCopyCharArray(CallInfo* info ATTRIBUTE_UNUSED) { return false; } diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc index 8863c058a1..4a736f3d93 100644 --- a/compiler/dex/quick/mips/call_mips.cc +++ b/compiler/dex/quick/mips/call_mips.cc @@ -32,9 +32,10 @@ namespace art { -bool MipsMir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& special) { +bool MipsMir2Lir::GenSpecialCase(BasicBlock* bb ATTRIBUTE_UNUSED, + MIR* mir ATTRIBUTE_UNUSED, + const InlineMethod& special ATTRIBUTE_UNUSED) { // TODO - UNUSED(bb, mir, special); return false; } diff --git a/compiler/dex/quick/mips/fp_mips.cc b/compiler/dex/quick/mips/fp_mips.cc index 45fd1a9433..52706df7a5 100644 --- a/compiler/dex/quick/mips/fp_mips.cc +++ b/compiler/dex/quick/mips/fp_mips.cc @@ -115,17 +115,17 @@ void MipsMir2Lir::GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest StoreValueWide(rl_dest, rl_result); } -void MipsMir2Lir::GenMultiplyByConstantFloat(RegLocation rl_dest, RegLocation rl_src1, - int32_t constant) { +void 
MipsMir2Lir::GenMultiplyByConstantFloat(RegLocation rl_dest ATTRIBUTE_UNUSED, + RegLocation rl_src1 ATTRIBUTE_UNUSED, + int32_t constant ATTRIBUTE_UNUSED) { // TODO: need mips implementation. - UNUSED(rl_dest, rl_src1, constant); LOG(FATAL) << "Unimplemented GenMultiplyByConstantFloat in mips"; } -void MipsMir2Lir::GenMultiplyByConstantDouble(RegLocation rl_dest, RegLocation rl_src1, - int64_t constant) { +void MipsMir2Lir::GenMultiplyByConstantDouble(RegLocation rl_dest ATTRIBUTE_UNUSED, + RegLocation rl_src1 ATTRIBUTE_UNUSED, + int64_t constant ATTRIBUTE_UNUSED) { // TODO: need mips implementation. - UNUSED(rl_dest, rl_src1, constant); LOG(FATAL) << "Unimplemented GenMultiplyByConstantDouble in mips"; } @@ -254,8 +254,10 @@ void MipsMir2Lir::GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLoc StoreValue(rl_dest, rl_result); } -void MipsMir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double) { - UNUSED(bb, mir, gt_bias, is_double); +void MipsMir2Lir::GenFusedFPCmpBranch(BasicBlock* bb ATTRIBUTE_UNUSED, + MIR* mir ATTRIBUTE_UNUSED, + bool gt_bias ATTRIBUTE_UNUSED, + bool is_double ATTRIBUTE_UNUSED) { UNIMPLEMENTED(FATAL) << "Need codegen for fused fp cmp branch"; } @@ -288,9 +290,10 @@ void MipsMir2Lir::GenNegDouble(RegLocation rl_dest, RegLocation rl_src) { StoreValueWide(rl_dest, rl_result); } -bool MipsMir2Lir::GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long) { +bool MipsMir2Lir::GenInlinedMinMax(CallInfo* info ATTRIBUTE_UNUSED, + bool is_min ATTRIBUTE_UNUSED, + bool is_long ATTRIBUTE_UNUSED) { // TODO: need Mips implementation. - UNUSED(info, is_min, is_long); return false; } diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc index 1099303f7d..8ca53ea228 100644 --- a/compiler/dex/quick/mips/int_mips.cc +++ b/compiler/dex/quick/mips/int_mips.cc @@ -279,8 +279,7 @@ void MipsMir2Lir::OpRegCopyWide(RegStorage r_dest, RegStorage r_src) { void MipsMir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code, int32_t true_val, int32_t false_val, RegStorage rs_dest, - RegisterClass dest_reg_class) { - UNUSED(dest_reg_class); + RegisterClass dest_reg_class ATTRIBUTE_UNUSED) { // Implement as a branch-over. // TODO: Conditional move? 
LoadConstant(rs_dest, true_val); @@ -290,13 +289,12 @@ void MipsMir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, Cond ne_branchover->target = target_label; } -void MipsMir2Lir::GenSelect(BasicBlock* bb, MIR* mir) { - UNUSED(bb, mir); +void MipsMir2Lir::GenSelect(BasicBlock* bb ATTRIBUTE_UNUSED, MIR* mir ATTRIBUTE_UNUSED) { UNIMPLEMENTED(FATAL) << "Need codegen for select"; } -void MipsMir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) { - UNUSED(bb, mir); +void MipsMir2Lir::GenFusedLongCmpBranch(BasicBlock* bb ATTRIBUTE_UNUSED, + MIR* mir ATTRIBUTE_UNUSED) { UNIMPLEMENTED(FATAL) << "Need codegen for fused long cmp branch"; } @@ -327,39 +325,40 @@ RegLocation MipsMir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg1, int return rl_result; } -RegLocation MipsMir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2, - bool is_div, int flags) { - UNUSED(rl_dest, rl_src1, rl_src2, is_div, flags); +RegLocation MipsMir2Lir::GenDivRem(RegLocation rl_dest ATTRIBUTE_UNUSED, + RegLocation rl_src1 ATTRIBUTE_UNUSED, + RegLocation rl_src2 ATTRIBUTE_UNUSED, + bool is_div ATTRIBUTE_UNUSED, + int flags ATTRIBUTE_UNUSED) { LOG(FATAL) << "Unexpected use of GenDivRem for Mips"; UNREACHABLE(); } -RegLocation MipsMir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, - bool is_div) { - UNUSED(rl_dest, rl_src1, lit, is_div); +RegLocation MipsMir2Lir::GenDivRemLit(RegLocation rl_dest ATTRIBUTE_UNUSED, + RegLocation rl_src1 ATTRIBUTE_UNUSED, + int lit ATTRIBUTE_UNUSED, + bool is_div ATTRIBUTE_UNUSED) { LOG(FATAL) << "Unexpected use of GenDivRemLit for Mips"; UNREACHABLE(); } -bool MipsMir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) { - UNUSED(info, is_long, is_object); +bool MipsMir2Lir::GenInlinedCas(CallInfo* info ATTRIBUTE_UNUSED, + bool is_long ATTRIBUTE_UNUSED, + bool is_object ATTRIBUTE_UNUSED) { return false; } -bool MipsMir2Lir::GenInlinedAbsFloat(CallInfo* info) { - UNUSED(info); +bool MipsMir2Lir::GenInlinedAbsFloat(CallInfo* info ATTRIBUTE_UNUSED) { // TODO: add Mips implementation. return false; } -bool MipsMir2Lir::GenInlinedAbsDouble(CallInfo* info) { - UNUSED(info); +bool MipsMir2Lir::GenInlinedAbsDouble(CallInfo* info ATTRIBUTE_UNUSED) { // TODO: add Mips implementation. 
return false; } -bool MipsMir2Lir::GenInlinedSqrt(CallInfo* info) { - UNUSED(info); +bool MipsMir2Lir::GenInlinedSqrt(CallInfo* info ATTRIBUTE_UNUSED) { return false; } @@ -408,27 +407,26 @@ bool MipsMir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) { return true; } -void MipsMir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) { - UNUSED(reg, target); +void MipsMir2Lir::OpPcRelLoad(RegStorage reg ATTRIBUTE_UNUSED, LIR* target ATTRIBUTE_UNUSED) { LOG(FATAL) << "Unexpected use of OpPcRelLoad for Mips"; UNREACHABLE(); } -LIR* MipsMir2Lir::OpVldm(RegStorage r_base, int count) { - UNUSED(r_base, count); +LIR* MipsMir2Lir::OpVldm(RegStorage r_base ATTRIBUTE_UNUSED, int count ATTRIBUTE_UNUSED) { LOG(FATAL) << "Unexpected use of OpVldm for Mips"; UNREACHABLE(); } -LIR* MipsMir2Lir::OpVstm(RegStorage r_base, int count) { - UNUSED(r_base, count); +LIR* MipsMir2Lir::OpVstm(RegStorage r_base ATTRIBUTE_UNUSED, int count ATTRIBUTE_UNUSED) { LOG(FATAL) << "Unexpected use of OpVstm for Mips"; UNREACHABLE(); } -void MipsMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit, - int first_bit, int second_bit) { - UNUSED(lit); +void MipsMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src, + RegLocation rl_result, + int lit ATTRIBUTE_UNUSED, + int first_bit, + int second_bit) { RegStorage t_reg = AllocTemp(); OpRegRegImm(kOpLsl, t_reg, rl_src.reg, second_bit - first_bit); OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, t_reg); @@ -462,27 +460,28 @@ LIR* MipsMir2Lir::OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* targ return OpCmpImmBranch(c_code, reg, 0, target); } -bool MipsMir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, - RegLocation rl_src, RegLocation rl_dest, int lit) { - UNUSED(dalvik_opcode, is_div, rl_src, rl_dest, lit); - LOG(FATAL) << "Unexpected use of smallLiteralDive in Mips"; +bool MipsMir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode ATTRIBUTE_UNUSED, + bool is_div ATTRIBUTE_UNUSED, + RegLocation rl_src ATTRIBUTE_UNUSED, + RegLocation rl_dest ATTRIBUTE_UNUSED, + int lit ATTRIBUTE_UNUSED) { + LOG(FATAL) << "Unexpected use of smallLiteralDivRem in Mips"; UNREACHABLE(); } -bool MipsMir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) { - UNUSED(rl_src, rl_dest, lit); +bool MipsMir2Lir::EasyMultiply(RegLocation rl_src ATTRIBUTE_UNUSED, + RegLocation rl_dest ATTRIBUTE_UNUSED, + int lit ATTRIBUTE_UNUSED) { LOG(FATAL) << "Unexpected use of easyMultiply in Mips"; UNREACHABLE(); } -LIR* MipsMir2Lir::OpIT(ConditionCode cond, const char* guide) { - UNUSED(cond, guide); +LIR* MipsMir2Lir::OpIT(ConditionCode cond ATTRIBUTE_UNUSED, const char* guide ATTRIBUTE_UNUSED) { LOG(FATAL) << "Unexpected use of OpIT in Mips"; UNREACHABLE(); } -void MipsMir2Lir::OpEndIT(LIR* it) { - UNUSED(it); +void MipsMir2Lir::OpEndIT(LIR* it ATTRIBUTE_UNUSED) { LOG(FATAL) << "Unexpected use of OpEndIT in Mips"; } @@ -621,9 +620,12 @@ void MipsMir2Lir::GenMulLong(RegLocation rl_dest, RegLocation rl_src1, RegLocati StoreValueWide(rl_dest, rl_result); } -void MipsMir2Lir::GenDivRemLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1, - RegLocation rl_src2, bool is_div, int flags) { - UNUSED(opcode); +void MipsMir2Lir::GenDivRemLong(Instruction::Code opcode ATTRIBUTE_UNUSED, + RegLocation rl_dest, + RegLocation rl_src1, + RegLocation rl_src2, + bool is_div, + int flags) { // TODO: Implement easy div/rem? 
rl_src1 = LoadValueWide(rl_src1, kCoreReg); rl_src2 = LoadValueWide(rl_src2, kCoreReg); @@ -855,9 +857,11 @@ void MipsMir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest, StoreValueWide(rl_dest, rl_result); } -void MipsMir2Lir::GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest, - RegLocation rl_src1, RegLocation rl_shift, int flags) { - UNUSED(flags); +void MipsMir2Lir::GenShiftImmOpLong(Instruction::Code opcode, + RegLocation rl_dest, + RegLocation rl_src1, + RegLocation rl_shift, + int flags ATTRIBUTE_UNUSED) { if (!cu_->target64) { // Default implementation is just to ignore the constant case. GenShiftOpLong(opcode, rl_dest, rl_src1, rl_shift); diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc index ec2475a7f7..372fe2b599 100644 --- a/compiler/dex/quick/mips/utility_mips.cc +++ b/compiler/dex/quick/mips/utility_mips.cc @@ -103,18 +103,15 @@ bool MipsMir2Lir::InexpensiveConstantInt(int32_t value) { return ((value == 0) || IsUint<16>(value) || IsInt<16>(value)); } -bool MipsMir2Lir::InexpensiveConstantFloat(int32_t value) { - UNUSED(value); +bool MipsMir2Lir::InexpensiveConstantFloat(int32_t value ATTRIBUTE_UNUSED) { return false; // TUNING } -bool MipsMir2Lir::InexpensiveConstantLong(int64_t value) { - UNUSED(value); +bool MipsMir2Lir::InexpensiveConstantLong(int64_t value ATTRIBUTE_UNUSED) { return false; // TUNING } -bool MipsMir2Lir::InexpensiveConstantDouble(int64_t value) { - UNUSED(value); +bool MipsMir2Lir::InexpensiveConstantDouble(int64_t value ATTRIBUTE_UNUSED) { return false; // TUNING } @@ -520,21 +517,26 @@ LIR* MipsMir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) return NewLIR2(opcode, r_dest_src1.GetReg(), r_src2.GetReg()); } -LIR* MipsMir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, - MoveType move_type) { - UNUSED(r_dest, r_base, offset, move_type); +LIR* MipsMir2Lir::OpMovRegMem(RegStorage r_dest ATTRIBUTE_UNUSED, + RegStorage r_base ATTRIBUTE_UNUSED, + int offset ATTRIBUTE_UNUSED, + MoveType move_type ATTRIBUTE_UNUSED) { UNIMPLEMENTED(FATAL); UNREACHABLE(); } -LIR* MipsMir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) { - UNUSED(r_base, offset, r_src, move_type); +LIR* MipsMir2Lir::OpMovMemReg(RegStorage r_base ATTRIBUTE_UNUSED, + int offset ATTRIBUTE_UNUSED, + RegStorage r_src ATTRIBUTE_UNUSED, + MoveType move_type ATTRIBUTE_UNUSED) { UNIMPLEMENTED(FATAL); UNREACHABLE(); } -LIR* MipsMir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) { - UNUSED(op, cc, r_dest, r_src); +LIR* MipsMir2Lir::OpCondRegReg(OpKind op ATTRIBUTE_UNUSED, + ConditionCode cc ATTRIBUTE_UNUSED, + RegStorage r_dest ATTRIBUTE_UNUSED, + RegStorage r_src ATTRIBUTE_UNUSED) { LOG(FATAL) << "Unexpected use of OpCondRegReg for MIPS"; UNREACHABLE(); } @@ -1031,14 +1033,14 @@ LIR* MipsMir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage return store; } -LIR* MipsMir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) { - UNUSED(op, r_base, disp); +LIR* MipsMir2Lir::OpMem(OpKind op ATTRIBUTE_UNUSED, + RegStorage r_base ATTRIBUTE_UNUSED, + int disp ATTRIBUTE_UNUSED) { LOG(FATAL) << "Unexpected use of OpMem for MIPS"; UNREACHABLE(); } -LIR* MipsMir2Lir::OpCondBranch(ConditionCode cc, LIR* target) { - UNUSED(cc, target); +LIR* MipsMir2Lir::OpCondBranch(ConditionCode cc ATTRIBUTE_UNUSED, LIR* target ATTRIBUTE_UNUSED) { LOG(FATAL) << "Unexpected use of OpCondBranch for MIPS"; UNREACHABLE(); } diff --git 
a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc index c50246d182..8da386368b 100644 --- a/compiler/dex/quick/mir_to_lir.cc +++ b/compiler/dex/quick/mir_to_lir.cc @@ -1411,8 +1411,7 @@ void Mir2Lir::CheckRegLocationImpl(RegLocation rl, bool fail, bool report) const rl.ref ? RefCheck::kCheckRef : RefCheck::kCheckNotRef, FPCheck::kIgnoreFP, fail, report); } -size_t Mir2Lir::GetInstructionOffset(LIR* lir) { - UNUSED(lir); +size_t Mir2Lir::GetInstructionOffset(LIR* lir ATTRIBUTE_UNUSED) { UNIMPLEMENTED(FATAL) << "Unsupported GetInstructionOffset()"; UNREACHABLE(); } diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h index 4e3aab2f0b..a0db1e87ba 100644 --- a/compiler/dex/quick/mir_to_lir.h +++ b/compiler/dex/quick/mir_to_lir.h @@ -1463,8 +1463,7 @@ class Mir2Lir { virtual bool InexpensiveConstantFloat(int32_t value) = 0; virtual bool InexpensiveConstantLong(int64_t value) = 0; virtual bool InexpensiveConstantDouble(int64_t value) = 0; - virtual bool InexpensiveConstantInt(int32_t value, Instruction::Code opcode) { - UNUSED(opcode); + virtual bool InexpensiveConstantInt(int32_t value, Instruction::Code opcode ATTRIBUTE_UNUSED) { return InexpensiveConstantInt(value); } diff --git a/compiler/dex/quick/quick_compiler.cc b/compiler/dex/quick/quick_compiler.cc index c2fe5538b7..6673ea8ac5 100644 --- a/compiler/dex/quick/quick_compiler.cc +++ b/compiler/dex/quick/quick_compiler.cc @@ -851,8 +851,8 @@ uintptr_t QuickCompiler::GetEntryPointOf(ArtMethod* method) const { InstructionSetPointerSize(GetCompilerDriver()->GetInstructionSet()))); } -Mir2Lir* QuickCompiler::GetCodeGenerator(CompilationUnit* cu, void* compilation_unit) { - UNUSED(compilation_unit); +Mir2Lir* QuickCompiler::GetCodeGenerator(CompilationUnit* cu, + void* compilation_unit ATTRIBUTE_UNUSED) { Mir2Lir* mir_to_lir = nullptr; switch (cu->instruction_set) { #ifdef ART_ENABLE_CODEGEN_arm diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc index 8ec86fa56c..d9d0434e8a 100644 --- a/compiler/dex/quick/ralloc_util.cc +++ b/compiler/dex/quick/ralloc_util.cc @@ -320,15 +320,13 @@ RegStorage Mir2Lir::AllocPreservedFpReg(int s_reg) { } // TODO: this is Thumb2 only. Remove when DoPromotion refactored. -RegStorage Mir2Lir::AllocPreservedDouble(int s_reg) { - UNUSED(s_reg); +RegStorage Mir2Lir::AllocPreservedDouble(int s_reg ATTRIBUTE_UNUSED) { UNIMPLEMENTED(FATAL) << "Unexpected use of AllocPreservedDouble"; UNREACHABLE(); } // TODO: this is Thumb2 only. Remove when DoPromotion refactored. -RegStorage Mir2Lir::AllocPreservedSingle(int s_reg) { - UNUSED(s_reg); +RegStorage Mir2Lir::AllocPreservedSingle(int s_reg ATTRIBUTE_UNUSED) { UNIMPLEMENTED(FATAL) << "Unexpected use of AllocPreservedSingle"; UNREACHABLE(); } @@ -1553,8 +1551,7 @@ int Mir2Lir::GetSRegHi(int lowSreg) { return (lowSreg == INVALID_SREG) ? INVALID_SREG : lowSreg + 1; } -bool Mir2Lir::LiveOut(int s_reg) { - UNUSED(s_reg); +bool Mir2Lir::LiveOut(int s_reg ATTRIBUTE_UNUSED) { // For now. return true; } diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc index 12523aca76..64becb9b85 100644 --- a/compiler/dex/quick/x86/assemble_x86.cc +++ b/compiler/dex/quick/x86/assemble_x86.cc @@ -1629,8 +1629,8 @@ void X86Mir2Lir::EmitUnimplemented(const X86EncodingMap* entry, LIR* lir) { * instruction. In those cases we will try to substitute a new code * sequence or request that the trace be shortened and retried. 
*/ -AssemblerStatus X86Mir2Lir::AssembleInstructions(LIR* first_lir_insn, CodeOffset start_addr) { - UNUSED(start_addr); +AssemblerStatus X86Mir2Lir::AssembleInstructions(LIR* first_lir_insn, + CodeOffset start_addr ATTRIBUTE_UNUSED) { LIR *lir; AssemblerStatus res = kSuccess; // Assume success diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc index 8e81746db5..b11d41caf0 100755 --- a/compiler/dex/quick/x86/fp_x86.cc +++ b/compiler/dex/quick/x86/fp_x86.cc @@ -124,17 +124,17 @@ void X86Mir2Lir::GenArithOpDouble(Instruction::Code opcode, StoreValueWide(rl_dest, rl_result); } -void X86Mir2Lir::GenMultiplyByConstantFloat(RegLocation rl_dest, RegLocation rl_src1, - int32_t constant) { +void X86Mir2Lir::GenMultiplyByConstantFloat(RegLocation rl_dest ATTRIBUTE_UNUSED, + RegLocation rl_src1 ATTRIBUTE_UNUSED, + int32_t constant ATTRIBUTE_UNUSED) { // TODO: need x86 implementation. - UNUSED(rl_dest, rl_src1, constant); LOG(FATAL) << "Unimplemented GenMultiplyByConstantFloat in x86"; } -void X86Mir2Lir::GenMultiplyByConstantDouble(RegLocation rl_dest, RegLocation rl_src1, - int64_t constant) { +void X86Mir2Lir::GenMultiplyByConstantDouble(RegLocation rl_dest ATTRIBUTE_UNUSED, + RegLocation rl_src1 ATTRIBUTE_UNUSED, + int64_t constant ATTRIBUTE_UNUSED) { // TODO: need x86 implementation. - UNUSED(rl_dest, rl_src1, constant); LOG(FATAL) << "Unimplemented GenMultiplyByConstantDouble in x86"; } diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc index ecd23e9ef0..a8706c3b09 100755 --- a/compiler/dex/quick/x86/int_x86.cc +++ b/compiler/dex/quick/x86/int_x86.cc @@ -270,8 +270,7 @@ void X86Mir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, Condi } } -void X86Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) { - UNUSED(bb); +void X86Mir2Lir::GenSelect(BasicBlock* bb ATTRIBUTE_UNUSED, MIR* mir) { RegLocation rl_result; RegLocation rl_src = mir_graph_->GetSrc(mir, 0); RegLocation rl_dest = mir_graph_->GetDest(mir); @@ -597,8 +596,10 @@ void X86Mir2Lir::CalculateMagicAndShift(int64_t divisor, int64_t& magic, int& sh shift = (is_long) ? p - 64 : p - 32; } -RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div) { - UNUSED(rl_dest, reg_lo, lit, is_div); +RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest ATTRIBUTE_UNUSED, + RegStorage reg_lo ATTRIBUTE_UNUSED, + int lit ATTRIBUTE_UNUSED, + bool is_div ATTRIBUTE_UNUSED) { LOG(FATAL) << "Unexpected use of GenDivRemLit for x86"; UNREACHABLE(); } @@ -766,16 +767,19 @@ RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src, return rl_result; } -RegLocation X86Mir2Lir::GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, - bool is_div) { - UNUSED(rl_dest, reg_lo, reg_hi, is_div); +RegLocation X86Mir2Lir::GenDivRem(RegLocation rl_dest ATTRIBUTE_UNUSED, + RegStorage reg_lo ATTRIBUTE_UNUSED, + RegStorage reg_hi ATTRIBUTE_UNUSED, + bool is_div ATTRIBUTE_UNUSED) { LOG(FATAL) << "Unexpected use of GenDivRem for x86"; UNREACHABLE(); } -RegLocation X86Mir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1, - RegLocation rl_src2, bool is_div, int flags) { - UNUSED(rl_dest); +RegLocation X86Mir2Lir::GenDivRem(RegLocation rl_dest ATTRIBUTE_UNUSED, + RegLocation rl_src1, + RegLocation rl_src2, + bool is_div, + int flags) { // We have to use fixed registers, so flush all the temps. // Prepare for explicit register usage. 
@@ -1449,22 +1453,21 @@ void X86Mir2Lir::OpPcRelDexCacheArrayLoad(const DexFile* dex_file, int offset, R } } -LIR* X86Mir2Lir::OpVldm(RegStorage r_base, int count) { - UNUSED(r_base, count); +LIR* X86Mir2Lir::OpVldm(RegStorage r_base ATTRIBUTE_UNUSED, int count ATTRIBUTE_UNUSED) { LOG(FATAL) << "Unexpected use of OpVldm for x86"; UNREACHABLE(); } -LIR* X86Mir2Lir::OpVstm(RegStorage r_base, int count) { - UNUSED(r_base, count); +LIR* X86Mir2Lir::OpVstm(RegStorage r_base ATTRIBUTE_UNUSED, int count ATTRIBUTE_UNUSED) { LOG(FATAL) << "Unexpected use of OpVstm for x86"; UNREACHABLE(); } void X86Mir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src, - RegLocation rl_result, int lit, - int first_bit, int second_bit) { - UNUSED(lit); + RegLocation rl_result, + int lit ATTRIBUTE_UNUSED, + int first_bit, + int second_bit) { RegStorage t_reg = AllocTemp(); OpRegRegImm(kOpLsl, t_reg, rl_src.reg, second_bit - first_bit); OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, t_reg); @@ -1595,27 +1598,28 @@ LIR* X86Mir2Lir::OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* targe return OpCondBranch(c_code, target); } -bool X86Mir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, - RegLocation rl_src, RegLocation rl_dest, int lit) { - UNUSED(dalvik_opcode, is_div, rl_src, rl_dest, lit); - LOG(FATAL) << "Unexpected use of smallLiteralDive in x86"; +bool X86Mir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode ATTRIBUTE_UNUSED, + bool is_div ATTRIBUTE_UNUSED, + RegLocation rl_src ATTRIBUTE_UNUSED, + RegLocation rl_dest ATTRIBUTE_UNUSED, + int lit ATTRIBUTE_UNUSED) { + LOG(FATAL) << "Unexpected use of smallLiteralDivRem in x86"; UNREACHABLE(); } -bool X86Mir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) { - UNUSED(rl_src, rl_dest, lit); +bool X86Mir2Lir::EasyMultiply(RegLocation rl_src ATTRIBUTE_UNUSED, + RegLocation rl_dest ATTRIBUTE_UNUSED, + int lit ATTRIBUTE_UNUSED) { LOG(FATAL) << "Unexpected use of easyMultiply in x86"; UNREACHABLE(); } -LIR* X86Mir2Lir::OpIT(ConditionCode cond, const char* guide) { - UNUSED(cond, guide); +LIR* X86Mir2Lir::OpIT(ConditionCode cond ATTRIBUTE_UNUSED, const char* guide ATTRIBUTE_UNUSED) { LOG(FATAL) << "Unexpected use of OpIT in x86"; UNREACHABLE(); } -void X86Mir2Lir::OpEndIT(LIR* it) { - UNUSED(it); +void X86Mir2Lir::OpEndIT(LIR* it ATTRIBUTE_UNUSED) { LOG(FATAL) << "Unexpected use of OpEndIT in x86"; UNREACHABLE(); } @@ -1634,8 +1638,10 @@ void X86Mir2Lir::GenImulRegImm(RegStorage dest, RegStorage src, int val) { } } -void X86Mir2Lir::GenImulMemImm(RegStorage dest, int sreg, int displacement, int val) { - UNUSED(sreg); +void X86Mir2Lir::GenImulMemImm(RegStorage dest, + int sreg ATTRIBUTE_UNUSED, + int displacement, + int val) { // All memory accesses below reference dalvik regs. 
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); @@ -2548,9 +2554,11 @@ void X86Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array, } } -RegLocation X86Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest, - RegLocation rl_src, int shift_amount, int flags) { - UNUSED(flags); +RegLocation X86Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode, + RegLocation rl_dest, + RegLocation rl_src, + int shift_amount, + int flags ATTRIBUTE_UNUSED) { RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true); if (cu_->target64) { OpKind op = static_cast<OpKind>(0); /* Make gcc happy */ diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc index c62cd47315..25fb8869b6 100755 --- a/compiler/dex/quick/x86/target_x86.cc +++ b/compiler/dex/quick/x86/target_x86.cc @@ -254,8 +254,7 @@ RegStorage X86Mir2Lir::TargetReg32(SpecialTargetRegister reg) const { : RegStorage32FromSpecialTargetRegister_Target32[reg]; } -RegStorage X86Mir2Lir::TargetReg(SpecialTargetRegister reg) { - UNUSED(reg); +RegStorage X86Mir2Lir::TargetReg(SpecialTargetRegister reg ATTRIBUTE_UNUSED) { LOG(FATAL) << "Do not use this function!!!"; UNREACHABLE(); } @@ -861,8 +860,7 @@ Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph, } // Not used in x86(-64) -RegStorage X86Mir2Lir::LoadHelper(QuickEntrypointEnum trampoline) { - UNUSED(trampoline); +RegStorage X86Mir2Lir::LoadHelper(QuickEntrypointEnum trampoline ATTRIBUTE_UNUSED) { LOG(FATAL) << "Unexpected use of LoadHelper in x86"; UNREACHABLE(); } @@ -2323,13 +2321,11 @@ void X86Mir2Lir::GenSetVector(MIR* mir) { } } -void X86Mir2Lir::GenPackedArrayGet(BasicBlock* bb, MIR* mir) { - UNUSED(bb, mir); +void X86Mir2Lir::GenPackedArrayGet(BasicBlock* bb ATTRIBUTE_UNUSED, MIR* mir ATTRIBUTE_UNUSED) { UNIMPLEMENTED(FATAL) << "Extended opcode kMirOpPackedArrayGet not supported."; } -void X86Mir2Lir::GenPackedArrayPut(BasicBlock* bb, MIR* mir) { - UNUSED(bb, mir); +void X86Mir2Lir::GenPackedArrayPut(BasicBlock* bb ATTRIBUTE_UNUSED, MIR* mir ATTRIBUTE_UNUSED) { UNIMPLEMENTED(FATAL) << "Extended opcode kMirOpPackedArrayPut not supported."; } diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc index b16ae982f2..61354dfc53 100644 --- a/compiler/dex/quick/x86/utility_x86.cc +++ b/compiler/dex/quick/x86/utility_x86.cc @@ -57,8 +57,7 @@ LIR* X86Mir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) { return res; } -bool X86Mir2Lir::InexpensiveConstantInt(int32_t value) { - UNUSED(value); +bool X86Mir2Lir::InexpensiveConstantInt(int32_t value ATTRIBUTE_UNUSED) { return true; } @@ -66,8 +65,7 @@ bool X86Mir2Lir::InexpensiveConstantFloat(int32_t value) { return value == 0; } -bool X86Mir2Lir::InexpensiveConstantLong(int64_t value) { - UNUSED(value); +bool X86Mir2Lir::InexpensiveConstantLong(int64_t value ATTRIBUTE_UNUSED) { return true; } @@ -942,9 +940,14 @@ LIR* X86Mir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r return store; } -LIR* X86Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg, - int offset, int check_value, LIR* target, LIR** compare) { - UNUSED(temp_reg); // Comparison performed directly with memory. +LIR* X86Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, + // Comparison performed directly with memory. + RegStorage temp_reg ATTRIBUTE_UNUSED, + RegStorage base_reg, + int offset, + int check_value, + LIR* target, + LIR** compare) { LIR* inst = NewLIR3(IS_SIMM8(check_value) ? 
kX86Cmp32MI8 : kX86Cmp32MI, base_reg.GetReg(), offset, check_value); if (compare != nullptr) { @@ -1114,8 +1117,11 @@ RegLocation X86Mir2Lir::UpdateLocWideTyped(RegLocation loc) { return loc; } -LIR* X86Mir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) { - UNUSED(r_tgt); // Call to absolute memory location doesn't need a temporary target register. +LIR* X86Mir2Lir::InvokeTrampoline(OpKind op, + // Call to absolute memory location doesn't + // need a temporary target register. + RegStorage r_tgt ATTRIBUTE_UNUSED, + QuickEntrypointEnum trampoline) { if (cu_->target64) { return OpThreadMem(op, GetThreadOffset<8>(trampoline)); } else { diff --git a/compiler/dex/verification_results.cc b/compiler/dex/verification_results.cc index 6f2b2341e0..65b0ad6400 100644 --- a/compiler/dex/verification_results.cc +++ b/compiler/dex/verification_results.cc @@ -34,7 +34,6 @@ VerificationResults::VerificationResults(const CompilerOptions* compiler_options verified_methods_(), rejected_classes_lock_("compiler rejected classes lock"), rejected_classes_() { - UNUSED(compiler_options); } VerificationResults::~VerificationResults() { diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc index b6a40a203b..a45df95d1d 100644 --- a/compiler/jit/jit_compiler.cc +++ b/compiler/jit/jit_compiler.cc @@ -19,6 +19,7 @@ #include "art_method-inl.h" #include "arch/instruction_set.h" #include "arch/instruction_set_features.h" +#include "base/stringpiece.h" #include "base/time_utils.h" #include "base/timing_logger.h" #include "compiler_callbacks.h" @@ -86,7 +87,37 @@ JitCompiler::JitCompiler() : total_time_(0) { /* init_failure_output */ nullptr, /* abort_on_hard_verifier_failure */ false)); const InstructionSet instruction_set = kRuntimeISA; - instruction_set_features_.reset(InstructionSetFeatures::FromCppDefines()); + for (const StringPiece option : Runtime::Current()->GetCompilerOptions()) { + VLOG(compiler) << "JIT compiler option " << option; + std::string error_msg; + if (option.starts_with("--instruction-set-variant=")) { + StringPiece str = option.substr(strlen("--instruction-set-variant=")).data(); + VLOG(compiler) << "JIT instruction set variant " << str; + instruction_set_features_.reset(InstructionSetFeatures::FromVariant( + instruction_set, str.as_string(), &error_msg)); + if (instruction_set_features_ == nullptr) { + LOG(WARNING) << "Error parsing " << option << " message=" << error_msg; + } + } else if (option.starts_with("--instruction-set-features=")) { + StringPiece str = option.substr(strlen("--instruction-set-features=")).data(); + VLOG(compiler) << "JIT instruction set features " << str; + if (instruction_set_features_.get() == nullptr) { + instruction_set_features_.reset(InstructionSetFeatures::FromVariant( + instruction_set, "default", &error_msg)); + if (instruction_set_features_ == nullptr) { + LOG(WARNING) << "Error parsing " << option << " message=" << error_msg; + } + } + instruction_set_features_.reset( + instruction_set_features_->AddFeaturesFromString(str.as_string(), &error_msg)); + if (instruction_set_features_ == nullptr) { + LOG(WARNING) << "Error parsing " << option << " message=" << error_msg; + } + } + } + if (instruction_set_features_ == nullptr) { + instruction_set_features_.reset(InstructionSetFeatures::FromCppDefines()); + } cumulative_logger_.reset(new CumulativeLogger("jit times")); verification_results_.reset(new VerificationResults(compiler_options_.get())); method_inliner_map_.reset(new DexFileToMethodInlinerMap); diff --git 
a/compiler/jni/jni_cfi_test_expected.inc b/compiler/jni/jni_cfi_test_expected.inc index 5af2242963..16b4386938 100644 --- a/compiler/jni/jni_cfi_test_expected.inc +++ b/compiler/jni/jni_cfi_test_expected.inc @@ -394,76 +394,77 @@ static constexpr uint8_t expected_cfi_kMips[] = { // 0x0000006c: .cfi_def_cfa_offset: 64 static constexpr uint8_t expected_asm_kMips64[] = { - 0xA0, 0xFF, 0xBD, 0x67, 0x58, 0x00, 0xBF, 0xFF, 0x50, 0x00, 0xBE, 0xFF, - 0x48, 0x00, 0xBC, 0xFF, 0x40, 0x00, 0xB7, 0xFF, 0x38, 0x00, 0xB6, 0xFF, - 0x30, 0x00, 0xB5, 0xFF, 0x28, 0x00, 0xB4, 0xFF, 0x20, 0x00, 0xB3, 0xFF, - 0x18, 0x00, 0xB2, 0xFF, 0x00, 0x00, 0xA4, 0xFF, 0x68, 0x00, 0xA5, 0xAF, - 0x6C, 0x00, 0xAE, 0xE7, 0x70, 0x00, 0xA7, 0xAF, 0x74, 0x00, 0xA8, 0xAF, - 0xE0, 0xFF, 0xBD, 0x67, 0x20, 0x00, 0xBD, 0x67, 0x18, 0x00, 0xB2, 0xDF, - 0x20, 0x00, 0xB3, 0xDF, 0x28, 0x00, 0xB4, 0xDF, 0x30, 0x00, 0xB5, 0xDF, - 0x38, 0x00, 0xB6, 0xDF, 0x40, 0x00, 0xB7, 0xDF, 0x48, 0x00, 0xBC, 0xDF, - 0x50, 0x00, 0xBE, 0xDF, 0x58, 0x00, 0xBF, 0xDF, 0x60, 0x00, 0xBD, 0x67, + 0x90, 0xFF, 0xBD, 0x67, 0x68, 0x00, 0xBF, 0xFF, 0x60, 0x00, 0xBE, 0xFF, + 0x58, 0x00, 0xBC, 0xFF, 0x50, 0x00, 0xB7, 0xFF, 0x48, 0x00, 0xB6, 0xFF, + 0x40, 0x00, 0xB5, 0xFF, 0x38, 0x00, 0xB4, 0xFF, 0x30, 0x00, 0xB3, 0xFF, + 0x28, 0x00, 0xB2, 0xFF, 0x00, 0x00, 0xA4, 0xFF, 0x78, 0x00, 0xA5, 0xAF, + 0x7C, 0x00, 0xAE, 0xE7, 0x80, 0x00, 0xA7, 0xAF, 0x84, 0x00, 0xA8, 0xAF, + 0xE0, 0xFF, 0xBD, 0x67, 0x20, 0x00, 0xBD, 0x67, 0x28, 0x00, 0xB2, 0xDF, + 0x30, 0x00, 0xB3, 0xDF, 0x38, 0x00, 0xB4, 0xDF, 0x40, 0x00, 0xB5, 0xDF, + 0x48, 0x00, 0xB6, 0xDF, 0x50, 0x00, 0xB7, 0xDF, 0x58, 0x00, 0xBC, 0xDF, + 0x60, 0x00, 0xBE, 0xDF, 0x68, 0x00, 0xBF, 0xDF, 0x70, 0x00, 0xBD, 0x67, 0x09, 0x00, 0xE0, 0x03, 0x00, 0x00, 0x00, 0x00, }; static constexpr uint8_t expected_cfi_kMips64[] = { - 0x44, 0x0E, 0x60, 0x44, 0x9F, 0x02, 0x44, 0x9E, 0x04, 0x44, 0x9C, 0x06, + 0x44, 0x0E, 0x70, 0x44, 0x9F, 0x02, 0x44, 0x9E, 0x04, 0x44, 0x9C, 0x06, 0x44, 0x97, 0x08, 0x44, 0x96, 0x0A, 0x44, 0x95, 0x0C, 0x44, 0x94, 0x0E, - 0x44, 0x93, 0x10, 0x44, 0x92, 0x12, 0x58, 0x0E, 0x80, 0x01, 0x44, 0x0E, - 0x60, 0x0A, 0x44, 0xD2, 0x44, 0xD3, 0x44, 0xD4, 0x44, 0xD5, 0x44, 0xD6, + 0x44, 0x93, 0x10, 0x44, 0x92, 0x12, 0x58, 0x0E, 0x90, 0x01, 0x44, 0x0E, + 0x70, 0x0A, 0x44, 0xD2, 0x44, 0xD3, 0x44, 0xD4, 0x44, 0xD5, 0x44, 0xD6, 0x44, 0xD7, 0x44, 0xDC, 0x44, 0xDE, 0x44, 0xDF, 0x44, 0x0E, 0x00, 0x48, - 0x0B, 0x0E, 0x60, + 0x0B, 0x0E, 0x70, }; -// 0x00000000: daddiu r29, r29, -96 -// 0x00000004: .cfi_def_cfa_offset: 96 -// 0x00000004: sd r31, +88(r29) +// 0x00000000: daddiu r29, r29, -112 +// 0x00000004: .cfi_def_cfa_offset: 112 +// 0x00000004: sd r31, +104(r29) // 0x00000008: .cfi_offset: r31 at cfa-8 -// 0x00000008: sd r30, +80(r29) +// 0x00000008: sd r30, +96(r29) // 0x0000000c: .cfi_offset: r30 at cfa-16 -// 0x0000000c: sd r28, +72(r29) +// 0x0000000c: sd r28, +88(r29) // 0x00000010: .cfi_offset: r28 at cfa-24 -// 0x00000010: sd r23, +64(r29) +// 0x00000010: sd r23, +80(r29) // 0x00000014: .cfi_offset: r23 at cfa-32 -// 0x00000014: sd r22, +56(r29) +// 0x00000014: sd r22, +72(r29) // 0x00000018: .cfi_offset: r22 at cfa-40 -// 0x00000018: sd r21, +48(r29) +// 0x00000018: sd r21, +64(r29) // 0x0000001c: .cfi_offset: r21 at cfa-48 -// 0x0000001c: sd r20, +40(r29) +// 0x0000001c: sd r20, +56(r29) // 0x00000020: .cfi_offset: r20 at cfa-56 -// 0x00000020: sd r19, +32(r29) +// 0x00000020: sd r19, +48(r29) // 0x00000024: .cfi_offset: r19 at cfa-64 -// 0x00000024: sd r18, +24(r29) +// 0x00000024: sd r18, +40(r29) // 0x00000028: .cfi_offset: r18 
at cfa-72 // 0x00000028: sd r4, +0(r29) -// 0x0000002c: sw r5, +104(r29) -// 0x00000030: swc1 f14, +108(r29) -// 0x00000034: sw r7, +112(r29) -// 0x00000038: sw r8, +116(r29) +// 0x0000002c: sw r5, +120(r29) +// 0x00000030: swc1 f14, +124(r29) +// 0x00000034: sw r7, +128(r29) +// 0x00000038: sw r8, +132(r29) // 0x0000003c: daddiu r29, r29, -32 -// 0x00000040: .cfi_def_cfa_offset: 128 +// 0x00000040: .cfi_def_cfa_offset: 144 // 0x00000040: daddiu r29, r29, 32 -// 0x00000044: .cfi_def_cfa_offset: 96 +// 0x00000044: .cfi_def_cfa_offset: 112 // 0x00000044: .cfi_remember_state -// 0x00000044: ld r18, +24(r29) +// 0x00000044: ld r18, +40(r29) // 0x00000048: .cfi_restore: r18 -// 0x00000048: ld r19, +32(r29) +// 0x00000048: ld r19, +48(r29) // 0x0000004c: .cfi_restore: r19 -// 0x0000004c: ld r20, +40(r29) +// 0x0000004c: ld r20, +56(r29) // 0x00000050: .cfi_restore: r20 -// 0x00000050: ld r21, +48(r29) +// 0x00000050: ld r21, +64(r29) // 0x00000054: .cfi_restore: r21 -// 0x00000054: ld r22, +56(r29) +// 0x00000054: ld r22, +72(r29) // 0x00000058: .cfi_restore: r22 -// 0x00000058: ld r23, +64(r29) +// 0x00000058: ld r23, +80(r29) // 0x0000005c: .cfi_restore: r23 -// 0x0000005c: ld r28, +72(r29) +// 0x0000005c: ld r28, +88(r29) // 0x00000060: .cfi_restore: r28 -// 0x00000060: ld r30, +80(r29) +// 0x00000060: ld r30, +96(r29) // 0x00000064: .cfi_restore: r30 -// 0x00000064: ld r31, +88(r29) +// 0x00000064: ld r31, +104(r29) // 0x00000068: .cfi_restore: r31 -// 0x00000068: daddiu r29, r29, 96 +// 0x00000068: daddiu r29, r29, 112 // 0x0000006c: .cfi_def_cfa_offset: 0 // 0x0000006c: jr r31 // 0x00000070: nop // 0x00000074: .cfi_restore_state -// 0x00000074: .cfi_def_cfa_offset: 96 +// 0x00000074: .cfi_def_cfa_offset: 112 + diff --git a/compiler/jni/quick/mips/calling_convention_mips.cc b/compiler/jni/quick/mips/calling_convention_mips.cc index ecf143d8f5..2d31a9881e 100644 --- a/compiler/jni/quick/mips/calling_convention_mips.cc +++ b/compiler/jni/quick/mips/calling_convention_mips.cc @@ -183,7 +183,7 @@ ManagedRegister MipsJniCallingConvention::ReturnScratchRegister() const { } size_t MipsJniCallingConvention::FrameSize() { - // Method*, LR and callee save area size, local reference segment state + // ArtMethod*, RA and callee save area size, local reference segment state size_t frame_data_size = kMipsPointerSize + (2 + CalleeSaveRegisters().size()) * kFramePointerSize; // References plus 2 words for HandleScope header diff --git a/compiler/jni/quick/mips64/calling_convention_mips64.cc b/compiler/jni/quick/mips64/calling_convention_mips64.cc index 3a11bcfe9c..807d740b4d 100644 --- a/compiler/jni/quick/mips64/calling_convention_mips64.cc +++ b/compiler/jni/quick/mips64/calling_convention_mips64.cc @@ -140,6 +140,7 @@ uint32_t Mips64JniCallingConvention::CoreSpillMask() const { // Compute spill mask to agree with callee saves initialized in the constructor uint32_t result = 0; result = 1 << S2 | 1 << S3 | 1 << S4 | 1 << S5 | 1 << S6 | 1 << S7 | 1 << GP | 1 << S8 | 1 << RA; + DCHECK_EQ(static_cast<size_t>(POPCOUNT(result)), callee_save_regs_.size() + 1); return result; } @@ -148,9 +149,9 @@ ManagedRegister Mips64JniCallingConvention::ReturnScratchRegister() const { } size_t Mips64JniCallingConvention::FrameSize() { - // Mehtod* and callee save area size, local reference segment state + // ArtMethod*, RA and callee save area size, local reference segment state size_t frame_data_size = kFramePointerSize + - CalleeSaveRegisters().size() * kFramePointerSize + sizeof(uint32_t); + (CalleeSaveRegisters().size() 
+ 1) * kFramePointerSize + sizeof(uint32_t); // References plus 2 words for HandleScope header size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount()); // Plus return value spill area size diff --git a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc index 9c7eab1cc7..b6b11ca51f 100644 --- a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc +++ b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc @@ -38,8 +38,7 @@ ManagedRegister X86_64JniCallingConvention::ReturnScratchRegister() const { return ManagedRegister::NoRegister(); // No free regs, so assembler uses push/pop } -static ManagedRegister ReturnRegisterForShorty(const char* shorty, bool jni) { - UNUSED(jni); +static ManagedRegister ReturnRegisterForShorty(const char* shorty, bool jni ATTRIBUTE_UNUSED) { if (shorty[0] == 'F' || shorty[0] == 'D') { return X86_64ManagedRegister::FromXmmRegister(XMM0); } else if (shorty[0] == 'J') { diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h index a82d09eedd..d6cb65bd64 100644 --- a/compiler/oat_writer.h +++ b/compiler/oat_writer.h @@ -281,7 +281,7 @@ class OatWriter { // Offsets of the dex cache arrays for each app dex file. For the // boot image, this information is provided by the ImageWriter. - SafeMap<const DexFile*, size_t> dex_cache_arrays_offsets_; + SafeMap<const DexFile*, size_t> dex_cache_arrays_offsets_; // DexFiles not owned. // Offset of the oat data from the start of the mmapped region of the elf file. size_t oat_data_offset_; diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc index 358a35c483..92a5878476 100644 --- a/compiler/optimizing/code_generator_arm.cc +++ b/compiler/optimizing/code_generator_arm.cc @@ -1141,8 +1141,7 @@ void LocationsBuilderARM::VisitExit(HExit* exit) { exit->SetLocations(nullptr); } -void InstructionCodeGeneratorARM::VisitExit(HExit* exit) { - UNUSED(exit); +void InstructionCodeGeneratorARM::VisitExit(HExit* exit ATTRIBUTE_UNUSED) { } void InstructionCodeGeneratorARM::GenerateCompareWithImmediate(Register left, int32_t right) { @@ -1569,9 +1568,8 @@ void LocationsBuilderARM::VisitLoadLocal(HLoadLocal* load) { load->SetLocations(nullptr); } -void InstructionCodeGeneratorARM::VisitLoadLocal(HLoadLocal* load) { +void InstructionCodeGeneratorARM::VisitLoadLocal(HLoadLocal* load ATTRIBUTE_UNUSED) { // Nothing to do, this is driven by the code generator. - UNUSED(load); } void LocationsBuilderARM::VisitStoreLocal(HStoreLocal* store) { @@ -1598,8 +1596,7 @@ void LocationsBuilderARM::VisitStoreLocal(HStoreLocal* store) { } } -void InstructionCodeGeneratorARM::VisitStoreLocal(HStoreLocal* store) { - UNUSED(store); +void InstructionCodeGeneratorARM::VisitStoreLocal(HStoreLocal* store ATTRIBUTE_UNUSED) { } void LocationsBuilderARM::VisitIntConstant(HIntConstant* constant) { @@ -1608,9 +1605,8 @@ void LocationsBuilderARM::VisitIntConstant(HIntConstant* constant) { locations->SetOut(Location::ConstantLocation(constant)); } -void InstructionCodeGeneratorARM::VisitIntConstant(HIntConstant* constant) { +void InstructionCodeGeneratorARM::VisitIntConstant(HIntConstant* constant ATTRIBUTE_UNUSED) { // Will be generated at use site. 
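The Mips64JniCallingConvention hunks above make the JNI frame account for RA alongside the callee-saved registers, and the new DCHECK ties CoreSpillMask() to callee_save_regs_.size() + 1. A rough sketch of the frame-data arithmetic after the change, using a hypothetical helper name and assuming 8-byte stack slots on MIPS64:

// Sketch only; JniFrameDataSize is an illustrative name, not ART code.
#include <cstddef>
#include <cstdint>

constexpr size_t kFramePointerSize = 8;  // MIPS64 pointer / stack slot size

size_t JniFrameDataSize(size_t num_callee_saves) {
  // ArtMethod* slot, RA plus the callee saves, and a uint32_t for the local
  // reference segment state; stack alignment is applied separately.
  return kFramePointerSize                              // ArtMethod*
         + (num_callee_saves + 1) * kFramePointerSize   // callee saves + RA
         + sizeof(uint32_t);                            // segment state
}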
- UNUSED(constant); } void LocationsBuilderARM::VisitNullConstant(HNullConstant* constant) { @@ -1619,9 +1615,8 @@ void LocationsBuilderARM::VisitNullConstant(HNullConstant* constant) { locations->SetOut(Location::ConstantLocation(constant)); } -void InstructionCodeGeneratorARM::VisitNullConstant(HNullConstant* constant) { +void InstructionCodeGeneratorARM::VisitNullConstant(HNullConstant* constant ATTRIBUTE_UNUSED) { // Will be generated at use site. - UNUSED(constant); } void LocationsBuilderARM::VisitLongConstant(HLongConstant* constant) { @@ -1630,9 +1625,8 @@ void LocationsBuilderARM::VisitLongConstant(HLongConstant* constant) { locations->SetOut(Location::ConstantLocation(constant)); } -void InstructionCodeGeneratorARM::VisitLongConstant(HLongConstant* constant) { +void InstructionCodeGeneratorARM::VisitLongConstant(HLongConstant* constant ATTRIBUTE_UNUSED) { // Will be generated at use site. - UNUSED(constant); } void LocationsBuilderARM::VisitFloatConstant(HFloatConstant* constant) { @@ -1641,9 +1635,8 @@ void LocationsBuilderARM::VisitFloatConstant(HFloatConstant* constant) { locations->SetOut(Location::ConstantLocation(constant)); } -void InstructionCodeGeneratorARM::VisitFloatConstant(HFloatConstant* constant) { +void InstructionCodeGeneratorARM::VisitFloatConstant(HFloatConstant* constant ATTRIBUTE_UNUSED) { // Will be generated at use site. - UNUSED(constant); } void LocationsBuilderARM::VisitDoubleConstant(HDoubleConstant* constant) { @@ -1652,9 +1645,8 @@ void LocationsBuilderARM::VisitDoubleConstant(HDoubleConstant* constant) { locations->SetOut(Location::ConstantLocation(constant)); } -void InstructionCodeGeneratorARM::VisitDoubleConstant(HDoubleConstant* constant) { +void InstructionCodeGeneratorARM::VisitDoubleConstant(HDoubleConstant* constant ATTRIBUTE_UNUSED) { // Will be generated at use site. - UNUSED(constant); } void LocationsBuilderARM::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) { @@ -1669,8 +1661,7 @@ void LocationsBuilderARM::VisitReturnVoid(HReturnVoid* ret) { ret->SetLocations(nullptr); } -void InstructionCodeGeneratorARM::VisitReturnVoid(HReturnVoid* ret) { - UNUSED(ret); +void InstructionCodeGeneratorARM::VisitReturnVoid(HReturnVoid* ret ATTRIBUTE_UNUSED) { codegen_->GenerateFrameExit(); } @@ -1680,8 +1671,7 @@ void LocationsBuilderARM::VisitReturn(HReturn* ret) { locations->SetInAt(0, parameter_visitor_.GetReturnLocation(ret->InputAt(0)->GetType())); } -void InstructionCodeGeneratorARM::VisitReturn(HReturn* ret) { - UNUSED(ret); +void InstructionCodeGeneratorARM::VisitReturn(HReturn* ret ATTRIBUTE_UNUSED) { codegen_->GenerateFrameExit(); } @@ -3327,8 +3317,7 @@ void LocationsBuilderARM::VisitPhi(HPhi* instruction) { locations->SetOut(Location::Any()); } -void InstructionCodeGeneratorARM::VisitPhi(HPhi* instruction) { - UNUSED(instruction); +void InstructionCodeGeneratorARM::VisitPhi(HPhi* instruction ATTRIBUTE_UNUSED) { LOG(FATAL) << "Unreachable"; } @@ -4289,13 +4278,11 @@ void LocationsBuilderARM::VisitTemporary(HTemporary* temp) { temp->SetLocations(nullptr); } -void InstructionCodeGeneratorARM::VisitTemporary(HTemporary* temp) { +void InstructionCodeGeneratorARM::VisitTemporary(HTemporary* temp ATTRIBUTE_UNUSED) { // Nothing to do, this is driven by the code generator. 
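Most of the code-generator edits in this change follow one mechanical pattern: the body-level UNUSED(...) helper is dropped and the parameter is annotated in the signature instead. A minimal sketch of the before/after shape, with the attribute definition approximating the one in ART's base macros:

// Approximate definition, shown for illustration only.
#define ATTRIBUTE_UNUSED __attribute__((__unused__))

// Before: suppress the unused-parameter warning in the body.
//   void VisitExit(HExit* exit) { UNUSED(exit); }
// After: annotate the parameter and leave the body empty.
//   void VisitExit(HExit* exit ATTRIBUTE_UNUSED) {}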
- UNUSED(temp); } -void LocationsBuilderARM::VisitParallelMove(HParallelMove* instruction) { - UNUSED(instruction); +void LocationsBuilderARM::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) { LOG(FATAL) << "Unreachable"; } @@ -5343,15 +5330,13 @@ Literal* CodeGeneratorARM::DeduplicateMethodCodeLiteral(MethodReference target_m return DeduplicateMethodLiteral(target_method, &call_patches_); } -void LocationsBuilderARM::VisitBoundType(HBoundType* instruction) { +void LocationsBuilderARM::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) { // Nothing to do, this should be removed during prepare for register allocator. - UNUSED(instruction); LOG(FATAL) << "Unreachable"; } -void InstructionCodeGeneratorARM::VisitBoundType(HBoundType* instruction) { +void InstructionCodeGeneratorARM::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) { // Nothing to do, this should be removed during prepare for register allocator. - UNUSED(instruction); LOG(FATAL) << "Unreachable"; } diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc index 37f10a5cc5..f68b11b504 100644 --- a/compiler/optimizing/code_generator_arm64.cc +++ b/compiler/optimizing/code_generator_arm64.cc @@ -1330,8 +1330,7 @@ enum UnimplementedInstructionBreakCode { }; #define DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS(name) \ - void InstructionCodeGeneratorARM64::Visit##name(H##name* instr) { \ - UNUSED(instr); \ + void InstructionCodeGeneratorARM64::Visit##name(H##name* instr ATTRIBUTE_UNUSED) { \ __ Brk(UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name)); \ } \ void LocationsBuilderARM64::Visit##name(H##name* instr) { \ @@ -2183,8 +2182,8 @@ void LocationsBuilderARM64::VisitDoubleConstant(HDoubleConstant* constant) { locations->SetOut(Location::ConstantLocation(constant)); } -void InstructionCodeGeneratorARM64::VisitDoubleConstant(HDoubleConstant* constant) { - UNUSED(constant); +void InstructionCodeGeneratorARM64::VisitDoubleConstant( + HDoubleConstant* constant ATTRIBUTE_UNUSED) { // Will be generated at use site. } @@ -2192,8 +2191,7 @@ void LocationsBuilderARM64::VisitExit(HExit* exit) { exit->SetLocations(nullptr); } -void InstructionCodeGeneratorARM64::VisitExit(HExit* exit) { - UNUSED(exit); +void InstructionCodeGeneratorARM64::VisitExit(HExit* exit ATTRIBUTE_UNUSED) { } void LocationsBuilderARM64::VisitFloatConstant(HFloatConstant* constant) { @@ -2202,8 +2200,7 @@ void LocationsBuilderARM64::VisitFloatConstant(HFloatConstant* constant) { locations->SetOut(Location::ConstantLocation(constant)); } -void InstructionCodeGeneratorARM64::VisitFloatConstant(HFloatConstant* constant) { - UNUSED(constant); +void InstructionCodeGeneratorARM64::VisitFloatConstant(HFloatConstant* constant ATTRIBUTE_UNUSED) { // Will be generated at use site. } @@ -2689,9 +2686,8 @@ void LocationsBuilderARM64::VisitIntConstant(HIntConstant* constant) { locations->SetOut(Location::ConstantLocation(constant)); } -void InstructionCodeGeneratorARM64::VisitIntConstant(HIntConstant* constant) { +void InstructionCodeGeneratorARM64::VisitIntConstant(HIntConstant* constant ATTRIBUTE_UNUSED) { // Will be generated at use site. 
- UNUSED(constant); } void LocationsBuilderARM64::VisitNullConstant(HNullConstant* constant) { @@ -2699,9 +2695,8 @@ void LocationsBuilderARM64::VisitNullConstant(HNullConstant* constant) { locations->SetOut(Location::ConstantLocation(constant)); } -void InstructionCodeGeneratorARM64::VisitNullConstant(HNullConstant* constant) { +void InstructionCodeGeneratorARM64::VisitNullConstant(HNullConstant* constant ATTRIBUTE_UNUSED) { // Will be generated at use site. - UNUSED(constant); } void LocationsBuilderARM64::VisitInvokeUnresolved(HInvokeUnresolved* invoke) { @@ -3092,9 +3087,8 @@ void LocationsBuilderARM64::VisitLoadLocal(HLoadLocal* load) { load->SetLocations(nullptr); } -void InstructionCodeGeneratorARM64::VisitLoadLocal(HLoadLocal* load) { +void InstructionCodeGeneratorARM64::VisitLoadLocal(HLoadLocal* load ATTRIBUTE_UNUSED) { // Nothing to do, this is driven by the code generator. - UNUSED(load); } void LocationsBuilderARM64::VisitLoadString(HLoadString* load) { @@ -3131,9 +3125,8 @@ void LocationsBuilderARM64::VisitLongConstant(HLongConstant* constant) { locations->SetOut(Location::ConstantLocation(constant)); } -void InstructionCodeGeneratorARM64::VisitLongConstant(HLongConstant* constant) { +void InstructionCodeGeneratorARM64::VisitLongConstant(HLongConstant* constant ATTRIBUTE_UNUSED) { // Will be generated at use site. - UNUSED(constant); } void LocationsBuilderARM64::VisitMonitorOperation(HMonitorOperation* instruction) { @@ -3400,8 +3393,7 @@ void LocationsBuilderARM64::VisitPhi(HPhi* instruction) { locations->SetOut(Location::Any()); } -void InstructionCodeGeneratorARM64::VisitPhi(HPhi* instruction) { - UNUSED(instruction); +void InstructionCodeGeneratorARM64::VisitPhi(HPhi* instruction ATTRIBUTE_UNUSED) { LOG(FATAL) << "Unreachable"; } @@ -3471,8 +3463,7 @@ void LocationsBuilderARM64::VisitReturn(HReturn* instruction) { locations->SetInAt(0, ARM64ReturnLocation(return_type)); } -void InstructionCodeGeneratorARM64::VisitReturn(HReturn* instruction) { - UNUSED(instruction); +void InstructionCodeGeneratorARM64::VisitReturn(HReturn* instruction ATTRIBUTE_UNUSED) { codegen_->GenerateFrameExit(); } @@ -3480,8 +3471,7 @@ void LocationsBuilderARM64::VisitReturnVoid(HReturnVoid* instruction) { instruction->SetLocations(nullptr); } -void InstructionCodeGeneratorARM64::VisitReturnVoid(HReturnVoid* instruction) { - UNUSED(instruction); +void InstructionCodeGeneratorARM64::VisitReturnVoid(HReturnVoid* instruction ATTRIBUTE_UNUSED) { codegen_->GenerateFrameExit(); } @@ -3525,8 +3515,7 @@ void LocationsBuilderARM64::VisitStoreLocal(HStoreLocal* store) { } } -void InstructionCodeGeneratorARM64::VisitStoreLocal(HStoreLocal* store) { - UNUSED(store); +void InstructionCodeGeneratorARM64::VisitStoreLocal(HStoreLocal* store ATTRIBUTE_UNUSED) { } void LocationsBuilderARM64::VisitSub(HSub* instruction) { @@ -3643,9 +3632,8 @@ void LocationsBuilderARM64::VisitTemporary(HTemporary* temp) { temp->SetLocations(nullptr); } -void InstructionCodeGeneratorARM64::VisitTemporary(HTemporary* temp) { +void InstructionCodeGeneratorARM64::VisitTemporary(HTemporary* temp ATTRIBUTE_UNUSED) { // Nothing to do, this is driven by the code generator. 
- UNUSED(temp); } void LocationsBuilderARM64::VisitThrow(HThrow* instruction) { @@ -3744,15 +3732,13 @@ void InstructionCodeGeneratorARM64::VisitXor(HXor* instruction) { HandleBinaryOp(instruction); } -void LocationsBuilderARM64::VisitBoundType(HBoundType* instruction) { +void LocationsBuilderARM64::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) { // Nothing to do, this should be removed during prepare for register allocator. - UNUSED(instruction); LOG(FATAL) << "Unreachable"; } -void InstructionCodeGeneratorARM64::VisitBoundType(HBoundType* instruction) { +void InstructionCodeGeneratorARM64::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) { // Nothing to do, this should be removed during prepare for register allocator. - UNUSED(instruction); LOG(FATAL) << "Unreachable"; } diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc index 8b33f56295..963eec2529 100644 --- a/compiler/optimizing/code_generator_x86.cc +++ b/compiler/optimizing/code_generator_x86.cc @@ -1052,8 +1052,7 @@ void LocationsBuilderX86::VisitExit(HExit* exit) { exit->SetLocations(nullptr); } -void InstructionCodeGeneratorX86::VisitExit(HExit* exit) { - UNUSED(exit); +void InstructionCodeGeneratorX86::VisitExit(HExit* exit ATTRIBUTE_UNUSED) { } void InstructionCodeGeneratorX86::GenerateFPJumps(HCondition* cond, @@ -1332,9 +1331,8 @@ void LocationsBuilderX86::VisitLoadLocal(HLoadLocal* local) { local->SetLocations(nullptr); } -void InstructionCodeGeneratorX86::VisitLoadLocal(HLoadLocal* load) { +void InstructionCodeGeneratorX86::VisitLoadLocal(HLoadLocal* load ATTRIBUTE_UNUSED) { // Nothing to do, this is driven by the code generator. - UNUSED(load); } void LocationsBuilderX86::VisitStoreLocal(HStoreLocal* store) { @@ -1361,8 +1359,7 @@ void LocationsBuilderX86::VisitStoreLocal(HStoreLocal* store) { } } -void InstructionCodeGeneratorX86::VisitStoreLocal(HStoreLocal* store) { - UNUSED(store); +void InstructionCodeGeneratorX86::VisitStoreLocal(HStoreLocal* store ATTRIBUTE_UNUSED) { } void LocationsBuilderX86::VisitCondition(HCondition* cond) { @@ -1544,9 +1541,8 @@ void LocationsBuilderX86::VisitIntConstant(HIntConstant* constant) { locations->SetOut(Location::ConstantLocation(constant)); } -void InstructionCodeGeneratorX86::VisitIntConstant(HIntConstant* constant) { +void InstructionCodeGeneratorX86::VisitIntConstant(HIntConstant* constant ATTRIBUTE_UNUSED) { // Will be generated at use site. - UNUSED(constant); } void LocationsBuilderX86::VisitNullConstant(HNullConstant* constant) { @@ -1555,9 +1551,8 @@ void LocationsBuilderX86::VisitNullConstant(HNullConstant* constant) { locations->SetOut(Location::ConstantLocation(constant)); } -void InstructionCodeGeneratorX86::VisitNullConstant(HNullConstant* constant) { +void InstructionCodeGeneratorX86::VisitNullConstant(HNullConstant* constant ATTRIBUTE_UNUSED) { // Will be generated at use site. - UNUSED(constant); } void LocationsBuilderX86::VisitLongConstant(HLongConstant* constant) { @@ -1566,9 +1561,8 @@ void LocationsBuilderX86::VisitLongConstant(HLongConstant* constant) { locations->SetOut(Location::ConstantLocation(constant)); } -void InstructionCodeGeneratorX86::VisitLongConstant(HLongConstant* constant) { +void InstructionCodeGeneratorX86::VisitLongConstant(HLongConstant* constant ATTRIBUTE_UNUSED) { // Will be generated at use site. 
- UNUSED(constant); } void LocationsBuilderX86::VisitFloatConstant(HFloatConstant* constant) { @@ -1577,9 +1571,8 @@ void LocationsBuilderX86::VisitFloatConstant(HFloatConstant* constant) { locations->SetOut(Location::ConstantLocation(constant)); } -void InstructionCodeGeneratorX86::VisitFloatConstant(HFloatConstant* constant) { +void InstructionCodeGeneratorX86::VisitFloatConstant(HFloatConstant* constant ATTRIBUTE_UNUSED) { // Will be generated at use site. - UNUSED(constant); } void LocationsBuilderX86::VisitDoubleConstant(HDoubleConstant* constant) { @@ -1588,9 +1581,8 @@ void LocationsBuilderX86::VisitDoubleConstant(HDoubleConstant* constant) { locations->SetOut(Location::ConstantLocation(constant)); } -void InstructionCodeGeneratorX86::VisitDoubleConstant(HDoubleConstant* constant) { +void InstructionCodeGeneratorX86::VisitDoubleConstant(HDoubleConstant* constant ATTRIBUTE_UNUSED) { // Will be generated at use site. - UNUSED(constant); } void LocationsBuilderX86::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) { @@ -1605,8 +1597,7 @@ void LocationsBuilderX86::VisitReturnVoid(HReturnVoid* ret) { ret->SetLocations(nullptr); } -void InstructionCodeGeneratorX86::VisitReturnVoid(HReturnVoid* ret) { - UNUSED(ret); +void InstructionCodeGeneratorX86::VisitReturnVoid(HReturnVoid* ret ATTRIBUTE_UNUSED) { codegen_->GenerateFrameExit(); } @@ -3740,8 +3731,7 @@ void LocationsBuilderX86::VisitPhi(HPhi* instruction) { locations->SetOut(Location::Any()); } -void InstructionCodeGeneratorX86::VisitPhi(HPhi* instruction) { - UNUSED(instruction); +void InstructionCodeGeneratorX86::VisitPhi(HPhi* instruction ATTRIBUTE_UNUSED) { LOG(FATAL) << "Unreachable"; } @@ -4739,13 +4729,11 @@ void LocationsBuilderX86::VisitTemporary(HTemporary* temp) { temp->SetLocations(nullptr); } -void InstructionCodeGeneratorX86::VisitTemporary(HTemporary* temp) { +void InstructionCodeGeneratorX86::VisitTemporary(HTemporary* temp ATTRIBUTE_UNUSED) { // Nothing to do, this is driven by the code generator. - UNUSED(temp); } -void LocationsBuilderX86::VisitParallelMove(HParallelMove* instruction) { - UNUSED(instruction); +void LocationsBuilderX86::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) { LOG(FATAL) << "Unreachable"; } @@ -5668,15 +5656,13 @@ void InstructionCodeGeneratorX86::HandleBitwiseOperation(HBinaryOperation* instr } } -void LocationsBuilderX86::VisitBoundType(HBoundType* instruction) { +void LocationsBuilderX86::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) { // Nothing to do, this should be removed during prepare for register allocator. - UNUSED(instruction); LOG(FATAL) << "Unreachable"; } -void InstructionCodeGeneratorX86::VisitBoundType(HBoundType* instruction) { +void InstructionCodeGeneratorX86::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) { // Nothing to do, this should be removed during prepare for register allocator. 
- UNUSED(instruction); LOG(FATAL) << "Unreachable"; } diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc index a2af4ba1f9..ed2e4ca87c 100644 --- a/compiler/optimizing/code_generator_x86_64.cc +++ b/compiler/optimizing/code_generator_x86_64.cc @@ -1050,8 +1050,7 @@ void LocationsBuilderX86_64::VisitExit(HExit* exit) { exit->SetLocations(nullptr); } -void InstructionCodeGeneratorX86_64::VisitExit(HExit* exit) { - UNUSED(exit); +void InstructionCodeGeneratorX86_64::VisitExit(HExit* exit ATTRIBUTE_UNUSED) { } void InstructionCodeGeneratorX86_64::GenerateFPJumps(HCondition* cond, @@ -1278,9 +1277,8 @@ void LocationsBuilderX86_64::VisitLoadLocal(HLoadLocal* local) { local->SetLocations(nullptr); } -void InstructionCodeGeneratorX86_64::VisitLoadLocal(HLoadLocal* load) { +void InstructionCodeGeneratorX86_64::VisitLoadLocal(HLoadLocal* load ATTRIBUTE_UNUSED) { // Nothing to do, this is driven by the code generator. - UNUSED(load); } void LocationsBuilderX86_64::VisitStoreLocal(HStoreLocal* store) { @@ -1307,8 +1305,7 @@ void LocationsBuilderX86_64::VisitStoreLocal(HStoreLocal* store) { } } -void InstructionCodeGeneratorX86_64::VisitStoreLocal(HStoreLocal* store) { - UNUSED(store); +void InstructionCodeGeneratorX86_64::VisitStoreLocal(HStoreLocal* store ATTRIBUTE_UNUSED) { } void LocationsBuilderX86_64::VisitCondition(HCondition* cond) { @@ -1613,9 +1610,8 @@ void LocationsBuilderX86_64::VisitIntConstant(HIntConstant* constant) { locations->SetOut(Location::ConstantLocation(constant)); } -void InstructionCodeGeneratorX86_64::VisitIntConstant(HIntConstant* constant) { +void InstructionCodeGeneratorX86_64::VisitIntConstant(HIntConstant* constant ATTRIBUTE_UNUSED) { // Will be generated at use site. - UNUSED(constant); } void LocationsBuilderX86_64::VisitNullConstant(HNullConstant* constant) { @@ -1624,9 +1620,8 @@ void LocationsBuilderX86_64::VisitNullConstant(HNullConstant* constant) { locations->SetOut(Location::ConstantLocation(constant)); } -void InstructionCodeGeneratorX86_64::VisitNullConstant(HNullConstant* constant) { +void InstructionCodeGeneratorX86_64::VisitNullConstant(HNullConstant* constant ATTRIBUTE_UNUSED) { // Will be generated at use site. - UNUSED(constant); } void LocationsBuilderX86_64::VisitLongConstant(HLongConstant* constant) { @@ -1635,9 +1630,8 @@ void LocationsBuilderX86_64::VisitLongConstant(HLongConstant* constant) { locations->SetOut(Location::ConstantLocation(constant)); } -void InstructionCodeGeneratorX86_64::VisitLongConstant(HLongConstant* constant) { +void InstructionCodeGeneratorX86_64::VisitLongConstant(HLongConstant* constant ATTRIBUTE_UNUSED) { // Will be generated at use site. - UNUSED(constant); } void LocationsBuilderX86_64::VisitFloatConstant(HFloatConstant* constant) { @@ -1646,9 +1640,8 @@ void LocationsBuilderX86_64::VisitFloatConstant(HFloatConstant* constant) { locations->SetOut(Location::ConstantLocation(constant)); } -void InstructionCodeGeneratorX86_64::VisitFloatConstant(HFloatConstant* constant) { +void InstructionCodeGeneratorX86_64::VisitFloatConstant(HFloatConstant* constant ATTRIBUTE_UNUSED) { // Will be generated at use site. 
- UNUSED(constant); } void LocationsBuilderX86_64::VisitDoubleConstant(HDoubleConstant* constant) { @@ -1657,9 +1650,9 @@ void LocationsBuilderX86_64::VisitDoubleConstant(HDoubleConstant* constant) { locations->SetOut(Location::ConstantLocation(constant)); } -void InstructionCodeGeneratorX86_64::VisitDoubleConstant(HDoubleConstant* constant) { +void InstructionCodeGeneratorX86_64::VisitDoubleConstant( + HDoubleConstant* constant ATTRIBUTE_UNUSED) { // Will be generated at use site. - UNUSED(constant); } void LocationsBuilderX86_64::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) { @@ -1674,8 +1667,7 @@ void LocationsBuilderX86_64::VisitReturnVoid(HReturnVoid* ret) { ret->SetLocations(nullptr); } -void InstructionCodeGeneratorX86_64::VisitReturnVoid(HReturnVoid* ret) { - UNUSED(ret); +void InstructionCodeGeneratorX86_64::VisitReturnVoid(HReturnVoid* ret ATTRIBUTE_UNUSED) { codegen_->GenerateFrameExit(); } @@ -3629,8 +3621,7 @@ void LocationsBuilderX86_64::VisitPhi(HPhi* instruction) { locations->SetOut(Location::Any()); } -void InstructionCodeGeneratorX86_64::VisitPhi(HPhi* instruction) { - UNUSED(instruction); +void InstructionCodeGeneratorX86_64::VisitPhi(HPhi* instruction ATTRIBUTE_UNUSED) { LOG(FATAL) << "Unimplemented"; } @@ -4450,13 +4441,11 @@ void LocationsBuilderX86_64::VisitTemporary(HTemporary* temp) { temp->SetLocations(nullptr); } -void InstructionCodeGeneratorX86_64::VisitTemporary(HTemporary* temp) { +void InstructionCodeGeneratorX86_64::VisitTemporary(HTemporary* temp ATTRIBUTE_UNUSED) { // Nothing to do, this is driven by the code generator. - UNUSED(temp); } -void LocationsBuilderX86_64::VisitParallelMove(HParallelMove* instruction) { - UNUSED(instruction); +void LocationsBuilderX86_64::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) { LOG(FATAL) << "Unimplemented"; } @@ -5331,15 +5320,13 @@ void InstructionCodeGeneratorX86_64::HandleBitwiseOperation(HBinaryOperation* in } } -void LocationsBuilderX86_64::VisitBoundType(HBoundType* instruction) { +void LocationsBuilderX86_64::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) { // Nothing to do, this should be removed during prepare for register allocator. - UNUSED(instruction); LOG(FATAL) << "Unreachable"; } -void InstructionCodeGeneratorX86_64::VisitBoundType(HBoundType* instruction) { +void InstructionCodeGeneratorX86_64::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) { // Nothing to do, this should be removed during prepare for register allocator. - UNUSED(instruction); LOG(FATAL) << "Unreachable"; } diff --git a/compiler/optimizing/common_arm64.h b/compiler/optimizing/common_arm64.h index f54547534f..4abe5e953c 100644 --- a/compiler/optimizing/common_arm64.h +++ b/compiler/optimizing/common_arm64.h @@ -206,7 +206,9 @@ static bool CanEncodeConstantAsImmediate(HConstant* constant, HInstruction* inst if (instr->IsAdd() || instr->IsSub() || instr->IsCondition() || instr->IsCompare() || instr->IsBoundsCheck()) { // Uses aliases of ADD/SUB instructions. - return vixl::Assembler::IsImmAddSub(value); + // If `value` does not fit but `-value` does, VIXL will automatically use + // the 'opposite' instruction. + return vixl::Assembler::IsImmAddSub(value) || vixl::Assembler::IsImmAddSub(-value); } else if (instr->IsAnd() || instr->IsOr() || instr->IsXor()) { // Uses logical operations. 
return vixl::Assembler::IsImmLogical(value, vixl::kXRegSize); diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc index 764a11475f..fe16d00b72 100644 --- a/compiler/optimizing/intrinsics_mips64.cc +++ b/compiler/optimizing/intrinsics_mips64.cc @@ -43,6 +43,93 @@ ArenaAllocator* IntrinsicCodeGeneratorMIPS64::GetAllocator() { return codegen_->GetGraph()->GetArena(); } +#define __ codegen->GetAssembler()-> + +static void MoveFromReturnRegister(Location trg, + Primitive::Type type, + CodeGeneratorMIPS64* codegen) { + if (!trg.IsValid()) { + DCHECK_EQ(type, Primitive::kPrimVoid); + return; + } + + DCHECK_NE(type, Primitive::kPrimVoid); + + if (Primitive::IsIntegralType(type) || type == Primitive::kPrimNot) { + GpuRegister trg_reg = trg.AsRegister<GpuRegister>(); + if (trg_reg != V0) { + __ Move(V0, trg_reg); + } + } else { + FpuRegister trg_reg = trg.AsFpuRegister<FpuRegister>(); + if (trg_reg != F0) { + if (type == Primitive::kPrimFloat) { + __ MovS(F0, trg_reg); + } else { + __ MovD(F0, trg_reg); + } + } + } +} + +static void MoveArguments(HInvoke* invoke, CodeGeneratorMIPS64* codegen) { + InvokeDexCallingConventionVisitorMIPS64 calling_convention_visitor; + IntrinsicVisitor::MoveArguments(invoke, codegen, &calling_convention_visitor); +} + +// Slow-path for fallback (calling the managed code to handle the +// intrinsic) in an intrinsified call. This will copy the arguments +// into the positions for a regular call. +// +// Note: The actual parameters are required to be in the locations +// given by the invoke's location summary. If an intrinsic +// modifies those locations before a slowpath call, they must be +// restored! +class IntrinsicSlowPathMIPS64 : public SlowPathCodeMIPS64 { + public: + explicit IntrinsicSlowPathMIPS64(HInvoke* invoke) : invoke_(invoke) { } + + void EmitNativeCode(CodeGenerator* codegen_in) OVERRIDE { + CodeGeneratorMIPS64* codegen = down_cast<CodeGeneratorMIPS64*>(codegen_in); + + __ Bind(GetEntryLabel()); + + SaveLiveRegisters(codegen, invoke_->GetLocations()); + + MoveArguments(invoke_, codegen); + + if (invoke_->IsInvokeStaticOrDirect()) { + codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(), + Location::RegisterLocation(A0)); + codegen->RecordPcInfo(invoke_, invoke_->GetDexPc(), this); + } else { + UNIMPLEMENTED(FATAL) << "Non-direct intrinsic slow-path not yet implemented"; + UNREACHABLE(); + } + + // Copy the result back to the expected output. + Location out = invoke_->GetLocations()->Out(); + if (out.IsValid()) { + DCHECK(out.IsRegister()); // TODO: Replace this when we support output in memory. + DCHECK(!invoke_->GetLocations()->GetLiveRegisters()->ContainsCoreRegister(out.reg())); + MoveFromReturnRegister(out, invoke_->GetType(), codegen); + } + + RestoreLiveRegisters(codegen, invoke_->GetLocations()); + __ B(GetExitLabel()); + } + + const char* GetDescription() const OVERRIDE { return "IntrinsicSlowPathMIPS64"; } + + private: + // The instruction where this slow path is happening. 
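The common_arm64.h hunk above widens the immediate check for ADD/SUB-shaped users: when value itself does not fit the add/sub immediate encoding but -value does, VIXL can emit the opposite instruction (ADD becomes SUB and vice versa). A sketch of the predicate, using the VIXL call from the diff but a hypothetical wrapper name:

// Sketch; CanEncodeAddSubImmediate is illustrative, not an ART helper.
// Assumes the VIXL AArch64 assembler header is available.
bool CanEncodeAddSubImmediate(int64_t value) {
  // AArch64 add/sub immediates are 12 bits, optionally shifted left by 12.
  // If only the negated value fits, the assembler flips the operation.
  return vixl::Assembler::IsImmAddSub(value) ||
         vixl::Assembler::IsImmAddSub(-value);
}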
+ HInvoke* const invoke_; + + DISALLOW_COPY_AND_ASSIGN(IntrinsicSlowPathMIPS64); +}; + +#undef __ + bool IntrinsicLocationsBuilderMIPS64::TryDispatch(HInvoke* invoke) { Dispatch(invoke); LocationSummary* res = invoke->GetLocations(); @@ -185,7 +272,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitShortReverseBytes(HInvoke* invoke) { GenReverseBytes(invoke->GetLocations(), Primitive::kPrimShort, GetAssembler()); } -static void GenCountZeroes(LocationSummary* locations, bool is64bit, Mips64Assembler* assembler) { +static void GenNumberOfLeadingZeroes(LocationSummary* locations, bool is64bit, Mips64Assembler* assembler) { GpuRegister in = locations->InAt(0).AsRegister<GpuRegister>(); GpuRegister out = locations->Out().AsRegister<GpuRegister>(); @@ -202,7 +289,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitIntegerNumberOfLeadingZeros(HInvoke* } void IntrinsicCodeGeneratorMIPS64::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) { - GenCountZeroes(invoke->GetLocations(), false, GetAssembler()); + GenNumberOfLeadingZeroes(invoke->GetLocations(), false, GetAssembler()); } // int java.lang.Long.numberOfLeadingZeros(long i) @@ -211,7 +298,103 @@ void IntrinsicLocationsBuilderMIPS64::VisitLongNumberOfLeadingZeros(HInvoke* inv } void IntrinsicCodeGeneratorMIPS64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) { - GenCountZeroes(invoke->GetLocations(), true, GetAssembler()); + GenNumberOfLeadingZeroes(invoke->GetLocations(), true, GetAssembler()); +} + +static void GenNumberOfTrailingZeroes(LocationSummary* locations, bool is64bit, Mips64Assembler* assembler) { + Location in = locations->InAt(0); + Location out = locations->Out(); + + if (is64bit) { + __ Dsbh(out.AsRegister<GpuRegister>(), in.AsRegister<GpuRegister>()); + __ Dshd(out.AsRegister<GpuRegister>(), out.AsRegister<GpuRegister>()); + __ Dbitswap(out.AsRegister<GpuRegister>(), out.AsRegister<GpuRegister>()); + __ Dclz(out.AsRegister<GpuRegister>(), out.AsRegister<GpuRegister>()); + } else { + __ Rotr(out.AsRegister<GpuRegister>(), in.AsRegister<GpuRegister>(), 16); + __ Wsbh(out.AsRegister<GpuRegister>(), out.AsRegister<GpuRegister>()); + __ Bitswap(out.AsRegister<GpuRegister>(), out.AsRegister<GpuRegister>()); + __ Clz(out.AsRegister<GpuRegister>(), out.AsRegister<GpuRegister>()); + } +} + +// int java.lang.Integer.numberOfTrailingZeros(int i) +void IntrinsicLocationsBuilderMIPS64::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) { + CreateIntToIntLocations(arena_, invoke); +} + +void IntrinsicCodeGeneratorMIPS64::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) { + GenNumberOfTrailingZeroes(invoke->GetLocations(), false, GetAssembler()); +} + +// int java.lang.Long.numberOfTrailingZeros(long i) +void IntrinsicLocationsBuilderMIPS64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) { + CreateIntToIntLocations(arena_, invoke); +} + +void IntrinsicCodeGeneratorMIPS64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) { + GenNumberOfTrailingZeroes(invoke->GetLocations(), true, GetAssembler()); +} + +static void GenRotateRight(HInvoke* invoke, + Primitive::Type type, + Mips64Assembler* assembler) { + DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong); + + LocationSummary* locations = invoke->GetLocations(); + GpuRegister in = locations->InAt(0).AsRegister<GpuRegister>(); + GpuRegister out = locations->Out().AsRegister<GpuRegister>(); + + if (invoke->InputAt(1)->IsIntConstant()) { + uint32_t shift = static_cast<uint32_t>(invoke->InputAt(1)->AsIntConstant()->GetValue()); + if (type == Primitive::kPrimInt) { + shift &= 0x1f; + __ 
Rotr(out, in, shift); + } else { + shift &= 0x3f; + if (shift < 32) { + __ Drotr(out, in, shift); + } else { + shift &= 0x1f; + __ Drotr32(out, in, shift); + } + } + } else { + GpuRegister shamt = locations->InAt(1).AsRegister<GpuRegister>(); + if (type == Primitive::kPrimInt) { + __ Rotrv(out, in, shamt); + } else { + __ Drotrv(out, in, shamt); + } + } +} + +// int java.lang.Integer.rotateRight(int i, int distance) +void IntrinsicLocationsBuilderMIPS64::VisitIntegerRotateRight(HInvoke* invoke) { + LocationSummary* locations = new (arena_) LocationSummary(invoke, + LocationSummary::kNoCall, + kIntrinsified); + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1))); + locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); +} + +void IntrinsicCodeGeneratorMIPS64::VisitIntegerRotateRight(HInvoke* invoke) { + GenRotateRight(invoke, Primitive::kPrimInt, GetAssembler()); +} + +// int java.lang.Long.rotateRight(long i, int distance) +void IntrinsicLocationsBuilderMIPS64::VisitLongRotateRight(HInvoke* invoke) { + LocationSummary* locations = new (arena_) LocationSummary(invoke, + LocationSummary::kNoCall, + kIntrinsified); + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1))); + locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); +} + +void IntrinsicCodeGeneratorMIPS64::VisitLongRotateRight(HInvoke* invoke) { + GenRotateRight(invoke, Primitive::kPrimLong, GetAssembler()); } static void GenReverse(LocationSummary* locations, @@ -765,6 +948,505 @@ void IntrinsicCodeGeneratorMIPS64::VisitThreadCurrentThread(HInvoke* invoke) { Thread::PeerOffset<kMips64PointerSize>().Int32Value()); } +static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) { + LocationSummary* locations = new (arena) LocationSummary(invoke, + LocationSummary::kNoCall, + kIntrinsified); + locations->SetInAt(0, Location::NoLocation()); // Unused receiver. + locations->SetInAt(1, Location::RequiresRegister()); + locations->SetInAt(2, Location::RequiresRegister()); + locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); +} + +static void GenUnsafeGet(HInvoke* invoke, + Primitive::Type type, + bool is_volatile, + CodeGeneratorMIPS64* codegen) { + LocationSummary* locations = invoke->GetLocations(); + DCHECK((type == Primitive::kPrimInt) || + (type == Primitive::kPrimLong) || + (type == Primitive::kPrimNot)); + Mips64Assembler* assembler = codegen->GetAssembler(); + // Object pointer. + GpuRegister base = locations->InAt(1).AsRegister<GpuRegister>(); + // Long offset. 
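GenNumberOfTrailingZeroes above has no direct count-trailing-zeros instruction to lean on: it reverses the bit order (DSBH/DSHD/DBITSWAP for 64-bit, ROTR/WSBH/BITSWAP for 32-bit) and then counts leading zeros with DCLZ/CLZ, because ctz(x) == clz(bit_reverse(x)). A portable sketch of the same identity, for illustration only:

// Illustration, not ART code: trailing zeros computed as leading zeros of
// the bit-reversed word, returning 64 for an input of zero, matching
// Long.numberOfTrailingZeros.
#include <cstdint>

int NumberOfTrailingZeros64(uint64_t x) {
  uint64_t reversed = 0;
  for (int i = 0; i < 64; ++i) {
    reversed = (reversed << 1) | ((x >> i) & 1);  // bit i -> bit 63 - i
  }
  int leading = 0;
  while (leading < 64 && ((reversed >> (63 - leading)) & 1) == 0) {
    ++leading;  // count leading zeros of the reversed word
  }
  return leading;
}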
+ GpuRegister offset = locations->InAt(2).AsRegister<GpuRegister>(); + GpuRegister trg = locations->Out().AsRegister<GpuRegister>(); + + __ Daddu(TMP, base, offset); + if (is_volatile) { + __ Sync(0); + } + switch (type) { + case Primitive::kPrimInt: + __ Lw(trg, TMP, 0); + break; + + case Primitive::kPrimNot: + __ Lwu(trg, TMP, 0); + break; + + case Primitive::kPrimLong: + __ Ld(trg, TMP, 0); + break; + + default: + LOG(FATAL) << "Unsupported op size " << type; + UNREACHABLE(); + } +} + +// int sun.misc.Unsafe.getInt(Object o, long offset) +void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGet(HInvoke* invoke) { + CreateIntIntIntToIntLocations(arena_, invoke); +} + +void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGet(HInvoke* invoke) { + GenUnsafeGet(invoke, Primitive::kPrimInt, false, codegen_); +} + +// int sun.misc.Unsafe.getIntVolatile(Object o, long offset) +void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetVolatile(HInvoke* invoke) { + CreateIntIntIntToIntLocations(arena_, invoke); +} + +void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetVolatile(HInvoke* invoke) { + GenUnsafeGet(invoke, Primitive::kPrimInt, true, codegen_); +} + +// long sun.misc.Unsafe.getLong(Object o, long offset) +void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetLong(HInvoke* invoke) { + CreateIntIntIntToIntLocations(arena_, invoke); +} + +void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetLong(HInvoke* invoke) { + GenUnsafeGet(invoke, Primitive::kPrimLong, false, codegen_); +} + +// long sun.misc.Unsafe.getLongVolatile(Object o, long offset) +void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetLongVolatile(HInvoke* invoke) { + CreateIntIntIntToIntLocations(arena_, invoke); +} + +void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetLongVolatile(HInvoke* invoke) { + GenUnsafeGet(invoke, Primitive::kPrimLong, true, codegen_); +} + +// Object sun.misc.Unsafe.getObject(Object o, long offset) +void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetObject(HInvoke* invoke) { + CreateIntIntIntToIntLocations(arena_, invoke); +} + +void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetObject(HInvoke* invoke) { + GenUnsafeGet(invoke, Primitive::kPrimNot, false, codegen_); +} + +// Object sun.misc.Unsafe.getObjectVolatile(Object o, long offset) +void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) { + CreateIntIntIntToIntLocations(arena_, invoke); +} + +void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) { + GenUnsafeGet(invoke, Primitive::kPrimNot, true, codegen_); +} + +static void CreateIntIntIntIntToVoid(ArenaAllocator* arena, HInvoke* invoke) { + LocationSummary* locations = new (arena) LocationSummary(invoke, + LocationSummary::kNoCall, + kIntrinsified); + locations->SetInAt(0, Location::NoLocation()); // Unused receiver. + locations->SetInAt(1, Location::RequiresRegister()); + locations->SetInAt(2, Location::RequiresRegister()); + locations->SetInAt(3, Location::RequiresRegister()); +} + +static void GenUnsafePut(LocationSummary* locations, + Primitive::Type type, + bool is_volatile, + bool is_ordered, + CodeGeneratorMIPS64* codegen) { + DCHECK((type == Primitive::kPrimInt) || + (type == Primitive::kPrimLong) || + (type == Primitive::kPrimNot)); + Mips64Assembler* assembler = codegen->GetAssembler(); + // Object pointer. + GpuRegister base = locations->InAt(1).AsRegister<GpuRegister>(); + // Long offset. 
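GenUnsafeGet above forms the raw address as base + offset in TMP, emits a full barrier (SYNC 0) before the load for the volatile variants, and picks the load width from the Java type; kPrimNot uses LWU because ART heap references are 32 bits wide even on 64-bit targets. A rough C++ model of the non-volatile access shape, for illustration only:

// Sketch, not ART code: byte-offset addressing plus a type-sized load.
#include <cstdint>
#include <cstring>

int64_t UnsafeGetLong(const void* obj, int64_t offset) {
  const char* addr = static_cast<const char*>(obj) + offset;  // Daddu TMP, base, offset
  int64_t value;
  std::memcpy(&value, addr, sizeof(value));  // Ld trg, TMP, 0
  return value;
}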
+ GpuRegister offset = locations->InAt(2).AsRegister<GpuRegister>(); + GpuRegister value = locations->InAt(3).AsRegister<GpuRegister>(); + + __ Daddu(TMP, base, offset); + if (is_volatile || is_ordered) { + __ Sync(0); + } + switch (type) { + case Primitive::kPrimInt: + case Primitive::kPrimNot: + __ Sw(value, TMP, 0); + break; + + case Primitive::kPrimLong: + __ Sd(value, TMP, 0); + break; + + default: + LOG(FATAL) << "Unsupported op size " << type; + UNREACHABLE(); + } + if (is_volatile) { + __ Sync(0); + } + + if (type == Primitive::kPrimNot) { + codegen->MarkGCCard(base, value); + } +} + +// void sun.misc.Unsafe.putInt(Object o, long offset, int x) +void IntrinsicLocationsBuilderMIPS64::VisitUnsafePut(HInvoke* invoke) { + CreateIntIntIntIntToVoid(arena_, invoke); +} + +void IntrinsicCodeGeneratorMIPS64::VisitUnsafePut(HInvoke* invoke) { + GenUnsafePut(invoke->GetLocations(), Primitive::kPrimInt, false, false, codegen_); +} + +// void sun.misc.Unsafe.putOrderedInt(Object o, long offset, int x) +void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutOrdered(HInvoke* invoke) { + CreateIntIntIntIntToVoid(arena_, invoke); +} + +void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutOrdered(HInvoke* invoke) { + GenUnsafePut(invoke->GetLocations(), Primitive::kPrimInt, false, true, codegen_); +} + +// void sun.misc.Unsafe.putIntVolatile(Object o, long offset, int x) +void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutVolatile(HInvoke* invoke) { + CreateIntIntIntIntToVoid(arena_, invoke); +} + +void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutVolatile(HInvoke* invoke) { + GenUnsafePut(invoke->GetLocations(), Primitive::kPrimInt, true, false, codegen_); +} + +// void sun.misc.Unsafe.putObject(Object o, long offset, Object x) +void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutObject(HInvoke* invoke) { + CreateIntIntIntIntToVoid(arena_, invoke); +} + +void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutObject(HInvoke* invoke) { + GenUnsafePut(invoke->GetLocations(), Primitive::kPrimNot, false, false, codegen_); +} + +// void sun.misc.Unsafe.putOrderedObject(Object o, long offset, Object x) +void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutObjectOrdered(HInvoke* invoke) { + CreateIntIntIntIntToVoid(arena_, invoke); +} + +void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutObjectOrdered(HInvoke* invoke) { + GenUnsafePut(invoke->GetLocations(), Primitive::kPrimNot, false, true, codegen_); +} + +// void sun.misc.Unsafe.putObjectVolatile(Object o, long offset, Object x) +void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutObjectVolatile(HInvoke* invoke) { + CreateIntIntIntIntToVoid(arena_, invoke); +} + +void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutObjectVolatile(HInvoke* invoke) { + GenUnsafePut(invoke->GetLocations(), Primitive::kPrimNot, true, false, codegen_); +} + +// void sun.misc.Unsafe.putLong(Object o, long offset, long x) +void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutLong(HInvoke* invoke) { + CreateIntIntIntIntToVoid(arena_, invoke); +} + +void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutLong(HInvoke* invoke) { + GenUnsafePut(invoke->GetLocations(), Primitive::kPrimLong, false, false, codegen_); +} + +// void sun.misc.Unsafe.putOrderedLong(Object o, long offset, long x) +void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutLongOrdered(HInvoke* invoke) { + CreateIntIntIntIntToVoid(arena_, invoke); +} + +void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutLongOrdered(HInvoke* invoke) { + GenUnsafePut(invoke->GetLocations(), Primitive::kPrimLong, false, true, codegen_); +} + +// void 
sun.misc.Unsafe.putLongVolatile(Object o, long offset, long x) +void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutLongVolatile(HInvoke* invoke) { + CreateIntIntIntIntToVoid(arena_, invoke); +} + +void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutLongVolatile(HInvoke* invoke) { + GenUnsafePut(invoke->GetLocations(), Primitive::kPrimLong, true, false, codegen_); +} + +// char java.lang.String.charAt(int index) +void IntrinsicLocationsBuilderMIPS64::VisitStringCharAt(HInvoke* invoke) { + LocationSummary* locations = new (arena_) LocationSummary(invoke, + LocationSummary::kCallOnSlowPath, + kIntrinsified); + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, Location::RequiresRegister()); + locations->SetOut(Location::SameAsFirstInput()); +} + +void IntrinsicCodeGeneratorMIPS64::VisitStringCharAt(HInvoke* invoke) { + LocationSummary* locations = invoke->GetLocations(); + Mips64Assembler* assembler = GetAssembler(); + + // Location of reference to data array + const int32_t value_offset = mirror::String::ValueOffset().Int32Value(); + // Location of count + const int32_t count_offset = mirror::String::CountOffset().Int32Value(); + + GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>(); + GpuRegister idx = locations->InAt(1).AsRegister<GpuRegister>(); + GpuRegister out = locations->Out().AsRegister<GpuRegister>(); + + // TODO: Maybe we can support range check elimination. Overall, + // though, I think it's not worth the cost. + // TODO: For simplicity, the index parameter is requested in a + // register, so different from Quick we will not optimize the + // code for constants (which would save a register). + + SlowPathCodeMIPS64* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS64(invoke); + codegen_->AddSlowPath(slow_path); + + // Load the string size + __ Lw(TMP, obj, count_offset); + codegen_->MaybeRecordImplicitNullCheck(invoke); + // Revert to slow path if idx is too large, or negative + __ Bgeuc(idx, TMP, slow_path->GetEntryLabel()); + + // out = obj[2*idx]. + __ Sll(TMP, idx, 1); // idx * 2 + __ Daddu(TMP, TMP, obj); // Address of char at location idx + __ Lhu(out, TMP, value_offset); // Load char at location idx + + __ Bind(slow_path->GetExitLabel()); +} + +// int java.lang.String.compareTo(String anotherString) +void IntrinsicLocationsBuilderMIPS64::VisitStringCompareTo(HInvoke* invoke) { + LocationSummary* locations = new (arena_) LocationSummary(invoke, + LocationSummary::kCall, + kIntrinsified); + InvokeRuntimeCallingConvention calling_convention; + locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); + locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); + Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt); + locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<GpuRegister>())); +} + +void IntrinsicCodeGeneratorMIPS64::VisitStringCompareTo(HInvoke* invoke) { + Mips64Assembler* assembler = GetAssembler(); + LocationSummary* locations = invoke->GetLocations(); + + // Note that the null check must have been done earlier. 
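The VisitStringCharAt code above does its own range check before the load: the index is compared against the count field with an unsigned branch (Bgeuc), so a negative index also falls into the slow path, and the fast path then loads the 16-bit code unit at value_offset + 2 * index. A sketch of that decision, with an illustrative helper name:

// Illustration only; CharAtFastPath is not an ART function.
#include <cstdint>

bool CharAtFastPath(const uint16_t* chars, int32_t count, int32_t index,
                    uint16_t* out) {
  // Unsigned compare: a negative index becomes a huge unsigned value and
  // fails the check, mirroring `Bgeuc idx, count, slow_path`.
  if (static_cast<uint32_t>(index) >= static_cast<uint32_t>(count)) {
    return false;  // caller takes the slow path
  }
  *out = chars[index];  // Sll/Daddu + Lhu at value_offset in the real code
  return true;
}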
+ DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0))); + + GpuRegister argument = locations->InAt(1).AsRegister<GpuRegister>(); + SlowPathCodeMIPS64* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS64(invoke); + codegen_->AddSlowPath(slow_path); + __ Beqzc(argument, slow_path->GetEntryLabel()); + + __ LoadFromOffset(kLoadDoubleword, + TMP, + TR, + QUICK_ENTRYPOINT_OFFSET(kMips64WordSize, + pStringCompareTo).Int32Value()); + __ Jalr(TMP); + __ Nop(); + __ Bind(slow_path->GetExitLabel()); +} + +static void GenerateStringIndexOf(HInvoke* invoke, + Mips64Assembler* assembler, + CodeGeneratorMIPS64* codegen, + ArenaAllocator* allocator, + bool start_at_zero) { + LocationSummary* locations = invoke->GetLocations(); + GpuRegister tmp_reg = start_at_zero ? locations->GetTemp(0).AsRegister<GpuRegister>() : TMP; + + // Note that the null check must have been done earlier. + DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0))); + + // Check for code points > 0xFFFF. Either a slow-path check when we + // don't know statically, or directly dispatch if we have a constant. + SlowPathCodeMIPS64* slow_path = nullptr; + if (invoke->InputAt(1)->IsIntConstant()) { + if (!IsUint<16>(invoke->InputAt(1)->AsIntConstant()->GetValue())) { + // Always needs the slow-path. We could directly dispatch to it, + // but this case should be rare, so for simplicity just put the + // full slow-path down and branch unconditionally. + slow_path = new (allocator) IntrinsicSlowPathMIPS64(invoke); + codegen->AddSlowPath(slow_path); + __ B(slow_path->GetEntryLabel()); + __ Bind(slow_path->GetExitLabel()); + return; + } + } else { + GpuRegister char_reg = locations->InAt(1).AsRegister<GpuRegister>(); + __ LoadConst32(tmp_reg, std::numeric_limits<uint16_t>::max()); + slow_path = new (allocator) IntrinsicSlowPathMIPS64(invoke); + codegen->AddSlowPath(slow_path); + __ Bltuc(tmp_reg, char_reg, slow_path->GetEntryLabel()); // UTF-16 required + } + + if (start_at_zero) { + DCHECK_EQ(tmp_reg, A2); + // Start-index = 0. + __ Clear(tmp_reg); + } else { + __ Slt(TMP, A2, ZERO); // if fromIndex < 0 + __ Seleqz(A2, A2, TMP); // fromIndex = 0 + } + + __ LoadFromOffset(kLoadDoubleword, + TMP, + TR, + QUICK_ENTRYPOINT_OFFSET(kMips64WordSize, pIndexOf).Int32Value()); + __ Jalr(TMP); + __ Nop(); + + if (slow_path != nullptr) { + __ Bind(slow_path->GetExitLabel()); + } +} + +// int java.lang.String.indexOf(int ch) +void IntrinsicLocationsBuilderMIPS64::VisitStringIndexOf(HInvoke* invoke) { + LocationSummary* locations = new (arena_) LocationSummary(invoke, + LocationSummary::kCall, + kIntrinsified); + // We have a hand-crafted assembly stub that follows the runtime + // calling convention. So it's best to align the inputs accordingly. + InvokeRuntimeCallingConvention calling_convention; + locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); + locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); + Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt); + locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<GpuRegister>())); + + // Need a temp for slow-path codepoint compare, and need to send start-index=0. 
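GenerateStringIndexOf above only inlines the search for code points in the 16-bit range: a constant argument outside that range branches straight to the slow path, and a non-constant argument is compared against 0xFFFF at run time (Bltuc) before the pIndexOf entrypoint is called. A sketch of the dispatch test, illustrative name only:

// Sketch; NeedsCodePointSlowPath is not an ART helper.
#include <cstdint>
#include <limits>

bool NeedsCodePointSlowPath(int32_t ch) {
  // Values above 0xFFFF (or negative) cannot be handled by the 16-bit char
  // search stub, so the call falls back to the managed slow path.
  return static_cast<uint32_t>(ch) > std::numeric_limits<uint16_t>::max();
}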
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(2))); +} + +void IntrinsicCodeGeneratorMIPS64::VisitStringIndexOf(HInvoke* invoke) { + GenerateStringIndexOf(invoke, GetAssembler(), codegen_, GetAllocator(), true); +} + +// int java.lang.String.indexOf(int ch, int fromIndex) +void IntrinsicLocationsBuilderMIPS64::VisitStringIndexOfAfter(HInvoke* invoke) { + LocationSummary* locations = new (arena_) LocationSummary(invoke, + LocationSummary::kCall, + kIntrinsified); + // We have a hand-crafted assembly stub that follows the runtime + // calling convention. So it's best to align the inputs accordingly. + InvokeRuntimeCallingConvention calling_convention; + locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); + locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); + locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2))); + Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt); + locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<GpuRegister>())); +} + +void IntrinsicCodeGeneratorMIPS64::VisitStringIndexOfAfter(HInvoke* invoke) { + GenerateStringIndexOf(invoke, GetAssembler(), codegen_, GetAllocator(), false); +} + +// java.lang.String.String(byte[] bytes) +void IntrinsicLocationsBuilderMIPS64::VisitStringNewStringFromBytes(HInvoke* invoke) { + LocationSummary* locations = new (arena_) LocationSummary(invoke, + LocationSummary::kCall, + kIntrinsified); + InvokeRuntimeCallingConvention calling_convention; + locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); + locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); + locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2))); + locations->SetInAt(3, Location::RegisterLocation(calling_convention.GetRegisterAt(3))); + Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt); + locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<GpuRegister>())); +} + +void IntrinsicCodeGeneratorMIPS64::VisitStringNewStringFromBytes(HInvoke* invoke) { + Mips64Assembler* assembler = GetAssembler(); + LocationSummary* locations = invoke->GetLocations(); + + GpuRegister byte_array = locations->InAt(0).AsRegister<GpuRegister>(); + SlowPathCodeMIPS64* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS64(invoke); + codegen_->AddSlowPath(slow_path); + __ Beqzc(byte_array, slow_path->GetEntryLabel()); + + __ LoadFromOffset(kLoadDoubleword, + TMP, + TR, + QUICK_ENTRYPOINT_OFFSET(kMips64WordSize, pAllocStringFromBytes).Int32Value()); + codegen_->RecordPcInfo(invoke, invoke->GetDexPc()); + __ Jalr(TMP); + __ Nop(); + __ Bind(slow_path->GetExitLabel()); +} + +// java.lang.String.String(char[] value) +void IntrinsicLocationsBuilderMIPS64::VisitStringNewStringFromChars(HInvoke* invoke) { + LocationSummary* locations = new (arena_) LocationSummary(invoke, + LocationSummary::kCall, + kIntrinsified); + InvokeRuntimeCallingConvention calling_convention; + locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); + locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); + locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2))); + Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt); + 
locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<GpuRegister>())); +} + +void IntrinsicCodeGeneratorMIPS64::VisitStringNewStringFromChars(HInvoke* invoke) { + Mips64Assembler* assembler = GetAssembler(); + + __ LoadFromOffset(kLoadDoubleword, + TMP, + TR, + QUICK_ENTRYPOINT_OFFSET(kMips64WordSize, pAllocStringFromChars).Int32Value()); + codegen_->RecordPcInfo(invoke, invoke->GetDexPc()); + __ Jalr(TMP); + __ Nop(); +} + +// java.lang.String.String(String original) +void IntrinsicLocationsBuilderMIPS64::VisitStringNewStringFromString(HInvoke* invoke) { + LocationSummary* locations = new (arena_) LocationSummary(invoke, + LocationSummary::kCall, + kIntrinsified); + InvokeRuntimeCallingConvention calling_convention; + locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); + locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); + locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2))); + Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt); + locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<GpuRegister>())); +} + +void IntrinsicCodeGeneratorMIPS64::VisitStringNewStringFromString(HInvoke* invoke) { + Mips64Assembler* assembler = GetAssembler(); + LocationSummary* locations = invoke->GetLocations(); + + GpuRegister string_to_copy = locations->InAt(0).AsRegister<GpuRegister>(); + SlowPathCodeMIPS64* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS64(invoke); + codegen_->AddSlowPath(slow_path); + __ Beqzc(string_to_copy, slow_path->GetEntryLabel()); + + __ LoadFromOffset(kLoadDoubleword, + TMP, + TR, + QUICK_ENTRYPOINT_OFFSET(kMips64WordSize, pAllocStringFromString).Int32Value()); + codegen_->RecordPcInfo(invoke, invoke->GetDexPc()); + __ Jalr(TMP); + __ Nop(); + __ Bind(slow_path->GetExitLabel()); +} + // Unimplemented intrinsics. 
#define UNIMPLEMENTED_INTRINSIC(Name) \ @@ -776,38 +1458,12 @@ void IntrinsicCodeGeneratorMIPS64::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSE UNIMPLEMENTED_INTRINSIC(MathRoundDouble) UNIMPLEMENTED_INTRINSIC(MathRoundFloat) -UNIMPLEMENTED_INTRINSIC(UnsafeGet) -UNIMPLEMENTED_INTRINSIC(UnsafeGetVolatile) -UNIMPLEMENTED_INTRINSIC(UnsafeGetLong) -UNIMPLEMENTED_INTRINSIC(UnsafeGetLongVolatile) -UNIMPLEMENTED_INTRINSIC(UnsafeGetObject) -UNIMPLEMENTED_INTRINSIC(UnsafeGetObjectVolatile) -UNIMPLEMENTED_INTRINSIC(UnsafePut) -UNIMPLEMENTED_INTRINSIC(UnsafePutOrdered) -UNIMPLEMENTED_INTRINSIC(UnsafePutVolatile) -UNIMPLEMENTED_INTRINSIC(UnsafePutObject) -UNIMPLEMENTED_INTRINSIC(UnsafePutObjectOrdered) -UNIMPLEMENTED_INTRINSIC(UnsafePutObjectVolatile) -UNIMPLEMENTED_INTRINSIC(UnsafePutLong) -UNIMPLEMENTED_INTRINSIC(UnsafePutLongOrdered) -UNIMPLEMENTED_INTRINSIC(UnsafePutLongVolatile) UNIMPLEMENTED_INTRINSIC(UnsafeCASInt) UNIMPLEMENTED_INTRINSIC(UnsafeCASLong) UNIMPLEMENTED_INTRINSIC(UnsafeCASObject) -UNIMPLEMENTED_INTRINSIC(StringCharAt) -UNIMPLEMENTED_INTRINSIC(StringCompareTo) UNIMPLEMENTED_INTRINSIC(StringEquals) -UNIMPLEMENTED_INTRINSIC(StringIndexOf) -UNIMPLEMENTED_INTRINSIC(StringIndexOfAfter) -UNIMPLEMENTED_INTRINSIC(StringNewStringFromBytes) -UNIMPLEMENTED_INTRINSIC(StringNewStringFromChars) -UNIMPLEMENTED_INTRINSIC(StringNewStringFromString) UNIMPLEMENTED_INTRINSIC(LongRotateLeft) -UNIMPLEMENTED_INTRINSIC(LongRotateRight) -UNIMPLEMENTED_INTRINSIC(LongNumberOfTrailingZeros) UNIMPLEMENTED_INTRINSIC(IntegerRotateLeft) -UNIMPLEMENTED_INTRINSIC(IntegerRotateRight) -UNIMPLEMENTED_INTRINSIC(IntegerNumberOfTrailingZeros) UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent) UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck) diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h index 2426f8b08d..939e62c6dd 100644 --- a/compiler/optimizing/nodes.h +++ b/compiler/optimizing/nodes.h @@ -1799,8 +1799,7 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> { return true; } - virtual bool CanDoImplicitNullCheckOn(HInstruction* obj) const { - UNUSED(obj); + virtual bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const { return false; } @@ -1917,16 +1916,14 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> { virtual bool CanBeMoved() const { return false; } // Returns whether the two instructions are of the same kind. - virtual bool InstructionTypeEquals(HInstruction* other) const { - UNUSED(other); + virtual bool InstructionTypeEquals(HInstruction* other ATTRIBUTE_UNUSED) const { return false; } // Returns whether any data encoded in the two instructions is equal. // This method does not look at the inputs. Both instructions must be // of the same type, otherwise the method has undefined behavior. 
- virtual bool InstructionDataEquals(HInstruction* other) const { - UNUSED(other); + virtual bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const { return false; } @@ -2489,8 +2486,7 @@ class HUnaryOperation : public HExpression<1> { Primitive::Type GetResultType() const { return GetType(); } bool CanBeMoved() const OVERRIDE { return true; } - bool InstructionDataEquals(HInstruction* other) const OVERRIDE { - UNUSED(other); + bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE { return true; } @@ -2560,8 +2556,7 @@ class HBinaryOperation : public HExpression<2> { } bool CanBeMoved() const OVERRIDE { return true; } - bool InstructionDataEquals(HInstruction* other) const OVERRIDE { - UNUSED(other); + bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE { return true; } @@ -3392,8 +3387,7 @@ class HInvokeStaticOrDirect : public HInvoke { target_method_(target_method), dispatch_info_(dispatch_info) {} - bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE { - UNUSED(obj); + bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const OVERRIDE { // We access the method via the dex cache so we can't do an implicit null check. // TODO: for intrinsics we can generate implicit null checks. return false; @@ -3831,8 +3825,7 @@ class HDivZeroCheck : public HExpression<1> { bool CanBeMoved() const OVERRIDE { return true; } - bool InstructionDataEquals(HInstruction* other) const OVERRIDE { - UNUSED(other); + bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE { return true; } @@ -4102,8 +4095,7 @@ class HNot : public HUnaryOperation { : HUnaryOperation(result_type, input, dex_pc) {} bool CanBeMoved() const OVERRIDE { return true; } - bool InstructionDataEquals(HInstruction* other) const OVERRIDE { - UNUSED(other); + bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE { return true; } @@ -4128,8 +4120,7 @@ class HBooleanNot : public HUnaryOperation { : HUnaryOperation(Primitive::Type::kPrimBoolean, input, dex_pc) {} bool CanBeMoved() const OVERRIDE { return true; } - bool InstructionDataEquals(HInstruction* other) const OVERRIDE { - UNUSED(other); + bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE { return true; } @@ -4295,8 +4286,7 @@ class HNullCheck : public HExpression<1> { } bool CanBeMoved() const OVERRIDE { return true; } - bool InstructionDataEquals(HInstruction* other) const OVERRIDE { - UNUSED(other); + bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE { return true; } @@ -4441,12 +4431,10 @@ class HArrayGet : public HExpression<2> { } bool CanBeMoved() const OVERRIDE { return true; } - bool InstructionDataEquals(HInstruction* other) const OVERRIDE { - UNUSED(other); + bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE { return true; } - bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE { - UNUSED(obj); + bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const OVERRIDE { // TODO: We can be smarter here. // Currently, the array access is always preceded by an ArrayLength or a NullCheck // which generates the implicit null check. There are cases when these can be removed @@ -4494,8 +4482,7 @@ class HArraySet : public HTemplateInstruction<3> { // Can throw ArrayStoreException. 
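Many of the nodes.h overrides above return true from InstructionDataEquals() unconditionally because those instructions carry no data beyond their inputs; per the comment further up, callers must already have established that the two instructions are of the same kind and compare the inputs themselves. A sketch of how such a caller-side equality check fits together, with a hypothetical helper for the input comparison:

// Illustration only; Equivalent and HaveSameInputs are hypothetical names,
// and the HInstruction type comes from compiler/optimizing/nodes.h.
bool Equivalent(HInstruction* a, HInstruction* b) {
  return a->InstructionTypeEquals(b)   // same node kind
      && a->InstructionDataEquals(b)   // same instruction-local payload
      && HaveSameInputs(a, b);         // inputs checked by the caller
}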
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index c7f08066d4..17a4743290 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -666,7 +666,6 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite
                                                jobject class_loader,
                                                const DexFile& dex_file,
                                                Handle<mirror::DexCache> dex_cache) const {
-  UNUSED(invoke_type);
   std::string method_name = PrettyMethod(method_idx, dex_file);
   MaybeRecordStat(MethodCompilationStat::kAttemptCompilation);
   CompilerDriver* compiler_driver = GetCompilerDriver();
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index f7a7e420bb..a1feaf77bd 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -756,7 +756,9 @@ void ReferenceTypePropagation::ProcessWorklist() {
   while (!worklist_.empty()) {
     HInstruction* instruction = worklist_.back();
     worklist_.pop_back();
-    if (UpdateNullability(instruction) || UpdateReferenceTypeInfo(instruction)) {
+    bool updated_nullability = UpdateNullability(instruction);
+    bool updated_reference_type = UpdateReferenceTypeInfo(instruction);
+    if (updated_nullability || updated_reference_type) {
       AddDependentInstructionsToWorklist(instruction);
     }
   }
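Note: unlike the parameter-annotation cleanups, the reference_type_propagation.cc hunk changes behavior. With the old "||" condition, short-circuit evaluation skipped UpdateReferenceTypeInfo() whenever UpdateNullability() returned true, so one of the two analyses could fail to advance for that instruction in the same pass; evaluating both calls into named booleans first guarantees that both updates always run. A standalone sketch of the difference (the function names here are placeholders, not ART code):

#include <iostream>

static bool UpdateA() { std::cout << "A ran\n"; return true; }
static bool UpdateB() { std::cout << "B ran\n"; return false; }

int main() {
  // Short-circuit: once A returns true, B is never evaluated.
  if (UpdateA() || UpdateB()) { std::cout << "worklist push (short-circuit)\n"; }
  // Evaluate both first, then combine: B always runs, matching the new code.
  bool a = UpdateA();
  bool b = UpdateB();
  if (a || b) { std::cout << "worklist push (both evaluated)\n"; }
}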
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index f1c0b9258a..d97a2a40b2 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -301,7 +301,7 @@ class Assembler {
   }
   // TODO: Implement with disassembler.
-  virtual void Comment(const char* format, ...) { UNUSED(format); }
+  virtual void Comment(const char* format ATTRIBUTE_UNUSED, ...) {}
   // Emit code that will create an activation on the stack
   virtual void BuildFrame(size_t frame_size, ManagedRegister method_reg,
diff --git a/compiler/utils/assembler_test.h b/compiler/utils/assembler_test.h
index b30f7d772e..f1233ca457 100644
--- a/compiler/utils/assembler_test.h
+++ b/compiler/utils/assembler_test.h
@@ -83,6 +83,15 @@ class AssemblerTest : public testing::Test {
                       fmt);
   }
+  std::string RepeatRRNoDupes(void (Ass::*f)(Reg, Reg), std::string fmt) {
+    return RepeatTemplatedRegistersNoDupes<Reg, Reg>(f,
+                                                     GetRegisters(),
+                                                     GetRegisters(),
+                                                     &AssemblerTest::GetRegName<RegisterView::kUsePrimaryName>,
+                                                     &AssemblerTest::GetRegName<RegisterView::kUsePrimaryName>,
+                                                     fmt);
+  }
+
   std::string Repeatrr(void (Ass::*f)(Reg, Reg), std::string fmt) {
     return RepeatTemplatedRegisters<Reg, Reg>(f,
                                               GetRegisters(),
@@ -608,6 +617,45 @@ class AssemblerTest : public testing::Test {
     return str;
   }
+  template <typename Reg1, typename Reg2>
+  std::string RepeatTemplatedRegistersNoDupes(void (Ass::*f)(Reg1, Reg2),
+                                              const std::vector<Reg1*> reg1_registers,
+                                              const std::vector<Reg2*> reg2_registers,
+                                              std::string (AssemblerTest::*GetName1)(const Reg1&),
+                                              std::string (AssemblerTest::*GetName2)(const Reg2&),
+                                              std::string fmt) {
+    WarnOnCombinations(reg1_registers.size() * reg2_registers.size());
+
+    std::string str;
+    for (auto reg1 : reg1_registers) {
+      for (auto reg2 : reg2_registers) {
+        if (reg1 == reg2) continue;
+        (assembler_.get()->*f)(*reg1, *reg2);
+        std::string base = fmt;
+
+        std::string reg1_string = (this->*GetName1)(*reg1);
+        size_t reg1_index;
+        while ((reg1_index = base.find(REG1_TOKEN)) != std::string::npos) {
+          base.replace(reg1_index, ConstexprStrLen(REG1_TOKEN), reg1_string);
+        }
+
+        std::string reg2_string = (this->*GetName2)(*reg2);
+        size_t reg2_index;
+        while ((reg2_index = base.find(REG2_TOKEN)) != std::string::npos) {
+          base.replace(reg2_index, ConstexprStrLen(REG2_TOKEN), reg2_string);
+        }
+
+        if (str.size() > 0) {
+          str += "\n";
+        }
+        str += base;
+      }
+    }
+    // Add a newline at the end.
+    str += "\n";
+    return str;
+  }
+
   template <typename Reg1, typename Reg2, typename Reg3>
   std::string RepeatTemplatedRegisters(void (Ass::*f)(Reg1, Reg2, Reg3),
                                        const std::vector<Reg1*> reg1_registers,
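Note: RepeatRRNoDupes drives the assembler over every ordered pair of distinct registers and builds the matching expected-assembly text by substituting ${reg1}/${reg2} in the format string; the only difference from the existing RepeatRR path is the "if (reg1 == reg2) continue;" skip. It is introduced for the jalr test further down, presumably because "jalr rd, rs" with rd equal to rs is a form the MIPS manuals say to avoid (the instruction is not safely restartable), so the test should not emit it. A minimal sketch of the enumeration, with a hypothetical register subset:

#include <iostream>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> regs = {"a0", "a1", "a2"};  // hypothetical subset
  std::string expected;
  for (const std::string& r1 : regs) {
    for (const std::string& r2 : regs) {
      if (r1 == r2) continue;  // the "NoDupes" part
      expected += "jalr " + r1 + ", " + r2 + "\n";
    }
  }
  std::cout << expected;  // the kind of text DriverStr() compares against
}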
+ str += "\n"; + return str; + } + template <typename Reg1, typename Reg2, typename Reg3> std::string RepeatTemplatedRegisters(void (Ass::*f)(Reg1, Reg2, Reg3), const std::vector<Reg1*> reg1_registers, diff --git a/compiler/utils/mips64/assembler_mips64.cc b/compiler/utils/mips64/assembler_mips64.cc index b078f3e4cf..00e8995bff 100644 --- a/compiler/utils/mips64/assembler_mips64.cc +++ b/compiler/utils/mips64/assembler_mips64.cc @@ -335,6 +335,10 @@ void Mips64Assembler::Sllv(GpuRegister rd, GpuRegister rt, GpuRegister rs) { EmitR(0, rs, rt, rd, 0, 0x04); } +void Mips64Assembler::Rotrv(GpuRegister rd, GpuRegister rt, GpuRegister rs) { + EmitR(0, rs, rt, rd, 1, 0x06); +} + void Mips64Assembler::Srlv(GpuRegister rd, GpuRegister rt, GpuRegister rs) { EmitR(0, rs, rt, rd, 0, 0x06); } @@ -351,6 +355,10 @@ void Mips64Assembler::Dsrl(GpuRegister rd, GpuRegister rt, int shamt) { EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x3a); } +void Mips64Assembler::Drotr(GpuRegister rd, GpuRegister rt, int shamt) { + EmitR(0, static_cast<GpuRegister>(1), rt, rd, shamt, 0x3a); +} + void Mips64Assembler::Dsra(GpuRegister rd, GpuRegister rt, int shamt) { EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x3b); } @@ -363,6 +371,10 @@ void Mips64Assembler::Dsrl32(GpuRegister rd, GpuRegister rt, int shamt) { EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x3e); } +void Mips64Assembler::Drotr32(GpuRegister rd, GpuRegister rt, int shamt) { + EmitR(0, static_cast<GpuRegister>(1), rt, rd, shamt, 0x3e); +} + void Mips64Assembler::Dsra32(GpuRegister rd, GpuRegister rt, int shamt) { EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x3f); } @@ -375,6 +387,10 @@ void Mips64Assembler::Dsrlv(GpuRegister rd, GpuRegister rt, GpuRegister rs) { EmitR(0, rs, rt, rd, 0, 0x16); } +void Mips64Assembler::Drotrv(GpuRegister rd, GpuRegister rt, GpuRegister rs) { + EmitR(0, rs, rt, rd, 1, 0x16); +} + void Mips64Assembler::Dsrav(GpuRegister rd, GpuRegister rt, GpuRegister rs) { EmitR(0, rs, rt, rd, 0, 0x17); } @@ -773,6 +789,10 @@ void Mips64Assembler::Cvtds(FpuRegister fd, FpuRegister fs) { EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0x21); } +void Mips64Assembler::Cvtsl(FpuRegister fd, FpuRegister fs) { + EmitFR(0x11, 0x15, static_cast<FpuRegister>(0), fs, fd, 0x20); +} + void Mips64Assembler::Cvtdl(FpuRegister fd, FpuRegister fs) { EmitFR(0x11, 0x15, static_cast<FpuRegister>(0), fs, fd, 0x21); } diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h index d083eb4306..33f22d2c2d 100644 --- a/compiler/utils/mips64/assembler_mips64.h +++ b/compiler/utils/mips64/assembler_mips64.h @@ -123,15 +123,19 @@ class Mips64Assembler FINAL : public Assembler { void Sra(GpuRegister rd, GpuRegister rt, int shamt); void Sllv(GpuRegister rd, GpuRegister rt, GpuRegister rs); void Srlv(GpuRegister rd, GpuRegister rt, GpuRegister rs); + void Rotrv(GpuRegister rd, GpuRegister rt, GpuRegister rs); void Srav(GpuRegister rd, GpuRegister rt, GpuRegister rs); void Dsll(GpuRegister rd, GpuRegister rt, int shamt); // MIPS64 void Dsrl(GpuRegister rd, GpuRegister rt, int shamt); // MIPS64 + void Drotr(GpuRegister rd, GpuRegister rt, int shamt); void Dsra(GpuRegister rd, GpuRegister rt, int shamt); // MIPS64 void Dsll32(GpuRegister rd, GpuRegister rt, int shamt); // MIPS64 void Dsrl32(GpuRegister rd, GpuRegister rt, int shamt); // MIPS64 + void Drotr32(GpuRegister rd, GpuRegister rt, int shamt); // MIPS64 void Dsra32(GpuRegister rd, GpuRegister rt, int shamt); // MIPS64 void Dsllv(GpuRegister 
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
index d083eb4306..33f22d2c2d 100644
--- a/compiler/utils/mips64/assembler_mips64.h
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -123,15 +123,19 @@ class Mips64Assembler FINAL : public Assembler {
   void Sra(GpuRegister rd, GpuRegister rt, int shamt);
   void Sllv(GpuRegister rd, GpuRegister rt, GpuRegister rs);
   void Srlv(GpuRegister rd, GpuRegister rt, GpuRegister rs);
+  void Rotrv(GpuRegister rd, GpuRegister rt, GpuRegister rs);
   void Srav(GpuRegister rd, GpuRegister rt, GpuRegister rs);
   void Dsll(GpuRegister rd, GpuRegister rt, int shamt); // MIPS64
   void Dsrl(GpuRegister rd, GpuRegister rt, int shamt); // MIPS64
+  void Drotr(GpuRegister rd, GpuRegister rt, int shamt);
   void Dsra(GpuRegister rd, GpuRegister rt, int shamt); // MIPS64
   void Dsll32(GpuRegister rd, GpuRegister rt, int shamt); // MIPS64
   void Dsrl32(GpuRegister rd, GpuRegister rt, int shamt); // MIPS64
+  void Drotr32(GpuRegister rd, GpuRegister rt, int shamt); // MIPS64
   void Dsra32(GpuRegister rd, GpuRegister rt, int shamt); // MIPS64
   void Dsllv(GpuRegister rd, GpuRegister rt, GpuRegister rs); // MIPS64
   void Dsrlv(GpuRegister rd, GpuRegister rt, GpuRegister rs); // MIPS64
+  void Drotrv(GpuRegister rd, GpuRegister rt, GpuRegister rs); // MIPS64
   void Dsrav(GpuRegister rd, GpuRegister rt, GpuRegister rs); // MIPS64
   void Lb(GpuRegister rt, GpuRegister rs, uint16_t imm16);
@@ -230,6 +234,7 @@ class Mips64Assembler FINAL : public Assembler {
   void Cvtdw(FpuRegister fd, FpuRegister fs);
   void Cvtsd(FpuRegister fd, FpuRegister fs);
   void Cvtds(FpuRegister fd, FpuRegister fs);
+  void Cvtsl(FpuRegister fd, FpuRegister fs);
   void Cvtdl(FpuRegister fd, FpuRegister fs);
   void Mfc1(GpuRegister rt, FpuRegister fs);
diff --git a/compiler/utils/mips64/assembler_mips64_test.cc b/compiler/utils/mips64/assembler_mips64_test.cc
index 2071aca546..16f29b00bc 100644
--- a/compiler/utils/mips64/assembler_mips64_test.cc
+++ b/compiler/utils/mips64/assembler_mips64_test.cc
@@ -215,6 +215,22 @@ TEST_F(AssemblerMIPS64Test, AbsD) {
   DriverStr(RepeatFF(&mips64::Mips64Assembler::AbsD, "abs.d ${reg1}, ${reg2}"), "abs.d");
 }
+TEST_F(AssemblerMIPS64Test, MovS) {
+  DriverStr(RepeatFF(&mips64::Mips64Assembler::MovS, "mov.s ${reg1}, ${reg2}"), "mov.s");
+}
+
+TEST_F(AssemblerMIPS64Test, MovD) {
+  DriverStr(RepeatFF(&mips64::Mips64Assembler::MovD, "mov.d ${reg1}, ${reg2}"), "mov.d");
+}
+
+TEST_F(AssemblerMIPS64Test, NegS) {
+  DriverStr(RepeatFF(&mips64::Mips64Assembler::NegS, "neg.s ${reg1}, ${reg2}"), "neg.s");
+}
+
+TEST_F(AssemblerMIPS64Test, NegD) {
+  DriverStr(RepeatFF(&mips64::Mips64Assembler::NegD, "neg.d ${reg1}, ${reg2}"), "neg.d");
+}
+
 TEST_F(AssemblerMIPS64Test, RoundLS) {
   DriverStr(RepeatFF(&mips64::Mips64Assembler::RoundLS, "round.l.s ${reg1}, ${reg2}"), "round.l.s");
 }
@@ -307,6 +323,34 @@ TEST_F(AssemblerMIPS64Test, CvtDL) {
   DriverStr(RepeatFF(&mips64::Mips64Assembler::Cvtdl, "cvt.d.l ${reg1}, ${reg2}"), "cvt.d.l");
 }
+TEST_F(AssemblerMIPS64Test, CvtDS) {
+  DriverStr(RepeatFF(&mips64::Mips64Assembler::Cvtds, "cvt.d.s ${reg1}, ${reg2}"), "cvt.d.s");
+}
+
+TEST_F(AssemblerMIPS64Test, CvtDW) {
+  DriverStr(RepeatFF(&mips64::Mips64Assembler::Cvtdw, "cvt.d.w ${reg1}, ${reg2}"), "cvt.d.w");
+}
+
+TEST_F(AssemblerMIPS64Test, CvtSL) {
+  DriverStr(RepeatFF(&mips64::Mips64Assembler::Cvtsl, "cvt.s.l ${reg1}, ${reg2}"), "cvt.s.l");
+}
+
+TEST_F(AssemblerMIPS64Test, CvtSD) {
+  DriverStr(RepeatFF(&mips64::Mips64Assembler::Cvtsd, "cvt.s.d ${reg1}, ${reg2}"), "cvt.s.d");
+}
+
+TEST_F(AssemblerMIPS64Test, CvtSW) {
+  DriverStr(RepeatFF(&mips64::Mips64Assembler::Cvtsw, "cvt.s.w ${reg1}, ${reg2}"), "cvt.s.w");
+}
+
+////////////////
+// CALL / JMP //
+////////////////
+
+TEST_F(AssemblerMIPS64Test, Jalr) {
+  DriverStr(RepeatRRNoDupes(&mips64::Mips64Assembler::Jalr, "jalr ${reg1}, ${reg2}"), "jalr");
+}
+
 //////////
 // MISC //
 //////////
@@ -319,6 +363,14 @@ TEST_F(AssemblerMIPS64Test, Dbitswap) {
   DriverStr(RepeatRR(&mips64::Mips64Assembler::Dbitswap, "dbitswap ${reg1}, ${reg2}"), "dbitswap");
 }
+TEST_F(AssemblerMIPS64Test, Seb) {
+  DriverStr(RepeatRR(&mips64::Mips64Assembler::Seb, "seb ${reg1}, ${reg2}"), "seb");
+}
+
+TEST_F(AssemblerMIPS64Test, Seh) {
+  DriverStr(RepeatRR(&mips64::Mips64Assembler::Seh, "seh ${reg1}, ${reg2}"), "seh");
+}
+
 TEST_F(AssemblerMIPS64Test, Dsbh) {
   DriverStr(RepeatRR(&mips64::Mips64Assembler::Dsbh, "dsbh ${reg1}, ${reg2}"), "dsbh");
 }
@@ -331,6 +383,42 @@ TEST_F(AssemblerMIPS64Test, Wsbh) {
   DriverStr(RepeatRR(&mips64::Mips64Assembler::Wsbh, "wsbh ${reg1}, ${reg2}"), "wsbh");
 }
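Note: the shift tests added below all pass 5 as the immediate width to RepeatRRIb, matching the 5-bit sa field of the encoding: architecturally, sll/srl/sra and dsll/dsrl/dsra (and drotr) can only encode shift amounts 0..31. Exactly which immediate values RepeatRRIb feeds through is a detail of the test harness; the sketch below only prints the kind of expected lines such a test compares against, for one hypothetical register pair.

#include <cstdint>
#include <iostream>

int main() {
  // 5-bit shift amounts: the full encodable range is 0..31.
  for (uint32_t shamt = 0; shamt < (1u << 5); ++shamt) {
    std::cout << "sll v0, v1, " << shamt << "\n";
  }
}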
+TEST_F(AssemblerMIPS64Test, Sll) {
+  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Sll, 5, "sll ${reg1}, ${reg2}, {imm}"), "sll");
+}
+
+TEST_F(AssemblerMIPS64Test, Srl) {
+  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Srl, 5, "srl ${reg1}, ${reg2}, {imm}"), "srl");
+}
+
+TEST_F(AssemblerMIPS64Test, Sra) {
+  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Sra, 5, "sra ${reg1}, ${reg2}, {imm}"), "sra");
+}
+
+TEST_F(AssemblerMIPS64Test, Dsll) {
+  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsll, 5, "dsll ${reg1}, ${reg2}, {imm}"), "dsll");
+}
+
+TEST_F(AssemblerMIPS64Test, Dsrl) {
+  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsrl, 5, "dsrl ${reg1}, ${reg2}, {imm}"), "dsrl");
+}
+
+TEST_F(AssemblerMIPS64Test, Dsra) {
+  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsra, 5, "dsra ${reg1}, ${reg2}, {imm}"), "dsra");
+}
+
+TEST_F(AssemblerMIPS64Test, Dsll32) {
+  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsll32, 5, "dsll32 ${reg1}, ${reg2}, {imm}"), "dsll32");
+}
+
+TEST_F(AssemblerMIPS64Test, Dsrl32) {
+  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsrl32, 5, "dsrl32 ${reg1}, ${reg2}, {imm}"), "dsrl32");
+}
+
+TEST_F(AssemblerMIPS64Test, Dsra32) {
+  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsra32, 5, "dsra32 ${reg1}, ${reg2}, {imm}"), "dsra32");
+}
+
 TEST_F(AssemblerMIPS64Test, Sc) {
   DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Sc, -9, "sc ${reg1}, {imm}(${reg2})"), "sc");
 }
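Note: the dsll32/dsrl32/dsra32 tests above (and the new drotr32 instruction) cover the other half of the 64-bit shift range: since the immediate field only holds 0..31, the "32" variants shift or rotate by the encoded amount plus 32, i.e. 32..63. A small self-contained check of what drotr32 is expected to compute (plain C++, not assembler code):

#include <cstdint>
#include <cstdio>

static uint64_t RotateRight64(uint64_t value, unsigned amount) {
  amount &= 63u;
  return amount == 0 ? value : (value >> amount) | (value << (64u - amount));
}

int main() {
  const uint64_t value = 0x0123456789abcdefULL;
  // "drotr32 rd, rt, 5" rotates right by 5 + 32 = 37.
  std::printf("0x%016llx\n",
              static_cast<unsigned long long>(RotateRight64(value, 5 + 32)));
}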