| field | value |
|---|---|
| author | 2014-06-30 15:21:53 +0000 |
| committer | 2014-06-30 14:46:08 +0000 |
| commit | 595248a0d9b96a4b442bb0cd3fecd55ae630b03f (patch) |
| tree | 49bee72a5cad1dd97b7847bd67026f5d8b6a0176 |
| parent | 5ea18555c3e413aea86bbb5ff6a320c4ea1f925c (diff) |
| parent | baa7c88a34fdfd230a2a383c2e388945f4d907b6 (diff) |
Merge "AArch64: Rename A64_/A32_ register prefix to x/w."
| mode | path | changes |
|---|---|---|
| -rw-r--r-- | compiler/dex/quick/arm64/arm64_lir.h | 39 |
| -rw-r--r-- | compiler/dex/quick/arm64/call_arm64.cc | 58 |
| -rw-r--r-- | compiler/dex/quick/arm64/int_arm64.cc | 4 |
| -rw-r--r-- | compiler/dex/quick/arm64/target_arm64.cc | 22 |
| -rw-r--r-- | compiler/dex/quick/arm64/utility_arm64.cc | 6 |
5 files changed, 64 insertions(+), 65 deletions(-)
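The change is mechanical but worth spelling out: the backend's reserved-register aliases previously carried an rA64_/rA32_ prefix, and they now use the architectural x/w prefixes, so each alias's name states which view (64-bit or 32-bit) of the register it denotes. A standalone sketch of the convention follows; this is not ART code, and the register numbers are simply those visible in the arm64_lir.h hunk below.

```cpp
// Standalone sketch of the renamed convention (not ART code).  Register
// numbers come from the arm64_lir.h hunk below: x18 = SELF (thread pointer),
// x19 = SUSPEND (suspend counter), x30 = LR (link register).
#include <cstdio>

constexpr int kSelf = 18;
constexpr int kSuspend = 19;
constexpr int kLr = 30;

int main() {
  // Old scheme: rA64_SELF / rA32_SELF encoded the backend's width in the name.
  // New scheme: the prefix is the architectural view itself, so the alias
  // reads like the assembly it stands for.
  std::printf("xSELF = x%d, wSELF = w%d\n", kSelf, kSelf);
  std::printf("xSUSPEND = x%d, wSUSPEND = w%d\n", kSuspend, kSuspend);
  std::printf("xLR = x%d, wLR = w%d\n", kLr, kLr);
  return 0;
}
```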
diff --git a/compiler/dex/quick/arm64/arm64_lir.h b/compiler/dex/quick/arm64/arm64_lir.h
index ac5f33b721..b0865f1c3f 100644
--- a/compiler/dex/quick/arm64/arm64_lir.h
+++ b/compiler/dex/quick/arm64/arm64_lir.h
@@ -136,23 +136,23 @@ enum A64NativeRegisterPool {
   A64_REGISTER_CODE_LIST(A64_DEFINE_REGISTERS)
 #undef A64_DEFINE_REGISTERS
-  rwzr = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 0x3f,
   rxzr = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 0x3f,
-  rwsp = rw31,
+  rwzr = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 0x3f,
   rsp = rx31,
-  rA64_SUSPEND = rx19,
-  rA64_SELF = rx18,
-  rA64_SP = rx31,
-  rA64_LR = rx30,
+  rwsp = rw31,
+
+  // Aliases which are not defined in "ARM Architecture Reference, register names".
+  rxSUSPEND = rx19,
+  rxSELF = rx18,
+  rxLR = rx30,
 
   /*
    * FIXME: It's a bit awkward to define both 32 and 64-bit views of these - we'll only ever use
    * the 64-bit view. However, for now we'll define a 32-bit view to keep these from being
    * allocated as 32-bit temp registers.
    */
-  rA32_SUSPEND = rw19,
-  rA32_SELF = rw18,
-  rA32_SP = rw31,
-  rA32_LR = rw30
+  rwSUSPEND = rw19,
+  rwSELF = rw18,
+  rwLR = rw30,
 };
 
 #define A64_DEFINE_REGSTORAGES(nr) \
@@ -163,17 +163,18 @@ enum A64NativeRegisterPool {
   A64_REGISTER_CODE_LIST(A64_DEFINE_REGSTORAGES)
 #undef A64_DEFINE_REGSTORAGES
 
-constexpr RegStorage rs_wzr(RegStorage::kValid | rwzr);
 constexpr RegStorage rs_xzr(RegStorage::kValid | rxzr);
-constexpr RegStorage rs_rA64_SUSPEND(RegStorage::kValid | rA64_SUSPEND);
-constexpr RegStorage rs_rA64_SELF(RegStorage::kValid | rA64_SELF);
-constexpr RegStorage rs_rA64_SP(RegStorage::kValid | rA64_SP);
-constexpr RegStorage rs_rA64_LR(RegStorage::kValid | rA64_LR);
+constexpr RegStorage rs_wzr(RegStorage::kValid | rwzr);
+// Reserved registers.
+constexpr RegStorage rs_xSUSPEND(RegStorage::kValid | rxSUSPEND);
+constexpr RegStorage rs_xSELF(RegStorage::kValid | rxSELF);
+constexpr RegStorage rs_sp(RegStorage::kValid | rsp);
+constexpr RegStorage rs_xLR(RegStorage::kValid | rxLR);
 // TODO: eliminate the need for these.
-constexpr RegStorage rs_rA32_SUSPEND(RegStorage::kValid | rA32_SUSPEND);
-constexpr RegStorage rs_rA32_SELF(RegStorage::kValid | rA32_SELF);
-constexpr RegStorage rs_rA32_SP(RegStorage::kValid | rA32_SP);
-constexpr RegStorage rs_rA32_LR(RegStorage::kValid | rA32_LR);
+constexpr RegStorage rs_wSUSPEND(RegStorage::kValid | rwSUSPEND);
+constexpr RegStorage rs_wSELF(RegStorage::kValid | rwSELF);
+constexpr RegStorage rs_wsp(RegStorage::kValid | rwsp);
+constexpr RegStorage rs_wLR(RegStorage::kValid | rwLR);
 
 // RegisterLocation templates return values (following the hard-float calling convention).
 const RegLocation arm_loc_c_return =
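One detail visible in the hunk above: rsp keeps register code 31 (rx31), while rwzr/rxzr sit at the out-of-range code 0x3f. This mirrors the AArch64 encoding rule that register number 31 means the zero register in most data-processing instructions but SP in addressing and stack contexts, so the backend cannot let the two share one code. A hedged illustration of that disambiguation, simplified and not ART's actual RegStorage machinery:

```cpp
// Illustration only (not ART's RegStorage types).  Encoding 31 is
// context-dependent on AArch64, so the register pool above resolves the
// ambiguity internally: sp owns code 31 and zr is parked at the unused 0x3f.
#include <cstdint>
#include <cstdio>
#include <string>

std::string GprName(uint8_t code, bool is64) {
  if (code == 0x3f) return is64 ? "xzr" : "wzr";  // compiler-internal zr code
  if (code == 31) return is64 ? "sp" : "wsp";     // architectural code 31
  return (is64 ? "x" : "w") + std::to_string(code);
}

int main() {
  // Prints: sp xzr x18
  std::printf("%s %s %s\n", GprName(31, true).c_str(),
              GprName(0x3f, true).c_str(), GprName(18, true).c_str());
  return 0;
}
```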
diff --git a/compiler/dex/quick/arm64/call_arm64.cc b/compiler/dex/quick/arm64/call_arm64.cc
index cb126f2cc7..cfdf926fba 100644
--- a/compiler/dex/quick/arm64/call_arm64.cc
+++ b/compiler/dex/quick/arm64/call_arm64.cc
@@ -174,12 +174,12 @@ void Arm64Mir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src) {
   // Making a call - use explicit registers
   FlushAllRegs();   /* Everything to home location */
   LoadValueDirectFixed(rl_src, rs_x0);
-  LoadWordDisp(rs_rA64_SELF, QUICK_ENTRYPOINT_OFFSET(8, pHandleFillArrayData).Int32Value(),
-               rs_rA64_LR);
+  LoadWordDisp(rs_xSELF, QUICK_ENTRYPOINT_OFFSET(8, pHandleFillArrayData).Int32Value(),
+               rs_xLR);
   // Materialize a pointer to the fill data image
   NewLIR3(kA64Adr2xd, rx1, 0, WrapPointer(tab_rec));
   ClobberCallerSave();
-  LIR* call_inst = OpReg(kOpBlx, rs_rA64_LR);
+  LIR* call_inst = OpReg(kOpBlx, rs_xLR);
   MarkSafepointPC(call_inst);
 }
@@ -206,7 +206,7 @@ void Arm64Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
       null_check_branch = OpCmpImmBranch(kCondEq, rs_x0, 0, NULL);
     }
   }
-  Load32Disp(rs_rA64_SELF, Thread::ThinLockIdOffset<8>().Int32Value(), rs_w1);
+  Load32Disp(rs_xSELF, Thread::ThinLockIdOffset<8>().Int32Value(), rs_w1);
   OpRegRegImm(kOpAdd, rs_x2, rs_x0, mirror::Object::MonitorOffset().Int32Value());
   NewLIR2(kA64Ldxr2rX, rw3, rx2);
   MarkPossibleNullPointerException(opt_flags);
@@ -221,9 +221,9 @@ void Arm64Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
   }
   // TODO: move to a slow path.
   // Go expensive route - artLockObjectFromCode(obj);
-  LoadWordDisp(rs_rA64_SELF, QUICK_ENTRYPOINT_OFFSET(8, pLockObject).Int32Value(), rs_rA64_LR);
+  LoadWordDisp(rs_xSELF, QUICK_ENTRYPOINT_OFFSET(8, pLockObject).Int32Value(), rs_xLR);
   ClobberCallerSave();
-  LIR* call_inst = OpReg(kOpBlx, rs_rA64_LR);
+  LIR* call_inst = OpReg(kOpBlx, rs_xLR);
   MarkSafepointPC(call_inst);
 
   LIR* success_target = NewLIR0(kPseudoTargetLabel);
@@ -254,7 +254,7 @@ void Arm64Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
       null_check_branch = OpCmpImmBranch(kCondEq, rs_x0, 0, NULL);
     }
   }
-  Load32Disp(rs_rA64_SELF, Thread::ThinLockIdOffset<8>().Int32Value(), rs_w1);
+  Load32Disp(rs_xSELF, Thread::ThinLockIdOffset<8>().Int32Value(), rs_w1);
   Load32Disp(rs_x0, mirror::Object::MonitorOffset().Int32Value(), rs_w2);
   MarkPossibleNullPointerException(opt_flags);
   LIR* slow_unlock_branch = OpCmpBranch(kCondNe, rs_w1, rs_w2, NULL);
@@ -269,9 +269,9 @@ void Arm64Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
   }
   // TODO: move to a slow path.
   // Go expensive route - artUnlockObjectFromCode(obj);
-  LoadWordDisp(rs_rA64_SELF, QUICK_ENTRYPOINT_OFFSET(8, pUnlockObject).Int32Value(), rs_rA64_LR);
+  LoadWordDisp(rs_xSELF, QUICK_ENTRYPOINT_OFFSET(8, pUnlockObject).Int32Value(), rs_xLR);
   ClobberCallerSave();
-  LIR* call_inst = OpReg(kOpBlx, rs_rA64_LR);
+  LIR* call_inst = OpReg(kOpBlx, rs_xLR);
   MarkSafepointPC(call_inst);
 
   LIR* success_target = NewLIR0(kPseudoTargetLabel);
@@ -281,8 +281,8 @@ void Arm64Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src)
 
 void Arm64Mir2Lir::GenMoveException(RegLocation rl_dest) {
   int ex_offset = Thread::ExceptionOffset<8>().Int32Value();
   RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
-  LoadRefDisp(rs_rA64_SELF, ex_offset, rl_result.reg, kNotVolatile);
-  StoreRefDisp(rs_rA64_SELF, ex_offset, rs_xzr, kNotVolatile);
+  LoadRefDisp(rs_xSELF, ex_offset, rl_result.reg, kNotVolatile);
+  StoreRefDisp(rs_xSELF, ex_offset, rs_xzr, kNotVolatile);
   StoreValue(rl_dest, rl_result);
 }
@@ -293,7 +293,7 @@ void Arm64Mir2Lir::MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) {
   RegStorage reg_card_base = AllocTempWide();
   RegStorage reg_card_no = AllocTemp();
   LIR* branch_over = OpCmpImmBranch(kCondEq, val_reg, 0, NULL);
-  LoadWordDisp(rs_rA64_SELF, Thread::CardTableOffset<8>().Int32Value(), reg_card_base);
+  LoadWordDisp(rs_xSELF, Thread::CardTableOffset<8>().Int32Value(), reg_card_base);
   OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
   // TODO(Arm64): generate "strb wB, [xB, wC, uxtw]" rather than "strb wB, [xB, xC]"?
   StoreBaseIndexed(reg_card_base, As64BitReg(reg_card_no), As32BitReg(reg_card_base),
@@ -341,33 +341,33 @@ void Arm64Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method)
     if (cu_->compiler_driver->GetCompilerOptions().GetExplicitStackOverflowChecks()) {
       if (!large_frame) {
         // Load stack limit
-        LoadWordDisp(rs_rA64_SELF, Thread::StackEndOffset<8>().Int32Value(), rs_x9);
+        LoadWordDisp(rs_xSELF, Thread::StackEndOffset<8>().Int32Value(), rs_x9);
       }
     } else {
       // TODO(Arm64) Implement implicit checks.
       // Implicit stack overflow check.
       // Generate a load from [sp, #-framesize]. If this is in the stack
       // redzone we will get a segmentation fault.
-      // Load32Disp(rs_rA64_SP, -Thread::kStackOverflowReservedBytes, rs_wzr);
+      // Load32Disp(rs_wSP, -Thread::kStackOverflowReservedBytes, rs_wzr);
       // MarkPossibleStackOverflowException();
       LOG(FATAL) << "Implicit stack overflow checks not implemented.";
     }
   }
 
   if (frame_size_ > 0) {
-    OpRegImm64(kOpSub, rs_rA64_SP, spill_size);
+    OpRegImm64(kOpSub, rs_sp, spill_size);
   }
 
   /* Need to spill any FP regs? */
   if (fp_spill_mask_) {
     int spill_offset = spill_size - kArm64PointerSize*(num_fp_spills_ + num_core_spills_);
-    SpillFPRegs(rs_rA64_SP, spill_offset, fp_spill_mask_);
+    SpillFPRegs(rs_sp, spill_offset, fp_spill_mask_);
   }
 
   /* Spill core callee saves. */
   if (core_spill_mask_) {
     int spill_offset = spill_size - kArm64PointerSize*num_core_spills_;
-    SpillCoreRegs(rs_rA64_SP, spill_offset, core_spill_mask_);
+    SpillCoreRegs(rs_sp, spill_offset, core_spill_mask_);
   }
 
   if (!skip_overflow_check) {
@@ -383,11 +383,11 @@ void Arm64Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method)
         m2l_->ResetDefTracking();
         GenerateTargetLabel(kPseudoThrowTarget);  // Unwinds stack.
-        m2l_->OpRegImm(kOpAdd, rs_rA64_SP, sp_displace_);
+        m2l_->OpRegImm(kOpAdd, rs_sp, sp_displace_);
         m2l_->ClobberCallerSave();
         ThreadOffset<8> func_offset = QUICK_ENTRYPOINT_OFFSET(8, pThrowStackOverflow);
         m2l_->LockTemp(rs_x8);
-        m2l_->LoadWordDisp(rs_rA64_SELF, func_offset.Int32Value(), rs_x8);
+        m2l_->LoadWordDisp(rs_xSELF, func_offset.Int32Value(), rs_x8);
         m2l_->NewLIR1(kA64Br1x, rs_x8.GetReg());
         m2l_->FreeTemp(rs_x8);
       }
@@ -399,26 +399,26 @@ void Arm64Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method)
       if (large_frame) {
         // Compare Expected SP against bottom of stack.
         // Branch to throw target if there is not enough room.
-        OpRegRegImm(kOpSub, rs_x9, rs_rA64_SP, frame_size_without_spills);
-        LoadWordDisp(rs_rA64_SELF, Thread::StackEndOffset<8>().Int32Value(), rs_x8);
+        OpRegRegImm(kOpSub, rs_x9, rs_sp, frame_size_without_spills);
+        LoadWordDisp(rs_xSELF, Thread::StackEndOffset<8>().Int32Value(), rs_x8);
         LIR* branch = OpCmpBranch(kCondUlt, rs_x9, rs_x8, nullptr);
         AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, spill_size));
-        OpRegCopy(rs_rA64_SP, rs_x9);  // Establish stack after checks.
+        OpRegCopy(rs_sp, rs_x9);  // Establish stack after checks.
       } else {
         /*
          * If the frame is small enough we are guaranteed to have enough space that remains to
          * handle signals on the user stack.
          * Establishes stack before checks.
          */
-        OpRegRegImm(kOpSub, rs_rA64_SP, rs_rA64_SP, frame_size_without_spills);
-        LIR* branch = OpCmpBranch(kCondUlt, rs_rA64_SP, rs_x9, nullptr);
+        OpRegRegImm(kOpSub, rs_sp, rs_sp, frame_size_without_spills);
+        LIR* branch = OpCmpBranch(kCondUlt, rs_sp, rs_x9, nullptr);
         AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, frame_size_));
       }
     } else {
-      OpRegImm(kOpSub, rs_rA64_SP, frame_size_without_spills);
+      OpRegImm(kOpSub, rs_sp, frame_size_without_spills);
     }
   } else {
-    OpRegImm(kOpSub, rs_rA64_SP, frame_size_without_spills);
+    OpRegImm(kOpSub, rs_sp, frame_size_without_spills);
   }
 
   FlushIns(ArgLocs, rl_method);
@@ -448,14 +448,14 @@ void Arm64Mir2Lir::GenExitSequence() {
 
   /* Need to restore any FP callee saves? */
   if (fp_spill_mask_) {
     int spill_offset = frame_size_ - kArm64PointerSize*(num_fp_spills_ + num_core_spills_);
-    UnSpillFPRegs(rs_rA64_SP, spill_offset, fp_spill_mask_);
+    UnSpillFPRegs(rs_sp, spill_offset, fp_spill_mask_);
   }
   if (core_spill_mask_) {
     int spill_offset = frame_size_ - kArm64PointerSize*num_core_spills_;
-    UnSpillCoreRegs(rs_rA64_SP, spill_offset, core_spill_mask_);
+    UnSpillCoreRegs(rs_sp, spill_offset, core_spill_mask_);
   }
 
-  OpRegImm64(kOpAdd, rs_rA64_SP, frame_size_);
+  OpRegImm64(kOpAdd, rs_sp, frame_size_);
   NewLIR0(kA64Ret);
 }
diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc
index ac32f6e3d9..56fb2dd018 100644
--- a/compiler/dex/quick/arm64/int_arm64.cc
+++ b/compiler/dex/quick/arm64/int_arm64.cc
@@ -599,9 +599,7 @@ void Arm64Mir2Lir::GenDivZeroCheckWide(RegStorage reg) {
 
 // Test suspend flag, return target of taken suspend branch
 LIR* Arm64Mir2Lir::OpTestSuspend(LIR* target) {
-  // FIXME: Define rA64_SUSPEND as w19, when we do not need two copies of reserved register.
-  // Note: The opcode is not set as wide, so actually we are using the 32-bit version register.
-  NewLIR3(kA64Subs3rRd, rA64_SUSPEND, rA64_SUSPEND, 1);
+  NewLIR3(kA64Subs3rRd, rwSUSPEND, rwSUSPEND, 1);
   return OpCondBranch((target == NULL) ? kCondEq : kCondNe, target);
 }
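The int_arm64.cc hunk resolves the old FIXME: the suspend counter was always decremented as a 32-bit value, and naming the operand rwSUSPEND now says so directly. Roughly, the emitted sequence behaves like the following C-level equivalent; this is my reading of the hunk, with illustrative names rather than generated output.

```cpp
// Hypothetical C-level equivalent of what OpTestSuspend() emits.  The real
// code generates "subs wSUSPEND, wSUSPEND, #1" followed by a conditional
// branch on the resulting flags.
#include <cstdint>

bool TestSuspend(int32_t* suspend_counter /* the value held in w19 */) {
  *suspend_counter -= 1;         // subs wSUSPEND, wSUSPEND, #1
  return *suspend_counter == 0;  // b.eq <suspend path> when it hits zero
}
```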
diff --git a/compiler/dex/quick/arm64/target_arm64.cc b/compiler/dex/quick/arm64/target_arm64.cc
index 0ebcefba70..6105837f79 100644
--- a/compiler/dex/quick/arm64/target_arm64.cc
+++ b/compiler/dex/quick/arm64/target_arm64.cc
@@ -48,9 +48,9 @@ static constexpr RegStorage dp_regs_arr[] =
      rs_d16, rs_d17, rs_d18, rs_d19, rs_d20, rs_d21, rs_d22, rs_d23,
      rs_d24, rs_d25, rs_d26, rs_d27, rs_d28, rs_d29, rs_d30, rs_d31};
 static constexpr RegStorage reserved_regs_arr[] =
-    {rs_rA32_SUSPEND, rs_rA32_SELF, rs_rA32_SP, rs_rA32_LR, rs_wzr};
+    {rs_wSUSPEND, rs_wSELF, rs_wsp, rs_wLR, rs_wzr};
 static constexpr RegStorage reserved64_regs_arr[] =
-    {rs_rA64_SUSPEND, rs_rA64_SELF, rs_rA64_SP, rs_rA64_LR, rs_xzr};
+    {rs_xSUSPEND, rs_xSELF, rs_sp, rs_xLR, rs_xzr};
 // TUNING: Are there too many temp registers and too less promote target?
 // This definition need to be matched with runtime.cc, quick entry assembly and JNI compiler
 // Note: we are not able to call to C function directly if it un-match C ABI.
@@ -107,11 +107,11 @@ RegLocation Arm64Mir2Lir::LocCReturnDouble() {
 RegStorage Arm64Mir2Lir::TargetReg(SpecialTargetRegister reg) {
   RegStorage res_reg = RegStorage::InvalidReg();
   switch (reg) {
-    case kSelf: res_reg = rs_rA64_SELF; break;
-    case kSuspend: res_reg = rs_rA64_SUSPEND; break;
-    case kLr: res_reg = rs_rA64_LR; break;
+    case kSelf: res_reg = rs_xSELF; break;
+    case kSuspend: res_reg = rs_xSUSPEND; break;
+    case kLr: res_reg = rs_xLR; break;
     case kPc: res_reg = RegStorage::InvalidReg(); break;
-    case kSp: res_reg = rs_rA64_SP; break;
+    case kSp: res_reg = rs_sp; break;
     case kArg0: res_reg = rs_x0; break;
     case kArg1: res_reg = rs_x1; break;
     case kArg2: res_reg = rs_x2; break;
@@ -130,7 +130,7 @@ RegStorage Arm64Mir2Lir::TargetReg(SpecialTargetRegister reg) {
     case kFArg7: res_reg = rs_f7; break;
     case kRet0: res_reg = rs_x0; break;
     case kRet1: res_reg = rs_x1; break;
-    case kInvokeTgt: res_reg = rs_rA64_LR; break;
+    case kInvokeTgt: res_reg = rs_xLR; break;
     case kHiddenArg: res_reg = rs_x12; break;
     case kHiddenFpArg: res_reg = RegStorage::InvalidReg(); break;
     case kCount: res_reg = RegStorage::InvalidReg(); break;
@@ -644,7 +644,7 @@ void Arm64Mir2Lir::CompilerInitializeRegAlloc() {
  */
 void Arm64Mir2Lir::AdjustSpillMask() {
-  core_spill_mask_ |= (1 << rs_rA64_LR.GetRegNum());
+  core_spill_mask_ |= (1 << rs_xLR.GetRegNum());
   num_core_spills_++;
 }
 
@@ -789,13 +789,13 @@ RegStorage Arm64Mir2Lir::LoadHelper(ThreadOffset<4> offset) {
 
 RegStorage Arm64Mir2Lir::LoadHelper(ThreadOffset<8> offset) {
   // TODO(Arm64): use LoadWordDisp instead.
   //   e.g. LoadWordDisp(rs_rA64_SELF, offset.Int32Value(), rs_rA64_LR);
-  LoadBaseDisp(rs_rA64_SELF, offset.Int32Value(), rs_rA64_LR, k64, kNotVolatile);
-  return rs_rA64_LR;
+  LoadBaseDisp(rs_xSELF, offset.Int32Value(), rs_xLR, k64, kNotVolatile);
+  return rs_xLR;
 }
 
 LIR* Arm64Mir2Lir::CheckSuspendUsingLoad() {
   RegStorage tmp = rs_x0;
-  LoadWordDisp(rs_rA64_SELF, Thread::ThreadSuspendTriggerOffset<8>().Int32Value(), tmp);
+  LoadWordDisp(rs_xSELF, Thread::ThreadSuspendTriggerOffset<8>().Int32Value(), tmp);
   LIR* load2 = LoadWordDisp(tmp, 0, tmp);
   return load2;
 }
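LoadHelper() above is the pattern behind most of the renamed call sites: xSELF holds the art::Thread pointer, quick entrypoints live at fixed offsets inside that object, and the loaded function pointer is branched through xLR. A sketch with stand-in types follows; FakeThread and its field layout are hypothetical, not art::Thread's actual layout.

```cpp
// Hypothetical sketch of xSELF-relative entrypoint dispatch; FakeThread and
// the slot's offset are stand-ins for illustration only.
#include <cstdint>

struct FakeThread {
  uint8_t other_state[256];        // entrypoints sit at known offsets
  void (*pHandleFillArrayData)();  // one slot of the entrypoint table
};

void CallEntrypoint(FakeThread* self /* kept live in xSELF */) {
  // ldr xLR, [xSELF, #offset]   ~ LoadBaseDisp(rs_xSELF, offset, rs_xLR, k64, ...)
  void (*entry)() = self->pHandleFillArrayData;
  // blr xLR                     ~ OpReg(kOpBlx, rs_xLR)
  entry();
}
```

Keeping the thread pointer in a reserved register is what lets every helper call stay a two-instruction sequence: one load, one indirect branch.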
diff --git a/compiler/dex/quick/arm64/utility_arm64.cc b/compiler/dex/quick/arm64/utility_arm64.cc
index c8d45c637a..e2484101df 100644
--- a/compiler/dex/quick/arm64/utility_arm64.cc
+++ b/compiler/dex/quick/arm64/utility_arm64.cc
@@ -554,7 +554,7 @@ LIR* Arm64Mir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2
   /* RegReg operations with SP in first parameter need extended register instruction form.
    * Only CMN and CMP instructions are implemented.
    */
-  if (r_dest_src1 == rs_rA64_SP) {
+  if (r_dest_src1 == rs_sp) {
     return OpRegRegExtend(op, r_dest_src1, r_src2, ENCODE_NO_EXTEND);
   } else {
     return OpRegRegShift(op, r_dest_src1, r_src2, ENCODE_NO_SHIFT);
@@ -1110,7 +1110,7 @@ LIR* Arm64Mir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStor
 
   // TODO: in future may need to differentiate Dalvik accesses w/ spills
   if (mem_ref_type_ == ResourceMask::kDalvikReg) {
-    DCHECK(r_base == rs_rA64_SP);
+    DCHECK(r_base == rs_sp);
     AnnotateDalvikRegAccess(load, displacement >> 2, true /* is_load */, r_dest.Is64Bit());
   }
   return load;
@@ -1203,7 +1203,7 @@ LIR* Arm64Mir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegSto
 
   // TODO: In future, may need to differentiate Dalvik & spill accesses.
   if (mem_ref_type_ == ResourceMask::kDalvikReg) {
-    DCHECK(r_base == rs_rA64_SP);
+    DCHECK(r_base == rs_sp);
     AnnotateDalvikRegAccess(store, displacement >> 2, false /* is_load */, r_src.Is64Bit());
   }
   return store;
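Finally, the rs_sp special case in OpRegReg() reflects an encoding constraint rather than a style choice: in the shifted-register forms of CMP/CMN, operand number 31 decodes as xzr, so SP can only be compared via the extended-register forms, where 31 decodes as sp. A standalone sketch of the rule, not ART code:

```cpp
// Standalone sketch (not ART code) of the rule behind the rs_sp branch above.
// Shifted-register CMP:   "cmp xzr, x0"      -- Rn encoding 31 means xzr.
// Extended-register CMP:  "cmp sp, x0, uxtx" -- Rn encoding 31 means sp.
#include <cstdint>

bool NeedsExtendedForm(uint8_t rn_code) {
  // Mirrors the r_dest_src1 == rs_sp check: only an SP first operand forces
  // the extended-register instruction form.
  return rn_code == 31;
}
```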