AArch64: improve usage of TargetReg() and friends.
The single-argument TargetReg(symbolic_reg) now always returns a 32-bit
register. We also avoid calling this overload directly, preferring the
two-argument overload or TargetPtrReg().
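As a sketch of the new contract (illustrative call sites only; the real
uses are in the diff below):

  // One-argument overload: always a 32-bit (w) view.
  RegStorage arg0_w = TargetReg(kArg0);            // e.g. rs_w0
  // Two-argument overload: request the view explicitly.
  RegStorage arg0_x = TargetReg(kArg0, kWide);     // 64-bit (x) view
  RegStorage arg0_r = TargetReg(kArg0, kRef);      // refs are 64-bit on arm64
  RegStorage arg0_s = TargetReg(kArg0, kNotWide);  // 32-bit view
  // Pointer-sized register, e.g. for stack accesses:
  StoreRefDisp(TargetPtrReg(kSp), 0, arg0_r, kNotVolatile);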
Change-Id: I746b3c29a2a2553b399b5c3e7ee3887c7e7c52c3
diff --git a/compiler/dex/quick/arm64/assemble_arm64.cc b/compiler/dex/quick/arm64/assemble_arm64.cc
index 083277d..462be54 100644
--- a/compiler/dex/quick/arm64/assemble_arm64.cc
+++ b/compiler/dex/quick/arm64/assemble_arm64.cc
@@ -655,10 +655,10 @@
if (kIsDebugBuild && (kFailOnSizeError || kReportSizeError)) {
// Register usage checks: First establish register usage requirements based on the
// format in `kind'.
- bool want_float = false;
- bool want_64_bit = false;
- bool want_var_size = true;
- bool want_zero = false;
+ bool want_float = false; // Want a float (rather than core) register.
+ bool want_64_bit = false; // Want a 64-bit (rather than 32-bit) register.
+ bool want_var_size = true; // Want register with variable size (kFmtReg{R,F}).
+ bool want_zero = false; // Want the zero (rather than sp) register.
switch (kind) {
case kFmtRegX:
want_64_bit = true;
@@ -717,9 +717,6 @@
}
}
- // TODO(Arm64): if !want_size_match, then we still should compare the size of the
- // register with the size required by the instruction width (kA64Wide).
-
// Fail, if `expected' contains an unsatisfied requirement.
if (expected != nullptr) {
LOG(WARNING) << "Method: " << PrettyMethod(cu_->method_idx, *cu_->dex_file)
@@ -734,11 +731,12 @@
}
}
- // TODO(Arm64): this may or may not be necessary, depending on how wzr, xzr are
- // defined.
- if (is_zero) {
- operand = 31;
- }
+ // The code below relies on (operand & 0x1f) == 31 holding for registers sp
+ // and zr. Neither needs special treatment: their bottom five bits are
+ // already 31 == 0b11111, which is exactly the value used to encode them in
+ // an instruction.
+ COMPILE_ASSERT((rxzr & 0x1f) == 0x1f, rzr_register_number_must_be_31);
+ COMPILE_ASSERT((rsp & 0x1f) == 0x1f, rsp_register_number_must_be_31);
}
value = (operand << encoder->field_loc[i].start) &
diff --git a/compiler/dex/quick/arm64/codegen_arm64.h b/compiler/dex/quick/arm64/codegen_arm64.h
index e4eeeaf..de97653 100644
--- a/compiler/dex/quick/arm64/codegen_arm64.h
+++ b/compiler/dex/quick/arm64/codegen_arm64.h
@@ -105,16 +105,14 @@
// Required for target - register utilities.
RegStorage TargetReg(SpecialTargetRegister reg) OVERRIDE;
RegStorage TargetReg(SpecialTargetRegister symbolic_reg, WideKind wide_kind) OVERRIDE {
- RegStorage reg = TargetReg(symbolic_reg);
if (wide_kind == kWide || wide_kind == kRef) {
- return (reg.Is64Bit()) ? reg : As64BitReg(reg);
+ return As64BitReg(TargetReg(symbolic_reg));
} else {
- return (reg.Is32Bit()) ? reg : As32BitReg(reg);
+ return Check32BitReg(TargetReg(symbolic_reg));
}
}
RegStorage TargetPtrReg(SpecialTargetRegister symbolic_reg) OVERRIDE {
- RegStorage reg = TargetReg(symbolic_reg);
- return (reg.Is64Bit() ? reg : As64BitReg(reg));
+ return As64BitReg(TargetReg(symbolic_reg));
}
RegStorage GetArgMappingToPhysicalReg(int arg_num);
RegLocation GetReturnAlt();
diff --git a/compiler/dex/quick/arm64/target_arm64.cc b/compiler/dex/quick/arm64/target_arm64.cc
index 2212380..6a27ad0 100644
--- a/compiler/dex/quick/arm64/target_arm64.cc
+++ b/compiler/dex/quick/arm64/target_arm64.cc
@@ -108,19 +108,19 @@
RegStorage Arm64Mir2Lir::TargetReg(SpecialTargetRegister reg) {
RegStorage res_reg = RegStorage::InvalidReg();
switch (reg) {
- case kSelf: res_reg = rs_xSELF; break;
- case kSuspend: res_reg = rs_xSUSPEND; break;
- case kLr: res_reg = rs_xLR; break;
+ case kSelf: res_reg = rs_wSELF; break;
+ case kSuspend: res_reg = rs_wSUSPEND; break;
+ case kLr: res_reg = rs_wLR; break;
case kPc: res_reg = RegStorage::InvalidReg(); break;
- case kSp: res_reg = rs_sp; break;
- case kArg0: res_reg = rs_x0; break;
- case kArg1: res_reg = rs_x1; break;
- case kArg2: res_reg = rs_x2; break;
- case kArg3: res_reg = rs_x3; break;
- case kArg4: res_reg = rs_x4; break;
- case kArg5: res_reg = rs_x5; break;
- case kArg6: res_reg = rs_x6; break;
- case kArg7: res_reg = rs_x7; break;
+ case kSp: res_reg = rs_wsp; break;
+ case kArg0: res_reg = rs_w0; break;
+ case kArg1: res_reg = rs_w1; break;
+ case kArg2: res_reg = rs_w2; break;
+ case kArg3: res_reg = rs_w3; break;
+ case kArg4: res_reg = rs_w4; break;
+ case kArg5: res_reg = rs_w5; break;
+ case kArg6: res_reg = rs_w6; break;
+ case kArg7: res_reg = rs_w7; break;
case kFArg0: res_reg = rs_f0; break;
case kFArg1: res_reg = rs_f1; break;
case kFArg2: res_reg = rs_f2; break;
@@ -129,10 +129,10 @@
case kFArg5: res_reg = rs_f5; break;
case kFArg6: res_reg = rs_f6; break;
case kFArg7: res_reg = rs_f7; break;
- case kRet0: res_reg = rs_x0; break;
- case kRet1: res_reg = rs_x1; break;
- case kInvokeTgt: res_reg = rs_xLR; break;
- case kHiddenArg: res_reg = rs_x12; break;
+ case kRet0: res_reg = rs_w0; break;
+ case kRet1: res_reg = rs_w1; break;
+ case kInvokeTgt: res_reg = rs_wLR; break;
+ case kHiddenArg: res_reg = rs_w12; break;
case kHiddenFpArg: res_reg = RegStorage::InvalidReg(); break;
case kCount: res_reg = RegStorage::InvalidReg(); break;
default: res_reg = RegStorage::InvalidReg();
@@ -929,13 +929,13 @@
*/
RegLocation rl_src = rl_method;
rl_src.location = kLocPhysReg;
- rl_src.reg = TargetReg(kArg0);
+ rl_src.reg = TargetReg(kArg0, kRef);
rl_src.home = false;
MarkLive(rl_src);
StoreValue(rl_method, rl_src);
// If Method* has been promoted, explicitly flush
if (rl_method.location == kLocPhysReg) {
- StoreRefDisp(TargetReg(kSp), 0, TargetReg(kArg0), kNotVolatile);
+ StoreRefDisp(TargetPtrReg(kSp), 0, rl_src.reg, kNotVolatile);
}
if (cu_->num_ins == 0) {
@@ -961,9 +961,9 @@
} else {
// Needs flush.
if (t_loc->ref) {
- StoreRefDisp(TargetReg(kSp), SRegOffset(start_vreg + i), reg, kNotVolatile);
+ StoreRefDisp(TargetPtrReg(kSp), SRegOffset(start_vreg + i), reg, kNotVolatile);
} else {
- StoreBaseDisp(TargetReg(kSp), SRegOffset(start_vreg + i), reg, t_loc->wide ? k64 : k32,
+ StoreBaseDisp(TargetPtrReg(kSp), SRegOffset(start_vreg + i), reg, t_loc->wide ? k64 : k32,
kNotVolatile);
}
}
@@ -971,9 +971,9 @@
// If arriving in frame & promoted.
if (t_loc->location == kLocPhysReg) {
if (t_loc->ref) {
- LoadRefDisp(TargetReg(kSp), SRegOffset(start_vreg + i), t_loc->reg, kNotVolatile);
+ LoadRefDisp(TargetPtrReg(kSp), SRegOffset(start_vreg + i), t_loc->reg, kNotVolatile);
} else {
- LoadBaseDisp(TargetReg(kSp), SRegOffset(start_vreg + i), t_loc->reg,
+ LoadBaseDisp(TargetPtrReg(kSp), SRegOffset(start_vreg + i), t_loc->reg,
t_loc->wide ? k64 : k32, kNotVolatile);
}
}
@@ -1070,7 +1070,7 @@
loc = UpdateLocWide(loc);
if (loc.location == kLocPhysReg) {
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k64, kNotVolatile);
+ StoreBaseDisp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k64, kNotVolatile);
}
next_arg += 2;
} else {
@@ -1078,9 +1078,10 @@
if (loc.location == kLocPhysReg) {
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
if (loc.ref) {
- StoreRefDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, kNotVolatile);
+ StoreRefDisp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, kNotVolatile);
} else {
- StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k32, kNotVolatile);
+ StoreBaseDisp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k32,
+ kNotVolatile);
}
}
next_arg++;
@@ -1114,8 +1115,8 @@
RegStorage temp = TargetReg(kArg3, kNotWide);
// Now load the argument VR and store to the outs.
- Load32Disp(TargetReg(kSp), current_src_offset, temp);
- Store32Disp(TargetReg(kSp), current_dest_offset, temp);
+ Load32Disp(TargetPtrReg(kSp), current_src_offset, temp);
+ Store32Disp(TargetPtrReg(kSp), current_dest_offset, temp);
current_src_offset += bytes_to_move;
current_dest_offset += bytes_to_move;
@@ -1126,8 +1127,7 @@
// Now handle rest not registers if they are
if (in_to_reg_storage_mapping.IsThereStackMapped()) {
- RegStorage regSingle = TargetReg(kArg2);
- RegStorage regWide = RegStorage::Solo64(TargetReg(kArg3).GetReg());
+ RegStorage regWide = TargetReg(kArg3, kWide);
for (int i = start_index; i <= last_mapped_in + regs_left_to_pass_via_stack; i++) {
RegLocation rl_arg = info->args[i];
rl_arg = UpdateRawLoc(rl_arg);
@@ -1139,25 +1139,27 @@
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
if (rl_arg.wide) {
if (rl_arg.location == kLocPhysReg) {
- StoreBaseDisp(TargetReg(kSp), out_offset, rl_arg.reg, k64, kNotVolatile);
+ StoreBaseDisp(TargetPtrReg(kSp), out_offset, rl_arg.reg, k64, kNotVolatile);
} else {
LoadValueDirectWideFixed(rl_arg, regWide);
- StoreBaseDisp(TargetReg(kSp), out_offset, regWide, k64, kNotVolatile);
+ StoreBaseDisp(TargetPtrReg(kSp), out_offset, regWide, k64, kNotVolatile);
}
} else {
if (rl_arg.location == kLocPhysReg) {
if (rl_arg.ref) {
- StoreRefDisp(TargetReg(kSp), out_offset, rl_arg.reg, kNotVolatile);
+ StoreRefDisp(TargetPtrReg(kSp), out_offset, rl_arg.reg, kNotVolatile);
} else {
- StoreBaseDisp(TargetReg(kSp), out_offset, rl_arg.reg, k32, kNotVolatile);
+ StoreBaseDisp(TargetPtrReg(kSp), out_offset, rl_arg.reg, k32, kNotVolatile);
}
} else {
if (rl_arg.ref) {
+ RegStorage regSingle = TargetReg(kArg2, kRef);
LoadValueDirectFixed(rl_arg, regSingle);
- StoreRefDisp(TargetReg(kSp), out_offset, regSingle, kNotVolatile);
+ StoreRefDisp(TargetPtrReg(kSp), out_offset, regSingle, kNotVolatile);
} else {
- LoadValueDirectFixed(rl_arg, As32BitReg(regSingle));
- StoreBaseDisp(TargetReg(kSp), out_offset, As32BitReg(regSingle), k32, kNotVolatile);
+ RegStorage regSingle = TargetReg(kArg2, kNotWide);
+ LoadValueDirectFixed(rl_arg, regSingle);
+ StoreBaseDisp(TargetPtrReg(kSp), out_offset, regSingle, k32, kNotVolatile);
}
}
}
@@ -1194,13 +1196,13 @@
direct_code, direct_method, type);
if (pcrLabel) {
if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
- *pcrLabel = GenExplicitNullCheck(TargetReg(kArg1), info->opt_flags);
+ *pcrLabel = GenExplicitNullCheck(TargetReg(kArg1, kRef), info->opt_flags);
} else {
*pcrLabel = nullptr;
// In lieu of generating a check for kArg1 being null, we need to
// perform a load when doing implicit checks.
RegStorage tmp = AllocTemp();
- Load32Disp(TargetReg(kArg1), 0, tmp);
+ Load32Disp(TargetReg(kArg1, kRef), 0, tmp);
MarkPossibleNullPointerException(info->opt_flags);
FreeTemp(tmp);
}
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 634ab94..fd0fe6e 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -1201,7 +1201,7 @@
* @param wide_kind What kind of view of the special register is required.
* @return Return the #RegStorage corresponding to the given purpose @p reg.
*
- * Note: For 32b system, wide (kWide) views only make sense for the argument registers and the
+ * @note For 32-bit systems, wide (kWide) views only make sense for the argument registers and the
* return. In that case, this function should return a pair where the first component of
* the result will be the indicated special register.
*/
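For the 32-bit case described in the doc comment above, a kWide view of
an argument or return register is a register pair whose low half is the
named register. A hedged sketch (the exact pair construction is
backend-specific):

  // On a 32-bit target, TargetReg(kArg0, kWide) may yield a pair such
  // as (arg0, arg1), with the named special register as the low half.
  RegStorage wide_arg = TargetReg(kArg0, kWide);
  DCHECK(wide_arg.IsPair());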