author     | 2014-05-06 20:49:36 +0100
committer  | 2014-05-07 10:10:19 +0100
commit     | 455759b5702b9435b91d1b4dada22c4cce7cae3c (patch)
tree       | 73df437e7b8e1ca1b78be8d7fb2d38fec01b9dee
parent     | 7189fee4268c70d7ed0151e988ff7c7cd85f2a30 (diff)
Remove LoadBaseDispWide and StoreBaseDispWide.
Just pass k64 or kDouble to non-wide versions.
Change-Id: I000619c3b78d3a71db42edc747c8a0ba1ee229be
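
The shape of the change, in brief: call sites that used to pick between a wide entry point (LoadBaseDispWide/StoreBaseDispWide) and the non-wide one now call LoadBaseDisp/StoreBaseDisp with an explicit OpSize, and a new constexpr helper added to mir_to_lir.h, LoadStoreOpSize(), folds the wide/reference/32-bit decision into that single value. The sketch below is only a minimal, self-contained illustration of the pattern, not the real Mir2Lir API: the trimmed-down OpSize enum and the printing StoreBaseDisp() stub are stand-ins.

```cpp
#include <cstdio>

// Trimmed-down stand-in for the backend's operand-size enum.
enum OpSize { k32, k64, kDouble, kReference };

// Same body as the helper this change adds to Mir2Lir (mir_to_lir.h):
// collapse the old wide/ref/32-bit branching into one OpSize value.
static constexpr OpSize LoadStoreOpSize(bool wide, bool ref) {
  return wide ? k64 : ref ? kReference : k32;
}

// Stand-in for StoreBaseDisp(); the real method emits LIR rather than printing.
static void StoreBaseDisp(int r_base, int displacement, int r_src, OpSize size) {
  std::printf("store base=r%d disp=%d src=r%d size=%d\n", r_base, displacement, r_src, size);
}

int main() {
  bool is_long_or_double = true;   // e.g. a 64-bit field in GenSput()
  bool is_ref = false;

  // Before: branch to StoreBaseDispWide() for wide values,
  //         Store32Disp()/StoreRefDisp() otherwise.
  // After:  one call with an explicit size.
  OpSize size = LoadStoreOpSize(is_long_or_double, is_ref);
  StoreBaseDisp(/*r_base=*/5, /*displacement=*/8, /*r_src=*/1, size);
  return 0;
}
```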
-rw-r--r-- | compiler/dex/quick/arm/codegen_arm.h | 2
-rw-r--r-- | compiler/dex/quick/arm/int_arm.cc | 21
-rw-r--r-- | compiler/dex/quick/arm/utility_arm.cc | 11
-rw-r--r-- | compiler/dex/quick/arm64/codegen_arm64.h | 2
-rw-r--r-- | compiler/dex/quick/arm64/int_arm64.cc | 21
-rw-r--r-- | compiler/dex/quick/arm64/utility_arm64.cc | 11
-rw-r--r-- | compiler/dex/quick/gen_common.cc | 26
-rw-r--r-- | compiler/dex/quick/gen_invoke.cc | 8
-rw-r--r-- | compiler/dex/quick/gen_loadstore.cc | 6
-rw-r--r-- | compiler/dex/quick/mips/codegen_mips.h | 2
-rw-r--r-- | compiler/dex/quick/mips/int_mips.cc | 4
-rw-r--r-- | compiler/dex/quick/mips/utility_mips.cc | 24
-rw-r--r-- | compiler/dex/quick/mir_to_lir.cc | 23
-rw-r--r-- | compiler/dex/quick/mir_to_lir.h | 7
-rw-r--r-- | compiler/dex/quick/ralloc_util.cc | 4
-rw-r--r-- | compiler/dex/quick/x86/codegen_x86.h | 2
-rw-r--r-- | compiler/dex/quick/x86/fp_x86.cc | 4
-rw-r--r-- | compiler/dex/quick/x86/int_x86.cc | 8
-rw-r--r-- | compiler/dex/quick/x86/utility_x86.cc | 10
19 files changed, 62 insertions, 134 deletions
diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h
index 9d1723a55f..8b4576c56a 100644
--- a/compiler/dex/quick/arm/codegen_arm.h
+++ b/compiler/dex/quick/arm/codegen_arm.h
@@ -34,7 +34,6 @@ class ArmMir2Lir FINAL : public Mir2Lir {
     RegStorage LoadHelper(ThreadOffset<4> offset);
     LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size,
                       int s_reg);
-    LIR* LoadBaseDispWide(RegStorage r_base, int displacement, RegStorage r_dest, int s_reg);
     LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
                          int scale, OpSize size);
     LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
@@ -42,7 +41,6 @@ class ArmMir2Lir FINAL : public Mir2Lir {
     LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
     LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
     LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, OpSize size);
-    LIR* StoreBaseDispWide(RegStorage r_base, int displacement, RegStorage r_src);
     LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
                           OpSize size);
     LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index 8391c0366b..8dd31d18ee 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -1170,19 +1170,14 @@ void ArmMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
       }
       FreeTemp(reg_len);
     }
+    LoadBaseDisp(reg_ptr, data_offset, rl_result.reg, size, INVALID_SREG);
+    MarkPossibleNullPointerException(opt_flags);
+    if (!constant_index) {
+      FreeTemp(reg_ptr);
+    }
     if (rl_dest.wide) {
-      LoadBaseDispWide(reg_ptr, data_offset, rl_result.reg, INVALID_SREG);
-      MarkPossibleNullPointerException(opt_flags);
-      if (!constant_index) {
-        FreeTemp(reg_ptr);
-      }
       StoreValueWide(rl_dest, rl_result);
     } else {
-      LoadBaseDisp(reg_ptr, data_offset, rl_result.reg, size, INVALID_SREG);
-      MarkPossibleNullPointerException(opt_flags);
-      if (!constant_index) {
-        FreeTemp(reg_ptr);
-      }
       StoreValue(rl_dest, rl_result);
     }
   } else {
@@ -1275,11 +1270,7 @@ void ArmMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
       FreeTemp(reg_len);
     }
 
-    if (rl_src.wide) {
-      StoreBaseDispWide(reg_ptr, data_offset, rl_src.reg);
-    } else {
-      StoreBaseDisp(reg_ptr, data_offset, rl_src.reg, size);
-    }
+    StoreBaseDisp(reg_ptr, data_offset, rl_src.reg, size);
     MarkPossibleNullPointerException(opt_flags);
   } else {
     /* reg_ptr -> array data */
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index 08acef7873..b7b9093b1d 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -957,7 +957,6 @@ LIR* ArmMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorag
 
 LIR* ArmMir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
                               OpSize size, int s_reg) {
-  DCHECK(!((size == k64) || (size == kDouble)));
   // TODO: base this on target.
   if (size == kWord) {
     size = k32;
@@ -965,11 +964,6 @@ LIR* ArmMir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_
   return LoadBaseDispBody(r_base, displacement, r_dest, size, s_reg);
 }
 
-LIR* ArmMir2Lir::LoadBaseDispWide(RegStorage r_base, int displacement, RegStorage r_dest,
-                                  int s_reg) {
-  return LoadBaseDispBody(r_base, displacement, r_dest, k64, s_reg);
-}
-
 LIR* ArmMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src,
                                    OpSize size) {
@@ -1091,14 +1085,9 @@ LIR* ArmMir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r
   if (size == kWord) {
     size = k32;
   }
-  DCHECK(!((size == k64) || (size == kDouble)));
   return StoreBaseDispBody(r_base, displacement, r_src, size);
 }
 
-LIR* ArmMir2Lir::StoreBaseDispWide(RegStorage r_base, int displacement, RegStorage r_src) {
-  return StoreBaseDispBody(r_base, displacement, r_src, k64);
-}
-
 LIR* ArmMir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
   int opcode;
   DCHECK_EQ(r_dest.IsDouble(), r_src.IsDouble());
diff --git a/compiler/dex/quick/arm64/codegen_arm64.h b/compiler/dex/quick/arm64/codegen_arm64.h
index 94c2563ae3..4e784c6b38 100644
--- a/compiler/dex/quick/arm64/codegen_arm64.h
+++ b/compiler/dex/quick/arm64/codegen_arm64.h
@@ -34,7 +34,6 @@ class Arm64Mir2Lir FINAL : public Mir2Lir {
     RegStorage LoadHelper(ThreadOffset<4> offset);
     LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size,
                       int s_reg);
-    LIR* LoadBaseDispWide(RegStorage r_base, int displacement, RegStorage r_dest, int s_reg);
     LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
                          int scale, OpSize size);
     LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
@@ -42,7 +41,6 @@ class Arm64Mir2Lir FINAL : public Mir2Lir {
     LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
     LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
     LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, OpSize size);
-    LIR* StoreBaseDispWide(RegStorage r_base, int displacement, RegStorage r_src);
     LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
                           OpSize size);
     LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc
index 11fb76571f..c5a3ab6b39 100644
--- a/compiler/dex/quick/arm64/int_arm64.cc
+++ b/compiler/dex/quick/arm64/int_arm64.cc
@@ -1170,19 +1170,14 @@ void Arm64Mir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
       }
       FreeTemp(reg_len);
     }
+    LoadBaseDisp(reg_ptr, data_offset, rl_result.reg, size, INVALID_SREG);
+    MarkPossibleNullPointerException(opt_flags);
+    if (!constant_index) {
+      FreeTemp(reg_ptr);
+    }
     if (rl_dest.wide) {
-      LoadBaseDispWide(reg_ptr, data_offset, rl_result.reg, INVALID_SREG);
-      MarkPossibleNullPointerException(opt_flags);
-      if (!constant_index) {
-        FreeTemp(reg_ptr);
-      }
       StoreValueWide(rl_dest, rl_result);
     } else {
-      LoadBaseDisp(reg_ptr, data_offset, rl_result.reg, size, INVALID_SREG);
-      MarkPossibleNullPointerException(opt_flags);
-      if (!constant_index) {
-        FreeTemp(reg_ptr);
-      }
       StoreValue(rl_dest, rl_result);
     }
   } else {
@@ -1275,11 +1270,7 @@ void Arm64Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
       FreeTemp(reg_len);
     }
 
-    if (rl_src.wide) {
-      StoreBaseDispWide(reg_ptr, data_offset, rl_src.reg);
-    } else {
-      StoreBaseDisp(reg_ptr, data_offset, rl_src.reg, size);
-    }
+    StoreBaseDisp(reg_ptr, data_offset, rl_src.reg, size);
     MarkPossibleNullPointerException(opt_flags);
   } else {
     /* reg_ptr -> array data */
diff --git a/compiler/dex/quick/arm64/utility_arm64.cc b/compiler/dex/quick/arm64/utility_arm64.cc
index d66b834131..8ff1830050 100644
--- a/compiler/dex/quick/arm64/utility_arm64.cc
+++ b/compiler/dex/quick/arm64/utility_arm64.cc
@@ -957,7 +957,6 @@ LIR* Arm64Mir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStor
 
 LIR* Arm64Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
                                 OpSize size, int s_reg) {
-  DCHECK(!((size == k64) || (size == kDouble)));
   // TODO: base this on target.
   if (size == kWord) {
     size = k32;
@@ -965,11 +964,6 @@ LIR* Arm64Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage
   return LoadBaseDispBody(r_base, displacement, r_dest, size, s_reg);
 }
 
-LIR* Arm64Mir2Lir::LoadBaseDispWide(RegStorage r_base, int displacement, RegStorage r_dest,
-                                    int s_reg) {
-  return LoadBaseDispBody(r_base, displacement, r_dest, k64, s_reg);
-}
-
 LIR* Arm64Mir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src,
                                      OpSize size) {
@@ -1091,14 +1085,9 @@ LIR* Arm64Mir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage
   if (size == kWord) {
     size = k32;
   }
-  DCHECK(!((size == k64) || (size == kDouble)));
   return StoreBaseDispBody(r_base, displacement, r_src, size);
 }
 
-LIR* Arm64Mir2Lir::StoreBaseDispWide(RegStorage r_base, int displacement, RegStorage r_src) {
-  return StoreBaseDispBody(r_base, displacement, r_src, k64);
-}
-
 LIR* Arm64Mir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
   int opcode;
   DCHECK_EQ(r_dest.IsDouble(), r_src.IsDouble());
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 2cd17ccffc..395cff7d61 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -564,13 +564,8 @@ void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, bool is_long_or_double,
       // There might have been a store before this volatile one so insert StoreStore barrier.
       GenMemBarrier(kStoreStore);
     }
-    if (is_long_or_double) {
-      StoreBaseDispWide(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg);
-    } else if (rl_src.ref) {
-      StoreRefDisp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg);
-    } else {
-      Store32Disp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg);
-    }
+    OpSize size = LoadStoreOpSize(is_long_or_double, rl_src.ref);
+    StoreBaseDisp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg, size);
     if (field_info.IsVolatile()) {
       // A load might follow the volatile store so insert a StoreLoad barrier.
       GenMemBarrier(kStoreLoad);
@@ -646,13 +641,8 @@ void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest,
     }
 
     RegLocation rl_result = EvalLoc(rl_dest, result_reg_kind, true);
-    if (is_long_or_double) {
-      LoadBaseDispWide(r_base, field_info.FieldOffset().Int32Value(), rl_result.reg, INVALID_SREG);
-    } else if (rl_result.ref) {
-      LoadRefDisp(r_base, field_info.FieldOffset().Int32Value(), rl_result.reg);
-    } else {
-      Load32Disp(r_base, field_info.FieldOffset().Int32Value(), rl_result.reg);
-    }
+    OpSize size = LoadStoreOpSize(is_long_or_double, rl_result.ref);
+    LoadBaseDisp(r_base, field_info.FieldOffset().Int32Value(), rl_result.reg, size, INVALID_SREG);
     FreeTemp(r_base);
 
     if (field_info.IsVolatile()) {
@@ -714,8 +704,8 @@ void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size,
         result_reg_kind = kFPReg;
       }
       rl_result = EvalLoc(rl_dest, result_reg_kind, true);
-      LoadBaseDispWide(rl_obj.reg, field_info.FieldOffset().Int32Value(), rl_result.reg,
-                       rl_obj.s_reg_low);
+      LoadBaseDisp(rl_obj.reg, field_info.FieldOffset().Int32Value(), rl_result.reg,
+                   size, rl_obj.s_reg_low);
       MarkPossibleNullPointerException(opt_flags);
       if (field_info.IsVolatile()) {
         // Without context sensitive analysis, we must issue the most conservative barriers.
@@ -727,7 +717,7 @@
       RegStorage reg_ptr = AllocTemp();
       OpRegRegImm(kOpAdd, reg_ptr, rl_obj.reg, field_info.FieldOffset().Int32Value());
       rl_result = EvalLoc(rl_dest, reg_class, true);
-      LoadBaseDispWide(reg_ptr, 0, rl_result.reg, INVALID_SREG);
+      LoadBaseDisp(reg_ptr, 0, rl_result.reg, size, INVALID_SREG);
       MarkPossibleNullPointerException(opt_flags);
       if (field_info.IsVolatile()) {
         // Without context sensitive analysis, we must issue the most conservative barriers.
@@ -791,7 +781,7 @@ void Mir2Lir::GenIPut(MIR* mir, int opt_flags, OpSize size,
       // There might have been a store before this volatile one so insert StoreStore barrier.
       GenMemBarrier(kStoreStore);
     }
-    StoreBaseDispWide(reg_ptr, 0, rl_src.reg);
+    StoreBaseDisp(reg_ptr, 0, rl_src.reg, size);
     MarkPossibleNullPointerException(opt_flags);
     if (field_info.IsVolatile()) {
       // A load might follow the volatile store so insert a StoreLoad barrier.
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 9c1fbe4389..960ac10528 100644
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -791,7 +791,7 @@ int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
       }
       int outs_offset = (next_use + 1) * 4;
       if (rl_arg.wide) {
-        StoreBaseDispWide(TargetReg(kSp), outs_offset, arg_reg);
+        StoreBaseDisp(TargetReg(kSp), outs_offset, arg_reg, k64);
         next_use += 2;
       } else {
         Store32Disp(TargetReg(kSp), outs_offset, arg_reg);
@@ -859,7 +859,7 @@ int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
     if (loc.wide) {
       loc = UpdateLocWide(loc);
       if ((next_arg >= 2) && (loc.location == kLocPhysReg)) {
-        StoreBaseDispWide(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg);
+        StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k64);
       }
       next_arg += 2;
     } else {
@@ -1433,7 +1433,7 @@ bool Mir2Lir::GenInlinedUnsafeGet(CallInfo* info,
     } else {
       RegStorage rl_temp_offset = AllocTemp();
       OpRegRegReg(kOpAdd, rl_temp_offset, rl_object.reg, rl_offset.reg);
-      LoadBaseDispWide(rl_temp_offset, 0, rl_result.reg, INVALID_SREG);
+      LoadBaseDisp(rl_temp_offset, 0, rl_result.reg, k64, INVALID_SREG);
       FreeTemp(rl_temp_offset);
     }
   } else {
@@ -1480,7 +1480,7 @@ bool Mir2Lir::GenInlinedUnsafePut(CallInfo* info, bool is_long,
     } else {
       RegStorage rl_temp_offset = AllocTemp();
      OpRegRegReg(kOpAdd, rl_temp_offset, rl_object.reg, rl_offset.reg);
-      StoreBaseDispWide(rl_temp_offset, 0, rl_value.reg);
+      StoreBaseDisp(rl_temp_offset, 0, rl_value.reg, k64);
       FreeTemp(rl_temp_offset);
     }
   } else {
diff --git a/compiler/dex/quick/gen_loadstore.cc b/compiler/dex/quick/gen_loadstore.cc
index e6911cd391..6fe1e3169b 100644
--- a/compiler/dex/quick/gen_loadstore.cc
+++ b/compiler/dex/quick/gen_loadstore.cc
@@ -123,7 +123,7 @@ void Mir2Lir::LoadValueDirectWide(RegLocation rl_src, RegStorage r_dest) {
   } else {
     DCHECK((rl_src.location == kLocDalvikFrame) ||
            (rl_src.location == kLocCompilerTemp));
-    LoadBaseDispWide(TargetReg(kSp), SRegOffset(rl_src.s_reg_low), r_dest, INVALID_SREG);
+    LoadBaseDisp(TargetReg(kSp), SRegOffset(rl_src.s_reg_low), r_dest, k64, INVALID_SREG);
   }
 }
 
@@ -258,7 +258,7 @@ void Mir2Lir::StoreValueWide(RegLocation rl_dest, RegLocation rl_src) {
     def_start = last_lir_insn_;
     DCHECK_EQ((mir_graph_->SRegToVReg(rl_dest.s_reg_low)+1),
               mir_graph_->SRegToVReg(GetSRegHi(rl_dest.s_reg_low)));
-    StoreBaseDispWide(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg);
+    StoreBaseDisp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg, k64);
     MarkClean(rl_dest);
     def_end = last_lir_insn_;
     MarkDefWide(rl_dest, def_start, def_end);
@@ -320,7 +320,7 @@ void Mir2Lir::StoreFinalValueWide(RegLocation rl_dest, RegLocation rl_src) {
     LIR *def_start = last_lir_insn_;
     DCHECK_EQ((mir_graph_->SRegToVReg(rl_dest.s_reg_low)+1),
               mir_graph_->SRegToVReg(GetSRegHi(rl_dest.s_reg_low)));
-    StoreBaseDispWide(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg);
+    StoreBaseDisp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg, k64);
     MarkClean(rl_dest);
     LIR *def_end = last_lir_insn_;
     MarkDefWide(rl_dest, def_start, def_end);
diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h
index 7a8376e8b1..cdabf8ebc1 100644
--- a/compiler/dex/quick/mips/codegen_mips.h
+++ b/compiler/dex/quick/mips/codegen_mips.h
@@ -35,7 +35,6 @@ class MipsMir2Lir FINAL : public Mir2Lir {
     LIR* LoadBaseDisp(int r_base, int displacement, int r_dest, OpSize size, int s_reg);
     LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size,
                       int s_reg);
-    LIR* LoadBaseDispWide(RegStorage r_base, int displacement, RegStorage r_dest, int s_reg);
     LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
                          int scale, OpSize size);
     LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
@@ -43,7 +42,6 @@ class MipsMir2Lir FINAL : public Mir2Lir {
     LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
     LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
     LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, OpSize size);
-    LIR* StoreBaseDispWide(RegStorage r_base, int displacement, RegStorage r_src);
     LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
                           OpSize size);
     LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc
index 1410e14925..fe2e495121 100644
--- a/compiler/dex/quick/mips/int_mips.cc
+++ b/compiler/dex/quick/mips/int_mips.cc
@@ -511,7 +511,7 @@ void MipsMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
       GenArrayBoundsCheck(rl_index.reg, reg_len);
       FreeTemp(reg_len);
     }
-    LoadBaseDispWide(reg_ptr, 0, rl_result.reg, INVALID_SREG);
+    LoadBaseDisp(reg_ptr, 0, rl_result.reg, size, INVALID_SREG);
     FreeTemp(reg_ptr);
 
     StoreValueWide(rl_dest, rl_result);
@@ -589,7 +589,7 @@ void MipsMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
      FreeTemp(reg_len);
     }
 
-    StoreBaseDispWide(reg_ptr, 0, rl_src.reg);
+    StoreBaseDisp(reg_ptr, 0, rl_src.reg, size);
   } else {
     rl_src = LoadValue(rl_src, reg_class);
     if (needs_range_check) {
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index 50b945a956..9aa929cbf3 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -551,15 +551,15 @@ LIR* MipsMir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r
   if (size == kWord) {
     size = k32;
   }
-  return LoadBaseDispBody(r_base, displacement, r_dest, RegStorage::InvalidReg(), size,
-                          s_reg);
-}
-
-LIR* MipsMir2Lir::LoadBaseDispWide(RegStorage r_base, int displacement, RegStorage r_dest,
-                                   int s_reg) {
-  return LoadBaseDispBody(r_base, displacement, r_dest.GetLow(), r_dest.GetHigh(), k64, s_reg);
+  if (size == k64 || size == kDouble) {
+    return LoadBaseDispBody(r_base, displacement, r_dest.GetLow(), r_dest.GetHigh(), k64, s_reg);
+  } else {
+    return LoadBaseDispBody(r_base, displacement, r_dest, RegStorage::InvalidReg(), size,
+                            s_reg);
+  }
 }
 
+// FIXME: don't split r_dest into 2 containers.
 LIR* MipsMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src,
                                     RegStorage r_src_hi, OpSize size) {
   LIR *res;
@@ -647,11 +647,11 @@ LIR* MipsMir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage
   if (size == kWord) {
     size = k32;
   }
-  return StoreBaseDispBody(r_base, displacement, r_src, RegStorage::InvalidReg(), size);
-}
-
-LIR* MipsMir2Lir::StoreBaseDispWide(RegStorage r_base, int displacement, RegStorage r_src) {
-  return StoreBaseDispBody(r_base, displacement, r_src.GetLow(), r_src.GetHigh(), k64);
+  if (size == k64 || size == kDouble) {
+    return StoreBaseDispBody(r_base, displacement, r_src.GetLow(), r_src.GetHigh(), size);
+  } else {
+    return StoreBaseDispBody(r_base, displacement, r_src, RegStorage::InvalidReg(), size);
+  }
 }
 
 LIR* MipsMir2Lir::OpThreadMem(OpKind op, ThreadOffset<4> thread_offset) {
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index c9e1950de5..9915ff6f3a 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -59,7 +59,7 @@ RegStorage Mir2Lir::LoadArg(int in_position, bool wide) {
       RegStorage new_regs = AllocTypedTempWide(false, kAnyReg);
       reg_arg_low = new_regs.GetLow();
       reg_arg_high = new_regs.GetHigh();
-      LoadBaseDispWide(TargetReg(kSp), offset, new_regs, INVALID_SREG);
+      LoadBaseDisp(TargetReg(kSp), offset, new_regs, k64, INVALID_SREG);
     } else {
       reg_arg_high = AllocTemp();
       int offset_high = offset + sizeof(uint32_t);
@@ -112,7 +112,7 @@ void Mir2Lir::LoadArgDirect(int in_position, RegLocation rl_dest) {
       OpRegCopy(rl_dest.reg.GetHigh(), reg_arg_high);
       Load32Disp(TargetReg(kSp), offset, rl_dest.reg.GetLow());
     } else {
-      LoadBaseDispWide(TargetReg(kSp), offset, rl_dest.reg, INVALID_SREG);
+      LoadBaseDisp(TargetReg(kSp), offset, rl_dest.reg, k64, INVALID_SREG);
     }
   }
 }
@@ -126,6 +126,9 @@ bool Mir2Lir::GenSpecialIGet(MIR* mir, const InlineMethod& special) {
   }
 
   bool wide = (data.op_variant == InlineMethodAnalyser::IGetVariant(Instruction::IGET_WIDE));
+  bool ref = (data.op_variant == InlineMethodAnalyser::IGetVariant(Instruction::IGET_OBJECT));
+  OpSize size = LoadStoreOpSize(wide, ref);
+
   // The inliner doesn't distinguish kDouble or kFloat, use shorty.
   bool double_or_float = cu_->shorty[0] == 'F' || cu_->shorty[0] == 'D';
 
@@ -134,11 +137,7 @@ bool Mir2Lir::GenSpecialIGet(MIR* mir, const InlineMethod& special) {
   LockArg(data.object_arg);
   RegLocation rl_dest = wide ? GetReturnWide(double_or_float) : GetReturn(double_or_float);
   RegStorage reg_obj = LoadArg(data.object_arg);
-  if (wide) {
-    LoadBaseDispWide(reg_obj, data.field_offset, rl_dest.reg, INVALID_SREG);
-  } else {
-    Load32Disp(reg_obj, data.field_offset, rl_dest.reg);
-  }
+  LoadBaseDisp(reg_obj, data.field_offset, rl_dest.reg, size, INVALID_SREG);
   if (data.is_volatile) {
     // Without context sensitive analysis, we must issue the most conservative barriers.
     // In this case, either a load or store may follow so we issue both barriers.
@@ -161,6 +160,8 @@ bool Mir2Lir::GenSpecialIPut(MIR* mir, const InlineMethod& special) {
   }
 
   bool wide = (data.op_variant == InlineMethodAnalyser::IPutVariant(Instruction::IPUT_WIDE));
+  bool ref = (data.op_variant == InlineMethodAnalyser::IGetVariant(Instruction::IGET_OBJECT));
+  OpSize size = LoadStoreOpSize(wide, ref);
 
   // Point of no return - no aborts after this
   GenPrintLabel(mir);
@@ -172,16 +173,12 @@
     // There might have been a store before this volatile one so insert StoreStore barrier.
     GenMemBarrier(kStoreStore);
   }
-  if (wide) {
-    StoreBaseDispWide(reg_obj, data.field_offset, reg_src);
-  } else {
-    Store32Disp(reg_obj, data.field_offset, reg_src);
-  }
+  StoreBaseDisp(reg_obj, data.field_offset, reg_src, size);
   if (data.is_volatile) {
    // A load might follow the volatile store so insert a StoreLoad barrier.
    GenMemBarrier(kStoreLoad);
  }
- if (data.op_variant == InlineMethodAnalyser::IPutVariant(Instruction::IPUT_OBJECT)) {
+ if (ref) {
    MarkGCCard(reg_src, reg_obj);
  }
  return true;
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index cb4396f4cf..cc6532c76c 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -977,8 +977,6 @@
     virtual RegStorage LoadHelper(ThreadOffset<4> offset) = 0;
     virtual LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
                               OpSize size, int s_reg) = 0;
-    virtual LIR* LoadBaseDispWide(RegStorage r_base, int displacement, RegStorage r_dest,
-                                  int s_reg) = 0;
     virtual LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
                                  int scale, OpSize size) = 0;
     virtual LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
@@ -988,7 +986,6 @@
     virtual LIR* LoadConstantWide(RegStorage r_dest, int64_t value) = 0;
     virtual LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
                                OpSize size) = 0;
-    virtual LIR* StoreBaseDispWide(RegStorage r_base, int displacement, RegStorage r_src) = 0;
     virtual LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
                                   int scale, OpSize size) = 0;
     virtual LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
@@ -1263,6 +1260,10 @@
      */
     RegLocation ForceTempWide(RegLocation loc);
 
+    static constexpr OpSize LoadStoreOpSize(bool wide, bool ref) {
+      return wide ? k64 : ref ? kReference : k32;
+    }
+
     virtual void GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx,
                                     RegLocation rl_dest, RegLocation rl_src);
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index a39611e195..76553af9d7 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -634,14 +634,14 @@ void Mir2Lir::FlushRegWide(RegStorage reg) {
        info1 = info2;
      }
      int v_reg = mir_graph_->SRegToVReg(info1->SReg());
-      StoreBaseDispWide(TargetReg(kSp), VRegOffset(v_reg), reg);
+      StoreBaseDisp(TargetReg(kSp), VRegOffset(v_reg), reg, k64);
     }
   } else {
     RegisterInfo* info = GetRegInfo(reg);
     if (info->IsLive() && info->IsDirty()) {
       info->SetIsDirty(false);
       int v_reg = mir_graph_->SRegToVReg(info->SReg());
-      StoreBaseDispWide(TargetReg(kSp), VRegOffset(v_reg), reg);
+      StoreBaseDisp(TargetReg(kSp), VRegOffset(v_reg), reg, k64);
     }
   }
 }
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 8f0490c361..1898738930 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -34,7 +34,6 @@ class X86Mir2Lir FINAL : public Mir2Lir {
     RegStorage LoadHelper(ThreadOffset<4> offset);
     LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size,
                       int s_reg);
-    LIR* LoadBaseDispWide(RegStorage r_base, int displacement, RegStorage r_dest, int s_reg);
     LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
                          int scale, OpSize size);
     LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
@@ -42,7 +41,6 @@ class X86Mir2Lir FINAL : public Mir2Lir {
     LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
     LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
     LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, OpSize size);
-    LIR* StoreBaseDispWide(RegStorage r_base, int displacement, RegStorage r_src);
     LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
                           OpSize size);
     LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
index 1ed0b63d43..74828c7ad9 100644
--- a/compiler/dex/quick/x86/fp_x86.cc
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -149,7 +149,7 @@ void X86Mir2Lir::GenLongToFP(RegLocation rl_dest, RegLocation rl_src, bool is_do
     } else {
       // It must have been register promoted if it is not a temp but is still in physical
       // register. Since we need it to be in memory to convert, we place it there now.
-      StoreBaseDispWide(TargetReg(kSp), src_v_reg_offset, rl_src.reg);
+      StoreBaseDisp(TargetReg(kSp), src_v_reg_offset, rl_src.reg, k64);
     }
   }
 
@@ -183,7 +183,7 @@ void X86Mir2Lir::GenLongToFP(RegLocation rl_dest, RegLocation rl_src, bool is_do
 
   if (is_double) {
     rl_result = EvalLocWide(rl_dest, kFPReg, true);
-    LoadBaseDispWide(TargetReg(kSp), dest_v_reg_offset, rl_result.reg, INVALID_SREG);
+    LoadBaseDisp(TargetReg(kSp), dest_v_reg_offset, rl_result.reg, k64, INVALID_SREG);
     StoreFinalValueWide(rl_dest, rl_result);
   } else {
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index b747102bc1..b71a2ced15 100644
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -688,14 +688,12 @@ bool X86Mir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) {
   RegLocation rl_dest = size == k64 ? InlineTargetWide(info) : InlineTarget(info);
   RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  // Unaligned access is allowed on x86.
+  LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size, INVALID_SREG);
   if (size == k64) {
-    // Unaligned access is allowed on x86.
-    LoadBaseDispWide(rl_address.reg, 0, rl_result.reg, INVALID_SREG);
     StoreValueWide(rl_dest, rl_result);
   } else {
     DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
-    // Unaligned access is allowed on x86.
-    LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size, INVALID_SREG);
     StoreValue(rl_dest, rl_result);
   }
   return true;
@@ -709,7 +707,7 @@ bool X86Mir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
   if (size == k64) {
     // Unaligned access is allowed on x86.
     RegLocation rl_value = LoadValueWide(rl_src_value, kCoreReg);
-    StoreBaseDispWide(rl_address.reg, 0, rl_value.reg);
+    StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size);
   } else {
     DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
     // Unaligned access is allowed on x86.
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index da6ded5b15..7fe0d1f4d6 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -676,11 +676,6 @@ LIR* X86Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_
                              size, s_reg);
 }
 
-LIR* X86Mir2Lir::LoadBaseDispWide(RegStorage r_base, int displacement, RegStorage r_dest,
-                                  int s_reg) {
-  return LoadBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement, r_dest, k64, s_reg);
-}
-
 LIR* X86Mir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
                                       int displacement, RegStorage r_src, OpSize size, int s_reg) {
   LIR *store = NULL;
@@ -770,11 +765,6 @@ LIR* X86Mir2Lir::StoreBaseDisp(RegStorage r_base, int displacement,
                               INVALID_SREG);
 }
 
-LIR* X86Mir2Lir::StoreBaseDispWide(RegStorage r_base, int displacement, RegStorage r_src) {
-  return StoreBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement,
-                              r_src, k64, INVALID_SREG);
-}
-
 LIR* X86Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
                                    int offset, int check_value, LIR* target) {
   NewLIR3(IS_SIMM8(check_value) ? kX86Cmp32MI8 : kX86Cmp32MI, base_reg.GetReg(), offset,
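
One consequence of widening the LoadBaseDisp/StoreBaseDisp contract is visible in the utility_mips.cc hunks above: with no 64-bit core registers, that backend now recognizes k64/kDouble internally and splits the register pair itself instead of relying on a separate *Wide entry point. A rough, self-contained sketch of that dispatch follows; RegPair and emit_load32() are invented stand-ins, not the MipsMir2Lir API.

```cpp
#include <cstdio>

enum OpSize { k32, k64, kDouble, kReference };

// Invented stand-in for a 64-bit value held in two 32-bit registers.
struct RegPair {
  int low;
  int high;
};

// Stand-in for the single 32-bit load the backend already knows how to emit.
static void emit_load32(int r_base, int displacement, int r_dest) {
  std::printf("lw r%d, %d(r%d)\n", r_dest, displacement, r_base);
}

// Mirrors the shape of MipsMir2Lir::LoadBaseDisp() after this change: wide
// sizes are detected inside the method and handled as two 32-bit halves.
static void LoadBaseDisp(int r_base, int displacement, RegPair r_dest, OpSize size) {
  if (size == k64 || size == kDouble) {
    emit_load32(r_base, displacement, r_dest.low);
    emit_load32(r_base, displacement + 4, r_dest.high);
  } else {
    emit_load32(r_base, displacement, r_dest.low);
  }
}

int main() {
  LoadBaseDisp(/*r_base=*/29, /*displacement=*/16, RegPair{4, 5}, k64);  // wide load
  LoadBaseDisp(/*r_base=*/29, /*displacement=*/24, RegPair{6, 0}, k32);  // 32-bit load
  return 0;
}
```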