path: root/compiler/dex/quick/gen_invoke.cc
author buzbee <buzbee@google.com> 2014-06-21 15:31:01 -0700
committer Andreas Gampe <agampe@google.com> 2014-07-03 00:12:07 -0700
commit b5860fb459f1ed71f39d8a87b45bee6727d79fe8 (patch)
tree 3ac54afcb83678d3edfef855f62b79de8b3fff85 /compiler/dex/quick/gen_invoke.cc
parent 555377d55c37db860583e0655f63a1dacb589921 (diff)
Register promotion support for 64-bit targets
Not sufficiently tested for 64-bit targets, but should be fairly close. A significant amount of refactoring could still be done in later CLs.

With this change we are not making any changes to the vmap scheme. As a result, it is a requirement that if a vreg is promoted to both a 32-bit view and the low half of a 64-bit view, it must share the same physical register. We may change this restriction later on to allow more flexibility for 32-bit Arm. For example, if v4, v5, v4/v5 and v5/v6 are all hot enough to promote, we'd end up with something like:

v4 (as an int)    -> r10
v4/v5 (as a long) -> r10
v5 (as an int)    -> r11
v5/v6 (as a long) -> r11

Fix a couple of ARM64 bugs on the way...

Change-Id: I6a152b9c164d9f1a053622266e165428045362f3
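To make the promotion constraint above concrete, here is a minimal sketch of the invariant. This is not ART code; PromotionEntry and CheckPromotionConsistency are invented names used only for illustration of the rule that a vreg promoted both as a 32-bit value and as the low half of a 64-bit pair must resolve to the same physical register.

#include <cassert>
#include <map>

// Hypothetical helper type, not part of ART: one entry per virtual register.
struct PromotionEntry {
  int core_reg_32 = -1;    // physical reg backing the 32-bit view, -1 if not promoted
  int core_reg_64_lo = -1; // physical reg backing the low half of a 64-bit view
};

// Under the unchanged vmap scheme, any vreg promoted in both views must agree.
void CheckPromotionConsistency(const std::map<int, PromotionEntry>& promotion_map) {
  for (const auto& kv : promotion_map) {
    const PromotionEntry& e = kv.second;
    if (e.core_reg_32 != -1 && e.core_reg_64_lo != -1) {
      assert(e.core_reg_32 == e.core_reg_64_lo);
    }
  }
}

int main() {
  // Mirrors the example above: v4 -> r10 for both views, v5 -> r11 for both views.
  std::map<int, PromotionEntry> pm;
  pm[4] = {/*core_reg_32=*/10, /*core_reg_64_lo=*/10};
  pm[5] = {/*core_reg_32=*/11, /*core_reg_64_lo=*/11};
  CheckPromotionConsistency(pm);
  return 0;
}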
Diffstat (limited to 'compiler/dex/quick/gen_invoke.cc')
-rw-r--r--  compiler/dex/quick/gen_invoke.cc  31
1 file changed, 12 insertions(+), 19 deletions(-)
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index bf51d28be3..c75e681683 100644
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -257,11 +257,11 @@ template <size_t pointer_size>
void Mir2Lir::CallRuntimeHelperRegMethod(ThreadOffset<pointer_size> helper_offset, RegStorage arg0,
bool safepoint_pc) {
RegStorage r_tgt = CallHelperSetup(helper_offset);
- DCHECK_NE(TargetReg(kArg1).GetReg(), arg0.GetReg());
- if (TargetReg(kArg0) != arg0) {
- OpRegCopy(TargetReg(kArg0), arg0);
+ DCHECK(!IsSameReg(TargetReg(kArg1), arg0));
+ if (TargetReg(kArg0, arg0.Is64Bit()).NotExactlyEquals(arg0)) {
+ OpRegCopy(TargetReg(kArg0, arg0.Is64Bit()), arg0);
}
- LoadCurrMethodDirect(TargetReg(kArg1));
+ LoadCurrMethodDirect(TargetRefReg(kArg1));
ClobberCallerSave();
CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
}
@@ -272,11 +272,11 @@ void Mir2Lir::CallRuntimeHelperRegMethodRegLocation(ThreadOffset<pointer_size> h
RegStorage arg0, RegLocation arg2,
bool safepoint_pc) {
RegStorage r_tgt = CallHelperSetup(helper_offset);
- DCHECK_NE(TargetReg(kArg1).GetReg(), arg0.GetReg());
- if (TargetReg(kArg0) != arg0) {
- OpRegCopy(TargetReg(kArg0), arg0);
+ DCHECK(!IsSameReg(TargetReg(kArg1), arg0));
+ if (TargetReg(kArg0, arg0.Is64Bit()).NotExactlyEquals(arg0)) {
+ OpRegCopy(TargetReg(kArg0, arg0.Is64Bit()), arg0);
}
- LoadCurrMethodDirect(TargetReg(kArg1));
+ LoadCurrMethodDirect(TargetRefReg(kArg1));
LoadValueDirectFixed(arg2, TargetReg(kArg2, arg2));
ClobberCallerSave();
CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
@@ -394,13 +394,6 @@ void Mir2Lir::CallRuntimeHelperRegLocationRegLocation(ThreadOffset<pointer_size>
INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegLocationRegLocation, RegLocation arg0,
RegLocation arg1, bool safepoint_pc)
-// TODO: This is a hack! Reshape the two macros into functions and move them to a better place.
-#define IsSameReg(r1, r2) \
- (GetRegInfo(r1)->Master()->GetReg().GetReg() == GetRegInfo(r2)->Master()->GetReg().GetReg())
-#define TargetArgReg(arg, is_wide) \
- (GetRegInfo(TargetReg(arg))->FindMatchingView( \
- (is_wide) ? RegisterInfo::k64SoloStorageMask : RegisterInfo::k32SoloStorageMask)->GetReg())
-
void Mir2Lir::CopyToArgumentRegs(RegStorage arg0, RegStorage arg1) {
if (IsSameReg(arg1, TargetReg(kArg0))) {
if (IsSameReg(arg0, TargetReg(kArg1))) {
@@ -562,7 +555,7 @@ void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
OpRegCopy(RegStorage::Solo32(v_map->core_reg), reg);
need_flush = false;
} else if ((v_map->fp_location == kLocPhysReg) && t_loc->fp) {
- OpRegCopy(RegStorage::Solo32(v_map->FpReg), reg);
+ OpRegCopy(RegStorage::Solo32(v_map->fp_reg), reg);
need_flush = false;
} else {
need_flush = true;
@@ -584,8 +577,8 @@ void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
* halves of the double are promoted. Make sure they are in a usable form.
*/
int lowreg_index = start_vreg + i + (t_loc->high_word ? -1 : 0);
- int low_reg = promotion_map_[lowreg_index].FpReg;
- int high_reg = promotion_map_[lowreg_index + 1].FpReg;
+ int low_reg = promotion_map_[lowreg_index].fp_reg;
+ int high_reg = promotion_map_[lowreg_index + 1].fp_reg;
if (((low_reg & 0x1) != 0) || (high_reg != (low_reg + 1))) {
need_flush = true;
}
@@ -600,7 +593,7 @@ void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
Load32Disp(TargetReg(kSp), SRegOffset(start_vreg + i), RegStorage::Solo32(v_map->core_reg));
}
if (v_map->fp_location == kLocPhysReg) {
- Load32Disp(TargetReg(kSp), SRegOffset(start_vreg + i), RegStorage::Solo32(v_map->FpReg));
+ Load32Disp(TargetReg(kSp), SRegOffset(start_vreg + i), RegStorage::Solo32(v_map->fp_reg));
}
}
}
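The hunk at old line 394 drops the IsSameReg and TargetArgReg macros flagged by the TODO; the new call sites above use IsSameReg(...) and the wide-aware TargetReg(kArgN, is_64bit) form instead. As a rough sketch only (the simplified signatures below are assumptions based on the removed macro bodies, not the actual Mir2Lir declarations), function equivalents could look like this:

// Sketch of member-function replacements for the deleted macros; assumes the
// surrounding Mir2Lir/RegisterInfo/RegStorage declarations from mir_to_lir.h.
bool Mir2Lir::IsSameReg(RegStorage r1, RegStorage r2) {
  // Two storage views alias when they share the same master physical register.
  return GetRegInfo(r1)->Master()->GetReg().GetReg() ==
         GetRegInfo(r2)->Master()->GetReg().GetReg();
}

RegStorage Mir2Lir::TargetReg(SpecialTargetRegister symbolic_reg, bool is_wide) {
  // Resolve the fixed argument register, then pick its 64-bit or 32-bit solo view.
  RegisterInfo* info = GetRegInfo(TargetReg(symbolic_reg));
  return info->FindMatchingView(is_wide ? RegisterInfo::k64SoloStorageMask
                                        : RegisterInfo::k32SoloStorageMask)->GetReg();
}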