Diffstat (limited to 'compiler/dex/quick/gen_invoke.cc')
-rw-r--r--  compiler/dex/quick/gen_invoke.cc  51
1 file changed, 21 insertions(+), 30 deletions(-)
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 93a23a6a6e..4ecfeb9ea5 100644
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -358,7 +358,7 @@ void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
rl_src.location = kLocPhysReg;
rl_src.reg = TargetReg(kArg0);
rl_src.home = false;
- MarkLive(rl_src.reg, rl_src.s_reg_low);
+ MarkLive(rl_src);
if (rl_method.wide) {
StoreValueWide(rl_method, rl_src);
} else {
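
The new overload is tidier at call sites like the one above. Presumably it just unpacks the RegLocation and forwards to the existing (reg, s_reg) form; a minimal sketch under that assumption:

    // Hypothetical body, not taken from this change; MarkLive(RegStorage, int)
    // is the pre-existing two-argument form used on the removed line.
    void Mir2Lir::MarkLive(RegLocation loc) {
      MarkLive(loc.reg, loc.s_reg_low);
    }
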
@@ -753,7 +753,8 @@ int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
// Wide spans, we need the 2nd half of uses[2].
rl_arg = UpdateLocWide(rl_use2);
if (rl_arg.location == kLocPhysReg) {
- reg = rl_arg.reg.GetHigh();
+ // NOTE: not correct for 64-bit core regs, but this needs rewriting for hard-float.
+ reg = rl_arg.reg.IsPair() ? rl_arg.reg.GetHigh() : rl_arg.reg.DoubleToHighSingle();
} else {
// kArg2 & rArg3 can safely be used here
reg = TargetReg(kArg3);
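
The removed line assumed every wide value sits in a register pair; the replacement also copes with a wide value held in a single 64-bit (double) register, though the NOTE flags it as a stopgap until the hard-float rewrite. The distinction, illustrated with a hypothetical helper (HighHalf is a made-up name; the three RegStorage methods are the ones used above):

    // Illustration only, not code from this change.
    RegStorage HighHalf(RegStorage wide_reg) {
      return wide_reg.IsPair()
          ? wide_reg.GetHigh()              // pair of 32-bit core registers
          : wide_reg.DoubleToHighSingle();  // 64-bit double: alias its high single
    }
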
@@ -768,34 +769,28 @@ int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
}
// Loop through the rest
while (next_use < info->num_arg_words) {
- RegStorage low_reg;
- RegStorage high_reg;
+ RegStorage arg_reg;
rl_arg = info->args[next_use];
rl_arg = UpdateRawLoc(rl_arg);
if (rl_arg.location == kLocPhysReg) {
- if (rl_arg.wide) {
- low_reg = rl_arg.reg.GetLow();
- high_reg = rl_arg.reg.GetHigh();
- } else {
- low_reg = rl_arg.reg;
- }
+ arg_reg = rl_arg.reg;
} else {
- low_reg = TargetReg(kArg2);
+ arg_reg = rl_arg.wide ? RegStorage::MakeRegPair(TargetReg(kArg2), TargetReg(kArg3)) :
+ TargetReg(kArg2);
if (rl_arg.wide) {
- high_reg = TargetReg(kArg3);
- LoadValueDirectWideFixed(rl_arg, RegStorage::MakeRegPair(low_reg, high_reg));
+ LoadValueDirectWideFixed(rl_arg, arg_reg);
} else {
- LoadValueDirectFixed(rl_arg, low_reg);
+ LoadValueDirectFixed(rl_arg, arg_reg);
}
call_state = next_call_insn(cu_, info, call_state, target_method,
vtable_idx, direct_code, direct_method, type);
}
int outs_offset = (next_use + 1) * 4;
if (rl_arg.wide) {
- StoreBaseDispWide(TargetReg(kSp), outs_offset, RegStorage::MakeRegPair(low_reg, high_reg));
+ StoreBaseDispWide(TargetReg(kSp), outs_offset, arg_reg);
next_use += 2;
} else {
- Store32Disp(TargetReg(kSp), outs_offset, low_reg);
+ Store32Disp(TargetReg(kSp), outs_offset, arg_reg);
next_use++;
}
call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
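
The loop now threads a single arg_reg through both the load and the store paths instead of juggling low_reg/high_reg, with RegStorage::MakeRegPair building a wide handle up front. A sketch of what MakeRegPair presumably does (the k64BitPair constructor shown is an assumed encoding, not the real one):

    RegStorage RegStorage::MakeRegPair(RegStorage low, RegStorage high) {
      // Assumed: fuse two 32-bit handles into one wide handle.
      return RegStorage(k64BitPair, low.GetReg(), high.GetReg());
    }
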
@@ -926,7 +921,7 @@ int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
// Allocate a free xmm temp. Since we are working through the calling sequence,
// we expect to have an xmm temporary available.
RegStorage temp = AllocTempDouble();
- CHECK_GT(temp.GetLowReg(), 0);
+ DCHECK(temp.Valid());
LIR* ld1 = nullptr;
LIR* ld2 = nullptr;
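
DCHECK(temp.Valid()) states the intent directly and no longer assumes the temp is a pair with a positive low half. Valid() presumably compares the handle against an invalid sentinel; a sketch under that assumption:

    bool RegStorage::Valid() const {
      return reg_ != kInvalidRegVal;  // kInvalidRegVal is an assumed sentinel name
    }
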
@@ -989,9 +984,7 @@ int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
}
// Free the temporary used for the data movement.
- // CLEANUP: temp is currently a bogus pair, eliminate extra free when updated.
- FreeTemp(temp.GetLow());
- FreeTemp(temp.GetHigh());
+ FreeTemp(temp);
} else {
// Moving 32-bits via general purpose register.
bytes_to_move = sizeof(uint32_t);
@@ -1136,8 +1129,8 @@ bool Mir2Lir::GenInlinedCharAt(CallInfo* info) {
if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
LoadBaseIndexed(reg_ptr, reg_off, rl_result.reg, 1, kUnsignedHalf);
} else {
- LoadBaseIndexedDisp(reg_ptr, reg_off, 1, data_offset, rl_result.reg,
- RegStorage::InvalidReg(), kUnsignedHalf, INVALID_SREG);
+ LoadBaseIndexedDisp(reg_ptr, reg_off, 1, data_offset, rl_result.reg, kUnsignedHalf,
+ INVALID_SREG);
}
FreeTemp(reg_off);
FreeTemp(reg_ptr);
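
Judging by the two call sites in this hunk, LoadBaseIndexedDisp drops its separate high-half destination; a wide load now passes a single RegStorage that is itself a pair. The assumed post-change declaration (parameter names are illustrative):

    // Sketch inferred from the seven-argument calls above.
    LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
                             int displacement, RegStorage r_dest, OpSize size,
                             int s_reg);
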
@@ -1409,7 +1402,7 @@ bool Mir2Lir::GenInlinedCurrentThread(CallInfo* info) {
Load32Disp(TargetReg(kSelf), offset.Int32Value(), rl_result.reg);
} else {
CHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64);
- reinterpret_cast<X86Mir2Lir*>(this)->OpRegThreadMem(kOpMov, rl_result.reg.GetReg(), offset);
+ reinterpret_cast<X86Mir2Lir*>(this)->OpRegThreadMem(kOpMov, rl_result.reg, offset);
}
StoreValue(rl_dest, rl_result);
return true;
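
OpRegThreadMem now receives the RegStorage handle rather than the raw register index. An assumed declaration inside X86Mir2Lir (the thread-offset parameter's type is inferred from the caller, not confirmed):

    // Sketch only; exact offset type and qualifiers are guesses.
    void OpRegThreadMem(OpKind op, RegStorage r_dest, ThreadOffset thread_offset);
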
@@ -1432,13 +1425,12 @@ bool Mir2Lir::GenInlinedUnsafeGet(CallInfo* info,
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (is_long) {
if (cu_->instruction_set == kX86) {
- LoadBaseIndexedDisp(rl_object.reg, rl_offset.reg, 0, 0, rl_result.reg.GetLow(),
- rl_result.reg.GetHigh(), k64, INVALID_SREG);
+ LoadBaseIndexedDisp(rl_object.reg, rl_offset.reg, 0, 0, rl_result.reg, k64, INVALID_SREG);
} else {
RegStorage rl_temp_offset = AllocTemp();
OpRegRegReg(kOpAdd, rl_temp_offset, rl_object.reg, rl_offset.reg);
LoadBaseDispWide(rl_temp_offset, 0, rl_result.reg, INVALID_SREG);
- FreeTemp(rl_temp_offset.GetReg());
+ FreeTemp(rl_temp_offset);
}
} else {
LoadBaseIndexed(rl_object.reg, rl_offset.reg, rl_result.reg, 0, k32);
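
FreeTemp likewise appears to gain a RegStorage overload, so callers stop unpacking raw indices via GetReg(). If so, it plausibly releases both halves of a pair itself; a sketch of that assumed overload:

    // Hypothetical; the accessors mirror those used elsewhere in this diff.
    void Mir2Lir::FreeTemp(RegStorage reg) {
      if (reg.IsPair()) {
        FreeTemp(reg.GetLowReg());
        FreeTemp(reg.GetHighReg());
      } else {
        FreeTemp(reg.GetReg());
      }
    }
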
@@ -1480,13 +1472,12 @@ bool Mir2Lir::GenInlinedUnsafePut(CallInfo* info, bool is_long,
if (is_long) {
rl_value = LoadValueWide(rl_src_value, kCoreReg);
if (cu_->instruction_set == kX86) {
- StoreBaseIndexedDisp(rl_object.reg, rl_offset.reg, 0, 0, rl_value.reg.GetLow(),
- rl_value.reg.GetHigh(), k64, INVALID_SREG);
+ StoreBaseIndexedDisp(rl_object.reg, rl_offset.reg, 0, 0, rl_value.reg, k64, INVALID_SREG);
} else {
RegStorage rl_temp_offset = AllocTemp();
OpRegRegReg(kOpAdd, rl_temp_offset, rl_object.reg, rl_offset.reg);
StoreBaseDispWide(rl_temp_offset, 0, rl_value.reg);
- FreeTemp(rl_temp_offset.GetReg());
+ FreeTemp(rl_temp_offset);
}
} else {
rl_value = LoadValue(rl_src_value, kCoreReg);
@@ -1494,7 +1485,7 @@ bool Mir2Lir::GenInlinedUnsafePut(CallInfo* info, bool is_long,
}
// Free up the temp early, to ensure x86 doesn't run out of temporaries in MarkGCCard.
- FreeTemp(rl_offset.reg.GetReg());
+ FreeTemp(rl_offset.reg);
if (is_volatile) {
// A load might follow the volatile store so insert a StoreLoad barrier.
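
The barrier the comment calls for is presumably emitted through the shared Mir2Lir hook; an assumed call shape:

    GenMemBarrier(kStoreLoad);  // assumed API; orders the volatile store before later loads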