Merge "Last patch for running tests on ARM64"
diff --git a/compiler/dex/compiler_enums.h b/compiler/dex/compiler_enums.h
index 718468f..eb4a336 100644
--- a/compiler/dex/compiler_enums.h
+++ b/compiler/dex/compiler_enums.h
@@ -339,7 +339,16 @@
std::ostream& operator<<(std::ostream& os, const DividePattern& pattern);
-// Memory barrier types (see "The JSR-133 Cookbook for Compiler Writers").
+/**
+ * @brief Memory barrier types (see "The JSR-133 Cookbook for Compiler Writers").
+ * @details Without context-sensitive analysis, the most conservative set of barriers
+ * must be issued to preserve the Java Memory Model's guarantees. Thus the recipe is as follows:
+ * -# Use StoreStore barrier before volatile store.
+ * -# Use StoreLoad barrier after volatile store.
+ * -# Use LoadLoad and LoadStore barrier after each volatile load.
+ * -# Use StoreStore barrier after all stores but before return from any constructor whose
+ * class has final fields.
+ */
enum MemBarrierKind {
kLoadStore,
kLoadLoad,
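(As a sketch, not part of this patch: the recipe above translates into barrier
emission around each volatile access, using the GenMemBarrier() hook that the
rest of this patch calls with these MemBarrierKind values; <emit ...> stands in
for the actual load/store codegen.)

    //   volatile store:  GenMemBarrier(kStoreStore);   // rule 1
    //                    <emit the store>
    //                    GenMemBarrier(kStoreLoad);    // rule 2
    //
    //   volatile load:   <emit the load>
    //                    GenMemBarrier(kLoadLoad);     // rule 3
    //                    GenMemBarrier(kLoadStore);
    //
    //   constructor with final fields:
    //                    <emit all field stores>
    //                    GenMemBarrier(kStoreStore);   // rule 4, before return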
@@ -364,6 +373,7 @@
kPCRelFixup, // x86 FIXME: add NEEDS_FIXUP to instruction attributes.
kRegDef0,
kRegDef1,
+ kRegDef2,
kRegDefA,
kRegDefD,
kRegDefFPCSList0,
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index e10f66f..2c125f6 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -175,7 +175,6 @@
#define INVALID_SREG (-1)
#define INVALID_VREG (0xFFFFU)
-#define INVALID_REG (0x7F)
#define INVALID_OFFSET (0xDEADF00FU)
#define MIR_IGNORE_NULL_CHECK (1 << kMIRIgnoreNullCheck)
diff --git a/compiler/dex/quick/arm/arm_lir.h b/compiler/dex/quick/arm/arm_lir.h
index c218621..1784af3 100644
--- a/compiler/dex/quick/arm/arm_lir.h
+++ b/compiler/dex/quick/arm/arm_lir.h
@@ -203,19 +203,53 @@
dr15 = fr30 + ARM_FP_DOUBLE,
};
+// TODO: clean this up; reduce use of or eliminate macros
+
+const RegStorage rs_r0(RegStorage::k32BitSolo, r0);
+const RegStorage rs_r1(RegStorage::k32BitSolo, r1);
+const RegStorage rs_r2(RegStorage::k32BitSolo, r2);
+const RegStorage rs_r3(RegStorage::k32BitSolo, r3);
+const RegStorage rs_rARM_SUSPEND(RegStorage::k32BitSolo, rARM_SUSPEND);
+const RegStorage rs_r5(RegStorage::k32BitSolo, r5);
+const RegStorage rs_r6(RegStorage::k32BitSolo, r6);
+const RegStorage rs_r7(RegStorage::k32BitSolo, r7);
+const RegStorage rs_r8(RegStorage::k32BitSolo, r8);
+const RegStorage rs_rARM_SELF(RegStorage::k32BitSolo, rARM_SELF);
+const RegStorage rs_r10(RegStorage::k32BitSolo, r10);
+const RegStorage rs_r11(RegStorage::k32BitSolo, r11);
+const RegStorage rs_r12(RegStorage::k32BitSolo, r12);
+const RegStorage rs_r13sp(RegStorage::k32BitSolo, r13sp);
+const RegStorage rs_rARM_SP(RegStorage::k32BitSolo, rARM_SP);
+const RegStorage rs_r14lr(RegStorage::k32BitSolo, r14lr);
+const RegStorage rs_rARM_LR(RegStorage::k32BitSolo, rARM_LR);
+const RegStorage rs_r15pc(RegStorage::k32BitSolo, r15pc);
+const RegStorage rs_rARM_PC(RegStorage::k32BitSolo, rARM_PC);
+const RegStorage rs_invalid(RegStorage::kInvalid);
+
// Target-independent aliases.
#define rARM_ARG0 r0
+#define rs_rARM_ARG0 rs_r0
#define rARM_ARG1 r1
+#define rs_rARM_ARG1 rs_r1
#define rARM_ARG2 r2
+#define rs_rARM_ARG2 rs_r2
#define rARM_ARG3 r3
+#define rs_rARM_ARG3 rs_r3
#define rARM_FARG0 r0
+#define rs_rARM_FARG0 rs_r0
#define rARM_FARG1 r1
+#define rs_rARM_FARG1 rs_r1
#define rARM_FARG2 r2
+#define rs_rARM_FARG2 rs_r2
#define rARM_FARG3 r3
+#define rs_rARM_FARG3 rs_r3
#define rARM_RET0 r0
+#define rs_rARM_RET0 rs_r0
#define rARM_RET1 r1
+#define rs_rARM_RET1 rs_r1
#define rARM_INVOKE_TGT rARM_LR
-#define rARM_COUNT INVALID_REG
+#define rs_rARM_INVOKE_TGT rs_rARM_LR
+#define rARM_COUNT RegStorage::kInvalidRegVal
// RegisterLocation templates return values (r0, or r0/r1).
const RegLocation arm_loc_c_return
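(Sketch, not part of this patch: how the new rs_* constants replace raw int
register numbers. Every RegStorage method used below appears elsewhere in this
patch; the surrounding snippet is illustrative only.)

    //   RegStorage dest = rs_r0;         // 32-bit solo core register
    //   int raw = dest.GetReg();         // raw number, e.g. for NewLIRn()
    //   if (dest.IsPair()) {             // wide values carry two halves
    //     /* use dest.GetLow() / dest.GetHigh() */
    //   }
    //   RegStorage none = rs_invalid;    // replaces the INVALID_REG macro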
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index 94f0ca4..175fc06 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -31,11 +31,11 @@
*
* The test loop will look something like:
*
- * adr rBase, <table>
+ * adr r_base, <table>
* ldr r_val, [rARM_SP, v_reg_off]
* mov r_idx, #table_size
* lp:
- * ldmia rBase!, {r_key, r_disp}
+ * ldmia r_base!, {r_key, r_disp}
* sub r_idx, #1
* cmp r_val, r_key
* ifeq
@@ -60,29 +60,29 @@
// Get the switch value
rl_src = LoadValue(rl_src, kCoreReg);
- int rBase = AllocTemp();
+ RegStorage r_base = AllocTemp();
/* Allocate key and disp temps */
- int r_key = AllocTemp();
- int r_disp = AllocTemp();
+ RegStorage r_key = AllocTemp();
+ RegStorage r_disp = AllocTemp();
// Make sure r_key's register number is less than r_disp's number for ldmia
- if (r_key > r_disp) {
- int tmp = r_disp;
+ if (r_key.GetReg() > r_disp.GetReg()) {
+ RegStorage tmp = r_disp;
r_disp = r_key;
r_key = tmp;
}
// Materialize a pointer to the switch table
- NewLIR3(kThumb2Adr, rBase, 0, WrapPointer(tab_rec));
+ NewLIR3(kThumb2Adr, r_base.GetReg(), 0, WrapPointer(tab_rec));
// Set up r_idx
- int r_idx = AllocTemp();
+ RegStorage r_idx = AllocTemp();
LoadConstant(r_idx, size);
// Establish loop branch target
LIR* target = NewLIR0(kPseudoTargetLabel);
// Load next key/disp
- NewLIR2(kThumb2LdmiaWB, rBase, (1 << r_key) | (1 << r_disp));
- OpRegReg(kOpCmp, r_key, rl_src.reg.GetReg());
+ NewLIR2(kThumb2LdmiaWB, r_base.GetReg(), (1 << r_key.GetReg()) | (1 << r_disp.GetReg()));
+ OpRegReg(kOpCmp, r_key, rl_src.reg);
// Go if match. NOTE: No instruction set switch here - must stay Thumb2
OpIT(kCondEq, "");
- LIR* switch_branch = NewLIR1(kThumb2AddPCR, r_disp);
+ LIR* switch_branch = NewLIR1(kThumb2AddPCR, r_disp.GetReg());
tab_rec->anchor = switch_branch;
// Needs to use setflags encoding here
OpRegRegImm(kOpSub, r_idx, r_idx, 1); // For value == 1, this should set flags.
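(A rough C++ equivalent of the test loop emitted above -- illustrative only,
with names chosen to mirror the temps in the generated code:)

    #include <cstdint>
    struct SwitchEntry { int32_t key; int32_t disp; };
    // Walk the (key, displacement) pairs that ldmia loads two at a time and
    // "branch" through the displacement of the first matching key.
    int32_t FindSwitchTarget(const SwitchEntry* table, int32_t table_size,
                             int32_t r_val) {
      for (int32_t r_idx = table_size; r_idx > 0; --r_idx, ++table) {
        if (table->key == r_val) {
          return table->disp;  // "add pc, r_disp" in the generated code
        }
      }
      return 0;  // exhausted: fall through to the default case
    }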
@@ -109,28 +109,28 @@
// Get the switch value
rl_src = LoadValue(rl_src, kCoreReg);
- int table_base = AllocTemp();
+ RegStorage table_base = AllocTemp();
// Materialize a pointer to the switch table
- NewLIR3(kThumb2Adr, table_base, 0, WrapPointer(tab_rec));
+ NewLIR3(kThumb2Adr, table_base.GetReg(), 0, WrapPointer(tab_rec));
int low_key = s4FromSwitchData(&table[2]);
- int keyReg;
+ RegStorage keyReg;
// Remove the bias, if necessary
if (low_key == 0) {
- keyReg = rl_src.reg.GetReg();
+ keyReg = rl_src.reg;
} else {
keyReg = AllocTemp();
- OpRegRegImm(kOpSub, keyReg, rl_src.reg.GetReg(), low_key);
+ OpRegRegImm(kOpSub, keyReg, rl_src.reg, low_key);
}
// Bounds check - if < 0 or >= size continue following switch
OpRegImm(kOpCmp, keyReg, size-1);
LIR* branch_over = OpCondBranch(kCondHi, NULL);
// Load the displacement from the switch table
- int disp_reg = AllocTemp();
+ RegStorage disp_reg = AllocTemp();
LoadBaseIndexed(table_base, keyReg, disp_reg, 2, kWord);
// ..and go! NOTE: No instruction set switch here - must stay Thumb2
- LIR* switch_branch = NewLIR1(kThumb2AddPCR, disp_reg);
+ LIR* switch_branch = NewLIR1(kThumb2AddPCR, disp_reg.GetReg());
tab_rec->anchor = switch_branch;
/* branch_over target here */
@@ -163,13 +163,13 @@
// Making a call - use explicit registers
FlushAllRegs(); /* Everything to home location */
- LoadValueDirectFixed(rl_src, r0);
- LoadWordDisp(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayData).Int32Value(),
- rARM_LR);
+ LoadValueDirectFixed(rl_src, rs_r0);
+ LoadWordDisp(rs_rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayData).Int32Value(),
+ rs_rARM_LR);
// Materialize a pointer to the fill data image
NewLIR3(kThumb2Adr, r1, 0, WrapPointer(tab_rec));
ClobberCallerSave();
- LIR* call_inst = OpReg(kOpBlx, rARM_LR);
+ LIR* call_inst = OpReg(kOpBlx, rs_rARM_LR);
MarkSafepointPC(call_inst);
}
@@ -179,7 +179,7 @@
*/
void ArmMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
FlushAllRegs();
- LoadValueDirectFixed(rl_src, r0); // Get obj
+ LoadValueDirectFixed(rl_src, rs_r0); // Get obj
LockCallTemps(); // Prepare for explicit register usage
constexpr bool kArchVariantHasGoodBranchPredictor = false; // TODO: true if cortex-A15.
if (kArchVariantHasGoodBranchPredictor) {
@@ -188,13 +188,13 @@
null_check_branch = nullptr; // No null check.
} else {
// If the null-check fails it's handled by the slow-path to reduce exception-related meta-data.
- null_check_branch = OpCmpImmBranch(kCondEq, r0, 0, NULL);
+ null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, NULL);
}
- LoadWordDisp(rARM_SELF, Thread::ThinLockIdOffset().Int32Value(), r2);
+ LoadWordDisp(rs_rARM_SELF, Thread::ThinLockIdOffset().Int32Value(), rs_r2);
NewLIR3(kThumb2Ldrex, r1, r0, mirror::Object::MonitorOffset().Int32Value() >> 2);
- LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, r1, 0, NULL);
+ LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, rs_r1, 0, NULL);
NewLIR4(kThumb2Strex, r1, r2, r0, mirror::Object::MonitorOffset().Int32Value() >> 2);
- LIR* lock_success_branch = OpCmpImmBranch(kCondEq, r1, 0, NULL);
+ LIR* lock_success_branch = OpCmpImmBranch(kCondEq, rs_r1, 0, NULL);
LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
@@ -204,9 +204,9 @@
}
// TODO: move to a slow path.
// Go expensive route - artLockObjectFromCode(obj);
- LoadWordDisp(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pLockObject).Int32Value(), rARM_LR);
+ LoadWordDisp(rs_rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pLockObject).Int32Value(), rs_rARM_LR);
ClobberCallerSave();
- LIR* call_inst = OpReg(kOpBlx, rARM_LR);
+ LIR* call_inst = OpReg(kOpBlx, rs_rARM_LR);
MarkSafepointPC(call_inst);
LIR* success_target = NewLIR0(kPseudoTargetLabel);
@@ -214,19 +214,19 @@
GenMemBarrier(kLoadLoad);
} else {
// Explicit null-check as slow-path is entered using an IT.
- GenNullCheck(r0, opt_flags);
- LoadWordDisp(rARM_SELF, Thread::ThinLockIdOffset().Int32Value(), r2);
+ GenNullCheck(rs_r0, opt_flags);
+ LoadWordDisp(rs_rARM_SELF, Thread::ThinLockIdOffset().Int32Value(), rs_r2);
MarkPossibleNullPointerException(opt_flags);
NewLIR3(kThumb2Ldrex, r1, r0, mirror::Object::MonitorOffset().Int32Value() >> 2);
- OpRegImm(kOpCmp, r1, 0);
+ OpRegImm(kOpCmp, rs_r1, 0);
OpIT(kCondEq, "");
NewLIR4(kThumb2Strex/*eq*/, r1, r2, r0, mirror::Object::MonitorOffset().Int32Value() >> 2);
- OpRegImm(kOpCmp, r1, 0);
+ OpRegImm(kOpCmp, rs_r1, 0);
OpIT(kCondNe, "T");
// Go expensive route - artLockObjectFromCode(self, obj);
- LoadWordDisp/*ne*/(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pLockObject).Int32Value(), rARM_LR);
+ LoadWordDisp/*ne*/(rs_rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pLockObject).Int32Value(), rs_rARM_LR);
ClobberCallerSave();
- LIR* call_inst = OpReg(kOpBlx/*ne*/, rARM_LR);
+ LIR* call_inst = OpReg(kOpBlx/*ne*/, rs_rARM_LR);
MarkSafepointPC(call_inst);
GenMemBarrier(kLoadLoad);
}
@@ -239,22 +239,22 @@
*/
void ArmMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
FlushAllRegs();
- LoadValueDirectFixed(rl_src, r0); // Get obj
+ LoadValueDirectFixed(rl_src, rs_r0); // Get obj
LockCallTemps(); // Prepare for explicit register usage
LIR* null_check_branch;
- LoadWordDisp(rARM_SELF, Thread::ThinLockIdOffset().Int32Value(), r2);
+ LoadWordDisp(rs_rARM_SELF, Thread::ThinLockIdOffset().Int32Value(), rs_r2);
constexpr bool kArchVariantHasGoodBranchPredictor = false; // TODO: true if cortex-A15.
if (kArchVariantHasGoodBranchPredictor) {
if ((opt_flags & MIR_IGNORE_NULL_CHECK) && !(cu_->disable_opt & (1 << kNullCheckElimination))) {
null_check_branch = nullptr; // No null check.
} else {
// If the null-check fails it's handled by the slow-path to reduce exception-related meta-data.
- null_check_branch = OpCmpImmBranch(kCondEq, r0, 0, NULL);
+ null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, NULL);
}
- LoadWordDisp(r0, mirror::Object::MonitorOffset().Int32Value(), r1);
- LoadConstantNoClobber(r3, 0);
- LIR* slow_unlock_branch = OpCmpBranch(kCondNe, r1, r2, NULL);
- StoreWordDisp(r0, mirror::Object::MonitorOffset().Int32Value(), r3);
+ LoadWordDisp(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r1);
+ LoadConstantNoClobber(rs_r3, 0);
+ LIR* slow_unlock_branch = OpCmpBranch(kCondNe, rs_r1, rs_r2, NULL);
+ StoreWordDisp(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r3);
LIR* unlock_success_branch = OpUnconditionalBranch(NULL);
LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
@@ -264,9 +264,9 @@
}
// TODO: move to a slow path.
// Go expensive route - artUnlockObjectFromCode(obj);
- LoadWordDisp(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pUnlockObject).Int32Value(), rARM_LR);
+ LoadWordDisp(rs_rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pUnlockObject).Int32Value(), rs_rARM_LR);
ClobberCallerSave();
- LIR* call_inst = OpReg(kOpBlx, rARM_LR);
+ LIR* call_inst = OpReg(kOpBlx, rs_rARM_LR);
MarkSafepointPC(call_inst);
LIR* success_target = NewLIR0(kPseudoTargetLabel);
@@ -274,19 +274,20 @@
GenMemBarrier(kStoreLoad);
} else {
// Explicit null-check as slow-path is entered using an IT.
- GenNullCheck(r0, opt_flags);
- LoadWordDisp(r0, mirror::Object::MonitorOffset().Int32Value(), r1); // Get lock
+ GenNullCheck(rs_r0, opt_flags);
+ LoadWordDisp(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r1); // Get lock
MarkPossibleNullPointerException(opt_flags);
- LoadWordDisp(rARM_SELF, Thread::ThinLockIdOffset().Int32Value(), r2);
- LoadConstantNoClobber(r3, 0);
+ LoadWordDisp(rs_rARM_SELF, Thread::ThinLockIdOffset().Int32Value(), rs_r2);
+ LoadConstantNoClobber(rs_r3, 0);
// Is lock unheld on lock or held by us (==thread_id) on unlock?
- OpRegReg(kOpCmp, r1, r2);
+ OpRegReg(kOpCmp, rs_r1, rs_r2);
OpIT(kCondEq, "EE");
- StoreWordDisp/*eq*/(r0, mirror::Object::MonitorOffset().Int32Value(), r3);
+ StoreWordDisp/*eq*/(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r3);
// Go expensive route - UnlockObjectFromCode(obj);
- LoadWordDisp/*ne*/(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pUnlockObject).Int32Value(), rARM_LR);
+ LoadWordDisp/*ne*/(rs_rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pUnlockObject).Int32Value(),
+ rs_rARM_LR);
ClobberCallerSave();
- LIR* call_inst = OpReg(kOpBlx/*ne*/, rARM_LR);
+ LIR* call_inst = OpReg(kOpBlx/*ne*/, rs_rARM_LR);
MarkSafepointPC(call_inst);
GenMemBarrier(kStoreLoad);
}
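(Sketch of the thin-lock protocol the two fast paths above implement, written
against std::atomic rather than raw ldrex/strex; pLockObject/pUnlockObject are
the slow-path entrypoints named in the generated code, everything else is an
illustrative stand-in:)

    #include <atomic>
    #include <cstdint>
    struct Object { std::atomic<uint32_t> monitor{0}; };  // 0 == unlocked

    bool ThinLockEnter(Object* obj, uint32_t thread_id) {
      uint32_t expected = 0;
      // ldrex/strex loop collapsed into a CAS; acquire ordering mirrors the
      // GenMemBarrier(kLoadLoad) issued after a successful lock.
      if (obj->monitor.compare_exchange_strong(expected, thread_id,
                                               std::memory_order_acquire)) {
        return true;   // fast path: lock word was 0, now holds our id
      }
      return false;    // contended or inflated: call pLockObject
    }

    bool ThinLockExit(Object* obj, uint32_t thread_id) {
      if (obj->monitor.load(std::memory_order_relaxed) == thread_id) {
        // A plain store of 0 releases; release ordering mirrors the
        // GenMemBarrier(kStoreLoad) issued after the store.
        obj->monitor.store(0, std::memory_order_release);
        return true;
      }
      return false;    // not a thin lock we hold: call pUnlockObject
    }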
@@ -295,10 +296,10 @@
void ArmMir2Lir::GenMoveException(RegLocation rl_dest) {
int ex_offset = Thread::ExceptionOffset().Int32Value();
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- int reset_reg = AllocTemp();
- LoadWordDisp(rARM_SELF, ex_offset, rl_result.reg.GetReg());
+ RegStorage reset_reg = AllocTemp();
+ LoadWordDisp(rs_rARM_SELF, ex_offset, rl_result.reg);
LoadConstant(reset_reg, 0);
- StoreWordDisp(rARM_SELF, ex_offset, reset_reg);
+ StoreWordDisp(rs_rARM_SELF, ex_offset, reset_reg);
FreeTemp(reset_reg);
StoreValue(rl_dest, rl_result);
}
@@ -306,14 +307,13 @@
/*
* Mark garbage collection card. Skip if the value we're storing is null.
*/
-void ArmMir2Lir::MarkGCCard(int val_reg, int tgt_addr_reg) {
- int reg_card_base = AllocTemp();
- int reg_card_no = AllocTemp();
+void ArmMir2Lir::MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) {
+ RegStorage reg_card_base = AllocTemp();
+ RegStorage reg_card_no = AllocTemp();
LIR* branch_over = OpCmpImmBranch(kCondEq, val_reg, 0, NULL);
- LoadWordDisp(rARM_SELF, Thread::CardTableOffset().Int32Value(), reg_card_base);
+ LoadWordDisp(rs_rARM_SELF, Thread::CardTableOffset().Int32Value(), reg_card_base);
OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
- StoreBaseIndexed(reg_card_base, reg_card_no, reg_card_base, 0,
- kUnsignedByte);
+ StoreBaseIndexed(reg_card_base, reg_card_no, reg_card_base, 0, kUnsignedByte);
LIR* target = NewLIR0(kPseudoTargetLabel);
branch_over->target = target;
FreeTemp(reg_card_base);
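(Illustrative equivalent of the card mark above. In ART the biased card-table
base doubles as the dirty value, which is why the generated code stores
reg_card_base's own low byte through itself; kCardShift's value here is an
assumption, the real constant is gc::accounting::CardTable::kCardShift:)

    #include <cstdint>
    constexpr int kCardShift = 10;  // assumed for illustration
    void MarkCard(uintptr_t value_stored, uintptr_t target_address,
                  uint8_t* biased_card_base) {
      if (value_stored == 0) {
        return;  // storing null: skip the mark, as branch_over does above
      }
      biased_card_base[target_address >> kCardShift] =
          static_cast<uint8_t>(reinterpret_cast<uintptr_t>(biased_card_base));
    }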
@@ -344,7 +344,7 @@
if (!skip_overflow_check) {
if (Runtime::Current()->ExplicitStackOverflowChecks()) {
/* Load stack limit */
- LoadWordDisp(rARM_SELF, Thread::StackEndOffset().Int32Value(), r12);
+ LoadWordDisp(rs_rARM_SELF, Thread::StackEndOffset().Int32Value(), rs_r12);
}
}
/* Spill core callee saves */
@@ -374,14 +374,14 @@
m2l_->ResetDefTracking();
GenerateTargetLabel();
if (restore_lr_) {
- m2l_->LoadWordDisp(kArmRegSP, sp_displace_ - 4, kArmRegLR);
+ m2l_->LoadWordDisp(rs_rARM_SP, sp_displace_ - 4, rs_rARM_LR);
}
- m2l_->OpRegImm(kOpAdd, kArmRegSP, sp_displace_);
+ m2l_->OpRegImm(kOpAdd, rs_rARM_SP, sp_displace_);
m2l_->ClobberCallerSave();
ThreadOffset func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowStackOverflow);
// Load the entrypoint directly into the pc instead of doing a load + branch. Assumes
// codegen and target are in thumb2 mode.
- m2l_->LoadWordDisp(rARM_SELF, func_offset.Int32Value(), rARM_PC);
+ m2l_->LoadWordDisp(rs_rARM_SELF, func_offset.Int32Value(), rs_rARM_PC);
}
private:
@@ -389,29 +389,29 @@
const size_t sp_displace_;
};
if (static_cast<size_t>(frame_size_) > Thread::kStackOverflowReservedUsableBytes) {
- OpRegRegImm(kOpSub, rARM_LR, rARM_SP, frame_size_without_spills);
- LIR* branch = OpCmpBranch(kCondUlt, rARM_LR, r12, nullptr);
+ OpRegRegImm(kOpSub, rs_rARM_LR, rs_rARM_SP, frame_size_without_spills);
+ LIR* branch = OpCmpBranch(kCondUlt, rs_rARM_LR, rs_r12, nullptr);
// Need to restore LR since we used it as a temp.
AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, true,
frame_size_without_spills));
- OpRegCopy(rARM_SP, rARM_LR); // Establish stack
+ OpRegCopy(rs_rARM_SP, rs_rARM_LR); // Establish stack
} else {
// If the frame is small enough we are guaranteed to have enough space that remains to
// handle signals on the user stack.
- OpRegRegImm(kOpSub, rARM_SP, rARM_SP, frame_size_without_spills);
- LIR* branch = OpCmpBranch(kCondUlt, rARM_SP, r12, nullptr);
+ OpRegRegImm(kOpSub, rs_rARM_SP, rs_rARM_SP, frame_size_without_spills);
+ LIR* branch = OpCmpBranch(kCondUlt, rs_rARM_SP, rs_r12, nullptr);
AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, false, frame_size_));
}
} else {
// Implicit stack overflow check.
// Generate a load from [sp, #-framesize]. If this is in the stack
// redzone we will get a segmentation fault.
- OpRegImm(kOpSub, rARM_SP, frame_size_without_spills);
- LoadWordDisp(rARM_SP, 0, rARM_LR);
+ OpRegImm(kOpSub, rs_rARM_SP, frame_size_without_spills);
+ LoadWordDisp(rs_rARM_SP, 0, rs_rARM_LR);
MarkPossibleStackOverflowException();
}
} else {
- OpRegImm(kOpSub, rARM_SP, frame_size_without_spills);
+ OpRegImm(kOpSub, rs_rARM_SP, frame_size_without_spills);
}
FlushIns(ArgLocs, rl_method);
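(Sketch of the two overflow-check strategies generated above -- not actual
emitted code:)

    // Explicit check: compare the post-frame SP against the limit that was
    // loaded from Thread::StackEndOffset() into r12:
    //   new_sp = sp - frame_size_without_spills;
    //   if (new_sp < stack_end) -> StackOverflowSlowPath (throws)
    //   sp = new_sp;
    //
    // Implicit check: drop SP and probe the new frame; a fault in the
    // protected redzone below the stack end becomes a StackOverflowError:
    //   sp -= frame_size_without_spills;
    //   lr = load [sp, #0];  // the LoadWordDisp(rs_rARM_SP, 0, rs_rARM_LR)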
@@ -432,7 +432,7 @@
LockTemp(r1);
NewLIR0(kPseudoMethodExit);
- OpRegImm(kOpAdd, rARM_SP, frame_size_ - (spill_count * 4));
+ OpRegImm(kOpAdd, rs_rARM_SP, frame_size_ - (spill_count * 4));
/* Need to restore any FP callee saves? */
if (num_fp_spills_) {
NewLIR1(kThumb2VPopCS, num_fp_spills_);
diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h
index 6e72c80..6df341b 100644
--- a/compiler/dex/quick/arm/codegen_arm.h
+++ b/compiler/dex/quick/arm/codegen_arm.h
@@ -22,38 +22,42 @@
namespace art {
-class ArmMir2Lir : public Mir2Lir {
+class ArmMir2Lir FINAL : public Mir2Lir {
public:
ArmMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
// Required for target - codegen helpers.
bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
- RegLocation rl_dest, int lit);
- int LoadHelper(ThreadOffset offset);
+ RegLocation rl_dest, int lit);
+ bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
LIR* CheckSuspendUsingLoad() OVERRIDE;
- LIR* LoadBaseDisp(int rBase, int displacement, int r_dest, OpSize size, int s_reg);
- LIR* LoadBaseDispWide(int rBase, int displacement, int r_dest_lo, int r_dest_hi,
- int s_reg);
- LIR* LoadBaseIndexed(int rBase, int r_index, int r_dest, int scale, OpSize size);
- LIR* LoadBaseIndexedDisp(int rBase, int r_index, int scale, int displacement,
- int r_dest, int r_dest_hi, OpSize size, int s_reg);
- LIR* LoadConstantNoClobber(int r_dest, int value);
- LIR* LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value);
- LIR* StoreBaseDisp(int rBase, int displacement, int r_src, OpSize size);
- LIR* StoreBaseDispWide(int rBase, int displacement, int r_src_lo, int r_src_hi);
- LIR* StoreBaseIndexed(int rBase, int r_index, int r_src, int scale, OpSize size);
- LIR* StoreBaseIndexedDisp(int rBase, int r_index, int scale, int displacement,
- int r_src, int r_src_hi, OpSize size, int s_reg);
- void MarkGCCard(int val_reg, int tgt_addr_reg);
+ RegStorage LoadHelper(ThreadOffset offset);
+ LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size,
+ int s_reg);
+ LIR* LoadBaseDispWide(RegStorage r_base, int displacement, RegStorage r_dest, int s_reg);
+ LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
+ OpSize size);
+ LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
+ RegStorage r_dest, RegStorage r_dest_hi, OpSize size, int s_reg);
+ LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
+ LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
+ LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, OpSize size);
+ LIR* StoreBaseDispWide(RegStorage r_base, int displacement, RegStorage r_src);
+ LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
+ OpSize size);
+ LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
+ RegStorage r_src, RegStorage r_src_hi, OpSize size, int s_reg);
+ void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg);
// Required for target - register utilities.
bool IsFpReg(int reg);
+ bool IsFpReg(RegStorage reg);
bool SameRegType(int reg1, int reg2);
- int AllocTypedTemp(bool fp_hint, int reg_class);
+ RegStorage AllocTypedTemp(bool fp_hint, int reg_class);
RegStorage AllocTypedTempWide(bool fp_hint, int reg_class);
int S2d(int low_reg, int high_reg);
- int TargetReg(SpecialTargetRegister reg);
- int GetArgMappingToPhysicalReg(int arg_num);
+ RegStorage TargetReg(SpecialTargetRegister reg);
+ RegStorage GetArgMappingToPhysicalReg(int arg_num);
RegLocation GetReturnAlt();
RegLocation GetReturnWideAlt();
RegLocation LocCReturn();
@@ -64,8 +68,8 @@
uint64_t GetRegMaskCommon(int reg);
void AdjustSpillMask();
void ClobberCallerSave();
- void FlushReg(int reg);
- void FlushRegWide(int reg1, int reg2);
+ void FlushReg(RegStorage reg);
+ void FlushRegWide(RegStorage reg);
void FreeCallTemps();
void FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free);
void LockCallTemps();
@@ -97,13 +101,16 @@
RegLocation rl_src, int scale, bool card_mark);
void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_shift);
- void GenMulLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
- void GenAddLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
- void GenAndLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
- void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2);
- void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2);
+ void GenMulLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ void GenAddLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ void GenAndLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
RegLocation rl_src2);
void GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src);
@@ -113,15 +120,18 @@
bool GenInlinedPeek(CallInfo* info, OpSize size);
bool GenInlinedPoke(CallInfo* info, OpSize size);
void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
- void GenOrLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
- void GenSubLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
- void GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
- LIR* GenRegMemCheck(ConditionCode c_code, int reg1, int base, int offset,
- ThrowKind kind);
- RegLocation GenDivRem(RegLocation rl_dest, int reg_lo, int reg_hi, bool is_div);
- RegLocation GenDivRemLit(RegLocation rl_dest, int reg_lo, int lit, bool is_div);
+ void GenOrLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ void GenSubLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ void GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ LIR* GenRegMemCheck(ConditionCode c_code, RegStorage reg1, RegStorage base, int offset,
+ ThrowKind kind);
+ RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div);
+ RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div);
void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
- void GenDivZeroCheck(int reg_lo, int reg_hi);
+ void GenDivZeroCheck(RegStorage reg);
void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method);
void GenExitSequence();
void GenSpecialExitSequence();
@@ -134,7 +144,7 @@
void GenMonitorExit(int opt_flags, RegLocation rl_src);
void GenMoveException(RegLocation rl_dest);
void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
- int first_bit, int second_bit);
+ int first_bit, int second_bit);
void GenNegDouble(RegLocation rl_dest, RegLocation rl_src);
void GenNegFloat(RegLocation rl_dest, RegLocation rl_src);
void GenPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
@@ -142,38 +152,39 @@
// Required for target - single operation generators.
LIR* OpUnconditionalBranch(LIR* target);
- LIR* OpCmpBranch(ConditionCode cond, int src1, int src2, LIR* target);
- LIR* OpCmpImmBranch(ConditionCode cond, int reg, int check_value, LIR* target);
+ LIR* OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target);
+ LIR* OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value, LIR* target);
LIR* OpCondBranch(ConditionCode cc, LIR* target);
- LIR* OpDecAndBranch(ConditionCode c_code, int reg, LIR* target);
- LIR* OpFpRegCopy(int r_dest, int r_src);
+ LIR* OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target);
+ LIR* OpFpRegCopy(RegStorage r_dest, RegStorage r_src);
LIR* OpIT(ConditionCode cond, const char* guide);
- LIR* OpMem(OpKind op, int rBase, int disp);
- LIR* OpPcRelLoad(int reg, LIR* target);
- LIR* OpReg(OpKind op, int r_dest_src);
- LIR* OpRegCopy(int r_dest, int r_src);
- LIR* OpRegCopyNoInsert(int r_dest, int r_src);
- LIR* OpRegImm(OpKind op, int r_dest_src1, int value);
- LIR* OpRegMem(OpKind op, int r_dest, int rBase, int offset);
- LIR* OpRegReg(OpKind op, int r_dest_src1, int r_src2);
- LIR* OpMovRegMem(int r_dest, int r_base, int offset, MoveType move_type);
- LIR* OpMovMemReg(int r_base, int offset, int r_src, MoveType move_type);
- LIR* OpCondRegReg(OpKind op, ConditionCode cc, int r_dest, int r_src);
- LIR* OpRegRegImm(OpKind op, int r_dest, int r_src1, int value);
- LIR* OpRegRegReg(OpKind op, int r_dest, int r_src1, int r_src2);
+ LIR* OpMem(OpKind op, RegStorage r_base, int disp);
+ LIR* OpPcRelLoad(RegStorage reg, LIR* target);
+ LIR* OpReg(OpKind op, RegStorage r_dest_src);
+ LIR* OpRegCopy(RegStorage r_dest, RegStorage r_src);
+ LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src);
+ LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value);
+ LIR* OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset);
+ LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2);
+ LIR* OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type);
+ LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type);
+ LIR* OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src);
+ LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value);
+ LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2);
LIR* OpTestSuspend(LIR* target);
LIR* OpThreadMem(OpKind op, ThreadOffset thread_offset);
- LIR* OpVldm(int rBase, int count);
- LIR* OpVstm(int rBase, int count);
- void OpLea(int rBase, int reg1, int reg2, int scale, int offset);
- void OpRegCopyWide(int dest_lo, int dest_hi, int src_lo, int src_hi);
+ LIR* OpVldm(RegStorage r_base, int count);
+ LIR* OpVstm(RegStorage r_base, int count);
+ void OpLea(RegStorage r_base, RegStorage reg1, RegStorage reg2, int scale, int offset);
+ void OpRegCopyWide(RegStorage dest, RegStorage src);
void OpTlsCmp(ThreadOffset offset, int val);
- LIR* LoadBaseDispBody(int rBase, int displacement, int r_dest, int r_dest_hi, OpSize size,
+ LIR* LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size,
int s_reg);
- LIR* StoreBaseDispBody(int rBase, int displacement, int r_src, int r_src_hi, OpSize size);
- LIR* OpRegRegRegShift(OpKind op, int r_dest, int r_src1, int r_src2, int shift);
- LIR* OpRegRegShift(OpKind op, int r_dest_src1, int r_src2, int shift);
+ LIR* StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src, OpSize size);
+ LIR* OpRegRegRegShift(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2,
+ int shift);
+ LIR* OpRegRegShift(OpKind op, RegStorage r_dest_src1, RegStorage r_src2, int shift);
static const ArmEncodingMap EncodingMap[kArmLast];
int EncodeShift(int code, int amount);
int ModifiedImmediate(uint32_t value);
@@ -190,9 +201,16 @@
void ReplaceFixup(LIR* prev_lir, LIR* orig_lir, LIR* new_lir);
void InsertFixupBefore(LIR* prev_lir, LIR* orig_lir, LIR* new_lir);
void AssignDataOffsets();
- RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2, bool is_div, bool check_zero);
+ RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
+ bool is_div, bool check_zero);
RegLocation GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div);
+    struct EasyMultiplyOp {
+      OpKind op;
+      uint32_t shift;
+    };
+ bool GetEasyMultiplyOp(int lit, EasyMultiplyOp* op);
+ bool GetEasyMultiplyTwoOps(int lit, EasyMultiplyOp* ops);
+ void GenEasyMultiplyTwoOps(RegStorage r_dest, RegStorage r_src, EasyMultiplyOp* ops);
};
} // namespace art
diff --git a/compiler/dex/quick/arm/fp_arm.cc b/compiler/dex/quick/arm/fp_arm.cc
index 6868f6f..398bf96 100644
--- a/compiler/dex/quick/arm/fp_arm.cc
+++ b/compiler/dex/quick/arm/fp_arm.cc
@@ -111,8 +111,8 @@
rl_result = EvalLoc(rl_dest, kFPReg, true);
DCHECK(rl_dest.wide);
DCHECK(rl_result.wide);
- NewLIR3(op, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()), S2d(rl_src1.reg.GetReg(), rl_src1.reg.GetHighReg()),
- S2d(rl_src2.reg.GetReg(), rl_src2.reg.GetHighReg()));
+ NewLIR3(op, S2d(rl_result.reg.GetLowReg(), rl_result.reg.GetHighReg()), S2d(rl_src1.reg.GetLowReg(), rl_src1.reg.GetHighReg()),
+ S2d(rl_src2.reg.GetLowReg(), rl_src2.reg.GetHighReg()));
StoreValueWide(rl_dest, rl_result);
}
@@ -143,17 +143,19 @@
break;
case Instruction::LONG_TO_DOUBLE: {
rl_src = LoadValueWide(rl_src, kFPReg);
- src_reg = S2d(rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
+ src_reg = S2d(rl_src.reg.GetLowReg(), rl_src.reg.GetHighReg());
rl_result = EvalLoc(rl_dest, kFPReg, true);
- // TODO: clean up AllocTempDouble so that its result has the double bits set.
- int tmp1 = AllocTempDouble();
- int tmp2 = AllocTempDouble();
+ // TODO: fix AllocTempDouble to return a k64BitSolo double reg and lose the ARM_FP_DOUBLE.
+ RegStorage tmp1 = AllocTempDouble();
+ RegStorage tmp2 = AllocTempDouble();
- NewLIR2(kThumb2VcvtF64S32, tmp1 | ARM_FP_DOUBLE, (src_reg & ~ARM_FP_DOUBLE) + 1);
- NewLIR2(kThumb2VcvtF64U32, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()), (src_reg & ~ARM_FP_DOUBLE));
- LoadConstantWide(tmp2, tmp2 + 1, INT64_C(0x41f0000000000000));
- NewLIR3(kThumb2VmlaF64, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()), tmp1 | ARM_FP_DOUBLE,
- tmp2 | ARM_FP_DOUBLE);
+ // FIXME: needs 64-bit register cleanup.
+ NewLIR2(kThumb2VcvtF64S32, tmp1.GetLowReg() | ARM_FP_DOUBLE, (src_reg & ~ARM_FP_DOUBLE) + 1);
+ NewLIR2(kThumb2VcvtF64U32, S2d(rl_result.reg.GetLowReg(), rl_result.reg.GetHighReg()),
+ (src_reg & ~ARM_FP_DOUBLE));
+ LoadConstantWide(tmp2, 0x41f0000000000000LL);
+ NewLIR3(kThumb2VmlaF64, S2d(rl_result.reg.GetLowReg(), rl_result.reg.GetHighReg()),
+ tmp1.GetLowReg() | ARM_FP_DOUBLE, tmp2.GetLowReg() | ARM_FP_DOUBLE);
FreeTemp(tmp1);
FreeTemp(tmp2);
StoreValueWide(rl_dest, rl_result);
@@ -164,20 +166,23 @@
return;
case Instruction::LONG_TO_FLOAT: {
rl_src = LoadValueWide(rl_src, kFPReg);
- src_reg = S2d(rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
+ src_reg = S2d(rl_src.reg.GetLowReg(), rl_src.reg.GetHighReg());
rl_result = EvalLoc(rl_dest, kFPReg, true);
// Allocate temp registers.
- int high_val = AllocTempDouble();
- int low_val = AllocTempDouble();
- int const_val = AllocTempDouble();
+ RegStorage high_val = AllocTempDouble();
+ RegStorage low_val = AllocTempDouble();
+ RegStorage const_val = AllocTempDouble();
// Long to double.
- NewLIR2(kThumb2VcvtF64S32, high_val | ARM_FP_DOUBLE, (src_reg & ~ARM_FP_DOUBLE) + 1);
- NewLIR2(kThumb2VcvtF64U32, low_val | ARM_FP_DOUBLE, (src_reg & ~ARM_FP_DOUBLE));
- LoadConstantWide(const_val, const_val + 1, INT64_C(0x41f0000000000000));
- NewLIR3(kThumb2VmlaF64, low_val | ARM_FP_DOUBLE, high_val | ARM_FP_DOUBLE,
- const_val | ARM_FP_DOUBLE);
+ NewLIR2(kThumb2VcvtF64S32, high_val.GetLowReg() | ARM_FP_DOUBLE,
+ (src_reg & ~ARM_FP_DOUBLE) + 1);
+ NewLIR2(kThumb2VcvtF64U32, low_val.GetLowReg() | ARM_FP_DOUBLE,
+ (src_reg & ~ARM_FP_DOUBLE));
+ LoadConstantWide(const_val, INT64_C(0x41f0000000000000));
+ NewLIR3(kThumb2VmlaF64, low_val.GetLowReg() | ARM_FP_DOUBLE,
+ high_val.GetLowReg() | ARM_FP_DOUBLE,
+ const_val.GetLowReg() | ARM_FP_DOUBLE);
// Double to float.
- NewLIR2(kThumb2VcvtDF, rl_result.reg.GetReg(), low_val | ARM_FP_DOUBLE);
+ NewLIR2(kThumb2VcvtDF, rl_result.reg.GetReg(), low_val.GetLowReg() | ARM_FP_DOUBLE);
// Free temp registers.
FreeTemp(high_val);
FreeTemp(low_val);
@@ -194,14 +199,14 @@
}
if (rl_src.wide) {
rl_src = LoadValueWide(rl_src, kFPReg);
- src_reg = S2d(rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
+ src_reg = S2d(rl_src.reg.GetLowReg(), rl_src.reg.GetHighReg());
} else {
rl_src = LoadValue(rl_src, kFPReg);
src_reg = rl_src.reg.GetReg();
}
if (rl_dest.wide) {
rl_result = EvalLoc(rl_dest, kFPReg, true);
- NewLIR2(op, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()), src_reg);
+ NewLIR2(op, S2d(rl_result.reg.GetLowReg(), rl_result.reg.GetHighReg()), src_reg);
StoreValueWide(rl_dest, rl_result);
} else {
rl_result = EvalLoc(rl_dest, kFPReg, true);
@@ -220,8 +225,8 @@
rl_src2 = mir_graph_->GetSrcWide(mir, 2);
rl_src1 = LoadValueWide(rl_src1, kFPReg);
rl_src2 = LoadValueWide(rl_src2, kFPReg);
- NewLIR2(kThumb2Vcmpd, S2d(rl_src1.reg.GetReg(), rl_src2.reg.GetHighReg()),
- S2d(rl_src2.reg.GetReg(), rl_src2.reg.GetHighReg()));
+    NewLIR2(kThumb2Vcmpd, S2d(rl_src1.reg.GetLowReg(), rl_src1.reg.GetHighReg()),
+ S2d(rl_src2.reg.GetLowReg(), rl_src2.reg.GetHighReg()));
} else {
rl_src1 = mir_graph_->GetSrc(mir, 0);
rl_src2 = mir_graph_->GetSrc(mir, 1);
@@ -294,16 +299,16 @@
// In case result vreg is also a src vreg, break association to avoid useless copy by EvalLoc()
ClobberSReg(rl_dest.s_reg_low);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- LoadConstant(rl_result.reg.GetReg(), default_result);
- NewLIR2(kThumb2Vcmpd, S2d(rl_src1.reg.GetReg(), rl_src2.reg.GetHighReg()),
- S2d(rl_src2.reg.GetReg(), rl_src2.reg.GetHighReg()));
+ LoadConstant(rl_result.reg, default_result);
+    NewLIR2(kThumb2Vcmpd, S2d(rl_src1.reg.GetLowReg(), rl_src1.reg.GetHighReg()),
+ S2d(rl_src2.reg.GetLowReg(), rl_src2.reg.GetHighReg()));
} else {
rl_src1 = LoadValue(rl_src1, kFPReg);
rl_src2 = LoadValue(rl_src2, kFPReg);
// In case result vreg is also a src vreg, break association to avoid useless copy by EvalLoc()
ClobberSReg(rl_dest.s_reg_low);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- LoadConstant(rl_result.reg.GetReg(), default_result);
+ LoadConstant(rl_result.reg, default_result);
NewLIR2(kThumb2Vcmps, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
}
DCHECK(!ARM_FPREG(rl_result.reg.GetReg()));
@@ -315,7 +320,7 @@
GenBarrier();
OpIT(kCondEq, "");
- LoadConstant(rl_result.reg.GetReg(), 0);
+ LoadConstant(rl_result.reg, 0);
GenBarrier();
StoreValue(rl_dest, rl_result);
@@ -333,8 +338,8 @@
RegLocation rl_result;
rl_src = LoadValueWide(rl_src, kFPReg);
rl_result = EvalLoc(rl_dest, kFPReg, true);
- NewLIR2(kThumb2Vnegd, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()),
- S2d(rl_src.reg.GetReg(), rl_src.reg.GetHighReg()));
+ NewLIR2(kThumb2Vnegd, S2d(rl_result.reg.GetLowReg(), rl_result.reg.GetHighReg()),
+ S2d(rl_src.reg.GetLowReg(), rl_src.reg.GetHighReg()));
StoreValueWide(rl_dest, rl_result);
}
@@ -345,18 +350,19 @@
RegLocation rl_dest = InlineTargetWide(info); // double place for result
rl_src = LoadValueWide(rl_src, kFPReg);
RegLocation rl_result = EvalLoc(rl_dest, kFPReg, true);
- NewLIR2(kThumb2Vsqrtd, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()),
- S2d(rl_src.reg.GetReg(), rl_src.reg.GetHighReg()));
- NewLIR2(kThumb2Vcmpd, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()),
- S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()));
+ // TODO: shouldn't need S2d once 64bitSolo has proper double tag bit.
+ NewLIR2(kThumb2Vsqrtd, S2d(rl_result.reg.GetLowReg(), rl_result.reg.GetHighReg()),
+ S2d(rl_src.reg.GetLowReg(), rl_src.reg.GetHighReg()));
+ NewLIR2(kThumb2Vcmpd, S2d(rl_result.reg.GetLowReg(), rl_result.reg.GetHighReg()),
+ S2d(rl_result.reg.GetLowReg(), rl_result.reg.GetHighReg()));
NewLIR0(kThumb2Fmstat);
branch = NewLIR2(kThumbBCond, 0, kArmCondEq);
ClobberCallerSave();
LockCallTemps(); // Using fixed registers
- int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pSqrt));
- NewLIR3(kThumb2Fmrrd, r0, r1, S2d(rl_src.reg.GetReg(), rl_src.reg.GetHighReg()));
- NewLIR1(kThumbBlxR, r_tgt);
- NewLIR3(kThumb2Fmdrr, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()), r0, r1);
+ RegStorage r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pSqrt));
+ NewLIR3(kThumb2Fmrrd, r0, r1, S2d(rl_src.reg.GetLowReg(), rl_src.reg.GetHighReg()));
+ NewLIR1(kThumbBlxR, r_tgt.GetReg());
+ NewLIR3(kThumb2Fmdrr, S2d(rl_result.reg.GetLowReg(), rl_result.reg.GetHighReg()), r0, r1);
branch->target = NewLIR0(kPseudoTargetLabel);
StoreValueWide(rl_dest, rl_result);
return true;
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index 1d959fa..964c2fb 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -24,7 +24,7 @@
namespace art {
-LIR* ArmMir2Lir::OpCmpBranch(ConditionCode cond, int src1, int src2, LIR* target) {
+LIR* ArmMir2Lir::OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) {
OpRegReg(kOpCmp, src1, src2);
return OpCondBranch(cond, target);
}
@@ -88,16 +88,16 @@
LIR* target2;
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
rl_src2 = LoadValueWide(rl_src2, kCoreReg);
- int t_reg = AllocTemp();
+ RegStorage t_reg = AllocTemp();
LoadConstant(t_reg, -1);
- OpRegReg(kOpCmp, rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
+ OpRegReg(kOpCmp, rl_src1.reg.GetHigh(), rl_src2.reg.GetHigh());
LIR* branch1 = OpCondBranch(kCondLt, NULL);
LIR* branch2 = OpCondBranch(kCondGt, NULL);
- OpRegRegReg(kOpSub, t_reg, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+ OpRegRegReg(kOpSub, t_reg, rl_src1.reg, rl_src2.reg);
LIR* branch3 = OpCondBranch(kCondEq, NULL);
OpIT(kCondHi, "E");
- NewLIR2(kThumb2MovI8M, t_reg, ModifiedImmediate(-1));
+ NewLIR2(kThumb2MovI8M, t_reg.GetReg(), ModifiedImmediate(-1));
LoadConstant(t_reg, 1);
GenBarrier();
@@ -107,7 +107,7 @@
target1 = NewLIR0(kPseudoTargetLabel);
RegLocation rl_temp = LocCReturn(); // Just using as template, will change
- rl_temp.reg.SetReg(t_reg);
+ rl_temp.reg.SetReg(t_reg.GetReg());
StoreValue(rl_dest, rl_temp);
FreeTemp(t_reg);
@@ -125,12 +125,12 @@
LIR* taken = &block_label_list_[bb->taken];
LIR* not_taken = &block_label_list_[bb->fall_through];
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
- int32_t low_reg = rl_src1.reg.GetReg();
- int32_t high_reg = rl_src1.reg.GetHighReg();
+ RegStorage low_reg = rl_src1.reg.GetLow();
+ RegStorage high_reg = rl_src1.reg.GetHigh();
if (val == 0 && (ccode == kCondEq || ccode == kCondNe)) {
- int t_reg = AllocTemp();
- NewLIR4(kThumb2OrrRRRs, t_reg, low_reg, high_reg, 0);
+ RegStorage t_reg = AllocTemp();
+ NewLIR4(kThumb2OrrRRRs, t_reg.GetReg(), low_reg.GetReg(), high_reg.GetReg(), 0);
FreeTemp(t_reg);
OpCondBranch(ccode, taken);
return;
@@ -185,33 +185,33 @@
}
bool cheap_false_val = InexpensiveConstantInt(false_val);
if (cheap_false_val && ccode == kCondEq && (true_val == 0 || true_val == -1)) {
- OpRegRegImm(kOpSub, rl_result.reg.GetReg(), rl_src.reg.GetReg(), -true_val);
+ OpRegRegImm(kOpSub, rl_result.reg, rl_src.reg, -true_val);
DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
OpIT(true_val == 0 ? kCondNe : kCondUge, "");
- LoadConstant(rl_result.reg.GetReg(), false_val);
+ LoadConstant(rl_result.reg, false_val);
GenBarrier(); // Add a scheduling barrier to keep the IT shadow intact
} else if (cheap_false_val && ccode == kCondEq && true_val == 1) {
- OpRegRegImm(kOpRsub, rl_result.reg.GetReg(), rl_src.reg.GetReg(), 1);
+ OpRegRegImm(kOpRsub, rl_result.reg, rl_src.reg, 1);
DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
OpIT(kCondLs, "");
- LoadConstant(rl_result.reg.GetReg(), false_val);
+ LoadConstant(rl_result.reg, false_val);
GenBarrier(); // Add a scheduling barrier to keep the IT shadow intact
} else if (cheap_false_val && InexpensiveConstantInt(true_val)) {
- OpRegImm(kOpCmp, rl_src.reg.GetReg(), 0);
+ OpRegImm(kOpCmp, rl_src.reg, 0);
OpIT(ccode, "E");
- LoadConstant(rl_result.reg.GetReg(), true_val);
- LoadConstant(rl_result.reg.GetReg(), false_val);
+ LoadConstant(rl_result.reg, true_val);
+ LoadConstant(rl_result.reg, false_val);
GenBarrier(); // Add a scheduling barrier to keep the IT shadow intact
} else {
// Unlikely case - could be tuned.
- int t_reg1 = AllocTemp();
- int t_reg2 = AllocTemp();
+ RegStorage t_reg1 = AllocTemp();
+ RegStorage t_reg2 = AllocTemp();
LoadConstant(t_reg1, true_val);
LoadConstant(t_reg2, false_val);
- OpRegImm(kOpCmp, rl_src.reg.GetReg(), 0);
+ OpRegImm(kOpCmp, rl_src.reg, 0);
OpIT(ccode, "E");
- OpRegCopy(rl_result.reg.GetReg(), t_reg1);
- OpRegCopy(rl_result.reg.GetReg(), t_reg2);
+ OpRegCopy(rl_result.reg, t_reg1);
+ OpRegCopy(rl_result.reg, t_reg2);
GenBarrier(); // Add a scheduling barrier to keep the IT shadow intact
}
} else {
@@ -221,17 +221,17 @@
rl_true = LoadValue(rl_true, kCoreReg);
rl_false = LoadValue(rl_false, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegImm(kOpCmp, rl_src.reg.GetReg(), 0);
+ OpRegImm(kOpCmp, rl_src.reg, 0);
if (rl_result.reg.GetReg() == rl_true.reg.GetReg()) { // Is the "true" case already in place?
OpIT(NegateComparison(ccode), "");
- OpRegCopy(rl_result.reg.GetReg(), rl_false.reg.GetReg());
+ OpRegCopy(rl_result.reg, rl_false.reg);
} else if (rl_result.reg.GetReg() == rl_false.reg.GetReg()) { // False case in place?
OpIT(ccode, "");
- OpRegCopy(rl_result.reg.GetReg(), rl_true.reg.GetReg());
+ OpRegCopy(rl_result.reg, rl_true.reg);
} else { // Normal - select between the two.
OpIT(ccode, "E");
- OpRegCopy(rl_result.reg.GetReg(), rl_true.reg.GetReg());
- OpRegCopy(rl_result.reg.GetReg(), rl_false.reg.GetReg());
+ OpRegCopy(rl_result.reg, rl_true.reg);
+ OpRegCopy(rl_result.reg, rl_false.reg);
}
GenBarrier(); // Add a scheduling barrier to keep the IT shadow intact
}
@@ -261,7 +261,7 @@
LIR* not_taken = &block_label_list_[bb->fall_through];
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
rl_src2 = LoadValueWide(rl_src2, kCoreReg);
- OpRegReg(kOpCmp, rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
+ OpRegReg(kOpCmp, rl_src1.reg.GetHigh(), rl_src2.reg.GetHigh());
switch (ccode) {
case kCondEq:
OpCondBranch(kCondNe, not_taken);
@@ -292,7 +292,7 @@
default:
LOG(FATAL) << "Unexpected ccode: " << ccode;
}
- OpRegReg(kOpCmp, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+ OpRegReg(kOpCmp, rl_src1.reg.GetLow(), rl_src2.reg.GetLow());
OpCondBranch(ccode, taken);
}
@@ -300,8 +300,7 @@
* Generate a register comparison to an immediate and branch. Caller
* is responsible for setting branch target field.
*/
-LIR* ArmMir2Lir::OpCmpImmBranch(ConditionCode cond, int reg, int check_value,
- LIR* target) {
+LIR* ArmMir2Lir::OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value, LIR* target) {
LIR* branch;
ArmConditionCode arm_cond = ArmConditionEncoding(cond);
/*
@@ -315,10 +314,10 @@
*/
bool skip = ((target != NULL) && (target->opcode == kPseudoThrowTarget));
skip &= ((cu_->code_item->insns_size_in_code_units_ - current_dalvik_offset_) > 64);
- if (!skip && (ARM_LOWREG(reg)) && (check_value == 0) &&
+ if (!skip && (ARM_LOWREG(reg.GetReg())) && (check_value == 0) &&
((arm_cond == kArmCondEq) || (arm_cond == kArmCondNe))) {
branch = NewLIR2((arm_cond == kArmCondEq) ? kThumb2Cbz : kThumb2Cbnz,
- reg, 0);
+ reg.GetReg(), 0);
} else {
OpRegImm(kOpCmp, reg, check_value);
branch = NewLIR2(kThumbBCond, 0, arm_cond);
@@ -327,56 +326,64 @@
return branch;
}
-LIR* ArmMir2Lir::OpRegCopyNoInsert(int r_dest, int r_src) {
+LIR* ArmMir2Lir::OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) {
LIR* res;
int opcode;
- if (ARM_FPREG(r_dest) || ARM_FPREG(r_src))
+ // If src or dest is a pair, we'll be using low reg.
+ if (r_dest.IsPair()) {
+ r_dest = r_dest.GetLow();
+ }
+ if (r_src.IsPair()) {
+ r_src = r_src.GetLow();
+ }
+ if (ARM_FPREG(r_dest.GetReg()) || ARM_FPREG(r_src.GetReg()))
return OpFpRegCopy(r_dest, r_src);
- if (ARM_LOWREG(r_dest) && ARM_LOWREG(r_src))
+ if (ARM_LOWREG(r_dest.GetReg()) && ARM_LOWREG(r_src.GetReg()))
opcode = kThumbMovRR;
- else if (!ARM_LOWREG(r_dest) && !ARM_LOWREG(r_src))
+ else if (!ARM_LOWREG(r_dest.GetReg()) && !ARM_LOWREG(r_src.GetReg()))
opcode = kThumbMovRR_H2H;
- else if (ARM_LOWREG(r_dest))
+ else if (ARM_LOWREG(r_dest.GetReg()))
opcode = kThumbMovRR_H2L;
else
opcode = kThumbMovRR_L2H;
- res = RawLIR(current_dalvik_offset_, opcode, r_dest, r_src);
+ res = RawLIR(current_dalvik_offset_, opcode, r_dest.GetReg(), r_src.GetReg());
if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
res->flags.is_nop = true;
}
return res;
}
-LIR* ArmMir2Lir::OpRegCopy(int r_dest, int r_src) {
+LIR* ArmMir2Lir::OpRegCopy(RegStorage r_dest, RegStorage r_src) {
LIR* res = OpRegCopyNoInsert(r_dest, r_src);
AppendLIR(res);
return res;
}
-void ArmMir2Lir::OpRegCopyWide(int dest_lo, int dest_hi, int src_lo,
- int src_hi) {
- bool dest_fp = ARM_FPREG(dest_lo) && ARM_FPREG(dest_hi);
- bool src_fp = ARM_FPREG(src_lo) && ARM_FPREG(src_hi);
- DCHECK_EQ(ARM_FPREG(src_lo), ARM_FPREG(src_hi));
- DCHECK_EQ(ARM_FPREG(dest_lo), ARM_FPREG(dest_hi));
+void ArmMir2Lir::OpRegCopyWide(RegStorage r_dest, RegStorage r_src) {
+ bool dest_fp = ARM_FPREG(r_dest.GetLowReg());
+ bool src_fp = ARM_FPREG(r_src.GetLowReg());
if (dest_fp) {
if (src_fp) {
- OpRegCopy(S2d(dest_lo, dest_hi), S2d(src_lo, src_hi));
+ // FIXME: handle 64-bit solo's here.
+ OpRegCopy(RegStorage::Solo64(S2d(r_dest.GetLowReg(), r_dest.GetHighReg())),
+ RegStorage::Solo64(S2d(r_src.GetLowReg(), r_src.GetHighReg())));
} else {
- NewLIR3(kThumb2Fmdrr, S2d(dest_lo, dest_hi), src_lo, src_hi);
+ NewLIR3(kThumb2Fmdrr, S2d(r_dest.GetLowReg(), r_dest.GetHighReg()),
+ r_src.GetLowReg(), r_src.GetHighReg());
}
} else {
if (src_fp) {
- NewLIR3(kThumb2Fmrrd, dest_lo, dest_hi, S2d(src_lo, src_hi));
+ NewLIR3(kThumb2Fmrrd, r_dest.GetLowReg(), r_dest.GetHighReg(), S2d(r_src.GetLowReg(),
+ r_src.GetHighReg()));
} else {
// Handle overlap
- if (src_hi == dest_lo) {
- DCHECK_NE(src_lo, dest_hi);
- OpRegCopy(dest_hi, src_hi);
- OpRegCopy(dest_lo, src_lo);
+ if (r_src.GetHighReg() == r_dest.GetLowReg()) {
+ DCHECK_NE(r_src.GetLowReg(), r_dest.GetHighReg());
+ OpRegCopy(r_dest.GetHigh(), r_src.GetHigh());
+ OpRegCopy(r_dest.GetLow(), r_src.GetLow());
} else {
- OpRegCopy(dest_lo, src_lo);
- OpRegCopy(dest_hi, src_hi);
+ OpRegCopy(r_dest.GetLow(), r_src.GetLow());
+ OpRegCopy(r_dest.GetHigh(), r_src.GetHigh());
}
}
}
@@ -418,43 +425,152 @@
if (pattern == DivideNone) {
return false;
}
- // Tuning: add rem patterns
- if (!is_div) {
- return false;
- }
- int r_magic = AllocTemp();
+ RegStorage r_magic = AllocTemp();
LoadConstant(r_magic, magic_table[lit].magic);
rl_src = LoadValue(rl_src, kCoreReg);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- int r_hi = AllocTemp();
- int r_lo = AllocTemp();
- NewLIR4(kThumb2Smull, r_lo, r_hi, r_magic, rl_src.reg.GetReg());
+ RegStorage r_hi = AllocTemp();
+ RegStorage r_lo = AllocTemp();
+ NewLIR4(kThumb2Smull, r_lo.GetReg(), r_hi.GetReg(), r_magic.GetReg(), rl_src.reg.GetReg());
switch (pattern) {
case Divide3:
- OpRegRegRegShift(kOpSub, rl_result.reg.GetReg(), r_hi,
- rl_src.reg.GetReg(), EncodeShift(kArmAsr, 31));
+ OpRegRegRegShift(kOpSub, rl_result.reg, r_hi, rl_src.reg, EncodeShift(kArmAsr, 31));
break;
case Divide5:
- OpRegRegImm(kOpAsr, r_lo, rl_src.reg.GetReg(), 31);
- OpRegRegRegShift(kOpRsub, rl_result.reg.GetReg(), r_lo, r_hi,
- EncodeShift(kArmAsr, magic_table[lit].shift));
+ OpRegRegImm(kOpAsr, r_lo, rl_src.reg, 31);
+ OpRegRegRegShift(kOpRsub, rl_result.reg, r_lo, r_hi,
+ EncodeShift(kArmAsr, magic_table[lit].shift));
break;
case Divide7:
- OpRegReg(kOpAdd, r_hi, rl_src.reg.GetReg());
- OpRegRegImm(kOpAsr, r_lo, rl_src.reg.GetReg(), 31);
- OpRegRegRegShift(kOpRsub, rl_result.reg.GetReg(), r_lo, r_hi,
- EncodeShift(kArmAsr, magic_table[lit].shift));
+ OpRegReg(kOpAdd, r_hi, rl_src.reg);
+ OpRegRegImm(kOpAsr, r_lo, rl_src.reg, 31);
+ OpRegRegRegShift(kOpRsub, rl_result.reg, r_lo, r_hi,
+ EncodeShift(kArmAsr, magic_table[lit].shift));
break;
default:
LOG(FATAL) << "Unexpected pattern: " << pattern;
}
+
+ if (!is_div) {
+ RegStorage tmp1 = r_lo;
+ EasyMultiplyOp ops[2];
+
+ bool canEasyMultiply = GetEasyMultiplyTwoOps(lit, ops);
+ DCHECK_NE(canEasyMultiply, false);
+
+ GenEasyMultiplyTwoOps(tmp1, rl_result.reg, ops);
+ OpRegRegReg(kOpSub, rl_result.reg, rl_src.reg, tmp1);
+ }
+
StoreValue(rl_dest, rl_result);
return true;
}
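(Worked example for the Divide3 pattern above, assuming the canonical magic
constant 0x55555556 for division by 3 -- the real value lives in magic_table,
which this diff does not show:)

    #include <cstdint>
    int32_t DivideBy3(int32_t x) {
      int64_t product = INT64_C(0x55555556) * x;         // kThumb2Smull
      int32_t hi = static_cast<int32_t>(product >> 32);  // r_hi
      return hi - (x >> 31);  // kOpSub with EncodeShift(kArmAsr, 31)
    }
    // DivideBy3(9) == 3 and DivideBy3(-9) == -3, matching C++'s truncating
    // signed division.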
-LIR* ArmMir2Lir::GenRegMemCheck(ConditionCode c_code,
- int reg1, int base, int offset, ThrowKind kind) {
+// Try to convert lit into a single RegRegRegShift/RegRegShift form.
+bool ArmMir2Lir::GetEasyMultiplyOp(int lit, ArmMir2Lir::EasyMultiplyOp* op) {
+ if (IsPowerOfTwo(lit)) {
+ op->op = kOpLsl;
+ op->shift = LowestSetBit(lit);
+ return true;
+ }
+
+ if (IsPowerOfTwo(lit - 1)) {
+ op->op = kOpAdd;
+ op->shift = LowestSetBit(lit - 1);
+ return true;
+ }
+
+ if (IsPowerOfTwo(lit + 1)) {
+ op->op = kOpRsub;
+ op->shift = LowestSetBit(lit + 1);
+ return true;
+ }
+
+ op->op = kOpInvalid;
+ return false;
+}
+
+// Try to convert lit into one or two RegRegRegShift/RegRegShift forms.
+bool ArmMir2Lir::GetEasyMultiplyTwoOps(int lit, EasyMultiplyOp* ops) {
+  if (GetEasyMultiplyOp(lit, &ops[0])) {
+ ops[1].op = kOpInvalid;
+ return true;
+ }
+
+ int lit1 = lit;
+ uint32_t shift = LowestSetBit(lit1);
+ if (GetEasyMultiplyOp(lit1 >> shift, &ops[0])) {
+ ops[1].op = kOpLsl;
+ ops[1].shift = shift;
+ return true;
+ }
+
+ lit1 = lit - 1;
+ shift = LowestSetBit(lit1);
+ if (GetEasyMultiplyOp(lit1 >> shift, &ops[0])) {
+ ops[1].op = kOpAdd;
+ ops[1].shift = shift;
+ return true;
+ }
+
+ lit1 = lit + 1;
+ shift = LowestSetBit(lit1);
+ if (GetEasyMultiplyOp(lit1 >> shift, &ops[0])) {
+ ops[1].op = kOpRsub;
+ ops[1].shift = shift;
+ return true;
+ }
+
+ return false;
+}
+
+void ArmMir2Lir::GenEasyMultiplyTwoOps(RegStorage r_dest, RegStorage r_src, EasyMultiplyOp* ops) {
+ // dest = ( src << shift1) + [ src | -src | 0 ]
+ // dest = (dest << shift2) + [ src | -src | 0 ]
+ for (int i = 0; i < 2; i++) {
+ RegStorage r_src2;
+ if (i == 0) {
+ r_src2 = r_src;
+ } else {
+ r_src2 = r_dest;
+ }
+ switch (ops[i].op) {
+ case kOpLsl:
+ OpRegRegImm(kOpLsl, r_dest, r_src2, ops[i].shift);
+ break;
+ case kOpAdd:
+ OpRegRegRegShift(kOpAdd, r_dest, r_src, r_src2, EncodeShift(kArmLsl, ops[i].shift));
+ break;
+ case kOpRsub:
+ OpRegRegRegShift(kOpRsub, r_dest, r_src, r_src2, EncodeShift(kArmLsl, ops[i].shift));
+ break;
+ default:
+ DCHECK_NE(i, 0);
+ DCHECK_EQ(ops[i].op, kOpInvalid);
+ break;
+ }
+ }
+}
+
+bool ArmMir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
+ EasyMultiplyOp ops[2];
+
+ if (!GetEasyMultiplyTwoOps(lit, ops)) {
+ return false;
+ }
+
+ rl_src = LoadValue(rl_src, kCoreReg);
+ RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+
+ GenEasyMultiplyTwoOps(rl_result.reg, rl_src.reg, ops);
+ StoreValue(rl_dest, rl_result);
+ return true;
+}
+
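(Hand-worked expansions of GetEasyMultiplyTwoOps/GenEasyMultiplyTwoOps for two
sample literals -- illustrative, not produced by this code:)

    #include <cstdint>
    int32_t MultiplyBy10(int32_t src) {   // 10 = 5 * 2, 5 = 4 + 1
      int32_t dest = src + (src << 2);    // ops[0] = {kOpAdd, 2}: src * 5
      return dest << 1;                   // ops[1] = {kOpLsl, 1}: src * 10
    }
    int32_t MultiplyBy14(int32_t src) {   // 14 = 7 * 2, 7 = 8 - 1
      int32_t dest = (src << 3) - src;    // ops[0] = {kOpRsub, 3}: src * 7
      return dest << 1;                   // ops[1] = {kOpLsl, 1}: src * 14
    }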
+LIR* ArmMir2Lir::GenRegMemCheck(ConditionCode c_code, RegStorage reg1, RegStorage base,
+ int offset, ThrowKind kind) {
LOG(FATAL) << "Unexpected use of GenRegMemCheck for Arm";
return NULL;
}
@@ -470,12 +586,11 @@
return rl_dest;
}
-RegLocation ArmMir2Lir::GenDivRemLit(RegLocation rl_dest, int reg1, int lit,
- bool is_div) {
+RegLocation ArmMir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg1, int lit, bool is_div) {
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
// Put the literal in a temp.
- int lit_temp = AllocTemp();
+ RegStorage lit_temp = AllocTemp();
LoadConstant(lit_temp, lit);
// Use the generic case for div/rem with arg2 in a register.
// TODO: The literal temp can be freed earlier during a modulus to reduce reg pressure.
@@ -485,22 +600,22 @@
return rl_result;
}
-RegLocation ArmMir2Lir::GenDivRem(RegLocation rl_dest, int reg1, int reg2,
+RegLocation ArmMir2Lir::GenDivRem(RegLocation rl_dest, RegStorage reg1, RegStorage reg2,
bool is_div) {
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (is_div) {
// Simple case, use sdiv instruction.
- OpRegRegReg(kOpDiv, rl_result.reg.GetReg(), reg1, reg2);
+ OpRegRegReg(kOpDiv, rl_result.reg, reg1, reg2);
} else {
// Remainder case, use the following code:
// temp = reg1 / reg2 - integer division
// temp = temp * reg2
// dest = reg1 - temp
- int temp = AllocTemp();
+ RegStorage temp = AllocTemp();
OpRegRegReg(kOpDiv, temp, reg1, reg2);
OpRegReg(kOpMul, temp, reg2);
- OpRegRegReg(kOpSub, rl_result.reg.GetReg(), reg1, temp);
+ OpRegRegReg(kOpSub, rl_result.reg, reg1, temp);
FreeTemp(temp);
}
@@ -515,10 +630,10 @@
rl_src2 = LoadValue(rl_src2, kCoreReg);
RegLocation rl_dest = InlineTarget(info);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegReg(kOpCmp, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+ OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
OpIT((is_min) ? kCondGt : kCondLt, "E");
- OpRegReg(kOpMov, rl_result.reg.GetReg(), rl_src2.reg.GetReg());
- OpRegReg(kOpMov, rl_result.reg.GetReg(), rl_src1.reg.GetReg());
+ OpRegReg(kOpMov, rl_result.reg, rl_src2.reg);
+ OpRegReg(kOpMov, rl_result.reg, rl_src1.reg);
GenBarrier();
StoreValue(rl_dest, rl_result);
return true;
@@ -526,24 +641,24 @@
bool ArmMir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) {
RegLocation rl_src_address = info->args[0]; // long address
- rl_src_address.wide = 0; // ignore high half in info->args[1]
+ rl_src_address = NarrowRegLoc(rl_src_address); // ignore high half in info->args[1]
RegLocation rl_dest = InlineTarget(info);
RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (size == kLong) {
// Fake unaligned LDRD by two unaligned LDR instructions on ARMv7 with SCTLR.A set to 0.
- if (rl_address.reg.GetReg() != rl_result.reg.GetReg()) {
- LoadBaseDisp(rl_address.reg.GetReg(), 0, rl_result.reg.GetReg(), kWord, INVALID_SREG);
- LoadBaseDisp(rl_address.reg.GetReg(), 4, rl_result.reg.GetHighReg(), kWord, INVALID_SREG);
+ if (rl_address.reg.GetReg() != rl_result.reg.GetLowReg()) {
+ LoadWordDisp(rl_address.reg, 0, rl_result.reg.GetLow());
+ LoadWordDisp(rl_address.reg, 4, rl_result.reg.GetHigh());
} else {
- LoadBaseDisp(rl_address.reg.GetReg(), 4, rl_result.reg.GetHighReg(), kWord, INVALID_SREG);
- LoadBaseDisp(rl_address.reg.GetReg(), 0, rl_result.reg.GetReg(), kWord, INVALID_SREG);
+ LoadWordDisp(rl_address.reg, 4, rl_result.reg.GetHigh());
+ LoadWordDisp(rl_address.reg, 0, rl_result.reg.GetLow());
}
StoreValueWide(rl_dest, rl_result);
} else {
DCHECK(size == kSignedByte || size == kSignedHalf || size == kWord);
// Unaligned load with LDR and LDRSH is allowed on ARMv7 with SCTLR.A set to 0.
- LoadBaseDisp(rl_address.reg.GetReg(), 0, rl_result.reg.GetReg(), size, INVALID_SREG);
+ LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size, INVALID_SREG);
StoreValue(rl_dest, rl_result);
}
return true;
@@ -551,24 +666,24 @@
bool ArmMir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
RegLocation rl_src_address = info->args[0]; // long address
- rl_src_address.wide = 0; // ignore high half in info->args[1]
+ rl_src_address = NarrowRegLoc(rl_src_address); // ignore high half in info->args[1]
RegLocation rl_src_value = info->args[2]; // [size] value
RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);
if (size == kLong) {
// Fake unaligned STRD by two unaligned STR instructions on ARMv7 with SCTLR.A set to 0.
RegLocation rl_value = LoadValueWide(rl_src_value, kCoreReg);
- StoreBaseDisp(rl_address.reg.GetReg(), 0, rl_value.reg.GetReg(), kWord);
- StoreBaseDisp(rl_address.reg.GetReg(), 4, rl_value.reg.GetHighReg(), kWord);
+ StoreBaseDisp(rl_address.reg, 0, rl_value.reg.GetLow(), kWord);
+ StoreBaseDisp(rl_address.reg, 4, rl_value.reg.GetHigh(), kWord);
} else {
DCHECK(size == kSignedByte || size == kSignedHalf || size == kWord);
// Unaligned store with STR and STRH is allowed on ARMv7 with SCTLR.A set to 0.
RegLocation rl_value = LoadValue(rl_src_value, kCoreReg);
- StoreBaseDisp(rl_address.reg.GetReg(), 0, rl_value.reg.GetReg(), size);
+ StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size);
}
return true;
}
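
The Peek/Poke hunks above split a 64-bit access into two 32-bit accesses at offsets 0 and 4, which ARMv7 tolerates at unaligned addresses when SCTLR.A is clear. A hedged C++ sketch of the same splitting (memcpy stands in for the unaligned LDR/STR; function names are hypothetical):

```cpp
#include <cstdint>
#include <cstring>

// Emulate an unaligned 64-bit load as two 32-bit loads at offsets 0 and 4,
// mirroring the generated LoadWordDisp pair. Assumes little-endian, as on ARM.
uint64_t Peek64(const void* addr) {
  uint32_t lo, hi;
  std::memcpy(&lo, static_cast<const char*>(addr) + 0, sizeof(lo));
  std::memcpy(&hi, static_cast<const char*>(addr) + 4, sizeof(hi));
  return (static_cast<uint64_t>(hi) << 32) | lo;
}

// Emulate an unaligned 64-bit store as two 32-bit stores, mirroring the
// StoreBaseDisp pair above.
void Poke64(void* addr, uint64_t value) {
  uint32_t lo = static_cast<uint32_t>(value);
  uint32_t hi = static_cast<uint32_t>(value >> 32);
  std::memcpy(static_cast<char*>(addr) + 0, &lo, sizeof(lo));
  std::memcpy(static_cast<char*>(addr) + 4, &hi, sizeof(hi));
}
```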
-void ArmMir2Lir::OpLea(int rBase, int reg1, int reg2, int scale, int offset) {
+void ArmMir2Lir::OpLea(RegStorage r_base, RegStorage reg1, RegStorage reg2, int scale, int offset) {
LOG(FATAL) << "Unexpected use of OpLea for Arm";
}
@@ -581,7 +696,7 @@
// Unused - RegLocation rl_src_unsafe = info->args[0];
RegLocation rl_src_obj = info->args[1]; // Object - known non-null
RegLocation rl_src_offset = info->args[2]; // long low
- rl_src_offset.wide = 0; // ignore high half in info->args[3]
+ rl_src_offset = NarrowRegLoc(rl_src_offset); // ignore high half in info->args[3]
RegLocation rl_src_expected = info->args[4]; // int, long or Object
// If is_long, high half is in info->args[5]
RegLocation rl_src_new_value = info->args[is_long ? 6 : 5]; // int, long or Object
@@ -602,25 +717,25 @@
LockTemp(rARM_LR);
bool load_early = true;
if (is_long) {
- bool expected_is_core_reg =
- rl_src_expected.location == kLocPhysReg && !IsFpReg(rl_src_expected.reg.GetReg());
- bool new_value_is_core_reg =
- rl_src_new_value.location == kLocPhysReg && !IsFpReg(rl_src_new_value.reg.GetReg());
- bool expected_is_good_reg = expected_is_core_reg && !IsTemp(rl_src_expected.reg.GetReg());
- bool new_value_is_good_reg = new_value_is_core_reg && !IsTemp(rl_src_new_value.reg.GetReg());
+ int expected_reg = is_long ? rl_src_expected.reg.GetLowReg() : rl_src_expected.reg.GetReg();
+ int new_val_reg = is_long ? rl_src_new_value.reg.GetLowReg() : rl_src_new_value.reg.GetReg();
+ bool expected_is_core_reg = rl_src_expected.location == kLocPhysReg && !IsFpReg(expected_reg);
+ bool new_value_is_core_reg = rl_src_new_value.location == kLocPhysReg && !IsFpReg(new_val_reg);
+ bool expected_is_good_reg = expected_is_core_reg && !IsTemp(expected_reg);
+ bool new_value_is_good_reg = new_value_is_core_reg && !IsTemp(new_val_reg);
if (!expected_is_good_reg && !new_value_is_good_reg) {
// None of expected/new_value is non-temp reg, need to load both late
load_early = false;
// Make sure they are not in the temp regs and the load will not be skipped.
if (expected_is_core_reg) {
- FlushRegWide(rl_src_expected.reg.GetReg(), rl_src_expected.reg.GetHighReg());
+ FlushRegWide(rl_src_expected.reg);
ClobberSReg(rl_src_expected.s_reg_low);
ClobberSReg(GetSRegHi(rl_src_expected.s_reg_low));
rl_src_expected.location = kLocDalvikFrame;
}
if (new_value_is_core_reg) {
- FlushRegWide(rl_src_new_value.reg.GetReg(), rl_src_new_value.reg.GetHighReg());
+ FlushRegWide(rl_src_new_value.reg);
ClobberSReg(rl_src_new_value.s_reg_low);
ClobberSReg(GetSRegHi(rl_src_new_value.s_reg_low));
rl_src_new_value.location = kLocDalvikFrame;
@@ -641,13 +756,13 @@
if (is_object && !mir_graph_->IsConstantNullRef(rl_new_value)) {
// Mark card for object assuming new value is stored.
- MarkGCCard(rl_new_value.reg.GetReg(), rl_object.reg.GetReg());
+ MarkGCCard(rl_new_value.reg, rl_object.reg);
}
RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
- int r_ptr = rARM_LR;
- OpRegRegReg(kOpAdd, r_ptr, rl_object.reg.GetReg(), rl_offset.reg.GetReg());
+ RegStorage r_ptr = rs_rARM_LR;
+ OpRegRegReg(kOpAdd, r_ptr, rl_object.reg, rl_offset.reg);
// Free now unneeded rl_object and rl_offset to give more temps.
ClobberSReg(rl_object.s_reg_low);
@@ -662,8 +777,8 @@
rl_expected = LoadValueWide(rl_src_expected, kCoreReg);
} else {
// NOTE: partially defined rl_expected & rl_new_value - but we just want the regs.
- int low_reg = AllocTemp();
- int high_reg = AllocTemp();
+ int low_reg = AllocTemp().GetReg();
+ int high_reg = AllocTemp().GetReg();
rl_new_value.reg = RegStorage(RegStorage::k64BitPair, low_reg, high_reg);
rl_expected = rl_new_value;
}
@@ -673,38 +788,38 @@
// } while (tmp == 0 && failure([r_ptr] <- r_new_value));
// result = tmp != 0;
- int r_tmp = AllocTemp();
+ RegStorage r_tmp = AllocTemp();
LIR* target = NewLIR0(kPseudoTargetLabel);
if (is_long) {
- int r_tmp_high = AllocTemp();
+ RegStorage r_tmp_high = AllocTemp();
if (!load_early) {
- LoadValueDirectWide(rl_src_expected, rl_expected.reg.GetReg(), rl_expected.reg.GetHighReg());
+ LoadValueDirectWide(rl_src_expected, rl_expected.reg);
}
- NewLIR3(kThumb2Ldrexd, r_tmp, r_tmp_high, r_ptr);
- OpRegReg(kOpSub, r_tmp, rl_expected.reg.GetReg());
- OpRegReg(kOpSub, r_tmp_high, rl_expected.reg.GetHighReg());
+ NewLIR3(kThumb2Ldrexd, r_tmp.GetReg(), r_tmp_high.GetReg(), r_ptr.GetReg());
+ OpRegReg(kOpSub, r_tmp, rl_expected.reg.GetLow());
+ OpRegReg(kOpSub, r_tmp_high, rl_expected.reg.GetHigh());
if (!load_early) {
- LoadValueDirectWide(rl_src_new_value, rl_new_value.reg.GetReg(), rl_new_value.reg.GetHighReg());
+ LoadValueDirectWide(rl_src_new_value, rl_new_value.reg);
}
// Make sure we use ORR that sets the ccode
- if (ARM_LOWREG(r_tmp) && ARM_LOWREG(r_tmp_high)) {
- NewLIR2(kThumbOrr, r_tmp, r_tmp_high);
+ if (ARM_LOWREG(r_tmp.GetReg()) && ARM_LOWREG(r_tmp_high.GetReg())) {
+ NewLIR2(kThumbOrr, r_tmp.GetReg(), r_tmp_high.GetReg());
} else {
- NewLIR4(kThumb2OrrRRRs, r_tmp, r_tmp, r_tmp_high, 0);
+ NewLIR4(kThumb2OrrRRRs, r_tmp.GetReg(), r_tmp.GetReg(), r_tmp_high.GetReg(), 0);
}
FreeTemp(r_tmp_high); // Now unneeded
DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
OpIT(kCondEq, "T");
- NewLIR4(kThumb2Strexd /* eq */, r_tmp, rl_new_value.reg.GetReg(), rl_new_value.reg.GetHighReg(), r_ptr);
+ NewLIR4(kThumb2Strexd /* eq */, r_tmp.GetReg(), rl_new_value.reg.GetLowReg(), rl_new_value.reg.GetHighReg(), r_ptr.GetReg());
} else {
- NewLIR3(kThumb2Ldrex, r_tmp, r_ptr, 0);
- OpRegReg(kOpSub, r_tmp, rl_expected.reg.GetReg());
+ NewLIR3(kThumb2Ldrex, r_tmp.GetReg(), r_ptr.GetReg(), 0);
+ OpRegReg(kOpSub, r_tmp, rl_expected.reg);
DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
OpIT(kCondEq, "T");
- NewLIR4(kThumb2Strex /* eq */, r_tmp, rl_new_value.reg.GetReg(), r_ptr, 0);
+ NewLIR4(kThumb2Strex /* eq */, r_tmp.GetReg(), rl_new_value.reg.GetReg(), r_ptr.GetReg(), 0);
}
// Still one conditional left from OpIT(kCondEq, "T") from either branch
@@ -712,16 +827,15 @@
OpCondBranch(kCondEq, target);
if (!load_early) {
- FreeTemp(rl_expected.reg.GetReg()); // Now unneeded.
- FreeTemp(rl_expected.reg.GetHighReg()); // Now unneeded.
+ FreeTemp(rl_expected.reg); // Now unneeded.
}
// result := (tmp1 != 0) ? 0 : 1;
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegRegImm(kOpRsub, rl_result.reg.GetReg(), r_tmp, 1);
+ OpRegRegImm(kOpRsub, rl_result.reg, r_tmp, 1);
DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
OpIT(kCondUlt, "");
- LoadConstant(rl_result.reg.GetReg(), 0); /* cc */
+ LoadConstant(rl_result.reg, 0); /* cc */
FreeTemp(r_tmp); // Now unneeded.
StoreValue(rl_dest, rl_result);
@@ -732,31 +846,32 @@
return true;
}
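
The ldrex/strex loop generated above has the usual load-linked/store-conditional shape: fail fast if the loaded value differs from the expected one, and retry only when the exclusive store is disturbed. A rough C++ analogue of the 32-bit case, under the assumption that `compare_exchange_weak` models strex's spurious failures (names hypothetical, not the runtime's API):

```cpp
#include <atomic>
#include <cstdint>

// Rough analogue of the emitted loop: keep retrying while the value still
// equals `expected` but the exclusive store fails (another core intervened).
// Returns true on success, matching the result = (tmp == 0) computation.
bool CasLoop(std::atomic<int32_t>* addr, int32_t expected, int32_t new_value) {
  int32_t observed = expected;
  // compare_exchange_weak may fail spuriously, just like strex; the loop
  // mirrors the branch back to the kPseudoTargetLabel above.
  while (!addr->compare_exchange_weak(observed, new_value)) {
    if (observed != expected) {
      return false;  // value changed: the IT-guarded strex was skipped
    }
    observed = expected;  // spurious failure: retry the exclusive pair
  }
  return true;
}
```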
-LIR* ArmMir2Lir::OpPcRelLoad(int reg, LIR* target) {
- return RawLIR(current_dalvik_offset_, kThumb2LdrPcRel12, reg, 0, 0, 0, 0, target);
+LIR* ArmMir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
+ return RawLIR(current_dalvik_offset_, kThumb2LdrPcRel12, reg.GetReg(), 0, 0, 0, 0, target);
}
-LIR* ArmMir2Lir::OpVldm(int rBase, int count) {
- return NewLIR3(kThumb2Vldms, rBase, fr0, count);
+LIR* ArmMir2Lir::OpVldm(RegStorage r_base, int count) {
+ return NewLIR3(kThumb2Vldms, r_base.GetReg(), fr0, count);
}
-LIR* ArmMir2Lir::OpVstm(int rBase, int count) {
- return NewLIR3(kThumb2Vstms, rBase, fr0, count);
+LIR* ArmMir2Lir::OpVstm(RegStorage r_base, int count) {
+ return NewLIR3(kThumb2Vstms, r_base.GetReg(), fr0, count);
}
void ArmMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
RegLocation rl_result, int lit,
int first_bit, int second_bit) {
- OpRegRegRegShift(kOpAdd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), rl_src.reg.GetReg(),
+ OpRegRegRegShift(kOpAdd, rl_result.reg, rl_src.reg, rl_src.reg,
EncodeShift(kArmLsl, second_bit - first_bit));
if (first_bit != 0) {
- OpRegRegImm(kOpLsl, rl_result.reg.GetReg(), rl_result.reg.GetReg(), first_bit);
+ OpRegRegImm(kOpLsl, rl_result.reg, rl_result.reg, first_bit);
}
}
-void ArmMir2Lir::GenDivZeroCheck(int reg_lo, int reg_hi) {
- int t_reg = AllocTemp();
- NewLIR4(kThumb2OrrRRRs, t_reg, reg_lo, reg_hi, 0);
+void ArmMir2Lir::GenDivZeroCheck(RegStorage reg) {
+ DCHECK(reg.IsPair()); // TODO: support k64BitSolo.
+ RegStorage t_reg = AllocTemp();
+ NewLIR4(kThumb2OrrRRRs, t_reg.GetReg(), reg.GetLowReg(), reg.GetHighReg(), 0);
FreeTemp(t_reg);
GenCheck(kCondEq, kThrowDivZero);
}
@@ -768,7 +883,7 @@
}
// Decrement register and branch on condition
-LIR* ArmMir2Lir::OpDecAndBranch(ConditionCode c_code, int reg, LIR* target) {
+LIR* ArmMir2Lir::OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) {
// Combine sub & test using sub setflags encoding here
OpRegRegImm(kOpSub, reg, reg, 1); // For value == 1, this should set flags.
DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
@@ -777,6 +892,9 @@
void ArmMir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
#if ANDROID_SMP != 0
+ // Start by assuming the last LIR emitted can serve as the barrier; if it cannot, generate one.
+ LIR* barrier = last_lir_insn_;
+
int dmb_flavor;
// TODO: revisit Arm barrier kinds
switch (barrier_kind) {
@@ -789,25 +907,33 @@
dmb_flavor = kSY; // quiet gcc.
break;
}
- LIR* dmb = NewLIR1(kThumb2Dmb, dmb_flavor);
- dmb->u.m.def_mask = ENCODE_ALL;
+
+ // If the same barrier already exists, don't generate another.
+ if (barrier == nullptr || barrier->opcode != kThumb2Dmb || barrier->operands[0] != dmb_flavor) {
+ barrier = NewLIR1(kThumb2Dmb, dmb_flavor);
+ }
+
+ // At this point we must have a memory barrier. Mark it as a scheduling barrier as well.
+ DCHECK(!barrier->flags.use_def_invalid);
+ barrier->u.m.def_mask = ENCODE_ALL;
#endif
}
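
The dedup rule introduced above reduces to one predicate: reuse the trailing instruction only if it is already a dmb of the requested flavor. A self-contained sketch (MiniLIR is a stand-in for the real LIR struct, not the actual type):

```cpp
// Minimal stand-in for the LIR fields GenMemBarrier consults; illustrative only.
struct MiniLIR {
  int opcode;
  int operands[1];
};

constexpr int kDmbOpcode = 1;  // stands in for kThumb2Dmb

// Reuse the trailing dmb only when it already carries the flavor we need;
// otherwise the caller emits a fresh one. Either way, the chosen barrier is
// then marked as a scheduling fence (def_mask = ENCODE_ALL in the patch).
bool CanReuseTrailingBarrier(const MiniLIR* last, int dmb_flavor) {
  return last != nullptr && last->opcode == kDmbOpcode &&
         last->operands[0] == dmb_flavor;
}
```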
void ArmMir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
rl_src = LoadValueWide(rl_src, kCoreReg);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- int z_reg = AllocTemp();
+ RegStorage z_reg = AllocTemp();
LoadConstantNoClobber(z_reg, 0);
// Check for destructive overlap
- if (rl_result.reg.GetReg() == rl_src.reg.GetHighReg()) {
- int t_reg = AllocTemp();
- OpRegRegReg(kOpSub, rl_result.reg.GetReg(), z_reg, rl_src.reg.GetReg());
- OpRegRegReg(kOpSbc, rl_result.reg.GetHighReg(), z_reg, t_reg);
+ if (rl_result.reg.GetLowReg() == rl_src.reg.GetHighReg()) {
+ RegStorage t_reg = AllocTemp();
+ OpRegRegReg(kOpSub, rl_result.reg.GetLow(), z_reg, rl_src.reg.GetLow());
+ OpRegRegReg(kOpSbc, rl_result.reg.GetHigh(), z_reg, t_reg);
FreeTemp(t_reg);
} else {
- OpRegRegReg(kOpSub, rl_result.reg.GetReg(), z_reg, rl_src.reg.GetReg());
- OpRegRegReg(kOpSbc, rl_result.reg.GetHighReg(), z_reg, rl_src.reg.GetHighReg());
+ OpRegRegReg(kOpSub, rl_result.reg.GetLow(), z_reg, rl_src.reg.GetLow());
+ OpRegRegReg(kOpSbc, rl_result.reg.GetHigh(), z_reg, rl_src.reg.GetHigh());
}
FreeTemp(z_reg);
StoreValueWide(rl_dest, rl_result);
@@ -843,19 +969,19 @@
rl_src2 = LoadValueWide(rl_src2, kCoreReg);
int reg_status = 0;
- int res_lo = INVALID_REG;
- int res_hi = INVALID_REG;
- bool dest_promoted = rl_dest.location == kLocPhysReg && !rl_dest.reg.IsInvalid() &&
- !IsTemp(rl_dest.reg.GetReg()) && !IsTemp(rl_dest.reg.GetHighReg());
- bool src1_promoted = !IsTemp(rl_src1.reg.GetReg()) && !IsTemp(rl_src1.reg.GetHighReg());
- bool src2_promoted = !IsTemp(rl_src2.reg.GetReg()) && !IsTemp(rl_src2.reg.GetHighReg());
+ RegStorage res_lo;
+ RegStorage res_hi;
+ bool dest_promoted = rl_dest.location == kLocPhysReg && rl_dest.reg.Valid() &&
+ !IsTemp(rl_dest.reg.GetLowReg()) && !IsTemp(rl_dest.reg.GetHighReg());
+ bool src1_promoted = !IsTemp(rl_src1.reg.GetLowReg()) && !IsTemp(rl_src1.reg.GetHighReg());
+ bool src2_promoted = !IsTemp(rl_src2.reg.GetLowReg()) && !IsTemp(rl_src2.reg.GetHighReg());
// Check if rl_dest is *not* either operand and we have enough temp registers.
if ((rl_dest.s_reg_low != rl_src1.s_reg_low && rl_dest.s_reg_low != rl_src2.s_reg_low) &&
(dest_promoted || src1_promoted || src2_promoted)) {
// In this case, we do not need to manually allocate temp registers for result.
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- res_lo = rl_result.reg.GetReg();
- res_hi = rl_result.reg.GetHighReg();
+ res_lo = rl_result.reg.GetLow();
+ res_hi = rl_result.reg.GetHigh();
} else {
res_lo = AllocTemp();
if ((rl_src1.s_reg_low == rl_src2.s_reg_low) || src1_promoted || src2_promoted) {
@@ -872,34 +998,36 @@
// Temporarily add LR to the temp pool, and assign it to tmp1
MarkTemp(rARM_LR);
FreeTemp(rARM_LR);
- int tmp1 = rARM_LR;
+ RegStorage tmp1 = rs_rARM_LR;
LockTemp(rARM_LR);
- if (rl_src1.reg.GetReg() == rl_src2.reg.GetReg()) {
- DCHECK_NE(res_hi, INVALID_REG);
- DCHECK_NE(res_lo, INVALID_REG);
- NewLIR3(kThumb2MulRRR, tmp1, rl_src1.reg.GetReg(), rl_src1.reg.GetHighReg());
- NewLIR4(kThumb2Umull, res_lo, res_hi, rl_src1.reg.GetReg(), rl_src1.reg.GetReg());
+ if (rl_src1.reg == rl_src2.reg) {
+ DCHECK(res_hi.Valid());
+ DCHECK(res_lo.Valid());
+ NewLIR3(kThumb2MulRRR, tmp1.GetReg(), rl_src1.reg.GetLowReg(), rl_src1.reg.GetHighReg());
+ NewLIR4(kThumb2Umull, res_lo.GetReg(), res_hi.GetReg(), rl_src1.reg.GetLowReg(),
+ rl_src1.reg.GetLowReg());
OpRegRegRegShift(kOpAdd, res_hi, res_hi, tmp1, EncodeShift(kArmLsl, 1));
} else {
- NewLIR3(kThumb2MulRRR, tmp1, rl_src2.reg.GetReg(), rl_src1.reg.GetHighReg());
+ NewLIR3(kThumb2MulRRR, tmp1.GetReg(), rl_src2.reg.GetLowReg(), rl_src1.reg.GetHighReg());
if (reg_status == 2) {
- DCHECK_EQ(res_hi, INVALID_REG);
- DCHECK_NE(rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+ DCHECK(!res_hi.Valid());
+ DCHECK_NE(rl_src1.reg.GetLowReg(), rl_src2.reg.GetLowReg());
DCHECK_NE(rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
FreeTemp(rl_src1.reg.GetHighReg());
res_hi = AllocTemp();
}
- DCHECK_NE(res_hi, INVALID_REG);
- DCHECK_NE(res_lo, INVALID_REG);
- NewLIR4(kThumb2Umull, res_lo, res_hi, rl_src2.reg.GetReg(), rl_src1.reg.GetReg());
- NewLIR4(kThumb2Mla, tmp1, rl_src1.reg.GetReg(), rl_src2.reg.GetHighReg(), tmp1);
- NewLIR4(kThumb2AddRRR, res_hi, tmp1, res_hi, 0);
+ DCHECK(res_hi.Valid());
+ DCHECK(res_lo.Valid());
+ NewLIR4(kThumb2Umull, res_lo.GetReg(), res_hi.GetReg(), rl_src2.reg.GetLowReg(),
+ rl_src1.reg.GetLowReg());
+ NewLIR4(kThumb2Mla, tmp1.GetReg(), rl_src1.reg.GetLowReg(), rl_src2.reg.GetHighReg(),
+ tmp1.GetReg());
+ NewLIR4(kThumb2AddRRR, res_hi.GetReg(), tmp1.GetReg(), res_hi.GetReg(), 0);
if (reg_status == 2) {
// Clobber rl_src1 since it was corrupted.
- FreeTemp(rl_src1.reg.GetReg());
- Clobber(rl_src1.reg.GetReg());
- Clobber(rl_src1.reg.GetHighReg());
+ FreeTemp(rl_src1.reg);
+ Clobber(rl_src1.reg);
}
}
@@ -912,8 +1040,7 @@
// We had manually allocated registers for rl_result.
// Now construct a RegLocation.
rl_result = GetReturnWide(false); // Just using as a template.
- rl_result.reg.SetReg(res_lo);
- rl_result.reg.SetHighReg(res_hi);
+ rl_result.reg = RegStorage::MakeRegPair(res_lo, res_hi);
}
StoreValueWide(rl_dest, rl_result);
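
The umull/mla/add sequence above is the textbook 64x64-to-64 decomposition: the low words multiply exactly, and the two cross products can only land in the high word. A host-side C++ check of that identity (function name hypothetical):

```cpp
#include <cassert>
#include <cstdint>

// What the umull/mla/add sequence computes:
//   lo64(a*b) = umull(a_lo, b_lo)
//   high word += a_lo*b_hi + a_hi*b_lo   (the mla-style cross terms)
// a_hi*b_hi is dropped: it lies entirely above bit 63.
uint64_t Mul64ViaHalves(uint64_t a, uint64_t b) {
  uint32_t a_lo = static_cast<uint32_t>(a), a_hi = static_cast<uint32_t>(a >> 32);
  uint32_t b_lo = static_cast<uint32_t>(b), b_hi = static_cast<uint32_t>(b >> 32);
  uint64_t res = static_cast<uint64_t>(a_lo) * b_lo;        // umull
  uint32_t cross = a_lo * b_hi + a_hi * b_lo;               // mul + mla, mod 2^32
  return res + (static_cast<uint64_t>(cross) << 32);        // add into high half
}

int main() {
  uint64_t a = 0x123456789ABCDEF0ULL, b = 0x0FEDCBA987654321ULL;
  assert(Mul64ViaHalves(a, b) == a * b);
  return 0;
}
```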
@@ -971,27 +1098,26 @@
}
/* null object? */
- GenNullCheck(rl_array.reg.GetReg(), opt_flags);
+ GenNullCheck(rl_array.reg, opt_flags);
bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
- int reg_len = INVALID_REG;
+ RegStorage reg_len;
if (needs_range_check) {
reg_len = AllocTemp();
/* Get len */
- LoadWordDisp(rl_array.reg.GetReg(), len_offset, reg_len);
+ LoadWordDisp(rl_array.reg, len_offset, reg_len);
MarkPossibleNullPointerException(opt_flags);
} else {
- ForceImplicitNullCheck(rl_array.reg.GetReg(), opt_flags);
+ ForceImplicitNullCheck(rl_array.reg, opt_flags);
}
if (rl_dest.wide || rl_dest.fp || constant_index) {
- int reg_ptr;
+ RegStorage reg_ptr;
if (constant_index) {
- reg_ptr = rl_array.reg.GetReg(); // NOTE: must not alter reg_ptr in constant case.
+ reg_ptr = rl_array.reg; // NOTE: must not alter reg_ptr in constant case.
} else {
// No special indexed operation, lea + load w/ displacement
reg_ptr = AllocTemp();
- OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg.GetReg(), rl_index.reg.GetReg(),
- EncodeShift(kArmLsl, scale));
+ OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg, rl_index.reg, EncodeShift(kArmLsl, scale));
FreeTemp(rl_index.reg.GetReg());
}
rl_result = EvalLoc(rl_dest, reg_class, true);
@@ -1000,20 +1126,19 @@
if (constant_index) {
GenImmedCheck(kCondLs, reg_len, mir_graph_->ConstantValue(rl_index), kThrowConstantArrayBounds);
} else {
- GenRegRegCheck(kCondLs, reg_len, rl_index.reg.GetReg(), kThrowArrayBounds);
+ GenRegRegCheck(kCondLs, reg_len, rl_index.reg, kThrowArrayBounds);
}
FreeTemp(reg_len);
}
if (rl_dest.wide) {
- LoadBaseDispWide(reg_ptr, data_offset, rl_result.reg.GetReg(), rl_result.reg.GetHighReg(),
- INVALID_SREG);
+ LoadBaseDispWide(reg_ptr, data_offset, rl_result.reg, INVALID_SREG);
MarkPossibleNullPointerException(opt_flags);
if (!constant_index) {
FreeTemp(reg_ptr);
}
StoreValueWide(rl_dest, rl_result);
} else {
- LoadBaseDisp(reg_ptr, data_offset, rl_result.reg.GetReg(), size, INVALID_SREG);
+ LoadBaseDisp(reg_ptr, data_offset, rl_result.reg, size, INVALID_SREG);
MarkPossibleNullPointerException(opt_flags);
if (!constant_index) {
FreeTemp(reg_ptr);
@@ -1022,16 +1147,16 @@
}
} else {
// Offset base, then use indexed load
- int reg_ptr = AllocTemp();
- OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg.GetReg(), data_offset);
+ RegStorage reg_ptr = AllocTemp();
+ OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
FreeTemp(rl_array.reg.GetReg());
rl_result = EvalLoc(rl_dest, reg_class, true);
if (needs_range_check) {
- GenRegRegCheck(kCondUge, rl_index.reg.GetReg(), reg_len, kThrowArrayBounds);
+ GenRegRegCheck(kCondUge, rl_index.reg, reg_len, kThrowArrayBounds);
FreeTemp(reg_len);
}
- LoadBaseIndexed(reg_ptr, rl_index.reg.GetReg(), rl_result.reg.GetReg(), scale, size);
+ LoadBaseIndexed(reg_ptr, rl_index.reg, rl_result.reg, scale, size);
MarkPossibleNullPointerException(opt_flags);
FreeTemp(reg_ptr);
StoreValue(rl_dest, rl_result);
@@ -1065,31 +1190,31 @@
rl_index = LoadValue(rl_index, kCoreReg);
}
- int reg_ptr;
+ RegStorage reg_ptr;
bool allocated_reg_ptr_temp = false;
if (constant_index) {
- reg_ptr = rl_array.reg.GetReg();
+ reg_ptr = rl_array.reg;
} else if (IsTemp(rl_array.reg.GetReg()) && !card_mark) {
Clobber(rl_array.reg.GetReg());
- reg_ptr = rl_array.reg.GetReg();
+ reg_ptr = rl_array.reg;
} else {
allocated_reg_ptr_temp = true;
reg_ptr = AllocTemp();
}
/* null object? */
- GenNullCheck(rl_array.reg.GetReg(), opt_flags);
+ GenNullCheck(rl_array.reg, opt_flags);
bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
- int reg_len = INVALID_REG;
+ RegStorage reg_len;
if (needs_range_check) {
reg_len = AllocTemp();
// NOTE: max live temps (4) here.
/* Get len */
- LoadWordDisp(rl_array.reg.GetReg(), len_offset, reg_len);
+ LoadWordDisp(rl_array.reg, len_offset, reg_len);
MarkPossibleNullPointerException(opt_flags);
} else {
- ForceImplicitNullCheck(rl_array.reg.GetReg(), opt_flags);
+ ForceImplicitNullCheck(rl_array.reg, opt_flags);
}
/* at this point, reg_ptr points to array, 2 live temps */
if (rl_src.wide || rl_src.fp || constant_index) {
@@ -1099,41 +1224,39 @@
rl_src = LoadValue(rl_src, reg_class);
}
if (!constant_index) {
- OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg.GetReg(), rl_index.reg.GetReg(),
- EncodeShift(kArmLsl, scale));
+ OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg, rl_index.reg, EncodeShift(kArmLsl, scale));
}
if (needs_range_check) {
if (constant_index) {
GenImmedCheck(kCondLs, reg_len, mir_graph_->ConstantValue(rl_index), kThrowConstantArrayBounds);
} else {
- GenRegRegCheck(kCondLs, reg_len, rl_index.reg.GetReg(), kThrowArrayBounds);
+ GenRegRegCheck(kCondLs, reg_len, rl_index.reg, kThrowArrayBounds);
}
FreeTemp(reg_len);
}
if (rl_src.wide) {
- StoreBaseDispWide(reg_ptr, data_offset, rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
+ StoreBaseDispWide(reg_ptr, data_offset, rl_src.reg);
} else {
- StoreBaseDisp(reg_ptr, data_offset, rl_src.reg.GetReg(), size);
+ StoreBaseDisp(reg_ptr, data_offset, rl_src.reg, size);
}
MarkPossibleNullPointerException(opt_flags);
} else {
/* reg_ptr -> array data */
- OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg.GetReg(), data_offset);
+ OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
rl_src = LoadValue(rl_src, reg_class);
if (needs_range_check) {
- GenRegRegCheck(kCondUge, rl_index.reg.GetReg(), reg_len, kThrowArrayBounds);
+ GenRegRegCheck(kCondUge, rl_index.reg, reg_len, kThrowArrayBounds);
FreeTemp(reg_len);
}
- StoreBaseIndexed(reg_ptr, rl_index.reg.GetReg(), rl_src.reg.GetReg(),
- scale, size);
+ StoreBaseIndexed(reg_ptr, rl_index.reg, rl_src.reg, scale, size);
MarkPossibleNullPointerException(opt_flags);
}
if (allocated_reg_ptr_temp) {
FreeTemp(reg_ptr);
}
if (card_mark) {
- MarkGCCard(rl_src.reg.GetReg(), rl_array.reg.GetReg());
+ MarkGCCard(rl_src.reg, rl_array.reg);
}
}
@@ -1156,53 +1279,53 @@
case Instruction::SHL_LONG:
case Instruction::SHL_LONG_2ADDR:
if (shift_amount == 1) {
- OpRegRegReg(kOpAdd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), rl_src.reg.GetReg());
- OpRegRegReg(kOpAdc, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg(), rl_src.reg.GetHighReg());
+ OpRegRegReg(kOpAdd, rl_result.reg.GetLow(), rl_src.reg.GetLow(), rl_src.reg.GetLow());
+ OpRegRegReg(kOpAdc, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), rl_src.reg.GetHigh());
} else if (shift_amount == 32) {
- OpRegCopy(rl_result.reg.GetHighReg(), rl_src.reg.GetReg());
- LoadConstant(rl_result.reg.GetReg(), 0);
+ OpRegCopy(rl_result.reg.GetHigh(), rl_src.reg);
+ LoadConstant(rl_result.reg.GetLow(), 0);
} else if (shift_amount > 31) {
- OpRegRegImm(kOpLsl, rl_result.reg.GetHighReg(), rl_src.reg.GetReg(), shift_amount - 32);
- LoadConstant(rl_result.reg.GetReg(), 0);
+ OpRegRegImm(kOpLsl, rl_result.reg.GetHigh(), rl_src.reg.GetLow(), shift_amount - 32);
+ LoadConstant(rl_result.reg.GetLow(), 0);
} else {
- OpRegRegImm(kOpLsl, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg(), shift_amount);
- OpRegRegRegShift(kOpOr, rl_result.reg.GetHighReg(), rl_result.reg.GetHighReg(), rl_src.reg.GetReg(),
+ OpRegRegImm(kOpLsl, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), shift_amount);
+ OpRegRegRegShift(kOpOr, rl_result.reg.GetHigh(), rl_result.reg.GetHigh(), rl_src.reg.GetLow(),
EncodeShift(kArmLsr, 32 - shift_amount));
- OpRegRegImm(kOpLsl, rl_result.reg.GetReg(), rl_src.reg.GetReg(), shift_amount);
+ OpRegRegImm(kOpLsl, rl_result.reg.GetLow(), rl_src.reg.GetLow(), shift_amount);
}
break;
case Instruction::SHR_LONG:
case Instruction::SHR_LONG_2ADDR:
if (shift_amount == 32) {
- OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetHighReg());
- OpRegRegImm(kOpAsr, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg(), 31);
+ OpRegCopy(rl_result.reg.GetLow(), rl_src.reg.GetHigh());
+ OpRegRegImm(kOpAsr, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), 31);
} else if (shift_amount > 31) {
- OpRegRegImm(kOpAsr, rl_result.reg.GetReg(), rl_src.reg.GetHighReg(), shift_amount - 32);
- OpRegRegImm(kOpAsr, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg(), 31);
+ OpRegRegImm(kOpAsr, rl_result.reg.GetLow(), rl_src.reg.GetHigh(), shift_amount - 32);
+ OpRegRegImm(kOpAsr, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), 31);
} else {
- int t_reg = AllocTemp();
- OpRegRegImm(kOpLsr, t_reg, rl_src.reg.GetReg(), shift_amount);
- OpRegRegRegShift(kOpOr, rl_result.reg.GetReg(), t_reg, rl_src.reg.GetHighReg(),
+ RegStorage t_reg = AllocTemp();
+ OpRegRegImm(kOpLsr, t_reg, rl_src.reg.GetLow(), shift_amount);
+ OpRegRegRegShift(kOpOr, rl_result.reg.GetLow(), t_reg, rl_src.reg.GetHigh(),
EncodeShift(kArmLsl, 32 - shift_amount));
FreeTemp(t_reg);
- OpRegRegImm(kOpAsr, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg(), shift_amount);
+ OpRegRegImm(kOpAsr, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), shift_amount);
}
break;
case Instruction::USHR_LONG:
case Instruction::USHR_LONG_2ADDR:
if (shift_amount == 32) {
- OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetHighReg());
- LoadConstant(rl_result.reg.GetHighReg(), 0);
+ OpRegCopy(rl_result.reg.GetLow(), rl_src.reg.GetHigh());
+ LoadConstant(rl_result.reg.GetHigh(), 0);
} else if (shift_amount > 31) {
- OpRegRegImm(kOpLsr, rl_result.reg.GetReg(), rl_src.reg.GetHighReg(), shift_amount - 32);
- LoadConstant(rl_result.reg.GetHighReg(), 0);
+ OpRegRegImm(kOpLsr, rl_result.reg.GetLow(), rl_src.reg.GetHigh(), shift_amount - 32);
+ LoadConstant(rl_result.reg.GetHigh(), 0);
} else {
- int t_reg = AllocTemp();
- OpRegRegImm(kOpLsr, t_reg, rl_src.reg.GetReg(), shift_amount);
- OpRegRegRegShift(kOpOr, rl_result.reg.GetReg(), t_reg, rl_src.reg.GetHighReg(),
+ RegStorage t_reg = AllocTemp();
+ OpRegRegImm(kOpLsr, t_reg, rl_src.reg.GetLow(), shift_amount);
+ OpRegRegRegShift(kOpOr, rl_result.reg.GetLow(), t_reg, rl_src.reg.GetHigh(),
EncodeShift(kArmLsl, 32 - shift_amount));
FreeTemp(t_reg);
- OpRegRegImm(kOpLsr, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg(), shift_amount);
+ OpRegRegImm(kOpLsr, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), shift_amount);
}
break;
default:
@@ -1257,35 +1380,35 @@
switch (opcode) {
case Instruction::ADD_LONG:
case Instruction::ADD_LONG_2ADDR:
- NewLIR3(kThumb2AddRRI8M, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), mod_imm_lo);
+ NewLIR3(kThumb2AddRRI8M, rl_result.reg.GetLowReg(), rl_src1.reg.GetLowReg(), mod_imm_lo);
NewLIR3(kThumb2AdcRRI8M, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), mod_imm_hi);
break;
case Instruction::OR_LONG:
case Instruction::OR_LONG_2ADDR:
- if ((val_lo != 0) || (rl_result.reg.GetReg() != rl_src1.reg.GetReg())) {
- OpRegRegImm(kOpOr, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), val_lo);
+ if ((val_lo != 0) || (rl_result.reg.GetLowReg() != rl_src1.reg.GetLowReg())) {
+ OpRegRegImm(kOpOr, rl_result.reg.GetLow(), rl_src1.reg.GetLow(), val_lo);
}
if ((val_hi != 0) || (rl_result.reg.GetHighReg() != rl_src1.reg.GetHighReg())) {
- OpRegRegImm(kOpOr, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), val_hi);
+ OpRegRegImm(kOpOr, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), val_hi);
}
break;
case Instruction::XOR_LONG:
case Instruction::XOR_LONG_2ADDR:
- OpRegRegImm(kOpXor, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), val_lo);
- OpRegRegImm(kOpXor, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), val_hi);
+ OpRegRegImm(kOpXor, rl_result.reg.GetLow(), rl_src1.reg.GetLow(), val_lo);
+ OpRegRegImm(kOpXor, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), val_hi);
break;
case Instruction::AND_LONG:
case Instruction::AND_LONG_2ADDR:
- if ((val_lo != 0xffffffff) || (rl_result.reg.GetReg() != rl_src1.reg.GetReg())) {
- OpRegRegImm(kOpAnd, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), val_lo);
+ if ((val_lo != 0xffffffff) || (rl_result.reg.GetLowReg() != rl_src1.reg.GetLowReg())) {
+ OpRegRegImm(kOpAnd, rl_result.reg.GetLow(), rl_src1.reg.GetLow(), val_lo);
}
if ((val_hi != 0xffffffff) || (rl_result.reg.GetHighReg() != rl_src1.reg.GetHighReg())) {
- OpRegRegImm(kOpAnd, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), val_hi);
+ OpRegRegImm(kOpAnd, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), val_hi);
}
break;
case Instruction::SUB_LONG_2ADDR:
case Instruction::SUB_LONG:
- NewLIR3(kThumb2SubRRI8M, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), mod_imm_lo);
+ NewLIR3(kThumb2SubRRI8M, rl_result.reg.GetLowReg(), rl_src1.reg.GetLowReg(), mod_imm_lo);
NewLIR3(kThumb2SbcRRI8M, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), mod_imm_hi);
break;
default:
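
As a cross-check on the constant-shift cases earlier in this file, the three SHL branches (shift == 32, shift > 31, and shift < 32) compose a 64-bit left shift out of 32-bit halves. A C++ sketch verifying the decomposition against a native 64-bit shift (names hypothetical):

```cpp
#include <cassert>
#include <cstdint>

// 64-bit logical left shift built from 32-bit halves, matching the three
// branches in GenShiftImmOpLong. amount must be in [1, 63].
uint64_t Shl64(uint32_t lo, uint32_t hi, int amount) {
  uint32_t out_lo, out_hi;
  if (amount == 32) {
    out_hi = lo;                                       // mov hi, lo
    out_lo = 0;
  } else if (amount > 31) {
    out_hi = lo << (amount - 32);                      // lsl hi, lo, #amount-32
    out_lo = 0;
  } else {
    out_hi = (hi << amount) | (lo >> (32 - amount));   // lsl + orr with lsr
    out_lo = lo << amount;
  }
  return (static_cast<uint64_t>(out_hi) << 32) | out_lo;
}

int main() {
  uint64_t v = 0x89ABCDEF01234567ULL;
  for (int s = 1; s < 64; ++s) {
    assert(Shl64(static_cast<uint32_t>(v), static_cast<uint32_t>(v >> 32), s) == (v << s));
  }
  return 0;
}
```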
diff --git a/compiler/dex/quick/arm/target_arm.cc b/compiler/dex/quick/arm/target_arm.cc
index 7f8656a..5bab0e3 100644
--- a/compiler/dex/quick/arm/target_arm.cc
+++ b/compiler/dex/quick/arm/target_arm.cc
@@ -53,43 +53,43 @@
}
// Return a target-dependent special register.
-int ArmMir2Lir::TargetReg(SpecialTargetRegister reg) {
- int res = INVALID_REG;
+RegStorage ArmMir2Lir::TargetReg(SpecialTargetRegister reg) {
+ int res_reg = RegStorage::kInvalidRegVal;
switch (reg) {
- case kSelf: res = rARM_SELF; break;
- case kSuspend: res = rARM_SUSPEND; break;
- case kLr: res = rARM_LR; break;
- case kPc: res = rARM_PC; break;
- case kSp: res = rARM_SP; break;
- case kArg0: res = rARM_ARG0; break;
- case kArg1: res = rARM_ARG1; break;
- case kArg2: res = rARM_ARG2; break;
- case kArg3: res = rARM_ARG3; break;
- case kFArg0: res = rARM_FARG0; break;
- case kFArg1: res = rARM_FARG1; break;
- case kFArg2: res = rARM_FARG2; break;
- case kFArg3: res = rARM_FARG3; break;
- case kRet0: res = rARM_RET0; break;
- case kRet1: res = rARM_RET1; break;
- case kInvokeTgt: res = rARM_INVOKE_TGT; break;
- case kHiddenArg: res = r12; break;
- case kHiddenFpArg: res = INVALID_REG; break;
- case kCount: res = rARM_COUNT; break;
+ case kSelf: res_reg = rARM_SELF; break;
+ case kSuspend: res_reg = rARM_SUSPEND; break;
+ case kLr: res_reg = rARM_LR; break;
+ case kPc: res_reg = rARM_PC; break;
+ case kSp: res_reg = rARM_SP; break;
+ case kArg0: res_reg = rARM_ARG0; break;
+ case kArg1: res_reg = rARM_ARG1; break;
+ case kArg2: res_reg = rARM_ARG2; break;
+ case kArg3: res_reg = rARM_ARG3; break;
+ case kFArg0: res_reg = rARM_FARG0; break;
+ case kFArg1: res_reg = rARM_FARG1; break;
+ case kFArg2: res_reg = rARM_FARG2; break;
+ case kFArg3: res_reg = rARM_FARG3; break;
+ case kRet0: res_reg = rARM_RET0; break;
+ case kRet1: res_reg = rARM_RET1; break;
+ case kInvokeTgt: res_reg = rARM_INVOKE_TGT; break;
+ case kHiddenArg: res_reg = r12; break;
+ case kHiddenFpArg: res_reg = RegStorage::kInvalidRegVal; break;
+ case kCount: res_reg = rARM_COUNT; break;
}
- return res;
+ return RegStorage::Solo32(res_reg);
}
-int ArmMir2Lir::GetArgMappingToPhysicalReg(int arg_num) {
+RegStorage ArmMir2Lir::GetArgMappingToPhysicalReg(int arg_num) {
// For the 32-bit internal ABI, the first 3 arguments are passed in registers.
switch (arg_num) {
case 0:
- return rARM_ARG1;
+ return rs_rARM_ARG1;
case 1:
- return rARM_ARG2;
+ return rs_rARM_ARG2;
case 2:
- return rARM_ARG3;
+ return rs_rARM_ARG3;
default:
- return INVALID_REG;
+ return RegStorage::InvalidReg();
}
}
@@ -528,20 +528,16 @@
// Alloc a pair of core registers, or a double.
RegStorage ArmMir2Lir::AllocTypedTempWide(bool fp_hint, int reg_class) {
- int high_reg;
- int low_reg;
-
if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
- low_reg = AllocTempDouble();
- high_reg = low_reg + 1;
+ return AllocTempDouble();
} else {
- low_reg = AllocTemp();
- high_reg = AllocTemp();
+ RegStorage low_reg = AllocTemp();
+ RegStorage high_reg = AllocTemp();
+ return RegStorage::MakeRegPair(low_reg, high_reg);
}
- return RegStorage(RegStorage::k64BitPair, low_reg, high_reg);
}
-int ArmMir2Lir::AllocTypedTemp(bool fp_hint, int reg_class) {
+RegStorage ArmMir2Lir::AllocTypedTemp(bool fp_hint, int reg_class) {
if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg))
return AllocTempFloat();
return AllocTemp();
@@ -583,15 +579,18 @@
reg_pool_->next_core_reg = r2;
}
-void ArmMir2Lir::FreeRegLocTemps(RegLocation rl_keep,
- RegLocation rl_free) {
- if ((rl_free.reg.GetReg() != rl_keep.reg.GetReg()) && (rl_free.reg.GetReg() != rl_keep.reg.GetHighReg()) &&
- (rl_free.reg.GetHighReg() != rl_keep.reg.GetReg()) && (rl_free.reg.GetHighReg() != rl_keep.reg.GetHighReg())) {
- // No overlap, free both
- FreeTemp(rl_free.reg.GetReg());
- FreeTemp(rl_free.reg.GetHighReg());
+void ArmMir2Lir::FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free) {
+ DCHECK(rl_keep.wide);
+ DCHECK(rl_free.wide);
+ if ((rl_free.reg.GetLowReg() != rl_keep.reg.GetLowReg()) &&
+ (rl_free.reg.GetLowReg() != rl_keep.reg.GetHighReg()) &&
+ (rl_free.reg.GetHighReg() != rl_keep.reg.GetLowReg()) &&
+ (rl_free.reg.GetHighReg() != rl_keep.reg.GetHighReg())) {
+ // No overlap, free.
+ FreeTemp(rl_free.reg);
}
}
+
/*
* TUNING: is true leaf? Can't just use METHOD_IS_LEAF to determine as some
* instructions might call out to C/assembly helper functions. Until
@@ -624,9 +623,9 @@
fp_spill_mask_ = ((1 << num_fp_spills_) - 1) << ARM_FP_CALLEE_SAVE_BASE;
}
-void ArmMir2Lir::FlushRegWide(int reg1, int reg2) {
- RegisterInfo* info1 = GetRegInfo(reg1);
- RegisterInfo* info2 = GetRegInfo(reg2);
+void ArmMir2Lir::FlushRegWide(RegStorage reg) {
+ RegisterInfo* info1 = GetRegInfo(reg.GetLowReg());
+ RegisterInfo* info2 = GetRegInfo(reg.GetHighReg());
DCHECK(info1 && info2 && info1->pair && info2->pair &&
(info1->partner == info2->reg) &&
(info2->partner == info1->reg));
@@ -642,16 +641,18 @@
mir_graph_->SRegToVReg(info1->s_reg))
info1 = info2;
int v_reg = mir_graph_->SRegToVReg(info1->s_reg);
- StoreBaseDispWide(rARM_SP, VRegOffset(v_reg), info1->reg, info1->partner);
+ StoreBaseDispWide(rs_rARM_SP, VRegOffset(v_reg),
+ RegStorage(RegStorage::k64BitPair, info1->reg, info1->partner));
}
}
-void ArmMir2Lir::FlushReg(int reg) {
- RegisterInfo* info = GetRegInfo(reg);
+void ArmMir2Lir::FlushReg(RegStorage reg) {
+ DCHECK(!reg.IsPair());
+ RegisterInfo* info = GetRegInfo(reg.GetReg());
if (info->live && info->dirty) {
info->dirty = false;
int v_reg = mir_graph_->SRegToVReg(info->s_reg);
- StoreBaseDisp(rARM_SP, VRegOffset(v_reg), reg, kWord);
+ StoreBaseDisp(rs_rARM_SP, VRegOffset(v_reg), reg, kWord);
}
}
@@ -660,6 +661,10 @@
return ARM_FPREG(reg);
}
+bool ArmMir2Lir::IsFpReg(RegStorage reg) {
+ return IsFpReg(reg.IsPair() ? reg.GetLowReg() : reg.GetReg());
+}
+
/* Clobber all regs that might be used by an external C call */
void ArmMir2Lir::ClobberCallerSave() {
Clobber(r0);
@@ -694,7 +699,7 @@
Clobber(r3);
MarkInUse(r2);
MarkInUse(r3);
- MarkPair(res.reg.GetReg(), res.reg.GetHighReg());
+ MarkPair(res.reg.GetLowReg(), res.reg.GetHighReg());
return res;
}
@@ -722,14 +727,14 @@
FreeTemp(r3);
}
-int ArmMir2Lir::LoadHelper(ThreadOffset offset) {
- LoadWordDisp(rARM_SELF, offset.Int32Value(), rARM_LR);
- return rARM_LR;
+RegStorage ArmMir2Lir::LoadHelper(ThreadOffset offset) {
+ LoadWordDisp(rs_rARM_SELF, offset.Int32Value(), rs_rARM_LR);
+ return rs_rARM_LR;
}
LIR* ArmMir2Lir::CheckSuspendUsingLoad() {
- int tmp = r0;
- LoadWordDisp(rARM_SELF, Thread::ThreadSuspendTriggerOffset().Int32Value(), tmp);
+ RegStorage tmp = rs_r0;
+ LoadWordDisp(rs_rARM_SELF, Thread::ThreadSuspendTriggerOffset().Int32Value(), tmp);
LIR* load2 = LoadWordDisp(tmp, 0, tmp);
return load2;
}
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index 1a7f2fc..cf90fb1 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -169,37 +169,37 @@
* 1) r_dest is freshly returned from AllocTemp or
* 2) The codegen is under fixed register usage
*/
-LIR* ArmMir2Lir::LoadConstantNoClobber(int r_dest, int value) {
+LIR* ArmMir2Lir::LoadConstantNoClobber(RegStorage r_dest, int value) {
LIR* res;
int mod_imm;
- if (ARM_FPREG(r_dest)) {
- return LoadFPConstantValue(r_dest, value);
+ if (ARM_FPREG(r_dest.GetReg())) {
+ return LoadFPConstantValue(r_dest.GetReg(), value);
}
/* See if the value can be constructed cheaply */
- if (ARM_LOWREG(r_dest) && (value >= 0) && (value <= 255)) {
- return NewLIR2(kThumbMovImm, r_dest, value);
+ if (ARM_LOWREG(r_dest.GetReg()) && (value >= 0) && (value <= 255)) {
+ return NewLIR2(kThumbMovImm, r_dest.GetReg(), value);
}
/* Check Modified immediate special cases */
mod_imm = ModifiedImmediate(value);
if (mod_imm >= 0) {
- res = NewLIR2(kThumb2MovI8M, r_dest, mod_imm);
+ res = NewLIR2(kThumb2MovI8M, r_dest.GetReg(), mod_imm);
return res;
}
mod_imm = ModifiedImmediate(~value);
if (mod_imm >= 0) {
- res = NewLIR2(kThumb2MvnI8M, r_dest, mod_imm);
+ res = NewLIR2(kThumb2MvnI8M, r_dest.GetReg(), mod_imm);
return res;
}
/* 16-bit immediate? */
if ((value & 0xffff) == value) {
- res = NewLIR2(kThumb2MovImm16, r_dest, value);
+ res = NewLIR2(kThumb2MovImm16, r_dest.GetReg(), value);
return res;
}
/* Do a low/high pair */
- res = NewLIR2(kThumb2MovImm16, r_dest, Low16Bits(value));
- NewLIR2(kThumb2MovImm16H, r_dest, High16Bits(value));
+ res = NewLIR2(kThumb2MovImm16, r_dest.GetReg(), Low16Bits(value));
+ NewLIR2(kThumb2MovImm16H, r_dest.GetReg(), High16Bits(value));
return res;
}
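
When none of the cheap forms above apply, the constant is materialized as a movw/movt pair: the low 16 bits go into the bottom half of the register, the high 16 bits into the top. A small C++ sketch of that split (helper names chosen to echo Low16Bits/High16Bits in the code, but the sketch itself is illustrative):

```cpp
#include <cassert>
#include <cstdint>

uint16_t Low16(uint32_t v)  { return static_cast<uint16_t>(v); }        // movw imm
uint16_t High16(uint32_t v) { return static_cast<uint16_t>(v >> 16); }  // movt imm

// What the destination register holds after the movw/movt pair.
uint32_t Rebuild(uint16_t lo, uint16_t hi) {
  return (static_cast<uint32_t>(hi) << 16) | lo;
}

int main() {
  uint32_t value = 0xDEADF00Fu;
  assert(Rebuild(Low16(value), High16(value)) == value);
  return 0;
}
```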
@@ -219,7 +219,7 @@
return branch;
}
-LIR* ArmMir2Lir::OpReg(OpKind op, int r_dest_src) {
+LIR* ArmMir2Lir::OpReg(OpKind op, RegStorage r_dest_src) {
ArmOpcode opcode = kThumbBkpt;
switch (op) {
case kOpBlx:
@@ -231,12 +231,13 @@
default:
LOG(FATAL) << "Bad opcode " << op;
}
- return NewLIR1(opcode, r_dest_src);
+ return NewLIR1(opcode, r_dest_src.GetReg());
}
-LIR* ArmMir2Lir::OpRegRegShift(OpKind op, int r_dest_src1, int r_src2,
+LIR* ArmMir2Lir::OpRegRegShift(OpKind op, RegStorage r_dest_src1, RegStorage r_src2,
int shift) {
- bool thumb_form = ((shift == 0) && ARM_LOWREG(r_dest_src1) && ARM_LOWREG(r_src2));
+ bool thumb_form =
+ ((shift == 0) && ARM_LOWREG(r_dest_src1.GetReg()) && ARM_LOWREG(r_src2.GetReg()));
ArmOpcode opcode = kThumbBkpt;
switch (op) {
case kOpAdc:
@@ -255,9 +256,9 @@
case kOpCmp:
if (thumb_form)
opcode = kThumbCmpRR;
- else if ((shift == 0) && !ARM_LOWREG(r_dest_src1) && !ARM_LOWREG(r_src2))
+ else if ((shift == 0) && !ARM_LOWREG(r_dest_src1.GetReg()) && !ARM_LOWREG(r_src2.GetReg()))
opcode = kThumbCmpHH;
- else if ((shift == 0) && ARM_LOWREG(r_dest_src1))
+ else if ((shift == 0) && ARM_LOWREG(r_dest_src1.GetReg()))
opcode = kThumbCmpLH;
else if (shift == 0)
opcode = kThumbCmpHL;
@@ -269,11 +270,11 @@
break;
case kOpMov:
DCHECK_EQ(shift, 0);
- if (ARM_LOWREG(r_dest_src1) && ARM_LOWREG(r_src2))
+ if (ARM_LOWREG(r_dest_src1.GetReg()) && ARM_LOWREG(r_src2.GetReg()))
opcode = kThumbMovRR;
- else if (!ARM_LOWREG(r_dest_src1) && !ARM_LOWREG(r_src2))
+ else if (!ARM_LOWREG(r_dest_src1.GetReg()) && !ARM_LOWREG(r_src2.GetReg()))
opcode = kThumbMovRR_H2H;
- else if (ARM_LOWREG(r_dest_src1))
+ else if (ARM_LOWREG(r_dest_src1.GetReg()))
opcode = kThumbMovRR_H2L;
else
opcode = kThumbMovRR_L2H;
@@ -324,7 +325,7 @@
DCHECK_EQ(shift, 0);
if (!thumb_form) {
// Binary, but rm is encoded twice.
- return NewLIR3(kThumb2RevRR, r_dest_src1, r_src2, r_src2);
+ return NewLIR3(kThumb2RevRR, r_dest_src1.GetReg(), r_src2.GetReg(), r_src2.GetReg());
}
opcode = kThumbRev;
break;
@@ -332,64 +333,64 @@
DCHECK_EQ(shift, 0);
if (!thumb_form) {
// Binary, but rm is encoded twice.
- return NewLIR3(kThumb2RevshRR, r_dest_src1, r_src2, r_src2);
+ return NewLIR3(kThumb2RevshRR, r_dest_src1.GetReg(), r_src2.GetReg(), r_src2.GetReg());
}
opcode = kThumbRevsh;
break;
case kOp2Byte:
DCHECK_EQ(shift, 0);
- return NewLIR4(kThumb2Sbfx, r_dest_src1, r_src2, 0, 8);
+ return NewLIR4(kThumb2Sbfx, r_dest_src1.GetReg(), r_src2.GetReg(), 0, 8);
case kOp2Short:
DCHECK_EQ(shift, 0);
- return NewLIR4(kThumb2Sbfx, r_dest_src1, r_src2, 0, 16);
+ return NewLIR4(kThumb2Sbfx, r_dest_src1.GetReg(), r_src2.GetReg(), 0, 16);
case kOp2Char:
DCHECK_EQ(shift, 0);
- return NewLIR4(kThumb2Ubfx, r_dest_src1, r_src2, 0, 16);
+ return NewLIR4(kThumb2Ubfx, r_dest_src1.GetReg(), r_src2.GetReg(), 0, 16);
default:
LOG(FATAL) << "Bad opcode: " << op;
break;
}
DCHECK(!IsPseudoLirOp(opcode));
if (EncodingMap[opcode].flags & IS_BINARY_OP) {
- return NewLIR2(opcode, r_dest_src1, r_src2);
+ return NewLIR2(opcode, r_dest_src1.GetReg(), r_src2.GetReg());
} else if (EncodingMap[opcode].flags & IS_TERTIARY_OP) {
if (EncodingMap[opcode].field_loc[2].kind == kFmtShift) {
- return NewLIR3(opcode, r_dest_src1, r_src2, shift);
+ return NewLIR3(opcode, r_dest_src1.GetReg(), r_src2.GetReg(), shift);
} else {
- return NewLIR3(opcode, r_dest_src1, r_dest_src1, r_src2);
+ return NewLIR3(opcode, r_dest_src1.GetReg(), r_dest_src1.GetReg(), r_src2.GetReg());
}
} else if (EncodingMap[opcode].flags & IS_QUAD_OP) {
- return NewLIR4(opcode, r_dest_src1, r_dest_src1, r_src2, shift);
+ return NewLIR4(opcode, r_dest_src1.GetReg(), r_dest_src1.GetReg(), r_src2.GetReg(), shift);
} else {
LOG(FATAL) << "Unexpected encoding operand count";
return NULL;
}
}
-LIR* ArmMir2Lir::OpRegReg(OpKind op, int r_dest_src1, int r_src2) {
+LIR* ArmMir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) {
return OpRegRegShift(op, r_dest_src1, r_src2, 0);
}
-LIR* ArmMir2Lir::OpMovRegMem(int r_dest, int r_base, int offset, MoveType move_type) {
+LIR* ArmMir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type) {
UNIMPLEMENTED(FATAL);
return nullptr;
}
-LIR* ArmMir2Lir::OpMovMemReg(int r_base, int offset, int r_src, MoveType move_type) {
+LIR* ArmMir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) {
UNIMPLEMENTED(FATAL);
return nullptr;
}
-LIR* ArmMir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, int r_dest, int r_src) {
+LIR* ArmMir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) {
LOG(FATAL) << "Unexpected use of OpCondRegReg for Arm";
return NULL;
}
-LIR* ArmMir2Lir::OpRegRegRegShift(OpKind op, int r_dest, int r_src1,
- int r_src2, int shift) {
+LIR* ArmMir2Lir::OpRegRegRegShift(OpKind op, RegStorage r_dest, RegStorage r_src1,
+ RegStorage r_src2, int shift) {
ArmOpcode opcode = kThumbBkpt;
- bool thumb_form = (shift == 0) && ARM_LOWREG(r_dest) && ARM_LOWREG(r_src1) &&
- ARM_LOWREG(r_src2);
+ bool thumb_form = (shift == 0) && ARM_LOWREG(r_dest.GetReg()) && ARM_LOWREG(r_src1.GetReg()) &&
+ ARM_LOWREG(r_src2.GetReg());
switch (op) {
case kOpAdd:
opcode = (thumb_form) ? kThumbAddRRR : kThumb2AddRRR;
@@ -448,51 +449,51 @@
}
DCHECK(!IsPseudoLirOp(opcode));
if (EncodingMap[opcode].flags & IS_QUAD_OP) {
- return NewLIR4(opcode, r_dest, r_src1, r_src2, shift);
+ return NewLIR4(opcode, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg(), shift);
} else {
DCHECK(EncodingMap[opcode].flags & IS_TERTIARY_OP);
- return NewLIR3(opcode, r_dest, r_src1, r_src2);
+ return NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg());
}
}
-LIR* ArmMir2Lir::OpRegRegReg(OpKind op, int r_dest, int r_src1, int r_src2) {
+LIR* ArmMir2Lir::OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2) {
return OpRegRegRegShift(op, r_dest, r_src1, r_src2, 0);
}
-LIR* ArmMir2Lir::OpRegRegImm(OpKind op, int r_dest, int r_src1, int value) {
+LIR* ArmMir2Lir::OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value) {
LIR* res;
bool neg = (value < 0);
int32_t abs_value = (neg) ? -value : value;
ArmOpcode opcode = kThumbBkpt;
ArmOpcode alt_opcode = kThumbBkpt;
- bool all_low_regs = (ARM_LOWREG(r_dest) && ARM_LOWREG(r_src1));
+ bool all_low_regs = (ARM_LOWREG(r_dest.GetReg()) && ARM_LOWREG(r_src1.GetReg()));
int32_t mod_imm = ModifiedImmediate(value);
switch (op) {
case kOpLsl:
if (all_low_regs)
- return NewLIR3(kThumbLslRRI5, r_dest, r_src1, value);
+ return NewLIR3(kThumbLslRRI5, r_dest.GetReg(), r_src1.GetReg(), value);
else
- return NewLIR3(kThumb2LslRRI5, r_dest, r_src1, value);
+ return NewLIR3(kThumb2LslRRI5, r_dest.GetReg(), r_src1.GetReg(), value);
case kOpLsr:
if (all_low_regs)
- return NewLIR3(kThumbLsrRRI5, r_dest, r_src1, value);
+ return NewLIR3(kThumbLsrRRI5, r_dest.GetReg(), r_src1.GetReg(), value);
else
- return NewLIR3(kThumb2LsrRRI5, r_dest, r_src1, value);
+ return NewLIR3(kThumb2LsrRRI5, r_dest.GetReg(), r_src1.GetReg(), value);
case kOpAsr:
if (all_low_regs)
- return NewLIR3(kThumbAsrRRI5, r_dest, r_src1, value);
+ return NewLIR3(kThumbAsrRRI5, r_dest.GetReg(), r_src1.GetReg(), value);
else
- return NewLIR3(kThumb2AsrRRI5, r_dest, r_src1, value);
+ return NewLIR3(kThumb2AsrRRI5, r_dest.GetReg(), r_src1.GetReg(), value);
case kOpRor:
- return NewLIR3(kThumb2RorRRI5, r_dest, r_src1, value);
+ return NewLIR3(kThumb2RorRRI5, r_dest.GetReg(), r_src1.GetReg(), value);
case kOpAdd:
- if (ARM_LOWREG(r_dest) && (r_src1 == r13sp) &&
+ if (ARM_LOWREG(r_dest.GetReg()) && (r_src1 == rs_r13sp) &&
(value <= 1020) && ((value & 0x3) == 0)) {
- return NewLIR3(kThumbAddSpRel, r_dest, r_src1, value >> 2);
- } else if (ARM_LOWREG(r_dest) && (r_src1 == r15pc) &&
+ return NewLIR3(kThumbAddSpRel, r_dest.GetReg(), r_src1.GetReg(), value >> 2);
+ } else if (ARM_LOWREG(r_dest.GetReg()) && (r_src1 == rs_r15pc) &&
(value <= 1020) && ((value & 0x3) == 0)) {
- return NewLIR3(kThumbAddPcRel, r_dest, r_src1, value >> 2);
+ return NewLIR3(kThumbAddPcRel, r_dest.GetReg(), r_src1.GetReg(), value >> 2);
}
// Note: intentional fallthrough
case kOpSub:
@@ -501,7 +502,7 @@
opcode = (neg) ? kThumbSubRRI3 : kThumbAddRRI3;
else
opcode = (neg) ? kThumbAddRRI3 : kThumbSubRRI3;
- return NewLIR3(opcode, r_dest, r_src1, abs_value);
+ return NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), abs_value);
}
if (mod_imm < 0) {
mod_imm = ModifiedImmediate(-value);
@@ -516,7 +517,7 @@
opcode = (neg) ? kThumb2SubRRI12 : kThumb2AddRRI12;
else
opcode = (neg) ? kThumb2AddRRI12 : kThumb2SubRRI12;
- return NewLIR3(opcode, r_dest, r_src1, abs_value);
+ return NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), abs_value);
}
if (op == kOpSub) {
opcode = kThumb2SubRRI8M;
@@ -546,7 +547,7 @@
if (mod_imm < 0) {
mod_imm = ModifiedImmediate(~value);
if (mod_imm >= 0) {
- return NewLIR3(kThumb2BicRRI8M, r_dest, r_src1, mod_imm);
+ return NewLIR3(kThumb2BicRRI8M, r_dest.GetReg(), r_src1.GetReg(), mod_imm);
}
}
opcode = kThumb2AndRRI8M;
@@ -564,13 +565,13 @@
case kOpCmp: {
LIR* res;
if (mod_imm >= 0) {
- res = NewLIR2(kThumb2CmpRI8M, r_src1, mod_imm);
+ res = NewLIR2(kThumb2CmpRI8M, r_src1.GetReg(), mod_imm);
} else {
mod_imm = ModifiedImmediate(-value);
if (mod_imm >= 0) {
- res = NewLIR2(kThumb2CmnRI8M, r_src1, mod_imm);
+ res = NewLIR2(kThumb2CmnRI8M, r_src1.GetReg(), mod_imm);
} else {
- int r_tmp = AllocTemp();
+ RegStorage r_tmp = AllocTemp();
res = LoadConstant(r_tmp, value);
OpRegReg(kOpCmp, r_src1, r_tmp);
FreeTemp(r_tmp);
@@ -583,28 +584,28 @@
}
if (mod_imm >= 0) {
- return NewLIR3(opcode, r_dest, r_src1, mod_imm);
+ return NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), mod_imm);
} else {
- int r_scratch = AllocTemp();
+ RegStorage r_scratch = AllocTemp();
LoadConstant(r_scratch, value);
if (EncodingMap[alt_opcode].flags & IS_QUAD_OP)
- res = NewLIR4(alt_opcode, r_dest, r_src1, r_scratch, 0);
+ res = NewLIR4(alt_opcode, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg(), 0);
else
- res = NewLIR3(alt_opcode, r_dest, r_src1, r_scratch);
+ res = NewLIR3(alt_opcode, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg());
FreeTemp(r_scratch);
return res;
}
}
/* Handle Thumb-only variants here - otherwise punt to OpRegRegImm */
-LIR* ArmMir2Lir::OpRegImm(OpKind op, int r_dest_src1, int value) {
+LIR* ArmMir2Lir::OpRegImm(OpKind op, RegStorage r_dest_src1, int value) {
bool neg = (value < 0);
int32_t abs_value = (neg) ? -value : value;
- bool short_form = (((abs_value & 0xff) == abs_value) && ARM_LOWREG(r_dest_src1));
+ bool short_form = (((abs_value & 0xff) == abs_value) && ARM_LOWREG(r_dest_src1.GetReg()));
ArmOpcode opcode = kThumbBkpt;
switch (op) {
case kOpAdd:
- if (!neg && (r_dest_src1 == r13sp) && (value <= 508)) { /* sp */
+ if (!neg && (r_dest_src1 == rs_r13sp) && (value <= 508)) { /* sp */
DCHECK_EQ((value & 0x3), 0);
return NewLIR1(kThumbAddSpI7, value >> 2);
} else if (short_form) {
@@ -612,7 +613,7 @@
}
break;
case kOpSub:
- if (!neg && (r_dest_src1 == r13sp) && (value <= 508)) { /* sp */
+ if (!neg && (r_dest_src1 == rs_r13sp) && (value <= 508)) { /* sp */
DCHECK_EQ((value & 0x3), 0);
return NewLIR1(kThumbSubSpI7, value >> 2);
} else if (short_form) {
@@ -632,18 +633,18 @@
break;
}
if (short_form) {
- return NewLIR2(opcode, r_dest_src1, abs_value);
+ return NewLIR2(opcode, r_dest_src1.GetReg(), abs_value);
} else {
return OpRegRegImm(op, r_dest_src1, r_dest_src1, value);
}
}
-LIR* ArmMir2Lir::LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value) {
+LIR* ArmMir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
LIR* res = NULL;
int32_t val_lo = Low32Bits(value);
int32_t val_hi = High32Bits(value);
- int target_reg = S2d(r_dest_lo, r_dest_hi);
- if (ARM_FPREG(r_dest_lo)) {
+ int target_reg = S2d(r_dest.GetLowReg(), r_dest.GetHighReg());
+ if (ARM_FPREG(r_dest.GetLowReg())) {
if ((val_lo == 0) && (val_hi == 0)) {
// TODO: we need better info about the target CPU. A vector exclusive-or
// would probably be better here if we could rely on its existence.
@@ -659,8 +660,8 @@
}
} else {
if ((InexpensiveConstantInt(val_lo) && (InexpensiveConstantInt(val_hi)))) {
- res = LoadConstantNoClobber(r_dest_lo, val_lo);
- LoadConstantNoClobber(r_dest_hi, val_hi);
+ res = LoadConstantNoClobber(r_dest.GetLow(), val_lo);
+ LoadConstantNoClobber(r_dest.GetHigh(), val_hi);
}
}
if (res == NULL) {
@@ -669,12 +670,13 @@
if (data_target == NULL) {
data_target = AddWideData(&literal_list_, val_lo, val_hi);
}
- if (ARM_FPREG(r_dest_lo)) {
+ if (ARM_FPREG(r_dest.GetLowReg())) {
res = RawLIR(current_dalvik_offset_, kThumb2Vldrd,
target_reg, r15pc, 0, 0, 0, data_target);
} else {
+ DCHECK(r_dest.IsPair());
res = RawLIR(current_dalvik_offset_, kThumb2LdrdPcRel8,
- r_dest_lo, r_dest_hi, r15pc, 0, 0, data_target);
+ r_dest.GetLowReg(), r_dest.GetHighReg(), r15pc, 0, 0, data_target);
}
SetMemRefType(res, true, kLiteral);
AppendLIR(res);
@@ -686,23 +688,24 @@
return ((amount & 0x1f) << 2) | code;
}
-LIR* ArmMir2Lir::LoadBaseIndexed(int rBase, int r_index, int r_dest,
+LIR* ArmMir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
int scale, OpSize size) {
- bool all_low_regs = ARM_LOWREG(rBase) && ARM_LOWREG(r_index) && ARM_LOWREG(r_dest);
+ bool all_low_regs = ARM_LOWREG(r_base.GetReg()) && ARM_LOWREG(r_index.GetReg()) &&
+ ARM_LOWREG(r_dest.GetReg());
LIR* load;
ArmOpcode opcode = kThumbBkpt;
bool thumb_form = (all_low_regs && (scale == 0));
- int reg_ptr;
+ RegStorage reg_ptr;
- if (ARM_FPREG(r_dest)) {
- if (ARM_SINGLEREG(r_dest)) {
+ if (ARM_FPREG(r_dest.GetReg())) {
+ if (ARM_SINGLEREG(r_dest.GetReg())) {
DCHECK((size == kWord) || (size == kSingle));
opcode = kThumb2Vldrs;
size = kSingle;
} else {
- DCHECK(ARM_DOUBLEREG(r_dest));
+ DCHECK(ARM_DOUBLEREG(r_dest.GetReg()));
DCHECK((size == kLong) || (size == kDouble));
- DCHECK_EQ((r_dest & 0x1), 0);
+ DCHECK_EQ((r_dest.GetReg() & 0x1), 0);
opcode = kThumb2Vldrd;
size = kDouble;
}
@@ -716,12 +719,12 @@
case kSingle:
reg_ptr = AllocTemp();
if (scale) {
- NewLIR4(kThumb2AddRRR, reg_ptr, rBase, r_index,
+ NewLIR4(kThumb2AddRRR, reg_ptr.GetReg(), r_base.GetReg(), r_index.GetReg(),
EncodeShift(kArmLsl, scale));
} else {
- OpRegRegReg(kOpAdd, reg_ptr, rBase, r_index);
+ OpRegRegReg(kOpAdd, reg_ptr, r_base, r_index);
}
- load = NewLIR3(opcode, r_dest, reg_ptr, 0);
+ load = NewLIR3(opcode, r_dest.GetReg(), reg_ptr.GetReg(), 0);
FreeTemp(reg_ptr);
return load;
case kWord:
@@ -743,30 +746,31 @@
LOG(FATAL) << "Bad size: " << size;
}
if (thumb_form)
- load = NewLIR3(opcode, r_dest, rBase, r_index);
+ load = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg());
else
- load = NewLIR4(opcode, r_dest, rBase, r_index, scale);
+ load = NewLIR4(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg(), scale);
return load;
}
-LIR* ArmMir2Lir::StoreBaseIndexed(int rBase, int r_index, int r_src,
+LIR* ArmMir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
int scale, OpSize size) {
- bool all_low_regs = ARM_LOWREG(rBase) && ARM_LOWREG(r_index) && ARM_LOWREG(r_src);
+ bool all_low_regs = ARM_LOWREG(r_base.GetReg()) && ARM_LOWREG(r_index.GetReg()) &&
+ ARM_LOWREG(r_src.GetReg());
LIR* store = NULL;
ArmOpcode opcode = kThumbBkpt;
bool thumb_form = (all_low_regs && (scale == 0));
- int reg_ptr;
+ RegStorage reg_ptr;
- if (ARM_FPREG(r_src)) {
- if (ARM_SINGLEREG(r_src)) {
+ if (ARM_FPREG(r_src.GetReg())) {
+ if (ARM_SINGLEREG(r_src.GetReg())) {
DCHECK((size == kWord) || (size == kSingle));
opcode = kThumb2Vstrs;
size = kSingle;
} else {
- DCHECK(ARM_DOUBLEREG(r_src));
+ DCHECK(ARM_DOUBLEREG(r_src.GetReg()));
DCHECK((size == kLong) || (size == kDouble));
- DCHECK_EQ((r_src & 0x1), 0);
+ DCHECK_EQ((r_src.GetReg() & 0x1), 0);
opcode = kThumb2Vstrd;
size = kDouble;
}
@@ -780,12 +784,12 @@
case kSingle:
reg_ptr = AllocTemp();
if (scale) {
- NewLIR4(kThumb2AddRRR, reg_ptr, rBase, r_index,
+ NewLIR4(kThumb2AddRRR, reg_ptr.GetReg(), r_base.GetReg(), r_index.GetReg(),
EncodeShift(kArmLsl, scale));
} else {
- OpRegRegReg(kOpAdd, reg_ptr, rBase, r_index);
+ OpRegRegReg(kOpAdd, reg_ptr, r_base, r_index);
}
- store = NewLIR3(opcode, r_src, reg_ptr, 0);
+ store = NewLIR3(opcode, r_src.GetReg(), reg_ptr.GetReg(), 0);
FreeTemp(reg_ptr);
return store;
case kWord:
@@ -803,9 +807,9 @@
LOG(FATAL) << "Bad size: " << size;
}
if (thumb_form)
- store = NewLIR3(opcode, r_src, rBase, r_index);
+ store = NewLIR3(opcode, r_src.GetReg(), r_base.GetReg(), r_index.GetReg());
else
- store = NewLIR4(opcode, r_src, rBase, r_index, scale);
+ store = NewLIR4(opcode, r_src.GetReg(), r_base.GetReg(), r_index.GetReg(), scale);
return store;
}
@@ -815,45 +819,44 @@
* on base (which must have an associated s_reg and MIR). If not
* performing null check, incoming MIR can be null.
*/
-LIR* ArmMir2Lir::LoadBaseDispBody(int rBase, int displacement, int r_dest,
- int r_dest_hi, OpSize size, int s_reg) {
+LIR* ArmMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest,
+ OpSize size, int s_reg) {
LIR* load = NULL;
ArmOpcode opcode = kThumbBkpt;
bool short_form = false;
bool thumb2Form = (displacement < 4092 && displacement >= 0);
- bool all_low_regs = (ARM_LOWREG(rBase) && ARM_LOWREG(r_dest));
+ bool all_low = r_dest.Is32Bit() && ARM_LOWREG(r_base.GetReg()) && ARM_LOWREG(r_dest.GetReg());
int encoded_disp = displacement;
- bool is64bit = false;
bool already_generated = false;
+ int dest_low_reg = r_dest.IsPair() ? r_dest.GetLowReg() : r_dest.GetReg();
switch (size) {
case kDouble:
case kLong:
- is64bit = true;
- if (ARM_FPREG(r_dest)) {
- if (ARM_SINGLEREG(r_dest)) {
- DCHECK(ARM_FPREG(r_dest_hi));
- r_dest = S2d(r_dest, r_dest_hi);
+ if (ARM_FPREG(dest_low_reg)) {
+ // Note: once doubles no longer use register pairs, replace this conversion with a DCHECK.
+ if (r_dest.IsPair()) {
+ DCHECK(ARM_FPREG(r_dest.GetHighReg()));
+ r_dest = RegStorage::Solo64(S2d(r_dest.GetLowReg(), r_dest.GetHighReg()));
}
opcode = kThumb2Vldrd;
if (displacement <= 1020) {
short_form = true;
encoded_disp >>= 2;
}
- break;
} else {
if (displacement <= 1020) {
- load = NewLIR4(kThumb2LdrdI8, r_dest, r_dest_hi, rBase, displacement >> 2);
+ load = NewLIR4(kThumb2LdrdI8, r_dest.GetLowReg(), r_dest.GetHighReg(), r_base.GetReg(),
+ displacement >> 2);
} else {
- load = LoadBaseDispBody(rBase, displacement, r_dest,
- -1, kWord, s_reg);
- LoadBaseDispBody(rBase, displacement + 4, r_dest_hi,
- -1, kWord, INVALID_SREG);
+ load = LoadBaseDispBody(r_base, displacement, r_dest.GetLow(), kWord, s_reg);
+ LoadBaseDispBody(r_base, displacement + 4, r_dest.GetHigh(), kWord, INVALID_SREG);
}
already_generated = true;
}
+ break;
case kSingle:
case kWord:
- if (ARM_FPREG(r_dest)) {
+ if (ARM_FPREG(r_dest.GetReg())) {
opcode = kThumb2Vldrs;
if (displacement <= 1020) {
short_form = true;
@@ -861,17 +864,17 @@
}
break;
}
- if (ARM_LOWREG(r_dest) && (rBase == r15pc) &&
+ if (ARM_LOWREG(r_dest.GetReg()) && (r_base.GetReg() == r15pc) &&
(displacement <= 1020) && (displacement >= 0)) {
short_form = true;
encoded_disp >>= 2;
opcode = kThumbLdrPcRel;
- } else if (ARM_LOWREG(r_dest) && (rBase == r13sp) &&
+ } else if (ARM_LOWREG(r_dest.GetReg()) && (r_base.GetReg() == r13sp) &&
(displacement <= 1020) && (displacement >= 0)) {
short_form = true;
encoded_disp >>= 2;
opcode = kThumbLdrSpRel;
- } else if (all_low_regs && displacement < 128 && displacement >= 0) {
+ } else if (all_low && displacement < 128 && displacement >= 0) {
DCHECK_EQ((displacement & 0x3), 0);
short_form = true;
encoded_disp >>= 2;
@@ -882,7 +885,7 @@
}
break;
case kUnsignedHalf:
- if (all_low_regs && displacement < 64 && displacement >= 0) {
+ if (all_low && displacement < 64 && displacement >= 0) {
DCHECK_EQ((displacement & 0x1), 0);
short_form = true;
encoded_disp >>= 1;
@@ -899,7 +902,7 @@
}
break;
case kUnsignedByte:
- if (all_low_regs && displacement < 32 && displacement >= 0) {
+ if (all_low && displacement < 32 && displacement >= 0) {
short_form = true;
opcode = kThumbLdrbRRI5;
} else if (thumb2Form) {
@@ -919,65 +922,67 @@
if (!already_generated) {
if (short_form) {
- load = NewLIR3(opcode, r_dest, rBase, encoded_disp);
+ load = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), encoded_disp);
} else {
- int reg_offset = AllocTemp();
+ RegStorage reg_offset = AllocTemp();
LoadConstant(reg_offset, encoded_disp);
- if (ARM_FPREG(r_dest)) {
+ if (ARM_FPREG(dest_low_reg)) {
// No index ops - must use a long sequence. Turn the offset into a direct pointer.
- OpRegReg(kOpAdd, reg_offset, rBase);
- load = LoadBaseDispBody(reg_offset, 0, r_dest, r_dest_hi, size, s_reg);
+ OpRegReg(kOpAdd, reg_offset, r_base);
+ load = LoadBaseDispBody(reg_offset, 0, r_dest, size, s_reg);
} else {
- load = LoadBaseIndexed(rBase, reg_offset, r_dest, 0, size);
+ load = LoadBaseIndexed(r_base, reg_offset, r_dest, 0, size);
}
FreeTemp(reg_offset);
}
}
// TODO: in future may need to differentiate Dalvik accesses w/ spills
- if (rBase == rARM_SP) {
- AnnotateDalvikRegAccess(load, displacement >> 2, true /* is_load */, is64bit);
+ if (r_base == rs_rARM_SP) {
+ AnnotateDalvikRegAccess(load, displacement >> 2, true /* is_load */, r_dest.Is64Bit());
}
return load;
}
-LIR* ArmMir2Lir::LoadBaseDisp(int rBase, int displacement, int r_dest,
- OpSize size, int s_reg) {
- return LoadBaseDispBody(rBase, displacement, r_dest, -1, size, s_reg);
+LIR* ArmMir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size,
+ int s_reg) {
+ DCHECK(!((size == kLong) || (size == kDouble)));
+ return LoadBaseDispBody(r_base, displacement, r_dest, size, s_reg);
}
-LIR* ArmMir2Lir::LoadBaseDispWide(int rBase, int displacement, int r_dest_lo,
- int r_dest_hi, int s_reg) {
- return LoadBaseDispBody(rBase, displacement, r_dest_lo, r_dest_hi, kLong, s_reg);
+LIR* ArmMir2Lir::LoadBaseDispWide(RegStorage r_base, int displacement, RegStorage r_dest,
+ int s_reg) {
+ return LoadBaseDispBody(r_base, displacement, r_dest, kLong, s_reg);
}
-LIR* ArmMir2Lir::StoreBaseDispBody(int rBase, int displacement,
- int r_src, int r_src_hi, OpSize size) {
+LIR* ArmMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src,
+ OpSize size) {
LIR* store = NULL;
ArmOpcode opcode = kThumbBkpt;
bool short_form = false;
bool thumb2Form = (displacement < 4092 && displacement >= 0);
- bool all_low_regs = (ARM_LOWREG(rBase) && ARM_LOWREG(r_src));
+ bool all_low = r_src.Is32Bit() && (ARM_LOWREG(r_base.GetReg()) && ARM_LOWREG(r_src.GetReg()));
int encoded_disp = displacement;
- bool is64bit = false;
bool already_generated = false;
+ int src_low_reg = r_src.IsPair() ? r_src.GetLowReg() : r_src.GetReg();
switch (size) {
case kLong:
case kDouble:
- is64bit = true;
- if (!ARM_FPREG(r_src)) {
+ if (!ARM_FPREG(src_low_reg)) {
if (displacement <= 1020) {
- store = NewLIR4(kThumb2StrdI8, r_src, r_src_hi, rBase, displacement >> 2);
+ store = NewLIR4(kThumb2StrdI8, r_src.GetLowReg(), r_src.GetHighReg(), r_base.GetReg(),
+ displacement >> 2);
} else {
- store = StoreBaseDispBody(rBase, displacement, r_src, -1, kWord);
- StoreBaseDispBody(rBase, displacement + 4, r_src_hi, -1, kWord);
+ store = StoreBaseDispBody(r_base, displacement, r_src.GetLow(), kWord);
+ StoreBaseDispBody(r_base, displacement + 4, r_src.GetHigh(), kWord);
}
already_generated = true;
} else {
- if (ARM_SINGLEREG(r_src)) {
- DCHECK(ARM_FPREG(r_src_hi));
- r_src = S2d(r_src, r_src_hi);
+ // Note: once doubles stop using register pairs, replace this conversion with a DCHECK.
+ if (r_src.IsPair()) {
+ DCHECK(ARM_FPREG(r_src.GetHighReg()));
+ r_src = RegStorage::Solo64(S2d(r_src.GetLowReg(), r_src.GetHighReg()));
}
opcode = kThumb2Vstrd;
if (displacement <= 1020) {
@@ -988,8 +993,8 @@
break;
case kSingle:
case kWord:
- if (ARM_FPREG(r_src)) {
- DCHECK(ARM_SINGLEREG(r_src));
+ if (ARM_FPREG(r_src.GetReg())) {
+ DCHECK(ARM_SINGLEREG(r_src.GetReg()));
opcode = kThumb2Vstrs;
if (displacement <= 1020) {
short_form = true;
@@ -997,12 +1002,12 @@
}
break;
}
- if (ARM_LOWREG(r_src) && (rBase == r13sp) &&
+ if (ARM_LOWREG(r_src.GetReg()) && (r_base == rs_r13sp) &&
(displacement <= 1020) && (displacement >= 0)) {
short_form = true;
encoded_disp >>= 2;
opcode = kThumbStrSpRel;
- } else if (all_low_regs && displacement < 128 && displacement >= 0) {
+ } else if (all_low && displacement < 128 && displacement >= 0) {
DCHECK_EQ((displacement & 0x3), 0);
short_form = true;
encoded_disp >>= 2;
@@ -1014,7 +1019,7 @@
break;
case kUnsignedHalf:
case kSignedHalf:
- if (all_low_regs && displacement < 64 && displacement >= 0) {
+ if (all_low && displacement < 64 && displacement >= 0) {
DCHECK_EQ((displacement & 0x1), 0);
short_form = true;
encoded_disp >>= 1;
@@ -1026,7 +1031,7 @@
break;
case kUnsignedByte:
case kSignedByte:
- if (all_low_regs && displacement < 32 && displacement >= 0) {
+ if (all_low && displacement < 32 && displacement >= 0) {
short_form = true;
opcode = kThumbStrbRRI5;
} else if (thumb2Form) {
@@ -1039,52 +1044,52 @@
}
if (!already_generated) {
if (short_form) {
- store = NewLIR3(opcode, r_src, rBase, encoded_disp);
+ store = NewLIR3(opcode, r_src.GetReg(), r_base.GetReg(), encoded_disp);
} else {
- int r_scratch = AllocTemp();
+ RegStorage r_scratch = AllocTemp();
LoadConstant(r_scratch, encoded_disp);
- if (ARM_FPREG(r_src)) {
+ if (ARM_FPREG(src_low_reg)) {
// No index ops - must use a long sequence. Turn the offset into a direct pointer.
- OpRegReg(kOpAdd, r_scratch, rBase);
- store = StoreBaseDispBody(r_scratch, 0, r_src, r_src_hi, size);
+ OpRegReg(kOpAdd, r_scratch, r_base);
+ store = StoreBaseDispBody(r_scratch, 0, r_src, size);
} else {
- store = StoreBaseIndexed(rBase, r_scratch, r_src, 0, size);
+ store = StoreBaseIndexed(r_base, r_scratch, r_src, 0, size);
}
FreeTemp(r_scratch);
}
}
// TODO: In future, may need to differentiate Dalvik & spill accesses
- if (rBase == rARM_SP) {
- AnnotateDalvikRegAccess(store, displacement >> 2, false /* is_load */, is64bit);
+ if (r_base == rs_rARM_SP) {
+ AnnotateDalvikRegAccess(store, displacement >> 2, false /* is_load */, r_src.Is64Bit());
}
return store;
}
-LIR* ArmMir2Lir::StoreBaseDisp(int rBase, int displacement, int r_src,
+LIR* ArmMir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
OpSize size) {
- return StoreBaseDispBody(rBase, displacement, r_src, -1, size);
+ DCHECK(!((size == kLong) || (size == kDouble)));
+ return StoreBaseDispBody(r_base, displacement, r_src, size);
}
-LIR* ArmMir2Lir::StoreBaseDispWide(int rBase, int displacement,
- int r_src_lo, int r_src_hi) {
- return StoreBaseDispBody(rBase, displacement, r_src_lo, r_src_hi, kLong);
+LIR* ArmMir2Lir::StoreBaseDispWide(RegStorage r_base, int displacement, RegStorage r_src) {
+ return StoreBaseDispBody(r_base, displacement, r_src, kLong);
}
-LIR* ArmMir2Lir::OpFpRegCopy(int r_dest, int r_src) {
+LIR* ArmMir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
int opcode;
- DCHECK_EQ(ARM_DOUBLEREG(r_dest), ARM_DOUBLEREG(r_src));
- if (ARM_DOUBLEREG(r_dest)) {
+ DCHECK_EQ(ARM_DOUBLEREG(r_dest.GetReg()), ARM_DOUBLEREG(r_src.GetReg()));
+ if (ARM_DOUBLEREG(r_dest.GetReg())) {
opcode = kThumb2Vmovd;
} else {
- if (ARM_SINGLEREG(r_dest)) {
- opcode = ARM_SINGLEREG(r_src) ? kThumb2Vmovs : kThumb2Fmsr;
+ if (ARM_SINGLEREG(r_dest.GetReg())) {
+ opcode = ARM_SINGLEREG(r_src.GetReg()) ? kThumb2Vmovs : kThumb2Fmsr;
} else {
- DCHECK(ARM_SINGLEREG(r_src));
+ DCHECK(ARM_SINGLEREG(r_src.GetReg()));
opcode = kThumb2Fmrs;
}
}
- LIR* res = RawLIR(current_dalvik_offset_, opcode, r_dest, r_src);
+ LIR* res = RawLIR(current_dalvik_offset_, opcode, r_dest.GetReg(), r_src.GetReg());
if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
res->flags.is_nop = true;
}
@@ -1096,26 +1101,26 @@
return NULL;
}
-LIR* ArmMir2Lir::OpMem(OpKind op, int rBase, int disp) {
+LIR* ArmMir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
LOG(FATAL) << "Unexpected use of OpMem for Arm";
return NULL;
}
-LIR* ArmMir2Lir::StoreBaseIndexedDisp(int rBase, int r_index, int scale,
- int displacement, int r_src, int r_src_hi, OpSize size,
- int s_reg) {
+LIR* ArmMir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
+ int displacement, RegStorage r_src, RegStorage r_src_hi,
+ OpSize size, int s_reg) {
LOG(FATAL) << "Unexpected use of StoreBaseIndexedDisp for Arm";
return NULL;
}
-LIR* ArmMir2Lir::OpRegMem(OpKind op, int r_dest, int rBase, int offset) {
+LIR* ArmMir2Lir::OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset) {
LOG(FATAL) << "Unexpected use of OpRegMem for Arm";
return NULL;
}
-LIR* ArmMir2Lir::LoadBaseIndexedDisp(int rBase, int r_index, int scale,
- int displacement, int r_dest, int r_dest_hi, OpSize size,
- int s_reg) {
+LIR* ArmMir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
+ int displacement, RegStorage r_dest, RegStorage r_dest_hi,
+ OpSize size, int s_reg) {
LOG(FATAL) << "Unexpected use of LoadBaseIndexedDisp for Arm";
return NULL;
}
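The utility_arm.cc hunks above are mostly mechanical: each raw int register number becomes a RegStorage, which records whether a value occupies one 32-bit register or a low/high pair. A minimal sketch of the resulting call-site pattern, using only constructors and accessors that appear in this patch (r0/r1 are placeholders for real target registers):

    RegStorage lo = RegStorage::Solo32(r0);             // 32-bit solo
    RegStorage hi = RegStorage::Solo32(r1);
    RegStorage wide = RegStorage::MakeRegPair(lo, hi);  // 64-bit pair
    if (wide.IsPair()) {
      int low_num = wide.GetLowReg();    // raw numbers for LIR operands
      int high_num = wide.GetHighReg();
    } else {
      int num = wide.GetReg();           // solo case
    }

This is why LoadBaseDispBody and StoreBaseDispBody drop their *_hi parameters: a wide value travels inside the single RegStorage argument and is split with GetLow()/GetHigh() only where the encoding needs two word-sized operands.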
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 60f8796..717ad86 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -1113,7 +1113,7 @@
return (abs(mir_graph_->SRegToVReg(rl_src.s_reg_low) - mir_graph_->SRegToVReg(rl_dest.s_reg_low)) == 1);
}
-LIR *Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, int temp_reg, int base_reg,
+LIR *Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
int offset, int check_value, LIR* target) {
// Handle this for architectures that can't compare to memory.
LoadWordDisp(base_reg, offset, temp_reg);
@@ -1168,4 +1168,12 @@
return nullptr;
}
+RegLocation Mir2Lir::NarrowRegLoc(RegLocation loc) {
+ loc.wide = false;
+ if (loc.reg.IsPair()) {
+ loc.reg = loc.reg.GetLow();
+ }
+ return loc;
+}
+
} // namespace art
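NarrowRegLoc gives callers a cheap way to treat the low half of a wide location as a narrow one. A usage sketch, with the setup assumed rather than taken from this patch:

    RegLocation rl_wide = LoadValueWide(rl_src, kCoreReg);  // reg may be a pair
    RegLocation rl_low = NarrowRegLoc(rl_wide);             // wide flag cleared
    // If rl_wide.reg was a pair, rl_low.reg is now its low solo register.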
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 8c3a11fb..b23e10f 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -54,12 +54,15 @@
return branch;
}
-LIR* Mir2Lir::GenImmedCheck(ConditionCode c_code, int reg, int imm_val, ThrowKind kind) {
- LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_, reg, imm_val);
+LIR* Mir2Lir::GenImmedCheck(ConditionCode c_code, RegStorage reg, int imm_val, ThrowKind kind) {
+ LIR* tgt;
LIR* branch;
if (c_code == kCondAl) {
+ tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_, RegStorage::kInvalidRegVal,
+ imm_val);
branch = OpUnconditionalBranch(tgt);
} else {
+ tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_, reg.GetReg(), imm_val);
branch = OpCmpImmBranch(c_code, reg, imm_val, tgt);
}
// Remember branch target - will process later
@@ -69,7 +72,7 @@
/* Perform null-check on a register. */
-LIR* Mir2Lir::GenNullCheck(int m_reg, int opt_flags) {
+LIR* Mir2Lir::GenNullCheck(RegStorage m_reg, int opt_flags) {
if (Runtime::Current()->ExplicitNullChecks()) {
if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
return NULL;
@@ -94,14 +97,15 @@
}
}
-void Mir2Lir::ForceImplicitNullCheck(int reg, int opt_flags) {
+void Mir2Lir::ForceImplicitNullCheck(RegStorage reg, int opt_flags) {
if (!Runtime::Current()->ExplicitNullChecks()) {
if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
return;
}
// Force an implicit null check by performing a memory operation (load) from the given
// register with offset 0. This will cause a signal if the register contains 0 (null).
- int tmp = AllocTemp();
+ RegStorage tmp = AllocTemp();
+ // TODO: for Mips, would be best to use rZERO as the bogus register target.
LIR* load = LoadWordDisp(reg, 0, tmp);
FreeTemp(tmp);
MarkSafepointPC(load);
@@ -109,9 +113,10 @@
}
/* Perform check on two registers */
-LIR* Mir2Lir::GenRegRegCheck(ConditionCode c_code, int reg1, int reg2,
+LIR* Mir2Lir::GenRegRegCheck(ConditionCode c_code, RegStorage reg1, RegStorage reg2,
ThrowKind kind) {
- LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_, reg1, reg2);
+ LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_, reg1.GetReg(),
+ reg2.GetReg());
LIR* branch = OpCmpBranch(c_code, reg1, reg2, tgt);
// Remember branch target - will process later
throw_launchpads_.Insert(tgt);
@@ -162,12 +167,12 @@
if ((rl_temp.location == kLocDalvikFrame) &&
InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src2))) {
// OK - convert this to a compare immediate and branch
- OpCmpImmBranch(cond, rl_src1.reg.GetReg(), mir_graph_->ConstantValue(rl_src2), taken);
+ OpCmpImmBranch(cond, rl_src1.reg, mir_graph_->ConstantValue(rl_src2), taken);
return;
}
}
rl_src2 = LoadValue(rl_src2, kCoreReg);
- OpCmpBranch(cond, rl_src1.reg.GetReg(), rl_src2.reg.GetReg(), taken);
+ OpCmpBranch(cond, rl_src1.reg, rl_src2.reg, taken);
}
void Mir2Lir::GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src, LIR* taken,
@@ -197,17 +202,17 @@
cond = static_cast<ConditionCode>(0);
LOG(FATAL) << "Unexpected opcode " << opcode;
}
- OpCmpImmBranch(cond, rl_src.reg.GetReg(), 0, taken);
+ OpCmpImmBranch(cond, rl_src.reg, 0, taken);
}
void Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (rl_src.location == kLocPhysReg) {
- OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetReg());
+ OpRegCopy(rl_result.reg, rl_src.reg);
} else {
- LoadValueDirect(rl_src, rl_result.reg.GetReg());
+ LoadValueDirect(rl_src, rl_result.reg.GetLow());
}
- OpRegRegImm(kOpAsr, rl_result.reg.GetHighReg(), rl_result.reg.GetReg(), 31);
+ OpRegRegImm(kOpAsr, rl_result.reg.GetHigh(), rl_result.reg.GetLow(), 31);
StoreValueWide(rl_dest, rl_result);
}
@@ -229,7 +234,7 @@
default:
LOG(ERROR) << "Bad int conversion type";
}
- OpRegReg(op, rl_result.reg.GetReg(), rl_src.reg.GetReg());
+ OpRegReg(op, rl_result.reg, rl_src.reg);
StoreValue(rl_dest, rl_result);
}
@@ -323,8 +328,7 @@
for (int i = 0; i < elems; i++) {
RegLocation loc = UpdateLoc(info->args[i]);
if (loc.location == kLocPhysReg) {
- StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low),
- loc.reg.GetReg(), kWord);
+ StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, kWord);
}
}
/*
@@ -332,10 +336,10 @@
* this is an uncommon operation and isn't especially performance
* critical.
*/
- int r_src = AllocTemp();
- int r_dst = AllocTemp();
- int r_idx = AllocTemp();
- int r_val = INVALID_REG;
+ RegStorage r_src = AllocTemp();
+ RegStorage r_dst = AllocTemp();
+ RegStorage r_idx = AllocTemp();
+ RegStorage r_val;
switch (cu_->instruction_set) {
case kThumb2:
r_val = TargetReg(kLr);
@@ -374,11 +378,11 @@
for (int i = 0; i < elems; i++) {
RegLocation rl_arg = LoadValue(info->args[i], kCoreReg);
StoreBaseDisp(TargetReg(kRet0),
- mirror::Array::DataOffset(component_size).Int32Value() +
- i * 4, rl_arg.reg.GetReg(), kWord);
+ mirror::Array::DataOffset(component_size).Int32Value() + i * 4,
+ rl_arg.reg, kWord);
// If the LoadValue caused a temp to be allocated, free it
- if (IsTemp(rl_arg.reg.GetReg())) {
- FreeTemp(rl_arg.reg.GetReg());
+ if (IsTemp(rl_arg.reg)) {
+ FreeTemp(rl_arg.reg);
}
}
}
@@ -392,17 +396,17 @@
//
class StaticFieldSlowPath : public Mir2Lir::LIRSlowPath {
public:
- StaticFieldSlowPath(Mir2Lir* m2l, LIR* unresolved, LIR* uninit, LIR* cont,
- int storage_index, int r_base) :
- LIRSlowPath(m2l, m2l->GetCurrentDexPc(), unresolved, cont), uninit_(uninit), storage_index_(storage_index),
- r_base_(r_base) {
+ StaticFieldSlowPath(Mir2Lir* m2l, LIR* unresolved, LIR* uninit, LIR* cont, int storage_index,
+ RegStorage r_base) :
+ LIRSlowPath(m2l, m2l->GetCurrentDexPc(), unresolved, cont), uninit_(uninit),
+ storage_index_(storage_index), r_base_(r_base) {
}
void Compile() {
LIR* unresolved_target = GenerateTargetLabel();
uninit_->target = unresolved_target;
m2l_->CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeStaticStorage),
- storage_index_, true);
+ storage_index_, true);
// Copy helper's result into r_base, a no-op on all but MIPS.
m2l_->OpRegCopy(r_base_, m2l_->TargetReg(kRet0));
@@ -412,7 +416,7 @@
private:
LIR* const uninit_;
const int storage_index_;
- const int r_base_;
+ const RegStorage r_base_;
};
void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, bool is_long_or_double,
@@ -421,15 +425,14 @@
cu_->compiler_driver->ProcessedStaticField(field_info.FastPut(), field_info.IsReferrersClass());
if (field_info.FastPut() && !SLOW_FIELD_PATH) {
DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
- int r_base;
+ RegStorage r_base;
if (field_info.IsReferrersClass()) {
// Fast path, static storage base is this method's class
RegLocation rl_method = LoadCurrMethod();
r_base = AllocTemp();
- LoadWordDisp(rl_method.reg.GetReg(),
- mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base);
- if (IsTemp(rl_method.reg.GetReg())) {
- FreeTemp(rl_method.reg.GetReg());
+ LoadWordDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base);
+ if (IsTemp(rl_method.reg)) {
+ FreeTemp(rl_method.reg);
}
} else {
// Medium path, static storage base in a different class which requires checks that the other
@@ -439,14 +442,12 @@
// May do runtime call so everything to home locations.
FlushAllRegs();
// Using fixed register to sync with possible call to runtime support.
- int r_method = TargetReg(kArg1);
+ RegStorage r_method = TargetReg(kArg1);
LockTemp(r_method);
LoadCurrMethodDirect(r_method);
r_base = TargetReg(kArg0);
LockTemp(r_base);
- LoadWordDisp(r_method,
- mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
- r_base);
+ LoadWordDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base);
LoadWordDisp(r_base, mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
sizeof(int32_t*) * field_info.StorageIndex(), r_base);
// r_base now points at static storage (Class*) or NULL if the type is not yet resolved.
@@ -457,15 +458,14 @@
// The slow path is invoked if the r_base is NULL or the class pointed
// to by it is not initialized.
LIR* unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, NULL);
- int r_tmp = TargetReg(kArg2);
+ RegStorage r_tmp = TargetReg(kArg2);
LockTemp(r_tmp);
LIR* uninit_branch = OpCmpMemImmBranch(kCondLt, r_tmp, r_base,
mirror::Class::StatusOffset().Int32Value(),
mirror::Class::kStatusInitialized, NULL);
LIR* cont = NewLIR0(kPseudoTargetLabel);
- AddSlowPath(new (arena_) StaticFieldSlowPath(this,
- unresolved_branch, uninit_branch, cont,
+ AddSlowPath(new (arena_) StaticFieldSlowPath(this, unresolved_branch, uninit_branch, cont,
field_info.StorageIndex(), r_base));
FreeTemp(r_tmp);
@@ -479,19 +479,20 @@
rl_src = LoadValue(rl_src, kAnyReg);
}
if (field_info.IsVolatile()) {
+ // There might have been a store before this volatile one so insert StoreStore barrier.
GenMemBarrier(kStoreStore);
}
if (is_long_or_double) {
- StoreBaseDispWide(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg.GetReg(),
- rl_src.reg.GetHighReg());
+ StoreBaseDispWide(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg);
} else {
- StoreWordDisp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg.GetReg());
+ StoreWordDisp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg);
}
if (field_info.IsVolatile()) {
+ // A load might follow the volatile store so insert a StoreLoad barrier.
GenMemBarrier(kStoreLoad);
}
if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
- MarkGCCard(rl_src.reg.GetReg(), r_base);
+ MarkGCCard(rl_src.reg, r_base);
}
FreeTemp(r_base);
} else {
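The barrier placement in GenSput above follows the conservative volatile-store recipe stated in the new comments: complete earlier stores before the volatile store, and make the volatile store visible before any later load. Reduced to a skeleton (a sketch, where field_offset stands for field_info.FieldOffset().Int32Value()):

    GenMemBarrier(kStoreStore);                  // earlier stores drain first
    StoreWordDisp(r_base, field_offset, r_src);  // the volatile store itself
    GenMemBarrier(kStoreLoad);                   // ordered before any later load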
@@ -510,13 +511,12 @@
cu_->compiler_driver->ProcessedStaticField(field_info.FastGet(), field_info.IsReferrersClass());
if (field_info.FastGet() && !SLOW_FIELD_PATH) {
DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
- int r_base;
+ RegStorage r_base;
if (field_info.IsReferrersClass()) {
// Fast path, static storage base is this method's class
RegLocation rl_method = LoadCurrMethod();
r_base = AllocTemp();
- LoadWordDisp(rl_method.reg.GetReg(),
- mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base);
+ LoadWordDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base);
} else {
// Medium path, static storage base in a different class which requires checks that the other
// class is initialized
@@ -524,14 +524,12 @@
// May do runtime call so everything to home locations.
FlushAllRegs();
// Using fixed register to sync with possible call to runtime support.
- int r_method = TargetReg(kArg1);
+ RegStorage r_method = TargetReg(kArg1);
LockTemp(r_method);
LoadCurrMethodDirect(r_method);
r_base = TargetReg(kArg0);
LockTemp(r_base);
- LoadWordDisp(r_method,
- mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
- r_base);
+ LoadWordDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base);
LoadWordDisp(r_base, mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
sizeof(int32_t*) * field_info.StorageIndex(), r_base);
// r_base now points at static storage (Class*) or NULL if the type is not yet resolved.
@@ -542,15 +540,14 @@
// The slow path is invoked if the r_base is NULL or the class pointed
// to by it is not initialized.
LIR* unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, NULL);
- int r_tmp = TargetReg(kArg2);
+ RegStorage r_tmp = TargetReg(kArg2);
LockTemp(r_tmp);
LIR* uninit_branch = OpCmpMemImmBranch(kCondLt, r_tmp, r_base,
mirror::Class::StatusOffset().Int32Value(),
mirror::Class::kStatusInitialized, NULL);
LIR* cont = NewLIR0(kPseudoTargetLabel);
- AddSlowPath(new (arena_) StaticFieldSlowPath(this,
- unresolved_branch, uninit_branch, cont,
+ AddSlowPath(new (arena_) StaticFieldSlowPath(this, unresolved_branch, uninit_branch, cont,
field_info.StorageIndex(), r_base));
FreeTemp(r_tmp);
@@ -559,16 +556,21 @@
}
// r_base now holds static storage base
RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true);
- if (field_info.IsVolatile()) {
- GenMemBarrier(kLoadLoad);
- }
+
if (is_long_or_double) {
- LoadBaseDispWide(r_base, field_info.FieldOffset().Int32Value(), rl_result.reg.GetReg(),
- rl_result.reg.GetHighReg(), INVALID_SREG);
+ LoadBaseDispWide(r_base, field_info.FieldOffset().Int32Value(), rl_result.reg, INVALID_SREG);
} else {
- LoadWordDisp(r_base, field_info.FieldOffset().Int32Value(), rl_result.reg.GetReg());
+ LoadWordDisp(r_base, field_info.FieldOffset().Int32Value(), rl_result.reg);
}
FreeTemp(r_base);
+
+ if (field_info.IsVolatile()) {
+ // Without context sensitive analysis, we must issue the most conservative barriers.
+ // In this case, either a load or store may follow so we issue both barriers.
+ GenMemBarrier(kLoadLoad);
+ GenMemBarrier(kLoadStore);
+ }
+
if (is_long_or_double) {
StoreValueWide(rl_dest, rl_result);
} else {
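GenSget above and the GenIGet paths further down all end with the matching conservative volatile-load pairing: without context-sensitive analysis either a load or a store may follow, so both barriers are issued after the load. The skeleton (sketch, same field_offset shorthand):

    LoadWordDisp(r_base, field_offset, r_dest);  // the volatile load
    GenMemBarrier(kLoadLoad);                    // later loads wait for it
    GenMemBarrier(kLoadStore);                   // later stores wait for it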
@@ -611,7 +613,7 @@
LIR* resume_lab = reinterpret_cast<LIR*>(UnwrapPointer(lab->operands[0]));
current_dalvik_offset_ = lab->operands[1];
AppendLIR(lab);
- int r_tgt = CallHelperSetup(helper_offset);
+ RegStorage r_tgt = CallHelperSetup(helper_offset);
CallHelper(r_tgt, helper_offset, true /* MarkSafepointPC */);
OpUnconditionalBranch(resume_lab);
}
@@ -636,9 +638,10 @@
case kThrowConstantArrayBounds: // v1 is length reg (for Arm/Mips), v2 constant index
// v2 holds the constant array index. Mips/Arm pass the length in v1; x86 reloads it from the array pointer in v1.
if (target_x86) {
- OpRegMem(kOpMov, TargetReg(kArg1), v1, mirror::Array::LengthOffset().Int32Value());
+ OpRegMem(kOpMov, TargetReg(kArg1), RegStorage::Solo32(v1),
+ mirror::Array::LengthOffset().Int32Value());
} else {
- OpRegCopy(TargetReg(kArg1), v1);
+ OpRegCopy(TargetReg(kArg1), RegStorage::Solo32(v1));
}
// Make sure the following LoadConstant doesn't mess with kArg1.
LockTemp(TargetReg(kArg1));
@@ -647,33 +650,36 @@
break;
case kThrowArrayBounds:
// Move v1 (array index) to kArg0 and v2 (array length) to kArg1
- if (v2 != TargetReg(kArg0)) {
- OpRegCopy(TargetReg(kArg0), v1);
+ if (v2 != TargetReg(kArg0).GetReg()) {
+ OpRegCopy(TargetReg(kArg0), RegStorage::Solo32(v1));
if (target_x86) {
// x86 leaves the array pointer in v2, so load the array length that the handler expects
- OpRegMem(kOpMov, TargetReg(kArg1), v2, mirror::Array::LengthOffset().Int32Value());
+ OpRegMem(kOpMov, TargetReg(kArg1), RegStorage::Solo32(v2),
+ mirror::Array::LengthOffset().Int32Value());
} else {
- OpRegCopy(TargetReg(kArg1), v2);
+ OpRegCopy(TargetReg(kArg1), RegStorage::Solo32(v2));
}
} else {
- if (v1 == TargetReg(kArg1)) {
+ if (v1 == TargetReg(kArg1).GetReg()) {
// Swap v1 and v2, using kArg2 as a temp
- OpRegCopy(TargetReg(kArg2), v1);
+ OpRegCopy(TargetReg(kArg2), RegStorage::Solo32(v1));
if (target_x86) {
// x86 leaves the array pointer in v2; load the array length that the handler expects
- OpRegMem(kOpMov, TargetReg(kArg1), v2, mirror::Array::LengthOffset().Int32Value());
+ OpRegMem(kOpMov, TargetReg(kArg1), RegStorage::Solo32(v2),
+ mirror::Array::LengthOffset().Int32Value());
} else {
- OpRegCopy(TargetReg(kArg1), v2);
+ OpRegCopy(TargetReg(kArg1), RegStorage::Solo32(v2));
}
OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));
} else {
if (target_x86) {
// x86 leaves the array pointer in v2; load the array length that the handler expects
- OpRegMem(kOpMov, TargetReg(kArg1), v2, mirror::Array::LengthOffset().Int32Value());
+ OpRegMem(kOpMov, TargetReg(kArg1), RegStorage::Solo32(v2),
+ mirror::Array::LengthOffset().Int32Value());
} else {
- OpRegCopy(TargetReg(kArg1), v2);
+ OpRegCopy(TargetReg(kArg1), RegStorage::Solo32(v2));
}
- OpRegCopy(TargetReg(kArg0), v1);
+ OpRegCopy(TargetReg(kArg0), RegStorage::Solo32(v1));
}
}
func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowArrayBounds);
@@ -682,7 +688,7 @@
func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowDivZero);
break;
case kThrowNoSuchMethod:
- OpRegCopy(TargetReg(kArg0), v1);
+ OpRegCopy(TargetReg(kArg0), RegStorage::Solo32(v1));
func_offset =
QUICK_ENTRYPOINT_OFFSET(pThrowNoSuchMethod);
break;
@@ -690,7 +696,7 @@
LOG(FATAL) << "Unexpected throw kind: " << lab->operands[0];
}
ClobberCallerSave();
- int r_tgt = CallHelperSetup(func_offset);
+ RegStorage r_tgt = CallHelperSetup(func_offset);
CallHelper(r_tgt, func_offset, true /* MarkSafepointPC */, true /* UseLink */);
}
}
@@ -707,37 +713,45 @@
rl_obj = LoadValue(rl_obj, kCoreReg);
if (is_long_or_double) {
DCHECK(rl_dest.wide);
- GenNullCheck(rl_obj.reg.GetReg(), opt_flags);
+ GenNullCheck(rl_obj.reg, opt_flags);
if (cu_->instruction_set == kX86) {
rl_result = EvalLoc(rl_dest, reg_class, true);
- GenNullCheck(rl_obj.reg.GetReg(), opt_flags);
- LoadBaseDispWide(rl_obj.reg.GetReg(), field_info.FieldOffset().Int32Value(),
- rl_result.reg.GetReg(),
- rl_result.reg.GetHighReg(), rl_obj.s_reg_low);
+ // FIXME? duplicate null check?
+ GenNullCheck(rl_obj.reg, opt_flags);
+ LoadBaseDispWide(rl_obj.reg, field_info.FieldOffset().Int32Value(), rl_result.reg,
+ rl_obj.s_reg_low);
MarkPossibleNullPointerException(opt_flags);
if (field_info.IsVolatile()) {
+ // Without context sensitive analysis, we must issue the most conservative barriers.
+ // In this case, either a load or store may follow so we issue both barriers.
GenMemBarrier(kLoadLoad);
+ GenMemBarrier(kLoadStore);
}
} else {
- int reg_ptr = AllocTemp();
- OpRegRegImm(kOpAdd, reg_ptr, rl_obj.reg.GetReg(), field_info.FieldOffset().Int32Value());
+ RegStorage reg_ptr = AllocTemp();
+ OpRegRegImm(kOpAdd, reg_ptr, rl_obj.reg, field_info.FieldOffset().Int32Value());
rl_result = EvalLoc(rl_dest, reg_class, true);
- LoadBaseDispWide(reg_ptr, 0, rl_result.reg.GetReg(), rl_result.reg.GetHighReg(),
- INVALID_SREG);
+ LoadBaseDispWide(reg_ptr, 0, rl_result.reg, INVALID_SREG);
if (field_info.IsVolatile()) {
+ // Without context sensitive analysis, we must issue the most conservative barriers.
+ // In this case, either a load or store may follow so we issue both barriers.
GenMemBarrier(kLoadLoad);
+ GenMemBarrier(kLoadStore);
}
FreeTemp(reg_ptr);
}
StoreValueWide(rl_dest, rl_result);
} else {
rl_result = EvalLoc(rl_dest, reg_class, true);
- GenNullCheck(rl_obj.reg.GetReg(), opt_flags);
- LoadBaseDisp(rl_obj.reg.GetReg(), field_info.FieldOffset().Int32Value(),
- rl_result.reg.GetReg(), kWord, rl_obj.s_reg_low);
+ GenNullCheck(rl_obj.reg, opt_flags);
+ LoadBaseDisp(rl_obj.reg, field_info.FieldOffset().Int32Value(), rl_result.reg, kWord,
+ rl_obj.s_reg_low);
MarkPossibleNullPointerException(opt_flags);
if (field_info.IsVolatile()) {
+ // Without context sensitive analysis, we must issue the most conservative barriers.
+ // In this case, either a load or store may follow so we issue both barriers.
GenMemBarrier(kLoadLoad);
+ GenMemBarrier(kLoadStore);
}
StoreValue(rl_dest, rl_result);
}
@@ -767,34 +781,36 @@
DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
rl_obj = LoadValue(rl_obj, kCoreReg);
if (is_long_or_double) {
- int reg_ptr;
rl_src = LoadValueWide(rl_src, kAnyReg);
- GenNullCheck(rl_obj.reg.GetReg(), opt_flags);
- reg_ptr = AllocTemp();
- OpRegRegImm(kOpAdd, reg_ptr, rl_obj.reg.GetReg(), field_info.FieldOffset().Int32Value());
+ GenNullCheck(rl_obj.reg, opt_flags);
+ RegStorage reg_ptr = AllocTemp();
+ OpRegRegImm(kOpAdd, reg_ptr, rl_obj.reg, field_info.FieldOffset().Int32Value());
if (field_info.IsVolatile()) {
+ // There might have been a store before this volatile one so insert StoreStore barrier.
GenMemBarrier(kStoreStore);
}
- StoreBaseDispWide(reg_ptr, 0, rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
+ StoreBaseDispWide(reg_ptr, 0, rl_src.reg);
MarkPossibleNullPointerException(opt_flags);
if (field_info.IsVolatile()) {
- GenMemBarrier(kLoadLoad);
+ // A load might follow the volatile store so insert a StoreLoad barrier.
+ GenMemBarrier(kStoreLoad);
}
FreeTemp(reg_ptr);
} else {
rl_src = LoadValue(rl_src, reg_class);
- GenNullCheck(rl_obj.reg.GetReg(), opt_flags);
+ GenNullCheck(rl_obj.reg, opt_flags);
if (field_info.IsVolatile()) {
+ // There might have been a store before this volatile one so insert StoreStore barrier.
GenMemBarrier(kStoreStore);
}
- StoreBaseDisp(rl_obj.reg.GetReg(), field_info.FieldOffset().Int32Value(),
- rl_src.reg.GetReg(), kWord);
+ StoreBaseDisp(rl_obj.reg, field_info.FieldOffset().Int32Value(), rl_src.reg, kWord);
MarkPossibleNullPointerException(opt_flags);
if (field_info.IsVolatile()) {
- GenMemBarrier(kLoadLoad);
+ // A load might follow the volatile store so insert a StoreLoad barrier.
+ GenMemBarrier(kStoreLoad);
}
if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
- MarkGCCard(rl_src.reg.GetReg(), rl_obj.reg.GetReg());
+ MarkGCCard(rl_src.reg, rl_obj.reg);
}
}
} else {
@@ -821,7 +837,7 @@
void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) {
RegLocation rl_method = LoadCurrMethod();
- int res_reg = AllocTemp();
+ RegStorage res_reg = AllocTemp();
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (!cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
*cu_->dex_file,
@@ -829,23 +845,23 @@
// Call out to helper which resolves type and verifies access.
// Resolved type returned in kRet0.
CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccess),
- type_idx, rl_method.reg.GetReg(), true);
+ type_idx, rl_method.reg, true);
RegLocation rl_result = GetReturn(false);
StoreValue(rl_dest, rl_result);
} else {
// We don't need access checks; load type from dex cache
int32_t dex_cache_offset =
mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value();
- LoadWordDisp(rl_method.reg.GetReg(), dex_cache_offset, res_reg);
+ LoadWordDisp(rl_method.reg, dex_cache_offset, res_reg);
int32_t offset_of_type =
mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() + (sizeof(mirror::Class*)
* type_idx);
- LoadWordDisp(res_reg, offset_of_type, rl_result.reg.GetReg());
+ LoadWordDisp(res_reg, offset_of_type, rl_result.reg);
if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file,
type_idx) || SLOW_TYPE_PATH) {
// Slow path, at runtime test if type is null and if so initialize
FlushAllRegs();
- LIR* branch = OpCmpImmBranch(kCondEq, rl_result.reg.GetReg(), 0, NULL);
+ LIR* branch = OpCmpImmBranch(kCondEq, rl_result.reg, 0, NULL);
LIR* cont = NewLIR0(kPseudoTargetLabel);
// Object to generate the slow path for class resolution.
@@ -861,8 +877,8 @@
GenerateTargetLabel();
m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeType), type_idx_,
- rl_method_.reg.GetReg(), true);
- m2l_->OpRegCopy(rl_result_.reg.GetReg(), m2l_->TargetReg(kRet0));
+ rl_method_.reg, true);
+ m2l_->OpRegCopy(rl_result_.reg, m2l_->TargetReg(kRet0));
m2l_->OpUnconditionalBranch(cont_);
}
@@ -874,8 +890,7 @@
};
// Add to list for future.
- AddSlowPath(new (arena_) SlowPath(this, branch, cont,
- type_idx, rl_method, rl_result));
+ AddSlowPath(new (arena_) SlowPath(this, branch, cont, type_idx, rl_method, rl_result));
StoreValue(rl_dest, rl_result);
} else {
@@ -897,11 +912,11 @@
// If the Method* is already in a register, we can save a copy.
RegLocation rl_method = mir_graph_->GetMethodLoc();
- int r_method;
+ RegStorage r_method;
if (rl_method.location == kLocPhysReg) {
// A temp would conflict with register use below.
- DCHECK(!IsTemp(rl_method.reg.GetReg()));
- r_method = rl_method.reg.GetReg();
+ DCHECK(!IsTemp(rl_method.reg));
+ r_method = rl_method.reg;
} else {
r_method = TargetReg(kArg2);
LoadCurrMethodDirect(r_method);
@@ -922,14 +937,14 @@
// Object to generate the slow path for string resolution.
class SlowPath : public LIRSlowPath {
public:
- SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, int r_method) :
+ SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, RegStorage r_method) :
LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), r_method_(r_method) {
}
void Compile() {
GenerateTargetLabel();
- int r_tgt = m2l_->CallHelperSetup(QUICK_ENTRYPOINT_OFFSET(pResolveString));
+ RegStorage r_tgt = m2l_->CallHelperSetup(QUICK_ENTRYPOINT_OFFSET(pResolveString));
m2l_->OpRegCopy(m2l_->TargetReg(kArg0), r_method_); // .eq
LIR* call_inst = m2l_->OpReg(kOpBlx, r_tgt);
@@ -940,7 +955,7 @@
}
private:
- int r_method_;
+ RegStorage r_method_;
};
// Add to list for future.
@@ -949,8 +964,8 @@
DCHECK_EQ(cu_->instruction_set, kX86);
LIR* branch = OpCmpImmBranch(kCondNe, TargetReg(kRet0), 0, NULL);
LoadConstant(TargetReg(kArg1), string_idx);
- CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pResolveString), r_method,
- TargetReg(kArg1), true);
+ CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pResolveString), r_method, TargetReg(kArg1),
+ true);
LIR* target = NewLIR0(kPseudoTargetLabel);
branch->target = target;
}
@@ -958,11 +973,10 @@
StoreValue(rl_dest, GetReturn(false));
} else {
RegLocation rl_method = LoadCurrMethod();
- int res_reg = AllocTemp();
+ RegStorage res_reg = AllocTemp();
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- LoadWordDisp(rl_method.reg.GetReg(),
- mirror::ArtMethod::DexCacheStringsOffset().Int32Value(), res_reg);
- LoadWordDisp(res_reg, offset_of_string, rl_result.reg.GetReg());
+ LoadWordDisp(rl_method.reg, mirror::ArtMethod::DexCacheStringsOffset().Int32Value(), res_reg);
+ LoadWordDisp(res_reg, offset_of_string, rl_result.reg);
StoreValue(rl_dest, rl_result);
}
}
@@ -1035,25 +1049,24 @@
RegLocation object = LoadValue(rl_src, kCoreReg);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- int result_reg = rl_result.reg.GetReg();
- if (result_reg == object.reg.GetReg()) {
+ RegStorage result_reg = rl_result.reg;
+ if (result_reg == object.reg) {
result_reg = AllocTypedTemp(false, kCoreReg);
}
LoadConstant(result_reg, 0); // assume false
- LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg.GetReg(), 0, NULL);
+ LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg, 0, NULL);
- int check_class = AllocTypedTemp(false, kCoreReg);
- int object_class = AllocTypedTemp(false, kCoreReg);
+ RegStorage check_class = AllocTypedTemp(false, kCoreReg);
+ RegStorage object_class = AllocTypedTemp(false, kCoreReg);
LoadCurrMethodDirect(check_class);
if (use_declaring_class) {
- LoadWordDisp(check_class, mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
- check_class);
- LoadWordDisp(object.reg.GetReg(), mirror::Object::ClassOffset().Int32Value(), object_class);
+ LoadWordDisp(check_class, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), check_class);
+ LoadWordDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class);
} else {
LoadWordDisp(check_class, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
check_class);
- LoadWordDisp(object.reg.GetReg(), mirror::Object::ClassOffset().Int32Value(), object_class);
+ LoadWordDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class);
int32_t offset_of_type =
mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() +
(sizeof(mirror::Class*) * type_idx);
@@ -1077,7 +1090,7 @@
FreeTemp(object_class);
FreeTemp(check_class);
if (IsTemp(result_reg)) {
- OpRegCopy(rl_result.reg.GetReg(), result_reg);
+ OpRegCopy(rl_result.reg, result_reg);
FreeTemp(result_reg);
}
StoreValue(rl_dest, rl_result);
@@ -1095,7 +1108,7 @@
// May generate a call - use explicit registers
LockCallTemps();
LoadCurrMethodDirect(TargetReg(kArg1)); // kArg1 <= current Method*
- int class_reg = TargetReg(kArg2); // kArg2 will hold the Class*
+ RegStorage class_reg = TargetReg(kArg2); // kArg2 will hold the Class*
if (needs_access_check) {
// Check we have access to type_idx and if not throw IllegalAccessError,
// returns Class* in kArg0
@@ -1105,13 +1118,13 @@
LoadValueDirectFixed(rl_src, TargetReg(kArg0)); // kArg0 <= ref
} else if (use_declaring_class) {
LoadValueDirectFixed(rl_src, TargetReg(kArg0)); // kArg0 <= ref
- LoadWordDisp(TargetReg(kArg1),
- mirror::ArtMethod::DeclaringClassOffset().Int32Value(), class_reg);
+ LoadWordDisp(TargetReg(kArg1), mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
+ class_reg);
} else {
// Load dex cache entry into class_reg (kArg2)
LoadValueDirectFixed(rl_src, TargetReg(kArg0)); // kArg0 <= ref
- LoadWordDisp(TargetReg(kArg1),
- mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), class_reg);
+ LoadWordDisp(TargetReg(kArg1), mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
+ class_reg);
int32_t offset_of_type =
mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() + (sizeof(mirror::Class*)
* type_idx);
@@ -1133,7 +1146,7 @@
RegLocation rl_result = GetReturn(false);
if (cu_->instruction_set == kMips) {
// On MIPS rArg0 != rl_result, place false in result if branch is taken.
- LoadConstant(rl_result.reg.GetReg(), 0);
+ LoadConstant(rl_result.reg, 0);
}
LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);
@@ -1147,16 +1160,16 @@
if (cu_->instruction_set == kThumb2) {
OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2)); // Same?
OpIT(kCondEq, "E"); // if-convert the test
- LoadConstant(rl_result.reg.GetReg(), 1); // .eq case - load true
- LoadConstant(rl_result.reg.GetReg(), 0); // .ne case - load false
+ LoadConstant(rl_result.reg, 1); // .eq case - load true
+ LoadConstant(rl_result.reg, 0); // .ne case - load false
} else {
- LoadConstant(rl_result.reg.GetReg(), 0); // ne case - load false
+ LoadConstant(rl_result.reg, 0); // ne case - load false
branchover = OpCmpBranch(kCondNe, TargetReg(kArg1), TargetReg(kArg2), NULL);
- LoadConstant(rl_result.reg.GetReg(), 1); // eq case - load true
+ LoadConstant(rl_result.reg, 1); // eq case - load true
}
} else {
if (cu_->instruction_set == kThumb2) {
- int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivial));
+ RegStorage r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivial));
if (!type_known_abstract) {
/* Uses conditional nullification */
OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2)); // Same?
@@ -1169,10 +1182,10 @@
} else {
if (!type_known_abstract) {
/* Uses branchovers */
- LoadConstant(rl_result.reg.GetReg(), 1); // assume true
+ LoadConstant(rl_result.reg, 1); // assume true
branchover = OpCmpBranch(kCondEq, TargetReg(kArg1), TargetReg(kArg2), NULL);
}
- int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivial));
+ RegStorage r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivial));
OpRegCopy(TargetReg(kArg0), TargetReg(kArg2)); // .ne case - arg0 <= class
OpReg(kOpBlx, r_tgt); // .ne case: helper(class, ref->class)
FreeTemp(r_tgt);
@@ -1228,7 +1241,7 @@
// May generate a call - use explicit registers
LockCallTemps();
LoadCurrMethodDirect(TargetReg(kArg1)); // kArg1 <= current Method*
- int class_reg = TargetReg(kArg2); // kArg2 will hold the Class*
+ RegStorage class_reg = TargetReg(kArg2); // kArg2 will hold the Class*
if (needs_access_check) {
// Check we have access to type_idx and if not throw IllegalAccessError,
// returns Class* in kRet0
@@ -1237,12 +1250,12 @@
type_idx, TargetReg(kArg1), true);
OpRegCopy(class_reg, TargetReg(kRet0)); // Align usage with fast path
} else if (use_declaring_class) {
- LoadWordDisp(TargetReg(kArg1),
- mirror::ArtMethod::DeclaringClassOffset().Int32Value(), class_reg);
+ LoadWordDisp(TargetReg(kArg1), mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
+ class_reg);
} else {
// Load dex cache entry into class_reg (kArg2)
- LoadWordDisp(TargetReg(kArg1),
- mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), class_reg);
+ LoadWordDisp(TargetReg(kArg1), mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
+ class_reg);
int32_t offset_of_type =
mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() +
(sizeof(mirror::Class*) * type_idx);
@@ -1256,7 +1269,7 @@
class SlowPath : public LIRSlowPath {
public:
SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, const int type_idx,
- const int class_reg) :
+ const RegStorage class_reg) :
LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), type_idx_(type_idx),
class_reg_(class_reg) {
}
@@ -1273,11 +1286,10 @@
}
public:
const int type_idx_;
- const int class_reg_;
+ const RegStorage class_reg_;
};
- AddSlowPath(new (arena_) SlowPath(this, hop_branch, cont,
- type_idx, class_reg));
+ AddSlowPath(new (arena_) SlowPath(this, hop_branch, cont, type_idx, class_reg));
}
}
// At this point, class_reg (kArg2) has class
@@ -1322,8 +1334,7 @@
LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);
/* load object->klass_ */
DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
- LoadWordDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(),
- TargetReg(kArg1));
+ LoadWordDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));
LIR* branch2 = OpCmpBranch(kCondNe, TargetReg(kArg1), class_reg, NULL);
LIR* cont = NewLIR0(kPseudoTargetLabel);
@@ -1355,16 +1366,15 @@
rl_src2 = LoadValueWide(rl_src2, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
// The longs may overlap - use intermediate temp if so
- if ((rl_result.reg.GetReg() == rl_src1.reg.GetHighReg()) || (rl_result.reg.GetReg() == rl_src2.reg.GetHighReg())) {
- int t_reg = AllocTemp();
- OpRegRegReg(first_op, t_reg, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
- OpRegRegReg(second_op, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
- OpRegCopy(rl_result.reg.GetReg(), t_reg);
+ if ((rl_result.reg.GetLowReg() == rl_src1.reg.GetHighReg()) || (rl_result.reg.GetLowReg() == rl_src2.reg.GetHighReg())) {
+ RegStorage t_reg = AllocTemp();
+ OpRegRegReg(first_op, t_reg, rl_src1.reg.GetLow(), rl_src2.reg.GetLow());
+ OpRegRegReg(second_op, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), rl_src2.reg.GetHigh());
+ OpRegCopy(rl_result.reg.GetLow(), t_reg);
FreeTemp(t_reg);
} else {
- OpRegRegReg(first_op, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
- OpRegRegReg(second_op, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(),
- rl_src2.reg.GetHighReg());
+ OpRegRegReg(first_op, rl_result.reg.GetLow(), rl_src1.reg.GetLow(), rl_src2.reg.GetLow());
+ OpRegRegReg(second_op, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), rl_src2.reg.GetHigh());
}
/*
* NOTE: If rl_dest refers to a frame variable in a large frame, the
@@ -1487,22 +1497,21 @@
if (unary) {
rl_src1 = LoadValue(rl_src1, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegReg(op, rl_result.reg.GetReg(), rl_src1.reg.GetReg());
+ OpRegReg(op, rl_result.reg, rl_src1.reg);
} else {
if (shift_op) {
- int t_reg = INVALID_REG;
rl_src2 = LoadValue(rl_src2, kCoreReg);
- t_reg = AllocTemp();
- OpRegRegImm(kOpAnd, t_reg, rl_src2.reg.GetReg(), 31);
+ RegStorage t_reg = AllocTemp();
+ OpRegRegImm(kOpAnd, t_reg, rl_src2.reg, 31);
rl_src1 = LoadValue(rl_src1, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegRegReg(op, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), t_reg);
+ OpRegRegReg(op, rl_result.reg, rl_src1.reg, t_reg);
FreeTemp(t_reg);
} else {
rl_src1 = LoadValue(rl_src1, kCoreReg);
rl_src2 = LoadValue(rl_src2, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegRegReg(op, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+ OpRegRegReg(op, rl_result.reg, rl_src1.reg, rl_src2.reg);
}
}
StoreValue(rl_dest, rl_result);
@@ -1512,9 +1521,9 @@
rl_src1 = LoadValue(rl_src1, kCoreReg);
rl_src2 = LoadValue(rl_src2, kCoreReg);
if (check_zero) {
- GenImmedCheck(kCondEq, rl_src2.reg.GetReg(), 0, kThrowDivZero);
+ GenImmedCheck(kCondEq, rl_src2.reg, 0, kThrowDivZero);
}
- rl_result = GenDivRem(rl_dest, rl_src1.reg.GetReg(), rl_src2.reg.GetReg(), op == kOpDiv);
+ rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, op == kOpDiv);
done = true;
} else if (cu_->instruction_set == kThumb2) {
if (cu_->GetInstructionSetFeatures().HasDivideInstruction()) {
@@ -1523,9 +1532,9 @@
rl_src1 = LoadValue(rl_src1, kCoreReg);
rl_src2 = LoadValue(rl_src2, kCoreReg);
if (check_zero) {
- GenImmedCheck(kCondEq, rl_src2.reg.GetReg(), 0, kThrowDivZero);
+ GenImmedCheck(kCondEq, rl_src2.reg, 0, kThrowDivZero);
}
- rl_result = GenDivRem(rl_dest, rl_src1.reg.GetReg(), rl_src2.reg.GetReg(), op == kOpDiv);
+ rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, op == kOpDiv);
done = true;
}
}
@@ -1535,7 +1544,7 @@
ThreadOffset func_offset = QUICK_ENTRYPOINT_OFFSET(pIdivmod);
FlushAllRegs(); /* Send everything to home location */
LoadValueDirectFixed(rl_src2, TargetReg(kArg1));
- int r_tgt = CallHelperSetup(func_offset);
+ RegStorage r_tgt = CallHelperSetup(func_offset);
LoadValueDirectFixed(rl_src1, TargetReg(kArg0));
if (check_zero) {
GenImmedCheck(kCondEq, TargetReg(kArg1), 0, kThrowDivZero);
@@ -1582,32 +1591,32 @@
rl_src = LoadValue(rl_src, kCoreReg);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (is_div) {
- int t_reg = AllocTemp();
+ RegStorage t_reg = AllocTemp();
if (lit == 2) {
// Division by 2 is by far the most common division by constant.
- OpRegRegImm(kOpLsr, t_reg, rl_src.reg.GetReg(), 32 - k);
- OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.reg.GetReg());
- OpRegRegImm(kOpAsr, rl_result.reg.GetReg(), t_reg, k);
+ OpRegRegImm(kOpLsr, t_reg, rl_src.reg, 32 - k);
+ OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.reg);
+ OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k);
} else {
- OpRegRegImm(kOpAsr, t_reg, rl_src.reg.GetReg(), 31);
+ OpRegRegImm(kOpAsr, t_reg, rl_src.reg, 31);
OpRegRegImm(kOpLsr, t_reg, t_reg, 32 - k);
- OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.reg.GetReg());
- OpRegRegImm(kOpAsr, rl_result.reg.GetReg(), t_reg, k);
+ OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.reg);
+ OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k);
}
} else {
- int t_reg1 = AllocTemp();
- int t_reg2 = AllocTemp();
+ RegStorage t_reg1 = AllocTemp();
+ RegStorage t_reg2 = AllocTemp();
if (lit == 2) {
- OpRegRegImm(kOpLsr, t_reg1, rl_src.reg.GetReg(), 32 - k);
- OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.reg.GetReg());
+ OpRegRegImm(kOpLsr, t_reg1, rl_src.reg, 32 - k);
+ OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.reg);
OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1);
- OpRegRegReg(kOpSub, rl_result.reg.GetReg(), t_reg2, t_reg1);
+ OpRegRegReg(kOpSub, rl_result.reg, t_reg2, t_reg1);
} else {
- OpRegRegImm(kOpAsr, t_reg1, rl_src.reg.GetReg(), 31);
+ OpRegRegImm(kOpAsr, t_reg1, rl_src.reg, 31);
OpRegRegImm(kOpLsr, t_reg1, t_reg1, 32 - k);
- OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.reg.GetReg());
+ OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.reg);
OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1);
- OpRegRegReg(kOpSub, rl_result.reg.GetReg(), t_reg2, t_reg1);
+ OpRegRegReg(kOpSub, rl_result.reg, t_reg2, t_reg1);
}
}
StoreValue(rl_dest, rl_result);
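The power-of-two division above avoids a divide instruction by biasing negative inputs with 2^k - 1 before the arithmetic shift; that bias is what makes the shift truncate toward zero as Java division requires. An illustrative C++ model of the emitted sequence (not compiler code; assumes 1 <= k <= 31):

    #include <cstdint>
    int32_t DivPow2(int32_t src, int k) {
      // asr #31 then lsr #(32 - k): yields 2^k - 1 when src < 0, else 0.
      uint32_t bias = static_cast<uint32_t>(src >> 31) >> (32 - k);
      return (src + static_cast<int32_t>(bias)) >> k;  // arithmetic shift
    }
    // DivPow2(-7, 1) == -3 and DivPow2(7, 1) == 3, matching Java semantics.

The lit == 2 fast path is the same idea with k == 1, where a single lsr #31 extracts the sign bit directly as the bias.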
@@ -1617,14 +1626,31 @@
// Returns true if it added instructions to 'cu' to multiply 'rl_src' by 'lit'
// and store the result in 'rl_dest'.
bool Mir2Lir::HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
+ if (lit < 0) {
+ return false;
+ }
+ if (lit == 0) {
+ RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+ LoadConstant(rl_result.reg, 0);
+ StoreValue(rl_dest, rl_result);
+ return true;
+ }
+ if (lit == 1) {
+ rl_src = LoadValue(rl_src, kCoreReg);
+ RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+ OpRegCopy(rl_result.reg, rl_src.reg);
+ StoreValue(rl_dest, rl_result);
+ return true;
+ }
+ // Arm has RegRegRegShift, so EasyMultiply can handle more special cases there.
+ if (cu_->instruction_set == kThumb2) {
+ return EasyMultiply(rl_src, rl_dest, lit);
+ }
// Can we simplify this multiplication?
bool power_of_two = false;
bool pop_count_le2 = false;
bool power_of_two_minus_one = false;
- if (lit < 2) {
- // Avoid special cases.
- return false;
- } else if (IsPowerOfTwo(lit)) {
+ if (IsPowerOfTwo(lit)) {
power_of_two = true;
} else if (IsPopCountLE2(lit)) {
pop_count_le2 = true;
@@ -1637,7 +1663,7 @@
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (power_of_two) {
// Shift.
- OpRegRegImm(kOpLsl, rl_result.reg.GetReg(), rl_src.reg.GetReg(), LowestSetBit(lit));
+ OpRegRegImm(kOpLsl, rl_result.reg, rl_src.reg, LowestSetBit(lit));
} else if (pop_count_le2) {
// Shift and add and shift.
int first_bit = LowestSetBit(lit);
@@ -1647,9 +1673,9 @@
// Reverse subtract: (src << (shift + 1)) - src.
DCHECK(power_of_two_minus_one);
// TUNING: rsb dst, src, src lsl#LowestSetBit(lit + 1)
- int t_reg = AllocTemp();
- OpRegRegImm(kOpLsl, t_reg, rl_src.reg.GetReg(), LowestSetBit(lit + 1));
- OpRegRegReg(kOpSub, rl_result.reg.GetReg(), t_reg, rl_src.reg.GetReg());
+ RegStorage t_reg = AllocTemp();
+ OpRegRegImm(kOpLsl, t_reg, rl_src.reg, LowestSetBit(lit + 1));
+ OpRegRegReg(kOpSub, rl_result.reg, t_reg, rl_src.reg);
}
StoreValue(rl_dest, rl_result);
return true;
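HandleEasyMultiply now dispatches lit == 0 and lit == 1 up front and otherwise strength-reduces three shapes of constant: a power of two becomes a shift, a constant with at most two set bits becomes two shifts plus an add, and 2^k - 1 becomes the shift-then-subtract emitted above. The first and last identities in illustrative C++ (assumes lit >= 2; __builtin_ctz is a GCC/Clang builtin):

    #include <cstdint>
    int32_t MulByConst(int32_t src, int lit) {
      if ((lit & (lit - 1)) == 0)                      // lit == 2^k
        return src << __builtin_ctz(lit);
      if (((lit + 1) & lit) == 0)                      // lit == 2^k - 1
        return (src << __builtin_ctz(lit + 1)) - src;  // reverse subtract
      return src * lit;                                // general fallback
    }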
@@ -1668,10 +1694,10 @@
rl_src = LoadValue(rl_src, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (cu_->instruction_set == kThumb2) {
- OpRegRegImm(kOpRsub, rl_result.reg.GetReg(), rl_src.reg.GetReg(), lit);
+ OpRegRegImm(kOpRsub, rl_result.reg, rl_src.reg, lit);
} else {
- OpRegReg(kOpNeg, rl_result.reg.GetReg(), rl_src.reg.GetReg());
- OpRegImm(kOpAdd, rl_result.reg.GetReg(), lit);
+ OpRegReg(kOpNeg, rl_result.reg, rl_src.reg);
+ OpRegImm(kOpAdd, rl_result.reg, lit);
}
StoreValue(rl_dest, rl_result);
return;
@@ -1746,7 +1772,7 @@
case Instruction::REM_INT_LIT8:
case Instruction::REM_INT_LIT16: {
if (lit == 0) {
- GenImmedCheck(kCondAl, 0, 0, kThrowDivZero);
+ GenImmedCheck(kCondAl, RegStorage::InvalidReg(), 0, kThrowDivZero);
return;
}
if ((opcode == Instruction::DIV_INT) ||
@@ -1764,7 +1790,7 @@
bool done = false;
if (cu_->instruction_set == kMips) {
rl_src = LoadValue(rl_src, kCoreReg);
- rl_result = GenDivRemLit(rl_dest, rl_src.reg.GetReg(), lit, is_div);
+ rl_result = GenDivRemLit(rl_dest, rl_src.reg, lit, is_div);
done = true;
} else if (cu_->instruction_set == kX86) {
rl_result = GenDivRemLit(rl_dest, rl_src, lit, is_div);
@@ -1774,7 +1800,7 @@
// Use ARM SDIV instruction for division. For remainder we also need to
// calculate using a MUL and subtract.
rl_src = LoadValue(rl_src, kCoreReg);
- rl_result = GenDivRemLit(rl_dest, rl_src.reg.GetReg(), lit, is_div);
+ rl_result = GenDivRemLit(rl_dest, rl_src.reg, lit, is_div);
done = true;
}
}
@@ -1800,9 +1826,9 @@
rl_result = EvalLoc(rl_dest, kCoreReg, true);
// Avoid shifts by literal 0 - no support in Thumb. Change to copy.
if (shift_op && (lit == 0)) {
- OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetReg());
+ OpRegCopy(rl_result.reg, rl_src.reg);
} else {
- OpRegRegImm(op, rl_result.reg.GetReg(), rl_src.reg.GetReg(), lit);
+ OpRegRegImm(op, rl_result.reg, rl_src.reg, lit);
}
StoreValue(rl_dest, rl_result);
}
@@ -1815,22 +1841,22 @@
bool call_out = false;
bool check_zero = false;
ThreadOffset func_offset(-1);
- int ret_reg = TargetReg(kRet0);
+ int ret_reg = TargetReg(kRet0).GetReg();
switch (opcode) {
case Instruction::NOT_LONG:
rl_src2 = LoadValueWide(rl_src2, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
// Check for destructive overlap
- if (rl_result.reg.GetReg() == rl_src2.reg.GetHighReg()) {
- int t_reg = AllocTemp();
- OpRegCopy(t_reg, rl_src2.reg.GetHighReg());
- OpRegReg(kOpMvn, rl_result.reg.GetReg(), rl_src2.reg.GetReg());
- OpRegReg(kOpMvn, rl_result.reg.GetHighReg(), t_reg);
+ if (rl_result.reg.GetLowReg() == rl_src2.reg.GetHighReg()) {
+ RegStorage t_reg = AllocTemp();
+ OpRegCopy(t_reg, rl_src2.reg.GetHigh());
+ OpRegReg(kOpMvn, rl_result.reg.GetLow(), rl_src2.reg.GetLow());
+ OpRegReg(kOpMvn, rl_result.reg.GetHigh(), t_reg);
FreeTemp(t_reg);
} else {
- OpRegReg(kOpMvn, rl_result.reg.GetReg(), rl_src2.reg.GetReg());
- OpRegReg(kOpMvn, rl_result.reg.GetHighReg(), rl_src2.reg.GetHighReg());
+ OpRegReg(kOpMvn, rl_result.reg.GetLow(), rl_src2.reg.GetLow());
+ OpRegReg(kOpMvn, rl_result.reg.GetHigh(), rl_src2.reg.GetHigh());
}
StoreValueWide(rl_dest, rl_result);
return;
@@ -1859,7 +1885,7 @@
return;
} else {
call_out = true;
- ret_reg = TargetReg(kRet0);
+ ret_reg = TargetReg(kRet0).GetReg();
func_offset = QUICK_ENTRYPOINT_OFFSET(pLmul);
}
break;
@@ -1867,7 +1893,7 @@
case Instruction::DIV_LONG_2ADDR:
call_out = true;
check_zero = true;
- ret_reg = TargetReg(kRet0);
+ ret_reg = TargetReg(kRet0).GetReg();
func_offset = QUICK_ENTRYPOINT_OFFSET(pLdiv);
break;
case Instruction::REM_LONG:
@@ -1876,7 +1902,7 @@
check_zero = true;
func_offset = QUICK_ENTRYPOINT_OFFSET(pLmod);
/* NOTE - for Arm, result is in kArg2/kArg3 instead of kRet0/kRet1 */
- ret_reg = (cu_->instruction_set == kThumb2) ? TargetReg(kArg2) : TargetReg(kRet0);
+ ret_reg = (cu_->instruction_set == kThumb2) ? TargetReg(kArg2).GetReg() : TargetReg(kRet0).GetReg();
break;
case Instruction::AND_LONG_2ADDR:
case Instruction::AND_LONG:
@@ -1916,17 +1942,19 @@
} else {
FlushAllRegs(); /* Send everything to home location */
if (check_zero) {
- LoadValueDirectWideFixed(rl_src2, TargetReg(kArg2), TargetReg(kArg3));
- int r_tgt = CallHelperSetup(func_offset);
- GenDivZeroCheck(TargetReg(kArg2), TargetReg(kArg3));
- LoadValueDirectWideFixed(rl_src1, TargetReg(kArg0), TargetReg(kArg1));
+ RegStorage r_tmp1 = RegStorage::MakeRegPair(TargetReg(kArg0), TargetReg(kArg1));
+ RegStorage r_tmp2 = RegStorage::MakeRegPair(TargetReg(kArg2), TargetReg(kArg3));
+ LoadValueDirectWideFixed(rl_src2, r_tmp2);
+ RegStorage r_tgt = CallHelperSetup(func_offset);
+ GenDivZeroCheck(RegStorage::MakeRegPair(TargetReg(kArg2), TargetReg(kArg3)));
+ LoadValueDirectWideFixed(rl_src1, r_tmp1);
// NOTE: callout here is not a safepoint
CallHelper(r_tgt, func_offset, false /* not safepoint */);
} else {
CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_src2, false);
}
// Adjust return regs in to handle case of rem returning kArg2/kArg3
- if (ret_reg == TargetReg(kRet0))
+ if (ret_reg == TargetReg(kRet0).GetReg())
rl_result = GetReturnWide(false);
else
rl_result = GetReturnWideAlt();
@@ -2019,7 +2047,7 @@
/* Generic code for generating a wide constant into a VR. */
void Mir2Lir::GenConstWide(RegLocation rl_dest, int64_t value) {
RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true);
- LoadConstantWide(rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), value);
+ LoadConstantWide(rl_result.reg, value);
StoreValueWide(rl_dest, rl_result);
}
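GenConstWide shows the payoff of the pair-aware API: LoadConstantWide now takes one 64-bit RegStorage rather than explicit low/high halves. A sketch of what a caller supplies (the register choice here is assumed for illustration, not taken from this patch):

    RegStorage r_wide = RegStorage::MakeRegPair(TargetReg(kRet0), TargetReg(kRet1));
    LoadConstantWide(r_wide, 0x123456789ABCDEF0LL);  // low/high split internally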
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index f3c5a34..a0242d5 100644
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -66,12 +66,13 @@
* has a memory call operation, part 1 is a NOP for x86. For other targets,
* load arguments between the two parts.
*/
-int Mir2Lir::CallHelperSetup(ThreadOffset helper_offset) {
- return (cu_->instruction_set == kX86) ? 0 : LoadHelper(helper_offset);
+RegStorage Mir2Lir::CallHelperSetup(ThreadOffset helper_offset) {
+ return (cu_->instruction_set == kX86) ? RegStorage::InvalidReg() : LoadHelper(helper_offset);
}
/* NOTE: if r_tgt is a temp, it will be freed following use */
-LIR* Mir2Lir::CallHelper(int r_tgt, ThreadOffset helper_offset, bool safepoint_pc, bool use_link) {
+LIR* Mir2Lir::CallHelper(RegStorage r_tgt, ThreadOffset helper_offset, bool safepoint_pc,
+ bool use_link) {
LIR* call_inst;
OpKind op = use_link ? kOpBlx : kOpBx;
if (cu_->instruction_set == kX86) {
@@ -87,14 +88,14 @@
}
void Mir2Lir::CallRuntimeHelperImm(ThreadOffset helper_offset, int arg0, bool safepoint_pc) {
- int r_tgt = CallHelperSetup(helper_offset);
+ RegStorage r_tgt = CallHelperSetup(helper_offset);
LoadConstant(TargetReg(kArg0), arg0);
ClobberCallerSave();
CallHelper(r_tgt, helper_offset, safepoint_pc);
}
-void Mir2Lir::CallRuntimeHelperReg(ThreadOffset helper_offset, int arg0, bool safepoint_pc) {
- int r_tgt = CallHelperSetup(helper_offset);
+void Mir2Lir::CallRuntimeHelperReg(ThreadOffset helper_offset, RegStorage arg0, bool safepoint_pc) {
+ RegStorage r_tgt = CallHelperSetup(helper_offset);
OpRegCopy(TargetReg(kArg0), arg0);
ClobberCallerSave();
CallHelper(r_tgt, helper_offset, safepoint_pc);
@@ -102,12 +103,12 @@
void Mir2Lir::CallRuntimeHelperRegLocation(ThreadOffset helper_offset, RegLocation arg0,
bool safepoint_pc) {
- int r_tgt = CallHelperSetup(helper_offset);
- if (arg0.wide) {
- LoadValueDirectWideFixed(arg0, arg0.fp ? TargetReg(kFArg0) : TargetReg(kArg0),
- arg0.fp ? TargetReg(kFArg1) : TargetReg(kArg1));
+ RegStorage r_tgt = CallHelperSetup(helper_offset);
+ if (arg0.wide == 0) {
+ LoadValueDirectFixed(arg0, TargetReg(kArg0));
} else {
- LoadValueDirectFixed(arg0, arg0.fp ? TargetReg(kFArg0) : TargetReg(kArg0));
+ RegStorage r_tmp = RegStorage::MakeRegPair(TargetReg(kArg0), TargetReg(kArg1));
+ LoadValueDirectWideFixed(arg0, r_tmp);
}
ClobberCallerSave();
CallHelper(r_tgt, helper_offset, safepoint_pc);
@@ -115,7 +116,7 @@
void Mir2Lir::CallRuntimeHelperImmImm(ThreadOffset helper_offset, int arg0, int arg1,
bool safepoint_pc) {
- int r_tgt = CallHelperSetup(helper_offset);
+ RegStorage r_tgt = CallHelperSetup(helper_offset);
LoadConstant(TargetReg(kArg0), arg0);
LoadConstant(TargetReg(kArg1), arg1);
ClobberCallerSave();
@@ -124,11 +125,12 @@
void Mir2Lir::CallRuntimeHelperImmRegLocation(ThreadOffset helper_offset, int arg0,
RegLocation arg1, bool safepoint_pc) {
- int r_tgt = CallHelperSetup(helper_offset);
+ RegStorage r_tgt = CallHelperSetup(helper_offset);
if (arg1.wide == 0) {
LoadValueDirectFixed(arg1, TargetReg(kArg1));
} else {
- LoadValueDirectWideFixed(arg1, TargetReg(kArg1), TargetReg(kArg2));
+ RegStorage r_tmp = RegStorage::MakeRegPair(TargetReg(kArg1), TargetReg(kArg2));
+ LoadValueDirectWideFixed(arg1, r_tmp);
}
LoadConstant(TargetReg(kArg0), arg0);
ClobberCallerSave();
@@ -137,25 +139,25 @@
void Mir2Lir::CallRuntimeHelperRegLocationImm(ThreadOffset helper_offset, RegLocation arg0, int arg1,
bool safepoint_pc) {
- int r_tgt = CallHelperSetup(helper_offset);
+ RegStorage r_tgt = CallHelperSetup(helper_offset);
LoadValueDirectFixed(arg0, TargetReg(kArg0));
LoadConstant(TargetReg(kArg1), arg1);
ClobberCallerSave();
CallHelper(r_tgt, helper_offset, safepoint_pc);
}
-void Mir2Lir::CallRuntimeHelperImmReg(ThreadOffset helper_offset, int arg0, int arg1,
+void Mir2Lir::CallRuntimeHelperImmReg(ThreadOffset helper_offset, int arg0, RegStorage arg1,
bool safepoint_pc) {
- int r_tgt = CallHelperSetup(helper_offset);
+ RegStorage r_tgt = CallHelperSetup(helper_offset);
OpRegCopy(TargetReg(kArg1), arg1);
LoadConstant(TargetReg(kArg0), arg0);
ClobberCallerSave();
CallHelper(r_tgt, helper_offset, safepoint_pc);
}
-void Mir2Lir::CallRuntimeHelperRegImm(ThreadOffset helper_offset, int arg0, int arg1,
+void Mir2Lir::CallRuntimeHelperRegImm(ThreadOffset helper_offset, RegStorage arg0, int arg1,
bool safepoint_pc) {
- int r_tgt = CallHelperSetup(helper_offset);
+ RegStorage r_tgt = CallHelperSetup(helper_offset);
OpRegCopy(TargetReg(kArg0), arg0);
LoadConstant(TargetReg(kArg1), arg1);
ClobberCallerSave();
@@ -163,16 +165,17 @@
}
void Mir2Lir::CallRuntimeHelperImmMethod(ThreadOffset helper_offset, int arg0, bool safepoint_pc) {
- int r_tgt = CallHelperSetup(helper_offset);
+ RegStorage r_tgt = CallHelperSetup(helper_offset);
LoadCurrMethodDirect(TargetReg(kArg1));
LoadConstant(TargetReg(kArg0), arg0);
ClobberCallerSave();
CallHelper(r_tgt, helper_offset, safepoint_pc);
}
-void Mir2Lir::CallRuntimeHelperRegMethod(ThreadOffset helper_offset, int arg0, bool safepoint_pc) {
- int r_tgt = CallHelperSetup(helper_offset);
- DCHECK_NE(TargetReg(kArg1), arg0);
+void Mir2Lir::CallRuntimeHelperRegMethod(ThreadOffset helper_offset, RegStorage arg0,
+ bool safepoint_pc) {
+ RegStorage r_tgt = CallHelperSetup(helper_offset);
+ DCHECK_NE(TargetReg(kArg1).GetReg(), arg0.GetReg());
if (TargetReg(kArg0) != arg0) {
OpRegCopy(TargetReg(kArg0), arg0);
}
@@ -181,10 +184,10 @@
CallHelper(r_tgt, helper_offset, safepoint_pc);
}
-void Mir2Lir::CallRuntimeHelperRegMethodRegLocation(ThreadOffset helper_offset, int arg0,
+void Mir2Lir::CallRuntimeHelperRegMethodRegLocation(ThreadOffset helper_offset, RegStorage arg0,
RegLocation arg2, bool safepoint_pc) {
- int r_tgt = CallHelperSetup(helper_offset);
- DCHECK_NE(TargetReg(kArg1), arg0);
+ RegStorage r_tgt = CallHelperSetup(helper_offset);
+ DCHECK_NE(TargetReg(kArg1).GetReg(), arg0.GetReg());
if (TargetReg(kArg0) != arg0) {
OpRegCopy(TargetReg(kArg0), arg0);
}
@@ -196,7 +199,7 @@
void Mir2Lir::CallRuntimeHelperRegLocationRegLocation(ThreadOffset helper_offset, RegLocation arg0,
RegLocation arg1, bool safepoint_pc) {
- int r_tgt = CallHelperSetup(helper_offset);
+ RegStorage r_tgt = CallHelperSetup(helper_offset);
if (arg0.wide == 0) {
LoadValueDirectFixed(arg0, arg0.fp ? TargetReg(kFArg0) : TargetReg(kArg0));
if (arg1.wide == 0) {
@@ -207,37 +210,56 @@
}
} else {
if (cu_->instruction_set == kMips) {
- LoadValueDirectWideFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg1), arg1.fp ? TargetReg(kFArg3) : TargetReg(kArg2));
+ RegStorage r_tmp;
+ if (arg1.fp) {
+ r_tmp = RegStorage::MakeRegPair(TargetReg(kFArg2), TargetReg(kFArg3));
+ } else {
+ r_tmp = RegStorage::MakeRegPair(TargetReg(kArg1), TargetReg(kArg2));
+ }
+ LoadValueDirectWideFixed(arg1, r_tmp);
} else {
- LoadValueDirectWideFixed(arg1, TargetReg(kArg1), TargetReg(kArg2));
+ RegStorage r_tmp = RegStorage::MakeRegPair(TargetReg(kArg1), TargetReg(kArg2));
+ LoadValueDirectWideFixed(arg1, r_tmp);
}
}
} else {
- LoadValueDirectWideFixed(arg0, arg0.fp ? TargetReg(kFArg0) : TargetReg(kArg0), arg0.fp ? TargetReg(kFArg1) : TargetReg(kArg1));
+ RegStorage r_tmp;
+ if (arg0.fp) {
+ r_tmp = RegStorage::MakeRegPair(TargetReg(kFArg0), TargetReg(kFArg1));
+ } else {
+ r_tmp = RegStorage::MakeRegPair(TargetReg(kArg0), TargetReg(kArg1));
+ }
+ LoadValueDirectWideFixed(arg0, r_tmp);
if (arg1.wide == 0) {
LoadValueDirectFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg2));
} else {
- LoadValueDirectWideFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg2), arg1.fp ? TargetReg(kFArg3) : TargetReg(kArg3));
+ RegStorage r_tmp;
+ if (arg1.fp) {
+ r_tmp = RegStorage::MakeRegPair(TargetReg(kFArg2), TargetReg(kFArg3));
+ } else {
+ r_tmp = RegStorage::MakeRegPair(TargetReg(kArg2), TargetReg(kArg3));
+ }
+ LoadValueDirectWideFixed(arg1, r_tmp);
}
}
ClobberCallerSave();
CallHelper(r_tgt, helper_offset, safepoint_pc);
}
-void Mir2Lir::CallRuntimeHelperRegReg(ThreadOffset helper_offset, int arg0, int arg1,
- bool safepoint_pc) {
- int r_tgt = CallHelperSetup(helper_offset);
- DCHECK_NE(TargetReg(kArg0), arg1); // check copy into arg0 won't clobber arg1
+void Mir2Lir::CallRuntimeHelperRegReg(ThreadOffset helper_offset, RegStorage arg0,
+ RegStorage arg1, bool safepoint_pc) {
+ RegStorage r_tgt = CallHelperSetup(helper_offset);
+ DCHECK_NE(TargetReg(kArg0).GetReg(), arg1.GetReg()); // check copy into arg0 won't clobber arg1
OpRegCopy(TargetReg(kArg0), arg0);
OpRegCopy(TargetReg(kArg1), arg1);
ClobberCallerSave();
CallHelper(r_tgt, helper_offset, safepoint_pc);
}
-void Mir2Lir::CallRuntimeHelperRegRegImm(ThreadOffset helper_offset, int arg0, int arg1,
- int arg2, bool safepoint_pc) {
- int r_tgt = CallHelperSetup(helper_offset);
- DCHECK_NE(TargetReg(kArg0), arg1); // check copy into arg0 won't clobber arg1
+void Mir2Lir::CallRuntimeHelperRegRegImm(ThreadOffset helper_offset, RegStorage arg0,
+ RegStorage arg1, int arg2, bool safepoint_pc) {
+ RegStorage r_tgt = CallHelperSetup(helper_offset);
+ DCHECK_NE(TargetReg(kArg0).GetReg(), arg1.GetReg()); // check copy into arg0 won't clobber arg1
OpRegCopy(TargetReg(kArg0), arg0);
OpRegCopy(TargetReg(kArg1), arg1);
LoadConstant(TargetReg(kArg2), arg2);
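
Aside: the DCHECK_NE lines above guard a classic marshaling hazard; copying into kArg0 first destroys arg1 if arg1 currently lives in kArg0. A tiny standalone illustration of the precondition (register indices are arbitrary):

#include <cassert>

// regs[0..3] model the fixed argument registers kArg0..kArg3. Writing
// regs[0] first is only safe when the second source does not itself live
// in regs[0]; that is what DCHECK_NE asserts before the two OpRegCopy calls.
void MarshalTwo(int regs[4], int src0, int src1) {
  assert(src1 != 0);      // mirror of DCHECK_NE(TargetReg(kArg0), arg1)
  regs[0] = regs[src0];   // if src1 were 0, this write would clobber it
  regs[1] = regs[src1];
}

int main() {
  int regs[4] = {10, 11, 12, 13};
  MarshalTwo(regs, 2, 3);
  assert(regs[0] == 12 && regs[1] == 13);
}
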
@@ -247,7 +269,7 @@
void Mir2Lir::CallRuntimeHelperImmMethodRegLocation(ThreadOffset helper_offset,
int arg0, RegLocation arg2, bool safepoint_pc) {
- int r_tgt = CallHelperSetup(helper_offset);
+ RegStorage r_tgt = CallHelperSetup(helper_offset);
LoadValueDirectFixed(arg2, TargetReg(kArg2));
LoadCurrMethodDirect(TargetReg(kArg1));
LoadConstant(TargetReg(kArg0), arg0);
@@ -257,7 +279,7 @@
void Mir2Lir::CallRuntimeHelperImmMethodImm(ThreadOffset helper_offset, int arg0,
int arg2, bool safepoint_pc) {
- int r_tgt = CallHelperSetup(helper_offset);
+ RegStorage r_tgt = CallHelperSetup(helper_offset);
LoadCurrMethodDirect(TargetReg(kArg1));
LoadConstant(TargetReg(kArg2), arg2);
LoadConstant(TargetReg(kArg0), arg0);
@@ -268,13 +290,14 @@
void Mir2Lir::CallRuntimeHelperImmRegLocationRegLocation(ThreadOffset helper_offset,
int arg0, RegLocation arg1,
RegLocation arg2, bool safepoint_pc) {
- int r_tgt = CallHelperSetup(helper_offset);
+ RegStorage r_tgt = CallHelperSetup(helper_offset);
DCHECK_EQ(arg1.wide, 0U);
LoadValueDirectFixed(arg1, TargetReg(kArg1));
if (arg2.wide == 0) {
LoadValueDirectFixed(arg2, TargetReg(kArg2));
} else {
- LoadValueDirectWideFixed(arg2, TargetReg(kArg2), TargetReg(kArg3));
+ RegStorage r_tmp = RegStorage::MakeRegPair(TargetReg(kArg2), TargetReg(kArg3));
+ LoadValueDirectWideFixed(arg2, r_tmp);
}
LoadConstant(TargetReg(kArg0), arg0);
ClobberCallerSave();
@@ -285,7 +308,7 @@
RegLocation arg0, RegLocation arg1,
RegLocation arg2,
bool safepoint_pc) {
- int r_tgt = CallHelperSetup(helper_offset);
+ RegStorage r_tgt = CallHelperSetup(helper_offset);
DCHECK_EQ(arg0.wide, 0U);
LoadValueDirectFixed(arg0, TargetReg(kArg0));
DCHECK_EQ(arg1.wide, 0U);
@@ -312,9 +335,9 @@
*/
RegLocation rl_src = rl_method;
rl_src.location = kLocPhysReg;
- rl_src.reg = RegStorage(RegStorage::k32BitSolo, TargetReg(kArg0));
+ rl_src.reg = TargetReg(kArg0);
rl_src.home = false;
- MarkLive(rl_src.reg.GetReg(), rl_src.s_reg_low);
+ MarkLive(rl_src.reg, rl_src.s_reg_low);
StoreValue(rl_method, rl_src);
// If Method* has been promoted, explicitly flush
if (rl_method.location == kLocPhysReg) {
@@ -340,17 +363,17 @@
*/
for (int i = 0; i < cu_->num_ins; i++) {
PromotionMap* v_map = &promotion_map_[start_vreg + i];
- int reg = GetArgMappingToPhysicalReg(i);
+ RegStorage reg = GetArgMappingToPhysicalReg(i);
- if (reg != INVALID_REG) {
+ if (reg.Valid()) {
// If arriving in register
bool need_flush = true;
RegLocation* t_loc = &ArgLocs[i];
if ((v_map->core_location == kLocPhysReg) && !t_loc->fp) {
- OpRegCopy(v_map->core_reg, reg);
+ OpRegCopy(RegStorage::Solo32(v_map->core_reg), reg);
need_flush = false;
} else if ((v_map->fp_location == kLocPhysReg) && t_loc->fp) {
- OpRegCopy(v_map->FpReg, reg);
+ OpRegCopy(RegStorage::Solo32(v_map->FpReg), reg);
need_flush = false;
} else {
need_flush = true;
@@ -386,11 +409,10 @@
// If arriving in frame & promoted
if (v_map->core_location == kLocPhysReg) {
LoadWordDisp(TargetReg(kSp), SRegOffset(start_vreg + i),
- v_map->core_reg);
+ RegStorage::Solo32(v_map->core_reg));
}
if (v_map->fp_location == kLocPhysReg) {
- LoadWordDisp(TargetReg(kSp), SRegOffset(start_vreg + i),
- v_map->FpReg);
+ LoadWordDisp(TargetReg(kSp), SRegOffset(start_vreg + i), RegStorage::Solo32(v_map->FpReg));
}
}
}
@@ -433,7 +455,8 @@
break;
case 1: // Get method->dex_cache_resolved_methods_
cg->LoadWordDisp(cg->TargetReg(kArg0),
- mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(), cg->TargetReg(kArg0));
+ mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(),
+ cg->TargetReg(kArg0));
// Set up direct code if known.
if (direct_code != 0) {
if (direct_code != static_cast<unsigned int>(-1)) {
@@ -448,8 +471,7 @@
CHECK_EQ(cu->dex_file, target_method.dex_file);
cg->LoadWordDisp(cg->TargetReg(kArg0),
mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
- (target_method.dex_method_index * 4),
- cg-> TargetReg(kArg0));
+ (target_method.dex_method_index * 4), cg->TargetReg(kArg0));
break;
case 3: // Grab the code from the method*
if (cu->instruction_set != kX86) {
@@ -643,8 +665,8 @@
const MethodReference& target_method,
uint32_t vtable_idx, uintptr_t direct_code,
uintptr_t direct_method, InvokeType type, bool skip_this) {
- int last_arg_reg = TargetReg(kArg3);
- int next_reg = TargetReg(kArg1);
+ int last_arg_reg = TargetReg(kArg3).GetReg();
+ int next_reg = TargetReg(kArg1).GetReg();
int next_arg = 0;
if (skip_this) {
next_reg++;
@@ -653,16 +675,17 @@
for (; (next_reg <= last_arg_reg) && (next_arg < info->num_arg_words); next_reg++) {
RegLocation rl_arg = info->args[next_arg++];
rl_arg = UpdateRawLoc(rl_arg);
- if (rl_arg.wide && (next_reg <= TargetReg(kArg2))) {
- LoadValueDirectWideFixed(rl_arg, next_reg, next_reg + 1);
+ if (rl_arg.wide && (next_reg <= TargetReg(kArg2).GetReg())) {
+ RegStorage r_tmp(RegStorage::k64BitPair, next_reg, next_reg + 1);
+ LoadValueDirectWideFixed(rl_arg, r_tmp);
next_reg++;
next_arg++;
} else {
if (rl_arg.wide) {
- rl_arg.wide = false;
+ rl_arg = NarrowRegLoc(rl_arg);
rl_arg.is_const = false;
}
- LoadValueDirectFixed(rl_arg, next_reg);
+ LoadValueDirectFixed(rl_arg, RegStorage::Solo32(next_reg));
}
call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
direct_code, direct_method, type);
@@ -698,13 +721,12 @@
RegLocation rl_use0 = info->args[0];
RegLocation rl_use1 = info->args[1];
RegLocation rl_use2 = info->args[2];
- if (((!rl_use0.wide && !rl_use1.wide) || rl_use0.wide) &&
- rl_use2.wide) {
- int reg = -1;
+ if (((!rl_use0.wide && !rl_use1.wide) || rl_use0.wide) && rl_use2.wide) {
+ RegStorage reg;
// Wide spans, we need the 2nd half of uses[2].
rl_arg = UpdateLocWide(rl_use2);
if (rl_arg.location == kLocPhysReg) {
- reg = rl_arg.reg.GetHighReg();
+ reg = rl_arg.reg.GetHigh();
} else {
// kArg2 & kArg3 can safely be used here
reg = TargetReg(kArg3);
@@ -719,20 +741,22 @@
}
// Loop through the rest
while (next_use < info->num_arg_words) {
- int low_reg;
- int high_reg = -1;
+ RegStorage low_reg;
+ RegStorage high_reg;
rl_arg = info->args[next_use];
rl_arg = UpdateRawLoc(rl_arg);
if (rl_arg.location == kLocPhysReg) {
- low_reg = rl_arg.reg.GetReg();
if (rl_arg.wide) {
- high_reg = rl_arg.reg.GetHighReg();
+ low_reg = rl_arg.reg.GetLow();
+ high_reg = rl_arg.reg.GetHigh();
+ } else {
+ low_reg = rl_arg.reg;
}
} else {
low_reg = TargetReg(kArg2);
if (rl_arg.wide) {
high_reg = TargetReg(kArg3);
- LoadValueDirectWideFixed(rl_arg, low_reg, high_reg);
+ LoadValueDirectWideFixed(rl_arg, RegStorage::MakeRegPair(low_reg, high_reg));
} else {
LoadValueDirectFixed(rl_arg, low_reg);
}
@@ -741,7 +765,7 @@
}
int outs_offset = (next_use + 1) * 4;
if (rl_arg.wide) {
- StoreBaseDispWide(TargetReg(kSp), outs_offset, low_reg, high_reg);
+ StoreBaseDispWide(TargetReg(kSp), outs_offset, RegStorage::MakeRegPair(low_reg, high_reg));
next_use += 2;
} else {
StoreWordDisp(TargetReg(kSp), outs_offset, low_reg);
@@ -799,15 +823,13 @@
if (loc.wide) {
loc = UpdateLocWide(loc);
if ((next_arg >= 2) && (loc.location == kLocPhysReg)) {
- StoreBaseDispWide(TargetReg(kSp), SRegOffset(loc.s_reg_low),
- loc.reg.GetReg(), loc.reg.GetHighReg());
+ StoreBaseDispWide(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg);
}
next_arg += 2;
} else {
loc = UpdateLoc(loc);
if ((next_arg >= 3) && (loc.location == kLocPhysReg)) {
- StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low),
- loc.reg.GetReg(), kWord);
+ StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, kWord);
}
next_arg++;
}
@@ -866,8 +888,8 @@
// Allocate a free xmm temp. Since we are working through the calling sequence,
// we expect to have an xmm temporary available.
- int temp = AllocTempDouble();
- CHECK_GT(temp, 0);
+ RegStorage temp = AllocTempDouble();
+ CHECK_GT(temp.GetLowReg(), 0);
LIR* ld1 = nullptr;
LIR* ld2 = nullptr;
@@ -888,7 +910,8 @@
ld1 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset, kMovA128FP);
} else if (src_is_8b_aligned) {
ld1 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset, kMovLo128FP);
- ld2 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset + (bytes_to_move >> 1), kMovHi128FP);
+ ld2 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset + (bytes_to_move >> 1),
+ kMovHi128FP);
} else {
ld1 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset, kMovU128FP);
}
@@ -897,7 +920,8 @@
st1 = OpMovMemReg(TargetReg(kSp), current_dest_offset, temp, kMovA128FP);
} else if (dest_is_8b_aligned) {
st1 = OpMovMemReg(TargetReg(kSp), current_dest_offset, temp, kMovLo128FP);
- st2 = OpMovMemReg(TargetReg(kSp), current_dest_offset + (bytes_to_move >> 1), temp, kMovHi128FP);
+ st2 = OpMovMemReg(TargetReg(kSp), current_dest_offset + (bytes_to_move >> 1),
+ temp, kMovHi128FP);
} else {
st1 = OpMovMemReg(TargetReg(kSp), current_dest_offset, temp, kMovU128FP);
}
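
Aside: the ld1/ld2 and st1/st2 selection above is a three-way alignment dispatch: one aligned 128-bit move when the stack offset is 16-byte aligned, a low/high pair of 64-bit moves when only 8-byte aligned, otherwise one unaligned move. The same decision in freestanding form, assuming the three kinds behave like SSE movaps, movlps+movhps, and movups:

#include <cassert>
#include <cstdint>

enum MoveKind { kAligned128, kLoHi64Pair, kUnaligned128 };

// Pick the cheapest way to move 16 bytes at a given stack offset.
MoveKind Pick128BitMove(uint32_t offset) {
  if ((offset & 0xF) == 0) return kAligned128;   // e.g. movaps
  if ((offset & 0x7) == 0) return kLoHi64Pair;   // e.g. movlps + movhps
  return kUnaligned128;                          // e.g. movups
}

int main() {
  assert(Pick128BitMove(32) == kAligned128);
  assert(Pick128BitMove(40) == kLoHi64Pair);
  assert(Pick128BitMove(36) == kUnaligned128);
}
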
@@ -928,14 +952,16 @@
}
// Free the temporary used for the data movement.
- FreeTemp(temp);
+ // CLEANUP: temp is currently a bogus pair; eliminate the extra free when updated.
+ FreeTemp(temp.GetLow());
+ FreeTemp(temp.GetHigh());
} else {
// Moving 32-bits via general purpose register.
bytes_to_move = sizeof(uint32_t);
// Instead of allocating a new temp, simply reuse one of the registers being used
// for argument passing.
- int temp = TargetReg(kArg3);
+ RegStorage temp = TargetReg(kArg3);
// Now load the argument VR and store to the outs.
LoadWordDisp(TargetReg(kSp), current_src_offset, temp);
@@ -1007,26 +1033,26 @@
if (!(cu_->instruction_set == kX86 && rl_idx.is_const)) {
rl_idx = LoadValue(rl_idx, kCoreReg);
}
- int reg_max;
- GenNullCheck(rl_obj.reg.GetReg(), info->opt_flags);
+ RegStorage reg_max;
+ GenNullCheck(rl_obj.reg, info->opt_flags);
bool range_check = (!(info->opt_flags & MIR_IGNORE_RANGE_CHECK));
LIR* range_check_branch = nullptr;
- int reg_off = INVALID_REG;
- int reg_ptr = INVALID_REG;
+ RegStorage reg_off;
+ RegStorage reg_ptr;
if (cu_->instruction_set != kX86) {
reg_off = AllocTemp();
reg_ptr = AllocTemp();
if (range_check) {
reg_max = AllocTemp();
- LoadWordDisp(rl_obj.reg.GetReg(), count_offset, reg_max);
+ LoadWordDisp(rl_obj.reg, count_offset, reg_max);
MarkPossibleNullPointerException(info->opt_flags);
}
- LoadWordDisp(rl_obj.reg.GetReg(), offset_offset, reg_off);
+ LoadWordDisp(rl_obj.reg, offset_offset, reg_off);
MarkPossibleNullPointerException(info->opt_flags);
- LoadWordDisp(rl_obj.reg.GetReg(), value_offset, reg_ptr);
+ LoadWordDisp(rl_obj.reg, value_offset, reg_ptr);
if (range_check) {
// Set up a launch pad to allow retry in case of bounds violation.
- OpRegReg(kOpCmp, rl_idx.reg.GetReg(), reg_max);
+ OpRegReg(kOpCmp, rl_idx.reg, reg_max);
FreeTemp(reg_max);
range_check_branch = OpCondBranch(kCondUge, nullptr);
}
@@ -1037,34 +1063,34 @@
// Set up a launch pad to allow retry in case of bounds violation.
if (rl_idx.is_const) {
range_check_branch = OpCmpMemImmBranch(
- kCondUlt, INVALID_REG, rl_obj.reg.GetReg(), count_offset,
+ kCondUlt, RegStorage::InvalidReg(), rl_obj.reg, count_offset,
mir_graph_->ConstantValue(rl_idx.orig_sreg), nullptr);
} else {
- OpRegMem(kOpCmp, rl_idx.reg.GetReg(), rl_obj.reg.GetReg(), count_offset);
+ OpRegMem(kOpCmp, rl_idx.reg, rl_obj.reg, count_offset);
range_check_branch = OpCondBranch(kCondUge, nullptr);
}
}
reg_off = AllocTemp();
reg_ptr = AllocTemp();
- LoadWordDisp(rl_obj.reg.GetReg(), offset_offset, reg_off);
- LoadWordDisp(rl_obj.reg.GetReg(), value_offset, reg_ptr);
+ LoadWordDisp(rl_obj.reg, offset_offset, reg_off);
+ LoadWordDisp(rl_obj.reg, value_offset, reg_ptr);
}
if (rl_idx.is_const) {
OpRegImm(kOpAdd, reg_off, mir_graph_->ConstantValue(rl_idx.orig_sreg));
} else {
- OpRegReg(kOpAdd, reg_off, rl_idx.reg.GetReg());
+ OpRegReg(kOpAdd, reg_off, rl_idx.reg);
}
- FreeTemp(rl_obj.reg.GetReg());
+ FreeTemp(rl_obj.reg);
if (rl_idx.location == kLocPhysReg) {
- FreeTemp(rl_idx.reg.GetReg());
+ FreeTemp(rl_idx.reg);
}
RegLocation rl_dest = InlineTarget(info);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (cu_->instruction_set != kX86) {
- LoadBaseIndexed(reg_ptr, reg_off, rl_result.reg.GetReg(), 1, kUnsignedHalf);
+ LoadBaseIndexed(reg_ptr, reg_off, rl_result.reg, 1, kUnsignedHalf);
} else {
- LoadBaseIndexedDisp(reg_ptr, reg_off, 1, data_offset, rl_result.reg.GetReg(),
- INVALID_REG, kUnsignedHalf, INVALID_SREG);
+ LoadBaseIndexedDisp(reg_ptr, reg_off, 1, data_offset, rl_result.reg,
+ RegStorage::InvalidReg(), kUnsignedHalf, INVALID_SREG);
}
FreeTemp(reg_off);
FreeTemp(reg_ptr);
@@ -1088,20 +1114,19 @@
rl_obj = LoadValue(rl_obj, kCoreReg);
RegLocation rl_dest = InlineTarget(info);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- GenNullCheck(rl_obj.reg.GetReg(), info->opt_flags);
- LoadWordDisp(rl_obj.reg.GetReg(), mirror::String::CountOffset().Int32Value(),
- rl_result.reg.GetReg());
+ GenNullCheck(rl_obj.reg, info->opt_flags);
+ LoadWordDisp(rl_obj.reg, mirror::String::CountOffset().Int32Value(), rl_result.reg);
MarkPossibleNullPointerException(info->opt_flags);
if (is_empty) {
// dst = (dst == 0);
if (cu_->instruction_set == kThumb2) {
- int t_reg = AllocTemp();
- OpRegReg(kOpNeg, t_reg, rl_result.reg.GetReg());
- OpRegRegReg(kOpAdc, rl_result.reg.GetReg(), rl_result.reg.GetReg(), t_reg);
+ RegStorage t_reg = AllocTemp();
+ OpRegReg(kOpNeg, t_reg, rl_result.reg);
+ OpRegRegReg(kOpAdc, rl_result.reg, rl_result.reg, t_reg);
} else {
DCHECK_EQ(cu_->instruction_set, kX86);
- OpRegImm(kOpSub, rl_result.reg.GetReg(), 1);
- OpRegImm(kOpLsr, rl_result.reg.GetReg(), 31);
+ OpRegImm(kOpSub, rl_result.reg, 1);
+ OpRegImm(kOpLsr, rl_result.reg, 31);
}
}
StoreValue(rl_dest, rl_result);
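
Aside: both is_empty sequences compute (length == 0) without a branch. On Thumb2, the negate sets carry exactly when the value is zero, so adding the value to its negation with carry yields 0 or 1; on x86, (x - 1) >> 31 exposes the borrow bit, valid because a string length fits in 31 bits. A checked transliteration:

#include <cassert>
#include <cstdint>

// x86 flavor: (x - 1) >> 31 is 1 only when the subtraction wraps, i.e.
// x == 0. Requires x < 2^31, which holds for string lengths.
uint32_t IsZeroX86(uint32_t x) { return (x - 1u) >> 31; }

// Thumb2 flavor: after t = 0 - x, the carry flag is set iff x == 0
// (assuming the negate sets flags as RSBS does); adc then computes
// x + (-x) + carry, which is exactly (x == 0).
uint32_t IsZeroThumb2(uint32_t x) {
  uint32_t t = 0u - x;
  uint32_t carry = (x == 0) ? 1u : 0u;  // models the flag set by the negate
  return x + t + carry;
}

int main() {
  for (uint32_t x : {0u, 1u, 2u, 0x7FFFFFFFu}) {
    assert(IsZeroX86(x) == (x == 0 ? 1u : 0u));
    assert(IsZeroThumb2(x) == (x == 0 ? 1u : 0u));
  }
}
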
@@ -1118,15 +1143,15 @@
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (size == kLong) {
RegLocation rl_i = LoadValueWide(rl_src_i, kCoreReg);
- int r_i_low = rl_i.reg.GetReg();
- if (rl_i.reg.GetReg() == rl_result.reg.GetReg()) {
+ RegStorage r_i_low = rl_i.reg.GetLow();
+ if (rl_i.reg.GetLowReg() == rl_result.reg.GetLowReg()) {
// The first REV will clobber rl_result.reg.GetLowReg(); save the value in a temp for the second REV.
r_i_low = AllocTemp();
- OpRegCopy(r_i_low, rl_i.reg.GetReg());
+ OpRegCopy(r_i_low, rl_i.reg);
}
- OpRegReg(kOpRev, rl_result.reg.GetReg(), rl_i.reg.GetHighReg());
- OpRegReg(kOpRev, rl_result.reg.GetHighReg(), r_i_low);
- if (rl_i.reg.GetReg() == rl_result.reg.GetReg()) {
+ OpRegReg(kOpRev, rl_result.reg.GetLow(), rl_i.reg.GetHigh());
+ OpRegReg(kOpRev, rl_result.reg.GetHigh(), r_i_low);
+ if (rl_i.reg.GetLowReg() == rl_result.reg.GetLowReg()) {
FreeTemp(r_i_low);
}
StoreValueWide(rl_dest, rl_result);
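
Aside: the kLong path builds a 64-bit byte swap from two 32-bit REVs whose results land in the opposite halves, with a temp only when the low registers alias. The same structure in portable form; __builtin_bswap32/64 are GCC/Clang builtins standing in for REV:

#include <cassert>
#include <cstdint>

uint32_t Rev32(uint32_t x) {  // byte-reverse one 32-bit word, like ARM REV
  return __builtin_bswap32(x);
}

// bswap64 built from two 32-bit reversals with the halves swapped,
// matching the two OpRegReg(kOpRev, ...) calls above.
uint64_t Rev64(uint32_t lo, uint32_t hi) {
  uint32_t result_lo = Rev32(hi);   // result.low  = REV(src.high)
  uint32_t result_hi = Rev32(lo);   // result.high = REV(src.low)
  return (static_cast<uint64_t>(result_hi) << 32) | result_lo;
}

int main() {
  uint64_t v = 0x0102030405060708ULL;
  assert(Rev64(static_cast<uint32_t>(v), static_cast<uint32_t>(v >> 32)) ==
         __builtin_bswap64(v));
}
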
@@ -1134,7 +1159,7 @@
DCHECK(size == kWord || size == kSignedHalf);
OpKind op = (size == kWord) ? kOpRev : kOpRevsh;
RegLocation rl_i = LoadValue(rl_src_i, kCoreReg);
- OpRegReg(op, rl_result.reg.GetReg(), rl_i.reg.GetReg());
+ OpRegReg(op, rl_result.reg, rl_i.reg);
StoreValue(rl_dest, rl_result);
}
return true;
@@ -1149,11 +1174,11 @@
rl_src = LoadValue(rl_src, kCoreReg);
RegLocation rl_dest = InlineTarget(info);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- int sign_reg = AllocTemp();
+ RegStorage sign_reg = AllocTemp();
// abs(x): y = x >> 31 (arithmetic), result = (x + y) ^ y.
- OpRegRegImm(kOpAsr, sign_reg, rl_src.reg.GetReg(), 31);
- OpRegRegReg(kOpAdd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), sign_reg);
- OpRegReg(kOpXor, rl_result.reg.GetReg(), sign_reg);
+ OpRegRegImm(kOpAsr, sign_reg, rl_src.reg, 31);
+ OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, sign_reg);
+ OpRegReg(kOpXor, rl_result.reg, sign_reg);
StoreValue(rl_dest, rl_result);
return true;
}
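
Aside: this is the standard branch-free abs identity. y = x >> 31 (arithmetic shift) is 0 for non-negative x and all ones for negative x, so (x + y) ^ y leaves x unchanged or computes ~(x - 1) = -x:

#include <cassert>
#include <cstdint>

// Matches the kOpAsr/kOpAdd/kOpXor sequence above. Assumes the usual
// arithmetic right shift of a negative int (true on mainstream compilers).
int32_t AbsInt(int32_t x) {
  int32_t y = x >> 31;  // 0 if x >= 0, -1 (all ones) if x < 0
  return (x + y) ^ y;   // x unchanged, or (x - 1) ^ ~0 == -x
}

int main() {
  assert(AbsInt(5) == 5 && AbsInt(-5) == 5 && AbsInt(0) == 0);
}
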
@@ -1169,27 +1194,25 @@
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
// If on x86 or if we would clobber a register needed later, just copy the source first.
- if (cu_->instruction_set == kX86 || rl_result.reg.GetReg() == rl_src.reg.GetHighReg()) {
- OpRegCopyWide(rl_result.reg.GetReg(), rl_result.reg.GetHighReg(),
- rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
- if (rl_result.reg.GetReg() != rl_src.reg.GetReg() &&
- rl_result.reg.GetReg() != rl_src.reg.GetHighReg() &&
- rl_result.reg.GetHighReg() != rl_src.reg.GetReg() &&
+ if (cu_->instruction_set == kX86 || rl_result.reg.GetLowReg() == rl_src.reg.GetHighReg()) {
+ OpRegCopyWide(rl_result.reg, rl_src.reg);
+ if (rl_result.reg.GetLowReg() != rl_src.reg.GetLowReg() &&
+ rl_result.reg.GetLowReg() != rl_src.reg.GetHighReg() &&
+ rl_result.reg.GetHighReg() != rl_src.reg.GetLowReg() &&
rl_result.reg.GetHighReg() != rl_src.reg.GetHighReg()) {
// Reuse source registers to avoid running out of temps.
- FreeTemp(rl_src.reg.GetReg());
- FreeTemp(rl_src.reg.GetHighReg());
+ FreeTemp(rl_src.reg);
}
rl_src = rl_result;
}
// abs(x): y = x >> 31 (arithmetic), result = (x + y) ^ y.
- int sign_reg = AllocTemp();
- OpRegRegImm(kOpAsr, sign_reg, rl_src.reg.GetHighReg(), 31);
- OpRegRegReg(kOpAdd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), sign_reg);
- OpRegRegReg(kOpAdc, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg(), sign_reg);
- OpRegReg(kOpXor, rl_result.reg.GetReg(), sign_reg);
- OpRegReg(kOpXor, rl_result.reg.GetHighReg(), sign_reg);
+ RegStorage sign_reg = AllocTemp();
+ OpRegRegImm(kOpAsr, sign_reg, rl_src.reg.GetHigh(), 31);
+ OpRegRegReg(kOpAdd, rl_result.reg.GetLow(), rl_src.reg.GetLow(), sign_reg);
+ OpRegRegReg(kOpAdc, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), sign_reg);
+ OpRegReg(kOpXor, rl_result.reg.GetLow(), sign_reg);
+ OpRegReg(kOpXor, rl_result.reg.GetHigh(), sign_reg);
StoreValueWide(rl_dest, rl_result);
return true;
}
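
Aside: the wide version is the same identity stretched across a register pair; one sign word (derived from the high half) drives both halves, and kOpAdc carries the low-half overflow into the high half. Spelled out over two 32-bit words:

#include <cassert>
#include <cstdint>

// 64-bit abs over a lo/hi pair, mirroring kOpAsr/kOpAdd/kOpAdc/kOpXor.
uint64_t AbsLongPair(uint32_t lo, uint32_t hi) {
  uint32_t sign = static_cast<int32_t>(hi) >> 31;        // replicate the sign bit
  uint64_t add_lo = static_cast<uint64_t>(lo) + sign;
  uint32_t carry = static_cast<uint32_t>(add_lo >> 32);  // carry out of the low add
  uint32_t res_lo = static_cast<uint32_t>(add_lo) ^ sign;
  uint32_t res_hi = (hi + sign + carry) ^ sign;
  return (static_cast<uint64_t>(res_hi) << 32) | res_lo;
}

int main() {
  int64_t v = -123456789012345LL;  // v >> 32 below assumes arithmetic shift
  assert(AbsLongPair(static_cast<uint32_t>(v), static_cast<uint32_t>(v >> 32)) ==
         static_cast<uint64_t>(-v));
}
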
@@ -1203,7 +1226,7 @@
rl_src = LoadValue(rl_src, kCoreReg);
RegLocation rl_dest = InlineTarget(info);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegRegImm(kOpAnd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), 0x7fffffff);
+ OpRegRegImm(kOpAnd, rl_result.reg, rl_src.reg, 0x7fffffff);
StoreValue(rl_dest, rl_result);
return true;
}
@@ -1217,8 +1240,8 @@
rl_src = LoadValueWide(rl_src, kCoreReg);
RegLocation rl_dest = InlineTargetWide(info);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegCopyWide(rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
- OpRegImm(kOpAnd, rl_result.reg.GetHighReg(), 0x7fffffff);
+ OpRegCopyWide(rl_result.reg, rl_src.reg);
+ OpRegImm(kOpAnd, rl_result.reg.GetHigh(), 0x7fffffff);
StoreValueWide(rl_dest, rl_result);
return true;
}
@@ -1263,9 +1286,9 @@
ClobberCallerSave();
LockCallTemps(); // Using fixed registers
- int reg_ptr = TargetReg(kArg0);
- int reg_char = TargetReg(kArg1);
- int reg_start = TargetReg(kArg2);
+ RegStorage reg_ptr = TargetReg(kArg0);
+ RegStorage reg_char = TargetReg(kArg1);
+ RegStorage reg_start = TargetReg(kArg2);
LoadValueDirectFixed(rl_obj, reg_ptr);
LoadValueDirectFixed(rl_char, reg_char);
@@ -1275,7 +1298,7 @@
RegLocation rl_start = info->args[2]; // 3rd arg only present in III flavor of IndexOf.
LoadValueDirectFixed(rl_start, reg_start);
}
- int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pIndexOf));
+ RegStorage r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pIndexOf));
GenNullCheck(reg_ptr, info->opt_flags);
LIR* high_code_point_branch =
rl_char.is_const ? nullptr : OpCmpImmBranch(kCondGt, reg_char, 0xFFFF, nullptr);
@@ -1305,15 +1328,15 @@
}
ClobberCallerSave();
LockCallTemps(); // Using fixed registers
- int reg_this = TargetReg(kArg0);
- int reg_cmp = TargetReg(kArg1);
+ RegStorage reg_this = TargetReg(kArg0);
+ RegStorage reg_cmp = TargetReg(kArg1);
RegLocation rl_this = info->args[0];
RegLocation rl_cmp = info->args[1];
LoadValueDirectFixed(rl_this, reg_this);
LoadValueDirectFixed(rl_cmp, reg_cmp);
- int r_tgt = (cu_->instruction_set != kX86) ?
- LoadHelper(QUICK_ENTRYPOINT_OFFSET(pStringCompareTo)) : 0;
+ RegStorage r_tgt = (cu_->instruction_set != kX86) ?
+ LoadHelper(QUICK_ENTRYPOINT_OFFSET(pStringCompareTo)) : RegStorage::InvalidReg();
GenNullCheck(reg_this, info->opt_flags);
info->opt_flags |= MIR_IGNORE_NULL_CHECK; // Record that we've null checked.
// TUNING: check if rl_cmp.s_reg_low is already null checked
@@ -1336,7 +1359,7 @@
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
ThreadOffset offset = Thread::PeerOffset();
if (cu_->instruction_set == kThumb2 || cu_->instruction_set == kMips) {
- LoadWordDisp(TargetReg(kSelf), offset.Int32Value(), rl_result.reg.GetReg());
+ LoadWordDisp(TargetReg(kSelf), offset.Int32Value(), rl_result.reg);
} else {
CHECK(cu_->instruction_set == kX86);
reinterpret_cast<X86Mir2Lir*>(this)->OpRegThreadMem(kOpMov, rl_result.reg.GetReg(), offset);
@@ -1354,20 +1377,29 @@
// Unused - RegLocation rl_src_unsafe = info->args[0];
RegLocation rl_src_obj = info->args[1]; // Object
RegLocation rl_src_offset = info->args[2]; // long low
- rl_src_offset.wide = 0; // ignore high half in info->args[3]
+ rl_src_offset = NarrowRegLoc(rl_src_offset); // ignore high half in info->args[3]
RegLocation rl_dest = is_long ? InlineTargetWide(info) : InlineTarget(info); // result reg
- if (is_volatile) {
- GenMemBarrier(kLoadLoad);
- }
+
RegLocation rl_object = LoadValue(rl_src_obj, kCoreReg);
RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (is_long) {
- OpRegReg(kOpAdd, rl_object.reg.GetReg(), rl_offset.reg.GetReg());
- LoadBaseDispWide(rl_object.reg.GetReg(), 0, rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), INVALID_SREG);
+ OpRegReg(kOpAdd, rl_object.reg, rl_offset.reg);
+ LoadBaseDispWide(rl_object.reg, 0, rl_result.reg, INVALID_SREG);
+ } else {
+ LoadBaseIndexed(rl_object.reg, rl_offset.reg, rl_result.reg, 0, kWord);
+ }
+
+ if (is_volatile) {
+ // Without context-sensitive analysis, we must issue the most conservative barriers.
+ // In this case, either a load or a store may follow, so we issue both barriers.
+ GenMemBarrier(kLoadLoad);
+ GenMemBarrier(kLoadStore);
+ }
+
+ if (is_long) {
StoreValueWide(rl_dest, rl_result);
} else {
- LoadBaseIndexed(rl_object.reg.GetReg(), rl_offset.reg.GetReg(), rl_result.reg.GetReg(), 0, kWord);
StoreValue(rl_dest, rl_result);
}
return true;
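
Aside: moving the barriers to after the load matches the cookbook recipe quoted at the top of this change; a volatile load must be followed by both LoadLoad and LoadStore, since either another load or a store may come next. In C++ fence terms the pair collapses to one acquire fence; a sketch of the shape, not the ART emitters:

#include <cstdint>
#include <atomic>

// Conservative volatile read: the load first, then a fence ordering it
// before later loads and stores (LoadLoad + LoadStore == acquire).
int32_t VolatileRead(const int32_t* addr) {
  int32_t value = *reinterpret_cast<const volatile int32_t*>(addr);
  std::atomic_thread_fence(std::memory_order_acquire);
  return value;
}

int main() {
  int32_t x = 42;
  return VolatileRead(&x) == 42 ? 0 : 1;
}
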
@@ -1382,9 +1414,10 @@
// Unused - RegLocation rl_src_unsafe = info->args[0];
RegLocation rl_src_obj = info->args[1]; // Object
RegLocation rl_src_offset = info->args[2]; // long low
- rl_src_offset.wide = 0; // ignore high half in info->args[3]
+ rl_src_offset = NarrowRegLoc(rl_src_offset); // ignore high half in info->args[3]
RegLocation rl_src_value = info->args[4]; // value to store
if (is_volatile || is_ordered) {
+ // There might have been a store before this volatile one, so insert a StoreStore barrier.
GenMemBarrier(kStoreStore);
}
RegLocation rl_object = LoadValue(rl_src_obj, kCoreReg);
@@ -1392,20 +1425,22 @@
RegLocation rl_value;
if (is_long) {
rl_value = LoadValueWide(rl_src_value, kCoreReg);
- OpRegReg(kOpAdd, rl_object.reg.GetReg(), rl_offset.reg.GetReg());
- StoreBaseDispWide(rl_object.reg.GetReg(), 0, rl_value.reg.GetReg(), rl_value.reg.GetHighReg());
+ OpRegReg(kOpAdd, rl_object.reg, rl_offset.reg);
+ StoreBaseDispWide(rl_object.reg, 0, rl_value.reg);
} else {
rl_value = LoadValue(rl_src_value, kCoreReg);
- StoreBaseIndexed(rl_object.reg.GetReg(), rl_offset.reg.GetReg(), rl_value.reg.GetReg(), 0, kWord);
+ StoreBaseIndexed(rl_object.reg, rl_offset.reg, rl_value.reg, 0, kWord);
}
// Free up the temp early, to ensure x86 doesn't run out of temporaries in MarkGCCard.
FreeTemp(rl_offset.reg.GetReg());
+
if (is_volatile) {
+ // A load might follow the volatile store, so insert a StoreLoad barrier.
GenMemBarrier(kStoreLoad);
}
if (is_object) {
- MarkGCCard(rl_value.reg.GetReg(), rl_object.reg.GetReg());
+ MarkGCCard(rl_value.reg, rl_object.reg);
}
return true;
}
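
Aside: the store side of the same recipe: StoreStore before the volatile store (a release fence also provides LoadStore, which is harmless here), and the expensive StoreLoad after it, seq_cst in C++ fence terms. Again a sketch under those assumptions:

#include <cstdint>
#include <atomic>

void VolatileWrite(int32_t* addr, int32_t value) {
  std::atomic_thread_fence(std::memory_order_release);  // covers StoreStore
  *reinterpret_cast<volatile int32_t*>(addr) = value;
  std::atomic_thread_fence(std::memory_order_seq_cst);  // StoreLoad
}

int main() {
  int32_t x = 0;
  VolatileWrite(&x, 7);
  return x == 7 ? 0 : 1;
}
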
@@ -1417,7 +1452,7 @@
((cu_->disable_opt & (1 << kNullCheckElimination)) != 0 ||
(info->opt_flags & MIR_IGNORE_NULL_CHECK) == 0)) {
RegLocation rl_obj = LoadValue(info->args[0], kCoreReg);
- GenImmedCheck(kCondEq, rl_obj.reg.GetReg(), 0, kThrowNullPointer);
+ GenImmedCheck(kCondEq, rl_obj.reg, 0, kThrowNullPointer);
}
return;
}
diff --git a/compiler/dex/quick/gen_loadstore.cc b/compiler/dex/quick/gen_loadstore.cc
index 3b79df9..36d6199 100644
--- a/compiler/dex/quick/gen_loadstore.cc
+++ b/compiler/dex/quick/gen_loadstore.cc
@@ -27,7 +27,7 @@
* Load an immediate value into a fixed or temp register. Target
* register is clobbered, and marked in_use.
*/
-LIR* Mir2Lir::LoadConstant(int r_dest, int value) {
+LIR* Mir2Lir::LoadConstant(RegStorage r_dest, int value) {
if (IsTemp(r_dest)) {
Clobber(r_dest);
MarkInUse(r_dest);
@@ -40,7 +40,7 @@
* promoted floating point register, also copy a zero into the int/ref identity of
* that sreg.
*/
-void Mir2Lir::Workaround7250540(RegLocation rl_dest, int zero_reg) {
+void Mir2Lir::Workaround7250540(RegLocation rl_dest, RegStorage zero_reg) {
if (rl_dest.fp) {
int pmap_index = SRegToPMap(rl_dest.s_reg_low);
if (promotion_map_[pmap_index].fp_location == kLocPhysReg) {
@@ -55,19 +55,19 @@
if (!used_as_reference) {
return;
}
- int temp_reg = zero_reg;
- if (temp_reg == INVALID_REG) {
+ RegStorage temp_reg = zero_reg;
+ if (!temp_reg.Valid()) {
temp_reg = AllocTemp();
LoadConstant(temp_reg, 0);
}
if (promotion_map_[pmap_index].core_location == kLocPhysReg) {
// Promoted - just copy in a zero
- OpRegCopy(promotion_map_[pmap_index].core_reg, temp_reg);
+ OpRegCopy(RegStorage::Solo32(promotion_map_[pmap_index].core_reg), temp_reg);
} else {
// Lives in the frame, need to store.
StoreBaseDisp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), temp_reg, kWord);
}
- if (zero_reg == INVALID_REG) {
+ if (!zero_reg.Valid()) {
FreeTemp(temp_reg);
}
}
@@ -75,13 +75,12 @@
}
/* Load a word at base + displacement. Displacement must be word multiple */
-LIR* Mir2Lir::LoadWordDisp(int rBase, int displacement, int r_dest) {
- return LoadBaseDisp(rBase, displacement, r_dest, kWord,
- INVALID_SREG);
+LIR* Mir2Lir::LoadWordDisp(RegStorage r_base, int displacement, RegStorage r_dest) {
+ return LoadBaseDisp(r_base, displacement, r_dest, kWord, INVALID_SREG);
}
-LIR* Mir2Lir::StoreWordDisp(int rBase, int displacement, int r_src) {
- return StoreBaseDisp(rBase, displacement, r_src, kWord);
+LIR* Mir2Lir::StoreWordDisp(RegStorage r_base, int displacement, RegStorage r_src) {
+ return StoreBaseDisp(r_base, displacement, r_src, kWord);
}
/*
@@ -89,10 +88,10 @@
* using this routine, as it doesn't perform any bookkeeping regarding
* register liveness. That is the responsibility of the caller.
*/
-void Mir2Lir::LoadValueDirect(RegLocation rl_src, int r_dest) {
+void Mir2Lir::LoadValueDirect(RegLocation rl_src, RegStorage r_dest) {
rl_src = UpdateLoc(rl_src);
if (rl_src.location == kLocPhysReg) {
- OpRegCopy(r_dest, rl_src.reg.GetReg());
+ OpRegCopy(r_dest, rl_src.reg);
} else if (IsInexpensiveConstant(rl_src)) {
LoadConstantNoClobber(r_dest, mir_graph_->ConstantValue(rl_src));
} else {
@@ -107,7 +106,7 @@
* register. Should be used when loading to a fixed register (for example,
* loading arguments to an out-of-line call).
*/
-void Mir2Lir::LoadValueDirectFixed(RegLocation rl_src, int r_dest) {
+void Mir2Lir::LoadValueDirectFixed(RegLocation rl_src, RegStorage r_dest) {
Clobber(r_dest);
MarkInUse(r_dest);
LoadValueDirect(rl_src, r_dest);
@@ -118,18 +117,16 @@
* using this routine, as it doesn't perform any bookkeeping regarding
* register liveness. That is the responsibility of the caller.
*/
-void Mir2Lir::LoadValueDirectWide(RegLocation rl_src, int reg_lo,
- int reg_hi) {
+void Mir2Lir::LoadValueDirectWide(RegLocation rl_src, RegStorage r_dest) {
rl_src = UpdateLocWide(rl_src);
if (rl_src.location == kLocPhysReg) {
- OpRegCopyWide(reg_lo, reg_hi, rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
+ OpRegCopyWide(r_dest, rl_src.reg);
} else if (IsInexpensiveConstant(rl_src)) {
- LoadConstantWide(reg_lo, reg_hi, mir_graph_->ConstantValueWide(rl_src));
+ LoadConstantWide(r_dest, mir_graph_->ConstantValueWide(rl_src));
} else {
DCHECK((rl_src.location == kLocDalvikFrame) ||
(rl_src.location == kLocCompilerTemp));
- LoadBaseDispWide(TargetReg(kSp), SRegOffset(rl_src.s_reg_low),
- reg_lo, reg_hi, INVALID_SREG);
+ LoadBaseDispWide(TargetReg(kSp), SRegOffset(rl_src.s_reg_low), r_dest, INVALID_SREG);
}
}
@@ -138,21 +135,18 @@
* registers. Should be used when loading to fixed registers (for example,
* loading arguments to an out-of-line call).
*/
-void Mir2Lir::LoadValueDirectWideFixed(RegLocation rl_src, int reg_lo,
- int reg_hi) {
- Clobber(reg_lo);
- Clobber(reg_hi);
- MarkInUse(reg_lo);
- MarkInUse(reg_hi);
- LoadValueDirectWide(rl_src, reg_lo, reg_hi);
+void Mir2Lir::LoadValueDirectWideFixed(RegLocation rl_src, RegStorage r_dest) {
+ Clobber(r_dest);
+ MarkInUse(r_dest);
+ LoadValueDirectWide(rl_src, r_dest);
}
RegLocation Mir2Lir::LoadValue(RegLocation rl_src, RegisterClass op_kind) {
rl_src = EvalLoc(rl_src, op_kind, false);
if (IsInexpensiveConstant(rl_src) || rl_src.location != kLocPhysReg) {
- LoadValueDirect(rl_src, rl_src.reg.GetReg());
+ LoadValueDirect(rl_src, rl_src.reg);
rl_src.location = kLocPhysReg;
- MarkLive(rl_src.reg.GetReg(), rl_src.s_reg_low);
+ MarkLive(rl_src.reg, rl_src.s_reg_low);
}
return rl_src;
}
@@ -175,34 +169,32 @@
rl_src = UpdateLoc(rl_src);
rl_dest = UpdateLoc(rl_dest);
if (rl_src.location == kLocPhysReg) {
- if (IsLive(rl_src.reg.GetReg()) ||
- IsPromoted(rl_src.reg.GetReg()) ||
+ if (IsLive(rl_src.reg) ||
+ IsPromoted(rl_src.reg) ||
(rl_dest.location == kLocPhysReg)) {
// Src is live/promoted or Dest has assigned reg.
rl_dest = EvalLoc(rl_dest, kAnyReg, false);
- OpRegCopy(rl_dest.reg.GetReg(), rl_src.reg.GetReg());
+ OpRegCopy(rl_dest.reg, rl_src.reg);
} else {
// Just re-assign the registers. Dest gets Src's regs
rl_dest.reg = rl_src.reg;
- Clobber(rl_src.reg.GetReg());
+ Clobber(rl_src.reg);
}
} else {
// Load Src either into promoted Dest or temps allocated for Dest
rl_dest = EvalLoc(rl_dest, kAnyReg, false);
- LoadValueDirect(rl_src, rl_dest.reg.GetReg());
+ LoadValueDirect(rl_src, rl_dest.reg);
}
// Dest is now live and dirty (until/if we flush it to home location)
- MarkLive(rl_dest.reg.GetReg(), rl_dest.s_reg_low);
+ MarkLive(rl_dest.reg, rl_dest.s_reg_low);
MarkDirty(rl_dest);
ResetDefLoc(rl_dest);
- if (IsDirty(rl_dest.reg.GetReg()) &&
- oat_live_out(rl_dest.s_reg_low)) {
+ if (IsDirty(rl_dest.reg) && oat_live_out(rl_dest.s_reg_low)) {
def_start = last_lir_insn_;
- StoreBaseDisp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low),
- rl_dest.reg.GetReg(), kWord);
+ StoreBaseDisp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg, kWord);
MarkClean(rl_dest);
def_end = last_lir_insn_;
if (!rl_dest.ref) {
@@ -216,10 +208,10 @@
DCHECK(rl_src.wide);
rl_src = EvalLoc(rl_src, op_kind, false);
if (IsInexpensiveConstant(rl_src) || rl_src.location != kLocPhysReg) {
- LoadValueDirectWide(rl_src, rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
+ LoadValueDirectWide(rl_src, rl_src.reg);
rl_src.location = kLocPhysReg;
- MarkLive(rl_src.reg.GetReg(), rl_src.s_reg_low);
- MarkLive(rl_src.reg.GetHighReg(), GetSRegHi(rl_src.s_reg_low));
+ MarkLive(rl_src.reg.GetLow(), rl_src.s_reg_low);
+ MarkLive(rl_src.reg.GetHigh(), GetSRegHi(rl_src.s_reg_low));
}
return rl_src;
}
@@ -237,59 +229,51 @@
}
LIR* def_start;
LIR* def_end;
- DCHECK((rl_src.location != kLocPhysReg) ||
- (IsFpReg(rl_src.reg.GetReg()) == IsFpReg(rl_src.reg.GetHighReg())));
DCHECK(rl_dest.wide);
DCHECK(rl_src.wide);
rl_src = UpdateLocWide(rl_src);
rl_dest = UpdateLocWide(rl_dest);
if (rl_src.location == kLocPhysReg) {
- if (IsLive(rl_src.reg.GetReg()) ||
- IsLive(rl_src.reg.GetHighReg()) ||
- IsPromoted(rl_src.reg.GetReg()) ||
- IsPromoted(rl_src.reg.GetHighReg()) ||
+ if (IsLive(rl_src.reg) ||
+ IsPromoted(rl_src.reg) ||
(rl_dest.location == kLocPhysReg)) {
// Src is live or promoted or Dest has assigned reg.
rl_dest = EvalLoc(rl_dest, kAnyReg, false);
- OpRegCopyWide(rl_dest.reg.GetReg(), rl_dest.reg.GetHighReg(),
- rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
+ OpRegCopyWide(rl_dest.reg, rl_src.reg);
} else {
// Just re-assign the registers. Dest gets Src's regs
rl_dest.reg = rl_src.reg;
- Clobber(rl_src.reg.GetReg());
- Clobber(rl_src.reg.GetHighReg());
+ Clobber(rl_src.reg);
}
} else {
// Load Src either into promoted Dest or temps allocated for Dest
rl_dest = EvalLoc(rl_dest, kAnyReg, false);
- LoadValueDirectWide(rl_src, rl_dest.reg.GetReg(), rl_dest.reg.GetHighReg());
+ LoadValueDirectWide(rl_src, rl_dest.reg);
}
// Dest is now live and dirty (until/if we flush it to home location)
- MarkLive(rl_dest.reg.GetReg(), rl_dest.s_reg_low);
+ MarkLive(rl_dest.reg.GetLow(), rl_dest.s_reg_low);
// Does this wide value live in two registers (or a single vector register)?
- if (rl_dest.reg.GetReg() != rl_dest.reg.GetHighReg()) {
- MarkLive(rl_dest.reg.GetHighReg(), GetSRegHi(rl_dest.s_reg_low));
+ // FIXME: wide reg update.
+ if (rl_dest.reg.GetLowReg() != rl_dest.reg.GetHighReg()) {
+ MarkLive(rl_dest.reg.GetHigh(), GetSRegHi(rl_dest.s_reg_low));
MarkDirty(rl_dest);
- MarkPair(rl_dest.reg.GetReg(), rl_dest.reg.GetHighReg());
+ MarkPair(rl_dest.reg.GetLowReg(), rl_dest.reg.GetHighReg());
} else {
// This must be an x86 vector register value.
- DCHECK(IsFpReg(rl_dest.reg.GetReg()) && (cu_->instruction_set == kX86));
+ DCHECK(IsFpReg(rl_dest.reg) && (cu_->instruction_set == kX86));
MarkDirty(rl_dest);
}
ResetDefLocWide(rl_dest);
- if ((IsDirty(rl_dest.reg.GetReg()) ||
- IsDirty(rl_dest.reg.GetHighReg())) &&
- (oat_live_out(rl_dest.s_reg_low) ||
+ if (IsDirty(rl_dest.reg) && (oat_live_out(rl_dest.s_reg_low) ||
oat_live_out(GetSRegHi(rl_dest.s_reg_low)))) {
def_start = last_lir_insn_;
DCHECK_EQ((mir_graph_->SRegToVReg(rl_dest.s_reg_low)+1),
mir_graph_->SRegToVReg(GetSRegHi(rl_dest.s_reg_low)));
- StoreBaseDispWide(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low),
- rl_dest.reg.GetReg(), rl_dest.reg.GetHighReg());
+ StoreBaseDispWide(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg);
MarkClean(rl_dest);
def_end = last_lir_insn_;
MarkDefWide(rl_dest, def_start, def_end);
@@ -300,25 +284,24 @@
DCHECK_EQ(rl_src.location, kLocPhysReg);
if (rl_dest.location == kLocPhysReg) {
- OpRegCopy(rl_dest.reg.GetReg(), rl_src.reg.GetReg());
+ OpRegCopy(rl_dest.reg, rl_src.reg);
} else {
// Just re-assign the register. Dest gets Src's reg.
rl_dest.location = kLocPhysReg;
rl_dest.reg = rl_src.reg;
- Clobber(rl_src.reg.GetReg());
+ Clobber(rl_src.reg);
}
// Dest is now live and dirty (until/if we flush it to home location)
- MarkLive(rl_dest.reg.GetReg(), rl_dest.s_reg_low);
+ MarkLive(rl_dest.reg, rl_dest.s_reg_low);
MarkDirty(rl_dest);
ResetDefLoc(rl_dest);
- if (IsDirty(rl_dest.reg.GetReg()) &&
+ if (IsDirty(rl_dest.reg) &&
oat_live_out(rl_dest.s_reg_low)) {
LIR *def_start = last_lir_insn_;
- StoreBaseDisp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low),
- rl_dest.reg.GetReg(), kWord);
+ StoreBaseDisp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg, kWord);
MarkClean(rl_dest);
LIR *def_end = last_lir_insn_;
if (!rl_dest.ref) {
@@ -329,45 +312,43 @@
}
void Mir2Lir::StoreFinalValueWide(RegLocation rl_dest, RegLocation rl_src) {
- DCHECK_EQ(IsFpReg(rl_src.reg.GetReg()), IsFpReg(rl_src.reg.GetHighReg()));
+ DCHECK_EQ(IsFpReg(rl_src.reg.GetLowReg()), IsFpReg(rl_src.reg.GetHighReg()));
DCHECK(rl_dest.wide);
DCHECK(rl_src.wide);
DCHECK_EQ(rl_src.location, kLocPhysReg);
if (rl_dest.location == kLocPhysReg) {
- OpRegCopyWide(rl_dest.reg.GetReg(), rl_dest.reg.GetHighReg(), rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
+ OpRegCopyWide(rl_dest.reg, rl_src.reg);
} else {
// Just re-assign the registers. Dest gets Src's regs.
rl_dest.location = kLocPhysReg;
rl_dest.reg = rl_src.reg;
- Clobber(rl_src.reg.GetReg());
+ Clobber(rl_src.reg.GetLowReg());
Clobber(rl_src.reg.GetHighReg());
}
// Dest is now live and dirty (until/if we flush it to home location).
- MarkLive(rl_dest.reg.GetReg(), rl_dest.s_reg_low);
+ MarkLive(rl_dest.reg.GetLow(), rl_dest.s_reg_low);
// Does this wide value live in two registers (or a single vector register)?
- if (rl_dest.reg.GetReg() != rl_dest.reg.GetHighReg()) {
- MarkLive(rl_dest.reg.GetHighReg(), GetSRegHi(rl_dest.s_reg_low));
+ // FIXME: wide reg.
+ if (rl_dest.reg.GetLowReg() != rl_dest.reg.GetHighReg()) {
+ MarkLive(rl_dest.reg.GetHigh(), GetSRegHi(rl_dest.s_reg_low));
MarkDirty(rl_dest);
- MarkPair(rl_dest.reg.GetReg(), rl_dest.reg.GetHighReg());
+ MarkPair(rl_dest.reg.GetLowReg(), rl_dest.reg.GetHighReg());
} else {
// This must be an x86 vector register value.
- DCHECK(IsFpReg(rl_dest.reg.GetReg()) && (cu_->instruction_set == kX86));
+ DCHECK(IsFpReg(rl_dest.reg) && (cu_->instruction_set == kX86));
MarkDirty(rl_dest);
}
ResetDefLocWide(rl_dest);
- if ((IsDirty(rl_dest.reg.GetReg()) ||
- IsDirty(rl_dest.reg.GetHighReg())) &&
- (oat_live_out(rl_dest.s_reg_low) ||
+ if (IsDirty(rl_dest.reg) && (oat_live_out(rl_dest.s_reg_low) ||
oat_live_out(GetSRegHi(rl_dest.s_reg_low)))) {
LIR *def_start = last_lir_insn_;
DCHECK_EQ((mir_graph_->SRegToVReg(rl_dest.s_reg_low)+1),
mir_graph_->SRegToVReg(GetSRegHi(rl_dest.s_reg_low)));
- StoreBaseDispWide(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low),
- rl_dest.reg.GetReg(), rl_dest.reg.GetHighReg());
+ StoreBaseDispWide(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg);
MarkClean(rl_dest);
LIR *def_end = last_lir_insn_;
MarkDefWide(rl_dest, def_start, def_end);
@@ -375,7 +356,7 @@
}
/* Utilities to load the current Method* */
-void Mir2Lir::LoadCurrMethodDirect(int r_tgt) {
+void Mir2Lir::LoadCurrMethodDirect(RegStorage r_tgt) {
LoadValueDirectFixed(mir_graph_->GetMethodLoc(), r_tgt);
}
@@ -386,13 +367,13 @@
RegLocation Mir2Lir::ForceTemp(RegLocation loc) {
DCHECK(!loc.wide);
DCHECK(loc.location == kLocPhysReg);
- DCHECK(!IsFpReg(loc.reg.GetReg()));
- if (IsTemp(loc.reg.GetReg())) {
- Clobber(loc.reg.GetReg());
+ DCHECK(!IsFpReg(loc.reg));
+ if (IsTemp(loc.reg)) {
+ Clobber(loc.reg);
} else {
- int temp_low = AllocTemp();
- OpRegCopy(temp_low, loc.reg.GetReg());
- loc.reg.SetReg(temp_low);
+ RegStorage temp_low = AllocTemp();
+ OpRegCopy(temp_low, loc.reg);
+ loc.reg = temp_low;
}
// Ensure that this doesn't represent the original SR any more.
@@ -400,24 +381,25 @@
return loc;
}
+// FIXME: wide regs.
RegLocation Mir2Lir::ForceTempWide(RegLocation loc) {
DCHECK(loc.wide);
DCHECK(loc.location == kLocPhysReg);
- DCHECK(!IsFpReg(loc.reg.GetReg()));
+ DCHECK(!IsFpReg(loc.reg.GetLowReg()));
DCHECK(!IsFpReg(loc.reg.GetHighReg()));
- if (IsTemp(loc.reg.GetReg())) {
- Clobber(loc.reg.GetReg());
+ if (IsTemp(loc.reg.GetLowReg())) {
+ Clobber(loc.reg.GetLowReg());
} else {
- int temp_low = AllocTemp();
- OpRegCopy(temp_low, loc.reg.GetReg());
- loc.reg.SetReg(temp_low);
+ RegStorage temp_low = AllocTemp();
+ OpRegCopy(temp_low, loc.reg.GetLow());
+ loc.reg.SetLowReg(temp_low.GetReg());
}
if (IsTemp(loc.reg.GetHighReg())) {
Clobber(loc.reg.GetHighReg());
} else {
- int temp_high = AllocTemp();
- OpRegCopy(temp_high, loc.reg.GetHighReg());
- loc.reg.SetHighReg(temp_high);
+ RegStorage temp_high = AllocTemp();
+ OpRegCopy(temp_high, loc.reg.GetHigh());
+ loc.reg.SetHighReg(temp_high.GetReg());
}
// Ensure that this doesn't represent the original SR any more.
diff --git a/compiler/dex/quick/local_optimizations.cc b/compiler/dex/quick/local_optimizations.cc
index 6df91e6..8f64408 100644
--- a/compiler/dex/quick/local_optimizations.cc
+++ b/compiler/dex/quick/local_optimizations.cc
@@ -39,7 +39,7 @@
}
/* Convert a more expensive instruction (ie load) into a move */
-void Mir2Lir::ConvertMemOpIntoMove(LIR* orig_lir, int dest, int src) {
+void Mir2Lir::ConvertMemOpIntoMove(LIR* orig_lir, RegStorage dest, RegStorage src) {
/* Insert a move to replace the load */
LIR* move_lir;
move_lir = OpRegCopyNoInsert(dest, src);
@@ -92,7 +92,10 @@
((target_flags & (REG_DEF0 | REG_DEF1)) == (REG_DEF0 | REG_DEF1)) || // Skip wide loads.
((target_flags & (REG_USE0 | REG_USE1 | REG_USE2)) ==
(REG_USE0 | REG_USE1 | REG_USE2)) || // Skip wide stores.
- !(target_flags & (IS_LOAD | IS_STORE))) {
+ // Skip instructions that are neither loads nor stores.
+ !(target_flags & (IS_LOAD | IS_STORE)) ||
+ // Skip instructions that do both load and store.
+ ((target_flags & (IS_STORE | IS_LOAD)) == (IS_STORE | IS_LOAD))) {
continue;
}
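
Aside: the new skip condition also rules out instructions whose flags claim both load and store, which this pass cannot model as a single memory access. The mask test in isolation, with illustrative bit positions:

#include <cassert>
#include <cstdint>

constexpr uint64_t IS_LOAD  = 1u << 0;  // illustrative bit positions only
constexpr uint64_t IS_STORE = 1u << 1;

// True for instructions the optimization must skip: neither a load nor a
// store, or both at once.
bool SkipForLoadStoreOpt(uint64_t flags) {
  uint64_t ls = flags & (IS_LOAD | IS_STORE);
  return ls == 0 || ls == (IS_LOAD | IS_STORE);
}

int main() {
  assert(!SkipForLoadStoreOpt(IS_LOAD));
  assert(SkipForLoadStoreOpt(0));
  assert(SkipForLoadStoreOpt(IS_LOAD | IS_STORE));
}
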
@@ -166,7 +169,9 @@
* a move
*/
if (check_lir->operands[0] != native_reg_id) {
- ConvertMemOpIntoMove(check_lir, check_lir->operands[0], native_reg_id);
+ // TODO: update for 64-bit regs.
+ ConvertMemOpIntoMove(check_lir, RegStorage::Solo32(check_lir->operands[0]),
+ RegStorage::Solo32(native_reg_id));
}
NopLIR(check_lir);
}
@@ -183,9 +188,10 @@
* Different destination register -
* insert a move
*/
- if (check_lir->operands[0] !=
- native_reg_id) {
- ConvertMemOpIntoMove(check_lir, check_lir->operands[0], native_reg_id);
+ if (check_lir->operands[0] != native_reg_id) {
+ // TODO: update for 64-bit regs.
+ ConvertMemOpIntoMove(check_lir, RegStorage::Solo32(check_lir->operands[0]),
+ RegStorage::Solo32(native_reg_id));
}
NopLIR(check_lir);
} else {
@@ -293,7 +299,8 @@
/* Skip non-interesting instructions */
if (!(target_flags & IS_LOAD) ||
(this_lir->flags.is_nop == true) ||
- ((target_flags & (REG_DEF0 | REG_DEF1)) == (REG_DEF0 | REG_DEF1))) {
+ ((target_flags & (REG_DEF0 | REG_DEF1)) == (REG_DEF0 | REG_DEF1)) ||
+ ((target_flags & (IS_STORE | IS_LOAD)) == (IS_STORE | IS_LOAD))) {
continue;
}
diff --git a/compiler/dex/quick/mips/assemble_mips.cc b/compiler/dex/quick/mips/assemble_mips.cc
index bd3355f..ee142e5 100644
--- a/compiler/dex/quick/mips/assemble_mips.cc
+++ b/compiler/dex/quick/mips/assemble_mips.cc
@@ -432,12 +432,12 @@
* Long conditional branch
* -----------------------
* bne rs,rt,hop
- * bal .+8 ; r_RA <- anchor
- * lui r_AT, ((target-anchor) >> 16)
+ * bal .+8 ; rRA <- anchor
+ * lui rAT, ((target-anchor) >> 16)
* anchor:
- * ori r_AT, r_AT, ((target-anchor) & 0xffff)
- * addu r_AT, r_AT, r_RA
- * jr r_AT
+ * ori rAT, rAT, ((target-anchor) & 0xffff)
+ * addu rAT, rAT, rRA
+ * jr rAT
* hop:
*
* Orig unconditional branch
@@ -446,12 +446,12 @@
*
* Long unconditional branch
* -----------------------
- * bal .+8 ; r_RA <- anchor
- * lui r_AT, ((target-anchor) >> 16)
+ * bal .+8 ; rRA <- anchor
+ * lui rAT, ((target-anchor) >> 16)
* anchor:
- * ori r_AT, r_AT, ((target-anchor) & 0xffff)
- * addu r_AT, r_AT, r_RA
- * jr r_AT
+ * ori rAT, rAT, ((target-anchor) & 0xffff)
+ * addu rAT, rAT, rRA
+ * jr rAT
*
*
* NOTE: An out-of-range bal isn't supported because it should
@@ -489,16 +489,16 @@
LIR* curr_pc = RawLIR(dalvik_offset, kMipsCurrPC);
InsertLIRBefore(lir, curr_pc);
LIR* anchor = RawLIR(dalvik_offset, kPseudoTargetLabel);
- LIR* delta_hi = RawLIR(dalvik_offset, kMipsDeltaHi, r_AT, 0, WrapPointer(anchor), 0, 0,
+ LIR* delta_hi = RawLIR(dalvik_offset, kMipsDeltaHi, rAT, 0, WrapPointer(anchor), 0, 0,
lir->target);
InsertLIRBefore(lir, delta_hi);
InsertLIRBefore(lir, anchor);
- LIR* delta_lo = RawLIR(dalvik_offset, kMipsDeltaLo, r_AT, 0, WrapPointer(anchor), 0, 0,
+ LIR* delta_lo = RawLIR(dalvik_offset, kMipsDeltaLo, rAT, 0, WrapPointer(anchor), 0, 0,
lir->target);
InsertLIRBefore(lir, delta_lo);
- LIR* addu = RawLIR(dalvik_offset, kMipsAddu, r_AT, r_AT, r_RA);
+ LIR* addu = RawLIR(dalvik_offset, kMipsAddu, rAT, rAT, rRA);
InsertLIRBefore(lir, addu);
- LIR* jr = RawLIR(dalvik_offset, kMipsJr, r_AT);
+ LIR* jr = RawLIR(dalvik_offset, kMipsJr, rAT);
InsertLIRBefore(lir, jr);
if (!unconditional) {
InsertLIRBefore(lir, hop_target);
@@ -559,7 +559,7 @@
InsertLIRBefore(lir, new_delta_lo);
LIR *new_addu =
RawLIR(lir->dalvik_offset, kMipsAddu,
- lir->operands[0], lir->operands[0], r_RA);
+ lir->operands[0], lir->operands[0], rRA);
InsertLIRBefore(lir, new_addu);
NopLIR(lir);
res = kRetryAll;
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index 95fd6e7..972457a 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -32,8 +32,8 @@
/*
* The lack of pc-relative loads on Mips presents somewhat of a challenge
* for our PIC switch table strategy. To materialize the current location
- * we'll do a dummy JAL and reference our tables using r_RA as the
- * base register. Note that r_RA will be used both as the base to
+ * we'll do a dummy JAL and reference our tables using rRA as the
+ * base register. Note that rRA will be used both as the base to
* locate the switch table data and as the reference base for the switch
* target offsets stored in the table. We'll use a special pseudo-instruction
* to represent the jal and trigger the construction of the
@@ -42,21 +42,21 @@
*
* The test loop will look something like:
*
- * ori rEnd, r_ZERO, #table_size ; size in bytes
- * jal BaseLabel ; stores "return address" (BaseLabel) in r_RA
+ * ori r_end, rZERO, #table_size ; size in bytes
+ * jal BaseLabel ; stores "return address" (BaseLabel) in rRA
* nop ; opportunistically fill
* BaseLabel:
- * addiu rBase, r_RA, <table> - <BaseLabel> ; table relative to BaseLabel
- addu rEnd, rEnd, rBase ; end of table
+ * addiu r_base, rRA, <table> - <BaseLabel> ; table relative to BaseLabel
+ * addu r_end, r_end, r_base ; end of table
* lw r_val, [rSP, v_reg_off] ; Test Value
* loop:
- * beq rBase, rEnd, done
- * lw r_key, 0(rBase)
- * addu rBase, 8
+ * beq r_base, r_end, done
+ * lw r_key, 0(r_base)
+ * addu r_base, 8
* bne r_val, r_key, loop
- * lw r_disp, -4(rBase)
- * addu r_RA, r_disp
- * jr r_RA
+ * lw r_disp, -4(r_base)
+ * addu rRA, r_disp
+ * jr rRA
* done:
*
*/
@@ -82,18 +82,18 @@
int size_hi = byte_size >> 16;
int size_lo = byte_size & 0xffff;
- int rEnd = AllocTemp();
+ RegStorage r_end = AllocTemp();
if (size_hi) {
- NewLIR2(kMipsLui, rEnd, size_hi);
+ NewLIR2(kMipsLui, r_end.GetReg(), size_hi);
}
// Must prevent code motion for the curr pc pair
GenBarrier(); // Scheduling barrier
NewLIR0(kMipsCurrPC); // Really a jal to .+8
// Now, fill the branch delay slot
if (size_hi) {
- NewLIR3(kMipsOri, rEnd, rEnd, size_lo);
+ NewLIR3(kMipsOri, r_end.GetReg(), r_end.GetReg(), size_lo);
} else {
- NewLIR3(kMipsOri, rEnd, r_ZERO, size_lo);
+ NewLIR3(kMipsOri, r_end.GetReg(), rZERO, size_lo);
}
GenBarrier(); // Scheduling barrier
@@ -101,24 +101,24 @@
LIR* base_label = NewLIR0(kPseudoTargetLabel);
// Remember base label so offsets can be computed later
tab_rec->anchor = base_label;
- int rBase = AllocTemp();
- NewLIR4(kMipsDelta, rBase, 0, WrapPointer(base_label), WrapPointer(tab_rec));
- OpRegRegReg(kOpAdd, rEnd, rEnd, rBase);
+ RegStorage r_base = AllocTemp();
+ NewLIR4(kMipsDelta, r_base.GetReg(), 0, WrapPointer(base_label), WrapPointer(tab_rec));
+ OpRegRegReg(kOpAdd, r_end, r_end, r_base);
// Grab switch test value
rl_src = LoadValue(rl_src, kCoreReg);
// Test loop
- int r_key = AllocTemp();
+ RegStorage r_key = AllocTemp();
LIR* loop_label = NewLIR0(kPseudoTargetLabel);
- LIR* exit_branch = OpCmpBranch(kCondEq, rBase, rEnd, NULL);
- LoadWordDisp(rBase, 0, r_key);
- OpRegImm(kOpAdd, rBase, 8);
- OpCmpBranch(kCondNe, rl_src.reg.GetReg(), r_key, loop_label);
- int r_disp = AllocTemp();
- LoadWordDisp(rBase, -4, r_disp);
- OpRegRegReg(kOpAdd, r_RA, r_RA, r_disp);
- OpReg(kOpBx, r_RA);
+ LIR* exit_branch = OpCmpBranch(kCondEq, r_base, r_end, NULL);
+ LoadWordDisp(r_base, 0, r_key);
+ OpRegImm(kOpAdd, r_base, 8);
+ OpCmpBranch(kCondNe, rl_src.reg, r_key, loop_label);
+ RegStorage r_disp = AllocTemp();
+ LoadWordDisp(r_base, -4, r_disp);
+ OpRegRegReg(kOpAdd, rs_rRA, rs_rRA, r_disp);
+ OpReg(kOpBx, rs_rRA);
// Loop exit
LIR* exit_label = NewLIR0(kPseudoTargetLabel);
@@ -129,13 +129,13 @@
* Code pattern will look something like:
*
* lw r_val
- * jal BaseLabel ; stores "return address" (BaseLabel) in r_RA
+ * jal BaseLabel ; stores "return address" (BaseLabel) in rRA
* nop ; opportunistically fill
* [subiu r_val, bias] ; Remove bias if low_val != 0
* bound check -> done
- * lw r_disp, [r_RA, r_val]
- * addu r_RA, r_disp
- * jr r_RA
+ * lw r_disp, [rRA, r_val]
+ * addu rRA, r_disp
+ * jr rRA
* done:
*/
void MipsMir2Lir::GenPackedSwitch(MIR* mir, DexOffset table_offset,
@@ -160,9 +160,9 @@
// Prepare the bias. If too big, handle 1st stage here
int low_key = s4FromSwitchData(&table[2]);
bool large_bias = false;
- int r_key;
+ RegStorage r_key;
if (low_key == 0) {
- r_key = rl_src.reg.GetReg();
+ r_key = rl_src.reg;
} else if ((low_key & 0xffff) != low_key) {
r_key = AllocTemp();
LoadConstant(r_key, low_key);
@@ -179,9 +179,9 @@
NewLIR0(kMipsNop);
} else {
if (large_bias) {
- OpRegRegReg(kOpSub, r_key, rl_src.reg.GetReg(), r_key);
+ OpRegRegReg(kOpSub, r_key, rl_src.reg, r_key);
} else {
- OpRegRegImm(kOpSub, r_key, rl_src.reg.GetReg(), low_key);
+ OpRegRegImm(kOpSub, r_key, rl_src.reg, low_key);
}
}
GenBarrier(); // Scheduling barrier
@@ -195,16 +195,16 @@
LIR* branch_over = OpCmpImmBranch(kCondHi, r_key, size-1, NULL);
// Materialize the table base pointer
- int rBase = AllocTemp();
- NewLIR4(kMipsDelta, rBase, 0, WrapPointer(base_label), WrapPointer(tab_rec));
+ RegStorage r_base = AllocTemp();
+ NewLIR4(kMipsDelta, r_base.GetReg(), 0, WrapPointer(base_label), WrapPointer(tab_rec));
// Load the displacement from the switch table
- int r_disp = AllocTemp();
- LoadBaseIndexed(rBase, r_key, r_disp, 2, kWord);
+ RegStorage r_disp = AllocTemp();
+ LoadBaseIndexed(r_base, r_key, r_disp, 2, kWord);
- // Add to r_AP and go
- OpRegRegReg(kOpAdd, r_RA, r_RA, r_disp);
- OpReg(kOpBx, r_RA);
+ // Add to rRA and go
+ OpRegRegReg(kOpAdd, rs_rRA, rs_rRA, r_disp);
+ OpReg(kOpBx, rs_rRA);
/* branch_over target here */
LIR* target = NewLIR0(kPseudoTargetLabel);
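A sketch of the dispatch arithmetic the pattern above emits, not part of the patch: the jal leaves the anchor address in rRA, and the taken case adds the table displacement to it. All values below are made up for illustration.

#include <cstdint>
#include <cstdio>

int main() {
  int32_t disp[] = {0x10, 0x24, 0x38};   // hypothetical displacement table
  uint32_t anchor = 0x40001000;          // stand-in for the address jal put in rRA
  int key = 1;                           // switch key, already bias-adjusted
  uint32_t target = anchor + disp[key];  // addu rRA, r_disp ; jr rRA
  std::printf("dispatch to %#x\n", static_cast<unsigned>(target));
  return 0;
}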
@@ -238,13 +238,13 @@
// Making a call - use explicit registers
FlushAllRegs(); /* Everything to home location */
LockCallTemps();
- LoadValueDirectFixed(rl_src, rMIPS_ARG0);
+ LoadValueDirectFixed(rl_src, rs_rMIPS_ARG0);
// Must prevent code motion for the curr pc pair
GenBarrier();
NewLIR0(kMipsCurrPC); // Really a jal to .+8
// Now, fill the branch delay slot with the helper load
- int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayData));
+ RegStorage r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayData));
GenBarrier(); // Scheduling barrier
// Construct BaseLabel and set up table base register
@@ -262,10 +262,10 @@
void MipsMir2Lir::GenMoveException(RegLocation rl_dest) {
int ex_offset = Thread::ExceptionOffset().Int32Value();
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- int reset_reg = AllocTemp();
- LoadWordDisp(rMIPS_SELF, ex_offset, rl_result.reg.GetReg());
+ RegStorage reset_reg = AllocTemp();
+ LoadWordDisp(rs_rMIPS_SELF, ex_offset, rl_result.reg);
LoadConstant(reset_reg, 0);
- StoreWordDisp(rMIPS_SELF, ex_offset, reset_reg);
+ StoreWordDisp(rs_rMIPS_SELF, ex_offset, reset_reg);
FreeTemp(reset_reg);
StoreValue(rl_dest, rl_result);
}
@@ -273,14 +273,13 @@
/*
* Mark garbage collection card. Skip if the value we're storing is null.
*/
-void MipsMir2Lir::MarkGCCard(int val_reg, int tgt_addr_reg) {
- int reg_card_base = AllocTemp();
- int reg_card_no = AllocTemp();
+void MipsMir2Lir::MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) {
+ RegStorage reg_card_base = AllocTemp();
+ RegStorage reg_card_no = AllocTemp();
LIR* branch_over = OpCmpImmBranch(kCondEq, val_reg, 0, NULL);
- LoadWordDisp(rMIPS_SELF, Thread::CardTableOffset().Int32Value(), reg_card_base);
+ LoadWordDisp(rs_rMIPS_SELF, Thread::CardTableOffset().Int32Value(), reg_card_base);
OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
- StoreBaseIndexed(reg_card_base, reg_card_no, reg_card_base, 0,
- kUnsignedByte);
+ StoreBaseIndexed(reg_card_base, reg_card_no, reg_card_base, 0, kUnsignedByte);
LIR* target = NewLIR0(kPseudoTargetLabel);
branch_over->target = target;
FreeTemp(reg_card_base);
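The converted MarkGCCard above keeps the same card-table arithmetic; a minimal model of it, not part of the patch (the shift value is an assumption for illustration, not ART's actual constant):

#include <cstdint>

// Dirty the card covering tgt_addr. The biased base byte doubles as the
// dirty value, which is what the sb through (base + (addr >> shift)) does.
void MarkCardModel(uint8_t* biased_card_base, uintptr_t tgt_addr) {
  const unsigned kCardShift = 10;  // assumption: illustrative card size only
  biased_card_base[tgt_addr >> kCardShift] =
      static_cast<uint8_t>(reinterpret_cast<uintptr_t>(biased_card_base));
}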
@@ -307,11 +306,11 @@
bool skip_overflow_check = (mir_graph_->MethodIsLeaf() &&
(static_cast<size_t>(frame_size_) < Thread::kStackOverflowReservedBytes));
NewLIR0(kPseudoMethodEntry);
- int check_reg = AllocTemp();
- int new_sp = AllocTemp();
+ RegStorage check_reg = AllocTemp();
+ RegStorage new_sp = AllocTemp();
if (!skip_overflow_check) {
/* Load stack limit */
- LoadWordDisp(rMIPS_SELF, Thread::StackEndOffset().Int32Value(), check_reg);
+ LoadWordDisp(rs_rMIPS_SELF, Thread::StackEndOffset().Int32Value(), check_reg);
}
/* Spill core callee saves */
SpillCoreRegs();
@@ -329,24 +328,24 @@
m2l_->ResetDefTracking();
GenerateTargetLabel();
// LR is offset 0 since we push in reverse order.
- m2l_->LoadWordDisp(kMipsRegSP, 0, kMipsRegLR);
- m2l_->OpRegImm(kOpAdd, kMipsRegSP, sp_displace_);
+ m2l_->LoadWordDisp(rs_rMIPS_SP, 0, rs_rRA);
+ m2l_->OpRegImm(kOpAdd, rs_rMIPS_SP, sp_displace_);
m2l_->ClobberCallerSave();
ThreadOffset func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowStackOverflow);
- int r_tgt = m2l_->CallHelperSetup(func_offset); // Doesn't clobber LR.
+ RegStorage r_tgt = m2l_->CallHelperSetup(func_offset); // Doesn't clobber LR.
m2l_->CallHelper(r_tgt, func_offset, false /* MarkSafepointPC */, false /* UseLink */);
}
private:
const size_t sp_displace_;
};
- OpRegRegImm(kOpSub, new_sp, rMIPS_SP, frame_sub);
+ OpRegRegImm(kOpSub, new_sp, rs_rMIPS_SP, frame_sub);
LIR* branch = OpCmpBranch(kCondUlt, new_sp, check_reg, nullptr);
AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, spill_count * 4));
// TODO: avoid copy for small frame sizes.
- OpRegCopy(rMIPS_SP, new_sp); // Establish stack
+ OpRegCopy(rs_rMIPS_SP, new_sp); // Establish stack
} else {
- OpRegImm(kOpSub, rMIPS_SP, frame_sub);
+ OpRegImm(kOpSub, rs_rMIPS_SP, frame_sub);
}
FlushIns(ArgLocs, rl_method);
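The overflow check above commits the frame only when SP minus the frame size stays above the thread's stack_end guard; a plain C++ restatement of that test, not part of the patch:

#include <cstddef>
#include <cstdint>

bool FrameFits(uintptr_t sp, uintptr_t stack_end, size_t frame_size) {
  uintptr_t new_sp = sp - frame_size;  // OpRegRegImm(kOpSub, new_sp, SP, frame_sub)
  return new_sp >= stack_end;          // the kCondUlt branch takes the slow path
}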
@@ -367,11 +366,11 @@
NewLIR0(kPseudoMethodExit);
UnSpillCoreRegs();
- OpReg(kOpBx, r_RA);
+ OpReg(kOpBx, rs_rRA);
}
void MipsMir2Lir::GenSpecialExitSequence() {
- OpReg(kOpBx, r_RA);
+ OpReg(kOpBx, rs_rRA);
}
} // namespace art
diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h
index 28ebe0e..0ef43b3 100644
--- a/compiler/dex/quick/mips/codegen_mips.h
+++ b/compiler/dex/quick/mips/codegen_mips.h
@@ -22,38 +22,43 @@
namespace art {
-class MipsMir2Lir : public Mir2Lir {
+class MipsMir2Lir FINAL : public Mir2Lir {
public:
MipsMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
// Required for target - codegen utilities.
bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
- RegLocation rl_dest, int lit);
- int LoadHelper(ThreadOffset offset);
+ RegLocation rl_dest, int lit);
+ bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
LIR* CheckSuspendUsingLoad() OVERRIDE;
- LIR* LoadBaseDisp(int rBase, int displacement, int r_dest, OpSize size, int s_reg);
- LIR* LoadBaseDispWide(int rBase, int displacement, int r_dest_lo, int r_dest_hi,
- int s_reg);
- LIR* LoadBaseIndexed(int rBase, int r_index, int r_dest, int scale, OpSize size);
- LIR* LoadBaseIndexedDisp(int rBase, int r_index, int scale, int displacement,
- int r_dest, int r_dest_hi, OpSize size, int s_reg);
- LIR* LoadConstantNoClobber(int r_dest, int value);
- LIR* LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value);
- LIR* StoreBaseDisp(int rBase, int displacement, int r_src, OpSize size);
- LIR* StoreBaseDispWide(int rBase, int displacement, int r_src_lo, int r_src_hi);
- LIR* StoreBaseIndexed(int rBase, int r_index, int r_src, int scale, OpSize size);
- LIR* StoreBaseIndexedDisp(int rBase, int r_index, int scale, int displacement,
- int r_src, int r_src_hi, OpSize size, int s_reg);
- void MarkGCCard(int val_reg, int tgt_addr_reg);
+ RegStorage LoadHelper(ThreadOffset offset);
+ LIR* LoadBaseDisp(int r_base, int displacement, int r_dest, OpSize size, int s_reg);
+ LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size,
+ int s_reg);
+ LIR* LoadBaseDispWide(RegStorage r_base, int displacement, RegStorage r_dest, int s_reg);
+ LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
+ OpSize size);
+ LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
+ RegStorage r_dest, RegStorage r_dest_hi, OpSize size, int s_reg);
+ LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
+ LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
+ LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, OpSize size);
+ LIR* StoreBaseDispWide(RegStorage r_base, int displacement, RegStorage r_src);
+ LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
+ OpSize size);
+ LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
+ RegStorage r_src, RegStorage r_src_hi, OpSize size, int s_reg);
+ void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg);
// Required for target - register utilities.
bool IsFpReg(int reg);
+ bool IsFpReg(RegStorage reg);
bool SameRegType(int reg1, int reg2);
- int AllocTypedTemp(bool fp_hint, int reg_class);
+ RegStorage AllocTypedTemp(bool fp_hint, int reg_class);
RegStorage AllocTypedTempWide(bool fp_hint, int reg_class);
int S2d(int low_reg, int high_reg);
- int TargetReg(SpecialTargetRegister reg);
- int GetArgMappingToPhysicalReg(int arg_num);
+ RegStorage TargetReg(SpecialTargetRegister reg);
+ RegStorage GetArgMappingToPhysicalReg(int arg_num);
RegLocation GetReturnAlt();
RegLocation GetReturnWideAlt();
RegLocation LocCReturn();
@@ -64,8 +69,8 @@
uint64_t GetRegMaskCommon(int reg);
void AdjustSpillMask();
void ClobberCallerSave();
- void FlushReg(int reg);
- void FlushRegWide(int reg1, int reg2);
+ void FlushReg(RegStorage reg);
+ void FlushRegWide(RegStorage reg);
void FreeCallTemps();
void FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free);
void LockCallTemps();
@@ -89,22 +94,25 @@
// Required for target - Dalvik-level generators.
void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2);
+ RegLocation rl_src1, RegLocation rl_src2);
void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
RegLocation rl_index, RegLocation rl_dest, int scale);
void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
RegLocation rl_index, RegLocation rl_src, int scale, bool card_mark);
- void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_shift);
- void GenMulLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
- void GenAddLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
- void GenAndLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
- void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2);
- void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2);
- void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_shift);
+ void GenMulLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ void GenAddLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ void GenAndLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
RegLocation rl_src2);
+ void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
void GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src);
bool GenInlinedCas(CallInfo* info, bool is_long, bool is_object);
bool GenInlinedMinMaxInt(CallInfo* info, bool is_min);
@@ -112,15 +120,18 @@
bool GenInlinedPeek(CallInfo* info, OpSize size);
bool GenInlinedPoke(CallInfo* info, OpSize size);
void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
- void GenOrLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
- void GenSubLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
- void GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
- LIR* GenRegMemCheck(ConditionCode c_code, int reg1, int base, int offset,
- ThrowKind kind);
- RegLocation GenDivRem(RegLocation rl_dest, int reg_lo, int reg_hi, bool is_div);
- RegLocation GenDivRemLit(RegLocation rl_dest, int reg_lo, int lit, bool is_div);
+ void GenOrLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ void GenSubLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ void GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ LIR* GenRegMemCheck(ConditionCode c_code, RegStorage reg1, RegStorage base, int offset,
+ ThrowKind kind);
+ RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div);
+ RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div);
void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
- void GenDivZeroCheck(int reg_lo, int reg_hi);
+ void GenDivZeroCheck(RegStorage reg);
void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method);
void GenExitSequence();
void GenSpecialExitSequence();
@@ -131,7 +142,7 @@
void GenMemBarrier(MemBarrierKind barrier_kind);
void GenMoveException(RegLocation rl_dest);
void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
- int first_bit, int second_bit);
+ int first_bit, int second_bit);
void GenNegDouble(RegLocation rl_dest, RegLocation rl_src);
void GenNegFloat(RegLocation rl_dest, RegLocation rl_src);
void GenPackedSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
@@ -140,36 +151,39 @@
// Required for target - single operation generators.
LIR* OpUnconditionalBranch(LIR* target);
- LIR* OpCmpBranch(ConditionCode cond, int src1, int src2, LIR* target);
- LIR* OpCmpImmBranch(ConditionCode cond, int reg, int check_value, LIR* target);
+ LIR* OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target);
+ LIR* OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value, LIR* target);
LIR* OpCondBranch(ConditionCode cc, LIR* target);
- LIR* OpDecAndBranch(ConditionCode c_code, int reg, LIR* target);
- LIR* OpFpRegCopy(int r_dest, int r_src);
+ LIR* OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target);
+ LIR* OpFpRegCopy(RegStorage r_dest, RegStorage r_src);
LIR* OpIT(ConditionCode cond, const char* guide);
- LIR* OpMem(OpKind op, int rBase, int disp);
- LIR* OpPcRelLoad(int reg, LIR* target);
- LIR* OpReg(OpKind op, int r_dest_src);
- LIR* OpRegCopy(int r_dest, int r_src);
- LIR* OpRegCopyNoInsert(int r_dest, int r_src);
- LIR* OpRegImm(OpKind op, int r_dest_src1, int value);
- LIR* OpRegMem(OpKind op, int r_dest, int rBase, int offset);
- LIR* OpRegReg(OpKind op, int r_dest_src1, int r_src2);
- LIR* OpMovRegMem(int r_dest, int r_base, int offset, MoveType move_type);
- LIR* OpMovMemReg(int r_base, int offset, int r_src, MoveType move_type);
- LIR* OpCondRegReg(OpKind op, ConditionCode cc, int r_dest, int r_src);
- LIR* OpRegRegImm(OpKind op, int r_dest, int r_src1, int value);
- LIR* OpRegRegReg(OpKind op, int r_dest, int r_src1, int r_src2);
+ LIR* OpMem(OpKind op, RegStorage r_base, int disp);
+ LIR* OpPcRelLoad(RegStorage reg, LIR* target);
+ LIR* OpReg(OpKind op, RegStorage r_dest_src);
+ LIR* OpRegCopy(RegStorage r_dest, RegStorage r_src);
+ LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src);
+ LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value);
+ LIR* OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset);
+ LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2);
+ LIR* OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type);
+ LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type);
+ LIR* OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src);
+ LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value);
+ LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2);
LIR* OpTestSuspend(LIR* target);
LIR* OpThreadMem(OpKind op, ThreadOffset thread_offset);
- LIR* OpVldm(int rBase, int count);
- LIR* OpVstm(int rBase, int count);
- void OpLea(int rBase, int reg1, int reg2, int scale, int offset);
- void OpRegCopyWide(int dest_lo, int dest_hi, int src_lo, int src_hi);
+ LIR* OpVldm(RegStorage r_base, int count);
+ LIR* OpVstm(RegStorage r_base, int count);
+ void OpLea(RegStorage r_base, RegStorage reg1, RegStorage reg2, int scale, int offset);
+ void OpRegCopyWide(RegStorage dest, RegStorage src);
void OpTlsCmp(ThreadOffset offset, int val);
- LIR* LoadBaseDispBody(int rBase, int displacement, int r_dest, int r_dest_hi, OpSize size,
- int s_reg);
- LIR* StoreBaseDispBody(int rBase, int displacement, int r_src, int r_src_hi, OpSize size);
+ // TODO: collapse r_dest.
+ LIR* LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest,
+ RegStorage r_dest_hi, OpSize size, int s_reg);
+ // TODO: collapse r_src.
+ LIR* StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src,
+ RegStorage r_src_hi, OpSize size);
void SpillCoreRegs();
void UnSpillCoreRegs();
static const MipsEncodingMap EncodingMap[kMipsLast];
diff --git a/compiler/dex/quick/mips/fp_mips.cc b/compiler/dex/quick/mips/fp_mips.cc
index cf4f19f..2bc5540 100644
--- a/compiler/dex/quick/mips/fp_mips.cc
+++ b/compiler/dex/quick/mips/fp_mips.cc
@@ -111,8 +111,8 @@
rl_result = EvalLoc(rl_dest, kFPReg, true);
DCHECK(rl_dest.wide);
DCHECK(rl_result.wide);
- NewLIR3(op, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()), S2d(rl_src1.reg.GetReg(), rl_src1.reg.GetHighReg()),
- S2d(rl_src2.reg.GetReg(), rl_src2.reg.GetHighReg()));
+ NewLIR3(op, S2d(rl_result.reg.GetLowReg(), rl_result.reg.GetHighReg()), S2d(rl_src1.reg.GetLowReg(), rl_src1.reg.GetHighReg()),
+ S2d(rl_src2.reg.GetLowReg(), rl_src2.reg.GetHighReg()));
StoreValueWide(rl_dest, rl_result);
}
@@ -157,14 +157,14 @@
}
if (rl_src.wide) {
rl_src = LoadValueWide(rl_src, kFPReg);
- src_reg = S2d(rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
+ src_reg = S2d(rl_src.reg.GetLowReg(), rl_src.reg.GetHighReg());
} else {
rl_src = LoadValue(rl_src, kFPReg);
src_reg = rl_src.reg.GetReg();
}
if (rl_dest.wide) {
rl_result = EvalLoc(rl_dest, kFPReg, true);
- NewLIR2(op, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()), src_reg);
+ NewLIR2(op, S2d(rl_result.reg.GetLowReg(), rl_result.reg.GetHighReg()), src_reg);
StoreValueWide(rl_dest, rl_result);
} else {
rl_result = EvalLoc(rl_dest, kFPReg, true);
@@ -199,13 +199,15 @@
FlushAllRegs();
LockCallTemps();
if (wide) {
- LoadValueDirectWideFixed(rl_src1, rMIPS_FARG0, rMIPS_FARG1);
- LoadValueDirectWideFixed(rl_src2, rMIPS_FARG2, rMIPS_FARG3);
+ RegStorage r_tmp1(RegStorage::k64BitPair, rMIPS_FARG0, rMIPS_FARG1);
+ RegStorage r_tmp2(RegStorage::k64BitPair, rMIPS_FARG2, rMIPS_FARG3);
+ LoadValueDirectWideFixed(rl_src1, r_tmp1);
+ LoadValueDirectWideFixed(rl_src2, r_tmp2);
} else {
- LoadValueDirectFixed(rl_src1, rMIPS_FARG0);
- LoadValueDirectFixed(rl_src2, rMIPS_FARG2);
+ LoadValueDirectFixed(rl_src1, rs_rMIPS_FARG0);
+ LoadValueDirectFixed(rl_src2, rs_rMIPS_FARG2);
}
- int r_tgt = LoadHelper(offset);
+ RegStorage r_tgt = LoadHelper(offset);
// NOTE: not a safepoint
OpReg(kOpBlx, r_tgt);
RegLocation rl_result = GetReturn(false);
@@ -221,7 +223,7 @@
RegLocation rl_result;
rl_src = LoadValue(rl_src, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegRegImm(kOpAdd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), 0x80000000);
+ OpRegRegImm(kOpAdd, rl_result.reg, rl_src.reg, 0x80000000);
StoreValue(rl_dest, rl_result);
}
@@ -229,8 +231,8 @@
RegLocation rl_result;
rl_src = LoadValueWide(rl_src, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegRegImm(kOpAdd, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg(), 0x80000000);
- OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetReg());
+ OpRegRegImm(kOpAdd, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), 0x80000000);
+ OpRegCopy(rl_result.reg, rl_src.reg);
StoreValueWide(rl_dest, rl_result);
}
diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc
index 9fcc8bb..270d895 100644
--- a/compiler/dex/quick/mips/int_mips.cc
+++ b/compiler/dex/quick/mips/int_mips.cc
@@ -44,15 +44,15 @@
RegLocation rl_src2) {
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
rl_src2 = LoadValueWide(rl_src2, kCoreReg);
- int t0 = AllocTemp();
- int t1 = AllocTemp();
+ int t0 = AllocTemp().GetReg();
+ int t1 = AllocTemp().GetReg();
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
NewLIR3(kMipsSlt, t0, rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
NewLIR3(kMipsSlt, t1, rl_src2.reg.GetHighReg(), rl_src1.reg.GetHighReg());
NewLIR3(kMipsSubu, rl_result.reg.GetReg(), t1, t0);
- LIR* branch = OpCmpImmBranch(kCondNe, rl_result.reg.GetReg(), 0, NULL);
- NewLIR3(kMipsSltu, t0, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
- NewLIR3(kMipsSltu, t1, rl_src2.reg.GetReg(), rl_src1.reg.GetReg());
+ LIR* branch = OpCmpImmBranch(kCondNe, rl_result.reg, 0, NULL);
+ NewLIR3(kMipsSltu, t0, rl_src1.reg.GetLowReg(), rl_src2.reg.GetLowReg());
+ NewLIR3(kMipsSltu, t1, rl_src2.reg.GetLowReg(), rl_src1.reg.GetLowReg());
NewLIR3(kMipsSubu, rl_result.reg.GetReg(), t1, t0);
FreeTemp(t0);
FreeTemp(t1);
@@ -61,8 +61,7 @@
StoreValue(rl_dest, rl_result);
}
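The slt/sltu sequence above realizes cmp-long's three-way result: signed compare on the high words, unsigned compare on the low words when the high words tie. A host-side model, not part of the patch:

#include <cstdint>

int CmpLongModel(int64_t a, int64_t b) {
  int32_t a_hi = static_cast<int32_t>(a >> 32);
  int32_t b_hi = static_cast<int32_t>(b >> 32);
  uint32_t a_lo = static_cast<uint32_t>(a);
  uint32_t b_lo = static_cast<uint32_t>(b);
  int res = (b_hi < a_hi) - (a_hi < b_hi);  // slt t0 / slt t1 / subu
  if (res != 0) {
    return res;                             // bnez past the low-word compare
  }
  return (b_lo < a_lo) - (a_lo < b_lo);     // sltu t0 / sltu t1 / subu
}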
-LIR* MipsMir2Lir::OpCmpBranch(ConditionCode cond, int src1, int src2,
- LIR* target) {
+LIR* MipsMir2Lir::OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) {
LIR* branch;
MipsOpCode slt_op;
MipsOpCode br_op;
@@ -113,13 +112,13 @@
return NULL;
}
if (cmp_zero) {
- branch = NewLIR2(br_op, src1, src2);
+ branch = NewLIR2(br_op, src1.GetReg(), src2.GetReg());
} else {
- int t_reg = AllocTemp();
+ int t_reg = AllocTemp().GetReg();
if (swapped) {
- NewLIR3(slt_op, t_reg, src2, src1);
+ NewLIR3(slt_op, t_reg, src2.GetReg(), src1.GetReg());
} else {
- NewLIR3(slt_op, t_reg, src1, src2);
+ NewLIR3(slt_op, t_reg, src1.GetReg(), src2.GetReg());
}
branch = NewLIR1(br_op, t_reg);
FreeTemp(t_reg);
@@ -128,12 +127,11 @@
return branch;
}
-LIR* MipsMir2Lir::OpCmpImmBranch(ConditionCode cond, int reg,
- int check_value, LIR* target) {
+LIR* MipsMir2Lir::OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value, LIR* target) {
LIR* branch;
if (check_value != 0) {
// TUNING: handle s16 & kCondLt/Mi case using slti
- int t_reg = AllocTemp();
+ RegStorage t_reg = AllocTemp();
LoadConstant(t_reg, check_value);
branch = OpCmpBranch(cond, reg, t_reg, target);
FreeTemp(t_reg);
@@ -150,60 +148,66 @@
case kCondNe: opc = kMipsBnez; break;
default:
// Tuning: use slti when applicable
- int t_reg = AllocTemp();
+ RegStorage t_reg = AllocTemp();
LoadConstant(t_reg, check_value);
branch = OpCmpBranch(cond, reg, t_reg, target);
FreeTemp(t_reg);
return branch;
}
- branch = NewLIR1(opc, reg);
+ branch = NewLIR1(opc, reg.GetReg());
branch->target = target;
return branch;
}
-LIR* MipsMir2Lir::OpRegCopyNoInsert(int r_dest, int r_src) {
- if (MIPS_FPREG(r_dest) || MIPS_FPREG(r_src))
+LIR* MipsMir2Lir::OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) {
+ // If src or dest is a pair, we'll be using low reg.
+ if (r_dest.IsPair()) {
+ r_dest = r_dest.GetLow();
+ }
+ if (r_src.IsPair()) {
+ r_src = r_src.GetLow();
+ }
+ if (MIPS_FPREG(r_dest.GetReg()) || MIPS_FPREG(r_src.GetReg()))
return OpFpRegCopy(r_dest, r_src);
LIR* res = RawLIR(current_dalvik_offset_, kMipsMove,
- r_dest, r_src);
+ r_dest.GetReg(), r_src.GetReg());
if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
res->flags.is_nop = true;
}
return res;
}
-LIR* MipsMir2Lir::OpRegCopy(int r_dest, int r_src) {
+LIR* MipsMir2Lir::OpRegCopy(RegStorage r_dest, RegStorage r_src) {
LIR *res = OpRegCopyNoInsert(r_dest, r_src);
AppendLIR(res);
return res;
}
-void MipsMir2Lir::OpRegCopyWide(int dest_lo, int dest_hi, int src_lo,
- int src_hi) {
- bool dest_fp = MIPS_FPREG(dest_lo) && MIPS_FPREG(dest_hi);
- bool src_fp = MIPS_FPREG(src_lo) && MIPS_FPREG(src_hi);
- assert(MIPS_FPREG(src_lo) == MIPS_FPREG(src_hi));
- assert(MIPS_FPREG(dest_lo) == MIPS_FPREG(dest_hi));
+void MipsMir2Lir::OpRegCopyWide(RegStorage r_dest, RegStorage r_src) {
+ bool dest_fp = MIPS_FPREG(r_dest.GetLowReg());
+ bool src_fp = MIPS_FPREG(r_src.GetLowReg());
if (dest_fp) {
if (src_fp) {
- OpRegCopy(S2d(dest_lo, dest_hi), S2d(src_lo, src_hi));
+ // FIXME: handle this here - reserve OpRegCopy for 32-bit copies.
+ OpRegCopy(RegStorage::Solo64(S2d(r_dest.GetLowReg(), r_dest.GetHighReg())),
+ RegStorage::Solo64(S2d(r_src.GetLowReg(), r_src.GetHighReg())));
} else {
/* note the operands are swapped for the mtc1 instr */
- NewLIR2(kMipsMtc1, src_lo, dest_lo);
- NewLIR2(kMipsMtc1, src_hi, dest_hi);
+ NewLIR2(kMipsMtc1, r_src.GetLowReg(), r_dest.GetLowReg());
+ NewLIR2(kMipsMtc1, r_src.GetHighReg(), r_dest.GetHighReg());
}
} else {
if (src_fp) {
- NewLIR2(kMipsMfc1, dest_lo, src_lo);
- NewLIR2(kMipsMfc1, dest_hi, src_hi);
+ NewLIR2(kMipsMfc1, r_dest.GetLowReg(), r_src.GetLowReg());
+ NewLIR2(kMipsMfc1, r_dest.GetHighReg(), r_src.GetHighReg());
} else {
// Handle overlap
- if (src_hi == dest_lo) {
- OpRegCopy(dest_hi, src_hi);
- OpRegCopy(dest_lo, src_lo);
+ if (r_src.GetHighReg() == r_dest.GetLowReg()) {
+ OpRegCopy(r_dest.GetHigh(), r_src.GetHigh());
+ OpRegCopy(r_dest.GetLow(), r_src.GetLow());
} else {
- OpRegCopy(dest_lo, src_lo);
- OpRegCopy(dest_hi, src_hi);
+ OpRegCopy(r_dest.GetLow(), r_src.GetLow());
+ OpRegCopy(r_dest.GetHigh(), r_src.GetHigh());
}
}
}
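The core-to-core branch of OpRegCopyWide orders the two 32-bit moves so an overlapping pair is not clobbered; the rule it implements, on a toy register file (a sketch, not part of the patch):

// If the source high register is also the destination low register, copying
// low first would overwrite the source high word, so the high word goes first.
void CopyPairModel(int regs[], int dest_lo, int dest_hi, int src_lo, int src_hi) {
  if (src_hi == dest_lo) {
    regs[dest_hi] = regs[src_hi];
    regs[dest_lo] = regs[src_lo];
  } else {
    regs[dest_lo] = regs[src_lo];
    regs[dest_hi] = regs[src_hi];
  }
}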
@@ -217,34 +221,34 @@
UNIMPLEMENTED(FATAL) << "Need codegen for fused long cmp branch";
}
-LIR* MipsMir2Lir::GenRegMemCheck(ConditionCode c_code,
- int reg1, int base, int offset, ThrowKind kind) {
+LIR* MipsMir2Lir::GenRegMemCheck(ConditionCode c_code, RegStorage reg1, RegStorage base,
+ int offset, ThrowKind kind) {
LOG(FATAL) << "Unexpected use of GenRegMemCheck for Arm";
return NULL;
}
-RegLocation MipsMir2Lir::GenDivRem(RegLocation rl_dest, int reg1, int reg2,
+RegLocation MipsMir2Lir::GenDivRem(RegLocation rl_dest, RegStorage reg1, RegStorage reg2,
bool is_div) {
- NewLIR4(kMipsDiv, r_HI, r_LO, reg1, reg2);
+ NewLIR4(kMipsDiv, rHI, rLO, reg1.GetReg(), reg2.GetReg());
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (is_div) {
- NewLIR2(kMipsMflo, rl_result.reg.GetReg(), r_LO);
+ NewLIR2(kMipsMflo, rl_result.reg.GetReg(), rLO);
} else {
- NewLIR2(kMipsMfhi, rl_result.reg.GetReg(), r_HI);
+ NewLIR2(kMipsMfhi, rl_result.reg.GetReg(), rHI);
}
return rl_result;
}
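GenDivRem relies on the MIPS32 div contract: quotient in LO, remainder in HI, with mflo/mfhi selecting one. As plain C++ (a sketch, not part of the patch; GenDivZeroCheck has already ruled out a zero divisor):

#include <cstdint>

int32_t DivRemModel(int32_t dividend, int32_t divisor, bool is_div) {
  int32_t lo = dividend / divisor;  // what mflo would read
  int32_t hi = dividend % divisor;  // what mfhi would read
  return is_div ? lo : hi;
}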
-RegLocation MipsMir2Lir::GenDivRemLit(RegLocation rl_dest, int reg1, int lit,
+RegLocation MipsMir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg1, int lit,
bool is_div) {
- int t_reg = AllocTemp();
- NewLIR3(kMipsAddiu, t_reg, r_ZERO, lit);
- NewLIR4(kMipsDiv, r_HI, r_LO, reg1, t_reg);
+ int t_reg = AllocTemp().GetReg();
+ NewLIR3(kMipsAddiu, t_reg, rZERO, lit);
+ NewLIR4(kMipsDiv, rHI, rLO, reg1.GetReg(), t_reg);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (is_div) {
- NewLIR2(kMipsMflo, rl_result.reg.GetReg(), r_LO);
+ NewLIR2(kMipsMflo, rl_result.reg.GetReg(), rLO);
} else {
- NewLIR2(kMipsMfhi, rl_result.reg.GetReg(), r_HI);
+ NewLIR2(kMipsMfhi, rl_result.reg.GetReg(), rHI);
}
FreeTemp(t_reg);
return rl_result;
@@ -261,7 +265,8 @@
return rl_dest;
}
-void MipsMir2Lir::OpLea(int rBase, int reg1, int reg2, int scale, int offset) {
+void MipsMir2Lir::OpLea(RegStorage r_base, RegStorage reg1, RegStorage reg2, int scale,
+ int offset) {
LOG(FATAL) << "Unexpected use of OpLea for Arm";
}
@@ -285,12 +290,12 @@
return false;
}
RegLocation rl_src_address = info->args[0]; // long address
- rl_src_address.wide = 0; // ignore high half in info->args[1]
+ rl_src_address = NarrowRegLoc(rl_src_address); // ignore high half in info->args[1]
RegLocation rl_dest = InlineTarget(info);
RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
DCHECK(size == kSignedByte);
- LoadBaseDisp(rl_address.reg.GetReg(), 0, rl_result.reg.GetReg(), size, INVALID_SREG);
+ LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size, INVALID_SREG);
StoreValue(rl_dest, rl_result);
return true;
}
@@ -301,26 +306,26 @@
return false;
}
RegLocation rl_src_address = info->args[0]; // long address
- rl_src_address.wide = 0; // ignore high half in info->args[1]
+ rl_src_address = NarrowRegLoc(rl_src_address); // ignore high half in info->args[1]
RegLocation rl_src_value = info->args[2]; // [size] value
RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);
DCHECK(size == kSignedByte);
RegLocation rl_value = LoadValue(rl_src_value, kCoreReg);
- StoreBaseDisp(rl_address.reg.GetReg(), 0, rl_value.reg.GetReg(), size);
+ StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size);
return true;
}
-LIR* MipsMir2Lir::OpPcRelLoad(int reg, LIR* target) {
+LIR* MipsMir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
LOG(FATAL) << "Unexpected use of OpPcRelLoad for Mips";
return NULL;
}
-LIR* MipsMir2Lir::OpVldm(int rBase, int count) {
+LIR* MipsMir2Lir::OpVldm(RegStorage r_base, int count) {
LOG(FATAL) << "Unexpected use of OpVldm for Mips";
return NULL;
}
-LIR* MipsMir2Lir::OpVstm(int rBase, int count) {
+LIR* MipsMir2Lir::OpVstm(RegStorage r_base, int count) {
LOG(FATAL) << "Unexpected use of OpVstm for Mips";
return NULL;
}
@@ -328,30 +333,31 @@
void MipsMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
RegLocation rl_result, int lit,
int first_bit, int second_bit) {
- int t_reg = AllocTemp();
- OpRegRegImm(kOpLsl, t_reg, rl_src.reg.GetReg(), second_bit - first_bit);
- OpRegRegReg(kOpAdd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), t_reg);
+ RegStorage t_reg = AllocTemp();
+ OpRegRegImm(kOpLsl, t_reg, rl_src.reg, second_bit - first_bit);
+ OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, t_reg);
FreeTemp(t_reg);
if (first_bit != 0) {
- OpRegRegImm(kOpLsl, rl_result.reg.GetReg(), rl_result.reg.GetReg(), first_bit);
+ OpRegRegImm(kOpLsl, rl_result.reg, rl_result.reg, first_bit);
}
}
-void MipsMir2Lir::GenDivZeroCheck(int reg_lo, int reg_hi) {
- int t_reg = AllocTemp();
- OpRegRegReg(kOpOr, t_reg, reg_lo, reg_hi);
+void MipsMir2Lir::GenDivZeroCheck(RegStorage reg) {
+ DCHECK(reg.IsPair()); // TODO: support k64BitSolo.
+ RegStorage t_reg = AllocTemp();
+ OpRegRegReg(kOpOr, t_reg, reg.GetLow(), reg.GetHigh());
GenImmedCheck(kCondEq, t_reg, 0, kThrowDivZero);
FreeTemp(t_reg);
}
// Test suspend flag, return target of taken suspend branch
LIR* MipsMir2Lir::OpTestSuspend(LIR* target) {
- OpRegImm(kOpSub, rMIPS_SUSPEND, 1);
- return OpCmpImmBranch((target == NULL) ? kCondEq : kCondNe, rMIPS_SUSPEND, 0, target);
+ OpRegImm(kOpSub, rs_rMIPS_SUSPEND, 1);
+ return OpCmpImmBranch((target == NULL) ? kCondEq : kCondNe, rs_rMIPS_SUSPEND, 0, target);
}
// Decrement register and branch on condition
-LIR* MipsMir2Lir::OpDecAndBranch(ConditionCode c_code, int reg, LIR* target) {
+LIR* MipsMir2Lir::OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) {
OpRegImm(kOpSub, reg, 1);
return OpCmpImmBranch(c_code, reg, 0, target);
}
@@ -362,6 +368,11 @@
return false;
}
+bool MipsMir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
+ LOG(FATAL) << "Unexpected use of easyMultiply in Mips";
+ return false;
+}
+
LIR* MipsMir2Lir::OpIT(ConditionCode cond, const char* guide) {
LOG(FATAL) << "Unexpected use of OpIT in Mips";
return NULL;
@@ -385,11 +396,11 @@
* addu v1,v1,t1
*/
- OpRegRegReg(kOpAdd, rl_result.reg.GetReg(), rl_src2.reg.GetReg(), rl_src1.reg.GetReg());
- int t_reg = AllocTemp();
- OpRegRegReg(kOpAdd, t_reg, rl_src2.reg.GetHighReg(), rl_src1.reg.GetHighReg());
- NewLIR3(kMipsSltu, rl_result.reg.GetHighReg(), rl_result.reg.GetReg(), rl_src2.reg.GetReg());
- OpRegRegReg(kOpAdd, rl_result.reg.GetHighReg(), rl_result.reg.GetHighReg(), t_reg);
+ OpRegRegReg(kOpAdd, rl_result.reg.GetLow(), rl_src2.reg.GetLow(), rl_src1.reg.GetLow());
+ RegStorage t_reg = AllocTemp();
+ OpRegRegReg(kOpAdd, t_reg, rl_src2.reg.GetHigh(), rl_src1.reg.GetHigh());
+ NewLIR3(kMipsSltu, rl_result.reg.GetHighReg(), rl_result.reg.GetLowReg(), rl_src2.reg.GetLowReg());
+ OpRegRegReg(kOpAdd, rl_result.reg.GetHigh(), rl_result.reg.GetHigh(), t_reg);
FreeTemp(t_reg);
StoreValueWide(rl_dest, rl_result);
}
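The addu/sltu pattern above is the standard 64-bit add on a 32-bit core: sltu after the low-word add recovers the carry, which is then folded into the high word (GenSubLong below mirrors it with a borrow). A C model, not part of the patch:

#include <cstdint>

uint64_t Add64Via32(uint32_t a_lo, uint32_t a_hi, uint32_t b_lo, uint32_t b_hi) {
  uint32_t lo = a_lo + b_lo;          // addu v0, a0, a2
  uint32_t carry = (lo < b_lo);       // sltu t1, v0, a2
  uint32_t hi = a_hi + b_hi + carry;  // addu v1, a1, a3 ; addu v1, v1, t1
  return (static_cast<uint64_t>(hi) << 32) | lo;
}
// e.g. Add64Via32(0xffffffff, 0, 1, 0) == 0x100000000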
@@ -407,11 +418,11 @@
* subu v1,v1,t1
*/
- int t_reg = AllocTemp();
- NewLIR3(kMipsSltu, t_reg, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
- OpRegRegReg(kOpSub, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
- OpRegRegReg(kOpSub, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
- OpRegRegReg(kOpSub, rl_result.reg.GetHighReg(), rl_result.reg.GetHighReg(), t_reg);
+ RegStorage t_reg = AllocTemp();
+ NewLIR3(kMipsSltu, t_reg.GetReg(), rl_src1.reg.GetLowReg(), rl_src2.reg.GetLowReg());
+ OpRegRegReg(kOpSub, rl_result.reg.GetLow(), rl_src1.reg.GetLow(), rl_src2.reg.GetLow());
+ OpRegRegReg(kOpSub, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), rl_src2.reg.GetHigh());
+ OpRegRegReg(kOpSub, rl_result.reg.GetHigh(), rl_result.reg.GetHigh(), t_reg);
FreeTemp(t_reg);
StoreValueWide(rl_dest, rl_result);
}
@@ -427,11 +438,11 @@
* subu v1,v1,t1
*/
- OpRegReg(kOpNeg, rl_result.reg.GetReg(), rl_src.reg.GetReg());
- OpRegReg(kOpNeg, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg());
- int t_reg = AllocTemp();
- NewLIR3(kMipsSltu, t_reg, r_ZERO, rl_result.reg.GetReg());
- OpRegRegReg(kOpSub, rl_result.reg.GetHighReg(), rl_result.reg.GetHighReg(), t_reg);
+ OpRegReg(kOpNeg, rl_result.reg.GetLow(), rl_src.reg.GetLow());
+ OpRegReg(kOpNeg, rl_result.reg.GetHigh(), rl_src.reg.GetHigh());
+ RegStorage t_reg = AllocTemp();
+ NewLIR3(kMipsSltu, t_reg.GetReg(), rZERO, rl_result.reg.GetLowReg());
+ OpRegRegReg(kOpSub, rl_result.reg.GetHigh(), rl_result.reg.GetHigh(), t_reg);
FreeTemp(t_reg);
StoreValueWide(rl_dest, rl_result);
}
@@ -471,36 +482,36 @@
}
/* null object? */
- GenNullCheck(rl_array.reg.GetReg(), opt_flags);
+ GenNullCheck(rl_array.reg, opt_flags);
- int reg_ptr = AllocTemp();
+ RegStorage reg_ptr = AllocTemp();
bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
- int reg_len = INVALID_REG;
+ RegStorage reg_len;
if (needs_range_check) {
reg_len = AllocTemp();
/* Get len */
- LoadWordDisp(rl_array.reg.GetReg(), len_offset, reg_len);
+ LoadWordDisp(rl_array.reg, len_offset, reg_len);
}
/* reg_ptr -> array data */
- OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg.GetReg(), data_offset);
+ OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
FreeTemp(rl_array.reg.GetReg());
if ((size == kLong) || (size == kDouble)) {
if (scale) {
- int r_new_index = AllocTemp();
- OpRegRegImm(kOpLsl, r_new_index, rl_index.reg.GetReg(), scale);
+ RegStorage r_new_index = AllocTemp();
+ OpRegRegImm(kOpLsl, r_new_index, rl_index.reg, scale);
OpRegReg(kOpAdd, reg_ptr, r_new_index);
FreeTemp(r_new_index);
} else {
- OpRegReg(kOpAdd, reg_ptr, rl_index.reg.GetReg());
+ OpRegReg(kOpAdd, reg_ptr, rl_index.reg);
}
- FreeTemp(rl_index.reg.GetReg());
+ FreeTemp(rl_index.reg);
rl_result = EvalLoc(rl_dest, reg_class, true);
if (needs_range_check) {
- GenRegRegCheck(kCondUge, rl_index.reg.GetReg(), reg_len, kThrowArrayBounds);
+ GenRegRegCheck(kCondUge, rl_index.reg, reg_len, kThrowArrayBounds);
FreeTemp(reg_len);
}
- LoadBaseDispWide(reg_ptr, 0, rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), INVALID_SREG);
+ LoadBaseDispWide(reg_ptr, 0, rl_result.reg, INVALID_SREG);
FreeTemp(reg_ptr);
StoreValueWide(rl_dest, rl_result);
@@ -508,10 +519,10 @@
rl_result = EvalLoc(rl_dest, reg_class, true);
if (needs_range_check) {
- GenRegRegCheck(kCondUge, rl_index.reg.GetReg(), reg_len, kThrowArrayBounds);
+ GenRegRegCheck(kCondUge, rl_index.reg, reg_len, kThrowArrayBounds);
FreeTemp(reg_len);
}
- LoadBaseIndexed(reg_ptr, rl_index.reg.GetReg(), rl_result.reg.GetReg(), scale, size);
+ LoadBaseIndexed(reg_ptr, rl_index.reg, rl_result.reg, scale, size);
FreeTemp(reg_ptr);
StoreValue(rl_dest, rl_result);
@@ -536,27 +547,27 @@
rl_array = LoadValue(rl_array, kCoreReg);
rl_index = LoadValue(rl_index, kCoreReg);
- int reg_ptr = INVALID_REG;
+ RegStorage reg_ptr;
bool allocated_reg_ptr_temp = false;
if (IsTemp(rl_array.reg.GetReg()) && !card_mark) {
Clobber(rl_array.reg.GetReg());
- reg_ptr = rl_array.reg.GetReg();
+ reg_ptr = rl_array.reg;
} else {
reg_ptr = AllocTemp();
- OpRegCopy(reg_ptr, rl_array.reg.GetReg());
+ OpRegCopy(reg_ptr, rl_array.reg);
allocated_reg_ptr_temp = true;
}
/* null object? */
- GenNullCheck(rl_array.reg.GetReg(), opt_flags);
+ GenNullCheck(rl_array.reg, opt_flags);
bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
- int reg_len = INVALID_REG;
+ RegStorage reg_len;
if (needs_range_check) {
reg_len = AllocTemp();
// NOTE: max live temps(4) here.
/* Get len */
- LoadWordDisp(rl_array.reg.GetReg(), len_offset, reg_len);
+ LoadWordDisp(rl_array.reg, len_offset, reg_len);
}
/* reg_ptr -> array data */
OpRegImm(kOpAdd, reg_ptr, data_offset);
@@ -564,35 +575,34 @@
if ((size == kLong) || (size == kDouble)) {
// TUNING: specific wide routine that can handle fp regs
if (scale) {
- int r_new_index = AllocTemp();
- OpRegRegImm(kOpLsl, r_new_index, rl_index.reg.GetReg(), scale);
+ RegStorage r_new_index = AllocTemp();
+ OpRegRegImm(kOpLsl, r_new_index, rl_index.reg, scale);
OpRegReg(kOpAdd, reg_ptr, r_new_index);
FreeTemp(r_new_index);
} else {
- OpRegReg(kOpAdd, reg_ptr, rl_index.reg.GetReg());
+ OpRegReg(kOpAdd, reg_ptr, rl_index.reg);
}
rl_src = LoadValueWide(rl_src, reg_class);
if (needs_range_check) {
- GenRegRegCheck(kCondUge, rl_index.reg.GetReg(), reg_len, kThrowArrayBounds);
+ GenRegRegCheck(kCondUge, rl_index.reg, reg_len, kThrowArrayBounds);
FreeTemp(reg_len);
}
- StoreBaseDispWide(reg_ptr, 0, rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
+ StoreBaseDispWide(reg_ptr, 0, rl_src.reg);
} else {
rl_src = LoadValue(rl_src, reg_class);
if (needs_range_check) {
- GenRegRegCheck(kCondUge, rl_index.reg.GetReg(), reg_len, kThrowArrayBounds);
+ GenRegRegCheck(kCondUge, rl_index.reg, reg_len, kThrowArrayBounds);
FreeTemp(reg_len);
}
- StoreBaseIndexed(reg_ptr, rl_index.reg.GetReg(), rl_src.reg.GetReg(),
- scale, size);
+ StoreBaseIndexed(reg_ptr, rl_index.reg, rl_src.reg, scale, size);
}
if (allocated_reg_ptr_temp) {
FreeTemp(reg_ptr);
}
if (card_mark) {
- MarkGCCard(rl_src.reg.GetReg(), rl_array.reg.GetReg());
+ MarkGCCard(rl_src.reg, rl_array.reg);
}
}
diff --git a/compiler/dex/quick/mips/mips_lir.h b/compiler/dex/quick/mips/mips_lir.h
index 77ae337..96cd3d8 100644
--- a/compiler/dex/quick/mips/mips_lir.h
+++ b/compiler/dex/quick/mips/mips_lir.h
@@ -109,36 +109,37 @@
// Mask to strip off fp flags.
#define MIPS_FP_REG_MASK (MIPS_FP_REG_OFFSET-1)
-#ifdef HAVE_LITTLE_ENDIAN
#define LOWORD_OFFSET 0
#define HIWORD_OFFSET 4
-#define r_ARG0 r_A0
-#define r_ARG1 r_A1
-#define r_ARG2 r_A2
-#define r_ARG3 r_A3
-#define r_RESULT0 r_V0
-#define r_RESULT1 r_V1
-#else
-#define LOWORD_OFFSET 4
-#define HIWORD_OFFSET 0
-#define r_ARG0 r_A1
-#define r_ARG1 r_A0
-#define r_ARG2 r_A3
-#define r_ARG3 r_A2
-#define r_RESULT0 r_V1
-#define r_RESULT1 r_V0
-#endif
+#define rARG0 rA0
+#define rs_rARG0 rs_rA0
+#define rARG1 rA1
+#define rs_rARG1 rs_rA1
+#define rARG2 rA2
+#define rs_rARG2 rs_rA2
+#define rARG3 rA3
+#define rs_rARG3 rs_rA3
+#define rRESULT0 rV0
+#define rs_rRESULT0 rs_rV0
+#define rRESULT1 rV1
+#define rs_rRESULT1 rs_rV1
-// These are the same for both big and little endian.
-#define r_FARG0 r_F12
-#define r_FARG1 r_F13
-#define r_FARG2 r_F14
-#define r_FARG3 r_F15
-#define r_FRESULT0 r_F0
-#define r_FRESULT1 r_F1
+#define rFARG0 rF12
+#define rs_rFARG0 rs_rF12
+#define rFARG1 rF13
+#define rs_rFARG1 rs_rF13
+#define rFARG2 rF14
+#define rs_rFARG2 rs_rF14
+#define rFARG3 rF15
+#define rs_rFARG3 rs_rF15
+#define rFRESULT0 rF0
+#define rs_rFRESULT0 rs_rF0
+#define rFRESULT1 rF1
+#define rs_rFRESULT1 rs_rF1
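Dropping the HAVE_LITTLE_ENDIAN conditional pins the register-pair layout to little-endian: the low word of a 64-bit value lives at offset 0 and the high word at offset 4. A quick self-contained check of that assumption (not part of the patch; it only passes on a little-endian host, which is the point):

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  uint64_t v = 0x1122334455667788ULL;
  uint32_t words[2];
  std::memcpy(words, &v, sizeof(v));
  assert(words[0] == 0x55667788u);  // LOWORD_OFFSET 0
  assert(words[1] == 0x11223344u);  // HIWORD_OFFSET 4
  return 0;
}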
// Regs not used for Mips.
-#define rMIPS_PC INVALID_REG
+#define rMIPS_LR RegStorage::kInvalidRegVal
+#define rMIPS_PC RegStorage::kInvalidRegVal
enum MipsResourceEncodingPos {
kMipsGPReg0 = 0,
@@ -158,130 +159,183 @@
#define ENCODE_MIPS_REG_PC (1ULL << kMipsRegPC)
enum MipsNativeRegisterPool {
- r_ZERO = 0,
- r_AT = 1,
- r_V0 = 2,
- r_V1 = 3,
- r_A0 = 4,
- r_A1 = 5,
- r_A2 = 6,
- r_A3 = 7,
- r_T0 = 8,
- r_T1 = 9,
- r_T2 = 10,
- r_T3 = 11,
- r_T4 = 12,
- r_T5 = 13,
- r_T6 = 14,
- r_T7 = 15,
- r_S0 = 16,
- r_S1 = 17,
- r_S2 = 18,
- r_S3 = 19,
- r_S4 = 20,
- r_S5 = 21,
- r_S6 = 22,
- r_S7 = 23,
- r_T8 = 24,
- r_T9 = 25,
- r_K0 = 26,
- r_K1 = 27,
- r_GP = 28,
- r_SP = 29,
- r_FP = 30,
- r_RA = 31,
+ rZERO = 0,
+ rAT = 1,
+ rV0 = 2,
+ rV1 = 3,
+ rA0 = 4,
+ rA1 = 5,
+ rA2 = 6,
+ rA3 = 7,
+ rT0 = 8,
+ rT1 = 9,
+ rT2 = 10,
+ rT3 = 11,
+ rT4 = 12,
+ rT5 = 13,
+ rT6 = 14,
+ rT7 = 15,
+ rS0 = 16,
+ rS1 = 17,
+ rS2 = 18,
+ rS3 = 19,
+ rS4 = 20,
+ rS5 = 21,
+ rS6 = 22,
+ rS7 = 23,
+ rT8 = 24,
+ rT9 = 25,
+ rK0 = 26,
+ rK1 = 27,
+ rGP = 28,
+ rSP = 29,
+ rFP = 30,
+ rRA = 31,
- r_F0 = 0 + MIPS_FP_REG_OFFSET,
- r_F1,
- r_F2,
- r_F3,
- r_F4,
- r_F5,
- r_F6,
- r_F7,
- r_F8,
- r_F9,
- r_F10,
- r_F11,
- r_F12,
- r_F13,
- r_F14,
- r_F15,
+ rF0 = 0 + MIPS_FP_REG_OFFSET,
+ rF1,
+ rF2,
+ rF3,
+ rF4,
+ rF5,
+ rF6,
+ rF7,
+ rF8,
+ rF9,
+ rF10,
+ rF11,
+ rF12,
+ rF13,
+ rF14,
+ rF15,
#if 0
/*
* TODO: The shared resource mask doesn't have enough bit positions to describe all
* MIPS registers. Expand it and enable use of fp registers 16 through 31.
*/
- r_F16,
- r_F17,
- r_F18,
- r_F19,
- r_F20,
- r_F21,
- r_F22,
- r_F23,
- r_F24,
- r_F25,
- r_F26,
- r_F27,
- r_F28,
- r_F29,
- r_F30,
- r_F31,
+ rF16,
+ rF17,
+ rF18,
+ rF19,
+ rF20,
+ rF21,
+ rF22,
+ rF23,
+ rF24,
+ rF25,
+ rF26,
+ rF27,
+ rF28,
+ rF29,
+ rF30,
+ rF31,
#endif
- r_DF0 = r_F0 + MIPS_FP_DOUBLE,
- r_DF1 = r_F2 + MIPS_FP_DOUBLE,
- r_DF2 = r_F4 + MIPS_FP_DOUBLE,
- r_DF3 = r_F6 + MIPS_FP_DOUBLE,
- r_DF4 = r_F8 + MIPS_FP_DOUBLE,
- r_DF5 = r_F10 + MIPS_FP_DOUBLE,
- r_DF6 = r_F12 + MIPS_FP_DOUBLE,
- r_DF7 = r_F14 + MIPS_FP_DOUBLE,
+ rDF0 = rF0 + MIPS_FP_DOUBLE,
+ rDF1 = rF2 + MIPS_FP_DOUBLE,
+ rDF2 = rF4 + MIPS_FP_DOUBLE,
+ rDF3 = rF6 + MIPS_FP_DOUBLE,
+ rDF4 = rF8 + MIPS_FP_DOUBLE,
+ rDF5 = rF10 + MIPS_FP_DOUBLE,
+ rDF6 = rF12 + MIPS_FP_DOUBLE,
+ rDF7 = rF14 + MIPS_FP_DOUBLE,
#if 0 // TODO: expand resource mask to enable use of all MIPS fp registers.
- r_DF8 = r_F16 + MIPS_FP_DOUBLE,
- r_DF9 = r_F18 + MIPS_FP_DOUBLE,
- r_DF10 = r_F20 + MIPS_FP_DOUBLE,
- r_DF11 = r_F22 + MIPS_FP_DOUBLE,
- r_DF12 = r_F24 + MIPS_FP_DOUBLE,
- r_DF13 = r_F26 + MIPS_FP_DOUBLE,
- r_DF14 = r_F28 + MIPS_FP_DOUBLE,
- r_DF15 = r_F30 + MIPS_FP_DOUBLE,
+ rDF8 = rF16 + MIPS_FP_DOUBLE,
+ rDF9 = rF18 + MIPS_FP_DOUBLE,
+ rDF10 = rF20 + MIPS_FP_DOUBLE,
+ rDF11 = rF22 + MIPS_FP_DOUBLE,
+ rDF12 = rF24 + MIPS_FP_DOUBLE,
+ rDF13 = rF26 + MIPS_FP_DOUBLE,
+ rDF14 = rF28 + MIPS_FP_DOUBLE,
+ rDF15 = rF30 + MIPS_FP_DOUBLE,
#endif
- r_HI = MIPS_EXTRA_REG_OFFSET,
- r_LO,
- r_PC,
+ rHI = MIPS_EXTRA_REG_OFFSET,
+ rLO,
+ rPC,
};
-#define rMIPS_SUSPEND r_S0
-#define rMIPS_SELF r_S1
-#define rMIPS_SP r_SP
-#define rMIPS_ARG0 r_ARG0
-#define rMIPS_ARG1 r_ARG1
-#define rMIPS_ARG2 r_ARG2
-#define rMIPS_ARG3 r_ARG3
-#define rMIPS_FARG0 r_FARG0
-#define rMIPS_FARG1 r_FARG1
-#define rMIPS_FARG2 r_FARG2
-#define rMIPS_FARG3 r_FARG3
-#define rMIPS_RET0 r_RESULT0
-#define rMIPS_RET1 r_RESULT1
-#define rMIPS_INVOKE_TGT r_T9
-#define rMIPS_COUNT INVALID_REG
-#define rMIPS_LR r_RA
+const RegStorage rs_rZERO(RegStorage::k32BitSolo, rZERO);
+const RegStorage rs_rAT(RegStorage::k32BitSolo, rAT);
+const RegStorage rs_rV0(RegStorage::k32BitSolo, rV0);
+const RegStorage rs_rV1(RegStorage::k32BitSolo, rV1);
+const RegStorage rs_rA0(RegStorage::k32BitSolo, rA0);
+const RegStorage rs_rA1(RegStorage::k32BitSolo, rA1);
+const RegStorage rs_rA2(RegStorage::k32BitSolo, rA2);
+const RegStorage rs_rA3(RegStorage::k32BitSolo, rA3);
+const RegStorage rs_rT0(RegStorage::k32BitSolo, rT0);
+const RegStorage rs_rT1(RegStorage::k32BitSolo, rT1);
+const RegStorage rs_rT2(RegStorage::k32BitSolo, rT2);
+const RegStorage rs_rT3(RegStorage::k32BitSolo, rT3);
+const RegStorage rs_rT4(RegStorage::k32BitSolo, rT4);
+const RegStorage rs_rT5(RegStorage::k32BitSolo, rT5);
+const RegStorage rs_rT6(RegStorage::k32BitSolo, rT6);
+const RegStorage rs_rT7(RegStorage::k32BitSolo, rT7);
+const RegStorage rs_rS0(RegStorage::k32BitSolo, rS0);
+const RegStorage rs_rS1(RegStorage::k32BitSolo, rS1);
+const RegStorage rs_rS2(RegStorage::k32BitSolo, rS2);
+const RegStorage rs_rS3(RegStorage::k32BitSolo, rS3);
+const RegStorage rs_rS4(RegStorage::k32BitSolo, rS4);
+const RegStorage rs_rS5(RegStorage::k32BitSolo, rS5);
+const RegStorage rs_rS6(RegStorage::k32BitSolo, rS6);
+const RegStorage rs_rS7(RegStorage::k32BitSolo, rS7);
+const RegStorage rs_rT8(RegStorage::k32BitSolo, rT8);
+const RegStorage rs_rT9(RegStorage::k32BitSolo, rT9);
+const RegStorage rs_rK0(RegStorage::k32BitSolo, rK0);
+const RegStorage rs_rK1(RegStorage::k32BitSolo, rK1);
+const RegStorage rs_rGP(RegStorage::k32BitSolo, rGP);
+const RegStorage rs_rSP(RegStorage::k32BitSolo, rSP);
+const RegStorage rs_rFP(RegStorage::k32BitSolo, rFP);
+const RegStorage rs_rRA(RegStorage::k32BitSolo, rRA);
+const RegStorage rs_rF12(RegStorage::k32BitSolo, rF12);
+const RegStorage rs_rF13(RegStorage::k32BitSolo, rF13);
+const RegStorage rs_rF14(RegStorage::k32BitSolo, rF14);
+const RegStorage rs_rF15(RegStorage::k32BitSolo, rF15);
+const RegStorage rs_rF0(RegStorage::k32BitSolo, rF0);
+const RegStorage rs_rF1(RegStorage::k32BitSolo, rF1);
+
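These rs_* constants wrap raw register numbers in RegStorage values. A simplified stand-in for the wrapper's shape, not the real art::RegStorage, just enough to show what k32BitSolo and k64BitPair carry:

class RegStorageModel {
 public:
  enum Kind { k32BitSolo, k64BitPair, kInvalid };
  RegStorageModel(Kind kind, int reg) : kind_(kind), low_(reg), high_(-1) {}
  RegStorageModel(Kind kind, int low, int high) : kind_(kind), low_(low), high_(high) {}
  bool IsPair() const { return kind_ == k64BitPair; }
  int GetReg() const { return low_; }      // solo view
  int GetLowReg() const { return low_; }   // pair views
  int GetHighReg() const { return high_; }
 private:
  Kind kind_;
  int low_;
  int high_;
};
// Usage mirroring the constants above:
// const RegStorageModel rs_rA0(RegStorageModel::k32BitSolo, 4);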
+// TODO: reduce/eliminate use of these.
+#define rMIPS_SUSPEND rS0
+#define rs_rMIPS_SUSPEND rs_rS0
+#define rMIPS_SELF rS1
+#define rs_rMIPS_SELF rs_rS1
+#define rMIPS_SP rSP
+#define rs_rMIPS_SP rs_rSP
+#define rMIPS_ARG0 rARG0
+#define rs_rMIPS_ARG0 rs_rARG0
+#define rMIPS_ARG1 rARG1
+#define rs_rMIPS_ARG1 rs_rARG1
+#define rMIPS_ARG2 rARG2
+#define rs_rMIPS_ARG2 rs_rARG2
+#define rMIPS_ARG3 rARG3
+#define rs_rMIPS_ARG3 rs_rARG3
+#define rMIPS_FARG0 rFARG0
+#define rs_rMIPS_FARG0 rs_rFARG0
+#define rMIPS_FARG1 rFARG1
+#define rs_rMIPS_FARG1 rs_rFARG1
+#define rMIPS_FARG2 rFARG2
+#define rs_rMIPS_FARG2 rs_rFARG2
+#define rMIPS_FARG3 rFARG3
+#define rs_rMIPS_FARG3 rs_rFARG3
+#define rMIPS_RET0 rRESULT0
+#define rs_rMIPS_RET0 rs_rRESULT0
+#define rMIPS_RET1 rRESULT1
+#define rs_rMIPS_RET1 rs_rRESULT1
+#define rMIPS_INVOKE_TGT rT9
+#define rs_rMIPS_INVOKE_TGT rs_rT9
+#define rMIPS_COUNT RegStorage::kInvalidRegVal
// RegisterLocation templates for return values (rV0, or the rV0/rV1 pair).
const RegLocation mips_loc_c_return
{kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
- RegStorage(RegStorage::k32BitSolo, r_V0), INVALID_SREG, INVALID_SREG};
+ RegStorage(RegStorage::k32BitSolo, rV0), INVALID_SREG, INVALID_SREG};
const RegLocation mips_loc_c_return_wide
{kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
- RegStorage(RegStorage::k64BitPair, r_V0, r_V1), INVALID_SREG, INVALID_SREG};
+ RegStorage(RegStorage::k64BitPair, rV0, rV1), INVALID_SREG, INVALID_SREG};
const RegLocation mips_loc_c_return_float
{kLocPhysReg, 0, 0, 0, 1, 0, 0, 0, 1, kVectorNotUsed,
- RegStorage(RegStorage::k32BitSolo, r_F0), INVALID_SREG, INVALID_SREG};
+ RegStorage(RegStorage::k32BitSolo, rF0), INVALID_SREG, INVALID_SREG};
const RegLocation mips_loc_c_return_double
{kLocPhysReg, 1, 0, 0, 1, 0, 0, 0, 1, kVectorNotUsed,
- RegStorage(RegStorage::k64BitPair, r_F0, r_F1), INVALID_SREG, INVALID_SREG};
+ RegStorage(RegStorage::k64BitPair, rF0, rF1), INVALID_SREG, INVALID_SREG};
enum MipsShiftEncodings {
kMipsLsl = 0x0,
diff --git a/compiler/dex/quick/mips/target_mips.cc b/compiler/dex/quick/mips/target_mips.cc
index b7fb2f4..67a44fa 100644
--- a/compiler/dex/quick/mips/target_mips.cc
+++ b/compiler/dex/quick/mips/target_mips.cc
@@ -26,18 +26,18 @@
namespace art {
-static int core_regs[] = {r_ZERO, r_AT, r_V0, r_V1, r_A0, r_A1, r_A2, r_A3,
- r_T0, r_T1, r_T2, r_T3, r_T4, r_T5, r_T6, r_T7,
- r_S0, r_S1, r_S2, r_S3, r_S4, r_S5, r_S6, r_S7, r_T8,
- r_T9, r_K0, r_K1, r_GP, r_SP, r_FP, r_RA};
-static int ReservedRegs[] = {r_ZERO, r_AT, r_S0, r_S1, r_K0, r_K1, r_GP, r_SP,
- r_RA};
-static int core_temps[] = {r_V0, r_V1, r_A0, r_A1, r_A2, r_A3, r_T0, r_T1, r_T2,
- r_T3, r_T4, r_T5, r_T6, r_T7, r_T8};
-static int FpRegs[] = {r_F0, r_F1, r_F2, r_F3, r_F4, r_F5, r_F6, r_F7,
- r_F8, r_F9, r_F10, r_F11, r_F12, r_F13, r_F14, r_F15};
-static int fp_temps[] = {r_F0, r_F1, r_F2, r_F3, r_F4, r_F5, r_F6, r_F7,
- r_F8, r_F9, r_F10, r_F11, r_F12, r_F13, r_F14, r_F15};
+static int core_regs[] = {rZERO, rAT, rV0, rV1, rA0, rA1, rA2, rA3,
+ rT0, rT1, rT2, rT3, rT4, rT5, rT6, rT7,
+ rS0, rS1, rS2, rS3, rS4, rS5, rS6, rS7, rT8,
+ rT9, rK0, rK1, rGP, rSP, rFP, rRA};
+static int ReservedRegs[] = {rZERO, rAT, rS0, rS1, rK0, rK1, rGP, rSP,
+ rRA};
+static int core_temps[] = {rV0, rV1, rA0, rA1, rA2, rA3, rT0, rT1, rT2,
+ rT3, rT4, rT5, rT6, rT7, rT8};
+static int FpRegs[] = {rF0, rF1, rF2, rF3, rF4, rF5, rF6, rF7,
+ rF8, rF9, rF10, rF11, rF12, rF13, rF14, rF15};
+static int fp_temps[] = {rF0, rF1, rF2, rF3, rF4, rF5, rF6, rF7,
+ rF8, rF9, rF10, rF11, rF12, rF13, rF14, rF15};
RegLocation MipsMir2Lir::LocCReturn() {
return mips_loc_c_return;
@@ -56,43 +56,43 @@
}
// Return a target-dependent special register.
-int MipsMir2Lir::TargetReg(SpecialTargetRegister reg) {
- int res = INVALID_REG;
+RegStorage MipsMir2Lir::TargetReg(SpecialTargetRegister reg) {
+ int res_reg = RegStorage::kInvalidRegVal;
switch (reg) {
- case kSelf: res = rMIPS_SELF; break;
- case kSuspend: res = rMIPS_SUSPEND; break;
- case kLr: res = rMIPS_LR; break;
- case kPc: res = rMIPS_PC; break;
- case kSp: res = rMIPS_SP; break;
- case kArg0: res = rMIPS_ARG0; break;
- case kArg1: res = rMIPS_ARG1; break;
- case kArg2: res = rMIPS_ARG2; break;
- case kArg3: res = rMIPS_ARG3; break;
- case kFArg0: res = rMIPS_FARG0; break;
- case kFArg1: res = rMIPS_FARG1; break;
- case kFArg2: res = rMIPS_FARG2; break;
- case kFArg3: res = rMIPS_FARG3; break;
- case kRet0: res = rMIPS_RET0; break;
- case kRet1: res = rMIPS_RET1; break;
- case kInvokeTgt: res = rMIPS_INVOKE_TGT; break;
- case kHiddenArg: res = r_T0; break;
- case kHiddenFpArg: res = INVALID_REG; break;
- case kCount: res = rMIPS_COUNT; break;
+ case kSelf: res_reg = rMIPS_SELF; break;
+ case kSuspend: res_reg = rMIPS_SUSPEND; break;
+ case kLr: res_reg = rMIPS_LR; break;
+ case kPc: res_reg = rMIPS_PC; break;
+ case kSp: res_reg = rMIPS_SP; break;
+ case kArg0: res_reg = rMIPS_ARG0; break;
+ case kArg1: res_reg = rMIPS_ARG1; break;
+ case kArg2: res_reg = rMIPS_ARG2; break;
+ case kArg3: res_reg = rMIPS_ARG3; break;
+ case kFArg0: res_reg = rMIPS_FARG0; break;
+ case kFArg1: res_reg = rMIPS_FARG1; break;
+ case kFArg2: res_reg = rMIPS_FARG2; break;
+ case kFArg3: res_reg = rMIPS_FARG3; break;
+ case kRet0: res_reg = rMIPS_RET0; break;
+ case kRet1: res_reg = rMIPS_RET1; break;
+ case kInvokeTgt: res_reg = rMIPS_INVOKE_TGT; break;
+ case kHiddenArg: res_reg = rT0; break;
+ case kHiddenFpArg: res_reg = RegStorage::kInvalidRegVal; break;
+ case kCount: res_reg = rMIPS_COUNT; break;
}
- return res;
+ return RegStorage::Solo32(res_reg);
}
-int MipsMir2Lir::GetArgMappingToPhysicalReg(int arg_num) {
+RegStorage MipsMir2Lir::GetArgMappingToPhysicalReg(int arg_num) {
// For the 32-bit internal ABI, the first 3 arguments are passed in registers.
switch (arg_num) {
case 0:
- return rMIPS_ARG1;
+ return rs_rMIPS_ARG1;
case 1:
- return rMIPS_ARG2;
+ return rs_rMIPS_ARG2;
case 2:
- return rMIPS_ARG3;
+ return rs_rMIPS_ARG3;
default:
- return INVALID_REG;
+ return RegStorage::InvalidReg();
}
}
@@ -311,7 +311,7 @@
*/
void MipsMir2Lir::AdjustSpillMask() {
- core_spill_mask_ |= (1 << r_RA);
+ core_spill_mask_ |= (1 << rRA);
num_core_spills_++;
}
@@ -325,9 +325,9 @@
LOG(FATAL) << "No support yet for promoted FP regs";
}
-void MipsMir2Lir::FlushRegWide(int reg1, int reg2) {
- RegisterInfo* info1 = GetRegInfo(reg1);
- RegisterInfo* info2 = GetRegInfo(reg2);
+void MipsMir2Lir::FlushRegWide(RegStorage reg) {
+ RegisterInfo* info1 = GetRegInfo(reg.GetLowReg());
+ RegisterInfo* info2 = GetRegInfo(reg.GetHighReg());
DCHECK(info1 && info2 && info1->pair && info2->pair &&
(info1->partner == info2->reg) &&
(info2->partner == info1->reg));
@@ -342,16 +342,18 @@
if (mir_graph_->SRegToVReg(info2->s_reg) < mir_graph_->SRegToVReg(info1->s_reg))
info1 = info2;
int v_reg = mir_graph_->SRegToVReg(info1->s_reg);
- StoreBaseDispWide(rMIPS_SP, VRegOffset(v_reg), info1->reg, info1->partner);
+ StoreBaseDispWide(rs_rMIPS_SP, VRegOffset(v_reg),
+ RegStorage(RegStorage::k64BitPair, info1->reg, info1->partner));
}
}
-void MipsMir2Lir::FlushReg(int reg) {
- RegisterInfo* info = GetRegInfo(reg);
+void MipsMir2Lir::FlushReg(RegStorage reg) {
+ DCHECK(!reg.IsPair());
+ RegisterInfo* info = GetRegInfo(reg.GetReg());
if (info->live && info->dirty) {
info->dirty = false;
int v_reg = mir_graph_->SRegToVReg(info->s_reg);
- StoreBaseDisp(rMIPS_SP, VRegOffset(v_reg), reg, kWord);
+ StoreBaseDisp(rs_rMIPS_SP, VRegOffset(v_reg), reg, kWord);
}
}
@@ -360,47 +362,51 @@
return MIPS_FPREG(reg);
}
+bool MipsMir2Lir::IsFpReg(RegStorage reg) {
+ return IsFpReg(reg.IsPair() ? reg.GetLowReg() : reg.GetReg());
+}
+
/* Clobber all regs that might be used by an external C call */
void MipsMir2Lir::ClobberCallerSave() {
- Clobber(r_ZERO);
- Clobber(r_AT);
- Clobber(r_V0);
- Clobber(r_V1);
- Clobber(r_A0);
- Clobber(r_A1);
- Clobber(r_A2);
- Clobber(r_A3);
- Clobber(r_T0);
- Clobber(r_T1);
- Clobber(r_T2);
- Clobber(r_T3);
- Clobber(r_T4);
- Clobber(r_T5);
- Clobber(r_T6);
- Clobber(r_T7);
- Clobber(r_T8);
- Clobber(r_T9);
- Clobber(r_K0);
- Clobber(r_K1);
- Clobber(r_GP);
- Clobber(r_FP);
- Clobber(r_RA);
- Clobber(r_F0);
- Clobber(r_F1);
- Clobber(r_F2);
- Clobber(r_F3);
- Clobber(r_F4);
- Clobber(r_F5);
- Clobber(r_F6);
- Clobber(r_F7);
- Clobber(r_F8);
- Clobber(r_F9);
- Clobber(r_F10);
- Clobber(r_F11);
- Clobber(r_F12);
- Clobber(r_F13);
- Clobber(r_F14);
- Clobber(r_F15);
+ Clobber(rZERO);
+ Clobber(rAT);
+ Clobber(rV0);
+ Clobber(rV1);
+ Clobber(rA0);
+ Clobber(rA1);
+ Clobber(rA2);
+ Clobber(rA3);
+ Clobber(rT0);
+ Clobber(rT1);
+ Clobber(rT2);
+ Clobber(rT3);
+ Clobber(rT4);
+ Clobber(rT5);
+ Clobber(rT6);
+ Clobber(rT7);
+ Clobber(rT8);
+ Clobber(rT9);
+ Clobber(rK0);
+ Clobber(rK1);
+ Clobber(rGP);
+ Clobber(rFP);
+ Clobber(rRA);
+ Clobber(rF0);
+ Clobber(rF1);
+ Clobber(rF2);
+ Clobber(rF3);
+ Clobber(rF4);
+ Clobber(rF5);
+ Clobber(rF6);
+ Clobber(rF7);
+ Clobber(rF8);
+ Clobber(rF9);
+ Clobber(rF10);
+ Clobber(rF11);
+ Clobber(rF12);
+ Clobber(rF13);
+ Clobber(rF14);
+ Clobber(rF15);
}
RegLocation MipsMir2Lir::GetReturnWideAlt() {
@@ -443,17 +449,15 @@
int low_reg;
if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
- low_reg = AllocTempDouble();
- high_reg = low_reg + 1;
- return RegStorage(RegStorage::k64BitPair, low_reg, high_reg);
+ return AllocTempDouble();
}
- low_reg = AllocTemp();
- high_reg = AllocTemp();
+ low_reg = AllocTemp().GetReg();
+ high_reg = AllocTemp().GetReg();
return RegStorage(RegStorage::k64BitPair, low_reg, high_reg);
}
-int MipsMir2Lir::AllocTypedTemp(bool fp_hint, int reg_class) {
+RegStorage MipsMir2Lir::AllocTypedTemp(bool fp_hint, int reg_class) {
if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
return AllocTempFloat();
}
@@ -494,11 +498,14 @@
}
void MipsMir2Lir::FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free) {
- if ((rl_free.reg.GetReg() != rl_keep.reg.GetReg()) && (rl_free.reg.GetReg() != rl_keep.reg.GetHighReg()) &&
- (rl_free.reg.GetHighReg() != rl_keep.reg.GetReg()) && (rl_free.reg.GetHighReg() != rl_keep.reg.GetHighReg())) {
- // No overlap, free both
- FreeTemp(rl_free.reg.GetReg());
- FreeTemp(rl_free.reg.GetHighReg());
+ DCHECK(rl_keep.wide);
+ DCHECK(rl_free.wide);
+ if ((rl_free.reg.GetLowReg() != rl_keep.reg.GetLowReg()) &&
+ (rl_free.reg.GetLowReg() != rl_keep.reg.GetHighReg()) &&
+ (rl_free.reg.GetHighReg() != rl_keep.reg.GetLowReg()) &&
+ (rl_free.reg.GetHighReg() != rl_keep.reg.GetHighReg())) {
+ // No overlap, free.
+ FreeTemp(rl_free.reg);
}
}
/*
@@ -507,14 +514,14 @@
* ensure that all branch instructions can be restarted if
* there is a trap in the shadow. Allocate a temp register.
*/
-int MipsMir2Lir::LoadHelper(ThreadOffset offset) {
- LoadWordDisp(rMIPS_SELF, offset.Int32Value(), r_T9);
- return r_T9;
+RegStorage MipsMir2Lir::LoadHelper(ThreadOffset offset) {
+ LoadWordDisp(rs_rMIPS_SELF, offset.Int32Value(), rs_rT9);
+ return rs_rT9;
}
LIR* MipsMir2Lir::CheckSuspendUsingLoad() {
- int tmp = AllocTemp();
- LoadWordDisp(rMIPS_SELF, Thread::ThreadSuspendTriggerOffset().Int32Value(), tmp);
+ RegStorage tmp = AllocTemp();
+ LoadWordDisp(rs_rMIPS_SELF, Thread::ThreadSuspendTriggerOffset().Int32Value(), tmp);
LIR *inst = LoadWordDisp(tmp, 0, tmp);
FreeTemp(tmp);
return inst;
@@ -526,11 +533,11 @@
}
uint32_t mask = core_spill_mask_;
int offset = num_core_spills_ * 4;
- OpRegImm(kOpSub, rMIPS_SP, offset);
+ OpRegImm(kOpSub, rs_rSP, offset);
for (int reg = 0; mask; mask >>= 1, reg++) {
if (mask & 0x1) {
offset -= 4;
- StoreWordDisp(rMIPS_SP, offset, reg);
+ StoreWordDisp(rs_rMIPS_SP, offset, RegStorage::Solo32(reg));
}
}
}
@@ -544,10 +551,10 @@
for (int reg = 0; mask; mask >>= 1, reg++) {
if (mask & 0x1) {
offset -= 4;
- LoadWordDisp(rMIPS_SP, offset, reg);
+ LoadWordDisp(rs_rMIPS_SP, offset, RegStorage::Solo32(reg));
}
}
- OpRegImm(kOpAdd, rMIPS_SP, frame_size_);
+ OpRegImm(kOpAdd, rs_rSP, frame_size_);
}
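Both spill loops walk core_spill_mask_ one bit at a time, handing each set bit a word slot at a descending offset from the adjusted SP. The traversal in isolation (a sketch, not part of the patch):

#include <cstdint>
#include <cstdio>

void ForEachSpillSlot(uint32_t mask, int num_spills) {
  int offset = num_spills * 4;
  for (int reg = 0; mask; mask >>= 1, reg++) {
    if (mask & 0x1) {
      offset -= 4;
      std::printf("reg %d <-> [SP + %d]\n", reg, offset);  // the sw/lw slot
    }
  }
}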
bool MipsMir2Lir::IsUnconditionalBranch(LIR* lir) {
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index 21c971c..4f31341 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -21,29 +21,29 @@
namespace art {
/* This file contains codegen for the MIPS32 ISA. */
-LIR* MipsMir2Lir::OpFpRegCopy(int r_dest, int r_src) {
+LIR* MipsMir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
int opcode;
/* must be both DOUBLE or both not DOUBLE */
- DCHECK_EQ(MIPS_DOUBLEREG(r_dest), MIPS_DOUBLEREG(r_src));
- if (MIPS_DOUBLEREG(r_dest)) {
+ DCHECK_EQ(MIPS_DOUBLEREG(r_dest.GetReg()), MIPS_DOUBLEREG(r_src.GetReg()));
+ if (MIPS_DOUBLEREG(r_dest.GetReg())) {
opcode = kMipsFmovd;
} else {
- if (MIPS_SINGLEREG(r_dest)) {
- if (MIPS_SINGLEREG(r_src)) {
+ if (MIPS_SINGLEREG(r_dest.GetReg())) {
+ if (MIPS_SINGLEREG(r_src.GetReg())) {
opcode = kMipsFmovs;
} else {
/* note the operands are swapped for the mtc1 instr */
- int t_opnd = r_src;
+ RegStorage t_opnd = r_src;
r_src = r_dest;
r_dest = t_opnd;
opcode = kMipsMtc1;
}
} else {
- DCHECK(MIPS_SINGLEREG(r_src));
+ DCHECK(MIPS_SINGLEREG(r_src.GetReg()));
opcode = kMipsMfc1;
}
}
- LIR* res = RawLIR(current_dalvik_offset_, opcode, r_src, r_dest);
+ LIR* res = RawLIR(current_dalvik_offset_, opcode, r_src.GetReg(), r_dest.GetReg());
if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
res->flags.is_nop = true;
}
@@ -75,31 +75,31 @@
* 1) r_dest is freshly returned from AllocTemp or
* 2) The codegen is under fixed register usage
*/
-LIR* MipsMir2Lir::LoadConstantNoClobber(int r_dest, int value) {
+LIR* MipsMir2Lir::LoadConstantNoClobber(RegStorage r_dest, int value) {
LIR *res;
- int r_dest_save = r_dest;
- int is_fp_reg = MIPS_FPREG(r_dest);
+ RegStorage r_dest_save = r_dest;
+ int is_fp_reg = MIPS_FPREG(r_dest.GetReg());
if (is_fp_reg) {
- DCHECK(MIPS_SINGLEREG(r_dest));
+ DCHECK(MIPS_SINGLEREG(r_dest.GetReg()));
r_dest = AllocTemp();
}
/* See if the value can be constructed cheaply */
if (value == 0) {
- res = NewLIR2(kMipsMove, r_dest, r_ZERO);
+ res = NewLIR2(kMipsMove, r_dest.GetReg(), rZERO);
} else if ((value > 0) && (value <= 65535)) {
- res = NewLIR3(kMipsOri, r_dest, r_ZERO, value);
+ res = NewLIR3(kMipsOri, r_dest.GetReg(), rZERO, value);
} else if ((value < 0) && (value >= -32768)) {
- res = NewLIR3(kMipsAddiu, r_dest, r_ZERO, value);
+ res = NewLIR3(kMipsAddiu, r_dest.GetReg(), rZERO, value);
} else {
- res = NewLIR2(kMipsLui, r_dest, value >> 16);
+ res = NewLIR2(kMipsLui, r_dest.GetReg(), value >> 16);
if (value & 0xffff)
- NewLIR3(kMipsOri, r_dest, r_dest, value);
+ NewLIR3(kMipsOri, r_dest.GetReg(), r_dest.GetReg(), value);
}
if (is_fp_reg) {
- NewLIR2(kMipsMtc1, r_dest, r_dest_save);
+ NewLIR2(kMipsMtc1, r_dest.GetReg(), r_dest_save.GetReg());
FreeTemp(r_dest);
}
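How the cheap-constant cases above play out, with illustrative values:

  // value == 0          -> move  rd, rZERO
  // value == 0x1234     -> ori   rd, rZERO, 0x1234    (0 < value <= 65535)
  // value == -8         -> addiu rd, rZERO, -8        (-32768 <= value < 0)
  // value == 0x12345678 -> lui   rd, 0x1234
  //                        ori   rd, rd, 0x5678       (only the low 16 bits are encoded)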
@@ -112,23 +112,22 @@
return res;
}
-LIR* MipsMir2Lir::OpReg(OpKind op, int r_dest_src) {
+LIR* MipsMir2Lir::OpReg(OpKind op, RegStorage r_dest_src) {
MipsOpCode opcode = kMipsNop;
switch (op) {
case kOpBlx:
opcode = kMipsJalr;
break;
case kOpBx:
- return NewLIR1(kMipsJr, r_dest_src);
+ return NewLIR1(kMipsJr, r_dest_src.GetReg());
break;
default:
LOG(FATAL) << "Bad case in OpReg";
}
- return NewLIR2(opcode, r_RA, r_dest_src);
+ return NewLIR2(opcode, rRA, r_dest_src.GetReg());
}
-LIR* MipsMir2Lir::OpRegImm(OpKind op, int r_dest_src1,
- int value) {
+LIR* MipsMir2Lir::OpRegImm(OpKind op, RegStorage r_dest_src1, int value) {
LIR *res;
bool neg = (value < 0);
int abs_value = (neg) ? -value : value;
@@ -146,19 +145,19 @@
break;
}
if (short_form) {
- res = NewLIR2(opcode, r_dest_src1, abs_value);
+ res = NewLIR2(opcode, r_dest_src1.GetReg(), abs_value);
} else {
- int r_scratch = AllocTemp();
+ RegStorage r_scratch = AllocTemp();
res = LoadConstant(r_scratch, value);
if (op == kOpCmp)
- NewLIR2(opcode, r_dest_src1, r_scratch);
+ NewLIR2(opcode, r_dest_src1.GetReg(), r_scratch.GetReg());
else
- NewLIR3(opcode, r_dest_src1, r_dest_src1, r_scratch);
+ NewLIR3(opcode, r_dest_src1.GetReg(), r_dest_src1.GetReg(), r_scratch.GetReg());
}
return res;
}
-LIR* MipsMir2Lir::OpRegRegReg(OpKind op, int r_dest, int r_src1, int r_src2) {
+LIR* MipsMir2Lir::OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2) {
MipsOpCode opcode = kMipsNop;
switch (op) {
case kOpAdd:
@@ -196,10 +195,10 @@
LOG(FATAL) << "bad case in OpRegRegReg";
break;
}
- return NewLIR3(opcode, r_dest, r_src1, r_src2);
+ return NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg());
}
-LIR* MipsMir2Lir::OpRegRegImm(OpKind op, int r_dest, int r_src1, int value) {
+LIR* MipsMir2Lir::OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value) {
LIR *res;
MipsOpCode opcode = kMipsNop;
bool short_form = true;
@@ -268,21 +267,21 @@
}
if (short_form) {
- res = NewLIR3(opcode, r_dest, r_src1, value);
+ res = NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), value);
} else {
if (r_dest != r_src1) {
res = LoadConstant(r_dest, value);
- NewLIR3(opcode, r_dest, r_src1, r_dest);
+ NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), r_dest.GetReg());
} else {
- int r_scratch = AllocTemp();
+ RegStorage r_scratch = AllocTemp();
res = LoadConstant(r_scratch, value);
- NewLIR3(opcode, r_dest, r_src1, r_scratch);
+ NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg());
}
}
return res;
}
-LIR* MipsMir2Lir::OpRegReg(OpKind op, int r_dest_src1, int r_src2) {
+LIR* MipsMir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) {
MipsOpCode opcode = kMipsNop;
LIR *res;
switch (op) {
@@ -290,9 +289,9 @@
opcode = kMipsMove;
break;
case kOpMvn:
- return NewLIR3(kMipsNor, r_dest_src1, r_src2, r_ZERO);
+ return NewLIR3(kMipsNor, r_dest_src1.GetReg(), r_src2.GetReg(), rZERO);
case kOpNeg:
- return NewLIR3(kMipsSubu, r_dest_src1, r_ZERO, r_src2);
+ return NewLIR3(kMipsSubu, r_dest_src1.GetReg(), rZERO, r_src2.GetReg());
case kOpAdd:
case kOpAnd:
case kOpMul:
@@ -302,7 +301,7 @@
return OpRegRegReg(op, r_dest_src1, r_dest_src1, r_src2);
case kOp2Byte:
#if __mips_isa_rev >= 2
- res = NewLIR2(kMipsSeb, r_dest_src1, r_src2);
+ res = NewLIR2(kMipsSeb, r_dest_src1.GetReg(), r_src2.GetReg());
#else
res = OpRegRegImm(kOpLsl, r_dest_src1, r_src2, 24);
OpRegRegImm(kOpAsr, r_dest_src1, r_dest_src1, 24);
@@ -310,53 +309,54 @@
return res;
case kOp2Short:
#if __mips_isa_rev >= 2
- res = NewLIR2(kMipsSeh, r_dest_src1, r_src2);
+ res = NewLIR2(kMipsSeh, r_dest_src1.GetReg(), r_src2.GetReg());
#else
res = OpRegRegImm(kOpLsl, r_dest_src1, r_src2, 16);
OpRegRegImm(kOpAsr, r_dest_src1, r_dest_src1, 16);
#endif
return res;
case kOp2Char:
- return NewLIR3(kMipsAndi, r_dest_src1, r_src2, 0xFFFF);
+ return NewLIR3(kMipsAndi, r_dest_src1.GetReg(), r_src2.GetReg(), 0xFFFF);
default:
LOG(FATAL) << "Bad case in OpRegReg";
break;
}
- return NewLIR2(opcode, r_dest_src1, r_src2);
+ return NewLIR2(opcode, r_dest_src1.GetReg(), r_src2.GetReg());
}
-LIR* MipsMir2Lir::OpMovRegMem(int r_dest, int r_base, int offset, MoveType move_type) {
+LIR* MipsMir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset,
+ MoveType move_type) {
UNIMPLEMENTED(FATAL);
return nullptr;
}
-LIR* MipsMir2Lir::OpMovMemReg(int r_base, int offset, int r_src, MoveType move_type) {
+LIR* MipsMir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) {
UNIMPLEMENTED(FATAL);
return nullptr;
}
-LIR* MipsMir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, int r_dest, int r_src) {
+LIR* MipsMir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) {
LOG(FATAL) << "Unexpected use of OpCondRegReg for MIPS";
return NULL;
}
-LIR* MipsMir2Lir::LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value) {
+LIR* MipsMir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
LIR *res;
- res = LoadConstantNoClobber(r_dest_lo, Low32Bits(value));
- LoadConstantNoClobber(r_dest_hi, High32Bits(value));
+ res = LoadConstantNoClobber(r_dest.GetLow(), Low32Bits(value));
+ LoadConstantNoClobber(r_dest.GetHigh(), High32Bits(value));
return res;
}
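For example (illustrative value):

  // value = 0x11223344AABBCCDDLL
  //   r_dest.GetLow()  <- 0xAABBCCDD   (Low32Bits)
  //   r_dest.GetHigh() <- 0x11223344   (High32Bits)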
/* Load value from base + scaled index. */
-LIR* MipsMir2Lir::LoadBaseIndexed(int rBase, int r_index, int r_dest,
+LIR* MipsMir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
int scale, OpSize size) {
LIR *first = NULL;
LIR *res;
MipsOpCode opcode = kMipsNop;
- int t_reg = AllocTemp();
+ RegStorage t_reg = AllocTemp();
- if (MIPS_FPREG(r_dest)) {
- DCHECK(MIPS_SINGLEREG(r_dest));
+ if (MIPS_FPREG(r_dest.GetReg())) {
+ DCHECK(MIPS_SINGLEREG(r_dest.GetReg()));
DCHECK((size == kWord) || (size == kSingle));
size = kSingle;
} else {
@@ -365,10 +365,10 @@
}
if (!scale) {
- first = NewLIR3(kMipsAddu, t_reg , rBase, r_index);
+ first = NewLIR3(kMipsAddu, t_reg.GetReg(), r_base.GetReg(), r_index.GetReg());
} else {
first = OpRegRegImm(kOpLsl, t_reg, r_index, scale);
- NewLIR3(kMipsAddu, t_reg , rBase, t_reg);
+ NewLIR3(kMipsAddu, t_reg.GetReg(), r_base.GetReg(), t_reg.GetReg());
}
switch (size) {
@@ -394,21 +394,20 @@
LOG(FATAL) << "Bad case in LoadBaseIndexed";
}
- res = NewLIR3(opcode, r_dest, 0, t_reg);
+ res = NewLIR3(opcode, r_dest.GetReg(), 0, t_reg.GetReg());
FreeTemp(t_reg);
return (first) ? first : res;
}
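The address computation above, sketched for an int-array element (scale = 2):

  // sll  t, index, 2    // OpRegRegImm(kOpLsl, t_reg, r_index, scale)
  // addu t, base, t     // NewLIR3(kMipsAddu, ...)
  // lw   dest, 0(t)     // NewLIR3(opcode, r_dest, 0, t_reg)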
/* Store value to base + scaled index. */
-LIR* MipsMir2Lir::StoreBaseIndexed(int rBase, int r_index, int r_src,
+LIR* MipsMir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
int scale, OpSize size) {
LIR *first = NULL;
MipsOpCode opcode = kMipsNop;
- int r_new_index = r_index;
- int t_reg = AllocTemp();
+ RegStorage t_reg = AllocTemp();
- if (MIPS_FPREG(r_src)) {
- DCHECK(MIPS_SINGLEREG(r_src));
+ if (MIPS_FPREG(r_src.GetReg())) {
+ DCHECK(MIPS_SINGLEREG(r_src.GetReg()));
DCHECK((size == kWord) || (size == kSingle));
size = kSingle;
} else {
@@ -417,10 +416,10 @@
}
if (!scale) {
- first = NewLIR3(kMipsAddu, t_reg , rBase, r_index);
+ first = NewLIR3(kMipsAddu, t_reg.GetReg(), r_base.GetReg(), r_index.GetReg());
} else {
first = OpRegRegImm(kOpLsl, t_reg, r_index, scale);
- NewLIR3(kMipsAddu, t_reg , rBase, t_reg);
+ NewLIR3(kMipsAddu, t_reg.GetReg(), r_base.GetReg(), t_reg.GetReg());
}
switch (size) {
@@ -441,13 +440,13 @@
default:
LOG(FATAL) << "Bad case in StoreBaseIndexed";
}
- NewLIR3(opcode, r_src, 0, t_reg);
- FreeTemp(r_new_index);
+ NewLIR3(opcode, r_src.GetReg(), 0, t_reg.GetReg());
return first;
}
-LIR* MipsMir2Lir::LoadBaseDispBody(int rBase, int displacement, int r_dest,
- int r_dest_hi, OpSize size, int s_reg) {
+// FIXME: don't split r_dest into 2 containers.
+LIR* MipsMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest,
+ RegStorage r_dest_hi, OpSize size, int s_reg) {
/*
* Load value from base + displacement. Optionally perform null check
* on base (which must have an associated s_reg and MIR). If not
@@ -468,15 +467,16 @@
case kDouble:
pair = true;
opcode = kMipsLw;
- if (MIPS_FPREG(r_dest)) {
+ if (MIPS_FPREG(r_dest.GetReg())) {
opcode = kMipsFlwc1;
- if (MIPS_DOUBLEREG(r_dest)) {
- r_dest = r_dest - MIPS_FP_DOUBLE;
+ if (MIPS_DOUBLEREG(r_dest.GetReg())) {
+ // TODO: rework to use k64BitSolo
+ r_dest.SetReg(r_dest.GetReg() - MIPS_FP_DOUBLE);
} else {
- DCHECK(MIPS_FPREG(r_dest_hi));
- DCHECK(r_dest == (r_dest_hi - 1));
+ DCHECK(MIPS_FPREG(r_dest_hi.GetReg()));
+ DCHECK_EQ(r_dest.GetReg(), r_dest_hi.GetReg() - 1);
}
- r_dest_hi = r_dest + 1;
+ r_dest_hi.SetReg(r_dest.GetReg() + 1);
}
short_form = IS_SIMM16_2WORD(displacement);
DCHECK_EQ((displacement & 0x3), 0);
@@ -484,9 +484,9 @@
case kWord:
case kSingle:
opcode = kMipsLw;
- if (MIPS_FPREG(r_dest)) {
+ if (MIPS_FPREG(r_dest.GetReg())) {
opcode = kMipsFlwc1;
- DCHECK(MIPS_SINGLEREG(r_dest));
+ DCHECK(MIPS_SINGLEREG(r_dest.GetReg()));
}
DCHECK_EQ((displacement & 0x3), 0);
break;
@@ -510,30 +510,28 @@
if (short_form) {
if (!pair) {
- load = res = NewLIR3(opcode, r_dest, displacement, rBase);
+ load = res = NewLIR3(opcode, r_dest.GetReg(), displacement, r_base.GetReg());
} else {
- load = res = NewLIR3(opcode, r_dest,
- displacement + LOWORD_OFFSET, rBase);
- load2 = NewLIR3(opcode, r_dest_hi,
- displacement + HIWORD_OFFSET, rBase);
+ load = res = NewLIR3(opcode, r_dest.GetReg(), displacement + LOWORD_OFFSET, r_base.GetReg());
+ load2 = NewLIR3(opcode, r_dest_hi.GetReg(), displacement + HIWORD_OFFSET, r_base.GetReg());
}
} else {
if (pair) {
- int r_tmp = AllocTemp();
- res = OpRegRegImm(kOpAdd, r_tmp, rBase, displacement);
- load = NewLIR3(opcode, r_dest, LOWORD_OFFSET, r_tmp);
- load2 = NewLIR3(opcode, r_dest_hi, HIWORD_OFFSET, r_tmp);
+ RegStorage r_tmp = AllocTemp();
+ res = OpRegRegImm(kOpAdd, r_tmp, r_base, displacement);
+ load = NewLIR3(opcode, r_dest.GetReg(), LOWORD_OFFSET, r_tmp.GetReg());
+ load2 = NewLIR3(opcode, r_dest_hi.GetReg(), HIWORD_OFFSET, r_tmp.GetReg());
FreeTemp(r_tmp);
} else {
- int r_tmp = (rBase == r_dest) ? AllocTemp() : r_dest;
- res = OpRegRegImm(kOpAdd, r_tmp, rBase, displacement);
- load = NewLIR3(opcode, r_dest, 0, r_tmp);
+ RegStorage r_tmp = (r_base == r_dest) ? AllocTemp() : r_dest;
+ res = OpRegRegImm(kOpAdd, r_tmp, r_base, displacement);
+ load = NewLIR3(opcode, r_dest.GetReg(), 0, r_tmp.GetReg());
if (r_tmp != r_dest)
FreeTemp(r_tmp);
}
}
- if (rBase == rMIPS_SP) {
+ if (r_base == rs_rMIPS_SP) {
AnnotateDalvikRegAccess(load, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
true /* is_load */, pair /* is64bit */);
if (pair) {
@@ -544,19 +542,19 @@
return load;
}
-LIR* MipsMir2Lir::LoadBaseDisp(int rBase, int displacement, int r_dest,
+LIR* MipsMir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
OpSize size, int s_reg) {
- return LoadBaseDispBody(rBase, displacement, r_dest, -1,
- size, s_reg);
+ return LoadBaseDispBody(r_base, displacement, r_dest, RegStorage::InvalidReg(), size,
+ s_reg);
}
-LIR* MipsMir2Lir::LoadBaseDispWide(int rBase, int displacement,
- int r_dest_lo, int r_dest_hi, int s_reg) {
- return LoadBaseDispBody(rBase, displacement, r_dest_lo, r_dest_hi, kLong, s_reg);
+LIR* MipsMir2Lir::LoadBaseDispWide(RegStorage r_base, int displacement, RegStorage r_dest,
+ int s_reg) {
+ return LoadBaseDispBody(r_base, displacement, r_dest.GetLow(), r_dest.GetHigh(), kLong, s_reg);
}
-LIR* MipsMir2Lir::StoreBaseDispBody(int rBase, int displacement,
- int r_src, int r_src_hi, OpSize size) {
+LIR* MipsMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement,
+ RegStorage r_src, RegStorage r_src_hi, OpSize size) {
LIR *res;
LIR *store = NULL;
LIR *store2 = NULL;
@@ -569,15 +567,15 @@
case kDouble:
pair = true;
opcode = kMipsSw;
- if (MIPS_FPREG(r_src)) {
+ if (MIPS_FPREG(r_src.GetReg())) {
opcode = kMipsFswc1;
- if (MIPS_DOUBLEREG(r_src)) {
- r_src = r_src - MIPS_FP_DOUBLE;
+ if (MIPS_DOUBLEREG(r_src.GetReg())) {
+ r_src.SetReg(r_src.GetReg() - MIPS_FP_DOUBLE);
} else {
- DCHECK(MIPS_FPREG(r_src_hi));
- DCHECK_EQ(r_src, (r_src_hi - 1));
+ DCHECK(MIPS_FPREG(r_src_hi.GetReg()));
+ DCHECK_EQ(r_src.GetReg(), (r_src_hi.GetReg() - 1));
}
- r_src_hi = r_src + 1;
+ r_src_hi.SetReg(r_src.GetReg() + 1);
}
short_form = IS_SIMM16_2WORD(displacement);
DCHECK_EQ((displacement & 0x3), 0);
@@ -585,9 +583,9 @@
case kWord:
case kSingle:
opcode = kMipsSw;
- if (MIPS_FPREG(r_src)) {
+ if (MIPS_FPREG(r_src.GetReg())) {
opcode = kMipsFswc1;
- DCHECK(MIPS_SINGLEREG(r_src));
+ DCHECK(MIPS_SINGLEREG(r_src.GetReg()));
}
DCHECK_EQ((displacement & 0x3), 0);
break;
@@ -601,31 +599,29 @@
opcode = kMipsSb;
break;
default:
- LOG(FATAL) << "Bad case in StoreBaseIndexedBody";
+ LOG(FATAL) << "Bad case in StoreBaseDispBody";
}
if (short_form) {
if (!pair) {
- store = res = NewLIR3(opcode, r_src, displacement, rBase);
+ store = res = NewLIR3(opcode, r_src.GetReg(), displacement, r_base.GetReg());
} else {
- store = res = NewLIR3(opcode, r_src, displacement + LOWORD_OFFSET,
- rBase);
- store2 = NewLIR3(opcode, r_src_hi, displacement + HIWORD_OFFSET,
- rBase);
+ store = res = NewLIR3(opcode, r_src.GetReg(), displacement + LOWORD_OFFSET, r_base.GetReg());
+ store2 = NewLIR3(opcode, r_src_hi.GetReg(), displacement + HIWORD_OFFSET, r_base.GetReg());
}
} else {
- int r_scratch = AllocTemp();
- res = OpRegRegImm(kOpAdd, r_scratch, rBase, displacement);
+ RegStorage r_scratch = AllocTemp();
+ res = OpRegRegImm(kOpAdd, r_scratch, r_base, displacement);
if (!pair) {
- store = NewLIR3(opcode, r_src, 0, r_scratch);
+ store = NewLIR3(opcode, r_src.GetReg(), 0, r_scratch.GetReg());
} else {
- store = NewLIR3(opcode, r_src, LOWORD_OFFSET, r_scratch);
- store2 = NewLIR3(opcode, r_src_hi, HIWORD_OFFSET, r_scratch);
+ store = NewLIR3(opcode, r_src.GetReg(), LOWORD_OFFSET, r_scratch.GetReg());
+ store2 = NewLIR3(opcode, r_src_hi.GetReg(), HIWORD_OFFSET, r_scratch.GetReg());
}
FreeTemp(r_scratch);
}
- if (rBase == rMIPS_SP) {
+ if (r_base == rs_rMIPS_SP) {
AnnotateDalvikRegAccess(store, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
false /* is_load */, pair /* is64bit */);
if (pair) {
@@ -637,14 +633,13 @@
return res;
}
-LIR* MipsMir2Lir::StoreBaseDisp(int rBase, int displacement, int r_src,
+LIR* MipsMir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
OpSize size) {
- return StoreBaseDispBody(rBase, displacement, r_src, -1, size);
+ return StoreBaseDispBody(r_base, displacement, r_src, RegStorage::InvalidReg(), size);
}
-LIR* MipsMir2Lir::StoreBaseDispWide(int rBase, int displacement,
- int r_src_lo, int r_src_hi) {
- return StoreBaseDispBody(rBase, displacement, r_src_lo, r_src_hi, kLong);
+LIR* MipsMir2Lir::StoreBaseDispWide(RegStorage r_base, int displacement, RegStorage r_src) {
+ return StoreBaseDispBody(r_base, displacement, r_src.GetLow(), r_src.GetHigh(), kLong);
}
LIR* MipsMir2Lir::OpThreadMem(OpKind op, ThreadOffset thread_offset) {
@@ -652,25 +647,26 @@
return NULL;
}
-LIR* MipsMir2Lir::OpMem(OpKind op, int rBase, int disp) {
+LIR* MipsMir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
LOG(FATAL) << "Unexpected use of OpMem for MIPS";
return NULL;
}
-LIR* MipsMir2Lir::StoreBaseIndexedDisp(int rBase, int r_index, int scale, int displacement,
- int r_src, int r_src_hi, OpSize size, int s_reg) {
+LIR* MipsMir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
+ int displacement, RegStorage r_src, RegStorage r_src_hi,
+ OpSize size, int s_reg) {
LOG(FATAL) << "Unexpected use of StoreBaseIndexedDisp for MIPS";
return NULL;
}
-LIR* MipsMir2Lir::OpRegMem(OpKind op, int r_dest, int rBase,
- int offset) {
+LIR* MipsMir2Lir::OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset) {
LOG(FATAL) << "Unexpected use of OpRegMem for MIPS";
return NULL;
}
-LIR* MipsMir2Lir::LoadBaseIndexedDisp(int rBase, int r_index, int scale, int displacement,
- int r_dest, int r_dest_hi, OpSize size, int s_reg) {
+LIR* MipsMir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
+ int displacement, RegStorage r_dest, RegStorage r_dest_hi,
+ OpSize size, int s_reg) {
LOG(FATAL) << "Unexpected use of LoadBaseIndexedDisp for MIPS";
return NULL;
}
diff --git a/compiler/dex/quick/mir_to_lir-inl.h b/compiler/dex/quick/mir_to_lir-inl.h
index 8b1f81d..b2362fc 100644
--- a/compiler/dex/quick/mir_to_lir-inl.h
+++ b/compiler/dex/quick/mir_to_lir-inl.h
@@ -192,6 +192,10 @@
SetupRegMask(&lir->u.m.def_mask, lir->operands[1]);
}
+ if (flags & REG_DEF2) {
+ SetupRegMask(&lir->u.m.def_mask, lir->operands[2]);
+ }
+
if (flags & REG_USE0) {
SetupRegMask(&lir->u.m.use_mask, lir->operands[0]);
}
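A hypothetical encoding-table entry that would exercise the new flag (the opcode is made up for illustration):

  // An instruction that writes its third operand would carry REG_DEF2 in its
  // attribute flags, e.g. REG_DEF2 | REG_USE01, so the block above records
  // lir->operands[2] in the def mask.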
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 82664e2..e81a037 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -24,20 +24,23 @@
namespace art {
void Mir2Lir::LockArg(int in_position, bool wide) {
- int reg_arg_low = GetArgMappingToPhysicalReg(in_position);
- int reg_arg_high = wide ? GetArgMappingToPhysicalReg(in_position + 1) : INVALID_REG;
+ RegStorage reg_arg_low = GetArgMappingToPhysicalReg(in_position);
+ RegStorage reg_arg_high = wide ? GetArgMappingToPhysicalReg(in_position + 1) :
+ RegStorage::InvalidReg();
- if (reg_arg_low != INVALID_REG) {
+ if (reg_arg_low.Valid()) {
LockTemp(reg_arg_low);
}
- if (reg_arg_high != INVALID_REG && reg_arg_low != reg_arg_high) {
+ if (reg_arg_high.Valid() && reg_arg_low != reg_arg_high) {
LockTemp(reg_arg_high);
}
}
-int Mir2Lir::LoadArg(int in_position, bool wide) {
- int reg_arg_low = GetArgMappingToPhysicalReg(in_position);
- int reg_arg_high = wide ? GetArgMappingToPhysicalReg(in_position + 1) : INVALID_REG;
+// TODO: needs revisit for 64-bit.
+RegStorage Mir2Lir::LoadArg(int in_position, bool wide) {
+ RegStorage reg_arg_low = GetArgMappingToPhysicalReg(in_position);
+ RegStorage reg_arg_high = wide ? GetArgMappingToPhysicalReg(in_position + 1) :
+ RegStorage::InvalidReg();
int offset = StackVisitor::GetOutVROffset(in_position);
if (cu_->instruction_set == kX86) {
@@ -50,13 +53,13 @@
}
// If the VR is wide and there is no register for high part, we need to load it.
- if (wide && reg_arg_high == INVALID_REG) {
+ if (wide && !reg_arg_high.Valid()) {
// If the low part is not in a reg, we allocate a pair. Otherwise, we just load to high reg.
- if (reg_arg_low == INVALID_REG) {
+ if (!reg_arg_low.Valid()) {
RegStorage new_regs = AllocTypedTempWide(false, kAnyReg);
- reg_arg_low = new_regs.GetReg();
- reg_arg_high = new_regs.GetHighReg();
- LoadBaseDispWide(TargetReg(kSp), offset, reg_arg_low, reg_arg_high, INVALID_SREG);
+ reg_arg_low = new_regs.GetLow();
+ reg_arg_high = new_regs.GetHigh();
+ LoadBaseDispWide(TargetReg(kSp), offset, new_regs, INVALID_SREG);
} else {
reg_arg_high = AllocTemp();
int offset_high = offset + sizeof(uint32_t);
@@ -65,14 +68,13 @@
}
// If the low part is not in a register yet, we need to load it.
- if (reg_arg_low == INVALID_REG) {
+ if (!reg_arg_low.Valid()) {
reg_arg_low = AllocTemp();
LoadWordDisp(TargetReg(kSp), offset, reg_arg_low);
}
if (wide) {
- // TODO: replace w/ RegStorage.
- return ENCODE_REG_PAIR(reg_arg_low, reg_arg_high);
+ return RegStorage::MakeRegPair(reg_arg_low, reg_arg_high);
} else {
return reg_arg_low;
}
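Usage sketch for the new return convention, assuming a wide argument whose halves both reach registers:

  RegStorage arg = LoadArg(pos, true /* wide */);
  // arg.IsPair() is true; the halves are arg.GetLow() / arg.GetHigh(),
  // replacing the old ENCODE_REG_PAIR/DECODE_REG_PAIR int packing.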
@@ -90,27 +92,27 @@
}
if (!rl_dest.wide) {
- int reg = GetArgMappingToPhysicalReg(in_position);
- if (reg != INVALID_REG) {
- OpRegCopy(rl_dest.reg.GetReg(), reg);
+ RegStorage reg = GetArgMappingToPhysicalReg(in_position);
+ if (reg.Valid()) {
+ OpRegCopy(rl_dest.reg, reg);
} else {
- LoadWordDisp(TargetReg(kSp), offset, rl_dest.reg.GetReg());
+ LoadWordDisp(TargetReg(kSp), offset, rl_dest.reg);
}
} else {
- int reg_arg_low = GetArgMappingToPhysicalReg(in_position);
- int reg_arg_high = GetArgMappingToPhysicalReg(in_position + 1);
+ RegStorage reg_arg_low = GetArgMappingToPhysicalReg(in_position);
+ RegStorage reg_arg_high = GetArgMappingToPhysicalReg(in_position + 1);
- if (reg_arg_low != INVALID_REG && reg_arg_high != INVALID_REG) {
- OpRegCopyWide(rl_dest.reg.GetReg(), rl_dest.reg.GetHighReg(), reg_arg_low, reg_arg_high);
- } else if (reg_arg_low != INVALID_REG && reg_arg_high == INVALID_REG) {
- OpRegCopy(rl_dest.reg.GetReg(), reg_arg_low);
+ if (reg_arg_low.Valid() && reg_arg_high.Valid()) {
+ OpRegCopyWide(rl_dest.reg, RegStorage::MakeRegPair(reg_arg_low, reg_arg_high));
+ } else if (reg_arg_low.Valid() && !reg_arg_high.Valid()) {
+ OpRegCopy(rl_dest.reg, reg_arg_low);
int offset_high = offset + sizeof(uint32_t);
- LoadWordDisp(TargetReg(kSp), offset_high, rl_dest.reg.GetHighReg());
- } else if (reg_arg_low == INVALID_REG && reg_arg_high != INVALID_REG) {
- OpRegCopy(rl_dest.reg.GetHighReg(), reg_arg_high);
- LoadWordDisp(TargetReg(kSp), offset, rl_dest.reg.GetReg());
+ LoadWordDisp(TargetReg(kSp), offset_high, rl_dest.reg.GetHigh());
+ } else if (!reg_arg_low.Valid() && reg_arg_high.Valid()) {
+ OpRegCopy(rl_dest.reg.GetHigh(), reg_arg_high);
+ LoadWordDisp(TargetReg(kSp), offset, rl_dest.reg.GetLow());
} else {
- LoadBaseDispWide(TargetReg(kSp), offset, rl_dest.reg.GetReg(), rl_dest.reg.GetHighReg(), INVALID_SREG);
+ LoadBaseDispWide(TargetReg(kSp), offset, rl_dest.reg, INVALID_SREG);
}
}
}
@@ -131,14 +133,17 @@
GenPrintLabel(mir);
LockArg(data.object_arg);
RegLocation rl_dest = wide ? GetReturnWide(double_or_float) : GetReturn(double_or_float);
- int reg_obj = LoadArg(data.object_arg);
+ RegStorage reg_obj = LoadArg(data.object_arg);
if (wide) {
- LoadBaseDispWide(reg_obj, data.field_offset, rl_dest.reg.GetReg(), rl_dest.reg.GetHighReg(), INVALID_SREG);
+ LoadBaseDispWide(reg_obj, data.field_offset, rl_dest.reg, INVALID_SREG);
} else {
- LoadBaseDisp(reg_obj, data.field_offset, rl_dest.reg.GetReg(), kWord, INVALID_SREG);
+ LoadWordDisp(reg_obj, data.field_offset, rl_dest.reg);
}
if (data.is_volatile) {
+ // Without context-sensitive analysis, we must issue the most conservative barriers.
+ // In this case, either a load or a store may follow, so we issue both barriers.
GenMemBarrier(kLoadLoad);
+ GenMemBarrier(kLoadStore);
}
return true;
}
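Shape of the emitted sequence for the volatile case, following the recipe in the comment above (target-independent sketch):

  // <load field>
  // GenMemBarrier(kLoadLoad);
  // GenMemBarrier(kLoadStore);
  // Targets whose barrier instruction covers both kinds may fold the two
  // calls into one instruction (see GenMemBarrier's contract later in this patch).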
@@ -157,20 +162,20 @@
GenPrintLabel(mir);
LockArg(data.object_arg);
LockArg(data.src_arg, wide);
- int reg_obj = LoadArg(data.object_arg);
- int reg_src = LoadArg(data.src_arg, wide);
+ RegStorage reg_obj = LoadArg(data.object_arg);
+ RegStorage reg_src = LoadArg(data.src_arg, wide);
if (data.is_volatile) {
+ // There might have been a store before this volatile one, so insert a StoreStore barrier.
GenMemBarrier(kStoreStore);
}
if (wide) {
- int low_reg, high_reg;
- DECODE_REG_PAIR(reg_src, low_reg, high_reg);
- StoreBaseDispWide(reg_obj, data.field_offset, low_reg, high_reg);
+ StoreBaseDispWide(reg_obj, data.field_offset, reg_src);
} else {
StoreBaseDisp(reg_obj, data.field_offset, reg_src, kWord);
}
if (data.is_volatile) {
- GenMemBarrier(kLoadLoad);
+ // A load might follow the volatile store, so insert a StoreLoad barrier.
+ GenMemBarrier(kStoreLoad);
}
if (data.op_variant == InlineMethodAnalyser::IPutVariant(Instruction::IPUT_OBJECT)) {
MarkGCCard(reg_src, reg_obj);
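And the matching shape for the volatile store above (sketch):

  // GenMemBarrier(kStoreStore);
  // <store field>
  // GenMemBarrier(kStoreLoad);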
@@ -211,7 +216,7 @@
successful = true;
RegLocation rl_dest = GetReturn(cu_->shorty[0] == 'F');
GenPrintLabel(mir);
- LoadConstant(rl_dest.reg.GetReg(), static_cast<int>(special.d.data));
+ LoadConstant(rl_dest.reg, static_cast<int>(special.d.data));
return_mir = bb->GetNextUnconditionalMir(mir_graph_, mir);
break;
}
@@ -379,19 +384,19 @@
case Instruction::CONST_4:
case Instruction::CONST_16:
rl_result = EvalLoc(rl_dest, kAnyReg, true);
- LoadConstantNoClobber(rl_result.reg.GetReg(), vB);
+ LoadConstantNoClobber(rl_result.reg, vB);
StoreValue(rl_dest, rl_result);
if (vB == 0) {
- Workaround7250540(rl_dest, rl_result.reg.GetReg());
+ Workaround7250540(rl_dest, rl_result.reg);
}
break;
case Instruction::CONST_HIGH16:
rl_result = EvalLoc(rl_dest, kAnyReg, true);
- LoadConstantNoClobber(rl_result.reg.GetReg(), vB << 16);
+ LoadConstantNoClobber(rl_result.reg, vB << 16);
StoreValue(rl_dest, rl_result);
if (vB == 0) {
- Workaround7250540(rl_dest, rl_result.reg.GetReg());
+ Workaround7250540(rl_dest, rl_result.reg);
}
break;
@@ -406,8 +411,7 @@
case Instruction::CONST_WIDE_HIGH16:
rl_result = EvalLoc(rl_dest, kAnyReg, true);
- LoadConstantWide(rl_result.reg.GetReg(), rl_result.reg.GetHighReg(),
- static_cast<int64_t>(vB) << 48);
+ LoadConstantWide(rl_result.reg, static_cast<int64_t>(vB) << 48);
StoreValueWide(rl_dest, rl_result);
break;
@@ -439,9 +443,9 @@
int len_offset;
len_offset = mirror::Array::LengthOffset().Int32Value();
rl_src[0] = LoadValue(rl_src[0], kCoreReg);
- GenNullCheck(rl_src[0].reg.GetReg(), opt_flags);
+ GenNullCheck(rl_src[0].reg, opt_flags);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- LoadWordDisp(rl_src[0].reg.GetReg(), len_offset, rl_result.reg.GetReg());
+ LoadWordDisp(rl_src[0].reg, len_offset, rl_result.reg);
StoreValue(rl_dest, rl_result);
break;
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 5a1f6cd..68c3d0f 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -56,6 +56,7 @@
#define NO_OPERAND (1ULL << kNoOperand)
#define REG_DEF0 (1ULL << kRegDef0)
#define REG_DEF1 (1ULL << kRegDef1)
+#define REG_DEF2 (1ULL << kRegDef2)
#define REG_DEFA (1ULL << kRegDefA)
#define REG_DEFD (1ULL << kRegDefD)
#define REG_DEF_FPCS_LIST0 (1ULL << kRegDefFPCSList0)
@@ -439,9 +440,11 @@
LIR* InsertCaseLabel(DexOffset vaddr, int keyVal);
void MarkPackedCaseLabels(Mir2Lir::SwitchTable* tab_rec);
void MarkSparseCaseLabels(Mir2Lir::SwitchTable* tab_rec);
+ // Handle bookkeeping to convert a wide RegLocation to a narrow RegLocation. No code generated.
+ RegLocation NarrowRegLoc(RegLocation loc);
// Shared by all targets - implemented in local_optimizations.cc
- void ConvertMemOpIntoMove(LIR* orig_lir, int dest, int src);
+ void ConvertMemOpIntoMove(LIR* orig_lir, RegStorage dest, RegStorage src);
void ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir);
void ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir);
void ApplyLocalOptimizations(LIR* head_lir, LIR* tail_lir);
@@ -460,28 +463,36 @@
void Clobber(int reg) {
ClobberBody(GetRegInfo(reg));
}
+ void Clobber(RegStorage reg);
void ClobberSRegBody(RegisterInfo* p, int num_regs, int s_reg);
void ClobberSReg(int s_reg);
int SRegToPMap(int s_reg);
- void RecordCorePromotion(int reg, int s_reg);
- int AllocPreservedCoreReg(int s_reg);
- void RecordFpPromotion(int reg, int s_reg);
- int AllocPreservedSingle(int s_reg);
- int AllocPreservedDouble(int s_reg);
- int AllocTempBody(RegisterInfo* p, int num_regs, int* next_temp, bool required);
- virtual int AllocTempDouble();
- int AllocFreeTemp();
- int AllocTemp();
- int AllocTempFloat();
+ void RecordCorePromotion(RegStorage reg, int s_reg);
+ RegStorage AllocPreservedCoreReg(int s_reg);
+ void RecordFpPromotion(RegStorage reg, int s_reg);
+ RegStorage AllocPreservedSingle(int s_reg);
+ RegStorage AllocPreservedDouble(int s_reg);
+ RegStorage AllocTempBody(RegisterInfo* p, int num_regs, int* next_temp, bool required);
+ virtual RegStorage AllocTempDouble();
+ RegStorage AllocFreeTemp();
+ RegStorage AllocTemp();
+ RegStorage AllocTempFloat();
RegisterInfo* AllocLiveBody(RegisterInfo* p, int num_regs, int s_reg);
RegisterInfo* AllocLive(int s_reg, int reg_class);
void FreeTemp(int reg);
+ void FreeTemp(RegStorage reg);
RegisterInfo* IsLive(int reg);
+ RegisterInfo* IsLive(RegStorage reg);
RegisterInfo* IsTemp(int reg);
+ RegisterInfo* IsTemp(RegStorage reg);
RegisterInfo* IsPromoted(int reg);
+ RegisterInfo* IsPromoted(RegStorage reg);
bool IsDirty(int reg);
+ bool IsDirty(RegStorage reg);
void LockTemp(int reg);
+ void LockTemp(RegStorage reg);
void ResetDef(int reg);
+ void ResetDef(RegStorage reg);
void NullifyRange(LIR *start, LIR *finish, int s_reg1, int s_reg2);
void MarkDef(RegLocation rl, LIR *start, LIR *finish);
void MarkDefWide(RegLocation rl, LIR *start, LIR *finish);
@@ -493,15 +504,19 @@
void FlushSpecificReg(RegisterInfo* info);
void FlushAllRegsBody(RegisterInfo* info, int num_regs);
void FlushAllRegs();
- bool RegClassMatches(int reg_class, int reg);
- void MarkLive(int reg, int s_reg);
+ bool RegClassMatches(int reg_class, RegStorage reg);
+ void MarkLive(RegStorage reg, int s_reg);
void MarkTemp(int reg);
+ void MarkTemp(RegStorage reg);
void UnmarkTemp(int reg);
+ void UnmarkTemp(RegStorage reg);
void MarkPair(int low_reg, int high_reg);
void MarkClean(RegLocation loc);
void MarkDirty(RegLocation loc);
void MarkInUse(int reg);
+ void MarkInUse(RegStorage reg);
void CopyRegInfo(int new_reg, int old_reg);
+ void CopyRegInfo(RegStorage new_reg, RegStorage old_reg);
bool CheckCorePoolSanity();
RegLocation UpdateLoc(RegLocation loc);
virtual RegLocation UpdateLocWide(RegLocation loc);
@@ -545,14 +560,12 @@
void HandleSlowPaths();
void GenBarrier();
LIR* GenCheck(ConditionCode c_code, ThrowKind kind);
- LIR* GenImmedCheck(ConditionCode c_code, int reg, int imm_val,
- ThrowKind kind);
- LIR* GenNullCheck(int m_reg, int opt_flags);
void MarkPossibleNullPointerException(int opt_flags);
void MarkPossibleStackOverflowException();
- void ForceImplicitNullCheck(int reg, int opt_flags);
- LIR* GenRegRegCheck(ConditionCode c_code, int reg1, int reg2,
- ThrowKind kind);
+ void ForceImplicitNullCheck(RegStorage reg, int opt_flags);
+ LIR* GenImmedCheck(ConditionCode c_code, RegStorage reg, int imm_val, ThrowKind kind);
+ LIR* GenNullCheck(RegStorage m_reg, int opt_flags);
+ LIR* GenRegRegCheck(ConditionCode c_code, RegStorage reg1, RegStorage reg2, ThrowKind kind);
void GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1,
RegLocation rl_src2, LIR* taken, LIR* fall_through);
void GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src,
@@ -578,10 +591,8 @@
void GenConstString(uint32_t string_idx, RegLocation rl_dest);
void GenNewInstance(uint32_t type_idx, RegLocation rl_dest);
void GenThrow(RegLocation rl_src);
- void GenInstanceof(uint32_t type_idx, RegLocation rl_dest,
- RegLocation rl_src);
- void GenCheckCast(uint32_t insn_idx, uint32_t type_idx,
- RegLocation rl_src);
+ void GenInstanceof(uint32_t type_idx, RegLocation rl_dest, RegLocation rl_src);
+ void GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_src);
void GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_src2);
void GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
@@ -601,10 +612,11 @@
RegLocation rl_src1, RegLocation rl_src2);
// Shared by all targets - implemented in gen_invoke.cc.
- int CallHelperSetup(ThreadOffset helper_offset);
- LIR* CallHelper(int r_tgt, ThreadOffset helper_offset, bool safepoint_pc, bool use_link = true);
+ LIR* CallHelper(RegStorage r_tgt, ThreadOffset helper_offset, bool safepoint_pc,
+ bool use_link = true);
+ RegStorage CallHelperSetup(ThreadOffset helper_offset);
void CallRuntimeHelperImm(ThreadOffset helper_offset, int arg0, bool safepoint_pc);
- void CallRuntimeHelperReg(ThreadOffset helper_offset, int arg0, bool safepoint_pc);
+ void CallRuntimeHelperReg(ThreadOffset helper_offset, RegStorage arg0, bool safepoint_pc);
void CallRuntimeHelperRegLocation(ThreadOffset helper_offset, RegLocation arg0,
bool safepoint_pc);
void CallRuntimeHelperImmImm(ThreadOffset helper_offset, int arg0, int arg1,
@@ -613,21 +625,21 @@
RegLocation arg1, bool safepoint_pc);
void CallRuntimeHelperRegLocationImm(ThreadOffset helper_offset, RegLocation arg0,
int arg1, bool safepoint_pc);
- void CallRuntimeHelperImmReg(ThreadOffset helper_offset, int arg0, int arg1,
+ void CallRuntimeHelperImmReg(ThreadOffset helper_offset, int arg0, RegStorage arg1,
bool safepoint_pc);
- void CallRuntimeHelperRegImm(ThreadOffset helper_offset, int arg0, int arg1,
+ void CallRuntimeHelperRegImm(ThreadOffset helper_offset, RegStorage arg0, int arg1,
bool safepoint_pc);
void CallRuntimeHelperImmMethod(ThreadOffset helper_offset, int arg0,
bool safepoint_pc);
- void CallRuntimeHelperRegMethod(ThreadOffset helper_offset, int arg0, bool safepoint_pc);
- void CallRuntimeHelperRegMethodRegLocation(ThreadOffset helper_offset, int arg0,
+ void CallRuntimeHelperRegMethod(ThreadOffset helper_offset, RegStorage arg0, bool safepoint_pc);
+ void CallRuntimeHelperRegMethodRegLocation(ThreadOffset helper_offset, RegStorage arg0,
RegLocation arg2, bool safepoint_pc);
void CallRuntimeHelperRegLocationRegLocation(ThreadOffset helper_offset,
RegLocation arg0, RegLocation arg1,
bool safepoint_pc);
- void CallRuntimeHelperRegReg(ThreadOffset helper_offset, int arg0, int arg1,
+ void CallRuntimeHelperRegReg(ThreadOffset helper_offset, RegStorage arg0, RegStorage arg1,
bool safepoint_pc);
- void CallRuntimeHelperRegRegImm(ThreadOffset helper_offset, int arg0, int arg1,
+ void CallRuntimeHelperRegRegImm(ThreadOffset helper_offset, RegStorage arg0, RegStorage arg1,
int arg2, bool safepoint_pc);
void CallRuntimeHelperImmMethodRegLocation(ThreadOffset helper_offset, int arg0,
RegLocation arg2, bool safepoint_pc);
@@ -697,16 +709,16 @@
// Shared by all targets - implemented in gen_loadstore.cc.
RegLocation LoadCurrMethod();
- void LoadCurrMethodDirect(int r_tgt);
- LIR* LoadConstant(int r_dest, int value);
- LIR* LoadWordDisp(int rBase, int displacement, int r_dest);
+ void LoadCurrMethodDirect(RegStorage r_tgt);
+ LIR* LoadConstant(RegStorage r_dest, int value);
+ LIR* LoadWordDisp(RegStorage r_base, int displacement, RegStorage r_dest);
RegLocation LoadValue(RegLocation rl_src, RegisterClass op_kind);
RegLocation LoadValueWide(RegLocation rl_src, RegisterClass op_kind);
- void LoadValueDirect(RegLocation rl_src, int r_dest);
- void LoadValueDirectFixed(RegLocation rl_src, int r_dest);
- void LoadValueDirectWide(RegLocation rl_src, int reg_lo, int reg_hi);
- void LoadValueDirectWideFixed(RegLocation rl_src, int reg_lo, int reg_hi);
- LIR* StoreWordDisp(int rBase, int displacement, int r_src);
+ void LoadValueDirect(RegLocation rl_src, RegStorage r_dest);
+ void LoadValueDirectFixed(RegLocation rl_src, RegStorage r_dest);
+ void LoadValueDirectWide(RegLocation rl_src, RegStorage r_dest);
+ void LoadValueDirectWideFixed(RegLocation rl_src, RegStorage r_dest);
+ LIR* StoreWordDisp(RegStorage r_base, int displacement, RegStorage r_src);
/**
* @brief Used to do the final store in the destination as per bytecode semantics.
@@ -793,49 +805,59 @@
* @param check_value The immediate to compare to.
* @returns The branch instruction that was generated.
*/
- virtual LIR* OpCmpMemImmBranch(ConditionCode cond, int temp_reg, int base_reg,
+ virtual LIR* OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
int offset, int check_value, LIR* target);
// Required for target - codegen helpers.
virtual bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
RegLocation rl_src, RegLocation rl_dest, int lit) = 0;
- virtual int LoadHelper(ThreadOffset offset) = 0;
+ virtual bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) = 0;
virtual LIR* CheckSuspendUsingLoad() = 0;
- virtual LIR* LoadBaseDisp(int rBase, int displacement, int r_dest, OpSize size, int s_reg) = 0;
- virtual LIR* LoadBaseDispWide(int rBase, int displacement, int r_dest_lo, int r_dest_hi,
+ virtual RegStorage LoadHelper(ThreadOffset offset) = 0;
+ virtual LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size,
+ int s_reg) = 0;
+ virtual LIR* LoadBaseDispWide(RegStorage r_base, int displacement, RegStorage r_dest,
int s_reg) = 0;
- virtual LIR* LoadBaseIndexed(int rBase, int r_index, int r_dest, int scale, OpSize size) = 0;
- virtual LIR* LoadBaseIndexedDisp(int rBase, int r_index, int scale, int displacement,
- int r_dest, int r_dest_hi, OpSize size, int s_reg) = 0;
- virtual LIR* LoadConstantNoClobber(int r_dest, int value) = 0;
- virtual LIR* LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value) = 0;
- virtual LIR* StoreBaseDisp(int rBase, int displacement, int r_src, OpSize size) = 0;
- virtual LIR* StoreBaseDispWide(int rBase, int displacement, int r_src_lo, int r_src_hi) = 0;
- virtual LIR* StoreBaseIndexed(int rBase, int r_index, int r_src, int scale, OpSize size) = 0;
- virtual LIR* StoreBaseIndexedDisp(int rBase, int r_index, int scale, int displacement,
- int r_src, int r_src_hi, OpSize size, int s_reg) = 0;
- virtual void MarkGCCard(int val_reg, int tgt_addr_reg) = 0;
+ virtual LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
+ int scale, OpSize size) = 0;
+ virtual LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
+ int displacement, RegStorage r_dest, RegStorage r_dest_hi,
+ OpSize size, int s_reg) = 0;
+ virtual LIR* LoadConstantNoClobber(RegStorage r_dest, int value) = 0;
+ virtual LIR* LoadConstantWide(RegStorage r_dest, int64_t value) = 0;
+ virtual LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
+ OpSize size) = 0;
+ virtual LIR* StoreBaseDispWide(RegStorage r_base, int displacement, RegStorage r_src) = 0;
+ virtual LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
+ int scale, OpSize size) = 0;
+ virtual LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
+ int displacement, RegStorage r_src, RegStorage r_src_hi,
+ OpSize size, int s_reg) = 0;
+ virtual void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) = 0;
// Required for target - register utilities.
virtual bool IsFpReg(int reg) = 0;
+ virtual bool IsFpReg(RegStorage reg) = 0;
virtual bool SameRegType(int reg1, int reg2) = 0;
- virtual int AllocTypedTemp(bool fp_hint, int reg_class) = 0;
+ virtual RegStorage AllocTypedTemp(bool fp_hint, int reg_class) = 0;
virtual RegStorage AllocTypedTempWide(bool fp_hint, int reg_class) = 0;
+ // TODO: eliminate S2d.
virtual int S2d(int low_reg, int high_reg) = 0;
- virtual int TargetReg(SpecialTargetRegister reg) = 0;
- virtual int GetArgMappingToPhysicalReg(int arg_num) = 0;
+ virtual RegStorage TargetReg(SpecialTargetRegister reg) = 0;
+ virtual RegStorage GetArgMappingToPhysicalReg(int arg_num) = 0;
virtual RegLocation GetReturnAlt() = 0;
virtual RegLocation GetReturnWideAlt() = 0;
virtual RegLocation LocCReturn() = 0;
virtual RegLocation LocCReturnDouble() = 0;
virtual RegLocation LocCReturnFloat() = 0;
virtual RegLocation LocCReturnWide() = 0;
+ // TODO: use to reduce/eliminate xx_FPREG() macro use.
virtual uint32_t FpRegMask() = 0;
virtual uint64_t GetRegMaskCommon(int reg) = 0;
virtual void AdjustSpillMask() = 0;
virtual void ClobberCallerSave() = 0;
- virtual void FlushReg(int reg) = 0;
- virtual void FlushRegWide(int reg1, int reg2) = 0;
+ virtual void FlushReg(RegStorage reg) = 0;
+ virtual void FlushRegWide(RegStorage reg) = 0;
virtual void FreeCallTemps() = 0;
virtual void FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free) = 0;
virtual void LockCallTemps() = 0;
@@ -892,20 +914,17 @@
virtual bool GenInlinedPeek(CallInfo* info, OpSize size) = 0;
virtual bool GenInlinedPoke(CallInfo* info, OpSize size) = 0;
virtual void GenNegLong(RegLocation rl_dest, RegLocation rl_src) = 0;
- virtual void GenOrLong(Instruction::Code,
- RegLocation rl_dest, RegLocation rl_src1,
+ virtual void GenOrLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
RegLocation rl_src2) = 0;
- virtual void GenSubLong(Instruction::Code,
- RegLocation rl_dest, RegLocation rl_src1,
+ virtual void GenSubLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
RegLocation rl_src2) = 0;
- virtual void GenXorLong(Instruction::Code,
- RegLocation rl_dest, RegLocation rl_src1,
+ virtual void GenXorLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
RegLocation rl_src2) = 0;
- virtual LIR* GenRegMemCheck(ConditionCode c_code, int reg1, int base,
+ virtual LIR* GenRegMemCheck(ConditionCode c_code, RegStorage reg1, RegStorage base,
int offset, ThrowKind kind) = 0;
- virtual RegLocation GenDivRem(RegLocation rl_dest, int reg_lo, int reg_hi,
+ virtual RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi,
bool is_div) = 0;
- virtual RegLocation GenDivRemLit(RegLocation rl_dest, int reg_lo, int lit,
+ virtual RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit,
bool is_div) = 0;
/*
* @brief Generate an integer div or rem operation by a literal.
@@ -924,10 +943,9 @@
* @param lit Divisor.
* @param is_div 'true' if this is a division, 'false' for a remainder.
*/
- virtual RegLocation GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1,
- int lit, bool is_div) = 0;
- virtual void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2) = 0;
+ virtual RegLocation GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit,
+ bool is_div) = 0;
+ virtual void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) = 0;
/**
* @brief Used for generating code that throws ArithmeticException if both registers are zero.
@@ -935,15 +953,12 @@
* @param reg The wide register (or pair) holding the 64-bit value to test.
*/
- virtual void GenDivZeroCheck(int reg_lo, int reg_hi) = 0;
+ virtual void GenDivZeroCheck(RegStorage reg) = 0;
- virtual void GenEntrySequence(RegLocation* ArgLocs,
- RegLocation rl_method) = 0;
+ virtual void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) = 0;
virtual void GenExitSequence() = 0;
- virtual void GenFillArrayData(DexOffset table_offset,
- RegLocation rl_src) = 0;
- virtual void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias,
- bool is_double) = 0;
+ virtual void GenFillArrayData(DexOffset table_offset, RegLocation rl_src) = 0;
+ virtual void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double) = 0;
virtual void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) = 0;
/**
@@ -953,42 +968,48 @@
*/
virtual void GenSelect(BasicBlock* bb, MIR* mir) = 0;
+ /**
+ * @brief Used to generate a memory barrier in an architecture-specific way.
+ * @details The most recently generated LIR will be considered for use as the barrier.
+ * Namely, if that LIR can be updated in a way that lets it also serve as the barrier,
+ * it will be used as such. Otherwise, a new LIR will be generated that preserves the
+ * semantics.
+ * @param barrier_kind The kind of memory barrier to generate.
+ */
virtual void GenMemBarrier(MemBarrierKind barrier_kind) = 0;
+
virtual void GenMoveException(RegLocation rl_dest) = 0;
- virtual void GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
- RegLocation rl_result, int lit, int first_bit,
- int second_bit) = 0;
+ virtual void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
+ int first_bit, int second_bit) = 0;
virtual void GenNegDouble(RegLocation rl_dest, RegLocation rl_src) = 0;
virtual void GenNegFloat(RegLocation rl_dest, RegLocation rl_src) = 0;
- virtual void GenPackedSwitch(MIR* mir, DexOffset table_offset,
- RegLocation rl_src) = 0;
- virtual void GenSparseSwitch(MIR* mir, DexOffset table_offset,
- RegLocation rl_src) = 0;
+ virtual void GenPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) = 0;
+ virtual void GenSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) = 0;
virtual void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
RegLocation rl_index, RegLocation rl_dest, int scale) = 0;
virtual void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
RegLocation rl_index, RegLocation rl_src, int scale,
bool card_mark) = 0;
- virtual void GenShiftImmOpLong(Instruction::Code opcode,
- RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_shift) = 0;
+ virtual void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_shift) = 0;
// Required for target - single operation generators.
virtual LIR* OpUnconditionalBranch(LIR* target) = 0;
- virtual LIR* OpCmpBranch(ConditionCode cond, int src1, int src2, LIR* target) = 0;
- virtual LIR* OpCmpImmBranch(ConditionCode cond, int reg, int check_value, LIR* target) = 0;
+ virtual LIR* OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) = 0;
+ virtual LIR* OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value,
+ LIR* target) = 0;
virtual LIR* OpCondBranch(ConditionCode cc, LIR* target) = 0;
- virtual LIR* OpDecAndBranch(ConditionCode c_code, int reg, LIR* target) = 0;
- virtual LIR* OpFpRegCopy(int r_dest, int r_src) = 0;
+ virtual LIR* OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) = 0;
+ virtual LIR* OpFpRegCopy(RegStorage r_dest, RegStorage r_src) = 0;
virtual LIR* OpIT(ConditionCode cond, const char* guide) = 0;
- virtual LIR* OpMem(OpKind op, int rBase, int disp) = 0;
- virtual LIR* OpPcRelLoad(int reg, LIR* target) = 0;
- virtual LIR* OpReg(OpKind op, int r_dest_src) = 0;
- virtual LIR* OpRegCopy(int r_dest, int r_src) = 0;
- virtual LIR* OpRegCopyNoInsert(int r_dest, int r_src) = 0;
- virtual LIR* OpRegImm(OpKind op, int r_dest_src1, int value) = 0;
- virtual LIR* OpRegMem(OpKind op, int r_dest, int rBase, int offset) = 0;
- virtual LIR* OpRegReg(OpKind op, int r_dest_src1, int r_src2) = 0;
+ virtual LIR* OpMem(OpKind op, RegStorage r_base, int disp) = 0;
+ virtual LIR* OpPcRelLoad(RegStorage reg, LIR* target) = 0;
+ virtual LIR* OpReg(OpKind op, RegStorage r_dest_src) = 0;
+ virtual LIR* OpRegCopy(RegStorage r_dest, RegStorage r_src) = 0;
+ virtual LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) = 0;
+ virtual LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value) = 0;
+ virtual LIR* OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset) = 0;
+ virtual LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) = 0;
/**
* @brief Used to generate an LIR that does a load from mem to reg.
@@ -998,7 +1019,8 @@
* @param move_type Specification on the move desired (size, alignment, register kind).
* @return Returns the generated move LIR.
*/
- virtual LIR* OpMovRegMem(int r_dest, int r_base, int offset, MoveType move_type) = 0;
+ virtual LIR* OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset,
+ MoveType move_type) = 0;
/**
* @brief Used to generate an LIR that does a store from reg to mem.
@@ -1009,7 +1031,8 @@
* @param is_aligned Whether the memory location is known to be aligned.
* @return Returns the generated move LIR.
*/
- virtual LIR* OpMovMemReg(int r_base, int offset, int r_src, MoveType move_type) = 0;
+ virtual LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src,
+ MoveType move_type) = 0;
/**
* @brief Used for generating a conditional register to register operation.
@@ -1019,16 +1042,18 @@
* @param r_src The source physical register.
* @return Returns the newly created LIR or null in case of creation failure.
*/
- virtual LIR* OpCondRegReg(OpKind op, ConditionCode cc, int r_dest, int r_src) = 0;
+ virtual LIR* OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) = 0;
- virtual LIR* OpRegRegImm(OpKind op, int r_dest, int r_src1, int value) = 0;
- virtual LIR* OpRegRegReg(OpKind op, int r_dest, int r_src1, int r_src2) = 0;
+ virtual LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value) = 0;
+ virtual LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1,
+ RegStorage r_src2) = 0;
virtual LIR* OpTestSuspend(LIR* target) = 0;
virtual LIR* OpThreadMem(OpKind op, ThreadOffset thread_offset) = 0;
- virtual LIR* OpVldm(int rBase, int count) = 0;
- virtual LIR* OpVstm(int rBase, int count) = 0;
- virtual void OpLea(int rBase, int reg1, int reg2, int scale, int offset) = 0;
- virtual void OpRegCopyWide(int dest_lo, int dest_hi, int src_lo, int src_hi) = 0;
+ virtual LIR* OpVldm(RegStorage r_base, int count) = 0;
+ virtual LIR* OpVstm(RegStorage r_base, int count) = 0;
+ virtual void OpLea(RegStorage r_base, RegStorage reg1, RegStorage reg2, int scale,
+ int offset) = 0;
+ virtual void OpRegCopyWide(RegStorage dest, RegStorage src) = 0;
virtual void OpTlsCmp(ThreadOffset offset, int val) = 0;
virtual bool InexpensiveConstantInt(int32_t value) = 0;
virtual bool InexpensiveConstantFloat(int32_t value) = 0;
@@ -1040,7 +1065,7 @@
virtual void GenMonitorExit(int opt_flags, RegLocation rl_src);
// Temp workaround
- void Workaround7250540(RegLocation rl_dest, int value);
+ void Workaround7250540(RegLocation rl_dest, RegStorage zero_reg);
protected:
Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
@@ -1153,7 +1178,7 @@
* @param wide Whether the argument is 64-bit or not.
* @return Returns the register (or register pair) for the loaded argument.
*/
- int LoadArg(int in_position, bool wide = false);
+ RegStorage LoadArg(int in_position, bool wide = false);
/**
* @brief Used to load a VR argument directly to a specified register location.
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index 3cb6fd0..3c49756 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -81,6 +81,15 @@
DumpRegPool(reg_pool_->FPRegs, reg_pool_->num_fp_regs);
}
+void Mir2Lir::Clobber(RegStorage reg) {
+ if (reg.IsPair()) {
+ ClobberBody(GetRegInfo(reg.GetLowReg()));
+ ClobberBody(GetRegInfo(reg.GetHighReg()));
+ } else {
+ ClobberBody(GetRegInfo(reg.GetReg()));
+ }
+}
+
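Usage sketch: a wide value is now clobbered through one call (the allocation is shown only for illustration):

  RegStorage wide = AllocTypedTempWide(false, kCoreReg);
  Clobber(wide);  // marks the RegisterInfo of both halves as clobbered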
void Mir2Lir::ClobberSRegBody(RegisterInfo* p, int num_regs, int s_reg) {
for (int i = 0; i< num_regs; i++) {
if (p[i].s_reg == s_reg) {
@@ -144,25 +153,26 @@
}
}
-void Mir2Lir::RecordCorePromotion(int reg, int s_reg) {
+void Mir2Lir::RecordCorePromotion(RegStorage reg, int s_reg) {
int p_map_idx = SRegToPMap(s_reg);
int v_reg = mir_graph_->SRegToVReg(s_reg);
- GetRegInfo(reg)->in_use = true;
- core_spill_mask_ |= (1 << reg);
+ int reg_num = reg.GetReg();
+ GetRegInfo(reg_num)->in_use = true;
+ core_spill_mask_ |= (1 << reg_num);
// Include reg for later sort
- core_vmap_table_.push_back(reg << VREG_NUM_WIDTH | (v_reg & ((1 << VREG_NUM_WIDTH) - 1)));
+ core_vmap_table_.push_back(reg_num << VREG_NUM_WIDTH | (v_reg & ((1 << VREG_NUM_WIDTH) - 1)));
num_core_spills_++;
promotion_map_[p_map_idx].core_location = kLocPhysReg;
- promotion_map_[p_map_idx].core_reg = reg;
+ promotion_map_[p_map_idx].core_reg = reg_num;
}
/* Reserve a callee-save register. Return an invalid RegStorage if none available */
-int Mir2Lir::AllocPreservedCoreReg(int s_reg) {
- int res = -1;
+RegStorage Mir2Lir::AllocPreservedCoreReg(int s_reg) {
+ RegStorage res;
RegisterInfo* core_regs = reg_pool_->core_regs;
for (int i = 0; i < reg_pool_->num_core_regs; i++) {
if (!core_regs[i].is_temp && !core_regs[i].in_use) {
- res = core_regs[i].reg;
+ res = RegStorage::Solo32(core_regs[i].reg);
RecordCorePromotion(res, s_reg);
break;
}
@@ -170,22 +180,23 @@
return res;
}
-void Mir2Lir::RecordFpPromotion(int reg, int s_reg) {
+void Mir2Lir::RecordFpPromotion(RegStorage reg, int s_reg) {
int p_map_idx = SRegToPMap(s_reg);
int v_reg = mir_graph_->SRegToVReg(s_reg);
- GetRegInfo(reg)->in_use = true;
- MarkPreservedSingle(v_reg, reg);
+ int reg_num = reg.GetReg();
+ GetRegInfo(reg_num)->in_use = true;
+ MarkPreservedSingle(v_reg, reg_num);
promotion_map_[p_map_idx].fp_location = kLocPhysReg;
- promotion_map_[p_map_idx].FpReg = reg;
+ promotion_map_[p_map_idx].FpReg = reg_num;
}
// Reserve a callee-save fp single register.
-int Mir2Lir::AllocPreservedSingle(int s_reg) {
- int res = -1; // Return code if none available.
+RegStorage Mir2Lir::AllocPreservedSingle(int s_reg) {
+ RegStorage res;
RegisterInfo* FPRegs = reg_pool_->FPRegs;
for (int i = 0; i < reg_pool_->num_fp_regs; i++) {
if (!FPRegs[i].is_temp && !FPRegs[i].in_use) {
- res = FPRegs[i].reg;
+ res = RegStorage::Solo32(FPRegs[i].reg);
RecordFpPromotion(res, s_reg);
break;
}
@@ -201,8 +212,9 @@
* allocate if we can't meet the requirements for the pair of
* s_reg<=sX[even] & (s_reg+1)<= sX+1.
*/
-int Mir2Lir::AllocPreservedDouble(int s_reg) {
- int res = -1; // Assume failure
+// TODO: needs rewrite to support non-backed 64-bit float regs.
+RegStorage Mir2Lir::AllocPreservedDouble(int s_reg) {
+ RegStorage res;
int v_reg = mir_graph_->SRegToVReg(s_reg);
int p_map_idx = SRegToPMap(s_reg);
if (promotion_map_[p_map_idx+1].fp_location == kLocPhysReg) {
@@ -210,19 +222,19 @@
int high_reg = promotion_map_[p_map_idx+1].FpReg;
if ((high_reg & 1) == 0) {
// High reg is even - fail.
- return res;
+ return res; // Invalid.
}
// Is the low reg of the pair free?
RegisterInfo* p = GetRegInfo(high_reg-1);
if (p->in_use || p->is_temp) {
// Already allocated or not preserved - fail.
- return res;
+ return res; // Invalid.
}
// OK - good to go.
- res = p->reg;
+ res = RegStorage(RegStorage::k64BitPair, p->reg, p->reg + 1);
p->in_use = true;
- DCHECK_EQ((res & 1), 0);
- MarkPreservedSingle(v_reg, res);
+ DCHECK_EQ((res.GetReg() & 1), 0);
+ MarkPreservedSingle(v_reg, res.GetReg());
} else {
RegisterInfo* FPRegs = reg_pool_->FPRegs;
for (int i = 0; i < reg_pool_->num_fp_regs; i++) {
@@ -231,27 +243,27 @@
!FPRegs[i+1].is_temp && !FPRegs[i+1].in_use &&
((FPRegs[i+1].reg & 0x1) == 0x1) &&
(FPRegs[i].reg + 1) == FPRegs[i+1].reg) {
- res = FPRegs[i].reg;
+ res = RegStorage(RegStorage::k64BitPair, FPRegs[i].reg, FPRegs[i].reg+1);
FPRegs[i].in_use = true;
- MarkPreservedSingle(v_reg, res);
+ MarkPreservedSingle(v_reg, res.GetLowReg());
FPRegs[i+1].in_use = true;
- DCHECK_EQ(res + 1, FPRegs[i+1].reg);
- MarkPreservedSingle(v_reg+1, res+1);
+ DCHECK_EQ(res.GetLowReg() + 1, FPRegs[i+1].reg);
+ MarkPreservedSingle(v_reg+1, res.GetLowReg() + 1);
break;
}
}
}
- if (res != -1) {
+ if (res.Valid()) {
promotion_map_[p_map_idx].fp_location = kLocPhysReg;
- promotion_map_[p_map_idx].FpReg = res;
+ promotion_map_[p_map_idx].FpReg = res.GetLowReg();
promotion_map_[p_map_idx+1].fp_location = kLocPhysReg;
- promotion_map_[p_map_idx+1].FpReg = res + 1;
+ promotion_map_[p_map_idx+1].FpReg = res.GetLowReg() + 1;
}
return res;
}
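A worked example of the pairing constraint above (register numbers illustrative):

  // Promoting a wide FP value needs an aligned (even, odd) single pair such
  // as (fr2, fr3). If the high VR is already promoted, its single reg must be
  // odd (fr3); the code then tries to claim the even partner (fr2). If fr2 is
  // in use or is a temp, promotion fails and an invalid RegStorage is returned.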
-int Mir2Lir::AllocTempBody(RegisterInfo* p, int num_regs, int* next_temp,
- bool required) {
+RegStorage Mir2Lir::AllocTempBody(RegisterInfo* p, int num_regs, int* next_temp,
+ bool required) {
int next = *next_temp;
for (int i = 0; i< num_regs; i++) {
if (next >= num_regs)
@@ -261,7 +273,7 @@
p[next].in_use = true;
p[next].pair = false;
*next_temp = next + 1;
- return p[next].reg;
+ return RegStorage::Solo32(p[next].reg);
}
next++;
}
@@ -274,7 +286,7 @@
p[next].in_use = true;
p[next].pair = false;
*next_temp = next + 1;
- return p[next].reg;
+ return RegStorage::Solo32(p[next].reg);
}
next++;
}
@@ -284,11 +296,12 @@
reg_pool_->num_core_regs);
LOG(FATAL) << "No free temp registers";
}
- return -1; // No register available
+ return RegStorage::InvalidReg(); // No register available
}
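
AllocTempBody is a round-robin scan: it resumes where the previous search stopped, wraps once, and only takes registers marked as temps and not currently in use. A self-contained sketch of the same search (names hypothetical; the real pool entries are RegisterInfo records, not parallel arrays):

```cpp
// Round-robin temp search as in AllocTempBody (simplified sketch).
int FindFreeTemp(bool* is_temp, bool* in_use, int num_regs, int* next) {
  int idx = *next;
  for (int i = 0; i < num_regs; i++) {
    if (idx >= num_regs) idx = 0;       // wrap around once
    if (is_temp[idx] && !in_use[idx]) {
      in_use[idx] = true;
      *next = idx + 1;                  // resume after the hit next time
      return idx;
    }
    idx++;
  }
  return -1;  // caller maps this to InvalidReg() or LOG(FATAL)
}
```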
// REDO: too many assumptions.
-int Mir2Lir::AllocTempDouble() {
+// Virtualize - this is target dependent.
+RegStorage Mir2Lir::AllocTempDouble() {
RegisterInfo* p = reg_pool_->FPRegs;
int num_regs = reg_pool_->num_fp_regs;
/* Start looking at an even reg */
@@ -310,7 +323,8 @@
if (reg_pool_->next_fp_reg >= num_regs) {
reg_pool_->next_fp_reg = 0;
}
- return p[next].reg;
+ // FIXME: should return k64BitSolo.
+ return RegStorage(RegStorage::k64BitPair, p[next].reg, p[next+1].reg);
}
next += 2;
}
@@ -332,28 +346,28 @@
if (reg_pool_->next_fp_reg >= num_regs) {
reg_pool_->next_fp_reg = 0;
}
- return p[next].reg;
+ return RegStorage(RegStorage::k64BitPair, p[next].reg, p[next+1].reg);
}
next += 2;
}
LOG(FATAL) << "No free temp registers (pair)";
- return -1;
+ return RegStorage::InvalidReg();
}
/* Return a temp if one is available, an invalid RegStorage otherwise */
-int Mir2Lir::AllocFreeTemp() {
+RegStorage Mir2Lir::AllocFreeTemp() {
return AllocTempBody(reg_pool_->core_regs,
reg_pool_->num_core_regs,
                       &reg_pool_->next_core_reg, false);
}
-int Mir2Lir::AllocTemp() {
+RegStorage Mir2Lir::AllocTemp() {
return AllocTempBody(reg_pool_->core_regs,
reg_pool_->num_core_regs,
                       &reg_pool_->next_core_reg, true);
}
-int Mir2Lir::AllocTempFloat() {
+RegStorage Mir2Lir::AllocTempFloat() {
return AllocTempBody(reg_pool_->FPRegs,
reg_pool_->num_fp_regs,
                       &reg_pool_->next_fp_reg, true);
@@ -403,26 +417,71 @@
p->pair = false;
}
+void Mir2Lir::FreeTemp(RegStorage reg) {
+ if (reg.IsPair()) {
+ FreeTemp(reg.GetLowReg());
+ FreeTemp(reg.GetHighReg());
+ } else {
+ FreeTemp(reg.GetReg());
+ }
+}
+
Mir2Lir::RegisterInfo* Mir2Lir::IsLive(int reg) {
RegisterInfo* p = GetRegInfo(reg);
return p->live ? p : NULL;
}
+Mir2Lir::RegisterInfo* Mir2Lir::IsLive(RegStorage reg) {
+ if (reg.IsPair()) {
+ DCHECK_EQ(IsLive(reg.GetLowReg()) == nullptr, IsLive(reg.GetHighReg()) == nullptr);
+ return IsLive(reg.GetLowReg());
+ } else {
+ return IsLive(reg.GetReg());
+ }
+}
+
Mir2Lir::RegisterInfo* Mir2Lir::IsTemp(int reg) {
RegisterInfo* p = GetRegInfo(reg);
return (p->is_temp) ? p : NULL;
}
+Mir2Lir::RegisterInfo* Mir2Lir::IsTemp(RegStorage reg) {
+ if (reg.IsPair()) {
+ DCHECK_EQ(IsTemp(reg.GetLowReg()) == nullptr, IsTemp(reg.GetHighReg()) == nullptr);
+ return IsTemp(reg.GetLowReg());
+ } else {
+ return IsTemp(reg.GetReg());
+ }
+}
+
Mir2Lir::RegisterInfo* Mir2Lir::IsPromoted(int reg) {
RegisterInfo* p = GetRegInfo(reg);
return (p->is_temp) ? NULL : p;
}
+Mir2Lir::RegisterInfo* Mir2Lir::IsPromoted(RegStorage reg) {
+ if (reg.IsPair()) {
+ DCHECK_EQ(IsPromoted(reg.GetLowReg()) == nullptr, IsPromoted(reg.GetHighReg()) == nullptr);
+ return IsPromoted(reg.GetLowReg());
+ } else {
+ return IsPromoted(reg.GetReg());
+ }
+}
+
bool Mir2Lir::IsDirty(int reg) {
RegisterInfo* p = GetRegInfo(reg);
return p->dirty;
}
+bool Mir2Lir::IsDirty(RegStorage reg) {
+ if (reg.IsPair()) {
+ DCHECK_EQ(IsDirty(reg.GetLowReg()), IsDirty(reg.GetHighReg()));
+ return IsDirty(reg.GetLowReg());
+ } else {
+ return IsDirty(reg.GetReg());
+ }
+}
+
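
Each of these RegStorage overloads repeats the same dispatch: split a pair, check that the halves agree, and forward solos to the legacy int-based query. A generic forwarder capturing the pattern (hypothetical, not part of the patch):

```cpp
#include <cassert>

// Hypothetical generic forwarder for the hand-written wrappers above: a
// boolean query on a pair is only meaningful when both halves agree.
template <typename Query>  // Query: bool(int reg_num)
bool QueryStorage(RegStorage reg, Query q) {
  if (reg.IsPair()) {
    bool low = q(reg.GetLowReg());
    assert(low == q(reg.GetHighReg()));  // halves must agree
    return low;
  }
  return q(reg.GetReg());
}
```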
/*
* Similar to AllocTemp(), but forces the allocation of a specific
* register. No check is made to see if the register was previously
@@ -435,10 +494,20 @@
p->live = false;
}
+void Mir2Lir::LockTemp(RegStorage reg) {
+ DCHECK(!reg.IsPair());
+ LockTemp(reg.GetReg());
+}
+
void Mir2Lir::ResetDef(int reg) {
ResetDefBody(GetRegInfo(reg));
}
+void Mir2Lir::ResetDef(RegStorage reg) {
+ DCHECK(!reg.IsPair()); // Is this done? If so, do on both low and high.
+ ResetDef(reg.GetReg());
+}
+
void Mir2Lir::NullifyRange(LIR *start, LIR *finish, int s_reg1, int s_reg2) {
if (start && finish) {
LIR *p;
@@ -474,7 +543,7 @@
DCHECK(rl.wide);
DCHECK(start && start->next);
DCHECK(finish);
- RegisterInfo* p = GetRegInfo(rl.reg.GetReg());
+ RegisterInfo* p = GetRegInfo(rl.reg.GetLowReg());
ResetDef(rl.reg.GetHighReg()); // Only track low of pair
p->def_start = start->next;
p->def_end = finish;
@@ -483,7 +552,7 @@
RegLocation Mir2Lir::WideToNarrow(RegLocation rl) {
DCHECK(rl.wide);
if (rl.location == kLocPhysReg) {
- RegisterInfo* info_lo = GetRegInfo(rl.reg.GetReg());
+ RegisterInfo* info_lo = GetRegInfo(rl.reg.GetLowReg());
RegisterInfo* info_hi = GetRegInfo(rl.reg.GetHighReg());
if (info_lo->is_temp) {
info_lo->pair = false;
@@ -495,6 +564,7 @@
info_hi->def_start = NULL;
info_hi->def_end = NULL;
}
+ rl.reg = RegStorage::Solo32(rl.reg.GetLowReg());
}
rl.wide = false;
return rl;
@@ -512,7 +582,7 @@
void Mir2Lir::ResetDefLocWide(RegLocation rl) {
DCHECK(rl.wide);
- RegisterInfo* p_low = IsTemp(rl.reg.GetReg());
+ RegisterInfo* p_low = IsTemp(rl.reg.GetLowReg());
RegisterInfo* p_high = IsTemp(rl.reg.GetHighReg());
if (p_low && !(cu_->disable_opt & (1 << kSuppressLoads))) {
DCHECK(p_low->pair);
@@ -521,7 +591,7 @@
if (p_high && !(cu_->disable_opt & (1 << kSuppressLoads))) {
DCHECK(p_high->pair);
}
- ResetDef(rl.reg.GetReg());
+ ResetDef(rl.reg.GetLowReg());
ResetDef(rl.reg.GetHighReg());
}
@@ -547,9 +617,9 @@
void Mir2Lir::FlushSpecificReg(RegisterInfo* info) {
if (info->pair) {
- FlushRegWide(info->reg, info->partner);
+ FlushRegWide(RegStorage(RegStorage::k64BitPair, info->reg, info->partner));
} else {
- FlushReg(info->reg);
+ FlushReg(RegStorage::Solo32(info->reg));
}
}
@@ -572,19 +642,21 @@
// TUNING: rewrite all of this reg stuff. Probably use an attribute table
-bool Mir2Lir::RegClassMatches(int reg_class, int reg) {
+bool Mir2Lir::RegClassMatches(int reg_class, RegStorage reg) {
+ int reg_num = reg.IsPair() ? reg.GetLowReg() : reg.GetReg();
if (reg_class == kAnyReg) {
return true;
} else if (reg_class == kCoreReg) {
- return !IsFpReg(reg);
+ return !IsFpReg(reg_num);
} else {
- return IsFpReg(reg);
+ return IsFpReg(reg_num);
}
}
-void Mir2Lir::MarkLive(int reg, int s_reg) {
- RegisterInfo* info = GetRegInfo(reg);
- if ((info->reg == reg) && (info->s_reg == s_reg) && info->live) {
+void Mir2Lir::MarkLive(RegStorage reg, int s_reg) {
+ DCHECK(!reg.IsPair()); // Could be done - but would that be meaningful?
+ RegisterInfo* info = GetRegInfo(reg.GetReg());
+ if ((info->s_reg == s_reg) && info->live) {
return; /* already live */
} else if (s_reg != INVALID_SREG) {
ClobberSReg(s_reg);
@@ -605,12 +677,22 @@
info->is_temp = true;
}
+void Mir2Lir::MarkTemp(RegStorage reg) {
+ DCHECK(!reg.IsPair());
+ MarkTemp(reg.GetReg());
+}
+
void Mir2Lir::UnmarkTemp(int reg) {
RegisterInfo* info = GetRegInfo(reg);
tempreg_info_.Delete(info);
info->is_temp = false;
}
+void Mir2Lir::UnmarkTemp(RegStorage reg) {
+ DCHECK(!reg.IsPair());
+ UnmarkTemp(reg.GetReg());
+}
+
void Mir2Lir::MarkPair(int low_reg, int high_reg) {
DCHECK_NE(low_reg, high_reg);
RegisterInfo* info_lo = GetRegInfo(low_reg);
@@ -621,11 +703,14 @@
}
void Mir2Lir::MarkClean(RegLocation loc) {
- RegisterInfo* info = GetRegInfo(loc.reg.GetReg());
- info->dirty = false;
if (loc.wide) {
+ RegisterInfo* info = GetRegInfo(loc.reg.GetLowReg());
+ info->dirty = false;
info = GetRegInfo(loc.reg.GetHighReg());
info->dirty = false;
+ } else {
+ RegisterInfo* info = GetRegInfo(loc.reg.GetReg());
+ info->dirty = false;
}
}
@@ -634,11 +719,14 @@
// If already home, can't be dirty
return;
}
- RegisterInfo* info = GetRegInfo(loc.reg.GetReg());
- info->dirty = true;
if (loc.wide) {
+ RegisterInfo* info = GetRegInfo(loc.reg.GetLowReg());
+ info->dirty = true;
info = GetRegInfo(loc.reg.GetHighReg());
info->dirty = true;
+ } else {
+ RegisterInfo* info = GetRegInfo(loc.reg.GetReg());
+ info->dirty = true;
}
}
@@ -647,6 +735,15 @@
info->in_use = true;
}
+void Mir2Lir::MarkInUse(RegStorage reg) {
+ if (reg.IsPair()) {
+ MarkInUse(reg.GetLowReg());
+ MarkInUse(reg.GetHighReg());
+ } else {
+ MarkInUse(reg.GetReg());
+ }
+}
+
void Mir2Lir::CopyRegInfo(int new_reg, int old_reg) {
RegisterInfo* new_info = GetRegInfo(new_reg);
RegisterInfo* old_info = GetRegInfo(old_reg);
@@ -658,6 +755,12 @@
new_info->reg = new_reg;
}
+void Mir2Lir::CopyRegInfo(RegStorage new_reg, RegStorage old_reg) {
+ DCHECK(!new_reg.IsPair());
+ DCHECK(!old_reg.IsPair());
+ CopyRegInfo(new_reg.GetReg(), old_reg.GetReg());
+}
+
bool Mir2Lir::CheckCorePoolSanity() {
  for (int i = 0; i < reg_pool_->num_core_regs; i++) {
if (reg_pool_->core_regs[i].pair) {
@@ -707,12 +810,11 @@
Clobber(info_lo->partner);
FreeTemp(info_lo->reg);
} else {
- loc.reg = RegStorage(RegStorage::k32BitSolo, info_lo->reg);
+ loc.reg = RegStorage::Solo32(info_lo->reg);
loc.location = kLocPhysReg;
}
}
}
-
return loc;
}
@@ -746,8 +848,8 @@
// Can reuse - update the register usage info
loc.location = kLocPhysReg;
loc.reg = RegStorage(RegStorage::k64BitPair, info_lo->reg, info_hi->reg);
- MarkPair(loc.reg.GetReg(), loc.reg.GetHighReg());
- DCHECK(!IsFpReg(loc.reg.GetReg()) || ((loc.reg.GetReg() & 0x1) == 0));
+ MarkPair(loc.reg.GetLowReg(), loc.reg.GetHighReg());
+ DCHECK(!IsFpReg(loc.reg.GetLowReg()) || ((loc.reg.GetLowReg() & 0x1) == 0));
return loc;
}
// Can't easily reuse - clobber and free any overlaps
@@ -778,28 +880,23 @@
RegLocation Mir2Lir::EvalLocWide(RegLocation loc, int reg_class, bool update) {
DCHECK(loc.wide);
- int32_t low_reg;
- int32_t high_reg;
loc = UpdateLocWide(loc);
/* If already in registers, we can assume proper form. Right reg class? */
if (loc.location == kLocPhysReg) {
- DCHECK_EQ(IsFpReg(loc.reg.GetReg()), IsFpReg(loc.reg.GetHighReg()));
- DCHECK(!IsFpReg(loc.reg.GetReg()) || ((loc.reg.GetReg() & 0x1) == 0));
- if (!RegClassMatches(reg_class, loc.reg.GetReg())) {
+ DCHECK_EQ(IsFpReg(loc.reg.GetLowReg()), IsFpReg(loc.reg.GetHighReg()));
+ DCHECK(!IsFpReg(loc.reg.GetLowReg()) || ((loc.reg.GetLowReg() & 0x1) == 0));
+ if (!RegClassMatches(reg_class, loc.reg)) {
/* Wrong register class. Reallocate and copy */
RegStorage new_regs = AllocTypedTempWide(loc.fp, reg_class);
- low_reg = new_regs.GetReg();
- high_reg = new_regs.GetHighReg();
- OpRegCopyWide(low_reg, high_reg, loc.reg.GetReg(), loc.reg.GetHighReg());
- CopyRegInfo(low_reg, loc.reg.GetReg());
- CopyRegInfo(high_reg, loc.reg.GetHighReg());
- Clobber(loc.reg.GetReg());
- Clobber(loc.reg.GetHighReg());
+ OpRegCopyWide(new_regs, loc.reg);
+ CopyRegInfo(new_regs.GetLowReg(), loc.reg.GetLowReg());
+ CopyRegInfo(new_regs.GetHighReg(), loc.reg.GetHighReg());
+ Clobber(loc.reg);
loc.reg = new_regs;
- MarkPair(loc.reg.GetReg(), loc.reg.GetHighReg());
- DCHECK(!IsFpReg(loc.reg.GetReg()) || ((loc.reg.GetReg() & 0x1) == 0));
+ MarkPair(loc.reg.GetLowReg(), loc.reg.GetHighReg());
+ DCHECK(!IsFpReg(loc.reg.GetLowReg()) || ((loc.reg.GetLowReg() & 0x1) == 0));
}
return loc;
}
@@ -809,47 +906,44 @@
loc.reg = AllocTypedTempWide(loc.fp, reg_class);
- MarkPair(loc.reg.GetReg(), loc.reg.GetHighReg());
+ MarkPair(loc.reg.GetLowReg(), loc.reg.GetHighReg());
if (update) {
loc.location = kLocPhysReg;
- MarkLive(loc.reg.GetReg(), loc.s_reg_low);
+ MarkLive(loc.reg.GetLow(), loc.s_reg_low);
// Does this wide value live in two registers or one vector register?
- if (loc.reg.GetReg() != loc.reg.GetHighReg()) {
- MarkLive(loc.reg.GetHighReg(), GetSRegHi(loc.s_reg_low));
+ if (loc.reg.GetLowReg() != loc.reg.GetHighReg()) {
+ MarkLive(loc.reg.GetHigh(), GetSRegHi(loc.s_reg_low));
}
}
- DCHECK(!IsFpReg(loc.reg.GetReg()) || ((loc.reg.GetReg() & 0x1) == 0));
+ DCHECK(!IsFpReg(loc.reg.GetLowReg()) || ((loc.reg.GetLowReg() & 0x1) == 0));
return loc;
}
RegLocation Mir2Lir::EvalLoc(RegLocation loc, int reg_class, bool update) {
- int new_reg;
-
if (loc.wide)
return EvalLocWide(loc, reg_class, update);
loc = UpdateLoc(loc);
if (loc.location == kLocPhysReg) {
- if (!RegClassMatches(reg_class, loc.reg.GetReg())) {
+ if (!RegClassMatches(reg_class, loc.reg)) {
/* Wrong register class. Realloc, copy and transfer ownership */
- new_reg = AllocTypedTemp(loc.fp, reg_class);
- OpRegCopy(new_reg, loc.reg.GetReg());
- CopyRegInfo(new_reg, loc.reg.GetReg());
- Clobber(loc.reg.GetReg());
- loc.reg = RegStorage(RegStorage::k32BitSolo, new_reg);
+ RegStorage new_reg = AllocTypedTemp(loc.fp, reg_class);
+ OpRegCopy(new_reg, loc.reg);
+ CopyRegInfo(new_reg, loc.reg);
+ Clobber(loc.reg);
+ loc.reg = new_reg;
}
return loc;
}
DCHECK_NE(loc.s_reg_low, INVALID_SREG);
- new_reg = AllocTypedTemp(loc.fp, reg_class);
- loc.reg = RegStorage(RegStorage::k32BitSolo, new_reg);
+ loc.reg = AllocTypedTemp(loc.fp, reg_class);
if (update) {
loc.location = kLocPhysReg;
- MarkLive(loc.reg.GetReg(), loc.s_reg_low);
+ MarkLive(loc.reg, loc.s_reg_low);
}
return loc;
}
@@ -972,8 +1066,8 @@
AllocPreservedDouble(low_sreg);
}
} else if (promotion_map_[p_map_idx].fp_location != kLocPhysReg) {
- int reg = AllocPreservedSingle(FpRegs[i].s_reg);
- if (reg < 0) {
+ RegStorage reg = AllocPreservedSingle(FpRegs[i].s_reg);
+ if (!reg.Valid()) {
break; // No more left.
}
}
@@ -985,8 +1079,8 @@
int p_map_idx = SRegToPMap(core_regs[i].s_reg);
if (promotion_map_[p_map_idx].core_location !=
kLocPhysReg) {
- int reg = AllocPreservedCoreReg(core_regs[i].s_reg);
- if (reg < 0) {
+ RegStorage reg = AllocPreservedCoreReg(core_regs[i].s_reg);
+ if (!reg.Valid()) {
break; // No more left
}
}
@@ -1001,13 +1095,13 @@
if (curr->fp) {
if (promotion_map_[p_map_idx].fp_location == kLocPhysReg) {
curr->location = kLocPhysReg;
- curr->reg = RegStorage(RegStorage::k32BitSolo, promotion_map_[p_map_idx].FpReg);
+ curr->reg = RegStorage::Solo32(promotion_map_[p_map_idx].FpReg);
curr->home = true;
}
} else {
if (promotion_map_[p_map_idx].core_location == kLocPhysReg) {
curr->location = kLocPhysReg;
- curr->reg = RegStorage(RegStorage::k32BitSolo, promotion_map_[p_map_idx].core_reg);
+ curr->reg = RegStorage::Solo32(promotion_map_[p_map_idx].core_reg);
curr->home = true;
}
}
@@ -1060,13 +1154,13 @@
RegLocation gpr_res = LocCReturnWide();
RegLocation fpr_res = LocCReturnDouble();
RegLocation res = is_double ? fpr_res : gpr_res;
- Clobber(res.reg.GetReg());
+ Clobber(res.reg.GetLowReg());
Clobber(res.reg.GetHighReg());
- LockTemp(res.reg.GetReg());
+ LockTemp(res.reg.GetLowReg());
LockTemp(res.reg.GetHighReg());
// Does this wide value live in two registers or one vector register?
- if (res.reg.GetReg() != res.reg.GetHighReg()) {
- MarkPair(res.reg.GetReg(), res.reg.GetHighReg());
+ if (res.reg.GetLowReg() != res.reg.GetHighReg()) {
+ MarkPair(res.reg.GetLowReg(), res.reg.GetHighReg());
}
return res;
}
diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc
index 9cafcee..e7a1a69 100644
--- a/compiler/dex/quick/x86/assemble_x86.cc
+++ b/compiler/dex/quick/x86/assemble_x86.cc
@@ -344,6 +344,7 @@
{ kX86LockCmpxchgAR, kArrayReg, IS_STORE | IS_QUIN_OP | REG_USE014 | REG_DEFA_USEA | SETS_CCODES, { 0xF0, 0, 0x0F, 0xB1, 0, 0, 0, 0 }, "Lock Cmpxchg", "[!0r+!1r<<!2d+!3d],!4r" },
{ kX86LockCmpxchg8bM, kMem, IS_STORE | IS_BINARY_OP | REG_USE0 | REG_DEFAD_USEAD | REG_USEC | REG_USEB | SETS_CCODES, { 0xF0, 0, 0x0F, 0xC7, 0, 1, 0, 0 }, "Lock Cmpxchg8b", "[!0r+!1d]" },
{ kX86LockCmpxchg8bA, kArray, IS_STORE | IS_QUAD_OP | REG_USE01 | REG_DEFAD_USEAD | REG_USEC | REG_USEB | SETS_CCODES, { 0xF0, 0, 0x0F, 0xC7, 0, 1, 0, 0 }, "Lock Cmpxchg8b", "[!0r+!1r<<!2d+!3d]" },
+ { kX86XchgMR, kMemReg, IS_STORE | IS_LOAD | IS_TERTIARY_OP | REG_DEF2 | REG_USE02, { 0, 0, 0x87, 0, 0, 0, 0, 0 }, "Xchg", "[!0r+!1d],!2r" },
EXT_0F_ENCODING_MAP(Movzx8, 0x00, 0xB6, REG_DEF0),
EXT_0F_ENCODING_MAP(Movzx16, 0x00, 0xB7, REG_DEF0),
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index 68e2b6d..d97cf4d 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -40,8 +40,7 @@
int key = keys[i];
BasicBlock* case_block =
mir_graph_->FindBlock(current_dalvik_offset_ + targets[i]);
- OpCmpImmBranch(kCondEq, rl_src.reg.GetReg(), key,
- &block_label_list_[case_block->id]);
+ OpCmpImmBranch(kCondEq, rl_src.reg, key, &block_label_list_[case_block->id]);
}
}
@@ -82,37 +81,37 @@
// NewLIR0(kX86Bkpt);
// Materialize a pointer to the switch table
- int start_of_method_reg;
+ RegStorage start_of_method_reg;
if (base_of_code_ != nullptr) {
// We can use the saved value.
RegLocation rl_method = mir_graph_->GetRegLocation(base_of_code_->s_reg_low);
rl_method = LoadValue(rl_method, kCoreReg);
- start_of_method_reg = rl_method.reg.GetReg();
+ start_of_method_reg = rl_method.reg;
store_method_addr_used_ = true;
} else {
start_of_method_reg = AllocTemp();
- NewLIR1(kX86StartOfMethod, start_of_method_reg);
+ NewLIR1(kX86StartOfMethod, start_of_method_reg.GetReg());
}
int low_key = s4FromSwitchData(&table[2]);
- int keyReg;
+ RegStorage keyReg;
// Remove the bias, if necessary
if (low_key == 0) {
- keyReg = rl_src.reg.GetReg();
+ keyReg = rl_src.reg;
} else {
keyReg = AllocTemp();
- OpRegRegImm(kOpSub, keyReg, rl_src.reg.GetReg(), low_key);
+ OpRegRegImm(kOpSub, keyReg, rl_src.reg, low_key);
}
// Bounds check - if < 0 or >= size continue following switch
OpRegImm(kOpCmp, keyReg, size-1);
LIR* branch_over = OpCondBranch(kCondHi, NULL);
// Load the displacement from the switch table
- int disp_reg = AllocTemp();
- NewLIR5(kX86PcRelLoadRA, disp_reg, start_of_method_reg, keyReg, 2, WrapPointer(tab_rec));
+ RegStorage disp_reg = AllocTemp();
+ NewLIR5(kX86PcRelLoadRA, disp_reg.GetReg(), start_of_method_reg.GetReg(), keyReg.GetReg(), 2, WrapPointer(tab_rec));
// Add displacement to start of method
OpRegReg(kOpAdd, start_of_method_reg, disp_reg);
// ..and go!
- LIR* switch_branch = NewLIR1(kX86JmpR, start_of_method_reg);
+ LIR* switch_branch = NewLIR1(kX86JmpR, start_of_method_reg.GetReg());
tab_rec->anchor = switch_branch;
/* branch_over target here */
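
The sequence above is the classic table dispatch: bias the key by low_key, do one unsigned bounds check (cmp/jhi folds the key < 0 and key >= size tests into one branch), then load a displacement and jump. Modeled with handler pointers (a sketch of the shape, not the emitted form):

```cpp
#include <cstdint>

using Handler = void (*)();

// Packed-switch dispatch shape: one unsigned compare replaces both the
// key < 0 and the key >= size checks.
void Dispatch(int32_t key, int32_t low_key, const Handler* table, int32_t size,
              Handler fall_through) {
  uint32_t biased = static_cast<uint32_t>(key - low_key);  // sub keyReg, low_key
  if (biased >= static_cast<uint32_t>(size)) {             // cmp keyReg, size-1; jhi
    fall_through();                                        // branch_over target
    return;
  }
  table[biased]();  // pc-relative displacement load + jmp in the emitted code
}
```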
@@ -145,20 +144,20 @@
// Making a call - use explicit registers
FlushAllRegs(); /* Everything to home location */
- LoadValueDirectFixed(rl_src, rX86_ARG0);
+ LoadValueDirectFixed(rl_src, rs_rX86_ARG0);
// Materialize a pointer to the fill data image
if (base_of_code_ != nullptr) {
// We can use the saved value.
RegLocation rl_method = mir_graph_->GetRegLocation(base_of_code_->s_reg_low);
- LoadValueDirect(rl_method, rX86_ARG2);
+ LoadValueDirect(rl_method, rs_rX86_ARG2);
store_method_addr_used_ = true;
} else {
NewLIR1(kX86StartOfMethod, rX86_ARG2);
}
NewLIR2(kX86PcRelAdr, rX86_ARG1, WrapPointer(tab_rec));
NewLIR2(kX86Add32RR, rX86_ARG1, rX86_ARG2);
- CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayData), rX86_ARG0,
- rX86_ARG1, true);
+ CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayData), rs_rX86_ARG0,
+ rs_rX86_ARG1, true);
}
void X86Mir2Lir::GenMoveException(RegLocation rl_dest) {
@@ -172,14 +171,13 @@
/*
* Mark garbage collection card. Skip if the value we're storing is null.
*/
-void X86Mir2Lir::MarkGCCard(int val_reg, int tgt_addr_reg) {
- int reg_card_base = AllocTemp();
- int reg_card_no = AllocTemp();
+void X86Mir2Lir::MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) {
+ RegStorage reg_card_base = AllocTemp();
+ RegStorage reg_card_no = AllocTemp();
LIR* branch_over = OpCmpImmBranch(kCondEq, val_reg, 0, NULL);
- NewLIR2(kX86Mov32RT, reg_card_base, Thread::CardTableOffset().Int32Value());
+ NewLIR2(kX86Mov32RT, reg_card_base.GetReg(), Thread::CardTableOffset().Int32Value());
OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
- StoreBaseIndexed(reg_card_base, reg_card_no, reg_card_base, 0,
- kUnsignedByte);
+ StoreBaseIndexed(reg_card_base, reg_card_no, reg_card_base, 0, kUnsignedByte);
LIR* target = NewLIR0(kPseudoTargetLabel);
branch_over->target = target;
FreeTemp(reg_card_base);
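
Note that StoreBaseIndexed above reuses reg_card_base as the value operand: ART's card table base is biased so that its low byte equals the dirty value, which saves loading a separate constant. A sketch of the resulting store (the 128-byte card size, kCardShift == 7, is an assumption about the runtime, not taken from this patch):

```cpp
#include <cstdint>

// Card mark as emitted above, assuming 128-byte cards and a biased table
// base whose low byte is the dirty value.
void MarkCard(uint8_t* biased_base, const void* stored_into) {
  uintptr_t card = reinterpret_cast<uintptr_t>(stored_into) >> 7;  // kCardShift
  biased_base[card] = static_cast<uint8_t>(reinterpret_cast<uintptr_t>(biased_base));
}
```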
@@ -199,7 +197,7 @@
/* Build frame, return address already on stack */
// TODO: 64 bit.
- stack_decrement_ = OpRegImm(kOpSub, rX86_SP, frame_size_ - 4);
+ stack_decrement_ = OpRegImm(kOpSub, rs_rX86_SP, frame_size_ - 4);
/*
* We can safely skip the stack overflow check if we're
@@ -222,11 +220,12 @@
m2l_->ResetRegPool();
m2l_->ResetDefTracking();
GenerateTargetLabel();
- m2l_->OpRegImm(kOpAdd, kX86RegSP, sp_displace_);
+ m2l_->OpRegImm(kOpAdd, rs_rX86_SP, sp_displace_);
m2l_->ClobberCallerSave();
ThreadOffset func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowStackOverflow);
// Assumes codegen and target are in thumb2 mode.
- m2l_->CallHelper(0, func_offset, false /* MarkSafepointPC */, false /* UseLink */);
+ m2l_->CallHelper(RegStorage::InvalidReg(), func_offset, false /* MarkSafepointPC */,
+ false /* UseLink */);
}
private:
@@ -252,7 +251,7 @@
// We have been asked to save the address of the method start for later use.
setup_method_address_[0] = NewLIR1(kX86StartOfMethod, rX86_ARG0);
int displacement = SRegOffset(base_of_code_->s_reg_low);
- setup_method_address_[1] = StoreBaseDisp(rX86_SP, displacement, rX86_ARG0, kWord);
+ setup_method_address_[1] = StoreBaseDisp(rs_rX86_SP, displacement, rs_rX86_ARG0, kWord);
}
FreeTemp(rX86_ARG0);
@@ -271,7 +270,7 @@
NewLIR0(kPseudoMethodExit);
UnSpillCoreRegs();
/* Remove frame except for return address */
- stack_increment_ = OpRegImm(kOpAdd, rX86_SP, frame_size_ - 4);
+ stack_increment_ = OpRegImm(kOpAdd, rs_rX86_SP, frame_size_ - 4);
NewLIR0(kX86Ret);
}
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 275a2d9..4c495a1 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -22,39 +22,44 @@
namespace art {
-class X86Mir2Lir : public Mir2Lir {
+class X86Mir2Lir FINAL : public Mir2Lir {
public:
X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
// Required for target - codegen helpers.
bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
- RegLocation rl_dest, int lit);
- int LoadHelper(ThreadOffset offset);
+ RegLocation rl_dest, int lit);
+ bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
LIR* CheckSuspendUsingLoad() OVERRIDE;
- LIR* LoadBaseDisp(int rBase, int displacement, int r_dest, OpSize size, int s_reg);
- LIR* LoadBaseDispWide(int rBase, int displacement, int r_dest_lo, int r_dest_hi,
- int s_reg);
- LIR* LoadBaseIndexed(int rBase, int r_index, int r_dest, int scale, OpSize size);
- LIR* LoadBaseIndexedDisp(int rBase, int r_index, int scale, int displacement,
- int r_dest, int r_dest_hi, OpSize size, int s_reg);
- LIR* LoadConstantNoClobber(int r_dest, int value);
- LIR* LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value);
- LIR* StoreBaseDisp(int rBase, int displacement, int r_src, OpSize size);
- LIR* StoreBaseDispWide(int rBase, int displacement, int r_src_lo, int r_src_hi);
- LIR* StoreBaseIndexed(int rBase, int r_index, int r_src, int scale, OpSize size);
- LIR* StoreBaseIndexedDisp(int rBase, int r_index, int scale, int displacement,
- int r_src, int r_src_hi, OpSize size, int s_reg);
- void MarkGCCard(int val_reg, int tgt_addr_reg);
+ RegStorage LoadHelper(ThreadOffset offset);
+ LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size,
+ int s_reg);
+ LIR* LoadBaseDispWide(RegStorage r_base, int displacement, RegStorage r_dest, int s_reg);
+ LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
+ OpSize size);
+ // TODO: collapse r_dest, r_dest_hi
+ LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
+ RegStorage r_dest, RegStorage r_dest_hi, OpSize size, int s_reg);
+ LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
+ LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
+ LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, OpSize size);
+ LIR* StoreBaseDispWide(RegStorage r_base, int displacement, RegStorage r_src);
+ LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
+ OpSize size);
+ // TODO: collapse r_src, r_src_hi
+ LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
+ RegStorage r_src, RegStorage r_src_hi, OpSize size, int s_reg);
+ void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg);
// Required for target - register utilities.
bool IsFpReg(int reg);
+ bool IsFpReg(RegStorage reg);
bool SameRegType(int reg1, int reg2);
- // TODO: for consistency, make this return a RegStorage as well?
- int AllocTypedTemp(bool fp_hint, int reg_class);
+ RegStorage AllocTypedTemp(bool fp_hint, int reg_class);
RegStorage AllocTypedTempWide(bool fp_hint, int reg_class);
int S2d(int low_reg, int high_reg);
- int TargetReg(SpecialTargetRegister reg);
- int GetArgMappingToPhysicalReg(int arg_num);
+ RegStorage TargetReg(SpecialTargetRegister reg);
+ RegStorage GetArgMappingToPhysicalReg(int arg_num);
RegLocation GetReturnAlt();
RegLocation GetReturnWideAlt();
RegLocation LocCReturn();
@@ -65,8 +70,8 @@
uint64_t GetRegMaskCommon(int reg);
void AdjustSpillMask();
void ClobberCallerSave();
- void FlushReg(int reg);
- void FlushRegWide(int reg1, int reg2);
+ void FlushReg(RegStorage reg);
+ void FlushRegWide(RegStorage reg);
void FreeCallTemps();
void FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free);
void LockCallTemps();
@@ -89,23 +94,26 @@
bool IsUnconditionalBranch(LIR* lir);
// Required for target - Dalvik-level generators.
- void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2);
- void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
- RegLocation rl_index, RegLocation rl_dest, int scale);
+ void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array, RegLocation rl_index,
+ RegLocation rl_dest, int scale);
void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
RegLocation rl_index, RegLocation rl_src, int scale, bool card_mark);
void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_shift);
- void GenMulLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
- void GenAddLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
- void GenAndLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
- void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2);
- void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2);
- void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ void GenMulLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ void GenAddLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ void GenAndLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
RegLocation rl_src2);
+ void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
void GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src);
bool GenInlinedCas(CallInfo* info, bool is_long, bool is_object);
bool GenInlinedMinMaxInt(CallInfo* info, bool is_min);
@@ -113,17 +121,21 @@
bool GenInlinedPeek(CallInfo* info, OpSize size);
bool GenInlinedPoke(CallInfo* info, OpSize size);
void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
- void GenOrLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
- void GenSubLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
- void GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
- LIR* GenRegMemCheck(ConditionCode c_code, int reg1, int base, int offset,
- ThrowKind kind);
- LIR* GenMemImmedCheck(ConditionCode c_code, int base, int offset, int check_value,
+ void GenOrLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ void GenSubLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ void GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ LIR* GenRegMemCheck(ConditionCode c_code, RegStorage reg1, RegStorage base, int offset,
+ ThrowKind kind);
+ LIR* GenMemImmedCheck(ConditionCode c_code, RegStorage base, int offset, int check_value,
ThrowKind kind);
- RegLocation GenDivRem(RegLocation rl_dest, int reg_lo, int reg_hi, bool is_div);
- RegLocation GenDivRemLit(RegLocation rl_dest, int reg_lo, int lit, bool is_div);
+ // TODO: collapse reg_lo, reg_hi
+ RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div);
+ RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div);
void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
- void GenDivZeroCheck(int reg_lo, int reg_hi);
+ void GenDivZeroCheck(RegStorage reg);
void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method);
void GenExitSequence();
void GenSpecialExitSequence();
@@ -133,8 +145,8 @@
void GenSelect(BasicBlock* bb, MIR* mir);
void GenMemBarrier(MemBarrierKind barrier_kind);
void GenMoveException(RegLocation rl_dest);
- void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result,
- int lit, int first_bit, int second_bit);
+ void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
+ int first_bit, int second_bit);
void GenNegDouble(RegLocation rl_dest, RegLocation rl_src);
void GenNegFloat(RegLocation rl_dest, RegLocation rl_src);
void GenPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
@@ -154,8 +166,8 @@
* @param rl_src2 constant source operand
* @param op Opcode to be generated
*/
- void GenLongLongImm(RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2, Instruction::Code op);
+ void GenLongLongImm(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
+ Instruction::Code op);
/**
* @brief Generate a long arithmetic operation.
@@ -165,8 +177,8 @@
* @param op The DEX opcode for the operation.
* @param is_commutative The sources can be swapped if needed.
*/
- void GenLongArith(RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2, Instruction::Code op, bool is_commutative);
+ void GenLongArith(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
+ Instruction::Code op, bool is_commutative);
/**
* @brief Generate a two operand long arithmetic operation.
@@ -191,8 +203,8 @@
* @param rl_dest Result to be set to 0 or 1.
* @param rl_src Object to be tested.
*/
- void GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx,
- RegLocation rl_dest, RegLocation rl_src);
+ void GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, RegLocation rl_dest,
+ RegLocation rl_src);
/*
*
   * @brief Set up instanceof of a class with x86-specific code.
@@ -208,38 +220,37 @@
void GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final,
bool type_known_abstract, bool use_declaring_class,
bool can_assume_type_is_in_dex_cache,
- uint32_t type_idx, RegLocation rl_dest,
- RegLocation rl_src);
+ uint32_t type_idx, RegLocation rl_dest, RegLocation rl_src);
// Single operation generators.
LIR* OpUnconditionalBranch(LIR* target);
- LIR* OpCmpBranch(ConditionCode cond, int src1, int src2, LIR* target);
- LIR* OpCmpImmBranch(ConditionCode cond, int reg, int check_value, LIR* target);
+ LIR* OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target);
+ LIR* OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value, LIR* target);
LIR* OpCondBranch(ConditionCode cc, LIR* target);
- LIR* OpDecAndBranch(ConditionCode c_code, int reg, LIR* target);
- LIR* OpFpRegCopy(int r_dest, int r_src);
+ LIR* OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target);
+ LIR* OpFpRegCopy(RegStorage r_dest, RegStorage r_src);
LIR* OpIT(ConditionCode cond, const char* guide);
- LIR* OpMem(OpKind op, int rBase, int disp);
- LIR* OpPcRelLoad(int reg, LIR* target);
- LIR* OpReg(OpKind op, int r_dest_src);
- LIR* OpRegCopy(int r_dest, int r_src);
- LIR* OpRegCopyNoInsert(int r_dest, int r_src);
- LIR* OpRegImm(OpKind op, int r_dest_src1, int value);
- LIR* OpRegMem(OpKind op, int r_dest, int rBase, int offset);
+ LIR* OpMem(OpKind op, RegStorage r_base, int disp);
+ LIR* OpPcRelLoad(RegStorage reg, LIR* target);
+ LIR* OpReg(OpKind op, RegStorage r_dest_src);
+ LIR* OpRegCopy(RegStorage r_dest, RegStorage r_src);
+ LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src);
+ LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value);
+ LIR* OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset);
+ LIR* OpRegMem(OpKind op, RegStorage r_dest, RegLocation value);
LIR* OpMemReg(OpKind op, RegLocation rl_dest, int value);
- LIR* OpRegMem(OpKind op, int r_dest, RegLocation value);
- LIR* OpRegReg(OpKind op, int r_dest_src1, int r_src2);
- LIR* OpMovRegMem(int r_dest, int r_base, int offset, MoveType move_type);
- LIR* OpMovMemReg(int r_base, int offset, int r_src, MoveType move_type);
- LIR* OpCondRegReg(OpKind op, ConditionCode cc, int r_dest, int r_src);
- LIR* OpRegRegImm(OpKind op, int r_dest, int r_src1, int value);
- LIR* OpRegRegReg(OpKind op, int r_dest, int r_src1, int r_src2);
+ LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2);
+ LIR* OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type);
+ LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type);
+ LIR* OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src);
+ LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value);
+ LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2);
LIR* OpTestSuspend(LIR* target);
LIR* OpThreadMem(OpKind op, ThreadOffset thread_offset);
- LIR* OpVldm(int rBase, int count);
- LIR* OpVstm(int rBase, int count);
- void OpLea(int rBase, int reg1, int reg2, int scale, int offset);
- void OpRegCopyWide(int dest_lo, int dest_hi, int src_lo, int src_hi);
+ LIR* OpVldm(RegStorage r_base, int count);
+ LIR* OpVstm(RegStorage r_base, int count);
+ void OpLea(RegStorage r_base, RegStorage reg1, RegStorage reg2, int scale, int offset);
+ void OpRegCopyWide(RegStorage dest, RegStorage src);
void OpTlsCmp(ThreadOffset offset, int val);
void OpRegThreadMem(OpKind op, int r_dest, ThreadOffset thread_offset);
@@ -254,7 +265,7 @@
RegLocation UpdateLocWide(RegLocation loc);
RegLocation EvalLocWide(RegLocation loc, int reg_class, bool update);
RegLocation EvalLoc(RegLocation loc, int reg_class, bool update);
- int AllocTempDouble();
+ RegStorage AllocTempDouble();
void ResetDefLocWide(RegLocation rl);
/*
@@ -264,8 +275,8 @@
* @param rl_lhs Left hand operand.
* @param rl_rhs Right hand operand.
*/
- void GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_lhs, RegLocation rl_rhs);
+ void GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_lhs,
+ RegLocation rl_rhs);
/*
* @brief Dump a RegLocation using printf
@@ -327,8 +338,7 @@
void EmitOpRegOpcode(const X86EncodingMap* entry, uint8_t reg);
void EmitOpReg(const X86EncodingMap* entry, uint8_t reg);
void EmitOpMem(const X86EncodingMap* entry, uint8_t base, int disp);
- void EmitOpArray(const X86EncodingMap* entry, uint8_t base, uint8_t index,
- int scale, int disp);
+ void EmitOpArray(const X86EncodingMap* entry, uint8_t base, uint8_t index, int scale, int disp);
void EmitMemReg(const X86EncodingMap* entry, uint8_t base, int disp, uint8_t reg);
void EmitMemImm(const X86EncodingMap* entry, uint8_t base, int disp, int32_t imm);
void EmitRegMem(const X86EncodingMap* entry, uint8_t reg, uint8_t base, int disp);
@@ -340,7 +350,8 @@
void EmitRegReg(const X86EncodingMap* entry, uint8_t reg1, uint8_t reg2);
void EmitRegRegImm(const X86EncodingMap* entry, uint8_t reg1, uint8_t reg2, int32_t imm);
void EmitRegRegImmRev(const X86EncodingMap* entry, uint8_t reg1, uint8_t reg2, int32_t imm);
- void EmitRegMemImm(const X86EncodingMap* entry, uint8_t reg1, uint8_t base, int disp, int32_t imm);
+ void EmitRegMemImm(const X86EncodingMap* entry, uint8_t reg1, uint8_t base, int disp,
+ int32_t imm);
void EmitRegImm(const X86EncodingMap* entry, uint8_t reg, int imm);
void EmitThreadImm(const X86EncodingMap* entry, int disp, int imm);
void EmitMovRegImm(const X86EncodingMap* entry, uint8_t reg, int imm);
@@ -372,6 +383,8 @@
void OpVectorRegCopyWide(uint8_t fp_reg, uint8_t low_reg, uint8_t high_reg);
void GenConstWide(RegLocation rl_dest, int64_t value);
+ static bool ProvidesFullMemoryBarrier(X86OpCode opcode);
+
/*
   * @brief Generate inline code for the fast case of String.indexOf.
* @param info Call parameters
@@ -427,8 +440,8 @@
* @param is_div 'true' if this is a division, 'false' for a remainder.
* @param check_zero 'true' if an exception should be generated if the divisor is 0.
*/
- RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2, bool is_div, bool check_zero);
+ RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
+ bool is_div, bool check_zero);
/*
* @brief Generate an integer div or rem operation by a literal.
@@ -455,7 +468,7 @@
* @param src Source Register.
* @param val Constant multiplier.
*/
- void GenImulRegImm(int dest, int src, int val);
+ void GenImulRegImm(RegStorage dest, RegStorage src, int val);
/*
* Generate an imul of a memory location by a constant or a better sequence.
@@ -464,7 +477,7 @@
* @param displacement Displacement on stack of Symbolic Register.
* @param val Constant multiplier.
*/
- void GenImulMemImm(int dest, int sreg, int displacement, int val);
+ void GenImulMemImm(RegStorage dest, int sreg, int displacement, int val);
/*
* @brief Compare memory to immediate, and branch if condition true.
@@ -475,7 +488,7 @@
* @param offset The offset from the base.
* @param check_value The immediate to compare to.
*/
- LIR* OpCmpMemImmBranch(ConditionCode cond, int temp_reg, int base_reg,
+ LIR* OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
int offset, int check_value, LIR* target);
/*
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
index b745207..ec4d9db 100644
--- a/compiler/dex/quick/x86/fp_x86.cc
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -63,15 +63,15 @@
rl_src1 = LoadValue(rl_src1, kFPReg);
rl_src2 = LoadValue(rl_src2, kFPReg);
rl_result = EvalLoc(rl_dest, kFPReg, true);
- int r_dest = rl_result.reg.GetReg();
- int r_src1 = rl_src1.reg.GetReg();
- int r_src2 = rl_src2.reg.GetReg();
+ RegStorage r_dest = rl_result.reg;
+ RegStorage r_src1 = rl_src1.reg;
+ RegStorage r_src2 = rl_src2.reg;
if (r_dest == r_src2) {
r_src2 = AllocTempFloat();
OpRegCopy(r_src2, r_dest);
}
OpRegCopy(r_dest, r_src1);
- NewLIR2(op, r_dest, r_src2);
+ NewLIR2(op, r_dest.GetReg(), r_src2.GetReg());
StoreValue(rl_dest, rl_result);
}
@@ -118,14 +118,15 @@
rl_result = EvalLoc(rl_dest, kFPReg, true);
DCHECK(rl_dest.wide);
DCHECK(rl_result.wide);
- int r_dest = S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg());
- int r_src1 = S2d(rl_src1.reg.GetReg(), rl_src1.reg.GetHighReg());
- int r_src2 = S2d(rl_src2.reg.GetReg(), rl_src2.reg.GetHighReg());
+ // TODO: update with direct 64-bit reg.
+ int r_dest = S2d(rl_result.reg.GetLowReg(), rl_result.reg.GetHighReg());
+ int r_src1 = S2d(rl_src1.reg.GetLowReg(), rl_src1.reg.GetHighReg());
+ int r_src2 = S2d(rl_src2.reg.GetLowReg(), rl_src2.reg.GetHighReg());
if (r_dest == r_src2) {
- r_src2 = AllocTempDouble() | X86_FP_DOUBLE;
- OpRegCopy(r_src2, r_dest);
+ r_src2 = AllocTempDouble().GetLowReg() | X86_FP_DOUBLE;
+ OpRegCopy(RegStorage::Solo64(r_src2), RegStorage::Solo64(r_dest));
}
- OpRegCopy(r_dest, r_src1);
+ OpRegCopy(RegStorage::Solo64(r_dest), RegStorage::Solo64(r_src1));
NewLIR2(op, r_dest, r_src2);
StoreValueWide(rl_dest, rl_result);
}
@@ -140,7 +141,7 @@
// If the source is in physical register, then put it in its location on stack.
if (rl_src.location == kLocPhysReg) {
- RegisterInfo* lo_info = GetRegInfo(rl_src.reg.GetReg());
+ RegisterInfo* lo_info = GetRegInfo(rl_src.reg.GetLowReg());
if (lo_info != nullptr && lo_info->is_temp) {
// Calling FlushSpecificReg because it will only write back VR if it is dirty.
@@ -148,19 +149,19 @@
} else {
// It must have been register promoted if it is not a temp but is still in physical
// register. Since we need it to be in memory to convert, we place it there now.
- StoreBaseDispWide(TargetReg(kSp), src_v_reg_offset, rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
+ StoreBaseDispWide(TargetReg(kSp), src_v_reg_offset, rl_src.reg);
}
}
// Push the source virtual register onto the x87 stack.
- LIR *fild64 = NewLIR2NoDest(kX86Fild64M, TargetReg(kSp), src_v_reg_offset + LOWORD_OFFSET);
+ LIR *fild64 = NewLIR2NoDest(kX86Fild64M, TargetReg(kSp).GetReg(), src_v_reg_offset + LOWORD_OFFSET);
AnnotateDalvikRegAccess(fild64, (src_v_reg_offset + LOWORD_OFFSET) >> 2,
true /* is_load */, true /* is64bit */);
// Now pop off x87 stack and store it in the destination VR's stack location.
int opcode = is_double ? kX86Fstp64M : kX86Fstp32M;
int displacement = is_double ? dest_v_reg_offset + LOWORD_OFFSET : dest_v_reg_offset;
- LIR *fstp = NewLIR2NoDest(opcode, TargetReg(kSp), displacement);
+ LIR *fstp = NewLIR2NoDest(opcode, TargetReg(kSp).GetReg(), displacement);
AnnotateDalvikRegAccess(fstp, displacement >> 2, false /* is_load */, is_double);
/*
@@ -181,13 +182,13 @@
if (is_double) {
rl_result = EvalLocWide(rl_dest, kFPReg, true);
- LoadBaseDispWide(TargetReg(kSp), dest_v_reg_offset, rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), INVALID_SREG);
+ LoadBaseDispWide(TargetReg(kSp), dest_v_reg_offset, rl_result.reg, INVALID_SREG);
StoreFinalValueWide(rl_dest, rl_result);
} else {
rl_result = EvalLoc(rl_dest, kFPReg, true);
- LoadWordDisp(TargetReg(kSp), dest_v_reg_offset, rl_result.reg.GetReg());
+ LoadWordDisp(TargetReg(kSp), dest_v_reg_offset, rl_result.reg);
StoreFinalValue(rl_dest, rl_result);
}
@@ -223,9 +224,9 @@
// In case result vreg is also src vreg, break association to avoid useless copy by EvalLoc()
ClobberSReg(rl_dest.s_reg_low);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- int temp_reg = AllocTempFloat();
+ int temp_reg = AllocTempFloat().GetReg();
- LoadConstant(rl_result.reg.GetReg(), 0x7fffffff);
+ LoadConstant(rl_result.reg, 0x7fffffff);
NewLIR2(kX86Cvtsi2ssRR, temp_reg, rl_result.reg.GetReg());
NewLIR2(kX86ComissRR, src_reg, temp_reg);
LIR* branch_pos_overflow = NewLIR2(kX86Jcc8, 0, kX86CondA);
@@ -241,13 +242,13 @@
}
case Instruction::DOUBLE_TO_INT: {
rl_src = LoadValueWide(rl_src, kFPReg);
- src_reg = rl_src.reg.GetReg();
+ src_reg = rl_src.reg.GetLowReg();
// In case result vreg is also src vreg, break association to avoid useless copy by EvalLoc()
ClobberSReg(rl_dest.s_reg_low);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- int temp_reg = AllocTempDouble() | X86_FP_DOUBLE;
+ int temp_reg = AllocTempDouble().GetLowReg() | X86_FP_DOUBLE;
- LoadConstant(rl_result.reg.GetReg(), 0x7fffffff);
+ LoadConstant(rl_result.reg, 0x7fffffff);
NewLIR2(kX86Cvtsi2sdRR, temp_reg, rl_result.reg.GetReg());
NewLIR2(kX86ComisdRR, src_reg, temp_reg);
LIR* branch_pos_overflow = NewLIR2(kX86Jcc8, 0, kX86CondA);
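
Both conversion cases above build the same guard: materialize (float)INT_MAX in an FP temp, compare, and branch to a clamp on overflow, because Java semantics differ from raw cvttss2si/cvttsd2si (which yield 0x80000000 for any NaN or out-of-range input). The intended result, sketched in C++ as a model of the semantics rather than the emitted code:

```cpp
#include <cstdint>
#include <limits>

// Java FLOAT_TO_INT semantics the guards above implement: NaN goes to 0,
// out-of-range values saturate instead of producing cvttss2si's 0x80000000.
int32_t JavaFloatToInt(float f) {
  if (f != f) return 0;                                          // NaN
  if (f >= 2147483648.0f) return std::numeric_limits<int32_t>::max();
  if (f < -2147483648.0f) return std::numeric_limits<int32_t>::min();
  return static_cast<int32_t>(f);                                // truncate
}
```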
@@ -278,14 +279,14 @@
}
if (rl_src.wide) {
rl_src = LoadValueWide(rl_src, rcSrc);
- src_reg = S2d(rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
+ src_reg = S2d(rl_src.reg.GetLowReg(), rl_src.reg.GetHighReg());
} else {
rl_src = LoadValue(rl_src, rcSrc);
src_reg = rl_src.reg.GetReg();
}
if (rl_dest.wide) {
rl_result = EvalLoc(rl_dest, kFPReg, true);
- NewLIR2(op, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()), src_reg);
+ NewLIR2(op, S2d(rl_result.reg.GetLowReg(), rl_result.reg.GetHighReg()), src_reg);
StoreValueWide(rl_dest, rl_result);
} else {
rl_result = EvalLoc(rl_dest, kFPReg, true);
@@ -307,14 +308,14 @@
src_reg2 = rl_src2.reg.GetReg();
} else {
rl_src1 = LoadValueWide(rl_src1, kFPReg);
- src_reg1 = S2d(rl_src1.reg.GetReg(), rl_src1.reg.GetHighReg());
+ src_reg1 = S2d(rl_src1.reg.GetLowReg(), rl_src1.reg.GetHighReg());
rl_src2 = LoadValueWide(rl_src2, kFPReg);
- src_reg2 = S2d(rl_src2.reg.GetReg(), rl_src2.reg.GetHighReg());
+ src_reg2 = S2d(rl_src2.reg.GetLowReg(), rl_src2.reg.GetHighReg());
}
// In case result vreg is also src vreg, break association to avoid useless copy by EvalLoc()
ClobberSReg(rl_dest.s_reg_low);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- LoadConstantNoClobber(rl_result.reg.GetReg(), unordered_gt ? 1 : 0);
+ LoadConstantNoClobber(rl_result.reg, unordered_gt ? 1 : 0);
if (single) {
NewLIR2(kX86UcomissRR, src_reg1, src_reg2);
} else {
@@ -357,8 +358,8 @@
rl_src2 = mir_graph_->GetSrcWide(mir, 2);
rl_src1 = LoadValueWide(rl_src1, kFPReg);
rl_src2 = LoadValueWide(rl_src2, kFPReg);
- NewLIR2(kX86UcomisdRR, S2d(rl_src1.reg.GetReg(), rl_src1.reg.GetHighReg()),
- S2d(rl_src2.reg.GetReg(), rl_src2.reg.GetHighReg()));
+ NewLIR2(kX86UcomisdRR, S2d(rl_src1.reg.GetLowReg(), rl_src1.reg.GetHighReg()),
+ S2d(rl_src2.reg.GetLowReg(), rl_src2.reg.GetHighReg()));
} else {
rl_src1 = mir_graph_->GetSrc(mir, 0);
rl_src2 = mir_graph_->GetSrc(mir, 1);
@@ -418,7 +419,7 @@
RegLocation rl_result;
rl_src = LoadValue(rl_src, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegRegImm(kOpAdd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), 0x80000000);
+ OpRegRegImm(kOpAdd, rl_result.reg, rl_src.reg, 0x80000000);
StoreValue(rl_dest, rl_result);
}
@@ -426,8 +427,8 @@
RegLocation rl_result;
rl_src = LoadValueWide(rl_src, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegRegImm(kOpAdd, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg(), 0x80000000);
- OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetReg());
+ OpRegRegImm(kOpAdd, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), 0x80000000);
+ OpRegCopy(rl_result.reg, rl_src.reg);
StoreValueWide(rl_dest, rl_result);
}
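
GenNegFloat and GenNegDouble avoid FP instructions entirely: adding 0x80000000 to the raw bits flips the IEEE-754 sign bit (the carry out of bit 31 is discarded, so the add is equivalent to an xor here), and the double case only touches the high word while copying the low word unchanged. A sketch of the single-precision case:

```cpp
#include <cstdint>
#include <cstring>

// Float negation via the sign bit, as the code above does in core registers.
float NegFloat(float f) {
  uint32_t bits;
  std::memcpy(&bits, &f, sizeof(bits));
  bits += 0x80000000u;  // kOpAdd of 0x80000000: toggles bit 31, carry discarded
  std::memcpy(&f, &bits, sizeof(bits));
  return f;
}
```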
@@ -436,8 +437,8 @@
RegLocation rl_dest = InlineTargetWide(info); // double place for result
rl_src = LoadValueWide(rl_src, kFPReg);
RegLocation rl_result = EvalLoc(rl_dest, kFPReg, true);
- NewLIR2(kX86SqrtsdRR, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()),
- S2d(rl_src.reg.GetReg(), rl_src.reg.GetHighReg()));
+ NewLIR2(kX86SqrtsdRR, S2d(rl_result.reg.GetLowReg(), rl_result.reg.GetHighReg()),
+ S2d(rl_src.reg.GetLowReg(), rl_src.reg.GetHighReg()));
StoreValueWide(rl_dest, rl_result);
return true;
}
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index 1fe0af9..5ef7060 100644
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -26,10 +26,10 @@
/*
 * Compare a register against memory and branch to a throw target if the condition holds.
*/
-LIR* X86Mir2Lir::GenRegMemCheck(ConditionCode c_code,
- int reg1, int base, int offset, ThrowKind kind) {
+LIR* X86Mir2Lir::GenRegMemCheck(ConditionCode c_code, RegStorage reg1, RegStorage base,
+ int offset, ThrowKind kind) {
LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind,
- current_dalvik_offset_, reg1, base, offset);
+ current_dalvik_offset_, reg1.GetReg(), base.GetReg(), offset);
OpRegMem(kOpCmp, reg1, base, offset);
LIR* branch = OpCondBranch(c_code, tgt);
// Remember branch target - will process later
@@ -40,11 +40,11 @@
/*
 * Compare memory against an immediate value and branch to a throw target if the condition holds.
*/
-LIR* X86Mir2Lir::GenMemImmedCheck(ConditionCode c_code,
- int base, int offset, int check_value, ThrowKind kind) {
+LIR* X86Mir2Lir::GenMemImmedCheck(ConditionCode c_code, RegStorage base, int offset,
+ int check_value, ThrowKind kind) {
LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind,
- current_dalvik_offset_, base, check_value, 0);
- NewLIR3(IS_SIMM8(check_value) ? kX86Cmp32MI8 : kX86Cmp32MI, base, offset, check_value);
+ current_dalvik_offset_, base.GetReg(), check_value, 0);
+ NewLIR3(IS_SIMM8(check_value) ? kX86Cmp32MI8 : kX86Cmp32MI, base.GetReg(), offset, check_value);
LIR* branch = OpCondBranch(c_code, tgt);
// Remember branch target - will process later
throw_launchpads_.Insert(tgt);
@@ -61,18 +61,20 @@
RegLocation rl_src2) {
FlushAllRegs();
LockCallTemps(); // Prepare for explicit register usage
- LoadValueDirectWideFixed(rl_src1, r0, r1);
- LoadValueDirectWideFixed(rl_src2, r2, r3);
+ RegStorage r_tmp1(RegStorage::k64BitPair, r0, r1);
+ RegStorage r_tmp2(RegStorage::k64BitPair, r2, r3);
+ LoadValueDirectWideFixed(rl_src1, r_tmp1);
+ LoadValueDirectWideFixed(rl_src2, r_tmp2);
// Compute (r1:r0) = (r1:r0) - (r3:r2)
- OpRegReg(kOpSub, r0, r2); // r0 = r0 - r2
- OpRegReg(kOpSbc, r1, r3); // r1 = r1 - r3 - CF
+ OpRegReg(kOpSub, rs_r0, rs_r2); // r0 = r0 - r2
+ OpRegReg(kOpSbc, rs_r1, rs_r3); // r1 = r1 - r3 - CF
NewLIR2(kX86Set8R, r2, kX86CondL); // r2 = (r1:r0) < (r3:r2) ? 1 : 0
NewLIR2(kX86Movzx8RR, r2, r2);
- OpReg(kOpNeg, r2); // r2 = -r2
- OpRegReg(kOpOr, r0, r1); // r0 = high | low - sets ZF
+ OpReg(kOpNeg, rs_r2); // r2 = -r2
+ OpRegReg(kOpOr, rs_r0, rs_r1); // r0 = high | low - sets ZF
NewLIR2(kX86Set8R, r0, kX86CondNz); // r0 = (r1:r0) != (r3:r2) ? 1 : 0
NewLIR2(kX86Movzx8RR, r0, r0);
- OpRegReg(kOpOr, r0, r2); // r0 = r0 | r2
+ OpRegReg(kOpOr, rs_r0, rs_r2); // r0 = r0 | r2
RegLocation rl_result = LocCReturn();
StoreValue(rl_dest, rl_result);
}
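
The cmp-long sequence produces the Dalvik three-way result without branching: the sign flag of the 64-bit subtraction gives -1 for less-than, an OR of the difference's halves gives the not-equal bit, and OR-ing the two yields -1, 0, or 1. The same computation in C++:

```cpp
#include <cstdint>

// Branch-free cmp-long as above: returns -1, 0, or 1.
int32_t CmpLong(int64_t a, int64_t b) {
  int32_t lt = (a < b) ? -1 : 0;  // set8(condL) on the sub/sbc result, then neg
  int32_t ne = (a != b) ? 1 : 0;  // or low|high of the difference, set8(condNz)
  return ne | lt;                 // -1 | 1 == -1 when a < b
}
```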
@@ -101,9 +103,8 @@
return kX86CondO;
}
-LIR* X86Mir2Lir::OpCmpBranch(ConditionCode cond, int src1, int src2,
- LIR* target) {
- NewLIR2(kX86Cmp32RR, src1, src2);
+LIR* X86Mir2Lir::OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) {
+ NewLIR2(kX86Cmp32RR, src1.GetReg(), src2.GetReg());
X86ConditionCode cc = X86ConditionEncoding(cond);
LIR* branch = NewLIR2(kX86Jcc8, 0 /* lir operand for Jcc offset */ ,
cc);
@@ -111,13 +112,13 @@
return branch;
}
-LIR* X86Mir2Lir::OpCmpImmBranch(ConditionCode cond, int reg,
+LIR* X86Mir2Lir::OpCmpImmBranch(ConditionCode cond, RegStorage reg,
int check_value, LIR* target) {
if ((check_value == 0) && (cond == kCondEq || cond == kCondNe)) {
// TODO: when check_value == 0 and reg is rCX, use the jcxz/nz opcode
- NewLIR2(kX86Test32RR, reg, reg);
+ NewLIR2(kX86Test32RR, reg.GetReg(), reg.GetReg());
} else {
- NewLIR2(IS_SIMM8(check_value) ? kX86Cmp32RI8 : kX86Cmp32RI, reg, check_value);
+ NewLIR2(IS_SIMM8(check_value) ? kX86Cmp32RI8 : kX86Cmp32RI, reg.GetReg(), check_value);
}
X86ConditionCode cc = X86ConditionEncoding(cond);
LIR* branch = NewLIR2(kX86Jcc8, 0 /* lir operand for Jcc offset */ , cc);
@@ -125,61 +126,70 @@
return branch;
}
-LIR* X86Mir2Lir::OpRegCopyNoInsert(int r_dest, int r_src) {
- if (X86_FPREG(r_dest) || X86_FPREG(r_src))
+LIR* X86Mir2Lir::OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) {
+ // If src or dest is a pair, we'll be using low reg.
+ if (r_dest.IsPair()) {
+ r_dest = r_dest.GetLow();
+ }
+ if (r_src.IsPair()) {
+ r_src = r_src.GetLow();
+ }
+ if (X86_FPREG(r_dest.GetReg()) || X86_FPREG(r_src.GetReg()))
return OpFpRegCopy(r_dest, r_src);
LIR* res = RawLIR(current_dalvik_offset_, kX86Mov32RR,
- r_dest, r_src);
+ r_dest.GetReg(), r_src.GetReg());
if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
res->flags.is_nop = true;
}
return res;
}
-LIR* X86Mir2Lir::OpRegCopy(int r_dest, int r_src) {
+LIR* X86Mir2Lir::OpRegCopy(RegStorage r_dest, RegStorage r_src) {
LIR *res = OpRegCopyNoInsert(r_dest, r_src);
AppendLIR(res);
return res;
}
-void X86Mir2Lir::OpRegCopyWide(int dest_lo, int dest_hi,
- int src_lo, int src_hi) {
- bool dest_fp = X86_FPREG(dest_lo) && X86_FPREG(dest_hi);
- bool src_fp = X86_FPREG(src_lo) && X86_FPREG(src_hi);
- assert(X86_FPREG(src_lo) == X86_FPREG(src_hi));
- assert(X86_FPREG(dest_lo) == X86_FPREG(dest_hi));
+void X86Mir2Lir::OpRegCopyWide(RegStorage r_dest, RegStorage r_src) {
+ // FIXME: handle k64BitSolo when we start using them.
+ DCHECK(r_dest.IsPair());
+ DCHECK(r_src.IsPair());
+ bool dest_fp = X86_FPREG(r_dest.GetLowReg());
+ bool src_fp = X86_FPREG(r_src.GetLowReg());
if (dest_fp) {
if (src_fp) {
- OpRegCopy(S2d(dest_lo, dest_hi), S2d(src_lo, src_hi));
+ // TODO: we ought to handle this case here - reserve OpRegCopy for 32-bit copies.
+ OpRegCopy(RegStorage::Solo64(S2d(r_dest.GetLowReg(), r_dest.GetHighReg())),
+ RegStorage::Solo64(S2d(r_src.GetLowReg(), r_src.GetHighReg())));
} else {
// TODO: Prevent this from happening in the code. The result is often
// unused or could have been loaded more easily from memory.
- NewLIR2(kX86MovdxrRR, dest_lo, src_lo);
- dest_hi = AllocTempDouble();
- NewLIR2(kX86MovdxrRR, dest_hi, src_hi);
- NewLIR2(kX86PunpckldqRR, dest_lo, dest_hi);
- FreeTemp(dest_hi);
+ NewLIR2(kX86MovdxrRR, r_dest.GetLowReg(), r_src.GetLowReg());
+ RegStorage r_tmp = AllocTempDouble();
+ NewLIR2(kX86MovdxrRR, r_tmp.GetLowReg(), r_src.GetHighReg());
+ NewLIR2(kX86PunpckldqRR, r_dest.GetLowReg(), r_tmp.GetLowReg());
+ FreeTemp(r_tmp);
}
} else {
if (src_fp) {
- NewLIR2(kX86MovdrxRR, dest_lo, src_lo);
- NewLIR2(kX86PsrlqRI, src_lo, 32);
- NewLIR2(kX86MovdrxRR, dest_hi, src_lo);
+ NewLIR2(kX86MovdrxRR, r_dest.GetLowReg(), r_src.GetLowReg());
+ NewLIR2(kX86PsrlqRI, r_src.GetLowReg(), 32);
+ NewLIR2(kX86MovdrxRR, r_dest.GetHighReg(), r_src.GetLowReg());
} else {
// Handle overlap
- if (src_hi == dest_lo && src_lo == dest_hi) {
+ if (r_src.GetHighReg() == r_dest.GetLowReg() && r_src.GetLowReg() == r_dest.GetHighReg()) {
// Deal with cycles.
- int temp_reg = AllocTemp();
- OpRegCopy(temp_reg, dest_hi);
- OpRegCopy(dest_hi, dest_lo);
- OpRegCopy(dest_lo, temp_reg);
+ RegStorage temp_reg = AllocTemp();
+ OpRegCopy(temp_reg, r_dest.GetHigh());
+ OpRegCopy(r_dest.GetHigh(), r_dest.GetLow());
+ OpRegCopy(r_dest.GetLow(), temp_reg);
FreeTemp(temp_reg);
- } else if (src_hi == dest_lo) {
- OpRegCopy(dest_hi, src_hi);
- OpRegCopy(dest_lo, src_lo);
+ } else if (r_src.GetHighReg() == r_dest.GetLowReg()) {
+ OpRegCopy(r_dest.GetHigh(), r_src.GetHigh());
+ OpRegCopy(r_dest.GetLow(), r_src.GetLow());
} else {
- OpRegCopy(dest_lo, src_lo);
- OpRegCopy(dest_hi, src_hi);
+ OpRegCopy(r_dest.GetLow(), r_src.GetLow());
+ OpRegCopy(r_dest.GetHigh(), r_src.GetHigh());
}
}
}
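
The core-to-core branch above is careful about aliasing between the two pairs: a full swap needs a temporary, a half overlap is fixed by copy order, and the disjoint case copies low then high. The same three cases on plain ints:

```cpp
#include <utility>

// Aliasing cases from OpRegCopyWide's core-register branch.
void CopyWide(int* dest_lo, int* dest_hi, int* src_lo, int* src_hi) {
  if (src_hi == dest_lo && src_lo == dest_hi) {
    std::swap(*dest_lo, *dest_hi);  // full cycle: swap through a temp
  } else if (src_hi == dest_lo) {
    *dest_hi = *src_hi;             // copy high first so the low source survives
    *dest_lo = *src_lo;
  } else {
    *dest_lo = *src_lo;             // disjoint (or low overlap): low then high
    *dest_hi = *src_hi;
  }
}
```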
@@ -219,30 +229,31 @@
* mov t1, $true_case
* cmovz result_reg, t1
*/
- const bool result_reg_same_as_src = (rl_src.location == kLocPhysReg && rl_src.reg.GetReg() == rl_result.reg.GetReg());
+ const bool result_reg_same_as_src =
+ (rl_src.location == kLocPhysReg && rl_src.reg.GetReg() == rl_result.reg.GetReg());
const bool true_zero_case = (true_val == 0 && false_val != 0 && !result_reg_same_as_src);
const bool false_zero_case = (false_val == 0 && true_val != 0 && !result_reg_same_as_src);
const bool catch_all_case = !(true_zero_case || false_zero_case);
if (true_zero_case || false_zero_case) {
- OpRegReg(kOpXor, rl_result.reg.GetReg(), rl_result.reg.GetReg());
+ OpRegReg(kOpXor, rl_result.reg, rl_result.reg);
}
if (true_zero_case || false_zero_case || catch_all_case) {
- OpRegImm(kOpCmp, rl_src.reg.GetReg(), 0);
+ OpRegImm(kOpCmp, rl_src.reg, 0);
}
if (catch_all_case) {
- OpRegImm(kOpMov, rl_result.reg.GetReg(), false_val);
+ OpRegImm(kOpMov, rl_result.reg, false_val);
}
if (true_zero_case || false_zero_case || catch_all_case) {
ConditionCode cc = true_zero_case ? NegateComparison(ccode) : ccode;
int immediateForTemp = true_zero_case ? false_val : true_val;
- int temp1_reg = AllocTemp();
+ RegStorage temp1_reg = AllocTemp();
OpRegImm(kOpMov, temp1_reg, immediateForTemp);
- OpCondRegReg(kOpCmov, cc, rl_result.reg.GetReg(), temp1_reg);
+ OpCondRegReg(kOpCmov, cc, rl_result.reg, temp1_reg);
FreeTemp(temp1_reg);
}
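
When one arm of the select is the constant zero, the code saves an instruction by zeroing the result with xor up front and conditionally overwriting it with a cmov; negating the condition covers the case where the true arm is the zero. A sketch assuming ccode == kCondEq (src == 0 picks true_val) and exactly one zero constant:

```cpp
#include <cstdint>

// Zero-arm select as above: pre-load the zero arm, cmov the other one in.
int32_t SelectConstZero(int32_t src, int32_t true_val, int32_t false_val) {
  int32_t result = 0;                   // xor result, result
  if (true_val == 0) {
    if (src != 0) result = false_val;   // cmovne: negated condition
  } else {
    if (src == 0) result = true_val;    // cmove
  }
  return result;
}
```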
@@ -269,15 +280,15 @@
*/
// kMirOpSelect is generated just for conditional cases when comparison is done with zero.
- OpRegImm(kOpCmp, rl_src.reg.GetReg(), 0);
+ OpRegImm(kOpCmp, rl_src.reg, 0);
if (rl_result.reg.GetReg() == rl_true.reg.GetReg()) {
- OpCondRegReg(kOpCmov, NegateComparison(ccode), rl_result.reg.GetReg(), rl_false.reg.GetReg());
+ OpCondRegReg(kOpCmov, NegateComparison(ccode), rl_result.reg, rl_false.reg);
} else if (rl_result.reg.GetReg() == rl_false.reg.GetReg()) {
- OpCondRegReg(kOpCmov, ccode, rl_result.reg.GetReg(), rl_true.reg.GetReg());
+ OpCondRegReg(kOpCmov, ccode, rl_result.reg, rl_true.reg);
} else {
- OpRegCopy(rl_result.reg.GetReg(), rl_false.reg.GetReg());
- OpCondRegReg(kOpCmov, ccode, rl_result.reg.GetReg(), rl_true.reg.GetReg());
+ OpRegCopy(rl_result.reg, rl_false.reg);
+ OpCondRegReg(kOpCmov, ccode, rl_result.reg, rl_true.reg);
}
}
@@ -303,22 +314,24 @@
FlushAllRegs();
LockCallTemps(); // Prepare for explicit register usage
- LoadValueDirectWideFixed(rl_src1, r0, r1);
- LoadValueDirectWideFixed(rl_src2, r2, r3);
+ RegStorage r_tmp1(RegStorage::k64BitPair, r0, r1);
+ RegStorage r_tmp2(RegStorage::k64BitPair, r2, r3);
+ LoadValueDirectWideFixed(rl_src1, r_tmp1);
+ LoadValueDirectWideFixed(rl_src2, r_tmp2);
// Swap operands and condition code to prevent use of zero flag.
if (ccode == kCondLe || ccode == kCondGt) {
// Compute (r3:r2) = (r3:r2) - (r1:r0)
- OpRegReg(kOpSub, r2, r0); // r2 = r2 - r0
- OpRegReg(kOpSbc, r3, r1); // r3 = r3 - r1 - CF
+ OpRegReg(kOpSub, rs_r2, rs_r0); // r2 = r2 - r0
+ OpRegReg(kOpSbc, rs_r3, rs_r1); // r3 = r3 - r1 - CF
} else {
// Compute (r1:r0) = (r1:r0) - (r3:r2)
- OpRegReg(kOpSub, r0, r2); // r0 = r0 - r2
- OpRegReg(kOpSbc, r1, r3); // r1 = r1 - r3 - CF
+ OpRegReg(kOpSub, rs_r0, rs_r2); // r0 = r0 - r2
+ OpRegReg(kOpSbc, rs_r1, rs_r3); // r1 = r1 - r3 - CF
}
switch (ccode) {
case kCondEq:
case kCondNe:
- OpRegReg(kOpOr, r0, r1); // r0 = r0 | r1
+ OpRegReg(kOpOr, rs_r0, rs_r1); // r0 = r0 | r1
break;
case kCondLe:
ccode = kCondGe;
@@ -342,11 +355,11 @@
LIR* taken = &block_label_list_[bb->taken];
LIR* not_taken = &block_label_list_[bb->fall_through];
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
- int32_t low_reg = rl_src1.reg.GetReg();
- int32_t high_reg = rl_src1.reg.GetHighReg();
+ RegStorage low_reg = rl_src1.reg.GetLow();
+ RegStorage high_reg = rl_src1.reg.GetHigh();
if (val == 0 && (ccode == kCondEq || ccode == kCondNe)) {
- int t_reg = AllocTemp();
+ RegStorage t_reg = AllocTemp();
OpRegRegReg(kOpOr, t_reg, low_reg, high_reg);
FreeTemp(t_reg);
OpCondBranch(ccode, taken);
@@ -450,8 +463,7 @@
shift = p - 32;
}
-RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest, int reg_lo,
- int lit, bool is_div) {
+RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div) {
LOG(FATAL) << "Unexpected use of GenDivRemLit for x86";
return rl_dest;
}
@@ -465,8 +477,8 @@
LockCallTemps(); // Prepare for explicit register usage.
// Assume that the result will be in EDX.
- RegLocation rl_result = {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
- RegStorage(RegStorage::k32BitSolo, r2), INVALID_SREG, INVALID_SREG};
+ RegLocation rl_result = {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, rs_r2,
+ INVALID_SREG, INVALID_SREG};
// handle div/rem by 1 special case.
if (imm == 1) {
@@ -475,15 +487,15 @@
StoreValue(rl_result, rl_src);
} else {
// x % 1 == 0.
- LoadConstantNoClobber(r0, 0);
+ LoadConstantNoClobber(rs_r0, 0);
// For this case, return the result in EAX.
rl_result.reg.SetReg(r0);
}
} else if (imm == -1) { // handle 0x80000000 / -1 special case.
if (is_div) {
LIR *minint_branch = 0;
- LoadValueDirectFixed(rl_src, r0);
- OpRegImm(kOpCmp, r0, 0x80000000);
+ LoadValueDirectFixed(rl_src, rs_r0);
+ OpRegImm(kOpCmp, rs_r0, 0x80000000);
minint_branch = NewLIR2(kX86Jcc8, 0, kX86CondEq);
// for x != MIN_INT, x / -1 == -x.
@@ -496,7 +508,7 @@
branch_around->target = NewLIR0(kPseudoTargetLabel);
} else {
// x % -1 == 0.
- LoadConstantNoClobber(r0, 0);
+ LoadConstantNoClobber(rs_r0, 0);
}
// For this case, return the result in EAX.
rl_result.reg.SetReg(r0);
@@ -524,36 +536,36 @@
*/
// Numerator into EAX.
- int numerator_reg = -1;
+ RegStorage numerator_reg;
if (!is_div || (imm > 0 && magic < 0) || (imm < 0 && magic > 0)) {
// We will need the value later.
if (rl_src.location == kLocPhysReg) {
// We can use it directly.
DCHECK(rl_src.reg.GetReg() != r0 && rl_src.reg.GetReg() != r2);
- numerator_reg = rl_src.reg.GetReg();
+ numerator_reg = rl_src.reg;
} else {
- LoadValueDirectFixed(rl_src, r1);
- numerator_reg = r1;
+ numerator_reg = rs_r1;
+ LoadValueDirectFixed(rl_src, numerator_reg);
}
- OpRegCopy(r0, numerator_reg);
+ OpRegCopy(rs_r0, numerator_reg);
} else {
// Only need this once. Just put it into EAX.
- LoadValueDirectFixed(rl_src, r0);
+ LoadValueDirectFixed(rl_src, rs_r0);
}
// EDX = magic.
- LoadConstantNoClobber(r2, magic);
+ LoadConstantNoClobber(rs_r2, magic);
// EDX:EAX = magic & dividend.
NewLIR1(kX86Imul32DaR, r2);
if (imm > 0 && magic < 0) {
// Add numerator to EDX.
- DCHECK_NE(numerator_reg, -1);
- NewLIR2(kX86Add32RR, r2, numerator_reg);
+ DCHECK(numerator_reg.Valid());
+ NewLIR2(kX86Add32RR, r2, numerator_reg.GetReg());
} else if (imm < 0 && magic > 0) {
- DCHECK_NE(numerator_reg, -1);
- NewLIR2(kX86Sub32RR, r2, numerator_reg);
+ DCHECK(numerator_reg.Valid());
+ NewLIR2(kX86Sub32RR, r2, numerator_reg.GetReg());
}
// Do we need the shift?
@@ -565,7 +577,7 @@
// Add 1 to EDX if EDX < 0.
// Move EDX to EAX.
- OpRegCopy(r0, r2);
+ OpRegCopy(rs_r0, rs_r2);
// Move sign bit to bit 0, zeroing the rest.
NewLIR2(kX86Shr32RI, r2, 31);
@@ -577,11 +589,11 @@
if (!is_div) {
// We need to compute the remainder.
// Remainder is divisor - (quotient * imm).
- DCHECK_NE(numerator_reg, -1);
- OpRegCopy(r0, numerator_reg);
+ DCHECK(numerator_reg.Valid());
+ OpRegCopy(rs_r0, numerator_reg);
// EAX = numerator * imm.
- OpRegRegImm(kOpMul, r2, r2, imm);
+ OpRegRegImm(kOpMul, rs_r2, rs_r2, imm);
// EDX -= EAX.
NewLIR2(kX86Sub32RR, r0, r2);
@@ -594,8 +606,8 @@
return rl_result;
}
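// A minimal sketch of the magic-number division that the code above emits,
// assuming 'magic' and 'shift' were computed for divisor 'd' as in the helper
// earlier in this file (the function name and types here are illustrative):
//   static int32_t DivByConstSketch(int32_t n, int32_t d, int32_t magic, int shift) {
//     int64_t prod = static_cast<int64_t>(magic) * n;  // EDX:EAX = magic * dividend.
//     int32_t hi = static_cast<int32_t>(prod >> 32);   // Keep EDX.
//     if (d > 0 && magic < 0) hi += n;                 // Add numerator to EDX.
//     if (d < 0 && magic > 0) hi -= n;                 // Subtract numerator from EDX.
//     hi >>= shift;                                    // Apply the shift, if any.
//     hi += static_cast<uint32_t>(hi) >> 31;           // Add 1 if the quotient is negative.
//     return hi;
//   }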
-RegLocation X86Mir2Lir::GenDivRem(RegLocation rl_dest, int reg_lo,
- int reg_hi, bool is_div) {
+RegLocation X86Mir2Lir::GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi,
+ bool is_div) {
LOG(FATAL) << "Unexpected use of GenDivRem for x86";
return rl_dest;
}
@@ -607,31 +619,31 @@
LockCallTemps(); // Prepare for explicit register usage.
// Load LHS into EAX.
- LoadValueDirectFixed(rl_src1, r0);
+ LoadValueDirectFixed(rl_src1, rs_r0);
// Load RHS into EBX.
- LoadValueDirectFixed(rl_src2, r1);
+ LoadValueDirectFixed(rl_src2, rs_r1);
// Copy LHS sign bit into EDX.
NewLIR0(kx86Cdq32Da);
if (check_zero) {
// Handle division by zero case.
- GenImmedCheck(kCondEq, r1, 0, kThrowDivZero);
+ GenImmedCheck(kCondEq, rs_r1, 0, kThrowDivZero);
}
// Have to catch 0x80000000/-1 case, or we will get an exception!
- OpRegImm(kOpCmp, r1, -1);
+ OpRegImm(kOpCmp, rs_r1, -1);
LIR *minus_one_branch = NewLIR2(kX86Jcc8, 0, kX86CondNe);
// RHS is -1.
- OpRegImm(kOpCmp, r0, 0x80000000);
+ OpRegImm(kOpCmp, rs_r0, 0x80000000);
LIR* minint_branch = NewLIR2(kX86Jcc8, 0, kX86CondNe);
// In 0x80000000/-1 case.
if (!is_div) {
// For DIV, EAX is already right. For REM, we need EDX to be 0.
- LoadConstantNoClobber(r2, 0);
+ LoadConstantNoClobber(rs_r2, 0);
}
LIR* done = NewLIR1(kX86Jmp8, 0);
@@ -642,8 +654,8 @@
done->target = NewLIR0(kPseudoTargetLabel);
// Result is in EAX for div and EDX for rem.
- RegLocation rl_result = {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
- RegStorage(RegStorage::k32BitSolo, r0), INVALID_SREG, INVALID_SREG};
+ RegLocation rl_result = {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, rs_r0,
+ INVALID_SREG, INVALID_SREG};
if (!is_div) {
rl_result.reg.SetReg(r2);
}
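// For reference (standard x86 semantics, not specific to this patch): cdq
// sign-extends EAX into EDX, and idiv divides EDX:EAX by its operand, leaving
// the quotient in EAX and the remainder in EDX. idiv raises #DE on division
// by zero and on 0x80000000 / -1, which is why both cases are checked first.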
@@ -672,17 +684,17 @@
}
// Pick the first integer as min/max.
- OpRegCopy(rl_result.reg.GetReg(), rl_src1.reg.GetReg());
+ OpRegCopy(rl_result.reg, rl_src1.reg);
// If the integers are both in the same register, then there is nothing else to do
// because they are equal and we have already moved one into the result.
if (rl_src1.reg.GetReg() != rl_src2.reg.GetReg()) {
// It is possible we didn't pick correctly so do the actual comparison now.
- OpRegReg(kOpCmp, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+ OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
// Conditionally move the other integer into the destination register.
ConditionCode condition_code = is_min ? kCondGt : kCondLt;
- OpCondRegReg(kOpCmov, condition_code, rl_result.reg.GetReg(), rl_src2.reg.GetReg());
+ OpCondRegReg(kOpCmov, condition_code, rl_result.reg, rl_src2.reg);
}
StoreValue(rl_dest, rl_result);
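// Illustrative sequence for the is_min case above (is_max flips the condition):
//   mov   result, src1
//   cmp   src1, src2
//   cmovg result, src2    ; if src1 > src2, the minimum is src2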
@@ -691,18 +703,18 @@
bool X86Mir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) {
RegLocation rl_src_address = info->args[0]; // long address
- rl_src_address.wide = 0; // ignore high half in info->args[1]
+ rl_src_address = NarrowRegLoc(rl_src_address); // ignore high half in info->args[1]
RegLocation rl_dest = size == kLong ? InlineTargetWide(info) : InlineTarget(info);
RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (size == kLong) {
// Unaligned access is allowed on x86.
- LoadBaseDispWide(rl_address.reg.GetReg(), 0, rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), INVALID_SREG);
+ LoadBaseDispWide(rl_address.reg, 0, rl_result.reg, INVALID_SREG);
StoreValueWide(rl_dest, rl_result);
} else {
DCHECK(size == kSignedByte || size == kSignedHalf || size == kWord);
// Unaligned access is allowed on x86.
- LoadBaseDisp(rl_address.reg.GetReg(), 0, rl_result.reg.GetReg(), size, INVALID_SREG);
+ LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size, INVALID_SREG);
StoreValue(rl_dest, rl_result);
}
return true;
@@ -710,46 +722,54 @@
bool X86Mir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
RegLocation rl_src_address = info->args[0]; // long address
- rl_src_address.wide = 0; // ignore high half in info->args[1]
+ rl_src_address = NarrowRegLoc(rl_src_address); // ignore high half in info->args[1]
RegLocation rl_src_value = info->args[2]; // [size] value
RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);
if (size == kLong) {
// Unaligned access is allowed on x86.
RegLocation rl_value = LoadValueWide(rl_src_value, kCoreReg);
- StoreBaseDispWide(rl_address.reg.GetReg(), 0, rl_value.reg.GetReg(), rl_value.reg.GetHighReg());
+ StoreBaseDispWide(rl_address.reg, 0, rl_value.reg);
} else {
DCHECK(size == kSignedByte || size == kSignedHalf || size == kWord);
// Unaligned access is allowed on x86.
RegLocation rl_value = LoadValue(rl_src_value, kCoreReg);
- StoreBaseDisp(rl_address.reg.GetReg(), 0, rl_value.reg.GetReg(), size);
+ StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size);
}
return true;
}
-void X86Mir2Lir::OpLea(int rBase, int reg1, int reg2, int scale, int offset) {
- NewLIR5(kX86Lea32RA, rBase, reg1, reg2, scale, offset);
+void X86Mir2Lir::OpLea(RegStorage r_base, RegStorage reg1, RegStorage reg2, int scale, int offset) {
+ NewLIR5(kX86Lea32RA, r_base.GetReg(), reg1.GetReg(), reg2.GetReg(), scale, offset);
}
void X86Mir2Lir::OpTlsCmp(ThreadOffset offset, int val) {
NewLIR2(kX86Cmp16TI8, offset.Int32Value(), val);
}
+static bool IsInReg(X86Mir2Lir *pMir2Lir, const RegLocation &rl, RegStorage reg) {
+ return rl.reg.Valid() && rl.reg.GetReg() == reg.GetReg() && (pMir2Lir->IsLive(reg) || rl.home);
+}
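+// A location counts as "in reg" when its RegLocation names that physical
+// register and the register is either live or the location's home.
+// GenInlinedCas below uses this to decide whether the object and offset can
+// be reloaded from the just-pushed copies of ESI/EDI rather than from their
+// Dalvik frame slots.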
+
bool X86Mir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
DCHECK_EQ(cu_->instruction_set, kX86);
// Unused - RegLocation rl_src_unsafe = info->args[0];
RegLocation rl_src_obj = info->args[1]; // Object - known non-null
RegLocation rl_src_offset = info->args[2]; // long low
- rl_src_offset.wide = 0; // ignore high half in info->args[3]
+ rl_src_offset = NarrowRegLoc(rl_src_offset); // ignore high half in info->args[3]
RegLocation rl_src_expected = info->args[4]; // int, long or Object
// If is_long, high half is in info->args[5]
RegLocation rl_src_new_value = info->args[is_long ? 6 : 5]; // int, long or Object
// If is_long, high half is in info->args[7]
if (is_long) {
+ // TODO: avoid unnecessary loads of SI and DI when the values are in registers.
+ // TODO: CFI support.
FlushAllRegs();
LockCallTemps();
- LoadValueDirectWideFixed(rl_src_expected, rAX, rDX);
- LoadValueDirectWideFixed(rl_src_new_value, rBX, rCX);
+ RegStorage r_tmp1(RegStorage::k64BitPair, rAX, rDX);
+ RegStorage r_tmp2(RegStorage::k64BitPair, rBX, rCX);
+ LoadValueDirectWideFixed(rl_src_expected, r_tmp1);
+ LoadValueDirectWideFixed(rl_src_new_value, r_tmp2);
NewLIR1(kX86Push32R, rDI);
MarkTemp(rDI);
LockTemp(rDI);
@@ -757,9 +777,20 @@
MarkTemp(rSI);
LockTemp(rSI);
const int push_offset = 4 /* push edi */ + 4 /* push esi */;
- LoadWordDisp(TargetReg(kSp), SRegOffset(rl_src_obj.s_reg_low) + push_offset, rDI);
- LoadWordDisp(TargetReg(kSp), SRegOffset(rl_src_offset.s_reg_low) + push_offset, rSI);
+ int srcObjSp = IsInReg(this, rl_src_obj, rs_rSI) ? 0
+ : (IsInReg(this, rl_src_obj, rs_rDI) ? 4
+ : (SRegOffset(rl_src_obj.s_reg_low) + push_offset));
+ LoadWordDisp(TargetReg(kSp), srcObjSp, rs_rDI);
+ int srcOffsetSp = IsInReg(this, rl_src_offset, rs_rSI) ? 0
+ : (IsInReg(this, rl_src_offset, rs_rDI) ? 4
+ : (SRegOffset(rl_src_offset.s_reg_low) + push_offset));
+ LoadWordDisp(TargetReg(kSp), srcOffsetSp, rs_rSI);
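+ // For reference: after the two pushes above, the saved ESI sits at [ESP + 0]
+ // and the saved EDI at [ESP + 4], so a value that was live in rSI or rDI is
+ // reloaded from those slots; anything else comes from its Dalvik frame slot
+ // displaced by push_offset.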
NewLIR4(kX86LockCmpxchg8bA, rDI, rSI, 0, 0);
+
+ // After a store we need to insert a barrier in case of a potential load. Since the
+ // locked cmpxchg has full barrier semantics, only a scheduling barrier will be generated.
+ GenMemBarrier(kStoreLoad);
+
FreeTemp(rSI);
UnmarkTemp(rSI);
NewLIR1(kX86Pop32R, rSI);
@@ -769,11 +800,8 @@
FreeCallTemps();
} else {
// EAX must hold expected for CMPXCHG. Neither rl_new_value, nor r_ptr may be in EAX.
- FlushReg(r0);
- LockTemp(r0);
-
- // Release store semantics, get the barrier out of the way. TODO: revisit
- GenMemBarrier(kStoreLoad);
+ FlushReg(rs_r0);
+ LockTemp(rs_r0);
RegLocation rl_object = LoadValue(rl_src_obj, kCoreReg);
RegLocation rl_new_value = LoadValue(rl_src_new_value, kCoreReg);
@@ -781,14 +809,18 @@
if (is_object && !mir_graph_->IsConstantNullRef(rl_new_value)) {
// Mark card for object assuming new value is stored.
FreeTemp(r0); // Temporarily release EAX for MarkGCCard().
- MarkGCCard(rl_new_value.reg.GetReg(), rl_object.reg.GetReg());
+ MarkGCCard(rl_new_value.reg, rl_object.reg);
LockTemp(r0);
}
RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
- LoadValueDirect(rl_src_expected, r0);
+ LoadValueDirect(rl_src_expected, rs_r0);
NewLIR5(kX86LockCmpxchgAR, rl_object.reg.GetReg(), rl_offset.reg.GetReg(), 0, 0, rl_new_value.reg.GetReg());
+ // After a store we need to insert a barrier in case of a potential load. Since the
+ // locked cmpxchg has full barrier semantics, only a scheduling barrier will be generated.
+ GenMemBarrier(kStoreLoad);
+
FreeTemp(r0);
}
@@ -801,7 +833,7 @@
return true;
}
-LIR* X86Mir2Lir::OpPcRelLoad(int reg, LIR* target) {
+LIR* X86Mir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
CHECK(base_of_code_ != nullptr);
// Address the start of the method
@@ -813,7 +845,8 @@
// We don't know the proper offset for the value, so pick one that will force
// a 4-byte offset. We will fix this up in the assembler later to have the
// right value.
- LIR *res = RawLIR(current_dalvik_offset_, kX86Mov32RM, reg, reg, 256, 0, 0, target);
+ LIR *res = RawLIR(current_dalvik_offset_, kX86Mov32RM, reg.GetReg(), reg.GetReg(), 256,
+ 0, 0, target);
res->target = target;
res->flags.fixup = kFixupLoad;
SetMemRefType(res, true, kLiteral);
@@ -821,12 +854,12 @@
return res;
}
-LIR* X86Mir2Lir::OpVldm(int rBase, int count) {
+LIR* X86Mir2Lir::OpVldm(RegStorage r_base, int count) {
LOG(FATAL) << "Unexpected use of OpVldm for x86";
return NULL;
}
-LIR* X86Mir2Lir::OpVstm(int rBase, int count) {
+LIR* X86Mir2Lir::OpVstm(RegStorage r_base, int count) {
LOG(FATAL) << "Unexpected use of OpVstm for x86";
return NULL;
}
@@ -834,22 +867,22 @@
void X86Mir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
RegLocation rl_result, int lit,
int first_bit, int second_bit) {
- int t_reg = AllocTemp();
- OpRegRegImm(kOpLsl, t_reg, rl_src.reg.GetReg(), second_bit - first_bit);
- OpRegRegReg(kOpAdd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), t_reg);
+ RegStorage t_reg = AllocTemp();
+ OpRegRegImm(kOpLsl, t_reg, rl_src.reg, second_bit - first_bit);
+ OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, t_reg);
FreeTemp(t_reg);
if (first_bit != 0) {
- OpRegRegImm(kOpLsl, rl_result.reg.GetReg(), rl_result.reg.GetReg(), first_bit);
+ OpRegRegImm(kOpLsl, rl_result.reg, rl_result.reg, first_bit);
}
}
-void X86Mir2Lir::GenDivZeroCheck(int reg_lo, int reg_hi) {
- // We are not supposed to clobber either of the provided registers, so allocate
- // a temporary to use for the check.
- int t_reg = AllocTemp();
+void X86Mir2Lir::GenDivZeroCheck(RegStorage reg) {
+ DCHECK(reg.IsPair()); // TODO: allow 64BitSolo.
+ // We are not supposed to clobber the incoming storage, so allocate a temporary.
+ RegStorage t_reg = AllocTemp();
// Doing an OR is a quick way to check if both registers are zero. This will set the flags.
- OpRegRegReg(kOpOr, t_reg, reg_lo, reg_hi);
+ OpRegRegReg(kOpOr, t_reg, reg.GetLow(), reg.GetHigh());
// In case of zero, throw ArithmeticException.
GenCheck(kCondEq, kThrowDivZero);
@@ -865,7 +898,7 @@
}
// Decrement register and branch on condition
-LIR* X86Mir2Lir::OpDecAndBranch(ConditionCode c_code, int reg, LIR* target) {
+LIR* X86Mir2Lir::OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) {
OpRegImm(kOpSub, reg, 1);
return OpCondBranch(c_code, target);
}
@@ -876,15 +909,20 @@
return false;
}
+bool X86Mir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
+ LOG(FATAL) << "Unexpected use of easyMultiply in x86";
+ return false;
+}
+
LIR* X86Mir2Lir::OpIT(ConditionCode cond, const char* guide) {
LOG(FATAL) << "Unexpected use of OpIT in x86";
return NULL;
}
-void X86Mir2Lir::GenImulRegImm(int dest, int src, int val) {
+void X86Mir2Lir::GenImulRegImm(RegStorage dest, RegStorage src, int val) {
switch (val) {
case 0:
- NewLIR2(kX86Xor32RR, dest, dest);
+ NewLIR2(kX86Xor32RR, dest.GetReg(), dest.GetReg());
break;
case 1:
OpRegCopy(dest, src);
@@ -895,17 +933,17 @@
}
}
-void X86Mir2Lir::GenImulMemImm(int dest, int sreg, int displacement, int val) {
+void X86Mir2Lir::GenImulMemImm(RegStorage dest, int sreg, int displacement, int val) {
LIR *m;
switch (val) {
case 0:
- NewLIR2(kX86Xor32RR, dest, dest);
+ NewLIR2(kX86Xor32RR, dest.GetReg(), dest.GetReg());
break;
case 1:
- LoadBaseDisp(rX86_SP, displacement, dest, kWord, sreg);
+ LoadBaseDisp(rs_rX86_SP, displacement, dest, kWord, sreg);
break;
default:
- m = NewLIR4(IS_SIMM8(val) ? kX86Imul32RMI8 : kX86Imul32RMI, dest, rX86_SP,
+ m = NewLIR4(IS_SIMM8(val) ? kX86Imul32RMI8 : kX86Imul32RMI, dest.GetReg(), rX86_SP,
displacement, val);
AnnotateDalvikRegAccess(m, displacement >> 2, true /* is_load */, true /* is_64bit */);
break;
@@ -923,8 +961,8 @@
int64_t val = mir_graph_->ConstantValueWide(rl_src2);
if (val == 0) {
RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
- OpRegReg(kOpXor, rl_result.reg.GetReg(), rl_result.reg.GetReg());
- OpRegReg(kOpXor, rl_result.reg.GetHighReg(), rl_result.reg.GetHighReg());
+ OpRegReg(kOpXor, rl_result.reg.GetLow(), rl_result.reg.GetLow());
+ OpRegReg(kOpXor, rl_result.reg.GetHigh(), rl_result.reg.GetHigh());
StoreValueWide(rl_dest, rl_result);
return;
} else if (val == 1) {
@@ -956,22 +994,22 @@
// ECX <- 1H * 2L
// EAX <- 1L * 2H
if (src1_in_reg) {
- GenImulRegImm(r1, rl_src1.reg.GetHighReg(), val_lo);
- GenImulRegImm(r0, rl_src1.reg.GetReg(), val_hi);
+ GenImulRegImm(rs_r1, rl_src1.reg.GetHigh(), val_lo);
+ GenImulRegImm(rs_r0, rl_src1.reg.GetLow(), val_hi);
} else {
- GenImulMemImm(r1, GetSRegHi(rl_src1.s_reg_low), displacement + HIWORD_OFFSET, val_lo);
- GenImulMemImm(r0, rl_src1.s_reg_low, displacement + LOWORD_OFFSET, val_hi);
+ GenImulMemImm(rs_r1, GetSRegHi(rl_src1.s_reg_low), displacement + HIWORD_OFFSET, val_lo);
+ GenImulMemImm(rs_r0, rl_src1.s_reg_low, displacement + LOWORD_OFFSET, val_hi);
}
// ECX <- ECX + EAX (2H * 1L) + (1H * 2L)
NewLIR2(kX86Add32RR, r1, r0);
// EAX <- 2L
- LoadConstantNoClobber(r0, val_lo);
+ LoadConstantNoClobber(rs_r0, val_lo);
// EDX:EAX <- 2L * 1L (double precision)
if (src1_in_reg) {
- NewLIR1(kX86Mul32DaR, rl_src1.reg.GetReg());
+ NewLIR1(kX86Mul32DaR, rl_src1.reg.GetLowReg());
} else {
LIR *m = NewLIR2(kX86Mul32DaM, rX86_SP, displacement + LOWORD_OFFSET);
AnnotateDalvikRegAccess(m, (displacement + LOWORD_OFFSET) >> 2,
@@ -983,7 +1021,7 @@
// Result is EDX:EAX
RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
- RegStorage(RegStorage::k64BitPair, r0, r2),
+ RegStorage::MakeRegPair(rs_r0, rs_r2),
INVALID_SREG, INVALID_SREG};
StoreValueWide(rl_dest, rl_result);
return;
@@ -1007,7 +1045,7 @@
if (src1_in_reg) {
NewLIR2(kX86Mov32RR, r1, rl_src1.reg.GetHighReg());
} else {
- LoadBaseDisp(rX86_SP, SRegOffset(rl_src1.s_reg_low) + HIWORD_OFFSET, r1,
+ LoadBaseDisp(rs_rX86_SP, SRegOffset(rl_src1.s_reg_low) + HIWORD_OFFSET, rs_r1,
kWord, GetSRegHi(rl_src1.s_reg_low));
}
@@ -1015,7 +1053,7 @@
// Take advantage of the fact that the values are the same.
// ECX <- ECX * 2L (1H * 2L)
if (src2_in_reg) {
- NewLIR2(kX86Imul32RR, r1, rl_src2.reg.GetReg());
+ NewLIR2(kX86Imul32RR, r1, rl_src2.reg.GetLowReg());
} else {
int displacement = SRegOffset(rl_src2.s_reg_low);
LIR *m = NewLIR3(kX86Imul32RM, r1, rX86_SP, displacement + LOWORD_OFFSET);
@@ -1030,13 +1068,13 @@
if (src2_in_reg) {
NewLIR2(kX86Mov32RR, r0, rl_src2.reg.GetHighReg());
} else {
- LoadBaseDisp(rX86_SP, SRegOffset(rl_src2.s_reg_low) + HIWORD_OFFSET, r0,
+ LoadBaseDisp(rs_rX86_SP, SRegOffset(rl_src2.s_reg_low) + HIWORD_OFFSET, rs_r0,
kWord, GetSRegHi(rl_src2.s_reg_low));
}
// EAX <- EAX * 1L (2H * 1L)
if (src1_in_reg) {
- NewLIR2(kX86Imul32RR, r0, rl_src1.reg.GetReg());
+ NewLIR2(kX86Imul32RR, r0, rl_src1.reg.GetLowReg());
} else {
int displacement = SRegOffset(rl_src1.s_reg_low);
LIR *m = NewLIR3(kX86Imul32RM, r0, rX86_SP, displacement + LOWORD_OFFSET);
@@ -1046,7 +1084,7 @@
// ECX <- ECX * 2L (1H * 2L)
if (src2_in_reg) {
- NewLIR2(kX86Imul32RR, r1, rl_src2.reg.GetReg());
+ NewLIR2(kX86Imul32RR, r1, rl_src2.reg.GetLowReg());
} else {
int displacement = SRegOffset(rl_src2.s_reg_low);
LIR *m = NewLIR3(kX86Imul32RM, r1, rX86_SP, displacement + LOWORD_OFFSET);
@@ -1060,15 +1098,15 @@
// EAX <- 2L
if (src2_in_reg) {
- NewLIR2(kX86Mov32RR, r0, rl_src2.reg.GetReg());
+ NewLIR2(kX86Mov32RR, r0, rl_src2.reg.GetLowReg());
} else {
- LoadBaseDisp(rX86_SP, SRegOffset(rl_src2.s_reg_low) + LOWORD_OFFSET, r0,
+ LoadBaseDisp(rs_rX86_SP, SRegOffset(rl_src2.s_reg_low) + LOWORD_OFFSET, rs_r0,
kWord, rl_src2.s_reg_low);
}
// EDX:EAX <- 2L * 1L (double precision)
if (src1_in_reg) {
- NewLIR1(kX86Mul32DaR, rl_src1.reg.GetReg());
+ NewLIR1(kX86Mul32DaR, rl_src1.reg.GetLowReg());
} else {
int displacement = SRegOffset(rl_src1.s_reg_low);
LIR *m = NewLIR2(kX86Mul32DaM, rX86_SP, displacement + LOWORD_OFFSET);
@@ -1081,7 +1119,7 @@
// Result is EDX:EAX
RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
- RegStorage(RegStorage::k64BitPair, r0, r2), INVALID_SREG, INVALID_SREG};
+ RegStorage::MakeRegPair(rs_r0, rs_r2), INVALID_SREG, INVALID_SREG};
StoreValueWide(rl_dest, rl_result);
}
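// A minimal sketch of the decomposition implemented above; the aH*bH term
// vanishes mod 2^64, so only three 32-bit multiplies are needed (the name
// Mul64Sketch is illustrative):
static uint64_t Mul64Sketch(uint64_t a, uint64_t b) {
  uint32_t a_lo = static_cast<uint32_t>(a), a_hi = static_cast<uint32_t>(a >> 32);
  uint32_t b_lo = static_cast<uint32_t>(b), b_hi = static_cast<uint32_t>(b >> 32);
  uint32_t cross = a_hi * b_lo + a_lo * b_hi;             // ECX = 1H*2L + 2H*1L.
  uint64_t lo_prod = static_cast<uint64_t>(a_lo) * b_lo;  // EDX:EAX = 1L*2L.
  return lo_prod + (static_cast<uint64_t>(cross) << 32);  // EDX += ECX.
}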
@@ -1093,32 +1131,31 @@
// Both operands are in registers.
// But we must ensure that rl_src is in pair
rl_src = EvalLocWide(rl_src, kCoreReg, true);
- if (rl_dest.reg.GetReg() == rl_src.reg.GetHighReg()) {
+ if (rl_dest.reg.GetLowReg() == rl_src.reg.GetHighReg()) {
// The registers are the same, so we would clobber the source before its use.
- int temp_reg = AllocTemp();
- OpRegCopy(temp_reg, rl_dest.reg.GetReg());
- rl_src.reg.SetHighReg(temp_reg);
+ RegStorage temp_reg = AllocTemp();
+ OpRegCopy(temp_reg, rl_dest.reg);
+ rl_src.reg.SetHighReg(temp_reg.GetReg());
}
- NewLIR2(x86op, rl_dest.reg.GetReg(), rl_src.reg.GetReg());
+ NewLIR2(x86op, rl_dest.reg.GetLowReg(), rl_src.reg.GetLowReg());
x86op = GetOpcode(op, rl_dest, rl_src, true);
NewLIR2(x86op, rl_dest.reg.GetHighReg(), rl_src.reg.GetHighReg());
- FreeTemp(rl_src.reg.GetReg());
- FreeTemp(rl_src.reg.GetHighReg());
+ FreeTemp(rl_src.reg);
return;
}
// RHS is in memory.
DCHECK((rl_src.location == kLocDalvikFrame) ||
(rl_src.location == kLocCompilerTemp));
- int rBase = TargetReg(kSp);
+ int r_base = TargetReg(kSp).GetReg();
int displacement = SRegOffset(rl_src.s_reg_low);
- LIR *lir = NewLIR3(x86op, rl_dest.reg.GetReg(), rBase, displacement + LOWORD_OFFSET);
+ LIR *lir = NewLIR3(x86op, rl_dest.reg.GetLowReg(), r_base, displacement + LOWORD_OFFSET);
AnnotateDalvikRegAccess(lir, (displacement + LOWORD_OFFSET) >> 2,
true /* is_load */, true /* is64bit */);
x86op = GetOpcode(op, rl_dest, rl_src, true);
- lir = NewLIR3(x86op, rl_dest.reg.GetHighReg(), rBase, displacement + HIWORD_OFFSET);
+ lir = NewLIR3(x86op, rl_dest.reg.GetHighReg(), r_base, displacement + HIWORD_OFFSET);
AnnotateDalvikRegAccess(lir, (displacement + HIWORD_OFFSET) >> 2,
true /* is_load */, true /* is64bit */);
}
@@ -1142,18 +1179,17 @@
// Operate directly into memory.
X86OpCode x86op = GetOpcode(op, rl_dest, rl_src, false);
- int rBase = TargetReg(kSp);
+ int r_base = TargetReg(kSp).GetReg();
int displacement = SRegOffset(rl_dest.s_reg_low);
- LIR *lir = NewLIR3(x86op, rBase, displacement + LOWORD_OFFSET, rl_src.reg.GetReg());
+ LIR *lir = NewLIR3(x86op, r_base, displacement + LOWORD_OFFSET, rl_src.reg.GetLowReg());
AnnotateDalvikRegAccess(lir, (displacement + LOWORD_OFFSET) >> 2,
false /* is_load */, true /* is64bit */);
x86op = GetOpcode(op, rl_dest, rl_src, true);
- lir = NewLIR3(x86op, rBase, displacement + HIWORD_OFFSET, rl_src.reg.GetHighReg());
+ lir = NewLIR3(x86op, r_base, displacement + HIWORD_OFFSET, rl_src.reg.GetHighReg());
AnnotateDalvikRegAccess(lir, (displacement + HIWORD_OFFSET) >> 2,
false /* is_load */, true /* is64bit */);
- FreeTemp(rl_src.reg.GetReg());
- FreeTemp(rl_src.reg.GetHighReg());
+ FreeTemp(rl_src.reg);
}
void X86Mir2Lir::GenLongArith(RegLocation rl_dest, RegLocation rl_src1,
@@ -1195,12 +1231,12 @@
// Get one of the source operands into temporary register.
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
- if (IsTemp(rl_src1.reg.GetReg()) && IsTemp(rl_src1.reg.GetHighReg())) {
+ if (IsTemp(rl_src1.reg.GetLowReg()) && IsTemp(rl_src1.reg.GetHighReg())) {
GenLongRegOrMemOp(rl_src1, rl_src2, op);
} else if (is_commutative) {
rl_src2 = LoadValueWide(rl_src2, kCoreReg);
// We need at least one of them to be a temporary.
- if (!(IsTemp(rl_src2.reg.GetReg()) && IsTemp(rl_src2.reg.GetHighReg()))) {
+ if (!(IsTemp(rl_src2.reg.GetLowReg()) && IsTemp(rl_src2.reg.GetHighReg()))) {
rl_src1 = ForceTempWide(rl_src1);
GenLongRegOrMemOp(rl_src1, rl_src2, op);
} else {
@@ -1246,15 +1282,15 @@
rl_src = LoadValueWide(rl_src, kCoreReg);
RegLocation rl_result = ForceTempWide(rl_src);
if (((rl_dest.location == kLocPhysReg) && (rl_src.location == kLocPhysReg)) &&
- ((rl_dest.reg.GetReg() == rl_src.reg.GetHighReg()))) {
+ ((rl_dest.reg.GetLowReg() == rl_src.reg.GetHighReg()))) {
// The registers are the same, so we would clobber the source before its use.
- int temp_reg = AllocTemp();
- OpRegCopy(temp_reg, rl_result.reg.GetReg());
- rl_result.reg.SetHighReg(temp_reg);
+ RegStorage temp_reg = AllocTemp();
+ OpRegCopy(temp_reg, rl_result.reg);
+ rl_result.reg.SetHighReg(temp_reg.GetReg());
}
- OpRegReg(kOpNeg, rl_result.reg.GetReg(), rl_result.reg.GetReg()); // rLow = -rLow
- OpRegImm(kOpAdc, rl_result.reg.GetHighReg(), 0); // rHigh = rHigh + CF
- OpRegReg(kOpNeg, rl_result.reg.GetHighReg(), rl_result.reg.GetHighReg()); // rHigh = -rHigh
+ OpRegReg(kOpNeg, rl_result.reg.GetLow(), rl_result.reg.GetLow()); // rLow = -rLow
+ OpRegImm(kOpAdc, rl_result.reg.GetHigh(), 0); // rHigh = rHigh + CF
+ OpRegReg(kOpNeg, rl_result.reg.GetHigh(), rl_result.reg.GetHigh()); // rHigh = -rHigh
StoreValueWide(rl_dest, rl_result);
}
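// The neg/adc/neg sequence above works because x86 neg sets CF exactly when
// its operand was non-zero; a minimal sketch of the same arithmetic (the name
// Neg64Sketch is illustrative):
static int64_t Neg64Sketch(int64_t x) {
  uint32_t lo = static_cast<uint32_t>(x);
  uint32_t hi = static_cast<uint32_t>(static_cast<uint64_t>(x) >> 32);
  uint32_t carry = (lo != 0) ? 1 : 0;  // CF from "neg lo".
  lo = 0u - lo;                        // rLow = -rLow.
  hi = 0u - (hi + carry);              // adc hi, 0 followed by neg hi.
  return static_cast<int64_t>((static_cast<uint64_t>(hi) << 32) | lo);
}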
@@ -1296,30 +1332,28 @@
// If index is constant, just fold it into the data offset
data_offset += constant_index_value << scale;
// Treat as non-array below.
- rl_index.reg = RegStorage(RegStorage::k32BitSolo, INVALID_REG);
+ rl_index.reg = RegStorage::InvalidReg();
}
/* null object? */
- GenNullCheck(rl_array.reg.GetReg(), opt_flags);
+ GenNullCheck(rl_array.reg, opt_flags);
if (!(opt_flags & MIR_IGNORE_RANGE_CHECK)) {
if (constant_index) {
- GenMemImmedCheck(kCondLs, rl_array.reg.GetReg(), len_offset,
+ GenMemImmedCheck(kCondLs, rl_array.reg, len_offset,
constant_index_value, kThrowConstantArrayBounds);
} else {
- GenRegMemCheck(kCondUge, rl_index.reg.GetReg(), rl_array.reg.GetReg(),
- len_offset, kThrowArrayBounds);
+ GenRegMemCheck(kCondUge, rl_index.reg, rl_array.reg, len_offset, kThrowArrayBounds);
}
}
rl_result = EvalLoc(rl_dest, reg_class, true);
if ((size == kLong) || (size == kDouble)) {
- LoadBaseIndexedDisp(rl_array.reg.GetReg(), rl_index.reg.GetReg(), scale, data_offset, rl_result.reg.GetReg(),
- rl_result.reg.GetHighReg(), size, INVALID_SREG);
+ LoadBaseIndexedDisp(rl_array.reg, rl_index.reg, scale, data_offset, rl_result.reg.GetLow(),
+ rl_result.reg.GetHigh(), size, INVALID_SREG);
StoreValueWide(rl_dest, rl_result);
} else {
- LoadBaseIndexedDisp(rl_array.reg.GetReg(), rl_index.reg.GetReg(), scale,
- data_offset, rl_result.reg.GetReg(), INVALID_REG, size,
- INVALID_SREG);
+ LoadBaseIndexedDisp(rl_array.reg, rl_index.reg, scale, data_offset, rl_result.reg,
+ RegStorage::InvalidReg(), size, INVALID_SREG);
StoreValue(rl_dest, rl_result);
}
}
@@ -1350,19 +1384,18 @@
constant_index_value = mir_graph_->ConstantValue(rl_index);
data_offset += constant_index_value << scale;
// Treat as non-array below.
- rl_index.reg = RegStorage(RegStorage::k32BitSolo, INVALID_REG);
+ rl_index.reg = RegStorage::InvalidReg();
}
/* null object? */
- GenNullCheck(rl_array.reg.GetReg(), opt_flags);
+ GenNullCheck(rl_array.reg, opt_flags);
if (!(opt_flags & MIR_IGNORE_RANGE_CHECK)) {
if (constant_index) {
- GenMemImmedCheck(kCondLs, rl_array.reg.GetReg(), len_offset,
+ GenMemImmedCheck(kCondLs, rl_array.reg, len_offset,
constant_index_value, kThrowConstantArrayBounds);
} else {
- GenRegMemCheck(kCondUge, rl_index.reg.GetReg(), rl_array.reg.GetReg(),
- len_offset, kThrowArrayBounds);
+ GenRegMemCheck(kCondUge, rl_index.reg, rl_array.reg, len_offset, kThrowArrayBounds);
}
}
if ((size == kLong) || (size == kDouble)) {
@@ -1372,20 +1405,25 @@
}
// If the src reg can't be byte accessed, move it to a temp first.
if ((size == kSignedByte || size == kUnsignedByte) && rl_src.reg.GetReg() >= 4) {
- int temp = AllocTemp();
- OpRegCopy(temp, rl_src.reg.GetReg());
- StoreBaseIndexedDisp(rl_array.reg.GetReg(), rl_index.reg.GetReg(), scale, data_offset, temp,
- INVALID_REG, size, INVALID_SREG);
+ RegStorage temp = AllocTemp();
+ OpRegCopy(temp, rl_src.reg);
+ StoreBaseIndexedDisp(rl_array.reg, rl_index.reg, scale, data_offset, temp,
+ RegStorage::InvalidReg(), size, INVALID_SREG);
} else {
- StoreBaseIndexedDisp(rl_array.reg.GetReg(), rl_index.reg.GetReg(), scale, data_offset, rl_src.reg.GetReg(),
- rl_src.wide ? rl_src.reg.GetHighReg() : INVALID_REG, size, INVALID_SREG);
+ if (rl_src.wide) {
+ StoreBaseIndexedDisp(rl_array.reg, rl_index.reg, scale, data_offset, rl_src.reg.GetLow(),
+ rl_src.reg.GetHigh(), size, INVALID_SREG);
+ } else {
+ StoreBaseIndexedDisp(rl_array.reg, rl_index.reg, scale, data_offset, rl_src.reg,
+ RegStorage::InvalidReg(), size, INVALID_SREG);
+ }
}
if (card_mark) {
// Free rl_index if it's a temp. Ensures there are 2 free regs for card mark.
if (!constant_index) {
FreeTemp(rl_index.reg.GetReg());
}
- MarkGCCard(rl_src.reg.GetReg(), rl_array.reg.GetReg());
+ MarkGCCard(rl_src.reg, rl_array.reg);
}
}
@@ -1397,51 +1435,51 @@
case Instruction::SHL_LONG_2ADDR:
DCHECK_NE(shift_amount, 1); // Prevent a double store from happening.
if (shift_amount == 32) {
- OpRegCopy(rl_result.reg.GetHighReg(), rl_src.reg.GetReg());
- LoadConstant(rl_result.reg.GetReg(), 0);
+ OpRegCopy(rl_result.reg.GetHigh(), rl_src.reg.GetLow());
+ LoadConstant(rl_result.reg.GetLow(), 0);
} else if (shift_amount > 31) {
- OpRegCopy(rl_result.reg.GetHighReg(), rl_src.reg.GetReg());
+ OpRegCopy(rl_result.reg.GetHigh(), rl_src.reg.GetLow());
FreeTemp(rl_src.reg.GetHighReg());
NewLIR2(kX86Sal32RI, rl_result.reg.GetHighReg(), shift_amount - 32);
- LoadConstant(rl_result.reg.GetReg(), 0);
+ LoadConstant(rl_result.reg.GetLow(), 0);
} else {
- OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetReg());
- OpRegCopy(rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg());
- NewLIR3(kX86Shld32RRI, rl_result.reg.GetHighReg(), rl_result.reg.GetReg(), shift_amount);
- NewLIR2(kX86Sal32RI, rl_result.reg.GetReg(), shift_amount);
+ OpRegCopy(rl_result.reg, rl_src.reg);
+ OpRegCopy(rl_result.reg.GetHigh(), rl_src.reg.GetHigh());
+ NewLIR3(kX86Shld32RRI, rl_result.reg.GetHighReg(), rl_result.reg.GetLowReg(), shift_amount);
+ NewLIR2(kX86Sal32RI, rl_result.reg.GetLowReg(), shift_amount);
}
break;
case Instruction::SHR_LONG:
case Instruction::SHR_LONG_2ADDR:
if (shift_amount == 32) {
- OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetHighReg());
- OpRegCopy(rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg());
+ OpRegCopy(rl_result.reg.GetLow(), rl_src.reg.GetHigh());
+ OpRegCopy(rl_result.reg.GetHigh(), rl_src.reg.GetHigh());
NewLIR2(kX86Sar32RI, rl_result.reg.GetHighReg(), 31);
} else if (shift_amount > 31) {
- OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetHighReg());
- OpRegCopy(rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg());
- NewLIR2(kX86Sar32RI, rl_result.reg.GetReg(), shift_amount - 32);
+ OpRegCopy(rl_result.reg.GetLow(), rl_src.reg.GetHigh());
+ OpRegCopy(rl_result.reg.GetHigh(), rl_src.reg.GetHigh());
+ NewLIR2(kX86Sar32RI, rl_result.reg.GetLowReg(), shift_amount - 32);
NewLIR2(kX86Sar32RI, rl_result.reg.GetHighReg(), 31);
} else {
- OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetReg());
- OpRegCopy(rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg());
- NewLIR3(kX86Shrd32RRI, rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), shift_amount);
+ OpRegCopy(rl_result.reg, rl_src.reg);
+ OpRegCopy(rl_result.reg.GetHigh(), rl_src.reg.GetHigh());
+ NewLIR3(kX86Shrd32RRI, rl_result.reg.GetLowReg(), rl_result.reg.GetHighReg(), shift_amount);
NewLIR2(kX86Sar32RI, rl_result.reg.GetHighReg(), shift_amount);
}
break;
case Instruction::USHR_LONG:
case Instruction::USHR_LONG_2ADDR:
if (shift_amount == 32) {
- OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetHighReg());
- LoadConstant(rl_result.reg.GetHighReg(), 0);
+ OpRegCopy(rl_result.reg.GetLow(), rl_src.reg.GetHigh());
+ LoadConstant(rl_result.reg.GetHigh(), 0);
} else if (shift_amount > 31) {
- OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetHighReg());
- NewLIR2(kX86Shr32RI, rl_result.reg.GetReg(), shift_amount - 32);
- LoadConstant(rl_result.reg.GetHighReg(), 0);
+ OpRegCopy(rl_result.reg.GetLow(), rl_src.reg.GetHigh());
+ NewLIR2(kX86Shr32RI, rl_result.reg.GetLowReg(), shift_amount - 32);
+ LoadConstant(rl_result.reg.GetHigh(), 0);
} else {
- OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetReg());
- OpRegCopy(rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg());
- NewLIR3(kX86Shrd32RRI, rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), shift_amount);
+ OpRegCopy(rl_result.reg, rl_src.reg);
+ OpRegCopy(rl_result.reg.GetHigh(), rl_src.reg.GetHigh());
+ NewLIR3(kX86Shrd32RRI, rl_result.reg.GetLowReg(), rl_result.reg.GetHighReg(), shift_amount);
NewLIR2(kX86Shr32RI, rl_result.reg.GetHighReg(), shift_amount);
}
break;
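// For reference (standard x86 double-precision shift semantics, 0 < n < 32):
//   shld hi, lo, n   ; hi = (hi << n) | (lo >> (32 - n))
//   shrd lo, hi, n   ; lo = (lo >> n) | (hi << (32 - n))
// The n == 32 and n > 31 cases are handled above with plain moves plus a
// single 32-bit shift, so only the 1..31 range reaches shld/shrd.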
@@ -1579,7 +1617,7 @@
int32_t value) {
bool in_mem = loc.location != kLocPhysReg;
bool byte_imm = IS_SIMM8(value);
- DCHECK(in_mem || !IsFpReg(loc.reg.GetReg()));
+ DCHECK(in_mem || !IsFpReg(loc.reg));
switch (op) {
case Instruction::ADD_LONG:
case Instruction::ADD_LONG_2ADDR:
@@ -1639,18 +1677,18 @@
// Can we just do this into memory?
if ((rl_dest.location == kLocDalvikFrame) ||
(rl_dest.location == kLocCompilerTemp)) {
- int rBase = TargetReg(kSp);
+ int r_base = TargetReg(kSp).GetReg();
int displacement = SRegOffset(rl_dest.s_reg_low);
if (!IsNoOp(op, val_lo)) {
X86OpCode x86op = GetOpcode(op, rl_dest, false, val_lo);
- LIR *lir = NewLIR3(x86op, rBase, displacement + LOWORD_OFFSET, val_lo);
+ LIR *lir = NewLIR3(x86op, r_base, displacement + LOWORD_OFFSET, val_lo);
AnnotateDalvikRegAccess(lir, (displacement + LOWORD_OFFSET) >> 2,
false /* is_load */, true /* is64bit */);
}
if (!IsNoOp(op, val_hi)) {
X86OpCode x86op = GetOpcode(op, rl_dest, true, val_hi);
- LIR *lir = NewLIR3(x86op, rBase, displacement + HIWORD_OFFSET, val_hi);
+ LIR *lir = NewLIR3(x86op, r_base, displacement + HIWORD_OFFSET, val_hi);
AnnotateDalvikRegAccess(lir, (displacement + HIWORD_OFFSET) >> 2,
false /* is_load */, true /* is64bit */);
}
@@ -1659,11 +1697,11 @@
RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
DCHECK_EQ(rl_result.location, kLocPhysReg);
- DCHECK(!IsFpReg(rl_result.reg.GetReg()));
+ DCHECK(!IsFpReg(rl_result.reg));
if (!IsNoOp(op, val_lo)) {
X86OpCode x86op = GetOpcode(op, rl_result, false, val_lo);
- NewLIR2(x86op, rl_result.reg.GetReg(), val_lo);
+ NewLIR2(x86op, rl_result.reg.GetLowReg(), val_lo);
}
if (!IsNoOp(op, val_hi)) {
X86OpCode x86op = GetOpcode(op, rl_result, true, val_hi);
@@ -1683,11 +1721,12 @@
// Can we do this directly into the destination registers?
if (rl_dest.location == kLocPhysReg && rl_src1.location == kLocPhysReg &&
- rl_dest.reg.GetReg() == rl_src1.reg.GetReg() && rl_dest.reg.GetHighReg() == rl_src1.reg.GetHighReg() &&
- !IsFpReg(rl_dest.reg.GetReg())) {
+ rl_dest.reg.GetLowReg() == rl_src1.reg.GetLowReg() &&
+ rl_dest.reg.GetHighReg() == rl_src1.reg.GetHighReg() &&
+ !IsFpReg(rl_dest.reg)) {
if (!IsNoOp(op, val_lo)) {
X86OpCode x86op = GetOpcode(op, rl_dest, false, val_lo);
- NewLIR2(x86op, rl_dest.reg.GetReg(), val_lo);
+ NewLIR2(x86op, rl_dest.reg.GetLowReg(), val_lo);
}
if (!IsNoOp(op, val_hi)) {
X86OpCode x86op = GetOpcode(op, rl_dest, true, val_hi);
@@ -1705,7 +1744,7 @@
RegLocation rl_result = ForceTempWide(rl_src1);
if (!IsNoOp(op, val_lo)) {
X86OpCode x86op = GetOpcode(op, rl_result, false, val_lo);
- NewLIR2(x86op, rl_result.reg.GetReg(), val_lo);
+ NewLIR2(x86op, rl_result.reg.GetLowReg(), val_lo);
}
if (!IsNoOp(op, val_hi)) {
X86OpCode x86op = GetOpcode(op, rl_result, true, val_hi);
@@ -1721,19 +1760,19 @@
RegLocation rl_dest, RegLocation rl_src) {
RegLocation object = LoadValue(rl_src, kCoreReg);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- int result_reg = rl_result.reg.GetReg();
+ RegStorage result_reg = rl_result.reg;
// SETcc only works with EAX..EDX.
- if (result_reg == object.reg.GetReg() || result_reg >= 4) {
+ if (result_reg == object.reg || result_reg.GetReg() >= 4) {
result_reg = AllocTypedTemp(false, kCoreReg);
- DCHECK_LT(result_reg, 4);
+ DCHECK_LT(result_reg.GetReg(), 4);
}
// Assume that there is no match.
LoadConstant(result_reg, 0);
- LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg.GetReg(), 0, NULL);
+ LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg, 0, NULL);
- int check_class = AllocTypedTemp(false, kCoreReg);
+ RegStorage check_class = AllocTypedTemp(false, kCoreReg);
// If Method* is already in a register, we can save a copy.
RegLocation rl_method = mir_graph_->GetMethodLoc();
@@ -1742,24 +1781,20 @@
if (rl_method.location == kLocPhysReg) {
if (use_declaring_class) {
- LoadWordDisp(rl_method.reg.GetReg(),
- mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
+ LoadWordDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
check_class);
} else {
- LoadWordDisp(rl_method.reg.GetReg(),
- mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
+ LoadWordDisp(rl_method.reg, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
check_class);
LoadWordDisp(check_class, offset_of_type, check_class);
}
} else {
LoadCurrMethodDirect(check_class);
if (use_declaring_class) {
- LoadWordDisp(check_class,
- mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
+ LoadWordDisp(check_class, mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
check_class);
} else {
- LoadWordDisp(check_class,
- mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
+ LoadWordDisp(check_class, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
check_class);
LoadWordDisp(check_class, offset_of_type, check_class);
}
@@ -1767,17 +1802,16 @@
// Compare the computed class to the class in the object.
DCHECK_EQ(object.location, kLocPhysReg);
- OpRegMem(kOpCmp, check_class, object.reg.GetReg(),
- mirror::Object::ClassOffset().Int32Value());
+ OpRegMem(kOpCmp, check_class, object.reg, mirror::Object::ClassOffset().Int32Value());
// Set the low byte of the result to 0 or 1 from the compare condition code.
- NewLIR2(kX86Set8R, result_reg, kX86CondEq);
+ NewLIR2(kX86Set8R, result_reg.GetReg(), kX86CondEq);
LIR* target = NewLIR0(kPseudoTargetLabel);
null_branchover->target = target;
FreeTemp(check_class);
if (IsTemp(result_reg)) {
- OpRegCopy(rl_result.reg.GetReg(), result_reg);
+ OpRegCopy(rl_result.reg, result_reg);
FreeTemp(result_reg);
}
StoreValue(rl_dest, rl_result);
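// For reference: 32-bit x86 SETcc can only target the byte registers AL, CL,
// DL and BL (register numbers 0..3), which is why result_reg is re-allocated
// above when it aliases the object register or has a register number >= 4.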
@@ -1792,7 +1826,7 @@
// May generate a call - use explicit registers.
LockCallTemps();
LoadCurrMethodDirect(TargetReg(kArg1)); // kArg1 gets current Method*.
- int class_reg = TargetReg(kArg2); // kArg2 will hold the Class*.
+ RegStorage class_reg = TargetReg(kArg2); // kArg2 will hold the Class*.
// Reference must end up in kArg0.
if (needs_access_check) {
// Check we have access to type_idx and if not throw IllegalAccessError,
@@ -1803,13 +1837,13 @@
LoadValueDirectFixed(rl_src, TargetReg(kArg0));
} else if (use_declaring_class) {
LoadValueDirectFixed(rl_src, TargetReg(kArg0));
- LoadWordDisp(TargetReg(kArg1),
- mirror::ArtMethod::DeclaringClassOffset().Int32Value(), class_reg);
+ LoadWordDisp(TargetReg(kArg1), mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
+ class_reg);
} else {
// Load dex cache entry into class_reg (kArg2).
LoadValueDirectFixed(rl_src, TargetReg(kArg0));
- LoadWordDisp(TargetReg(kArg1),
- mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), class_reg);
+ LoadWordDisp(TargetReg(kArg1), mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
+ class_reg);
int32_t offset_of_type =
mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() + (sizeof(mirror::Class*)
* type_idx);
@@ -1842,13 +1876,13 @@
LIR* branchover = nullptr;
if (type_known_final) {
// Ensure top 3 bytes of result are 0.
- LoadConstant(rl_result.reg.GetReg(), 0);
+ LoadConstant(rl_result.reg, 0);
OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2));
// Set the low byte of the result to 0 or 1 from the compare condition code.
NewLIR2(kX86Set8R, rl_result.reg.GetReg(), kX86CondEq);
} else {
if (!type_known_abstract) {
- LoadConstant(rl_result.reg.GetReg(), 1); // Assume result succeeds.
+ LoadConstant(rl_result.reg, 1); // Assume result succeeds.
branchover = OpCmpBranch(kCondEq, TargetReg(kArg1), TargetReg(kArg2), NULL);
}
OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));
@@ -1976,11 +2010,11 @@
rl_lhs = LoadValue(rl_lhs, kCoreReg);
rl_result = UpdateLoc(rl_dest);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegReg(op, rl_result.reg.GetReg(), rl_lhs.reg.GetReg());
+ OpRegReg(op, rl_result.reg, rl_lhs.reg);
} else {
if (shift_op) {
// X86 doesn't require masking and must use ECX.
- int t_reg = TargetReg(kCount); // rCX
+ RegStorage t_reg = TargetReg(kCount); // rCX
LoadValueDirectFixed(rl_rhs, t_reg);
if (is_two_addr) {
// Can we do this directly into memory?
@@ -1988,12 +2022,12 @@
rl_rhs = LoadValue(rl_rhs, kCoreReg);
if (rl_result.location != kLocPhysReg) {
// Okay, we can do this into memory
- OpMemReg(op, rl_result, t_reg);
+ OpMemReg(op, rl_result, t_reg.GetReg());
FreeTemp(t_reg);
return;
} else if (!IsFpReg(rl_result.reg.GetReg())) {
// Can do this directly into the result register
- OpRegReg(op, rl_result.reg.GetReg(), t_reg);
+ OpRegReg(op, rl_result.reg, t_reg);
FreeTemp(t_reg);
StoreFinalValue(rl_dest, rl_result);
return;
@@ -2002,7 +2036,7 @@
// Three address form, or we can't do directly.
rl_lhs = LoadValue(rl_lhs, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegRegReg(op, rl_result.reg.GetReg(), rl_lhs.reg.GetReg(), t_reg);
+ OpRegRegReg(op, rl_result.reg, rl_lhs.reg, t_reg);
FreeTemp(t_reg);
} else {
// Multiply is 3 operand only (sort of).
@@ -2013,11 +2047,11 @@
// Can we do this from memory directly?
rl_rhs = UpdateLoc(rl_rhs);
if (rl_rhs.location != kLocPhysReg) {
- OpRegMem(op, rl_result.reg.GetReg(), rl_rhs);
+ OpRegMem(op, rl_result.reg, rl_rhs);
StoreFinalValue(rl_dest, rl_result);
return;
- } else if (!IsFpReg(rl_rhs.reg.GetReg())) {
- OpRegReg(op, rl_result.reg.GetReg(), rl_rhs.reg.GetReg());
+ } else if (!IsFpReg(rl_rhs.reg)) {
+ OpRegReg(op, rl_result.reg, rl_rhs.reg);
StoreFinalValue(rl_dest, rl_result);
return;
}
@@ -2027,15 +2061,15 @@
// Okay, we can do this into memory.
OpMemReg(op, rl_result, rl_rhs.reg.GetReg());
return;
- } else if (!IsFpReg(rl_result.reg.GetReg())) {
+ } else if (!IsFpReg(rl_result.reg)) {
// Can do this directly into the result register.
- OpRegReg(op, rl_result.reg.GetReg(), rl_rhs.reg.GetReg());
+ OpRegReg(op, rl_result.reg, rl_rhs.reg);
StoreFinalValue(rl_dest, rl_result);
return;
} else {
rl_lhs = LoadValue(rl_lhs, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegRegReg(op, rl_result.reg.GetReg(), rl_lhs.reg.GetReg(), rl_rhs.reg.GetReg());
+ OpRegRegReg(op, rl_result.reg, rl_lhs.reg, rl_rhs.reg);
}
} else {
// Try to use reg/memory instructions.
@@ -2047,7 +2081,7 @@
rl_lhs = LoadValue(rl_lhs, kCoreReg);
rl_rhs = LoadValue(rl_rhs, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegRegReg(op, rl_result.reg.GetReg(), rl_lhs.reg.GetReg(), rl_rhs.reg.GetReg());
+ OpRegRegReg(op, rl_result.reg, rl_lhs.reg, rl_rhs.reg);
} else {
// We can optimize by moving to result and using memory operands.
if (rl_rhs.location != kLocPhysReg) {
@@ -2060,29 +2094,29 @@
rl_result = EvalLoc(rl_dest, kCoreReg, true);
} else {
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- LoadValueDirect(rl_lhs, rl_result.reg.GetReg());
+ LoadValueDirect(rl_lhs, rl_result.reg);
}
- OpRegMem(op, rl_result.reg.GetReg(), rl_rhs);
+ OpRegMem(op, rl_result.reg, rl_rhs);
} else if (rl_lhs.location != kLocPhysReg) {
// RHS is in a register; LHS is in memory.
if (op != kOpSub) {
// Force RHS into result and operate on memory.
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegCopy(rl_result.reg.GetReg(), rl_rhs.reg.GetReg());
- OpRegMem(op, rl_result.reg.GetReg(), rl_lhs);
+ OpRegCopy(rl_result.reg, rl_rhs.reg);
+ OpRegMem(op, rl_result.reg, rl_lhs);
} else {
// Subtraction isn't commutative.
rl_lhs = LoadValue(rl_lhs, kCoreReg);
rl_rhs = LoadValue(rl_rhs, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegRegReg(op, rl_result.reg.GetReg(), rl_lhs.reg.GetReg(), rl_rhs.reg.GetReg());
+ OpRegRegReg(op, rl_result.reg, rl_lhs.reg, rl_rhs.reg);
}
} else {
// Both are in registers.
rl_lhs = LoadValue(rl_lhs, kCoreReg);
rl_rhs = LoadValue(rl_rhs, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegRegReg(op, rl_result.reg.GetReg(), rl_lhs.reg.GetReg(), rl_rhs.reg.GetReg());
+ OpRegRegReg(op, rl_result.reg, rl_lhs.reg, rl_rhs.reg);
}
}
}
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 9994927..da64250 100644
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -65,44 +65,44 @@
}
// Return a target-dependent special register.
-int X86Mir2Lir::TargetReg(SpecialTargetRegister reg) {
- int res = INVALID_REG;
+RegStorage X86Mir2Lir::TargetReg(SpecialTargetRegister reg) {
+ int res_reg = RegStorage::kInvalidRegVal;
switch (reg) {
- case kSelf: res = rX86_SELF; break;
- case kSuspend: res = rX86_SUSPEND; break;
- case kLr: res = rX86_LR; break;
- case kPc: res = rX86_PC; break;
- case kSp: res = rX86_SP; break;
- case kArg0: res = rX86_ARG0; break;
- case kArg1: res = rX86_ARG1; break;
- case kArg2: res = rX86_ARG2; break;
- case kArg3: res = rX86_ARG3; break;
- case kFArg0: res = rX86_FARG0; break;
- case kFArg1: res = rX86_FARG1; break;
- case kFArg2: res = rX86_FARG2; break;
- case kFArg3: res = rX86_FARG3; break;
- case kRet0: res = rX86_RET0; break;
- case kRet1: res = rX86_RET1; break;
- case kInvokeTgt: res = rX86_INVOKE_TGT; break;
- case kHiddenArg: res = rAX; break;
- case kHiddenFpArg: res = fr0; break;
- case kCount: res = rX86_COUNT; break;
+ case kSelf: res_reg = rX86_SELF; break;
+ case kSuspend: res_reg = rX86_SUSPEND; break;
+ case kLr: res_reg = rX86_LR; break;
+ case kPc: res_reg = rX86_PC; break;
+ case kSp: res_reg = rX86_SP; break;
+ case kArg0: res_reg = rX86_ARG0; break;
+ case kArg1: res_reg = rX86_ARG1; break;
+ case kArg2: res_reg = rX86_ARG2; break;
+ case kArg3: res_reg = rX86_ARG3; break;
+ case kFArg0: res_reg = rX86_FARG0; break;
+ case kFArg1: res_reg = rX86_FARG1; break;
+ case kFArg2: res_reg = rX86_FARG2; break;
+ case kFArg3: res_reg = rX86_FARG3; break;
+ case kRet0: res_reg = rX86_RET0; break;
+ case kRet1: res_reg = rX86_RET1; break;
+ case kInvokeTgt: res_reg = rX86_INVOKE_TGT; break;
+ case kHiddenArg: res_reg = rAX; break;
+ case kHiddenFpArg: res_reg = fr0; break;
+ case kCount: res_reg = rX86_COUNT; break;
}
- return res;
+ return RegStorage::Solo32(res_reg);
}
-int X86Mir2Lir::GetArgMappingToPhysicalReg(int arg_num) {
+RegStorage X86Mir2Lir::GetArgMappingToPhysicalReg(int arg_num) {
// For the 32-bit internal ABI, the first 3 arguments are passed in registers.
// TODO: This is not 64-bit compliant and depends on new internal ABI.
switch (arg_num) {
case 0:
- return rX86_ARG1;
+ return rs_rX86_ARG1;
case 1:
- return rX86_ARG2;
+ return rs_rX86_ARG2;
case 2:
- return rX86_ARG3;
+ return rs_rX86_ARG3;
default:
- return INVALID_REG;
+ return RegStorage::InvalidReg();
}
}
@@ -346,9 +346,9 @@
#endif
}
-void X86Mir2Lir::FlushRegWide(int reg1, int reg2) {
- RegisterInfo* info1 = GetRegInfo(reg1);
- RegisterInfo* info2 = GetRegInfo(reg2);
+void X86Mir2Lir::FlushRegWide(RegStorage reg) {
+ RegisterInfo* info1 = GetRegInfo(reg.GetLowReg());
+ RegisterInfo* info2 = GetRegInfo(reg.GetHighReg());
DCHECK(info1 && info2 && info1->pair && info2->pair &&
(info1->partner == info2->reg) &&
(info2->partner == info1->reg));
@@ -363,16 +363,18 @@
if (mir_graph_->SRegToVReg(info2->s_reg) < mir_graph_->SRegToVReg(info1->s_reg))
info1 = info2;
int v_reg = mir_graph_->SRegToVReg(info1->s_reg);
- StoreBaseDispWide(rX86_SP, VRegOffset(v_reg), info1->reg, info1->partner);
+ StoreBaseDispWide(rs_rX86_SP, VRegOffset(v_reg),
+ RegStorage(RegStorage::k64BitPair, info1->reg, info1->partner));
}
}
-void X86Mir2Lir::FlushReg(int reg) {
- RegisterInfo* info = GetRegInfo(reg);
+void X86Mir2Lir::FlushReg(RegStorage reg) {
+ DCHECK(!reg.IsPair());
+ RegisterInfo* info = GetRegInfo(reg.GetReg());
if (info->live && info->dirty) {
info->dirty = false;
int v_reg = mir_graph_->SRegToVReg(info->s_reg);
- StoreBaseDisp(rX86_SP, VRegOffset(v_reg), reg, kWord);
+ StoreBaseDisp(rs_rX86_SP, VRegOffset(v_reg), reg, kWord);
}
}
@@ -381,6 +383,10 @@
return X86_FPREG(reg);
}
+bool X86Mir2Lir::IsFpReg(RegStorage reg) {
+ return IsFpReg(reg.IsPair() ? reg.GetLowReg() : reg.GetReg());
+}
+
/* Clobber all regs that might be used by an external C call */
void X86Mir2Lir::ClobberCallerSave() {
Clobber(rAX);
@@ -391,13 +397,13 @@
RegLocation X86Mir2Lir::GetReturnWideAlt() {
RegLocation res = LocCReturnWide();
- CHECK(res.reg.GetReg() == rAX);
+ CHECK(res.reg.GetLowReg() == rAX);
CHECK(res.reg.GetHighReg() == rDX);
Clobber(rAX);
Clobber(rDX);
MarkInUse(rAX);
MarkInUse(rDX);
- MarkPair(res.reg.GetReg(), res.reg.GetHighReg());
+ MarkPair(res.reg.GetLowReg(), res.reg.GetHighReg());
return res;
}
@@ -425,30 +431,68 @@
FreeTemp(rX86_ARG3);
}
+bool X86Mir2Lir::ProvidesFullMemoryBarrier(X86OpCode opcode) {
+ switch (opcode) {
+ case kX86LockCmpxchgMR:
+ case kX86LockCmpxchgAR:
+ case kX86LockCmpxchg8bM:
+ case kX86LockCmpxchg8bA:
+ case kX86XchgMR:
+ case kX86Mfence:
+ // Atomic memory instructions provide a full barrier.
+ return true;
+ default:
+ break;
+ }
+
+ // Be conservative if we cannot prove that the instruction provides a full barrier.
+ return false;
+}
+
void X86Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
#if ANDROID_SMP != 0
- // TODO: optimize fences
- NewLIR0(kX86Mfence);
+ // Start off by using the last LIR as the barrier. If it is not sufficient, we will update it below.
+ LIR* mem_barrier = last_lir_insn_;
+
+ /*
+ * According to the JSR-133 Cookbook, on x86 only StoreLoad barriers need a memory fence.
+ * All other barriers (LoadLoad, LoadStore, StoreStore) are no-ops due to the x86 memory
+ * model, so for those cases all we need to ensure is that a scheduling barrier is in place.
+ */
+ if (barrier_kind == kStoreLoad) {
+ // If no LIR already exists that can be used as a barrier, then generate an mfence.
+ if (mem_barrier == nullptr) {
+ mem_barrier = NewLIR0(kX86Mfence);
+ }
+
+ // If the last instruction does not provide a full barrier, then insert an mfence.
+ if (!ProvidesFullMemoryBarrier(static_cast<X86OpCode>(mem_barrier->opcode))) {
+ mem_barrier = NewLIR0(kX86Mfence);
+ }
+ }
+
+ // Now ensure that a scheduling barrier is in place.
+ if (mem_barrier == nullptr) {
+ GenBarrier();
+ } else {
+ // Mark as a scheduling barrier.
+ DCHECK(!mem_barrier->flags.use_def_invalid);
+ mem_barrier->u.m.def_mask = ENCODE_ALL;
+ }
#endif
}
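// A sketch of the mapping implemented above (per the JSR-133 Cookbook's x86
// recipe; illustrative only):
//   LoadLoad / LoadStore / StoreStore -> scheduling barrier only (no code)
//   StoreLoad                         -> mfence, unless the preceding LIR is
//                                        already a full barrier (a locked
//                                        instruction, xchg, or mfence itself)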
// Alloc a pair of core registers, or a double.
RegStorage X86Mir2Lir::AllocTypedTempWide(bool fp_hint, int reg_class) {
- int high_reg;
- int low_reg;
-
if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
- low_reg = AllocTempDouble();
- high_reg = low_reg; // only one allocated!
- // TODO: take advantage of 64-bit notation.
- return RegStorage(RegStorage::k64BitPair, low_reg, high_reg);
+ return AllocTempDouble();
}
- low_reg = AllocTemp();
- high_reg = AllocTemp();
- return RegStorage(RegStorage::k64BitPair, low_reg, high_reg);
+ RegStorage low_reg = AllocTemp();
+ RegStorage high_reg = AllocTemp();
+ return RegStorage::MakeRegPair(low_reg, high_reg);
}
-int X86Mir2Lir::AllocTypedTemp(bool fp_hint, int reg_class) {
+RegStorage X86Mir2Lir::AllocTypedTemp(bool fp_hint, int reg_class) {
if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
return AllocTempFloat();
}
@@ -486,13 +530,18 @@
}
}
-void X86Mir2Lir::FreeRegLocTemps(RegLocation rl_keep,
- RegLocation rl_free) {
- if ((rl_free.reg.GetReg() != rl_keep.reg.GetReg()) && (rl_free.reg.GetReg() != rl_keep.reg.GetHighReg()) &&
- (rl_free.reg.GetHighReg() != rl_keep.reg.GetReg()) && (rl_free.reg.GetHighReg() != rl_keep.reg.GetHighReg())) {
+void X86Mir2Lir::FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free) {
+ DCHECK(rl_keep.wide);
+ DCHECK(rl_free.wide);
+ int free_low = rl_free.reg.GetLowReg();
+ int free_high = rl_free.reg.GetHighReg();
+ int keep_low = rl_keep.reg.GetLowReg();
+ int keep_high = rl_keep.reg.GetHighReg();
+ if ((free_low != keep_low) && (free_low != keep_high) &&
+ (free_high != keep_low) && (free_high != keep_high)) {
// No overlap, free both
- FreeTemp(rl_free.reg.GetReg());
- FreeTemp(rl_free.reg.GetHighReg());
+ FreeTemp(free_low);
+ FreeTemp(free_high);
}
}
@@ -505,7 +554,7 @@
int offset = frame_size_ - (4 * num_core_spills_);
for (int reg = 0; mask; mask >>= 1, reg++) {
if (mask & 0x1) {
- StoreWordDisp(rX86_SP, offset, reg);
+ StoreWordDisp(rs_rX86_SP, offset, RegStorage::Solo32(reg));
offset += 4;
}
}
@@ -520,7 +569,7 @@
int offset = frame_size_ - (4 * num_core_spills_);
for (int reg = 0; mask; mask >>= 1, reg++) {
if (mask & 0x1) {
- LoadWordDisp(rX86_SP, offset, reg);
+ LoadWordDisp(rs_rX86_SP, offset, RegStorage::Solo32(reg));
offset += 4;
}
}
@@ -552,9 +601,9 @@
}
// Not used in x86
-int X86Mir2Lir::LoadHelper(ThreadOffset offset) {
+RegStorage X86Mir2Lir::LoadHelper(ThreadOffset offset) {
LOG(FATAL) << "Unexpected use of LoadHelper in x86";
- return INVALID_REG;
+ return RegStorage::InvalidReg();
}
LIR* X86Mir2Lir::CheckSuspendUsingLoad() {
@@ -606,7 +655,7 @@
loc.vec_len = kVectorLength8;
// TODO: use k64BitVector
loc.reg = RegStorage(RegStorage::k64BitPair, info_lo->reg, info_lo->reg);
- DCHECK(IsFpReg(loc.reg.GetReg()));
+ DCHECK(IsFpReg(loc.reg.GetLowReg()));
return loc;
}
// We can't easily reuse; clobber and free any overlaps.
@@ -638,8 +687,8 @@
// Can reuse - update the register usage info
loc.reg = RegStorage(RegStorage::k64BitPair, info_lo->reg, info_hi->reg);
loc.location = kLocPhysReg;
- MarkPair(loc.reg.GetReg(), loc.reg.GetHighReg());
- DCHECK(!IsFpReg(loc.reg.GetReg()) || ((loc.reg.GetReg() & 0x1) == 0));
+ MarkPair(loc.reg.GetLowReg(), loc.reg.GetHighReg());
+ DCHECK(!IsFpReg(loc.reg.GetLowReg()) || ((loc.reg.GetLowReg() & 0x1) == 0));
return loc;
}
// Can't easily reuse - clobber and free any overlaps
@@ -663,44 +712,37 @@
// TODO: Reunify with common code after 'pair mess' has been fixed
RegLocation X86Mir2Lir::EvalLocWide(RegLocation loc, int reg_class, bool update) {
DCHECK(loc.wide);
- int32_t low_reg;
- int32_t high_reg;
loc = UpdateLocWide(loc);
/* If it is already in a register, we can assume proper form. Is it the right reg class? */
if (loc.location == kLocPhysReg) {
- DCHECK_EQ(IsFpReg(loc.reg.GetReg()), loc.IsVectorScalar());
- if (!RegClassMatches(reg_class, loc.reg.GetReg())) {
+ DCHECK_EQ(IsFpReg(loc.reg.GetLowReg()), loc.IsVectorScalar());
+ if (!RegClassMatches(reg_class, loc.reg)) {
/* It is the wrong register class. Reallocate and copy. */
- if (!IsFpReg(loc.reg.GetReg())) {
+ if (!IsFpReg(loc.reg.GetLowReg())) {
// We want this in a FP reg, and it is in core registers.
DCHECK(reg_class != kCoreReg);
// Allocate this into any FP reg, and mark it with the right size.
- low_reg = AllocTypedTemp(true, reg_class);
- OpVectorRegCopyWide(low_reg, loc.reg.GetReg(), loc.reg.GetHighReg());
- CopyRegInfo(low_reg, loc.reg.GetReg());
- Clobber(loc.reg.GetReg());
- Clobber(loc.reg.GetHighReg());
+ int32_t low_reg = AllocTypedTemp(true, reg_class).GetReg();
+ OpVectorRegCopyWide(low_reg, loc.reg.GetLowReg(), loc.reg.GetHighReg());
+ CopyRegInfo(low_reg, loc.reg.GetLowReg());
+ Clobber(loc.reg);
loc.reg.SetReg(low_reg);
loc.reg.SetHighReg(low_reg); // Play nice with existing code.
loc.vec_len = kVectorLength8;
} else {
// The value is in a FP register, and we want it in a pair of core registers.
DCHECK_EQ(reg_class, kCoreReg);
- DCHECK_EQ(loc.reg.GetReg(), loc.reg.GetHighReg());
+ DCHECK_EQ(loc.reg.GetLowReg(), loc.reg.GetHighReg());
RegStorage new_regs = AllocTypedTempWide(false, kCoreReg); // Force to core registers.
- low_reg = new_regs.GetReg();
- high_reg = new_regs.GetHighReg();
- DCHECK_NE(low_reg, high_reg);
- OpRegCopyWide(low_reg, high_reg, loc.reg.GetReg(), loc.reg.GetHighReg());
- CopyRegInfo(low_reg, loc.reg.GetReg());
- CopyRegInfo(high_reg, loc.reg.GetHighReg());
- Clobber(loc.reg.GetReg());
- Clobber(loc.reg.GetHighReg());
+ OpRegCopyWide(new_regs, loc.reg);
+ CopyRegInfo(new_regs.GetLowReg(), loc.reg.GetLowReg());
+ CopyRegInfo(new_regs.GetHighReg(), loc.reg.GetHighReg());
+ Clobber(loc.reg);
loc.reg = new_regs;
- MarkPair(loc.reg.GetReg(), loc.reg.GetHighReg());
- DCHECK(!IsFpReg(loc.reg.GetReg()) || ((loc.reg.GetReg() & 0x1) == 0));
+ MarkPair(loc.reg.GetLowReg(), loc.reg.GetHighReg());
+ DCHECK(!IsFpReg(loc.reg.GetLowReg()) || ((loc.reg.GetLowReg() & 0x1) == 0));
}
}
return loc;
@@ -712,17 +754,17 @@
loc.reg = AllocTypedTempWide(loc.fp, reg_class);
// FIXME: take advantage of RegStorage notation.
- if (loc.reg.GetReg() == loc.reg.GetHighReg()) {
- DCHECK(IsFpReg(loc.reg.GetReg()));
+ if (loc.reg.GetLowReg() == loc.reg.GetHighReg()) {
+ DCHECK(IsFpReg(loc.reg.GetLowReg()));
loc.vec_len = kVectorLength8;
} else {
- MarkPair(loc.reg.GetReg(), loc.reg.GetHighReg());
+ MarkPair(loc.reg.GetLowReg(), loc.reg.GetHighReg());
}
if (update) {
loc.location = kLocPhysReg;
- MarkLive(loc.reg.GetReg(), loc.s_reg_low);
- if (loc.reg.GetReg() != loc.reg.GetHighReg()) {
- MarkLive(loc.reg.GetHighReg(), GetSRegHi(loc.s_reg_low));
+ MarkLive(loc.reg.GetLow(), loc.s_reg_low);
+ if (loc.reg.GetLowReg() != loc.reg.GetHighReg()) {
+ MarkLive(loc.reg.GetHigh(), GetSRegHi(loc.s_reg_low));
}
}
return loc;
@@ -730,21 +772,19 @@
// TODO: Reunify with common code after 'pair mess' has been fixed
RegLocation X86Mir2Lir::EvalLoc(RegLocation loc, int reg_class, bool update) {
- int new_reg;
-
if (loc.wide)
return EvalLocWide(loc, reg_class, update);
loc = UpdateLoc(loc);
if (loc.location == kLocPhysReg) {
- if (!RegClassMatches(reg_class, loc.reg.GetReg())) {
+ if (!RegClassMatches(reg_class, loc.reg)) {
/* Wrong register class. Realloc, copy and transfer ownership. */
- new_reg = AllocTypedTemp(loc.fp, reg_class);
- OpRegCopy(new_reg, loc.reg.GetReg());
- CopyRegInfo(new_reg, loc.reg.GetReg());
- Clobber(loc.reg.GetReg());
- loc.reg.SetReg(new_reg);
+ RegStorage new_reg = AllocTypedTemp(loc.fp, reg_class);
+ OpRegCopy(new_reg, loc.reg);
+ CopyRegInfo(new_reg, loc.reg);
+ Clobber(loc.reg);
+ loc.reg = new_reg;
if (IsFpReg(loc.reg.GetReg()) && reg_class != kCoreReg)
loc.vec_len = kVectorLength4;
}
@@ -753,32 +793,34 @@
DCHECK_NE(loc.s_reg_low, INVALID_SREG);
- loc.reg = RegStorage(RegStorage::k32BitSolo, AllocTypedTemp(loc.fp, reg_class));
+ loc.reg = AllocTypedTemp(loc.fp, reg_class);
if (IsFpReg(loc.reg.GetReg()) && reg_class != kCoreReg)
loc.vec_len = kVectorLength4;
if (update) {
loc.location = kLocPhysReg;
- MarkLive(loc.reg.GetReg(), loc.s_reg_low);
+ MarkLive(loc.reg, loc.s_reg_low);
}
return loc;
}
-int X86Mir2Lir::AllocTempDouble() {
+RegStorage X86Mir2Lir::AllocTempDouble() {
// We really don't need a pair of registers.
- return AllocTempFloat();
+ // FIXME - update to double
+ int reg = AllocTempFloat().GetReg();
+ return RegStorage(RegStorage::k64BitPair, reg, reg);
}
// TODO: Reunify with common code after 'pair mess' has been fixed
void X86Mir2Lir::ResetDefLocWide(RegLocation rl) {
DCHECK(rl.wide);
- RegisterInfo* p_low = IsTemp(rl.reg.GetReg());
- if (IsFpReg(rl.reg.GetReg())) {
+ RegisterInfo* p_low = IsTemp(rl.reg.GetLowReg());
+ if (IsFpReg(rl.reg.GetLowReg())) {
// We are using only the low register.
if (p_low && !(cu_->disable_opt & (1 << kSuppressLoads))) {
NullifyRange(p_low->def_start, p_low->def_end, p_low->s_reg, rl.s_reg_low);
}
- ResetDef(rl.reg.GetReg());
+ ResetDef(rl.reg.GetLowReg());
} else {
RegisterInfo* p_high = IsTemp(rl.reg.GetHighReg());
if (p_low && !(cu_->disable_opt & (1 << kSuppressLoads))) {
@@ -788,7 +830,7 @@
if (p_high && !(cu_->disable_opt & (1 << kSuppressLoads))) {
DCHECK(p_high->pair);
}
- ResetDef(rl.reg.GetReg());
+ ResetDef(rl.reg.GetLowReg());
ResetDef(rl.reg.GetHighReg());
}
}
@@ -800,13 +842,13 @@
(rl_dest.location == kLocCompilerTemp)) {
int32_t val_lo = Low32Bits(value);
int32_t val_hi = High32Bits(value);
- int rBase = TargetReg(kSp);
+ int r_base = TargetReg(kSp).GetReg();
int displacement = SRegOffset(rl_dest.s_reg_low);
- LIR * store = NewLIR3(kX86Mov32MI, rBase, displacement + LOWORD_OFFSET, val_lo);
+ LIR * store = NewLIR3(kX86Mov32MI, r_base, displacement + LOWORD_OFFSET, val_lo);
AnnotateDalvikRegAccess(store, (displacement + LOWORD_OFFSET) >> 2,
false /* is_load */, true /* is64bit */);
- store = NewLIR3(kX86Mov32MI, rBase, displacement + HIWORD_OFFSET, val_hi);
+ store = NewLIR3(kX86Mov32MI, r_base, displacement + HIWORD_OFFSET, val_hi);
AnnotateDalvikRegAccess(store, (displacement + HIWORD_OFFSET) >> 2,
false /* is_load */, true /* is64bit */);
return;
@@ -828,7 +870,7 @@
<< (loc.high_word ? " h" : " ")
<< (loc.home ? " H" : " ")
<< " vec_len: " << loc.vec_len
- << ", low: " << static_cast<int>(loc.reg.GetReg())
+ << ", low: " << static_cast<int>(loc.reg.GetLowReg())
<< ", high: " << static_cast<int>(loc.reg.GetHighReg())
<< ", s_reg: " << loc.s_reg_low
<< ", orig: " << loc.orig_sreg;
@@ -855,7 +897,7 @@
uintptr_t target_method_id_ptr = reinterpret_cast<uintptr_t>(&target_method_id);
// Generate the move instruction with the unique pointer and save index, dex_file, and type.
- LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI, TargetReg(symbolic_reg),
+ LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI, TargetReg(symbolic_reg).GetReg(),
static_cast<int>(target_method_id_ptr), target_method_idx,
WrapPointer(const_cast<DexFile*>(target_dex_file)), type);
AppendLIR(move);
@@ -872,7 +914,7 @@
uintptr_t ptr = reinterpret_cast<uintptr_t>(&id);
// Generate the move instruction with the unique pointer and save index and type.
- LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI, TargetReg(symbolic_reg),
+ LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI, TargetReg(symbolic_reg).GetReg(),
static_cast<int>(ptr), type_idx);
AppendLIR(move);
class_type_address_insns_.Insert(move);
@@ -984,19 +1026,19 @@
RegLocation rl_dest = InlineTarget(info);
// Is the string non-NULL?
- LoadValueDirectFixed(rl_obj, rDX);
- GenNullCheck(rDX, info->opt_flags);
+ LoadValueDirectFixed(rl_obj, rs_rDX);
+ GenNullCheck(rs_rDX, info->opt_flags);
info->opt_flags |= MIR_IGNORE_NULL_CHECK; // Record that we've null checked.
// Does the character fit in 16 bits?
LIR* launchpad_branch = nullptr;
if (rl_char.is_const) {
// We need the value in EAX.
- LoadConstantNoClobber(rAX, char_value);
+ LoadConstantNoClobber(rs_rAX, char_value);
} else {
// Character is not a constant; compare at runtime.
- LoadValueDirectFixed(rl_char, rAX);
- launchpad_branch = OpCmpImmBranch(kCondGt, rAX, 0xFFFF, nullptr);
+ LoadValueDirectFixed(rl_char, rs_rAX);
+ launchpad_branch = OpCmpImmBranch(kCondGt, rs_rAX, 0xFFFF, nullptr);
}
// From here down, we know that we are looking for a char that fits in 16 bits.
@@ -1017,7 +1059,7 @@
NewLIR1(kX86Push32R, rDI);
// Compute the number of words to search in to rCX.
- LoadWordDisp(rDX, count_offset, rCX);
+ LoadWordDisp(rs_rDX, count_offset, rs_rCX);
LIR *length_compare = nullptr;
int start_value = 0;
if (zero_based) {
@@ -1031,23 +1073,23 @@
start_value = std::max(start_value, 0);
// Is the start > count?
- length_compare = OpCmpImmBranch(kCondLe, rCX, start_value, nullptr);
+ length_compare = OpCmpImmBranch(kCondLe, rs_rCX, start_value, nullptr);
if (start_value != 0) {
- OpRegImm(kOpSub, rCX, start_value);
+ OpRegImm(kOpSub, rs_rCX, start_value);
}
} else {
// Runtime start index.
rl_start = UpdateLoc(rl_start);
if (rl_start.location == kLocPhysReg) {
- length_compare = OpCmpBranch(kCondLe, rCX, rl_start.reg.GetReg(), nullptr);
- OpRegReg(kOpSub, rCX, rl_start.reg.GetReg());
+ length_compare = OpCmpBranch(kCondLe, rs_rCX, rl_start.reg, nullptr);
+ OpRegReg(kOpSub, rs_rCX, rl_start.reg);
} else {
// Compare to memory to avoid a register load. Handle pushed EDI.
int displacement = SRegOffset(rl_start.s_reg_low) + sizeof(uint32_t);
- OpRegMem(kOpCmp, rCX, rX86_SP, displacement);
+ OpRegMem(kOpCmp, rs_rCX, rs_rX86_SP, displacement);
length_compare = NewLIR2(kX86Jcc8, 0, kX86CondLe);
- OpRegMem(kOpSub, rCX, rX86_SP, displacement);
+ OpRegMem(kOpSub, rs_rCX, rs_rX86_SP, displacement);
}
}
}
@@ -1057,14 +1099,14 @@
// Load the address of the string into EBX.
// The string starts at VALUE(String) + 2 * OFFSET(String) + DATA_OFFSET.
- LoadWordDisp(rDX, value_offset, rDI);
- LoadWordDisp(rDX, offset_offset, rBX);
- OpLea(rBX, rDI, rBX, 1, data_offset);
+ LoadWordDisp(rs_rDX, value_offset, rs_rDI);
+ LoadWordDisp(rs_rDX, offset_offset, rs_rBX);
+ OpLea(rs_rBX, rs_rDI, rs_rBX, 1, data_offset);
// Now compute into EDI where the search will start.
if (zero_based || rl_start.is_const) {
if (start_value == 0) {
- OpRegCopy(rDI, rBX);
+ OpRegCopy(rs_rDI, rs_rBX);
} else {
NewLIR3(kX86Lea32RM, rDI, rBX, 2 * start_value);
}
@@ -1073,17 +1115,17 @@
if (rl_start.reg.GetReg() == rDI) {
// We have a slight problem here. We are already using RDI!
// Grab the value from the stack.
- LoadWordDisp(rX86_SP, 0, rDX);
- OpLea(rDI, rBX, rDX, 1, 0);
+ LoadWordDisp(rs_rX86_SP, 0, rs_rDX);
+ OpLea(rs_rDI, rs_rBX, rs_rDX, 1, 0);
} else {
- OpLea(rDI, rBX, rl_start.reg.GetReg(), 1, 0);
+ OpLea(rs_rDI, rs_rBX, rl_start.reg, 1, 0);
}
} else {
- OpRegCopy(rDI, rBX);
+ OpRegCopy(rs_rDI, rs_rBX);
// Load the start index from stack, remembering that we pushed EDI.
int displacement = SRegOffset(rl_start.s_reg_low) + sizeof(uint32_t);
- LoadWordDisp(rX86_SP, displacement, rDX);
- OpLea(rDI, rBX, rDX, 1, 0);
+ LoadWordDisp(rs_rX86_SP, displacement, rs_rDX);
+ OpLea(rs_rDI, rs_rBX, rs_rDX, 1, 0);
}
}
@@ -1096,8 +1138,8 @@
// yes, we matched. Compute the index of the result.
// index = ((curr_ptr - orig_ptr) / 2) - 1.
- OpRegReg(kOpSub, rDI, rBX);
- OpRegImm(kOpAsr, rDI, 1);
+ OpRegReg(kOpSub, rs_rDI, rs_rBX);
+ OpRegImm(kOpAsr, rs_rDI, 1);
NewLIR3(kX86Lea32RM, rl_return.reg.GetReg(), rDI, -1);
LIR *all_done = NewLIR1(kX86Jmp8, 0);
@@ -1105,7 +1147,7 @@
LIR *not_found = NewLIR0(kPseudoTargetLabel);
length_compare->target = not_found;
failed_branch->target = not_found;
- LoadConstantNoClobber(rl_return.reg.GetReg(), -1);
+ LoadConstantNoClobber(rl_return.reg, -1);
// And join up at the end.
all_done->target = NewLIR0(kPseudoTargetLabel);
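The inlined indexOf above leaves EDI one 16-bit char past the match after repne scasw, which is why the result is computed as ((EDI - EBX) >> 1) - 1: the byte distance halved, minus one. A small sketch of that arithmetic, using hypothetical pointers in place of the registers:

#include <cassert>
#include <cstdint>

// curr points one element past the matched char, so the match index is the
// element distance minus one (equivalent to the byte distance >> 1, - 1).
int IndexAfterScasw(const uint16_t* start, const uint16_t* curr) {
  return static_cast<int>(curr - start) - 1;
}

int main() {
  uint16_t chars[] = {'a', 'b', 'c'};
  assert(IndexAfterScasw(chars, chars + 2) == 1);  // matched 'b' at index 1
  return 0;
}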
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index bd82bf6..013c40b 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -23,26 +23,26 @@
/* This file contains codegen for the X86 ISA */
-LIR* X86Mir2Lir::OpFpRegCopy(int r_dest, int r_src) {
+LIR* X86Mir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
int opcode;
/* must be both DOUBLE or both not DOUBLE */
- DCHECK_EQ(X86_DOUBLEREG(r_dest), X86_DOUBLEREG(r_src));
- if (X86_DOUBLEREG(r_dest)) {
+ DCHECK_EQ(X86_DOUBLEREG(r_dest.GetReg()), X86_DOUBLEREG(r_src.GetReg()));
+ if (X86_DOUBLEREG(r_dest.GetReg())) {
opcode = kX86MovsdRR;
} else {
- if (X86_SINGLEREG(r_dest)) {
- if (X86_SINGLEREG(r_src)) {
+ if (X86_SINGLEREG(r_dest.GetReg())) {
+ if (X86_SINGLEREG(r_src.GetReg())) {
opcode = kX86MovssRR;
} else { // Fpr <- Gpr
opcode = kX86MovdxrRR;
}
} else { // Gpr <- Fpr
- DCHECK(X86_SINGLEREG(r_src));
+ DCHECK(X86_SINGLEREG(r_src.GetReg()));
opcode = kX86MovdrxRR;
}
}
DCHECK_NE((EncodingMap[opcode].flags & IS_BINARY_OP), 0ULL);
- LIR* res = RawLIR(current_dalvik_offset_, opcode, r_dest, r_src);
+ LIR* res = RawLIR(current_dalvik_offset_, opcode, r_dest.GetReg(), r_src.GetReg());
if (r_dest == r_src) {
res->flags.is_nop = true;
}
@@ -74,26 +74,26 @@
* 1) r_dest is freshly returned from AllocTemp or
* 2) The codegen is under fixed register usage
*/
-LIR* X86Mir2Lir::LoadConstantNoClobber(int r_dest, int value) {
- int r_dest_save = r_dest;
- if (X86_FPREG(r_dest)) {
+LIR* X86Mir2Lir::LoadConstantNoClobber(RegStorage r_dest, int value) {
+ RegStorage r_dest_save = r_dest;
+ if (X86_FPREG(r_dest.GetReg())) {
if (value == 0) {
- return NewLIR2(kX86XorpsRR, r_dest, r_dest);
+ return NewLIR2(kX86XorpsRR, r_dest.GetReg(), r_dest.GetReg());
}
- DCHECK(X86_SINGLEREG(r_dest));
+ DCHECK(X86_SINGLEREG(r_dest.GetReg()));
r_dest = AllocTemp();
}
LIR *res;
if (value == 0) {
- res = NewLIR2(kX86Xor32RR, r_dest, r_dest);
+ res = NewLIR2(kX86Xor32RR, r_dest.GetReg(), r_dest.GetReg());
} else {
// Note: there is no byte immediate form of a 32-bit immediate move.
- res = NewLIR2(kX86Mov32RI, r_dest, value);
+ res = NewLIR2(kX86Mov32RI, r_dest.GetReg(), value);
}
- if (X86_FPREG(r_dest_save)) {
- NewLIR2(kX86MovdxrRR, r_dest_save, r_dest);
+ if (X86_FPREG(r_dest_save.GetReg())) {
+ NewLIR2(kX86MovdxrRR, r_dest_save.GetReg(), r_dest.GetReg());
FreeTemp(r_dest);
}
@@ -113,7 +113,7 @@
return branch;
}
-LIR* X86Mir2Lir::OpReg(OpKind op, int r_dest_src) {
+LIR* X86Mir2Lir::OpReg(OpKind op, RegStorage r_dest_src) {
X86OpCode opcode = kX86Bkpt;
switch (op) {
case kOpNeg: opcode = kX86Neg32R; break;
@@ -123,13 +123,13 @@
default:
LOG(FATAL) << "Bad case in OpReg " << op;
}
- return NewLIR1(opcode, r_dest_src);
+ return NewLIR1(opcode, r_dest_src.GetReg());
}
-LIR* X86Mir2Lir::OpRegImm(OpKind op, int r_dest_src1, int value) {
+LIR* X86Mir2Lir::OpRegImm(OpKind op, RegStorage r_dest_src1, int value) {
X86OpCode opcode = kX86Bkpt;
bool byte_imm = IS_SIMM8(value);
- DCHECK(!X86_FPREG(r_dest_src1));
+ DCHECK(!X86_FPREG(r_dest_src1.GetReg()));
switch (op) {
case kOpLsl: opcode = kX86Sal32RI; break;
case kOpLsr: opcode = kX86Shr32RI; break;
@@ -152,14 +152,14 @@
break;
case kOpMul:
opcode = byte_imm ? kX86Imul32RRI8 : kX86Imul32RRI;
- return NewLIR3(opcode, r_dest_src1, r_dest_src1, value);
+ return NewLIR3(opcode, r_dest_src1.GetReg(), r_dest_src1.GetReg(), value);
default:
LOG(FATAL) << "Bad case in OpRegImm " << op;
}
- return NewLIR2(opcode, r_dest_src1, value);
+ return NewLIR2(opcode, r_dest_src1.GetReg(), value);
}
-LIR* X86Mir2Lir::OpRegReg(OpKind op, int r_dest_src1, int r_src2) {
+LIR* X86Mir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) {
X86OpCode opcode = kX86Nop;
bool src2_must_be_cx = false;
switch (op) {
@@ -192,10 +192,10 @@
case kOpXor: opcode = kX86Xor32RR; break;
case kOp2Byte:
// Use shifts instead of a byte operand if the source can't be byte accessed.
- if (r_src2 >= 4) {
- NewLIR2(kX86Mov32RR, r_dest_src1, r_src2);
- NewLIR2(kX86Sal32RI, r_dest_src1, 24);
- return NewLIR2(kX86Sar32RI, r_dest_src1, 24);
+ if (r_src2.GetReg() >= 4) {
+ NewLIR2(kX86Mov32RR, r_dest_src1.GetReg(), r_src2.GetReg());
+ NewLIR2(kX86Sal32RI, r_dest_src1.GetReg(), 24);
+ return NewLIR2(kX86Sar32RI, r_dest_src1.GetReg(), 24);
} else {
opcode = kX86Movsx8RR;
}
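The kOp2Byte case above falls back to a shift pair because registers numbered 4 and up (esp/ebp/esi/edi in 32-bit mode) have no 8-bit alias usable by movsx. A sketch of why sal 24 followed by sar 24 reproduces the sign extension (assuming the usual arithmetic right shift on signed integers):

#include <cassert>
#include <cstdint>

// sal 24 moves the low byte to the top; sar 24 brings it back while
// replicating the sign bit, matching movsx semantics.
int32_t SignExtend8ViaShifts(uint32_t v) {
  return static_cast<int32_t>(v << 24) >> 24;
}

int main() {
  assert(SignExtend8ViaShifts(0xFFu) == -1);    // 0xFF sign-extends to -1
  assert(SignExtend8ViaShifts(0x17Fu) == 127);  // only the low byte matters
  return 0;
}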
@@ -207,49 +207,49 @@
LOG(FATAL) << "Bad case in OpRegReg " << op;
break;
}
- CHECK(!src2_must_be_cx || r_src2 == rCX);
- return NewLIR2(opcode, r_dest_src1, r_src2);
+ CHECK(!src2_must_be_cx || r_src2.GetReg() == rCX);
+ return NewLIR2(opcode, r_dest_src1.GetReg(), r_src2.GetReg());
}
-LIR* X86Mir2Lir::OpMovRegMem(int r_dest, int r_base, int offset, MoveType move_type) {
- DCHECK(!(X86_FPREG(r_base)));
-
+LIR* X86Mir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type) {
+ DCHECK(!(X86_FPREG(r_base.GetReg())));
X86OpCode opcode = kX86Nop;
+ int dest = r_dest.IsPair() ? r_dest.GetLowReg() : r_dest.GetReg();
switch (move_type) {
case kMov8GP:
- CHECK(!X86_FPREG(r_dest));
+ CHECK(!X86_FPREG(dest));
opcode = kX86Mov8RM;
break;
case kMov16GP:
- CHECK(!X86_FPREG(r_dest));
+ CHECK(!X86_FPREG(dest));
opcode = kX86Mov16RM;
break;
case kMov32GP:
- CHECK(!X86_FPREG(r_dest));
+ CHECK(!X86_FPREG(dest));
opcode = kX86Mov32RM;
break;
case kMov32FP:
- CHECK(X86_FPREG(r_dest));
+ CHECK(X86_FPREG(dest));
opcode = kX86MovssRM;
break;
case kMov64FP:
- CHECK(X86_FPREG(r_dest));
+ CHECK(X86_FPREG(dest));
opcode = kX86MovsdRM;
break;
case kMovU128FP:
- CHECK(X86_FPREG(r_dest));
+ CHECK(X86_FPREG(dest));
opcode = kX86MovupsRM;
break;
case kMovA128FP:
- CHECK(X86_FPREG(r_dest));
+ CHECK(X86_FPREG(dest));
opcode = kX86MovapsRM;
break;
case kMovLo128FP:
- CHECK(X86_FPREG(r_dest));
+ CHECK(X86_FPREG(dest));
opcode = kX86MovlpsRM;
break;
case kMovHi128FP:
- CHECK(X86_FPREG(r_dest));
+ CHECK(X86_FPREG(dest));
opcode = kX86MovhpsRM;
break;
case kMov64GP:
@@ -260,48 +260,49 @@
break;
}
- return NewLIR3(opcode, r_dest, r_base, offset);
+ return NewLIR3(opcode, dest, r_base.GetReg(), offset);
}
-LIR* X86Mir2Lir::OpMovMemReg(int r_base, int offset, int r_src, MoveType move_type) {
- DCHECK(!(X86_FPREG(r_base)));
+LIR* X86Mir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) {
+ DCHECK(!(X86_FPREG(r_base.GetReg())));
+ int src = r_src.IsPair() ? r_src.GetLowReg() : r_src.GetReg();
X86OpCode opcode = kX86Nop;
switch (move_type) {
case kMov8GP:
- CHECK(!X86_FPREG(r_src));
+ CHECK(!X86_FPREG(src));
opcode = kX86Mov8MR;
break;
case kMov16GP:
- CHECK(!X86_FPREG(r_src));
+ CHECK(!X86_FPREG(src));
opcode = kX86Mov16MR;
break;
case kMov32GP:
- CHECK(!X86_FPREG(r_src));
+ CHECK(!X86_FPREG(src));
opcode = kX86Mov32MR;
break;
case kMov32FP:
- CHECK(X86_FPREG(r_src));
+ CHECK(X86_FPREG(src));
opcode = kX86MovssMR;
break;
case kMov64FP:
- CHECK(X86_FPREG(r_src));
+ CHECK(X86_FPREG(src));
opcode = kX86MovsdMR;
break;
case kMovU128FP:
- CHECK(X86_FPREG(r_src));
+ CHECK(X86_FPREG(src));
opcode = kX86MovupsMR;
break;
case kMovA128FP:
- CHECK(X86_FPREG(r_src));
+ CHECK(X86_FPREG(src));
opcode = kX86MovapsMR;
break;
case kMovLo128FP:
- CHECK(X86_FPREG(r_src));
+ CHECK(X86_FPREG(src));
opcode = kX86MovlpsMR;
break;
case kMovHi128FP:
- CHECK(X86_FPREG(r_src));
+ CHECK(X86_FPREG(src));
opcode = kX86MovhpsMR;
break;
case kMov64GP:
@@ -312,17 +313,16 @@
break;
}
- return NewLIR3(opcode, r_base, offset, r_src);
+ return NewLIR3(opcode, r_base.GetReg(), offset, src);
}
-LIR* X86Mir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, int r_dest, int r_src) {
+LIR* X86Mir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) {
// The only conditional reg to reg operation supported is Cmov
DCHECK_EQ(op, kOpCmov);
- return NewLIR3(kX86Cmov32RRC, r_dest, r_src, X86ConditionEncoding(cc));
+ return NewLIR3(kX86Cmov32RRC, r_dest.GetReg(), r_src.GetReg(), X86ConditionEncoding(cc));
}
-LIR* X86Mir2Lir::OpRegMem(OpKind op, int r_dest, int rBase,
- int offset) {
+LIR* X86Mir2Lir::OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset) {
X86OpCode opcode = kX86Nop;
switch (op) {
// X86 binary opcodes
@@ -341,8 +341,8 @@
LOG(FATAL) << "Bad case in OpRegMem " << op;
break;
}
- LIR *l = NewLIR3(opcode, r_dest, rBase, offset);
- if (rBase == rX86_SP) {
+ LIR *l = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), offset);
+ if (r_base == rs_rX86_SP) {
AnnotateDalvikRegAccess(l, offset >> 2, true /* is_load */, false /* is_64bit */);
}
return l;
@@ -372,7 +372,7 @@
return l;
}
-LIR* X86Mir2Lir::OpRegMem(OpKind op, int r_dest, RegLocation rl_value) {
+LIR* X86Mir2Lir::OpRegMem(OpKind op, RegStorage r_dest, RegLocation rl_value) {
DCHECK_NE(rl_value.location, kLocPhysReg);
int displacement = SRegOffset(rl_value.s_reg_low);
X86OpCode opcode = kX86Nop;
@@ -389,24 +389,24 @@
LOG(FATAL) << "Bad case in OpRegMem " << op;
break;
}
- LIR *l = NewLIR3(opcode, r_dest, rX86_SP, displacement);
+ LIR *l = NewLIR3(opcode, r_dest.GetReg(), rX86_SP, displacement);
AnnotateDalvikRegAccess(l, displacement >> 2, true /* is_load */, false /* is_64bit */);
return l;
}
-LIR* X86Mir2Lir::OpRegRegReg(OpKind op, int r_dest, int r_src1,
- int r_src2) {
+LIR* X86Mir2Lir::OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1,
+ RegStorage r_src2) {
if (r_dest != r_src1 && r_dest != r_src2) {
if (op == kOpAdd) { // lea special case, except can't encode rbp as base
if (r_src1 == r_src2) {
OpRegCopy(r_dest, r_src1);
return OpRegImm(kOpLsl, r_dest, 1);
- } else if (r_src1 != rBP) {
- return NewLIR5(kX86Lea32RA, r_dest, r_src1 /* base */,
- r_src2 /* index */, 0 /* scale */, 0 /* disp */);
+ } else if (r_src1 != rs_rBP) {
+ return NewLIR5(kX86Lea32RA, r_dest.GetReg(), r_src1.GetReg() /* base */,
+ r_src2.GetReg() /* index */, 0 /* scale */, 0 /* disp */);
} else {
- return NewLIR5(kX86Lea32RA, r_dest, r_src2 /* base */,
- r_src1 /* index */, 0 /* scale */, 0 /* disp */);
+ return NewLIR5(kX86Lea32RA, r_dest.GetReg(), r_src2.GetReg() /* base */,
+ r_src1.GetReg() /* index */, 0 /* scale */, 0 /* disp */);
}
} else {
OpRegCopy(r_dest, r_src1);
@@ -422,7 +422,7 @@
break;
case kOpSbc:
case kOpLsl: case kOpLsr: case kOpAsr: case kOpRor: {
- int t_reg = AllocTemp();
+ RegStorage t_reg = AllocTemp();
OpRegCopy(t_reg, r_src1);
OpRegReg(op, t_reg, r_src2);
LIR* res = OpRegCopy(r_dest, t_reg);
@@ -442,25 +442,24 @@
}
}
-LIR* X86Mir2Lir::OpRegRegImm(OpKind op, int r_dest, int r_src,
- int value) {
+LIR* X86Mir2Lir::OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src, int value) {
if (op == kOpMul) {
X86OpCode opcode = IS_SIMM8(value) ? kX86Imul32RRI8 : kX86Imul32RRI;
- return NewLIR3(opcode, r_dest, r_src, value);
+ return NewLIR3(opcode, r_dest.GetReg(), r_src.GetReg(), value);
} else if (op == kOpAnd) {
- if (value == 0xFF && r_src < 4) {
- return NewLIR2(kX86Movzx8RR, r_dest, r_src);
+ if (value == 0xFF && r_src.GetReg() < 4) {
+ return NewLIR2(kX86Movzx8RR, r_dest.GetReg(), r_src.GetReg());
} else if (value == 0xFFFF) {
- return NewLIR2(kX86Movzx16RR, r_dest, r_src);
+ return NewLIR2(kX86Movzx16RR, r_dest.GetReg(), r_src.GetReg());
}
}
if (r_dest != r_src) {
if (false && op == kOpLsl && value >= 0 && value <= 3) { // lea shift special case
// TODO: fix bug in LEA encoding when disp == 0
- return NewLIR5(kX86Lea32RA, r_dest, r5sib_no_base /* base */,
- r_src /* index */, value /* scale */, 0 /* disp */);
+ return NewLIR5(kX86Lea32RA, r_dest.GetReg(), r5sib_no_base /* base */,
+ r_src.GetReg() /* index */, value /* scale */, 0 /* disp */);
} else if (op == kOpAdd) { // lea add special case
- return NewLIR5(kX86Lea32RA, r_dest, r_src /* base */,
+ return NewLIR5(kX86Lea32RA, r_dest.GetReg(), r_src.GetReg() /* base */,
r4sib_no_index /* index */, 0 /* scale */, value /* disp */);
}
OpRegCopy(r_dest, r_src);
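Both lea special cases above (and the one in OpRegRegReg earlier) rely on lea computing base + index*scale + disp as pure address arithmetic, with no memory access and no flag updates, which makes it a non-destructive three-operand add. A sketch of that effective-address computation with illustrative values:

#include <cassert>
#include <cstdint>

// What kX86Lea32RA encodes: dest = base + (index << scale) + disp.
uint32_t Lea(uint32_t base, uint32_t index, int scale, int32_t disp) {
  return base + (index << scale) + disp;
}

int main() {
  // r_dest = r_src + value (the kOpAdd special case in OpRegRegImm):
  assert(Lea(/*base=*/100, /*index=*/0, /*scale=*/0, /*disp=*/7) == 107);
  // r_dest = r_src1 + r_src2 (the register-register add in OpRegRegReg):
  assert(Lea(/*base=*/100, /*index=*/23, /*scale=*/0, /*disp=*/0) == 123);
  return 0;
}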
@@ -480,7 +479,7 @@
return NewLIR1(opcode, thread_offset.Int32Value());
}
-LIR* X86Mir2Lir::OpMem(OpKind op, int rBase, int disp) {
+LIR* X86Mir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
X86OpCode opcode = kX86Bkpt;
switch (op) {
case kOpBlx: opcode = kX86CallM; break;
@@ -488,18 +487,19 @@
LOG(FATAL) << "Bad opcode: " << op;
break;
}
- return NewLIR2(opcode, rBase, disp);
+ return NewLIR2(opcode, r_base.GetReg(), disp);
}
-LIR* X86Mir2Lir::LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value) {
+LIR* X86Mir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
int32_t val_lo = Low32Bits(value);
int32_t val_hi = High32Bits(value);
+ int32_t low_reg_val = r_dest.IsPair() ? r_dest.GetLowReg() : r_dest.GetReg();
LIR *res;
- if (X86_FPREG(r_dest_lo)) {
- DCHECK(X86_FPREG(r_dest_hi)); // ignore r_dest_hi
- DCHECK_EQ(r_dest_lo, r_dest_hi);
+ bool is_fp = X86_FPREG(low_reg_val);
+ // TODO: clean this up once we fully recognize 64-bit storage containers.
+ if (is_fp) {
if (value == 0) {
- return NewLIR2(kX86XorpsRR, r_dest_lo, r_dest_lo);
+ return NewLIR2(kX86XorpsRR, low_reg_val, low_reg_val);
} else if (base_of_code_ != nullptr) {
// We will load the value from the literal area.
LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
@@ -515,45 +515,49 @@
// We don't know the proper offset for the value, so pick one that will force
// 4 byte offset. We will fix this up in the assembler later to have the right
// value.
- res = LoadBaseDisp(rl_method.reg.GetReg(), 256 /* bogus */, r_dest_lo, kDouble, INVALID_SREG);
+ res = LoadBaseDisp(rl_method.reg, 256 /* bogus */, RegStorage::Solo64(low_reg_val),
+ kDouble, INVALID_SREG);
res->target = data_target;
res->flags.fixup = kFixupLoad;
SetMemRefType(res, true, kLiteral);
store_method_addr_used_ = true;
} else {
if (val_lo == 0) {
- res = NewLIR2(kX86XorpsRR, r_dest_lo, r_dest_lo);
+ res = NewLIR2(kX86XorpsRR, low_reg_val, low_reg_val);
} else {
- res = LoadConstantNoClobber(r_dest_lo, val_lo);
+ res = LoadConstantNoClobber(RegStorage::Solo32(low_reg_val), val_lo);
}
if (val_hi != 0) {
- r_dest_hi = AllocTempDouble();
- LoadConstantNoClobber(r_dest_hi, val_hi);
- NewLIR2(kX86PunpckldqRR, r_dest_lo, r_dest_hi);
+ // FIXME: clean up when AllocTempDouble no longer returns a pair.
+ RegStorage r_dest_hi = AllocTempDouble();
+ LoadConstantNoClobber(RegStorage::Solo32(r_dest_hi.GetLowReg()), val_hi);
+ NewLIR2(kX86PunpckldqRR, low_reg_val, r_dest_hi.GetLowReg());
FreeTemp(r_dest_hi);
}
}
} else {
- res = LoadConstantNoClobber(r_dest_lo, val_lo);
- LoadConstantNoClobber(r_dest_hi, val_hi);
+ res = LoadConstantNoClobber(r_dest.GetLow(), val_lo);
+ LoadConstantNoClobber(r_dest.GetHigh(), val_hi);
}
return res;
}
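The FP path of LoadConstantWide above assembles a 64-bit constant from two 32-bit halves: the low half goes into the destination xmm, the high half into a scratch xmm, and punpckldq interleaves them. A sketch of the resulting bit pattern, modeled with plain integers:

#include <cassert>
#include <cstdint>

// punpckldq dst, src : dst[31:0] is kept, dst[63:32] = src[31:0].
uint64_t Punpckldq(uint32_t lo_lane, uint32_t hi_lane) {
  return (static_cast<uint64_t>(hi_lane) << 32) | lo_lane;
}

int main() {
  int64_t value = INT64_C(0x123456789ABCDEF0);
  uint32_t val_lo = static_cast<uint32_t>(value);        // Low32Bits
  uint32_t val_hi = static_cast<uint32_t>(value >> 32);  // High32Bits
  assert(Punpckldq(val_lo, val_hi) == static_cast<uint64_t>(value));
  return 0;
}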
-LIR* X86Mir2Lir::LoadBaseIndexedDisp(int rBase, int r_index, int scale,
- int displacement, int r_dest, int r_dest_hi, OpSize size,
- int s_reg) {
+// FIXME: don't split r_dest into two storage units.
+LIR* X86Mir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
+ int displacement, RegStorage r_dest, RegStorage r_dest_hi,
+ OpSize size, int s_reg) {
LIR *load = NULL;
LIR *load2 = NULL;
- bool is_array = r_index != INVALID_REG;
+ bool is_array = r_index.Valid();
bool pair = false;
bool is64bit = false;
X86OpCode opcode = kX86Nop;
switch (size) {
case kLong:
case kDouble:
+ // TODO: use regstorage attributes here.
is64bit = true;
- if (X86_FPREG(r_dest)) {
+ if (X86_FPREG(r_dest.GetReg())) {
opcode = is_array ? kX86MovsdRA : kX86MovsdRM;
} else {
pair = true;
@@ -565,9 +569,9 @@
case kWord:
case kSingle:
opcode = is_array ? kX86Mov32RA : kX86Mov32RM;
- if (X86_FPREG(r_dest)) {
+ if (X86_FPREG(r_dest.GetReg())) {
opcode = is_array ? kX86MovssRA : kX86MovssRM;
- DCHECK(X86_SINGLEREG(r_dest));
+ DCHECK(X86_SINGLEREG(r_dest.GetReg()));
}
DCHECK_EQ((displacement & 0x3), 0);
break;
@@ -591,19 +595,19 @@
if (!is_array) {
if (!pair) {
- load = NewLIR3(opcode, r_dest, rBase, displacement + LOWORD_OFFSET);
+ load = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), displacement + LOWORD_OFFSET);
} else {
- if (rBase == r_dest) {
- load2 = NewLIR3(opcode, r_dest_hi, rBase,
+ if (r_base == r_dest) {
+ load2 = NewLIR3(opcode, r_dest_hi.GetReg(), r_base.GetReg(),
displacement + HIWORD_OFFSET);
- load = NewLIR3(opcode, r_dest, rBase, displacement + LOWORD_OFFSET);
+ load = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), displacement + LOWORD_OFFSET);
} else {
- load = NewLIR3(opcode, r_dest, rBase, displacement + LOWORD_OFFSET);
- load2 = NewLIR3(opcode, r_dest_hi, rBase,
+ load = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), displacement + LOWORD_OFFSET);
+ load2 = NewLIR3(opcode, r_dest_hi.GetReg(), r_base.GetReg(),
displacement + HIWORD_OFFSET);
}
}
- if (rBase == rX86_SP) {
+ if (r_base == rs_rX86_SP) {
AnnotateDalvikRegAccess(load, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
true /* is_load */, is64bit);
if (pair) {
@@ -613,39 +617,39 @@
}
} else {
if (!pair) {
- load = NewLIR5(opcode, r_dest, rBase, r_index, scale,
+ load = NewLIR5(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg(), scale,
displacement + LOWORD_OFFSET);
} else {
- if (rBase == r_dest) {
+ if (r_base == r_dest) {
if (r_dest_hi == r_index) {
// We can't use either register for the first load.
- int temp = AllocTemp();
- load2 = NewLIR5(opcode, temp, rBase, r_index, scale,
+ RegStorage temp = AllocTemp();
+ load2 = NewLIR5(opcode, temp.GetReg(), r_base.GetReg(), r_index.GetReg(), scale,
displacement + HIWORD_OFFSET);
- load = NewLIR5(opcode, r_dest, rBase, r_index, scale,
+ load = NewLIR5(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg(), scale,
displacement + LOWORD_OFFSET);
OpRegCopy(r_dest_hi, temp);
FreeTemp(temp);
} else {
- load2 = NewLIR5(opcode, r_dest_hi, rBase, r_index, scale,
+ load2 = NewLIR5(opcode, r_dest_hi.GetReg(), r_base.GetReg(), r_index.GetReg(), scale,
displacement + HIWORD_OFFSET);
- load = NewLIR5(opcode, r_dest, rBase, r_index, scale,
+ load = NewLIR5(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg(), scale,
displacement + LOWORD_OFFSET);
}
} else {
if (r_dest == r_index) {
// We can't use either register for the first load.
- int temp = AllocTemp();
- load = NewLIR5(opcode, temp, rBase, r_index, scale,
+ RegStorage temp = AllocTemp();
+ load = NewLIR5(opcode, temp.GetReg(), r_base.GetReg(), r_index.GetReg(), scale,
displacement + LOWORD_OFFSET);
- load2 = NewLIR5(opcode, r_dest_hi, rBase, r_index, scale,
+ load2 = NewLIR5(opcode, r_dest_hi.GetReg(), r_base.GetReg(), r_index.GetReg(), scale,
displacement + HIWORD_OFFSET);
OpRegCopy(r_dest, temp);
FreeTemp(temp);
} else {
- load = NewLIR5(opcode, r_dest, rBase, r_index, scale,
+ load = NewLIR5(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg(), scale,
displacement + LOWORD_OFFSET);
- load2 = NewLIR5(opcode, r_dest_hi, rBase, r_index, scale,
+ load2 = NewLIR5(opcode, r_dest_hi.GetReg(), r_base.GetReg(), r_index.GetReg(), scale,
displacement + HIWORD_OFFSET);
}
}
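The pair-load ordering above guards against a clobber hazard: if the base register is also the low destination, the low load must be issued last so the base survives for the high load (and a temp is used when a destination aliases the index). A tiny memory model of the correct ordering:

#include <cassert>
#include <cstdint>

int main() {
  uint32_t mem[2] = {0x11111111, 0x22222222};  // [LOWORD, HIWORD]
  // regs[0] doubles as the base (holding "address" 0) and as dest_lo.
  uint32_t regs[2] = {0, 0};
  // Correct order when base == dest_lo: high half first, low half last.
  regs[1] = mem[regs[0] + 1];  // load2: dest_hi, base still intact
  regs[0] = mem[regs[0] + 0];  // load: dest_lo overwrites the base last
  assert(regs[0] == 0x11111111 && regs[1] == 0x22222222);
  return 0;
}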
@@ -656,30 +660,31 @@
}
/* Load value from base + scaled index. */
-LIR* X86Mir2Lir::LoadBaseIndexed(int rBase,
- int r_index, int r_dest, int scale, OpSize size) {
- return LoadBaseIndexedDisp(rBase, r_index, scale, 0,
- r_dest, INVALID_REG, size, INVALID_SREG);
+LIR* X86Mir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
+ int scale, OpSize size) {
+ return LoadBaseIndexedDisp(r_base, r_index, scale, 0,
+ r_dest, RegStorage::InvalidReg(), size, INVALID_SREG);
}
-LIR* X86Mir2Lir::LoadBaseDisp(int rBase, int displacement,
- int r_dest, OpSize size, int s_reg) {
- return LoadBaseIndexedDisp(rBase, INVALID_REG, 0, displacement,
- r_dest, INVALID_REG, size, s_reg);
+LIR* X86Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement,
+ RegStorage r_dest, OpSize size, int s_reg) {
+ return LoadBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement,
+ r_dest, RegStorage::InvalidReg(), size, s_reg);
}
-LIR* X86Mir2Lir::LoadBaseDispWide(int rBase, int displacement,
- int r_dest_lo, int r_dest_hi, int s_reg) {
- return LoadBaseIndexedDisp(rBase, INVALID_REG, 0, displacement,
- r_dest_lo, r_dest_hi, kLong, s_reg);
+LIR* X86Mir2Lir::LoadBaseDispWide(RegStorage r_base, int displacement, RegStorage r_dest,
+ int s_reg) {
+ return LoadBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement,
+ r_dest.GetLow(), r_dest.GetHigh(), kLong, s_reg);
}
-LIR* X86Mir2Lir::StoreBaseIndexedDisp(int rBase, int r_index, int scale,
- int displacement, int r_src, int r_src_hi, OpSize size,
- int s_reg) {
+LIR* X86Mir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
+ int displacement, RegStorage r_src, RegStorage r_src_hi,
+ OpSize size, int s_reg) {
LIR *store = NULL;
LIR *store2 = NULL;
- bool is_array = r_index != INVALID_REG;
+ bool is_array = r_index.Valid();
+ // FIXME: use regstorage attributes in place of these.
bool pair = false;
bool is64bit = false;
X86OpCode opcode = kX86Nop;
@@ -687,7 +692,7 @@
case kLong:
case kDouble:
is64bit = true;
- if (X86_FPREG(r_src)) {
+ if (X86_FPREG(r_src.GetReg())) {
opcode = is_array ? kX86MovsdAR : kX86MovsdMR;
} else {
pair = true;
@@ -699,9 +704,9 @@
case kWord:
case kSingle:
opcode = is_array ? kX86Mov32AR : kX86Mov32MR;
- if (X86_FPREG(r_src)) {
+ if (X86_FPREG(r_src.GetReg())) {
opcode = is_array ? kX86MovssAR : kX86MovssMR;
- DCHECK(X86_SINGLEREG(r_src));
+ DCHECK(X86_SINGLEREG(r_src.GetReg()));
}
DCHECK_EQ((displacement & 0x3), 0);
break;
@@ -720,12 +725,12 @@
if (!is_array) {
if (!pair) {
- store = NewLIR3(opcode, rBase, displacement + LOWORD_OFFSET, r_src);
+ store = NewLIR3(opcode, r_base.GetReg(), displacement + LOWORD_OFFSET, r_src.GetReg());
} else {
- store = NewLIR3(opcode, rBase, displacement + LOWORD_OFFSET, r_src);
- store2 = NewLIR3(opcode, rBase, displacement + HIWORD_OFFSET, r_src_hi);
+ store = NewLIR3(opcode, r_base.GetReg(), displacement + LOWORD_OFFSET, r_src.GetReg());
+ store2 = NewLIR3(opcode, r_base.GetReg(), displacement + HIWORD_OFFSET, r_src_hi.GetReg());
}
- if (rBase == rX86_SP) {
+ if (r_base == rs_rX86_SP) {
AnnotateDalvikRegAccess(store, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
false /* is_load */, is64bit);
if (pair) {
@@ -735,13 +740,13 @@
}
} else {
if (!pair) {
- store = NewLIR5(opcode, rBase, r_index, scale,
- displacement + LOWORD_OFFSET, r_src);
+ store = NewLIR5(opcode, r_base.GetReg(), r_index.GetReg(), scale,
+ displacement + LOWORD_OFFSET, r_src.GetReg());
} else {
- store = NewLIR5(opcode, rBase, r_index, scale,
- displacement + LOWORD_OFFSET, r_src);
- store2 = NewLIR5(opcode, rBase, r_index, scale,
- displacement + HIWORD_OFFSET, r_src_hi);
+ store = NewLIR5(opcode, r_base.GetReg(), r_index.GetReg(), scale,
+ displacement + LOWORD_OFFSET, r_src.GetReg());
+ store2 = NewLIR5(opcode, r_base.GetReg(), r_index.GetReg(), scale,
+ displacement + HIWORD_OFFSET, r_src_hi.GetReg());
}
}
@@ -749,23 +754,21 @@
}
/* Store value at base + scaled index. */
-LIR* X86Mir2Lir::StoreBaseIndexed(int rBase, int r_index, int r_src,
+LIR* X86Mir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
int scale, OpSize size) {
- return StoreBaseIndexedDisp(rBase, r_index, scale, 0,
- r_src, INVALID_REG, size, INVALID_SREG);
+ return StoreBaseIndexedDisp(r_base, r_index, scale, 0,
+ r_src, RegStorage::InvalidReg(), size, INVALID_SREG);
}
-LIR* X86Mir2Lir::StoreBaseDisp(int rBase, int displacement,
- int r_src, OpSize size) {
- return StoreBaseIndexedDisp(rBase, INVALID_REG, 0,
- displacement, r_src, INVALID_REG, size,
- INVALID_SREG);
+LIR* X86Mir2Lir::StoreBaseDisp(RegStorage r_base, int displacement,
+ RegStorage r_src, OpSize size) {
+ return StoreBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement, r_src,
+ RegStorage::InvalidReg(), size, INVALID_SREG);
}
-LIR* X86Mir2Lir::StoreBaseDispWide(int rBase, int displacement,
- int r_src_lo, int r_src_hi) {
- return StoreBaseIndexedDisp(rBase, INVALID_REG, 0, displacement,
- r_src_lo, r_src_hi, kLong, INVALID_SREG);
+LIR* X86Mir2Lir::StoreBaseDispWide(RegStorage r_base, int displacement, RegStorage r_src) {
+ return StoreBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement,
+ r_src.GetLow(), r_src.GetHigh(), kLong, INVALID_SREG);
}
/*
@@ -774,15 +777,15 @@
*/
void X86Mir2Lir::OpVectorRegCopyWide(uint8_t fp_reg, uint8_t low_reg, uint8_t high_reg) {
NewLIR2(kX86MovdxrRR, fp_reg, low_reg);
- int tmp_reg = AllocTempDouble();
+ int tmp_reg = AllocTempDouble().GetLowReg();
NewLIR2(kX86MovdxrRR, tmp_reg, high_reg);
NewLIR2(kX86PunpckldqRR, fp_reg, tmp_reg);
FreeTemp(tmp_reg);
}
-LIR* X86Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, int temp_reg, int base_reg,
+LIR* X86Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
int offset, int check_value, LIR* target) {
- NewLIR3(IS_SIMM8(check_value) ? kX86Cmp32MI8 : kX86Cmp32MI, base_reg, offset,
+ NewLIR3(IS_SIMM8(check_value) ? kX86Cmp32MI8 : kX86Cmp32MI, base_reg.GetReg(), offset,
check_value);
LIR* branch = OpCondBranch(cond, target);
return branch;
diff --git a/compiler/dex/quick/x86/x86_lir.h b/compiler/dex/quick/x86/x86_lir.h
index abe1b3d..797bc82 100644
--- a/compiler/dex/quick/x86/x86_lir.h
+++ b/compiler/dex/quick/x86/x86_lir.h
@@ -189,22 +189,52 @@
fr15 = 15 + X86_FP_REG_OFFSET,
};
+const RegStorage rs_r0(RegStorage::k32BitSolo, r0);
+const RegStorage rs_rAX = rs_r0;
+const RegStorage rs_r1(RegStorage::k32BitSolo, r1);
+const RegStorage rs_rCX = rs_r1;
+const RegStorage rs_r2(RegStorage::k32BitSolo, r2);
+const RegStorage rs_rDX = rs_r2;
+const RegStorage rs_r3(RegStorage::k32BitSolo, r3);
+const RegStorage rs_rBX = rs_r3;
+const RegStorage rs_r4sp(RegStorage::k32BitSolo, r4sp);
+const RegStorage rs_rX86_SP = rs_r4sp;
+const RegStorage rs_r5(RegStorage::k32BitSolo, r5);
+const RegStorage rs_rBP = rs_r5;
+const RegStorage rs_r6(RegStorage::k32BitSolo, r6);
+const RegStorage rs_rSI = rs_r6;
+const RegStorage rs_r7(RegStorage::k32BitSolo, r7);
+const RegStorage rs_rDI = rs_r7;
+
+// TODO: eliminate these #defines?
#define rX86_ARG0 rAX
+#define rs_rX86_ARG0 rs_rAX
#define rX86_ARG1 rCX
+#define rs_rX86_ARG1 rs_rCX
#define rX86_ARG2 rDX
+#define rs_rX86_ARG2 rs_rDX
#define rX86_ARG3 rBX
+#define rs_rX86_ARG3 rs_rBX
#define rX86_FARG0 rAX
+#define rs_rX86_FARG0 rs_rAX
#define rX86_FARG1 rCX
+#define rs_rX86_FARG1 rs_rCX
#define rX86_FARG2 rDX
+#define rs_rX86_FARG2 rs_rDX
#define rX86_FARG3 rBX
+#define rs_rX86_FARG3 rs_rBX
#define rX86_RET0 rAX
+#define rs_rX86_RET0 rs_rAX
#define rX86_RET1 rDX
+#define rs_rX86_RET1 rs_rDX
#define rX86_INVOKE_TGT rAX
-#define rX86_LR INVALID_REG
-#define rX86_SUSPEND INVALID_REG
-#define rX86_SELF INVALID_REG
+#define rs_rX86_INVOKE_TGT rs_rAX
+#define rX86_LR RegStorage::kInvalidRegVal
+#define rX86_SUSPEND RegStorage::kInvalidRegVal
+#define rX86_SELF RegStorage::kInvalidRegVal
#define rX86_COUNT rCX
-#define rX86_PC INVALID_REG
+#define rs_rX86_COUNT rs_rCX
+#define rX86_PC RegStorage::kInvalidRegVal
// RegisterLocation templates return values (r_V0, or r_V0/r_V1).
const RegLocation x86_loc_c_return
@@ -388,6 +418,7 @@
kX86CmpxchgRR, kX86CmpxchgMR, kX86CmpxchgAR, // compare and exchange
kX86LockCmpxchgMR, kX86LockCmpxchgAR, // locked compare and exchange
kX86LockCmpxchg8bM, kX86LockCmpxchg8bA, // locked compare and exchange
+ kX86XchgMR, // exchange memory with register (automatically locked)
Binary0fOpCode(kX86Movzx8), // zero-extend 8-bit value
Binary0fOpCode(kX86Movzx16), // zero-extend 16-bit value
Binary0fOpCode(kX86Movsx8), // sign-extend 8-bit value
diff --git a/compiler/dex/reg_storage.h b/compiler/dex/reg_storage.h
index c59617e..11bec99 100644
--- a/compiler/dex/reg_storage.h
+++ b/compiler/dex/reg_storage.h
@@ -72,6 +72,7 @@
};
static const uint16_t kRegValMask = 0x007f;
+ static const uint16_t kInvalidRegVal = 0x007f;
static const uint16_t kHighRegShift = 7;
static const uint16_t kHighRegMask = kRegValMask << kHighRegShift;
@@ -92,45 +93,64 @@
RegStorage() : reg_(kInvalid) {}
~RegStorage() {}
- bool IsInvalid() const {
- return ((reg_ & kShapeMask) == kInvalid);
+ bool operator==(const RegStorage rhs) const {
+ return (reg_ == rhs.GetRawBits());
+ }
+
+ bool operator!=(const RegStorage rhs) const {
+ return (reg_ != rhs.GetRawBits());
+ }
+
+ bool Valid() const {
+ return ((reg_ & kShapeMask) != kInvalid);
}
bool Is32Bit() const {
- DCHECK(!IsInvalid());
return ((reg_ & kSizeMask) == k32Bit);
}
bool Is64Bit() const {
- DCHECK(!IsInvalid());
return ((reg_ & kSizeMask) == k64Bit);
}
bool IsPair() const {
- DCHECK(!IsInvalid());
return ((reg_ & kPairMask) == kPair);
}
bool IsSolo() const {
- DCHECK(!IsInvalid());
return ((reg_ & kVectorMask) == kSolo);
}
bool IsVector() const {
- DCHECK(!IsInvalid());
return ((reg_ & kVectorMask) == kVector);
}
// Used to retrieve either the low register of a pair, or the only register.
int GetReg() const {
- DCHECK(!IsInvalid());
- return (reg_ & kRegValMask);
+ DCHECK(!IsPair());
+ return Valid() ? (reg_ & kRegValMask) : kInvalidRegVal;
}
void SetReg(int reg) {
- DCHECK(!IsInvalid());
+ DCHECK(Valid());
reg_ = (reg_ & ~kRegValMask) | reg;
- DCHECK_EQ(GetReg(), reg);
+ }
+
+ void SetLowReg(int reg) {
+ DCHECK(IsPair());
+ reg_ = (reg_ & ~kRegValMask) | reg;
+ }
+
+ // Retrieve the least significant register of a pair.
+ int GetLowReg() const {
+ DCHECK(IsPair());
+ return (reg_ & kRegValMask);
+ }
+
+ // Create a stand-alone RegStorage from the low reg of a pair.
+ RegStorage GetLow() const {
+ DCHECK(IsPair());
+ return RegStorage(k32BitSolo, reg_ & kRegValMask);
}
// Retrieve the most significant register of a pair.
@@ -139,12 +159,41 @@
return (reg_ & kHighRegMask) >> kHighRegShift;
}
+ // Create a stand-alone RegStorage from the high reg of a pair.
+ RegStorage GetHigh() const {
+ DCHECK(IsPair());
+ return RegStorage(k32BitSolo, (reg_ & kHighRegMask) >> kHighRegShift);
+ }
+
void SetHighReg(int reg) {
DCHECK(IsPair());
reg_ = (reg_ & ~kHighRegMask) | (reg << kHighRegShift);
DCHECK_EQ(GetHighReg(), reg);
}
+ // Combine 2 32-bit solo regs into a pair.
+ static RegStorage MakeRegPair(RegStorage low, RegStorage high) {
+ DCHECK(!low.IsPair());
+ DCHECK(low.Is32Bit());
+ DCHECK(!high.IsPair());
+ DCHECK(high.Is32Bit());
+ return RegStorage(k64BitPair, low.GetReg(), high.GetReg());
+ }
+
+ // Create a 32-bit solo.
+ static RegStorage Solo32(int reg_num) {
+ return RegStorage(k32BitSolo, reg_num);
+ }
+
+ // Create a 64-bit solo.
+ static RegStorage Solo64(int reg_num) {
+ return RegStorage(k64BitSolo, reg_num);
+ }
+
+ static RegStorage InvalidReg() {
+ return RegStorage(kInvalid);
+ }
+
int GetRawBits() const {
return reg_;
}
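The new pair accessors above all decode the same packed layout: the low register number sits in bits [6:0] (kRegValMask) and the high register in the next seven bits (kHighRegShift/kHighRegMask). A standalone sketch of that packing, with the shape bits omitted for brevity:

#include <cassert>
#include <cstdint>

constexpr uint16_t kRegValMask = 0x007f;
constexpr uint16_t kHighRegShift = 7;

// Pack a low/high pair the way MakeRegPair stores it in reg_.
uint16_t Pack(int low, int high) {
  return static_cast<uint16_t>((high << kHighRegShift) | low);
}

int main() {
  uint16_t bits = Pack(/*low=*/2, /*high=*/3);  // register numbers illustrative
  assert((bits & kRegValMask) == 2);                     // GetLowReg
  assert(((bits >> kHighRegShift) & kRegValMask) == 3);  // GetHighReg
  return 0;
}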
diff --git a/disassembler/disassembler_x86.cc b/disassembler/disassembler_x86.cc
index ab0ee52..4a03ebe 100644
--- a/disassembler/disassembler_x86.cc
+++ b/disassembler/disassembler_x86.cc
@@ -226,6 +226,12 @@
opcode << "j" << condition_codes[*instr & 0xF];
branch_bytes = 1;
break;
+ case 0x86: case 0x87:
+ opcode << "xchg";
+ store = true;
+ has_modrm = true;
+ byte_operand = (*instr == 0x86);
+ break;
case 0x88: opcode << "mov"; store = true; has_modrm = true; byte_operand = true; break;
case 0x89: opcode << "mov"; store = true; has_modrm = true; break;
case 0x8A: opcode << "mov"; load = true; has_modrm = true; byte_operand = true; break;
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index fdf5763..52a1672 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -31,7 +31,6 @@
Mutex* Locks::abort_lock_ = nullptr;
Mutex* Locks::breakpoint_lock_ = nullptr;
-Mutex* Locks::deoptimization_lock_ = nullptr;
ReaderWriterMutex* Locks::classlinker_classes_lock_ = nullptr;
ReaderWriterMutex* Locks::heap_bitmap_lock_ = nullptr;
Mutex* Locks::logging_lock_ = nullptr;
@@ -812,7 +811,6 @@
// Already initialized.
DCHECK(abort_lock_ != nullptr);
DCHECK(breakpoint_lock_ != nullptr);
- DCHECK(deoptimization_lock_ != nullptr);
DCHECK(classlinker_classes_lock_ != nullptr);
DCHECK(heap_bitmap_lock_ != nullptr);
DCHECK(logging_lock_ != nullptr);
@@ -829,8 +827,6 @@
DCHECK(breakpoint_lock_ == nullptr);
breakpoint_lock_ = new Mutex("breakpoint lock", kBreakpointLock);
- DCHECK(deoptimization_lock_ == nullptr);
- deoptimization_lock_ = new Mutex("deoptimization lock", kDeoptimizationLock);
DCHECK(classlinker_classes_lock_ == nullptr);
classlinker_classes_lock_ = new ReaderWriterMutex("ClassLinker classes lock",
kClassLinkerClassesLock);
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 55ec1c3..4b881f6 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -537,11 +537,8 @@
// Guards breakpoints.
static Mutex* breakpoint_lock_ ACQUIRED_AFTER(thread_list_lock_);
- // Guards deoptimization requests.
- static Mutex* deoptimization_lock_ ACQUIRED_AFTER(breakpoint_lock_);
-
// Guards trace requests.
- static Mutex* trace_lock_ ACQUIRED_AFTER(deoptimization_lock_);
+ static Mutex* trace_lock_ ACQUIRED_AFTER(breakpoint_lock_);
// Guards profile objects.
static Mutex* profiler_lock_ ACQUIRED_AFTER(trace_lock_);
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index c18d5c6..20890f5 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -200,17 +200,9 @@
size_t Dbg::alloc_record_count_ = 0;
// Deoptimization support.
-struct MethodInstrumentationRequest {
- bool deoptimize;
-
- // Method for selective deoptimization. NULL means full deoptimization.
- mirror::ArtMethod* method;
-
- MethodInstrumentationRequest(bool deoptimize, mirror::ArtMethod* method)
- : deoptimize(deoptimize), method(method) {}
-};
-// TODO we need to visit associated methods as roots.
-static std::vector<MethodInstrumentationRequest> gDeoptimizationRequests GUARDED_BY(Locks::deoptimization_lock_);
+Mutex* Dbg::deoptimization_lock_ = nullptr;
+std::vector<DeoptimizationRequest> Dbg::deoptimization_requests_;
+size_t Dbg::full_deoptimization_event_count_ = 0;
// Breakpoints.
static std::vector<Breakpoint> gBreakpoints GUARDED_BY(Locks::breakpoint_lock_);
@@ -238,6 +230,12 @@
}
}
+void DeoptimizationRequest::VisitRoots(RootCallback* callback, void* arg) {
+ if (method != nullptr) {
+ callback(reinterpret_cast<mirror::Object**>(&method), arg, 0, kRootDebugger);
+ }
+}
+
static bool IsBreakpoint(const mirror::ArtMethod* m, uint32_t dex_pc)
LOCKS_EXCLUDED(Locks::breakpoint_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -502,6 +500,7 @@
gRegistry = new ObjectRegistry;
alloc_tracker_lock_ = new Mutex("AllocTracker lock");
+ deoptimization_lock_ = new Mutex("deoptimization lock", kDeoptimizationLock);
// Init JDWP if the debugger is enabled. This may connect out to a
// debugger, passively listen for a debugger, or block waiting for a
// debugger.
@@ -524,9 +523,17 @@
}
void Dbg::VisitRoots(RootCallback* callback, void* arg) {
- MutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
- for (Breakpoint& bp : gBreakpoints) {
- bp.VisitRoots(callback, arg);
+ {
+ MutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
+ for (Breakpoint& bp : gBreakpoints) {
+ bp.VisitRoots(callback, arg);
+ }
+ }
+ if (deoptimization_lock_ != nullptr) { // only true if the debugger is started.
+ MutexLock mu(Thread::Current(), *deoptimization_lock_);
+ for (DeoptimizationRequest& req : deoptimization_requests_) {
+ req.VisitRoots(callback, arg);
+ }
}
}
@@ -539,6 +546,8 @@
gRegistry = nullptr;
delete alloc_tracker_lock_;
alloc_tracker_lock_ = nullptr;
+ delete deoptimization_lock_;
+ deoptimization_lock_ = nullptr;
}
void Dbg::GcDidFinish() {
@@ -605,8 +614,9 @@
}
{
- MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
- CHECK_EQ(gDeoptimizationRequests.size(), 0U);
+ MutexLock mu(Thread::Current(), *deoptimization_lock_);
+ CHECK_EQ(deoptimization_requests_.size(), 0U);
+ CHECK_EQ(full_deoptimization_event_count_, 0U);
}
Runtime* runtime = Runtime::Current();
@@ -646,8 +656,9 @@
// Since we're going to disable deoptimization, we clear the deoptimization requests queue.
// This prevents us from having any pending deoptimization request when the debugger attaches
// to us again while no event has been requested yet.
- MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
- gDeoptimizationRequests.clear();
+ MutexLock mu(Thread::Current(), *deoptimization_lock_);
+ deoptimization_requests_.clear();
+ full_deoptimization_event_count_ = 0U;
}
runtime->GetInstrumentation()->RemoveListener(&gDebugInstrumentationListener,
instrumentation::Instrumentation::kMethodEntered |
@@ -2546,44 +2557,86 @@
}
}
-static void ProcessDeoptimizationRequests()
- LOCKS_EXCLUDED(Locks::deoptimization_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
- MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
+// Process request while all mutator threads are suspended.
+void Dbg::ProcessDeoptimizationRequest(const DeoptimizationRequest& request) {
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
- for (const MethodInstrumentationRequest& request : gDeoptimizationRequests) {
- mirror::ArtMethod* const method = request.method;
- if (method != nullptr) {
- // Selective deoptimization.
- if (request.deoptimize) {
- VLOG(jdwp) << "Deoptimize method " << PrettyMethod(method);
- instrumentation->Deoptimize(method);
- } else {
- VLOG(jdwp) << "Undeoptimize method " << PrettyMethod(method);
- instrumentation->Undeoptimize(method);
- }
- } else {
- // Full deoptimization.
- if (request.deoptimize) {
- VLOG(jdwp) << "Deoptimize the world";
- instrumentation->DeoptimizeEverything();
- } else {
- VLOG(jdwp) << "Undeoptimize the world";
- instrumentation->UndeoptimizeEverything();
- }
- }
+ switch (request.kind) {
+ case DeoptimizationRequest::kNothing:
+ LOG(WARNING) << "Ignoring empty deoptimization request.";
+ break;
+ case DeoptimizationRequest::kFullDeoptimization:
+ VLOG(jdwp) << "Deoptimize the world";
+ instrumentation->DeoptimizeEverything();
+ break;
+ case DeoptimizationRequest::kFullUndeoptimization:
+ VLOG(jdwp) << "Undeoptimize the world";
+ instrumentation->UndeoptimizeEverything();
+ break;
+ case DeoptimizationRequest::kSelectiveDeoptimization:
+ VLOG(jdwp) << "Deoptimize method " << PrettyMethod(request.method);
+ instrumentation->Deoptimize(request.method);
+ break;
+ case DeoptimizationRequest::kSelectiveUndeoptimization:
+ VLOG(jdwp) << "Undeoptimize method " << PrettyMethod(request.method);
+ instrumentation->Undeoptimize(request.method);
+ break;
+ default:
+ LOG(FATAL) << "Unsupported deoptimization request kind " << request.kind;
+ break;
}
- gDeoptimizationRequests.clear();
}
-// Process deoptimization requests after suspending all mutator threads.
+void Dbg::RequestDeoptimization(const DeoptimizationRequest& req) {
+ if (req.kind == DeoptimizationRequest::kNothing) {
+ // Nothing to do.
+ return;
+ }
+ MutexLock mu(Thread::Current(), *deoptimization_lock_);
+ switch (req.kind) {
+ case DeoptimizationRequest::kFullDeoptimization: {
+ DCHECK(req.method == nullptr);
+ if (full_deoptimization_event_count_ == 0) {
+ VLOG(jdwp) << "Request full deoptimization";
+ deoptimization_requests_.push_back(req);
+ }
+ ++full_deoptimization_event_count_;
+ break;
+ }
+ case DeoptimizationRequest::kFullUndeoptimization: {
+ DCHECK(req.method == nullptr);
+ DCHECK_GT(full_deoptimization_event_count_, 0U);
+ --full_deoptimization_event_count_;
+ if (full_deoptimization_event_count_ == 0) {
+ VLOG(jdwp) << "Request full undeoptimization";
+ deoptimization_requests_.push_back(req);
+ }
+ break;
+ }
+ case DeoptimizationRequest::kSelectiveDeoptimization: {
+ DCHECK(req.method != nullptr);
+ VLOG(jdwp) << "Request deoptimization of " << PrettyMethod(req.method);
+ deoptimization_requests_.push_back(req);
+ break;
+ }
+ case DeoptimizationRequest::kSelectiveUndeoptimization: {
+ DCHECK(req.method != nullptr);
+ VLOG(jdwp) << "Request undeoptimization of " << PrettyMethod(req.method);
+ deoptimization_requests_.push_back(req);
+ break;
+ }
+ default: {
+ LOG(FATAL) << "Unknown deoptimization request kind " << req.kind;
+ break;
+ }
+ }
+}
+
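RequestDeoptimization above treats full deoptimization like a reference count: only the 0-to-1 transition enqueues a kFullDeoptimization request and only the 1-to-0 transition enqueues a kFullUndeoptimization, so intermediate requesters just adjust the counter. A simplified standalone model of that scheme (names abbreviated, no locking):

#include <cassert>
#include <cstddef>
#include <vector>

enum Kind { kFullDeopt, kFullUndeopt };

struct Debugger {
  size_t full_count = 0;          // full_deoptimization_event_count_
  std::vector<Kind> queue;        // deoptimization_requests_
  void Request(Kind k) {
    if (k == kFullDeopt) {
      if (full_count == 0) queue.push_back(kFullDeopt);  // first requester
      ++full_count;
    } else {
      --full_count;
      if (full_count == 0) queue.push_back(kFullUndeopt);  // last requester
    }
  }
};

int main() {
  Debugger dbg;
  dbg.Request(kFullDeopt);    // enqueued: the world gets deoptimized
  dbg.Request(kFullDeopt);    // counted only
  dbg.Request(kFullUndeopt);  // counted only
  dbg.Request(kFullUndeopt);  // enqueued: the world gets undeoptimized
  assert(dbg.queue.size() == 2);
  return 0;
}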
void Dbg::ManageDeoptimization() {
Thread* const self = Thread::Current();
{
// Avoid suspend/resume if there is no pending request.
- MutexLock mu(self, *Locks::deoptimization_lock_);
- if (gDeoptimizationRequests.empty()) {
+ MutexLock mu(self, *deoptimization_lock_);
+ if (deoptimization_requests_.empty()) {
return;
}
}
@@ -2593,27 +2646,21 @@
Runtime* const runtime = Runtime::Current();
runtime->GetThreadList()->SuspendAll();
const ThreadState old_state = self->SetStateUnsafe(kRunnable);
- ProcessDeoptimizationRequests();
+ {
+ MutexLock mu(self, *deoptimization_lock_);
+ for (const DeoptimizationRequest& request : deoptimization_requests_) {
+ ProcessDeoptimizationRequest(request);
+ }
+ deoptimization_requests_.clear();
+ }
CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
runtime->GetThreadList()->ResumeAll();
self->TransitionFromSuspendedToRunnable();
}
-// Enable full deoptimization.
-void Dbg::EnableFullDeoptimization() {
- MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
- VLOG(jdwp) << "Request full deoptimization";
- gDeoptimizationRequests.push_back(MethodInstrumentationRequest(true, nullptr));
-}
-
-// Disable full deoptimization.
-void Dbg::DisableFullDeoptimization() {
- MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
- VLOG(jdwp) << "Request full undeoptimization";
- gDeoptimizationRequests.push_back(MethodInstrumentationRequest(false, nullptr));
-}
-
-void Dbg::WatchLocation(const JDWP::JdwpLocation* location) {
+void Dbg::WatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequest* req) {
+ // TODO: we don't need to deoptimize a method if it's not compiled, since it already runs with
+ // the interpreter.
bool need_deoptimization = true;
mirror::ArtMethod* m = FromMethodId(location->method_id);
{
@@ -2630,18 +2677,17 @@
}
gBreakpoints.push_back(Breakpoint(m, location->dex_pc));
- VLOG(jdwp) << "Set breakpoint #" << (gBreakpoints.size() - 1) << ": " << gBreakpoints[gBreakpoints.size() - 1];
+ VLOG(jdwp) << "Set breakpoint #" << (gBreakpoints.size() - 1) << ": "
+ << gBreakpoints[gBreakpoints.size() - 1];
}
if (need_deoptimization) {
- // Request its deoptimization. This will be done after updating the JDWP event list.
- MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
- gDeoptimizationRequests.push_back(MethodInstrumentationRequest(true, m));
- VLOG(jdwp) << "Request deoptimization of " << PrettyMethod(m);
+ req->kind = DeoptimizationRequest::kSelectiveDeoptimization;
+ req->method = m;
}
}
-void Dbg::UnwatchLocation(const JDWP::JdwpLocation* location) {
+void Dbg::UnwatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequest* req) {
bool can_undeoptimize = true;
mirror::ArtMethod* m = FromMethodId(location->method_id);
DCHECK(Runtime::Current()->GetInstrumentation()->IsDeoptimized(m));
@@ -2666,9 +2712,8 @@
if (can_undeoptimize) {
// Request its undeoptimization. This will be done after updating the JDWP event list.
- MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
- gDeoptimizationRequests.push_back(MethodInstrumentationRequest(false, m));
- VLOG(jdwp) << "Request undeoptimization of " << PrettyMethod(m);
+ req->kind = DeoptimizationRequest::kSelectiveUndeoptimization;
+ req->method = m;
}
}
@@ -3767,22 +3812,27 @@
}
void Dbg::SetAllocTrackingEnabled(bool enabled) {
- MutexLock mu(Thread::Current(), *alloc_tracker_lock_);
if (enabled) {
- if (recent_allocation_records_ == NULL) {
- alloc_record_max_ = GetAllocTrackerMax();
- LOG(INFO) << "Enabling alloc tracker (" << alloc_record_max_ << " entries of "
- << kMaxAllocRecordStackDepth << " frames, taking "
- << PrettySize(sizeof(AllocRecord) * alloc_record_max_) << ")";
- alloc_record_head_ = alloc_record_count_ = 0;
- recent_allocation_records_ = new AllocRecord[alloc_record_max_];
- CHECK(recent_allocation_records_ != NULL);
+ {
+ MutexLock mu(Thread::Current(), *alloc_tracker_lock_);
+ if (recent_allocation_records_ == NULL) {
+ alloc_record_max_ = GetAllocTrackerMax();
+ LOG(INFO) << "Enabling alloc tracker (" << alloc_record_max_ << " entries of "
+ << kMaxAllocRecordStackDepth << " frames, taking "
+ << PrettySize(sizeof(AllocRecord) * alloc_record_max_) << ")";
+ alloc_record_head_ = alloc_record_count_ = 0;
+ recent_allocation_records_ = new AllocRecord[alloc_record_max_];
+ CHECK(recent_allocation_records_ != NULL);
+ }
}
Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
} else {
Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints();
- delete[] recent_allocation_records_;
- recent_allocation_records_ = NULL;
+ {
+ MutexLock mu(Thread::Current(), *alloc_tracker_lock_);
+ delete[] recent_allocation_records_;
+ recent_allocation_records_ = NULL;
+ }
}
}
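The reworked SetAllocTrackingEnabled above narrows the mutex to the record-buffer updates and releases it before calling into instrumentation. A minimal sketch of that scoped-locking shape, assuming hypothetical stand-ins for the lock, buffer, and instrumentation call:

#include <mutex>

std::mutex g_alloc_tracker_lock;  // stand-in for alloc_tracker_lock_
int* g_records = nullptr;         // stand-in for recent_allocation_records_

void InstrumentQuickAllocEntryPoints() {}  // stand-in; may take other locks

void SetAllocTrackingEnabled() {
  {
    std::lock_guard<std::mutex> lock(g_alloc_tracker_lock);
    if (g_records == nullptr) {
      g_records = new int[64];  // only the guarded state is touched here
    }
  }  // lock released before calling out, avoiding nested-lock ordering issues
  InstrumentQuickAllocEntryPoints();
}

int main() {
  SetAllocTrackingEnabled();
  return 0;
}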
diff --git a/runtime/debugger.h b/runtime/debugger.h
index 5fbdb37..23c9c6a 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -25,6 +25,7 @@
#include <set>
#include <string>
+#include <vector>
#include "jdwp/jdwp.h"
#include "jni.h"
@@ -121,6 +122,25 @@
DISALLOW_COPY_AND_ASSIGN(SingleStepControl);
};
+struct DeoptimizationRequest {
+ enum Kind {
+ kNothing, // no action.
+ kFullDeoptimization, // deoptimize everything.
+ kFullUndeoptimization, // undeoptimize everything.
+ kSelectiveDeoptimization, // deoptimize one method.
+ kSelectiveUndeoptimization // undeoptimize one method.
+ };
+
+ DeoptimizationRequest() : kind(kNothing), method(nullptr) {}
+
+ void VisitRoots(RootCallback* callback, void* arg);
+
+ Kind kind;
+
+ // Method for selective deoptimization.
+ mirror::ArtMethod* method;
+};
+
class Dbg {
public:
static bool ParseJdwpOptions(const std::string& options);
@@ -144,8 +164,8 @@
*/
static void Connected();
static void GoActive()
- LOCKS_EXCLUDED(Locks::breakpoint_lock_, Locks::deoptimization_lock_, Locks::mutator_lock_);
- static void Disconnected() LOCKS_EXCLUDED(Locks::deoptimization_lock_, Locks::mutator_lock_);
+ LOCKS_EXCLUDED(Locks::breakpoint_lock_, deoptimization_lock_, Locks::mutator_lock_);
+ static void Disconnected() LOCKS_EXCLUDED(deoptimization_lock_, Locks::mutator_lock_);
static void Disposed();
// Returns true if we're actually debugging with a real debugger, false if it's
@@ -407,26 +427,23 @@
LOCKS_EXCLUDED(Locks::breakpoint_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Full Deoptimization control. Only used for method entry/exit and single-stepping.
- static void EnableFullDeoptimization()
- LOCKS_EXCLUDED(Locks::deoptimization_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static void DisableFullDeoptimization()
- LOCKS_EXCLUDED(Locks::deoptimization_lock_)
+ // Records the deoptimization request in the queue.
+ static void RequestDeoptimization(const DeoptimizationRequest& req)
+ LOCKS_EXCLUDED(deoptimization_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Manage deoptimization after updating JDWP events list. This must be done while all mutator
- // threads are suspended.
+ // Manage deoptimization after updating the JDWP event list. Suspends all threads, processes each
+ // request and finally resumes all threads.
static void ManageDeoptimization()
- LOCKS_EXCLUDED(Locks::deoptimization_lock_)
+ LOCKS_EXCLUDED(deoptimization_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Breakpoints.
- static void WatchLocation(const JDWP::JdwpLocation* pLoc)
- LOCKS_EXCLUDED(Locks::breakpoint_lock_, Locks::deoptimization_lock_)
+ static void WatchLocation(const JDWP::JdwpLocation* pLoc, DeoptimizationRequest* req)
+ LOCKS_EXCLUDED(Locks::breakpoint_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static void UnwatchLocation(const JDWP::JdwpLocation* pLoc)
- LOCKS_EXCLUDED(Locks::breakpoint_lock_, Locks::deoptimization_lock_)
+ static void UnwatchLocation(const JDWP::JdwpLocation* pLoc, DeoptimizationRequest* req)
+ LOCKS_EXCLUDED(Locks::breakpoint_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Single-stepping.
@@ -521,6 +538,9 @@
static void PostThreadStartOrStop(Thread*, uint32_t)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void ProcessDeoptimizationRequest(const DeoptimizationRequest& request)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+
static Mutex* alloc_tracker_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
static AllocRecord* recent_allocation_records_ PT_GUARDED_BY(alloc_tracker_lock_);
@@ -528,6 +548,20 @@
static size_t alloc_record_head_ GUARDED_BY(alloc_tracker_lock_);
static size_t alloc_record_count_ GUARDED_BY(alloc_tracker_lock_);
+ // Guards deoptimization requests.
+ static Mutex* deoptimization_lock_ ACQUIRED_AFTER(Locks::breakpoint_lock_);
+
+ // Deoptimization requests to be processed each time the event list is updated. This is used when
+ // registering and unregistering events so we do not deoptimize while holding the event list
+ // lock.
+ static std::vector<DeoptimizationRequest> deoptimization_requests_ GUARDED_BY(deoptimization_lock_);
+
+ // Count the number of events requiring full deoptimization. When the counter is > 0, everything
+ // is deoptimized, otherwise everything is undeoptimized.
+ // Note: we fully deoptimize on the first event only (when the counter is set to 1). We fully
+ // undeoptimize when the last event is unregistered (when the counter is set to 0).
+ static size_t full_deoptimization_event_count_ GUARDED_BY(deoptimization_lock_);
+
DISALLOW_COPY_AND_ASSIGN(Dbg);
};
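
The DeoptimizationRequest struct, the deoptimization_requests_ vector, and the full_deoptimization_event_count_ counter replace the old eager enable/disable calls with a queue drained later under suspension. A standalone sketch, with simplified hypothetical types, of how recording a request and the event counter could interact:

    #include <cstddef>
    #include <mutex>
    #include <vector>

    struct Method;  // stand-in for mirror::ArtMethod

    struct Request {
      enum Kind { kNothing, kFullDeopt, kFullUndeopt, kSelectiveDeopt, kSelectiveUndeopt };
      Kind kind = kNothing;
      Method* method = nullptr;  // only used by the selective kinds
    };

    std::mutex deopt_lock;                // stands in for deoptimization_lock_
    std::vector<Request> deopt_requests;  // guarded by deopt_lock
    size_t full_deopt_event_count = 0;    // guarded by deopt_lock

    // Record a request; the actual (un)deoptimization happens later with all
    // threads suspended, so nothing is deoptimized under the event list lock.
    void RequestDeoptimization(const Request& req) {
      if (req.kind == Request::kNothing) {
        return;  // nothing to process
      }
      std::lock_guard<std::mutex> mu(deopt_lock);
      if (req.kind == Request::kFullDeopt) {
        if (++full_deopt_event_count > 1) {
          return;  // already fully deoptimized: only the first event queues
        }
      } else if (req.kind == Request::kFullUndeopt) {
        if (--full_deopt_event_count > 0) {
          return;  // other events still require full deoptimization
        }
      }
      deopt_requests.push_back(req);
    }
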
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index 829ec4a..9e5f54c 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -200,7 +200,7 @@
}
ThrowLocation throw_location(rcvr, proxy_method, -1);
JValue result_unboxed;
- if (!UnboxPrimitiveForResult(throw_location, result_ref, result_type, result_unboxed)) {
+ if (!UnboxPrimitiveForResult(throw_location, result_ref, result_type, &result_unboxed)) {
DCHECK(soa.Self()->IsExceptionPending());
return zero;
}
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index cd9e217..d4f47ef 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -669,7 +669,8 @@
void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const ALWAYS_INLINE
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
- collector_->MarkObject(obj->GetFieldObjectReferenceAddr(offset));
+ // Object was already verified when we scanned it.
+ collector_->MarkObject(obj->GetFieldObjectReferenceAddr<kVerifyNone>(offset));
}
void operator()(mirror::Class* klass, mirror::Reference* ref) const
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 7827261..02e7e3f 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -708,9 +708,11 @@
allocation_stack_->Reset();
live_stack_->Reset();
STLDeleteValues(&mod_union_tables_);
+ STLDeleteValues(&remembered_sets_);
STLDeleteElements(&continuous_spaces_);
STLDeleteElements(&discontinuous_spaces_);
delete gc_complete_lock_;
+ delete heap_trim_request_lock_;
VLOG(heap) << "Finished ~Heap()";
}
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 1ca132e..2fc67ec 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -29,6 +29,50 @@
namespace gc {
namespace space {
+class ValgrindLargeObjectMapSpace FINAL : public LargeObjectMapSpace {
+ public:
+ explicit ValgrindLargeObjectMapSpace(const std::string& name) : LargeObjectMapSpace(name) {
+ }
+
+ virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
+ size_t* usable_size) OVERRIDE {
+ mirror::Object* obj =
+ LargeObjectMapSpace::Alloc(self, num_bytes + kValgrindRedZoneBytes * 2, bytes_allocated,
+ usable_size);
+ mirror::Object* object_without_rdz = reinterpret_cast<mirror::Object*>(
+ reinterpret_cast<uintptr_t>(obj) + kValgrindRedZoneBytes);
+ VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<void*>(obj), kValgrindRedZoneBytes);
+ VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<byte*>(object_without_rdz) + num_bytes,
+ kValgrindRedZoneBytes);
+ if (usable_size != nullptr) {
+ *usable_size = num_bytes; // Since we have redzones, shrink the usable size.
+ }
+ return object_without_rdz;
+ }
+
+ virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE {
+ mirror::Object* object_with_rdz = reinterpret_cast<mirror::Object*>(
+ reinterpret_cast<uintptr_t>(obj) - kValgrindRedZoneBytes);
+ return LargeObjectMapSpace::AllocationSize(object_with_rdz, usable_size);
+ }
+
+ virtual size_t Free(Thread* self, mirror::Object* obj) OVERRIDE {
+ mirror::Object* object_with_rdz = reinterpret_cast<mirror::Object*>(
+ reinterpret_cast<uintptr_t>(obj) - kValgrindRedZoneBytes);
+ VALGRIND_MAKE_MEM_UNDEFINED(object_with_rdz, AllocationSize(obj, nullptr));
+ return LargeObjectMapSpace::Free(self, object_with_rdz);
+ }
+
+ bool Contains(const mirror::Object* obj) const OVERRIDE {
+ mirror::Object* object_with_rdz = reinterpret_cast<mirror::Object*>(
+ reinterpret_cast<uintptr_t>(obj) - kValgrindRedZoneBytes);
+ return LargeObjectMapSpace::Contains(object_with_rdz);
+ }
+
+ private:
+ static constexpr size_t kValgrindRedZoneBytes = kPageSize;
+};
+
void LargeObjectSpace::SwapBitmaps() {
live_objects_.swap(mark_objects_);
// Swap names to get more descriptive diagnostics.
@@ -53,7 +97,11 @@
lock_("large object map space lock", kAllocSpaceLock) {}
LargeObjectMapSpace* LargeObjectMapSpace::Create(const std::string& name) {
- return new LargeObjectMapSpace(name);
+ if (RUNNING_ON_VALGRIND > 0) {
+ return new ValgrindLargeObjectMapSpace(name);
+ } else {
+ return new LargeObjectMapSpace(name);
+ }
}
mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes,
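
The Valgrind wrapper above pads every large object with a red zone on each side, returns the interior pointer, and translates back in AllocationSize, Free, and Contains. A standalone model of that pointer arithmetic, with hypothetical names (kRedZone stands in for kValgrindRedZoneBytes, malloc/free for the mmap-backed allocator):

    #include <cstddef>
    #include <cstdint>
    #include <cstdlib>

    constexpr size_t kRedZone = 4096;  // the patch uses kPageSize

    // Pad the allocation on both sides and hand out the interior pointer, so
    // stray reads/writes just outside the object land in a red zone.
    void* RedZoneAlloc(size_t num_bytes) {
      uint8_t* raw = static_cast<uint8_t*>(malloc(num_bytes + 2 * kRedZone));
      return raw + kRedZone;
    }

    // Translate the interior pointer back before releasing the block.
    void RedZoneFree(void* obj) {
      free(static_cast<uint8_t*>(obj) - kRedZone);
    }
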
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index b1b0c3c..eb01325 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -91,7 +91,7 @@
};
// A discontinuous large object space implemented by individual mmap/munmap calls.
-class LargeObjectMapSpace FINAL : public LargeObjectSpace {
+class LargeObjectMapSpace : public LargeObjectSpace {
public:
// Creates a large object space. Allocations into the large object space use memory maps instead
// of malloc.
@@ -106,7 +106,7 @@
// TODO: disabling thread safety analysis as this may be called when we already hold lock_.
bool Contains(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS;
- private:
+ protected:
explicit LargeObjectMapSpace(const std::string& name);
virtual ~LargeObjectMapSpace() {}
@@ -115,7 +115,7 @@
std::vector<mirror::Object*,
accounting::GcAllocator<mirror::Object*> > large_objects_ GUARDED_BY(lock_);
typedef SafeMap<mirror::Object*, MemMap*, std::less<mirror::Object*>,
- accounting::GcAllocator<std::pair<const mirror::Object*, MemMap*> > > MemMaps;
+ accounting::GcAllocator<std::pair<mirror::Object*, MemMap*> > > MemMaps;
MemMaps mem_maps_ GUARDED_BY(lock_);
};
@@ -150,7 +150,7 @@
void Dump(std::ostream& os) const;
- private:
+ protected:
static const size_t kAlignment = kPageSize;
class AllocationHeader {
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index d2aa8d2..cf7271b 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -118,7 +118,7 @@
void EnableDeoptimization() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
LOCKS_EXCLUDED(deoptimized_methods_lock_);
void DisableDeoptimization() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(deoptimized_methods_lock_);
+ LOCKS_EXCLUDED(deoptimized_methods_lock_);
bool ShouldNotifyMethodEnterExitEvents() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Executes everything with interpreter.
@@ -142,7 +142,7 @@
// (except a class initializer) set to the resolution trampoline will be updated only once its
// declaring class is initialized.
void Undeoptimize(mirror::ArtMethod* method)
- LOCKS_EXCLUDED(Locks::thread_list_lock_)
+ LOCKS_EXCLUDED(Locks::thread_list_lock_, deoptimized_methods_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsDeoptimized(mirror::ArtMethod* method) const LOCKS_EXCLUDED(deoptimized_methods_lock_);
diff --git a/runtime/jdwp/jdwp.h b/runtime/jdwp/jdwp.h
index 4c17c96..66ebb96 100644
--- a/runtime/jdwp/jdwp.h
+++ b/runtime/jdwp/jdwp.h
@@ -335,12 +335,10 @@
AtomicInteger event_serial_;
// Linked list of events requested by the debugger (breakpoints, class prep, etc).
- Mutex event_list_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ Mutex event_list_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER ACQUIRED_BEFORE(Locks::breakpoint_lock_);
JdwpEvent* event_list_ GUARDED_BY(event_list_lock_);
size_t event_list_size_ GUARDED_BY(event_list_lock_); // Number of elements in event_list_.
- size_t full_deoptimization_requests_ GUARDED_BY(event_list_lock_); // Number of events requiring
- // full deoptimization.
// Used to synchronize suspension of the event thread (to avoid receiving "resume"
// events before the thread has finished suspending itself).
diff --git a/runtime/jdwp/jdwp_event.cc b/runtime/jdwp/jdwp_event.cc
index 427350e..9b3ea2e 100644
--- a/runtime/jdwp/jdwp_event.cc
+++ b/runtime/jdwp/jdwp_event.cc
@@ -163,11 +163,12 @@
* If one or more "break"-type mods are used, register them with
* the interpreter.
*/
+ DeoptimizationRequest req;
for (int i = 0; i < pEvent->modCount; i++) {
const JdwpEventMod* pMod = &pEvent->mods[i];
if (pMod->modKind == MK_LOCATION_ONLY) {
/* should only be for Breakpoint, Step, and Exception */
- Dbg::WatchLocation(&pMod->locationOnly.loc);
+ Dbg::WatchLocation(&pMod->locationOnly.loc, &req);
} else if (pMod->modKind == MK_STEP) {
/* should only be for EK_SINGLE_STEP; should only be one */
JdwpStepSize size = static_cast<JdwpStepSize>(pMod->step.size);
@@ -181,6 +182,11 @@
dumpEvent(pEvent); /* TODO - need for field watches */
}
}
+ if (NeedsFullDeoptimization(pEvent->eventKind)) {
+ CHECK_EQ(req.kind, DeoptimizationRequest::kNothing);
+ CHECK(req.method == nullptr);
+ req.kind = DeoptimizationRequest::kFullDeoptimization;
+ }
{
/*
@@ -193,19 +199,11 @@
}
event_list_ = pEvent;
++event_list_size_;
-
- /**
- * Do we need to enable full deoptimization ?
- */
- if (NeedsFullDeoptimization(pEvent->eventKind)) {
- if (full_deoptimization_requests_ == 0) {
- // This is the first event that needs full deoptimization: enable it.
- Dbg::EnableFullDeoptimization();
- }
- ++full_deoptimization_requests_;
- }
}
+ // TODO: we can do a better job here since we should process only one request: the one we just
+ // created.
+ Dbg::RequestDeoptimization(req);
Dbg::ManageDeoptimization();
return ERR_NONE;
@@ -238,31 +236,28 @@
/*
* Unhook us from the interpreter, if necessary.
*/
+ DeoptimizationRequest req;
for (int i = 0; i < pEvent->modCount; i++) {
JdwpEventMod* pMod = &pEvent->mods[i];
if (pMod->modKind == MK_LOCATION_ONLY) {
/* should only be for Breakpoint, Step, and Exception */
- Dbg::UnwatchLocation(&pMod->locationOnly.loc);
+ Dbg::UnwatchLocation(&pMod->locationOnly.loc, &req);
}
if (pMod->modKind == MK_STEP) {
/* should only be for EK_SINGLE_STEP; should only be one */
Dbg::UnconfigureStep(pMod->step.threadId);
}
}
+ if (NeedsFullDeoptimization(pEvent->eventKind)) {
+ CHECK_EQ(req.kind, DeoptimizationRequest::kNothing);
+ CHECK(req.method == nullptr);
+ req.kind = DeoptimizationRequest::kFullUndeoptimization;
+ }
--event_list_size_;
CHECK(event_list_size_ != 0 || event_list_ == NULL);
- /**
- * Can we disable full deoptimization ?
- */
- if (NeedsFullDeoptimization(pEvent->eventKind)) {
- --full_deoptimization_requests_;
- if (full_deoptimization_requests_ == 0) {
- // We no longer need full deoptimization.
- Dbg::DisableFullDeoptimization();
- }
- }
+ Dbg::RequestDeoptimization(req);
}
/*
diff --git a/runtime/jdwp/jdwp_main.cc b/runtime/jdwp/jdwp_main.cc
index 77c963f..5fc0228 100644
--- a/runtime/jdwp/jdwp_main.cc
+++ b/runtime/jdwp/jdwp_main.cc
@@ -215,7 +215,6 @@
event_list_lock_("JDWP event list lock", kJdwpEventListLock),
event_list_(NULL),
event_list_size_(0),
- full_deoptimization_requests_(0),
event_thread_lock_("JDWP event thread lock"),
event_thread_cond_("JDWP event thread condition variable", event_thread_lock_),
event_thread_id_(0),
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index dac287f..3d2fd7b 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -155,8 +155,16 @@
}
}
-// Similar to memmove except elements are of aligned appropriately for T, count is in T sized units
-// copies are guaranteed not to tear when T is less-than 64bit.
+template<typename T>
+inline PrimitiveArray<T>* PrimitiveArray<T>::Alloc(Thread* self, size_t length) {
+ DCHECK(array_class_ != NULL);
+ Array* raw_array = Array::Alloc<true>(self, array_class_, length, sizeof(T),
+ Runtime::Current()->GetHeap()->GetCurrentAllocator());
+ return down_cast<PrimitiveArray<T>*>(raw_array);
+}
+
+// Backward copy where elements are aligned appropriately for T. Count is in T-sized units.
+// Copies are guaranteed not to tear when sizeof(T) is less than 64 bits.
template<typename T>
static inline void ArrayBackwardCopy(T* d, const T* s, int32_t count) {
d += count;
@@ -168,12 +176,15 @@
}
}
+// Forward copy where elements are aligned appropriately for T. Count is in T-sized units.
+// Copies are guaranteed not to tear when sizeof(T) is less than 64 bits.
template<typename T>
-inline PrimitiveArray<T>* PrimitiveArray<T>::Alloc(Thread* self, size_t length) {
- DCHECK(array_class_ != NULL);
- Array* raw_array = Array::Alloc<true>(self, array_class_, length, sizeof(T),
- Runtime::Current()->GetHeap()->GetCurrentAllocator());
- return down_cast<PrimitiveArray<T>*>(raw_array);
+static inline void ArrayForwardCopy(T* d, const T* s, int32_t count) {
+ for (int32_t i = 0; i < count; ++i) {
+ *d = *s;
+ d++;
+ s++;
+ }
}
template<class T>
@@ -193,47 +204,49 @@
// Note for non-byte copies we can't rely on standard libc functions like memcpy(3) and memmove(3)
// in our implementation, because they may copy byte-by-byte.
- if (LIKELY(src != this) || (dst_pos < src_pos) || (dst_pos - src_pos >= count)) {
- // Forward copy ok.
+ if (LIKELY(src != this)) {
+ // Memcpy ok for guaranteed non-overlapping distinct arrays.
Memcpy(dst_pos, src, src_pos, count);
} else {
- // Backward copy necessary.
+ // Handle copies within the same array by copying in the appropriate direction.
void* dst_raw = GetRawData(sizeof(T), dst_pos);
const void* src_raw = src->GetRawData(sizeof(T), src_pos);
if (sizeof(T) == sizeof(uint8_t)) {
- // TUNING: use memmove here?
uint8_t* d = reinterpret_cast<uint8_t*>(dst_raw);
const uint8_t* s = reinterpret_cast<const uint8_t*>(src_raw);
- ArrayBackwardCopy<uint8_t>(d, s, count);
- } else if (sizeof(T) == sizeof(uint16_t)) {
- uint16_t* d = reinterpret_cast<uint16_t*>(dst_raw);
- const uint16_t* s = reinterpret_cast<const uint16_t*>(src_raw);
- ArrayBackwardCopy<uint16_t>(d, s, count);
- } else if (sizeof(T) == sizeof(uint32_t)) {
- uint32_t* d = reinterpret_cast<uint32_t*>(dst_raw);
- const uint32_t* s = reinterpret_cast<const uint32_t*>(src_raw);
- ArrayBackwardCopy<uint32_t>(d, s, count);
+ memmove(d, s, count);
} else {
- DCHECK_EQ(sizeof(T), sizeof(uint64_t));
- uint64_t* d = reinterpret_cast<uint64_t*>(dst_raw);
- const uint64_t* s = reinterpret_cast<const uint64_t*>(src_raw);
- ArrayBackwardCopy<uint64_t>(d, s, count);
+ const bool copy_forward = (dst_pos < src_pos) || (dst_pos - src_pos >= count);
+ if (sizeof(T) == sizeof(uint16_t)) {
+ uint16_t* d = reinterpret_cast<uint16_t*>(dst_raw);
+ const uint16_t* s = reinterpret_cast<const uint16_t*>(src_raw);
+ if (copy_forward) {
+ ArrayForwardCopy<uint16_t>(d, s, count);
+ } else {
+ ArrayBackwardCopy<uint16_t>(d, s, count);
+ }
+ } else if (sizeof(T) == sizeof(uint32_t)) {
+ uint32_t* d = reinterpret_cast<uint32_t*>(dst_raw);
+ const uint32_t* s = reinterpret_cast<const uint32_t*>(src_raw);
+ if (copy_forward) {
+ ArrayForwardCopy<uint32_t>(d, s, count);
+ } else {
+ ArrayBackwardCopy<uint32_t>(d, s, count);
+ }
+ } else {
+ DCHECK_EQ(sizeof(T), sizeof(uint64_t));
+ uint64_t* d = reinterpret_cast<uint64_t*>(dst_raw);
+ const uint64_t* s = reinterpret_cast<const uint64_t*>(src_raw);
+ if (copy_forward) {
+ ArrayForwardCopy<uint64_t>(d, s, count);
+ } else {
+ ArrayBackwardCopy<uint64_t>(d, s, count);
+ }
+ }
}
}
}
-// Similar to memcpy except elements are of aligned appropriately for T, count is in T sized units
-// copies are guaranteed not to tear when T is less-than 64bit.
-template<typename T>
-static inline void ArrayForwardCopy(T* d, const T* s, int32_t count) {
- for (int32_t i = 0; i < count; ++i) {
- *d = *s;
- d++;
- s++;
- }
-}
-
-
template<class T>
inline void PrimitiveArray<T>::Memcpy(int32_t dst_pos, PrimitiveArray<T>* src, int32_t src_pos,
int32_t count) {
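
The rewritten Memmove keeps memcpy for distinct arrays and, for self-copies, picks the direction with copy_forward = (dst_pos < src_pos) || (dst_pos - src_pos >= count): a forward copy is only unsafe when the destination starts inside the source range it still has to read. A standalone check of that predicate (hypothetical helper, not from the patch):

    #include <cstdint>
    #include <cstdio>

    static bool CopyForwardIsSafe(int32_t dst_pos, int32_t src_pos, int32_t count) {
      return (dst_pos < src_pos) || (dst_pos - src_pos >= count);
    }

    int main() {
      // dst=2, src=0, count=4: ranges [2,6) and [0,4) overlap with dst ahead,
      // so a forward copy would re-read elements it already overwrote.
      std::printf("%d\n", CopyForwardIsSafe(2, 0, 4));  // 0 -> copy backward
      // dst=0, src=2: dst trails src; each source element is read before the
      // destination cursor reaches it.
      std::printf("%d\n", CopyForwardIsSafe(0, 2, 4));  // 1 -> copy forward
      // Disjoint ranges: dst=8, src=0, count=4.
      std::printf("%d\n", CopyForwardIsSafe(8, 0, 4));  // 1 -> copy forward
      return 0;
    }
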
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 281d4ec..527b8a6 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -612,17 +612,17 @@
const JavaLangRefVisitor& ref_visitor) {
mirror::Class* klass = GetClass<kVerifyFlags>();
if (UNLIKELY(klass == Class::GetJavaLangClass())) {
- DCHECK_EQ(klass->GetClass(), Class::GetJavaLangClass());
+ DCHECK_EQ(klass->GetClass<kVerifyNone>(), Class::GetJavaLangClass());
AsClass<kVerifyNone>()->VisitReferences<kVisitClass>(klass, visitor);
} else if (UNLIKELY(klass->IsArrayClass<kVerifyFlags>())) {
if (klass->IsObjectArrayClass<kVerifyNone>()) {
- AsObjectArray<mirror::Object>()->VisitReferences<kVisitClass>(visitor);
+ AsObjectArray<mirror::Object, kVerifyNone>()->VisitReferences<kVisitClass>(visitor);
} else if (kVisitClass) {
visitor(this, ClassOffset(), false);
}
} else {
- VisitFieldsReferences<kVisitClass, false>(klass->GetReferenceInstanceOffsets(), visitor);
- if (UNLIKELY(klass->IsReferenceClass())) {
+ VisitInstanceFieldsReferences<kVisitClass>(klass, visitor);
+ if (UNLIKELY(klass->IsReferenceClass<kVerifyNone>())) {
ref_visitor(klass, AsReference());
}
}
diff --git a/runtime/native/java_lang_reflect_Field.cc b/runtime/native/java_lang_reflect_Field.cc
index 6667d51..a69bd05 100644
--- a/runtime/native/java_lang_reflect_Field.cc
+++ b/runtime/native/java_lang_reflect_Field.cc
@@ -141,7 +141,7 @@
// Widen it if necessary (and possible).
JValue wide_value;
if (!ConvertPrimitiveValue(NULL, false, field_type, Primitive::GetType(dst_descriptor),
- field_value, wide_value)) {
+ field_value, &wide_value)) {
DCHECK(soa.Self()->IsExceptionPending());
return JValue();
}
@@ -257,7 +257,7 @@
// Unbox the value, if necessary.
mirror::Object* boxed_value = soa.Decode<mirror::Object*>(javaValue);
JValue unboxed_value;
- if (!UnboxPrimitiveForField(boxed_value, field_type, unboxed_value, f)) {
+ if (!UnboxPrimitiveForField(boxed_value, field_type, f, &unboxed_value)) {
DCHECK(soa.Self()->IsExceptionPending());
return;
}
@@ -282,7 +282,7 @@
// Widen the value if necessary (and possible).
JValue wide_value;
if (!ConvertPrimitiveValue(nullptr, false, Primitive::GetType(src_descriptor),
- field_type, new_value, wide_value)) {
+ field_type, new_value, &wide_value)) {
DCHECK(soa.Self()->IsExceptionPending());
return;
}
diff --git a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
index 1b9ebe4..a7ca0b8 100644
--- a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
+++ b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
@@ -44,23 +44,31 @@
* NULL on failure, e.g. if the threadId couldn't be found.
*/
static jobjectArray DdmVmInternal_getStackTraceById(JNIEnv* env, jclass, jint thin_lock_id) {
- // Suspend thread to build stack trace.
- ThreadList* thread_list = Runtime::Current()->GetThreadList();
jobjectArray trace = nullptr;
- bool timed_out;
- Thread* thread = thread_list->SuspendThreadByThreadId(thin_lock_id, false, &timed_out);
- if (thread != NULL) {
- {
- ScopedObjectAccess soa(env);
- jobject internal_trace = thread->CreateInternalStackTrace(soa);
- trace = Thread::InternalStackTraceToStackTraceElementArray(soa, internal_trace);
- }
- // Restart suspended thread.
- thread_list->Resume(thread, false);
+ Thread* const self = Thread::Current();
+ if (static_cast<uint32_t>(thin_lock_id) == self->GetThreadId()) {
+ // No need to suspend ourselves to build the stack trace.
+ ScopedObjectAccess soa(env);
+ jobject internal_trace = self->CreateInternalStackTrace(soa);
+ trace = Thread::InternalStackTraceToStackTraceElementArray(soa, internal_trace);
} else {
- if (timed_out) {
- LOG(ERROR) << "Trying to get thread's stack by id failed as the thread failed to suspend "
- "within a generous timeout.";
+ // Suspend thread to build stack trace.
+ ThreadList* thread_list = Runtime::Current()->GetThreadList();
+ bool timed_out;
+ Thread* thread = thread_list->SuspendThreadByThreadId(thin_lock_id, false, &timed_out);
+ if (thread != nullptr) {
+ {
+ ScopedObjectAccess soa(env);
+ jobject internal_trace = thread->CreateInternalStackTrace(soa);
+ trace = Thread::InternalStackTraceToStackTraceElementArray(soa, internal_trace);
+ }
+ // Restart suspended thread.
+ thread_list->Resume(thread, false);
+ } else {
+ if (timed_out) {
+ LOG(ERROR) << "Trying to get thread's stack by id failed as the thread failed to suspend "
+ "within a generous timeout.";
+ }
}
}
return trace;
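
The rewritten getStackTraceById short-circuits the common self-inspection case: a thread cannot suspend itself via SuspendThreadByThreadId, so when the requested id is the caller's own, the trace is built directly. A toy standalone version of the self-check, with std::thread ids standing in for thin lock ids:

    #include <iostream>
    #include <thread>

    // Walk the target's stack; suspend/resume only when it is another thread.
    void DumpStackOf(std::thread::id target) {
      if (target == std::this_thread::get_id()) {
        std::cout << "walking own stack directly\n";  // no suspension needed
      } else {
        std::cout << "suspend, walk, resume\n";       // remote-thread path
      }
    }

    int main() {
      DumpStackOf(std::this_thread::get_id());  // takes the direct path
      return 0;
    }
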
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index f567055..7f39e70 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -543,76 +543,58 @@
bool ConvertPrimitiveValue(const ThrowLocation* throw_location, bool unbox_for_result,
Primitive::Type srcType, Primitive::Type dstType,
- const JValue& src, JValue& dst) {
- CHECK(srcType != Primitive::kPrimNot && dstType != Primitive::kPrimNot);
+ const JValue& src, JValue* dst) {
+ DCHECK(srcType != Primitive::kPrimNot && dstType != Primitive::kPrimNot);
+ if (LIKELY(srcType == dstType)) {
+ dst->SetJ(src.GetJ());
+ return true;
+ }
switch (dstType) {
- case Primitive::kPrimBoolean:
- if (srcType == Primitive::kPrimBoolean) {
- dst.SetZ(src.GetZ());
- return true;
- }
- break;
- case Primitive::kPrimChar:
- if (srcType == Primitive::kPrimChar) {
- dst.SetC(src.GetC());
- return true;
- }
- break;
+ case Primitive::kPrimBoolean: // Fall-through.
+ case Primitive::kPrimChar: // Fall-through.
case Primitive::kPrimByte:
- if (srcType == Primitive::kPrimByte) {
- dst.SetB(src.GetB());
- return true;
- }
+ // Only expect assignment with source and destination of identical type.
break;
case Primitive::kPrimShort:
- if (srcType == Primitive::kPrimByte || srcType == Primitive::kPrimShort) {
- dst.SetS(src.GetI());
+ if (srcType == Primitive::kPrimByte) {
+ dst->SetS(src.GetI());
return true;
}
break;
case Primitive::kPrimInt:
if (srcType == Primitive::kPrimByte || srcType == Primitive::kPrimChar ||
- srcType == Primitive::kPrimShort || srcType == Primitive::kPrimInt) {
- dst.SetI(src.GetI());
+ srcType == Primitive::kPrimShort) {
+ dst->SetI(src.GetI());
return true;
}
break;
case Primitive::kPrimLong:
if (srcType == Primitive::kPrimByte || srcType == Primitive::kPrimChar ||
srcType == Primitive::kPrimShort || srcType == Primitive::kPrimInt) {
- dst.SetJ(src.GetI());
- return true;
- } else if (srcType == Primitive::kPrimLong) {
- dst.SetJ(src.GetJ());
+ dst->SetJ(src.GetI());
return true;
}
break;
case Primitive::kPrimFloat:
if (srcType == Primitive::kPrimByte || srcType == Primitive::kPrimChar ||
srcType == Primitive::kPrimShort || srcType == Primitive::kPrimInt) {
- dst.SetF(src.GetI());
+ dst->SetF(src.GetI());
return true;
} else if (srcType == Primitive::kPrimLong) {
- dst.SetF(src.GetJ());
- return true;
- } else if (srcType == Primitive::kPrimFloat) {
- dst.SetF(src.GetF());
+ dst->SetF(src.GetJ());
return true;
}
break;
case Primitive::kPrimDouble:
if (srcType == Primitive::kPrimByte || srcType == Primitive::kPrimChar ||
srcType == Primitive::kPrimShort || srcType == Primitive::kPrimInt) {
- dst.SetD(src.GetI());
+ dst->SetD(src.GetI());
return true;
} else if (srcType == Primitive::kPrimLong) {
- dst.SetD(src.GetJ());
+ dst->SetD(src.GetJ());
return true;
} else if (srcType == Primitive::kPrimFloat) {
- dst.SetD(src.GetF());
- return true;
- } else if (srcType == Primitive::kPrimDouble) {
- dst.SetJ(src.GetJ());
+ dst->SetD(src.GetF());
return true;
}
break;
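
The restructured ConvertPrimitiveValue hoists the identity case (srcType == dstType) to the top, so each switch arm lists only genuine widening conversions, and boolean/char/byte fall through to failure. A standalone sketch of the same shape for an int destination, with a hypothetical enum:

    #include <cstdint>
    #include <cstdio>

    enum class Prim { kByte, kChar, kShort, kInt, kLong };

    // Identity is handled up front; the remaining checks are pure widenings.
    static bool ConvertToInt(Prim src, int32_t in, int32_t* out) {
      if (src == Prim::kInt) {        // identity case, hoisted
        *out = in;
        return true;
      }
      if (src == Prim::kByte || src == Prim::kChar || src == Prim::kShort) {
        *out = in;                    // byte/char/short widen to int
        return true;
      }
      return false;                   // long -> int would narrow: reject
    }

    int main() {
      int32_t v;
      std::printf("%d\n", ConvertToInt(Prim::kShort, -5, &v));  // 1
      std::printf("%d\n", ConvertToInt(Prim::kLong, 7, &v));    // 0
      return 0;
    }
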
@@ -642,7 +624,7 @@
return nullptr;
}
- jmethodID m = NULL;
+ jmethodID m = nullptr;
const char* shorty;
switch (src_class) {
case Primitive::kPrimBoolean:
@@ -698,29 +680,25 @@
return result.GetL();
}
-static std::string UnboxingFailureKind(mirror::ArtMethod* m, int index, mirror::ArtField* f)
+static std::string UnboxingFailureKind(mirror::ArtField* f)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (m != NULL && index != -1) {
- ++index; // Humans count from 1.
- return StringPrintf("method %s argument %d", PrettyMethod(m, false).c_str(), index);
- }
- if (f != NULL) {
+ if (f != nullptr) {
return "field " + PrettyField(f, false);
}
return "result";
}
static bool UnboxPrimitive(const ThrowLocation* throw_location, mirror::Object* o,
- mirror::Class* dst_class, JValue& unboxed_value,
- mirror::ArtMethod* m, int index, mirror::ArtField* f)
+ mirror::Class* dst_class, mirror::ArtField* f,
+ JValue* unboxed_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- bool unbox_for_result = (f == NULL) && (index == -1);
+ bool unbox_for_result = (f == nullptr);
if (!dst_class->IsPrimitive()) {
- if (UNLIKELY(o != NULL && !o->InstanceOf(dst_class))) {
+ if (UNLIKELY(o != nullptr && !o->InstanceOf(dst_class))) {
if (!unbox_for_result) {
ThrowIllegalArgumentException(throw_location,
StringPrintf("%s has type %s, got %s",
- UnboxingFailureKind(m, index, f).c_str(),
+ UnboxingFailureKind(f).c_str(),
PrettyDescriptor(dst_class).c_str(),
PrettyTypeOf(o).c_str()).c_str());
} else {
@@ -731,20 +709,20 @@
}
return false;
}
- unboxed_value.SetL(o);
+ unboxed_value->SetL(o);
return true;
}
if (UNLIKELY(dst_class->GetPrimitiveType() == Primitive::kPrimVoid)) {
ThrowIllegalArgumentException(throw_location,
StringPrintf("Can't unbox %s to void",
- UnboxingFailureKind(m, index, f).c_str()).c_str());
+ UnboxingFailureKind(f).c_str()).c_str());
return false;
}
- if (UNLIKELY(o == NULL)) {
+ if (UNLIKELY(o == nullptr)) {
if (!unbox_for_result) {
ThrowIllegalArgumentException(throw_location,
StringPrintf("%s has type %s, got null",
- UnboxingFailureKind(m, index, f).c_str(),
+ UnboxingFailureKind(f).c_str(),
PrettyDescriptor(dst_class).c_str()).c_str());
} else {
ThrowNullPointerException(throw_location,
@@ -756,7 +734,7 @@
JValue boxed_value;
const StringPiece src_descriptor(ClassHelper(o->GetClass()).GetDescriptor());
- mirror::Class* src_class = NULL;
+ mirror::Class* src_class = nullptr;
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
mirror::ArtField* primitive_field = o->GetClass()->GetIFields()->Get(0);
if (src_descriptor == "Ljava/lang/Boolean;") {
@@ -786,7 +764,7 @@
} else {
ThrowIllegalArgumentException(throw_location,
StringPrintf("%s has type %s, got %s",
- UnboxingFailureKind(m, index, f).c_str(),
+ UnboxingFailureKind(f).c_str(),
PrettyDescriptor(dst_class).c_str(),
PrettyDescriptor(src_descriptor.data()).c_str()).c_str());
return false;
@@ -797,21 +775,15 @@
boxed_value, unboxed_value);
}
-bool UnboxPrimitiveForArgument(mirror::Object* o, mirror::Class* dst_class, JValue& unboxed_value,
- mirror::ArtMethod* m, size_t index) {
- CHECK(m != NULL);
- return UnboxPrimitive(NULL, o, dst_class, unboxed_value, m, index, NULL);
-}
-
-bool UnboxPrimitiveForField(mirror::Object* o, mirror::Class* dst_class, JValue& unboxed_value,
- mirror::ArtField* f) {
- CHECK(f != NULL);
- return UnboxPrimitive(NULL, o, dst_class, unboxed_value, NULL, -1, f);
+bool UnboxPrimitiveForField(mirror::Object* o, mirror::Class* dst_class, mirror::ArtField* f,
+ JValue* unboxed_value) {
+ DCHECK(f != nullptr);
+ return UnboxPrimitive(nullptr, o, dst_class, f, unboxed_value);
}
bool UnboxPrimitiveForResult(const ThrowLocation& throw_location, mirror::Object* o,
- mirror::Class* dst_class, JValue& unboxed_value) {
- return UnboxPrimitive(&throw_location, o, dst_class, unboxed_value, NULL, -1, NULL);
+ mirror::Class* dst_class, JValue* unboxed_value) {
+ return UnboxPrimitive(&throw_location, o, dst_class, nullptr, unboxed_value);
}
} // namespace art
diff --git a/runtime/reflection.h b/runtime/reflection.h
index d2f9f25..325998f 100644
--- a/runtime/reflection.h
+++ b/runtime/reflection.h
@@ -36,19 +36,16 @@
mirror::Object* BoxPrimitive(Primitive::Type src_class, const JValue& value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-bool UnboxPrimitiveForArgument(mirror::Object* o, mirror::Class* dst_class, JValue& unboxed_value,
- mirror::ArtMethod* m, size_t index)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-bool UnboxPrimitiveForField(mirror::Object* o, mirror::Class* dst_class, JValue& unboxed_value,
- mirror::ArtField* f)
+bool UnboxPrimitiveForField(mirror::Object* o, mirror::Class* dst_class, mirror::ArtField* f,
+ JValue* unboxed_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool UnboxPrimitiveForResult(const ThrowLocation& throw_location, mirror::Object* o,
- mirror::Class* dst_class, JValue& unboxed_value)
+ mirror::Class* dst_class, JValue* unboxed_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool ConvertPrimitiveValue(const ThrowLocation* throw_location, bool unbox_for_result,
Primitive::Type src_class, Primitive::Type dst_class,
- const JValue& src, JValue& dst)
+ const JValue& src, JValue* dst)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
JValue InvokeWithVarArgs(const ScopedObjectAccess& soa, jobject obj, jmethodID mid, va_list args)
diff --git a/test/083-compiler-regressions/expected.txt b/test/083-compiler-regressions/expected.txt
index 90d8634..f6de0e7 100644
--- a/test/083-compiler-regressions/expected.txt
+++ b/test/083-compiler-regressions/expected.txt
@@ -16,3 +16,4 @@
longModTest passes
testIfCcz passes
ManyFloatArgs passes
+atomicLong passes
diff --git a/test/083-compiler-regressions/src/Main.java b/test/083-compiler-regressions/src/Main.java
index 96c71cf..2745c27 100644
--- a/test/083-compiler-regressions/src/Main.java
+++ b/test/083-compiler-regressions/src/Main.java
@@ -15,6 +15,7 @@
*/
import java.util.concurrent.*;
+import java.util.concurrent.atomic.AtomicLong;
/**
* Test for Jit regressions.
@@ -47,6 +48,18 @@
ZeroTests.longModTest();
MirOpSelectTests.testIfCcz();
ManyFloatArgs();
+ atomicLong();
+ }
+
+ public static void atomicLong() {
+ AtomicLong atomicLong = new AtomicLong();
+ atomicLong.addAndGet(3);
+ atomicLong.addAndGet(2);
+ atomicLong.addAndGet(1);
+ long result = atomicLong.get();
+ System.out.println(result == 6L ? "atomicLong passes" :
+ ("atomicLong failes: returns " + result + ", expected 6")
+ );
}
public static void returnConstantTest() {