Continuing register cleanup
Ready for review.
Continue the process of using RegStorage rather than
ints to hold register values in the top layers of codegen.
Given the huge number of changes in this CL, I've attempted
to minimize the number of actual logic changes. With this
CL, the use of ints for registers has largely been eliminated
except in the lowest utility levels. "Wide" utility routines
have been updated to take a single RegStorage rather than
a pair of ints representing low and high registers.
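For illustration, here is a minimal sketch of the interface shape this
change relies on. This is not the real art::RegStorage implementation;
it only mirrors the accessors that appear in the diff below (Solo32,
MakeRegPair, InvalidReg, GetReg, GetLowReg/GetHighReg, GetLow/GetHigh),
and the before/after signatures at the end are paraphrased rather than
quoted:

  // Illustrative only -- a stand-in for the subset of RegStorage used here.
  class RegStorage {
   public:
    static const int kInvalidRegVal = -1;
    RegStorage() : low_(kInvalidRegVal), high_(kInvalidRegVal) {}
    static RegStorage InvalidReg() { return RegStorage(); }
    // A single 32-bit register.
    static RegStorage Solo32(int reg_num) {
      RegStorage rs;
      rs.low_ = reg_num;
      return rs;
    }
    // A 64-bit value held as a low/high pair of 32-bit registers.
    static RegStorage MakeRegPair(RegStorage low, RegStorage high) {
      RegStorage rs;
      rs.low_ = low.low_;
      rs.high_ = high.low_;
      return rs;
    }
    bool IsPair() const { return high_ != kInvalidRegVal; }
    int GetReg() const { return low_; }        // raw number of a solo register
    int GetLowReg() const { return low_; }     // raw number of the low half
    int GetHighReg() const { return high_; }   // raw number of the high half
    RegStorage GetLow() const { return Solo32(low_); }
    RegStorage GetHigh() const { return Solo32(high_); }
   private:
    int low_;
    int high_;
  };

  // Wide utilities change shape roughly like this (paraphrased):
  //   before: StoreBaseDispWide(int r_base, int disp, int r_src_lo, int r_src_hi)
  //   after:  StoreBaseDispWide(RegStorage r_base, int disp, RegStorage r_src)
  // so callers pass rl_src.reg directly instead of splitting it into
  // rl_src.reg.GetReg() and rl_src.reg.GetHighReg().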
Upcoming CLs will be smaller and more targeted. My expectations:
o Allocate floating-point double registers as a single double rather
  than as a pair of single-precision registers.
o Refactor to push code that assumes long and double Dalvik
  values are held in a pair of registers down into the
  target-dependent layer (the current pattern is shown in the
  fragment after this list).
o Clean up the xxx_mir.h files to reduce the number of #defines
  for registers. May also do a register renumbering to make
  register naming more consistent across our targets. Possibly
  introduce a target-independent float/non-float test at the
  RegStorage level.
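The long/double refactoring mentioned above targets call sites like the
fragment below (adapted from hunks in this diff; TargetReg, OpRegRegReg
and LoadValueDirectWideFixed are existing Mir2Lir interfaces, shown only
to make the pair-handling pattern explicit):

  // Wide fixed-register arguments are now built as an explicit pair once...
  RegStorage r_tmp1 = RegStorage::MakeRegPair(TargetReg(kArg0), TargetReg(kArg1));
  RegStorage r_tmp2 = RegStorage::MakeRegPair(TargetReg(kArg2), TargetReg(kArg3));
  LoadValueDirectWideFixed(rl_src2, r_tmp2);

  // ...while per-half arithmetic still splits the pair with GetLow()/GetHigh():
  OpRegRegReg(first_op, rl_result.reg.GetLow(), rl_src1.reg.GetLow(), rl_src2.reg.GetLow());
  OpRegRegReg(second_op, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), rl_src2.reg.GetHigh());

The goal of the follow-up is to confine that explicit GetLow()/GetHigh()
splitting to the target-dependent backends.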
Change-Id: I646de7392bdec94595dd2c6f76e0f1c4331096ff
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 3235977..2afa5ca 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -54,12 +54,15 @@
return branch;
}
-LIR* Mir2Lir::GenImmedCheck(ConditionCode c_code, int reg, int imm_val, ThrowKind kind) {
- LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_, reg, imm_val);
+LIR* Mir2Lir::GenImmedCheck(ConditionCode c_code, RegStorage reg, int imm_val, ThrowKind kind) {
+ LIR* tgt;
LIR* branch;
if (c_code == kCondAl) {
+ tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_, RegStorage::kInvalidRegVal,
+ imm_val);
branch = OpUnconditionalBranch(tgt);
} else {
+ tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_, reg.GetReg(), imm_val);
branch = OpCmpImmBranch(c_code, reg, imm_val, tgt);
}
// Remember branch target - will process later
@@ -69,7 +72,7 @@
/* Perform null-check on a register. */
-LIR* Mir2Lir::GenNullCheck(int m_reg, int opt_flags) {
+LIR* Mir2Lir::GenNullCheck(RegStorage m_reg, int opt_flags) {
if (Runtime::Current()->ExplicitNullChecks()) {
if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
return NULL;
@@ -94,14 +97,15 @@
}
}
-void Mir2Lir::ForceImplicitNullCheck(int reg, int opt_flags) {
+void Mir2Lir::ForceImplicitNullCheck(RegStorage reg, int opt_flags) {
if (!Runtime::Current()->ExplicitNullChecks()) {
if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
return;
}
// Force an implicit null check by performing a memory operation (load) from the given
// register with offset 0. This will cause a signal if the register contains 0 (null).
- int tmp = AllocTemp();
+ RegStorage tmp = AllocTemp();
+ // TODO: for Mips, would be best to use rZERO as the bogus register target.
LIR* load = LoadWordDisp(reg, 0, tmp);
FreeTemp(tmp);
MarkSafepointPC(load);
@@ -109,9 +113,10 @@
}
/* Perform check on two registers */
-LIR* Mir2Lir::GenRegRegCheck(ConditionCode c_code, int reg1, int reg2,
+LIR* Mir2Lir::GenRegRegCheck(ConditionCode c_code, RegStorage reg1, RegStorage reg2,
ThrowKind kind) {
- LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_, reg1, reg2);
+ LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_, reg1.GetReg(),
+ reg2.GetReg());
LIR* branch = OpCmpBranch(c_code, reg1, reg2, tgt);
// Remember branch target - will process later
throw_launchpads_.Insert(tgt);
@@ -162,12 +167,12 @@
if ((rl_temp.location == kLocDalvikFrame) &&
InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src2))) {
// OK - convert this to a compare immediate and branch
- OpCmpImmBranch(cond, rl_src1.reg.GetReg(), mir_graph_->ConstantValue(rl_src2), taken);
+ OpCmpImmBranch(cond, rl_src1.reg, mir_graph_->ConstantValue(rl_src2), taken);
return;
}
}
rl_src2 = LoadValue(rl_src2, kCoreReg);
- OpCmpBranch(cond, rl_src1.reg.GetReg(), rl_src2.reg.GetReg(), taken);
+ OpCmpBranch(cond, rl_src1.reg, rl_src2.reg, taken);
}
void Mir2Lir::GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src, LIR* taken,
@@ -197,17 +202,17 @@
cond = static_cast<ConditionCode>(0);
LOG(FATAL) << "Unexpected opcode " << opcode;
}
- OpCmpImmBranch(cond, rl_src.reg.GetReg(), 0, taken);
+ OpCmpImmBranch(cond, rl_src.reg, 0, taken);
}
void Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (rl_src.location == kLocPhysReg) {
- OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetReg());
+ OpRegCopy(rl_result.reg, rl_src.reg);
} else {
- LoadValueDirect(rl_src, rl_result.reg.GetReg());
+ LoadValueDirect(rl_src, rl_result.reg.GetLow());
}
- OpRegRegImm(kOpAsr, rl_result.reg.GetHighReg(), rl_result.reg.GetReg(), 31);
+ OpRegRegImm(kOpAsr, rl_result.reg.GetHigh(), rl_result.reg.GetLow(), 31);
StoreValueWide(rl_dest, rl_result);
}
@@ -229,7 +234,7 @@
default:
LOG(ERROR) << "Bad int conversion type";
}
- OpRegReg(op, rl_result.reg.GetReg(), rl_src.reg.GetReg());
+ OpRegReg(op, rl_result.reg, rl_src.reg);
StoreValue(rl_dest, rl_result);
}
@@ -323,8 +328,7 @@
for (int i = 0; i < elems; i++) {
RegLocation loc = UpdateLoc(info->args[i]);
if (loc.location == kLocPhysReg) {
- StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low),
- loc.reg.GetReg(), kWord);
+ StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, kWord);
}
}
/*
@@ -332,10 +336,10 @@
* this is an uncommon operation and isn't especially performance
* critical.
*/
- int r_src = AllocTemp();
- int r_dst = AllocTemp();
- int r_idx = AllocTemp();
- int r_val = INVALID_REG;
+ RegStorage r_src = AllocTemp();
+ RegStorage r_dst = AllocTemp();
+ RegStorage r_idx = AllocTemp();
+ RegStorage r_val;
switch (cu_->instruction_set) {
case kThumb2:
r_val = TargetReg(kLr);
@@ -374,11 +378,11 @@
for (int i = 0; i < elems; i++) {
RegLocation rl_arg = LoadValue(info->args[i], kCoreReg);
StoreBaseDisp(TargetReg(kRet0),
- mirror::Array::DataOffset(component_size).Int32Value() +
- i * 4, rl_arg.reg.GetReg(), kWord);
+ mirror::Array::DataOffset(component_size).Int32Value() + i * 4,
+ rl_arg.reg, kWord);
// If the LoadValue caused a temp to be allocated, free it
- if (IsTemp(rl_arg.reg.GetReg())) {
- FreeTemp(rl_arg.reg.GetReg());
+ if (IsTemp(rl_arg.reg)) {
+ FreeTemp(rl_arg.reg);
}
}
}
@@ -392,17 +396,17 @@
//
class StaticFieldSlowPath : public Mir2Lir::LIRSlowPath {
public:
- StaticFieldSlowPath(Mir2Lir* m2l, LIR* unresolved, LIR* uninit, LIR* cont,
- int storage_index, int r_base) :
- LIRSlowPath(m2l, m2l->GetCurrentDexPc(), unresolved, cont), uninit_(uninit), storage_index_(storage_index),
- r_base_(r_base) {
+ StaticFieldSlowPath(Mir2Lir* m2l, LIR* unresolved, LIR* uninit, LIR* cont, int storage_index,
+ RegStorage r_base) :
+ LIRSlowPath(m2l, m2l->GetCurrentDexPc(), unresolved, cont), uninit_(uninit),
+ storage_index_(storage_index), r_base_(r_base) {
}
void Compile() {
LIR* unresolved_target = GenerateTargetLabel();
uninit_->target = unresolved_target;
m2l_->CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeStaticStorage),
- storage_index_, true);
+ storage_index_, true);
// Copy helper's result into r_base, a no-op on all but MIPS.
m2l_->OpRegCopy(r_base_, m2l_->TargetReg(kRet0));
@@ -412,7 +416,7 @@
private:
LIR* const uninit_;
const int storage_index_;
- const int r_base_;
+ const RegStorage r_base_;
};
void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, bool is_long_or_double,
@@ -421,15 +425,14 @@
cu_->compiler_driver->ProcessedStaticField(field_info.FastPut(), field_info.IsReferrersClass());
if (field_info.FastPut() && !SLOW_FIELD_PATH) {
DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
- int r_base;
+ RegStorage r_base;
if (field_info.IsReferrersClass()) {
// Fast path, static storage base is this method's class
RegLocation rl_method = LoadCurrMethod();
r_base = AllocTemp();
- LoadWordDisp(rl_method.reg.GetReg(),
- mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base);
- if (IsTemp(rl_method.reg.GetReg())) {
- FreeTemp(rl_method.reg.GetReg());
+ LoadWordDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base);
+ if (IsTemp(rl_method.reg)) {
+ FreeTemp(rl_method.reg);
}
} else {
// Medium path, static storage base in a different class which requires checks that the other
@@ -439,14 +442,12 @@
// May do runtime call so everything to home locations.
FlushAllRegs();
// Using fixed register to sync with possible call to runtime support.
- int r_method = TargetReg(kArg1);
+ RegStorage r_method = TargetReg(kArg1);
LockTemp(r_method);
LoadCurrMethodDirect(r_method);
r_base = TargetReg(kArg0);
LockTemp(r_base);
- LoadWordDisp(r_method,
- mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
- r_base);
+ LoadWordDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base);
LoadWordDisp(r_base, mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
sizeof(int32_t*) * field_info.StorageIndex(), r_base);
// r_base now points at static storage (Class*) or NULL if the type is not yet resolved.
@@ -457,15 +458,14 @@
// The slow path is invoked if the r_base is NULL or the class pointed
// to by it is not initialized.
LIR* unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, NULL);
- int r_tmp = TargetReg(kArg2);
+ RegStorage r_tmp = TargetReg(kArg2);
LockTemp(r_tmp);
LIR* uninit_branch = OpCmpMemImmBranch(kCondLt, r_tmp, r_base,
mirror::Class::StatusOffset().Int32Value(),
mirror::Class::kStatusInitialized, NULL);
LIR* cont = NewLIR0(kPseudoTargetLabel);
- AddSlowPath(new (arena_) StaticFieldSlowPath(this,
- unresolved_branch, uninit_branch, cont,
+ AddSlowPath(new (arena_) StaticFieldSlowPath(this, unresolved_branch, uninit_branch, cont,
field_info.StorageIndex(), r_base));
FreeTemp(r_tmp);
@@ -483,17 +483,16 @@
GenMemBarrier(kStoreStore);
}
if (is_long_or_double) {
- StoreBaseDispWide(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg.GetReg(),
- rl_src.reg.GetHighReg());
+ StoreBaseDispWide(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg);
} else {
- StoreWordDisp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg.GetReg());
+ StoreWordDisp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg);
}
if (field_info.IsVolatile()) {
// A load might follow the volatile store so insert a StoreLoad barrier.
GenMemBarrier(kStoreLoad);
}
if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
- MarkGCCard(rl_src.reg.GetReg(), r_base);
+ MarkGCCard(rl_src.reg, r_base);
}
FreeTemp(r_base);
} else {
@@ -512,13 +511,12 @@
cu_->compiler_driver->ProcessedStaticField(field_info.FastGet(), field_info.IsReferrersClass());
if (field_info.FastGet() && !SLOW_FIELD_PATH) {
DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
- int r_base;
+ RegStorage r_base;
if (field_info.IsReferrersClass()) {
// Fast path, static storage base is this method's class
RegLocation rl_method = LoadCurrMethod();
r_base = AllocTemp();
- LoadWordDisp(rl_method.reg.GetReg(),
- mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base);
+ LoadWordDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base);
} else {
// Medium path, static storage base in a different class which requires checks that the other
// class is initialized
@@ -526,14 +524,12 @@
// May do runtime call so everything to home locations.
FlushAllRegs();
// Using fixed register to sync with possible call to runtime support.
- int r_method = TargetReg(kArg1);
+ RegStorage r_method = TargetReg(kArg1);
LockTemp(r_method);
LoadCurrMethodDirect(r_method);
r_base = TargetReg(kArg0);
LockTemp(r_base);
- LoadWordDisp(r_method,
- mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
- r_base);
+ LoadWordDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base);
LoadWordDisp(r_base, mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
sizeof(int32_t*) * field_info.StorageIndex(), r_base);
// r_base now points at static storage (Class*) or NULL if the type is not yet resolved.
@@ -544,15 +540,14 @@
// The slow path is invoked if the r_base is NULL or the class pointed
// to by it is not initialized.
LIR* unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, NULL);
- int r_tmp = TargetReg(kArg2);
+ RegStorage r_tmp = TargetReg(kArg2);
LockTemp(r_tmp);
LIR* uninit_branch = OpCmpMemImmBranch(kCondLt, r_tmp, r_base,
mirror::Class::StatusOffset().Int32Value(),
mirror::Class::kStatusInitialized, NULL);
LIR* cont = NewLIR0(kPseudoTargetLabel);
- AddSlowPath(new (arena_) StaticFieldSlowPath(this,
- unresolved_branch, uninit_branch, cont,
+ AddSlowPath(new (arena_) StaticFieldSlowPath(this, unresolved_branch, uninit_branch, cont,
field_info.StorageIndex(), r_base));
FreeTemp(r_tmp);
@@ -563,10 +558,9 @@
RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true);
if (is_long_or_double) {
- LoadBaseDispWide(r_base, field_info.FieldOffset().Int32Value(), rl_result.reg.GetReg(),
- rl_result.reg.GetHighReg(), INVALID_SREG);
+ LoadBaseDispWide(r_base, field_info.FieldOffset().Int32Value(), rl_result.reg, INVALID_SREG);
} else {
- LoadWordDisp(r_base, field_info.FieldOffset().Int32Value(), rl_result.reg.GetReg());
+ LoadWordDisp(r_base, field_info.FieldOffset().Int32Value(), rl_result.reg);
}
FreeTemp(r_base);
@@ -619,7 +613,7 @@
LIR* resume_lab = reinterpret_cast<LIR*>(UnwrapPointer(lab->operands[0]));
current_dalvik_offset_ = lab->operands[1];
AppendLIR(lab);
- int r_tgt = CallHelperSetup(helper_offset);
+ RegStorage r_tgt = CallHelperSetup(helper_offset);
CallHelper(r_tgt, helper_offset, true /* MarkSafepointPC */);
OpUnconditionalBranch(resume_lab);
}
@@ -644,9 +638,10 @@
case kThrowConstantArrayBounds: // v1 is length reg (for Arm/Mips), v2 constant index
// v1 holds the constant array index. Mips/Arm uses v2 for length, x86 reloads.
if (target_x86) {
- OpRegMem(kOpMov, TargetReg(kArg1), v1, mirror::Array::LengthOffset().Int32Value());
+ OpRegMem(kOpMov, TargetReg(kArg1), RegStorage::Solo32(v1),
+ mirror::Array::LengthOffset().Int32Value());
} else {
- OpRegCopy(TargetReg(kArg1), v1);
+ OpRegCopy(TargetReg(kArg1), RegStorage::Solo32(v1));
}
// Make sure the following LoadConstant doesn't mess with kArg1.
LockTemp(TargetReg(kArg1));
@@ -655,33 +650,36 @@
break;
case kThrowArrayBounds:
// Move v1 (array index) to kArg0 and v2 (array length) to kArg1
- if (v2 != TargetReg(kArg0)) {
- OpRegCopy(TargetReg(kArg0), v1);
+ if (v2 != TargetReg(kArg0).GetReg()) {
+ OpRegCopy(TargetReg(kArg0), RegStorage::Solo32(v1));
if (target_x86) {
// x86 leaves the array pointer in v2, so load the array length that the handler expects
- OpRegMem(kOpMov, TargetReg(kArg1), v2, mirror::Array::LengthOffset().Int32Value());
+ OpRegMem(kOpMov, TargetReg(kArg1), RegStorage::Solo32(v2),
+ mirror::Array::LengthOffset().Int32Value());
} else {
- OpRegCopy(TargetReg(kArg1), v2);
+ OpRegCopy(TargetReg(kArg1), RegStorage::Solo32(v2));
}
} else {
- if (v1 == TargetReg(kArg1)) {
+ if (v1 == TargetReg(kArg1).GetReg()) {
// Swap v1 and v2, using kArg2 as a temp
- OpRegCopy(TargetReg(kArg2), v1);
+ OpRegCopy(TargetReg(kArg2), RegStorage::Solo32(v1));
if (target_x86) {
// x86 leaves the array pointer in v2; load the array length that the handler expects
- OpRegMem(kOpMov, TargetReg(kArg1), v2, mirror::Array::LengthOffset().Int32Value());
+ OpRegMem(kOpMov, TargetReg(kArg1), RegStorage::Solo32(v2),
+ mirror::Array::LengthOffset().Int32Value());
} else {
- OpRegCopy(TargetReg(kArg1), v2);
+ OpRegCopy(TargetReg(kArg1), RegStorage::Solo32(v2));
}
OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));
} else {
if (target_x86) {
// x86 leaves the array pointer in v2; load the array length that the handler expects
- OpRegMem(kOpMov, TargetReg(kArg1), v2, mirror::Array::LengthOffset().Int32Value());
+ OpRegMem(kOpMov, TargetReg(kArg1), RegStorage::Solo32(v2),
+ mirror::Array::LengthOffset().Int32Value());
} else {
- OpRegCopy(TargetReg(kArg1), v2);
+ OpRegCopy(TargetReg(kArg1), RegStorage::Solo32(v2));
}
- OpRegCopy(TargetReg(kArg0), v1);
+ OpRegCopy(TargetReg(kArg0), RegStorage::Solo32(v1));
}
}
func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowArrayBounds);
@@ -690,7 +688,7 @@
func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowDivZero);
break;
case kThrowNoSuchMethod:
- OpRegCopy(TargetReg(kArg0), v1);
+ OpRegCopy(TargetReg(kArg0), RegStorage::Solo32(v1));
func_offset =
QUICK_ENTRYPOINT_OFFSET(pThrowNoSuchMethod);
break;
@@ -698,7 +696,7 @@
LOG(FATAL) << "Unexpected throw kind: " << lab->operands[0];
}
ClobberCallerSave();
- int r_tgt = CallHelperSetup(func_offset);
+ RegStorage r_tgt = CallHelperSetup(func_offset);
CallHelper(r_tgt, func_offset, true /* MarkSafepointPC */, true /* UseLink */);
}
}
@@ -715,13 +713,13 @@
rl_obj = LoadValue(rl_obj, kCoreReg);
if (is_long_or_double) {
DCHECK(rl_dest.wide);
- GenNullCheck(rl_obj.reg.GetReg(), opt_flags);
+ GenNullCheck(rl_obj.reg, opt_flags);
if (cu_->instruction_set == kX86) {
rl_result = EvalLoc(rl_dest, reg_class, true);
- GenNullCheck(rl_obj.reg.GetReg(), opt_flags);
- LoadBaseDispWide(rl_obj.reg.GetReg(), field_info.FieldOffset().Int32Value(),
- rl_result.reg.GetReg(),
- rl_result.reg.GetHighReg(), rl_obj.s_reg_low);
+ // FIXME? duplicate null check?
+ GenNullCheck(rl_obj.reg, opt_flags);
+ LoadBaseDispWide(rl_obj.reg, field_info.FieldOffset().Int32Value(), rl_result.reg,
+ rl_obj.s_reg_low);
MarkPossibleNullPointerException(opt_flags);
if (field_info.IsVolatile()) {
// Without context sensitive analysis, we must issue the most conservative barriers.
@@ -730,11 +728,10 @@
GenMemBarrier(kLoadStore);
}
} else {
- int reg_ptr = AllocTemp();
- OpRegRegImm(kOpAdd, reg_ptr, rl_obj.reg.GetReg(), field_info.FieldOffset().Int32Value());
+ RegStorage reg_ptr = AllocTemp();
+ OpRegRegImm(kOpAdd, reg_ptr, rl_obj.reg, field_info.FieldOffset().Int32Value());
rl_result = EvalLoc(rl_dest, reg_class, true);
- LoadBaseDispWide(reg_ptr, 0, rl_result.reg.GetReg(), rl_result.reg.GetHighReg(),
- INVALID_SREG);
+ LoadBaseDispWide(reg_ptr, 0, rl_result.reg, INVALID_SREG);
if (field_info.IsVolatile()) {
// Without context sensitive analysis, we must issue the most conservative barriers.
// In this case, either a load or store may follow so we issue both barriers.
@@ -746,9 +743,9 @@
StoreValueWide(rl_dest, rl_result);
} else {
rl_result = EvalLoc(rl_dest, reg_class, true);
- GenNullCheck(rl_obj.reg.GetReg(), opt_flags);
- LoadBaseDisp(rl_obj.reg.GetReg(), field_info.FieldOffset().Int32Value(),
- rl_result.reg.GetReg(), kWord, rl_obj.s_reg_low);
+ GenNullCheck(rl_obj.reg, opt_flags);
+ LoadBaseDisp(rl_obj.reg, field_info.FieldOffset().Int32Value(), rl_result.reg, kWord,
+ rl_obj.s_reg_low);
MarkPossibleNullPointerException(opt_flags);
if (field_info.IsVolatile()) {
// Without context sensitive analysis, we must issue the most conservative barriers.
@@ -784,16 +781,15 @@
DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
rl_obj = LoadValue(rl_obj, kCoreReg);
if (is_long_or_double) {
- int reg_ptr;
rl_src = LoadValueWide(rl_src, kAnyReg);
- GenNullCheck(rl_obj.reg.GetReg(), opt_flags);
- reg_ptr = AllocTemp();
- OpRegRegImm(kOpAdd, reg_ptr, rl_obj.reg.GetReg(), field_info.FieldOffset().Int32Value());
+ GenNullCheck(rl_obj.reg, opt_flags);
+ RegStorage reg_ptr = AllocTemp();
+ OpRegRegImm(kOpAdd, reg_ptr, rl_obj.reg, field_info.FieldOffset().Int32Value());
if (field_info.IsVolatile()) {
// There might have been a store before this volatile one so insert StoreStore barrier.
GenMemBarrier(kStoreStore);
}
- StoreBaseDispWide(reg_ptr, 0, rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
+ StoreBaseDispWide(reg_ptr, 0, rl_src.reg);
MarkPossibleNullPointerException(opt_flags);
if (field_info.IsVolatile()) {
// A load might follow the volatile store so insert a StoreLoad barrier.
@@ -802,20 +798,19 @@
FreeTemp(reg_ptr);
} else {
rl_src = LoadValue(rl_src, reg_class);
- GenNullCheck(rl_obj.reg.GetReg(), opt_flags);
+ GenNullCheck(rl_obj.reg, opt_flags);
if (field_info.IsVolatile()) {
// There might have been a store before this volatile one so insert StoreStore barrier.
GenMemBarrier(kStoreStore);
}
- StoreBaseDisp(rl_obj.reg.GetReg(), field_info.FieldOffset().Int32Value(),
- rl_src.reg.GetReg(), kWord);
+ StoreBaseDisp(rl_obj.reg, field_info.FieldOffset().Int32Value(), rl_src.reg, kWord);
MarkPossibleNullPointerException(opt_flags);
if (field_info.IsVolatile()) {
// A load might follow the volatile store so insert a StoreLoad barrier.
GenMemBarrier(kStoreLoad);
}
if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
- MarkGCCard(rl_src.reg.GetReg(), rl_obj.reg.GetReg());
+ MarkGCCard(rl_src.reg, rl_obj.reg);
}
}
} else {
@@ -842,7 +837,7 @@
void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) {
RegLocation rl_method = LoadCurrMethod();
- int res_reg = AllocTemp();
+ RegStorage res_reg = AllocTemp();
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (!cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
*cu_->dex_file,
@@ -850,23 +845,23 @@
// Call out to helper which resolves type and verifies access.
// Resolved type returned in kRet0.
CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccess),
- type_idx, rl_method.reg.GetReg(), true);
+ type_idx, rl_method.reg, true);
RegLocation rl_result = GetReturn(false);
StoreValue(rl_dest, rl_result);
} else {
// We're don't need access checks, load type from dex cache
int32_t dex_cache_offset =
mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value();
- LoadWordDisp(rl_method.reg.GetReg(), dex_cache_offset, res_reg);
+ LoadWordDisp(rl_method.reg, dex_cache_offset, res_reg);
int32_t offset_of_type =
mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() + (sizeof(mirror::Class*)
* type_idx);
- LoadWordDisp(res_reg, offset_of_type, rl_result.reg.GetReg());
+ LoadWordDisp(res_reg, offset_of_type, rl_result.reg);
if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file,
type_idx) || SLOW_TYPE_PATH) {
// Slow path, at runtime test if type is null and if so initialize
FlushAllRegs();
- LIR* branch = OpCmpImmBranch(kCondEq, rl_result.reg.GetReg(), 0, NULL);
+ LIR* branch = OpCmpImmBranch(kCondEq, rl_result.reg, 0, NULL);
LIR* cont = NewLIR0(kPseudoTargetLabel);
// Object to generate the slow path for class resolution.
@@ -882,8 +877,8 @@
GenerateTargetLabel();
m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeType), type_idx_,
- rl_method_.reg.GetReg(), true);
- m2l_->OpRegCopy(rl_result_.reg.GetReg(), m2l_->TargetReg(kRet0));
+ rl_method_.reg, true);
+ m2l_->OpRegCopy(rl_result_.reg, m2l_->TargetReg(kRet0));
m2l_->OpUnconditionalBranch(cont_);
}
@@ -895,8 +890,7 @@
};
// Add to list for future.
- AddSlowPath(new (arena_) SlowPath(this, branch, cont,
- type_idx, rl_method, rl_result));
+ AddSlowPath(new (arena_) SlowPath(this, branch, cont, type_idx, rl_method, rl_result));
StoreValue(rl_dest, rl_result);
} else {
@@ -918,11 +912,11 @@
// If the Method* is already in a register, we can save a copy.
RegLocation rl_method = mir_graph_->GetMethodLoc();
- int r_method;
+ RegStorage r_method;
if (rl_method.location == kLocPhysReg) {
// A temp would conflict with register use below.
- DCHECK(!IsTemp(rl_method.reg.GetReg()));
- r_method = rl_method.reg.GetReg();
+ DCHECK(!IsTemp(rl_method.reg));
+ r_method = rl_method.reg;
} else {
r_method = TargetReg(kArg2);
LoadCurrMethodDirect(r_method);
@@ -943,14 +937,14 @@
// Object to generate the slow path for string resolution.
class SlowPath : public LIRSlowPath {
public:
- SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, int r_method) :
+ SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, RegStorage r_method) :
LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), r_method_(r_method) {
}
void Compile() {
GenerateTargetLabel();
- int r_tgt = m2l_->CallHelperSetup(QUICK_ENTRYPOINT_OFFSET(pResolveString));
+ RegStorage r_tgt = m2l_->CallHelperSetup(QUICK_ENTRYPOINT_OFFSET(pResolveString));
m2l_->OpRegCopy(m2l_->TargetReg(kArg0), r_method_); // .eq
LIR* call_inst = m2l_->OpReg(kOpBlx, r_tgt);
@@ -961,7 +955,7 @@
}
private:
- int r_method_;
+ RegStorage r_method_;
};
// Add to list for future.
@@ -970,8 +964,8 @@
DCHECK_EQ(cu_->instruction_set, kX86);
LIR* branch = OpCmpImmBranch(kCondNe, TargetReg(kRet0), 0, NULL);
LoadConstant(TargetReg(kArg1), string_idx);
- CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pResolveString), r_method,
- TargetReg(kArg1), true);
+ CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pResolveString), r_method, TargetReg(kArg1),
+ true);
LIR* target = NewLIR0(kPseudoTargetLabel);
branch->target = target;
}
@@ -979,11 +973,10 @@
StoreValue(rl_dest, GetReturn(false));
} else {
RegLocation rl_method = LoadCurrMethod();
- int res_reg = AllocTemp();
+ RegStorage res_reg = AllocTemp();
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- LoadWordDisp(rl_method.reg.GetReg(),
- mirror::ArtMethod::DexCacheStringsOffset().Int32Value(), res_reg);
- LoadWordDisp(res_reg, offset_of_string, rl_result.reg.GetReg());
+ LoadWordDisp(rl_method.reg, mirror::ArtMethod::DexCacheStringsOffset().Int32Value(), res_reg);
+ LoadWordDisp(res_reg, offset_of_string, rl_result.reg);
StoreValue(rl_dest, rl_result);
}
}
@@ -1056,25 +1049,24 @@
RegLocation object = LoadValue(rl_src, kCoreReg);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- int result_reg = rl_result.reg.GetReg();
- if (result_reg == object.reg.GetReg()) {
+ RegStorage result_reg = rl_result.reg;
+ if (result_reg == object.reg) {
result_reg = AllocTypedTemp(false, kCoreReg);
}
LoadConstant(result_reg, 0); // assume false
- LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg.GetReg(), 0, NULL);
+ LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg, 0, NULL);
- int check_class = AllocTypedTemp(false, kCoreReg);
- int object_class = AllocTypedTemp(false, kCoreReg);
+ RegStorage check_class = AllocTypedTemp(false, kCoreReg);
+ RegStorage object_class = AllocTypedTemp(false, kCoreReg);
LoadCurrMethodDirect(check_class);
if (use_declaring_class) {
- LoadWordDisp(check_class, mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
- check_class);
- LoadWordDisp(object.reg.GetReg(), mirror::Object::ClassOffset().Int32Value(), object_class);
+ LoadWordDisp(check_class, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), check_class);
+ LoadWordDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class);
} else {
LoadWordDisp(check_class, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
check_class);
- LoadWordDisp(object.reg.GetReg(), mirror::Object::ClassOffset().Int32Value(), object_class);
+ LoadWordDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class);
int32_t offset_of_type =
mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() +
(sizeof(mirror::Class*) * type_idx);
@@ -1098,7 +1090,7 @@
FreeTemp(object_class);
FreeTemp(check_class);
if (IsTemp(result_reg)) {
- OpRegCopy(rl_result.reg.GetReg(), result_reg);
+ OpRegCopy(rl_result.reg, result_reg);
FreeTemp(result_reg);
}
StoreValue(rl_dest, rl_result);
@@ -1116,7 +1108,7 @@
// May generate a call - use explicit registers
LockCallTemps();
LoadCurrMethodDirect(TargetReg(kArg1)); // kArg1 <= current Method*
- int class_reg = TargetReg(kArg2); // kArg2 will hold the Class*
+ RegStorage class_reg = TargetReg(kArg2); // kArg2 will hold the Class*
if (needs_access_check) {
// Check we have access to type_idx and if not throw IllegalAccessError,
// returns Class* in kArg0
@@ -1126,13 +1118,13 @@
LoadValueDirectFixed(rl_src, TargetReg(kArg0)); // kArg0 <= ref
} else if (use_declaring_class) {
LoadValueDirectFixed(rl_src, TargetReg(kArg0)); // kArg0 <= ref
- LoadWordDisp(TargetReg(kArg1),
- mirror::ArtMethod::DeclaringClassOffset().Int32Value(), class_reg);
+ LoadWordDisp(TargetReg(kArg1), mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
+ class_reg);
} else {
// Load dex cache entry into class_reg (kArg2)
LoadValueDirectFixed(rl_src, TargetReg(kArg0)); // kArg0 <= ref
- LoadWordDisp(TargetReg(kArg1),
- mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), class_reg);
+ LoadWordDisp(TargetReg(kArg1), mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
+ class_reg);
int32_t offset_of_type =
mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() + (sizeof(mirror::Class*)
* type_idx);
@@ -1154,7 +1146,7 @@
RegLocation rl_result = GetReturn(false);
if (cu_->instruction_set == kMips) {
// On MIPS rArg0 != rl_result, place false in result if branch is taken.
- LoadConstant(rl_result.reg.GetReg(), 0);
+ LoadConstant(rl_result.reg, 0);
}
LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);
@@ -1168,16 +1160,16 @@
if (cu_->instruction_set == kThumb2) {
OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2)); // Same?
OpIT(kCondEq, "E"); // if-convert the test
- LoadConstant(rl_result.reg.GetReg(), 1); // .eq case - load true
- LoadConstant(rl_result.reg.GetReg(), 0); // .ne case - load false
+ LoadConstant(rl_result.reg, 1); // .eq case - load true
+ LoadConstant(rl_result.reg, 0); // .ne case - load false
} else {
- LoadConstant(rl_result.reg.GetReg(), 0); // ne case - load false
+ LoadConstant(rl_result.reg, 0); // ne case - load false
branchover = OpCmpBranch(kCondNe, TargetReg(kArg1), TargetReg(kArg2), NULL);
- LoadConstant(rl_result.reg.GetReg(), 1); // eq case - load true
+ LoadConstant(rl_result.reg, 1); // eq case - load true
}
} else {
if (cu_->instruction_set == kThumb2) {
- int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivial));
+ RegStorage r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivial));
if (!type_known_abstract) {
/* Uses conditional nullification */
OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2)); // Same?
@@ -1190,10 +1182,10 @@
} else {
if (!type_known_abstract) {
/* Uses branchovers */
- LoadConstant(rl_result.reg.GetReg(), 1); // assume true
+ LoadConstant(rl_result.reg, 1); // assume true
branchover = OpCmpBranch(kCondEq, TargetReg(kArg1), TargetReg(kArg2), NULL);
}
- int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivial));
+ RegStorage r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivial));
OpRegCopy(TargetReg(kArg0), TargetReg(kArg2)); // .ne case - arg0 <= class
OpReg(kOpBlx, r_tgt); // .ne case: helper(class, ref->class)
FreeTemp(r_tgt);
@@ -1249,7 +1241,7 @@
// May generate a call - use explicit registers
LockCallTemps();
LoadCurrMethodDirect(TargetReg(kArg1)); // kArg1 <= current Method*
- int class_reg = TargetReg(kArg2); // kArg2 will hold the Class*
+ RegStorage class_reg = TargetReg(kArg2); // kArg2 will hold the Class*
if (needs_access_check) {
// Check we have access to type_idx and if not throw IllegalAccessError,
// returns Class* in kRet0
@@ -1258,12 +1250,12 @@
type_idx, TargetReg(kArg1), true);
OpRegCopy(class_reg, TargetReg(kRet0)); // Align usage with fast path
} else if (use_declaring_class) {
- LoadWordDisp(TargetReg(kArg1),
- mirror::ArtMethod::DeclaringClassOffset().Int32Value(), class_reg);
+ LoadWordDisp(TargetReg(kArg1), mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
+ class_reg);
} else {
// Load dex cache entry into class_reg (kArg2)
- LoadWordDisp(TargetReg(kArg1),
- mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), class_reg);
+ LoadWordDisp(TargetReg(kArg1), mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
+ class_reg);
int32_t offset_of_type =
mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() +
(sizeof(mirror::Class*) * type_idx);
@@ -1277,7 +1269,7 @@
class SlowPath : public LIRSlowPath {
public:
SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, const int type_idx,
- const int class_reg) :
+ const RegStorage class_reg) :
LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), type_idx_(type_idx),
class_reg_(class_reg) {
}
@@ -1294,11 +1286,10 @@
}
public:
const int type_idx_;
- const int class_reg_;
+ const RegStorage class_reg_;
};
- AddSlowPath(new (arena_) SlowPath(this, hop_branch, cont,
- type_idx, class_reg));
+ AddSlowPath(new (arena_) SlowPath(this, hop_branch, cont, type_idx, class_reg));
}
}
// At this point, class_reg (kArg2) has class
@@ -1343,8 +1334,7 @@
LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);
/* load object->klass_ */
DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
- LoadWordDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(),
- TargetReg(kArg1));
+ LoadWordDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));
LIR* branch2 = OpCmpBranch(kCondNe, TargetReg(kArg1), class_reg, NULL);
LIR* cont = NewLIR0(kPseudoTargetLabel);
@@ -1376,16 +1366,15 @@
rl_src2 = LoadValueWide(rl_src2, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
// The longs may overlap - use intermediate temp if so
- if ((rl_result.reg.GetReg() == rl_src1.reg.GetHighReg()) || (rl_result.reg.GetReg() == rl_src2.reg.GetHighReg())) {
- int t_reg = AllocTemp();
- OpRegRegReg(first_op, t_reg, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
- OpRegRegReg(second_op, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
- OpRegCopy(rl_result.reg.GetReg(), t_reg);
+ if ((rl_result.reg.GetLowReg() == rl_src1.reg.GetHighReg()) || (rl_result.reg.GetLowReg() == rl_src2.reg.GetHighReg())) {
+ RegStorage t_reg = AllocTemp();
+ OpRegRegReg(first_op, t_reg, rl_src1.reg.GetLow(), rl_src2.reg.GetLow());
+ OpRegRegReg(second_op, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), rl_src2.reg.GetHigh());
+ OpRegCopy(rl_result.reg.GetLow(), t_reg);
FreeTemp(t_reg);
} else {
- OpRegRegReg(first_op, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
- OpRegRegReg(second_op, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(),
- rl_src2.reg.GetHighReg());
+ OpRegRegReg(first_op, rl_result.reg.GetLow(), rl_src1.reg.GetLow(), rl_src2.reg.GetLow());
+ OpRegRegReg(second_op, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), rl_src2.reg.GetHigh());
}
/*
* NOTE: If rl_dest refers to a frame variable in a large frame, the
@@ -1508,22 +1497,21 @@
if (unary) {
rl_src1 = LoadValue(rl_src1, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegReg(op, rl_result.reg.GetReg(), rl_src1.reg.GetReg());
+ OpRegReg(op, rl_result.reg, rl_src1.reg);
} else {
if (shift_op) {
- int t_reg = INVALID_REG;
rl_src2 = LoadValue(rl_src2, kCoreReg);
- t_reg = AllocTemp();
- OpRegRegImm(kOpAnd, t_reg, rl_src2.reg.GetReg(), 31);
+ RegStorage t_reg = AllocTemp();
+ OpRegRegImm(kOpAnd, t_reg, rl_src2.reg, 31);
rl_src1 = LoadValue(rl_src1, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegRegReg(op, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), t_reg);
+ OpRegRegReg(op, rl_result.reg, rl_src1.reg, t_reg);
FreeTemp(t_reg);
} else {
rl_src1 = LoadValue(rl_src1, kCoreReg);
rl_src2 = LoadValue(rl_src2, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegRegReg(op, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+ OpRegRegReg(op, rl_result.reg, rl_src1.reg, rl_src2.reg);
}
}
StoreValue(rl_dest, rl_result);
@@ -1533,9 +1521,9 @@
rl_src1 = LoadValue(rl_src1, kCoreReg);
rl_src2 = LoadValue(rl_src2, kCoreReg);
if (check_zero) {
- GenImmedCheck(kCondEq, rl_src2.reg.GetReg(), 0, kThrowDivZero);
+ GenImmedCheck(kCondEq, rl_src2.reg, 0, kThrowDivZero);
}
- rl_result = GenDivRem(rl_dest, rl_src1.reg.GetReg(), rl_src2.reg.GetReg(), op == kOpDiv);
+ rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, op == kOpDiv);
done = true;
} else if (cu_->instruction_set == kThumb2) {
if (cu_->GetInstructionSetFeatures().HasDivideInstruction()) {
@@ -1544,9 +1532,9 @@
rl_src1 = LoadValue(rl_src1, kCoreReg);
rl_src2 = LoadValue(rl_src2, kCoreReg);
if (check_zero) {
- GenImmedCheck(kCondEq, rl_src2.reg.GetReg(), 0, kThrowDivZero);
+ GenImmedCheck(kCondEq, rl_src2.reg, 0, kThrowDivZero);
}
- rl_result = GenDivRem(rl_dest, rl_src1.reg.GetReg(), rl_src2.reg.GetReg(), op == kOpDiv);
+ rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, op == kOpDiv);
done = true;
}
}
@@ -1556,7 +1544,7 @@
ThreadOffset func_offset = QUICK_ENTRYPOINT_OFFSET(pIdivmod);
FlushAllRegs(); /* Send everything to home location */
LoadValueDirectFixed(rl_src2, TargetReg(kArg1));
- int r_tgt = CallHelperSetup(func_offset);
+ RegStorage r_tgt = CallHelperSetup(func_offset);
LoadValueDirectFixed(rl_src1, TargetReg(kArg0));
if (check_zero) {
GenImmedCheck(kCondEq, TargetReg(kArg1), 0, kThrowDivZero);
@@ -1603,32 +1591,32 @@
rl_src = LoadValue(rl_src, kCoreReg);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (is_div) {
- int t_reg = AllocTemp();
+ RegStorage t_reg = AllocTemp();
if (lit == 2) {
// Division by 2 is by far the most common division by constant.
- OpRegRegImm(kOpLsr, t_reg, rl_src.reg.GetReg(), 32 - k);
- OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.reg.GetReg());
- OpRegRegImm(kOpAsr, rl_result.reg.GetReg(), t_reg, k);
+ OpRegRegImm(kOpLsr, t_reg, rl_src.reg, 32 - k);
+ OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.reg);
+ OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k);
} else {
- OpRegRegImm(kOpAsr, t_reg, rl_src.reg.GetReg(), 31);
+ OpRegRegImm(kOpAsr, t_reg, rl_src.reg, 31);
OpRegRegImm(kOpLsr, t_reg, t_reg, 32 - k);
- OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.reg.GetReg());
- OpRegRegImm(kOpAsr, rl_result.reg.GetReg(), t_reg, k);
+ OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.reg);
+ OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k);
}
} else {
- int t_reg1 = AllocTemp();
- int t_reg2 = AllocTemp();
+ RegStorage t_reg1 = AllocTemp();
+ RegStorage t_reg2 = AllocTemp();
if (lit == 2) {
- OpRegRegImm(kOpLsr, t_reg1, rl_src.reg.GetReg(), 32 - k);
- OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.reg.GetReg());
+ OpRegRegImm(kOpLsr, t_reg1, rl_src.reg, 32 - k);
+ OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.reg);
OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit -1);
- OpRegRegReg(kOpSub, rl_result.reg.GetReg(), t_reg2, t_reg1);
+ OpRegRegReg(kOpSub, rl_result.reg, t_reg2, t_reg1);
} else {
- OpRegRegImm(kOpAsr, t_reg1, rl_src.reg.GetReg(), 31);
+ OpRegRegImm(kOpAsr, t_reg1, rl_src.reg, 31);
OpRegRegImm(kOpLsr, t_reg1, t_reg1, 32 - k);
- OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.reg.GetReg());
+ OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.reg);
OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1);
- OpRegRegReg(kOpSub, rl_result.reg.GetReg(), t_reg2, t_reg1);
+ OpRegRegReg(kOpSub, rl_result.reg, t_reg2, t_reg1);
}
}
StoreValue(rl_dest, rl_result);
@@ -1658,7 +1646,7 @@
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (power_of_two) {
// Shift.
- OpRegRegImm(kOpLsl, rl_result.reg.GetReg(), rl_src.reg.GetReg(), LowestSetBit(lit));
+ OpRegRegImm(kOpLsl, rl_result.reg, rl_src.reg, LowestSetBit(lit));
} else if (pop_count_le2) {
// Shift and add and shift.
int first_bit = LowestSetBit(lit);
@@ -1668,9 +1656,9 @@
// Reverse subtract: (src << (shift + 1)) - src.
DCHECK(power_of_two_minus_one);
// TUNING: rsb dst, src, src lsl#LowestSetBit(lit + 1)
- int t_reg = AllocTemp();
- OpRegRegImm(kOpLsl, t_reg, rl_src.reg.GetReg(), LowestSetBit(lit + 1));
- OpRegRegReg(kOpSub, rl_result.reg.GetReg(), t_reg, rl_src.reg.GetReg());
+ RegStorage t_reg = AllocTemp();
+ OpRegRegImm(kOpLsl, t_reg, rl_src.reg, LowestSetBit(lit + 1));
+ OpRegRegReg(kOpSub, rl_result.reg, t_reg, rl_src.reg);
}
StoreValue(rl_dest, rl_result);
return true;
@@ -1689,10 +1677,10 @@
rl_src = LoadValue(rl_src, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (cu_->instruction_set == kThumb2) {
- OpRegRegImm(kOpRsub, rl_result.reg.GetReg(), rl_src.reg.GetReg(), lit);
+ OpRegRegImm(kOpRsub, rl_result.reg, rl_src.reg, lit);
} else {
- OpRegReg(kOpNeg, rl_result.reg.GetReg(), rl_src.reg.GetReg());
- OpRegImm(kOpAdd, rl_result.reg.GetReg(), lit);
+ OpRegReg(kOpNeg, rl_result.reg, rl_src.reg);
+ OpRegImm(kOpAdd, rl_result.reg, lit);
}
StoreValue(rl_dest, rl_result);
return;
@@ -1767,7 +1755,7 @@
case Instruction::REM_INT_LIT8:
case Instruction::REM_INT_LIT16: {
if (lit == 0) {
- GenImmedCheck(kCondAl, 0, 0, kThrowDivZero);
+ GenImmedCheck(kCondAl, RegStorage::InvalidReg(), 0, kThrowDivZero);
return;
}
if ((opcode == Instruction::DIV_INT) ||
@@ -1785,7 +1773,7 @@
bool done = false;
if (cu_->instruction_set == kMips) {
rl_src = LoadValue(rl_src, kCoreReg);
- rl_result = GenDivRemLit(rl_dest, rl_src.reg.GetReg(), lit, is_div);
+ rl_result = GenDivRemLit(rl_dest, rl_src.reg, lit, is_div);
done = true;
} else if (cu_->instruction_set == kX86) {
rl_result = GenDivRemLit(rl_dest, rl_src, lit, is_div);
@@ -1795,7 +1783,7 @@
// Use ARM SDIV instruction for division. For remainder we also need to
// calculate using a MUL and subtract.
rl_src = LoadValue(rl_src, kCoreReg);
- rl_result = GenDivRemLit(rl_dest, rl_src.reg.GetReg(), lit, is_div);
+ rl_result = GenDivRemLit(rl_dest, rl_src.reg, lit, is_div);
done = true;
}
}
@@ -1821,9 +1809,9 @@
rl_result = EvalLoc(rl_dest, kCoreReg, true);
// Avoid shifts by literal 0 - no support in Thumb. Change to copy.
if (shift_op && (lit == 0)) {
- OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetReg());
+ OpRegCopy(rl_result.reg, rl_src.reg);
} else {
- OpRegRegImm(op, rl_result.reg.GetReg(), rl_src.reg.GetReg(), lit);
+ OpRegRegImm(op, rl_result.reg, rl_src.reg, lit);
}
StoreValue(rl_dest, rl_result);
}
@@ -1836,22 +1824,22 @@
bool call_out = false;
bool check_zero = false;
ThreadOffset func_offset(-1);
- int ret_reg = TargetReg(kRet0);
+ int ret_reg = TargetReg(kRet0).GetReg();
switch (opcode) {
case Instruction::NOT_LONG:
rl_src2 = LoadValueWide(rl_src2, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
// Check for destructive overlap
- if (rl_result.reg.GetReg() == rl_src2.reg.GetHighReg()) {
- int t_reg = AllocTemp();
- OpRegCopy(t_reg, rl_src2.reg.GetHighReg());
- OpRegReg(kOpMvn, rl_result.reg.GetReg(), rl_src2.reg.GetReg());
- OpRegReg(kOpMvn, rl_result.reg.GetHighReg(), t_reg);
+ if (rl_result.reg.GetLowReg() == rl_src2.reg.GetHighReg()) {
+ RegStorage t_reg = AllocTemp();
+ OpRegCopy(t_reg, rl_src2.reg.GetHigh());
+ OpRegReg(kOpMvn, rl_result.reg.GetLow(), rl_src2.reg.GetLow());
+ OpRegReg(kOpMvn, rl_result.reg.GetHigh(), t_reg);
FreeTemp(t_reg);
} else {
- OpRegReg(kOpMvn, rl_result.reg.GetReg(), rl_src2.reg.GetReg());
- OpRegReg(kOpMvn, rl_result.reg.GetHighReg(), rl_src2.reg.GetHighReg());
+ OpRegReg(kOpMvn, rl_result.reg.GetLow(), rl_src2.reg.GetLow());
+ OpRegReg(kOpMvn, rl_result.reg.GetHigh(), rl_src2.reg.GetHigh());
}
StoreValueWide(rl_dest, rl_result);
return;
@@ -1880,7 +1868,7 @@
return;
} else {
call_out = true;
- ret_reg = TargetReg(kRet0);
+ ret_reg = TargetReg(kRet0).GetReg();
func_offset = QUICK_ENTRYPOINT_OFFSET(pLmul);
}
break;
@@ -1888,7 +1876,7 @@
case Instruction::DIV_LONG_2ADDR:
call_out = true;
check_zero = true;
- ret_reg = TargetReg(kRet0);
+ ret_reg = TargetReg(kRet0).GetReg();
func_offset = QUICK_ENTRYPOINT_OFFSET(pLdiv);
break;
case Instruction::REM_LONG:
@@ -1897,7 +1885,7 @@
check_zero = true;
func_offset = QUICK_ENTRYPOINT_OFFSET(pLmod);
/* NOTE - for Arm, result is in kArg2/kArg3 instead of kRet0/kRet1 */
- ret_reg = (cu_->instruction_set == kThumb2) ? TargetReg(kArg2) : TargetReg(kRet0);
+ ret_reg = (cu_->instruction_set == kThumb2) ? TargetReg(kArg2).GetReg() : TargetReg(kRet0).GetReg();
break;
case Instruction::AND_LONG_2ADDR:
case Instruction::AND_LONG:
@@ -1937,17 +1925,19 @@
} else {
FlushAllRegs(); /* Send everything to home location */
if (check_zero) {
- LoadValueDirectWideFixed(rl_src2, TargetReg(kArg2), TargetReg(kArg3));
- int r_tgt = CallHelperSetup(func_offset);
- GenDivZeroCheck(TargetReg(kArg2), TargetReg(kArg3));
- LoadValueDirectWideFixed(rl_src1, TargetReg(kArg0), TargetReg(kArg1));
+ RegStorage r_tmp1 = RegStorage::MakeRegPair(TargetReg(kArg0), TargetReg(kArg1));
+ RegStorage r_tmp2 = RegStorage::MakeRegPair(TargetReg(kArg2), TargetReg(kArg3));
+ LoadValueDirectWideFixed(rl_src2, r_tmp2);
+ RegStorage r_tgt = CallHelperSetup(func_offset);
+ GenDivZeroCheck(RegStorage::MakeRegPair(TargetReg(kArg2), TargetReg(kArg3)));
+ LoadValueDirectWideFixed(rl_src1, r_tmp1);
// NOTE: callout here is not a safepoint
CallHelper(r_tgt, func_offset, false /* not safepoint */);
} else {
CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_src2, false);
}
// Adjust return regs in to handle case of rem returning kArg2/kArg3
- if (ret_reg == TargetReg(kRet0))
+ if (ret_reg == TargetReg(kRet0).GetReg())
rl_result = GetReturnWide(false);
else
rl_result = GetReturnWideAlt();
@@ -2040,7 +2030,7 @@
/* Generic code for generating a wide constant into a VR. */
void Mir2Lir::GenConstWide(RegLocation rl_dest, int64_t value) {
RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true);
- LoadConstantWide(rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), value);
+ LoadConstantWide(rl_result.reg, value);
StoreValueWide(rl_dest, rl_result);
}