-rw-r--r--  compiler/dex/compiler_enums.h              |  18
-rw-r--r--  compiler/dex/quick/arm/utility_arm.cc      |   4
-rw-r--r--  compiler/dex/quick/arm64/int_arm64.cc      |   2
-rw-r--r--  compiler/dex/quick/arm64/utility_arm64.cc  |   4
-rw-r--r--  compiler/dex/quick/mips/utility_mips.cc    |   4
-rw-r--r--  compiler/dex/quick/x86/assemble_x86.cc     |  14
-rw-r--r--  compiler/dex/quick/x86/call_x86.cc         |  35
-rw-r--r--  compiler/dex/quick/x86/codegen_x86.h       |   6
-rwxr-xr-x  compiler/dex/quick/x86/fp_x86.cc           |  31
-rwxr-xr-x  compiler/dex/quick/x86/int_x86.cc          |  42
-rwxr-xr-x  compiler/dex/quick/x86/target_x86.cc       | 351
-rw-r--r--  compiler/dex/quick/x86/utility_x86.cc      |  14
-rw-r--r--  compiler/dex/quick/x86/x86_lir.h           |  22
-rw-r--r--  compiler/dex/reg_storage.h                 |   3
14 files changed, 270 insertions(+), 280 deletions(-)
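
Two themes run through this change. First, std::ostream& operator<< declarations are added for a number of enums (and, at the end, for RegStorage) so that EQ-style assertions such as DCHECK_EQ can stream both operands into the failure message instead of reporting only the expression text. Second, the mutable globals rs_rX86_SP, rs_rX86_ARG0..5, rs_rX86_FARG0..7, rs_rX86_RET0/1, rs_rX86_INVOKE_TGT and rs_rX86_COUNT, which the x86 backend used to initialize in its constructor, are removed in favor of compile-time constants and table lookups keyed on cu_->target64. As an illustration only (the matching definitions are not part of this diff), one of the newly declared stream operators could be defined along these lines:

    #include <ostream>

    // Illustrative sketch, not code from this change: a definition matching
    // one of the operator<< declarations added below.
    std::ostream& operator<<(std::ostream& os, const AssemblerStatus& rhs) {
      switch (rhs) {
        case kSuccess:  return os << "kSuccess";
        case kRetryAll: return os << "kRetryAll";
        default:        return os << "AssemblerStatus[" << static_cast<int>(rhs) << "]";
      }
    }
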
diff --git a/compiler/dex/compiler_enums.h b/compiler/dex/compiler_enums.h
index 1297ba9c7f..5d877fdf80 100644
--- a/compiler/dex/compiler_enums.h
+++ b/compiler/dex/compiler_enums.h
@@ -38,6 +38,7 @@ enum BitsUsed {
kSize512Bits,
kSize1024Bits,
};
+std::ostream& operator<<(std::ostream& os, const BitsUsed& rhs);
enum SpecialTargetRegister {
kSelf, // Thread pointer.
@@ -76,6 +77,7 @@ enum SpecialTargetRegister {
kHiddenFpArg,
kCount
};
+std::ostream& operator<<(std::ostream& os, const SpecialTargetRegister& code);
enum RegLocationType {
kLocDalvikFrame = 0, // Normal Dalvik register
@@ -344,6 +346,7 @@ enum AssemblerStatus {
kSuccess,
kRetryAll,
};
+std::ostream& operator<<(std::ostream& os, const AssemblerStatus& rhs);
enum OpSize {
kWord, // Natural word size of target (32/64).
@@ -357,7 +360,6 @@ enum OpSize {
kUnsignedByte,
kSignedByte,
};
-
std::ostream& operator<<(std::ostream& os, const OpSize& kind);
enum OpKind {
@@ -399,6 +401,7 @@ enum OpKind {
kOpBx,
kOpInvalid,
};
+std::ostream& operator<<(std::ostream& os, const OpKind& rhs);
enum MoveType {
kMov8GP, // Move 8-bit general purpose register.
@@ -415,8 +418,7 @@ enum MoveType {
kMovLo128FP, // Move low 64-bits of 128-bit FP register.
kMovHi128FP, // Move high 64-bits of 128-bit FP register.
};
-
-std::ostream& operator<<(std::ostream& os, const OpKind& kind);
+std::ostream& operator<<(std::ostream& os, const MoveType& kind);
enum ConditionCode {
kCondEq, // equal
@@ -438,7 +440,6 @@ enum ConditionCode {
kCondAl, // always
kCondNv, // never
};
-
std::ostream& operator<<(std::ostream& os, const ConditionCode& kind);
// Target specific condition encodings
@@ -460,7 +461,6 @@ enum ArmConditionCode {
kArmCondAl = 0xe, // 1110
kArmCondNv = 0xf, // 1111
};
-
std::ostream& operator<<(std::ostream& os, const ArmConditionCode& kind);
enum X86ConditionCode {
@@ -508,7 +508,6 @@ enum X86ConditionCode {
kX86CondNle = 0xf, // not-less-than
kX86CondG = kX86CondNle, // greater
};
-
std::ostream& operator<<(std::ostream& os, const X86ConditionCode& kind);
enum DividePattern {
@@ -517,7 +516,6 @@ enum DividePattern {
Divide5,
Divide7,
};
-
std::ostream& operator<<(std::ostream& os, const DividePattern& pattern);
/**
@@ -543,7 +541,6 @@ enum MemBarrierKind {
kAnyAny,
kNTStoreStore,
};
-
std::ostream& operator<<(std::ostream& os, const MemBarrierKind& kind);
enum OpFeatureFlags {
@@ -600,6 +597,7 @@ enum OpFeatureFlags {
kDefHi,
kDefLo
};
+std::ostream& operator<<(std::ostream& os, const OpFeatureFlags& rhs);
enum SelectInstructionKind {
kSelectNone,
@@ -607,7 +605,6 @@ enum SelectInstructionKind {
kSelectMove,
kSelectGoto
};
-
std::ostream& operator<<(std::ostream& os, const SelectInstructionKind& kind);
// LIR fixup kinds for Arm
@@ -629,14 +626,12 @@ enum FixupKind {
kFixupMovImmHST, // kThumb2MovImm16HST.
kFixupAlign4, // Align to 4-byte boundary.
};
-
std::ostream& operator<<(std::ostream& os, const FixupKind& kind);
enum VolatileKind {
kNotVolatile, // Load/Store is not volatile
kVolatile // Load/Store is volatile
};
-
std::ostream& operator<<(std::ostream& os, const VolatileKind& kind);
enum WideKind {
@@ -644,7 +639,6 @@ enum WideKind {
kWide, // Wide view
kRef // Ref width
};
-
std::ostream& operator<<(std::ostream& os, const WideKind& kind);
} // namespace art
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index a1a5ad1d1f..0d5aa90f35 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -959,7 +959,7 @@ LIR* ArmMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorag
// TODO: in future may need to differentiate Dalvik accesses w/ spills
if (mem_ref_type_ == ResourceMask::kDalvikReg) {
- DCHECK(r_base == rs_rARM_SP);
+ DCHECK_EQ(r_base, rs_rARM_SP);
AnnotateDalvikRegAccess(load, displacement >> 2, true /* is_load */, r_dest.Is64Bit());
}
return load;
@@ -1088,7 +1088,7 @@ LIR* ArmMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStora
// TODO: In future, may need to differentiate Dalvik & spill accesses
if (mem_ref_type_ == ResourceMask::kDalvikReg) {
- DCHECK(r_base == rs_rARM_SP);
+ DCHECK_EQ(r_base, rs_rARM_SP);
AnnotateDalvikRegAccess(store, displacement >> 2, false /* is_load */, r_src.Is64Bit());
}
return store;
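
The DCHECK to DCHECK_EQ conversions in this and the following files are what motivate the new stream operators: an EQ-style check logs both operand values on failure, which requires operator<< for each operand type. A simplified stand-in for the mechanism (not ART's actual macro) looks like this:

    #include <cstdlib>
    #include <iostream>

    // Simplified illustration of an EQ-style check: on failure it streams
    // both operands, which is why RegStorage and the enums above need
    // operator<< overloads.
    #define SKETCH_DCHECK_EQ(lhs, rhs)                                  \
      do {                                                              \
        if (!((lhs) == (rhs))) {                                        \
          std::cerr << "Check failed: " #lhs " == " #rhs " ("           \
                    << (lhs) << " vs. " << (rhs) << ")" << std::endl;   \
          std::abort();                                                 \
        }                                                               \
      } while (false)
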
diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc
index fc72e02c55..e57f99ce9b 100644
--- a/compiler/dex/quick/arm64/int_arm64.cc
+++ b/compiler/dex/quick/arm64/int_arm64.cc
@@ -1663,7 +1663,7 @@ static void UnSpillFPRegs(Arm64Mir2Lir* m2l, RegStorage base, int offset, uint32
void Arm64Mir2Lir::UnspillRegs(RegStorage base, uint32_t core_reg_mask, uint32_t fp_reg_mask,
int frame_size) {
- DCHECK(base == rs_sp);
+ DCHECK_EQ(base, rs_sp);
// Restore saves and drop stack frame.
// 2 versions:
//
diff --git a/compiler/dex/quick/arm64/utility_arm64.cc b/compiler/dex/quick/arm64/utility_arm64.cc
index fcd69ec4a3..78a6df8a1c 100644
--- a/compiler/dex/quick/arm64/utility_arm64.cc
+++ b/compiler/dex/quick/arm64/utility_arm64.cc
@@ -1266,7 +1266,7 @@ LIR* Arm64Mir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStor
// TODO: in future may need to differentiate Dalvik accesses w/ spills
if (mem_ref_type_ == ResourceMask::kDalvikReg) {
- DCHECK(r_base == rs_sp);
+ DCHECK_EQ(r_base, rs_sp);
AnnotateDalvikRegAccess(load, displacement >> 2, true /* is_load */, r_dest.Is64Bit());
}
return load;
@@ -1357,7 +1357,7 @@ LIR* Arm64Mir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegSto
// TODO: In future, may need to differentiate Dalvik & spill accesses.
if (mem_ref_type_ == ResourceMask::kDalvikReg) {
- DCHECK(r_base == rs_sp);
+ DCHECK_EQ(r_base, rs_sp);
AnnotateDalvikRegAccess(store, displacement >> 2, false /* is_load */, r_src.Is64Bit());
}
return store;
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index 044972cc5f..a7dc84f6aa 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -544,7 +544,7 @@ LIR* MipsMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStora
}
if (mem_ref_type_ == ResourceMask::kDalvikReg) {
- DCHECK(r_base == rs_rMIPS_SP);
+ DCHECK_EQ(r_base, rs_rMIPS_SP);
AnnotateDalvikRegAccess(load, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
true /* is_load */, pair /* is64bit */);
if (pair) {
@@ -646,7 +646,7 @@ LIR* MipsMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement,
}
if (mem_ref_type_ == ResourceMask::kDalvikReg) {
- DCHECK(r_base == rs_rMIPS_SP);
+ DCHECK_EQ(r_base, rs_rMIPS_SP);
AnnotateDalvikRegAccess(store, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
false /* is_load */, pair /* is64bit */);
if (pair) {
diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc
index ef55054d6d..3933b21a26 100644
--- a/compiler/dex/quick/x86/assemble_x86.cc
+++ b/compiler/dex/quick/x86/assemble_x86.cc
@@ -677,7 +677,7 @@ size_t X86Mir2Lir::ComputeSize(const X86EncodingMap* entry, int32_t raw_reg, int
++size; // modrm
}
if (!modrm_is_reg_reg) {
- if (has_sib || LowRegisterBits(raw_base) == rs_rX86_SP.GetRegNum()
+ if (has_sib || (LowRegisterBits(raw_base) == rs_rX86_SP_32.GetRegNum())
|| (cu_->target64 && entry->skeleton.prefix1 == THREAD_PREFIX)) {
// SP requires a SIB byte.
// GS access also needs a SIB byte for absolute addressing in 64-bit mode.
@@ -1010,9 +1010,9 @@ void X86Mir2Lir::EmitDisp(uint8_t base, int32_t disp) {
void X86Mir2Lir::EmitModrmThread(uint8_t reg_or_opcode) {
if (cu_->target64) {
// Absolute addressing for GS access.
- uint8_t modrm = (0 << 6) | (reg_or_opcode << 3) | rs_rX86_SP.GetRegNum();
+ uint8_t modrm = (0 << 6) | (reg_or_opcode << 3) | rs_rX86_SP_32.GetRegNum();
code_buffer_.push_back(modrm);
- uint8_t sib = (0/*TIMES_1*/ << 6) | (rs_rX86_SP.GetRegNum() << 3) | rs_rBP.GetRegNum();
+ uint8_t sib = (0/*TIMES_1*/ << 6) | (rs_rX86_SP_32.GetRegNum() << 3) | rs_rBP.GetRegNum();
code_buffer_.push_back(sib);
} else {
uint8_t modrm = (0 << 6) | (reg_or_opcode << 3) | rs_rBP.GetRegNum();
@@ -1025,9 +1025,9 @@ void X86Mir2Lir::EmitModrmDisp(uint8_t reg_or_opcode, uint8_t base, int32_t disp
DCHECK_LT(base, 8);
uint8_t modrm = (ModrmForDisp(base, disp) << 6) | (reg_or_opcode << 3) | base;
code_buffer_.push_back(modrm);
- if (base == rs_rX86_SP.GetRegNum()) {
+ if (base == rs_rX86_SP_32.GetRegNum()) {
// Special SIB for SP base
- code_buffer_.push_back(0 << 6 | rs_rX86_SP.GetRegNum() << 3 | rs_rX86_SP.GetRegNum());
+ code_buffer_.push_back(0 << 6 | rs_rX86_SP_32.GetRegNum() << 3 | rs_rX86_SP_32.GetRegNum());
}
EmitDisp(base, disp);
}
@@ -1036,7 +1036,7 @@ void X86Mir2Lir::EmitModrmSibDisp(uint8_t reg_or_opcode, uint8_t base, uint8_t i
int scale, int32_t disp) {
DCHECK_LT(RegStorage::RegNum(reg_or_opcode), 8);
uint8_t modrm = (ModrmForDisp(base, disp) << 6) | RegStorage::RegNum(reg_or_opcode) << 3 |
- rs_rX86_SP.GetRegNum();
+ rs_rX86_SP_32.GetRegNum();
code_buffer_.push_back(modrm);
DCHECK_LT(scale, 4);
DCHECK_LT(RegStorage::RegNum(index), 8);
@@ -1584,7 +1584,7 @@ void X86Mir2Lir::EmitPcRel(const X86EncodingMap* entry, int32_t raw_reg, int32_t
DCHECK_EQ(0, entry->skeleton.extra_opcode1);
DCHECK_EQ(0, entry->skeleton.extra_opcode2);
uint8_t low_reg = LowRegisterBits(raw_reg);
- uint8_t modrm = (2 << 6) | (low_reg << 3) | rs_rX86_SP.GetRegNum();
+ uint8_t modrm = (2 << 6) | (low_reg << 3) | rs_rX86_SP_32.GetRegNum();
code_buffer_.push_back(modrm);
DCHECK_LT(scale, 4);
uint8_t low_base_or_table = LowRegisterBits(raw_base_or_table);
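
The special treatment of the SP register number in these encoding paths comes from the x86 ISA rather than from ART: in a ModRM byte, an r/m field of 0b100 (the register number of ESP/RSP, hence the comparisons against rs_rX86_SP_32.GetRegNum()) does not name a base register directly but announces a trailing SIB byte, and an index field of 0b100 inside the SIB byte means "no index". For reference, the two byte layouts:

    #include <cstdint>

    // Reference sketch of the encodings involved, not code from this diff.
    // ModRM: mod(2) | reg(3) | r/m(3); r/m == 0b100 means a SIB byte follows.
    // SIB:   scale(2) | index(3) | base(3); index == 0b100 means "no index".
    uint8_t ModRM(uint8_t mod, uint8_t reg, uint8_t rm) {
      return static_cast<uint8_t>((mod << 6) | (reg << 3) | rm);
    }
    uint8_t Sib(uint8_t scale, uint8_t index, uint8_t base) {
      return static_cast<uint8_t>((scale << 6) | (index << 3) | base);
    }
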
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index 497ef94c27..61dcc28afc 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -164,16 +164,20 @@ void X86Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
* expanding the frame or flushing. This leaves the utility
* code with no spare temps.
*/
- LockTemp(rs_rX86_ARG0);
- LockTemp(rs_rX86_ARG1);
- LockTemp(rs_rX86_ARG2);
+ const RegStorage arg0 = TargetReg32(kArg0);
+ const RegStorage arg1 = TargetReg32(kArg1);
+ const RegStorage arg2 = TargetReg32(kArg2);
+ LockTemp(arg0);
+ LockTemp(arg1);
+ LockTemp(arg2);
/*
* We can safely skip the stack overflow check if we're
* a leaf *and* our frame size < fudge factor.
*/
- InstructionSet isa = cu_->target64 ? kX86_64 : kX86;
+ const InstructionSet isa = cu_->target64 ? kX86_64 : kX86;
bool skip_overflow_check = mir_graph_->MethodIsLeaf() && !FrameNeedsStackCheck(frame_size_, isa);
+ const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
// If we are doing an implicit stack overflow check, perform the load immediately
// before the stack pointer is decremented and anything is saved.
@@ -182,12 +186,12 @@ void X86Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
// Implicit stack overflow check.
// test eax,[esp + -overflow]
int overflow = GetStackOverflowReservedBytes(isa);
- NewLIR3(kX86Test32RM, rs_rAX.GetReg(), rs_rX86_SP.GetReg(), -overflow);
+ NewLIR3(kX86Test32RM, rs_rAX.GetReg(), rs_rSP.GetReg(), -overflow);
MarkPossibleStackOverflowException();
}
/* Build frame, return address already on stack */
- stack_decrement_ = OpRegImm(kOpSub, rs_rX86_SP, frame_size_ -
+ stack_decrement_ = OpRegImm(kOpSub, rs_rSP, frame_size_ -
GetInstructionSetPointerSize(cu_->instruction_set));
NewLIR0(kPseudoMethodEntry);
@@ -204,7 +208,8 @@ void X86Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
m2l_->ResetRegPool();
m2l_->ResetDefTracking();
GenerateTargetLabel(kPseudoThrowTarget);
- m2l_->OpRegImm(kOpAdd, rs_rX86_SP, sp_displace_);
+ const RegStorage local_rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
+ m2l_->OpRegImm(kOpAdd, local_rs_rSP, sp_displace_);
m2l_->ClobberCallerSave();
// Assumes codegen and target are in thumb2 mode.
m2l_->CallHelper(RegStorage::InvalidReg(), kQuickThrowStackOverflow,
@@ -225,9 +230,9 @@ void X86Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
// may have moved us outside of the reserved area at the end of the stack.
// cmp rs_rX86_SP, fs:[stack_end_]; jcc throw_slowpath
if (cu_->target64) {
- OpRegThreadMem(kOpCmp, rs_rX86_SP, Thread::StackEndOffset<8>());
+ OpRegThreadMem(kOpCmp, rs_rX86_SP_64, Thread::StackEndOffset<8>());
} else {
- OpRegThreadMem(kOpCmp, rs_rX86_SP, Thread::StackEndOffset<4>());
+ OpRegThreadMem(kOpCmp, rs_rX86_SP_32, Thread::StackEndOffset<4>());
}
LIR* branch = OpCondBranch(kCondUlt, nullptr);
AddSlowPath(
@@ -245,13 +250,13 @@ void X86Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
setup_method_address_[0] = NewLIR1(kX86StartOfMethod, method_start.GetReg());
int displacement = SRegOffset(base_of_code_->s_reg_low);
// Native pointer - must be natural word size.
- setup_method_address_[1] = StoreBaseDisp(rs_rX86_SP, displacement, method_start,
+ setup_method_address_[1] = StoreBaseDisp(rs_rSP, displacement, method_start,
cu_->target64 ? k64 : k32, kNotVolatile);
}
- FreeTemp(rs_rX86_ARG0);
- FreeTemp(rs_rX86_ARG1);
- FreeTemp(rs_rX86_ARG2);
+ FreeTemp(arg0);
+ FreeTemp(arg1);
+ FreeTemp(arg2);
}
void X86Mir2Lir::GenExitSequence() {
@@ -266,7 +271,9 @@ void X86Mir2Lir::GenExitSequence() {
UnSpillCoreRegs();
UnSpillFPRegs();
/* Remove frame except for return address */
- stack_increment_ = OpRegImm(kOpAdd, rs_rX86_SP, frame_size_ - GetInstructionSetPointerSize(cu_->instruction_set));
+ const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
+ stack_increment_ = OpRegImm(kOpAdd, rs_rSP,
+ frame_size_ - GetInstructionSetPointerSize(cu_->instruction_set));
NewLIR0(kX86Ret);
}
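
With the global rs_rX86_SP gone, each site above selects the width-correct view of the stack pointer locally. Both views name the same hardware register (the static_assert added to x86_lir.h below pins this down); they differ only in the width recorded in the RegStorage, which load/store helpers such as StoreBaseDisp use for operand sizing. The recurring selection could be captured in a helper; a hypothetical sketch:

    // Hypothetical helper, not part of this change, factoring out the
    // pattern repeated at each call site. Both constants refer to the same
    // hardware register; only the RegStorage width differs.
    inline RegStorage SelectSP(bool target64) {
      return target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
    }
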
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 4412a1e254..d57dffb01d 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -389,7 +389,7 @@ class X86Mir2Lir : public Mir2Lir {
LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) OVERRIDE;
protected:
- RegStorage TargetReg32(SpecialTargetRegister reg);
+ RegStorage TargetReg32(SpecialTargetRegister reg) const;
// Casting of RegStorage
RegStorage As32BitReg(RegStorage reg) {
DCHECK(!reg.IsPair());
@@ -432,7 +432,7 @@ class X86Mir2Lir : public Mir2Lir {
LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
RegStorage r_src, OpSize size, int opt_flags = 0);
- RegStorage GetCoreArgMappingToPhysicalReg(int core_arg_num);
+ RegStorage GetCoreArgMappingToPhysicalReg(int core_arg_num) const;
int AssignInsnOffsets();
void AssignOffsets();
@@ -530,7 +530,7 @@ class X86Mir2Lir : public Mir2Lir {
* @brief Check if a register is byte addressable.
* @returns true if a register is byte addressable.
*/
- bool IsByteRegister(RegStorage reg);
+ bool IsByteRegister(RegStorage reg) const;
void GenDivRemLongLit(RegLocation rl_dest, RegLocation rl_src, int64_t imm, bool is_div);
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
index 33bb0eeb76..bc02eee669 100755
--- a/compiler/dex/quick/x86/fp_x86.cc
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -159,12 +159,13 @@ void X86Mir2Lir::GenLongToFP(RegLocation rl_dest, RegLocation rl_src, bool is_do
} else {
// It must have been register promoted if it is not a temp but is still in physical
// register. Since we need it to be in memory to convert, we place it there now.
- StoreBaseDisp(rs_rX86_SP, src_v_reg_offset, rl_src.reg, k64, kNotVolatile);
+ const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
+ StoreBaseDisp(rs_rSP, src_v_reg_offset, rl_src.reg, k64, kNotVolatile);
}
}
// Push the source virtual register onto the x87 stack.
- LIR *fild64 = NewLIR2NoDest(kX86Fild64M, rs_rX86_SP.GetReg(),
+ LIR *fild64 = NewLIR2NoDest(kX86Fild64M, rs_rX86_SP_32.GetReg(),
src_v_reg_offset + LOWORD_OFFSET);
AnnotateDalvikRegAccess(fild64, (src_v_reg_offset + LOWORD_OFFSET) >> 2,
true /* is_load */, true /* is64bit */);
@@ -172,7 +173,7 @@ void X86Mir2Lir::GenLongToFP(RegLocation rl_dest, RegLocation rl_src, bool is_do
// Now pop off x87 stack and store it in the destination VR's stack location.
int opcode = is_double ? kX86Fstp64M : kX86Fstp32M;
int displacement = is_double ? dest_v_reg_offset + LOWORD_OFFSET : dest_v_reg_offset;
- LIR *fstp = NewLIR2NoDest(opcode, rs_rX86_SP.GetReg(), displacement);
+ LIR *fstp = NewLIR2NoDest(opcode, rs_rX86_SP_32.GetReg(), displacement);
AnnotateDalvikRegAccess(fstp, displacement >> 2, false /* is_load */, is_double);
/*
@@ -191,12 +192,13 @@ void X86Mir2Lir::GenLongToFP(RegLocation rl_dest, RegLocation rl_src, bool is_do
* correct register class.
*/
rl_result = EvalLoc(rl_dest, kFPReg, true);
+ const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
if (is_double) {
- LoadBaseDisp(rs_rX86_SP, dest_v_reg_offset, rl_result.reg, k64, kNotVolatile);
+ LoadBaseDisp(rs_rSP, dest_v_reg_offset, rl_result.reg, k64, kNotVolatile);
StoreFinalValueWide(rl_dest, rl_result);
} else {
- Load32Disp(rs_rX86_SP, dest_v_reg_offset, rl_result.reg);
+ Load32Disp(rs_rSP, dest_v_reg_offset, rl_result.reg);
StoreFinalValue(rl_dest, rl_result);
}
@@ -366,6 +368,7 @@ void X86Mir2Lir::GenRemFP(RegLocation rl_dest, RegLocation rl_src1, RegLocation
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
// If the source is in physical register, then put it in its location on stack.
+ const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
if (rl_src1.location == kLocPhysReg) {
RegisterInfo* reg_info = GetRegInfo(rl_src1.reg);
@@ -377,7 +380,7 @@ void X86Mir2Lir::GenRemFP(RegLocation rl_dest, RegLocation rl_src1, RegLocation
} else {
// It must have been register promoted if it is not a temp but is still in physical
// register. Since we need it to be in memory to convert, we place it there now.
- StoreBaseDisp(rs_rX86_SP, src1_v_reg_offset, rl_src1.reg, is_double ? k64 : k32,
+ StoreBaseDisp(rs_rSP, src1_v_reg_offset, rl_src1.reg, is_double ? k64 : k32,
kNotVolatile);
}
}
@@ -388,7 +391,7 @@ void X86Mir2Lir::GenRemFP(RegLocation rl_dest, RegLocation rl_src1, RegLocation
FlushSpecificReg(reg_info);
ResetDef(rl_src2.reg);
} else {
- StoreBaseDisp(rs_rX86_SP, src2_v_reg_offset, rl_src2.reg, is_double ? k64 : k32,
+ StoreBaseDisp(rs_rSP, src2_v_reg_offset, rl_src2.reg, is_double ? k64 : k32,
kNotVolatile);
}
}
@@ -396,12 +399,12 @@ void X86Mir2Lir::GenRemFP(RegLocation rl_dest, RegLocation rl_src1, RegLocation
int fld_opcode = is_double ? kX86Fld64M : kX86Fld32M;
// Push the source virtual registers onto the x87 stack.
- LIR *fld_2 = NewLIR2NoDest(fld_opcode, rs_rX86_SP.GetReg(),
+ LIR *fld_2 = NewLIR2NoDest(fld_opcode, rs_rSP.GetReg(),
src2_v_reg_offset + LOWORD_OFFSET);
AnnotateDalvikRegAccess(fld_2, (src2_v_reg_offset + LOWORD_OFFSET) >> 2,
true /* is_load */, is_double /* is64bit */);
- LIR *fld_1 = NewLIR2NoDest(fld_opcode, rs_rX86_SP.GetReg(),
+ LIR *fld_1 = NewLIR2NoDest(fld_opcode, rs_rSP.GetReg(),
src1_v_reg_offset + LOWORD_OFFSET);
AnnotateDalvikRegAccess(fld_1, (src1_v_reg_offset + LOWORD_OFFSET) >> 2,
true /* is_load */, is_double /* is64bit */);
@@ -430,7 +433,7 @@ void X86Mir2Lir::GenRemFP(RegLocation rl_dest, RegLocation rl_src1, RegLocation
// Now store result in the destination VR's stack location.
int displacement = dest_v_reg_offset + LOWORD_OFFSET;
int opcode = is_double ? kX86Fst64M : kX86Fst32M;
- LIR *fst = NewLIR2NoDest(opcode, rs_rX86_SP.GetReg(), displacement);
+ LIR *fst = NewLIR2NoDest(opcode, rs_rSP.GetReg(), displacement);
AnnotateDalvikRegAccess(fst, displacement >> 2, false /* is_load */, is_double /* is64bit */);
// Pop ST(1) and ST(0).
@@ -448,10 +451,10 @@ void X86Mir2Lir::GenRemFP(RegLocation rl_dest, RegLocation rl_src1, RegLocation
if (rl_result.location == kLocPhysReg) {
rl_result = EvalLoc(rl_dest, kFPReg, true);
if (is_double) {
- LoadBaseDisp(rs_rX86_SP, dest_v_reg_offset, rl_result.reg, k64, kNotVolatile);
+ LoadBaseDisp(rs_rSP, dest_v_reg_offset, rl_result.reg, k64, kNotVolatile);
StoreFinalValueWide(rl_dest, rl_result);
} else {
- Load32Disp(rs_rX86_SP, dest_v_reg_offset, rl_result.reg);
+ Load32Disp(rs_rSP, dest_v_reg_offset, rl_result.reg);
StoreFinalValue(rl_dest, rl_result);
}
}
@@ -639,7 +642,7 @@ bool X86Mir2Lir::GenInlinedAbsFloat(CallInfo* info) {
// Operate directly into memory.
int displacement = SRegOffset(rl_dest.s_reg_low);
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- LIR *lir = NewLIR3(kX86And32MI, rs_rX86_SP.GetReg(), displacement, 0x7fffffff);
+ LIR *lir = NewLIR3(kX86And32MI, rs_rX86_SP_32.GetReg(), displacement, 0x7fffffff);
AnnotateDalvikRegAccess(lir, displacement >> 2, false /*is_load */, false /* is_64bit */);
AnnotateDalvikRegAccess(lir, displacement >> 2, true /* is_load */, false /* is_64bit*/);
return true;
@@ -703,7 +706,7 @@ bool X86Mir2Lir::GenInlinedAbsDouble(CallInfo* info) {
// Operate directly into memory.
int displacement = SRegOffset(rl_dest.s_reg_low);
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- LIR *lir = NewLIR3(kX86And32MI, rs_rX86_SP.GetReg(), displacement + HIWORD_OFFSET, 0x7fffffff);
+ LIR *lir = NewLIR3(kX86And32MI, rs_rX86_SP_32.GetReg(), displacement + HIWORD_OFFSET, 0x7fffffff);
AnnotateDalvikRegAccess(lir, (displacement + HIWORD_OFFSET) >> 2, true /* is_load */, true /* is_64bit*/);
AnnotateDalvikRegAccess(lir, (displacement + HIWORD_OFFSET) >> 2, false /*is_load */, true /* is_64bit */);
return true;
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index 26465a5568..781c12807b 100755
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -1124,15 +1124,16 @@ bool X86Mir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
}
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
const size_t push_offset = (push_si ? 4u : 0u) + (push_di ? 4u : 0u);
+ const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
if (!obj_in_si && !obj_in_di) {
- LoadWordDisp(rs_rX86_SP, SRegOffset(rl_src_obj.s_reg_low) + push_offset, rs_obj);
+ LoadWordDisp(rs_rSP, SRegOffset(rl_src_obj.s_reg_low) + push_offset, rs_obj);
// Dalvik register annotation in LoadBaseIndexedDisp() used wrong offset. Fix it.
DCHECK(!DECODE_ALIAS_INFO_WIDE(last_lir_insn_->flags.alias_info));
int reg_id = DECODE_ALIAS_INFO_REG(last_lir_insn_->flags.alias_info) - push_offset / 4u;
AnnotateDalvikRegAccess(last_lir_insn_, reg_id, true, false);
}
if (!off_in_si && !off_in_di) {
- LoadWordDisp(rs_rX86_SP, SRegOffset(rl_src_offset.s_reg_low) + push_offset, rs_off);
+ LoadWordDisp(rs_rSP, SRegOffset(rl_src_offset.s_reg_low) + push_offset, rs_off);
// Dalvik register annotation in LoadBaseIndexedDisp() used wrong offset. Fix it.
DCHECK(!DECODE_ALIAS_INFO_WIDE(last_lir_insn_->flags.alias_info));
int reg_id = DECODE_ALIAS_INFO_REG(last_lir_insn_->flags.alias_info) - push_offset / 4u;
@@ -1507,12 +1508,14 @@ void X86Mir2Lir::GenImulMemImm(RegStorage dest, int sreg, int displacement, int
case 0:
NewLIR2(kX86Xor32RR, dest.GetReg(), dest.GetReg());
break;
- case 1:
- LoadBaseDisp(rs_rX86_SP, displacement, dest, k32, kNotVolatile);
+ case 1: {
+ const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
+ LoadBaseDisp(rs_rSP, displacement, dest, k32, kNotVolatile);
break;
+ }
default:
m = NewLIR4(IS_SIMM8(val) ? kX86Imul32RMI8 : kX86Imul32RMI, dest.GetReg(),
- rs_rX86_SP.GetReg(), displacement, val);
+ rs_rX86_SP_32.GetReg(), displacement, val);
AnnotateDalvikRegAccess(m, displacement >> 2, true /* is_load */, true /* is_64bit */);
break;
}
@@ -1653,7 +1656,7 @@ bool X86Mir2Lir::GenMulLongConst(RegLocation rl_dest, RegLocation rl_src1, int64
if (src1_in_reg) {
NewLIR1(kX86Mul32DaR, rl_src1.reg.GetLowReg());
} else {
- LIR *m = NewLIR2(kX86Mul32DaM, rs_rX86_SP.GetReg(), displacement + LOWORD_OFFSET);
+ LIR *m = NewLIR2(kX86Mul32DaM, rs_rX86_SP_32.GetReg(), displacement + LOWORD_OFFSET);
AnnotateDalvikRegAccess(m, (displacement + LOWORD_OFFSET) >> 2,
true /* is_load */, true /* is_64bit */);
}
@@ -1719,12 +1722,13 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
// At this point, the VRs are in their home locations.
bool src1_in_reg = rl_src1.location == kLocPhysReg;
bool src2_in_reg = rl_src2.location == kLocPhysReg;
+ const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
// ECX <- 1H
if (src1_in_reg) {
NewLIR2(kX86Mov32RR, rs_r1.GetReg(), rl_src1.reg.GetHighReg());
} else {
- LoadBaseDisp(rs_rX86_SP, SRegOffset(rl_src1.s_reg_low) + HIWORD_OFFSET, rs_r1, k32,
+ LoadBaseDisp(rs_rSP, SRegOffset(rl_src1.s_reg_low) + HIWORD_OFFSET, rs_r1, k32,
kNotVolatile);
}
@@ -1735,7 +1739,7 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
NewLIR2(kX86Imul32RR, rs_r1.GetReg(), rl_src2.reg.GetLowReg());
} else {
int displacement = SRegOffset(rl_src2.s_reg_low);
- LIR* m = NewLIR3(kX86Imul32RM, rs_r1.GetReg(), rs_rX86_SP.GetReg(),
+ LIR* m = NewLIR3(kX86Imul32RM, rs_r1.GetReg(), rs_rX86_SP_32.GetReg(),
displacement + LOWORD_OFFSET);
AnnotateDalvikRegAccess(m, (displacement + LOWORD_OFFSET) >> 2,
true /* is_load */, true /* is_64bit */);
@@ -1748,7 +1752,7 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
if (src2_in_reg) {
NewLIR2(kX86Mov32RR, rs_r0.GetReg(), rl_src2.reg.GetHighReg());
} else {
- LoadBaseDisp(rs_rX86_SP, SRegOffset(rl_src2.s_reg_low) + HIWORD_OFFSET, rs_r0, k32,
+ LoadBaseDisp(rs_rSP, SRegOffset(rl_src2.s_reg_low) + HIWORD_OFFSET, rs_r0, k32,
kNotVolatile);
}
@@ -1757,7 +1761,7 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
NewLIR2(kX86Imul32RR, rs_r0.GetReg(), rl_src1.reg.GetLowReg());
} else {
int displacement = SRegOffset(rl_src1.s_reg_low);
- LIR *m = NewLIR3(kX86Imul32RM, rs_r0.GetReg(), rs_rX86_SP.GetReg(),
+ LIR *m = NewLIR3(kX86Imul32RM, rs_r0.GetReg(), rs_rX86_SP_32.GetReg(),
displacement + LOWORD_OFFSET);
AnnotateDalvikRegAccess(m, (displacement + LOWORD_OFFSET) >> 2,
true /* is_load */, true /* is_64bit */);
@@ -1768,7 +1772,7 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
NewLIR2(kX86Imul32RR, rs_r1.GetReg(), rl_src2.reg.GetLowReg());
} else {
int displacement = SRegOffset(rl_src2.s_reg_low);
- LIR *m = NewLIR3(kX86Imul32RM, rs_r1.GetReg(), rs_rX86_SP.GetReg(),
+ LIR *m = NewLIR3(kX86Imul32RM, rs_r1.GetReg(), rs_rX86_SP_32.GetReg(),
displacement + LOWORD_OFFSET);
AnnotateDalvikRegAccess(m, (displacement + LOWORD_OFFSET) >> 2,
true /* is_load */, true /* is_64bit */);
@@ -1782,7 +1786,7 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
if (src2_in_reg) {
NewLIR2(kX86Mov32RR, rs_r0.GetReg(), rl_src2.reg.GetLowReg());
} else {
- LoadBaseDisp(rs_rX86_SP, SRegOffset(rl_src2.s_reg_low) + LOWORD_OFFSET, rs_r0, k32,
+ LoadBaseDisp(rs_rSP, SRegOffset(rl_src2.s_reg_low) + LOWORD_OFFSET, rs_r0, k32,
kNotVolatile);
}
@@ -1791,7 +1795,7 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
NewLIR1(kX86Mul32DaR, rl_src1.reg.GetLowReg());
} else {
int displacement = SRegOffset(rl_src1.s_reg_low);
- LIR *m = NewLIR2(kX86Mul32DaM, rs_rX86_SP.GetReg(), displacement + LOWORD_OFFSET);
+ LIR *m = NewLIR2(kX86Mul32DaM, rs_rX86_SP_32.GetReg(), displacement + LOWORD_OFFSET);
AnnotateDalvikRegAccess(m, (displacement + LOWORD_OFFSET) >> 2,
true /* is_load */, true /* is_64bit */);
}
@@ -1833,7 +1837,7 @@ void X86Mir2Lir::GenLongRegOrMemOp(RegLocation rl_dest, RegLocation rl_src,
// RHS is in memory.
DCHECK((rl_src.location == kLocDalvikFrame) ||
(rl_src.location == kLocCompilerTemp));
- int r_base = rs_rX86_SP.GetReg();
+ int r_base = rs_rX86_SP_32.GetReg();
int displacement = SRegOffset(rl_src.s_reg_low);
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
@@ -1876,7 +1880,7 @@ void X86Mir2Lir::GenLongArith(RegLocation rl_dest, RegLocation rl_src, Instructi
// Operate directly into memory.
X86OpCode x86op = GetOpcode(op, rl_dest, rl_src, false);
- int r_base = rs_rX86_SP.GetReg();
+ int r_base = rs_rX86_SP_32.GetReg();
int displacement = SRegOffset(rl_dest.s_reg_low);
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
@@ -2106,7 +2110,7 @@ void X86Mir2Lir::GenDivRemLongLit(RegLocation rl_dest, RegLocation rl_src,
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
int displacement = SRegOffset(rl_src.s_reg_low);
// RDX:RAX = magic * numerator.
- LIR *m = NewLIR2(kX86Imul64DaM, rs_rX86_SP.GetReg(), displacement);
+ LIR *m = NewLIR2(kX86Imul64DaM, rs_rX86_SP_32.GetReg(), displacement);
AnnotateDalvikRegAccess(m, displacement >> 2,
true /* is_load */, true /* is_64bit */);
} else {
@@ -2723,7 +2727,7 @@ bool X86Mir2Lir::GenLongImm(RegLocation rl_dest, RegLocation rl_src, Instruction
if ((rl_dest.location == kLocDalvikFrame) ||
(rl_dest.location == kLocCompilerTemp)) {
- int r_base = rs_rX86_SP.GetReg();
+ int r_base = rs_rX86_SP_32.GetReg();
int displacement = SRegOffset(rl_dest.s_reg_low);
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
@@ -2754,7 +2758,7 @@ bool X86Mir2Lir::GenLongImm(RegLocation rl_dest, RegLocation rl_src, Instruction
// Can we just do this into memory?
if ((rl_dest.location == kLocDalvikFrame) ||
(rl_dest.location == kLocCompilerTemp)) {
- int r_base = rs_rX86_SP.GetReg();
+ int r_base = rs_rX86_SP_32.GetReg();
int displacement = SRegOffset(rl_dest.s_reg_low);
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
@@ -3198,7 +3202,7 @@ void X86Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
} else {
int displacement = SRegOffset(rl_src.s_reg_low);
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- LIR *m = NewLIR3(kX86MovsxdRM, rl_result.reg.GetReg(), rs_rX86_SP.GetReg(),
+ LIR *m = NewLIR3(kX86MovsxdRM, rl_result.reg.GetReg(), rs_rX86_SP_32.GetReg(),
displacement + LOWORD_OFFSET);
AnnotateDalvikRegAccess(m, (displacement + LOWORD_OFFSET) >> 2,
true /* is_load */, true /* is_64bit */);
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 270a4e5007..db2f272436 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -141,27 +141,6 @@ static constexpr ArrayRef<const RegStorage> dp_temps_64(dp_temps_arr_64);
static constexpr ArrayRef<const RegStorage> xp_temps_32(xp_temps_arr_32);
static constexpr ArrayRef<const RegStorage> xp_temps_64(xp_temps_arr_64);
-RegStorage rs_rX86_SP;
-
-RegStorage rs_rX86_ARG0;
-RegStorage rs_rX86_ARG1;
-RegStorage rs_rX86_ARG2;
-RegStorage rs_rX86_ARG3;
-RegStorage rs_rX86_ARG4;
-RegStorage rs_rX86_ARG5;
-RegStorage rs_rX86_FARG0;
-RegStorage rs_rX86_FARG1;
-RegStorage rs_rX86_FARG2;
-RegStorage rs_rX86_FARG3;
-RegStorage rs_rX86_FARG4;
-RegStorage rs_rX86_FARG5;
-RegStorage rs_rX86_FARG6;
-RegStorage rs_rX86_FARG7;
-RegStorage rs_rX86_RET0;
-RegStorage rs_rX86_RET1;
-RegStorage rs_rX86_INVOKE_TGT;
-RegStorage rs_rX86_COUNT;
-
RegLocation X86Mir2Lir::LocCReturn() {
return x86_loc_c_return;
}
@@ -182,39 +161,94 @@ RegLocation X86Mir2Lir::LocCReturnDouble() {
return x86_loc_c_return_double;
}
+// 32-bit reg storage locations for 32-bit targets.
+static const RegStorage RegStorage32FromSpecialTargetRegister_Target32[] {
+ RegStorage::InvalidReg(), // kSelf - Thread pointer.
+ RegStorage::InvalidReg(), // kSuspend - Used to reduce suspend checks for some targets.
+ RegStorage::InvalidReg(), // kLr - no register as the return address is pushed on entry.
+ RegStorage::InvalidReg(), // kPc - not exposed on X86 see kX86StartOfMethod.
+ rs_rX86_SP_32, // kSp
+ rs_rAX, // kArg0
+ rs_rCX, // kArg1
+ rs_rDX, // kArg2
+ rs_rBX, // kArg3
+ RegStorage::InvalidReg(), // kArg4
+ RegStorage::InvalidReg(), // kArg5
+ RegStorage::InvalidReg(), // kArg6
+ RegStorage::InvalidReg(), // kArg7
+ rs_rAX, // kFArg0
+ rs_rCX, // kFArg1
+ rs_rDX, // kFArg2
+ rs_rBX, // kFArg3
+ RegStorage::InvalidReg(), // kFArg4
+ RegStorage::InvalidReg(), // kFArg5
+ RegStorage::InvalidReg(), // kFArg6
+ RegStorage::InvalidReg(), // kFArg7
+ RegStorage::InvalidReg(), // kFArg8
+ RegStorage::InvalidReg(), // kFArg9
+ RegStorage::InvalidReg(), // kFArg10
+ RegStorage::InvalidReg(), // kFArg11
+ RegStorage::InvalidReg(), // kFArg12
+ RegStorage::InvalidReg(), // kFArg13
+ RegStorage::InvalidReg(), // kFArg14
+ RegStorage::InvalidReg(), // kFArg15
+ rs_rAX, // kRet0
+ rs_rDX, // kRet1
+ rs_rAX, // kInvokeTgt
+ rs_rAX, // kHiddenArg - used to hold the method index before copying to fr0.
+ rs_fr0, // kHiddenFpArg
+ rs_rCX, // kCount
+};
+
+// 32-bit reg storage locations for 64-bit targets.
+static const RegStorage RegStorage32FromSpecialTargetRegister_Target64[] {
+ RegStorage::InvalidReg(), // kSelf - Thread pointer.
+ RegStorage::InvalidReg(), // kSuspend - Used to reduce suspend checks for some targets.
+ RegStorage::InvalidReg(), // kLr - no register as the return address is pushed on entry.
+ RegStorage::InvalidReg(), // kPc - TODO: RIP based addressing.
+ rs_rX86_SP_32, // kSp
+ rs_rDI, // kArg0
+ rs_rSI, // kArg1
+ rs_rDX, // kArg2
+ rs_rCX, // kArg3
+ rs_r8, // kArg4
+ rs_r9, // kArg5
+ RegStorage::InvalidReg(), // kArg6
+ RegStorage::InvalidReg(), // kArg7
+ rs_fr0, // kFArg0
+ rs_fr1, // kFArg1
+ rs_fr2, // kFArg2
+ rs_fr3, // kFArg3
+ rs_fr4, // kFArg4
+ rs_fr5, // kFArg5
+ rs_fr6, // kFArg6
+ rs_fr7, // kFArg7
+ RegStorage::InvalidReg(), // kFArg8
+ RegStorage::InvalidReg(), // kFArg9
+ RegStorage::InvalidReg(), // kFArg10
+ RegStorage::InvalidReg(), // kFArg11
+ RegStorage::InvalidReg(), // kFArg12
+ RegStorage::InvalidReg(), // kFArg13
+ RegStorage::InvalidReg(), // kFArg14
+ RegStorage::InvalidReg(), // kFArg15
+ rs_rAX, // kRet0
+ rs_rDX, // kRet1
+ rs_rAX, // kInvokeTgt
+ rs_rAX, // kHiddenArg
+ RegStorage::InvalidReg(), // kHiddenFpArg
+ rs_rCX, // kCount
+};
+static_assert(arraysize(RegStorage32FromSpecialTargetRegister_Target32) ==
+ arraysize(RegStorage32FromSpecialTargetRegister_Target64),
+ "Mismatch in RegStorage array sizes");
+
// Return a target-dependent special register for 32-bit.
-RegStorage X86Mir2Lir::TargetReg32(SpecialTargetRegister reg) {
- RegStorage res_reg = RegStorage::InvalidReg();
- switch (reg) {
- case kSelf: res_reg = RegStorage::InvalidReg(); break;
- case kSuspend: res_reg = RegStorage::InvalidReg(); break;
- case kLr: res_reg = RegStorage::InvalidReg(); break;
- case kPc: res_reg = RegStorage::InvalidReg(); break;
- case kSp: res_reg = rs_rX86_SP_32; break; // This must be the concrete one, as _SP is target-
- // specific size.
- case kArg0: res_reg = rs_rX86_ARG0; break;
- case kArg1: res_reg = rs_rX86_ARG1; break;
- case kArg2: res_reg = rs_rX86_ARG2; break;
- case kArg3: res_reg = rs_rX86_ARG3; break;
- case kArg4: res_reg = rs_rX86_ARG4; break;
- case kArg5: res_reg = rs_rX86_ARG5; break;
- case kFArg0: res_reg = rs_rX86_FARG0; break;
- case kFArg1: res_reg = rs_rX86_FARG1; break;
- case kFArg2: res_reg = rs_rX86_FARG2; break;
- case kFArg3: res_reg = rs_rX86_FARG3; break;
- case kFArg4: res_reg = rs_rX86_FARG4; break;
- case kFArg5: res_reg = rs_rX86_FARG5; break;
- case kFArg6: res_reg = rs_rX86_FARG6; break;
- case kFArg7: res_reg = rs_rX86_FARG7; break;
- case kRet0: res_reg = rs_rX86_RET0; break;
- case kRet1: res_reg = rs_rX86_RET1; break;
- case kInvokeTgt: res_reg = rs_rX86_INVOKE_TGT; break;
- case kHiddenArg: res_reg = rs_rAX; break;
- case kHiddenFpArg: DCHECK(!cu_->target64); res_reg = rs_fr0; break;
- case kCount: res_reg = rs_rX86_COUNT; break;
- default: res_reg = RegStorage::InvalidReg();
- }
- return res_reg;
+RegStorage X86Mir2Lir::TargetReg32(SpecialTargetRegister reg) const {
+ DCHECK_EQ(RegStorage32FromSpecialTargetRegister_Target32[kCount], rs_rCX);
+ DCHECK_EQ(RegStorage32FromSpecialTargetRegister_Target64[kCount], rs_rCX);
+ DCHECK_LT(reg, arraysize(RegStorage32FromSpecialTargetRegister_Target32));
+ return cu_->target64 ? RegStorage32FromSpecialTargetRegister_Target64[reg]
+ : RegStorage32FromSpecialTargetRegister_Target32[reg];
}
RegStorage X86Mir2Lir::TargetReg(SpecialTargetRegister reg) {
@@ -433,7 +467,7 @@ void X86Mir2Lir::AdjustSpillMask() {
RegStorage X86Mir2Lir::AllocateByteRegister() {
RegStorage reg = AllocTypedTemp(false, kCoreReg);
if (!cu_->target64) {
- DCHECK_LT(reg.GetRegNum(), rs_rX86_SP.GetRegNum());
+ DCHECK_LT(reg.GetRegNum(), rs_rX86_SP_32.GetRegNum());
}
return reg;
}
@@ -442,8 +476,8 @@ RegStorage X86Mir2Lir::Get128BitRegister(RegStorage reg) {
return GetRegInfo(reg)->Master()->GetReg();
}
-bool X86Mir2Lir::IsByteRegister(RegStorage reg) {
- return cu_->target64 || reg.GetRegNum() < rs_rX86_SP.GetRegNum();
+bool X86Mir2Lir::IsByteRegister(RegStorage reg) const {
+ return cu_->target64 || reg.GetRegNum() < rs_rX86_SP_32.GetRegNum();
}
/* Clobber all regs that might be used by an external C call */
@@ -483,8 +517,8 @@ void X86Mir2Lir::ClobberCallerSave() {
RegLocation X86Mir2Lir::GetReturnWideAlt() {
RegLocation res = LocCReturnWide();
- DCHECK(res.reg.GetLowReg() == rs_rAX.GetReg());
- DCHECK(res.reg.GetHighReg() == rs_rDX.GetReg());
+ DCHECK_EQ(res.reg.GetLowReg(), rs_rAX.GetReg());
+ DCHECK_EQ(res.reg.GetHighReg(), rs_rDX.GetReg());
Clobber(rs_rAX);
Clobber(rs_rDX);
MarkInUse(rs_rAX);
@@ -503,41 +537,41 @@ RegLocation X86Mir2Lir::GetReturnAlt() {
/* To be used when explicitly managing register use */
void X86Mir2Lir::LockCallTemps() {
- LockTemp(rs_rX86_ARG0);
- LockTemp(rs_rX86_ARG1);
- LockTemp(rs_rX86_ARG2);
- LockTemp(rs_rX86_ARG3);
+ LockTemp(TargetReg32(kArg0));
+ LockTemp(TargetReg32(kArg1));
+ LockTemp(TargetReg32(kArg2));
+ LockTemp(TargetReg32(kArg3));
if (cu_->target64) {
- LockTemp(rs_rX86_ARG4);
- LockTemp(rs_rX86_ARG5);
- LockTemp(rs_rX86_FARG0);
- LockTemp(rs_rX86_FARG1);
- LockTemp(rs_rX86_FARG2);
- LockTemp(rs_rX86_FARG3);
- LockTemp(rs_rX86_FARG4);
- LockTemp(rs_rX86_FARG5);
- LockTemp(rs_rX86_FARG6);
- LockTemp(rs_rX86_FARG7);
+ LockTemp(TargetReg32(kArg4));
+ LockTemp(TargetReg32(kArg5));
+ LockTemp(TargetReg32(kFArg0));
+ LockTemp(TargetReg32(kFArg1));
+ LockTemp(TargetReg32(kFArg2));
+ LockTemp(TargetReg32(kFArg3));
+ LockTemp(TargetReg32(kFArg4));
+ LockTemp(TargetReg32(kFArg5));
+ LockTemp(TargetReg32(kFArg6));
+ LockTemp(TargetReg32(kFArg7));
}
}
/* To be used when explicitly managing register use */
void X86Mir2Lir::FreeCallTemps() {
- FreeTemp(rs_rX86_ARG0);
- FreeTemp(rs_rX86_ARG1);
- FreeTemp(rs_rX86_ARG2);
- FreeTemp(rs_rX86_ARG3);
+ FreeTemp(TargetReg32(kArg0));
+ FreeTemp(TargetReg32(kArg1));
+ FreeTemp(TargetReg32(kArg2));
+ FreeTemp(TargetReg32(kArg3));
if (cu_->target64) {
- FreeTemp(rs_rX86_ARG4);
- FreeTemp(rs_rX86_ARG5);
- FreeTemp(rs_rX86_FARG0);
- FreeTemp(rs_rX86_FARG1);
- FreeTemp(rs_rX86_FARG2);
- FreeTemp(rs_rX86_FARG3);
- FreeTemp(rs_rX86_FARG4);
- FreeTemp(rs_rX86_FARG5);
- FreeTemp(rs_rX86_FARG6);
- FreeTemp(rs_rX86_FARG7);
+ FreeTemp(TargetReg32(kArg4));
+ FreeTemp(TargetReg32(kArg5));
+ FreeTemp(TargetReg32(kFArg0));
+ FreeTemp(TargetReg32(kFArg1));
+ FreeTemp(TargetReg32(kFArg2));
+ FreeTemp(TargetReg32(kFArg3));
+ FreeTemp(TargetReg32(kFArg4));
+ FreeTemp(TargetReg32(kFArg5));
+ FreeTemp(TargetReg32(kFArg6));
+ FreeTemp(TargetReg32(kFArg7));
}
}
@@ -687,11 +721,14 @@ void X86Mir2Lir::SpillCoreRegs() {
}
// Spill mask not including fake return address register
uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum());
- int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_);
+ int offset =
+ frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_);
OpSize size = cu_->target64 ? k64 : k32;
+ const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
for (int reg = 0; mask; mask >>= 1, reg++) {
if (mask & 0x1) {
- StoreBaseDisp(rs_rX86_SP, offset, cu_->target64 ? RegStorage::Solo64(reg) : RegStorage::Solo32(reg),
+ StoreBaseDisp(rs_rSP, offset,
+ cu_->target64 ? RegStorage::Solo64(reg) : RegStorage::Solo32(reg),
size, kNotVolatile);
offset += GetInstructionSetPointerSize(cu_->instruction_set);
}
@@ -706,9 +743,10 @@ void X86Mir2Lir::UnSpillCoreRegs() {
uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum());
int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_);
OpSize size = cu_->target64 ? k64 : k32;
+ const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
for (int reg = 0; mask; mask >>= 1, reg++) {
if (mask & 0x1) {
- LoadBaseDisp(rs_rX86_SP, offset, cu_->target64 ? RegStorage::Solo64(reg) : RegStorage::Solo32(reg),
+ LoadBaseDisp(rs_rSP, offset, cu_->target64 ? RegStorage::Solo64(reg) : RegStorage::Solo32(reg),
size, kNotVolatile);
offset += GetInstructionSetPointerSize(cu_->instruction_set);
}
@@ -720,11 +758,12 @@ void X86Mir2Lir::SpillFPRegs() {
return;
}
uint32_t mask = fp_spill_mask_;
- int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * (num_fp_spills_ + num_core_spills_));
+ int offset = frame_size_ -
+ (GetInstructionSetPointerSize(cu_->instruction_set) * (num_fp_spills_ + num_core_spills_));
+ const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
for (int reg = 0; mask; mask >>= 1, reg++) {
if (mask & 0x1) {
- StoreBaseDisp(rs_rX86_SP, offset, RegStorage::FloatSolo64(reg),
- k64, kNotVolatile);
+ StoreBaseDisp(rs_rSP, offset, RegStorage::FloatSolo64(reg), k64, kNotVolatile);
offset += sizeof(double);
}
}
@@ -734,10 +773,12 @@ void X86Mir2Lir::UnSpillFPRegs() {
return;
}
uint32_t mask = fp_spill_mask_;
- int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * (num_fp_spills_ + num_core_spills_));
+ int offset = frame_size_ -
+ (GetInstructionSetPointerSize(cu_->instruction_set) * (num_fp_spills_ + num_core_spills_));
+ const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
for (int reg = 0; mask; mask >>= 1, reg++) {
if (mask & 0x1) {
- LoadBaseDisp(rs_rX86_SP, offset, RegStorage::FloatSolo64(reg),
+ LoadBaseDisp(rs_rSP, offset, RegStorage::FloatSolo64(reg),
k64, kNotVolatile);
offset += sizeof(double);
}
@@ -783,49 +824,6 @@ X86Mir2Lir::X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator*
<< " is wrong: expecting " << i << ", seeing "
<< static_cast<int>(X86Mir2Lir::EncodingMap[i].opcode);
}
- if (cu_->target64) {
- rs_rX86_SP = rs_rX86_SP_64;
-
- rs_rX86_ARG0 = rs_rDI;
- rs_rX86_ARG1 = rs_rSI;
- rs_rX86_ARG2 = rs_rDX;
- rs_rX86_ARG3 = rs_rCX;
- rs_rX86_ARG4 = rs_r8;
- rs_rX86_ARG5 = rs_r9;
- rs_rX86_FARG0 = rs_fr0;
- rs_rX86_FARG1 = rs_fr1;
- rs_rX86_FARG2 = rs_fr2;
- rs_rX86_FARG3 = rs_fr3;
- rs_rX86_FARG4 = rs_fr4;
- rs_rX86_FARG5 = rs_fr5;
- rs_rX86_FARG6 = rs_fr6;
- rs_rX86_FARG7 = rs_fr7;
- rs_rX86_INVOKE_TGT = rs_rDI;
- } else {
- rs_rX86_SP = rs_rX86_SP_32;
-
- rs_rX86_ARG0 = rs_rAX;
- rs_rX86_ARG1 = rs_rCX;
- rs_rX86_ARG2 = rs_rDX;
- rs_rX86_ARG3 = rs_rBX;
- rs_rX86_ARG4 = RegStorage::InvalidReg();
- rs_rX86_ARG5 = RegStorage::InvalidReg();
- rs_rX86_FARG0 = rs_rAX;
- rs_rX86_FARG1 = rs_rCX;
- rs_rX86_FARG2 = rs_rDX;
- rs_rX86_FARG3 = rs_rBX;
- rs_rX86_FARG4 = RegStorage::InvalidReg();
- rs_rX86_FARG5 = RegStorage::InvalidReg();
- rs_rX86_FARG6 = RegStorage::InvalidReg();
- rs_rX86_FARG7 = RegStorage::InvalidReg();
- rs_rX86_INVOKE_TGT = rs_rAX;
- // TODO(64): Initialize with invalid reg
-// rX86_ARG4 = RegStorage::InvalidReg();
-// rX86_ARG5 = RegStorage::InvalidReg();
- }
- rs_rX86_RET0 = rs_rAX;
- rs_rX86_RET1 = rs_rDX;
- rs_rX86_COUNT = rs_rCX;
}
Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
@@ -875,7 +873,7 @@ void X86Mir2Lir::GenConstWide(RegLocation rl_dest, int64_t value) {
(rl_dest.location == kLocCompilerTemp)) {
int32_t val_lo = Low32Bits(value);
int32_t val_hi = High32Bits(value);
- int r_base = rs_rX86_SP.GetReg();
+ int r_base = rs_rX86_SP_32.GetReg();
int displacement = SRegOffset(rl_dest.s_reg_low);
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
@@ -1327,7 +1325,7 @@ bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
// Load the start index from stack, remembering that we pushed EDI.
int displacement = SRegOffset(rl_start.s_reg_low) + sizeof(uint32_t);
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- Load32Disp(rs_rX86_SP, displacement, rs_rDI);
+ Load32Disp(rs_rX86_SP_32, displacement, rs_rDI);
// Dalvik register annotation in LoadBaseIndexedDisp() used wrong offset. Fix it.
DCHECK(!DECODE_ALIAS_INFO_WIDE(last_lir_insn_->flags.alias_info));
int reg_id = DECODE_ALIAS_INFO_REG(last_lir_insn_->flags.alias_info) - 1;
@@ -2264,7 +2262,7 @@ void X86Mir2Lir::GenReduceVector(MIR* mir) {
StoreFinalValue(rl_dest, rl_result);
} else {
int displacement = SRegOffset(rl_result.s_reg_low);
- LIR *l = NewLIR3(extr_opcode, rs_rX86_SP.GetReg(), displacement, vector_src.GetReg());
+ LIR *l = NewLIR3(extr_opcode, rs_rX86_SP_32.GetReg(), displacement, vector_src.GetReg());
AnnotateDalvikRegAccess(l, displacement >> 2, true /* is_load */, is_wide /* is_64bit */);
AnnotateDalvikRegAccess(l, displacement >> 2, false /* is_load */, is_wide /* is_64bit */);
}
@@ -2462,18 +2460,14 @@ RegStorage X86Mir2Lir::GetArgMappingToPhysicalReg(int arg_num) {
return in_to_reg_storage_mapping_.Get(arg_num);
}
-RegStorage X86Mir2Lir::GetCoreArgMappingToPhysicalReg(int core_arg_num) {
+RegStorage X86Mir2Lir::GetCoreArgMappingToPhysicalReg(int core_arg_num) const {
// For the 32-bit internal ABI, the first 3 arguments are passed in registers.
// Not used for 64-bit, TODO: Move X86_32 to the same framework
switch (core_arg_num) {
- case 0:
- return rs_rX86_ARG1;
- case 1:
- return rs_rX86_ARG2;
- case 2:
- return rs_rX86_ARG3;
- default:
- return RegStorage::InvalidReg();
+ case 0: return TargetReg32(kArg1);
+ case 1: return TargetReg32(kArg2);
+ case 2: return TargetReg32(kArg3);
+ default: return RegStorage::InvalidReg();
}
}
@@ -2503,7 +2497,8 @@ void X86Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
StoreValue(rl_method, rl_src);
// If Method* has been promoted, explicitly flush
if (rl_method.location == kLocPhysReg) {
- StoreRefDisp(rs_rX86_SP, 0, As32BitReg(TargetReg(kArg0, kRef)), kNotVolatile);
+ const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
+ StoreRefDisp(rs_rSP, 0, As32BitReg(TargetReg(kArg0, kRef)), kNotVolatile);
}
if (mir_graph_->GetNumOfInVRs() == 0) {
@@ -2540,9 +2535,9 @@ void X86Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
} else {
// Needs flush.
if (t_loc->ref) {
- StoreRefDisp(rs_rX86_SP, SRegOffset(start_vreg + i), reg, kNotVolatile);
+ StoreRefDisp(rs_rX86_SP_64, SRegOffset(start_vreg + i), reg, kNotVolatile);
} else {
- StoreBaseDisp(rs_rX86_SP, SRegOffset(start_vreg + i), reg, t_loc->wide ? k64 : k32,
+ StoreBaseDisp(rs_rX86_SP_64, SRegOffset(start_vreg + i), reg, t_loc->wide ? k64 : k32,
kNotVolatile);
}
}
@@ -2550,9 +2545,9 @@ void X86Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
// If arriving in frame & promoted.
if (t_loc->location == kLocPhysReg) {
if (t_loc->ref) {
- LoadRefDisp(rs_rX86_SP, SRegOffset(start_vreg + i), t_loc->reg, kNotVolatile);
+ LoadRefDisp(rs_rX86_SP_64, SRegOffset(start_vreg + i), t_loc->reg, kNotVolatile);
} else {
- LoadBaseDisp(rs_rX86_SP, SRegOffset(start_vreg + i), t_loc->reg,
+ LoadBaseDisp(rs_rX86_SP_64, SRegOffset(start_vreg + i), t_loc->reg,
t_loc->wide ? k64 : k32, kNotVolatile);
}
}
@@ -2578,16 +2573,16 @@ int X86Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
uintptr_t direct_method, InvokeType type, bool skip_this) {
if (!cu_->target64) {
return Mir2Lir::GenDalvikArgsNoRange(info,
- call_state, pcrLabel, next_call_insn,
- target_method,
- vtable_idx, direct_code,
- direct_method, type, skip_this);
+ call_state, pcrLabel, next_call_insn,
+ target_method,
+ vtable_idx, direct_code,
+ direct_method, type, skip_this);
}
return GenDalvikArgsRange(info,
- call_state, pcrLabel, next_call_insn,
- target_method,
- vtable_idx, direct_code,
- direct_method, type, skip_this);
+ call_state, pcrLabel, next_call_insn,
+ target_method,
+ vtable_idx, direct_code,
+ direct_method, type, skip_this);
}
/*
@@ -2643,14 +2638,14 @@ int X86Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
loc = UpdateLocWide(loc);
if (loc.location == kLocPhysReg) {
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- StoreBaseDisp(rs_rX86_SP, SRegOffset(loc.s_reg_low), loc.reg, k64, kNotVolatile);
+ StoreBaseDisp(rs_rX86_SP_64, SRegOffset(loc.s_reg_low), loc.reg, k64, kNotVolatile);
}
next_arg += 2;
} else {
loc = UpdateLoc(loc);
if (loc.location == kLocPhysReg) {
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- StoreBaseDisp(rs_rX86_SP, SRegOffset(loc.s_reg_low), loc.reg, k32, kNotVolatile);
+ StoreBaseDisp(rs_rX86_SP_64, SRegOffset(loc.s_reg_low), loc.reg, k32, kNotVolatile);
}
next_arg++;
}
@@ -2705,23 +2700,23 @@ int X86Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
ScopedMemRefType mem_ref_type2(this, ResourceMask::kDalvikReg);
if (src_is_16b_aligned) {
- ld1 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset, kMovA128FP);
+ ld1 = OpMovRegMem(temp, rs_rX86_SP_64, current_src_offset, kMovA128FP);
} else if (src_is_8b_aligned) {
- ld1 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset, kMovLo128FP);
- ld2 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset + (bytes_to_move >> 1),
+ ld1 = OpMovRegMem(temp, rs_rX86_SP_64, current_src_offset, kMovLo128FP);
+ ld2 = OpMovRegMem(temp, rs_rX86_SP_64, current_src_offset + (bytes_to_move >> 1),
kMovHi128FP);
} else {
- ld1 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset, kMovU128FP);
+ ld1 = OpMovRegMem(temp, rs_rX86_SP_64, current_src_offset, kMovU128FP);
}
if (dest_is_16b_aligned) {
- st1 = OpMovMemReg(rs_rX86_SP, current_dest_offset, temp, kMovA128FP);
+ st1 = OpMovMemReg(rs_rX86_SP_64, current_dest_offset, temp, kMovA128FP);
} else if (dest_is_8b_aligned) {
- st1 = OpMovMemReg(rs_rX86_SP, current_dest_offset, temp, kMovLo128FP);
- st2 = OpMovMemReg(rs_rX86_SP, current_dest_offset + (bytes_to_move >> 1),
+ st1 = OpMovMemReg(rs_rX86_SP_64, current_dest_offset, temp, kMovLo128FP);
+ st2 = OpMovMemReg(rs_rX86_SP_64, current_dest_offset + (bytes_to_move >> 1),
temp, kMovHi128FP);
} else {
- st1 = OpMovMemReg(rs_rX86_SP, current_dest_offset, temp, kMovU128FP);
+ st1 = OpMovMemReg(rs_rX86_SP_64, current_dest_offset, temp, kMovU128FP);
}
// TODO If we could keep track of aliasing information for memory accesses that are wider
@@ -2758,8 +2753,8 @@ int X86Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
RegStorage temp = TargetReg(kArg3, kNotWide);
// Now load the argument VR and store to the outs.
- Load32Disp(rs_rX86_SP, current_src_offset, temp);
- Store32Disp(rs_rX86_SP, current_dest_offset, temp);
+ Load32Disp(rs_rX86_SP_64, current_src_offset, temp);
+ Store32Disp(rs_rX86_SP_64, current_dest_offset, temp);
}
current_src_offset += bytes_to_move;
@@ -2785,17 +2780,17 @@ int X86Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
if (rl_arg.wide) {
if (rl_arg.location == kLocPhysReg) {
- StoreBaseDisp(rs_rX86_SP, out_offset, rl_arg.reg, k64, kNotVolatile);
+ StoreBaseDisp(rs_rX86_SP_64, out_offset, rl_arg.reg, k64, kNotVolatile);
} else {
LoadValueDirectWideFixed(rl_arg, regWide);
- StoreBaseDisp(rs_rX86_SP, out_offset, regWide, k64, kNotVolatile);
+ StoreBaseDisp(rs_rX86_SP_64, out_offset, regWide, k64, kNotVolatile);
}
} else {
if (rl_arg.location == kLocPhysReg) {
- StoreBaseDisp(rs_rX86_SP, out_offset, rl_arg.reg, k32, kNotVolatile);
+ StoreBaseDisp(rs_rX86_SP_64, out_offset, rl_arg.reg, k32, kNotVolatile);
} else {
LoadValueDirectFixed(rl_arg, regSingle);
- StoreBaseDisp(rs_rX86_SP, out_offset, regSingle, k32, kNotVolatile);
+ StoreBaseDisp(rs_rX86_SP_64, out_offset, regSingle, k32, kNotVolatile);
}
}
}
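
The switch in TargetReg32 is replaced by two parallel lookup tables indexed by SpecialTargetRegister, with a static_assert keeping them the same length and DCHECKs guarding the index and the final (kCount) entry. A lookup is now a bounds-checked array access; for example, inside any X86Mir2Lir member:

    // Illustrative lookups against the tables above; the mappings are taken
    // directly from this diff.
    RegStorage arg0  = TargetReg32(kArg0);   // rs_rAX on x86-32, rs_rDI on x86-64.
    RegStorage farg0 = TargetReg32(kFArg0);  // rs_rAX on x86-32, rs_fr0 on x86-64.
    RegStorage sp    = TargetReg32(kSp);     // rs_rX86_SP_32 on either target.
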
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index cb9a24a336..c1c79caa19 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -230,7 +230,7 @@ LIR* X86Mir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2)
// TODO: there are several instances of this check. A utility function perhaps?
// TODO: Similar to Arm's reg < 8 check. Perhaps add attribute checks to RegStorage?
// Use shifts instead of a byte operand if the source can't be byte accessed.
- if (r_src2.GetRegNum() >= rs_rX86_SP.GetRegNum()) {
+ if (r_src2.GetRegNum() >= rs_rX86_SP_32.GetRegNum()) {
NewLIR2(is64Bit ? kX86Mov64RR : kX86Mov32RR, r_dest_src1.GetReg(), r_src2.GetReg());
NewLIR2(is64Bit ? kX86Sal64RI : kX86Sal32RI, r_dest_src1.GetReg(), is64Bit ? 56 : 24);
return NewLIR2(is64Bit ? kX86Sar64RI : kX86Sar32RI, r_dest_src1.GetReg(),
@@ -385,7 +385,7 @@ LIR* X86Mir2Lir::OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int o
}
LIR *l = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), offset);
if (mem_ref_type_ == ResourceMask::kDalvikReg) {
- DCHECK(r_base == rs_rX86_SP);
+ DCHECK_EQ(r_base, cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32);
AnnotateDalvikRegAccess(l, offset >> 2, true /* is_load */, false /* is_64bit */);
}
return l;
@@ -411,7 +411,7 @@ LIR* X86Mir2Lir::OpMemReg(OpKind op, RegLocation rl_dest, int r_value) {
LOG(FATAL) << "Bad case in OpMemReg " << op;
break;
}
- LIR *l = NewLIR3(opcode, rs_rX86_SP.GetReg(), displacement, r_value);
+ LIR *l = NewLIR3(opcode, rs_rX86_SP_32.GetReg(), displacement, r_value);
if (mem_ref_type_ == ResourceMask::kDalvikReg) {
AnnotateDalvikRegAccess(l, displacement >> 2, true /* is_load */, is64Bit /* is_64bit */);
AnnotateDalvikRegAccess(l, displacement >> 2, false /* is_load */, is64Bit /* is_64bit */);
@@ -437,7 +437,7 @@ LIR* X86Mir2Lir::OpRegMem(OpKind op, RegStorage r_dest, RegLocation rl_value) {
LOG(FATAL) << "Bad case in OpRegMem " << op;
break;
}
- LIR *l = NewLIR3(opcode, r_dest.GetReg(), rs_rX86_SP.GetReg(), displacement);
+ LIR *l = NewLIR3(opcode, r_dest.GetReg(), rs_rX86_SP_32.GetReg(), displacement);
if (mem_ref_type_ == ResourceMask::kDalvikReg) {
AnnotateDalvikRegAccess(l, displacement >> 2, true /* is_load */, is64Bit /* is_64bit */);
}
@@ -514,7 +514,7 @@ LIR* X86Mir2Lir::OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src, int
r_src.GetReg() /* index */, value /* scale */, 0 /* disp */);
} else if (op == kOpAdd) { // lea add special case
return NewLIR5(r_dest.Is64Bit() ? kX86Lea64RA : kX86Lea32RA, r_dest.GetReg(),
- r_src.GetReg() /* base */, rs_rX86_SP.GetReg()/*r4sib_no_index*/ /* index */,
+ r_src.GetReg() /* base */, rs_rX86_SP_32.GetReg()/*r4sib_no_index*/ /* index */,
0 /* scale */, value /* disp */);
}
OpRegCopy(r_dest, r_src);
@@ -705,7 +705,7 @@ LIR* X86Mir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int
}
}
if (mem_ref_type_ == ResourceMask::kDalvikReg) {
- DCHECK(r_base == rs_rX86_SP);
+ DCHECK_EQ(r_base, cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32);
AnnotateDalvikRegAccess(load, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
true /* is_load */, is64bit);
if (pair) {
@@ -870,7 +870,7 @@ LIR* X86Mir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int
store2 = NewLIR3(opcode, r_base.GetReg(), displacement + HIWORD_OFFSET, r_src.GetHighReg());
}
if (mem_ref_type_ == ResourceMask::kDalvikReg) {
- DCHECK(r_base == rs_rX86_SP);
+ DCHECK_EQ(r_base, cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32);
AnnotateDalvikRegAccess(store, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
false /* is_load */, is64bit);
if (pair) {
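
The shift fallback in OpRegReg above exists because, on x86-32, only the registers numbered below SP (AL, CL, DL, BL) have byte-addressable forms, so a byte sign-extension from any other register is emulated with a copy and a shift pair. What the emitted mov/sal/sar sequence computes, as plain C++ for the 32-bit case:

    #include <cstdint>

    // Illustrative equivalent of "mov dst, src; sal dst, 24; sar dst, 24":
    // sign-extend the low byte of a register with no byte-addressable form.
    int32_t SignExtendLowByte(int32_t src) {
      return static_cast<int32_t>(static_cast<uint32_t>(src) << 24) >> 24;
    }
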
diff --git a/compiler/dex/quick/x86/x86_lir.h b/compiler/dex/quick/x86/x86_lir.h
index afdc244dac..76a67c4d6c 100644
--- a/compiler/dex/quick/x86/x86_lir.h
+++ b/compiler/dex/quick/x86/x86_lir.h
@@ -234,7 +234,7 @@ constexpr RegStorage rs_r3q(RegStorage::kValid | r3q);
constexpr RegStorage rs_rBX = rs_r3;
constexpr RegStorage rs_rX86_SP_64(RegStorage::kValid | r4sp_64);
constexpr RegStorage rs_rX86_SP_32(RegStorage::kValid | r4sp_32);
-extern RegStorage rs_rX86_SP;
+static_assert(rs_rX86_SP_64.GetRegNum() == rs_rX86_SP_32.GetRegNum(), "Unexpected mismatch");
constexpr RegStorage rs_r5(RegStorage::kValid | r5);
constexpr RegStorage rs_r5q(RegStorage::kValid | r5q);
constexpr RegStorage rs_rBP = rs_r5;
@@ -313,24 +313,8 @@ constexpr RegStorage rs_xr13(RegStorage::kValid | xr13);
constexpr RegStorage rs_xr14(RegStorage::kValid | xr14);
constexpr RegStorage rs_xr15(RegStorage::kValid | xr15);
-extern RegStorage rs_rX86_ARG0;
-extern RegStorage rs_rX86_ARG1;
-extern RegStorage rs_rX86_ARG2;
-extern RegStorage rs_rX86_ARG3;
-extern RegStorage rs_rX86_ARG4;
-extern RegStorage rs_rX86_ARG5;
-extern RegStorage rs_rX86_FARG0;
-extern RegStorage rs_rX86_FARG1;
-extern RegStorage rs_rX86_FARG2;
-extern RegStorage rs_rX86_FARG3;
-extern RegStorage rs_rX86_FARG4;
-extern RegStorage rs_rX86_FARG5;
-extern RegStorage rs_rX86_FARG6;
-extern RegStorage rs_rX86_FARG7;
-extern RegStorage rs_rX86_RET0;
-extern RegStorage rs_rX86_RET1;
-extern RegStorage rs_rX86_INVOKE_TGT;
-extern RegStorage rs_rX86_COUNT;
+constexpr RegStorage rs_rX86_RET0 = rs_rAX;
+constexpr RegStorage rs_rX86_RET1 = rs_rDX;
// RegisterLocation templates return values (r_V0, or r_V0/r_V1).
const RegLocation x86_loc_c_return
diff --git a/compiler/dex/reg_storage.h b/compiler/dex/reg_storage.h
index 4a84ff2516..46ed011b53 100644
--- a/compiler/dex/reg_storage.h
+++ b/compiler/dex/reg_storage.h
@@ -339,6 +339,9 @@ class RegStorage : public ValueObject {
private:
uint16_t reg_;
};
+static inline std::ostream& operator<<(std::ostream& o, const RegStorage& rhs) {
+ return o << rhs.GetRawBits(); // TODO: better output.
+}
} // namespace art