AArch64: Enable LONG_* and INT_* opcodes.
This patch enables most of the LONG_* and INT_* opcodes for AArch64 and
fixes several issues in their code generation. The patch has been tested
and passes all of the Dalvik tests except 018 and 107.
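Worth noting: AArch64 has no long-remainder instruction, so REM_LONG is
now generated inline as an sdiv/msub pair (see GenDivRem) instead of
going through a runtime helper. A minimal C++ sketch of the identity the
two instructions compute (illustrative only; Remainder64 is not a name
used in this patch, and the generated code performs an explicit
divide-by-zero check first):

    #include <cstdint>

    // Generated sequence:
    //   sdiv temp, a, b          // temp = a / b
    //   msub dest, temp, b, a    // dest = a - temp * b
    int64_t Remainder64(int64_t a, int64_t b) {
      int64_t quotient = a / b;   // sdiv (hardware also defines INT64_MIN / -1)
      return a - quotient * b;    // msub
    }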
Change-Id: Idd1923ed935ee8236ab0c7e5fa969eaefeea8708
Signed-off-by: Serban Constantinescu <serban.constantinescu@arm.com>
diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc
index ec2556b..63e3831 100644
--- a/compiler/dex/frontend.cc
+++ b/compiler/dex/frontend.cc
@@ -136,22 +136,22 @@
// TODO: Remove this when we are able to compile everything.
int arm64_support_list[] = {
Instruction::NOP,
- // Instruction::MOVE,
- // Instruction::MOVE_FROM16,
- // Instruction::MOVE_16,
- // Instruction::MOVE_WIDE,
- // Instruction::MOVE_WIDE_FROM16,
- // Instruction::MOVE_WIDE_16,
- // Instruction::MOVE_OBJECT,
- // Instruction::MOVE_OBJECT_FROM16,
- // Instruction::MOVE_OBJECT_16,
+ Instruction::MOVE,
+ Instruction::MOVE_FROM16,
+ Instruction::MOVE_16,
+ Instruction::MOVE_WIDE,
+ Instruction::MOVE_WIDE_FROM16,
+ Instruction::MOVE_WIDE_16,
+ Instruction::MOVE_OBJECT,
+ Instruction::MOVE_OBJECT_FROM16,
+ Instruction::MOVE_OBJECT_16,
// Instruction::MOVE_RESULT,
// Instruction::MOVE_RESULT_WIDE,
// Instruction::MOVE_RESULT_OBJECT,
Instruction::MOVE_EXCEPTION,
Instruction::RETURN_VOID,
- // Instruction::RETURN,
- // Instruction::RETURN_WIDE,
+ Instruction::RETURN,
+ Instruction::RETURN_WIDE,
// Instruction::RETURN_OBJECT,
// Instruction::CONST_4,
// Instruction::CONST_16,
@@ -184,7 +184,7 @@
// Instruction::CMPG_FLOAT,
// Instruction::CMPL_DOUBLE,
// Instruction::CMPG_DOUBLE,
- // Instruction::CMP_LONG,
+ Instruction::CMP_LONG,
// Instruction::IF_EQ,
// Instruction::IF_NE,
// Instruction::IF_LT,
@@ -258,16 +258,16 @@
// Instruction::INVOKE_INTERFACE_RANGE,
// Instruction::UNUSED_79,
// Instruction::UNUSED_7A,
- // Instruction::NEG_INT,
- // Instruction::NOT_INT,
- // Instruction::NEG_LONG,
- // Instruction::NOT_LONG,
+ Instruction::NEG_INT,
+ Instruction::NOT_INT,
+ Instruction::NEG_LONG,
+ Instruction::NOT_LONG,
// Instruction::NEG_FLOAT,
// Instruction::NEG_DOUBLE,
- // Instruction::INT_TO_LONG,
+ Instruction::INT_TO_LONG,
// Instruction::INT_TO_FLOAT,
// Instruction::INT_TO_DOUBLE,
- // Instruction::LONG_TO_INT,
+ Instruction::LONG_TO_INT,
// Instruction::LONG_TO_FLOAT,
// Instruction::LONG_TO_DOUBLE,
// Instruction::FLOAT_TO_INT,
@@ -276,31 +276,31 @@
// Instruction::DOUBLE_TO_INT,
// Instruction::DOUBLE_TO_LONG,
// Instruction::DOUBLE_TO_FLOAT,
- // Instruction::INT_TO_BYTE,
- // Instruction::INT_TO_CHAR,
- // Instruction::INT_TO_SHORT,
- // Instruction::ADD_INT,
- // Instruction::SUB_INT,
- // Instruction::MUL_INT,
- // Instruction::DIV_INT,
- // Instruction::REM_INT,
- // Instruction::AND_INT,
- // Instruction::OR_INT,
- // Instruction::XOR_INT,
- // Instruction::SHL_INT,
- // Instruction::SHR_INT,
- // Instruction::USHR_INT,
- // Instruction::ADD_LONG,
- // Instruction::SUB_LONG,
- // Instruction::MUL_LONG,
- // Instruction::DIV_LONG,
- // Instruction::REM_LONG,
- // Instruction::AND_LONG,
- // Instruction::OR_LONG,
- // Instruction::XOR_LONG,
- // Instruction::SHL_LONG,
- // Instruction::SHR_LONG,
- // Instruction::USHR_LONG,
+ Instruction::INT_TO_BYTE,
+ Instruction::INT_TO_CHAR,
+ Instruction::INT_TO_SHORT,
+ Instruction::ADD_INT,
+ Instruction::SUB_INT,
+ Instruction::MUL_INT,
+ Instruction::DIV_INT,
+ Instruction::REM_INT,
+ Instruction::AND_INT,
+ Instruction::OR_INT,
+ Instruction::XOR_INT,
+ Instruction::SHL_INT,
+ Instruction::SHR_INT,
+ Instruction::USHR_INT,
+ Instruction::ADD_LONG,
+ Instruction::SUB_LONG,
+ Instruction::MUL_LONG,
+ Instruction::DIV_LONG,
+ Instruction::REM_LONG,
+ Instruction::AND_LONG,
+ Instruction::OR_LONG,
+ Instruction::XOR_LONG,
+ Instruction::SHL_LONG,
+ Instruction::SHR_LONG,
+ Instruction::USHR_LONG,
// Instruction::ADD_FLOAT,
// Instruction::SUB_FLOAT,
// Instruction::MUL_FLOAT,
@@ -311,28 +311,28 @@
// Instruction::MUL_DOUBLE,
// Instruction::DIV_DOUBLE,
// Instruction::REM_DOUBLE,
- // Instruction::ADD_INT_2ADDR,
- // Instruction::SUB_INT_2ADDR,
- // Instruction::MUL_INT_2ADDR,
- // Instruction::DIV_INT_2ADDR,
- // Instruction::REM_INT_2ADDR,
- // Instruction::AND_INT_2ADDR,
- // Instruction::OR_INT_2ADDR,
- // Instruction::XOR_INT_2ADDR,
- // Instruction::SHL_INT_2ADDR,
- // Instruction::SHR_INT_2ADDR,
- // Instruction::USHR_INT_2ADDR,
- // Instruction::ADD_LONG_2ADDR,
- // Instruction::SUB_LONG_2ADDR,
- // Instruction::MUL_LONG_2ADDR,
- // Instruction::DIV_LONG_2ADDR,
- // Instruction::REM_LONG_2ADDR,
- // Instruction::AND_LONG_2ADDR,
- // Instruction::OR_LONG_2ADDR,
- // Instruction::XOR_LONG_2ADDR,
- // Instruction::SHL_LONG_2ADDR,
- // Instruction::SHR_LONG_2ADDR,
- // Instruction::USHR_LONG_2ADDR,
+ Instruction::ADD_INT_2ADDR,
+ Instruction::SUB_INT_2ADDR,
+ Instruction::MUL_INT_2ADDR,
+ Instruction::DIV_INT_2ADDR,
+ Instruction::REM_INT_2ADDR,
+ Instruction::AND_INT_2ADDR,
+ Instruction::OR_INT_2ADDR,
+ Instruction::XOR_INT_2ADDR,
+ Instruction::SHL_INT_2ADDR,
+ Instruction::SHR_INT_2ADDR,
+ Instruction::USHR_INT_2ADDR,
+ Instruction::ADD_LONG_2ADDR,
+ Instruction::SUB_LONG_2ADDR,
+ Instruction::MUL_LONG_2ADDR,
+ Instruction::DIV_LONG_2ADDR,
+ Instruction::REM_LONG_2ADDR,
+ Instruction::AND_LONG_2ADDR,
+ Instruction::OR_LONG_2ADDR,
+ Instruction::XOR_LONG_2ADDR,
+ Instruction::SHL_LONG_2ADDR,
+ Instruction::SHR_LONG_2ADDR,
+ Instruction::USHR_LONG_2ADDR,
// Instruction::ADD_FLOAT_2ADDR,
// Instruction::SUB_FLOAT_2ADDR,
// Instruction::MUL_FLOAT_2ADDR,
@@ -343,25 +343,25 @@
// Instruction::MUL_DOUBLE_2ADDR,
// Instruction::DIV_DOUBLE_2ADDR,
// Instruction::REM_DOUBLE_2ADDR,
- // Instruction::ADD_INT_LIT16,
- // Instruction::RSUB_INT,
- // Instruction::MUL_INT_LIT16,
- // Instruction::DIV_INT_LIT16,
- // Instruction::REM_INT_LIT16,
- // Instruction::AND_INT_LIT16,
- // Instruction::OR_INT_LIT16,
- // Instruction::XOR_INT_LIT16,
+ Instruction::ADD_INT_LIT16,
+ Instruction::RSUB_INT,
+ Instruction::MUL_INT_LIT16,
+ Instruction::DIV_INT_LIT16,
+ Instruction::REM_INT_LIT16,
+ Instruction::AND_INT_LIT16,
+ Instruction::OR_INT_LIT16,
+ Instruction::XOR_INT_LIT16,
Instruction::ADD_INT_LIT8,
- // Instruction::RSUB_INT_LIT8,
- // Instruction::MUL_INT_LIT8,
- // Instruction::DIV_INT_LIT8,
- // Instruction::REM_INT_LIT8,
- // Instruction::AND_INT_LIT8,
- // Instruction::OR_INT_LIT8,
- // Instruction::XOR_INT_LIT8,
- // Instruction::SHL_INT_LIT8,
- // Instruction::SHR_INT_LIT8,
- // Instruction::USHR_INT_LIT8,
+ Instruction::RSUB_INT_LIT8,
+ Instruction::MUL_INT_LIT8,
+ Instruction::DIV_INT_LIT8,
+ Instruction::REM_INT_LIT8,
+ Instruction::AND_INT_LIT8,
+ Instruction::OR_INT_LIT8,
+ Instruction::XOR_INT_LIT8,
+ Instruction::SHL_INT_LIT8,
+ Instruction::SHR_INT_LIT8,
+ Instruction::USHR_INT_LIT8,
// Instruction::IGET_QUICK,
// Instruction::IGET_WIDE_QUICK,
// Instruction::IGET_OBJECT_QUICK,
@@ -403,7 +403,7 @@
// kMirOpNop,
// kMirOpNullCheck,
// kMirOpRangeCheck,
- // kMirOpDivZeroCheck,
+ kMirOpDivZeroCheck,
kMirOpCheck,
// kMirOpCheckPart2,
// kMirOpSelect,
@@ -699,7 +699,7 @@
// V : void
// (ARM64) The current calling convention only supports 32bit softfp,
// which has problems with long, float, double
-constexpr char arm64_supported_types[] = "ZBSCILV";
+constexpr char arm64_supported_types[] = "ZBSCILVJ";
// (x86_64) We still have trouble compiling longs/doubles/floats
constexpr char x86_64_supported_types[] = "ZBSCILV";
diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h
index 2d1c19e..f0a9ca4 100644
--- a/compiler/dex/quick/arm/codegen_arm.h
+++ b/compiler/dex/quick/arm/codegen_arm.h
@@ -120,6 +120,7 @@
bool GenInlinedSqrt(CallInfo* info);
bool GenInlinedPeek(CallInfo* info, OpSize size);
bool GenInlinedPoke(CallInfo* info, OpSize size);
+ void GenNotLong(RegLocation rl_dest, RegLocation rl_src);
void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
void GenOrLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
RegLocation rl_src2);
@@ -127,6 +128,8 @@
RegLocation rl_src2);
void GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
RegLocation rl_src2);
+ void GenDivRemLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2, bool is_div);
RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div);
RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div);
void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index 384a008..2556788 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -998,6 +998,15 @@
#endif
}
+void ArmMir2Lir::GenNotLong(RegLocation rl_dest, RegLocation rl_src) {
+ LOG(FATAL) << "Unexpected use GenNotLong()";
+}
+
+void ArmMir2Lir::GenDivRemLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2, bool is_div) {
+ LOG(FATAL) << "Unexpected use GenDivRemLong()";
+}
+
void ArmMir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
rl_src = LoadValueWide(rl_src, kCoreReg);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
diff --git a/compiler/dex/quick/arm64/arm64_lir.h b/compiler/dex/quick/arm64/arm64_lir.h
index c3b23fd..6a6b0f6 100644
--- a/compiler/dex/quick/arm64/arm64_lir.h
+++ b/compiler/dex/quick/arm64/arm64_lir.h
@@ -298,6 +298,7 @@
kA64Mov2rr, // mov [00101010000] rm[20-16] [000000] [11111] rd[4-0].
kA64Mvn2rr, // mvn [00101010001] rm[20-16] [000000] [11111] rd[4-0].
kA64Mul3rrr, // mul [00011011000] rm[20-16] [011111] rn[9-5] rd[4-0].
+ kA64Msub4rrrr, // msub [s0011011000] rm[20-16] [1] ra[14-10] rn[9-5] rd[4-0].
kA64Neg3rro, // neg alias of "sub arg0, rzr, arg1, arg2".
kA64Orr3Rrl, // orr [s01100100] N[22] imm_r[21-16] imm_s[15-10] rn[9-5] rd[4-0].
kA64Orr4rrro, // orr [s0101010] shift[23-22] [0] rm[20-16] imm_6[15-10] rn[9-5] rd[4-0].
diff --git a/compiler/dex/quick/arm64/assemble_arm64.cc b/compiler/dex/quick/arm64/assemble_arm64.cc
index 656f8fd..4a0c055 100644
--- a/compiler/dex/quick/arm64/assemble_arm64.cc
+++ b/compiler/dex/quick/arm64/assemble_arm64.cc
@@ -422,6 +422,10 @@
kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtRegR, 20, 16,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
"mul", "!0r, !1r, !2r", kFixupNone),
+ ENCODING_MAP(WIDE(kA64Msub4rrrr), SF_VARIANTS(0x1b008000),
+ kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtRegR, 14, 10,
+ kFmtRegR, 20, 16, IS_QUAD_OP | REG_DEF0_USE123,
+ "msub", "!0r, !1r, !3r, !2r", kFixupNone),
ENCODING_MAP(WIDE(kA64Neg3rro), SF_VARIANTS(0x4b0003e0),
kFmtRegR, 4, 0, kFmtRegR, 20, 16, kFmtShift, -1, -1,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
diff --git a/compiler/dex/quick/arm64/call_arm64.cc b/compiler/dex/quick/arm64/call_arm64.cc
index f7a0199..2e3ef86 100644
--- a/compiler/dex/quick/arm64/call_arm64.cc
+++ b/compiler/dex/quick/arm64/call_arm64.cc
@@ -94,8 +94,7 @@
tab_rec->anchor = switch_label;
// Add displacement to base branch address and go!
- OpRegRegRegShift(kOpAdd, r_base.GetReg(), r_base.GetReg(), r_disp.GetReg(),
- ENCODE_NO_SHIFT, true);
+ OpRegRegRegShift(kOpAdd, r_base, r_base, r_disp, ENCODE_NO_SHIFT);
NewLIR1(kA64Br1x, r_base.GetReg());
// Loop exit label.
@@ -148,8 +147,7 @@
tab_rec->anchor = switch_label;
// Add displacement to base branch address and go!
- OpRegRegRegShift(kOpAdd, branch_reg.GetReg(), branch_reg.GetReg(), disp_reg.GetReg(),
- ENCODE_NO_SHIFT, true);
+ OpRegRegRegShift(kOpAdd, branch_reg, branch_reg, disp_reg, ENCODE_NO_SHIFT);
NewLIR1(kA64Br1x, branch_reg.GetReg());
// branch_over target here
@@ -334,7 +332,7 @@
if (!skip_overflow_check) {
LoadWordDisp(rs_rA64_SELF, Thread::StackEndOffset<8>().Int32Value(), rs_x12);
- OpRegImm64(kOpSub, rs_rA64_SP, frame_size_, /*is_wide*/true);
+ OpRegImm64(kOpSub, rs_rA64_SP, frame_size_);
if (Runtime::Current()->ExplicitStackOverflowChecks()) {
/* Load stack limit */
// TODO(Arm64): fix the line below:
@@ -348,7 +346,7 @@
MarkPossibleStackOverflowException();
}
} else if (frame_size_ > 0) {
- OpRegImm64(kOpSub, rs_rA64_SP, frame_size_, /*is_wide*/true);
+ OpRegImm64(kOpSub, rs_rA64_SP, frame_size_);
}
/* Need to spill any FP regs? */
@@ -391,7 +389,7 @@
UnSpillCoreRegs(rs_rA64_SP, spill_offset, core_spill_mask_);
}
- OpRegImm64(kOpAdd, rs_rA64_SP, frame_size_, /*is_wide*/true);
+ OpRegImm64(kOpAdd, rs_rA64_SP, frame_size_);
NewLIR0(kA64Ret);
}
diff --git a/compiler/dex/quick/arm64/codegen_arm64.h b/compiler/dex/quick/arm64/codegen_arm64.h
index 350e483..fddbfd7 100644
--- a/compiler/dex/quick/arm64/codegen_arm64.h
+++ b/compiler/dex/quick/arm64/codegen_arm64.h
@@ -93,6 +93,8 @@
RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) OVERRIDE;
// Required for target - Dalvik-level generators.
+ void GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation lr_shift);
void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_src2);
void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
@@ -120,6 +122,8 @@
bool GenInlinedSqrt(CallInfo* info);
bool GenInlinedPeek(CallInfo* info, OpSize size);
bool GenInlinedPoke(CallInfo* info, OpSize size);
+ void GenIntToLong(RegLocation rl_dest, RegLocation rl_src);
+ void GenNotLong(RegLocation rl_dest, RegLocation rl_src);
void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
void GenOrLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
RegLocation rl_src2);
@@ -127,6 +131,8 @@
RegLocation rl_src2);
void GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
RegLocation rl_src2);
+ void GenDivRemLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2, bool is_div);
RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div);
RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div);
void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
@@ -170,7 +176,7 @@
LIR* OpReg(OpKind op, RegStorage r_dest_src);
void OpRegCopy(RegStorage r_dest, RegStorage r_src);
LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src);
- LIR* OpRegImm64(OpKind op, RegStorage r_dest_src1, int64_t value, bool is_wide);
+ LIR* OpRegImm64(OpKind op, RegStorage r_dest_src1, int64_t value);
LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value);
LIR* OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset);
LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2);
@@ -191,8 +197,8 @@
LIR* LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size);
LIR* StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src, OpSize size);
- LIR* OpRegRegRegShift(OpKind op, int r_dest, int r_src1, int r_src2, int shift,
- bool is_wide = false);
+ LIR* OpRegRegRegShift(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2,
+ int shift);
LIR* OpRegRegShift(OpKind op, RegStorage r_dest_src1, RegStorage r_src2, int shift);
static const ArmEncodingMap EncodingMap[kA64Last];
int EncodeShift(int code, int amount);
diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc
index b0f5904..38f110e 100644
--- a/compiler/dex/quick/arm64/int_arm64.cc
+++ b/compiler/dex/quick/arm64/int_arm64.cc
@@ -53,10 +53,36 @@
rl_result = EvalLoc(rl_dest, kCoreReg, true);
OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
- NewLIR4(kA64Csinc4rrrc, rl_result.reg.GetReg(), rwzr, rwzr, kArmCondEq);
- NewLIR4(kA64Csneg4rrrc, rl_result.reg.GetReg(), rl_result.reg.GetReg(),
+ NewLIR4(WIDE(kA64Csinc4rrrc), rl_result.reg.GetReg(), rxzr, rxzr, kArmCondEq);
+ NewLIR4(WIDE(kA64Csneg4rrrc), rl_result.reg.GetReg(), rl_result.reg.GetReg(),
rl_result.reg.GetReg(), kArmCondLe);
- StoreValue(rl_dest, rl_result);
+ StoreValueWide(rl_dest, rl_result);
+}
+
+void Arm64Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_shift) {
+ OpKind op = kOpBkpt;
+ switch (opcode) {
+ case Instruction::SHL_LONG:
+ case Instruction::SHL_LONG_2ADDR:
+ op = kOpLsl;
+ break;
+ case Instruction::SHR_LONG:
+ case Instruction::SHR_LONG_2ADDR:
+ op = kOpAsr;
+ break;
+ case Instruction::USHR_LONG:
+ case Instruction::USHR_LONG_2ADDR:
+ op = kOpLsr;
+ break;
+ default:
+ LOG(FATAL) << "Unexpected case: " << opcode;
+ }
+ rl_shift = LoadValueWide(rl_shift, kCoreReg);
+ rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+ RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+ OpRegRegReg(op, rl_result.reg, rl_src1.reg, rl_shift.reg);
+ StoreValueWide(rl_dest, rl_result);
}
void Arm64Mir2Lir::GenFusedLongCmpImmBranch(BasicBlock* bb, RegLocation rl_src1,
@@ -69,7 +95,7 @@
LIR* branch = NewLIR2(WIDE(opcode), rl_src1.reg.GetLowReg(), 0);
branch->target = taken;
} else {
- OpRegImm64(kOpCmp, rl_src1.reg, val, /*is_wide*/true);
+ OpRegImm64(kOpCmp, rl_src1.reg, val);
OpCondBranch(ccode, taken);
}
}
@@ -219,7 +245,8 @@
ArmConditionCode arm_cond = ArmConditionEncoding(cond);
if (check_value == 0 && (arm_cond == kArmCondEq || arm_cond == kArmCondNe)) {
ArmOpcode opcode = (arm_cond == kArmCondEq) ? kA64Cbz2rt : kA64Cbnz2rt;
- branch = NewLIR2(opcode, reg.GetReg(), 0);
+ ArmOpcode wide = reg.Is64Bit() ? WIDE(0) : UNWIDE(0);
+ branch = NewLIR2(opcode | wide, reg.GetReg(), 0);
} else {
OpRegImm(kOpCmp, reg, check_value);
branch = NewLIR2(kA64B2ct, arm_cond, 0);
@@ -354,19 +381,16 @@
NewLIR4(kA64Smaddl4xwwx, r_lo.GetReg(), r_magic.GetReg(), rl_src.reg.GetReg(), rxzr);
switch (pattern) {
case Divide3:
- OpRegRegRegShift(kOpSub, rl_result.reg.GetReg(), r_hi.GetReg(),
- rl_src.reg.GetReg(), EncodeShift(kA64Asr, 31));
+ OpRegRegRegShift(kOpSub, rl_result.reg, r_hi, rl_src.reg, EncodeShift(kA64Asr, 31));
break;
case Divide5:
OpRegRegImm(kOpAsr, r_lo, rl_src.reg, 31);
- OpRegRegRegShift(kOpRsub, rl_result.reg.GetReg(), r_lo.GetReg(), r_hi.GetReg(),
- EncodeShift(kA64Asr, magic_table[lit].shift));
+ OpRegRegRegShift(kOpRsub, rl_result.reg, r_lo, r_hi, EncodeShift(kA64Asr, magic_table[lit].shift));
break;
case Divide7:
OpRegReg(kOpAdd, r_hi, rl_src.reg);
OpRegRegImm(kOpAsr, r_lo, rl_src.reg, 31);
- OpRegRegRegShift(kOpRsub, rl_result.reg.GetReg(), r_lo.GetReg(), r_hi.GetReg(),
- EncodeShift(kA64Asr, magic_table[lit].shift));
+ OpRegRegRegShift(kOpRsub, rl_result.reg, r_lo, r_hi, EncodeShift(kA64Asr, magic_table[lit].shift));
break;
default:
LOG(FATAL) << "Unexpected pattern: " << pattern;
@@ -405,25 +429,30 @@
return rl_result;
}
-RegLocation Arm64Mir2Lir::GenDivRem(RegLocation rl_dest, RegStorage reg1, RegStorage reg2,
+RegLocation Arm64Mir2Lir::GenDivRem(RegLocation rl_dest, RegStorage r_src1, RegStorage r_src2,
bool is_div) {
+ CHECK_EQ(r_src1.Is64Bit(), r_src2.Is64Bit());
+
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (is_div) {
- // Simple case, use sdiv instruction.
- OpRegRegReg(kOpDiv, rl_result.reg, reg1, reg2);
+ OpRegRegReg(kOpDiv, rl_result.reg, r_src1, r_src2);
} else {
- // Remainder case, use the following code:
- // temp = reg1 / reg2 - integer division
- // temp = temp * reg2
- // dest = reg1 - temp
-
- RegStorage temp = AllocTemp();
- OpRegRegReg(kOpDiv, temp, reg1, reg2);
- OpRegReg(kOpMul, temp, reg2);
- OpRegRegReg(kOpSub, rl_result.reg, reg1, temp);
+ // temp = r_src1 / r_src2
+ // dest = r_src1 - temp * r_src2
+ RegStorage temp;
+ ArmOpcode wide;
+ if (rl_result.reg.Is64Bit()) {
+ temp = AllocTempWide();
+ wide = WIDE(0);
+ } else {
+ temp = AllocTemp();
+ wide = UNWIDE(0);
+ }
+ OpRegRegReg(kOpDiv, temp, r_src1, r_src2);
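+ // msub computes rd = ra - rn * rm, i.e. r_src1 - temp * r_src2, in one instruction.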
+ NewLIR4(kA64Msub4rrrr | wide, rl_result.reg.GetReg(), temp.GetReg(),
+ r_src1.GetReg(), r_src2.GetReg());
FreeTemp(temp);
}
-
return rl_result;
}
@@ -684,17 +713,14 @@
void Arm64Mir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
RegLocation rl_result, int lit,
int first_bit, int second_bit) {
- OpRegRegRegShift(kOpAdd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), rl_src.reg.GetReg(),
- EncodeShift(kA64Lsl, second_bit - first_bit));
+ OpRegRegRegShift(kOpAdd, rl_result.reg, rl_src.reg, rl_src.reg, EncodeShift(kA64Lsl, second_bit - first_bit));
if (first_bit != 0) {
OpRegRegImm(kOpLsl, rl_result.reg, rl_result.reg, first_bit);
}
}
void Arm64Mir2Lir::GenDivZeroCheckWide(RegStorage reg) {
- DCHECK(reg.IsPair()); // TODO: support k64BitSolo.
- OpRegImm64(kOpCmp, reg, 0, /*is_wide*/true);
- GenDivZeroCheck(kCondEq);
+ LOG(FATAL) << "Unexpected use of GenDivZero for Arm64";
}
// Test suspend flag, return target of taken suspend branch
@@ -756,33 +782,51 @@
#endif
}
-void Arm64Mir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
- rl_src = LoadValueWide(rl_src, kCoreReg);
- RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- RegStorage z_reg = AllocTemp();
- LoadConstantNoClobber(z_reg, 0);
- // Check for destructive overlap
- if (rl_result.reg.GetLowReg() == rl_src.reg.GetHighReg()) {
- RegStorage t_reg = AllocTemp();
- OpRegRegReg(kOpSub, rl_result.reg.GetLow(), z_reg, rl_src.reg.GetLow());
- OpRegRegReg(kOpSbc, rl_result.reg.GetHigh(), z_reg, t_reg);
- FreeTemp(t_reg);
- } else {
- OpRegRegReg(kOpSub, rl_result.reg.GetLow(), z_reg, rl_src.reg.GetLow());
- OpRegRegReg(kOpSbc, rl_result.reg.GetHigh(), z_reg, rl_src.reg.GetHigh());
- }
- FreeTemp(z_reg);
+void Arm64Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
+ RegLocation rl_result;
+
+ rl_src = LoadValue(rl_src, kCoreReg);
+ rl_result = EvalLocWide(rl_dest, kCoreReg, true);
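+ // sbfm xd, wn, #0, #31 is the sxtw alias: sign-extend the 32-bit source to 64 bits.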
+ NewLIR4(WIDE(kA64Sbfm4rrdd), rl_result.reg.GetReg(), rl_src.reg.GetReg(), 0, 31);
+ StoreValueWide(rl_dest, rl_result);
+}
+
+void Arm64Mir2Lir::GenDivRemLong(Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2, bool is_div) {
+ RegLocation rl_result;
+ rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+ rl_src2 = LoadValueWide(rl_src2, kCoreReg);
+ GenDivZeroCheck(rl_src2.reg);
+ rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, is_div);
StoreValueWide(rl_dest, rl_result);
}
void Arm64Mir2Lir::GenLongOp(OpKind op, RegLocation rl_dest, RegLocation rl_src1,
RegLocation rl_src2) {
RegLocation rl_result;
+
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
rl_src2 = LoadValueWide(rl_src2, kCoreReg);
rl_result = EvalLocWide(rl_dest, kCoreReg, true);
- OpRegRegRegShift(op, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg(),
- ENCODE_NO_SHIFT, /*is_wide*/ true);
+ OpRegRegRegShift(op, rl_result.reg, rl_src1.reg, rl_src2.reg, ENCODE_NO_SHIFT);
+ StoreValueWide(rl_dest, rl_result);
+}
+
+void Arm64Mir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
+ RegLocation rl_result;
+
+ rl_src = LoadValueWide(rl_src, kCoreReg);
+ rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+ OpRegRegShift(kOpNeg, rl_result.reg, rl_src.reg, ENCODE_NO_SHIFT);
+ StoreValueWide(rl_dest, rl_result);
+}
+
+void Arm64Mir2Lir::GenNotLong(RegLocation rl_dest, RegLocation rl_src) {
+ RegLocation rl_result;
+
+ rl_src = LoadValueWide(rl_src, kCoreReg);
+ rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+ OpRegRegShift(kOpMvn, rl_result.reg, rl_src.reg, ENCODE_NO_SHIFT);
StoreValueWide(rl_dest, rl_result);
}
@@ -865,8 +909,7 @@
} else {
// No special indexed operation, lea + load w/ displacement
reg_ptr = AllocTemp();
- OpRegRegRegShift(kOpAdd, reg_ptr.GetReg(), rl_array.reg.GetReg(), rl_index.reg.GetReg(),
- EncodeShift(kA64Lsl, scale));
+ OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg, rl_index.reg, EncodeShift(kA64Lsl, scale));
FreeTemp(rl_index.reg);
}
rl_result = EvalLoc(rl_dest, reg_class, true);
@@ -971,8 +1014,7 @@
rl_src = LoadValue(rl_src, reg_class);
}
if (!constant_index) {
- OpRegRegRegShift(kOpAdd, reg_ptr.GetReg(), rl_array.reg.GetReg(), rl_index.reg.GetReg(),
- EncodeShift(kA64Lsl, scale));
+ OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg, rl_index.reg, EncodeShift(kA64Lsl, scale));
}
if (needs_range_check) {
if (constant_index) {
@@ -1004,167 +1046,84 @@
}
}
-
void Arm64Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode,
RegLocation rl_dest, RegLocation rl_src, RegLocation rl_shift) {
- // TODO(Arm64): check this.
- UNIMPLEMENTED(WARNING);
-
- rl_src = LoadValueWide(rl_src, kCoreReg);
+ OpKind op = kOpBkpt;
// Per spec, we only care about low 6 bits of shift amount.
int shift_amount = mir_graph_->ConstantValue(rl_shift) & 0x3f;
+ rl_src = LoadValueWide(rl_src, kCoreReg);
if (shift_amount == 0) {
StoreValueWide(rl_dest, rl_src);
return;
}
- if (BadOverlap(rl_src, rl_dest)) {
- GenShiftOpLong(opcode, rl_dest, rl_src, rl_shift);
- return;
- }
- RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+
+ RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
switch (opcode) {
case Instruction::SHL_LONG:
case Instruction::SHL_LONG_2ADDR:
- if (shift_amount == 1) {
- OpRegRegReg(kOpAdd, rl_result.reg.GetLow(), rl_src.reg.GetLow(), rl_src.reg.GetLow());
- OpRegRegReg(kOpAdc, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), rl_src.reg.GetHigh());
- } else if (shift_amount == 32) {
- OpRegCopy(rl_result.reg.GetHigh(), rl_src.reg);
- LoadConstant(rl_result.reg.GetLow(), 0);
- } else if (shift_amount > 31) {
- OpRegRegImm(kOpLsl, rl_result.reg.GetHigh(), rl_src.reg.GetLow(), shift_amount - 32);
- LoadConstant(rl_result.reg.GetLow(), 0);
- } else {
- OpRegRegImm(kOpLsl, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), shift_amount);
- OpRegRegRegShift(kOpOr, rl_result.reg.GetHighReg(), rl_result.reg.GetHighReg(), rl_src.reg.GetLowReg(),
- EncodeShift(kA64Lsr, 32 - shift_amount));
- OpRegRegImm(kOpLsl, rl_result.reg.GetLow(), rl_src.reg.GetLow(), shift_amount);
- }
+ op = kOpLsl;
break;
case Instruction::SHR_LONG:
case Instruction::SHR_LONG_2ADDR:
- if (shift_amount == 32) {
- OpRegCopy(rl_result.reg.GetLow(), rl_src.reg.GetHigh());
- OpRegRegImm(kOpAsr, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), 31);
- } else if (shift_amount > 31) {
- OpRegRegImm(kOpAsr, rl_result.reg.GetLow(), rl_src.reg.GetHigh(), shift_amount - 32);
- OpRegRegImm(kOpAsr, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), 31);
- } else {
- RegStorage t_reg = AllocTemp();
- OpRegRegImm(kOpLsr, t_reg, rl_src.reg.GetLow(), shift_amount);
- OpRegRegRegShift(kOpOr, rl_result.reg.GetLowReg(), t_reg.GetReg(), rl_src.reg.GetHighReg(),
- EncodeShift(kA64Lsl, 32 - shift_amount));
- FreeTemp(t_reg);
- OpRegRegImm(kOpAsr, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), shift_amount);
- }
+ op = kOpAsr;
break;
case Instruction::USHR_LONG:
case Instruction::USHR_LONG_2ADDR:
- if (shift_amount == 32) {
- OpRegCopy(rl_result.reg.GetLow(), rl_src.reg.GetHigh());
- LoadConstant(rl_result.reg.GetHigh(), 0);
- } else if (shift_amount > 31) {
- OpRegRegImm(kOpLsr, rl_result.reg.GetLow(), rl_src.reg.GetHigh(), shift_amount - 32);
- LoadConstant(rl_result.reg.GetHigh(), 0);
- } else {
- RegStorage t_reg = AllocTemp();
- OpRegRegImm(kOpLsr, t_reg, rl_src.reg.GetLow(), shift_amount);
- OpRegRegRegShift(kOpOr, rl_result.reg.GetLowReg(), t_reg.GetReg(), rl_src.reg.GetHighReg(),
- EncodeShift(kA64Lsl, 32 - shift_amount));
- FreeTemp(t_reg);
- OpRegRegImm(kOpLsr, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), shift_amount);
- }
+ op = kOpLsr;
break;
default:
LOG(FATAL) << "Unexpected case";
}
+ OpRegRegImm(op, rl_result.reg, rl_src.reg, shift_amount);
StoreValueWide(rl_dest, rl_result);
}
void Arm64Mir2Lir::GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_src2) {
- // TODO(Arm64): implement this.
- UNIMPLEMENTED(WARNING);
-
- if ((opcode == Instruction::SUB_LONG_2ADDR) || (opcode == Instruction::SUB_LONG)) {
+ if ((opcode == Instruction::SUB_LONG) || (opcode == Instruction::SUB_LONG_2ADDR)) {
if (!rl_src2.is_const) {
- // Don't bother with special handling for subtract from immediate.
- GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
- return;
+ return GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
}
} else {
- // Normalize
+ // Commutativity: put the constant operand in rl_src2.
if (!rl_src2.is_const) {
DCHECK(rl_src1.is_const);
std::swap(rl_src1, rl_src2);
}
}
- if (BadOverlap(rl_src1, rl_dest)) {
- GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
- return;
- }
DCHECK(rl_src2.is_const);
- // TODO(Arm64): implement this.
- // int64_t val = mir_graph_->ConstantValueWide(rl_src2);
- int32_t mod_imm_lo = -1; // ModifiedImmediate(val_lo);
- int32_t mod_imm_hi = -1; // ModifiedImmediate(val_hi);
- // Only a subset of add/sub immediate instructions set carry - so bail if we don't fit
+ OpKind op = kOpBkpt;
+ int64_t val = mir_graph_->ConstantValueWide(rl_src2);
+
switch (opcode) {
case Instruction::ADD_LONG:
case Instruction::ADD_LONG_2ADDR:
+ op = kOpAdd;
+ break;
case Instruction::SUB_LONG:
case Instruction::SUB_LONG_2ADDR:
- if ((mod_imm_lo < 0) || (mod_imm_hi < 0)) {
- GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
- return;
- }
- break;
- default:
- break;
- }
- rl_src1 = LoadValueWide(rl_src1, kCoreReg);
- RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- // NOTE: once we've done the EvalLoc on dest, we can no longer bail.
- switch (opcode) {
-#if 0
- case Instruction::ADD_LONG:
- case Instruction::ADD_LONG_2ADDR:
- NewLIR3(kThumb2AddRRI8M, rl_result.reg.GetLowReg(), rl_src1.reg.GetLowReg(), mod_imm_lo);
- NewLIR3(kThumb2AdcRRI8M, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), mod_imm_hi);
- break;
- case Instruction::OR_LONG:
- case Instruction::OR_LONG_2ADDR:
- if ((val_lo != 0) || (rl_result.reg.GetLowReg() != rl_src1.reg.GetLowReg())) {
- OpRegRegImm(kOpOr, rl_result.reg.GetLow(), rl_src1.reg.GetLow(), val_lo);
- }
- if ((val_hi != 0) || (rl_result.reg.GetHighReg() != rl_src1.reg.GetHighReg())) {
- OpRegRegImm(kOpOr, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), val_hi);
- }
- break;
- case Instruction::XOR_LONG:
- case Instruction::XOR_LONG_2ADDR:
- OpRegRegImm(kOpXor, rl_result.reg.GetLow(), rl_src1.reg.GetLow(), val_lo);
- OpRegRegImm(kOpXor, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), val_hi);
+ op = kOpSub;
break;
case Instruction::AND_LONG:
case Instruction::AND_LONG_2ADDR:
- if ((val_lo != 0xffffffff) || (rl_result.reg.GetLowReg() != rl_src1.reg.GetLowReg())) {
- OpRegRegImm(kOpAnd, rl_result.reg.GetLow(), rl_src1.reg.GetLow(), val_lo);
- }
- if ((val_hi != 0xffffffff) || (rl_result.reg.GetHighReg() != rl_src1.reg.GetHighReg())) {
- OpRegRegImm(kOpAnd, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), val_hi);
- }
+ op = kOpAnd;
break;
- case Instruction::SUB_LONG_2ADDR:
- case Instruction::SUB_LONG:
- NewLIR3(kThumb2SubRRI8M, rl_result.reg.GetLowReg(), rl_src1.reg.GetLowReg(), mod_imm_lo);
- NewLIR3(kThumb2SbcRRI8M, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), mod_imm_hi);
+ case Instruction::OR_LONG:
+ case Instruction::OR_LONG_2ADDR:
+ op = kOpOr;
break;
-#endif
+ case Instruction::XOR_LONG:
+ case Instruction::XOR_LONG_2ADDR:
+ op = kOpXor;
+ break;
default:
- LOG(FATAL) << "Unexpected opcode " << opcode;
+ LOG(FATAL) << "Unexpected opcode";
}
+
+ rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+ RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+ OpRegRegImm(op, rl_result.reg, rl_src1.reg, val);
StoreValueWide(rl_dest, rl_result);
}
diff --git a/compiler/dex/quick/arm64/target_arm64.cc b/compiler/dex/quick/arm64/target_arm64.cc
index 2b1c5e8..808060d 100644
--- a/compiler/dex/quick/arm64/target_arm64.cc
+++ b/compiler/dex/quick/arm64/target_arm64.cc
@@ -606,7 +606,7 @@
GrowableArray<RegisterInfo*>::Iterator fp_it(&reg_pool_->sp_regs_);
for (RegisterInfo* info = fp_it.Next(); info != nullptr; info = fp_it.Next()) {
int fp_reg_num = info->GetReg().GetRegNum();
- RegStorage dp_reg = RegStorage::Solo64(RegStorage::kFloatingPoint | fp_reg_num);
+ RegStorage dp_reg = RegStorage::FloatSolo64(fp_reg_num);
RegisterInfo* dp_reg_info = GetRegInfo(dp_reg);
// Double precision register's master storage should refer to itself.
DCHECK_EQ(dp_reg_info, dp_reg_info->Master());
@@ -616,6 +616,20 @@
DCHECK_EQ(info->StorageMask(), 0x1U);
}
+ // Alias 32bit W registers to corresponding 64bit X registers.
+ GrowableArray<RegisterInfo*>::Iterator w_it(&reg_pool_->core_regs_);
+ for (RegisterInfo* info = w_it.Next(); info != nullptr; info = w_it.Next()) {
+ int x_reg_num = info->GetReg().GetRegNum();
+ RegStorage x_reg = RegStorage::Solo64(x_reg_num);
+ RegisterInfo* x_reg_info = GetRegInfo(x_reg);
+ // 64bit X register's master storage should refer to itself.
+ DCHECK_EQ(x_reg_info, x_reg_info->Master());
+ // Redirect 32bit W master storage to 64bit X.
+ info->SetMaster(x_reg_info);
+ // 32bit W should show a single 32-bit mask bit, at first referring to the low half.
+ DCHECK_EQ(info->StorageMask(), 0x1U);
+ }
+
// TODO: re-enable this when we can safely save r4 over the suspension code path.
bool no_suspend = NO_SUSPEND; // || !Runtime::Current()->ExplicitSuspendChecks();
if (no_suspend) {
diff --git a/compiler/dex/quick/arm64/utility_arm64.cc b/compiler/dex/quick/arm64/utility_arm64.cc
index 39e9fad..eca0d2f 100644
--- a/compiler/dex/quick/arm64/utility_arm64.cc
+++ b/compiler/dex/quick/arm64/utility_arm64.cc
@@ -408,7 +408,7 @@
DCHECK_EQ(shift, ENCODE_NO_SHIFT);
return NewLIR4(kA64Ubfm4rrdd | wide, r_dest_src1.GetReg(), r_src2.GetReg(), 0, 15);
default:
- return OpRegRegRegShift(op, r_dest_src1.GetReg(), r_dest_src1.GetReg(), r_src2.GetReg(), shift);
+ return OpRegRegRegShift(op, r_dest_src1, r_dest_src1, r_src2, shift);
}
DCHECK(!IsPseudoLirOp(opcode));
@@ -445,8 +445,8 @@
return NULL;
}
-LIR* Arm64Mir2Lir::OpRegRegRegShift(OpKind op, int r_dest, int r_src1,
- int r_src2, int shift, bool is_wide) {
+LIR* Arm64Mir2Lir::OpRegRegRegShift(OpKind op, RegStorage r_dest, RegStorage r_src1,
+ RegStorage r_src2, int shift) {
ArmOpcode opcode = kA64Brk1d;
switch (op) {
@@ -500,21 +500,24 @@
// The instructions above belong to two kinds:
// - 4-operands instructions, where the last operand is a shift/extend immediate,
// - 3-operands instructions with no shift/extend.
- ArmOpcode widened_opcode = (is_wide) ? WIDE(opcode) : opcode;
+ ArmOpcode widened_opcode = r_dest.Is64Bit() ? WIDE(opcode) : opcode;
+ CHECK_EQ(r_dest.Is64Bit(), r_src1.Is64Bit());
+ CHECK_EQ(r_dest.Is64Bit(), r_src2.Is64Bit());
if (EncodingMap[opcode].flags & IS_QUAD_OP) {
DCHECK_EQ(shift, ENCODE_NO_SHIFT);
- return NewLIR4(widened_opcode, r_dest, r_src1, r_src2, shift);
+ return NewLIR4(widened_opcode, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg(), shift);
} else {
DCHECK(EncodingMap[opcode].flags & IS_TERTIARY_OP);
DCHECK_EQ(shift, ENCODE_NO_SHIFT);
- return NewLIR3(widened_opcode, r_dest, r_src1, r_src2);
+ return NewLIR3(widened_opcode, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg());
}
}
LIR* Arm64Mir2Lir::OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2) {
- return OpRegRegRegShift(op, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg(), ENCODE_NO_SHIFT);
+ return OpRegRegRegShift(op, r_dest, r_src1, r_src2, ENCODE_NO_SHIFT);
}
+// Should this be taking an int64_t value?
LIR* Arm64Mir2Lir::OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value) {
LIR* res;
bool neg = (value < 0);
@@ -523,6 +526,7 @@
ArmOpcode alt_opcode = kA64Brk1d;
int32_t log_imm = -1;
bool is_wide = r_dest.Is64Bit();
+ CHECK_EQ(r_dest.Is64Bit(), r_src1.Is64Bit());
ArmOpcode wide = (is_wide) ? WIDE(0) : UNWIDE(0);
switch (op) {
@@ -610,11 +614,11 @@
}
LIR* Arm64Mir2Lir::OpRegImm(OpKind op, RegStorage r_dest_src1, int value) {
- return OpRegImm64(op, r_dest_src1, static_cast<int64_t>(value), /*is_wide*/false);
+ return OpRegImm64(op, r_dest_src1, static_cast<int64_t>(value));
}
-LIR* Arm64Mir2Lir::OpRegImm64(OpKind op, RegStorage r_dest_src1, int64_t value, bool is_wide) {
- ArmOpcode wide = (is_wide) ? WIDE(0) : UNWIDE(0);
+LIR* Arm64Mir2Lir::OpRegImm64(OpKind op, RegStorage r_dest_src1, int64_t value) {
+ ArmOpcode wide = (r_dest_src1.Is64Bit()) ? WIDE(0) : UNWIDE(0);
ArmOpcode opcode = kA64Brk1d;
ArmOpcode neg_opcode = kA64Brk1d;
bool shift;
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index de55a05..7e3c8ce 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -1595,7 +1595,7 @@
rl_result = EvalLoc(rl_dest, kCoreReg, true);
OpRegReg(op, rl_result.reg, rl_src1.reg);
} else {
- if (shift_op) {
+ if ((shift_op) && (cu_->instruction_set != kArm64)) {
rl_src2 = LoadValue(rl_src2, kCoreReg);
RegStorage t_reg = AllocTemp();
OpRegRegImm(kOpAnd, t_reg, rl_src2.reg, 31);
@@ -1613,7 +1613,7 @@
StoreValue(rl_dest, rl_result);
} else {
bool done = false; // Set to true if we happen to find a way to use a real instruction.
- if (cu_->instruction_set == kMips) {
+ if (cu_->instruction_set == kMips || cu_->instruction_set == kArm64) {
rl_src1 = LoadValue(rl_src1, kCoreReg);
rl_src2 = LoadValue(rl_src2, kCoreReg);
if (check_zero) {
@@ -1889,7 +1889,7 @@
}
bool done = false;
- if (cu_->instruction_set == kMips) {
+ if (cu_->instruction_set == kMips || cu_->instruction_set == kArm64) {
rl_src = LoadValue(rl_src, kCoreReg);
rl_result = GenDivRemLit(rl_dest, rl_src.reg, lit, is_div);
done = true;
@@ -1952,6 +1952,10 @@
switch (opcode) {
case Instruction::NOT_LONG:
+ if (cu->instruction_set == kArm64) {
+ mir_to_lir->GenNotLong(rl_dest, rl_src2);
+ return;
+ }
rl_src2 = mir_to_lir->LoadValueWide(rl_src2, kCoreReg);
rl_result = mir_to_lir->EvalLoc(rl_dest, kCoreReg, true);
// Check for destructive overlap
@@ -1998,6 +2002,10 @@
break;
case Instruction::DIV_LONG:
case Instruction::DIV_LONG_2ADDR:
+ if (cu->instruction_set == kArm64) {
+ mir_to_lir->GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ true);
+ return;
+ }
call_out = true;
check_zero = true;
ret_reg = mir_to_lir->TargetReg(kRet0).GetReg();
@@ -2005,6 +2013,10 @@
break;
case Instruction::REM_LONG:
case Instruction::REM_LONG_2ADDR:
+ if (cu->instruction_set == kArm64) {
+ mir_to_lir->GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ false);
+ return;
+ }
call_out = true;
check_zero = true;
func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pLmod);
@@ -2014,7 +2026,8 @@
break;
case Instruction::AND_LONG_2ADDR:
case Instruction::AND_LONG:
- if (cu->instruction_set == kX86 || cu->instruction_set == kX86_64) {
+ if (cu->instruction_set == kX86 || cu->instruction_set == kX86_64 ||
+ cu->instruction_set == kArm64) {
return mir_to_lir->GenAndLong(opcode, rl_dest, rl_src1, rl_src2);
}
first_op = kOpAnd;
@@ -2022,7 +2035,8 @@
break;
case Instruction::OR_LONG:
case Instruction::OR_LONG_2ADDR:
- if (cu->instruction_set == kX86 || cu->instruction_set == kX86_64) {
+ if (cu->instruction_set == kX86 || cu->instruction_set == kX86_64 ||
+ cu->instruction_set == kArm64) {
mir_to_lir->GenOrLong(opcode, rl_dest, rl_src1, rl_src2);
return;
}
@@ -2031,7 +2045,8 @@
break;
case Instruction::XOR_LONG:
case Instruction::XOR_LONG_2ADDR:
- if (cu->instruction_set == kX86 || cu->instruction_set == kX86_64) {
+ if (cu->instruction_set == kX86 || cu->instruction_set == kX86_64 ||
+ cu->instruction_set == kArm64) {
mir_to_lir->GenXorLong(opcode, rl_dest, rl_src1, rl_src2);
return;
}
diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h
index 2b57b35..e462173 100644
--- a/compiler/dex/quick/mips/codegen_mips.h
+++ b/compiler/dex/quick/mips/codegen_mips.h
@@ -118,6 +118,7 @@
bool GenInlinedSqrt(CallInfo* info);
bool GenInlinedPeek(CallInfo* info, OpSize size);
bool GenInlinedPoke(CallInfo* info, OpSize size);
+ void GenNotLong(RegLocation rl_dest, RegLocation rl_src);
void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
void GenOrLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
RegLocation rl_src2);
@@ -125,6 +126,8 @@
RegLocation rl_src2);
void GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
RegLocation rl_src2);
+ void GenDivRemLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2, bool is_div);
RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div);
RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div);
void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc
index 55e93d7..beaf6bb 100644
--- a/compiler/dex/quick/mips/int_mips.cc
+++ b/compiler/dex/quick/mips/int_mips.cc
@@ -431,6 +431,15 @@
StoreValueWide(rl_dest, rl_result);
}
+void MipsMir2Lir::GenNotLong(RegLocation rl_dest, RegLocation rl_src) {
+ LOG(FATAL) << "Unexpected use GenNotLong()";
+}
+
+void MipsMir2Lir::GenDivRemLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2, bool is_div) {
+ LOG(FATAL) << "Unexpected use GenDivRemLong()";
+}
+
void MipsMir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
rl_src = LoadValueWide(rl_src, kCoreReg);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 3584c33..4cebb7c 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -775,7 +775,7 @@
RegLocation rl_src2, LIR* taken, LIR* fall_through);
void GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src,
LIR* taken, LIR* fall_through);
- void GenIntToLong(RegLocation rl_dest, RegLocation rl_src);
+ virtual void GenIntToLong(RegLocation rl_dest, RegLocation rl_src);
void GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src);
void GenNewArray(uint32_t type_idx, RegLocation rl_dest,
@@ -800,7 +800,7 @@
void GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_src);
void GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_src2);
- void GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
+ virtual void GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_shift);
void GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src, int lit);
@@ -1170,6 +1170,7 @@
virtual bool GenInlinedSqrt(CallInfo* info) = 0;
virtual bool GenInlinedPeek(CallInfo* info, OpSize size) = 0;
virtual bool GenInlinedPoke(CallInfo* info, OpSize size) = 0;
+ virtual void GenNotLong(RegLocation rl_dest, RegLocation rl_src) = 0;
virtual void GenNegLong(RegLocation rl_dest, RegLocation rl_src) = 0;
virtual void GenOrLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
RegLocation rl_src2) = 0;
@@ -1177,6 +1178,8 @@
RegLocation rl_src2) = 0;
virtual void GenXorLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
RegLocation rl_src2) = 0;
+ virtual void GenDivRemLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2, bool is_div) = 0;
virtual RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi,
bool is_div) = 0;
virtual RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit,
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index 2c51c1f..8c0f2bb 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -447,8 +447,11 @@
reg = FindLiveReg(wide ? reg_pool_->dp_regs_ : reg_pool_->sp_regs_, s_reg);
}
if (!reg.Valid() && (reg_class != kFPReg)) {
- // TODO: add 64-bit core pool similar to above.
- reg = FindLiveReg(reg_pool_->core_regs_, s_reg);
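+ // On 64-bit targets, wide core values live in the core64 pool.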
+ if (Is64BitInstructionSet(cu_->instruction_set)) {
+ reg = FindLiveReg(wide ? reg_pool_->core64_regs_ : reg_pool_->core_regs_, s_reg);
+ } else {
+ reg = FindLiveReg(reg_pool_->core_regs_, s_reg);
+ }
}
if (reg.Valid()) {
if (wide && !reg.IsFloat() && !Is64BitInstructionSet(cu_->instruction_set)) {
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 3070edd..72cdbbd 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -118,6 +118,7 @@
bool GenInlinedSqrt(CallInfo* info);
bool GenInlinedPeek(CallInfo* info, OpSize size);
bool GenInlinedPoke(CallInfo* info, OpSize size);
+ void GenNotLong(RegLocation rl_dest, RegLocation rl_src);
void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
void GenOrLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
RegLocation rl_src2);
@@ -125,6 +126,8 @@
RegLocation rl_src2);
void GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
RegLocation rl_src2);
+ void GenDivRemLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2, bool is_div);
// TODO: collapse reg_lo, reg_hi
RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div);
RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div);
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index a6ccc99..b70922c 100644
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -1372,6 +1372,15 @@
GenLongArith(rl_dest, rl_src1, rl_src2, opcode, true);
}
+void X86Mir2Lir::GenNotLong(RegLocation rl_dest, RegLocation rl_src) {
+ LOG(FATAL) << "Unexpected use GenNotLong()";
+}
+
+void X86Mir2Lir::GenDivRemLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2, bool is_div) {
+ LOG(FATAL) << "Unexpected use GenDivRemLong()";
+}
+
void X86Mir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
rl_src = LoadValueWide(rl_src, kCoreReg);
RegLocation rl_result = ForceTempWide(rl_src);
diff --git a/compiler/dex/reg_storage.h b/compiler/dex/reg_storage.h
index 3387c50..df21343 100644
--- a/compiler/dex/reg_storage.h
+++ b/compiler/dex/reg_storage.h
@@ -312,7 +312,7 @@
case k256BitSolo: return 32;
case k512BitSolo: return 64;
case k1024BitSolo: return 128;
- default: LOG(FATAL) << "Unexpected shap";
+ default: LOG(FATAL) << "Unexpected shape";
}
return 0;
}
diff --git a/runtime/arch/arm64/entrypoints_init_arm64.cc b/runtime/arch/arm64/entrypoints_init_arm64.cc
index 2a5c7d1..cb9f53b 100644
--- a/runtime/arch/arm64/entrypoints_init_arm64.cc
+++ b/runtime/arch/arm64/entrypoints_init_arm64.cc
@@ -84,12 +84,6 @@
// Double-precision FP arithmetics.
extern "C" double fmod(double a, double b); // REM_DOUBLE[_2ADDR]
-// Long long arithmetics - REM_LONG[_2ADDR] and DIV_LONG[_2ADDR]
-extern "C" int64_t art_quick_mul_long(int64_t, int64_t);
-extern "C" uint64_t art_quick_shl_long(uint64_t, uint32_t);
-extern "C" uint64_t art_quick_shr_long(uint64_t, uint32_t);
-extern "C" uint64_t art_quick_ushr_long(uint64_t, uint32_t);
-
// Intrinsic entrypoints.
extern "C" int32_t __memcmp16(void*, void*, int32_t);
extern "C" int32_t art_quick_indexof(void*, uint32_t, uint32_t, uint32_t);
@@ -199,10 +193,10 @@
qpoints->pF2l = NULL;
qpoints->pLdiv = NULL;
qpoints->pLmod = NULL;
- qpoints->pLmul = art_quick_mul_long;
- qpoints->pShlLong = art_quick_shl_long;
- qpoints->pShrLong = art_quick_shr_long;
- qpoints->pUshrLong = art_quick_ushr_long;
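+ // Long multiplies and shifts are now compiled inline, so no runtime helpers are needed.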
+ qpoints->pLmul = NULL;
+ qpoints->pShlLong = NULL;
+ qpoints->pShrLong = NULL;
+ qpoints->pUshrLong = NULL;
// Intrinsics
qpoints->pIndexOf = art_quick_indexof;
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index ac922dd..7f31fb6 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -1611,10 +1611,6 @@
UNIMPLEMENTED art_quick_instrumentation_entry
UNIMPLEMENTED art_quick_instrumentation_exit
UNIMPLEMENTED art_quick_deoptimize
-UNIMPLEMENTED art_quick_mul_long
-UNIMPLEMENTED art_quick_shl_long
-UNIMPLEMENTED art_quick_shr_long
-UNIMPLEMENTED art_quick_ushr_long
UNIMPLEMENTED art_quick_indexof
/*