Diffstat (limited to 'compiler')
-rw-r--r--  compiler/Android.mk | 6
-rw-r--r--  compiler/dex/compiler_enums.h | 1
-rw-r--r--  compiler/dex/mir_optimization.cc | 62
-rw-r--r--  compiler/dex/quick/arm/arm_lir.h | 1
-rw-r--r--  compiler/dex/quick/arm/assemble_arm.cc | 33
-rw-r--r--  compiler/dex/quick/arm/call_arm.cc | 34
-rw-r--r--  compiler/dex/quick/arm/codegen_arm.h | 9
-rw-r--r--  compiler/dex/quick/arm/int_arm.cc | 2
-rw-r--r--  compiler/dex/quick/arm/utility_arm.cc | 277
-rw-r--r--  compiler/dex/quick/codegen_util.cc | 26
-rw-r--r--  compiler/dex/quick/gen_common.cc | 6
-rw-r--r--  compiler/dex/quick/mir_to_lir.cc | 47
-rw-r--r--  compiler/dex/quick/mir_to_lir.h | 2
-rw-r--r--  compiler/dex/quick/x86/codegen_x86.h | 1
-rwxr-xr-x  compiler/dex/quick/x86/int_x86.cc | 20
-rw-r--r--  compiler/dex/verification_results.cc | 9
-rw-r--r--  compiler/dex/verification_results.h | 1
-rw-r--r--  compiler/driver/compiler_driver-inl.h | 6
-rw-r--r--  compiler/driver/compiler_driver.cc | 221
-rw-r--r--  compiler/driver/compiler_driver.h | 7
-rw-r--r--  compiler/driver/compiler_options.h | 17
-rw-r--r--  compiler/driver/dex_compilation_unit.h | 6
-rw-r--r--  compiler/image_writer.cc | 7
-rw-r--r--  compiler/oat_writer.cc | 2
-rw-r--r--  compiler/optimizing/builder.cc | 161
-rw-r--r--  compiler/optimizing/builder.h | 10
-rw-r--r--  compiler/optimizing/code_generator.cc | 32
-rw-r--r--  compiler/optimizing/code_generator.h | 9
-rw-r--r--  compiler/optimizing/code_generator_arm.cc | 580
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc | 411
-rw-r--r--  compiler/optimizing/code_generator_arm64.h | 9
-rw-r--r--  compiler/optimizing/code_generator_x86.cc | 641
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc | 614
-rw-r--r--  compiler/optimizing/find_loops_test.cc | 2
-rw-r--r--  compiler/optimizing/gvn.cc | 98
-rw-r--r--  compiler/optimizing/gvn.h | 72
-rw-r--r--  compiler/optimizing/gvn_test.cc | 4
-rw-r--r--  compiler/optimizing/instruction_simplifier.cc | 9
-rw-r--r--  compiler/optimizing/linearize_test.cc | 2
-rw-r--r--  compiler/optimizing/live_ranges_test.cc | 2
-rw-r--r--  compiler/optimizing/liveness_test.cc | 2
-rw-r--r--  compiler/optimizing/locations.h | 9
-rw-r--r--  compiler/optimizing/nodes.cc | 59
-rw-r--r--  compiler/optimizing/nodes.h | 43
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc | 39
-rw-r--r--  compiler/optimizing/register_allocator.cc | 157
-rw-r--r--  compiler/optimizing/register_allocator_test.cc | 6
-rw-r--r--  compiler/optimizing/ssa_builder.h | 14
-rw-r--r--  compiler/trampolines/trampoline_compiler.cc | 1
-rw-r--r--  compiler/utils/arena_allocator.cc | 9
-rw-r--r--  compiler/utils/arena_allocator.h | 11
-rw-r--r--  compiler/utils/arm/assembler_arm.cc | 30
-rw-r--r--  compiler/utils/arm/assembler_arm.h | 41
-rw-r--r--  compiler/utils/arm/assembler_arm32.cc | 51
-rw-r--r--  compiler/utils/arm/assembler_arm32.h | 7
-rw-r--r--  compiler/utils/arm/assembler_arm32_test.cc | 11
-rw-r--r--  compiler/utils/arm/assembler_thumb2.cc | 110
-rw-r--r--  compiler/utils/arm/assembler_thumb2.h | 6
-rw-r--r--  compiler/utils/arm/assembler_thumb2_test.cc | 10
-rw-r--r--  compiler/utils/arm64/assembler_arm64.cc | 12
-rw-r--r--  compiler/utils/x86/assembler_x86.cc | 31
-rw-r--r--  compiler/utils/x86/assembler_x86.h | 5
-rw-r--r--  compiler/utils/x86/assembler_x86_test.cc | 88
-rw-r--r--  compiler/utils/x86/constants_x86.h | 3
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64.cc | 42
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64.h | 4
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64_test.cc | 8
-rw-r--r--  compiler/utils/x86_64/constants_x86_64.h | 3
68 files changed, 2868 insertions, 1403 deletions
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 84176a177b..70c7e521a6 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -279,7 +279,11 @@ $$(ENUM_OPERATOR_OUT_GEN): $$(GENERATED_SRC_DIR)/%_operator_out.cc : $(LOCAL_PAT
LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_build.mk
LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.mk
# Vixl assembly support for ARM64 targets.
- LOCAL_SHARED_LIBRARIES += libvixl
+ ifeq ($$(art_ndebug_or_debug),debug)
+ LOCAL_SHARED_LIBRARIES += libvixld
+ else
+ LOCAL_SHARED_LIBRARIES += libvixl
+ endif
ifeq ($$(art_target_or_host),target)
# For atrace.
LOCAL_SHARED_LIBRARIES += libcutils
diff --git a/compiler/dex/compiler_enums.h b/compiler/dex/compiler_enums.h
index 4d377df09a..3b3170e511 100644
--- a/compiler/dex/compiler_enums.h
+++ b/compiler/dex/compiler_enums.h
@@ -614,7 +614,6 @@ enum FixupKind {
kFixupVLoad, // FP load which *may* be pc-relative.
kFixupCBxZ, // Cbz, Cbnz.
kFixupTBxZ, // Tbz, Tbnz.
- kFixupPushPop, // Not really pc relative, but changes size based on args.
kFixupCondBranch, // Conditional branch
kFixupT1Branch, // Thumb1 Unconditional branch
kFixupT2Branch, // Thumb2 Unconditional branch
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 1f630f78bb..55f2abcd3f 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -399,6 +399,28 @@ CompilerTemp* MIRGraph::GetNewCompilerTemp(CompilerTempType ct_type, bool wide)
return compiler_temp;
}
+static bool EvaluateBranch(Instruction::Code opcode, int32_t src1, int32_t src2) {
+ bool is_taken;
+ switch (opcode) {
+ case Instruction::IF_EQ: is_taken = (src1 == src2); break;
+ case Instruction::IF_NE: is_taken = (src1 != src2); break;
+ case Instruction::IF_LT: is_taken = (src1 < src2); break;
+ case Instruction::IF_GE: is_taken = (src1 >= src2); break;
+ case Instruction::IF_GT: is_taken = (src1 > src2); break;
+ case Instruction::IF_LE: is_taken = (src1 <= src2); break;
+ case Instruction::IF_EQZ: is_taken = (src1 == 0); break;
+ case Instruction::IF_NEZ: is_taken = (src1 != 0); break;
+ case Instruction::IF_LTZ: is_taken = (src1 < 0); break;
+ case Instruction::IF_GEZ: is_taken = (src1 >= 0); break;
+ case Instruction::IF_GTZ: is_taken = (src1 > 0); break;
+ case Instruction::IF_LEZ: is_taken = (src1 <= 0); break;
+ default:
+ LOG(FATAL) << "Unexpected opcode " << opcode;
+ UNREACHABLE();
+ }
+ return is_taken;
+}
+
/* Do some MIR-level extended basic block optimizations */
bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
if (bb->block_type == kDead) {
@@ -424,6 +446,46 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
// Look for interesting opcodes, skip otherwise
Instruction::Code opcode = mir->dalvikInsn.opcode;
switch (opcode) {
+ case Instruction::IF_EQ:
+ case Instruction::IF_NE:
+ case Instruction::IF_LT:
+ case Instruction::IF_GE:
+ case Instruction::IF_GT:
+ case Instruction::IF_LE:
+ if (!IsConst(mir->ssa_rep->uses[1])) {
+ break;
+ }
+ FALLTHROUGH_INTENDED;
+ case Instruction::IF_EQZ:
+ case Instruction::IF_NEZ:
+ case Instruction::IF_LTZ:
+ case Instruction::IF_GEZ:
+ case Instruction::IF_GTZ:
+ case Instruction::IF_LEZ:
+ // Result known at compile time?
+ if (IsConst(mir->ssa_rep->uses[0])) {
+ int32_t rhs = (mir->ssa_rep->num_uses == 2) ? ConstantValue(mir->ssa_rep->uses[1]) : 0;
+ bool is_taken = EvaluateBranch(opcode, ConstantValue(mir->ssa_rep->uses[0]), rhs);
+ BasicBlockId edge_to_kill = is_taken ? bb->fall_through : bb->taken;
+ if (is_taken) {
+ // Replace with GOTO.
+ bb->fall_through = NullBasicBlockId;
+ mir->dalvikInsn.opcode = Instruction::GOTO;
+ mir->dalvikInsn.vA =
+ IsInstructionIfCc(opcode) ? mir->dalvikInsn.vC : mir->dalvikInsn.vB;
+ } else {
+ // Make NOP.
+ bb->taken = NullBasicBlockId;
+ mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
+ }
+ mir->ssa_rep->num_uses = 0;
+ BasicBlock* successor_to_unlink = GetBasicBlock(edge_to_kill);
+ successor_to_unlink->ErasePredecessor(bb->id);
+ if (successor_to_unlink->predecessors.empty()) {
+ successor_to_unlink->KillUnreachable(this);
+ }
+ }
+ break;
case Instruction::CMPL_FLOAT:
case Instruction::CMPL_DOUBLE:
case Instruction::CMPG_FLOAT:
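For reference (not part of the patch): a minimal standalone sketch of the constant-branch folding that the mir_optimization.cc hunk above introduces, using a simplified opcode enum in place of ART's Instruction::Code. When both operands are constant, the pass rewrites the branch as a GOTO (always taken) or a NOP (never taken) and unlinks the dead successor edge.

// Illustrative analogue of the EvaluateBranch() helper added above.
#include <cstdint>
#include <iostream>

enum class IfOp { kEq, kNe, kLt, kGe, kGt, kLe };

// Returns whether the branch is statically taken for constant operands.
static bool EvaluateBranch(IfOp op, int32_t src1, int32_t src2) {
  switch (op) {
    case IfOp::kEq: return src1 == src2;
    case IfOp::kNe: return src1 != src2;
    case IfOp::kLt: return src1 <  src2;
    case IfOp::kGe: return src1 >= src2;
    case IfOp::kGt: return src1 >  src2;
    case IfOp::kLe: return src1 <= src2;
  }
  return false;  // Unreachable for valid opcodes.
}

int main() {
  // IF_GE 7, 3 is always taken: the pass replaces it with a GOTO and kills
  // the fall-through edge.
  std::cout << std::boolalpha << EvaluateBranch(IfOp::kGe, 7, 3) << "\n";  // true
  // IF_EQZ on a non-zero constant is never taken (the IF_*Z forms compare
  // against zero): the branch becomes a NOP and the taken edge is killed.
  std::cout << EvaluateBranch(IfOp::kEq, 5, 0) << "\n";                    // false
  return 0;
}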
diff --git a/compiler/dex/quick/arm/arm_lir.h b/compiler/dex/quick/arm/arm_lir.h
index 4c7f87433d..b9d9a1111d 100644
--- a/compiler/dex/quick/arm/arm_lir.h
+++ b/compiler/dex/quick/arm/arm_lir.h
@@ -488,6 +488,7 @@ enum ArmOpcode {
kThumb2BicRRI8M, // bic rd, rn, #<const> [11110] i [000010] rn[19..16] [0] imm3[14..12] rd[11..8] imm8[7..0].
kThumb2AndRRI8M, // and rd, rn, #<const> [11110] i [000000] rn[19..16] [0] imm3[14..12] rd[11..8] imm8[7..0].
kThumb2OrrRRI8M, // orr rd, rn, #<const> [11110] i [000100] rn[19..16] [0] imm3[14..12] rd[11..8] imm8[7..0].
+ kThumb2OrnRRI8M, // orn rd, rn, #<const> [11110] i [000110] rn[19..16] [0] imm3[14..12] rd[11..8] imm8[7..0].
kThumb2EorRRI8M, // eor rd, rn, #<const> [11110] i [001000] rn[19..16] [0] imm3[14..12] rd[11..8] imm8[7..0].
kThumb2AddRRI8M, // add rd, rn, #<const> [11110] i [010001] rn[19..16] [0] imm3[14..12] rd[11..8] imm8[7..0].
kThumb2AdcRRI8M, // adc rd, rn, #<const> [11110] i [010101] rn[19..16] [0] imm3[14..12] rd[11..8] imm8[7..0].
diff --git a/compiler/dex/quick/arm/assemble_arm.cc b/compiler/dex/quick/arm/assemble_arm.cc
index 4e20d76604..de93e2602b 100644
--- a/compiler/dex/quick/arm/assemble_arm.cc
+++ b/compiler/dex/quick/arm/assemble_arm.cc
@@ -671,12 +671,12 @@ const ArmEncodingMap ArmMir2Lir::EncodingMap[kArmLast] = {
kFmtBitBlt, 15, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
kFmtUnused, -1, -1,
IS_UNARY_OP | REG_DEF_SP | REG_USE_SP | REG_DEF_LIST0
- | IS_LOAD | NEEDS_FIXUP, "pop", "<!0R>", 4, kFixupPushPop),
+ | IS_LOAD, "pop", "<!0R>", 4, kFixupNone),
ENCODING_MAP(kThumb2Push, 0xe92d0000,
kFmtBitBlt, 15, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
kFmtUnused, -1, -1,
IS_UNARY_OP | REG_DEF_SP | REG_USE_SP | REG_USE_LIST0
- | IS_STORE | NEEDS_FIXUP, "push", "<!0R>", 4, kFixupPushPop),
+ | IS_STORE, "push", "<!0R>", 4, kFixupNone),
ENCODING_MAP(kThumb2CmpRI8M, 0xf1b00f00,
kFmtBitBlt, 19, 16, kFmtModImm, -1, -1, kFmtUnused, -1, -1,
kFmtUnused, -1, -1,
@@ -788,6 +788,10 @@ const ArmEncodingMap ArmMir2Lir::EncodingMap[kArmLast] = {
kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtModImm, -1, -1,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
"orr", "!0C, !1C, #!2m", 4, kFixupNone),
+ ENCODING_MAP(kThumb2OrnRRI8M, 0xf0600000,
+ kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtModImm, -1, -1,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "orn", "!0C, !1C, #!2m", 4, kFixupNone),
ENCODING_MAP(kThumb2EorRRI8M, 0xf0800000,
kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtModImm, -1, -1,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
@@ -1396,31 +1400,6 @@ void ArmMir2Lir::AssembleLIR() {
}
break;
}
- case kFixupPushPop: {
- if (__builtin_popcount(lir->operands[0]) == 1) {
- /*
- * The standard push/pop multiple instruction
- * requires at least two registers in the list.
- * If we've got just one, switch to the single-reg
- * encoding.
- */
- lir->opcode = (lir->opcode == kThumb2Push) ? kThumb2Push1 :
- kThumb2Pop1;
- int reg = 0;
- while (lir->operands[0]) {
- if (lir->operands[0] & 0x1) {
- break;
- } else {
- reg++;
- lir->operands[0] >>= 1;
- }
- }
- lir->operands[0] = reg;
- // This won't change again, don't bother unlinking, just reset fixup kind
- lir->flags.fixup = kFixupNone;
- }
- break;
- }
case kFixupCondBranch: {
LIR *target_lir = lir->target;
int32_t delta = 0;
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index f15d707a5c..99b2166030 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -347,7 +347,20 @@ void ArmMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
}
}
/* Spill core callee saves */
- NewLIR1(kThumb2Push, core_spill_mask_);
+ if (core_spill_mask_ == 0u) {
+ // Nothing to spill.
+ } else if ((core_spill_mask_ & ~(0xffu | (1u << rs_rARM_LR.GetRegNum()))) == 0u) {
+ // Spilling only low regs and/or LR, use 16-bit PUSH.
+ constexpr int lr_bit_shift = rs_rARM_LR.GetRegNum() - 8;
+ NewLIR1(kThumbPush,
+ (core_spill_mask_ & ~(1u << rs_rARM_LR.GetRegNum())) |
+ ((core_spill_mask_ & (1u << rs_rARM_LR.GetRegNum())) >> lr_bit_shift));
+ } else if (IsPowerOfTwo(core_spill_mask_)) {
+ // kThumb2Push cannot be used to spill a single register.
+ NewLIR1(kThumb2Push1, CTZ(core_spill_mask_));
+ } else {
+ NewLIR1(kThumb2Push, core_spill_mask_);
+ }
/* Need to spill any FP regs? */
if (num_fp_spills_) {
/*
@@ -444,13 +457,26 @@ void ArmMir2Lir::GenExitSequence() {
if (num_fp_spills_) {
NewLIR1(kThumb2VPopCS, num_fp_spills_);
}
- if (core_spill_mask_ & (1 << rs_rARM_LR.GetRegNum())) {
+ if ((core_spill_mask_ & (1 << rs_rARM_LR.GetRegNum())) != 0) {
/* Unspill rARM_LR to rARM_PC */
core_spill_mask_ &= ~(1 << rs_rARM_LR.GetRegNum());
core_spill_mask_ |= (1 << rs_rARM_PC.GetRegNum());
}
- NewLIR1(kThumb2Pop, core_spill_mask_);
- if (!(core_spill_mask_ & (1 << rs_rARM_PC.GetRegNum()))) {
+ if (core_spill_mask_ == 0u) {
+ // Nothing to unspill.
+ } else if ((core_spill_mask_ & ~(0xffu | (1u << rs_rARM_PC.GetRegNum()))) == 0u) {
+ // Unspilling only low regs and/or PC, use 16-bit POP.
+ constexpr int pc_bit_shift = rs_rARM_PC.GetRegNum() - 8;
+ NewLIR1(kThumbPop,
+ (core_spill_mask_ & ~(1u << rs_rARM_PC.GetRegNum())) |
+ ((core_spill_mask_ & (1u << rs_rARM_PC.GetRegNum())) >> pc_bit_shift));
+ } else if (IsPowerOfTwo(core_spill_mask_)) {
+ // kThumb2Pop cannot be used to unspill a single register.
+ NewLIR1(kThumb2Pop1, CTZ(core_spill_mask_));
+ } else {
+ NewLIR1(kThumb2Pop, core_spill_mask_);
+ }
+ if ((core_spill_mask_ & (1 << rs_rARM_PC.GetRegNum())) == 0) {
/* We didn't pop to rARM_PC, so must do a bv rARM_LR */
NewLIR1(kThumbBx, rs_rARM_LR.GetReg());
}
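For reference (not part of the patch): a self-contained sketch of how the new entry/exit sequences pick a PUSH encoding from the core spill mask. The bit layout assumed here is the standard Thumb one (r0-r7 in bits 0-7, LR folded into bit 8 of the 16-bit PUSH); EmitSpill is an illustrative name, not an ART function.

#include <cstdint>
#include <cstdio>

constexpr uint32_t kLrBit = 1u << 14;  // LR is register 14 in the core spill mask.
constexpr int kLrShift = 14 - 8;       // The 16-bit PUSH carries LR in operand bit 8.

void EmitSpill(uint32_t core_spill_mask) {
  if (core_spill_mask == 0u) {
    // Nothing to spill.
  } else if ((core_spill_mask & ~(0xffu | kLrBit)) == 0u) {
    // Only r0-r7 and/or LR: the 16-bit PUSH can encode this directly.
    uint32_t operand = (core_spill_mask & ~kLrBit) | ((core_spill_mask & kLrBit) >> kLrShift);
    std::printf("16-bit PUSH, operand 0x%03x\n", operand);
  } else if ((core_spill_mask & (core_spill_mask - 1)) == 0u) {
    // A single high register: PUSH (multiple) needs >= 2 registers, use the 1-reg form.
    std::printf("single-register push of r%d\n", __builtin_ctz(core_spill_mask));
  } else {
    std::printf("32-bit Thumb2 PUSH, mask 0x%04x\n", core_spill_mask);
  }
}

int main() {
  EmitSpill((1u << 5) | (1u << 6) | kLrBit);   // r5, r6, LR  -> 16-bit PUSH, operand 0x160
  EmitSpill(1u << 10);                         // only r10    -> single-register push
  EmitSpill((1u << 4) | (1u << 10) | kLrBit);  // r4, r10, LR -> 32-bit Thumb2 PUSH
  return 0;
}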
diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h
index e8d0c32ffd..0bc4c3b7bf 100644
--- a/compiler/dex/quick/arm/codegen_arm.h
+++ b/compiler/dex/quick/arm/codegen_arm.h
@@ -250,10 +250,11 @@ class ArmMir2Lir FINAL : public Mir2Lir {
int EncodeShift(int code, int amount);
int ModifiedImmediate(uint32_t value);
ArmConditionCode ArmConditionEncoding(ConditionCode code);
- bool InexpensiveConstantInt(int32_t value);
- bool InexpensiveConstantFloat(int32_t value);
- bool InexpensiveConstantLong(int64_t value);
- bool InexpensiveConstantDouble(int64_t value);
+ bool InexpensiveConstantInt(int32_t value) OVERRIDE;
+ bool InexpensiveConstantInt(int32_t value, Instruction::Code opcode) OVERRIDE;
+ bool InexpensiveConstantFloat(int32_t value) OVERRIDE;
+ bool InexpensiveConstantLong(int64_t value) OVERRIDE;
+ bool InexpensiveConstantDouble(int64_t value) OVERRIDE;
RegStorage AllocPreservedDouble(int s_reg);
RegStorage AllocPreservedSingle(int s_reg);
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index e38dbf5a8d..1a7b4395d8 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -592,7 +592,6 @@ bool ArmMir2Lir::GetEasyMultiplyOp(int lit, ArmMir2Lir::EasyMultiplyOp* op) {
// Try to convert *lit to 1~2 RegRegRegShift/RegRegShift forms.
bool ArmMir2Lir::GetEasyMultiplyTwoOps(int lit, EasyMultiplyOp* ops) {
- GetEasyMultiplyOp(lit, &ops[0]);
if (GetEasyMultiplyOp(lit, &ops[0])) {
ops[1].op = kOpInvalid;
ops[1].shift = 0;
@@ -1162,6 +1161,7 @@ void ArmMir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
// Check for destructive overlap
if (rl_result.reg.GetLowReg() == rl_src.reg.GetHighReg()) {
RegStorage t_reg = AllocTemp();
+ OpRegCopy(t_reg, rl_result.reg.GetLow());
OpRegRegReg(kOpSub, rl_result.reg.GetLow(), z_reg, rl_src.reg.GetLow());
OpRegRegReg(kOpSbc, rl_result.reg.GetHigh(), z_reg, t_reg);
FreeTemp(t_reg);
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index 0c7812ba09..36d065f0db 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -97,31 +97,11 @@ LIR* ArmMir2Lir::LoadFPConstantValue(int r_dest, int value) {
return load_pc_rel;
}
-static int LeadingZeros(uint32_t val) {
- uint32_t alt;
- int32_t n;
- int32_t count;
-
- count = 16;
- n = 32;
- do {
- alt = val >> count;
- if (alt != 0) {
- n = n - count;
- val = alt;
- }
- count >>= 1;
- } while (count);
- return n - val;
-}
-
/*
* Determine whether value can be encoded as a Thumb2 modified
* immediate. If not, return -1. If so, return i:imm3:a:bcdefgh form.
*/
int ArmMir2Lir::ModifiedImmediate(uint32_t value) {
- int32_t z_leading;
- int32_t z_trailing;
uint32_t b0 = value & 0xff;
/* Note: case of value==0 must use 0:000:0:0000000 encoding */
@@ -135,8 +115,8 @@ int ArmMir2Lir::ModifiedImmediate(uint32_t value) {
if (value == ((b0 << 24) | (b0 << 8)))
return (0x2 << 8) | b0; /* 0:010:a:bcdefgh */
/* Can we do it with rotation? */
- z_leading = LeadingZeros(value);
- z_trailing = 32 - LeadingZeros(~value & (value - 1));
+ int z_leading = CLZ(value);
+ int z_trailing = CTZ(value);
/* A run of eight or fewer active bits? */
if ((z_leading + z_trailing) < 24)
return -1; /* No - bail */
@@ -152,6 +132,71 @@ bool ArmMir2Lir::InexpensiveConstantInt(int32_t value) {
return (ModifiedImmediate(value) >= 0) || (ModifiedImmediate(~value) >= 0);
}
+bool ArmMir2Lir::InexpensiveConstantInt(int32_t value, Instruction::Code opcode) {
+ switch (opcode) {
+ case Instruction::ADD_INT:
+ case Instruction::ADD_INT_2ADDR:
+ case Instruction::SUB_INT:
+ case Instruction::SUB_INT_2ADDR:
+ if ((value >> 12) == (value >> 31)) { // Signed 12-bit, RRI12 versions of ADD/SUB.
+ return true;
+ }
+ FALLTHROUGH_INTENDED;
+ case Instruction::IF_EQ:
+ case Instruction::IF_NE:
+ case Instruction::IF_LT:
+ case Instruction::IF_GE:
+ case Instruction::IF_GT:
+ case Instruction::IF_LE:
+ return (ModifiedImmediate(value) >= 0) || (ModifiedImmediate(-value) >= 0);
+ case Instruction::SHL_INT:
+ case Instruction::SHL_INT_2ADDR:
+ case Instruction::SHR_INT:
+ case Instruction::SHR_INT_2ADDR:
+ case Instruction::USHR_INT:
+ case Instruction::USHR_INT_2ADDR:
+ return true;
+ case Instruction::CONST:
+ case Instruction::CONST_4:
+ case Instruction::CONST_16:
+ if ((value >> 16) == 0) {
+ return true; // movw, 16-bit unsigned.
+ }
+ FALLTHROUGH_INTENDED;
+ case Instruction::AND_INT:
+ case Instruction::AND_INT_2ADDR:
+ case Instruction::AND_INT_LIT16:
+ case Instruction::AND_INT_LIT8:
+ case Instruction::OR_INT:
+ case Instruction::OR_INT_2ADDR:
+ case Instruction::OR_INT_LIT16:
+ case Instruction::OR_INT_LIT8:
+ return (ModifiedImmediate(value) >= 0) || (ModifiedImmediate(~value) >= 0);
+ case Instruction::XOR_INT:
+ case Instruction::XOR_INT_2ADDR:
+ case Instruction::XOR_INT_LIT16:
+ case Instruction::XOR_INT_LIT8:
+ return (ModifiedImmediate(value) >= 0);
+ case Instruction::MUL_INT:
+ case Instruction::MUL_INT_2ADDR:
+ case Instruction::MUL_INT_LIT8:
+ case Instruction::MUL_INT_LIT16:
+ case Instruction::DIV_INT:
+ case Instruction::DIV_INT_2ADDR:
+ case Instruction::DIV_INT_LIT8:
+ case Instruction::DIV_INT_LIT16:
+ case Instruction::REM_INT:
+ case Instruction::REM_INT_2ADDR:
+ case Instruction::REM_INT_LIT8:
+ case Instruction::REM_INT_LIT16: {
+ EasyMultiplyOp ops[2];
+ return GetEasyMultiplyTwoOps(value, ops);
+ }
+ default:
+ return false;
+ }
+}
+
bool ArmMir2Lir::InexpensiveConstantFloat(int32_t value) {
return EncodeImmSingle(value) >= 0;
}
@@ -213,10 +258,7 @@ LIR* ArmMir2Lir::OpUnconditionalBranch(LIR* target) {
}
LIR* ArmMir2Lir::OpCondBranch(ConditionCode cc, LIR* target) {
- // This is kThumb2BCond instead of kThumbBCond for performance reasons. The assembly
- // time required for a new pass after kThumbBCond is fixed up to kThumb2BCond is
- // substantial.
- LIR* branch = NewLIR2(kThumb2BCond, 0 /* offset to be patched */,
+ LIR* branch = NewLIR2(kThumbBCond, 0 /* offset to be patched */,
ArmConditionEncoding(cc));
branch->target = target;
return branch;
@@ -513,7 +555,7 @@ LIR* ArmMir2Lir::OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, in
op = (op == kOpAdd) ? kOpSub : kOpAdd;
}
}
- if (mod_imm < 0 && (abs_value & 0x3ff) == abs_value) {
+ if (mod_imm < 0 && (abs_value >> 12) == 0) {
// This is deliberately used only if modified immediate encoding is inadequate since
// we sometimes actually use the flags for small values but not necessarily low regs.
if (op == kOpAdd)
@@ -545,6 +587,12 @@ LIR* ArmMir2Lir::OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, in
case kOpOr:
opcode = kThumb2OrrRRI8M;
alt_opcode = kThumb2OrrRRR;
+ if (mod_imm < 0) {
+ mod_imm = ModifiedImmediate(~value);
+ if (mod_imm >= 0) {
+ opcode = kThumb2OrnRRI8M;
+ }
+ }
break;
case kOpAnd:
if (mod_imm < 0) {
@@ -858,12 +906,12 @@ LIR* ArmMir2Lir::LoadStoreUsingInsnWithOffsetImm8Shl2(ArmOpcode opcode, RegStora
*/
LIR* ArmMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest,
OpSize size) {
- LIR* load = NULL;
- ArmOpcode opcode = kThumbBkpt;
+ LIR* load = nullptr;
+ ArmOpcode opcode16 = kThumbBkpt; // 16-bit Thumb opcode.
+ ArmOpcode opcode32 = kThumbBkpt; // 32-bit Thumb2 opcode.
bool short_form = false;
- bool thumb2Form = (displacement < 4092 && displacement >= 0);
bool all_low = r_dest.Is32Bit() && r_base.Low8() && r_dest.Low8();
- int encoded_disp = displacement;
+ int scale = 0; // Used for opcode16 and some indexed loads.
bool already_generated = false;
switch (size) {
case kDouble:
@@ -891,57 +939,45 @@ LIR* ArmMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorag
already_generated = true;
break;
}
+ DCHECK_EQ((displacement & 0x3), 0);
+ scale = 2;
if (r_dest.Low8() && (r_base == rs_rARM_PC) && (displacement <= 1020) &&
(displacement >= 0)) {
short_form = true;
- encoded_disp >>= 2;
- opcode = kThumbLdrPcRel;
+ opcode16 = kThumbLdrPcRel;
} else if (r_dest.Low8() && (r_base == rs_rARM_SP) && (displacement <= 1020) &&
(displacement >= 0)) {
short_form = true;
- encoded_disp >>= 2;
- opcode = kThumbLdrSpRel;
- } else if (all_low && displacement < 128 && displacement >= 0) {
- DCHECK_EQ((displacement & 0x3), 0);
- short_form = true;
- encoded_disp >>= 2;
- opcode = kThumbLdrRRI5;
- } else if (thumb2Form) {
- short_form = true;
- opcode = kThumb2LdrRRI12;
+ opcode16 = kThumbLdrSpRel;
+ } else {
+ short_form = all_low && (displacement >> (5 + scale)) == 0;
+ opcode16 = kThumbLdrRRI5;
+ opcode32 = kThumb2LdrRRI12;
}
break;
case kUnsignedHalf:
- if (all_low && displacement < 64 && displacement >= 0) {
- DCHECK_EQ((displacement & 0x1), 0);
- short_form = true;
- encoded_disp >>= 1;
- opcode = kThumbLdrhRRI5;
- } else if (displacement < 4092 && displacement >= 0) {
- short_form = true;
- opcode = kThumb2LdrhRRI12;
- }
+ DCHECK_EQ((displacement & 0x1), 0);
+ scale = 1;
+ short_form = all_low && (displacement >> (5 + scale)) == 0;
+ opcode16 = kThumbLdrhRRI5;
+ opcode32 = kThumb2LdrhRRI12;
break;
case kSignedHalf:
- if (thumb2Form) {
- short_form = true;
- opcode = kThumb2LdrshRRI12;
- }
+ DCHECK_EQ((displacement & 0x1), 0);
+ scale = 1;
+ DCHECK_EQ(opcode16, kThumbBkpt); // Not available.
+ opcode32 = kThumb2LdrshRRI12;
break;
case kUnsignedByte:
- if (all_low && displacement < 32 && displacement >= 0) {
- short_form = true;
- opcode = kThumbLdrbRRI5;
- } else if (thumb2Form) {
- short_form = true;
- opcode = kThumb2LdrbRRI12;
- }
+ DCHECK_EQ(scale, 0); // Keep scale = 0.
+ short_form = all_low && (displacement >> (5 + scale)) == 0;
+ opcode16 = kThumbLdrbRRI5;
+ opcode32 = kThumb2LdrbRRI12;
break;
case kSignedByte:
- if (thumb2Form) {
- short_form = true;
- opcode = kThumb2LdrsbRRI12;
- }
+ DCHECK_EQ(scale, 0); // Keep scale = 0.
+ DCHECK_EQ(opcode16, kThumbBkpt); // Not available.
+ opcode32 = kThumb2LdrsbRRI12;
break;
default:
LOG(FATAL) << "Bad size: " << size;
@@ -949,12 +985,33 @@ LIR* ArmMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorag
if (!already_generated) {
if (short_form) {
- load = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), encoded_disp);
+ load = NewLIR3(opcode16, r_dest.GetReg(), r_base.GetReg(), displacement >> scale);
+ } else if ((displacement >> 12) == 0) { // Thumb2 form.
+ load = NewLIR3(opcode32, r_dest.GetReg(), r_base.GetReg(), displacement);
+ } else if (!InexpensiveConstantInt(displacement >> scale, Instruction::CONST) &&
+ InexpensiveConstantInt(displacement & ~0x00000fff, Instruction::ADD_INT)) {
+ // In this case, using LoadIndexed would emit 3 insns (movw+movt+ldr) but we can
+ // actually do it in two because we know that the kOpAdd is a single insn. On the
+ // other hand, we introduce an extra dependency, so this is not necessarily faster.
+ if (opcode16 != kThumbBkpt && r_dest.Low8() &&
+ InexpensiveConstantInt(displacement & ~(0x1f << scale), Instruction::ADD_INT)) {
+ // We can use the 16-bit Thumb opcode for the load.
+ OpRegRegImm(kOpAdd, r_dest, r_base, displacement & ~(0x1f << scale));
+ load = NewLIR3(opcode16, r_dest.GetReg(), r_dest.GetReg(), (displacement >> scale) & 0x1f);
+ } else {
+ DCHECK_NE(opcode32, kThumbBkpt);
+ OpRegRegImm(kOpAdd, r_dest, r_base, displacement & ~0x00000fff);
+ load = NewLIR3(opcode32, r_dest.GetReg(), r_dest.GetReg(), displacement & 0x00000fff);
+ }
} else {
+ if (!InexpensiveConstantInt(displacement >> scale, Instruction::CONST) ||
+ (scale != 0 && InexpensiveConstantInt(displacement, Instruction::CONST))) {
+ scale = 0; // Prefer unscaled indexing if the same number of insns.
+ }
RegStorage reg_offset = AllocTemp();
- LoadConstant(reg_offset, encoded_disp);
+ LoadConstant(reg_offset, displacement >> scale);
DCHECK(!r_dest.IsFloat());
- load = LoadBaseIndexed(r_base, reg_offset, r_dest, 0, size);
+ load = LoadBaseIndexed(r_base, reg_offset, r_dest, scale, size);
FreeTemp(reg_offset);
}
}
@@ -1000,12 +1057,12 @@ LIR* ArmMir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_
LIR* ArmMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src,
OpSize size) {
- LIR* store = NULL;
- ArmOpcode opcode = kThumbBkpt;
+ LIR* store = nullptr;
+ ArmOpcode opcode16 = kThumbBkpt; // 16-bit Thumb opcode.
+ ArmOpcode opcode32 = kThumbBkpt; // 32-bit Thumb2 opcode.
bool short_form = false;
- bool thumb2Form = (displacement < 4092 && displacement >= 0);
bool all_low = r_src.Is32Bit() && r_base.Low8() && r_src.Low8();
- int encoded_disp = displacement;
+ int scale = 0; // Used for opcode16 and some indexed loads.
bool already_generated = false;
switch (size) {
case kDouble:
@@ -1037,53 +1094,67 @@ LIR* ArmMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStora
already_generated = true;
break;
}
+ DCHECK_EQ((displacement & 0x3), 0);
+ scale = 2;
if (r_src.Low8() && (r_base == rs_r13sp) && (displacement <= 1020) && (displacement >= 0)) {
short_form = true;
- encoded_disp >>= 2;
- opcode = kThumbStrSpRel;
- } else if (all_low && displacement < 128 && displacement >= 0) {
- DCHECK_EQ((displacement & 0x3), 0);
- short_form = true;
- encoded_disp >>= 2;
- opcode = kThumbStrRRI5;
- } else if (thumb2Form) {
- short_form = true;
- opcode = kThumb2StrRRI12;
+ opcode16 = kThumbStrSpRel;
+ } else {
+ short_form = all_low && (displacement >> (5 + scale)) == 0;
+ opcode16 = kThumbStrRRI5;
+ opcode32 = kThumb2StrRRI12;
}
break;
case kUnsignedHalf:
case kSignedHalf:
- if (all_low && displacement < 64 && displacement >= 0) {
- DCHECK_EQ((displacement & 0x1), 0);
- short_form = true;
- encoded_disp >>= 1;
- opcode = kThumbStrhRRI5;
- } else if (thumb2Form) {
- short_form = true;
- opcode = kThumb2StrhRRI12;
- }
+ DCHECK_EQ((displacement & 0x1), 0);
+ scale = 1;
+ short_form = all_low && (displacement >> (5 + scale)) == 0;
+ opcode16 = kThumbStrhRRI5;
+ opcode32 = kThumb2StrhRRI12;
break;
case kUnsignedByte:
case kSignedByte:
- if (all_low && displacement < 32 && displacement >= 0) {
- short_form = true;
- opcode = kThumbStrbRRI5;
- } else if (thumb2Form) {
- short_form = true;
- opcode = kThumb2StrbRRI12;
- }
+ DCHECK_EQ(scale, 0); // Keep scale = 0.
+ short_form = all_low && (displacement >> (5 + scale)) == 0;
+ opcode16 = kThumbStrbRRI5;
+ opcode32 = kThumb2StrbRRI12;
break;
default:
LOG(FATAL) << "Bad size: " << size;
}
if (!already_generated) {
if (short_form) {
- store = NewLIR3(opcode, r_src.GetReg(), r_base.GetReg(), encoded_disp);
+ store = NewLIR3(opcode16, r_src.GetReg(), r_base.GetReg(), displacement >> scale);
+ } else if ((displacement >> 12) == 0) {
+ store = NewLIR3(opcode32, r_src.GetReg(), r_base.GetReg(), displacement);
+ } else if (!InexpensiveConstantInt(displacement >> scale, Instruction::CONST) &&
+ InexpensiveConstantInt(displacement & ~0x00000fff, Instruction::ADD_INT)) {
+ // In this case, using StoreIndexed would emit 3 insns (movw+movt+str) but we can
+ // actually do it in two because we know that the kOpAdd is a single insn. On the
+ // other hand, we introduce an extra dependency, so this is not necessarily faster.
+ RegStorage r_scratch = AllocTemp();
+ if (opcode16 != kThumbBkpt && r_src.Low8() && r_scratch.Low8() &&
+ InexpensiveConstantInt(displacement & ~(0x1f << scale), Instruction::ADD_INT)) {
+ // We can use the 16-bit Thumb opcode for the load.
+ OpRegRegImm(kOpAdd, r_scratch, r_base, displacement & ~(0x1f << scale));
+ store = NewLIR3(opcode16, r_src.GetReg(), r_scratch.GetReg(),
+ (displacement >> scale) & 0x1f);
+ } else {
+ DCHECK_NE(opcode32, kThumbBkpt);
+ OpRegRegImm(kOpAdd, r_scratch, r_base, displacement & ~0x00000fff);
+ store = NewLIR3(opcode32, r_src.GetReg(), r_scratch.GetReg(), displacement & 0x00000fff);
+ }
+ FreeTemp(r_scratch);
} else {
+ if (!InexpensiveConstantInt(displacement >> scale, Instruction::CONST) ||
+ (scale != 0 && InexpensiveConstantInt(displacement, Instruction::CONST))) {
+ scale = 0; // Prefer unscaled indexing if the same number of insns.
+ }
RegStorage r_scratch = AllocTemp();
- LoadConstant(r_scratch, encoded_disp);
+ LoadConstant(r_scratch, displacement >> scale);
DCHECK(!r_src.IsFloat());
- store = StoreBaseIndexed(r_base, r_scratch, r_src, 0, size);
+ store = StoreBaseIndexed(r_base, r_scratch, r_src, scale, size);
FreeTemp(r_scratch);
}
}
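For reference (not part of the patch): a standalone analogue of the CLZ/CTZ-based rotation check that ModifiedImmediate() now uses. It only answers "encodable or not"; the real function also returns the i:imm3:a:bcdefgh form and handles the replicated-byte patterns first.

#include <cstdint>
#include <iostream>

// A value whose significant bits fit in a run of eight or fewer positions can be
// encoded as a rotated 8-bit immediate (after the replicated-byte special cases).
bool EncodableAsRotatedImm8(uint32_t value) {
  if (value == 0u) return true;           // Encoded as 0:000:0:0000000.
  int z_leading = __builtin_clz(value);   // CLZ
  int z_trailing = __builtin_ctz(value);  // CTZ
  return (z_leading + z_trailing) >= 24;  // At most 8 active bit positions.
}

int main() {
  std::cout << std::boolalpha;
  std::cout << EncodableAsRotatedImm8(0x0003FC00) << "\n";  // true: an 8-bit run, rotatable
  std::cout << EncodableAsRotatedImm8(0x00000101) << "\n";  // false: active bits span 9 positions
  return 0;
}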
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 1cde01e244..00217549b0 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -775,6 +775,10 @@ void Mir2Lir::CreateNativeGcMap() {
": " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
native_gc_map_builder.AddEntry(native_offset, references);
}
+
+ // Maybe not necessary, but this could help prevent errors where we access the verified method
+ // after it has been deleted.
+ mir_graph_->GetCurrentDexCompilationUnit()->ClearVerifiedMethod();
}
/* Determine the offset of each literal field */
@@ -922,28 +926,6 @@ void Mir2Lir::MarkBoundary(DexOffset offset, const char* inst_str) {
NewLIR1(kPseudoDalvikByteCodeBoundary, WrapPointer(ArenaStrdup(inst_str)));
}
-bool Mir2Lir::EvaluateBranch(Instruction::Code opcode, int32_t src1, int32_t src2) {
- bool is_taken;
- switch (opcode) {
- case Instruction::IF_EQ: is_taken = (src1 == src2); break;
- case Instruction::IF_NE: is_taken = (src1 != src2); break;
- case Instruction::IF_LT: is_taken = (src1 < src2); break;
- case Instruction::IF_GE: is_taken = (src1 >= src2); break;
- case Instruction::IF_GT: is_taken = (src1 > src2); break;
- case Instruction::IF_LE: is_taken = (src1 <= src2); break;
- case Instruction::IF_EQZ: is_taken = (src1 == 0); break;
- case Instruction::IF_NEZ: is_taken = (src1 != 0); break;
- case Instruction::IF_LTZ: is_taken = (src1 < 0); break;
- case Instruction::IF_GEZ: is_taken = (src1 >= 0); break;
- case Instruction::IF_GTZ: is_taken = (src1 > 0); break;
- case Instruction::IF_LEZ: is_taken = (src1 <= 0); break;
- default:
- LOG(FATAL) << "Unexpected opcode " << opcode;
- UNREACHABLE();
- }
- return is_taken;
-}
-
// Convert relation of src1/src2 to src2/src1
ConditionCode Mir2Lir::FlipComparisonOrder(ConditionCode before) {
ConditionCode res;
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 9be8719b5d..774176ebb1 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -322,6 +322,12 @@ void Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
StoreValueWide(rl_dest, rl_result);
}
+void Mir2Lir::GenLongToInt(RegLocation rl_dest, RegLocation rl_src) {
+ rl_src = UpdateLocWide(rl_src);
+ rl_src = NarrowRegLoc(rl_src);
+ StoreValue(rl_dest, rl_src);
+}
+
void Mir2Lir::GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src) {
rl_src = LoadValue(rl_src, kCoreReg);
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 0d30927679..320c0f4900 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -657,24 +657,12 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
case Instruction::IF_GT:
case Instruction::IF_LE: {
LIR* taken = &label_list[bb->taken];
- // Result known at compile time?
- if (rl_src[0].is_const && rl_src[1].is_const) {
- bool is_taken = EvaluateBranch(opcode, mir_graph_->ConstantValue(rl_src[0].orig_sreg),
- mir_graph_->ConstantValue(rl_src[1].orig_sreg));
- BasicBlockId target_id = is_taken ? bb->taken : bb->fall_through;
- if (mir_graph_->IsBackedge(bb, target_id) &&
- (kLeafOptimization || !mir_graph_->HasSuspendTestBetween(bb, target_id))) {
- GenSuspendTest(opt_flags);
- }
- OpUnconditionalBranch(&label_list[target_id]);
- } else {
- if (mir_graph_->IsBackwardsBranch(bb) &&
- (kLeafOptimization || !mir_graph_->HasSuspendTestBetween(bb, bb->taken) ||
- !mir_graph_->HasSuspendTestBetween(bb, bb->fall_through))) {
- GenSuspendTest(opt_flags);
- }
- GenCompareAndBranch(opcode, rl_src[0], rl_src[1], taken);
+ if (mir_graph_->IsBackwardsBranch(bb) &&
+ (kLeafOptimization || !mir_graph_->HasSuspendTestBetween(bb, bb->taken) ||
+ !mir_graph_->HasSuspendTestBetween(bb, bb->fall_through))) {
+ GenSuspendTest(opt_flags);
}
+ GenCompareAndBranch(opcode, rl_src[0], rl_src[1], taken);
break;
}
case Instruction::IF_EQZ:
@@ -684,23 +672,12 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
case Instruction::IF_GTZ:
case Instruction::IF_LEZ: {
LIR* taken = &label_list[bb->taken];
- // Result known at compile time?
- if (rl_src[0].is_const) {
- bool is_taken = EvaluateBranch(opcode, mir_graph_->ConstantValue(rl_src[0].orig_sreg), 0);
- BasicBlockId target_id = is_taken ? bb->taken : bb->fall_through;
- if (mir_graph_->IsBackedge(bb, target_id) &&
- (kLeafOptimization || !mir_graph_->HasSuspendTestBetween(bb, target_id))) {
- GenSuspendTest(opt_flags);
- }
- OpUnconditionalBranch(&label_list[target_id]);
- } else {
- if (mir_graph_->IsBackwardsBranch(bb) &&
- (kLeafOptimization || !mir_graph_->HasSuspendTestBetween(bb, bb->taken) ||
- !mir_graph_->HasSuspendTestBetween(bb, bb->fall_through))) {
- GenSuspendTest(opt_flags);
- }
- GenCompareZeroAndBranch(opcode, rl_src[0], taken);
+ if (mir_graph_->IsBackwardsBranch(bb) &&
+ (kLeafOptimization || !mir_graph_->HasSuspendTestBetween(bb, bb->taken) ||
+ !mir_graph_->HasSuspendTestBetween(bb, bb->fall_through))) {
+ GenSuspendTest(opt_flags);
}
+ GenCompareZeroAndBranch(opcode, rl_src[0], taken);
break;
}
@@ -956,9 +933,7 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
break;
case Instruction::LONG_TO_INT:
- rl_src[0] = UpdateLocWide(rl_src[0]);
- rl_src[0] = NarrowRegLoc(rl_src[0]);
- StoreValue(rl_dest, rl_src[0]);
+ GenLongToInt(rl_dest, rl_src[0]);
break;
case Instruction::INT_TO_BYTE:
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 6847717d8b..5d78a6e25c 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -666,7 +666,6 @@ class Mir2Lir : public Backend {
void MarkBoundary(DexOffset offset, const char* inst_str);
void NopLIR(LIR* lir);
void UnlinkLIR(LIR* lir);
- bool EvaluateBranch(Instruction::Code opcode, int src1, int src2);
bool IsInexpensiveConstant(RegLocation rl_src);
ConditionCode FlipComparisonOrder(ConditionCode before);
ConditionCode NegateComparison(ConditionCode before);
@@ -811,6 +810,7 @@ class Mir2Lir : public Backend {
LIR* taken);
void GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src, LIR* taken);
virtual void GenIntToLong(RegLocation rl_dest, RegLocation rl_src);
+ virtual void GenLongToInt(RegLocation rl_dest, RegLocation rl_src);
void GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src);
void GenNewArray(uint32_t type_idx, RegLocation rl_dest,
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 09e1482b4d..9cb0bf53e6 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -90,6 +90,7 @@ class X86Mir2Lir : public Mir2Lir {
OpSize size) OVERRIDE;
LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
+ void GenLongToInt(RegLocation rl_dest, RegLocation rl_src);
LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
OpSize size, VolatileKind is_volatile) OVERRIDE;
LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index a063ce18c2..80cdc83497 100755
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -3213,6 +3213,26 @@ void X86Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
StoreValueWide(rl_dest, rl_result);
}
+void X86Mir2Lir::GenLongToInt(RegLocation rl_dest, RegLocation rl_src) {
+ rl_src = UpdateLocWide(rl_src);
+ rl_src = NarrowRegLoc(rl_src);
+ StoreValue(rl_dest, rl_src);
+
+ if (cu_->target64) {
+ // if src and dest are in the same phys reg then StoreValue generates
+ // no operation but we need explicit 32-bit mov R, R to clear
+ // the higher 32-bits
+ rl_dest = UpdateLoc(rl_dest);
+ if (rl_src.location == kLocPhysReg && rl_dest.location == kLocPhysReg
+ && IsSameReg(rl_src.reg, rl_dest.reg)) {
+ LIR* copy_lir = OpRegCopyNoInsert(rl_dest.reg, rl_dest.reg);
+ // remove nop flag set by OpRegCopyNoInsert if src == dest
+ copy_lir->flags.is_nop = false;
+ AppendLIR(copy_lir);
+ }
+ }
+}
+
void X86Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_shift) {
if (!cu_->target64) {
diff --git a/compiler/dex/verification_results.cc b/compiler/dex/verification_results.cc
index 4929b5b2b0..932a532e56 100644
--- a/compiler/dex/verification_results.cc
+++ b/compiler/dex/verification_results.cc
@@ -84,6 +84,15 @@ const VerifiedMethod* VerificationResults::GetVerifiedMethod(MethodReference ref
return (it != verified_methods_.end()) ? it->second : nullptr;
}
+void VerificationResults::RemoveVerifiedMethod(MethodReference ref) {
+ WriterMutexLock mu(Thread::Current(), verified_methods_lock_);
+ auto it = verified_methods_.find(ref);
+ if (it != verified_methods_.end()) {
+ delete it->second;
+ verified_methods_.erase(it);
+ }
+}
+
void VerificationResults::AddRejectedClass(ClassReference ref) {
{
WriterMutexLock mu(Thread::Current(), rejected_classes_lock_);
diff --git a/compiler/dex/verification_results.h b/compiler/dex/verification_results.h
index 0e7923fbc3..7fc2a2363d 100644
--- a/compiler/dex/verification_results.h
+++ b/compiler/dex/verification_results.h
@@ -48,6 +48,7 @@ class VerificationResults {
const VerifiedMethod* GetVerifiedMethod(MethodReference ref)
LOCKS_EXCLUDED(verified_methods_lock_);
+ void RemoveVerifiedMethod(MethodReference ref) LOCKS_EXCLUDED(verified_methods_lock_);
void AddRejectedClass(ClassReference ref) LOCKS_EXCLUDED(rejected_classes_lock_);
bool IsClassRejected(ClassReference ref) LOCKS_EXCLUDED(rejected_classes_lock_);
diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h
index ebf7874dcf..3a91b08d5e 100644
--- a/compiler/driver/compiler_driver-inl.h
+++ b/compiler/driver/compiler_driver-inl.h
@@ -21,7 +21,6 @@
#include "dex/compiler_ir.h"
#include "dex_compilation_unit.h"
-#include "field_helper.h"
#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
#include "mirror/class_loader.h"
@@ -134,10 +133,9 @@ inline std::pair<bool, bool> CompilerDriver::IsFastStaticField(
} else {
// Search dex file for localized ssb index, may fail if field's class is a parent
// of the class mentioned in the dex file and there is no dex cache entry.
- StackHandleScope<1> hs(Thread::Current());
+ std::string temp;
const DexFile::StringId* string_id =
- dex_file->FindStringId(
- FieldHelper(hs.NewHandle(resolved_field)).GetDeclaringClassDescriptor());
+ dex_file->FindStringId(resolved_field->GetDeclaringClass()->GetDescriptor(&temp));
if (string_id != nullptr) {
const DexFile::TypeId* type_id =
dex_file->FindTypeId(dex_file->GetIndexForStringId(*string_id));
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 2e9f8355f2..e4274712d4 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -19,9 +19,14 @@
#define ATRACE_TAG ATRACE_TAG_DALVIK
#include <utils/Trace.h>
+#include <unordered_set>
#include <vector>
#include <unistd.h>
+#ifndef __APPLE__
+#include <malloc.h> // For mallinfo
+#endif
+
#include "base/stl_util.h"
#include "base/timing_logger.h"
#include "class_linker.h"
@@ -53,6 +58,7 @@
#include "ScopedLocalRef.h"
#include "handle_scope-inl.h"
#include "thread.h"
+#include "thread_list.h"
#include "thread_pool.h"
#include "trampolines/trampoline_compiler.h"
#include "transaction.h"
@@ -495,6 +501,7 @@ void CompilerDriver::CompileAll(jobject class_loader,
TimingLogger* timings) {
DCHECK(!Runtime::Current()->IsStarted());
std::unique_ptr<ThreadPool> thread_pool(new ThreadPool("Compiler driver thread pool", thread_count_ - 1));
+ VLOG(compiler) << "Before precompile " << GetMemoryUsageString();
PreCompile(class_loader, dex_files, thread_pool.get(), timings);
Compile(class_loader, dex_files, thread_pool.get(), timings);
if (dump_stats_) {
@@ -591,20 +598,25 @@ void CompilerDriver::Resolve(jobject class_loader, const std::vector<const DexFi
void CompilerDriver::PreCompile(jobject class_loader, const std::vector<const DexFile*>& dex_files,
ThreadPool* thread_pool, TimingLogger* timings) {
LoadImageClasses(timings);
+ VLOG(compiler) << "LoadImageClasses: " << GetMemoryUsageString();
Resolve(class_loader, dex_files, thread_pool, timings);
+ VLOG(compiler) << "Resolve: " << GetMemoryUsageString();
if (!compiler_options_->IsVerificationEnabled()) {
- LOG(INFO) << "Verify none mode specified, skipping verification.";
+ VLOG(compiler) << "Verify none mode specified, skipping verification.";
SetVerified(class_loader, dex_files, thread_pool, timings);
return;
}
Verify(class_loader, dex_files, thread_pool, timings);
+ VLOG(compiler) << "Verify: " << GetMemoryUsageString();
InitializeClasses(class_loader, dex_files, thread_pool, timings);
+ VLOG(compiler) << "InitializeClasses: " << GetMemoryUsageString();
UpdateImageClasses(timings);
+ VLOG(compiler) << "UpdateImageClasses: " << GetMemoryUsageString();
}
bool CompilerDriver::IsImageClass(const char* descriptor) const {
@@ -626,10 +638,10 @@ bool CompilerDriver::IsClassToCompile(const char* descriptor) const {
}
}
-static void ResolveExceptionsForMethod(MutableMethodHelper* mh,
+static void ResolveExceptionsForMethod(MutableHandle<mirror::ArtMethod> method_handle,
std::set<std::pair<uint16_t, const DexFile*>>& exceptions_to_resolve)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- const DexFile::CodeItem* code_item = mh->GetMethod()->GetCodeItem();
+ const DexFile::CodeItem* code_item = method_handle->GetCodeItem();
if (code_item == nullptr) {
return; // native or abstract method
}
@@ -649,10 +661,10 @@ static void ResolveExceptionsForMethod(MutableMethodHelper* mh,
uint16_t encoded_catch_handler_handlers_type_idx =
DecodeUnsignedLeb128(&encoded_catch_handler_list);
// Add to set of types to resolve if not already in the dex cache resolved types
- if (!mh->GetMethod()->IsResolvedTypeIdx(encoded_catch_handler_handlers_type_idx)) {
+ if (!method_handle->IsResolvedTypeIdx(encoded_catch_handler_handlers_type_idx)) {
exceptions_to_resolve.insert(
std::pair<uint16_t, const DexFile*>(encoded_catch_handler_handlers_type_idx,
- mh->GetMethod()->GetDexFile()));
+ method_handle->GetDexFile()));
}
// ignore address associated with catch handler
DecodeUnsignedLeb128(&encoded_catch_handler_list);
@@ -669,14 +681,14 @@ static bool ResolveCatchBlockExceptionsClassVisitor(mirror::Class* c, void* arg)
std::set<std::pair<uint16_t, const DexFile*>>* exceptions_to_resolve =
reinterpret_cast<std::set<std::pair<uint16_t, const DexFile*>>*>(arg);
StackHandleScope<1> hs(Thread::Current());
- MutableMethodHelper mh(hs.NewHandle<mirror::ArtMethod>(nullptr));
+ MutableHandle<mirror::ArtMethod> method_handle(hs.NewHandle<mirror::ArtMethod>(nullptr));
for (size_t i = 0; i < c->NumVirtualMethods(); ++i) {
- mh.ChangeMethod(c->GetVirtualMethod(i));
- ResolveExceptionsForMethod(&mh, *exceptions_to_resolve);
+ method_handle.Assign(c->GetVirtualMethod(i));
+ ResolveExceptionsForMethod(method_handle, *exceptions_to_resolve);
}
for (size_t i = 0; i < c->NumDirectMethods(); ++i) {
- mh.ChangeMethod(c->GetDirectMethod(i));
- ResolveExceptionsForMethod(&mh, *exceptions_to_resolve);
+ method_handle.Assign(c->GetDirectMethod(i));
+ ResolveExceptionsForMethod(method_handle, *exceptions_to_resolve);
}
return true;
}
@@ -782,23 +794,143 @@ static void MaybeAddToImageClasses(Handle<mirror::Class> c, std::set<std::string
}
}
-void CompilerDriver::FindClinitImageClassesCallback(mirror::Object* object, void* arg) {
- DCHECK(object != nullptr);
- DCHECK(arg != nullptr);
- CompilerDriver* compiler_driver = reinterpret_cast<CompilerDriver*>(arg);
- StackHandleScope<1> hs(Thread::Current());
- MaybeAddToImageClasses(hs.NewHandle(object->GetClass()), compiler_driver->image_classes_.get());
-}
+// Keeps all the data for the update together. Also doubles as the reference visitor.
+// Note: we can use object pointers because we suspend all threads.
+class ClinitImageUpdate {
+ public:
+ static ClinitImageUpdate* Create(std::set<std::string>* image_class_descriptors, Thread* self,
+ ClassLinker* linker, std::string* error_msg) {
+ std::unique_ptr<ClinitImageUpdate> res(new ClinitImageUpdate(image_class_descriptors, self,
+ linker));
+ if (res->art_method_class_ == nullptr) {
+ *error_msg = "Could not find ArtMethod class.";
+ return nullptr;
+ } else if (res->dex_cache_class_ == nullptr) {
+ *error_msg = "Could not find DexCache class.";
+ return nullptr;
+ }
+
+ return res.release();
+ }
+
+ ~ClinitImageUpdate() {
+ // Allow others to suspend again.
+ self_->EndAssertNoThreadSuspension(old_cause_);
+ }
+
+ // Visitor for VisitReferences.
+ void operator()(mirror::Object* object, MemberOffset field_offset, bool /* is_static */) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Object* ref = object->GetFieldObject<mirror::Object>(field_offset);
+ if (ref != nullptr) {
+ VisitClinitClassesObject(ref);
+ }
+ }
+
+ // java.lang.Reference visitor for VisitReferences.
+ void operator()(mirror::Class* /* klass */, mirror::Reference* /* ref */) const {
+ }
+
+ void Walk() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ // Use the initial classes as roots for a search.
+ for (mirror::Class* klass_root : image_classes_) {
+ VisitClinitClassesObject(klass_root);
+ }
+ }
+
+ private:
+ ClinitImageUpdate(std::set<std::string>* image_class_descriptors, Thread* self,
+ ClassLinker* linker)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
+ image_class_descriptors_(image_class_descriptors), self_(self) {
+ CHECK(linker != nullptr);
+ CHECK(image_class_descriptors != nullptr);
+
+ // Make sure nobody interferes with us.
+ old_cause_ = self->StartAssertNoThreadSuspension("Boot image closure");
+
+ // Find the interesting classes.
+ art_method_class_ = linker->LookupClass(self, "Ljava/lang/reflect/ArtMethod;",
+ ComputeModifiedUtf8Hash("Ljava/lang/reflect/ArtMethod;"), nullptr);
+ dex_cache_class_ = linker->LookupClass(self, "Ljava/lang/DexCache;",
+ ComputeModifiedUtf8Hash("Ljava/lang/DexCache;"), nullptr);
+
+ // Find all the already-marked classes.
+ WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+ linker->VisitClasses(FindImageClasses, this);
+ }
+
+ static bool FindImageClasses(mirror::Class* klass, void* arg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ClinitImageUpdate* data = reinterpret_cast<ClinitImageUpdate*>(arg);
+ std::string temp;
+ const char* name = klass->GetDescriptor(&temp);
+ if (data->image_class_descriptors_->find(name) != data->image_class_descriptors_->end()) {
+ data->image_classes_.push_back(klass);
+ }
+
+ return true;
+ }
+
+ void VisitClinitClassesObject(mirror::Object* object) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ DCHECK(object != nullptr);
+ if (marked_objects_.find(object) != marked_objects_.end()) {
+ // Already processed.
+ return;
+ }
+
+ // Mark it.
+ marked_objects_.insert(object);
+
+ if (object->IsClass()) {
+ // If it is a class, add it.
+ StackHandleScope<1> hs(self_);
+ MaybeAddToImageClasses(hs.NewHandle(object->AsClass()), image_class_descriptors_);
+ } else {
+ // Else visit the object's class.
+ VisitClinitClassesObject(object->GetClass());
+ }
+
+ // If it is not a dex cache or an ArtMethod, visit all references.
+ mirror::Class* klass = object->GetClass();
+ if (klass != art_method_class_ && klass != dex_cache_class_) {
+ object->VisitReferences<false /* visit class */>(*this, *this);
+ }
+ }
+
+ mutable std::unordered_set<mirror::Object*> marked_objects_;
+ std::set<std::string>* const image_class_descriptors_;
+ std::vector<mirror::Class*> image_classes_;
+ const mirror::Class* art_method_class_;
+ const mirror::Class* dex_cache_class_;
+ Thread* const self_;
+ const char* old_cause_;
+
+ DISALLOW_COPY_AND_ASSIGN(ClinitImageUpdate);
+};
void CompilerDriver::UpdateImageClasses(TimingLogger* timings) {
if (IsImage()) {
TimingLogger::ScopedTiming t("UpdateImageClasses", timings);
- // Update image_classes_ with classes for objects created by <clinit> methods.
- gc::Heap* heap = Runtime::Current()->GetHeap();
- // TODO: Image spaces only?
- ScopedObjectAccess soa(Thread::Current());
- WriterMutexLock mu(soa.Self(), *Locks::heap_bitmap_lock_);
- heap->VisitObjects(FindClinitImageClassesCallback, this);
+
+ Runtime* current = Runtime::Current();
+
+ // Suspend all threads.
+ current->GetThreadList()->SuspendAll();
+
+ std::string error_msg;
+ std::unique_ptr<ClinitImageUpdate> update(ClinitImageUpdate::Create(image_classes_.get(),
+ Thread::Current(),
+ current->GetClassLinker(),
+ &error_msg));
+ CHECK(update.get() != nullptr) << error_msg; // TODO: Soft failure?
+
+ // Do the marking.
+ update->Walk();
+
+ // Resume threads.
+ current->GetThreadList()->ResumeAll();
}
}
@@ -1194,11 +1326,10 @@ void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType* type, InvokeType
target_method->dex_method_index = method->GetDexMethodIndex();
} else {
if (no_guarantee_of_dex_cache_entry) {
- StackHandleScope<1> hs(Thread::Current());
- MethodHelper mh(hs.NewHandle(method));
// See if the method is also declared in this dex cache.
- uint32_t dex_method_idx = mh.FindDexMethodIndexInOtherDexFile(
- *target_method->dex_file, target_method->dex_method_index);
+ uint32_t dex_method_idx =
+ method->FindDexMethodIndexInOtherDexFile(*target_method->dex_file,
+ target_method->dex_method_index);
if (dex_method_idx != DexFile::kDexNoIndex) {
target_method->dex_method_index = dex_method_idx;
} else {
@@ -1820,6 +1951,12 @@ static void InitializeClass(const ParallelCompilationManager* manager, size_t cl
mirror::Throwable* exception = soa.Self()->GetException(&throw_location);
VLOG(compiler) << "Initialization of " << descriptor << " aborted because of "
<< exception->Dump();
+ std::ostream* file_log = manager->GetCompiler()->
+ GetCompilerOptions().GetInitFailureOutput();
+ if (file_log != nullptr) {
+ *file_log << descriptor << "\n";
+ *file_log << exception->Dump() << "\n";
+ }
soa.Self()->ClearException();
transaction.Abort();
CHECK_EQ(old_status, klass->GetStatus()) << "Previous class status not restored";
@@ -1875,6 +2012,7 @@ void CompilerDriver::Compile(jobject class_loader, const std::vector<const DexFi
CHECK(dex_file != nullptr);
CompileDexFile(class_loader, *dex_file, dex_files, thread_pool, timings);
}
+ VLOG(compiler) << "Compile: " << GetMemoryUsageString();
}
void CompilerDriver::CompileClass(const ParallelCompilationManager* manager, size_t class_def_index) {
@@ -2001,6 +2139,7 @@ void CompilerDriver::CompileMethod(const DexFile::CodeItem* code_item, uint32_t
bool compilation_enabled) {
CompiledMethod* compiled_method = nullptr;
uint64_t start_ns = kTimeCompileMethod ? NanoTime() : 0;
+ MethodReference method_ref(&dex_file, method_idx);
if ((access_flags & kAccNative) != 0) {
// Are we interpreting only and have support for generic JNI down calls?
@@ -2014,7 +2153,6 @@ void CompilerDriver::CompileMethod(const DexFile::CodeItem* code_item, uint32_t
} else if ((access_flags & kAccAbstract) != 0) {
// Abstract methods don't have code.
} else {
- MethodReference method_ref(&dex_file, method_idx);
bool compile = compilation_enabled &&
verification_results_->IsCandidateForCompilation(method_ref, access_flags);
if (compile) {
@@ -2051,16 +2189,18 @@ void CompilerDriver::CompileMethod(const DexFile::CodeItem* code_item, uint32_t
// When compiling with PIC, there should be zero non-relative linker patches
CHECK(!compile_pic || non_relative_linker_patch_count == 0u);
- MethodReference ref(&dex_file, method_idx);
- DCHECK(GetCompiledMethod(ref) == nullptr) << PrettyMethod(method_idx, dex_file);
+ DCHECK(GetCompiledMethod(method_ref) == nullptr) << PrettyMethod(method_idx, dex_file);
{
MutexLock mu(self, compiled_methods_lock_);
- compiled_methods_.Put(ref, compiled_method);
+ compiled_methods_.Put(method_ref, compiled_method);
non_relative_linker_patch_count_ += non_relative_linker_patch_count;
}
- DCHECK(GetCompiledMethod(ref) != nullptr) << PrettyMethod(method_idx, dex_file);
+ DCHECK(GetCompiledMethod(method_ref) != nullptr) << PrettyMethod(method_idx, dex_file);
}
+ // Done compiling, delete the verified method to reduce native memory usage.
+ verification_results_->RemoveVerifiedMethod(method_ref);
+
if (self->IsExceptionPending()) {
ScopedObjectAccess soa(self);
LOG(FATAL) << "Unexpected exception compiling: " << PrettyMethod(method_idx, dex_file) << "\n"
@@ -2210,4 +2350,21 @@ bool CompilerDriver::SkipCompilation(const std::string& method_name) {
}
return !compile;
}
+
+std::string CompilerDriver::GetMemoryUsageString() const {
+ std::ostringstream oss;
+ const ArenaPool* arena_pool = GetArenaPool();
+ gc::Heap* heap = Runtime::Current()->GetHeap();
+ oss << "arena alloc=" << PrettySize(arena_pool->GetBytesAllocated());
+ oss << " java alloc=" << PrettySize(heap->GetBytesAllocated());
+#ifdef HAVE_MALLOC_H
+ struct mallinfo info = mallinfo();
+ const size_t allocated_space = static_cast<size_t>(info.uordblks);
+ const size_t free_space = static_cast<size_t>(info.fordblks);
+ oss << " native alloc=" << PrettySize(allocated_space) << " free="
+ << PrettySize(free_space);
+#endif
+ return oss.str();
+}
+
} // namespace art
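For reference (not part of the patch): the glibc mallinfo() fields that GetMemoryUsageString() reads above, in a minimal standalone form. uordblks is the total allocated space and fordblks the total free space; on platforms without mallinfo the native numbers are simply omitted, as in the patch.

#include <iostream>
#include <sstream>
#include <string>
#ifndef __APPLE__
#include <malloc.h>  // For mallinfo.
#endif

std::string NativeHeapUsage() {
  std::ostringstream oss;
#ifndef __APPLE__
  struct mallinfo info = mallinfo();
  oss << "native alloc=" << static_cast<size_t>(info.uordblks)
      << " free=" << static_cast<size_t>(info.fordblks);
#else
  oss << "native alloc=unknown";  // No mallinfo on this platform.
#endif
  return oss.str();
}

int main() {
  std::cout << NativeHeapUsage() << std::endl;
  return 0;
}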
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 437a1a9c0e..615e0d0db4 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -39,6 +39,7 @@
#include "thread_pool.h"
#include "utils/arena_allocator.h"
#include "utils/dedupe_set.h"
+#include "dex/verified_method.h"
namespace art {
@@ -340,6 +341,9 @@ class CompilerDriver {
ArenaPool* GetArenaPool() {
return &arena_pool_;
}
+ const ArenaPool* GetArenaPool() const {
+ return &arena_pool_;
+ }
bool WriteElf(const std::string& android_root,
bool is_host,
@@ -395,6 +399,9 @@ class CompilerDriver {
// Should the compiler run on this method given profile information?
bool SkipCompilation(const std::string& method_name);
+ // Get memory usage during compilation.
+ std::string GetMemoryUsageString() const;
+
private:
// These flags are internal to CompilerDriver for collecting INVOKE resolution statistics.
// The only external contract is that unresolved method has flags 0 and resolved non-0.
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index 0592f0cf1e..aec7d241f5 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -17,6 +17,7 @@
#ifndef ART_COMPILER_DRIVER_COMPILER_OPTIONS_H_
#define ART_COMPILER_DRIVER_COMPILER_OPTIONS_H_
+#include <ostream>
#include <string>
#include <vector>
@@ -70,7 +71,8 @@ class CompilerOptions FINAL {
#ifdef ART_SEA_IR_MODE
sea_ir_mode_(false),
#endif
- verbose_methods_(nullptr) {
+ verbose_methods_(nullptr),
+ init_failure_output_(nullptr) {
}
CompilerOptions(CompilerFilter compiler_filter,
@@ -90,7 +92,8 @@ class CompilerOptions FINAL {
#ifdef ART_SEA_IR_MODE
bool sea_ir_mode,
#endif
- const std::vector<std::string>* verbose_methods
+ const std::vector<std::string>* verbose_methods,
+ std::ostream* init_failure_output
) : // NOLINT(whitespace/parens)
compiler_filter_(compiler_filter),
huge_method_threshold_(huge_method_threshold),
@@ -109,7 +112,8 @@ class CompilerOptions FINAL {
#ifdef ART_SEA_IR_MODE
sea_ir_mode_(sea_ir_mode),
#endif
- verbose_methods_(verbose_methods) {
+ verbose_methods_(verbose_methods),
+ init_failure_output_(init_failure_output) {
}
CompilerFilter GetCompilerFilter() const {
@@ -217,6 +221,10 @@ class CompilerOptions FINAL {
return false;
}
+ std::ostream* GetInitFailureOutput() const {
+ return init_failure_output_;
+ }
+
private:
CompilerFilter compiler_filter_;
const size_t huge_method_threshold_;
@@ -241,6 +249,9 @@ class CompilerOptions FINAL {
// Vector of methods to have verbose output enabled for.
const std::vector<std::string>* const verbose_methods_;
+  // Log initialization failures to this stream if not null.
+ std::ostream* const init_failure_output_;
+
DISALLOW_COPY_AND_ASSIGN(CompilerOptions);
};
std::ostream& operator<<(std::ostream& os, const CompilerOptions::CompilerFilter& rhs);
diff --git a/compiler/driver/dex_compilation_unit.h b/compiler/driver/dex_compilation_unit.h
index 84f57991c3..03ae489da1 100644
--- a/compiler/driver/dex_compilation_unit.h
+++ b/compiler/driver/dex_compilation_unit.h
@@ -102,6 +102,10 @@ class DexCompilationUnit {
return verified_method_;
}
+ void ClearVerifiedMethod() {
+ verified_method_ = nullptr;
+ }
+
const std::string& GetSymbol();
private:
@@ -117,7 +121,7 @@ class DexCompilationUnit {
const uint16_t class_def_idx_;
const uint32_t dex_method_idx_;
const uint32_t access_flags_;
- const VerifiedMethod* const verified_method_;
+ const VerifiedMethod* verified_method_;
std::string symbol_;
};
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 3b1d914f6e..ab5c6c77de 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -77,6 +77,7 @@ bool ImageWriter::PrepareImageAddressSpace() {
Thread::Current()->TransitionFromSuspendedToRunnable();
PruneNonImageClasses(); // Remove junk
ComputeLazyFieldsForImageClasses(); // Add useful information
+ ProcessStrings();
Thread::Current()->TransitionFromRunnableToSuspended(kNative);
}
gc::Heap* heap = Runtime::Current()->GetHeap();
@@ -561,9 +562,9 @@ void ImageWriter::ProcessStrings() {
bool is_prefix = false;
if (it != existing_strings.end()) {
CHECK_LE(length, it->second);
- is_prefix = std::equal(combined_chars.begin() + it->first,
- combined_chars.begin() + it->first + it->second,
- combined_chars.begin() + new_string.first);
+ is_prefix = std::equal(combined_chars.begin() + new_string.first,
+ combined_chars.begin() + new_string.first + new_string.second,
+ combined_chars.begin() + it->first);
}
if (is_prefix) {
// Shares a prefix, set the offset to where the new offset will be.
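The corrected std::equal call walks only the new (shorter) string and compares it against the start of the longer string already recorded, which is what a prefix test requires; the old argument order walked the existing string's full length and so compared characters beyond the new string. A minimal standalone sketch of the intended check (names here are illustrative, not the ImageWriter members):

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

// True if the `new_length` chars starting at `new_start` form a prefix of the
// (equal-or-longer) string starting at `existing_start` in the same buffer.
bool IsPrefixOfExisting(const std::vector<uint16_t>& chars,
                        size_t new_start, size_t new_length,
                        size_t existing_start) {
  return std::equal(chars.begin() + new_start,
                    chars.begin() + new_start + new_length,  // walk the new string only
                    chars.begin() + existing_start);         // against the existing one
}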
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index a57f892c58..8a7abb4001 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -366,6 +366,8 @@ class OatWriter::Arm64RelativeCallPatcher FINAL : public ArmBaseRelativeCallPatc
Offset offset(mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
kArm64PointerSize).Int32Value());
assembler.JumpTo(ManagedRegister(arm64::X0), offset, ManagedRegister(arm64::IP0));
+ // Ensure we emit the literal pool.
+ assembler.EmitSlowPaths();
std::vector<uint8_t> thunk_code(assembler.CodeSize());
MemoryRegion code(thunk_code.data(), thunk_code.size());
assembler.FinalizeInstructions(code);
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index b261460690..eb6181c711 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -155,6 +155,37 @@ void HGraphBuilder::If_21t(const Instruction& instruction, uint32_t dex_pc) {
current_block_ = nullptr;
}
+static bool ShouldSkipCompilation(const CompilerDriver& compiler_driver,
+ const DexCompilationUnit& dex_compilation_unit,
+ size_t number_of_dex_instructions,
+ size_t number_of_blocks ATTRIBUTE_UNUSED,
+ size_t number_of_branches) {
+ const CompilerOptions& compiler_options = compiler_driver.GetCompilerOptions();
+ CompilerOptions::CompilerFilter compiler_filter = compiler_options.GetCompilerFilter();
+ if (compiler_filter == CompilerOptions::kEverything) {
+ return false;
+ }
+
+ if (compiler_options.IsHugeMethod(number_of_dex_instructions)) {
+ LOG(INFO) << "Skip compilation of huge method "
+ << PrettyMethod(dex_compilation_unit.GetDexMethodIndex(),
+ *dex_compilation_unit.GetDexFile())
+ << ": " << number_of_dex_instructions << " dex instructions";
+ return true;
+ }
+
+ // If it's large and contains no branches, it's likely to be machine generated initialization.
+ if (compiler_options.IsLargeMethod(number_of_dex_instructions) && (number_of_branches == 0)) {
+ LOG(INFO) << "Skip compilation of large method with no branch "
+ << PrettyMethod(dex_compilation_unit.GetDexMethodIndex(),
+ *dex_compilation_unit.GetDexFile())
+ << ": " << number_of_dex_instructions << " dex instructions";
+ return true;
+ }
+
+ return false;
+}
+
HGraph* HGraphBuilder::BuildGraph(const DexFile::CodeItem& code_item) {
const uint16_t* code_ptr = code_item.insns_;
const uint16_t* code_end = code_item.insns_ + code_item.insns_size_in_code_units_;
@@ -171,9 +202,27 @@ HGraph* HGraphBuilder::BuildGraph(const DexFile::CodeItem& code_item) {
InitializeLocals(code_item.registers_size_);
graph_->UpdateMaximumNumberOfOutVRegs(code_item.outs_size_);
+ // Compute the number of dex instructions, blocks, and branches. We will
+ // check these values against limits given to the compiler.
+ size_t number_of_dex_instructions = 0;
+ size_t number_of_blocks = 0;
+ size_t number_of_branches = 0;
+
// To avoid splitting blocks, we compute ahead of time the instructions that
// start a new block, and create these blocks.
- ComputeBranchTargets(code_ptr, code_end);
+ ComputeBranchTargets(
+ code_ptr, code_end, &number_of_dex_instructions, &number_of_blocks, &number_of_branches);
+
+ // Note that the compiler driver is null when unit testing.
+ if (compiler_driver_ != nullptr) {
+ if (ShouldSkipCompilation(*compiler_driver_,
+ *dex_compilation_unit_,
+ number_of_dex_instructions,
+ number_of_blocks,
+ number_of_branches)) {
+ return nullptr;
+ }
+ }
// Also create blocks for catch handlers.
if (code_item.tries_size_ != 0) {
@@ -232,7 +281,11 @@ void HGraphBuilder::MaybeUpdateCurrentBlock(size_t index) {
current_block_ = block;
}
-void HGraphBuilder::ComputeBranchTargets(const uint16_t* code_ptr, const uint16_t* code_end) {
+void HGraphBuilder::ComputeBranchTargets(const uint16_t* code_ptr,
+ const uint16_t* code_end,
+ size_t* number_of_dex_instructions,
+ size_t* number_of_blocks,
+ size_t* number_of_branches) {
// TODO: Support switch instructions.
branch_targets_.SetSize(code_end - code_ptr);
@@ -245,19 +298,23 @@ void HGraphBuilder::ComputeBranchTargets(const uint16_t* code_ptr, const uint16_
// the locations these instructions branch to.
size_t dex_pc = 0;
while (code_ptr < code_end) {
+ (*number_of_dex_instructions)++;
const Instruction& instruction = *Instruction::At(code_ptr);
if (instruction.IsBranch()) {
+ (*number_of_branches)++;
int32_t target = instruction.GetTargetOffset() + dex_pc;
// Create a block for the target instruction.
if (FindBlockStartingAt(target) == nullptr) {
block = new (arena_) HBasicBlock(graph_, target);
branch_targets_.Put(target, block);
+ (*number_of_blocks)++;
}
dex_pc += instruction.SizeInCodeUnits();
code_ptr += instruction.SizeInCodeUnits();
if ((code_ptr < code_end) && (FindBlockStartingAt(dex_pc) == nullptr)) {
block = new (arena_) HBasicBlock(graph_, dex_pc);
branch_targets_.Put(dex_pc, block);
+ (*number_of_blocks)++;
}
} else {
code_ptr += instruction.SizeInCodeUnits();
@@ -313,6 +370,15 @@ void HGraphBuilder::Binop_23x_shift(const Instruction& instruction,
UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
}
+void HGraphBuilder::Binop_23x_cmp(const Instruction& instruction,
+ Primitive::Type type,
+ HCompare::Bias bias) {
+ HInstruction* first = LoadLocal(instruction.VRegB(), type);
+ HInstruction* second = LoadLocal(instruction.VRegC(), type);
+ current_block_->AddInstruction(new (arena_) HCompare(type, first, second, bias));
+ UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
+}
+
template<typename T>
void HGraphBuilder::Binop_12x(const Instruction& instruction, Primitive::Type type) {
HInstruction* first = LoadLocal(instruction.VRegA(), type);
@@ -414,38 +480,36 @@ bool HGraphBuilder::BuildInvoke(const Instruction& instruction,
bool is_instance_call = invoke_type != kStatic;
const size_t number_of_arguments = strlen(descriptor) - (is_instance_call ? 0 : 1);
- HInvoke* invoke = nullptr;
- if (invoke_type == kVirtual || invoke_type == kInterface || invoke_type == kSuper) {
- MethodReference target_method(dex_file_, method_idx);
- uintptr_t direct_code;
- uintptr_t direct_method;
- int table_index;
- InvokeType optimized_invoke_type = invoke_type;
- compiler_driver_->ComputeInvokeInfo(dex_compilation_unit_, dex_pc, true, true,
- &optimized_invoke_type, &target_method, &table_index,
- &direct_code, &direct_method);
- if (table_index == -1) {
- return false;
- }
+ MethodReference target_method(dex_file_, method_idx);
+ uintptr_t direct_code;
+ uintptr_t direct_method;
+ int table_index;
+ InvokeType optimized_invoke_type = invoke_type;
+
+ if (!compiler_driver_->ComputeInvokeInfo(dex_compilation_unit_, dex_pc, true, true,
+ &optimized_invoke_type, &target_method, &table_index,
+ &direct_code, &direct_method)) {
+ LOG(INFO) << "Did not compile " << PrettyMethod(method_idx, *dex_file_)
+ << " because a method call could not be resolved";
+ return false;
+ }
+ DCHECK(optimized_invoke_type != kSuper);
- if (optimized_invoke_type == kVirtual) {
- invoke = new (arena_) HInvokeVirtual(
- arena_, number_of_arguments, return_type, dex_pc, table_index);
- } else if (optimized_invoke_type == kInterface) {
- invoke = new (arena_) HInvokeInterface(
- arena_, number_of_arguments, return_type, dex_pc, method_idx, table_index);
- } else if (optimized_invoke_type == kDirect) {
- // For this compiler, sharpening only works if we compile PIC.
- DCHECK(compiler_driver_->GetCompilerOptions().GetCompilePic());
- // Treat invoke-direct like static calls for now.
- invoke = new (arena_) HInvokeStatic(
- arena_, number_of_arguments, return_type, dex_pc, target_method.dex_method_index);
- }
+ HInvoke* invoke = nullptr;
+ if (optimized_invoke_type == kVirtual) {
+ invoke = new (arena_) HInvokeVirtual(
+ arena_, number_of_arguments, return_type, dex_pc, table_index);
+ } else if (optimized_invoke_type == kInterface) {
+ invoke = new (arena_) HInvokeInterface(
+ arena_, number_of_arguments, return_type, dex_pc, method_idx, table_index);
} else {
- DCHECK(invoke_type == kDirect || invoke_type == kStatic);
+ DCHECK(optimized_invoke_type == kDirect || optimized_invoke_type == kStatic);
+ // Sharpening to kDirect only works if we compile PIC.
+ DCHECK((optimized_invoke_type == invoke_type) || (optimized_invoke_type != kDirect)
+ || compiler_driver_->GetCompilerOptions().GetCompilePic());
// Treat invoke-direct like static calls for now.
invoke = new (arena_) HInvokeStatic(
- arena_, number_of_arguments, return_type, dex_pc, method_idx);
+ arena_, number_of_arguments, return_type, dex_pc, target_method.dex_method_index);
}
size_t start_index = 0;
@@ -1034,6 +1098,21 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
break;
}
+ case Instruction::LONG_TO_FLOAT: {
+ Conversion_12x(instruction, Primitive::kPrimLong, Primitive::kPrimFloat);
+ break;
+ }
+
+ case Instruction::LONG_TO_DOUBLE: {
+ Conversion_12x(instruction, Primitive::kPrimLong, Primitive::kPrimDouble);
+ break;
+ }
+
+ case Instruction::FLOAT_TO_INT: {
+ Conversion_12x(instruction, Primitive::kPrimFloat, Primitive::kPrimInt);
+ break;
+ }
+
case Instruction::INT_TO_BYTE: {
Conversion_12x(instruction, Primitive::kPrimInt, Primitive::kPrimByte);
break;
@@ -1492,7 +1571,27 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
break;
case Instruction::CMP_LONG: {
- Binop_23x<HCompare>(instruction, Primitive::kPrimLong);
+ Binop_23x_cmp(instruction, Primitive::kPrimLong, HCompare::kNoBias);
+ break;
+ }
+
+ case Instruction::CMPG_FLOAT: {
+ Binop_23x_cmp(instruction, Primitive::kPrimFloat, HCompare::kGtBias);
+ break;
+ }
+
+ case Instruction::CMPG_DOUBLE: {
+ Binop_23x_cmp(instruction, Primitive::kPrimDouble, HCompare::kGtBias);
+ break;
+ }
+
+ case Instruction::CMPL_FLOAT: {
+ Binop_23x_cmp(instruction, Primitive::kPrimFloat, HCompare::kLtBias);
+ break;
+ }
+
+ case Instruction::CMPL_DOUBLE: {
+ Binop_23x_cmp(instruction, Primitive::kPrimDouble, HCompare::kLtBias);
break;
}
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index 204005daa6..8519bcba60 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -80,7 +80,13 @@ class HGraphBuilder : public ValueObject {
// Finds all instructions that start a new block, and populates branch_targets_ with
// the newly created blocks.
- void ComputeBranchTargets(const uint16_t* start, const uint16_t* end);
+ // As a side effect, also compute the number of dex instructions, blocks, and
+ // branches.
+ void ComputeBranchTargets(const uint16_t* start,
+ const uint16_t* end,
+ size_t* number_of_dex_instructions,
+                            size_t* number_of_blocks,
+ size_t* number_of_branches);
void MaybeUpdateCurrentBlock(size_t index);
HBasicBlock* FindBlockStartingAt(int32_t index) const;
@@ -107,6 +113,8 @@ class HGraphBuilder : public ValueObject {
template<typename T>
void Binop_23x_shift(const Instruction& instruction, Primitive::Type type);
+ void Binop_23x_cmp(const Instruction& instruction, Primitive::Type type, HCompare::Bias bias);
+
template<typename T>
void Binop_12x(const Instruction& instruction, Primitive::Type type);
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 0b593275c7..e581af22aa 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -71,11 +71,7 @@ void CodeGenerator::CompileBaseline(CodeAllocator* allocator, bool is_leaf) {
}
}
GenerateSlowPaths();
-
- size_t code_size = GetAssembler()->CodeSize();
- uint8_t* buffer = allocator->Allocate(code_size);
- MemoryRegion code(buffer, code_size);
- GetAssembler()->FinalizeInstructions(code);
+ Finalize(allocator);
}
void CodeGenerator::CompileOptimized(CodeAllocator* allocator) {
@@ -97,9 +93,13 @@ void CodeGenerator::CompileOptimized(CodeAllocator* allocator) {
}
}
GenerateSlowPaths();
+ Finalize(allocator);
+}
+void CodeGenerator::Finalize(CodeAllocator* allocator) {
size_t code_size = GetAssembler()->CodeSize();
uint8_t* buffer = allocator->Allocate(code_size);
+
MemoryRegion code(buffer, code_size);
GetAssembler()->FinalizeInstructions(code);
}
@@ -228,7 +228,8 @@ void CodeGenerator::AllocateRegistersLocally(HInstruction* instruction) const {
DCHECK(!blocked_fpu_registers_[loc.reg()]);
blocked_fpu_registers_[loc.reg()] = true;
} else {
- DCHECK_EQ(loc.GetPolicy(), Location::kRequiresRegister);
+ DCHECK(loc.GetPolicy() == Location::kRequiresRegister
+ || loc.GetPolicy() == Location::kRequiresFpuRegister);
}
}
@@ -259,10 +260,21 @@ void CodeGenerator::AllocateRegistersLocally(HInstruction* instruction) const {
for (size_t i = 0, e = locations->GetTempCount(); i < e; ++i) {
Location loc = locations->GetTemp(i);
if (loc.IsUnallocated()) {
- DCHECK_EQ(loc.GetPolicy(), Location::kRequiresRegister);
- // TODO: Adjust handling of temps. We currently consider temps to use
- // core registers. They may also use floating point registers at some point.
- loc = AllocateFreeRegister(Primitive::kPrimInt);
+ switch (loc.GetPolicy()) {
+ case Location::kRequiresRegister:
+ // Allocate a core register (large enough to fit a 32-bit integer).
+ loc = AllocateFreeRegister(Primitive::kPrimInt);
+ break;
+
+ case Location::kRequiresFpuRegister:
+          // Allocate an FPU register (large enough to fit a 64-bit double).
+ loc = AllocateFreeRegister(Primitive::kPrimDouble);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected policy for temporary location "
+ << loc.GetPolicy();
+ }
locations->SetTempAt(i, loc);
}
}
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index f906eb8c05..7c8f6a2d29 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -30,6 +30,14 @@ namespace art {
static size_t constexpr kVRegSize = 4;
static size_t constexpr kUninitializedFrameSize = 0;
+// Binary encoding of 2^32 for type double.
+static int64_t constexpr k2Pow32EncodingForDouble = INT64_C(0x41F0000000000000);
+// Binary encoding of 2^31 for type double.
+static int64_t constexpr k2Pow31EncodingForDouble = INT64_C(0x41E0000000000000);
+
+// Maximum value for a primitive integer.
+static int32_t constexpr kPrimIntMax = 0x7fffffff;
+
class Assembler;
class CodeGenerator;
class DexCompilationUnit;
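Those two constants are plain IEEE-754 double encodings, nothing ART-specific: 2^32 has sign 0, biased exponent 1023 + 32 = 1055 = 0x41F, and an all-zero mantissa, hence 0x41F0000000000000; 2^31 analogously gives 0x41E0000000000000. A quick self-contained check of the first one:

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  const double two_pow_32 = 4294967296.0;  // 2^32
  int64_t bits = 0;
  std::memcpy(&bits, &two_pow_32, sizeof(bits));  // reinterpret the double's bit pattern
  assert(bits == INT64_C(0x41F0000000000000));
  return 0;
}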
@@ -85,6 +93,7 @@ class CodeGenerator : public ArenaObject<kArenaAllocMisc> {
}
virtual void Initialize() = 0;
+ virtual void Finalize(CodeAllocator* allocator);
virtual void GenerateFrameEntry() = 0;
virtual void GenerateFrameExit() = 0;
virtual void Bind(HBasicBlock* block) = 0;
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 890cfdd0e6..448a5a0707 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -495,7 +495,8 @@ InstructionCodeGeneratorARM::InstructionCodeGeneratorARM(HGraph* graph, CodeGene
codegen_(codegen) {}
void CodeGeneratorARM::GenerateFrameEntry() {
- bool skip_overflow_check = IsLeafMethod() && !FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kArm);
+ bool skip_overflow_check =
+ IsLeafMethod() && !FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kArm);
if (!skip_overflow_check) {
if (kExplicitStackOverflowCheck) {
SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) StackOverflowCheckSlowPathARM();
@@ -655,26 +656,26 @@ void CodeGeneratorARM::Move32(Location destination, Location source) {
}
if (destination.IsRegister()) {
if (source.IsRegister()) {
- __ Mov(destination.As<Register>(), source.As<Register>());
+ __ Mov(destination.AsRegister<Register>(), source.AsRegister<Register>());
} else if (source.IsFpuRegister()) {
- __ vmovrs(destination.As<Register>(), source.As<SRegister>());
+ __ vmovrs(destination.AsRegister<Register>(), source.AsFpuRegister<SRegister>());
} else {
- __ LoadFromOffset(kLoadWord, destination.As<Register>(), SP, source.GetStackIndex());
+ __ LoadFromOffset(kLoadWord, destination.AsRegister<Register>(), SP, source.GetStackIndex());
}
} else if (destination.IsFpuRegister()) {
if (source.IsRegister()) {
- __ vmovsr(destination.As<SRegister>(), source.As<Register>());
+ __ vmovsr(destination.AsFpuRegister<SRegister>(), source.AsRegister<Register>());
} else if (source.IsFpuRegister()) {
- __ vmovs(destination.As<SRegister>(), source.As<SRegister>());
+ __ vmovs(destination.AsFpuRegister<SRegister>(), source.AsFpuRegister<SRegister>());
} else {
- __ LoadSFromOffset(destination.As<SRegister>(), SP, source.GetStackIndex());
+ __ LoadSFromOffset(destination.AsFpuRegister<SRegister>(), SP, source.GetStackIndex());
}
} else {
DCHECK(destination.IsStackSlot()) << destination;
if (source.IsRegister()) {
- __ StoreToOffset(kStoreWord, source.As<Register>(), SP, destination.GetStackIndex());
+ __ StoreToOffset(kStoreWord, source.AsRegister<Register>(), SP, destination.GetStackIndex());
} else if (source.IsFpuRegister()) {
- __ StoreSToOffset(source.As<SRegister>(), SP, destination.GetStackIndex());
+ __ StoreSToOffset(source.AsFpuRegister<SRegister>(), SP, destination.GetStackIndex());
} else {
DCHECK(source.IsStackSlot()) << source;
__ LoadFromOffset(kLoadWord, IP, SP, source.GetStackIndex());
@@ -689,19 +690,25 @@ void CodeGeneratorARM::Move64(Location destination, Location source) {
}
if (destination.IsRegisterPair()) {
if (source.IsRegisterPair()) {
- __ Mov(destination.AsRegisterPairLow<Register>(), source.AsRegisterPairLow<Register>());
- __ Mov(destination.AsRegisterPairHigh<Register>(), source.AsRegisterPairHigh<Register>());
+ EmitParallelMoves(
+ Location::RegisterLocation(source.AsRegisterPairHigh<Register>()),
+ Location::RegisterLocation(destination.AsRegisterPairHigh<Register>()),
+ Location::RegisterLocation(source.AsRegisterPairLow<Register>()),
+ Location::RegisterLocation(destination.AsRegisterPairLow<Register>()));
} else if (source.IsFpuRegister()) {
UNIMPLEMENTED(FATAL);
} else if (source.IsQuickParameter()) {
uint16_t register_index = source.GetQuickParameterRegisterIndex();
uint16_t stack_index = source.GetQuickParameterStackIndex();
InvokeDexCallingConvention calling_convention;
- __ Mov(destination.AsRegisterPairLow<Register>(),
- calling_convention.GetRegisterAt(register_index));
- __ LoadFromOffset(kLoadWord, destination.AsRegisterPairHigh<Register>(),
- SP, calling_convention.GetStackOffsetOf(stack_index + 1) + GetFrameSize());
+ EmitParallelMoves(
+ Location::RegisterLocation(calling_convention.GetRegisterAt(register_index)),
+ Location::RegisterLocation(destination.AsRegisterPairLow<Register>()),
+ Location::StackSlot(
+ calling_convention.GetStackOffsetOf(stack_index + 1) + GetFrameSize()),
+ Location::RegisterLocation(destination.AsRegisterPairHigh<Register>()));
} else {
+ // No conflict possible, so just do the moves.
DCHECK(source.IsDoubleStackSlot());
if (destination.AsRegisterPairLow<Register>() == R1) {
DCHECK_EQ(destination.AsRegisterPairHigh<Register>(), R2);
@@ -725,22 +732,21 @@ void CodeGeneratorARM::Move64(Location destination, Location source) {
uint16_t register_index = destination.GetQuickParameterRegisterIndex();
uint16_t stack_index = destination.GetQuickParameterStackIndex();
if (source.IsRegisterPair()) {
- __ Mov(calling_convention.GetRegisterAt(register_index),
- source.AsRegisterPairLow<Register>());
- __ StoreToOffset(kStoreWord, source.AsRegisterPairHigh<Register>(),
- SP, calling_convention.GetStackOffsetOf(stack_index + 1));
+ UNIMPLEMENTED(FATAL);
} else if (source.IsFpuRegister()) {
UNIMPLEMENTED(FATAL);
} else {
DCHECK(source.IsDoubleStackSlot());
- __ LoadFromOffset(
- kLoadWord, calling_convention.GetRegisterAt(register_index), SP, source.GetStackIndex());
- __ LoadFromOffset(kLoadWord, R0, SP, source.GetHighStackIndex(kArmWordSize));
- __ StoreToOffset(kStoreWord, R0, SP, calling_convention.GetStackOffsetOf(stack_index + 1));
+ EmitParallelMoves(
+ Location::StackSlot(source.GetStackIndex()),
+ Location::RegisterLocation(calling_convention.GetRegisterAt(register_index)),
+ Location::StackSlot(source.GetHighStackIndex(kArmWordSize)),
+ Location::StackSlot(calling_convention.GetStackOffsetOf(stack_index + 1)));
}
} else {
DCHECK(destination.IsDoubleStackSlot());
if (source.IsRegisterPair()) {
+ // No conflict possible, so just do the moves.
if (source.AsRegisterPairLow<Register>() == R1) {
DCHECK_EQ(source.AsRegisterPairHigh<Register>(), R2);
__ StoreToOffset(kStoreWord, R1, SP, destination.GetStackIndex());
@@ -753,21 +759,24 @@ void CodeGeneratorARM::Move64(Location destination, Location source) {
InvokeDexCallingConvention calling_convention;
uint16_t register_index = source.GetQuickParameterRegisterIndex();
uint16_t stack_index = source.GetQuickParameterStackIndex();
+      // Just move the low part. The only time a source is a quick parameter is
+      // when moving the parameter to its stack location, and the (Java) caller
+      // of this method has already stored the high part there.
__ StoreToOffset(kStoreWord, calling_convention.GetRegisterAt(register_index),
- SP, destination.GetStackIndex());
- __ LoadFromOffset(kLoadWord, R0,
- SP, calling_convention.GetStackOffsetOf(stack_index + 1) + GetFrameSize());
- __ StoreToOffset(kStoreWord, R0, SP, destination.GetHighStackIndex(kArmWordSize));
+ SP, destination.GetStackIndex());
+ DCHECK_EQ(calling_convention.GetStackOffsetOf(stack_index + 1) + GetFrameSize(),
+ static_cast<size_t>(destination.GetHighStackIndex(kArmWordSize)));
} else if (source.IsFpuRegisterPair()) {
__ StoreDToOffset(FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()),
SP,
destination.GetStackIndex());
} else {
DCHECK(source.IsDoubleStackSlot());
- __ LoadFromOffset(kLoadWord, IP, SP, source.GetStackIndex());
- __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
- __ LoadFromOffset(kLoadWord, IP, SP, source.GetHighStackIndex(kArmWordSize));
- __ StoreToOffset(kStoreWord, IP, SP, destination.GetHighStackIndex(kArmWordSize));
+ EmitParallelMoves(
+ Location::StackSlot(source.GetStackIndex()),
+ Location::StackSlot(destination.GetStackIndex()),
+ Location::StackSlot(source.GetHighStackIndex(kArmWordSize)),
+ Location::StackSlot(destination.GetHighStackIndex(kArmWordSize)));
}
}
}
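The switch to EmitParallelMoves in the pair cases above matters because the two halves of a register pair can overlap between source and destination; the parallel move resolver orders the individual word moves (or routes one through a temporary) so neither half is clobbered before it is read. A hedged illustration of the hazard, with plain ints standing in for core registers:

#include <cstdio>

int main() {
  // Moving the pair (low, high) = (r1, r2) into the pair (r2, r3).
  int r1 = 0x1111, r2 = 0x2222, r3 = 0;
  // A naive copy, low half first, would clobber the source high half:
  //   r2 = r1;   // destination low overwrites r2 while it still holds the high half
  //   r3 = r2;   // the high half then copies the wrong value
  // Dependency-ordered moves, as a parallel move resolver arranges them:
  r3 = r2;  // high half first, while r2 is still live
  r2 = r1;  // then the low half
  std::printf("low=%#x high=%#x\n", r2, r3);  // prints low=0x1111 high=0x2222
  return 0;
}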
@@ -783,7 +792,7 @@ void CodeGeneratorARM::Move(HInstruction* instruction, Location location, HInstr
if (const_to_move->IsIntConstant()) {
int32_t value = const_to_move->AsIntConstant()->GetValue();
if (location.IsRegister()) {
- __ LoadImmediate(location.As<Register>(), value);
+ __ LoadImmediate(location.AsRegister<Register>(), value);
} else {
DCHECK(location.IsStackSlot());
__ LoadImmediate(IP, value);
@@ -933,27 +942,27 @@ void InstructionCodeGeneratorARM::VisitIf(HIf* if_instr) {
if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
// Condition has been materialized, compare the output to 0
DCHECK(if_instr->GetLocations()->InAt(0).IsRegister());
- __ cmp(if_instr->GetLocations()->InAt(0).As<Register>(),
+ __ cmp(if_instr->GetLocations()->InAt(0).AsRegister<Register>(),
ShifterOperand(0));
__ b(codegen_->GetLabelOf(if_instr->IfTrueSuccessor()), NE);
} else {
// Condition has not been materialized, use its inputs as the
// comparison and its condition as the branch condition.
LocationSummary* locations = cond->GetLocations();
+ Register left = locations->InAt(0).AsRegister<Register>();
if (locations->InAt(1).IsRegister()) {
- __ cmp(locations->InAt(0).As<Register>(),
- ShifterOperand(locations->InAt(1).As<Register>()));
+ __ cmp(left, ShifterOperand(locations->InAt(1).AsRegister<Register>()));
} else {
DCHECK(locations->InAt(1).IsConstant());
int32_t value =
locations->InAt(1).GetConstant()->AsIntConstant()->GetValue();
ShifterOperand operand;
- if (ShifterOperand::CanHoldArm(value, &operand)) {
- __ cmp(locations->InAt(0).As<Register>(), ShifterOperand(value));
+ if (GetAssembler()->ShifterOperandCanHold(R0, left, CMP, value, &operand)) {
+ __ cmp(left, operand);
} else {
Register temp = IP;
__ LoadImmediate(temp, value);
- __ cmp(locations->InAt(0).As<Register>(), ShifterOperand(temp));
+ __ cmp(left, ShifterOperand(temp));
}
}
__ b(codegen_->GetLabelOf(if_instr->IfTrueSuccessor()),
@@ -979,27 +988,27 @@ void LocationsBuilderARM::VisitCondition(HCondition* comp) {
void InstructionCodeGeneratorARM::VisitCondition(HCondition* comp) {
if (!comp->NeedsMaterialization()) return;
-
LocationSummary* locations = comp->GetLocations();
+ Register left = locations->InAt(0).AsRegister<Register>();
+
if (locations->InAt(1).IsRegister()) {
- __ cmp(locations->InAt(0).As<Register>(),
- ShifterOperand(locations->InAt(1).As<Register>()));
+ __ cmp(left, ShifterOperand(locations->InAt(1).AsRegister<Register>()));
} else {
DCHECK(locations->InAt(1).IsConstant());
int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue();
ShifterOperand operand;
- if (ShifterOperand::CanHoldArm(value, &operand)) {
- __ cmp(locations->InAt(0).As<Register>(), ShifterOperand(value));
+ if (GetAssembler()->ShifterOperandCanHold(R0, left, CMP, value, &operand)) {
+ __ cmp(left, operand);
} else {
Register temp = IP;
__ LoadImmediate(temp, value);
- __ cmp(locations->InAt(0).As<Register>(), ShifterOperand(temp));
+ __ cmp(left, ShifterOperand(temp));
}
}
__ it(ARMCondition(comp->GetCondition()), kItElse);
- __ mov(locations->Out().As<Register>(), ShifterOperand(1),
+ __ mov(locations->Out().AsRegister<Register>(), ShifterOperand(1),
ARMCondition(comp->GetCondition()));
- __ mov(locations->Out().As<Register>(), ShifterOperand(0),
+ __ mov(locations->Out().AsRegister<Register>(), ShifterOperand(0),
ARMOppositeCondition(comp->GetCondition()));
}
@@ -1169,7 +1178,7 @@ void CodeGeneratorARM::LoadCurrentMethod(Register reg) {
}
void InstructionCodeGeneratorARM::VisitInvokeStatic(HInvokeStatic* invoke) {
- Register temp = invoke->GetLocations()->GetTemp(0).As<Register>();
+ Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
// TODO: Implement all kinds of calls:
// 1) boot -> boot
@@ -1216,7 +1225,7 @@ void LocationsBuilderARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
}
void InstructionCodeGeneratorARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
- Register temp = invoke->GetLocations()->GetTemp(0).As<Register>();
+ Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
uint32_t method_offset = mirror::Class::EmbeddedVTableOffset().Uint32Value() +
invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry);
LocationSummary* locations = invoke->GetLocations();
@@ -1227,7 +1236,7 @@ void InstructionCodeGeneratorARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
__ LoadFromOffset(kLoadWord, temp, SP, receiver.GetStackIndex());
__ LoadFromOffset(kLoadWord, temp, temp, class_offset);
} else {
- __ LoadFromOffset(kLoadWord, temp, receiver.As<Register>(), class_offset);
+ __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
}
// temp = temp->GetMethodAt(method_offset);
uint32_t entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
@@ -1249,7 +1258,7 @@ void LocationsBuilderARM::VisitInvokeInterface(HInvokeInterface* invoke) {
void InstructionCodeGeneratorARM::VisitInvokeInterface(HInvokeInterface* invoke) {
// TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
- Register temp = invoke->GetLocations()->GetTemp(0).As<Register>();
+ Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
uint32_t method_offset = mirror::Class::EmbeddedImTableOffset().Uint32Value() +
(invoke->GetImtIndex() % mirror::Class::kImtSize) * sizeof(mirror::Class::ImTableEntry);
LocationSummary* locations = invoke->GetLocations();
@@ -1257,14 +1266,15 @@ void InstructionCodeGeneratorARM::VisitInvokeInterface(HInvokeInterface* invoke)
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
// Set the hidden argument.
- __ LoadImmediate(invoke->GetLocations()->GetTemp(1).As<Register>(), invoke->GetDexMethodIndex());
+ __ LoadImmediate(invoke->GetLocations()->GetTemp(1).AsRegister<Register>(),
+ invoke->GetDexMethodIndex());
// temp = object->GetClass();
if (receiver.IsStackSlot()) {
__ LoadFromOffset(kLoadWord, temp, SP, receiver.GetStackIndex());
__ LoadFromOffset(kLoadWord, temp, temp, class_offset);
} else {
- __ LoadFromOffset(kLoadWord, temp, receiver.As<Register>(), class_offset);
+ __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
}
// temp = temp->GetImtEntryAt(method_offset);
uint32_t entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
@@ -1308,7 +1318,7 @@ void InstructionCodeGeneratorARM::VisitNeg(HNeg* neg) {
switch (neg->GetResultType()) {
case Primitive::kPrimInt:
DCHECK(in.IsRegister());
- __ rsb(out.As<Register>(), in.As<Register>(), ShifterOperand(0));
+ __ rsb(out.AsRegister<Register>(), in.AsRegister<Register>(), ShifterOperand(0));
break;
case Primitive::kPrimLong:
@@ -1334,7 +1344,7 @@ void InstructionCodeGeneratorARM::VisitNeg(HNeg* neg) {
case Primitive::kPrimFloat:
DCHECK(in.IsFpuRegister());
- __ vnegs(out.As<SRegister>(), in.As<SRegister>());
+ __ vnegs(out.AsFpuRegister<SRegister>(), in.AsFpuRegister<SRegister>());
break;
case Primitive::kPrimDouble:
@@ -1353,6 +1363,7 @@ void LocationsBuilderARM::VisitTypeConversion(HTypeConversion* conversion) {
new (GetGraph()->GetArena()) LocationSummary(conversion, LocationSummary::kNoCall);
Primitive::Type result_type = conversion->GetResultType();
Primitive::Type input_type = conversion->GetInputType();
+ DCHECK_NE(result_type, input_type);
switch (result_type) {
case Primitive::kPrimByte:
switch (input_type) {
@@ -1395,6 +1406,12 @@ void LocationsBuilderARM::VisitTypeConversion(HTypeConversion* conversion) {
break;
case Primitive::kPrimFloat:
+ // Processing a Dex `float-to-int' instruction.
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresFpuRegister());
+ break;
+
case Primitive::kPrimDouble:
LOG(FATAL) << "Type conversion from " << input_type
<< " to " << result_type << " not yet implemented";
@@ -1434,7 +1451,6 @@ void LocationsBuilderARM::VisitTypeConversion(HTypeConversion* conversion) {
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
- case Primitive::kPrimChar:
// Processing a Dex `int-to-char' instruction.
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
@@ -1458,6 +1474,15 @@ void LocationsBuilderARM::VisitTypeConversion(HTypeConversion* conversion) {
break;
case Primitive::kPrimLong:
+ // Processing a Dex `long-to-float' instruction.
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresFpuRegister());
+ locations->AddTemp(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresFpuRegister());
+ locations->AddTemp(Location::RequiresFpuRegister());
+ break;
+
case Primitive::kPrimDouble:
LOG(FATAL) << "Type conversion from " << input_type
<< " to " << result_type << " not yet implemented";
@@ -1481,6 +1506,14 @@ void LocationsBuilderARM::VisitTypeConversion(HTypeConversion* conversion) {
break;
case Primitive::kPrimLong:
+ // Processing a Dex `long-to-double' instruction.
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresFpuRegister());
+ locations->AddTemp(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresFpuRegister());
+ break;
+
case Primitive::kPrimFloat:
LOG(FATAL) << "Type conversion from " << input_type
<< " to " << result_type << " not yet implemented";
@@ -1504,6 +1537,7 @@ void InstructionCodeGeneratorARM::VisitTypeConversion(HTypeConversion* conversio
Location in = locations->InAt(0);
Primitive::Type result_type = conversion->GetResultType();
Primitive::Type input_type = conversion->GetInputType();
+ DCHECK_NE(result_type, input_type);
switch (result_type) {
case Primitive::kPrimByte:
switch (input_type) {
@@ -1511,7 +1545,7 @@ void InstructionCodeGeneratorARM::VisitTypeConversion(HTypeConversion* conversio
case Primitive::kPrimInt:
case Primitive::kPrimChar:
// Processing a Dex `int-to-byte' instruction.
- __ sbfx(out.As<Register>(), in.As<Register>(), 0, 8);
+ __ sbfx(out.AsRegister<Register>(), in.AsRegister<Register>(), 0, 8);
break;
default:
@@ -1526,7 +1560,7 @@ void InstructionCodeGeneratorARM::VisitTypeConversion(HTypeConversion* conversio
case Primitive::kPrimInt:
case Primitive::kPrimChar:
// Processing a Dex `int-to-short' instruction.
- __ sbfx(out.As<Register>(), in.As<Register>(), 0, 16);
+ __ sbfx(out.AsRegister<Register>(), in.AsRegister<Register>(), 0, 16);
break;
default:
@@ -1541,18 +1575,26 @@ void InstructionCodeGeneratorARM::VisitTypeConversion(HTypeConversion* conversio
// Processing a Dex `long-to-int' instruction.
DCHECK(out.IsRegister());
if (in.IsRegisterPair()) {
- __ Mov(out.As<Register>(), in.AsRegisterPairLow<Register>());
+ __ Mov(out.AsRegister<Register>(), in.AsRegisterPairLow<Register>());
} else if (in.IsDoubleStackSlot()) {
- __ LoadFromOffset(kLoadWord, out.As<Register>(), SP, in.GetStackIndex());
+ __ LoadFromOffset(kLoadWord, out.AsRegister<Register>(), SP, in.GetStackIndex());
} else {
DCHECK(in.IsConstant());
DCHECK(in.GetConstant()->IsLongConstant());
int64_t value = in.GetConstant()->AsLongConstant()->GetValue();
- __ LoadImmediate(out.As<Register>(), static_cast<int32_t>(value));
+ __ LoadImmediate(out.AsRegister<Register>(), static_cast<int32_t>(value));
}
break;
- case Primitive::kPrimFloat:
+ case Primitive::kPrimFloat: {
+ // Processing a Dex `float-to-int' instruction.
+ SRegister temp = locations->GetTemp(0).AsFpuRegisterPairLow<SRegister>();
+ __ vmovs(temp, in.AsFpuRegister<SRegister>());
+ __ vcvtis(temp, temp);
+ __ vmovrs(out.AsRegister<Register>(), temp);
+ break;
+ }
+
case Primitive::kPrimDouble:
LOG(FATAL) << "Type conversion from " << input_type
<< " to " << result_type << " not yet implemented";
@@ -1573,7 +1615,7 @@ void InstructionCodeGeneratorARM::VisitTypeConversion(HTypeConversion* conversio
// Processing a Dex `int-to-long' instruction.
DCHECK(out.IsRegisterPair());
DCHECK(in.IsRegister());
- __ Mov(out.AsRegisterPairLow<Register>(), in.As<Register>());
+ __ Mov(out.AsRegisterPairLow<Register>(), in.AsRegister<Register>());
// Sign extension.
__ Asr(out.AsRegisterPairHigh<Register>(),
out.AsRegisterPairLow<Register>(),
@@ -1597,9 +1639,8 @@ void InstructionCodeGeneratorARM::VisitTypeConversion(HTypeConversion* conversio
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
- case Primitive::kPrimChar:
// Processing a Dex `int-to-char' instruction.
- __ ubfx(out.As<Register>(), in.As<Register>(), 0, 16);
+ __ ubfx(out.AsRegister<Register>(), in.AsRegister<Register>(), 0, 16);
break;
default:
@@ -1615,12 +1656,53 @@ void InstructionCodeGeneratorARM::VisitTypeConversion(HTypeConversion* conversio
case Primitive::kPrimInt:
case Primitive::kPrimChar: {
// Processing a Dex `int-to-float' instruction.
- __ vmovsr(out.As<SRegister>(), in.As<Register>());
- __ vcvtsi(out.As<SRegister>(), out.As<SRegister>());
+ __ vmovsr(out.AsFpuRegister<SRegister>(), in.AsRegister<Register>());
+ __ vcvtsi(out.AsFpuRegister<SRegister>(), out.AsFpuRegister<SRegister>());
+ break;
+ }
+
+ case Primitive::kPrimLong: {
+ // Processing a Dex `long-to-float' instruction.
+ Register low = in.AsRegisterPairLow<Register>();
+ Register high = in.AsRegisterPairHigh<Register>();
+ SRegister output = out.AsFpuRegister<SRegister>();
+ Register constant_low = locations->GetTemp(0).AsRegister<Register>();
+ Register constant_high = locations->GetTemp(1).AsRegister<Register>();
+ SRegister temp1_s = locations->GetTemp(2).AsFpuRegisterPairLow<SRegister>();
+ DRegister temp1_d = FromLowSToD(temp1_s);
+ SRegister temp2_s = locations->GetTemp(3).AsFpuRegisterPairLow<SRegister>();
+ DRegister temp2_d = FromLowSToD(temp2_s);
+
+ // Operations use doubles for precision reasons (each 32-bit
+ // half of a long fits in the 53-bit mantissa of a double,
+ // but not in the 24-bit mantissa of a float). This is
+ // especially important for the low bits. The result is
+ // eventually converted to float.
+
+ // temp1_d = int-to-double(high)
+ __ vmovsr(temp1_s, high);
+ __ vcvtdi(temp1_d, temp1_s);
+ // Using vmovd to load the `k2Pow32EncodingForDouble` constant
+ // as an immediate value into `temp2_d` does not work, as
+ // this instruction only transfers 8 significant bits of its
+ // immediate operand. Instead, use two 32-bit core
+ // registers to load `k2Pow32EncodingForDouble` into
+ // `temp2_d`.
+ __ LoadImmediate(constant_low, Low32Bits(k2Pow32EncodingForDouble));
+ __ LoadImmediate(constant_high, High32Bits(k2Pow32EncodingForDouble));
+ __ vmovdrr(temp2_d, constant_low, constant_high);
+ // temp1_d = temp1_d * 2^32
+ __ vmuld(temp1_d, temp1_d, temp2_d);
+ // temp2_d = unsigned-to-double(low)
+ __ vmovsr(temp2_s, low);
+ __ vcvtdu(temp2_d, temp2_s);
+ // temp1_d = temp1_d + temp2_d
+ __ vaddd(temp1_d, temp1_d, temp2_d);
+ // output = double-to-float(temp1_d);
+ __ vcvtsd(output, temp1_d);
break;
}
- case Primitive::kPrimLong:
case Primitive::kPrimDouble:
LOG(FATAL) << "Type conversion from " << input_type
<< " to " << result_type << " not yet implemented";
@@ -1639,13 +1721,44 @@ void InstructionCodeGeneratorARM::VisitTypeConversion(HTypeConversion* conversio
case Primitive::kPrimInt:
case Primitive::kPrimChar: {
// Processing a Dex `int-to-double' instruction.
- __ vmovsr(out.AsFpuRegisterPairLow<SRegister>(), in.As<Register>());
+ __ vmovsr(out.AsFpuRegisterPairLow<SRegister>(), in.AsRegister<Register>());
__ vcvtdi(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
out.AsFpuRegisterPairLow<SRegister>());
break;
}
- case Primitive::kPrimLong:
+ case Primitive::kPrimLong: {
+ // Processing a Dex `long-to-double' instruction.
+ Register low = in.AsRegisterPairLow<Register>();
+ Register high = in.AsRegisterPairHigh<Register>();
+ SRegister out_s = out.AsFpuRegisterPairLow<SRegister>();
+ DRegister out_d = FromLowSToD(out_s);
+ Register constant_low = locations->GetTemp(0).AsRegister<Register>();
+ Register constant_high = locations->GetTemp(1).AsRegister<Register>();
+ SRegister temp_s = locations->GetTemp(2).AsFpuRegisterPairLow<SRegister>();
+ DRegister temp_d = FromLowSToD(temp_s);
+
+ // out_d = int-to-double(high)
+ __ vmovsr(out_s, high);
+ __ vcvtdi(out_d, out_s);
+ // Using vmovd to load the `k2Pow32EncodingForDouble` constant
+ // as an immediate value into `temp_d` does not work, as
+ // this instruction only transfers 8 significant bits of its
+ // immediate operand. Instead, use two 32-bit core
+ // registers to load `k2Pow32EncodingForDouble` into `temp_d`.
+ __ LoadImmediate(constant_low, Low32Bits(k2Pow32EncodingForDouble));
+ __ LoadImmediate(constant_high, High32Bits(k2Pow32EncodingForDouble));
+ __ vmovdrr(temp_d, constant_low, constant_high);
+ // out_d = out_d * 2^32
+ __ vmuld(out_d, out_d, temp_d);
+ // temp_d = unsigned-to-double(low)
+ __ vmovsr(temp_s, low);
+ __ vcvtdu(temp_d, temp_s);
+ // out_d = out_d + temp_d
+ __ vaddd(out_d, out_d, temp_d);
+ break;
+ }
+
case Primitive::kPrimFloat:
LOG(FATAL) << "Type conversion from " << input_type
<< " to " << result_type << " not yet implemented";
@@ -1697,10 +1810,12 @@ void InstructionCodeGeneratorARM::VisitAdd(HAdd* add) {
switch (add->GetResultType()) {
case Primitive::kPrimInt:
if (second.IsRegister()) {
- __ add(out.As<Register>(), first.As<Register>(), ShifterOperand(second.As<Register>()));
+ __ add(out.AsRegister<Register>(),
+ first.AsRegister<Register>(),
+ ShifterOperand(second.AsRegister<Register>()));
} else {
- __ AddConstant(out.As<Register>(),
- first.As<Register>(),
+ __ AddConstant(out.AsRegister<Register>(),
+ first.AsRegister<Register>(),
second.GetConstant()->AsIntConstant()->GetValue());
}
break;
@@ -1715,7 +1830,9 @@ void InstructionCodeGeneratorARM::VisitAdd(HAdd* add) {
break;
case Primitive::kPrimFloat:
- __ vadds(out.As<SRegister>(), first.As<SRegister>(), second.As<SRegister>());
+ __ vadds(out.AsFpuRegister<SRegister>(),
+ first.AsFpuRegister<SRegister>(),
+ second.AsFpuRegister<SRegister>());
break;
case Primitive::kPrimDouble:
@@ -1761,10 +1878,12 @@ void InstructionCodeGeneratorARM::VisitSub(HSub* sub) {
switch (sub->GetResultType()) {
case Primitive::kPrimInt: {
if (second.IsRegister()) {
- __ sub(out.As<Register>(), first.As<Register>(), ShifterOperand(second.As<Register>()));
+ __ sub(out.AsRegister<Register>(),
+ first.AsRegister<Register>(),
+ ShifterOperand(second.AsRegister<Register>()));
} else {
- __ AddConstant(out.As<Register>(),
- first.As<Register>(),
+ __ AddConstant(out.AsRegister<Register>(),
+ first.AsRegister<Register>(),
-second.GetConstant()->AsIntConstant()->GetValue());
}
break;
@@ -1781,7 +1900,9 @@ void InstructionCodeGeneratorARM::VisitSub(HSub* sub) {
}
case Primitive::kPrimFloat: {
- __ vsubs(out.As<SRegister>(), first.As<SRegister>(), second.As<SRegister>());
+ __ vsubs(out.AsFpuRegister<SRegister>(),
+ first.AsFpuRegister<SRegister>(),
+ second.AsFpuRegister<SRegister>());
break;
}
@@ -1830,7 +1951,9 @@ void InstructionCodeGeneratorARM::VisitMul(HMul* mul) {
Location second = locations->InAt(1);
switch (mul->GetResultType()) {
case Primitive::kPrimInt: {
- __ mul(out.As<Register>(), first.As<Register>(), second.As<Register>());
+ __ mul(out.AsRegister<Register>(),
+ first.AsRegister<Register>(),
+ second.AsRegister<Register>());
break;
}
case Primitive::kPrimLong: {
@@ -1865,7 +1988,9 @@ void InstructionCodeGeneratorARM::VisitMul(HMul* mul) {
}
case Primitive::kPrimFloat: {
- __ vmuls(out.As<SRegister>(), first.As<SRegister>(), second.As<SRegister>());
+ __ vmuls(out.AsFpuRegister<SRegister>(),
+ first.AsFpuRegister<SRegister>(),
+ second.AsFpuRegister<SRegister>());
break;
}
@@ -1925,7 +2050,9 @@ void InstructionCodeGeneratorARM::VisitDiv(HDiv* div) {
switch (div->GetResultType()) {
case Primitive::kPrimInt: {
- __ sdiv(out.As<Register>(), first.As<Register>(), second.As<Register>());
+ __ sdiv(out.AsRegister<Register>(),
+ first.AsRegister<Register>(),
+ second.AsRegister<Register>());
break;
}
@@ -1943,7 +2070,9 @@ void InstructionCodeGeneratorARM::VisitDiv(HDiv* div) {
}
case Primitive::kPrimFloat: {
- __ vdivs(out.As<SRegister>(), first.As<SRegister>(), second.As<SRegister>());
+ __ vdivs(out.AsFpuRegister<SRegister>(),
+ first.AsFpuRegister<SRegister>(),
+ second.AsFpuRegister<SRegister>());
break;
}
@@ -2002,16 +2131,16 @@ void InstructionCodeGeneratorARM::VisitRem(HRem* rem) {
switch (rem->GetResultType()) {
case Primitive::kPrimInt: {
- Register reg1 = first.As<Register>();
- Register reg2 = second.As<Register>();
- Register temp = locations->GetTemp(0).As<Register>();
+ Register reg1 = first.AsRegister<Register>();
+ Register reg2 = second.AsRegister<Register>();
+ Register temp = locations->GetTemp(0).AsRegister<Register>();
// temp = reg1 / reg2 (integer division)
// temp = temp * reg2
// dest = reg1 - temp
__ sdiv(temp, reg1, reg2);
__ mul(temp, temp, reg2);
- __ sub(out.As<Register>(), reg1, ShifterOperand(temp));
+ __ sub(out.AsRegister<Register>(), reg1, ShifterOperand(temp));
break;
}
@@ -2058,7 +2187,7 @@ void InstructionCodeGeneratorARM::VisitDivZeroCheck(HDivZeroCheck* instruction)
switch (instruction->GetType()) {
case Primitive::kPrimInt: {
if (value.IsRegister()) {
- __ cmp(value.As<Register>(), ShifterOperand(0));
+ __ cmp(value.AsRegister<Register>(), ShifterOperand(0));
__ b(slow_path->GetEntryLabel(), EQ);
} else {
DCHECK(value.IsConstant()) << value;
@@ -2127,11 +2256,11 @@ void InstructionCodeGeneratorARM::HandleShift(HBinaryOperation* op) {
Primitive::Type type = op->GetResultType();
switch (type) {
case Primitive::kPrimInt: {
- Register out_reg = out.As<Register>();
- Register first_reg = first.As<Register>();
+ Register out_reg = out.AsRegister<Register>();
+ Register first_reg = first.AsRegister<Register>();
// Arm doesn't mask the shift count so we need to do it ourselves.
if (second.IsRegister()) {
- Register second_reg = second.As<Register>();
+ Register second_reg = second.AsRegister<Register>();
__ and_(second_reg, second_reg, ShifterOperand(kMaxIntShiftValue));
if (op->IsShl()) {
__ Lsl(out_reg, first_reg, second_reg);
@@ -2160,7 +2289,7 @@ void InstructionCodeGeneratorARM::HandleShift(HBinaryOperation* op) {
InvokeRuntimeCallingConvention calling_convention;
DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegisterPairLow<Register>());
DCHECK_EQ(calling_convention.GetRegisterAt(1), first.AsRegisterPairHigh<Register>());
- DCHECK_EQ(calling_convention.GetRegisterAt(2), second.As<Register>());
+ DCHECK_EQ(calling_convention.GetRegisterAt(2), second.AsRegister<Register>());
DCHECK_EQ(R0, out.AsRegisterPairLow<Register>());
DCHECK_EQ(R2, out.AsRegisterPairHigh<Register>());
@@ -2270,11 +2399,11 @@ void InstructionCodeGeneratorARM::VisitNot(HNot* not_) {
Location in = locations->InAt(0);
switch (not_->InputAt(0)->GetType()) {
case Primitive::kPrimBoolean:
- __ eor(out.As<Register>(), in.As<Register>(), ShifterOperand(1));
+ __ eor(out.AsRegister<Register>(), in.AsRegister<Register>(), ShifterOperand(1));
break;
case Primitive::kPrimInt:
- __ mvn(out.As<Register>(), ShifterOperand(in.As<Register>()));
+ __ mvn(out.AsRegister<Register>(), ShifterOperand(in.AsRegister<Register>()));
break;
case Primitive::kPrimLong:
@@ -2292,44 +2421,72 @@ void InstructionCodeGeneratorARM::VisitNot(HNot* not_) {
void LocationsBuilderARM::VisitCompare(HCompare* compare) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
- locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ switch (compare->InputAt(0)->GetType()) {
+ case Primitive::kPrimLong: {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+ }
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresRegister());
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected type for compare operation " << compare->InputAt(0)->GetType();
+ }
}
void InstructionCodeGeneratorARM::VisitCompare(HCompare* compare) {
LocationSummary* locations = compare->GetLocations();
- switch (compare->InputAt(0)->GetType()) {
+ Register out = locations->Out().AsRegister<Register>();
+ Location left = locations->InAt(0);
+ Location right = locations->InAt(1);
+
+ Label less, greater, done;
+ Primitive::Type type = compare->InputAt(0)->GetType();
+ switch (type) {
case Primitive::kPrimLong: {
- Register output = locations->Out().As<Register>();
- Location left = locations->InAt(0);
- Location right = locations->InAt(1);
- Label less, greater, done;
__ cmp(left.AsRegisterPairHigh<Register>(),
ShifterOperand(right.AsRegisterPairHigh<Register>())); // Signed compare.
__ b(&less, LT);
__ b(&greater, GT);
- // Do LoadImmediate before any `cmp`, as LoadImmediate might affect
- // the status flags.
- __ LoadImmediate(output, 0);
+ // Do LoadImmediate before any `cmp`, as LoadImmediate might affect the status flags.
+ __ LoadImmediate(out, 0);
__ cmp(left.AsRegisterPairLow<Register>(),
ShifterOperand(right.AsRegisterPairLow<Register>())); // Unsigned compare.
- __ b(&done, EQ);
- __ b(&less, CC);
-
- __ Bind(&greater);
- __ LoadImmediate(output, 1);
- __ b(&done);
-
- __ Bind(&less);
- __ LoadImmediate(output, -1);
-
- __ Bind(&done);
+ break;
+ }
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ __ LoadImmediate(out, 0);
+ if (type == Primitive::kPrimFloat) {
+ __ vcmps(left.AsFpuRegister<SRegister>(), right.AsFpuRegister<SRegister>());
+ } else {
+ __ vcmpd(FromLowSToD(left.AsFpuRegisterPairLow<SRegister>()),
+ FromLowSToD(right.AsFpuRegisterPairLow<SRegister>()));
+ }
+ __ vmstat(); // transfer FP status register to ARM APSR.
+ __ b(compare->IsGtBias() ? &greater : &less, VS); // VS for unordered.
break;
}
default:
- LOG(FATAL) << "Unimplemented compare type " << compare->InputAt(0)->GetType();
+ LOG(FATAL) << "Unexpected compare type " << type;
}
+ __ b(&done, EQ);
+ __ b(&less, CC); // CC is for both: unsigned compare for longs and 'less than' for floats.
+
+ __ Bind(&greater);
+ __ LoadImmediate(out, 1);
+ __ b(&done);
+
+ __ Bind(&less);
+ __ LoadImmediate(out, -1);
+
+ __ Bind(&done);
}
void LocationsBuilderARM::VisitPhi(HPhi* instruction) {
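For reference, the gt/lt bias chosen in the builder decides the result when a float/double compare is unordered (a NaN operand): cmpg-* yields +1 and cmpl-* yields -1, which is why the code above branches on VS (the unordered case after vmstat) straight to the matching label. A sketch of the Dex-level semantics being implemented:

// Dex cmpg/cmpl semantics for floating point: 0 if equal, -1 if less,
// +1 if greater; when either operand is NaN the bias picks the result.
int CompareFloat(float a, float b, bool gt_bias) {
  if (a == b) return 0;
  if (a < b) return -1;
  if (a > b) return 1;
  return gt_bias ? 1 : -1;  // unordered (NaN involved)
}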
@@ -2362,32 +2519,32 @@ void LocationsBuilderARM::VisitInstanceFieldSet(HInstanceFieldSet* instruction)
void InstructionCodeGeneratorARM::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
LocationSummary* locations = instruction->GetLocations();
- Register obj = locations->InAt(0).As<Register>();
+ Register obj = locations->InAt(0).AsRegister<Register>();
uint32_t offset = instruction->GetFieldOffset().Uint32Value();
Primitive::Type field_type = instruction->GetFieldType();
switch (field_type) {
case Primitive::kPrimBoolean:
case Primitive::kPrimByte: {
- Register value = locations->InAt(1).As<Register>();
+ Register value = locations->InAt(1).AsRegister<Register>();
__ StoreToOffset(kStoreByte, value, obj, offset);
break;
}
case Primitive::kPrimShort:
case Primitive::kPrimChar: {
- Register value = locations->InAt(1).As<Register>();
+ Register value = locations->InAt(1).AsRegister<Register>();
__ StoreToOffset(kStoreHalfword, value, obj, offset);
break;
}
case Primitive::kPrimInt:
case Primitive::kPrimNot: {
- Register value = locations->InAt(1).As<Register>();
+ Register value = locations->InAt(1).AsRegister<Register>();
__ StoreToOffset(kStoreWord, value, obj, offset);
if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->GetValue())) {
- Register temp = locations->GetTemp(0).As<Register>();
- Register card = locations->GetTemp(1).As<Register>();
+ Register temp = locations->GetTemp(0).AsRegister<Register>();
+ Register card = locations->GetTemp(1).AsRegister<Register>();
codegen_->MarkGCCard(temp, card, obj, value);
}
break;
@@ -2400,7 +2557,7 @@ void InstructionCodeGeneratorARM::VisitInstanceFieldSet(HInstanceFieldSet* instr
}
case Primitive::kPrimFloat: {
- SRegister value = locations->InAt(1).As<SRegister>();
+ SRegister value = locations->InAt(1).AsFpuRegister<SRegister>();
__ StoreSToOffset(value, obj, offset);
break;
}
@@ -2426,37 +2583,37 @@ void LocationsBuilderARM::VisitInstanceFieldGet(HInstanceFieldGet* instruction)
void InstructionCodeGeneratorARM::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
LocationSummary* locations = instruction->GetLocations();
- Register obj = locations->InAt(0).As<Register>();
+ Register obj = locations->InAt(0).AsRegister<Register>();
uint32_t offset = instruction->GetFieldOffset().Uint32Value();
switch (instruction->GetType()) {
case Primitive::kPrimBoolean: {
- Register out = locations->Out().As<Register>();
+ Register out = locations->Out().AsRegister<Register>();
__ LoadFromOffset(kLoadUnsignedByte, out, obj, offset);
break;
}
case Primitive::kPrimByte: {
- Register out = locations->Out().As<Register>();
+ Register out = locations->Out().AsRegister<Register>();
__ LoadFromOffset(kLoadSignedByte, out, obj, offset);
break;
}
case Primitive::kPrimShort: {
- Register out = locations->Out().As<Register>();
+ Register out = locations->Out().AsRegister<Register>();
__ LoadFromOffset(kLoadSignedHalfword, out, obj, offset);
break;
}
case Primitive::kPrimChar: {
- Register out = locations->Out().As<Register>();
+ Register out = locations->Out().AsRegister<Register>();
__ LoadFromOffset(kLoadUnsignedHalfword, out, obj, offset);
break;
}
case Primitive::kPrimInt:
case Primitive::kPrimNot: {
- Register out = locations->Out().As<Register>();
+ Register out = locations->Out().AsRegister<Register>();
__ LoadFromOffset(kLoadWord, out, obj, offset);
break;
}
@@ -2469,7 +2626,7 @@ void InstructionCodeGeneratorARM::VisitInstanceFieldGet(HInstanceFieldGet* instr
}
case Primitive::kPrimFloat: {
- SRegister out = locations->Out().As<SRegister>();
+ SRegister out = locations->Out().AsFpuRegister<SRegister>();
__ LoadSFromOffset(out, obj, offset);
break;
}
@@ -2503,7 +2660,7 @@ void InstructionCodeGeneratorARM::VisitNullCheck(HNullCheck* instruction) {
Location obj = locations->InAt(0);
if (obj.IsRegister()) {
- __ cmp(obj.As<Register>(), ShifterOperand(0));
+ __ cmp(obj.AsRegister<Register>(), ShifterOperand(0));
__ b(slow_path->GetEntryLabel(), EQ);
} else {
DCHECK(obj.IsConstant()) << obj;
@@ -2522,18 +2679,19 @@ void LocationsBuilderARM::VisitArrayGet(HArrayGet* instruction) {
void InstructionCodeGeneratorARM::VisitArrayGet(HArrayGet* instruction) {
LocationSummary* locations = instruction->GetLocations();
- Register obj = locations->InAt(0).As<Register>();
+ Register obj = locations->InAt(0).AsRegister<Register>();
Location index = locations->InAt(1);
switch (instruction->GetType()) {
case Primitive::kPrimBoolean: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
- Register out = locations->Out().As<Register>();
+ Register out = locations->Out().AsRegister<Register>();
if (index.IsConstant()) {
- size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
__ LoadFromOffset(kLoadUnsignedByte, out, obj, offset);
} else {
- __ add(IP, obj, ShifterOperand(index.As<Register>()));
+ __ add(IP, obj, ShifterOperand(index.AsRegister<Register>()));
__ LoadFromOffset(kLoadUnsignedByte, out, IP, data_offset);
}
break;
@@ -2541,12 +2699,13 @@ void InstructionCodeGeneratorARM::VisitArrayGet(HArrayGet* instruction) {
case Primitive::kPrimByte: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int8_t)).Uint32Value();
- Register out = locations->Out().As<Register>();
+ Register out = locations->Out().AsRegister<Register>();
if (index.IsConstant()) {
- size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
__ LoadFromOffset(kLoadSignedByte, out, obj, offset);
} else {
- __ add(IP, obj, ShifterOperand(index.As<Register>()));
+ __ add(IP, obj, ShifterOperand(index.AsRegister<Register>()));
__ LoadFromOffset(kLoadSignedByte, out, IP, data_offset);
}
break;
@@ -2554,12 +2713,13 @@ void InstructionCodeGeneratorARM::VisitArrayGet(HArrayGet* instruction) {
case Primitive::kPrimShort: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int16_t)).Uint32Value();
- Register out = locations->Out().As<Register>();
+ Register out = locations->Out().AsRegister<Register>();
if (index.IsConstant()) {
- size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
__ LoadFromOffset(kLoadSignedHalfword, out, obj, offset);
} else {
- __ add(IP, obj, ShifterOperand(index.As<Register>(), LSL, TIMES_2));
+ __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_2));
__ LoadFromOffset(kLoadSignedHalfword, out, IP, data_offset);
}
break;
@@ -2567,12 +2727,13 @@ void InstructionCodeGeneratorARM::VisitArrayGet(HArrayGet* instruction) {
case Primitive::kPrimChar: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
- Register out = locations->Out().As<Register>();
+ Register out = locations->Out().AsRegister<Register>();
if (index.IsConstant()) {
- size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
__ LoadFromOffset(kLoadUnsignedHalfword, out, obj, offset);
} else {
- __ add(IP, obj, ShifterOperand(index.As<Register>(), LSL, TIMES_2));
+ __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_2));
__ LoadFromOffset(kLoadUnsignedHalfword, out, IP, data_offset);
}
break;
@@ -2582,12 +2743,13 @@ void InstructionCodeGeneratorARM::VisitArrayGet(HArrayGet* instruction) {
case Primitive::kPrimNot: {
DCHECK_EQ(sizeof(mirror::HeapReference<mirror::Object>), sizeof(int32_t));
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
- Register out = locations->Out().As<Register>();
+ Register out = locations->Out().AsRegister<Register>();
if (index.IsConstant()) {
- size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
__ LoadFromOffset(kLoadWord, out, obj, offset);
} else {
- __ add(IP, obj, ShifterOperand(index.As<Register>(), LSL, TIMES_4));
+ __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
__ LoadFromOffset(kLoadWord, out, IP, data_offset);
}
break;
@@ -2597,10 +2759,11 @@ void InstructionCodeGeneratorARM::VisitArrayGet(HArrayGet* instruction) {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
Location out = locations->Out();
if (index.IsConstant()) {
- size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
__ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), obj, offset);
} else {
- __ add(IP, obj, ShifterOperand(index.As<Register>(), LSL, TIMES_8));
+ __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
__ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), IP, data_offset);
}
break;
@@ -2645,7 +2808,7 @@ void LocationsBuilderARM::VisitArraySet(HArraySet* instruction) {
void InstructionCodeGeneratorARM::VisitArraySet(HArraySet* instruction) {
LocationSummary* locations = instruction->GetLocations();
- Register obj = locations->InAt(0).As<Register>();
+ Register obj = locations->InAt(0).AsRegister<Register>();
Location index = locations->InAt(1);
Primitive::Type value_type = instruction->GetComponentType();
bool needs_runtime_call = locations->WillCall();
@@ -2656,12 +2819,13 @@ void InstructionCodeGeneratorARM::VisitArraySet(HArraySet* instruction) {
case Primitive::kPrimBoolean:
case Primitive::kPrimByte: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
- Register value = locations->InAt(2).As<Register>();
+ Register value = locations->InAt(2).AsRegister<Register>();
if (index.IsConstant()) {
- size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
__ StoreToOffset(kStoreByte, value, obj, offset);
} else {
- __ add(IP, obj, ShifterOperand(index.As<Register>()));
+ __ add(IP, obj, ShifterOperand(index.AsRegister<Register>()));
__ StoreToOffset(kStoreByte, value, IP, data_offset);
}
break;
@@ -2670,12 +2834,13 @@ void InstructionCodeGeneratorARM::VisitArraySet(HArraySet* instruction) {
case Primitive::kPrimShort:
case Primitive::kPrimChar: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
- Register value = locations->InAt(2).As<Register>();
+ Register value = locations->InAt(2).AsRegister<Register>();
if (index.IsConstant()) {
- size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
__ StoreToOffset(kStoreHalfword, value, obj, offset);
} else {
- __ add(IP, obj, ShifterOperand(index.As<Register>(), LSL, TIMES_2));
+ __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_2));
__ StoreToOffset(kStoreHalfword, value, IP, data_offset);
}
break;
@@ -2685,24 +2850,27 @@ void InstructionCodeGeneratorARM::VisitArraySet(HArraySet* instruction) {
case Primitive::kPrimNot: {
if (!needs_runtime_call) {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
- Register value = locations->InAt(2).As<Register>();
+ Register value = locations->InAt(2).AsRegister<Register>();
if (index.IsConstant()) {
- size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
__ StoreToOffset(kStoreWord, value, obj, offset);
} else {
DCHECK(index.IsRegister()) << index;
- __ add(IP, obj, ShifterOperand(index.As<Register>(), LSL, TIMES_4));
+ __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
__ StoreToOffset(kStoreWord, value, IP, data_offset);
}
if (needs_write_barrier) {
DCHECK_EQ(value_type, Primitive::kPrimNot);
- Register temp = locations->GetTemp(0).As<Register>();
- Register card = locations->GetTemp(1).As<Register>();
+ Register temp = locations->GetTemp(0).AsRegister<Register>();
+ Register card = locations->GetTemp(1).AsRegister<Register>();
codegen_->MarkGCCard(temp, card, obj, value);
}
} else {
DCHECK_EQ(value_type, Primitive::kPrimNot);
- codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject), instruction, instruction->GetDexPc());
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
+ instruction,
+ instruction->GetDexPc());
}
break;
}
@@ -2711,10 +2879,11 @@ void InstructionCodeGeneratorARM::VisitArraySet(HArraySet* instruction) {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
Location value = locations->InAt(2);
if (index.IsConstant()) {
- size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
__ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), obj, offset);
} else {
- __ add(IP, obj, ShifterOperand(index.As<Register>(), LSL, TIMES_8));
+ __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
__ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), IP, data_offset);
}
break;
@@ -2740,8 +2909,8 @@ void LocationsBuilderARM::VisitArrayLength(HArrayLength* instruction) {
void InstructionCodeGeneratorARM::VisitArrayLength(HArrayLength* instruction) {
LocationSummary* locations = instruction->GetLocations();
uint32_t offset = mirror::Array::LengthOffset().Uint32Value();
- Register obj = locations->InAt(0).As<Register>();
- Register out = locations->Out().As<Register>();
+ Register obj = locations->InAt(0).AsRegister<Register>();
+ Register out = locations->Out().AsRegister<Register>();
__ LoadFromOffset(kLoadWord, out, obj, offset);
}
@@ -2761,8 +2930,8 @@ void InstructionCodeGeneratorARM::VisitBoundsCheck(HBoundsCheck* instruction) {
instruction, locations->InAt(0), locations->InAt(1));
codegen_->AddSlowPath(slow_path);
- Register index = locations->InAt(0).As<Register>();
- Register length = locations->InAt(1).As<Register>();
+ Register index = locations->InAt(0).AsRegister<Register>();
+ Register length = locations->InAt(1).AsRegister<Register>();
__ cmp(index, ShifterOperand(length));
__ b(slow_path->GetEntryLabel(), CS);
@@ -2843,15 +3012,15 @@ void ParallelMoveResolverARM::EmitMove(size_t index) {
if (source.IsRegister()) {
if (destination.IsRegister()) {
- __ Mov(destination.As<Register>(), source.As<Register>());
+ __ Mov(destination.AsRegister<Register>(), source.AsRegister<Register>());
} else {
DCHECK(destination.IsStackSlot());
- __ StoreToOffset(kStoreWord, source.As<Register>(),
+ __ StoreToOffset(kStoreWord, source.AsRegister<Register>(),
SP, destination.GetStackIndex());
}
} else if (source.IsStackSlot()) {
if (destination.IsRegister()) {
- __ LoadFromOffset(kLoadWord, destination.As<Register>(),
+ __ LoadFromOffset(kLoadWord, destination.AsRegister<Register>(),
SP, source.GetStackIndex());
} else {
DCHECK(destination.IsStackSlot());
@@ -2863,7 +3032,7 @@ void ParallelMoveResolverARM::EmitMove(size_t index) {
DCHECK(source.GetConstant()->IsIntConstant());
int32_t value = source.GetConstant()->AsIntConstant()->GetValue();
if (destination.IsRegister()) {
- __ LoadImmediate(destination.As<Register>(), value);
+ __ LoadImmediate(destination.AsRegister<Register>(), value);
} else {
DCHECK(destination.IsStackSlot());
__ LoadImmediate(IP, value);
@@ -2895,15 +3064,15 @@ void ParallelMoveResolverARM::EmitSwap(size_t index) {
Location destination = move->GetDestination();
if (source.IsRegister() && destination.IsRegister()) {
- DCHECK_NE(source.As<Register>(), IP);
- DCHECK_NE(destination.As<Register>(), IP);
- __ Mov(IP, source.As<Register>());
- __ Mov(source.As<Register>(), destination.As<Register>());
- __ Mov(destination.As<Register>(), IP);
+ DCHECK_NE(source.AsRegister<Register>(), IP);
+ DCHECK_NE(destination.AsRegister<Register>(), IP);
+ __ Mov(IP, source.AsRegister<Register>());
+ __ Mov(source.AsRegister<Register>(), destination.AsRegister<Register>());
+ __ Mov(destination.AsRegister<Register>(), IP);
} else if (source.IsRegister() && destination.IsStackSlot()) {
- Exchange(source.As<Register>(), destination.GetStackIndex());
+ Exchange(source.AsRegister<Register>(), destination.GetStackIndex());
} else if (source.IsStackSlot() && destination.IsRegister()) {
- Exchange(destination.As<Register>(), source.GetStackIndex());
+ Exchange(destination.AsRegister<Register>(), source.GetStackIndex());
} else if (source.IsStackSlot() && destination.IsStackSlot()) {
Exchange(source.GetStackIndex(), destination.GetStackIndex());
} else {
@@ -2929,7 +3098,7 @@ void LocationsBuilderARM::VisitLoadClass(HLoadClass* cls) {
}
void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) {
- Register out = cls->GetLocations()->Out().As<Register>();
+ Register out = cls->GetLocations()->Out().AsRegister<Register>();
if (cls->IsReferrersClass()) {
DCHECK(!cls->CanCallRuntime());
DCHECK(!cls->MustGenerateClinitCheck());
@@ -2969,7 +3138,8 @@ void InstructionCodeGeneratorARM::VisitClinitCheck(HClinitCheck* check) {
SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM(
check->GetLoadClass(), check, check->GetDexPc(), true);
codegen_->AddSlowPath(slow_path);
- GenerateClassInitializationCheck(slow_path, check->GetLocations()->InAt(0).As<Register>());
+ GenerateClassInitializationCheck(slow_path,
+ check->GetLocations()->InAt(0).AsRegister<Register>());
}
void InstructionCodeGeneratorARM::GenerateClassInitializationCheck(
@@ -2992,37 +3162,37 @@ void LocationsBuilderARM::VisitStaticFieldGet(HStaticFieldGet* instruction) {
void InstructionCodeGeneratorARM::VisitStaticFieldGet(HStaticFieldGet* instruction) {
LocationSummary* locations = instruction->GetLocations();
- Register cls = locations->InAt(0).As<Register>();
+ Register cls = locations->InAt(0).AsRegister<Register>();
uint32_t offset = instruction->GetFieldOffset().Uint32Value();
switch (instruction->GetType()) {
case Primitive::kPrimBoolean: {
- Register out = locations->Out().As<Register>();
+ Register out = locations->Out().AsRegister<Register>();
__ LoadFromOffset(kLoadUnsignedByte, out, cls, offset);
break;
}
case Primitive::kPrimByte: {
- Register out = locations->Out().As<Register>();
+ Register out = locations->Out().AsRegister<Register>();
__ LoadFromOffset(kLoadSignedByte, out, cls, offset);
break;
}
case Primitive::kPrimShort: {
- Register out = locations->Out().As<Register>();
+ Register out = locations->Out().AsRegister<Register>();
__ LoadFromOffset(kLoadSignedHalfword, out, cls, offset);
break;
}
case Primitive::kPrimChar: {
- Register out = locations->Out().As<Register>();
+ Register out = locations->Out().AsRegister<Register>();
__ LoadFromOffset(kLoadUnsignedHalfword, out, cls, offset);
break;
}
case Primitive::kPrimInt:
case Primitive::kPrimNot: {
- Register out = locations->Out().As<Register>();
+ Register out = locations->Out().AsRegister<Register>();
__ LoadFromOffset(kLoadWord, out, cls, offset);
break;
}
@@ -3035,7 +3205,7 @@ void InstructionCodeGeneratorARM::VisitStaticFieldGet(HStaticFieldGet* instructi
}
case Primitive::kPrimFloat: {
- SRegister out = locations->Out().As<SRegister>();
+ SRegister out = locations->Out().AsFpuRegister<SRegister>();
__ LoadSFromOffset(out, cls, offset);
break;
}
@@ -3068,32 +3238,32 @@ void LocationsBuilderARM::VisitStaticFieldSet(HStaticFieldSet* instruction) {
void InstructionCodeGeneratorARM::VisitStaticFieldSet(HStaticFieldSet* instruction) {
LocationSummary* locations = instruction->GetLocations();
- Register cls = locations->InAt(0).As<Register>();
+ Register cls = locations->InAt(0).AsRegister<Register>();
uint32_t offset = instruction->GetFieldOffset().Uint32Value();
Primitive::Type field_type = instruction->GetFieldType();
switch (field_type) {
case Primitive::kPrimBoolean:
case Primitive::kPrimByte: {
- Register value = locations->InAt(1).As<Register>();
+ Register value = locations->InAt(1).AsRegister<Register>();
__ StoreToOffset(kStoreByte, value, cls, offset);
break;
}
case Primitive::kPrimShort:
case Primitive::kPrimChar: {
- Register value = locations->InAt(1).As<Register>();
+ Register value = locations->InAt(1).AsRegister<Register>();
__ StoreToOffset(kStoreHalfword, value, cls, offset);
break;
}
case Primitive::kPrimInt:
case Primitive::kPrimNot: {
- Register value = locations->InAt(1).As<Register>();
+ Register value = locations->InAt(1).AsRegister<Register>();
__ StoreToOffset(kStoreWord, value, cls, offset);
if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->GetValue())) {
- Register temp = locations->GetTemp(0).As<Register>();
- Register card = locations->GetTemp(1).As<Register>();
+ Register temp = locations->GetTemp(0).AsRegister<Register>();
+ Register card = locations->GetTemp(1).AsRegister<Register>();
codegen_->MarkGCCard(temp, card, cls, value);
}
break;
@@ -3106,7 +3276,7 @@ void InstructionCodeGeneratorARM::VisitStaticFieldSet(HStaticFieldSet* instructi
}
case Primitive::kPrimFloat: {
- SRegister value = locations->InAt(1).As<SRegister>();
+ SRegister value = locations->InAt(1).AsFpuRegister<SRegister>();
__ StoreSToOffset(value, cls, offset);
break;
}
@@ -3133,7 +3303,7 @@ void InstructionCodeGeneratorARM::VisitLoadString(HLoadString* load) {
SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathARM(load);
codegen_->AddSlowPath(slow_path);
- Register out = load->GetLocations()->Out().As<Register>();
+ Register out = load->GetLocations()->Out().AsRegister<Register>();
codegen_->LoadCurrentMethod(out);
__ LoadFromOffset(kLoadWord, out, out, mirror::ArtMethod::DeclaringClassOffset().Int32Value());
__ LoadFromOffset(kLoadWord, out, out, mirror::Class::DexCacheStringsOffset().Int32Value());
@@ -3150,7 +3320,7 @@ void LocationsBuilderARM::VisitLoadException(HLoadException* load) {
}
void InstructionCodeGeneratorARM::VisitLoadException(HLoadException* load) {
- Register out = load->GetLocations()->Out().As<Register>();
+ Register out = load->GetLocations()->Out().AsRegister<Register>();
int32_t offset = Thread::ExceptionOffset<kArmWordSize>().Int32Value();
__ LoadFromOffset(kLoadWord, out, TR, offset);
__ LoadImmediate(IP, 0);
@@ -3181,9 +3351,9 @@ void LocationsBuilderARM::VisitInstanceOf(HInstanceOf* instruction) {
void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) {
LocationSummary* locations = instruction->GetLocations();
- Register obj = locations->InAt(0).As<Register>();
- Register cls = locations->InAt(1).As<Register>();
- Register out = locations->Out().As<Register>();
+ Register obj = locations->InAt(0).AsRegister<Register>();
+ Register cls = locations->InAt(1).AsRegister<Register>();
+ Register out = locations->Out().AsRegister<Register>();
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
Label done, zero;
SlowPathCodeARM* slow_path = nullptr;
@@ -3228,9 +3398,9 @@ void LocationsBuilderARM::VisitCheckCast(HCheckCast* instruction) {
void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) {
LocationSummary* locations = instruction->GetLocations();
- Register obj = locations->InAt(0).As<Register>();
- Register cls = locations->InAt(1).As<Register>();
- Register temp = locations->GetTemp(0).As<Register>();
+ Register obj = locations->InAt(0).AsRegister<Register>();
+ Register cls = locations->InAt(1).AsRegister<Register>();
+ Register temp = locations->GetTemp(0).AsRegister<Register>();
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(
@@ -3292,9 +3462,9 @@ void InstructionCodeGeneratorARM::HandleBitwiseOperation(HBinaryOperation* instr
LocationSummary* locations = instruction->GetLocations();
if (instruction->GetResultType() == Primitive::kPrimInt) {
- Register first = locations->InAt(0).As<Register>();
- Register second = locations->InAt(1).As<Register>();
- Register out = locations->Out().As<Register>();
+ Register first = locations->InAt(0).AsRegister<Register>();
+ Register second = locations->InAt(1).AsRegister<Register>();
+ Register out = locations->Out().AsRegister<Register>();
if (instruction->IsAnd()) {
__ and_(out, first, ShifterOperand(second));
} else if (instruction->IsOr()) {
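
Note: the ARM changes above are largely a mechanical rename from the untyped Location::As<T>() accessor to AsRegister<T>() / AsFpuRegister<T>(), plus re-wrapping of over-long lines. A minimal standalone sketch of the motivation, using hypothetical types rather than the real ART Location API: a per-class accessor can assert the location kind it was handed, so a core/FPU mix-up fails at the accessor rather than in the emitted code.

    #include <cassert>

    enum class Kind { kCoreRegister, kFpuRegister };

    class Loc {
     public:
      Loc(Kind kind, int reg) : kind_(kind), reg_(reg) {}

      template <typename T>
      T AsRegister() const {
        assert(kind_ == Kind::kCoreRegister);  // A DCHECK(IsRegister()) in the real class.
        return static_cast<T>(reg_);
      }

      template <typename T>
      T AsFpuRegister() const {
        assert(kind_ == Kind::kFpuRegister);   // DCHECK(IsFpuRegister()).
        return static_cast<T>(reg_);
      }

     private:
      Kind kind_;
      int reg_;
    };

    int main() {
      Loc core(Kind::kCoreRegister, 3);
      int r = core.AsRegister<int>();  // OK.
      // core.AsFpuRegister<int>();    // Would assert: wrong register class.
      return r == 3 ? 0 : 1;
    }
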
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 0fc430750d..a61ef2d4f6 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -38,7 +38,7 @@ namespace art {
namespace arm64 {
-// TODO: clean-up some of the constant definitions.
+static constexpr bool kExplicitStackOverflowCheck = false;
static constexpr size_t kHeapRefSize = sizeof(mirror::HeapReference<mirror::Object>);
static constexpr int kCurrentMethodStackOffset = 0;
@@ -167,7 +167,7 @@ MemOperand StackOperandFrom(Location location) {
return MemOperand(sp, location.GetStackIndex());
}
-MemOperand HeapOperand(const Register& base, size_t offset) {
+MemOperand HeapOperand(const Register& base, size_t offset = 0) {
   // A heap reference must be 32bit, so it fits in a W register.
DCHECK(base.IsW());
return MemOperand(base.X(), offset);
@@ -393,6 +393,20 @@ class NullCheckSlowPathARM64 : public SlowPathCodeARM64 {
DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathARM64);
};
+class StackOverflowCheckSlowPathARM64 : public SlowPathCodeARM64 {
+ public:
+ StackOverflowCheckSlowPathARM64() {}
+
+ virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
+ __ Bind(GetEntryLabel());
+ arm64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowStackOverflow), nullptr, 0);
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(StackOverflowCheckSlowPathARM64);
+};
+
class SuspendCheckSlowPathARM64 : public SlowPathCodeARM64 {
public:
explicit SuspendCheckSlowPathARM64(HSuspendCheck* instruction,
@@ -418,7 +432,6 @@ class SuspendCheckSlowPathARM64 : public SlowPathCodeARM64 {
return &return_label_;
}
-
private:
HSuspendCheck* const instruction_;
// If not null, the block to branch to after the suspend check.
@@ -437,7 +450,7 @@ class TypeCheckSlowPathARM64 : public SlowPathCodeARM64 {
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
__ Bind(GetEntryLabel());
__ Brk(__LINE__); // TODO: Unimplemented TypeCheckSlowPathARM64.
- __ b(GetExitLabel());
+ __ B(GetExitLabel());
}
private:
@@ -479,13 +492,30 @@ CodeGeneratorARM64::CodeGeneratorARM64(HGraph* graph)
#undef __
#define __ GetVIXLAssembler()->
+void CodeGeneratorARM64::Finalize(CodeAllocator* allocator) {
+ // Ensure we emit the literal pool.
+ __ FinalizeCode();
+ CodeGenerator::Finalize(allocator);
+}
+
void CodeGeneratorARM64::GenerateFrameEntry() {
- // TODO: Add proper support for the stack overflow check.
- UseScratchRegisterScope temps(GetVIXLAssembler());
- Register temp = temps.AcquireX();
- __ Add(temp, sp, -static_cast<int32_t>(GetStackOverflowReservedBytes(kArm64)));
- __ Ldr(temp, MemOperand(temp, 0));
- RecordPcInfo(nullptr, 0);
+ bool do_overflow_check = FrameNeedsStackCheck(GetFrameSize(), kArm64) || !IsLeafMethod();
+ if (do_overflow_check) {
+ UseScratchRegisterScope temps(GetVIXLAssembler());
+ Register temp = temps.AcquireX();
+ if (kExplicitStackOverflowCheck) {
+ SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) StackOverflowCheckSlowPathARM64();
+ AddSlowPath(slow_path);
+
+ __ Ldr(temp, MemOperand(tr, Thread::StackEndOffset<kArm64WordSize>().Int32Value()));
+ __ Cmp(sp, temp);
+ __ B(lo, slow_path->GetEntryLabel());
+ } else {
+ __ Add(temp, sp, -static_cast<int32_t>(GetStackOverflowReservedBytes(kArm64)));
+ __ Ldr(wzr, MemOperand(temp, 0));
+ RecordPcInfo(nullptr, 0);
+ }
+ }
CPURegList preserved_regs = GetFramePreservedRegisters();
int frame_size = GetFrameSize();
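
Note: the rewritten GenerateFrameEntry picks between two stack-overflow strategies: an implicit probe (a load from an address kArm64's reserved gap below sp, relying on the fault handler) and an explicit compare of sp against Thread::stack_end that branches to the new slow path. A rough model of the decision logic, with illustrative constants rather than the real ART values:

    #include <cstddef>
    #include <iostream>

    constexpr size_t kStackOverflowReservedBytes = 8 * 1024;  // Assumed value for the sketch.

    bool FrameNeedsStackCheck(size_t frame_size) {
      // Assumption: large frames always get a check, mirroring the helper used above.
      return frame_size >= kStackOverflowReservedBytes;
    }

    enum class CheckKind { kNone, kImplicitProbe, kExplicitCompare };

    CheckKind ChooseCheck(size_t frame_size, bool is_leaf, bool explicit_checks) {
      bool do_overflow_check = FrameNeedsStackCheck(frame_size) || !is_leaf;
      if (!do_overflow_check) {
        return CheckKind::kNone;  // Small leaf frame: skip the check entirely.
      }
      return explicit_checks ? CheckKind::kExplicitCompare : CheckKind::kImplicitProbe;
    }

    int main() {
      std::cout << static_cast<int>(ChooseCheck(64, /*is_leaf=*/true, false)) << "\n";   // 0: none
      std::cout << static_cast<int>(ChooseCheck(64, /*is_leaf=*/false, false)) << "\n";  // 1: probe
      std::cout << static_cast<int>(ChooseCheck(64, /*is_leaf=*/false, true)) << "\n";   // 2: compare
    }
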
@@ -588,12 +618,12 @@ Location CodeGeneratorARM64::GetStackLocation(HLoadLocal* load) const {
void CodeGeneratorARM64::MarkGCCard(Register object, Register value) {
UseScratchRegisterScope temps(GetVIXLAssembler());
Register card = temps.AcquireX();
- Register temp = temps.AcquireX();
+ Register temp = temps.AcquireW(); // Index within the CardTable - 32bit.
vixl::Label done;
__ Cbz(value, &done);
__ Ldr(card, MemOperand(tr, Thread::CardTableOffset<kArm64WordSize>().Int32Value()));
__ Lsr(temp, object, gc::accounting::CardTable::kCardShift);
- __ Strb(card, MemOperand(card, temp));
+ __ Strb(card, MemOperand(card, temp.X()));
__ Bind(&done);
}
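
Note: MarkGCCard now keeps the card index in a 32-bit W register and stores through card + (object >> kCardShift); the value stored is the low byte of the biased card-table base, which the runtime arranges to equal the dirty marker. A sketch of the underlying card-table arithmetic with illustrative constants (the card size and dirty value below are assumptions, not taken from this diff): each 2^kCardShift-byte chunk of the heap maps to one byte, and a reference store simply dirties that byte.

    #include <cstdint>
    #include <iostream>
    #include <vector>

    constexpr unsigned kCardShift = 10;   // Assumed 1 KiB cards.
    constexpr uint8_t kCardDirty = 0x70;  // Assumed dirty marker.

    void MarkCard(std::vector<uint8_t>& cards, uintptr_t heap_begin, uintptr_t object) {
      // One byte per 2^kCardShift bytes of heap; dirty it on a reference store.
      cards[(object - heap_begin) >> kCardShift] = kCardDirty;
    }

    int main() {
      std::vector<uint8_t> cards(16, 0);
      const uintptr_t heap_begin = 0x10000;
      MarkCard(cards, heap_begin, heap_begin + 3 * 1024 + 17);  // Object inside the 4th card.
      std::cout << static_cast<int>(cards[3]) << "\n";          // 112 (0x70)
    }
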
@@ -601,7 +631,7 @@ void CodeGeneratorARM64::SetupBlockedRegisters() const {
// Block reserved registers:
// ip0 (VIXL temporary)
// ip1 (VIXL temporary)
- // xSuspend (Suspend counter)
+ // tr
// lr
// sp is not part of the allocatable registers, so we don't need to block it.
// TODO: Avoid blocking callee-saved registers, and instead preserve them
@@ -772,12 +802,14 @@ void CodeGeneratorARM64::InvokeRuntime(int32_t entry_point_offset,
uint32_t dex_pc) {
__ Ldr(lr, MemOperand(tr, entry_point_offset));
__ Blr(lr);
- RecordPcInfo(instruction, dex_pc);
- DCHECK(instruction->IsSuspendCheck()
- || instruction->IsBoundsCheck()
- || instruction->IsNullCheck()
- || instruction->IsDivZeroCheck()
- || !IsLeafMethod());
+ if (instruction != nullptr) {
+ RecordPcInfo(instruction, dex_pc);
+ DCHECK(instruction->IsSuspendCheck()
+ || instruction->IsBoundsCheck()
+ || instruction->IsNullCheck()
+ || instruction->IsDivZeroCheck()
+ || !IsLeafMethod());
+ }
}
void InstructionCodeGeneratorARM64::GenerateClassInitializationCheck(SlowPathCodeARM64* slow_path,
@@ -787,12 +819,30 @@ void InstructionCodeGeneratorARM64::GenerateClassInitializationCheck(SlowPathCod
__ Ldr(temp, HeapOperand(class_reg, mirror::Class::StatusOffset()));
__ Cmp(temp, mirror::Class::kStatusInitialized);
__ B(lt, slow_path->GetEntryLabel());
- // Even if the initialized flag is set, we may be in a situation where caches are not synced
- // properly. Therefore, we do a memory fence.
- __ Dmb(InnerShareable, BarrierAll);
+ // Even if the initialized flag is set, we need to ensure consistent memory ordering.
+ __ Dmb(InnerShareable, BarrierReads);
__ Bind(slow_path->GetExitLabel());
}
+void InstructionCodeGeneratorARM64::GenerateSuspendCheck(HSuspendCheck* instruction,
+ HBasicBlock* successor) {
+ SuspendCheckSlowPathARM64* slow_path =
+ new (GetGraph()->GetArena()) SuspendCheckSlowPathARM64(instruction, successor);
+ codegen_->AddSlowPath(slow_path);
+ UseScratchRegisterScope temps(codegen_->GetVIXLAssembler());
+ Register temp = temps.AcquireW();
+
+ __ Ldrh(temp, MemOperand(tr, Thread::ThreadFlagsOffset<kArm64WordSize>().SizeValue()));
+ if (successor == nullptr) {
+ __ Cbnz(temp, slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetReturnLabel());
+ } else {
+ __ Cbz(temp, codegen_->GetLabelOf(successor));
+ __ B(slow_path->GetEntryLabel());
+ // slow_path will return to GetLabelOf(successor).
+ }
+}
+
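
Note: GenerateSuspendCheck polls the 16-bit thread-flags word through the thread register (tr) and only takes the slow path when a flag is set, replacing the old dedicated suspend-counter register. A plain C++ model of that polling shape (not the generated ARM64, and the flag layout here is an assumption):

    #include <atomic>
    #include <cstdint>
    #include <iostream>

    std::atomic<uint16_t> thread_flags{0};  // Stand-in for the per-thread flags word.

    void SuspendCheckSlowPath() {
      std::cout << "suspend/checkpoint requested\n";
    }

    void SuspendCheck() {
      // Ldrh temp, [tr, #flags_offset]; Cbnz temp, slow_path  in the generated code.
      if (thread_flags.load(std::memory_order_relaxed) != 0) {
        SuspendCheckSlowPath();
      }
    }

    int main() {
      SuspendCheck();         // Fast path: a single load and a branch not taken.
      thread_flags.store(1);  // E.g. the GC requests a checkpoint.
      SuspendCheck();         // Slow path taken.
    }
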
InstructionCodeGeneratorARM64::InstructionCodeGeneratorARM64(HGraph* graph,
CodeGeneratorARM64* codegen)
: HGraphVisitor(graph),
@@ -801,10 +851,6 @@ InstructionCodeGeneratorARM64::InstructionCodeGeneratorARM64(HGraph* graph,
#define FOR_EACH_UNIMPLEMENTED_INSTRUCTION(M) \
M(ParallelMove) \
- M(Rem) \
- M(Shl) \
- M(Shr) \
- M(UShr) \
#define UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name) name##UnimplementedInstructionBreakCode
@@ -897,6 +943,63 @@ void InstructionCodeGeneratorARM64::HandleBinaryOp(HBinaryOperation* instr) {
}
}
+void LocationsBuilderARM64::HandleShift(HBinaryOperation* instr) {
+ DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());
+
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
+ Primitive::Type type = instr->GetResultType();
+ switch (type) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong: {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister());
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected shift type " << type;
+ }
+}
+
+void InstructionCodeGeneratorARM64::HandleShift(HBinaryOperation* instr) {
+ DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());
+
+ Primitive::Type type = instr->GetType();
+ switch (type) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong: {
+ Register dst = OutputRegister(instr);
+ Register lhs = InputRegisterAt(instr, 0);
+ Operand rhs = InputOperandAt(instr, 1);
+ if (rhs.IsImmediate()) {
+ uint32_t shift_value = (type == Primitive::kPrimInt)
+ ? static_cast<uint32_t>(rhs.immediate() & kMaxIntShiftValue)
+ : static_cast<uint32_t>(rhs.immediate() & kMaxLongShiftValue);
+ if (instr->IsShl()) {
+ __ Lsl(dst, lhs, shift_value);
+ } else if (instr->IsShr()) {
+ __ Asr(dst, lhs, shift_value);
+ } else {
+ __ Lsr(dst, lhs, shift_value);
+ }
+ } else {
+ Register rhs_reg = dst.IsX() ? rhs.reg().X() : rhs.reg().W();
+
+ if (instr->IsShl()) {
+ __ Lsl(dst, lhs, rhs_reg);
+ } else if (instr->IsShr()) {
+ __ Asr(dst, lhs, rhs_reg);
+ } else {
+ __ Lsr(dst, lhs, rhs_reg);
+ }
+ }
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected shift operation type " << type;
+ }
+}
+
void LocationsBuilderARM64::VisitAdd(HAdd* instruction) {
HandleBinaryOp(instruction);
}
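
Note: HandleShift masks constant shift amounts with kMaxIntShiftValue / kMaxLongShiftValue before emitting Lsl/Asr/Lsr, matching the Java rule that only the low 5 (int) or 6 (long) bits of the shift count are used. A quick check of that arithmetic in plain C++ (the 0x1f/0x3f masks below are the Java-mandated values, assumed to be what those constants hold):

    #include <cstdint>
    #include <iostream>

    int32_t JavaShlInt(int32_t x, int32_t n)  { return x << (n & 0x1f); }
    int64_t JavaShlLong(int64_t x, int32_t n) { return x << (n & 0x3f); }

    int main() {
      std::cout << JavaShlInt(1, 33) << "\n";   // 2: 33 & 0x1f == 1, not 0.
      std::cout << JavaShlLong(1, 65) << "\n";  // 2: 65 & 0x3f == 1.
    }
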
@@ -927,17 +1030,17 @@ void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) {
Register obj = InputRegisterAt(instruction, 0);
Location index = locations->InAt(1);
size_t offset = mirror::Array::DataOffset(Primitive::ComponentSize(type)).Uint32Value();
- MemOperand source(obj);
+ MemOperand source = HeapOperand(obj);
UseScratchRegisterScope temps(GetVIXLAssembler());
if (index.IsConstant()) {
offset += Int64ConstantFrom(index) << Primitive::ComponentSizeShift(type);
- source = MemOperand(obj, offset);
+ source = HeapOperand(obj, offset);
} else {
Register temp = temps.AcquireSameSizeAs(obj);
Register index_reg = RegisterFrom(index, Primitive::kPrimInt);
__ Add(temp, obj, Operand(index_reg, LSL, Primitive::ComponentSizeShift(type)));
- source = MemOperand(temp, offset);
+ source = HeapOperand(temp, offset);
}
codegen_->Load(type, OutputCPURegister(instruction), source);
@@ -982,17 +1085,17 @@ void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) {
CPURegister value = InputCPURegisterAt(instruction, 2);
Location index = locations->InAt(1);
size_t offset = mirror::Array::DataOffset(Primitive::ComponentSize(value_type)).Uint32Value();
- MemOperand destination(obj);
+ MemOperand destination = HeapOperand(obj);
UseScratchRegisterScope temps(GetVIXLAssembler());
if (index.IsConstant()) {
offset += Int64ConstantFrom(index) << Primitive::ComponentSizeShift(value_type);
- destination = MemOperand(obj, offset);
+ destination = HeapOperand(obj, offset);
} else {
Register temp = temps.AcquireSameSizeAs(obj);
Register index_reg = InputRegisterAt(instruction, 1);
__ Add(temp, obj, Operand(index_reg, LSL, Primitive::ComponentSizeShift(value_type)));
- destination = MemOperand(temp, offset);
+ destination = HeapOperand(temp, offset);
}
codegen_->Store(value_type, value, destination);
@@ -1059,29 +1162,59 @@ void InstructionCodeGeneratorARM64::VisitClinitCheck(HClinitCheck* check) {
GenerateClassInitializationCheck(slow_path, InputRegisterAt(check, 0));
}
-void LocationsBuilderARM64::VisitCompare(HCompare* instruction) {
+void LocationsBuilderARM64::VisitCompare(HCompare* compare) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
- locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
+ Primitive::Type in_type = compare->InputAt(0)->GetType();
+ switch (in_type) {
+ case Primitive::kPrimLong: {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(compare->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+ }
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresRegister());
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected type for compare operation " << in_type;
+ }
}
-void InstructionCodeGeneratorARM64::VisitCompare(HCompare* instruction) {
- Primitive::Type in_type = instruction->InputAt(0)->GetType();
+void InstructionCodeGeneratorARM64::VisitCompare(HCompare* compare) {
+ Primitive::Type in_type = compare->InputAt(0)->GetType();
- DCHECK_EQ(in_type, Primitive::kPrimLong);
+ // 0 if: left == right
+ // 1 if: left > right
+ // -1 if: left < right
switch (in_type) {
case Primitive::kPrimLong: {
- vixl::Label done;
- Register result = OutputRegister(instruction);
- Register left = InputRegisterAt(instruction, 0);
- Operand right = InputOperandAt(instruction, 1);
- __ Subs(result.X(), left, right);
- __ B(eq, &done);
- __ Mov(result, 1);
- __ Cneg(result, result, le);
- __ Bind(&done);
+ Register result = OutputRegister(compare);
+ Register left = InputRegisterAt(compare, 0);
+ Operand right = InputOperandAt(compare, 1);
+
+ __ Cmp(left, right);
+ __ Cset(result, ne);
+ __ Cneg(result, result, lt);
+ break;
+ }
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ Register result = OutputRegister(compare);
+ FPRegister left = InputFPRegisterAt(compare, 0);
+ FPRegister right = InputFPRegisterAt(compare, 1);
+
+ __ Fcmp(left, right);
+ if (compare->IsGtBias()) {
+ __ Cset(result, ne);
+ } else {
+ __ Csetm(result, ne);
+ }
+ __ Cneg(result, result, compare->IsGtBias() ? mi : gt);
break;
}
default:
@@ -1110,7 +1243,7 @@ void InstructionCodeGeneratorARM64::VisitCondition(HCondition* instruction) {
Condition cond = ARM64Condition(instruction->GetCondition());
__ Cmp(lhs, rhs);
- __ Csel(res, vixl::Assembler::AppropriateZeroRegFor(res), Operand(1), InvertCondition(cond));
+ __ Cset(res, cond);
}
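
Note: the new lowering replaces the Subs/branch sequence with branch-free flag materialization: VisitCondition emits Cmp + Cset, and the long path of VisitCompare emits Cmp, Cset(ne), Cneg(lt) to produce the -1/0/1 result Long.compare requires. The same computation written as plain C++ (a model of the emitted sequence, not the codegen itself):

    #include <cstdint>
    #include <iostream>

    int32_t CompareLong(int64_t left, int64_t right) {
      int32_t result = (left != right) ? 1 : 0;  // Cset result, ne
      if (left < right) {
        result = -result;                        // Cneg result, result, lt
      }
      return result;
    }

    int main() {
      std::cout << CompareLong(1, 2) << " "      // -1
                << CompareLong(2, 2) << " "      // 0
                << CompareLong(3, 2) << "\n";    // 1
    }
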
#define FOR_EACH_CONDITION_INSTRUCTION(M) \
@@ -1235,8 +1368,20 @@ void LocationsBuilderARM64::VisitGoto(HGoto* got) {
void InstructionCodeGeneratorARM64::VisitGoto(HGoto* got) {
HBasicBlock* successor = got->GetSuccessor();
- // TODO: Support for suspend checks emission.
- if (!codegen_->GoesToNextBlock(got->GetBlock(), successor)) {
+ DCHECK(!successor->IsExitBlock());
+ HBasicBlock* block = got->GetBlock();
+ HInstruction* previous = got->GetPrevious();
+ HLoopInformation* info = block->GetLoopInformation();
+
+ if (info != nullptr && info->IsBackEdge(block) && info->HasSuspendCheck()) {
+ codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
+ GenerateSuspendCheck(info->GetSuspendCheck(), successor);
+ return;
+ }
+ if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
+ GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
+ }
+ if (!codegen_->GoesToNextBlock(block, successor)) {
__ B(codegen_->GetLabelOf(successor));
}
}
@@ -1244,27 +1389,32 @@ void InstructionCodeGeneratorARM64::VisitGoto(HGoto* got) {
void LocationsBuilderARM64::VisitIf(HIf* if_instr) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
HInstruction* cond = if_instr->InputAt(0);
- DCHECK(cond->IsCondition());
- if (cond->AsCondition()->NeedsMaterialization()) {
+ if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
locations->SetInAt(0, Location::RequiresRegister());
}
}
void InstructionCodeGeneratorARM64::VisitIf(HIf* if_instr) {
HInstruction* cond = if_instr->InputAt(0);
- DCHECK(cond->IsCondition());
HCondition* condition = cond->AsCondition();
vixl::Label* true_target = codegen_->GetLabelOf(if_instr->IfTrueSuccessor());
vixl::Label* false_target = codegen_->GetLabelOf(if_instr->IfFalseSuccessor());
- // TODO: Support constant condition input in VisitIf.
-
- if (condition->NeedsMaterialization()) {
+ if (cond->IsIntConstant()) {
+ int32_t cond_value = cond->AsIntConstant()->GetValue();
+ if (cond_value == 1) {
+ if (!codegen_->GoesToNextBlock(if_instr->GetBlock(), if_instr->IfTrueSuccessor())) {
+ __ B(true_target);
+ }
+ return;
+ } else {
+ DCHECK_EQ(cond_value, 0);
+ }
+ } else if (!cond->IsCondition() || condition->NeedsMaterialization()) {
// The condition instruction has been materialized, compare the output to 0.
Location cond_val = if_instr->GetLocations()->InAt(0);
DCHECK(cond_val.IsRegister());
__ Cbnz(InputRegisterAt(if_instr, 0), true_target);
-
} else {
// The condition instruction has not been materialized, use its inputs as
// the comparison and its condition as the branch condition.
@@ -1282,7 +1432,6 @@ void InstructionCodeGeneratorARM64::VisitIf(HIf* if_instr) {
__ B(arm64_cond, true_target);
}
}
-
if (!codegen_->GoesToNextBlock(if_instr->GetBlock(), if_instr->IfFalseSuccessor())) {
__ B(false_target);
}
@@ -1295,8 +1444,7 @@ void LocationsBuilderARM64::VisitInstanceFieldGet(HInstanceFieldGet* instruction
}
void InstructionCodeGeneratorARM64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
- MemOperand field = MemOperand(InputRegisterAt(instruction, 0),
- instruction->GetFieldOffset().Uint32Value());
+ MemOperand field = HeapOperand(InputRegisterAt(instruction, 0), instruction->GetFieldOffset());
codegen_->Load(instruction->GetType(), OutputCPURegister(instruction), field);
}
@@ -1310,7 +1458,7 @@ void InstructionCodeGeneratorARM64::VisitInstanceFieldSet(HInstanceFieldSet* ins
Primitive::Type field_type = instruction->GetFieldType();
CPURegister value = InputCPURegisterAt(instruction, 1);
Register obj = InputRegisterAt(instruction, 0);
- codegen_->Store(field_type, value, MemOperand(obj, instruction->GetFieldOffset().Uint32Value()));
+ codegen_->Store(field_type, value, HeapOperand(obj, instruction->GetFieldOffset()));
if (field_type == Primitive::kPrimNot) {
codegen_->MarkGCCard(obj, Register(value));
}
@@ -1339,7 +1487,7 @@ void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) {
__ Cbz(obj, &done);
// Compare the class of `obj` with `cls`.
- __ Ldr(out, MemOperand(obj, mirror::Object::ClassOffset().Int32Value()));
+ __ Ldr(out, HeapOperand(obj, mirror::Object::ClassOffset()));
__ Cmp(out, cls);
if (instruction->IsClassFinal()) {
// Classes must be equal for the instanceof to succeed.
@@ -1446,14 +1594,12 @@ void InstructionCodeGeneratorARM64::VisitInvokeStatic(HInvokeStatic* invoke) {
// temp = method;
codegen_->LoadCurrentMethod(temp);
// temp = temp->dex_cache_resolved_methods_;
- __ Ldr(temp, MemOperand(temp.X(),
- mirror::ArtMethod::DexCacheResolvedMethodsOffset().SizeValue()));
+ __ Ldr(temp, HeapOperand(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset()));
// temp = temp[index_in_cache];
- __ Ldr(temp, MemOperand(temp.X(), index_in_cache));
+ __ Ldr(temp, HeapOperand(temp, index_in_cache));
// lr = temp->entry_point_from_quick_compiled_code_;
- __ Ldr(lr, MemOperand(temp.X(),
- mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
- kArm64WordSize).SizeValue()));
+ __ Ldr(lr, HeapOperand(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kArm64WordSize)));
// lr();
__ Blr(lr);
@@ -1464,7 +1610,7 @@ void InstructionCodeGeneratorARM64::VisitInvokeStatic(HInvokeStatic* invoke) {
void InstructionCodeGeneratorARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
LocationSummary* locations = invoke->GetLocations();
Location receiver = locations->InAt(0);
- Register temp = XRegisterFrom(invoke->GetLocations()->GetTemp(0));
+ Register temp = WRegisterFrom(invoke->GetLocations()->GetTemp(0));
size_t method_offset = mirror::Class::EmbeddedVTableOffset().SizeValue() +
invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry);
Offset class_offset = mirror::Object::ClassOffset();
@@ -1472,16 +1618,16 @@ void InstructionCodeGeneratorARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
// temp = object->GetClass();
if (receiver.IsStackSlot()) {
- __ Ldr(temp.W(), MemOperand(sp, receiver.GetStackIndex()));
- __ Ldr(temp.W(), MemOperand(temp, class_offset.SizeValue()));
+ __ Ldr(temp, MemOperand(sp, receiver.GetStackIndex()));
+ __ Ldr(temp, HeapOperand(temp, class_offset));
} else {
DCHECK(receiver.IsRegister());
- __ Ldr(temp.W(), HeapOperandFrom(receiver, class_offset));
+ __ Ldr(temp, HeapOperandFrom(receiver, class_offset));
}
// temp = temp->GetMethodAt(method_offset);
- __ Ldr(temp.W(), MemOperand(temp, method_offset));
+ __ Ldr(temp, HeapOperand(temp, method_offset));
// lr = temp->GetEntryPoint();
- __ Ldr(lr, MemOperand(temp, entry_point.SizeValue()));
+ __ Ldr(lr, HeapOperand(temp, entry_point.SizeValue()));
// lr();
__ Blr(lr);
DCHECK(!codegen_->IsLeafMethod());
@@ -1506,7 +1652,7 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) {
DCHECK(cls->CanCallRuntime());
codegen_->LoadCurrentMethod(out);
__ Ldr(out, HeapOperand(out, mirror::ArtMethod::DexCacheResolvedTypesOffset()));
- __ Ldr(out, MemOperand(out.X(), CodeGenerator::GetCacheOffset(cls->GetTypeIndex())));
+ __ Ldr(out, HeapOperand(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex())));
SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM64(
cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
@@ -1555,7 +1701,7 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) {
codegen_->LoadCurrentMethod(out);
__ Ldr(out, HeapOperand(out, mirror::ArtMethod::DeclaringClassOffset()));
__ Ldr(out, HeapOperand(out, mirror::Class::DexCacheStringsOffset()));
- __ Ldr(out, MemOperand(out.X(), CodeGenerator::GetCacheOffset(load->GetStringIndex())));
+ __ Ldr(out, HeapOperand(out, CodeGenerator::GetCacheOffset(load->GetStringIndex())));
__ Cbz(out, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
}
@@ -1797,6 +1943,43 @@ void InstructionCodeGeneratorARM64::VisitPhi(HPhi* instruction) {
LOG(FATAL) << "Unreachable";
}
+void LocationsBuilderARM64::VisitRem(HRem* rem) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(rem, LocationSummary::kNoCall);
+ switch (rem->GetResultType()) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected rem type " << rem->GetResultType();
+ }
+}
+
+void InstructionCodeGeneratorARM64::VisitRem(HRem* rem) {
+ Primitive::Type type = rem->GetResultType();
+ switch (type) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong: {
+ UseScratchRegisterScope temps(GetVIXLAssembler());
+ Register dividend = InputRegisterAt(rem, 0);
+ Register divisor = InputRegisterAt(rem, 1);
+ Register output = OutputRegister(rem);
+ Register temp = temps.AcquireSameSizeAs(output);
+
+ __ Sdiv(temp, dividend, divisor);
+ __ Msub(output, temp, divisor, dividend);
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unexpected rem type " << type;
+ }
+}
+
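
Note: VisitRem lowers integral remainder to Sdiv followed by Msub, i.e. out = dividend - (dividend / divisor) * divisor with division truncating toward zero, which is exactly Java's % for int and long. The arithmetic, spelled out in plain C++:

    #include <cstdint>
    #include <iostream>

    int64_t Rem(int64_t dividend, int64_t divisor) {
      int64_t quotient = dividend / divisor;   // Sdiv temp, dividend, divisor
      return dividend - quotient * divisor;    // Msub out, temp, divisor, dividend
    }

    int main() {
      std::cout << Rem(7, 3) << " "    // 1
                << Rem(-7, 3) << " "   // -1: the sign follows the dividend, as in Java.
                << Rem(7, -3) << "\n"; // 1
    }
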
void LocationsBuilderARM64::VisitReturn(HReturn* instruction) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
Primitive::Type return_type = instruction->InputAt(0)->GetType();
@@ -1819,6 +2002,22 @@ void InstructionCodeGeneratorARM64::VisitReturnVoid(HReturnVoid* instruction) {
__ Br(lr);
}
+void LocationsBuilderARM64::VisitShl(HShl* shl) {
+ HandleShift(shl);
+}
+
+void InstructionCodeGeneratorARM64::VisitShl(HShl* shl) {
+ HandleShift(shl);
+}
+
+void LocationsBuilderARM64::VisitShr(HShr* shr) {
+ HandleShift(shr);
+}
+
+void InstructionCodeGeneratorARM64::VisitShr(HShr* shr) {
+ HandleShift(shr);
+}
+
void LocationsBuilderARM64::VisitStoreLocal(HStoreLocal* store) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(store);
Primitive::Type field_type = store->InputAt(1)->GetType();
@@ -1863,9 +2062,8 @@ void LocationsBuilderARM64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
}
void InstructionCodeGeneratorARM64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
- Register cls = InputRegisterAt(instruction, 0);
- uint32_t offset = instruction->GetFieldOffset().Uint32Value();
- codegen_->Load(instruction->GetType(), OutputCPURegister(instruction), MemOperand(cls, offset));
+ MemOperand field = HeapOperand(InputRegisterAt(instruction, 0), instruction->GetFieldOffset());
+ codegen_->Load(instruction->GetType(), OutputCPURegister(instruction), field);
}
void LocationsBuilderARM64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
@@ -1878,10 +2076,10 @@ void LocationsBuilderARM64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
void InstructionCodeGeneratorARM64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
CPURegister value = InputCPURegisterAt(instruction, 1);
Register cls = InputRegisterAt(instruction, 0);
- uint32_t offset = instruction->GetFieldOffset().Uint32Value();
+ Offset offset = instruction->GetFieldOffset();
Primitive::Type field_type = instruction->GetFieldType();
- codegen_->Store(field_type, value, MemOperand(cls, offset));
+ codegen_->Store(field_type, value, HeapOperand(cls, offset));
if (field_type == Primitive::kPrimNot) {
codegen_->MarkGCCard(cls, Register(value));
}
@@ -1892,14 +2090,17 @@ void LocationsBuilderARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
}
void InstructionCodeGeneratorARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
- // TODO: Improve support for suspend checks.
- SuspendCheckSlowPathARM64* slow_path =
- new (GetGraph()->GetArena()) SuspendCheckSlowPathARM64(instruction, nullptr);
- codegen_->AddSlowPath(slow_path);
-
- __ Subs(wSuspend, wSuspend, 1);
- __ B(slow_path->GetEntryLabel(), le);
- __ Bind(slow_path->GetReturnLabel());
+ HBasicBlock* block = instruction->GetBlock();
+ if (block->GetLoopInformation() != nullptr) {
+ DCHECK(block->GetLoopInformation()->GetSuspendCheck() == instruction);
+ // The back edge will generate the suspend check.
+ return;
+ }
+ if (block->IsEntryBlock() && instruction->GetNext()->IsGoto()) {
+ // The goto will generate the suspend check.
+ return;
+ }
+ GenerateSuspendCheck(instruction, nullptr);
}
void LocationsBuilderARM64::VisitTemporary(HTemporary* temp) {
@@ -1928,6 +2129,7 @@ void LocationsBuilderARM64::VisitTypeConversion(HTypeConversion* conversion) {
new (GetGraph()->GetArena()) LocationSummary(conversion, LocationSummary::kNoCall);
Primitive::Type input_type = conversion->GetInputType();
Primitive::Type result_type = conversion->GetResultType();
+ DCHECK_NE(input_type, result_type);
if ((input_type == Primitive::kPrimNot) || (input_type == Primitive::kPrimVoid) ||
(result_type == Primitive::kPrimNot) || (result_type == Primitive::kPrimVoid)) {
LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type;
@@ -1956,17 +2158,34 @@ void InstructionCodeGeneratorARM64::VisitTypeConversion(HTypeConversion* convers
int result_size = Primitive::ComponentSize(result_type);
int input_size = Primitive::ComponentSize(input_type);
int min_size = kBitsPerByte * std::min(result_size, input_size);
+ Register output = OutputRegister(conversion);
+ Register source = InputRegisterAt(conversion, 0);
if ((result_type == Primitive::kPrimChar) ||
((input_type == Primitive::kPrimChar) && (result_size > input_size))) {
- __ Ubfx(OutputRegister(conversion), InputRegisterAt(conversion, 0), 0, min_size);
+ __ Ubfx(output, output.IsX() ? source.X() : source.W(), 0, min_size);
} else {
- __ Sbfx(OutputRegister(conversion), InputRegisterAt(conversion, 0), 0, min_size);
+ __ Sbfx(output, output.IsX() ? source.X() : source.W(), 0, min_size);
}
- return;
+ } else if (IsFPType(result_type) && IsIntegralType(input_type)) {
+ CHECK(input_type == Primitive::kPrimInt || input_type == Primitive::kPrimLong);
+ __ Scvtf(OutputFPRegister(conversion), InputRegisterAt(conversion, 0));
+ } else if (IsIntegralType(result_type) && IsFPType(input_type)) {
+ CHECK(result_type == Primitive::kPrimInt || result_type == Primitive::kPrimLong);
+ __ Fcvtzs(OutputRegister(conversion), InputFPRegisterAt(conversion, 0));
+ } else if (IsFPType(result_type) && IsFPType(input_type)) {
+ __ Fcvt(OutputFPRegister(conversion), InputFPRegisterAt(conversion, 0));
+ } else {
+ LOG(FATAL) << "Unexpected or unimplemented type conversion from " << input_type
+ << " to " << result_type;
}
+}
+
+void LocationsBuilderARM64::VisitUShr(HUShr* ushr) {
+ HandleShift(ushr);
+}
- LOG(FATAL) << "Unexpected or unimplemented type conversion from " << input_type
- << " to " << result_type;
+void InstructionCodeGeneratorARM64::VisitUShr(HUShr* ushr) {
+ HandleShift(ushr);
}
void LocationsBuilderARM64::VisitXor(HXor* instruction) {
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index a40f27fafa..0e3d25f9aa 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -44,12 +44,10 @@ static const vixl::FPRegister kParameterFPRegisters[] = {
static constexpr size_t kParameterFPRegistersLength = arraysize(kParameterFPRegisters);
const vixl::Register tr = vixl::x18; // Thread Register
-const vixl::Register wSuspend = vixl::w19; // Suspend Register
-const vixl::Register xSuspend = vixl::x19;
const vixl::CPURegList vixl_reserved_core_registers(vixl::ip0, vixl::ip1);
const vixl::CPURegList vixl_reserved_fp_registers(vixl::d31);
-const vixl::CPURegList runtime_reserved_core_registers(tr, xSuspend, vixl::lr);
+const vixl::CPURegList runtime_reserved_core_registers(tr, vixl::lr);
const vixl::CPURegList quick_callee_saved_registers(vixl::CPURegister::kRegister,
vixl::kXRegSize,
kArm64CalleeSaveRefSpills);
@@ -110,7 +108,9 @@ class InstructionCodeGeneratorARM64 : public HGraphVisitor {
private:
void GenerateClassInitializationCheck(SlowPathCodeARM64* slow_path, vixl::Register class_reg);
+ void GenerateSuspendCheck(HSuspendCheck* instruction, HBasicBlock* successor);
void HandleBinaryOp(HBinaryOperation* instr);
+ void HandleShift(HBinaryOperation* instr);
Arm64Assembler* const assembler_;
CodeGeneratorARM64* const codegen_;
@@ -130,6 +130,7 @@ class LocationsBuilderARM64 : public HGraphVisitor {
private:
void HandleBinaryOp(HBinaryOperation* instr);
+ void HandleShift(HBinaryOperation* instr);
void HandleInvoke(HInvoke* instr);
CodeGeneratorARM64* const codegen_;
@@ -232,6 +233,8 @@ class CodeGeneratorARM64 : public CodeGenerator {
}
}
+ void Finalize(CodeAllocator* allocator) OVERRIDE;
+
// Code generation helpers.
void MoveConstant(vixl::CPURegister destination, HConstant* constant);
void MoveHelper(Location destination, Location source, Primitive::Type type);
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 3689452234..6f83d9faf4 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -143,7 +143,9 @@ class BoundsCheckSlowPathX86 : public SlowPathCodeX86 {
BoundsCheckSlowPathX86(HBoundsCheck* instruction,
Location index_location,
Location length_location)
- : instruction_(instruction), index_location_(index_location), length_location_(length_location) {}
+ : instruction_(instruction),
+ index_location_(index_location),
+ length_location_(length_location) {}
virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
@@ -311,7 +313,8 @@ class TypeCheckSlowPathX86 : public SlowPathCodeX86 {
Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
if (instruction_->IsInstanceOf()) {
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pInstanceofNonTrivial)));
+ __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize,
+ pInstanceofNonTrivial)));
} else {
DCHECK(instruction_->IsCheckCast());
__ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pCheckCast)));
@@ -464,7 +467,8 @@ void CodeGeneratorX86::GenerateFrameEntry() {
static const int kFakeReturnRegister = 8;
core_spill_mask_ |= (1 << kFakeReturnRegister);
- bool skip_overflow_check = IsLeafMethod() && !FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kX86);
+ bool skip_overflow_check =
+ IsLeafMethod() && !FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kX86);
if (!skip_overflow_check && !kExplicitStackOverflowCheck) {
__ testl(EAX, Address(ESP, -static_cast<int32_t>(GetStackOverflowReservedBytes(kX86))));
RecordPcInfo(nullptr, 0);
@@ -567,28 +571,28 @@ void CodeGeneratorX86::Move32(Location destination, Location source) {
}
if (destination.IsRegister()) {
if (source.IsRegister()) {
- __ movl(destination.As<Register>(), source.As<Register>());
+ __ movl(destination.AsRegister<Register>(), source.AsRegister<Register>());
} else if (source.IsFpuRegister()) {
- __ movd(destination.As<Register>(), source.As<XmmRegister>());
+ __ movd(destination.AsRegister<Register>(), source.AsFpuRegister<XmmRegister>());
} else {
DCHECK(source.IsStackSlot());
- __ movl(destination.As<Register>(), Address(ESP, source.GetStackIndex()));
+ __ movl(destination.AsRegister<Register>(), Address(ESP, source.GetStackIndex()));
}
} else if (destination.IsFpuRegister()) {
if (source.IsRegister()) {
- __ movd(destination.As<XmmRegister>(), source.As<Register>());
+ __ movd(destination.AsFpuRegister<XmmRegister>(), source.AsRegister<Register>());
} else if (source.IsFpuRegister()) {
- __ movaps(destination.As<XmmRegister>(), source.As<XmmRegister>());
+ __ movaps(destination.AsFpuRegister<XmmRegister>(), source.AsFpuRegister<XmmRegister>());
} else {
DCHECK(source.IsStackSlot());
- __ movss(destination.As<XmmRegister>(), Address(ESP, source.GetStackIndex()));
+ __ movss(destination.AsFpuRegister<XmmRegister>(), Address(ESP, source.GetStackIndex()));
}
} else {
DCHECK(destination.IsStackSlot()) << destination;
if (source.IsRegister()) {
- __ movl(Address(ESP, destination.GetStackIndex()), source.As<Register>());
+ __ movl(Address(ESP, destination.GetStackIndex()), source.AsRegister<Register>());
} else if (source.IsFpuRegister()) {
- __ movss(Address(ESP, destination.GetStackIndex()), source.As<XmmRegister>());
+ __ movss(Address(ESP, destination.GetStackIndex()), source.AsFpuRegister<XmmRegister>());
} else {
DCHECK(source.IsStackSlot());
__ pushl(Address(ESP, source.GetStackIndex()));
@@ -603,19 +607,25 @@ void CodeGeneratorX86::Move64(Location destination, Location source) {
}
if (destination.IsRegisterPair()) {
if (source.IsRegisterPair()) {
- __ movl(destination.AsRegisterPairLow<Register>(), source.AsRegisterPairLow<Register>());
- __ movl(destination.AsRegisterPairHigh<Register>(), source.AsRegisterPairHigh<Register>());
+ EmitParallelMoves(
+ Location::RegisterLocation(source.AsRegisterPairHigh<Register>()),
+ Location::RegisterLocation(destination.AsRegisterPairHigh<Register>()),
+ Location::RegisterLocation(source.AsRegisterPairLow<Register>()),
+ Location::RegisterLocation(destination.AsRegisterPairLow<Register>()));
} else if (source.IsFpuRegister()) {
LOG(FATAL) << "Unimplemented";
} else if (source.IsQuickParameter()) {
uint16_t register_index = source.GetQuickParameterRegisterIndex();
uint16_t stack_index = source.GetQuickParameterStackIndex();
InvokeDexCallingConvention calling_convention;
- __ movl(destination.AsRegisterPairLow<Register>(),
- calling_convention.GetRegisterAt(register_index));
- __ movl(destination.AsRegisterPairHigh<Register>(), Address(ESP,
- calling_convention.GetStackOffsetOf(stack_index + 1) + GetFrameSize()));
+ EmitParallelMoves(
+ Location::RegisterLocation(calling_convention.GetRegisterAt(register_index)),
+ Location::RegisterLocation(destination.AsRegisterPairLow<Register>()),
+ Location::StackSlot(
+ calling_convention.GetStackOffsetOf(stack_index + 1) + GetFrameSize()),
+ Location::RegisterLocation(destination.AsRegisterPairHigh<Register>()));
} else {
+ // No conflict possible, so just do the moves.
DCHECK(source.IsDoubleStackSlot());
__ movl(destination.AsRegisterPairLow<Register>(), Address(ESP, source.GetStackIndex()));
__ movl(destination.AsRegisterPairHigh<Register>(),
@@ -625,47 +635,52 @@ void CodeGeneratorX86::Move64(Location destination, Location source) {
InvokeDexCallingConvention calling_convention;
uint16_t register_index = destination.GetQuickParameterRegisterIndex();
uint16_t stack_index = destination.GetQuickParameterStackIndex();
- if (source.IsRegister()) {
- __ movl(calling_convention.GetRegisterAt(register_index), source.AsRegisterPairLow<Register>());
- __ movl(Address(ESP, calling_convention.GetStackOffsetOf(stack_index + 1)),
- source.AsRegisterPairHigh<Register>());
+ if (source.IsRegisterPair()) {
+ LOG(FATAL) << "Unimplemented";
} else if (source.IsFpuRegister()) {
LOG(FATAL) << "Unimplemented";
} else {
DCHECK(source.IsDoubleStackSlot());
- __ movl(calling_convention.GetRegisterAt(register_index),
- Address(ESP, source.GetStackIndex()));
- __ pushl(Address(ESP, source.GetHighStackIndex(kX86WordSize)));
- __ popl(Address(ESP, calling_convention.GetStackOffsetOf(stack_index + 1)));
+ EmitParallelMoves(
+ Location::StackSlot(source.GetStackIndex()),
+ Location::RegisterLocation(calling_convention.GetRegisterAt(register_index)),
+ Location::StackSlot(source.GetHighStackIndex(kX86WordSize)),
+ Location::StackSlot(calling_convention.GetStackOffsetOf(stack_index + 1)));
}
} else if (destination.IsFpuRegister()) {
if (source.IsDoubleStackSlot()) {
- __ movsd(destination.As<XmmRegister>(), Address(ESP, source.GetStackIndex()));
+ __ movsd(destination.AsFpuRegister<XmmRegister>(), Address(ESP, source.GetStackIndex()));
} else {
LOG(FATAL) << "Unimplemented";
}
} else {
DCHECK(destination.IsDoubleStackSlot()) << destination;
if (source.IsRegisterPair()) {
+ // No conflict possible, so just do the moves.
__ movl(Address(ESP, destination.GetStackIndex()), source.AsRegisterPairLow<Register>());
__ movl(Address(ESP, destination.GetHighStackIndex(kX86WordSize)),
source.AsRegisterPairHigh<Register>());
} else if (source.IsQuickParameter()) {
+ // No conflict possible, so just do the move.
InvokeDexCallingConvention calling_convention;
uint16_t register_index = source.GetQuickParameterRegisterIndex();
uint16_t stack_index = source.GetQuickParameterStackIndex();
+ // Just move the low part. The only time a source is a quick parameter is
+ // when moving the parameter to its stack locations. And the (Java) caller
+ // of this method has already done that.
__ movl(Address(ESP, destination.GetStackIndex()),
calling_convention.GetRegisterAt(register_index));
DCHECK_EQ(calling_convention.GetStackOffsetOf(stack_index + 1) + GetFrameSize(),
static_cast<size_t>(destination.GetHighStackIndex(kX86WordSize)));
} else if (source.IsFpuRegister()) {
- __ movsd(Address(ESP, destination.GetStackIndex()), source.As<XmmRegister>());
+ __ movsd(Address(ESP, destination.GetStackIndex()), source.AsFpuRegister<XmmRegister>());
} else {
DCHECK(source.IsDoubleStackSlot());
- __ pushl(Address(ESP, source.GetStackIndex()));
- __ popl(Address(ESP, destination.GetStackIndex()));
- __ pushl(Address(ESP, source.GetHighStackIndex(kX86WordSize)));
- __ popl(Address(ESP, destination.GetHighStackIndex(kX86WordSize)));
+ EmitParallelMoves(
+ Location::StackSlot(source.GetStackIndex()),
+ Location::StackSlot(destination.GetStackIndex()),
+ Location::StackSlot(source.GetHighStackIndex(kX86WordSize)),
+ Location::StackSlot(destination.GetHighStackIndex(kX86WordSize)));
}
}
}
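The switch to EmitParallelMoves in the paths above matters when one half of a 64-bit move can clobber the other half, for example when the destination low register is also the source high register; emitting the two movl's in a fixed order would lose data. A minimal C++ sketch of the hazard and of the read-all-sources-first discipline a parallel-move resolver enforces (hypothetical names, not the ART resolver API):

    #include <cstdint>

    // Naive pair move: if dst_lo aliases src_hi, the second read sees the
    // value just written and the pair is corrupted.
    void NaivePairMove(uint32_t* dst_lo, uint32_t* dst_hi,
                       const uint32_t* src_lo, const uint32_t* src_hi) {
      *dst_lo = *src_lo;
      *dst_hi = *src_hi;
    }

    // Parallel-move style: snapshot every source before writing any
    // destination (the real resolver breaks cycles with temporaries or swaps).
    void ParallelPairMove(uint32_t* dst_lo, uint32_t* dst_hi,
                          const uint32_t* src_lo, const uint32_t* src_hi) {
      const uint32_t lo = *src_lo;
      const uint32_t hi = *src_hi;
      *dst_lo = lo;
      *dst_hi = hi;
    }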
@@ -681,7 +696,7 @@ void CodeGeneratorX86::Move(HInstruction* instruction, Location location, HInstr
if (const_to_move->IsIntConstant()) {
Immediate imm(const_to_move->AsIntConstant()->GetValue());
if (location.IsRegister()) {
- __ movl(location.As<Register>(), imm);
+ __ movl(location.AsRegister<Register>(), imm);
} else if (location.IsStackSlot()) {
__ movl(Address(ESP, location.GetStackIndex()), imm);
} else {
@@ -695,7 +710,8 @@ void CodeGeneratorX86::Move(HInstruction* instruction, Location location, HInstr
__ movl(location.AsRegisterPairHigh<Register>(), Immediate(High32Bits(value)));
} else if (location.IsDoubleStackSlot()) {
__ movl(Address(ESP, location.GetStackIndex()), Immediate(Low32Bits(value)));
- __ movl(Address(ESP, location.GetHighStackIndex(kX86WordSize)), Immediate(High32Bits(value)));
+ __ movl(Address(ESP, location.GetHighStackIndex(kX86WordSize)),
+ Immediate(High32Bits(value)));
} else {
DCHECK(location.IsConstant());
DCHECK_EQ(location.GetConstant(), instruction);
@@ -828,7 +844,7 @@ void InstructionCodeGeneratorX86::VisitIf(HIf* if_instr) {
// Materialized condition, compare against 0.
Location lhs = if_instr->GetLocations()->InAt(0);
if (lhs.IsRegister()) {
- __ cmpl(lhs.As<Register>(), Immediate(0));
+ __ cmpl(lhs.AsRegister<Register>(), Immediate(0));
} else {
__ cmpl(Address(ESP, lhs.GetStackIndex()), Immediate(0));
}
@@ -843,13 +859,13 @@ void InstructionCodeGeneratorX86::VisitIf(HIf* if_instr) {
// LHS is guaranteed to be in a register (see
// LocationsBuilderX86::VisitCondition).
if (rhs.IsRegister()) {
- __ cmpl(lhs.As<Register>(), rhs.As<Register>());
+ __ cmpl(lhs.AsRegister<Register>(), rhs.AsRegister<Register>());
} else if (rhs.IsConstant()) {
HIntConstant* instruction = rhs.GetConstant()->AsIntConstant();
Immediate imm(instruction->AsIntConstant()->GetValue());
- __ cmpl(lhs.As<Register>(), imm);
+ __ cmpl(lhs.AsRegister<Register>(), imm);
} else {
- __ cmpl(lhs.As<Register>(), Address(ESP, rhs.GetStackIndex()));
+ __ cmpl(lhs.AsRegister<Register>(), Address(ESP, rhs.GetStackIndex()));
}
__ j(X86Condition(cond->AsCondition()->GetCondition()),
codegen_->GetLabelOf(if_instr->IfTrueSuccessor()));
@@ -920,18 +936,18 @@ void LocationsBuilderX86::VisitCondition(HCondition* comp) {
void InstructionCodeGeneratorX86::VisitCondition(HCondition* comp) {
if (comp->NeedsMaterialization()) {
LocationSummary* locations = comp->GetLocations();
- Register reg = locations->Out().As<Register>();
+ Register reg = locations->Out().AsRegister<Register>();
// Clear register: setcc only sets the low byte.
__ xorl(reg, reg);
if (locations->InAt(1).IsRegister()) {
- __ cmpl(locations->InAt(0).As<Register>(),
- locations->InAt(1).As<Register>());
+ __ cmpl(locations->InAt(0).AsRegister<Register>(),
+ locations->InAt(1).AsRegister<Register>());
} else if (locations->InAt(1).IsConstant()) {
HConstant* instruction = locations->InAt(1).GetConstant();
Immediate imm(instruction->AsIntConstant()->GetValue());
- __ cmpl(locations->InAt(0).As<Register>(), imm);
+ __ cmpl(locations->InAt(0).AsRegister<Register>(), imm);
} else {
- __ cmpl(locations->InAt(0).As<Register>(),
+ __ cmpl(locations->InAt(0).AsRegister<Register>(),
Address(ESP, locations->InAt(1).GetStackIndex()));
}
__ setb(X86Condition(comp->GetCondition()), reg);
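The xorl-then-setb pattern used here exists because the x86 setcc instructions write only the low byte of their destination; the upper 24 bits keep whatever the register held, and the xorl is placed before the cmpl because xorl itself clobbers EFLAGS. A small C++ model of the hazard (a sketch, not ART code):

    #include <cstdint>

    // Models a 32-bit register after `setb reg8`: only bits 0..7 are written.
    uint32_t AfterSetcc(uint32_t old_reg_value, bool condition) {
      return (old_reg_value & 0xFFFFFF00u) | (condition ? 1u : 0u);
    }

    // Materialized booleans must be exactly 0 or 1, hence the register is
    // zeroed first: AfterSetcc(0, cond) is 0 or 1, AfterSetcc(stale, cond)
    // may not be.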
@@ -1078,7 +1094,7 @@ void InstructionCodeGeneratorX86::VisitReturn(HReturn* ret) {
case Primitive::kPrimShort:
case Primitive::kPrimInt:
case Primitive::kPrimNot:
- DCHECK_EQ(ret->GetLocations()->InAt(0).As<Register>(), EAX);
+ DCHECK_EQ(ret->GetLocations()->InAt(0).AsRegister<Register>(), EAX);
break;
case Primitive::kPrimLong:
@@ -1088,7 +1104,7 @@ void InstructionCodeGeneratorX86::VisitReturn(HReturn* ret) {
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
- DCHECK_EQ(ret->GetLocations()->InAt(0).As<XmmRegister>(), XMM0);
+ DCHECK_EQ(ret->GetLocations()->InAt(0).AsFpuRegister<XmmRegister>(), XMM0);
break;
default:
@@ -1104,7 +1120,7 @@ void LocationsBuilderX86::VisitInvokeStatic(HInvokeStatic* invoke) {
}
void InstructionCodeGeneratorX86::VisitInvokeStatic(HInvokeStatic* invoke) {
- Register temp = invoke->GetLocations()->GetTemp(0).As<Register>();
+ Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
// TODO: Implement all kinds of calls:
// 1) boot -> boot
@@ -1169,7 +1185,7 @@ void LocationsBuilderX86::HandleInvoke(HInvoke* invoke) {
}
void InstructionCodeGeneratorX86::VisitInvokeVirtual(HInvokeVirtual* invoke) {
- Register temp = invoke->GetLocations()->GetTemp(0).As<Register>();
+ Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
uint32_t method_offset = mirror::Class::EmbeddedVTableOffset().Uint32Value() +
invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry);
LocationSummary* locations = invoke->GetLocations();
@@ -1180,7 +1196,7 @@ void InstructionCodeGeneratorX86::VisitInvokeVirtual(HInvokeVirtual* invoke) {
__ movl(temp, Address(ESP, receiver.GetStackIndex()));
__ movl(temp, Address(temp, class_offset));
} else {
- __ movl(temp, Address(receiver.As<Register>(), class_offset));
+ __ movl(temp, Address(receiver.AsRegister<Register>(), class_offset));
}
// temp = temp->GetMethodAt(method_offset);
__ movl(temp, Address(temp, method_offset));
@@ -1200,7 +1216,7 @@ void LocationsBuilderX86::VisitInvokeInterface(HInvokeInterface* invoke) {
void InstructionCodeGeneratorX86::VisitInvokeInterface(HInvokeInterface* invoke) {
// TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
- Register temp = invoke->GetLocations()->GetTemp(0).As<Register>();
+ Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
uint32_t method_offset = mirror::Class::EmbeddedImTableOffset().Uint32Value() +
(invoke->GetImtIndex() % mirror::Class::kImtSize) * sizeof(mirror::Class::ImTableEntry);
LocationSummary* locations = invoke->GetLocations();
@@ -1209,14 +1225,14 @@ void InstructionCodeGeneratorX86::VisitInvokeInterface(HInvokeInterface* invoke)
// Set the hidden argument.
__ movl(temp, Immediate(invoke->GetDexMethodIndex()));
- __ movd(invoke->GetLocations()->GetTemp(1).As<XmmRegister>(), temp);
+ __ movd(invoke->GetLocations()->GetTemp(1).AsFpuRegister<XmmRegister>(), temp);
// temp = object->GetClass();
if (receiver.IsStackSlot()) {
__ movl(temp, Address(ESP, receiver.GetStackIndex()));
__ movl(temp, Address(temp, class_offset));
} else {
- __ movl(temp, Address(receiver.As<Register>(), class_offset));
+ __ movl(temp, Address(receiver.AsRegister<Register>(), class_offset));
}
// temp = temp->GetImtEntryAt(method_offset);
__ movl(temp, Address(temp, method_offset));
@@ -1239,11 +1255,16 @@ void LocationsBuilderX86::VisitNeg(HNeg* neg) {
break;
case Primitive::kPrimFloat:
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetOut(Location::SameAsFirstInput());
+ locations->AddTemp(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresFpuRegister());
+ break;
+
case Primitive::kPrimDouble:
locations->SetInAt(0, Location::RequiresFpuRegister());
- // Output overlaps as we need a fresh (zero-initialized)
- // register to perform subtraction from zero.
- locations->SetOut(Location::RequiresFpuRegister());
+ locations->SetOut(Location::SameAsFirstInput());
+ locations->AddTemp(Location::RequiresFpuRegister());
break;
default:
@@ -1259,7 +1280,7 @@ void InstructionCodeGeneratorX86::VisitNeg(HNeg* neg) {
case Primitive::kPrimInt:
DCHECK(in.IsRegister());
DCHECK(in.Equals(out));
- __ negl(out.As<Register>());
+ __ negl(out.AsRegister<Register>());
break;
case Primitive::kPrimLong:
@@ -1275,21 +1296,29 @@ void InstructionCodeGeneratorX86::VisitNeg(HNeg* neg) {
__ negl(out.AsRegisterPairHigh<Register>());
break;
- case Primitive::kPrimFloat:
- DCHECK(!in.Equals(out));
- // out = 0
- __ xorps(out.As<XmmRegister>(), out.As<XmmRegister>());
- // out = out - in
- __ subss(out.As<XmmRegister>(), in.As<XmmRegister>());
+ case Primitive::kPrimFloat: {
+ DCHECK(in.Equals(out));
+ Register constant = locations->GetTemp(0).AsRegister<Register>();
+ XmmRegister mask = locations->GetTemp(1).AsFpuRegister<XmmRegister>();
+ // Implement float negation with an exclusive or with value
+ // 0x80000000 (mask for bit 31, representing the sign of a
+ // single-precision floating-point number).
+ __ movl(constant, Immediate(INT32_C(0x80000000)));
+ __ movd(mask, constant);
+ __ xorps(out.AsFpuRegister<XmmRegister>(), mask);
break;
+ }
- case Primitive::kPrimDouble:
- DCHECK(!in.Equals(out));
- // out = 0
- __ xorpd(out.As<XmmRegister>(), out.As<XmmRegister>());
- // out = out - in
- __ subsd(out.As<XmmRegister>(), in.As<XmmRegister>());
+ case Primitive::kPrimDouble: {
+ DCHECK(in.Equals(out));
+ XmmRegister mask = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
+ // Implement double negation with an exclusive or with value
+ // 0x8000000000000000 (mask for bit 63, representing the sign of
+ // a double-precision floating-point number).
+ __ LoadLongConstant(mask, INT64_C(0x8000000000000000));
+ __ xorpd(out.AsFpuRegister<XmmRegister>(), mask);
break;
+ }
default:
LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
@@ -1301,6 +1330,7 @@ void LocationsBuilderX86::VisitTypeConversion(HTypeConversion* conversion) {
new (GetGraph()->GetArena()) LocationSummary(conversion, LocationSummary::kNoCall);
Primitive::Type result_type = conversion->GetResultType();
Primitive::Type input_type = conversion->GetInputType();
+ DCHECK_NE(result_type, input_type);
switch (result_type) {
case Primitive::kPrimByte:
switch (input_type) {
@@ -1343,6 +1373,12 @@ void LocationsBuilderX86::VisitTypeConversion(HTypeConversion* conversion) {
break;
case Primitive::kPrimFloat:
+ // Processing a Dex `float-to-int' instruction.
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresFpuRegister());
+ break;
+
case Primitive::kPrimDouble:
LOG(FATAL) << "Type conversion from " << input_type
<< " to " << result_type << " not yet implemented";
@@ -1382,7 +1418,6 @@ void LocationsBuilderX86::VisitTypeConversion(HTypeConversion* conversion) {
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
- case Primitive::kPrimChar:
// Processing a Dex `int-to-char' instruction.
locations->SetInAt(0, Location::Any());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
@@ -1406,6 +1441,13 @@ void LocationsBuilderX86::VisitTypeConversion(HTypeConversion* conversion) {
break;
case Primitive::kPrimLong:
+ // Processing a Dex `long-to-float' instruction.
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresFpuRegister());
+ locations->AddTemp(Location::RequiresFpuRegister());
+ locations->AddTemp(Location::RequiresFpuRegister());
+ break;
+
case Primitive::kPrimDouble:
LOG(FATAL) << "Type conversion from " << input_type
<< " to " << result_type << " not yet implemented";
@@ -1429,6 +1471,13 @@ void LocationsBuilderX86::VisitTypeConversion(HTypeConversion* conversion) {
break;
case Primitive::kPrimLong:
+ // Processing a Dex `long-to-double' instruction.
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresFpuRegister());
+ locations->AddTemp(Location::RequiresFpuRegister());
+ locations->AddTemp(Location::RequiresFpuRegister());
+ break;
+
case Primitive::kPrimFloat:
LOG(FATAL) << "Type conversion from " << input_type
<< " to " << result_type << " not yet implemented";
@@ -1452,6 +1501,7 @@ void InstructionCodeGeneratorX86::VisitTypeConversion(HTypeConversion* conversio
Location in = locations->InAt(0);
Primitive::Type result_type = conversion->GetResultType();
Primitive::Type input_type = conversion->GetInputType();
+ DCHECK_NE(result_type, input_type);
switch (result_type) {
case Primitive::kPrimByte:
switch (input_type) {
@@ -1460,13 +1510,13 @@ void InstructionCodeGeneratorX86::VisitTypeConversion(HTypeConversion* conversio
case Primitive::kPrimChar:
// Processing a Dex `int-to-byte' instruction.
if (in.IsRegister()) {
- __ movsxb(out.As<Register>(), in.As<ByteRegister>());
+ __ movsxb(out.AsRegister<Register>(), in.AsRegister<ByteRegister>());
} else if (in.IsStackSlot()) {
- __ movsxb(out.As<Register>(), Address(ESP, in.GetStackIndex()));
+ __ movsxb(out.AsRegister<Register>(), Address(ESP, in.GetStackIndex()));
} else {
DCHECK(in.GetConstant()->IsIntConstant());
int32_t value = in.GetConstant()->AsIntConstant()->GetValue();
- __ movl(out.As<Register>(), Immediate(static_cast<int8_t>(value)));
+ __ movl(out.AsRegister<Register>(), Immediate(static_cast<int8_t>(value)));
}
break;
@@ -1483,13 +1533,13 @@ void InstructionCodeGeneratorX86::VisitTypeConversion(HTypeConversion* conversio
case Primitive::kPrimChar:
// Processing a Dex `int-to-short' instruction.
if (in.IsRegister()) {
- __ movsxw(out.As<Register>(), in.As<Register>());
+ __ movsxw(out.AsRegister<Register>(), in.AsRegister<Register>());
} else if (in.IsStackSlot()) {
- __ movsxw(out.As<Register>(), Address(ESP, in.GetStackIndex()));
+ __ movsxw(out.AsRegister<Register>(), Address(ESP, in.GetStackIndex()));
} else {
DCHECK(in.GetConstant()->IsIntConstant());
int32_t value = in.GetConstant()->AsIntConstant()->GetValue();
- __ movl(out.As<Register>(), Immediate(static_cast<int16_t>(value)));
+ __ movl(out.AsRegister<Register>(), Immediate(static_cast<int16_t>(value)));
}
break;
@@ -1504,18 +1554,42 @@ void InstructionCodeGeneratorX86::VisitTypeConversion(HTypeConversion* conversio
case Primitive::kPrimLong:
// Processing a Dex `long-to-int' instruction.
if (in.IsRegisterPair()) {
- __ movl(out.As<Register>(), in.AsRegisterPairLow<Register>());
+ __ movl(out.AsRegister<Register>(), in.AsRegisterPairLow<Register>());
} else if (in.IsDoubleStackSlot()) {
- __ movl(out.As<Register>(), Address(ESP, in.GetStackIndex()));
+ __ movl(out.AsRegister<Register>(), Address(ESP, in.GetStackIndex()));
} else {
DCHECK(in.IsConstant());
DCHECK(in.GetConstant()->IsLongConstant());
int64_t value = in.GetConstant()->AsLongConstant()->GetValue();
- __ movl(out.As<Register>(), Immediate(static_cast<int32_t>(value)));
+ __ movl(out.AsRegister<Register>(), Immediate(static_cast<int32_t>(value)));
}
break;
- case Primitive::kPrimFloat:
+ case Primitive::kPrimFloat: {
+ // Processing a Dex `float-to-int' instruction.
+ XmmRegister input = in.AsFpuRegister<XmmRegister>();
+ Register output = out.AsRegister<Register>();
+ XmmRegister temp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
+ Label done, nan;
+
+ __ movl(output, Immediate(kPrimIntMax));
+ // temp = int-to-float(output)
+ __ cvtsi2ss(temp, output);
+ // if input >= temp goto done
+ __ comiss(input, temp);
+ __ j(kAboveEqual, &done);
+      // if input is NaN goto nan
+ __ j(kUnordered, &nan);
+ // output = float-to-int-truncate(input)
+ __ cvttss2si(output, input);
+ __ jmp(&done);
+ __ Bind(&nan);
+ // output = 0
+ __ xorl(output, output);
+ __ Bind(&done);
+ break;
+ }
+
case Primitive::kPrimDouble:
LOG(FATAL) << "Type conversion from " << input_type
<< " to " << result_type << " not yet implemented";
@@ -1536,7 +1610,7 @@ void InstructionCodeGeneratorX86::VisitTypeConversion(HTypeConversion* conversio
// Processing a Dex `int-to-long' instruction.
DCHECK_EQ(out.AsRegisterPairLow<Register>(), EAX);
DCHECK_EQ(out.AsRegisterPairHigh<Register>(), EDX);
- DCHECK_EQ(in.As<Register>(), EAX);
+ DCHECK_EQ(in.AsRegister<Register>(), EAX);
__ cdq();
break;
@@ -1557,16 +1631,15 @@ void InstructionCodeGeneratorX86::VisitTypeConversion(HTypeConversion* conversio
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
- case Primitive::kPrimChar:
        // Processing a Dex `int-to-char' instruction.
if (in.IsRegister()) {
- __ movzxw(out.As<Register>(), in.As<Register>());
+ __ movzxw(out.AsRegister<Register>(), in.AsRegister<Register>());
} else if (in.IsStackSlot()) {
- __ movzxw(out.As<Register>(), Address(ESP, in.GetStackIndex()));
+ __ movzxw(out.AsRegister<Register>(), Address(ESP, in.GetStackIndex()));
} else {
DCHECK(in.GetConstant()->IsIntConstant());
int32_t value = in.GetConstant()->AsIntConstant()->GetValue();
- __ movl(out.As<Register>(), Immediate(static_cast<uint16_t>(value)));
+ __ movl(out.AsRegister<Register>(), Immediate(static_cast<uint16_t>(value)));
}
break;
@@ -1578,15 +1651,48 @@ void InstructionCodeGeneratorX86::VisitTypeConversion(HTypeConversion* conversio
case Primitive::kPrimFloat:
switch (input_type) {
- // Processing a Dex `int-to-float' instruction.
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
case Primitive::kPrimChar:
- __ cvtsi2ss(out.As<XmmRegister>(), in.As<Register>());
+ // Processing a Dex `int-to-float' instruction.
+ __ cvtsi2ss(out.AsFpuRegister<XmmRegister>(), in.AsRegister<Register>());
break;
- case Primitive::kPrimLong:
+ case Primitive::kPrimLong: {
+ // Processing a Dex `long-to-float' instruction.
+ Register low = in.AsRegisterPairLow<Register>();
+ Register high = in.AsRegisterPairHigh<Register>();
+ XmmRegister result = out.AsFpuRegister<XmmRegister>();
+ XmmRegister temp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
+ XmmRegister constant = locations->GetTemp(1).AsFpuRegister<XmmRegister>();
+
+ // Operations use doubles for precision reasons (each 32-bit
+ // half of a long fits in the 53-bit mantissa of a double,
+ // but not in the 24-bit mantissa of a float). This is
+ // especially important for the low bits. The result is
+ // eventually converted to float.
+
+        // low = low - 2^31 (to prevent bit 31 of `low` from being
+        // interpreted as a sign bit)
+ __ subl(low, Immediate(0x80000000));
+ // temp = int-to-double(high)
+ __ cvtsi2sd(temp, high);
+ // temp = temp * 2^32
+ __ LoadLongConstant(constant, k2Pow32EncodingForDouble);
+ __ mulsd(temp, constant);
+ // result = int-to-double(low)
+ __ cvtsi2sd(result, low);
+ // result = result + 2^31 (restore the original value of `low`)
+ __ LoadLongConstant(constant, k2Pow31EncodingForDouble);
+ __ addsd(result, constant);
+ // result = result + temp
+ __ addsd(result, temp);
+ // result = double-to-float(result)
+ __ cvtsd2ss(result, result);
+ break;
+ }
+
case Primitive::kPrimDouble:
LOG(FATAL) << "Type conversion from " << input_type
<< " to " << result_type << " not yet implemented";
@@ -1600,15 +1706,40 @@ void InstructionCodeGeneratorX86::VisitTypeConversion(HTypeConversion* conversio
case Primitive::kPrimDouble:
switch (input_type) {
- // Processing a Dex `int-to-double' instruction.
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
case Primitive::kPrimChar:
- __ cvtsi2sd(out.As<XmmRegister>(), in.As<Register>());
+ // Processing a Dex `int-to-double' instruction.
+ __ cvtsi2sd(out.AsFpuRegister<XmmRegister>(), in.AsRegister<Register>());
break;
- case Primitive::kPrimLong:
+ case Primitive::kPrimLong: {
+ // Processing a Dex `long-to-double' instruction.
+ Register low = in.AsRegisterPairLow<Register>();
+ Register high = in.AsRegisterPairHigh<Register>();
+ XmmRegister result = out.AsFpuRegister<XmmRegister>();
+ XmmRegister temp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
+ XmmRegister constant = locations->GetTemp(1).AsFpuRegister<XmmRegister>();
+
+        // low = low - 2^31 (to prevent bit 31 of `low` from being
+        // interpreted as a sign bit)
+ __ subl(low, Immediate(0x80000000));
+ // temp = int-to-double(high)
+ __ cvtsi2sd(temp, high);
+ // temp = temp * 2^32
+ __ LoadLongConstant(constant, k2Pow32EncodingForDouble);
+ __ mulsd(temp, constant);
+ // result = int-to-double(low)
+ __ cvtsi2sd(result, low);
+ // result = result + 2^31 (restore the original value of `low`)
+ __ LoadLongConstant(constant, k2Pow31EncodingForDouble);
+ __ addsd(result, constant);
+ // result = result + temp
+ __ addsd(result, temp);
+ break;
+ }
+
case Primitive::kPrimFloat:
LOG(FATAL) << "Type conversion from " << input_type
<< " to " << result_type << " not yet implemented";
@@ -1660,11 +1791,12 @@ void InstructionCodeGeneratorX86::VisitAdd(HAdd* add) {
switch (add->GetResultType()) {
case Primitive::kPrimInt: {
if (second.IsRegister()) {
- __ addl(first.As<Register>(), second.As<Register>());
+ __ addl(first.AsRegister<Register>(), second.AsRegister<Register>());
} else if (second.IsConstant()) {
- __ addl(first.As<Register>(), Immediate(second.GetConstant()->AsIntConstant()->GetValue()));
+ __ addl(first.AsRegister<Register>(),
+ Immediate(second.GetConstant()->AsIntConstant()->GetValue()));
} else {
- __ addl(first.As<Register>(), Address(ESP, second.GetStackIndex()));
+ __ addl(first.AsRegister<Register>(), Address(ESP, second.GetStackIndex()));
}
break;
}
@@ -1683,18 +1815,18 @@ void InstructionCodeGeneratorX86::VisitAdd(HAdd* add) {
case Primitive::kPrimFloat: {
if (second.IsFpuRegister()) {
- __ addss(first.As<XmmRegister>(), second.As<XmmRegister>());
+ __ addss(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
} else {
- __ addss(first.As<XmmRegister>(), Address(ESP, second.GetStackIndex()));
+ __ addss(first.AsFpuRegister<XmmRegister>(), Address(ESP, second.GetStackIndex()));
}
break;
}
case Primitive::kPrimDouble: {
if (second.IsFpuRegister()) {
- __ addsd(first.As<XmmRegister>(), second.As<XmmRegister>());
+ __ addsd(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
} else {
- __ addsd(first.As<XmmRegister>(), Address(ESP, second.GetStackIndex()));
+ __ addsd(first.AsFpuRegister<XmmRegister>(), Address(ESP, second.GetStackIndex()));
}
break;
}
@@ -1736,11 +1868,12 @@ void InstructionCodeGeneratorX86::VisitSub(HSub* sub) {
switch (sub->GetResultType()) {
case Primitive::kPrimInt: {
if (second.IsRegister()) {
- __ subl(first.As<Register>(), second.As<Register>());
+ __ subl(first.AsRegister<Register>(), second.AsRegister<Register>());
} else if (second.IsConstant()) {
- __ subl(first.As<Register>(), Immediate(second.GetConstant()->AsIntConstant()->GetValue()));
+ __ subl(first.AsRegister<Register>(),
+ Immediate(second.GetConstant()->AsIntConstant()->GetValue()));
} else {
- __ subl(first.As<Register>(), Address(ESP, second.GetStackIndex()));
+ __ subl(first.AsRegister<Register>(), Address(ESP, second.GetStackIndex()));
}
break;
}
@@ -1758,12 +1891,12 @@ void InstructionCodeGeneratorX86::VisitSub(HSub* sub) {
}
case Primitive::kPrimFloat: {
- __ subss(first.As<XmmRegister>(), second.As<XmmRegister>());
+ __ subss(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
break;
}
case Primitive::kPrimDouble: {
- __ subsd(first.As<XmmRegister>(), second.As<XmmRegister>());
+ __ subsd(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
break;
}
@@ -1818,13 +1951,13 @@ void InstructionCodeGeneratorX86::VisitMul(HMul* mul) {
switch (mul->GetResultType()) {
case Primitive::kPrimInt: {
if (second.IsRegister()) {
- __ imull(first.As<Register>(), second.As<Register>());
+ __ imull(first.AsRegister<Register>(), second.AsRegister<Register>());
} else if (second.IsConstant()) {
Immediate imm(second.GetConstant()->AsIntConstant()->GetValue());
- __ imull(first.As<Register>(), imm);
+ __ imull(first.AsRegister<Register>(), imm);
} else {
DCHECK(second.IsStackSlot());
- __ imull(first.As<Register>(), Address(ESP, second.GetStackIndex()));
+ __ imull(first.AsRegister<Register>(), Address(ESP, second.GetStackIndex()));
}
break;
}
@@ -1836,8 +1969,8 @@ void InstructionCodeGeneratorX86::VisitMul(HMul* mul) {
Register in1_lo = first.AsRegisterPairLow<Register>();
Address in2_hi(ESP, second.GetHighStackIndex(kX86WordSize));
Address in2_lo(ESP, second.GetStackIndex());
- Register eax = locations->GetTemp(0).As<Register>();
- Register edx = locations->GetTemp(1).As<Register>();
+ Register eax = locations->GetTemp(0).AsRegister<Register>();
+ Register edx = locations->GetTemp(1).AsRegister<Register>();
DCHECK_EQ(EAX, eax);
DCHECK_EQ(EDX, edx);
@@ -1868,12 +2001,12 @@ void InstructionCodeGeneratorX86::VisitMul(HMul* mul) {
}
case Primitive::kPrimFloat: {
- __ mulss(first.As<XmmRegister>(), second.As<XmmRegister>());
+ __ mulss(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
break;
}
case Primitive::kPrimDouble: {
- __ mulsd(first.As<XmmRegister>(), second.As<XmmRegister>());
+ __ mulsd(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
break;
}
@@ -1893,12 +2026,13 @@ void InstructionCodeGeneratorX86::GenerateDivRemIntegral(HBinaryOperation* instr
switch (instruction->GetResultType()) {
case Primitive::kPrimInt: {
- Register second_reg = second.As<Register>();
- DCHECK_EQ(EAX, first.As<Register>());
- DCHECK_EQ(is_div ? EAX : EDX, out.As<Register>());
+ Register second_reg = second.AsRegister<Register>();
+ DCHECK_EQ(EAX, first.AsRegister<Register>());
+ DCHECK_EQ(is_div ? EAX : EDX, out.AsRegister<Register>());
SlowPathCodeX86* slow_path =
- new (GetGraph()->GetArena()) DivRemMinusOneSlowPathX86(out.As<Register>(), is_div);
+ new (GetGraph()->GetArena()) DivRemMinusOneSlowPathX86(out.AsRegister<Register>(),
+ is_div);
codegen_->AddSlowPath(slow_path);
// 0x80000000/-1 triggers an arithmetic exception!
@@ -1997,13 +2131,13 @@ void InstructionCodeGeneratorX86::VisitDiv(HDiv* div) {
case Primitive::kPrimFloat: {
DCHECK(first.Equals(out));
- __ divss(first.As<XmmRegister>(), second.As<XmmRegister>());
+ __ divss(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
break;
}
case Primitive::kPrimDouble: {
DCHECK(first.Equals(out));
- __ divsd(first.As<XmmRegister>(), second.As<XmmRegister>());
+ __ divsd(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
break;
}
@@ -2097,7 +2231,7 @@ void InstructionCodeGeneratorX86::VisitDivZeroCheck(HDivZeroCheck* instruction)
switch (instruction->GetType()) {
case Primitive::kPrimInt: {
if (value.IsRegister()) {
- __ testl(value.As<Register>(), value.As<Register>());
+ __ testl(value.AsRegister<Register>(), value.AsRegister<Register>());
__ j(kEqual, slow_path->GetEntryLabel());
} else if (value.IsStackSlot()) {
__ cmpl(Address(ESP, value.GetStackIndex()), Immediate(0));
@@ -2112,7 +2246,7 @@ void InstructionCodeGeneratorX86::VisitDivZeroCheck(HDivZeroCheck* instruction)
}
case Primitive::kPrimLong: {
if (value.IsRegisterPair()) {
- Register temp = locations->GetTemp(0).As<Register>();
+ Register temp = locations->GetTemp(0).AsRegister<Register>();
__ movl(temp, value.AsRegisterPairLow<Register>());
__ orl(temp, value.AsRegisterPairHigh<Register>());
__ j(kEqual, slow_path->GetEntryLabel());
@@ -2165,9 +2299,9 @@ void InstructionCodeGeneratorX86::HandleShift(HBinaryOperation* op) {
switch (op->GetResultType()) {
case Primitive::kPrimInt: {
- Register first_reg = first.As<Register>();
+ Register first_reg = first.AsRegister<Register>();
if (second.IsRegister()) {
- Register second_reg = second.As<Register>();
+ Register second_reg = second.AsRegister<Register>();
DCHECK_EQ(ECX, second_reg);
if (op->IsShl()) {
__ shll(first_reg, second_reg);
@@ -2189,7 +2323,7 @@ void InstructionCodeGeneratorX86::HandleShift(HBinaryOperation* op) {
break;
}
case Primitive::kPrimLong: {
- Register second_reg = second.As<Register>();
+ Register second_reg = second.AsRegister<Register>();
DCHECK_EQ(ECX, second_reg);
if (op->IsShl()) {
GenerateShlLong(first, second_reg);
@@ -2335,11 +2469,11 @@ void InstructionCodeGeneratorX86::VisitNot(HNot* not_) {
DCHECK(in.Equals(out));
switch (not_->InputAt(0)->GetType()) {
case Primitive::kPrimBoolean:
- __ xorl(out.As<Register>(), Immediate(1));
+ __ xorl(out.AsRegister<Register>(), Immediate(1));
break;
case Primitive::kPrimInt:
- __ notl(out.As<Register>());
+ __ notl(out.AsRegister<Register>());
break;
case Primitive::kPrimLong:
@@ -2355,20 +2489,36 @@ void InstructionCodeGeneratorX86::VisitNot(HNot* not_) {
void LocationsBuilderX86::VisitCompare(HCompare* compare) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::Any());
- locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ switch (compare->InputAt(0)->GetType()) {
+ case Primitive::kPrimLong: {
+ locations->SetInAt(0, Location::RequiresRegister());
+      // TODO: We allow Location::Any() for the second input here, but constants are not handled yet.
+ locations->SetInAt(1, Location::Any());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+ }
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresRegister());
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected type for compare operation " << compare->InputAt(0)->GetType();
+ }
}
void InstructionCodeGeneratorX86::VisitCompare(HCompare* compare) {
LocationSummary* locations = compare->GetLocations();
+ Register out = locations->Out().AsRegister<Register>();
+ Location left = locations->InAt(0);
+ Location right = locations->InAt(1);
+
+ Label less, greater, done;
switch (compare->InputAt(0)->GetType()) {
case Primitive::kPrimLong: {
- Label less, greater, done;
- Register output = locations->Out().As<Register>();
- Location left = locations->InAt(0);
- Location right = locations->InAt(1);
- if (right.IsRegister()) {
+ if (right.IsRegisterPair()) {
__ cmpl(left.AsRegisterPairHigh<Register>(), right.AsRegisterPairHigh<Register>());
} else {
DCHECK(right.IsDoubleStackSlot());
@@ -2383,23 +2533,33 @@ void InstructionCodeGeneratorX86::VisitCompare(HCompare* compare) {
DCHECK(right.IsDoubleStackSlot());
__ cmpl(left.AsRegisterPairLow<Register>(), Address(ESP, right.GetStackIndex()));
}
- __ movl(output, Immediate(0));
- __ j(kEqual, &done);
- __ j(kBelow, &less); // Unsigned compare.
-
- __ Bind(&greater);
- __ movl(output, Immediate(1));
- __ jmp(&done);
-
- __ Bind(&less);
- __ movl(output, Immediate(-1));
-
- __ Bind(&done);
+ break;
+ }
+ case Primitive::kPrimFloat: {
+ __ ucomiss(left.AsFpuRegister<XmmRegister>(), right.AsFpuRegister<XmmRegister>());
+ __ j(kUnordered, compare->IsGtBias() ? &greater : &less);
+ break;
+ }
+ case Primitive::kPrimDouble: {
+ __ ucomisd(left.AsFpuRegister<XmmRegister>(), right.AsFpuRegister<XmmRegister>());
+ __ j(kUnordered, compare->IsGtBias() ? &greater : &less);
break;
}
default:
- LOG(FATAL) << "Unimplemented compare type " << compare->InputAt(0)->GetType();
+ LOG(FATAL) << "Unexpected type for compare operation " << compare->InputAt(0)->GetType();
}
+ __ movl(out, Immediate(0));
+ __ j(kEqual, &done);
+ __ j(kBelow, &less); // kBelow is for CF (unsigned & floats).
+
+ __ Bind(&greater);
+ __ movl(out, Immediate(1));
+ __ jmp(&done);
+
+ __ Bind(&less);
+ __ movl(out, Immediate(-1));
+
+ __ Bind(&done);
}
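VisitCompare now shares one -1/0/+1 tail across long, float, and double: ucomiss/ucomisd set CF for "below" as well as for unordered operands, and the explicit kUnordered jump routes NaN to +1 or -1 according to IsGtBias(), matching the Dex cmpg-*/cmpl-* distinction. A C++ sketch of the value the float/double path computes, under that reading of IsGtBias():

    #include <cmath>
    #include <cstdint>

    int32_t CompareFpThreeWay(double left, double right, bool gt_bias) {
      if (std::isnan(left) || std::isnan(right)) {
        return gt_bias ? 1 : -1;   // the kUnordered jump to &greater or &less
      }
      if (left == right) {
        return 0;                  // the kEqual jump to &done
      }
      return left < right ? -1 : 1;
    }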
void LocationsBuilderX86::VisitPhi(HPhi* instruction) {
@@ -2444,33 +2604,33 @@ void LocationsBuilderX86::VisitInstanceFieldSet(HInstanceFieldSet* instruction)
void InstructionCodeGeneratorX86::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
LocationSummary* locations = instruction->GetLocations();
- Register obj = locations->InAt(0).As<Register>();
+ Register obj = locations->InAt(0).AsRegister<Register>();
uint32_t offset = instruction->GetFieldOffset().Uint32Value();
Primitive::Type field_type = instruction->GetFieldType();
switch (field_type) {
case Primitive::kPrimBoolean:
case Primitive::kPrimByte: {
- ByteRegister value = locations->InAt(1).As<ByteRegister>();
+ ByteRegister value = locations->InAt(1).AsRegister<ByteRegister>();
__ movb(Address(obj, offset), value);
break;
}
case Primitive::kPrimShort:
case Primitive::kPrimChar: {
- Register value = locations->InAt(1).As<Register>();
+ Register value = locations->InAt(1).AsRegister<Register>();
__ movw(Address(obj, offset), value);
break;
}
case Primitive::kPrimInt:
case Primitive::kPrimNot: {
- Register value = locations->InAt(1).As<Register>();
+ Register value = locations->InAt(1).AsRegister<Register>();
__ movl(Address(obj, offset), value);
if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
- Register temp = locations->GetTemp(0).As<Register>();
- Register card = locations->GetTemp(1).As<Register>();
+ Register temp = locations->GetTemp(0).AsRegister<Register>();
+ Register card = locations->GetTemp(1).AsRegister<Register>();
codegen_->MarkGCCard(temp, card, obj, value);
}
break;
@@ -2484,13 +2644,13 @@ void InstructionCodeGeneratorX86::VisitInstanceFieldSet(HInstanceFieldSet* instr
}
case Primitive::kPrimFloat: {
- XmmRegister value = locations->InAt(1).As<XmmRegister>();
+ XmmRegister value = locations->InAt(1).AsFpuRegister<XmmRegister>();
__ movss(Address(obj, offset), value);
break;
}
case Primitive::kPrimDouble: {
- XmmRegister value = locations->InAt(1).As<XmmRegister>();
+ XmmRegister value = locations->InAt(1).AsFpuRegister<XmmRegister>();
__ movsd(Address(obj, offset), value);
break;
}
@@ -2522,37 +2682,37 @@ void LocationsBuilderX86::VisitInstanceFieldGet(HInstanceFieldGet* instruction)
void InstructionCodeGeneratorX86::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
LocationSummary* locations = instruction->GetLocations();
- Register obj = locations->InAt(0).As<Register>();
+ Register obj = locations->InAt(0).AsRegister<Register>();
uint32_t offset = instruction->GetFieldOffset().Uint32Value();
switch (instruction->GetType()) {
case Primitive::kPrimBoolean: {
- Register out = locations->Out().As<Register>();
+ Register out = locations->Out().AsRegister<Register>();
__ movzxb(out, Address(obj, offset));
break;
}
case Primitive::kPrimByte: {
- Register out = locations->Out().As<Register>();
+ Register out = locations->Out().AsRegister<Register>();
__ movsxb(out, Address(obj, offset));
break;
}
case Primitive::kPrimShort: {
- Register out = locations->Out().As<Register>();
+ Register out = locations->Out().AsRegister<Register>();
__ movsxw(out, Address(obj, offset));
break;
}
case Primitive::kPrimChar: {
- Register out = locations->Out().As<Register>();
+ Register out = locations->Out().AsRegister<Register>();
__ movzxw(out, Address(obj, offset));
break;
}
case Primitive::kPrimInt:
case Primitive::kPrimNot: {
- Register out = locations->Out().As<Register>();
+ Register out = locations->Out().AsRegister<Register>();
__ movl(out, Address(obj, offset));
break;
}
@@ -2565,13 +2725,13 @@ void InstructionCodeGeneratorX86::VisitInstanceFieldGet(HInstanceFieldGet* instr
}
case Primitive::kPrimFloat: {
- XmmRegister out = locations->Out().As<XmmRegister>();
+ XmmRegister out = locations->Out().AsFpuRegister<XmmRegister>();
__ movss(out, Address(obj, offset));
break;
}
case Primitive::kPrimDouble: {
- XmmRegister out = locations->Out().As<XmmRegister>();
+ XmmRegister out = locations->Out().AsFpuRegister<XmmRegister>();
__ movsd(out, Address(obj, offset));
break;
}
@@ -2599,7 +2759,7 @@ void InstructionCodeGeneratorX86::VisitNullCheck(HNullCheck* instruction) {
Location obj = locations->InAt(0);
if (obj.IsRegister()) {
- __ cmpl(obj.As<Register>(), Immediate(0));
+ __ cmpl(obj.AsRegister<Register>(), Immediate(0));
} else if (obj.IsStackSlot()) {
__ cmpl(Address(ESP, obj.GetStackIndex()), Immediate(0));
} else {
@@ -2621,54 +2781,54 @@ void LocationsBuilderX86::VisitArrayGet(HArrayGet* instruction) {
void InstructionCodeGeneratorX86::VisitArrayGet(HArrayGet* instruction) {
LocationSummary* locations = instruction->GetLocations();
- Register obj = locations->InAt(0).As<Register>();
+ Register obj = locations->InAt(0).AsRegister<Register>();
Location index = locations->InAt(1);
switch (instruction->GetType()) {
case Primitive::kPrimBoolean: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
- Register out = locations->Out().As<Register>();
+ Register out = locations->Out().AsRegister<Register>();
if (index.IsConstant()) {
__ movzxb(out, Address(obj,
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset));
} else {
- __ movzxb(out, Address(obj, index.As<Register>(), TIMES_1, data_offset));
+ __ movzxb(out, Address(obj, index.AsRegister<Register>(), TIMES_1, data_offset));
}
break;
}
case Primitive::kPrimByte: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int8_t)).Uint32Value();
- Register out = locations->Out().As<Register>();
+ Register out = locations->Out().AsRegister<Register>();
if (index.IsConstant()) {
__ movsxb(out, Address(obj,
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset));
} else {
- __ movsxb(out, Address(obj, index.As<Register>(), TIMES_1, data_offset));
+ __ movsxb(out, Address(obj, index.AsRegister<Register>(), TIMES_1, data_offset));
}
break;
}
case Primitive::kPrimShort: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int16_t)).Uint32Value();
- Register out = locations->Out().As<Register>();
+ Register out = locations->Out().AsRegister<Register>();
if (index.IsConstant()) {
__ movsxw(out, Address(obj,
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset));
} else {
- __ movsxw(out, Address(obj, index.As<Register>(), TIMES_2, data_offset));
+ __ movsxw(out, Address(obj, index.AsRegister<Register>(), TIMES_2, data_offset));
}
break;
}
case Primitive::kPrimChar: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
- Register out = locations->Out().As<Register>();
+ Register out = locations->Out().AsRegister<Register>();
if (index.IsConstant()) {
__ movzxw(out, Address(obj,
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset));
} else {
- __ movzxw(out, Address(obj, index.As<Register>(), TIMES_2, data_offset));
+ __ movzxw(out, Address(obj, index.AsRegister<Register>(), TIMES_2, data_offset));
}
break;
}
@@ -2676,12 +2836,12 @@ void InstructionCodeGeneratorX86::VisitArrayGet(HArrayGet* instruction) {
case Primitive::kPrimInt:
case Primitive::kPrimNot: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
- Register out = locations->Out().As<Register>();
+ Register out = locations->Out().AsRegister<Register>();
if (index.IsConstant()) {
__ movl(out, Address(obj,
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset));
} else {
- __ movl(out, Address(obj, index.As<Register>(), TIMES_4, data_offset));
+ __ movl(out, Address(obj, index.AsRegister<Register>(), TIMES_4, data_offset));
}
break;
}
@@ -2695,9 +2855,9 @@ void InstructionCodeGeneratorX86::VisitArrayGet(HArrayGet* instruction) {
__ movl(out.AsRegisterPairHigh<Register>(), Address(obj, offset + kX86WordSize));
} else {
__ movl(out.AsRegisterPairLow<Register>(),
- Address(obj, index.As<Register>(), TIMES_8, data_offset));
+ Address(obj, index.AsRegister<Register>(), TIMES_8, data_offset));
__ movl(out.AsRegisterPairHigh<Register>(),
- Address(obj, index.As<Register>(), TIMES_8, data_offset + kX86WordSize));
+ Address(obj, index.AsRegister<Register>(), TIMES_8, data_offset + kX86WordSize));
}
break;
}
@@ -2757,7 +2917,7 @@ void LocationsBuilderX86::VisitArraySet(HArraySet* instruction) {
void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) {
LocationSummary* locations = instruction->GetLocations();
- Register obj = locations->InAt(0).As<Register>();
+ Register obj = locations->InAt(0).AsRegister<Register>();
Location index = locations->InAt(1);
Location value = locations->InAt(2);
Primitive::Type value_type = instruction->GetComponentType();
@@ -2772,17 +2932,17 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) {
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
if (value.IsRegister()) {
- __ movb(Address(obj, offset), value.As<ByteRegister>());
+ __ movb(Address(obj, offset), value.AsRegister<ByteRegister>());
} else {
__ movb(Address(obj, offset),
Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
}
} else {
if (value.IsRegister()) {
- __ movb(Address(obj, index.As<Register>(), TIMES_1, data_offset),
- value.As<ByteRegister>());
+ __ movb(Address(obj, index.AsRegister<Register>(), TIMES_1, data_offset),
+ value.AsRegister<ByteRegister>());
} else {
- __ movb(Address(obj, index.As<Register>(), TIMES_1, data_offset),
+ __ movb(Address(obj, index.AsRegister<Register>(), TIMES_1, data_offset),
Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
}
}
@@ -2795,17 +2955,17 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) {
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
if (value.IsRegister()) {
- __ movw(Address(obj, offset), value.As<Register>());
+ __ movw(Address(obj, offset), value.AsRegister<Register>());
} else {
__ movw(Address(obj, offset),
Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
}
} else {
if (value.IsRegister()) {
- __ movw(Address(obj, index.As<Register>(), TIMES_2, data_offset),
- value.As<Register>());
+ __ movw(Address(obj, index.AsRegister<Register>(), TIMES_2, data_offset),
+ value.AsRegister<Register>());
} else {
- __ movw(Address(obj, index.As<Register>(), TIMES_2, data_offset),
+ __ movw(Address(obj, index.AsRegister<Register>(), TIMES_2, data_offset),
Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
}
}
@@ -2817,9 +2977,10 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) {
if (!needs_runtime_call) {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
if (index.IsConstant()) {
- size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
if (value.IsRegister()) {
- __ movl(Address(obj, offset), value.As<Register>());
+ __ movl(Address(obj, offset), value.AsRegister<Register>());
} else {
DCHECK(value.IsConstant()) << value;
__ movl(Address(obj, offset),
@@ -2828,19 +2989,19 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) {
} else {
DCHECK(index.IsRegister()) << index;
if (value.IsRegister()) {
- __ movl(Address(obj, index.As<Register>(), TIMES_4, data_offset),
- value.As<Register>());
+ __ movl(Address(obj, index.AsRegister<Register>(), TIMES_4, data_offset),
+ value.AsRegister<Register>());
} else {
DCHECK(value.IsConstant()) << value;
- __ movl(Address(obj, index.As<Register>(), TIMES_4, data_offset),
+ __ movl(Address(obj, index.AsRegister<Register>(), TIMES_4, data_offset),
Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
}
}
if (needs_write_barrier) {
- Register temp = locations->GetTemp(0).As<Register>();
- Register card = locations->GetTemp(1).As<Register>();
- codegen_->MarkGCCard(temp, card, obj, value.As<Register>());
+ Register temp = locations->GetTemp(0).AsRegister<Register>();
+ Register card = locations->GetTemp(1).AsRegister<Register>();
+ codegen_->MarkGCCard(temp, card, obj, value.AsRegister<Register>());
}
} else {
DCHECK_EQ(value_type, Primitive::kPrimNot);
@@ -2866,16 +3027,16 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) {
}
} else {
if (value.IsRegisterPair()) {
- __ movl(Address(obj, index.As<Register>(), TIMES_8, data_offset),
+ __ movl(Address(obj, index.AsRegister<Register>(), TIMES_8, data_offset),
value.AsRegisterPairLow<Register>());
- __ movl(Address(obj, index.As<Register>(), TIMES_8, data_offset + kX86WordSize),
+ __ movl(Address(obj, index.AsRegister<Register>(), TIMES_8, data_offset + kX86WordSize),
value.AsRegisterPairHigh<Register>());
} else {
DCHECK(value.IsConstant());
int64_t val = value.GetConstant()->AsLongConstant()->GetValue();
- __ movl(Address(obj, index.As<Register>(), TIMES_8, data_offset),
+ __ movl(Address(obj, index.AsRegister<Register>(), TIMES_8, data_offset),
Immediate(Low32Bits(val)));
- __ movl(Address(obj, index.As<Register>(), TIMES_8, data_offset + kX86WordSize),
+ __ movl(Address(obj, index.AsRegister<Register>(), TIMES_8, data_offset + kX86WordSize),
Immediate(High32Bits(val)));
}
}
@@ -2902,8 +3063,8 @@ void LocationsBuilderX86::VisitArrayLength(HArrayLength* instruction) {
void InstructionCodeGeneratorX86::VisitArrayLength(HArrayLength* instruction) {
LocationSummary* locations = instruction->GetLocations();
uint32_t offset = mirror::Array::LengthOffset().Uint32Value();
- Register obj = locations->InAt(0).As<Register>();
- Register out = locations->Out().As<Register>();
+ Register obj = locations->InAt(0).AsRegister<Register>();
+ Register out = locations->Out().AsRegister<Register>();
__ movl(out, Address(obj, offset));
}
@@ -2923,8 +3084,8 @@ void InstructionCodeGeneratorX86::VisitBoundsCheck(HBoundsCheck* instruction) {
instruction, locations->InAt(0), locations->InAt(1));
codegen_->AddSlowPath(slow_path);
- Register index = locations->InAt(0).As<Register>();
- Register length = locations->InAt(1).As<Register>();
+ Register index = locations->InAt(0).AsRegister<Register>();
+ Register length = locations->InAt(1).AsRegister<Register>();
__ cmpl(index, length);
__ j(kAboveEqual, slow_path->GetEntryLabel());
@@ -3001,14 +3162,14 @@ void ParallelMoveResolverX86::EmitMove(size_t index) {
if (source.IsRegister()) {
if (destination.IsRegister()) {
- __ movl(destination.As<Register>(), source.As<Register>());
+ __ movl(destination.AsRegister<Register>(), source.AsRegister<Register>());
} else {
DCHECK(destination.IsStackSlot());
- __ movl(Address(ESP, destination.GetStackIndex()), source.As<Register>());
+ __ movl(Address(ESP, destination.GetStackIndex()), source.AsRegister<Register>());
}
} else if (source.IsStackSlot()) {
if (destination.IsRegister()) {
- __ movl(destination.As<Register>(), Address(ESP, source.GetStackIndex()));
+ __ movl(destination.AsRegister<Register>(), Address(ESP, source.GetStackIndex()));
} else {
DCHECK(destination.IsStackSlot());
MoveMemoryToMemory(destination.GetStackIndex(),
@@ -3018,7 +3179,7 @@ void ParallelMoveResolverX86::EmitMove(size_t index) {
HIntConstant* instruction = source.GetConstant()->AsIntConstant();
Immediate imm(instruction->AsIntConstant()->GetValue());
if (destination.IsRegister()) {
- __ movl(destination.As<Register>(), imm);
+ __ movl(destination.AsRegister<Register>(), imm);
} else {
__ movl(Address(ESP, destination.GetStackIndex()), imm);
}
@@ -3060,11 +3221,11 @@ void ParallelMoveResolverX86::EmitSwap(size_t index) {
Location destination = move->GetDestination();
if (source.IsRegister() && destination.IsRegister()) {
- __ xchgl(destination.As<Register>(), source.As<Register>());
+ __ xchgl(destination.AsRegister<Register>(), source.AsRegister<Register>());
} else if (source.IsRegister() && destination.IsStackSlot()) {
- Exchange(source.As<Register>(), destination.GetStackIndex());
+ Exchange(source.AsRegister<Register>(), destination.GetStackIndex());
} else if (source.IsStackSlot() && destination.IsRegister()) {
- Exchange(destination.As<Register>(), source.GetStackIndex());
+ Exchange(destination.AsRegister<Register>(), source.GetStackIndex());
} else if (source.IsStackSlot() && destination.IsStackSlot()) {
Exchange(destination.GetStackIndex(), source.GetStackIndex());
} else {
@@ -3090,7 +3251,7 @@ void LocationsBuilderX86::VisitLoadClass(HLoadClass* cls) {
}
void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) {
- Register out = cls->GetLocations()->Out().As<Register>();
+ Register out = cls->GetLocations()->Out().AsRegister<Register>();
if (cls->IsReferrersClass()) {
DCHECK(!cls->CanCallRuntime());
DCHECK(!cls->MustGenerateClinitCheck());
@@ -3129,7 +3290,8 @@ void InstructionCodeGeneratorX86::VisitClinitCheck(HClinitCheck* check) {
SlowPathCodeX86* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathX86(
check->GetLoadClass(), check, check->GetDexPc(), true);
codegen_->AddSlowPath(slow_path);
- GenerateClassInitializationCheck(slow_path, check->GetLocations()->InAt(0).As<Register>());
+ GenerateClassInitializationCheck(slow_path,
+ check->GetLocations()->InAt(0).AsRegister<Register>());
}
void InstructionCodeGeneratorX86::GenerateClassInitializationCheck(
@@ -3150,37 +3312,37 @@ void LocationsBuilderX86::VisitStaticFieldGet(HStaticFieldGet* instruction) {
void InstructionCodeGeneratorX86::VisitStaticFieldGet(HStaticFieldGet* instruction) {
LocationSummary* locations = instruction->GetLocations();
- Register cls = locations->InAt(0).As<Register>();
+ Register cls = locations->InAt(0).AsRegister<Register>();
uint32_t offset = instruction->GetFieldOffset().Uint32Value();
switch (instruction->GetType()) {
case Primitive::kPrimBoolean: {
- Register out = locations->Out().As<Register>();
+ Register out = locations->Out().AsRegister<Register>();
__ movzxb(out, Address(cls, offset));
break;
}
case Primitive::kPrimByte: {
- Register out = locations->Out().As<Register>();
+ Register out = locations->Out().AsRegister<Register>();
__ movsxb(out, Address(cls, offset));
break;
}
case Primitive::kPrimShort: {
- Register out = locations->Out().As<Register>();
+ Register out = locations->Out().AsRegister<Register>();
__ movsxw(out, Address(cls, offset));
break;
}
case Primitive::kPrimChar: {
- Register out = locations->Out().As<Register>();
+ Register out = locations->Out().AsRegister<Register>();
__ movzxw(out, Address(cls, offset));
break;
}
case Primitive::kPrimInt:
case Primitive::kPrimNot: {
- Register out = locations->Out().As<Register>();
+ Register out = locations->Out().AsRegister<Register>();
__ movl(out, Address(cls, offset));
break;
}
@@ -3193,13 +3355,13 @@ void InstructionCodeGeneratorX86::VisitStaticFieldGet(HStaticFieldGet* instructi
}
case Primitive::kPrimFloat: {
- XmmRegister out = locations->Out().As<XmmRegister>();
+ XmmRegister out = locations->Out().AsFpuRegister<XmmRegister>();
__ movss(out, Address(cls, offset));
break;
}
case Primitive::kPrimDouble: {
- XmmRegister out = locations->Out().As<XmmRegister>();
+ XmmRegister out = locations->Out().AsFpuRegister<XmmRegister>();
__ movsd(out, Address(cls, offset));
break;
}
@@ -3237,33 +3399,33 @@ void LocationsBuilderX86::VisitStaticFieldSet(HStaticFieldSet* instruction) {
void InstructionCodeGeneratorX86::VisitStaticFieldSet(HStaticFieldSet* instruction) {
LocationSummary* locations = instruction->GetLocations();
- Register cls = locations->InAt(0).As<Register>();
+ Register cls = locations->InAt(0).AsRegister<Register>();
uint32_t offset = instruction->GetFieldOffset().Uint32Value();
Primitive::Type field_type = instruction->GetFieldType();
switch (field_type) {
case Primitive::kPrimBoolean:
case Primitive::kPrimByte: {
- ByteRegister value = locations->InAt(1).As<ByteRegister>();
+ ByteRegister value = locations->InAt(1).AsRegister<ByteRegister>();
__ movb(Address(cls, offset), value);
break;
}
case Primitive::kPrimShort:
case Primitive::kPrimChar: {
- Register value = locations->InAt(1).As<Register>();
+ Register value = locations->InAt(1).AsRegister<Register>();
__ movw(Address(cls, offset), value);
break;
}
case Primitive::kPrimInt:
case Primitive::kPrimNot: {
- Register value = locations->InAt(1).As<Register>();
+ Register value = locations->InAt(1).AsRegister<Register>();
__ movl(Address(cls, offset), value);
if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
- Register temp = locations->GetTemp(0).As<Register>();
- Register card = locations->GetTemp(1).As<Register>();
+ Register temp = locations->GetTemp(0).AsRegister<Register>();
+ Register card = locations->GetTemp(1).AsRegister<Register>();
codegen_->MarkGCCard(temp, card, cls, value);
}
break;
@@ -3277,13 +3439,13 @@ void InstructionCodeGeneratorX86::VisitStaticFieldSet(HStaticFieldSet* instructi
}
case Primitive::kPrimFloat: {
- XmmRegister value = locations->InAt(1).As<XmmRegister>();
+ XmmRegister value = locations->InAt(1).AsFpuRegister<XmmRegister>();
__ movss(Address(cls, offset), value);
break;
}
case Primitive::kPrimDouble: {
- XmmRegister value = locations->InAt(1).As<XmmRegister>();
+ XmmRegister value = locations->InAt(1).AsFpuRegister<XmmRegister>();
__ movsd(Address(cls, offset), value);
break;
}
@@ -3304,7 +3466,7 @@ void InstructionCodeGeneratorX86::VisitLoadString(HLoadString* load) {
SlowPathCodeX86* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathX86(load);
codegen_->AddSlowPath(slow_path);
- Register out = load->GetLocations()->Out().As<Register>();
+ Register out = load->GetLocations()->Out().AsRegister<Register>();
codegen_->LoadCurrentMethod(out);
__ movl(out, Address(out, mirror::ArtMethod::DeclaringClassOffset().Int32Value()));
__ movl(out, Address(out, mirror::Class::DexCacheStringsOffset().Int32Value()));
@@ -3322,7 +3484,7 @@ void LocationsBuilderX86::VisitLoadException(HLoadException* load) {
void InstructionCodeGeneratorX86::VisitLoadException(HLoadException* load) {
Address address = Address::Absolute(Thread::ExceptionOffset<kX86WordSize>().Int32Value());
- __ fs()->movl(load->GetLocations()->Out().As<Register>(), address);
+ __ fs()->movl(load->GetLocations()->Out().AsRegister<Register>(), address);
__ fs()->movl(address, Immediate(0));
}
@@ -3350,9 +3512,9 @@ void LocationsBuilderX86::VisitInstanceOf(HInstanceOf* instruction) {
void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) {
LocationSummary* locations = instruction->GetLocations();
- Register obj = locations->InAt(0).As<Register>();
+ Register obj = locations->InAt(0).AsRegister<Register>();
Location cls = locations->InAt(1);
- Register out = locations->Out().As<Register>();
+ Register out = locations->Out().AsRegister<Register>();
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
Label done, zero;
SlowPathCodeX86* slow_path = nullptr;
@@ -3364,7 +3526,7 @@ void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) {
__ movl(out, Address(obj, class_offset));
// Compare the class of `obj` with `cls`.
if (cls.IsRegister()) {
- __ cmpl(out, cls.As<Register>());
+ __ cmpl(out, cls.AsRegister<Register>());
} else {
DCHECK(cls.IsStackSlot()) << cls;
__ cmpl(out, Address(ESP, cls.GetStackIndex()));
@@ -3403,9 +3565,9 @@ void LocationsBuilderX86::VisitCheckCast(HCheckCast* instruction) {
void InstructionCodeGeneratorX86::VisitCheckCast(HCheckCast* instruction) {
LocationSummary* locations = instruction->GetLocations();
- Register obj = locations->InAt(0).As<Register>();
+ Register obj = locations->InAt(0).AsRegister<Register>();
Location cls = locations->InAt(1);
- Register temp = locations->GetTemp(0).As<Register>();
+ Register temp = locations->GetTemp(0).AsRegister<Register>();
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
SlowPathCodeX86* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86(
instruction, locations->InAt(1), locations->GetTemp(0), instruction->GetDexPc());
@@ -3418,7 +3580,7 @@ void InstructionCodeGeneratorX86::VisitCheckCast(HCheckCast* instruction) {
// Compare the class of `obj` with `cls`.
if (cls.IsRegister()) {
- __ cmpl(temp, cls.As<Register>());
+ __ cmpl(temp, cls.AsRegister<Register>());
} else {
DCHECK(cls.IsStackSlot()) << cls;
__ cmpl(temp, Address(ESP, cls.GetStackIndex()));
@@ -3477,30 +3639,33 @@ void InstructionCodeGeneratorX86::HandleBitwiseOperation(HBinaryOperation* instr
if (instruction->GetResultType() == Primitive::kPrimInt) {
if (second.IsRegister()) {
if (instruction->IsAnd()) {
- __ andl(first.As<Register>(), second.As<Register>());
+ __ andl(first.AsRegister<Register>(), second.AsRegister<Register>());
} else if (instruction->IsOr()) {
- __ orl(first.As<Register>(), second.As<Register>());
+ __ orl(first.AsRegister<Register>(), second.AsRegister<Register>());
} else {
DCHECK(instruction->IsXor());
- __ xorl(first.As<Register>(), second.As<Register>());
+ __ xorl(first.AsRegister<Register>(), second.AsRegister<Register>());
}
} else if (second.IsConstant()) {
if (instruction->IsAnd()) {
- __ andl(first.As<Register>(), Immediate(second.GetConstant()->AsIntConstant()->GetValue()));
+ __ andl(first.AsRegister<Register>(),
+ Immediate(second.GetConstant()->AsIntConstant()->GetValue()));
} else if (instruction->IsOr()) {
- __ orl(first.As<Register>(), Immediate(second.GetConstant()->AsIntConstant()->GetValue()));
+ __ orl(first.AsRegister<Register>(),
+ Immediate(second.GetConstant()->AsIntConstant()->GetValue()));
} else {
DCHECK(instruction->IsXor());
- __ xorl(first.As<Register>(), Immediate(second.GetConstant()->AsIntConstant()->GetValue()));
+ __ xorl(first.AsRegister<Register>(),
+ Immediate(second.GetConstant()->AsIntConstant()->GetValue()));
}
} else {
if (instruction->IsAnd()) {
- __ andl(first.As<Register>(), Address(ESP, second.GetStackIndex()));
+ __ andl(first.AsRegister<Register>(), Address(ESP, second.GetStackIndex()));
} else if (instruction->IsOr()) {
- __ orl(first.As<Register>(), Address(ESP, second.GetStackIndex()));
+ __ orl(first.AsRegister<Register>(), Address(ESP, second.GetStackIndex()));
} else {
DCHECK(instruction->IsXor());
- __ xorl(first.As<Register>(), Address(ESP, second.GetStackIndex()));
+ __ xorl(first.AsRegister<Register>(), Address(ESP, second.GetStackIndex()));
}
}
} else {
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 34fa1e7a3b..47fd304969 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -539,37 +539,37 @@ void CodeGeneratorX86_64::Move(Location destination, Location source) {
}
if (destination.IsRegister()) {
if (source.IsRegister()) {
- __ movq(destination.As<CpuRegister>(), source.As<CpuRegister>());
+ __ movq(destination.AsRegister<CpuRegister>(), source.AsRegister<CpuRegister>());
} else if (source.IsFpuRegister()) {
- __ movd(destination.As<CpuRegister>(), source.As<XmmRegister>());
+ __ movd(destination.AsRegister<CpuRegister>(), source.AsFpuRegister<XmmRegister>());
} else if (source.IsStackSlot()) {
- __ movl(destination.As<CpuRegister>(),
+ __ movl(destination.AsRegister<CpuRegister>(),
Address(CpuRegister(RSP), source.GetStackIndex()));
} else {
DCHECK(source.IsDoubleStackSlot());
- __ movq(destination.As<CpuRegister>(),
+ __ movq(destination.AsRegister<CpuRegister>(),
Address(CpuRegister(RSP), source.GetStackIndex()));
}
} else if (destination.IsFpuRegister()) {
if (source.IsRegister()) {
- __ movd(destination.As<XmmRegister>(), source.As<CpuRegister>());
+ __ movd(destination.AsFpuRegister<XmmRegister>(), source.AsRegister<CpuRegister>());
} else if (source.IsFpuRegister()) {
- __ movaps(destination.As<XmmRegister>(), source.As<XmmRegister>());
+ __ movaps(destination.AsFpuRegister<XmmRegister>(), source.AsFpuRegister<XmmRegister>());
} else if (source.IsStackSlot()) {
- __ movss(destination.As<XmmRegister>(),
+ __ movss(destination.AsFpuRegister<XmmRegister>(),
Address(CpuRegister(RSP), source.GetStackIndex()));
} else {
DCHECK(source.IsDoubleStackSlot());
- __ movsd(destination.As<XmmRegister>(),
+ __ movsd(destination.AsFpuRegister<XmmRegister>(),
Address(CpuRegister(RSP), source.GetStackIndex()));
}
} else if (destination.IsStackSlot()) {
if (source.IsRegister()) {
__ movl(Address(CpuRegister(RSP), destination.GetStackIndex()),
- source.As<CpuRegister>());
+ source.AsRegister<CpuRegister>());
} else if (source.IsFpuRegister()) {
__ movss(Address(CpuRegister(RSP), destination.GetStackIndex()),
- source.As<XmmRegister>());
+ source.AsFpuRegister<XmmRegister>());
} else {
DCHECK(source.IsStackSlot());
__ movl(CpuRegister(TMP), Address(CpuRegister(RSP), source.GetStackIndex()));
@@ -579,10 +579,10 @@ void CodeGeneratorX86_64::Move(Location destination, Location source) {
DCHECK(destination.IsDoubleStackSlot());
if (source.IsRegister()) {
__ movq(Address(CpuRegister(RSP), destination.GetStackIndex()),
- source.As<CpuRegister>());
+ source.AsRegister<CpuRegister>());
} else if (source.IsFpuRegister()) {
__ movsd(Address(CpuRegister(RSP), destination.GetStackIndex()),
- source.As<XmmRegister>());
+ source.AsFpuRegister<XmmRegister>());
} else {
DCHECK(source.IsDoubleStackSlot());
__ movq(CpuRegister(TMP), Address(CpuRegister(RSP), source.GetStackIndex()));
@@ -604,7 +604,7 @@ void CodeGeneratorX86_64::Move(HInstruction* instruction,
if (const_to_move->IsIntConstant()) {
Immediate imm(const_to_move->AsIntConstant()->GetValue());
if (location.IsRegister()) {
- __ movl(location.As<CpuRegister>(), imm);
+ __ movl(location.AsRegister<CpuRegister>(), imm);
} else if (location.IsStackSlot()) {
__ movl(Address(CpuRegister(RSP), location.GetStackIndex()), imm);
} else {
@@ -614,7 +614,7 @@ void CodeGeneratorX86_64::Move(HInstruction* instruction,
} else if (const_to_move->IsLongConstant()) {
int64_t value = const_to_move->AsLongConstant()->GetValue();
if (location.IsRegister()) {
- __ movq(location.As<CpuRegister>(), Immediate(value));
+ __ movq(location.AsRegister<CpuRegister>(), Immediate(value));
} else if (location.IsDoubleStackSlot()) {
__ movq(CpuRegister(TMP), Immediate(value));
__ movq(Address(CpuRegister(RSP), location.GetStackIndex()), CpuRegister(TMP));
@@ -637,7 +637,8 @@ void CodeGeneratorX86_64::Move(HInstruction* instruction,
case Primitive::kPrimLong:
case Primitive::kPrimDouble:
- Move(location, Location::DoubleStackSlot(GetStackSlot(instruction->AsLoadLocal()->GetLocal())));
+ Move(location,
+ Location::DoubleStackSlot(GetStackSlot(instruction->AsLoadLocal()->GetLocal())));
break;
default:
@@ -741,7 +742,7 @@ void InstructionCodeGeneratorX86_64::VisitIf(HIf* if_instr) {
// Materialized condition, compare against 0.
Location lhs = if_instr->GetLocations()->InAt(0);
if (lhs.IsRegister()) {
- __ cmpl(lhs.As<CpuRegister>(), Immediate(0));
+ __ cmpl(lhs.AsRegister<CpuRegister>(), Immediate(0));
} else {
__ cmpl(Address(CpuRegister(RSP), lhs.GetStackIndex()),
Immediate(0));
@@ -755,12 +756,12 @@ void InstructionCodeGeneratorX86_64::VisitIf(HIf* if_instr) {
Location lhs = cond->GetLocations()->InAt(0);
Location rhs = cond->GetLocations()->InAt(1);
if (rhs.IsRegister()) {
- __ cmpl(lhs.As<CpuRegister>(), rhs.As<CpuRegister>());
+ __ cmpl(lhs.AsRegister<CpuRegister>(), rhs.AsRegister<CpuRegister>());
} else if (rhs.IsConstant()) {
- __ cmpl(lhs.As<CpuRegister>(),
+ __ cmpl(lhs.AsRegister<CpuRegister>(),
Immediate(rhs.GetConstant()->AsIntConstant()->GetValue()));
} else {
- __ cmpl(lhs.As<CpuRegister>(),
+ __ cmpl(lhs.AsRegister<CpuRegister>(),
Address(CpuRegister(RSP), rhs.GetStackIndex()));
}
__ j(X86_64Condition(cond->AsCondition()->GetCondition()),
@@ -831,17 +832,17 @@ void LocationsBuilderX86_64::VisitCondition(HCondition* comp) {
void InstructionCodeGeneratorX86_64::VisitCondition(HCondition* comp) {
if (comp->NeedsMaterialization()) {
LocationSummary* locations = comp->GetLocations();
- CpuRegister reg = locations->Out().As<CpuRegister>();
+ CpuRegister reg = locations->Out().AsRegister<CpuRegister>();
// Clear register: setcc only sets the low byte.
__ xorq(reg, reg);
if (locations->InAt(1).IsRegister()) {
- __ cmpl(locations->InAt(0).As<CpuRegister>(),
- locations->InAt(1).As<CpuRegister>());
+ __ cmpl(locations->InAt(0).AsRegister<CpuRegister>(),
+ locations->InAt(1).AsRegister<CpuRegister>());
} else if (locations->InAt(1).IsConstant()) {
- __ cmpl(locations->InAt(0).As<CpuRegister>(),
+ __ cmpl(locations->InAt(0).AsRegister<CpuRegister>(),
Immediate(locations->InAt(1).GetConstant()->AsIntConstant()->GetValue()));
} else {
- __ cmpl(locations->InAt(0).As<CpuRegister>(),
+ __ cmpl(locations->InAt(0).AsRegister<CpuRegister>(),
Address(CpuRegister(RSP), locations->InAt(1).GetStackIndex()));
}
__ setcc(X86_64Condition(comp->GetCondition()), reg);
@@ -899,33 +900,61 @@ void InstructionCodeGeneratorX86_64::VisitGreaterThanOrEqual(HGreaterThanOrEqual
void LocationsBuilderX86_64::VisitCompare(HCompare* compare) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
- locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ switch (compare->InputAt(0)->GetType()) {
+ case Primitive::kPrimLong: {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+ }
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresRegister());
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected type for compare operation " << compare->InputAt(0)->GetType();
+ }
}
void InstructionCodeGeneratorX86_64::VisitCompare(HCompare* compare) {
- Label greater, done;
LocationSummary* locations = compare->GetLocations();
- switch (compare->InputAt(0)->GetType()) {
- case Primitive::kPrimLong:
- __ cmpq(locations->InAt(0).As<CpuRegister>(),
- locations->InAt(1).As<CpuRegister>());
+ CpuRegister out = locations->Out().AsRegister<CpuRegister>();
+ Location left = locations->InAt(0);
+ Location right = locations->InAt(1);
+
+ Label less, greater, done;
+ Primitive::Type type = compare->InputAt(0)->GetType();
+ switch (type) {
+ case Primitive::kPrimLong: {
+ __ cmpq(left.AsRegister<CpuRegister>(), right.AsRegister<CpuRegister>());
+ break;
+ }
+ case Primitive::kPrimFloat: {
+ __ ucomiss(left.AsFpuRegister<XmmRegister>(), right.AsFpuRegister<XmmRegister>());
+ __ j(kUnordered, compare->IsGtBias() ? &greater : &less);
+ break;
+ }
+ case Primitive::kPrimDouble: {
+ __ ucomisd(left.AsFpuRegister<XmmRegister>(), right.AsFpuRegister<XmmRegister>());
+ __ j(kUnordered, compare->IsGtBias() ? &greater : &less);
break;
+ }
default:
- LOG(FATAL) << "Unimplemented compare type " << compare->InputAt(0)->GetType();
+ LOG(FATAL) << "Unexpected compare type " << type;
}
-
- CpuRegister output = locations->Out().As<CpuRegister>();
- __ movl(output, Immediate(0));
+ __ movl(out, Immediate(0));
__ j(kEqual, &done);
- __ j(kGreater, &greater);
+ __ j(type == Primitive::kPrimLong ? kLess : kBelow, &less); // ucomis{s,d} sets CF (kBelow)
- __ movl(output, Immediate(-1));
+ __ Bind(&greater);
+ __ movl(out, Immediate(1));
__ jmp(&done);
- __ Bind(&greater);
- __ movl(output, Immediate(1));
+ __ Bind(&less);
+ __ movl(out, Immediate(-1));
__ Bind(&done);
}
@@ -1019,12 +1048,12 @@ void InstructionCodeGeneratorX86_64::VisitReturn(HReturn* ret) {
case Primitive::kPrimInt:
case Primitive::kPrimNot:
case Primitive::kPrimLong:
- DCHECK_EQ(ret->GetLocations()->InAt(0).As<CpuRegister>().AsRegister(), RAX);
+ DCHECK_EQ(ret->GetLocations()->InAt(0).AsRegister<CpuRegister>().AsRegister(), RAX);
break;
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
- DCHECK_EQ(ret->GetLocations()->InAt(0).As<XmmRegister>().AsFloatRegister(),
+ DCHECK_EQ(ret->GetLocations()->InAt(0).AsFpuRegister<XmmRegister>().AsFloatRegister(),
XMM0);
break;
@@ -1097,7 +1126,7 @@ void LocationsBuilderX86_64::VisitInvokeStatic(HInvokeStatic* invoke) {
}
void InstructionCodeGeneratorX86_64::VisitInvokeStatic(HInvokeStatic* invoke) {
- CpuRegister temp = invoke->GetLocations()->GetTemp(0).As<CpuRegister>();
+ CpuRegister temp = invoke->GetLocations()->GetTemp(0).AsRegister<CpuRegister>();
// TODO: Implement all kinds of calls:
// 1) boot -> boot
// 2) app -> boot
@@ -1156,7 +1185,7 @@ void LocationsBuilderX86_64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
}
void InstructionCodeGeneratorX86_64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
- CpuRegister temp = invoke->GetLocations()->GetTemp(0).As<CpuRegister>();
+ CpuRegister temp = invoke->GetLocations()->GetTemp(0).AsRegister<CpuRegister>();
size_t method_offset = mirror::Class::EmbeddedVTableOffset().SizeValue() +
invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry);
LocationSummary* locations = invoke->GetLocations();
@@ -1167,7 +1196,7 @@ void InstructionCodeGeneratorX86_64::VisitInvokeVirtual(HInvokeVirtual* invoke)
__ movl(temp, Address(CpuRegister(RSP), receiver.GetStackIndex()));
__ movl(temp, Address(temp, class_offset));
} else {
- __ movl(temp, Address(receiver.As<CpuRegister>(), class_offset));
+ __ movl(temp, Address(receiver.AsRegister<CpuRegister>(), class_offset));
}
// temp = temp->GetMethodAt(method_offset);
__ movl(temp, Address(temp, method_offset));
@@ -1187,7 +1216,7 @@ void LocationsBuilderX86_64::VisitInvokeInterface(HInvokeInterface* invoke) {
void InstructionCodeGeneratorX86_64::VisitInvokeInterface(HInvokeInterface* invoke) {
// TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
- CpuRegister temp = invoke->GetLocations()->GetTemp(0).As<CpuRegister>();
+ CpuRegister temp = invoke->GetLocations()->GetTemp(0).AsRegister<CpuRegister>();
uint32_t method_offset = mirror::Class::EmbeddedImTableOffset().Uint32Value() +
(invoke->GetImtIndex() % mirror::Class::kImtSize) * sizeof(mirror::Class::ImTableEntry);
LocationSummary* locations = invoke->GetLocations();
@@ -1195,7 +1224,7 @@ void InstructionCodeGeneratorX86_64::VisitInvokeInterface(HInvokeInterface* invo
size_t class_offset = mirror::Object::ClassOffset().SizeValue();
// Set the hidden argument.
- __ movq(invoke->GetLocations()->GetTemp(1).As<CpuRegister>(),
+ __ movq(invoke->GetLocations()->GetTemp(1).AsRegister<CpuRegister>(),
Immediate(invoke->GetDexMethodIndex()));
// temp = object->GetClass();
@@ -1203,7 +1232,7 @@ void InstructionCodeGeneratorX86_64::VisitInvokeInterface(HInvokeInterface* invo
__ movl(temp, Address(CpuRegister(RSP), receiver.GetStackIndex()));
__ movl(temp, Address(temp, class_offset));
} else {
- __ movl(temp, Address(receiver.As<CpuRegister>(), class_offset));
+ __ movl(temp, Address(receiver.AsRegister<CpuRegister>(), class_offset));
}
// temp = temp->GetImtEntryAt(method_offset);
__ movl(temp, Address(temp, method_offset));
@@ -1228,9 +1257,9 @@ void LocationsBuilderX86_64::VisitNeg(HNeg* neg) {
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
locations->SetInAt(0, Location::RequiresFpuRegister());
- // Output overlaps as we need a fresh (zero-initialized)
- // register to perform subtraction from zero.
- locations->SetOut(Location::RequiresFpuRegister());
+ locations->SetOut(Location::SameAsFirstInput());
+ locations->AddTemp(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresFpuRegister());
break;
default:
@@ -1246,49 +1275,40 @@ void InstructionCodeGeneratorX86_64::VisitNeg(HNeg* neg) {
case Primitive::kPrimInt:
DCHECK(in.IsRegister());
DCHECK(in.Equals(out));
- __ negl(out.As<CpuRegister>());
+ __ negl(out.AsRegister<CpuRegister>());
break;
case Primitive::kPrimLong:
DCHECK(in.IsRegister());
DCHECK(in.Equals(out));
- __ negq(out.As<CpuRegister>());
+ __ negq(out.AsRegister<CpuRegister>());
break;
- case Primitive::kPrimFloat:
- DCHECK(in.IsFpuRegister());
- DCHECK(out.IsFpuRegister());
- DCHECK(!in.Equals(out));
- // TODO: Instead of computing negation as a subtraction from
- // zero, implement it with an exclusive or with value 0x80000000
- // (mask for bit 31, representing the sign of a single-precision
- // floating-point number), fetched from a constant pool:
- //
- // xorps out, [RIP:...] // value at RIP is 0x80 00 00 00
-
- // out = 0
- __ xorps(out.As<XmmRegister>(), out.As<XmmRegister>());
- // out = out - in
- __ subss(out.As<XmmRegister>(), in.As<XmmRegister>());
+ case Primitive::kPrimFloat: {
+ DCHECK(in.Equals(out));
+ CpuRegister constant = locations->GetTemp(0).AsRegister<CpuRegister>();
+ XmmRegister mask = locations->GetTemp(1).AsFpuRegister<XmmRegister>();
+ // Implement float negation with an exclusive or with value
+ // 0x80000000 (mask for bit 31, representing the sign of a
+ // single-precision floating-point number).
+ __ movq(constant, Immediate(INT64_C(0x80000000)));
+ __ movd(mask, constant);
+ __ xorps(out.AsFpuRegister<XmmRegister>(), mask);
break;
+ }
- case Primitive::kPrimDouble:
- DCHECK(in.IsFpuRegister());
- DCHECK(out.IsFpuRegister());
- DCHECK(!in.Equals(out));
- // TODO: Instead of computing negation as a subtraction from
- // zero, implement it with an exclusive or with value
+ case Primitive::kPrimDouble: {
+ DCHECK(in.Equals(out));
+ CpuRegister constant = locations->GetTemp(0).AsRegister<CpuRegister>();
+ XmmRegister mask = locations->GetTemp(1).AsFpuRegister<XmmRegister>();
+ // Implement double negation with an exclusive or with value
// 0x8000000000000000 (mask for bit 63, representing the sign of
- // a double-precision floating-point number), fetched from a
- // constant pool:
- //
- // xorpd out, [RIP:...] // value at RIP is 0x80 00 00 00 00 00 00 00
-
- // out = 0
- __ xorpd(out.As<XmmRegister>(), out.As<XmmRegister>());
- // out = out - in
- __ subsd(out.As<XmmRegister>(), in.As<XmmRegister>());
+ // a double-precision floating-point number).
+ __ movq(constant, Immediate(INT64_C(0x8000000000000000)));
+ __ movd(mask, constant);
+ __ xorpd(out.AsFpuRegister<XmmRegister>(), mask);
break;
+ }
default:
LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
@@ -1300,6 +1320,7 @@ void LocationsBuilderX86_64::VisitTypeConversion(HTypeConversion* conversion) {
new (GetGraph()->GetArena()) LocationSummary(conversion, LocationSummary::kNoCall);
Primitive::Type result_type = conversion->GetResultType();
Primitive::Type input_type = conversion->GetInputType();
+ DCHECK_NE(result_type, input_type);
switch (result_type) {
case Primitive::kPrimByte:
switch (input_type) {
@@ -1342,6 +1363,12 @@ void LocationsBuilderX86_64::VisitTypeConversion(HTypeConversion* conversion) {
break;
case Primitive::kPrimFloat:
+ // Processing a Dex `float-to-int' instruction.
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresFpuRegister());
+ break;
+
case Primitive::kPrimDouble:
LOG(FATAL) << "Type conversion from " << input_type
<< " to " << result_type << " not yet implemented";
@@ -1383,7 +1410,6 @@ void LocationsBuilderX86_64::VisitTypeConversion(HTypeConversion* conversion) {
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
- case Primitive::kPrimChar:
// Processing a Dex `int-to-char' instruction.
locations->SetInAt(0, Location::Any());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
@@ -1407,6 +1433,11 @@ void LocationsBuilderX86_64::VisitTypeConversion(HTypeConversion* conversion) {
break;
case Primitive::kPrimLong:
+ // Processing a Dex `long-to-float' instruction.
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresFpuRegister());
+ break;
+
case Primitive::kPrimDouble:
LOG(FATAL) << "Type conversion from " << input_type
<< " to " << result_type << " not yet implemented";
@@ -1430,6 +1461,11 @@ void LocationsBuilderX86_64::VisitTypeConversion(HTypeConversion* conversion) {
break;
case Primitive::kPrimLong:
+ // Processing a Dex `long-to-double' instruction.
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresFpuRegister());
+ break;
+
case Primitive::kPrimFloat:
LOG(FATAL) << "Type conversion from " << input_type
<< " to " << result_type << " not yet implemented";
@@ -1453,6 +1489,7 @@ void InstructionCodeGeneratorX86_64::VisitTypeConversion(HTypeConversion* conver
Location in = locations->InAt(0);
Primitive::Type result_type = conversion->GetResultType();
Primitive::Type input_type = conversion->GetInputType();
+ DCHECK_NE(result_type, input_type);
switch (result_type) {
case Primitive::kPrimByte:
switch (input_type) {
@@ -1461,13 +1498,13 @@ void InstructionCodeGeneratorX86_64::VisitTypeConversion(HTypeConversion* conver
case Primitive::kPrimChar:
// Processing a Dex `int-to-byte' instruction.
if (in.IsRegister()) {
- __ movsxb(out.As<CpuRegister>(), in.As<CpuRegister>());
+ __ movsxb(out.AsRegister<CpuRegister>(), in.AsRegister<CpuRegister>());
} else if (in.IsStackSlot()) {
- __ movsxb(out.As<CpuRegister>(),
+ __ movsxb(out.AsRegister<CpuRegister>(),
Address(CpuRegister(RSP), in.GetStackIndex()));
} else {
DCHECK(in.GetConstant()->IsIntConstant());
- __ movl(out.As<CpuRegister>(),
+ __ movl(out.AsRegister<CpuRegister>(),
Immediate(static_cast<int8_t>(in.GetConstant()->AsIntConstant()->GetValue())));
}
break;
@@ -1485,13 +1522,13 @@ void InstructionCodeGeneratorX86_64::VisitTypeConversion(HTypeConversion* conver
case Primitive::kPrimChar:
// Processing a Dex `int-to-short' instruction.
if (in.IsRegister()) {
- __ movsxw(out.As<CpuRegister>(), in.As<CpuRegister>());
+ __ movsxw(out.AsRegister<CpuRegister>(), in.AsRegister<CpuRegister>());
} else if (in.IsStackSlot()) {
- __ movsxw(out.As<CpuRegister>(),
+ __ movsxw(out.AsRegister<CpuRegister>(),
Address(CpuRegister(RSP), in.GetStackIndex()));
} else {
DCHECK(in.GetConstant()->IsIntConstant());
- __ movl(out.As<CpuRegister>(),
+ __ movl(out.AsRegister<CpuRegister>(),
Immediate(static_cast<int16_t>(in.GetConstant()->AsIntConstant()->GetValue())));
}
break;
@@ -1507,19 +1544,43 @@ void InstructionCodeGeneratorX86_64::VisitTypeConversion(HTypeConversion* conver
case Primitive::kPrimLong:
// Processing a Dex `long-to-int' instruction.
if (in.IsRegister()) {
- __ movl(out.As<CpuRegister>(), in.As<CpuRegister>());
+ __ movl(out.AsRegister<CpuRegister>(), in.AsRegister<CpuRegister>());
} else if (in.IsDoubleStackSlot()) {
- __ movl(out.As<CpuRegister>(),
+ __ movl(out.AsRegister<CpuRegister>(),
Address(CpuRegister(RSP), in.GetStackIndex()));
} else {
DCHECK(in.IsConstant());
DCHECK(in.GetConstant()->IsLongConstant());
int64_t value = in.GetConstant()->AsLongConstant()->GetValue();
- __ movl(out.As<CpuRegister>(), Immediate(static_cast<int32_t>(value)));
+ __ movl(out.AsRegister<CpuRegister>(), Immediate(static_cast<int32_t>(value)));
}
break;
- case Primitive::kPrimFloat:
+ case Primitive::kPrimFloat: {
+ // Processing a Dex `float-to-int' instruction.
+ XmmRegister input = in.AsFpuRegister<XmmRegister>();
+ CpuRegister output = out.AsRegister<CpuRegister>();
+ XmmRegister temp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
+ Label done, nan;
+
+ __ movl(output, Immediate(kPrimIntMax));
+ // temp = int-to-float(output)
+ __ cvtsi2ss(temp, output);
+ // if input >= temp goto done
+ __ comiss(input, temp);
+ __ j(kAboveEqual, &done);
+ // if input == NaN goto nan
+ __ j(kUnordered, &nan);
+ // output = float-to-int-truncate(input)
+ __ cvttss2si(output, input);
+ __ jmp(&done);
+ __ Bind(&nan);
+ // output = 0
+ __ xorl(output, output);
+ __ Bind(&done);
+ break;
+ }
+
case Primitive::kPrimDouble:
LOG(FATAL) << "Type conversion from " << input_type
<< " to " << result_type << " not yet implemented";
@@ -1540,7 +1601,7 @@ void InstructionCodeGeneratorX86_64::VisitTypeConversion(HTypeConversion* conver
case Primitive::kPrimChar:
// Processing a Dex `int-to-long' instruction.
DCHECK(in.IsRegister());
- __ movsxd(out.As<CpuRegister>(), in.As<CpuRegister>());
+ __ movsxd(out.AsRegister<CpuRegister>(), in.AsRegister<CpuRegister>());
break;
case Primitive::kPrimFloat:
@@ -1560,16 +1621,15 @@ void InstructionCodeGeneratorX86_64::VisitTypeConversion(HTypeConversion* conver
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
- case Primitive::kPrimChar:
// Processing a Dex `int-to-char' instruction.
if (in.IsRegister()) {
- __ movzxw(out.As<CpuRegister>(), in.As<CpuRegister>());
+ __ movzxw(out.AsRegister<CpuRegister>(), in.AsRegister<CpuRegister>());
} else if (in.IsStackSlot()) {
- __ movzxw(out.As<CpuRegister>(),
+ __ movzxw(out.AsRegister<CpuRegister>(),
Address(CpuRegister(RSP), in.GetStackIndex()));
} else {
DCHECK(in.GetConstant()->IsIntConstant());
- __ movl(out.As<CpuRegister>(),
+ __ movl(out.AsRegister<CpuRegister>(),
Immediate(static_cast<uint16_t>(in.GetConstant()->AsIntConstant()->GetValue())));
}
break;
@@ -1582,15 +1642,19 @@ void InstructionCodeGeneratorX86_64::VisitTypeConversion(HTypeConversion* conver
case Primitive::kPrimFloat:
switch (input_type) {
- // Processing a Dex `int-to-float' instruction.
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
case Primitive::kPrimChar:
- __ cvtsi2ss(out.As<XmmRegister>(), in.As<CpuRegister>());
+ // Processing a Dex `int-to-float' instruction.
+ __ cvtsi2ss(out.AsFpuRegister<XmmRegister>(), in.AsRegister<CpuRegister>(), false);
break;
case Primitive::kPrimLong:
+ // Processing a Dex `long-to-float' instruction.
+ __ cvtsi2ss(out.AsFpuRegister<XmmRegister>(), in.AsRegister<CpuRegister>(), true);
+ break;
+
case Primitive::kPrimDouble:
LOG(FATAL) << "Type conversion from " << input_type
<< " to " << result_type << " not yet implemented";
@@ -1604,15 +1668,19 @@ void InstructionCodeGeneratorX86_64::VisitTypeConversion(HTypeConversion* conver
case Primitive::kPrimDouble:
switch (input_type) {
- // Processing a Dex `int-to-double' instruction.
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
case Primitive::kPrimChar:
- __ cvtsi2sd(out.As<XmmRegister>(), in.As<CpuRegister>());
+ // Processing a Dex `int-to-double' instruction.
+ __ cvtsi2sd(out.AsFpuRegister<XmmRegister>(), in.AsRegister<CpuRegister>(), false);
break;
case Primitive::kPrimLong:
+ // Processing a Dex `long-to-double' instruction.
+ __ cvtsi2sd(out.AsFpuRegister<XmmRegister>(), in.AsRegister<CpuRegister>(), true);
+ break;
+
case Primitive::kPrimFloat:
LOG(FATAL) << "Type conversion from " << input_type
<< " to " << result_type << " not yet implemented";
@@ -1670,28 +1738,28 @@ void InstructionCodeGeneratorX86_64::VisitAdd(HAdd* add) {
switch (add->GetResultType()) {
case Primitive::kPrimInt: {
if (second.IsRegister()) {
- __ addl(first.As<CpuRegister>(), second.As<CpuRegister>());
+ __ addl(first.AsRegister<CpuRegister>(), second.AsRegister<CpuRegister>());
} else if (second.IsConstant()) {
Immediate imm(second.GetConstant()->AsIntConstant()->GetValue());
- __ addl(first.As<CpuRegister>(), imm);
+ __ addl(first.AsRegister<CpuRegister>(), imm);
} else {
- __ addl(first.As<CpuRegister>(), Address(CpuRegister(RSP), second.GetStackIndex()));
+ __ addl(first.AsRegister<CpuRegister>(), Address(CpuRegister(RSP), second.GetStackIndex()));
}
break;
}
case Primitive::kPrimLong: {
- __ addq(first.As<CpuRegister>(), second.As<CpuRegister>());
+ __ addq(first.AsRegister<CpuRegister>(), second.AsRegister<CpuRegister>());
break;
}
case Primitive::kPrimFloat: {
- __ addss(first.As<XmmRegister>(), second.As<XmmRegister>());
+ __ addss(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
break;
}
case Primitive::kPrimDouble: {
- __ addsd(first.As<XmmRegister>(), second.As<XmmRegister>());
+ __ addsd(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
break;
}
@@ -1736,27 +1804,27 @@ void InstructionCodeGeneratorX86_64::VisitSub(HSub* sub) {
switch (sub->GetResultType()) {
case Primitive::kPrimInt: {
if (second.IsRegister()) {
- __ subl(first.As<CpuRegister>(), second.As<CpuRegister>());
+ __ subl(first.AsRegister<CpuRegister>(), second.AsRegister<CpuRegister>());
} else if (second.IsConstant()) {
Immediate imm(second.GetConstant()->AsIntConstant()->GetValue());
- __ subl(first.As<CpuRegister>(), imm);
+ __ subl(first.AsRegister<CpuRegister>(), imm);
} else {
- __ subl(first.As<CpuRegister>(), Address(CpuRegister(RSP), second.GetStackIndex()));
+ __ subl(first.AsRegister<CpuRegister>(), Address(CpuRegister(RSP), second.GetStackIndex()));
}
break;
}
case Primitive::kPrimLong: {
- __ subq(first.As<CpuRegister>(), second.As<CpuRegister>());
+ __ subq(first.AsRegister<CpuRegister>(), second.AsRegister<CpuRegister>());
break;
}
case Primitive::kPrimFloat: {
- __ subss(first.As<XmmRegister>(), second.As<XmmRegister>());
+ __ subss(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
break;
}
case Primitive::kPrimDouble: {
- __ subsd(first.As<XmmRegister>(), second.As<XmmRegister>());
+ __ subsd(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
break;
}
@@ -1802,28 +1870,29 @@ void InstructionCodeGeneratorX86_64::VisitMul(HMul* mul) {
switch (mul->GetResultType()) {
case Primitive::kPrimInt: {
if (second.IsRegister()) {
- __ imull(first.As<CpuRegister>(), second.As<CpuRegister>());
+ __ imull(first.AsRegister<CpuRegister>(), second.AsRegister<CpuRegister>());
} else if (second.IsConstant()) {
Immediate imm(second.GetConstant()->AsIntConstant()->GetValue());
- __ imull(first.As<CpuRegister>(), imm);
+ __ imull(first.AsRegister<CpuRegister>(), imm);
} else {
DCHECK(second.IsStackSlot());
- __ imull(first.As<CpuRegister>(), Address(CpuRegister(RSP), second.GetStackIndex()));
+ __ imull(first.AsRegister<CpuRegister>(),
+ Address(CpuRegister(RSP), second.GetStackIndex()));
}
break;
}
case Primitive::kPrimLong: {
- __ imulq(first.As<CpuRegister>(), second.As<CpuRegister>());
+ __ imulq(first.AsRegister<CpuRegister>(), second.AsRegister<CpuRegister>());
break;
}
case Primitive::kPrimFloat: {
- __ mulss(first.As<XmmRegister>(), second.As<XmmRegister>());
+ __ mulss(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
break;
}
case Primitive::kPrimDouble: {
- __ mulsd(first.As<XmmRegister>(), second.As<XmmRegister>());
+ __ mulsd(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
break;
}
@@ -1840,10 +1909,10 @@ void InstructionCodeGeneratorX86_64::GenerateDivRemIntegral(HBinaryOperation* in
bool is_div = instruction->IsDiv();
LocationSummary* locations = instruction->GetLocations();
- CpuRegister out_reg = locations->Out().As<CpuRegister>();
- CpuRegister second_reg = locations->InAt(1).As<CpuRegister>();
+ CpuRegister out_reg = locations->Out().AsRegister<CpuRegister>();
+ CpuRegister second_reg = locations->InAt(1).AsRegister<CpuRegister>();
- DCHECK_EQ(RAX, locations->InAt(0).As<CpuRegister>().AsRegister());
+ DCHECK_EQ(RAX, locations->InAt(0).AsRegister<CpuRegister>().AsRegister());
DCHECK_EQ(is_div ? RAX : RDX, out_reg.AsRegister());
SlowPathCodeX86_64* slow_path =
@@ -1915,12 +1984,12 @@ void InstructionCodeGeneratorX86_64::VisitDiv(HDiv* div) {
}
case Primitive::kPrimFloat: {
- __ divss(first.As<XmmRegister>(), second.As<XmmRegister>());
+ __ divss(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
break;
}
case Primitive::kPrimDouble: {
- __ divsd(first.As<XmmRegister>(), second.As<XmmRegister>());
+ __ divsd(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
break;
}
@@ -1993,7 +2062,7 @@ void InstructionCodeGeneratorX86_64::VisitDivZeroCheck(HDivZeroCheck* instructio
switch (instruction->GetType()) {
case Primitive::kPrimInt: {
if (value.IsRegister()) {
- __ testl(value.As<CpuRegister>(), value.As<CpuRegister>());
+ __ testl(value.AsRegister<CpuRegister>(), value.AsRegister<CpuRegister>());
__ j(kEqual, slow_path->GetEntryLabel());
} else if (value.IsStackSlot()) {
__ cmpl(Address(CpuRegister(RSP), value.GetStackIndex()), Immediate(0));
@@ -2008,7 +2077,7 @@ void InstructionCodeGeneratorX86_64::VisitDivZeroCheck(HDivZeroCheck* instructio
}
case Primitive::kPrimLong: {
if (value.IsRegister()) {
- __ testq(value.As<CpuRegister>(), value.As<CpuRegister>());
+ __ testq(value.AsRegister<CpuRegister>(), value.AsRegister<CpuRegister>());
__ j(kEqual, slow_path->GetEntryLabel());
} else if (value.IsDoubleStackSlot()) {
__ cmpq(Address(CpuRegister(RSP), value.GetStackIndex()), Immediate(0));
@@ -2050,13 +2119,13 @@ void InstructionCodeGeneratorX86_64::HandleShift(HBinaryOperation* op) {
DCHECK(op->IsShl() || op->IsShr() || op->IsUShr());
LocationSummary* locations = op->GetLocations();
- CpuRegister first_reg = locations->InAt(0).As<CpuRegister>();
+ CpuRegister first_reg = locations->InAt(0).AsRegister<CpuRegister>();
Location second = locations->InAt(1);
switch (op->GetResultType()) {
case Primitive::kPrimInt: {
if (second.IsRegister()) {
- CpuRegister second_reg = second.As<CpuRegister>();
+ CpuRegister second_reg = second.AsRegister<CpuRegister>();
if (op->IsShl()) {
__ shll(first_reg, second_reg);
} else if (op->IsShr()) {
@@ -2078,7 +2147,7 @@ void InstructionCodeGeneratorX86_64::HandleShift(HBinaryOperation* op) {
}
case Primitive::kPrimLong: {
if (second.IsRegister()) {
- CpuRegister second_reg = second.As<CpuRegister>();
+ CpuRegister second_reg = second.AsRegister<CpuRegister>();
if (op->IsShl()) {
__ shlq(first_reg, second_reg);
} else if (op->IsShr()) {
@@ -2196,20 +2265,20 @@ void LocationsBuilderX86_64::VisitNot(HNot* not_) {
void InstructionCodeGeneratorX86_64::VisitNot(HNot* not_) {
LocationSummary* locations = not_->GetLocations();
- DCHECK_EQ(locations->InAt(0).As<CpuRegister>().AsRegister(),
- locations->Out().As<CpuRegister>().AsRegister());
+ DCHECK_EQ(locations->InAt(0).AsRegister<CpuRegister>().AsRegister(),
+ locations->Out().AsRegister<CpuRegister>().AsRegister());
Location out = locations->Out();
switch (not_->InputAt(0)->GetType()) {
case Primitive::kPrimBoolean:
- __ xorq(out.As<CpuRegister>(), Immediate(1));
+ __ xorq(out.AsRegister<CpuRegister>(), Immediate(1));
break;
case Primitive::kPrimInt:
- __ notl(out.As<CpuRegister>());
+ __ notl(out.AsRegister<CpuRegister>());
break;
case Primitive::kPrimLong:
- __ notq(out.As<CpuRegister>());
+ __ notq(out.AsRegister<CpuRegister>());
break;
default:
@@ -2248,51 +2317,51 @@ void LocationsBuilderX86_64::VisitInstanceFieldSet(HInstanceFieldSet* instructio
void InstructionCodeGeneratorX86_64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
LocationSummary* locations = instruction->GetLocations();
- CpuRegister obj = locations->InAt(0).As<CpuRegister>();
+ CpuRegister obj = locations->InAt(0).AsRegister<CpuRegister>();
size_t offset = instruction->GetFieldOffset().SizeValue();
Primitive::Type field_type = instruction->GetFieldType();
switch (field_type) {
case Primitive::kPrimBoolean:
case Primitive::kPrimByte: {
- CpuRegister value = locations->InAt(1).As<CpuRegister>();
+ CpuRegister value = locations->InAt(1).AsRegister<CpuRegister>();
__ movb(Address(obj, offset), value);
break;
}
case Primitive::kPrimShort:
case Primitive::kPrimChar: {
- CpuRegister value = locations->InAt(1).As<CpuRegister>();
+ CpuRegister value = locations->InAt(1).AsRegister<CpuRegister>();
__ movw(Address(obj, offset), value);
break;
}
case Primitive::kPrimInt:
case Primitive::kPrimNot: {
- CpuRegister value = locations->InAt(1).As<CpuRegister>();
+ CpuRegister value = locations->InAt(1).AsRegister<CpuRegister>();
__ movl(Address(obj, offset), value);
if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->GetValue())) {
- CpuRegister temp = locations->GetTemp(0).As<CpuRegister>();
- CpuRegister card = locations->GetTemp(1).As<CpuRegister>();
+ CpuRegister temp = locations->GetTemp(0).AsRegister<CpuRegister>();
+ CpuRegister card = locations->GetTemp(1).AsRegister<CpuRegister>();
codegen_->MarkGCCard(temp, card, obj, value);
}
break;
}
case Primitive::kPrimLong: {
- CpuRegister value = locations->InAt(1).As<CpuRegister>();
+ CpuRegister value = locations->InAt(1).AsRegister<CpuRegister>();
__ movq(Address(obj, offset), value);
break;
}
case Primitive::kPrimFloat: {
- XmmRegister value = locations->InAt(1).As<XmmRegister>();
+ XmmRegister value = locations->InAt(1).AsFpuRegister<XmmRegister>();
__ movss(Address(obj, offset), value);
break;
}
case Primitive::kPrimDouble: {
- XmmRegister value = locations->InAt(1).As<XmmRegister>();
+ XmmRegister value = locations->InAt(1).AsFpuRegister<XmmRegister>();
__ movsd(Address(obj, offset), value);
break;
}
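Note: for reference-typed stores the hunk keeps the MarkGCCard write barrier; a hedged sketch of the card-table idea it relies on (assumption: generic card-marking scheme with an assumed 1 KiB card size, not ART's exact register protocol):

#include <cstdint>

void MarkCard(uint8_t* card_table, const void* holder) {
  constexpr unsigned kCardShift = 10;            // assumed card granularity (1 KiB)
  uintptr_t addr = reinterpret_cast<uintptr_t>(holder);
  card_table[addr >> kCardShift] = 1;            // dirty the card covering the holder object
}

Dirtying the card that covers the object holding the reference lets the collector rescan only dirty cards instead of the whole heap.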
@@ -2312,55 +2381,55 @@ void LocationsBuilderX86_64::VisitInstanceFieldGet(HInstanceFieldGet* instructio
void InstructionCodeGeneratorX86_64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
LocationSummary* locations = instruction->GetLocations();
- CpuRegister obj = locations->InAt(0).As<CpuRegister>();
+ CpuRegister obj = locations->InAt(0).AsRegister<CpuRegister>();
size_t offset = instruction->GetFieldOffset().SizeValue();
switch (instruction->GetType()) {
case Primitive::kPrimBoolean: {
- CpuRegister out = locations->Out().As<CpuRegister>();
+ CpuRegister out = locations->Out().AsRegister<CpuRegister>();
__ movzxb(out, Address(obj, offset));
break;
}
case Primitive::kPrimByte: {
- CpuRegister out = locations->Out().As<CpuRegister>();
+ CpuRegister out = locations->Out().AsRegister<CpuRegister>();
__ movsxb(out, Address(obj, offset));
break;
}
case Primitive::kPrimShort: {
- CpuRegister out = locations->Out().As<CpuRegister>();
+ CpuRegister out = locations->Out().AsRegister<CpuRegister>();
__ movsxw(out, Address(obj, offset));
break;
}
case Primitive::kPrimChar: {
- CpuRegister out = locations->Out().As<CpuRegister>();
+ CpuRegister out = locations->Out().AsRegister<CpuRegister>();
__ movzxw(out, Address(obj, offset));
break;
}
case Primitive::kPrimInt:
case Primitive::kPrimNot: {
- CpuRegister out = locations->Out().As<CpuRegister>();
+ CpuRegister out = locations->Out().AsRegister<CpuRegister>();
__ movl(out, Address(obj, offset));
break;
}
case Primitive::kPrimLong: {
- CpuRegister out = locations->Out().As<CpuRegister>();
+ CpuRegister out = locations->Out().AsRegister<CpuRegister>();
__ movq(out, Address(obj, offset));
break;
}
case Primitive::kPrimFloat: {
- XmmRegister out = locations->Out().As<XmmRegister>();
+ XmmRegister out = locations->Out().AsFpuRegister<XmmRegister>();
__ movss(out, Address(obj, offset));
break;
}
case Primitive::kPrimDouble: {
- XmmRegister out = locations->Out().As<XmmRegister>();
+ XmmRegister out = locations->Out().AsFpuRegister<XmmRegister>();
__ movsd(out, Address(obj, offset));
break;
}
@@ -2388,7 +2457,7 @@ void InstructionCodeGeneratorX86_64::VisitNullCheck(HNullCheck* instruction) {
Location obj = locations->InAt(0);
if (obj.IsRegister()) {
- __ cmpl(obj.As<CpuRegister>(), Immediate(0));
+ __ cmpl(obj.AsRegister<CpuRegister>(), Immediate(0));
} else if (obj.IsStackSlot()) {
__ cmpl(Address(CpuRegister(RSP), obj.GetStackIndex()), Immediate(0));
} else {
@@ -2411,54 +2480,54 @@ void LocationsBuilderX86_64::VisitArrayGet(HArrayGet* instruction) {
void InstructionCodeGeneratorX86_64::VisitArrayGet(HArrayGet* instruction) {
LocationSummary* locations = instruction->GetLocations();
- CpuRegister obj = locations->InAt(0).As<CpuRegister>();
+ CpuRegister obj = locations->InAt(0).AsRegister<CpuRegister>();
Location index = locations->InAt(1);
switch (instruction->GetType()) {
case Primitive::kPrimBoolean: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
- CpuRegister out = locations->Out().As<CpuRegister>();
+ CpuRegister out = locations->Out().AsRegister<CpuRegister>();
if (index.IsConstant()) {
__ movzxb(out, Address(obj,
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset));
} else {
- __ movzxb(out, Address(obj, index.As<CpuRegister>(), TIMES_1, data_offset));
+ __ movzxb(out, Address(obj, index.AsRegister<CpuRegister>(), TIMES_1, data_offset));
}
break;
}
case Primitive::kPrimByte: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int8_t)).Uint32Value();
- CpuRegister out = locations->Out().As<CpuRegister>();
+ CpuRegister out = locations->Out().AsRegister<CpuRegister>();
if (index.IsConstant()) {
__ movsxb(out, Address(obj,
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset));
} else {
- __ movsxb(out, Address(obj, index.As<CpuRegister>(), TIMES_1, data_offset));
+ __ movsxb(out, Address(obj, index.AsRegister<CpuRegister>(), TIMES_1, data_offset));
}
break;
}
case Primitive::kPrimShort: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int16_t)).Uint32Value();
- CpuRegister out = locations->Out().As<CpuRegister>();
+ CpuRegister out = locations->Out().AsRegister<CpuRegister>();
if (index.IsConstant()) {
__ movsxw(out, Address(obj,
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset));
} else {
- __ movsxw(out, Address(obj, index.As<CpuRegister>(), TIMES_2, data_offset));
+ __ movsxw(out, Address(obj, index.AsRegister<CpuRegister>(), TIMES_2, data_offset));
}
break;
}
case Primitive::kPrimChar: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
- CpuRegister out = locations->Out().As<CpuRegister>();
+ CpuRegister out = locations->Out().AsRegister<CpuRegister>();
if (index.IsConstant()) {
__ movzxw(out, Address(obj,
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset));
} else {
- __ movzxw(out, Address(obj, index.As<CpuRegister>(), TIMES_2, data_offset));
+ __ movzxw(out, Address(obj, index.AsRegister<CpuRegister>(), TIMES_2, data_offset));
}
break;
}
@@ -2467,48 +2536,48 @@ void InstructionCodeGeneratorX86_64::VisitArrayGet(HArrayGet* instruction) {
case Primitive::kPrimNot: {
DCHECK_EQ(sizeof(mirror::HeapReference<mirror::Object>), sizeof(int32_t));
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
- CpuRegister out = locations->Out().As<CpuRegister>();
+ CpuRegister out = locations->Out().AsRegister<CpuRegister>();
if (index.IsConstant()) {
__ movl(out, Address(obj,
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset));
} else {
- __ movl(out, Address(obj, index.As<CpuRegister>(), TIMES_4, data_offset));
+ __ movl(out, Address(obj, index.AsRegister<CpuRegister>(), TIMES_4, data_offset));
}
break;
}
case Primitive::kPrimLong: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
- CpuRegister out = locations->Out().As<CpuRegister>();
+ CpuRegister out = locations->Out().AsRegister<CpuRegister>();
if (index.IsConstant()) {
__ movq(out, Address(obj,
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset));
} else {
- __ movq(out, Address(obj, index.As<CpuRegister>(), TIMES_8, data_offset));
+ __ movq(out, Address(obj, index.AsRegister<CpuRegister>(), TIMES_8, data_offset));
}
break;
}
case Primitive::kPrimFloat: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
- XmmRegister out = locations->Out().As<XmmRegister>();
+ XmmRegister out = locations->Out().AsFpuRegister<XmmRegister>();
if (index.IsConstant()) {
__ movss(out, Address(obj,
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset));
} else {
- __ movss(out, Address(obj, index.As<CpuRegister>(), TIMES_4, data_offset));
+ __ movss(out, Address(obj, index.AsRegister<CpuRegister>(), TIMES_4, data_offset));
}
break;
}
case Primitive::kPrimDouble: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
- XmmRegister out = locations->Out().As<XmmRegister>();
+ XmmRegister out = locations->Out().AsFpuRegister<XmmRegister>();
if (index.IsConstant()) {
__ movsd(out, Address(obj,
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset));
} else {
- __ movsd(out, Address(obj, index.As<CpuRegister>(), TIMES_8, data_offset));
+ __ movsd(out, Address(obj, index.AsRegister<CpuRegister>(), TIMES_8, data_offset));
}
break;
}
@@ -2556,7 +2625,7 @@ void LocationsBuilderX86_64::VisitArraySet(HArraySet* instruction) {
void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) {
LocationSummary* locations = instruction->GetLocations();
- CpuRegister obj = locations->InAt(0).As<CpuRegister>();
+ CpuRegister obj = locations->InAt(0).AsRegister<CpuRegister>();
Location index = locations->InAt(1);
Location value = locations->InAt(2);
Primitive::Type value_type = instruction->GetComponentType();
@@ -2571,16 +2640,17 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) {
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
if (value.IsRegister()) {
- __ movb(Address(obj, offset), value.As<CpuRegister>());
+ __ movb(Address(obj, offset), value.AsRegister<CpuRegister>());
} else {
- __ movb(Address(obj, offset), Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
+ __ movb(Address(obj, offset),
+ Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
}
} else {
if (value.IsRegister()) {
- __ movb(Address(obj, index.As<CpuRegister>(), TIMES_1, data_offset),
- value.As<CpuRegister>());
+ __ movb(Address(obj, index.AsRegister<CpuRegister>(), TIMES_1, data_offset),
+ value.AsRegister<CpuRegister>());
} else {
- __ movb(Address(obj, index.As<CpuRegister>(), TIMES_1, data_offset),
+ __ movb(Address(obj, index.AsRegister<CpuRegister>(), TIMES_1, data_offset),
Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
}
}
@@ -2593,19 +2663,20 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) {
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
if (value.IsRegister()) {
- __ movw(Address(obj, offset), value.As<CpuRegister>());
+ __ movw(Address(obj, offset), value.AsRegister<CpuRegister>());
} else {
DCHECK(value.IsConstant()) << value;
- __ movw(Address(obj, offset), Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
+ __ movw(Address(obj, offset),
+ Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
}
} else {
DCHECK(index.IsRegister()) << index;
if (value.IsRegister()) {
- __ movw(Address(obj, index.As<CpuRegister>(), TIMES_2, data_offset),
- value.As<CpuRegister>());
+ __ movw(Address(obj, index.AsRegister<CpuRegister>(), TIMES_2, data_offset),
+ value.AsRegister<CpuRegister>());
} else {
DCHECK(value.IsConstant()) << value;
- __ movw(Address(obj, index.As<CpuRegister>(), TIMES_2, data_offset),
+ __ movw(Address(obj, index.AsRegister<CpuRegister>(), TIMES_2, data_offset),
Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
}
}
@@ -2620,7 +2691,7 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) {
size_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
if (value.IsRegister()) {
- __ movl(Address(obj, offset), value.As<CpuRegister>());
+ __ movl(Address(obj, offset), value.AsRegister<CpuRegister>());
} else {
DCHECK(value.IsConstant()) << value;
__ movl(Address(obj, offset),
@@ -2629,24 +2700,25 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) {
} else {
DCHECK(index.IsRegister()) << index;
if (value.IsRegister()) {
- __ movl(Address(obj, index.As<CpuRegister>(), TIMES_4, data_offset),
- value.As<CpuRegister>());
+ __ movl(Address(obj, index.AsRegister<CpuRegister>(), TIMES_4, data_offset),
+ value.AsRegister<CpuRegister>());
} else {
DCHECK(value.IsConstant()) << value;
- __ movl(Address(obj, index.As<CpuRegister>(), TIMES_4, data_offset),
+ __ movl(Address(obj, index.AsRegister<CpuRegister>(), TIMES_4, data_offset),
Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
}
}
if (needs_write_barrier) {
DCHECK_EQ(value_type, Primitive::kPrimNot);
- CpuRegister temp = locations->GetTemp(0).As<CpuRegister>();
- CpuRegister card = locations->GetTemp(1).As<CpuRegister>();
- codegen_->MarkGCCard(temp, card, obj, value.As<CpuRegister>());
+ CpuRegister temp = locations->GetTemp(0).AsRegister<CpuRegister>();
+ CpuRegister card = locations->GetTemp(1).AsRegister<CpuRegister>();
+ codegen_->MarkGCCard(temp, card, obj, value.AsRegister<CpuRegister>());
}
} else {
DCHECK_EQ(value_type, Primitive::kPrimNot);
- __ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pAputObject), true));
+ __ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pAputObject),
+ true));
DCHECK(!codegen_->IsLeafMethod());
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
}
@@ -2658,11 +2730,11 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) {
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
DCHECK(value.IsRegister());
- __ movq(Address(obj, offset), value.As<CpuRegister>());
+ __ movq(Address(obj, offset), value.AsRegister<CpuRegister>());
} else {
DCHECK(value.IsRegister());
- __ movq(Address(obj, index.As<CpuRegister>(), TIMES_8, data_offset),
- value.As<CpuRegister>());
+ __ movq(Address(obj, index.AsRegister<CpuRegister>(), TIMES_8, data_offset),
+ value.AsRegister<CpuRegister>());
}
break;
}
@@ -2672,11 +2744,11 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) {
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
DCHECK(value.IsFpuRegister());
- __ movss(Address(obj, offset), value.As<XmmRegister>());
+ __ movss(Address(obj, offset), value.AsFpuRegister<XmmRegister>());
} else {
DCHECK(value.IsFpuRegister());
- __ movss(Address(obj, index.As<CpuRegister>(), TIMES_4, data_offset),
- value.As<XmmRegister>());
+ __ movss(Address(obj, index.AsRegister<CpuRegister>(), TIMES_4, data_offset),
+ value.AsFpuRegister<XmmRegister>());
}
break;
}
@@ -2686,11 +2758,11 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) {
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
DCHECK(value.IsFpuRegister());
- __ movsd(Address(obj, offset), value.As<XmmRegister>());
+ __ movsd(Address(obj, offset), value.AsFpuRegister<XmmRegister>());
} else {
DCHECK(value.IsFpuRegister());
- __ movsd(Address(obj, index.As<CpuRegister>(), TIMES_8, data_offset),
- value.As<XmmRegister>());
+ __ movsd(Address(obj, index.AsRegister<CpuRegister>(), TIMES_8, data_offset),
+ value.AsFpuRegister<XmmRegister>());
}
break;
}
@@ -2711,8 +2783,8 @@ void LocationsBuilderX86_64::VisitArrayLength(HArrayLength* instruction) {
void InstructionCodeGeneratorX86_64::VisitArrayLength(HArrayLength* instruction) {
LocationSummary* locations = instruction->GetLocations();
uint32_t offset = mirror::Array::LengthOffset().Uint32Value();
- CpuRegister obj = locations->InAt(0).As<CpuRegister>();
- CpuRegister out = locations->Out().As<CpuRegister>();
+ CpuRegister obj = locations->InAt(0).AsRegister<CpuRegister>();
+ CpuRegister out = locations->Out().AsRegister<CpuRegister>();
__ movl(out, Address(obj, offset));
}
@@ -2732,8 +2804,8 @@ void InstructionCodeGeneratorX86_64::VisitBoundsCheck(HBoundsCheck* instruction)
instruction, locations->InAt(0), locations->InAt(1));
codegen_->AddSlowPath(slow_path);
- CpuRegister index = locations->InAt(0).As<CpuRegister>();
- CpuRegister length = locations->InAt(1).As<CpuRegister>();
+ CpuRegister index = locations->InAt(0).AsRegister<CpuRegister>();
+ CpuRegister length = locations->InAt(1).AsRegister<CpuRegister>();
__ cmpl(index, length);
__ j(kAboveEqual, slow_path->GetEntryLabel());
@@ -2817,21 +2889,21 @@ void ParallelMoveResolverX86_64::EmitMove(size_t index) {
if (source.IsRegister()) {
if (destination.IsRegister()) {
- __ movq(destination.As<CpuRegister>(), source.As<CpuRegister>());
+ __ movq(destination.AsRegister<CpuRegister>(), source.AsRegister<CpuRegister>());
} else if (destination.IsStackSlot()) {
__ movl(Address(CpuRegister(RSP), destination.GetStackIndex()),
- source.As<CpuRegister>());
+ source.AsRegister<CpuRegister>());
} else {
DCHECK(destination.IsDoubleStackSlot());
__ movq(Address(CpuRegister(RSP), destination.GetStackIndex()),
- source.As<CpuRegister>());
+ source.AsRegister<CpuRegister>());
}
} else if (source.IsStackSlot()) {
if (destination.IsRegister()) {
- __ movl(destination.As<CpuRegister>(),
+ __ movl(destination.AsRegister<CpuRegister>(),
Address(CpuRegister(RSP), source.GetStackIndex()));
} else if (destination.IsFpuRegister()) {
- __ movss(destination.As<XmmRegister>(),
+ __ movss(destination.AsFpuRegister<XmmRegister>(),
Address(CpuRegister(RSP), source.GetStackIndex()));
} else {
DCHECK(destination.IsStackSlot());
@@ -2840,10 +2912,11 @@ void ParallelMoveResolverX86_64::EmitMove(size_t index) {
}
} else if (source.IsDoubleStackSlot()) {
if (destination.IsRegister()) {
- __ movq(destination.As<CpuRegister>(),
+ __ movq(destination.AsRegister<CpuRegister>(),
Address(CpuRegister(RSP), source.GetStackIndex()));
} else if (destination.IsFpuRegister()) {
- __ movsd(destination.As<XmmRegister>(), Address(CpuRegister(RSP), source.GetStackIndex()));
+ __ movsd(destination.AsFpuRegister<XmmRegister>(),
+ Address(CpuRegister(RSP), source.GetStackIndex()));
} else {
DCHECK(destination.IsDoubleStackSlot()) << destination;
__ movq(CpuRegister(TMP), Address(CpuRegister(RSP), source.GetStackIndex()));
@@ -2854,7 +2927,7 @@ void ParallelMoveResolverX86_64::EmitMove(size_t index) {
if (constant->IsIntConstant()) {
Immediate imm(constant->AsIntConstant()->GetValue());
if (destination.IsRegister()) {
- __ movl(destination.As<CpuRegister>(), imm);
+ __ movl(destination.AsRegister<CpuRegister>(), imm);
} else {
DCHECK(destination.IsStackSlot()) << destination;
__ movl(Address(CpuRegister(RSP), destination.GetStackIndex()), imm);
@@ -2862,7 +2935,7 @@ void ParallelMoveResolverX86_64::EmitMove(size_t index) {
} else if (constant->IsLongConstant()) {
int64_t value = constant->AsLongConstant()->GetValue();
if (destination.IsRegister()) {
- __ movq(destination.As<CpuRegister>(), Immediate(value));
+ __ movq(destination.AsRegister<CpuRegister>(), Immediate(value));
} else {
DCHECK(destination.IsDoubleStackSlot()) << destination;
__ movq(CpuRegister(TMP), Immediate(value));
@@ -2872,7 +2945,7 @@ void ParallelMoveResolverX86_64::EmitMove(size_t index) {
Immediate imm(bit_cast<float, int32_t>(constant->AsFloatConstant()->GetValue()));
if (destination.IsFpuRegister()) {
__ movl(CpuRegister(TMP), imm);
- __ movd(destination.As<XmmRegister>(), CpuRegister(TMP));
+ __ movd(destination.AsFpuRegister<XmmRegister>(), CpuRegister(TMP));
} else {
DCHECK(destination.IsStackSlot()) << destination;
__ movl(Address(CpuRegister(RSP), destination.GetStackIndex()), imm);
@@ -2882,7 +2955,7 @@ void ParallelMoveResolverX86_64::EmitMove(size_t index) {
Immediate imm(bit_cast<double, int64_t>(constant->AsDoubleConstant()->GetValue()));
if (destination.IsFpuRegister()) {
__ movq(CpuRegister(TMP), imm);
- __ movd(destination.As<XmmRegister>(), CpuRegister(TMP));
+ __ movd(destination.AsFpuRegister<XmmRegister>(), CpuRegister(TMP));
} else {
DCHECK(destination.IsDoubleStackSlot()) << destination;
__ movq(CpuRegister(TMP), imm);
@@ -2891,14 +2964,14 @@ void ParallelMoveResolverX86_64::EmitMove(size_t index) {
}
} else if (source.IsFpuRegister()) {
if (destination.IsFpuRegister()) {
- __ movaps(destination.As<XmmRegister>(), source.As<XmmRegister>());
+ __ movaps(destination.AsFpuRegister<XmmRegister>(), source.AsFpuRegister<XmmRegister>());
} else if (destination.IsStackSlot()) {
__ movss(Address(CpuRegister(RSP), destination.GetStackIndex()),
- source.As<XmmRegister>());
+ source.AsFpuRegister<XmmRegister>());
} else {
DCHECK(destination.IsDoubleStackSlot()) << destination;
__ movsd(Address(CpuRegister(RSP), destination.GetStackIndex()),
- source.As<XmmRegister>());
+ source.AsFpuRegister<XmmRegister>());
}
}
}
@@ -2959,31 +3032,31 @@ void ParallelMoveResolverX86_64::EmitSwap(size_t index) {
Location destination = move->GetDestination();
if (source.IsRegister() && destination.IsRegister()) {
- __ xchgq(destination.As<CpuRegister>(), source.As<CpuRegister>());
+ __ xchgq(destination.AsRegister<CpuRegister>(), source.AsRegister<CpuRegister>());
} else if (source.IsRegister() && destination.IsStackSlot()) {
- Exchange32(source.As<CpuRegister>(), destination.GetStackIndex());
+ Exchange32(source.AsRegister<CpuRegister>(), destination.GetStackIndex());
} else if (source.IsStackSlot() && destination.IsRegister()) {
- Exchange32(destination.As<CpuRegister>(), source.GetStackIndex());
+ Exchange32(destination.AsRegister<CpuRegister>(), source.GetStackIndex());
} else if (source.IsStackSlot() && destination.IsStackSlot()) {
Exchange32(destination.GetStackIndex(), source.GetStackIndex());
} else if (source.IsRegister() && destination.IsDoubleStackSlot()) {
- Exchange64(source.As<CpuRegister>(), destination.GetStackIndex());
+ Exchange64(source.AsRegister<CpuRegister>(), destination.GetStackIndex());
} else if (source.IsDoubleStackSlot() && destination.IsRegister()) {
- Exchange64(destination.As<CpuRegister>(), source.GetStackIndex());
+ Exchange64(destination.AsRegister<CpuRegister>(), source.GetStackIndex());
} else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) {
Exchange64(destination.GetStackIndex(), source.GetStackIndex());
} else if (source.IsFpuRegister() && destination.IsFpuRegister()) {
- __ movd(CpuRegister(TMP), source.As<XmmRegister>());
- __ movaps(source.As<XmmRegister>(), destination.As<XmmRegister>());
- __ movd(destination.As<XmmRegister>(), CpuRegister(TMP));
+ __ movd(CpuRegister(TMP), source.AsFpuRegister<XmmRegister>());
+ __ movaps(source.AsFpuRegister<XmmRegister>(), destination.AsFpuRegister<XmmRegister>());
+ __ movd(destination.AsFpuRegister<XmmRegister>(), CpuRegister(TMP));
} else if (source.IsFpuRegister() && destination.IsStackSlot()) {
- Exchange32(source.As<XmmRegister>(), destination.GetStackIndex());
+ Exchange32(source.AsFpuRegister<XmmRegister>(), destination.GetStackIndex());
} else if (source.IsStackSlot() && destination.IsFpuRegister()) {
- Exchange32(destination.As<XmmRegister>(), source.GetStackIndex());
+ Exchange32(destination.AsFpuRegister<XmmRegister>(), source.GetStackIndex());
} else if (source.IsFpuRegister() && destination.IsDoubleStackSlot()) {
- Exchange64(source.As<XmmRegister>(), destination.GetStackIndex());
+ Exchange64(source.AsFpuRegister<XmmRegister>(), destination.GetStackIndex());
} else if (source.IsDoubleStackSlot() && destination.IsFpuRegister()) {
- Exchange64(destination.As<XmmRegister>(), source.GetStackIndex());
+ Exchange64(destination.AsFpuRegister<XmmRegister>(), source.GetStackIndex());
} else {
LOG(FATAL) << "Unimplemented swap between " << source << " and " << destination;
}
@@ -3018,7 +3091,7 @@ void LocationsBuilderX86_64::VisitLoadClass(HLoadClass* cls) {
}
void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) {
- CpuRegister out = cls->GetLocations()->Out().As<CpuRegister>();
+ CpuRegister out = cls->GetLocations()->Out().AsRegister<CpuRegister>();
if (cls->IsReferrersClass()) {
DCHECK(!cls->CanCallRuntime());
DCHECK(!cls->MustGenerateClinitCheck());
@@ -3056,7 +3129,8 @@ void InstructionCodeGeneratorX86_64::VisitClinitCheck(HClinitCheck* check) {
SlowPathCodeX86_64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathX86_64(
check->GetLoadClass(), check, check->GetDexPc(), true);
codegen_->AddSlowPath(slow_path);
- GenerateClassInitializationCheck(slow_path, check->GetLocations()->InAt(0).As<CpuRegister>());
+ GenerateClassInitializationCheck(slow_path,
+ check->GetLocations()->InAt(0).AsRegister<CpuRegister>());
}
void LocationsBuilderX86_64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
@@ -3068,55 +3142,55 @@ void LocationsBuilderX86_64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
void InstructionCodeGeneratorX86_64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
LocationSummary* locations = instruction->GetLocations();
- CpuRegister cls = locations->InAt(0).As<CpuRegister>();
+ CpuRegister cls = locations->InAt(0).AsRegister<CpuRegister>();
size_t offset = instruction->GetFieldOffset().SizeValue();
switch (instruction->GetType()) {
case Primitive::kPrimBoolean: {
- CpuRegister out = locations->Out().As<CpuRegister>();
+ CpuRegister out = locations->Out().AsRegister<CpuRegister>();
__ movzxb(out, Address(cls, offset));
break;
}
case Primitive::kPrimByte: {
- CpuRegister out = locations->Out().As<CpuRegister>();
+ CpuRegister out = locations->Out().AsRegister<CpuRegister>();
__ movsxb(out, Address(cls, offset));
break;
}
case Primitive::kPrimShort: {
- CpuRegister out = locations->Out().As<CpuRegister>();
+ CpuRegister out = locations->Out().AsRegister<CpuRegister>();
__ movsxw(out, Address(cls, offset));
break;
}
case Primitive::kPrimChar: {
- CpuRegister out = locations->Out().As<CpuRegister>();
+ CpuRegister out = locations->Out().AsRegister<CpuRegister>();
__ movzxw(out, Address(cls, offset));
break;
}
case Primitive::kPrimInt:
case Primitive::kPrimNot: {
- CpuRegister out = locations->Out().As<CpuRegister>();
+ CpuRegister out = locations->Out().AsRegister<CpuRegister>();
__ movl(out, Address(cls, offset));
break;
}
case Primitive::kPrimLong: {
- CpuRegister out = locations->Out().As<CpuRegister>();
+ CpuRegister out = locations->Out().AsRegister<CpuRegister>();
__ movq(out, Address(cls, offset));
break;
}
case Primitive::kPrimFloat: {
- XmmRegister out = locations->Out().As<XmmRegister>();
+ XmmRegister out = locations->Out().AsFpuRegister<XmmRegister>();
__ movss(out, Address(cls, offset));
break;
}
case Primitive::kPrimDouble: {
- XmmRegister out = locations->Out().As<XmmRegister>();
+ XmmRegister out = locations->Out().AsFpuRegister<XmmRegister>();
__ movsd(out, Address(cls, offset));
break;
}
@@ -3144,51 +3218,51 @@ void LocationsBuilderX86_64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
void InstructionCodeGeneratorX86_64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
LocationSummary* locations = instruction->GetLocations();
- CpuRegister cls = locations->InAt(0).As<CpuRegister>();
+ CpuRegister cls = locations->InAt(0).AsRegister<CpuRegister>();
size_t offset = instruction->GetFieldOffset().SizeValue();
Primitive::Type field_type = instruction->GetFieldType();
switch (field_type) {
case Primitive::kPrimBoolean:
case Primitive::kPrimByte: {
- CpuRegister value = locations->InAt(1).As<CpuRegister>();
+ CpuRegister value = locations->InAt(1).AsRegister<CpuRegister>();
__ movb(Address(cls, offset), value);
break;
}
case Primitive::kPrimShort:
case Primitive::kPrimChar: {
- CpuRegister value = locations->InAt(1).As<CpuRegister>();
+ CpuRegister value = locations->InAt(1).AsRegister<CpuRegister>();
__ movw(Address(cls, offset), value);
break;
}
case Primitive::kPrimInt:
case Primitive::kPrimNot: {
- CpuRegister value = locations->InAt(1).As<CpuRegister>();
+ CpuRegister value = locations->InAt(1).AsRegister<CpuRegister>();
__ movl(Address(cls, offset), value);
if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->GetValue())) {
- CpuRegister temp = locations->GetTemp(0).As<CpuRegister>();
- CpuRegister card = locations->GetTemp(1).As<CpuRegister>();
+ CpuRegister temp = locations->GetTemp(0).AsRegister<CpuRegister>();
+ CpuRegister card = locations->GetTemp(1).AsRegister<CpuRegister>();
codegen_->MarkGCCard(temp, card, cls, value);
}
break;
}
case Primitive::kPrimLong: {
- CpuRegister value = locations->InAt(1).As<CpuRegister>();
+ CpuRegister value = locations->InAt(1).AsRegister<CpuRegister>();
__ movq(Address(cls, offset), value);
break;
}
case Primitive::kPrimFloat: {
- XmmRegister value = locations->InAt(1).As<XmmRegister>();
+ XmmRegister value = locations->InAt(1).AsFpuRegister<XmmRegister>();
__ movss(Address(cls, offset), value);
break;
}
case Primitive::kPrimDouble: {
- XmmRegister value = locations->InAt(1).As<XmmRegister>();
+ XmmRegister value = locations->InAt(1).AsFpuRegister<XmmRegister>();
__ movsd(Address(cls, offset), value);
break;
}
@@ -3209,7 +3283,7 @@ void InstructionCodeGeneratorX86_64::VisitLoadString(HLoadString* load) {
SlowPathCodeX86_64* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathX86_64(load);
codegen_->AddSlowPath(slow_path);
- CpuRegister out = load->GetLocations()->Out().As<CpuRegister>();
+ CpuRegister out = load->GetLocations()->Out().AsRegister<CpuRegister>();
codegen_->LoadCurrentMethod(CpuRegister(out));
__ movl(out, Address(out, mirror::ArtMethod::DeclaringClassOffset().Int32Value()));
__ movl(out, Address(out, mirror::Class::DexCacheStringsOffset().Int32Value()));
@@ -3228,7 +3302,7 @@ void LocationsBuilderX86_64::VisitLoadException(HLoadException* load) {
void InstructionCodeGeneratorX86_64::VisitLoadException(HLoadException* load) {
Address address = Address::Absolute(
Thread::ExceptionOffset<kX86_64WordSize>().Int32Value(), true);
- __ gs()->movl(load->GetLocations()->Out().As<CpuRegister>(), address);
+ __ gs()->movl(load->GetLocations()->Out().AsRegister<CpuRegister>(), address);
__ gs()->movl(address, Immediate(0));
}
@@ -3257,9 +3331,9 @@ void LocationsBuilderX86_64::VisitInstanceOf(HInstanceOf* instruction) {
void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) {
LocationSummary* locations = instruction->GetLocations();
- CpuRegister obj = locations->InAt(0).As<CpuRegister>();
+ CpuRegister obj = locations->InAt(0).AsRegister<CpuRegister>();
Location cls = locations->InAt(1);
- CpuRegister out = locations->Out().As<CpuRegister>();
+ CpuRegister out = locations->Out().AsRegister<CpuRegister>();
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
Label done, zero;
SlowPathCodeX86_64* slow_path = nullptr;
@@ -3271,7 +3345,7 @@ void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) {
// Compare the class of `obj` with `cls`.
__ movl(out, Address(obj, class_offset));
if (cls.IsRegister()) {
- __ cmpl(out, cls.As<CpuRegister>());
+ __ cmpl(out, cls.AsRegister<CpuRegister>());
} else {
DCHECK(cls.IsStackSlot()) << cls;
__ cmpl(out, Address(CpuRegister(RSP), cls.GetStackIndex()));
@@ -3309,9 +3383,9 @@ void LocationsBuilderX86_64::VisitCheckCast(HCheckCast* instruction) {
void InstructionCodeGeneratorX86_64::VisitCheckCast(HCheckCast* instruction) {
LocationSummary* locations = instruction->GetLocations();
- CpuRegister obj = locations->InAt(0).As<CpuRegister>();
+ CpuRegister obj = locations->InAt(0).AsRegister<CpuRegister>();
Location cls = locations->InAt(1);
- CpuRegister temp = locations->GetTemp(0).As<CpuRegister>();
+ CpuRegister temp = locations->GetTemp(0).AsRegister<CpuRegister>();
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
SlowPathCodeX86_64* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86_64(
instruction, locations->InAt(1), locations->GetTemp(0), instruction->GetDexPc());
@@ -3323,7 +3397,7 @@ void InstructionCodeGeneratorX86_64::VisitCheckCast(HCheckCast* instruction) {
// Compare the class of `obj` with `cls`.
__ movl(temp, Address(obj, class_offset));
if (cls.IsRegister()) {
- __ cmpl(temp, cls.As<CpuRegister>());
+ __ cmpl(temp, cls.AsRegister<CpuRegister>());
} else {
DCHECK(cls.IsStackSlot()) << cls;
__ cmpl(temp, Address(CpuRegister(RSP), cls.GetStackIndex()));
@@ -3388,43 +3462,43 @@ void InstructionCodeGeneratorX86_64::HandleBitwiseOperation(HBinaryOperation* in
if (instruction->GetResultType() == Primitive::kPrimInt) {
if (second.IsRegister()) {
if (instruction->IsAnd()) {
- __ andl(first.As<CpuRegister>(), second.As<CpuRegister>());
+ __ andl(first.AsRegister<CpuRegister>(), second.AsRegister<CpuRegister>());
} else if (instruction->IsOr()) {
- __ orl(first.As<CpuRegister>(), second.As<CpuRegister>());
+ __ orl(first.AsRegister<CpuRegister>(), second.AsRegister<CpuRegister>());
} else {
DCHECK(instruction->IsXor());
- __ xorl(first.As<CpuRegister>(), second.As<CpuRegister>());
+ __ xorl(first.AsRegister<CpuRegister>(), second.AsRegister<CpuRegister>());
}
} else if (second.IsConstant()) {
Immediate imm(second.GetConstant()->AsIntConstant()->GetValue());
if (instruction->IsAnd()) {
- __ andl(first.As<CpuRegister>(), imm);
+ __ andl(first.AsRegister<CpuRegister>(), imm);
} else if (instruction->IsOr()) {
- __ orl(first.As<CpuRegister>(), imm);
+ __ orl(first.AsRegister<CpuRegister>(), imm);
} else {
DCHECK(instruction->IsXor());
- __ xorl(first.As<CpuRegister>(), imm);
+ __ xorl(first.AsRegister<CpuRegister>(), imm);
}
} else {
Address address(CpuRegister(RSP), second.GetStackIndex());
if (instruction->IsAnd()) {
- __ andl(first.As<CpuRegister>(), address);
+ __ andl(first.AsRegister<CpuRegister>(), address);
} else if (instruction->IsOr()) {
- __ orl(first.As<CpuRegister>(), address);
+ __ orl(first.AsRegister<CpuRegister>(), address);
} else {
DCHECK(instruction->IsXor());
- __ xorl(first.As<CpuRegister>(), address);
+ __ xorl(first.AsRegister<CpuRegister>(), address);
}
}
} else {
DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimLong);
if (instruction->IsAnd()) {
- __ andq(first.As<CpuRegister>(), second.As<CpuRegister>());
+ __ andq(first.AsRegister<CpuRegister>(), second.AsRegister<CpuRegister>());
} else if (instruction->IsOr()) {
- __ orq(first.As<CpuRegister>(), second.As<CpuRegister>());
+ __ orq(first.AsRegister<CpuRegister>(), second.AsRegister<CpuRegister>());
} else {
DCHECK(instruction->IsXor());
- __ xorq(first.As<CpuRegister>(), second.As<CpuRegister>());
+ __ xorq(first.AsRegister<CpuRegister>(), second.AsRegister<CpuRegister>());
}
}
}
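
Note on the ParallelMoveResolverX86_64::EmitSwap hunk earlier in this file (illustrative only, not part of the change): the FPU/FPU case swaps two XMM registers through the scratch core register TMP so that no third XMM register is needed. A minimal sketch of that sequence, using only calls that appear in the hunk (the helper name SwapXmm is hypothetical):

    // Exchange two XMM registers via the scratch GPR TMP.
    // movd transfers 32 bits, which is enough for the single-precision values
    // this resolver swaps; movaps copies the full register.
    void SwapXmm(X86_64Assembler* assm, XmmRegister a, XmmRegister b) {
      assm->movd(CpuRegister(TMP), a);   // TMP <- low 32 bits of a
      assm->movaps(a, b);                // a   <- b
      assm->movd(b, CpuRegister(TMP));   // b   <- old a
    }
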
diff --git a/compiler/optimizing/find_loops_test.cc b/compiler/optimizing/find_loops_test.cc
index c36b1436d3..82fe03caa2 100644
--- a/compiler/optimizing/find_loops_test.cc
+++ b/compiler/optimizing/find_loops_test.cc
@@ -32,7 +32,7 @@ static HGraph* TestCode(const uint16_t* data, ArenaAllocator* allocator) {
const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
HGraph* graph = builder.BuildGraph(*item);
graph->BuildDominatorTree();
- graph->FindNaturalLoops();
+ graph->AnalyzeNaturalLoops();
return graph;
}
diff --git a/compiler/optimizing/gvn.cc b/compiler/optimizing/gvn.cc
index 25168b5b0c..6e5f1bd203 100644
--- a/compiler/optimizing/gvn.cc
+++ b/compiler/optimizing/gvn.cc
@@ -91,29 +91,38 @@ SideEffects GlobalValueNumberer::GetBlockEffects(HBasicBlock* block) const {
return block_effects_.Get(block->GetBlockId());
}
-static bool IsLoopExit(HBasicBlock* block, HBasicBlock* successor) {
- HLoopInformation* block_info = block->GetLoopInformation();
- HLoopInformation* other_info = successor->GetLoopInformation();
- return block_info != other_info && (other_info == nullptr || block_info->IsIn(*other_info));
-}
-
void GlobalValueNumberer::VisitBasicBlock(HBasicBlock* block) {
- if (kIsDebugBuild) {
- // Check that all non back-edge processors have been visited.
- for (size_t i = 0, e = block->GetPredecessors().Size(); i < e; ++i) {
- HBasicBlock* predecessor = block->GetPredecessors().Get(i);
- DCHECK(visited_.Get(predecessor->GetBlockId())
- || (block->GetLoopInformation() != nullptr
- && (block->GetLoopInformation()->GetBackEdges().Get(0) == predecessor)));
+ ValueSet* set = nullptr;
+ const GrowableArray<HBasicBlock*>& predecessors = block->GetPredecessors();
+ if (predecessors.Size() == 0 || predecessors.Get(0)->IsEntryBlock()) {
+ // The entry block should only accumulate constant instructions, and
+ // the builder puts constants only in the entry block.
+ // Therefore, there is no need to propagate the value set to the next block.
+ set = new (allocator_) ValueSet(allocator_);
+ } else {
+ HBasicBlock* dominator = block->GetDominator();
+    set = sets_.Get(dominator->GetBlockId());
+ if (dominator->GetSuccessors().Size() != 1 || dominator->GetSuccessors().Get(0) != block) {
+ // We have to copy if the dominator has other successors, or `block` is not a successor
+ // of the dominator.
+ set = set->Copy();
+ }
+ if (!set->IsEmpty()) {
+ if (block->IsLoopHeader()) {
+ DCHECK_EQ(block->GetDominator(), block->GetLoopInformation()->GetPreHeader());
+ set->Kill(GetLoopEffects(block));
+ } else if (predecessors.Size() > 1) {
+ for (size_t i = 0, e = predecessors.Size(); i < e; ++i) {
+ set->IntersectionWith(sets_.Get(predecessors.Get(i)->GetBlockId()));
+ if (set->IsEmpty()) {
+ break;
+ }
+ }
+ }
}
- visited_.Put(block->GetBlockId(), true);
}
- ValueSet* set = sets_.Get(block->GetBlockId());
-
- if (block->IsLoopHeader()) {
- set->Kill(GetLoopEffects(block));
- }
+ sets_.Put(block->GetBlockId(), set);
HInstruction* current = block->GetFirstInstruction();
while (current != nullptr) {
@@ -131,57 +140,6 @@ void GlobalValueNumberer::VisitBasicBlock(HBasicBlock* block) {
}
current = next;
}
-
- if (block == graph_->GetEntryBlock()) {
- // The entry block should only accumulate constant instructions, and
- // the builder puts constants only in the entry block.
- // Therefore, there is no need to propagate the value set to the next block.
- DCHECK_EQ(block->GetDominatedBlocks().Size(), 1u);
- HBasicBlock* dominated = block->GetDominatedBlocks().Get(0);
- sets_.Put(dominated->GetBlockId(), new (allocator_) ValueSet(allocator_));
- return;
- }
-
- // Copy the value set to dominated blocks. We can re-use
- // the current set for the last dominated block because we are done visiting
- // this block.
- for (size_t i = 0, e = block->GetDominatedBlocks().Size(); i < e; ++i) {
- HBasicBlock* dominated = block->GetDominatedBlocks().Get(i);
- sets_.Put(dominated->GetBlockId(), i == e - 1 ? set : set->Copy());
- }
-
- // Kill instructions in the value set of each successor. If the successor
- // is a loop exit, then we use the side effects of the loop. If not, we use
- // the side effects of this block.
- for (size_t i = 0, e = block->GetSuccessors().Size(); i < e; ++i) {
- HBasicBlock* successor = block->GetSuccessors().Get(i);
- if (successor->IsLoopHeader()
- && successor->GetLoopInformation()->GetBackEdges().Get(0) == block) {
- // In case of a back edge, we already have visited the loop header.
- // We should not update its value set, because the last dominated block
- // of the loop header uses the same value set.
- DCHECK(visited_.Get(successor->GetBlockId()));
- continue;
- }
- DCHECK(!visited_.Get(successor->GetBlockId()));
- ValueSet* successor_set = sets_.Get(successor->GetBlockId());
- // The dominator sets the set, and we are guaranteed to have visited it already.
- DCHECK(successor_set != nullptr);
-
- // If this block dominates this successor there is nothing to do.
- // Also if the set is empty, there is nothing to kill.
- if (successor->GetDominator() != block && !successor_set->IsEmpty()) {
- if (block->IsInLoop() && IsLoopExit(block, successor)) {
- // All instructions killed in the loop must be killed for a loop exit.
- SideEffects effects = GetLoopEffects(block->GetLoopInformation()->GetHeader());
- sets_.Get(successor->GetBlockId())->Kill(effects);
- } else {
- // Following block (that might be in the same loop).
- // Just kill instructions based on this block's side effects.
- sets_.Get(successor->GetBlockId())->Kill(GetBlockEffects(block));
- }
- }
- }
}
} // namespace art
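
For context on the rewritten VisitBasicBlock (worked illustration, not from the patch): a block now starts from its dominator's value set, a loop header then kills everything its loop may write, and a block with several predecessors intersects the set with each predecessor's set. On a simple diamond:

                   B1:  x = obj.f              set(B1) = { obj.f }
                  /   \
        B2: obj.f = 0   B3: (no field writes)  set(B2) = { }   set(B3) = { obj.f }
                  \   /
                   B4:  y = obj.f

    B4 is dominated by B1, so it starts from a copy of { obj.f }; intersecting
    with set(B2) empties it, so the load in B4 is (correctly) not eliminated.
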
diff --git a/compiler/optimizing/gvn.h b/compiler/optimizing/gvn.h
index 8e739cb6d3..81f2c3fa87 100644
--- a/compiler/optimizing/gvn.h
+++ b/compiler/optimizing/gvn.h
@@ -96,6 +96,26 @@ class ValueSet : public ArenaObject<kArenaAllocMisc> {
return nullptr;
}
+  // Returns `instruction` if it is in the set, null otherwise.
+ HInstruction* IdentityLookup(HInstruction* instruction) const {
+ size_t hash_code = instruction->ComputeHashCode();
+ size_t index = hash_code % kDefaultNumberOfEntries;
+ HInstruction* existing = table_[index];
+ if (existing != nullptr && existing == instruction) {
+ return existing;
+ }
+
+ for (ValueSetNode* node = collisions_; node != nullptr; node = node->GetNext()) {
+ if (node->GetHashCode() == hash_code) {
+ existing = node->GetInstruction();
+ if (existing == instruction) {
+ return existing;
+ }
+ }
+ }
+ return nullptr;
+ }
+
// Removes all instructions in the set that are affected by the given side effects.
void Kill(SideEffects side_effects) {
for (size_t i = 0; i < kDefaultNumberOfEntries; ++i) {
@@ -106,9 +126,9 @@ class ValueSet : public ArenaObject<kArenaAllocMisc> {
}
}
- ValueSetNode* current = collisions_;
- ValueSetNode* previous = nullptr;
- while (current != nullptr) {
+ for (ValueSetNode* current = collisions_, *previous = nullptr;
+ current != nullptr;
+ current = current->GetNext()) {
HInstruction* instruction = current->GetInstruction();
if (instruction->GetSideEffects().DependsOn(side_effects)) {
if (previous == nullptr) {
@@ -120,7 +140,6 @@ class ValueSet : public ArenaObject<kArenaAllocMisc> {
} else {
previous = current;
}
- current = current->GetNext();
}
}
@@ -143,6 +162,44 @@ class ValueSet : public ArenaObject<kArenaAllocMisc> {
return copy;
}
+ void Clear() {
+ number_of_entries_ = 0;
+ collisions_ = nullptr;
+ for (size_t i = 0; i < kDefaultNumberOfEntries; ++i) {
+ table_[i] = nullptr;
+ }
+ }
+
+ // Update this `ValueSet` by intersecting with instructions in `other`.
+ void IntersectionWith(ValueSet* other) {
+ if (IsEmpty()) {
+ return;
+ } else if (other->IsEmpty()) {
+ Clear();
+ } else {
+ for (size_t i = 0; i < kDefaultNumberOfEntries; ++i) {
+ if (table_[i] != nullptr && other->IdentityLookup(table_[i]) == nullptr) {
+ --number_of_entries_;
+ table_[i] = nullptr;
+ }
+ }
+ for (ValueSetNode* current = collisions_, *previous = nullptr;
+ current != nullptr;
+ current = current->GetNext()) {
+ if (other->IdentityLookup(current->GetInstruction()) == nullptr) {
+ if (previous == nullptr) {
+ collisions_ = current->GetNext();
+ } else {
+ previous->SetNext(current->GetNext());
+ }
+ --number_of_entries_;
+ } else {
+ previous = current;
+ }
+ }
+ }
+ }
+
bool IsEmpty() const { return number_of_entries_ == 0; }
size_t GetNumberOfEntries() const { return number_of_entries_; }
@@ -173,13 +230,11 @@ class GlobalValueNumberer : public ValueObject {
allocator_(allocator),
block_effects_(allocator, graph->GetBlocks().Size()),
loop_effects_(allocator, graph->GetBlocks().Size()),
- sets_(allocator, graph->GetBlocks().Size()),
- visited_(allocator, graph->GetBlocks().Size()) {
+ sets_(allocator, graph->GetBlocks().Size()) {
size_t number_of_blocks = graph->GetBlocks().Size();
block_effects_.SetSize(number_of_blocks);
loop_effects_.SetSize(number_of_blocks);
sets_.SetSize(number_of_blocks);
- visited_.SetSize(number_of_blocks);
for (size_t i = 0; i < number_of_blocks; ++i) {
block_effects_.Put(i, SideEffects::None());
@@ -219,9 +274,6 @@ class GlobalValueNumberer : public ValueObject {
// in the path from the dominator to the block.
GrowableArray<ValueSet*> sets_;
- // Mark visisted blocks. Only used for debugging.
- GrowableArray<bool> visited_;
-
ART_FRIEND_TEST(GVNTest, LoopSideEffects);
DISALLOW_COPY_AND_ASSIGN(GlobalValueNumberer);
};
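
A minimal usage sketch of the new ValueSet operations (the instructions load1 and load2 and the allocator variable are hypothetical, and Add() is assumed to be the pre-existing insertion method): IdentityLookup() compares by pointer identity, and IntersectionWith() keeps only instructions present in both sets.

    ValueSet* a = new (allocator) ValueSet(allocator);
    ValueSet* b = new (allocator) ValueSet(allocator);
    a->Add(load1);
    a->Add(load2);
    b->Add(load1);
    a->IntersectionWith(b);                        // `a` now holds only load1.
    DCHECK(a->IdentityLookup(load1) != nullptr);
    DCHECK(a->IdentityLookup(load2) == nullptr);
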
diff --git a/compiler/optimizing/gvn_test.cc b/compiler/optimizing/gvn_test.cc
index ad6e3382bc..a6a68ca59d 100644
--- a/compiler/optimizing/gvn_test.cc
+++ b/compiler/optimizing/gvn_test.cc
@@ -175,7 +175,7 @@ TEST(GVNTest, LoopFieldElimination) {
graph->BuildDominatorTree();
graph->TransformToSSA();
- graph->FindNaturalLoops();
+ graph->AnalyzeNaturalLoops();
GlobalValueNumberer(&allocator, graph).Run();
// Check that all field get instructions are still there.
@@ -239,7 +239,7 @@ TEST(GVNTest, LoopSideEffects) {
graph->BuildDominatorTree();
graph->TransformToSSA();
- graph->FindNaturalLoops();
+ graph->AnalyzeNaturalLoops();
ASSERT_TRUE(inner_loop_header->GetLoopInformation()->IsIn(
*outer_loop_header->GetLoopInformation()));
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 3d65e9a0a4..49ca44331d 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -26,6 +26,7 @@ class InstructionSimplifierVisitor : public HGraphVisitor {
void VisitSuspendCheck(HSuspendCheck* check) OVERRIDE;
void VisitEqual(HEqual* equal) OVERRIDE;
void VisitArraySet(HArraySet* equal) OVERRIDE;
+ void VisitTypeConversion(HTypeConversion* instruction) OVERRIDE;
};
void InstructionSimplifier::Run() {
@@ -78,4 +79,12 @@ void InstructionSimplifierVisitor::VisitArraySet(HArraySet* instruction) {
}
}
+void InstructionSimplifierVisitor::VisitTypeConversion(HTypeConversion* instruction) {
+ if (instruction->GetResultType() == instruction->GetInputType()) {
+ // Remove the instruction if it's converting to the same type.
+ instruction->ReplaceWith(instruction->GetInput());
+ instruction->GetBlock()->RemoveInstruction(instruction);
+ }
+}
+
} // namespace art
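
The effect of the new VisitTypeConversion hook, sketched on a tiny HIR fragment (instruction names are illustrative): a conversion whose result type equals its input type is a no-op, so its uses are rewired to the input and the conversion is dropped.

    // Before:                               // After simplification:
    //   i1: IntConstant 42                  //   i1: IntConstant 42
    //   i2: TypeConversion(i1)  int->int    //
    //   i3: Add(i2, i4)                     //   i3: Add(i1, i4)
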
diff --git a/compiler/optimizing/linearize_test.cc b/compiler/optimizing/linearize_test.cc
index c49cf7e03f..28ca5e81e6 100644
--- a/compiler/optimizing/linearize_test.cc
+++ b/compiler/optimizing/linearize_test.cc
@@ -44,7 +44,7 @@ static void TestCode(const uint16_t* data, const int* expected_order, size_t num
graph->BuildDominatorTree();
graph->TransformToSSA();
- graph->FindNaturalLoops();
+ graph->AnalyzeNaturalLoops();
x86::CodeGeneratorX86 codegen(graph);
SsaLivenessAnalysis liveness(*graph, &codegen);
diff --git a/compiler/optimizing/live_ranges_test.cc b/compiler/optimizing/live_ranges_test.cc
index e3c6fec23b..5c7e6f0325 100644
--- a/compiler/optimizing/live_ranges_test.cc
+++ b/compiler/optimizing/live_ranges_test.cc
@@ -38,7 +38,7 @@ static HGraph* BuildGraph(const uint16_t* data, ArenaAllocator* allocator) {
RemoveSuspendChecks(graph);
graph->BuildDominatorTree();
graph->TransformToSSA();
- graph->FindNaturalLoops();
+ graph->AnalyzeNaturalLoops();
// `Inline` conditions into ifs.
PrepareForRegisterAllocation(graph).Run();
return graph;
diff --git a/compiler/optimizing/liveness_test.cc b/compiler/optimizing/liveness_test.cc
index 246e7ef309..4b69e57960 100644
--- a/compiler/optimizing/liveness_test.cc
+++ b/compiler/optimizing/liveness_test.cc
@@ -50,7 +50,7 @@ static void TestCode(const uint16_t* data, const char* expected) {
ASSERT_NE(graph, nullptr);
graph->BuildDominatorTree();
graph->TransformToSSA();
- graph->FindNaturalLoops();
+ graph->AnalyzeNaturalLoops();
// `Inline` conditions into ifs.
PrepareForRegisterAllocation(graph).Run();
x86::CodeGeneratorX86 codegen(graph);
diff --git a/compiler/optimizing/locations.h b/compiler/optimizing/locations.h
index e1c8e8ed6e..1ff26d914c 100644
--- a/compiler/optimizing/locations.h
+++ b/compiler/optimizing/locations.h
@@ -161,7 +161,14 @@ class Location : public ValueObject {
}
template <typename T>
- T As() const {
+ T AsRegister() const {
+ DCHECK(IsRegister());
+ return static_cast<T>(reg());
+ }
+
+ template <typename T>
+ T AsFpuRegister() const {
+ DCHECK(IsFpuRegister());
return static_cast<T>(reg());
}
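
A short sketch of how the split accessors are meant to be used (the `locations` variable is assumed to be some instruction's LocationSummary): each accessor now DCHECKs the location kind, which the old As<T>() did not.

    Location in = locations->InAt(0);
    if (in.IsRegister()) {
      CpuRegister reg = in.AsRegister<CpuRegister>();      // core register only
      // ...
    } else if (in.IsFpuRegister()) {
      XmmRegister xmm = in.AsFpuRegister<XmmRegister>();   // FP register only
      // ...
    }
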
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 8cb2ef6de8..ba4dccf598 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -30,6 +30,36 @@ void HGraph::FindBackEdges(ArenaBitVector* visited) {
VisitBlockForBackEdges(entry_block_, visited, &visiting);
}
+static void RemoveAsUser(HInstruction* instruction) {
+ for (size_t i = 0; i < instruction->InputCount(); i++) {
+ instruction->InputAt(i)->RemoveUser(instruction, i);
+ }
+
+ HEnvironment* environment = instruction->GetEnvironment();
+ if (environment != nullptr) {
+ for (size_t i = 0, e = environment->Size(); i < e; ++i) {
+ HInstruction* vreg = environment->GetInstructionAt(i);
+ if (vreg != nullptr) {
+ vreg->RemoveEnvironmentUser(environment, i);
+ }
+ }
+ }
+}
+
+void HGraph::RemoveInstructionsAsUsersFromDeadBlocks(const ArenaBitVector& visited) const {
+ for (size_t i = 0; i < blocks_.Size(); ++i) {
+ if (!visited.IsBitSet(i)) {
+ HBasicBlock* block = blocks_.Get(i);
+ for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
+ RemoveAsUser(it.Current());
+ }
+ for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
+ RemoveAsUser(it.Current());
+ }
+ }
+ }
+}
+
void HGraph::RemoveDeadBlocks(const ArenaBitVector& visited) const {
for (size_t i = 0; i < blocks_.Size(); ++i) {
if (!visited.IsBitSet(i)) {
@@ -72,16 +102,21 @@ void HGraph::BuildDominatorTree() {
// (1) Find the back edges in the graph doing a DFS traversal.
FindBackEdges(&visited);
- // (2) Remove blocks not visited during the initial DFS.
- // Step (3) requires dead blocks to be removed from the
+ // (2) Remove instructions and phis from blocks not visited during
+  //     the initial DFS as users of other instructions, so that
+  //     the dead blocks can later be removed without leaving stale users.
+ RemoveInstructionsAsUsersFromDeadBlocks(visited);
+
+ // (3) Remove blocks not visited during the initial DFS.
+ // Step (4) requires dead blocks to be removed from the
// predecessors list of live blocks.
RemoveDeadBlocks(visited);
- // (3) Simplify the CFG now, so that we don't need to recompute
+ // (4) Simplify the CFG now, so that we don't need to recompute
// dominators and the reverse post order.
SimplifyCFG();
- // (4) Compute the immediate dominator of each block. We visit
+ // (5) Compute the immediate dominator of each block. We visit
// the successors of a block only when all its forward branches
// have been processed.
GrowableArray<size_t> visits(arena_, blocks_.Size());
@@ -237,7 +272,7 @@ void HGraph::SimplifyCFG() {
}
}
-bool HGraph::FindNaturalLoops() const {
+bool HGraph::AnalyzeNaturalLoops() const {
for (size_t i = 0; i < blocks_.Size(); ++i) {
HBasicBlock* block = blocks_.Get(i);
if (block->IsLoopHeader()) {
@@ -391,19 +426,7 @@ static void Remove(HInstructionList* instruction_list,
instruction->SetBlock(nullptr);
instruction_list->RemoveInstruction(instruction);
- for (size_t i = 0; i < instruction->InputCount(); i++) {
- instruction->InputAt(i)->RemoveUser(instruction, i);
- }
-
- HEnvironment* environment = instruction->GetEnvironment();
- if (environment != nullptr) {
- for (size_t i = 0, e = environment->Size(); i < e; ++i) {
- HInstruction* vreg = environment->GetInstructionAt(i);
- if (vreg != nullptr) {
- vreg->RemoveEnvironmentUser(environment, i);
- }
- }
- }
+ RemoveAsUser(instruction);
}
void HBasicBlock::RemoveInstruction(HInstruction* instruction) {
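
Why step (2) has to run before the dead blocks are dropped, on a small example (illustrative, not from the patch): an unreachable block can still use a value defined in a live block.

    //   B1 (live):         i1 = ...
    //   B2 (unreachable):  i2 = Add(i1, i1)   // i2 sits in i1's use list
    //
    // RemoveInstructionsAsUsersFromDeadBlocks() takes i2 out of i1's use list;
    // only then does RemoveDeadBlocks() discard B2, so i1 never keeps a
    // dangling user.
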
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index f562113e6e..3908a61910 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -112,10 +112,10 @@ class HGraph : public ArenaObject<kArenaAllocMisc> {
void TransformToSSA();
void SimplifyCFG();
- // Find all natural loops in this graph. Aborts computation and returns false
- // if one loop is not natural, that is the header does not dominate the back
- // edge.
- bool FindNaturalLoops() const;
+ // Analyze all natural loops in this graph. Returns false if one
+ // loop is not natural, that is the header does not dominate the
+ // back edge.
+ bool AnalyzeNaturalLoops() const;
void SplitCriticalEdge(HBasicBlock* block, HBasicBlock* successor);
void SimplifyLoop(HBasicBlock* header);
@@ -173,6 +173,7 @@ class HGraph : public ArenaObject<kArenaAllocMisc> {
void VisitBlockForBackEdges(HBasicBlock* block,
ArenaBitVector* visited,
ArenaBitVector* visiting);
+ void RemoveInstructionsAsUsersFromDeadBlocks(const ArenaBitVector& visited) const;
void RemoveDeadBlocks(const ArenaBitVector& visited) const;
ArenaAllocator* const arena_;
@@ -777,7 +778,7 @@ class HInstruction : public ArenaObject<kArenaAllocMisc> {
}
// Returns whether two instructions are equal, that is:
- // 1) They have the same type and contain the same data,
+ // 1) They have the same type and contain the same data (InstructionDataEquals).
// 2) Their inputs are identical.
bool Equals(HInstruction* other) const;
@@ -1363,28 +1364,45 @@ class HGreaterThanOrEqual : public HCondition {
// Result is 0 if input0 == input1, 1 if input0 > input1, or -1 if input0 < input1.
class HCompare : public HBinaryOperation {
public:
- HCompare(Primitive::Type type, HInstruction* first, HInstruction* second)
- : HBinaryOperation(Primitive::kPrimInt, first, second) {
+  // The bias applies to floating-point operations and indicates how NaN
+ // comparisons are treated:
+ enum Bias {
+ kNoBias, // bias is not applicable (i.e. for long operation)
+ kGtBias, // return 1 for NaN comparisons
+ kLtBias, // return -1 for NaN comparisons
+ };
+
+ HCompare(Primitive::Type type, HInstruction* first, HInstruction* second, Bias bias)
+ : HBinaryOperation(Primitive::kPrimInt, first, second), bias_(bias) {
DCHECK_EQ(type, first->GetType());
DCHECK_EQ(type, second->GetType());
}
- virtual int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE {
+ int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE {
return
x == y ? 0 :
x > y ? 1 :
-1;
}
- virtual int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE {
+
+ int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE {
return
x == y ? 0 :
x > y ? 1 :
-1;
}
+ bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
+ return bias_ == other->AsCompare()->bias_;
+ }
+
+ bool IsGtBias() { return bias_ == kGtBias; }
+
DECLARE_INSTRUCTION(Compare);
private:
+ const Bias bias_;
+
DISALLOW_COPY_AND_ASSIGN(HCompare);
};
@@ -1652,7 +1670,12 @@ class HNewInstance : public HExpression<0> {
uint16_t GetTypeIndex() const { return type_index_; }
// Calls runtime so needs an environment.
- virtual bool NeedsEnvironment() const { return true; }
+ bool NeedsEnvironment() const OVERRIDE { return true; }
+ // It may throw when called on:
+ // - interfaces
+  // - abstract/inaccessible/unknown classes
+ // TODO: optimize when possible.
+ bool CanThrow() const OVERRIDE { return true; }
DECLARE_INSTRUCTION(NewInstance);
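
The two bias values mirror the dex cmpg-/cmpl- semantics: on a NaN operand a gt-biased compare yields 1 and an lt-biased one yields -1. A standalone sketch of that rule for floats (CompareFloat is a hypothetical helper, not ART code):

    #include <cmath>

    int CompareFloat(float x, float y, bool gt_bias) {
      if (std::isnan(x) || std::isnan(y)) {
        return gt_bias ? 1 : -1;   // kGtBias -> 1, kLtBias -> -1
      }
      return x == y ? 0 : (x > y ? 1 : -1);
    }
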
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index d8533eb8bf..100a6bc4a3 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -192,7 +192,6 @@ static bool CanOptimize(const DexFile::CodeItem& code_item) {
}
static void RunOptimizations(HGraph* graph, const HGraphVisualizer& visualizer) {
- TransformToSsa ssa(graph);
HDeadCodeElimination opt1(graph);
HConstantFolding opt2(graph);
SsaRedundantPhiElimination opt3(graph);
@@ -202,7 +201,6 @@ static void RunOptimizations(HGraph* graph, const HGraphVisualizer& visualizer)
InstructionSimplifier opt7(graph);
HOptimization* optimizations[] = {
- &ssa,
&opt1,
&opt2,
&opt3,
@@ -220,6 +218,23 @@ static void RunOptimizations(HGraph* graph, const HGraphVisualizer& visualizer)
}
}
+static bool TryBuildingSsa(HGraph* graph,
+ const DexCompilationUnit& dex_compilation_unit,
+ const HGraphVisualizer& visualizer) {
+ graph->BuildDominatorTree();
+ graph->TransformToSSA();
+
+ if (!graph->AnalyzeNaturalLoops()) {
+ LOG(INFO) << "Skipping compilation of "
+ << PrettyMethod(dex_compilation_unit.GetDexMethodIndex(),
+ *dex_compilation_unit.GetDexFile())
+ << ": it contains a non natural loop";
+ return false;
+ }
+ visualizer.DumpGraph("ssa transform");
+ return true;
+}
+
CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
uint32_t access_flags,
InvokeType invoke_type,
@@ -281,7 +296,12 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
if (run_optimizations_
&& CanOptimize(*code_item)
&& RegisterAllocator::CanAllocateRegistersFor(*graph, instruction_set)) {
+ VLOG(compiler) << "Optimizing " << PrettyMethod(method_idx, dex_file);
optimized_compiled_methods_++;
+ if (!TryBuildingSsa(graph, dex_compilation_unit, visualizer)) {
+ // We could not transform the graph to SSA, bailout.
+ return nullptr;
+ }
RunOptimizations(graph, visualizer);
PrepareForRegisterAllocation(graph).Run();
@@ -316,23 +336,10 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
LOG(FATAL) << "Could not allocate registers in optimizing compiler";
UNREACHABLE();
} else {
+ VLOG(compiler) << "Compile baseline " << PrettyMethod(method_idx, dex_file);
unoptimized_compiled_methods_++;
codegen->CompileBaseline(&allocator);
- if (CanOptimize(*code_item)) {
- // Run these phases to get some test coverage.
- graph->BuildDominatorTree();
- graph->TransformToSSA();
- visualizer.DumpGraph("ssa");
- graph->FindNaturalLoops();
- SsaRedundantPhiElimination(graph).Run();
- SsaDeadPhiElimination(graph).Run();
- GVNOptimization(graph).Run();
- SsaLivenessAnalysis liveness(*graph, codegen);
- liveness.Analyze();
- visualizer.DumpGraph(kLivenessPassName);
- }
-
std::vector<uint8_t> mapping_table;
SrcMap src_mapping_table;
codegen->BuildMappingTable(&mapping_table,
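
An example of the kind of CFG that makes TryBuildingSsa() bail out (illustrative): a loop that can be entered through two different blocks is irreducible, so no header dominates its back edge.

        entry -> A      entry -> B
        A -> B          B -> A

    Whichever of A or B is taken as the loop header does not dominate the
    other, so AnalyzeNaturalLoops() returns false and the method is skipped
    with the "non natural loop" message instead of being compiled.
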
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index 2948496e15..a6c06359a0 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -70,7 +70,8 @@ bool RegisterAllocator::CanAllocateRegistersFor(const HGraph& graph,
it.Advance()) {
HInstruction* current = it.Current();
if (current->GetType() == Primitive::kPrimLong && instruction_set != kX86_64) return false;
- if ((current->GetType() == Primitive::kPrimFloat || current->GetType() == Primitive::kPrimDouble)
+ if ((current->GetType() == Primitive::kPrimFloat
+ || current->GetType() == Primitive::kPrimDouble)
&& instruction_set != kX86_64) {
return false;
}
@@ -95,6 +96,25 @@ void RegisterAllocator::AllocateRegisters() {
ValidateInternal(true);
processing_core_registers_ = false;
ValidateInternal(true);
+    // Check that the linear order is still correct with regard to lifetime positions.
+ // Since only parallel moves have been inserted during the register allocation,
+ // these checks are mostly for making sure these moves have been added correctly.
+ size_t current_liveness = 0;
+ for (HLinearOrderIterator it(liveness_); !it.Done(); it.Advance()) {
+ HBasicBlock* block = it.Current();
+ for (HInstructionIterator inst_it(block->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
+ HInstruction* instruction = inst_it.Current();
+ DCHECK_LE(current_liveness, instruction->GetLifetimePosition());
+ current_liveness = instruction->GetLifetimePosition();
+ }
+ for (HInstructionIterator inst_it(block->GetInstructions());
+ !inst_it.Done();
+ inst_it.Advance()) {
+ HInstruction* instruction = inst_it.Current();
+ DCHECK_LE(current_liveness, instruction->GetLifetimePosition()) << instruction->DebugName();
+ current_liveness = instruction->GetLifetimePosition();
+ }
+ }
}
}
@@ -189,11 +209,29 @@ void RegisterAllocator::ProcessInstruction(HInstruction* instruction) {
BlockRegister(temp, position, position + 1);
} else {
DCHECK(temp.IsUnallocated());
- DCHECK(temp.GetPolicy() == Location::kRequiresRegister);
- LiveInterval* interval = LiveInterval::MakeTempInterval(allocator_, Primitive::kPrimInt);
- temp_intervals_.Add(interval);
- interval->AddRange(position, position + 1);
- unhandled_core_intervals_.Add(interval);
+ switch (temp.GetPolicy()) {
+ case Location::kRequiresRegister: {
+ LiveInterval* interval =
+ LiveInterval::MakeTempInterval(allocator_, Primitive::kPrimInt);
+ temp_intervals_.Add(interval);
+ interval->AddRange(position, position + 1);
+ unhandled_core_intervals_.Add(interval);
+ break;
+ }
+
+ case Location::kRequiresFpuRegister: {
+ LiveInterval* interval =
+ LiveInterval::MakeTempInterval(allocator_, Primitive::kPrimDouble);
+ temp_intervals_.Add(interval);
+ interval->AddRange(position, position + 1);
+ unhandled_fp_intervals_.Add(interval);
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unexpected policy for temporary location "
+ << temp.GetPolicy();
+ }
}
}
@@ -215,14 +253,7 @@ void RegisterAllocator::ProcessInstruction(HInstruction* instruction) {
// By adding the following interval in the algorithm, we can compute this
// maximum before updating locations.
LiveInterval* interval = LiveInterval::MakeSlowPathInterval(allocator_, instruction);
- // The start of the interval must be after the position of the safepoint, so that
- // we can just check the number of active registers at that position. Note that this
- // will include the current interval in the computation of
- // `maximum_number_of_live_registers`, so we need a better strategy if this becomes
- // a problem.
- // TODO: We could put the logic in AddSorted, to ensure the safepoint range is
- // after all other intervals starting at that same position.
- interval->AddRange(position + 1, position + 2);
+ interval->AddRange(position, position + 1);
AddSorted(&unhandled_core_intervals_, interval);
AddSorted(&unhandled_fp_intervals_, interval);
}
@@ -484,16 +515,6 @@ void RegisterAllocator::LinearScan() {
DCHECK(!current->IsFixed() && !current->HasSpillSlot());
DCHECK(unhandled_->IsEmpty() || unhandled_->Peek()->GetStart() >= current->GetStart());
- if (current->IsSlowPathSafepoint()) {
- // Synthesized interval to record the maximum number of live registers
- // at safepoints. No need to allocate a register for it.
- // We know that current actives are all live at the safepoint (modulo
- // the one created by the safepoint).
- maximum_number_of_live_registers_ =
- std::max(maximum_number_of_live_registers_, active_.Size());
- continue;
- }
-
size_t position = current->GetStart();
// Remember the inactive_ size here since the ones moved to inactive_ from
@@ -534,6 +555,15 @@ void RegisterAllocator::LinearScan() {
}
}
+ if (current->IsSlowPathSafepoint()) {
+ // Synthesized interval to record the maximum number of live registers
+ // at safepoints. No need to allocate a register for it.
+ maximum_number_of_live_registers_ =
+ std::max(maximum_number_of_live_registers_, active_.Size());
+ DCHECK(unhandled_->IsEmpty() || unhandled_->Peek()->GetStart() > current->GetStart());
+ continue;
+ }
+
// (4) Try to find an available register.
bool success = TryAllocateFreeReg(current);
@@ -775,6 +805,12 @@ void RegisterAllocator::AddSorted(GrowableArray<LiveInterval*>* array, LiveInter
if (current->StartsAfter(interval)) {
insert_at = i;
break;
+ } else if ((current->GetStart() == interval->GetStart()) && current->IsSlowPathSafepoint()) {
+ // Ensure the slow path interval is the last to be processed at its location: we want the
+ // interval to know all live registers at this location.
+ DCHECK(i == 1 || array->Get(i - 2)->StartsAfter(current));
+ insert_at = i;
+ break;
}
}
array->InsertAt(insert_at, interval);
@@ -887,6 +923,14 @@ void RegisterAllocator::AddInputMoveFor(HInstruction* user,
move->AddMove(new (allocator_) MoveOperands(source, destination, nullptr));
}
+static bool IsInstructionStart(size_t position) {
+ return (position & 1) == 0;
+}
+
+static bool IsInstructionEnd(size_t position) {
+ return (position & 1) == 1;
+}
+
void RegisterAllocator::InsertParallelMoveAt(size_t position,
HInstruction* instruction,
Location source,
@@ -895,12 +939,29 @@ void RegisterAllocator::InsertParallelMoveAt(size_t position,
if (source.Equals(destination)) return;
HInstruction* at = liveness_.GetInstructionFromPosition(position / 2);
- if (at == nullptr) {
- // Block boundary, don't do anything the connection of split siblings will handle it.
- return;
- }
HParallelMove* move;
- if ((position & 1) == 1) {
+ if (at == nullptr) {
+ if (IsInstructionStart(position)) {
+      // Block boundary, don't do anything; the connection of split siblings will handle it.
+ return;
+ } else {
+ // Move must happen before the first instruction of the block.
+ at = liveness_.GetInstructionFromPosition((position + 1) / 2);
+ // Note that parallel moves may have already been inserted, so we explicitly
+ // ask for the first instruction of the block: `GetInstructionFromPosition` does
+ // not contain the moves.
+ at = at->GetBlock()->GetFirstInstruction();
+ if (at->GetLifetimePosition() != position) {
+ DCHECK_GT(at->GetLifetimePosition(), position);
+ move = new (allocator_) HParallelMove(allocator_);
+ move->SetLifetimePosition(position);
+ at->GetBlock()->InsertInstructionBefore(move, at);
+ } else {
+ DCHECK(at->IsParallelMove());
+ move = at->AsParallelMove();
+ }
+ }
+ } else if (IsInstructionEnd(position)) {
// Move must happen after the instruction.
DCHECK(!at->IsControlFlow());
move = at->GetNext()->AsParallelMove();
@@ -952,10 +1013,11 @@ void RegisterAllocator::InsertParallelMoveAtExitOf(HBasicBlock* block,
HParallelMove* move;
// This is a parallel move for connecting blocks. We need to differentiate
// it with moves for connecting siblings in a same block, and output moves.
+ size_t position = last->GetLifetimePosition();
if (previous == nullptr || !previous->IsParallelMove()
- || previous->AsParallelMove()->GetLifetimePosition() != block->GetLifetimeEnd()) {
+ || previous->AsParallelMove()->GetLifetimePosition() != position) {
move = new (allocator_) HParallelMove(allocator_);
- move->SetLifetimePosition(block->GetLifetimeEnd());
+ move->SetLifetimePosition(position);
block->InsertInstructionBefore(move, last);
} else {
move = previous->AsParallelMove();
@@ -1074,6 +1136,7 @@ void RegisterAllocator::ConnectSiblings(LiveInterval* interval) {
case Location::kRegister: {
locations->AddLiveRegister(source);
DCHECK_LE(locations->GetNumberOfLiveRegisters(), maximum_number_of_live_registers_);
+
if (current->GetType() == Primitive::kPrimNot) {
locations->SetRegisterBit(source.reg());
}
@@ -1107,12 +1170,10 @@ void RegisterAllocator::ConnectSplitSiblings(LiveInterval* interval,
return;
}
+ // Intervals end at the lifetime end of a block. The decrement by one
+ // ensures the `Cover` call will return true.
size_t from_position = from->GetLifetimeEnd() - 1;
- // When an instruction dies at entry of another, and the latter is the beginning
- // of a block, the register allocator ensures the former has a register
- // at block->GetLifetimeStart() + 1. Since this is at a block boundary, it must
- // must be handled in this method.
- size_t to_position = to->GetLifetimeStart() + 1;
+ size_t to_position = to->GetLifetimeStart();
LiveInterval* destination = nullptr;
LiveInterval* source = nullptr;
@@ -1250,9 +1311,27 @@ void RegisterAllocator::Resolve() {
current = at;
}
LocationSummary* locations = at->GetLocations();
- DCHECK(temp->GetType() == Primitive::kPrimInt);
- locations->SetTempAt(
- temp_index++, Location::RegisterLocation(temp->GetRegister()));
+ switch (temp->GetType()) {
+ case Primitive::kPrimInt:
+ locations->SetTempAt(
+ temp_index++, Location::RegisterLocation(temp->GetRegister()));
+ break;
+
+ case Primitive::kPrimDouble:
+ // TODO: Support the case of ARM, where a double value
+ // requires an FPU register pair (note that the ARM back end
+ // does not yet use this register allocator when a method uses
+ // floats or doubles).
+ DCHECK(codegen_->GetInstructionSet() != kArm
+ && codegen_->GetInstructionSet() != kThumb2);
+ locations->SetTempAt(
+ temp_index++, Location::FpuRegisterLocation(temp->GetRegister()));
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type for temporary location "
+ << temp->GetType();
+ }
}
}
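
What the new kRequiresFpuRegister temporaries enable, as a sketch (the VisitFoo methods and HFoo are hypothetical; the individual calls are assumed to exist in the code base): a visitor can reserve an FP scratch register and the allocator will hand back an FpuRegisterLocation for it.

    void LocationsBuilderX86_64::VisitFoo(HFoo* instr) {          // hypothetical
      LocationSummary* locations =
          new (GetGraph()->GetArena()) LocationSummary(instr, LocationSummary::kNoCall);
      locations->AddTemp(Location::RequiresFpuRegister());        // FP scratch
    }

    void InstructionCodeGeneratorX86_64::VisitFoo(HFoo* instr) {  // hypothetical
      XmmRegister tmp = instr->GetLocations()->GetTemp(0).AsFpuRegister<XmmRegister>();
      // ... use tmp ...
    }
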
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index ba4be34ca3..8d75db91d2 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -41,7 +41,7 @@ static bool Check(const uint16_t* data) {
HGraph* graph = builder.BuildGraph(*item);
graph->BuildDominatorTree();
graph->TransformToSSA();
- graph->FindNaturalLoops();
+ graph->AnalyzeNaturalLoops();
x86::CodeGeneratorX86 codegen(graph);
SsaLivenessAnalysis liveness(*graph, &codegen);
liveness.Analyze();
@@ -255,7 +255,7 @@ static HGraph* BuildSSAGraph(const uint16_t* data, ArenaAllocator* allocator) {
HGraph* graph = builder.BuildGraph(*item);
graph->BuildDominatorTree();
graph->TransformToSSA();
- graph->FindNaturalLoops();
+ graph->AnalyzeNaturalLoops();
return graph;
}
@@ -494,7 +494,7 @@ static HGraph* BuildIfElseWithPhi(ArenaAllocator* allocator,
(*phi)->AddInput(*input2);
graph->BuildDominatorTree();
- graph->FindNaturalLoops();
+ graph->AnalyzeNaturalLoops();
return graph;
}
diff --git a/compiler/optimizing/ssa_builder.h b/compiler/optimizing/ssa_builder.h
index 5ab328fe23..2cbd51aa10 100644
--- a/compiler/optimizing/ssa_builder.h
+++ b/compiler/optimizing/ssa_builder.h
@@ -22,20 +22,6 @@
namespace art {
-class TransformToSsa : public HOptimization {
- public:
- explicit TransformToSsa(HGraph* graph) : HOptimization(graph, true, "ssa transform") {}
-
- void Run() OVERRIDE {
- graph_->BuildDominatorTree();
- graph_->TransformToSSA();
- graph_->FindNaturalLoops();
- }
-
- private:
- DISALLOW_COPY_AND_ASSIGN(TransformToSsa);
-};
-
static constexpr int kDefaultNumberOfLoops = 2;
class SsaBuilder : public HGraphVisitor {
diff --git a/compiler/trampolines/trampoline_compiler.cc b/compiler/trampolines/trampoline_compiler.cc
index 733b58fa5a..cb07ffae84 100644
--- a/compiler/trampolines/trampoline_compiler.cc
+++ b/compiler/trampolines/trampoline_compiler.cc
@@ -83,6 +83,7 @@ static const std::vector<uint8_t>* CreateTrampoline(EntryPointCallingConvention
break;
}
+ assembler->EmitSlowPaths();
size_t cs = assembler->CodeSize();
std::unique_ptr<std::vector<uint8_t>> entry_stub(new std::vector<uint8_t>(cs));
MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
diff --git a/compiler/utils/arena_allocator.cc b/compiler/utils/arena_allocator.cc
index 004af98852..a80ad938a6 100644
--- a/compiler/utils/arena_allocator.cc
+++ b/compiler/utils/arena_allocator.cc
@@ -189,6 +189,15 @@ Arena* ArenaPool::AllocArena(size_t size) {
return ret;
}
+size_t ArenaPool::GetBytesAllocated() const {
+ size_t total = 0;
+ MutexLock lock(Thread::Current(), lock_);
+ for (Arena* arena = free_arenas_; arena != nullptr; arena = arena->next_) {
+ total += arena->GetBytesAllocated();
+ }
+ return total;
+}
+
void ArenaPool::FreeArenaChain(Arena* first) {
if (UNLIKELY(RUNNING_ON_VALGRIND > 0)) {
for (Arena* arena = first; arena != nullptr; arena = arena->next_) {
diff --git a/compiler/utils/arena_allocator.h b/compiler/utils/arena_allocator.h
index 6d213991d3..7f5bc9ac4c 100644
--- a/compiler/utils/arena_allocator.h
+++ b/compiler/utils/arena_allocator.h
@@ -135,6 +135,10 @@ class Arena {
return Size() - bytes_allocated_;
}
+ size_t GetBytesAllocated() const {
+ return bytes_allocated_;
+ }
+
private:
size_t bytes_allocated_;
uint8_t* memory_;
@@ -153,11 +157,12 @@ class ArenaPool {
public:
ArenaPool();
~ArenaPool();
- Arena* AllocArena(size_t size);
- void FreeArenaChain(Arena* first);
+ Arena* AllocArena(size_t size) LOCKS_EXCLUDED(lock_);
+ void FreeArenaChain(Arena* first) LOCKS_EXCLUDED(lock_);
+ size_t GetBytesAllocated() const LOCKS_EXCLUDED(lock_);
private:
- Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
Arena* free_arenas_ GUARDED_BY(lock_);
DISALLOW_COPY_AND_ASSIGN(ArenaPool);
};
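
The lock_ member becomes mutable so that the new const accessor can still take it. The same pattern on a stripped-down standalone type (std::mutex instead of art::Mutex, purely illustrative):

    #include <cstddef>
    #include <mutex>

    class Pool {
     public:
      size_t GetBytesAllocated() const {
        std::lock_guard<std::mutex> guard(lock_);  // legal: lock_ is mutable
        return bytes_;
      }
     private:
      mutable std::mutex lock_;  // a const reader still needs to lock
      size_t bytes_ = 0;
    };
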
diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc
index 0f28591775..05287732c5 100644
--- a/compiler/utils/arm/assembler_arm.cc
+++ b/compiler/utils/arm/assembler_arm.cc
@@ -165,36 +165,6 @@ uint32_t ShifterOperand::encodingThumb() const {
return 0;
}
-bool ShifterOperand::CanHoldThumb(Register rd, Register rn, Opcode opcode,
- uint32_t immediate, ShifterOperand* shifter_op) {
- shifter_op->type_ = kImmediate;
- shifter_op->immed_ = immediate;
- shifter_op->is_shift_ = false;
- shifter_op->is_rotate_ = false;
- switch (opcode) {
- case ADD:
- case SUB:
- if (rn == SP) {
- if (rd == SP) {
- return immediate < (1 << 9); // 9 bits allowed.
- } else {
- return immediate < (1 << 12); // 12 bits.
- }
- }
- if (immediate < (1 << 12)) { // Less than (or equal to) 12 bits can always be done.
- return true;
- }
- return ArmAssembler::ModifiedImmediate(immediate) != kInvalidModifiedImmediate;
-
- case MOV:
- // TODO: Support less than or equal to 12bits.
- return ArmAssembler::ModifiedImmediate(immediate) != kInvalidModifiedImmediate;
- case MVN:
- default:
- return ArmAssembler::ModifiedImmediate(immediate) != kInvalidModifiedImmediate;
- }
-}
-
uint32_t Address::encodingArm() const {
CHECK(IsAbsoluteUint(12, offset_));
uint32_t encoding;
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
index d288b700ed..c86ec4b3d6 100644
--- a/compiler/utils/arm/assembler_arm.h
+++ b/compiler/utils/arm/assembler_arm.h
@@ -30,6 +30,9 @@
namespace art {
namespace arm {
+class Arm32Assembler;
+class Thumb2Assembler;
+
class ShifterOperand {
public:
ShifterOperand() : type_(kUnknown), rm_(kNoRegister), rs_(kNoRegister),
@@ -103,33 +106,6 @@ class ShifterOperand {
kImmediate
};
- static bool CanHoldArm(uint32_t immediate, ShifterOperand* shifter_op) {
- // Avoid the more expensive test for frequent small immediate values.
- if (immediate < (1 << kImmed8Bits)) {
- shifter_op->type_ = kImmediate;
- shifter_op->is_rotate_ = true;
- shifter_op->rotate_ = 0;
- shifter_op->immed_ = immediate;
- return true;
- }
- // Note that immediate must be unsigned for the test to work correctly.
- for (int rot = 0; rot < 16; rot++) {
- uint32_t imm8 = (immediate << 2*rot) | (immediate >> (32 - 2*rot));
- if (imm8 < (1 << kImmed8Bits)) {
- shifter_op->type_ = kImmediate;
- shifter_op->is_rotate_ = true;
- shifter_op->rotate_ = rot;
- shifter_op->immed_ = imm8;
- return true;
- }
- }
- return false;
- }
-
- static bool CanHoldThumb(Register rd, Register rn, Opcode opcode,
- uint32_t immediate, ShifterOperand* shifter_op);
-
-
private:
Type type_;
Register rm_;
@@ -140,6 +116,9 @@ class ShifterOperand {
uint32_t rotate_;
uint32_t immed_;
+ friend class Arm32Assembler;
+ friend class Thumb2Assembler;
+
#ifdef SOURCE_ASSEMBLER_SUPPORT
friend class BinaryAssembler;
#endif
@@ -611,6 +590,14 @@ class ArmAssembler : public Assembler {
virtual void Ror(Register rd, Register rm, Register rn, bool setcc = false,
Condition cond = AL) = 0;
+ // Returns whether the `immediate` can fit in a `ShifterOperand`. If yes,
+ // `shifter_op` contains the operand.
+ virtual bool ShifterOperandCanHold(Register rd,
+ Register rn,
+ Opcode opcode,
+ uint32_t immediate,
+ ShifterOperand* shifter_op) = 0;
+
static bool IsInstructionForExceptionHandling(uintptr_t pc);
virtual void Bind(Label* label) = 0;
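
A sketch of how a caller is expected to use the new virtual hook (the AddImmediate helper is hypothetical; add(), LoadImmediate(), IP and the ADD opcode are existing ARM assembler names): the encoding check is now dispatched to the concrete assembler, so ARM32 and Thumb2 each apply their own rules.

    void AddImmediate(ArmAssembler* assembler, Register rd, Register rn, int32_t value) {
      ShifterOperand so;
      if (assembler->ShifterOperandCanHold(rd, rn, ADD, static_cast<uint32_t>(value), &so)) {
        assembler->add(rd, rn, so);                 // immediate encodes directly
      } else {
        assembler->LoadImmediate(IP, value);        // otherwise via the scratch register
        assembler->add(rd, rn, ShifterOperand(IP));
      }
    }
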
diff --git a/compiler/utils/arm/assembler_arm32.cc b/compiler/utils/arm/assembler_arm32.cc
index a541763881..8f6d45ab53 100644
--- a/compiler/utils/arm/assembler_arm32.cc
+++ b/compiler/utils/arm/assembler_arm32.cc
@@ -25,6 +25,37 @@
namespace art {
namespace arm {
+bool Arm32Assembler::ShifterOperandCanHoldArm32(uint32_t immediate, ShifterOperand* shifter_op) {
+ // Avoid the more expensive test for frequent small immediate values.
+ if (immediate < (1 << kImmed8Bits)) {
+ shifter_op->type_ = ShifterOperand::kImmediate;
+ shifter_op->is_rotate_ = true;
+ shifter_op->rotate_ = 0;
+ shifter_op->immed_ = immediate;
+ return true;
+ }
+ // Note that immediate must be unsigned for the test to work correctly.
+ for (int rot = 0; rot < 16; rot++) {
+ uint32_t imm8 = (immediate << 2*rot) | (immediate >> (32 - 2*rot));
+ if (imm8 < (1 << kImmed8Bits)) {
+ shifter_op->type_ = ShifterOperand::kImmediate;
+ shifter_op->is_rotate_ = true;
+ shifter_op->rotate_ = rot;
+ shifter_op->immed_ = imm8;
+ return true;
+ }
+ }
+ return false;
+}
+
+bool Arm32Assembler::ShifterOperandCanHold(Register rd ATTRIBUTE_UNUSED,
+ Register rn ATTRIBUTE_UNUSED,
+ Opcode opcode ATTRIBUTE_UNUSED,
+ uint32_t immediate,
+ ShifterOperand* shifter_op) {
+ return ShifterOperandCanHoldArm32(immediate, shifter_op);
+}
+
void Arm32Assembler::and_(Register rd, Register rn, const ShifterOperand& so,
Condition cond) {
EmitType01(cond, so.type(), AND, 0, rn, rd, so);
@@ -1291,16 +1322,16 @@ void Arm32Assembler::AddConstant(Register rd, Register rn, int32_t value,
// positive values and sub for negatives ones, which would slightly improve
// the readability of generated code for some constants.
ShifterOperand shifter_op;
- if (ShifterOperand::CanHoldArm(value, &shifter_op)) {
+ if (ShifterOperandCanHoldArm32(value, &shifter_op)) {
add(rd, rn, shifter_op, cond);
- } else if (ShifterOperand::CanHoldArm(-value, &shifter_op)) {
+ } else if (ShifterOperandCanHoldArm32(-value, &shifter_op)) {
sub(rd, rn, shifter_op, cond);
} else {
CHECK(rn != IP);
- if (ShifterOperand::CanHoldArm(~value, &shifter_op)) {
+ if (ShifterOperandCanHoldArm32(~value, &shifter_op)) {
mvn(IP, shifter_op, cond);
add(rd, rn, ShifterOperand(IP), cond);
- } else if (ShifterOperand::CanHoldArm(~(-value), &shifter_op)) {
+ } else if (ShifterOperandCanHoldArm32(~(-value), &shifter_op)) {
mvn(IP, shifter_op, cond);
sub(rd, rn, ShifterOperand(IP), cond);
} else {
@@ -1318,16 +1349,16 @@ void Arm32Assembler::AddConstant(Register rd, Register rn, int32_t value,
void Arm32Assembler::AddConstantSetFlags(Register rd, Register rn, int32_t value,
Condition cond) {
ShifterOperand shifter_op;
- if (ShifterOperand::CanHoldArm(value, &shifter_op)) {
+ if (ShifterOperandCanHoldArm32(value, &shifter_op)) {
adds(rd, rn, shifter_op, cond);
- } else if (ShifterOperand::CanHoldArm(-value, &shifter_op)) {
+ } else if (ShifterOperandCanHoldArm32(-value, &shifter_op)) {
subs(rd, rn, shifter_op, cond);
} else {
CHECK(rn != IP);
- if (ShifterOperand::CanHoldArm(~value, &shifter_op)) {
+ if (ShifterOperandCanHoldArm32(~value, &shifter_op)) {
mvn(IP, shifter_op, cond);
adds(rd, rn, ShifterOperand(IP), cond);
- } else if (ShifterOperand::CanHoldArm(~(-value), &shifter_op)) {
+ } else if (ShifterOperandCanHoldArm32(~(-value), &shifter_op)) {
mvn(IP, shifter_op, cond);
subs(rd, rn, ShifterOperand(IP), cond);
} else {
@@ -1343,9 +1374,9 @@ void Arm32Assembler::AddConstantSetFlags(Register rd, Register rn, int32_t value
void Arm32Assembler::LoadImmediate(Register rd, int32_t value, Condition cond) {
ShifterOperand shifter_op;
- if (ShifterOperand::CanHoldArm(value, &shifter_op)) {
+ if (ShifterOperandCanHoldArm32(value, &shifter_op)) {
mov(rd, shifter_op, cond);
- } else if (ShifterOperand::CanHoldArm(~value, &shifter_op)) {
+ } else if (ShifterOperandCanHoldArm32(~value, &shifter_op)) {
mvn(rd, shifter_op, cond);
} else {
movw(rd, Low16Bits(value), cond);
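
A worked example of what ShifterOperandCanHoldArm32() accepts: an A32 data-processing immediate is an 8-bit value rotated right by an even amount. 0xFF000000 is encodable (0xFF rotated right by 8, i.e. imm8 = 0xFF with rotate field 4), while 0x00000101 is not, since its two set bits are 8 positions apart and can never both fall inside one 8-bit window. In that second case the AddConstant()/LoadImmediate() paths above fall back to mvn through IP or to a movw/movt pair.
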
diff --git a/compiler/utils/arm/assembler_arm32.h b/compiler/utils/arm/assembler_arm32.h
index 0b009e16d9..6c8d41587b 100644
--- a/compiler/utils/arm/assembler_arm32.h
+++ b/compiler/utils/arm/assembler_arm32.h
@@ -273,6 +273,12 @@ class Arm32Assembler FINAL : public ArmAssembler {
int32_t offset,
Condition cond = AL) OVERRIDE;
+ bool ShifterOperandCanHold(Register rd,
+ Register rn,
+ Opcode opcode,
+ uint32_t immediate,
+ ShifterOperand* shifter_op) OVERRIDE;
+
static bool IsInstructionForExceptionHandling(uintptr_t pc);
@@ -359,6 +365,7 @@ class Arm32Assembler FINAL : public ArmAssembler {
static int DecodeBranchOffset(int32_t inst);
int32_t EncodeTstOffset(int offset, int32_t inst);
int DecodeTstOffset(int32_t inst);
+ bool ShifterOperandCanHoldArm32(uint32_t immediate, ShifterOperand* shifter_op);
};
} // namespace arm
diff --git a/compiler/utils/arm/assembler_arm32_test.cc b/compiler/utils/arm/assembler_arm32_test.cc
index 837fe1ec18..951792d45b 100644
--- a/compiler/utils/arm/assembler_arm32_test.cc
+++ b/compiler/utils/arm/assembler_arm32_test.cc
@@ -49,7 +49,8 @@ class AssemblerArm32Test : public AssemblerArmTest<arm::Arm32Assembler,
}
std::string GetAssemblerParameters() OVERRIDE {
- return " -march=armv7-a -mcpu=cortex-a15"; // Arm-v7a, cortex-a15 (means we have sdiv).
+ // Arm-v7a, cortex-a15 (means we have sdiv).
+ return " -march=armv7-a -mcpu=cortex-a15 -mfpu=neon";
}
const char* GetAssemblyHeader() OVERRIDE {
@@ -688,4 +689,12 @@ TEST_F(AssemblerArm32Test, Bx) {
T2Helper(&arm::Arm32Assembler::bx, true, "bx{cond} {reg1}", "bx");
}
+TEST_F(AssemblerArm32Test, Vmstat) {
+ GetAssembler()->vmstat();
+
+ const char* expected = "vmrs APSR_nzcv, FPSCR\n";
+
+ DriverStr(expected, "vmrs");
+}
+
} // namespace art
diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc
index a377cb2892..479186c5d7 100644
--- a/compiler/utils/arm/assembler_thumb2.cc
+++ b/compiler/utils/arm/assembler_thumb2.cc
@@ -25,6 +25,39 @@
namespace art {
namespace arm {
+bool Thumb2Assembler::ShifterOperandCanHold(Register rd,
+ Register rn,
+ Opcode opcode,
+ uint32_t immediate,
+ ShifterOperand* shifter_op) {
+ shifter_op->type_ = ShifterOperand::kImmediate;
+ shifter_op->immed_ = immediate;
+ shifter_op->is_shift_ = false;
+ shifter_op->is_rotate_ = false;
+ switch (opcode) {
+ case ADD:
+ case SUB:
+ if (rn == SP) {
+ if (rd == SP) {
+ return immediate < (1 << 9); // 9 bits allowed.
+ } else {
+ return immediate < (1 << 12); // 12 bits.
+ }
+ }
+ if (immediate < (1 << 12)) { // Immediates that fit in 12 bits can always be encoded.
+ return true;
+ }
+ return ArmAssembler::ModifiedImmediate(immediate) != kInvalidModifiedImmediate;
+
+ case MOV:
+ // TODO: Support less than or equal to 12 bits.
+ return ArmAssembler::ModifiedImmediate(immediate) != kInvalidModifiedImmediate;
+ case MVN:
+ default:
+ return ArmAssembler::ModifiedImmediate(immediate) != kInvalidModifiedImmediate;
+ }
+}
+
void Thumb2Assembler::and_(Register rd, Register rn, const ShifterOperand& so,
Condition cond) {
EmitDataProcessing(cond, AND, 0, rn, rd, so);
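Aside: the new ShifterOperandCanHold() hook above special-cases ADD/SUB, allowing 9 bits when both rn and rd are SP, 12 bits for other SP-relative adds, and any immediate below 1 << 12 otherwise, before falling back to the Thumb2 modified-immediate test. A minimal usage sketch, assuming assembler points at an arm::Thumb2Assembler (or any ArmAssembler exposing the new virtual):

    arm::ShifterOperand shifter_op;
    if (assembler->ShifterOperandCanHold(arm::R0, arm::SP, arm::ADD, 496u, &shifter_op)) {
      assembler->add(arm::R0, arm::SP, shifter_op);            // Fits the 12-bit SP-relative ADD.
    } else {
      assembler->LoadImmediate(arm::IP, 496);                  // Otherwise go through the scratch register.
      assembler->add(arm::R0, arm::SP, arm::ShifterOperand(arm::IP));
    }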
@@ -374,16 +407,11 @@ void Thumb2Assembler::ldm(BlockAddressMode am,
Register base,
RegList regs,
Condition cond) {
- if (__builtin_popcount(regs) == 1) {
+ CHECK_NE(regs, 0u); // Do not use ldm if there's nothing to load.
+ if (IsPowerOfTwo(regs)) {
// Thumb doesn't support one reg in the list.
// Find the register number.
- int reg = 0;
- while (reg < 16) {
- if ((regs & (1 << reg)) != 0) {
- break;
- }
- ++reg;
- }
+ int reg = CTZ(static_cast<uint32_t>(regs));
CHECK_LT(reg, 16);
CHECK(am == DB_W); // Only writeback is supported.
ldr(static_cast<Register>(reg), Address(base, kRegisterSize, Address::PostIndex), cond);
@@ -397,16 +425,11 @@ void Thumb2Assembler::stm(BlockAddressMode am,
Register base,
RegList regs,
Condition cond) {
- if (__builtin_popcount(regs) == 1) {
+ CHECK_NE(regs, 0u); // Do not use stm if there's nothing to store.
+ if (IsPowerOfTwo(regs)) {
// Thumb doesn't support one reg in the list.
// Find the register number.
- int reg = 0;
- while (reg < 16) {
- if ((regs & (1 << reg)) != 0) {
- break;
- }
- ++reg;
- }
+ int reg = CTZ(static_cast<uint32_t>(regs));
CHECK_LT(reg, 16);
CHECK(am == IA || am == IA_W);
Address::Mode strmode = am == IA ? Address::PreIndex : Address::Offset;
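Aside: with a single-bit register list both rewrites above degenerate to a plain ldr/str, and CTZ recovers the register index directly. A tiny sketch of the predicate, using compiler builtins in place of ART's IsPowerOfTwo/CTZ helpers (an assumption, for illustration only):

    uint32_t regs = 1u << 5;                                     // Register list holding only R5.
    bool single_reg = regs != 0u && (regs & (regs - 1u)) == 0u;  // IsPowerOfTwo(regs).
    int reg = __builtin_ctz(regs);                               // CTZ(regs) == 5.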
@@ -813,6 +836,7 @@ void Thumb2Assembler::Emit32BitDataProcessing(Condition cond ATTRIBUTE_UNUSED,
if (thumb_opcode == 255U /* 0b11111111 */) {
LOG(FATAL) << "Invalid thumb2 opcode " << opcode;
+ UNREACHABLE();
}
int32_t encoding = 0;
@@ -842,6 +866,7 @@ void Thumb2Assembler::Emit32BitDataProcessing(Condition cond ATTRIBUTE_UNUSED,
uint32_t imm = ModifiedImmediate(so.encodingThumb());
if (imm == kInvalidModifiedImmediate) {
LOG(FATAL) << "Immediate value cannot fit in thumb2 modified immediate";
+ UNREACHABLE();
}
encoding = B31 | B30 | B29 | B28 |
thumb_opcode << 21 |
@@ -979,6 +1004,7 @@ void Thumb2Assembler::Emit16BitDataProcessing(Condition cond,
if (thumb_opcode == 255U /* 0b11111111 */) {
LOG(FATAL) << "Invalid thumb1 opcode " << opcode;
+ UNREACHABLE();
}
int16_t encoding = dp_opcode << 14 |
@@ -1116,7 +1142,7 @@ void Thumb2Assembler::Emit16BitAddSub(Condition cond ATTRIBUTE_UNUSED,
break;
default:
LOG(FATAL) << "This opcode is not an ADD or SUB: " << opcode;
- return;
+ UNREACHABLE();
}
int16_t encoding = dp_opcode << 14 |
@@ -1157,6 +1183,7 @@ void Thumb2Assembler::EmitShift(Register rd, Register rm, Shift shift, uint8_t a
case RRX: opcode = 3U /* 0b11 */; amount = 0; break;
default:
LOG(FATAL) << "Unsupported thumb2 shift opcode";
+ UNREACHABLE();
}
// 32 bit.
int32_t encoding = B31 | B30 | B29 | B27 | B25 | B22 |
@@ -1174,7 +1201,8 @@ void Thumb2Assembler::EmitShift(Register rd, Register rm, Shift shift, uint8_t a
case LSR: opcode = 1U /* 0b01 */; break;
case ASR: opcode = 2U /* 0b10 */; break;
default:
- LOG(FATAL) << "Unsupported thumb2 shift opcode";
+ LOG(FATAL) << "Unsupported thumb2 shift opcode";
+ UNREACHABLE();
}
int16_t encoding = opcode << 11 | amount << 6 | static_cast<int16_t>(rm) << 3 |
static_cast<int16_t>(rd);
@@ -1198,6 +1226,7 @@ void Thumb2Assembler::EmitShift(Register rd, Register rn, Shift shift, Register
case ROR: opcode = 3U /* 0b11 */; break;
default:
LOG(FATAL) << "Unsupported thumb2 shift opcode";
+ UNREACHABLE();
}
// 32 bit.
int32_t encoding = B31 | B30 | B29 | B28 | B27 | B25 |
@@ -1212,7 +1241,8 @@ void Thumb2Assembler::EmitShift(Register rd, Register rn, Shift shift, Register
case LSR: opcode = 3U /* 0b0011 */; break;
case ASR: opcode = 4U /* 0b0100 */; break;
default:
- LOG(FATAL) << "Unsupported thumb2 shift opcode";
+ LOG(FATAL) << "Unsupported thumb2 shift opcode";
+ UNREACHABLE();
}
int16_t encoding = B14 | opcode << 6 | static_cast<int16_t>(rm) << 3 |
static_cast<int16_t>(rd);
@@ -1241,6 +1271,7 @@ void Thumb2Assembler::Branch::Emit(AssemblerBuffer* buffer) const {
} else {
if (x) {
LOG(FATAL) << "Invalid use of BX";
+ UNREACHABLE();
} else {
if (cond_ == AL) {
// Can use the T4 encoding allowing a 24 bit offset.
@@ -1469,6 +1500,15 @@ void Thumb2Assembler::EmitMultiMemOp(Condition cond,
CheckCondition(cond);
bool must_be_32bit = force_32bit_;
+ if (!must_be_32bit && base == SP && bam == (load ? IA_W : DB_W) &&
+ (regs & 0xff00 & ~(1 << (load ? PC : LR))) == 0) {
+ // Use 16-bit PUSH/POP.
+ int16_t encoding = B15 | B13 | B12 | (load ? B11 : 0) | B10 |
+ ((regs & (1 << (load ? PC : LR))) != 0 ? B8 : 0) | (regs & 0x00ff);
+ Emit16(encoding);
+ return;
+ }
+
if ((regs & 0xff00) != 0) {
must_be_32bit = true;
}
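Aside: the guard above admits, for example, pop {r0-r3, pc}, since the base is SP, the mode is IA_W and the only high register in the list is PC. Working the constants through by hand:

    // regs = 0x800f, load = true:
    //   B15|B13|B12|B11|B10   = 0xbc00   (16-bit POP opcode)
    //   PC in the list        -> B8 = 0x0100
    //   regs & 0x00ff         = 0x000f
    //   encoding              = 0xbd0f   ("pop {r0, r1, r2, r3, pc}")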
@@ -1495,6 +1535,7 @@ void Thumb2Assembler::EmitMultiMemOp(Condition cond,
case DA_W:
case IB_W:
LOG(FATAL) << "LDM/STM mode not supported on thumb: " << bam;
+ UNREACHABLE();
}
if (load) {
// Cannot have SP in the list.
@@ -1981,8 +2022,13 @@ void Thumb2Assembler::EmitVFPds(Condition cond, int32_t opcode,
void Thumb2Assembler::vmstat(Condition cond) { // VMRS APSR_nzcv, FPSCR.
+ CHECK_NE(cond, kNoCondition);
CheckCondition(cond);
- UNIMPLEMENTED(FATAL) << "Unimplemented thumb instruction";
+ int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+ B27 | B26 | B25 | B23 | B22 | B21 | B20 | B16 |
+ (static_cast<int32_t>(PC)*B12) |
+ B11 | B9 | B4;
+ Emit32(encoding);
}
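Aside, as a sanity check on the encoding above: with the default AL condition the ORed terms work out to the canonical VMRS encoding, which is what the new Vmstat tests below expect to disassemble as vmrs APSR_nzcv, FPSCR.

    // cond = AL (0xe)                  -> 0xe0000000
    // B27|B26|B25|B23|B22|B21|B20|B16  -> 0x0ef10000
    // PC * B12 (Rt = 15)               -> 0x0000f000
    // B11|B9|B4                        -> 0x00000a10
    // total                            -> 0xeef1fa10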
@@ -2068,6 +2114,7 @@ void Thumb2Assembler::cbz(Register rn, Label* label) {
CheckCondition(AL);
if (label->IsBound()) {
LOG(FATAL) << "cbz can only be used to branch forwards";
+ UNREACHABLE();
} else {
uint16_t branchid = EmitCompareAndBranch(rn, static_cast<uint16_t>(label->position_), false);
label->LinkTo(branchid);
@@ -2079,6 +2126,7 @@ void Thumb2Assembler::cbnz(Register rn, Label* label) {
CheckCondition(AL);
if (label->IsBound()) {
LOG(FATAL) << "cbnz can only be used to branch forwards";
+ UNREACHABLE();
} else {
uint16_t branchid = EmitCompareAndBranch(rn, static_cast<uint16_t>(label->position_), true);
label->LinkTo(branchid);
@@ -2360,16 +2408,16 @@ void Thumb2Assembler::AddConstant(Register rd, Register rn, int32_t value,
// positive values and sub for negative ones, which would slightly improve
// the readability of generated code for some constants.
ShifterOperand shifter_op;
- if (ShifterOperand::CanHoldThumb(rd, rn, ADD, value, &shifter_op)) {
+ if (ShifterOperandCanHold(rd, rn, ADD, value, &shifter_op)) {
add(rd, rn, shifter_op, cond);
- } else if (ShifterOperand::CanHoldThumb(rd, rn, SUB, -value, &shifter_op)) {
+ } else if (ShifterOperandCanHold(rd, rn, SUB, -value, &shifter_op)) {
sub(rd, rn, shifter_op, cond);
} else {
CHECK(rn != IP);
- if (ShifterOperand::CanHoldThumb(rd, rn, MVN, ~value, &shifter_op)) {
+ if (ShifterOperandCanHold(rd, rn, MVN, ~value, &shifter_op)) {
mvn(IP, shifter_op, cond);
add(rd, rn, ShifterOperand(IP), cond);
- } else if (ShifterOperand::CanHoldThumb(rd, rn, MVN, ~(-value), &shifter_op)) {
+ } else if (ShifterOperandCanHold(rd, rn, MVN, ~(-value), &shifter_op)) {
mvn(IP, shifter_op, cond);
sub(rd, rn, ShifterOperand(IP), cond);
} else {
@@ -2387,16 +2435,16 @@ void Thumb2Assembler::AddConstant(Register rd, Register rn, int32_t value,
void Thumb2Assembler::AddConstantSetFlags(Register rd, Register rn, int32_t value,
Condition cond) {
ShifterOperand shifter_op;
- if (ShifterOperand::CanHoldThumb(rd, rn, ADD, value, &shifter_op)) {
+ if (ShifterOperandCanHold(rd, rn, ADD, value, &shifter_op)) {
adds(rd, rn, shifter_op, cond);
- } else if (ShifterOperand::CanHoldThumb(rd, rn, ADD, -value, &shifter_op)) {
+ } else if (ShifterOperandCanHold(rd, rn, ADD, -value, &shifter_op)) {
subs(rd, rn, shifter_op, cond);
} else {
CHECK(rn != IP);
- if (ShifterOperand::CanHoldThumb(rd, rn, MVN, ~value, &shifter_op)) {
+ if (ShifterOperandCanHold(rd, rn, MVN, ~value, &shifter_op)) {
mvn(IP, shifter_op, cond);
adds(rd, rn, ShifterOperand(IP), cond);
- } else if (ShifterOperand::CanHoldThumb(rd, rn, MVN, ~(-value), &shifter_op)) {
+ } else if (ShifterOperandCanHold(rd, rn, MVN, ~(-value), &shifter_op)) {
mvn(IP, shifter_op, cond);
subs(rd, rn, ShifterOperand(IP), cond);
} else {
@@ -2410,11 +2458,12 @@ void Thumb2Assembler::AddConstantSetFlags(Register rd, Register rn, int32_t valu
}
}
+
void Thumb2Assembler::LoadImmediate(Register rd, int32_t value, Condition cond) {
ShifterOperand shifter_op;
- if (ShifterOperand::CanHoldThumb(rd, R0, MOV, value, &shifter_op)) {
+ if (ShifterOperandCanHold(rd, R0, MOV, value, &shifter_op)) {
mov(rd, shifter_op, cond);
- } else if (ShifterOperand::CanHoldThumb(rd, R0, MVN, ~value, &shifter_op)) {
+ } else if (ShifterOperandCanHold(rd, R0, MVN, ~value, &shifter_op)) {
mvn(rd, shifter_op, cond);
} else {
movw(rd, Low16Bits(value), cond);
@@ -2425,6 +2474,7 @@ void Thumb2Assembler::LoadImmediate(Register rd, int32_t value, Condition cond)
}
}
+
// Implementation note: this method must emit at most one instruction when
// Address::CanHoldLoadOffsetThumb.
void Thumb2Assembler::LoadFromOffset(LoadOperandType type,
diff --git a/compiler/utils/arm/assembler_thumb2.h b/compiler/utils/arm/assembler_thumb2.h
index cfa251acf2..48a3a7eeb2 100644
--- a/compiler/utils/arm/assembler_thumb2.h
+++ b/compiler/utils/arm/assembler_thumb2.h
@@ -304,6 +304,12 @@ class Thumb2Assembler FINAL : public ArmAssembler {
int32_t offset,
Condition cond = AL) OVERRIDE;
+ bool ShifterOperandCanHold(Register rd,
+ Register rn,
+ Opcode opcode,
+ uint32_t immediate,
+ ShifterOperand* shifter_op) OVERRIDE;
+
static bool IsInstructionForExceptionHandling(uintptr_t pc);
diff --git a/compiler/utils/arm/assembler_thumb2_test.cc b/compiler/utils/arm/assembler_thumb2_test.cc
index 65d6d45296..6ae95a40e6 100644
--- a/compiler/utils/arm/assembler_thumb2_test.cc
+++ b/compiler/utils/arm/assembler_thumb2_test.cc
@@ -30,7 +30,7 @@ class AssemblerThumb2Test : public AssemblerTest<arm::Thumb2Assembler,
}
std::string GetAssemblerParameters() OVERRIDE {
- return " -mthumb";
+ return " -mthumb -mfpu=neon";
}
std::string GetDisassembleParameters() OVERRIDE {
@@ -156,4 +156,12 @@ TEST_F(AssemblerThumb2Test, Ubfx) {
DriverStr(expected, "ubfx");
}
+TEST_F(AssemblerThumb2Test, Vmstat) {
+ GetAssembler()->vmstat();
+
+ const char* expected = "vmrs APSR_nzcv, FPSCR\n";
+
+ DriverStr(expected, "vmrs");
+}
+
} // namespace art
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index 390f2ea449..21014c8bba 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -329,12 +329,12 @@ void Arm64Assembler::Move(ManagedRegister m_dst, ManagedRegister m_src, size_t s
if (dst.IsXRegister()) {
if (size == 4) {
CHECK(src.IsWRegister());
- ___ Mov(reg_x(dst.AsXRegister()), reg_w(src.AsWRegister()));
+ ___ Mov(reg_w(dst.AsOverlappingWRegister()), reg_w(src.AsWRegister()));
} else {
if (src.IsXRegister()) {
___ Mov(reg_x(dst.AsXRegister()), reg_x(src.AsXRegister()));
} else {
- ___ Mov(reg_x(dst.AsXRegister()), reg_w(src.AsWRegister()));
+ ___ Mov(reg_x(dst.AsXRegister()), reg_x(src.AsOverlappingXRegister()));
}
}
} else if (dst.IsWRegister()) {
@@ -484,9 +484,9 @@ void Arm64Assembler::SignExtend(ManagedRegister mreg, size_t size) {
CHECK(size == 1 || size == 2) << size;
CHECK(reg.IsWRegister()) << reg;
if (size == 1) {
- ___ sxtb(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
+ ___ Sxtb(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
} else {
- ___ sxth(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
+ ___ Sxth(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
}
}
@@ -495,9 +495,9 @@ void Arm64Assembler::ZeroExtend(ManagedRegister mreg, size_t size) {
CHECK(size == 1 || size == 2) << size;
CHECK(reg.IsWRegister()) << reg;
if (size == 1) {
- ___ uxtb(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
+ ___ Uxtb(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
} else {
- ___ uxth(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
+ ___ Uxth(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
}
}
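Aside: the Move() change matters because an AArch64 write to a W register zero-extends into the full X register, so a 4-byte move into an X destination is expressed W-to-W (for example dst = X3, src = W5 now yields mov w3, w5); the sxtb/uxtb and sxth/uxth calls switch to VIXL's capitalized macro-assembler forms, presumably the intended entry points when emitting through the macro assembler.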
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index a297ea3b6e..f0353f6cd2 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -613,6 +613,23 @@ void X86Assembler::comisd(XmmRegister a, XmmRegister b) {
}
+void X86Assembler::ucomiss(XmmRegister a, XmmRegister b) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0x2E);
+ EmitXmmRegisterOperand(a, b);
+}
+
+
+void X86Assembler::ucomisd(XmmRegister a, XmmRegister b) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x2E);
+ EmitXmmRegisterOperand(a, b);
+}
+
+
void X86Assembler::sqrtsd(XmmRegister dst, XmmRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0xF2);
@@ -1318,13 +1335,19 @@ void X86Assembler::AddImmediate(Register reg, const Immediate& imm) {
}
+void X86Assembler::LoadLongConstant(XmmRegister dst, int64_t value) {
+ // TODO: Need to have a code constants table.
+ pushl(Immediate(High32Bits(value)));
+ pushl(Immediate(Low32Bits(value)));
+ movsd(dst, Address(ESP, 0));
+ addl(ESP, Immediate(2 * sizeof(int32_t)));
+}
+
+
void X86Assembler::LoadDoubleConstant(XmmRegister dst, double value) {
// TODO: Need to have a code constants table.
int64_t constant = bit_cast<int64_t, double>(value);
- pushl(Immediate(High32Bits(constant)));
- pushl(Immediate(Low32Bits(constant)));
- movsd(dst, Address(ESP, 0));
- addl(ESP, Immediate(2 * sizeof(intptr_t)));
+ LoadLongConstant(dst, constant);
}
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index 6ea66a5fa7..9fecf1edf0 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -42,8 +42,6 @@ class Immediate : public ValueObject {
private:
const int32_t value_;
-
- DISALLOW_COPY_AND_ASSIGN(Immediate);
};
@@ -301,6 +299,8 @@ class X86Assembler FINAL : public Assembler {
void comiss(XmmRegister a, XmmRegister b);
void comisd(XmmRegister a, XmmRegister b);
+ void ucomiss(XmmRegister a, XmmRegister b);
+ void ucomisd(XmmRegister a, XmmRegister b);
void sqrtsd(XmmRegister dst, XmmRegister src);
void sqrtss(XmmRegister dst, XmmRegister src);
@@ -441,6 +441,7 @@ class X86Assembler FINAL : public Assembler {
void AddImmediate(Register reg, const Immediate& imm);
+ void LoadLongConstant(XmmRegister dst, int64_t value);
void LoadDoubleConstant(XmmRegister dst, double value);
void DoubleNegate(XmmRegister d);
diff --git a/compiler/utils/x86/assembler_x86_test.cc b/compiler/utils/x86/assembler_x86_test.cc
index 5d8a3b1521..d901673691 100644
--- a/compiler/utils/x86/assembler_x86_test.cc
+++ b/compiler/utils/x86/assembler_x86_test.cc
@@ -16,7 +16,8 @@
#include "assembler_x86.h"
-#include "gtest/gtest.h"
+#include "base/stl_util.h"
+#include "utils/assembler_test.h"
namespace art {
@@ -29,4 +30,89 @@ TEST(AssemblerX86, CreateBuffer) {
ASSERT_EQ(static_cast<size_t>(5), buffer.Size());
}
+class AssemblerX86Test : public AssemblerTest<x86::X86Assembler, x86::Register,
+ x86::XmmRegister, x86::Immediate> {
+ protected:
+ std::string GetArchitectureString() OVERRIDE {
+ return "x86";
+ }
+
+ std::string GetAssemblerParameters() OVERRIDE {
+ return " --32";
+ }
+
+ std::string GetDisassembleParameters() OVERRIDE {
+ return " -D -bbinary -mi386 --no-show-raw-insn";
+ }
+
+ void SetUpHelpers() OVERRIDE {
+ if (registers_.size() == 0) {
+ registers_.insert(end(registers_),
+ { // NOLINT(whitespace/braces)
+ new x86::Register(x86::EAX),
+ new x86::Register(x86::EBX),
+ new x86::Register(x86::ECX),
+ new x86::Register(x86::EDX),
+ new x86::Register(x86::EBP),
+ new x86::Register(x86::ESP),
+ new x86::Register(x86::ESI),
+ new x86::Register(x86::EDI)
+ });
+ }
+
+ if (fp_registers_.size() == 0) {
+ fp_registers_.insert(end(fp_registers_),
+ { // NOLINT(whitespace/braces)
+ new x86::XmmRegister(x86::XMM0),
+ new x86::XmmRegister(x86::XMM1),
+ new x86::XmmRegister(x86::XMM2),
+ new x86::XmmRegister(x86::XMM3),
+ new x86::XmmRegister(x86::XMM4),
+ new x86::XmmRegister(x86::XMM5),
+ new x86::XmmRegister(x86::XMM6),
+ new x86::XmmRegister(x86::XMM7)
+ });
+ }
+ }
+
+ void TearDown() OVERRIDE {
+ AssemblerTest::TearDown();
+ STLDeleteElements(&registers_);
+ STLDeleteElements(&fp_registers_);
+ }
+
+ std::vector<x86::Register*> GetRegisters() OVERRIDE {
+ return registers_;
+ }
+
+ std::vector<x86::XmmRegister*> GetFPRegisters() OVERRIDE {
+ return fp_registers_;
+ }
+
+ x86::Immediate CreateImmediate(int64_t imm_value) OVERRIDE {
+ return x86::Immediate(imm_value);
+ }
+
+ private:
+ std::vector<x86::Register*> registers_;
+ std::vector<x86::XmmRegister*> fp_registers_;
+};
+
+
+TEST_F(AssemblerX86Test, Movl) {
+ GetAssembler()->movl(x86::EAX, x86::EBX);
+ const char* expected = "mov %ebx, %eax\n";
+ DriverStr(expected, "movl");
+}
+
+TEST_F(AssemblerX86Test, LoadLongConstant) {
+ GetAssembler()->LoadLongConstant(x86::XMM0, 51);
+ const char* expected =
+ "push $0x0\n"
+ "push $0x33\n"
+ "movsd 0(%esp), %xmm0\n"
+ "add $8, %esp\n";
+ DriverStr(expected, "LoadLongConstant");
+}
+
} // namespace art
diff --git a/compiler/utils/x86/constants_x86.h b/compiler/utils/x86/constants_x86.h
index 45c3834a98..2dfb65c479 100644
--- a/compiler/utils/x86/constants_x86.h
+++ b/compiler/utils/x86/constants_x86.h
@@ -96,7 +96,8 @@ enum Condition {
kZero = kEqual,
kNotZero = kNotEqual,
kNegative = kSign,
- kPositive = kNotSign
+ kPositive = kNotSign,
+ kUnordered = kParityEven
};
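Aside: kUnordered aliases kParityEven because ucomiss/ucomisd report an unordered (NaN) comparison by setting PF. A hedged usage sketch of how a code generator might combine the two additions, assuming the existing j(Condition, Label*) and Bind(Label*) entry points:

    x86::X86Assembler* a = GetAssembler();   // Hypothetical accessor, for illustration only.
    Label nan, done;
    a->ucomiss(x86::XMM0, x86::XMM1);
    a->j(x86::kUnordered, &nan);              // PF == 1: at least one operand was NaN.
    a->j(x86::kAbove, &done);                 // CF == 0 && ZF == 0: xmm0 > xmm1.
    a->Bind(&nan);                            // NaN handling would go here.
    a->Bind(&done);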
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index dff3849076..474d8a909e 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -593,9 +593,19 @@ void X86_64Assembler::divsd(XmmRegister dst, const Address& src) {
void X86_64Assembler::cvtsi2ss(XmmRegister dst, CpuRegister src) {
+ cvtsi2ss(dst, src, false);
+}
+
+
+void X86_64Assembler::cvtsi2ss(XmmRegister dst, CpuRegister src, bool is64bit) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0xF3);
- EmitOptionalRex32(dst, src);
+ if (is64bit) {
+ // Emit a REX.W prefix if the operand size is 64 bits.
+ EmitRex64(dst, src);
+ } else {
+ EmitOptionalRex32(dst, src);
+ }
EmitUint8(0x0F);
EmitUint8(0x2A);
EmitOperand(dst.LowBits(), Operand(src));
@@ -603,9 +613,19 @@ void X86_64Assembler::cvtsi2ss(XmmRegister dst, CpuRegister src) {
void X86_64Assembler::cvtsi2sd(XmmRegister dst, CpuRegister src) {
+ cvtsi2sd(dst, src, false);
+}
+
+
+void X86_64Assembler::cvtsi2sd(XmmRegister dst, CpuRegister src, bool is64bit) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0xF2);
- EmitOptionalRex32(dst, src);
+ if (is64bit) {
+ // Emit a REX.W prefix if the operand size is 64 bits.
+ EmitRex64(dst, src);
+ } else {
+ EmitOptionalRex32(dst, src);
+ }
EmitUint8(0x0F);
EmitUint8(0x2A);
EmitOperand(dst.LowBits(), Operand(src));
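Aside: the new three-argument overloads pick the r/m64 form through REX.W, so a 64-bit GPR (a Java long, say) converts in one instruction instead of needing a separate widening move. A minimal sketch, with the byte sequence worked out by hand and AT&T-style disassembly assumed:

    x86_64::X86_64Assembler* a = GetAssembler();   // Hypothetical accessor, for illustration only.
    a->cvtsi2sd(x86_64::XmmRegister(x86_64::XMM0),
                x86_64::CpuRegister(x86_64::RAX), /* is64bit= */ true);
    // Emits f2 48 0f 2a c0, i.e. "cvtsi2sd %rax, %xmm0" (0x48 is the REX.W prefix).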
@@ -700,6 +720,24 @@ void X86_64Assembler::comisd(XmmRegister a, XmmRegister b) {
EmitXmmRegisterOperand(a.LowBits(), b);
}
+void X86_64Assembler::ucomiss(XmmRegister a, XmmRegister b) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitOptionalRex32(a, b);
+ EmitUint8(0x0F);
+ EmitUint8(0x2E);
+ EmitXmmRegisterOperand(a.LowBits(), b);
+}
+
+
+void X86_64Assembler::ucomisd(XmmRegister a, XmmRegister b) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitOptionalRex32(a, b);
+ EmitUint8(0x0F);
+ EmitUint8(0x2E);
+ EmitXmmRegisterOperand(a.LowBits(), b);
+}
+
void X86_64Assembler::sqrtsd(XmmRegister dst, XmmRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index ab1bc9e97d..6e71e4a5bb 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -329,7 +329,9 @@ class X86_64Assembler FINAL : public Assembler {
void divsd(XmmRegister dst, const Address& src);
void cvtsi2ss(XmmRegister dst, CpuRegister src); // Note: this is the r/m32 version.
+ void cvtsi2ss(XmmRegister dst, CpuRegister src, bool is64bit);
void cvtsi2sd(XmmRegister dst, CpuRegister src); // Note: this is the r/m32 version.
+ void cvtsi2sd(XmmRegister dst, CpuRegister src, bool is64bit);
void cvtss2si(CpuRegister dst, XmmRegister src); // Note: this is the r32 version.
void cvtss2sd(XmmRegister dst, XmmRegister src);
@@ -344,6 +346,8 @@ class X86_64Assembler FINAL : public Assembler {
void comiss(XmmRegister a, XmmRegister b);
void comisd(XmmRegister a, XmmRegister b);
+ void ucomiss(XmmRegister a, XmmRegister b);
+ void ucomisd(XmmRegister a, XmmRegister b);
void sqrtsd(XmmRegister dst, XmmRegister src);
void sqrtss(XmmRegister dst, XmmRegister src);
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc
index 14a98b9359..c8e923c9d6 100644
--- a/compiler/utils/x86_64/assembler_x86_64_test.cc
+++ b/compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -660,6 +660,14 @@ TEST_F(AssemblerX86_64Test, Comisd) {
DriverStr(RepeatFF(&x86_64::X86_64Assembler::comisd, "comisd %{reg2}, %{reg1}"), "comisd");
}
+TEST_F(AssemblerX86_64Test, Ucomiss) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::ucomiss, "ucomiss %{reg2}, %{reg1}"), "ucomiss");
+}
+
+TEST_F(AssemblerX86_64Test, Ucomisd) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::ucomisd, "ucomisd %{reg2}, %{reg1}"), "ucomisd");
+}
+
TEST_F(AssemblerX86_64Test, Sqrtss) {
DriverStr(RepeatFF(&x86_64::X86_64Assembler::sqrtss, "sqrtss %{reg2}, %{reg1}"), "sqrtss");
}
diff --git a/compiler/utils/x86_64/constants_x86_64.h b/compiler/utils/x86_64/constants_x86_64.h
index 2a5b43da46..0c782d46cd 100644
--- a/compiler/utils/x86_64/constants_x86_64.h
+++ b/compiler/utils/x86_64/constants_x86_64.h
@@ -105,7 +105,8 @@ enum Condition {
kZero = kEqual,
kNotZero = kNotEqual,
kNegative = kSign,
- kPositive = kNotSign
+ kPositive = kNotSign,
+ kUnordered = kParityEven
};