-rw-r--r--  compiler/dex/quick/arm/arm_lir.h                         |    2
-rw-r--r--  compiler/dex/quick/arm/codegen_arm.h                     |    4
-rw-r--r--  compiler/dex/quick/arm/fp_arm.cc                         |   26
-rw-r--r--  compiler/dex/quick/arm/int_arm.cc                        |    2
-rw-r--r--  compiler/dex/quick/arm64/codegen_arm64.h                 |    4
-rw-r--r--  compiler/dex/quick/arm64/fp_arm64.cc                     |   26
-rw-r--r--  compiler/dex/quick/gen_common.cc                         |   28
-rw-r--r--  compiler/dex/quick/gen_loadstore.cc                      |    6
-rw-r--r--  compiler/dex/quick/mips/codegen_mips.h                   |    4
-rw-r--r--  compiler/dex/quick/mips/fp_mips.cc                       |   14
-rw-r--r--  compiler/dex/quick/mir_to_lir.cc                         |   16
-rw-r--r--  compiler/dex/quick/mir_to_lir.h                          |   25
-rw-r--r--  compiler/dex/quick/x86/assemble_x86.cc                   |    2
-rw-r--r--  compiler/dex/quick/x86/codegen_x86.h                     |    4
-rwxr-xr-x  compiler/dex/quick/x86/fp_x86.cc                         |   14
-rw-r--r--  compiler/optimizing/builder.cc                           |  220
-rw-r--r--  compiler/optimizing/builder.h                            |   13
-rw-r--r--  compiler/optimizing/code_generator.cc                    |   41
-rw-r--r--  compiler/optimizing/code_generator.h                     |    1
-rw-r--r--  compiler/optimizing/code_generator_arm.cc                |  189
-rw-r--r--  compiler/optimizing/code_generator_arm.h                 |   52
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc              |    9
-rw-r--r--  compiler/optimizing/code_generator_arm64.h               |   47
-rw-r--r--  compiler/optimizing/code_generator_x86.cc                |  225
-rw-r--r--  compiler/optimizing/code_generator_x86.h                 |   52
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc             |  249
-rw-r--r--  compiler/optimizing/code_generator_x86_64.h              |   56
-rw-r--r--  compiler/optimizing/codegen_test.cc                      |  120
-rw-r--r--  compiler/optimizing/constant_folding_test.cc             |    3
-rw-r--r--  compiler/optimizing/nodes.h                              |  105
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc               |   33
-rw-r--r--  compiler/optimizing/optimizing_unit_test.h               |   18
-rw-r--r--  compiler/optimizing/prepare_for_register_allocation.cc   |    4
-rw-r--r--  compiler/optimizing/prepare_for_register_allocation.h    |    1
-rw-r--r--  compiler/optimizing/register_allocator.cc                |   14
-rw-r--r--  compiler/optimizing/register_allocator_test.cc           |   41
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64.cc                |   16
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64.h                 |    9
-rw-r--r--  runtime/arch/arm/asm_support_arm.h                       |    2
-rw-r--r--  runtime/arch/arm/memcmp16_arm.S                          |   45
-rw-r--r--  runtime/mirror/art_method.cc                             |   10
-rw-r--r--  runtime/mirror/art_method.h                              |    3
-rw-r--r--  runtime/oat.cc                                           |    2
-rw-r--r--  test/004-ReferenceMap/stack_walk_refmap_jni.cc           |   45
-rw-r--r--  test/401-optimizing-compiler/src/Main.java               |   12
-rw-r--r--  test/415-optimizing-arith-neg/src/Main.java              |  135
-rw-r--r--  test/417-optimizing-arith-div/src/Main.java              |   55
-rw-r--r--  test/421-exceptions/expected.txt                         |   20
-rw-r--r--  test/421-exceptions/info.txt                             |    1
-rw-r--r--  test/421-exceptions/src/Main.java                        |   65
-rw-r--r--  test/422-type-conversion/expected.txt                    |    0
-rw-r--r--  test/422-type-conversion/info.txt                        |    1
-rw-r--r--  test/422-type-conversion/src/Main.java                   |   80
-rw-r--r--  test/703-floating-point-div/expected.txt                 |    1
-rw-r--r--  test/703-floating-point-div/info.txt                     |    1
-rw-r--r--  test/703-floating-point-div/src/Main.java                |   90
-rw-r--r--  test/Android.run-test.mk                                 |   35
57 files changed, 1994 insertions, 304 deletions
diff --git a/compiler/dex/quick/arm/arm_lir.h b/compiler/dex/quick/arm/arm_lir.h
index b2db36d831..4c7f87433d 100644
--- a/compiler/dex/quick/arm/arm_lir.h
+++ b/compiler/dex/quick/arm/arm_lir.h
@@ -97,7 +97,7 @@ namespace art {
// First FP callee save.
#define ARM_FP_CALLEE_SAVE_BASE 16
// Flag for using R4 to do suspend check
-#define ARM_R4_SUSPEND_FLAG
+// #define ARM_R4_SUSPEND_FLAG
enum ArmResourceEncodingPos {
kArmGPReg0 = 0,
diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h
index 179ba02175..d2351997b7 100644
--- a/compiler/dex/quick/arm/codegen_arm.h
+++ b/compiler/dex/quick/arm/codegen_arm.h
@@ -90,6 +90,10 @@ class ArmMir2Lir FINAL : public Mir2Lir {
bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
RegLocation rl_dest, int lit);
bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
+ void GenMultiplyByConstantFloat(RegLocation rl_dest, RegLocation rl_src1,
+ int32_t constant) OVERRIDE;
+ void GenMultiplyByConstantDouble(RegLocation rl_dest, RegLocation rl_src1,
+ int64_t constant) OVERRIDE;
LIR* CheckSuspendUsingLoad() OVERRIDE;
RegStorage LoadHelper(QuickEntrypointEnum trampoline) OVERRIDE;
LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
diff --git a/compiler/dex/quick/arm/fp_arm.cc b/compiler/dex/quick/arm/fp_arm.cc
index 3eb7c83c11..2b2592d5db 100644
--- a/compiler/dex/quick/arm/fp_arm.cc
+++ b/compiler/dex/quick/arm/fp_arm.cc
@@ -113,6 +113,32 @@ void ArmMir2Lir::GenArithOpDouble(Instruction::Code opcode,
StoreValueWide(rl_dest, rl_result);
}
+void ArmMir2Lir::GenMultiplyByConstantFloat(RegLocation rl_dest, RegLocation rl_src1,
+ int32_t constant) {
+ RegLocation rl_result;
+ RegStorage r_tmp = AllocTempSingle();
+ LoadConstantNoClobber(r_tmp, constant);
+ rl_src1 = LoadValue(rl_src1, kFPReg);
+ rl_result = EvalLoc(rl_dest, kFPReg, true);
+ NewLIR3(kThumb2Vmuls, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), r_tmp.GetReg());
+ StoreValue(rl_dest, rl_result);
+}
+
+void ArmMir2Lir::GenMultiplyByConstantDouble(RegLocation rl_dest, RegLocation rl_src1,
+ int64_t constant) {
+ RegLocation rl_result;
+ RegStorage r_tmp = AllocTempDouble();
+ DCHECK(r_tmp.IsDouble());
+ LoadConstantWide(r_tmp, constant);
+ rl_src1 = LoadValueWide(rl_src1, kFPReg);
+ DCHECK(rl_src1.wide);
+ rl_result = EvalLocWide(rl_dest, kFPReg, true);
+ DCHECK(rl_dest.wide);
+ DCHECK(rl_result.wide);
+ NewLIR3(kThumb2Vmuld, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), r_tmp.GetReg());
+ StoreValueWide(rl_dest, rl_result);
+}
+
void ArmMir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src) {
int op = kThumbBkpt;
int src_reg;
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index ebf1905579..57544b5187 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -1102,7 +1102,7 @@ LIR* ArmMir2Lir::OpTestSuspend(LIR* target) {
#else
RegStorage t_reg = AllocTemp();
LoadBaseDisp(rs_rARM_SELF, Thread::ThreadFlagsOffset<4>().Int32Value(),
- t_reg, kUnsignedHalf);
+ t_reg, kUnsignedHalf, kNotVolatile);
LIR* cmp_branch = OpCmpImmBranch((target == NULL) ? kCondNe : kCondEq, t_reg,
0, target);
FreeTemp(t_reg);
diff --git a/compiler/dex/quick/arm64/codegen_arm64.h b/compiler/dex/quick/arm64/codegen_arm64.h
index bd363c4fd2..5182a89474 100644
--- a/compiler/dex/quick/arm64/codegen_arm64.h
+++ b/compiler/dex/quick/arm64/codegen_arm64.h
@@ -71,6 +71,10 @@ class Arm64Mir2Lir FINAL : public Mir2Lir {
bool HandleEasyDivRem64(Instruction::Code dalvik_opcode, bool is_div,
RegLocation rl_src, RegLocation rl_dest, int64_t lit);
bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
+ void GenMultiplyByConstantFloat(RegLocation rl_dest, RegLocation rl_src1,
+ int32_t constant) OVERRIDE;
+ void GenMultiplyByConstantDouble(RegLocation rl_dest, RegLocation rl_src1,
+ int64_t constant) OVERRIDE;
LIR* CheckSuspendUsingLoad() OVERRIDE;
RegStorage LoadHelper(QuickEntrypointEnum trampoline) OVERRIDE;
LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
diff --git a/compiler/dex/quick/arm64/fp_arm64.cc b/compiler/dex/quick/arm64/fp_arm64.cc
index db24d124ab..ff692b77ac 100644
--- a/compiler/dex/quick/arm64/fp_arm64.cc
+++ b/compiler/dex/quick/arm64/fp_arm64.cc
@@ -116,6 +116,32 @@ void Arm64Mir2Lir::GenArithOpDouble(Instruction::Code opcode,
StoreValueWide(rl_dest, rl_result);
}
+void Arm64Mir2Lir::GenMultiplyByConstantFloat(RegLocation rl_dest, RegLocation rl_src1,
+ int32_t constant) {
+ RegLocation rl_result;
+ RegStorage r_tmp = AllocTempSingle();
+ LoadConstantNoClobber(r_tmp, constant);
+ rl_src1 = LoadValue(rl_src1, kFPReg);
+ rl_result = EvalLoc(rl_dest, kFPReg, true);
+ NewLIR3(kA64Fmul3fff, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), r_tmp.GetReg());
+ StoreValue(rl_dest, rl_result);
+}
+
+void Arm64Mir2Lir::GenMultiplyByConstantDouble(RegLocation rl_dest, RegLocation rl_src1,
+ int64_t constant) {
+ RegLocation rl_result;
+ RegStorage r_tmp = AllocTempDouble();
+ DCHECK(r_tmp.IsDouble());
+ LoadConstantWide(r_tmp, constant);
+ rl_src1 = LoadValueWide(rl_src1, kFPReg);
+ DCHECK(rl_src1.wide);
+ rl_result = EvalLocWide(rl_dest, kFPReg, true);
+ DCHECK(rl_dest.wide);
+ DCHECK(rl_result.wide);
+ NewLIR3(WIDE(kA64Fmul3fff), rl_result.reg.GetReg(), rl_src1.reg.GetReg(), r_tmp.GetReg());
+ StoreValueWide(rl_dest, rl_result);
+}
+
void Arm64Mir2Lir::GenConversion(Instruction::Code opcode,
RegLocation rl_dest, RegLocation rl_src) {
int op = kA64Brk1d;
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index c5aa27c324..061ee0747a 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -1785,6 +1785,34 @@ bool Mir2Lir::HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int li
return true;
}
+// Returns true if it generates instructions.
+bool Mir2Lir::HandleEasyFloatingPointDiv(RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2) {
+ if (!rl_src2.is_const ||
+ ((cu_->instruction_set != kThumb2) && (cu_->instruction_set != kArm64))) {
+ return false;
+ }
+
+ if (!rl_src2.wide) {
+ int32_t divisor = mir_graph_->ConstantValue(rl_src2);
+ if (CanDivideByReciprocalMultiplyFloat(divisor)) {
+ // Generate multiply by reciprocal instead of div.
+ float recip = 1.0f/bit_cast<int32_t, float>(divisor);
+ GenMultiplyByConstantFloat(rl_dest, rl_src1, bit_cast<float, int32_t>(recip));
+ return true;
+ }
+ } else {
+ int64_t divisor = mir_graph_->ConstantValueWide(rl_src2);
+ if (CanDivideByReciprocalMultiplyDouble(divisor)) {
+ // Generate multiply by reciprocal instead of div.
+ double recip = 1.0/bit_cast<double, int64_t>(divisor);
+ GenMultiplyByConstantDouble(rl_dest, rl_src1, bit_cast<double, int64_t>(recip));
+ return true;
+ }
+ }
+ return false;
+}
+
void Mir2Lir::GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src,
int lit) {
RegLocation rl_result;
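
Note: HandleEasyFloatingPointDiv above only fires when the divisor is a compile-time constant and the target is Thumb2 or ARM64, the only backends that implement the GenMultiplyByConstant hooks. A standalone C++ sketch of the rewrite it performs, with BitsToFloat standing in for the bit_cast calls (the names here are illustrative, not ART's):

    #include <cstdint>
    #include <cstring>

    // Reinterpret the raw Dex constant bits as a float; memcpy is the
    // portable equivalent of the bit_cast calls in the hunk above.
    static float BitsToFloat(int32_t bits) {
      float f;
      std::memcpy(&f, &bits, sizeof(f));
      return f;
    }

    // x / d is rewritten as x * (1.0f / d): the reciprocal is computed once
    // at compile time, so the generated code contains only a multiply.
    float DivByReciprocal(float x, int32_t divisor_bits) {
      float recip = 1.0f / BitsToFloat(divisor_bits);
      return x * recip;
    }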
diff --git a/compiler/dex/quick/gen_loadstore.cc b/compiler/dex/quick/gen_loadstore.cc
index 39b40a09a2..d3146018d6 100644
--- a/compiler/dex/quick/gen_loadstore.cc
+++ b/compiler/dex/quick/gen_loadstore.cc
@@ -149,8 +149,9 @@ RegLocation Mir2Lir::LoadValue(RegLocation rl_src, RegisterClass op_kind) {
// Wrong register class, realloc, copy and transfer ownership.
RegStorage new_reg = AllocTypedTemp(rl_src.fp, op_kind);
OpRegCopy(new_reg, rl_src.reg);
- // Clobber the old reg.
+ // Clobber the old reg and free it.
Clobber(rl_src.reg);
+ FreeTemp(rl_src.reg);
// ...and mark the new one live.
rl_src.reg = new_reg;
MarkLive(rl_src);
@@ -232,8 +233,9 @@ RegLocation Mir2Lir::LoadValueWide(RegLocation rl_src, RegisterClass op_kind) {
// Wrong register class, realloc, copy and transfer ownership.
RegStorage new_regs = AllocTypedTempWide(rl_src.fp, op_kind);
OpRegCopyWide(new_regs, rl_src.reg);
- // Clobber the old regs.
+ // Clobber the old regs and free them.
Clobber(rl_src.reg);
+ FreeTemp(rl_src.reg);
// ...and mark the new ones live.
rl_src.reg = new_regs;
MarkLive(rl_src);
diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h
index dc6930c45b..7e9d80df65 100644
--- a/compiler/dex/quick/mips/codegen_mips.h
+++ b/compiler/dex/quick/mips/codegen_mips.h
@@ -31,6 +31,10 @@ class MipsMir2Lir FINAL : public Mir2Lir {
bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
RegLocation rl_dest, int lit);
bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
+ void GenMultiplyByConstantFloat(RegLocation rl_dest, RegLocation rl_src1,
+ int32_t constant) OVERRIDE;
+ void GenMultiplyByConstantDouble(RegLocation rl_dest, RegLocation rl_src1,
+ int64_t constant) OVERRIDE;
LIR* CheckSuspendUsingLoad() OVERRIDE;
RegStorage LoadHelper(QuickEntrypointEnum trampoline) OVERRIDE;
LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
diff --git a/compiler/dex/quick/mips/fp_mips.cc b/compiler/dex/quick/mips/fp_mips.cc
index 431591512e..0a7aa99c98 100644
--- a/compiler/dex/quick/mips/fp_mips.cc
+++ b/compiler/dex/quick/mips/fp_mips.cc
@@ -113,6 +113,20 @@ void MipsMir2Lir::GenArithOpDouble(Instruction::Code opcode,
StoreValueWide(rl_dest, rl_result);
}
+void MipsMir2Lir::GenMultiplyByConstantFloat(RegLocation rl_dest, RegLocation rl_src1,
+ int32_t constant) {
+ // TODO: need mips implementation.
+ UNUSED(rl_dest, rl_src1, constant);
+ LOG(FATAL) << "Unimplemented GenMultiplyByConstantFloat in mips";
+}
+
+void MipsMir2Lir::GenMultiplyByConstantDouble(RegLocation rl_dest, RegLocation rl_src1,
+ int64_t constant) {
+ // TODO: need mips implementation.
+ UNUSED(rl_dest, rl_src1, constant);
+ LOG(FATAL) << "Unimplemented GenMultiplyByConstantDouble in mips";
+}
+
void MipsMir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src) {
int op = kMipsNop;
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 533a6778b5..ccaa167d6a 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -1052,28 +1052,36 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
}
break;
+ case Instruction::DIV_FLOAT:
+ case Instruction::DIV_FLOAT_2ADDR:
+ if (HandleEasyFloatingPointDiv(rl_dest, rl_src[0], rl_src[1])) {
+ break;
+ }
+ FALLTHROUGH_INTENDED;
case Instruction::ADD_FLOAT:
case Instruction::SUB_FLOAT:
case Instruction::MUL_FLOAT:
- case Instruction::DIV_FLOAT:
case Instruction::REM_FLOAT:
case Instruction::ADD_FLOAT_2ADDR:
case Instruction::SUB_FLOAT_2ADDR:
case Instruction::MUL_FLOAT_2ADDR:
- case Instruction::DIV_FLOAT_2ADDR:
case Instruction::REM_FLOAT_2ADDR:
GenArithOpFloat(opcode, rl_dest, rl_src[0], rl_src[1]);
break;
+ case Instruction::DIV_DOUBLE:
+ case Instruction::DIV_DOUBLE_2ADDR:
+ if (HandleEasyFloatingPointDiv(rl_dest, rl_src[0], rl_src[1])) {
+ break;
+ }
+ FALLTHROUGH_INTENDED;
case Instruction::ADD_DOUBLE:
case Instruction::SUB_DOUBLE:
case Instruction::MUL_DOUBLE:
- case Instruction::DIV_DOUBLE:
case Instruction::REM_DOUBLE:
case Instruction::ADD_DOUBLE_2ADDR:
case Instruction::SUB_DOUBLE_2ADDR:
case Instruction::MUL_DOUBLE_2ADDR:
- case Instruction::DIV_DOUBLE_2ADDR:
case Instruction::REM_DOUBLE_2ADDR:
GenArithOpDouble(opcode, rl_dest, rl_src[0], rl_src[1]);
break;
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 4623f79466..bacc6d2e41 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -789,6 +789,7 @@ class Mir2Lir : public Backend {
virtual bool HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
RegLocation rl_src, RegLocation rl_dest, int lit);
bool HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit);
+ bool HandleEasyFloatingPointDiv(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
virtual void HandleSlowPaths();
void GenBarrier();
void GenDivZeroException();
@@ -1120,6 +1121,10 @@ class Mir2Lir : public Backend {
virtual bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
RegLocation rl_src, RegLocation rl_dest, int lit) = 0;
virtual bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) = 0;
+ virtual void GenMultiplyByConstantFloat(RegLocation rl_dest, RegLocation rl_src1,
+ int32_t constant) = 0;
+ virtual void GenMultiplyByConstantDouble(RegLocation rl_dest, RegLocation rl_src1,
+ int64_t constant) = 0;
virtual LIR* CheckSuspendUsingLoad() = 0;
virtual RegStorage LoadHelper(QuickEntrypointEnum trampoline) = 0;
@@ -1439,6 +1444,26 @@ class Mir2Lir : public Backend {
return InexpensiveConstantInt(value);
}
+ /**
+ * @brief Whether division by the given divisor can be converted to multiplication by its reciprocal.
+ * @param divisor The raw bits of a constant float divisor.
+ * @return True iff x/divisor == x*(1.0f/divisor) for every float x.
+ */
+ bool CanDivideByReciprocalMultiplyFloat(int32_t divisor) {
+ // True if the float's significand bits are all zero.
+ return ((divisor & 0x7fffff) == 0);
+ }
+
+ /**
+ * @brief Whether division by the given divisor can be converted to multiplication by its reciprocal.
+ * @param divisor The raw bits of a constant double divisor.
+ * @return True iff x/divisor == x*(1.0/divisor) for every double x.
+ */
+ bool CanDivideByReciprocalMultiplyDouble(int64_t divisor) {
+ // True if the double's significand bits are all zero.
+ return ((divisor & ((UINT64_C(1) << 52) - 1)) == 0);
+ }
+
// May be optimized by targets.
virtual void GenMonitorEnter(int opt_flags, RegLocation rl_src);
virtual void GenMonitorExit(int opt_flags, RegLocation rl_src);
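
Note: the significand test above is what makes the rewrite exact. A float whose 23 fraction bits are all zero is +/-2^k (or a signed zero or infinity), and the reciprocal of a power of two is itself a power of two, so x * (1.0f/divisor) rounds identically to x / divisor. A minimal self-check of the predicate, assuming only the IEEE-754 single-precision layout:

    #include <cassert>
    #include <cstdint>

    // Same predicate as CanDivideByReciprocalMultiplyFloat: the low 23 bits
    // of an IEEE-754 single are its fraction; all zero means the value is
    // determined by the sign and exponent fields alone.
    static bool SignificandIsZero(int32_t bits) {
      return (bits & 0x7fffff) == 0;
    }

    int main() {
      assert(SignificandIsZero(0x40000000));   // 2.0f
      assert(SignificandIsZero(0x3f000000));   // 0.5f
      assert(!SignificandIsZero(0x40400000));  // 3.0f: fraction bits set
      return 0;
    }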
diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc
index 85a3b3210d..ef55054d6d 100644
--- a/compiler/dex/quick/x86/assemble_x86.cc
+++ b/compiler/dex/quick/x86/assemble_x86.cc
@@ -541,7 +541,7 @@ ENCODING_MAP(Cmp, IS_LOAD, 0, 0,
{ kX86CallI, kCall, IS_UNARY_OP | IS_BRANCH, { 0, 0, 0xE8, 0, 0, 0, 0, 4, false }, "CallI", "!0d" },
{ kX86Ret, kNullary, NO_OPERAND | IS_BRANCH, { 0, 0, 0xC3, 0, 0, 0, 0, 0, false }, "Ret", "" },
- { kX86StartOfMethod, kMacro, IS_UNARY_OP | SETS_CCODES, { 0, 0, 0, 0, 0, 0, 0, 0, false }, "StartOfMethod", "!0r" },
+ { kX86StartOfMethod, kMacro, IS_UNARY_OP | REG_DEF0 | SETS_CCODES, { 0, 0, 0, 0, 0, 0, 0, 0, false }, "StartOfMethod", "!0r" },
{ kX86PcRelLoadRA, kPcRel, IS_LOAD | IS_QUIN_OP | REG_DEF0_USE12, { 0, 0, 0x8B, 0, 0, 0, 0, 0, false }, "PcRelLoadRA", "!0r,[!1r+!2r<<!3d+!4p]" },
{ kX86PcRelAdr, kPcRel, IS_LOAD | IS_BINARY_OP | REG_DEF0, { 0, 0, 0xB8, 0, 0, 0, 0, 4, false }, "PcRelAdr", "!0r,!1p" },
{ kX86RepneScasw, kNullary, NO_OPERAND | REG_USEA | REG_USEC | SETS_CCODES, { 0x66, 0xF2, 0xAF, 0, 0, 0, 0, 0, false }, "RepNE ScasW", "" },
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index dec99aefab..4412a1e254 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -78,6 +78,10 @@ class X86Mir2Lir : public Mir2Lir {
bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
RegLocation rl_dest, int lit) OVERRIDE;
bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
+ void GenMultiplyByConstantFloat(RegLocation rl_dest, RegLocation rl_src1,
+ int32_t constant) OVERRIDE;
+ void GenMultiplyByConstantDouble(RegLocation rl_dest, RegLocation rl_src1,
+ int64_t constant) OVERRIDE;
LIR* CheckSuspendUsingLoad() OVERRIDE;
RegStorage LoadHelper(QuickEntrypointEnum trampoline) OVERRIDE;
LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
index 254d90fa8c..33bb0eeb76 100755
--- a/compiler/dex/quick/x86/fp_x86.cc
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -122,6 +122,20 @@ void X86Mir2Lir::GenArithOpDouble(Instruction::Code opcode,
StoreValueWide(rl_dest, rl_result);
}
+void X86Mir2Lir::GenMultiplyByConstantFloat(RegLocation rl_dest, RegLocation rl_src1,
+ int32_t constant) {
+ // TODO: need x86 implementation.
+ UNUSED(rl_dest, rl_src1, constant);
+ LOG(FATAL) << "Unimplemented GenMultiplyByConstantFloat in x86";
+}
+
+void X86Mir2Lir::GenMultiplyByConstantDouble(RegLocation rl_dest, RegLocation rl_src1,
+ int64_t constant) {
+ // TODO: need x86 implementation.
+ UNUSED(rl_dest, rl_src1, constant);
+ LOG(FATAL) << "Unimplemented GenMultiplyByConstantDouble in x86";
+}
+
void X86Mir2Lir::GenLongToFP(RegLocation rl_dest, RegLocation rl_src, bool is_double) {
// Compute offsets to the source and destination VRs on stack
int src_v_reg_offset = SRegOffset(rl_src.s_reg_low);
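
Note: these x86 stubs (and the MIPS ones earlier) are expected to be unreachable. The dispatcher in gen_common.cc filters on the instruction set before ever calling them, so the LOG(FATAL) bodies merely document that the hooks are unimplemented. The relevant guard, quoted from the gen_common.cc hunk above:

    // From Mir2Lir::HandleEasyFloatingPointDiv: bail out unless the divisor
    // is constant and the backend implements the reciprocal-multiply hooks.
    if (!rl_src2.is_const ||
        ((cu_->instruction_set != kThumb2) && (cu_->instruction_set != kArm64))) {
      return false;
    }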
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index d168fc80f1..64fb764937 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -119,13 +119,6 @@ bool HGraphBuilder::InitializeParameters(uint16_t number_of_parameters) {
return true;
}
-static bool CanHandleCodeItem(const DexFile::CodeItem& code_item) {
- if (code_item.tries_size_ > 0) {
- return false;
- }
- return true;
-}
-
template<typename T>
void HGraphBuilder::If_22t(const Instruction& instruction, uint32_t dex_offset) {
int32_t target_offset = instruction.GetTargetOffset();
@@ -164,10 +157,6 @@ void HGraphBuilder::If_21t(const Instruction& instruction, uint32_t dex_offset)
}
HGraph* HGraphBuilder::BuildGraph(const DexFile::CodeItem& code_item) {
- if (!CanHandleCodeItem(code_item)) {
- return nullptr;
- }
-
const uint16_t* code_ptr = code_item.insns_;
const uint16_t* code_end = code_item.insns_ + code_item.insns_size_in_code_units_;
code_start_ = code_ptr;
@@ -187,6 +176,25 @@ HGraph* HGraphBuilder::BuildGraph(const DexFile::CodeItem& code_item) {
// start a new block, and create these blocks.
ComputeBranchTargets(code_ptr, code_end);
+ // Also create blocks for catch handlers.
+ if (code_item.tries_size_ != 0) {
+ const uint8_t* handlers_ptr = DexFile::GetCatchHandlerData(code_item, 0);
+ uint32_t handlers_size = DecodeUnsignedLeb128(&handlers_ptr);
+ for (uint32_t idx = 0; idx < handlers_size; ++idx) {
+ CatchHandlerIterator iterator(handlers_ptr);
+ for (; iterator.HasNext(); iterator.Next()) {
+ uint32_t address = iterator.GetHandlerAddress();
+ HBasicBlock* block = FindBlockStartingAt(address);
+ if (block == nullptr) {
+ block = new (arena_) HBasicBlock(graph_, address);
+ branch_targets_.Put(address, block);
+ }
+ block->SetIsCatchBlock();
+ }
+ handlers_ptr = iterator.EndDataPointer();
+ }
+ }
+
if (!InitializeParameters(code_item.ins_size_)) {
return nullptr;
}
@@ -273,6 +281,14 @@ void HGraphBuilder::Unop_12x(const Instruction& instruction, Primitive::Type typ
UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
}
+void HGraphBuilder::Conversion_12x(const Instruction& instruction,
+ Primitive::Type input_type,
+ Primitive::Type result_type) {
+ HInstruction* first = LoadLocal(instruction.VRegB(), input_type);
+ current_block_->AddInstruction(new (arena_) HTypeConversion(result_type, first));
+ UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
+}
+
template<typename T>
void HGraphBuilder::Binop_23x(const Instruction& instruction, Primitive::Type type) {
HInstruction* first = LoadLocal(instruction.VRegB(), type);
@@ -529,6 +545,27 @@ bool HGraphBuilder::BuildStaticFieldAccess(const Instruction& instruction,
return true;
}
+void HGraphBuilder::BuildCheckedDiv(uint16_t out_reg,
+ uint16_t first_reg,
+ int32_t second_reg,
+ uint32_t dex_offset,
+ Primitive::Type type,
+ bool second_is_lit) {
+ DCHECK(type == Primitive::kPrimInt);
+
+ HInstruction* first = LoadLocal(first_reg, type);
+ HInstruction* second = second_is_lit ? GetIntConstant(second_reg) : LoadLocal(second_reg, type);
+ if (!second->IsIntConstant() || (second->AsIntConstant()->GetValue() == 0)) {
+ second = new (arena_) HDivZeroCheck(second, dex_offset);
+ Temporaries temps(graph_, 1);
+ current_block_->AddInstruction(second);
+ temps.Add(current_block_->GetLastInstruction());
+ }
+
+ current_block_->AddInstruction(new (arena_) HDiv(type, first, second));
+ UpdateLocal(out_reg, current_block_->GetLastInstruction());
+}
+
void HGraphBuilder::BuildArrayAccess(const Instruction& instruction,
uint32_t dex_offset,
bool is_put,
@@ -609,6 +646,60 @@ void HGraphBuilder::BuildFillArrayData(HInstruction* object,
}
}
+void HGraphBuilder::BuildFillArrayData(const Instruction& instruction, uint32_t dex_offset) {
+ Temporaries temps(graph_, 1);
+ HInstruction* array = LoadLocal(instruction.VRegA_31t(), Primitive::kPrimNot);
+ HNullCheck* null_check = new (arena_) HNullCheck(array, dex_offset);
+ current_block_->AddInstruction(null_check);
+ temps.Add(null_check);
+
+ HInstruction* length = new (arena_) HArrayLength(null_check);
+ current_block_->AddInstruction(length);
+
+ int32_t payload_offset = instruction.VRegB_31t() + dex_offset;
+ const Instruction::ArrayDataPayload* payload =
+ reinterpret_cast<const Instruction::ArrayDataPayload*>(code_start_ + payload_offset);
+ const uint8_t* data = payload->data;
+ uint32_t element_count = payload->element_count;
+
+ // Implementation of this DEX instruction seems to be that the bounds check is
+ // done before doing any stores.
+ HInstruction* last_index = GetIntConstant(payload->element_count - 1);
+ current_block_->AddInstruction(new (arena_) HBoundsCheck(last_index, length, dex_offset));
+
+ switch (payload->element_width) {
+ case 1:
+ BuildFillArrayData(null_check,
+ reinterpret_cast<const int8_t*>(data),
+ element_count,
+ Primitive::kPrimByte,
+ dex_offset);
+ break;
+ case 2:
+ BuildFillArrayData(null_check,
+ reinterpret_cast<const int16_t*>(data),
+ element_count,
+ Primitive::kPrimShort,
+ dex_offset);
+ break;
+ case 4:
+ BuildFillArrayData(null_check,
+ reinterpret_cast<const int32_t*>(data),
+ element_count,
+ Primitive::kPrimInt,
+ dex_offset);
+ break;
+ case 8:
+ BuildFillWideArrayData(null_check,
+ reinterpret_cast<const int64_t*>(data),
+ element_count,
+ dex_offset);
+ break;
+ default:
+ LOG(FATAL) << "Unknown element width for " << payload->element_width;
+ }
+}
+
void HGraphBuilder::BuildFillWideArrayData(HInstruction* object,
const int64_t* data,
uint32_t element_count,
@@ -813,6 +904,16 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
break;
}
+ case Instruction::NEG_FLOAT: {
+ Unop_12x<HNeg>(instruction, Primitive::kPrimFloat);
+ break;
+ }
+
+ case Instruction::NEG_DOUBLE: {
+ Unop_12x<HNeg>(instruction, Primitive::kPrimDouble);
+ break;
+ }
+
case Instruction::NOT_INT: {
Unop_12x<HNot>(instruction, Primitive::kPrimInt);
break;
@@ -823,6 +924,11 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
break;
}
+ case Instruction::INT_TO_LONG: {
+ Conversion_12x(instruction, Primitive::kPrimInt, Primitive::kPrimLong);
+ break;
+ }
+
case Instruction::ADD_INT: {
Binop_23x<HAdd>(instruction, Primitive::kPrimInt);
break;
@@ -888,6 +994,12 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
break;
}
+ case Instruction::DIV_INT: {
+ BuildCheckedDiv(instruction.VRegA(), instruction.VRegB(), instruction.VRegC(),
+ dex_offset, Primitive::kPrimInt, false);
+ break;
+ }
+
case Instruction::DIV_FLOAT: {
Binop_23x<HDiv>(instruction, Primitive::kPrimFloat);
break;
@@ -953,6 +1065,12 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
break;
}
+ case Instruction::DIV_INT_2ADDR: {
+ BuildCheckedDiv(instruction.VRegA(), instruction.VRegA(), instruction.VRegB(),
+ dex_offset, Primitive::kPrimInt, false);
+ break;
+ }
+
case Instruction::DIV_FLOAT_2ADDR: {
Binop_12x<HDiv>(instruction, Primitive::kPrimFloat);
break;
@@ -993,6 +1111,13 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
break;
}
+ case Instruction::DIV_INT_LIT16:
+ case Instruction::DIV_INT_LIT8: {
+ BuildCheckedDiv(instruction.VRegA(), instruction.VRegB(), instruction.VRegC(),
+ dex_offset, Primitive::kPrimInt, true);
+ break;
+ }
+
case Instruction::NEW_INSTANCE: {
current_block_->AddInstruction(
new (arena_) HNewInstance(dex_offset, instruction.VRegB_21c()));
@@ -1027,57 +1152,7 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
}
case Instruction::FILL_ARRAY_DATA: {
- Temporaries temps(graph_, 1);
- HInstruction* array = LoadLocal(instruction.VRegA_31t(), Primitive::kPrimNot);
- HNullCheck* null_check = new (arena_) HNullCheck(array, dex_offset);
- current_block_->AddInstruction(null_check);
- temps.Add(null_check);
-
- HInstruction* length = new (arena_) HArrayLength(null_check);
- current_block_->AddInstruction(length);
-
- int32_t payload_offset = instruction.VRegB_31t() + dex_offset;
- const Instruction::ArrayDataPayload* payload =
- reinterpret_cast<const Instruction::ArrayDataPayload*>(code_start_ + payload_offset);
- const uint8_t* data = payload->data;
- uint32_t element_count = payload->element_count;
-
- // Implementation of this DEX instruction seems to be that the bounds check is
- // done before doing any stores.
- HInstruction* last_index = GetIntConstant(payload->element_count - 1);
- current_block_->AddInstruction(new (arena_) HBoundsCheck(last_index, length, dex_offset));
-
- switch (payload->element_width) {
- case 1:
- BuildFillArrayData(null_check,
- reinterpret_cast<const int8_t*>(data),
- element_count,
- Primitive::kPrimByte,
- dex_offset);
- break;
- case 2:
- BuildFillArrayData(null_check,
- reinterpret_cast<const int16_t*>(data),
- element_count,
- Primitive::kPrimShort,
- dex_offset);
- break;
- case 4:
- BuildFillArrayData(null_check,
- reinterpret_cast<const int32_t*>(data),
- element_count,
- Primitive::kPrimInt,
- dex_offset);
- break;
- case 8:
- BuildFillWideArrayData(null_check,
- reinterpret_cast<const int64_t*>(data),
- element_count,
- dex_offset);
- break;
- default:
- LOG(FATAL) << "Unknown element width for " << payload->element_width;
- }
+ BuildFillArrayData(instruction, dex_offset);
break;
}
@@ -1168,6 +1243,10 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
case Instruction::ARRAY_LENGTH: {
HInstruction* object = LoadLocal(instruction.VRegB_12x(), Primitive::kPrimNot);
+ // No need for a temporary for the null check, it is the only input of the following
+ // instruction.
+ object = new (arena_) HNullCheck(object, dex_offset);
+ current_block_->AddInstruction(object);
current_block_->AddInstruction(new (arena_) HArrayLength(object));
UpdateLocal(instruction.VRegA_12x(), current_block_->GetLastInstruction());
break;
@@ -1202,6 +1281,23 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
break;
}
+ case Instruction::MOVE_EXCEPTION: {
+ current_block_->AddInstruction(new (arena_) HLoadException());
+ UpdateLocal(instruction.VRegA_11x(), current_block_->GetLastInstruction());
+ break;
+ }
+
+ case Instruction::THROW: {
+ HInstruction* exception = LoadLocal(instruction.VRegA_11x(), Primitive::kPrimNot);
+ current_block_->AddInstruction(new (arena_) HThrow(exception, dex_offset));
+ // A throw instruction must branch to the exit block.
+ current_block_->AddSuccessor(exit_block_);
+ // We finished building this block. Set the current block to null to avoid
+ // adding dead instructions to it.
+ current_block_ = nullptr;
+ break;
+ }
+
default:
return false;
}
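
Note: two of the builder additions are worth spelling out. First, catch handlers now get basic blocks up front: the handler table is decoded once, every handler address that is not already a branch target gets a new block, and each such block is flagged with SetIsCatchBlock() so the code generator can later emit dex2pc mapping entries for it. Second, BuildFillArrayData reads the Dex array-data payload, whose layout is fixed by the Dalvik bytecode format; an illustrative declaration of that layout (field names follow the spec, the struct itself is a sketch):

    #include <cstdint>

    // Dex fill-array-data payload, as consumed by BuildFillArrayData above.
    // The builder emits a single bounds check against (element_count - 1),
    // then one typed store per element according to element_width.
    struct ArrayDataPayload {
      uint16_t ident;          // always 0x0300 for array-data
      uint16_t element_width;  // 1, 2, 4, or 8 bytes
      uint32_t element_count;
      uint8_t  data[];         // element_count * element_width bytes
    };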
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index eea762f834..030f45b609 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -116,6 +116,17 @@ class HGraphBuilder : public ValueObject {
template<typename T> void If_21t(const Instruction& instruction, uint32_t dex_offset);
template<typename T> void If_22t(const Instruction& instruction, uint32_t dex_offset);
+ void Conversion_12x(const Instruction& instruction,
+ Primitive::Type input_type,
+ Primitive::Type result_type);
+
+ void BuildCheckedDiv(uint16_t out_reg,
+ uint16_t first_reg,
+ int32_t second_reg, // can be a constant
+ uint32_t dex_offset,
+ Primitive::Type type,
+ bool second_is_lit);
+
void BuildReturn(const Instruction& instruction, Primitive::Type type);
// Builds an instance field access node and returns whether the instruction is supported.
@@ -146,6 +157,8 @@ class HGraphBuilder : public ValueObject {
uint32_t* args,
uint32_t register_index);
+ void BuildFillArrayData(const Instruction& instruction, uint32_t dex_offset);
+
// Fills the given object with data as specified in the fill-array-data
// instruction. Currently only used for non-reference and non-floating point
// arrays.
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index ac72a333c3..c75980d856 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -356,12 +356,13 @@ void CodeGenerator::BuildMappingTable(std::vector<uint8_t>* data, SrcMap* src_ma
int32_t pc2dex_dalvik_offset = 0;
uint32_t dex2pc_data_size = 0u;
uint32_t dex2pc_entries = 0u;
+ uint32_t dex2pc_offset = 0u;
+ int32_t dex2pc_dalvik_offset = 0;
if (src_map != nullptr) {
src_map->reserve(pc2dex_entries);
}
- // We currently only have pc2dex entries.
for (size_t i = 0; i < pc2dex_entries; i++) {
struct PcInfo pc_info = pc_infos_.Get(i);
pc2dex_data_size += UnsignedLeb128Size(pc_info.native_pc - pc2dex_offset);
@@ -373,6 +374,19 @@ void CodeGenerator::BuildMappingTable(std::vector<uint8_t>* data, SrcMap* src_ma
}
}
+ // Walk over the blocks and find which ones correspond to catch block entries.
+ for (size_t i = 0; i < graph_->GetBlocks().Size(); ++i) {
+ HBasicBlock* block = graph_->GetBlocks().Get(i);
+ if (block->IsCatchBlock()) {
+ intptr_t native_pc = GetAddressOf(block);
+ ++dex2pc_entries;
+ dex2pc_data_size += UnsignedLeb128Size(native_pc - dex2pc_offset);
+ dex2pc_data_size += SignedLeb128Size(block->GetDexPc() - dex2pc_dalvik_offset);
+ dex2pc_offset = native_pc;
+ dex2pc_dalvik_offset = block->GetDexPc();
+ }
+ }
+
uint32_t total_entries = pc2dex_entries + dex2pc_entries;
uint32_t hdr_data_size = UnsignedLeb128Size(total_entries) + UnsignedLeb128Size(pc2dex_entries);
uint32_t data_size = hdr_data_size + pc2dex_data_size + dex2pc_data_size;
@@ -380,6 +394,7 @@ void CodeGenerator::BuildMappingTable(std::vector<uint8_t>* data, SrcMap* src_ma
uint8_t* data_ptr = &(*data)[0];
uint8_t* write_pos = data_ptr;
+
write_pos = EncodeUnsignedLeb128(write_pos, total_entries);
write_pos = EncodeUnsignedLeb128(write_pos, pc2dex_entries);
DCHECK_EQ(static_cast<size_t>(write_pos - data_ptr), hdr_data_size);
@@ -387,6 +402,9 @@ void CodeGenerator::BuildMappingTable(std::vector<uint8_t>* data, SrcMap* src_ma
pc2dex_offset = 0u;
pc2dex_dalvik_offset = 0u;
+ dex2pc_offset = 0u;
+ dex2pc_dalvik_offset = 0u;
+
for (size_t i = 0; i < pc2dex_entries; i++) {
struct PcInfo pc_info = pc_infos_.Get(i);
DCHECK(pc2dex_offset <= pc_info.native_pc);
@@ -395,6 +413,19 @@ void CodeGenerator::BuildMappingTable(std::vector<uint8_t>* data, SrcMap* src_ma
pc2dex_offset = pc_info.native_pc;
pc2dex_dalvik_offset = pc_info.dex_pc;
}
+
+ for (size_t i = 0; i < graph_->GetBlocks().Size(); ++i) {
+ HBasicBlock* block = graph_->GetBlocks().Get(i);
+ if (block->IsCatchBlock()) {
+ intptr_t native_pc = GetAddressOf(block);
+ write_pos2 = EncodeUnsignedLeb128(write_pos2, native_pc - dex2pc_offset);
+ write_pos2 = EncodeSignedLeb128(write_pos2, block->GetDexPc() - dex2pc_dalvik_offset);
+ dex2pc_offset = native_pc;
+ dex2pc_dalvik_offset = block->GetDexPc();
+ }
+ }
+
+
DCHECK_EQ(static_cast<size_t>(write_pos - data_ptr), hdr_data_size + pc2dex_data_size);
DCHECK_EQ(static_cast<size_t>(write_pos2 - data_ptr), data_size);
@@ -411,6 +442,14 @@ void CodeGenerator::BuildMappingTable(std::vector<uint8_t>* data, SrcMap* src_ma
CHECK_EQ(pc_info.dex_pc, it.DexPc());
++it;
}
+ for (size_t i = 0; i < graph_->GetBlocks().Size(); ++i) {
+ HBasicBlock* block = graph_->GetBlocks().Get(i);
+ if (block->IsCatchBlock()) {
+ CHECK_EQ(GetAddressOf(block), it2.NativePcOffset());
+ CHECK_EQ(block->GetDexPc(), it2.DexPc());
+ ++it2;
+ }
+ }
CHECK(it == table.PcToDexEnd());
CHECK(it2 == table.DexToPcEnd());
}
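
Note: both halves of the mapping table store deltas rather than absolute values, which is why the loops thread dex2pc_offset and dex2pc_dalvik_offset through the iterations. Native pc deltas are always non-negative and use unsigned LEB128; dex pc deltas can go backwards and therefore use signed LEB128. A minimal unsigned encoder, as a standalone sketch rather than the ART implementation:

    #include <cstdint>
    #include <vector>

    // Unsigned LEB128: 7 payload bits per byte, high bit set on every byte
    // except the last, so small deltas cost a single byte.
    void EncodeUnsignedLeb128(std::vector<uint8_t>* out, uint32_t value) {
      while (value >= 0x80) {
        out->push_back(static_cast<uint8_t>((value & 0x7f) | 0x80));
        value >>= 7;
      }
      out->push_back(static_cast<uint8_t>(value));
    }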
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 01c5cc9637..fc4ea4b5d3 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -92,6 +92,7 @@ class CodeGenerator : public ArenaObject<kArenaAllocMisc> {
virtual HGraphVisitor* GetInstructionVisitor() = 0;
virtual Assembler* GetAssembler() = 0;
virtual size_t GetWordSize() const = 0;
+ virtual uintptr_t GetAddressOf(HBasicBlock* block) const = 0;
void ComputeFrameSize(size_t number_of_spill_slots,
size_t maximum_number_of_live_registers,
size_t number_of_out_slots);
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 6e6d64cbfc..fef7f0e80f 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -92,6 +92,22 @@ class NullCheckSlowPathARM : public SlowPathCodeARM {
DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathARM);
};
+class DivZeroCheckSlowPathARM : public SlowPathCodeARM {
+ public:
+ explicit DivZeroCheckSlowPathARM(HDivZeroCheck* instruction) : instruction_(instruction) {}
+
+ virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
+ __ Bind(GetEntryLabel());
+ arm_codegen->InvokeRuntime(
+ QUICK_ENTRY_POINT(pThrowDivZero), instruction_, instruction_->GetDexPc());
+ }
+
+ private:
+ HDivZeroCheck* const instruction_;
+ DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathARM);
+};
+
class StackOverflowCheckSlowPathARM : public SlowPathCodeARM {
public:
StackOverflowCheckSlowPathARM() {}
@@ -373,9 +389,6 @@ void CodeGeneratorARM::SetupBlockedRegisters() const {
blocked_core_registers_[LR] = true;
blocked_core_registers_[PC] = true;
- // Reserve R4 for suspend check.
- blocked_core_registers_[R4] = true;
-
// Reserve thread register.
blocked_core_registers_[TR] = true;
@@ -385,6 +398,7 @@ void CodeGeneratorARM::SetupBlockedRegisters() const {
// TODO: We currently don't use Quick's callee saved registers.
// We always save and restore R6 and R7 to make sure we can use three
// register pairs for long operations.
+ blocked_core_registers_[R4] = true;
blocked_core_registers_[R5] = true;
blocked_core_registers_[R8] = true;
blocked_core_registers_[R10] = true;
@@ -785,6 +799,7 @@ void CodeGeneratorARM::InvokeRuntime(int32_t entry_point_offset,
DCHECK(instruction->IsSuspendCheck()
|| instruction->IsBoundsCheck()
|| instruction->IsNullCheck()
+ || instruction->IsDivZeroCheck()
|| !IsLeafMethod());
}
@@ -1174,7 +1189,8 @@ void LocationsBuilderARM::VisitNeg(HNeg* neg) {
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
- LOG(FATAL) << "Not yet implemented neg type " << neg->GetResultType();
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
break;
default:
@@ -1214,8 +1230,14 @@ void InstructionCodeGeneratorARM::VisitNeg(HNeg* neg) {
break;
case Primitive::kPrimFloat:
+ DCHECK(in.IsFpuRegister());
+ __ vnegs(out.As<SRegister>(), in.As<SRegister>());
+ break;
+
case Primitive::kPrimDouble:
- LOG(FATAL) << "Not yet implemented neg type " << neg->GetResultType();
+ DCHECK(in.IsFpuRegisterPair());
+ __ vnegd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
+ FromLowSToD(in.AsFpuRegisterPairLow<SRegister>()));
break;
default:
@@ -1223,6 +1245,94 @@ void InstructionCodeGeneratorARM::VisitNeg(HNeg* neg) {
}
}
+void LocationsBuilderARM::VisitTypeConversion(HTypeConversion* conversion) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(conversion, LocationSummary::kNoCall);
+ Primitive::Type result_type = conversion->GetResultType();
+ Primitive::Type input_type = conversion->GetInputType();
+ switch (result_type) {
+ case Primitive::kPrimLong:
+ switch (input_type) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ // int-to-long conversion.
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type << " to "
+ << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimInt:
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type
+ << " to " << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+}
+
+void InstructionCodeGeneratorARM::VisitTypeConversion(HTypeConversion* conversion) {
+ LocationSummary* locations = conversion->GetLocations();
+ Location out = locations->Out();
+ Location in = locations->InAt(0);
+ Primitive::Type result_type = conversion->GetResultType();
+ Primitive::Type input_type = conversion->GetInputType();
+ switch (result_type) {
+ case Primitive::kPrimLong:
+ switch (input_type) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ // int-to-long conversion.
+ DCHECK(out.IsRegisterPair());
+ DCHECK(in.IsRegister());
+ __ Mov(out.AsRegisterPairLow<Register>(), in.As<Register>());
+ // Sign extension.
+ __ Asr(out.AsRegisterPairHigh<Register>(),
+ out.AsRegisterPairLow<Register>(),
+ 31);
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type << " to "
+ << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimInt:
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type
+ << " to " << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+}
+
void LocationsBuilderARM::VisitAdd(HAdd* add) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(add, LocationSummary::kNoCall);
@@ -1445,7 +1555,12 @@ void LocationsBuilderARM::VisitDiv(HDiv* div) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall);
switch (div->GetResultType()) {
- case Primitive::kPrimInt:
+ case Primitive::kPrimInt: {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+ }
case Primitive::kPrimLong: {
LOG(FATAL) << "Not implemented div type" << div->GetResultType();
break;
@@ -1470,7 +1585,11 @@ void InstructionCodeGeneratorARM::VisitDiv(HDiv* div) {
Location second = locations->InAt(1);
switch (div->GetResultType()) {
- case Primitive::kPrimInt:
+ case Primitive::kPrimInt: {
+ __ sdiv(out.As<Register>(), first.As<Register>(), second.As<Register>());
+ break;
+ }
+
case Primitive::kPrimLong: {
LOG(FATAL) << "Not implemented div type" << div->GetResultType();
break;
@@ -1493,6 +1612,27 @@ void InstructionCodeGeneratorARM::VisitDiv(HDiv* div) {
}
}
+void LocationsBuilderARM::VisitDivZeroCheck(HDivZeroCheck* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RequiresRegister());
+ if (instruction->HasUses()) {
+ locations->SetOut(Location::SameAsFirstInput());
+ }
+}
+
+void InstructionCodeGeneratorARM::VisitDivZeroCheck(HDivZeroCheck* instruction) {
+ SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) DivZeroCheckSlowPathARM(instruction);
+ codegen_->AddSlowPath(slow_path);
+
+ LocationSummary* locations = instruction->GetLocations();
+ Location value = locations->InAt(0);
+
+ DCHECK(value.IsRegister()) << value;
+ __ cmp(value.As<Register>(), ShifterOperand(0));
+ __ b(slow_path->GetEntryLabel(), EQ);
+}
+
void LocationsBuilderARM::VisitNewInstance(HNewInstance* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
@@ -2069,12 +2209,15 @@ void InstructionCodeGeneratorARM::GenerateSuspendCheck(HSuspendCheck* instructio
new (GetGraph()->GetArena()) SuspendCheckSlowPathARM(instruction, successor);
codegen_->AddSlowPath(slow_path);
- __ subs(R4, R4, ShifterOperand(1));
+ __ LoadFromOffset(
+ kLoadUnsignedHalfword, IP, TR, Thread::ThreadFlagsOffset<kArmWordSize>().Int32Value());
+ __ cmp(IP, ShifterOperand(0));
+ // TODO: Figure out the branch offsets and use cbz/cbnz.
if (successor == nullptr) {
- __ b(slow_path->GetEntryLabel(), EQ);
+ __ b(slow_path->GetEntryLabel(), NE);
__ Bind(slow_path->GetReturnLabel());
} else {
- __ b(codegen_->GetLabelOf(successor), NE);
+ __ b(codegen_->GetLabelOf(successor), EQ);
__ b(slow_path->GetEntryLabel());
}
}
@@ -2373,5 +2516,31 @@ void InstructionCodeGeneratorARM::VisitLoadString(HLoadString* load) {
__ Bind(slow_path->GetExitLabel());
}
+void LocationsBuilderARM::VisitLoadException(HLoadException* load) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARM::VisitLoadException(HLoadException* load) {
+ Register out = load->GetLocations()->Out().As<Register>();
+ int32_t offset = Thread::ExceptionOffset<kArmWordSize>().Int32Value();
+ __ LoadFromOffset(kLoadWord, out, TR, offset);
+ __ LoadImmediate(IP, 0);
+ __ StoreToOffset(kStoreWord, IP, TR, offset);
+}
+
+void LocationsBuilderARM::VisitThrow(HThrow* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+}
+
+void InstructionCodeGeneratorARM::VisitThrow(HThrow* instruction) {
+ codegen_->InvokeRuntime(
+ QUICK_ENTRY_POINT(pDeliverException), instruction, instruction->GetDexPc());
+}
+
} // namespace arm
} // namespace art
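
Note: two details in this file are easy to misread. The suspend check no longer decrements R4: it loads the thread's flags halfword and takes the slow path when the flags are non-zero, which is why the EQ/NE branch conditions swap relative to the old counter-based test (and why R4 moves back into the ordinary blocked-register list). And the new int-to-long conversion builds the high word by arithmetically shifting the low word right by 31; in portable C++ terms (a sketch, not the generated code):

    #include <cstdint>

    // Equivalent of the emitted "mov out_lo, in; asr out_hi, out_lo, #31":
    // every bit of the high word is a copy of the low word's sign bit.
    int64_t IntToLong(int32_t in) {
      uint32_t lo = static_cast<uint32_t>(in);
      uint32_t hi = static_cast<uint32_t>(in >> 31);  // 0 or 0xffffffff
      return static_cast<int64_t>((static_cast<uint64_t>(hi) << 32) | lo);
    }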
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 5076a4bc38..5d519937f4 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -77,10 +77,10 @@ class ParallelMoveResolverARM : public ParallelMoveResolver {
ParallelMoveResolverARM(ArenaAllocator* allocator, CodeGeneratorARM* codegen)
: ParallelMoveResolver(allocator), codegen_(codegen) {}
- virtual void EmitMove(size_t index) OVERRIDE;
- virtual void EmitSwap(size_t index) OVERRIDE;
- virtual void SpillScratch(int reg) OVERRIDE;
- virtual void RestoreScratch(int reg) OVERRIDE;
+ void EmitMove(size_t index) OVERRIDE;
+ void EmitSwap(size_t index) OVERRIDE;
+ void SpillScratch(int reg) OVERRIDE;
+ void RestoreScratch(int reg) OVERRIDE;
ArmAssembler* GetAssembler() const;
@@ -99,7 +99,7 @@ class LocationsBuilderARM : public HGraphVisitor {
: HGraphVisitor(graph), codegen_(codegen) {}
#define DECLARE_VISIT_INSTRUCTION(name, super) \
- virtual void Visit##name(H##name* instr);
+ void Visit##name(H##name* instr);
FOR_EACH_CONCRETE_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
@@ -119,7 +119,7 @@ class InstructionCodeGeneratorARM : public HGraphVisitor {
InstructionCodeGeneratorARM(HGraph* graph, CodeGeneratorARM* codegen);
#define DECLARE_VISIT_INSTRUCTION(name, super) \
- virtual void Visit##name(H##name* instr);
+ void Visit##name(H##name* instr);
FOR_EACH_CONCRETE_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
@@ -145,39 +145,43 @@ class CodeGeneratorARM : public CodeGenerator {
explicit CodeGeneratorARM(HGraph* graph);
virtual ~CodeGeneratorARM() {}
- virtual void GenerateFrameEntry() OVERRIDE;
- virtual void GenerateFrameExit() OVERRIDE;
- virtual void Bind(HBasicBlock* block) OVERRIDE;
- virtual void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
- virtual size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
- virtual size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+ void GenerateFrameEntry() OVERRIDE;
+ void GenerateFrameExit() OVERRIDE;
+ void Bind(HBasicBlock* block) OVERRIDE;
+ void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
+ size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+ size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
- virtual size_t GetWordSize() const OVERRIDE {
+ size_t GetWordSize() const OVERRIDE {
return kArmWordSize;
}
- virtual size_t FrameEntrySpillSize() const OVERRIDE;
+ size_t FrameEntrySpillSize() const OVERRIDE;
- virtual HGraphVisitor* GetLocationBuilder() OVERRIDE {
+ HGraphVisitor* GetLocationBuilder() OVERRIDE {
return &location_builder_;
}
- virtual HGraphVisitor* GetInstructionVisitor() OVERRIDE {
+ HGraphVisitor* GetInstructionVisitor() OVERRIDE {
return &instruction_visitor_;
}
- virtual ArmAssembler* GetAssembler() OVERRIDE {
+ ArmAssembler* GetAssembler() OVERRIDE {
return &assembler_;
}
- virtual void SetupBlockedRegisters() const OVERRIDE;
+ uintptr_t GetAddressOf(HBasicBlock* block) const OVERRIDE {
+ return GetLabelOf(block)->Position();
+ }
+
+ void SetupBlockedRegisters() const OVERRIDE;
- virtual Location AllocateFreeRegister(Primitive::Type type) const OVERRIDE;
+ Location AllocateFreeRegister(Primitive::Type type) const OVERRIDE;
- virtual Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
+ Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
- virtual void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
- virtual void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
+ void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
+ void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
// Blocks all register pairs made out of blocked core registers.
void UpdateBlockedPairRegisters() const;
@@ -186,7 +190,7 @@ class CodeGeneratorARM : public CodeGenerator {
return &move_resolver_;
}
- virtual InstructionSet GetInstructionSet() const OVERRIDE {
+ InstructionSet GetInstructionSet() const OVERRIDE {
return InstructionSet::kThumb2;
}
@@ -208,7 +212,7 @@ class CodeGeneratorARM : public CodeGenerator {
return block_labels_.GetRawStorage() + block->GetBlockId();
}
- virtual void Initialize() OVERRIDE {
+ void Initialize() OVERRIDE {
block_labels_.SetSize(GetGraph()->GetBlocks().Size());
}
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 90d7c35975..1be5717737 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -537,14 +537,18 @@ InstructionCodeGeneratorARM64::InstructionCodeGeneratorARM64(HGraph* graph,
M(ClinitCheck) \
M(DoubleConstant) \
M(Div) \
+ M(DivZeroCheck) \
M(FloatConstant) \
M(LoadClass) \
+ M(LoadException) \
M(LoadString) \
M(Neg) \
M(NewArray) \
M(ParallelMove) \
M(StaticFieldGet) \
M(StaticFieldSet) \
+ M(Throw) \
+ M(TypeConversion) \
#define UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name) name##UnimplementedInstructionBreakCode
@@ -1065,11 +1069,8 @@ void InstructionCodeGeneratorARM64::VisitNot(HNot* instruction) {
break;
case Primitive::kPrimInt:
- __ Mvn(OutputRegister(instruction), InputOperandAt(instruction, 0));
- break;
-
case Primitive::kPrimLong:
- LOG(FATAL) << "Not yet implemented type for not operation " << instruction->GetResultType();
+ __ Mvn(OutputRegister(instruction), InputOperandAt(instruction, 0));
break;
default:
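
Note: folding not-int and not-long into a single case works because vixl's Mvn takes its operand width from the destination register that OutputRegister() hands it; bitwise complement itself is width-agnostic, as the trivial equivalence shows:

    #include <cstdint>

    // The same operation at both widths; only the register size of the
    // emitted Mvn differs.
    int32_t NotInt(int32_t x)  { return ~x; }
    int64_t NotLong(int64_t x) { return ~x; }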
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 5530f46065..4a41000e8d 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -95,7 +95,7 @@ class InstructionCodeGeneratorARM64 : public HGraphVisitor {
InstructionCodeGeneratorARM64(HGraph* graph, CodeGeneratorARM64* codegen);
#define DECLARE_VISIT_INSTRUCTION(name, super) \
- virtual void Visit##name(H##name* instr);
+ void Visit##name(H##name* instr) OVERRIDE;
FOR_EACH_CONCRETE_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
#undef DECLARE_VISIT_INSTRUCTION
@@ -118,7 +118,7 @@ class LocationsBuilderARM64 : public HGraphVisitor {
: HGraphVisitor(graph), codegen_(codegen) {}
#define DECLARE_VISIT_INSTRUCTION(name, super) \
- virtual void Visit##name(H##name* instr);
+ void Visit##name(H##name* instr) OVERRIDE;
FOR_EACH_CONCRETE_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
#undef DECLARE_VISIT_INSTRUCTION
@@ -135,10 +135,10 @@ class LocationsBuilderARM64 : public HGraphVisitor {
class CodeGeneratorARM64 : public CodeGenerator {
public:
explicit CodeGeneratorARM64(HGraph* graph);
- virtual ~CodeGeneratorARM64() { }
+ virtual ~CodeGeneratorARM64() {}
- virtual void GenerateFrameEntry() OVERRIDE;
- virtual void GenerateFrameExit() OVERRIDE;
+ void GenerateFrameEntry() OVERRIDE;
+ void GenerateFrameExit() OVERRIDE;
static const vixl::CPURegList& GetFramePreservedRegisters() {
static const vixl::CPURegList frame_preserved_regs =
@@ -149,44 +149,49 @@ class CodeGeneratorARM64 : public CodeGenerator {
return GetFramePreservedRegisters().TotalSizeInBytes();
}
- virtual void Bind(HBasicBlock* block) OVERRIDE;
+ void Bind(HBasicBlock* block) OVERRIDE;
vixl::Label* GetLabelOf(HBasicBlock* block) const {
return block_labels_ + block->GetBlockId();
}
- virtual void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
+ void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
- virtual size_t GetWordSize() const OVERRIDE {
+ size_t GetWordSize() const OVERRIDE {
return kArm64WordSize;
}
- virtual size_t FrameEntrySpillSize() const OVERRIDE;
+ uintptr_t GetAddressOf(HBasicBlock* block ATTRIBUTE_UNUSED) const OVERRIDE {
+ UNIMPLEMENTED(INFO) << "TODO: GetAddressOf";
+ return 0u;
+ }
+
+ size_t FrameEntrySpillSize() const OVERRIDE;
- virtual HGraphVisitor* GetLocationBuilder() OVERRIDE { return &location_builder_; }
- virtual HGraphVisitor* GetInstructionVisitor() OVERRIDE { return &instruction_visitor_; }
- virtual Arm64Assembler* GetAssembler() OVERRIDE { return &assembler_; }
+ HGraphVisitor* GetLocationBuilder() OVERRIDE { return &location_builder_; }
+ HGraphVisitor* GetInstructionVisitor() OVERRIDE { return &instruction_visitor_; }
+ Arm64Assembler* GetAssembler() OVERRIDE { return &assembler_; }
// Emit a write barrier.
void MarkGCCard(vixl::Register object, vixl::Register value);
// Register allocation.
- virtual void SetupBlockedRegisters() const OVERRIDE;
+ void SetupBlockedRegisters() const OVERRIDE;
// AllocateFreeRegister() is only used when allocating registers locally
// during CompileBaseline().
- virtual Location AllocateFreeRegister(Primitive::Type type) const OVERRIDE;
+ Location AllocateFreeRegister(Primitive::Type type) const OVERRIDE;
- virtual Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
+ Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
- virtual size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE {
+ size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE {
UNUSED(stack_index);
UNUSED(reg_id);
UNIMPLEMENTED(INFO) << "TODO: SaveCoreRegister";
return 0;
}
- virtual size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE {
+ size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE {
UNUSED(stack_index);
UNUSED(reg_id);
UNIMPLEMENTED(INFO) << "TODO: RestoreCoreRegister";
@@ -205,16 +210,16 @@ class CodeGeneratorARM64 : public CodeGenerator {
kNumberOfAllocatableCoreRegisters + kNumberOfAllocatableFloatingPointRegisters;
static constexpr int kNumberOfAllocatableRegisterPairs = 0;
- virtual void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
- virtual void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
+ void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
+ void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
- virtual InstructionSet GetInstructionSet() const OVERRIDE {
+ InstructionSet GetInstructionSet() const OVERRIDE {
return InstructionSet::kArm64;
}
void MoveHelper(Location destination, Location source, Primitive::Type type);
- virtual void Initialize() OVERRIDE {
+ void Initialize() OVERRIDE {
HGraph* graph = GetGraph();
int length = graph->GetBlocks().Size();
block_labels_ = graph->GetArena()->AllocArray<vixl::Label>(length);
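
Note: the remaining churn in these headers is mechanical. ART's OVERRIDE macro expands to C++11 override, and an overriding function is implicitly virtual, so the explicit virtual keyword is dropped at override sites. The pattern, with hypothetical classes:

    #include <cstddef>

    struct CodeGen {
      virtual ~CodeGen() {}
      virtual size_t GetWordSize() const = 0;
    };

    struct CodeGenArm64 : CodeGen {
      // 'override' already implies virtual and makes the compiler check
      // that a matching base-class virtual exists.
      size_t GetWordSize() const override { return 8u; }
    };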
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 1e37909be9..127ddbeab1 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -85,6 +85,36 @@ class NullCheckSlowPathX86 : public SlowPathCodeX86 {
DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathX86);
};
+class DivZeroCheckSlowPathX86 : public SlowPathCodeX86 {
+ public:
+ explicit DivZeroCheckSlowPathX86(HDivZeroCheck* instruction) : instruction_(instruction) {}
+
+ virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ __ Bind(GetEntryLabel());
+ __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pThrowDivZero)));
+ codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
+ }
+
+ private:
+ HDivZeroCheck* const instruction_;
+ DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathX86);
+};
+
+class DivMinusOneSlowPathX86 : public SlowPathCodeX86 {
+ public:
+ explicit DivMinusOneSlowPathX86(Register reg) : reg_(reg) {}
+
+ virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ __ Bind(GetEntryLabel());
+ __ negl(reg_);
+ __ jmp(GetExitLabel());
+ }
+
+ private:
+ Register reg_;
+ DISALLOW_COPY_AND_ASSIGN(DivMinusOneSlowPathX86);
+};
+
class StackOverflowCheckSlowPathX86 : public SlowPathCodeX86 {
public:
StackOverflowCheckSlowPathX86() {}
@@ -1086,7 +1116,10 @@ void LocationsBuilderX86::VisitNeg(HNeg* neg) {
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
- LOG(FATAL) << "Not yet implemented neg type " << neg->GetResultType();
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ // Output overlaps as we need a fresh (zero-initialized)
+ // register to perform subtraction from zero.
+ locations->SetOut(Location::RequiresFpuRegister());
break;
default:
@@ -1101,11 +1134,13 @@ void InstructionCodeGeneratorX86::VisitNeg(HNeg* neg) {
switch (neg->GetResultType()) {
case Primitive::kPrimInt:
DCHECK(in.IsRegister());
+ DCHECK(in.Equals(out));
__ negl(out.As<Register>());
break;
case Primitive::kPrimLong:
DCHECK(in.IsRegisterPair());
+ DCHECK(in.Equals(out));
__ negl(out.AsRegisterPairLow<Register>());
// Negation is similar to subtraction from zero. The least
// significant byte triggers a borrow when it is different from
@@ -1117,8 +1152,19 @@ void InstructionCodeGeneratorX86::VisitNeg(HNeg* neg) {
break;
case Primitive::kPrimFloat:
+ DCHECK(!in.Equals(out));
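+      // As in the x86-64 back end, this could instead be an exclusive or
+      // with the sign-bit mask 0x80000000 fetched from a constant pool.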
+ // out = 0
+ __ xorps(out.As<XmmRegister>(), out.As<XmmRegister>());
+ // out = out - in
+ __ subss(out.As<XmmRegister>(), in.As<XmmRegister>());
+ break;
+
case Primitive::kPrimDouble:
- LOG(FATAL) << "Not yet implemented neg type " << neg->GetResultType();
+ DCHECK(!in.Equals(out));
+ // out = 0
+ __ xorpd(out.As<XmmRegister>(), out.As<XmmRegister>());
+ // out = out - in
+ __ subsd(out.As<XmmRegister>(), in.As<XmmRegister>());
break;
default:
@@ -1126,6 +1172,91 @@ void InstructionCodeGeneratorX86::VisitNeg(HNeg* neg) {
}
}
+void LocationsBuilderX86::VisitTypeConversion(HTypeConversion* conversion) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(conversion, LocationSummary::kNoCall);
+ Primitive::Type result_type = conversion->GetResultType();
+ Primitive::Type input_type = conversion->GetInputType();
+ switch (result_type) {
+ case Primitive::kPrimLong:
+ switch (input_type) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ // int-to-long conversion.
+ locations->SetInAt(0, Location::RegisterLocation(EAX));
+ locations->SetOut(Location::RegisterPairLocation(EAX, EDX));
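+          // The conversion is done with cdq, which sign-extends eax into
+          // edx, hence the fixed eax input and the eax/edx output pair.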
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type << " to "
+ << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimInt:
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type
+ << " to " << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+}
+
+void InstructionCodeGeneratorX86::VisitTypeConversion(HTypeConversion* conversion) {
+ LocationSummary* locations = conversion->GetLocations();
+ Location out = locations->Out();
+ Location in = locations->InAt(0);
+ Primitive::Type result_type = conversion->GetResultType();
+ Primitive::Type input_type = conversion->GetInputType();
+ switch (result_type) {
+ case Primitive::kPrimLong:
+ switch (input_type) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ // int-to-long conversion.
+ DCHECK_EQ(out.AsRegisterPairLow<Register>(), EAX);
+ DCHECK_EQ(out.AsRegisterPairHigh<Register>(), EDX);
+ DCHECK_EQ(in.As<Register>(), EAX);
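+          // edx:eax <- sign extension of eax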
+ __ cdq();
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type << " to "
+ << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimInt:
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type
+ << " to " << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+}
+
void LocationsBuilderX86::VisitAdd(HAdd* add) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(add, LocationSummary::kNoCall);
@@ -1386,7 +1517,14 @@ void LocationsBuilderX86::VisitDiv(HDiv* div) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall);
switch (div->GetResultType()) {
- case Primitive::kPrimInt:
+ case Primitive::kPrimInt: {
+ locations->SetInAt(0, Location::RegisterLocation(EAX));
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::SameAsFirstInput());
+ // Intel uses edx:eax as the dividend.
+ locations->AddTemp(Location::RegisterLocation(EDX));
+ break;
+ }
case Primitive::kPrimLong: {
      LOG(FATAL) << "Not implemented div type " << div->GetResultType();
break;
@@ -1411,7 +1549,32 @@ void InstructionCodeGeneratorX86::VisitDiv(HDiv* div) {
DCHECK(first.Equals(locations->Out()));
switch (div->GetResultType()) {
- case Primitive::kPrimInt:
+ case Primitive::kPrimInt: {
+ Register first_reg = first.As<Register>();
+ Register second_reg = second.As<Register>();
+ DCHECK_EQ(EAX, first_reg);
+ DCHECK_EQ(EDX, locations->GetTemp(0).As<Register>());
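+      // idivl divides edx:eax, leaving the quotient in eax and the
+      // remainder in edx; this is why both registers are reserved above.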
+
+ SlowPathCodeX86* slow_path =
+ new (GetGraph()->GetArena()) DivMinusOneSlowPathX86(first_reg);
+ codegen_->AddSlowPath(slow_path);
+
+ // 0x80000000/-1 triggers an arithmetic exception!
+      // Dividing by -1 is actually negation, and -0x80000000 = 0x80000000, so
+ // it's safe to just use negl instead of more complex comparisons.
+
+ __ cmpl(second_reg, Immediate(-1));
+ __ j(kEqual, slow_path->GetEntryLabel());
+
+      // edx:eax <- sign extension of eax
+ __ cdq();
+ // eax = quotient, edx = remainder
+ __ idivl(second_reg);
+
+ __ Bind(slow_path->GetExitLabel());
+ break;
+ }
+
case Primitive::kPrimLong: {
      LOG(FATAL) << "Not implemented div type " << div->GetResultType();
break;
@@ -1432,6 +1595,36 @@ void InstructionCodeGeneratorX86::VisitDiv(HDiv* div) {
}
}
+void LocationsBuilderX86::VisitDivZeroCheck(HDivZeroCheck* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::Any());
+ if (instruction->HasUses()) {
+ locations->SetOut(Location::SameAsFirstInput());
+ }
+}
+
+void InstructionCodeGeneratorX86::VisitDivZeroCheck(HDivZeroCheck* instruction) {
+ SlowPathCodeX86* slow_path = new (GetGraph()->GetArena()) DivZeroCheckSlowPathX86(instruction);
+ codegen_->AddSlowPath(slow_path);
+
+ LocationSummary* locations = instruction->GetLocations();
+ Location value = locations->InAt(0);
+
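+  // The value may live in a register, on the stack, or be a constant;
+  // use the cheapest zero test available for each case.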
+ if (value.IsRegister()) {
+ __ testl(value.As<Register>(), value.As<Register>());
+ } else if (value.IsStackSlot()) {
+ __ cmpl(Address(ESP, value.GetStackIndex()), Immediate(0));
+ } else {
+ DCHECK(value.IsConstant()) << value;
+ if (value.GetConstant()->AsIntConstant()->GetValue() == 0) {
+ __ jmp(slow_path->GetEntryLabel());
+ }
+ return;
+ }
+ __ j(kEqual, slow_path->GetEntryLabel());
+}
+
void LocationsBuilderX86::VisitNewInstance(HNewInstance* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
@@ -2419,5 +2612,29 @@ void InstructionCodeGeneratorX86::VisitLoadString(HLoadString* load) {
__ Bind(slow_path->GetExitLabel());
}
+void LocationsBuilderX86::VisitLoadException(HLoadException* load) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorX86::VisitLoadException(HLoadException* load) {
+ Address address = Address::Absolute(Thread::ExceptionOffset<kX86WordSize>().Int32Value());
+ __ fs()->movl(load->GetLocations()->Out().As<Register>(), address);
+ __ fs()->movl(address, Immediate(0));
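+  // The fs-relative address is the Thread's exception field; clearing it
+  // after the load means the exception is no longer seen as pending.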
+}
+
+void LocationsBuilderX86::VisitThrow(HThrow* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+}
+
+void InstructionCodeGeneratorX86::VisitThrow(HThrow* instruction) {
+ __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pDeliverException)));
+ codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+}
+
} // namespace x86
} // namespace art
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 176a269ac4..85fe21ca76 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -71,10 +71,10 @@ class ParallelMoveResolverX86 : public ParallelMoveResolver {
ParallelMoveResolverX86(ArenaAllocator* allocator, CodeGeneratorX86* codegen)
: ParallelMoveResolver(allocator), codegen_(codegen) {}
- virtual void EmitMove(size_t index) OVERRIDE;
- virtual void EmitSwap(size_t index) OVERRIDE;
- virtual void SpillScratch(int reg) OVERRIDE;
- virtual void RestoreScratch(int reg) OVERRIDE;
+ void EmitMove(size_t index) OVERRIDE;
+ void EmitSwap(size_t index) OVERRIDE;
+ void SpillScratch(int reg) OVERRIDE;
+ void RestoreScratch(int reg) OVERRIDE;
X86Assembler* GetAssembler() const;
@@ -94,7 +94,7 @@ class LocationsBuilderX86 : public HGraphVisitor {
: HGraphVisitor(graph), codegen_(codegen) {}
#define DECLARE_VISIT_INSTRUCTION(name, super) \
- virtual void Visit##name(H##name* instr);
+ void Visit##name(H##name* instr) OVERRIDE;
FOR_EACH_CONCRETE_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
@@ -114,7 +114,7 @@ class InstructionCodeGeneratorX86 : public HGraphVisitor {
InstructionCodeGeneratorX86(HGraph* graph, CodeGeneratorX86* codegen);
#define DECLARE_VISIT_INSTRUCTION(name, super) \
- virtual void Visit##name(H##name* instr);
+ void Visit##name(H##name* instr) OVERRIDE;
FOR_EACH_CONCRETE_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
@@ -140,39 +140,43 @@ class CodeGeneratorX86 : public CodeGenerator {
explicit CodeGeneratorX86(HGraph* graph);
virtual ~CodeGeneratorX86() {}
- virtual void GenerateFrameEntry() OVERRIDE;
- virtual void GenerateFrameExit() OVERRIDE;
- virtual void Bind(HBasicBlock* block) OVERRIDE;
- virtual void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
- virtual size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
- virtual size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+ void GenerateFrameEntry() OVERRIDE;
+ void GenerateFrameExit() OVERRIDE;
+ void Bind(HBasicBlock* block) OVERRIDE;
+ void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
+ size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+ size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
- virtual size_t GetWordSize() const OVERRIDE {
+ size_t GetWordSize() const OVERRIDE {
return kX86WordSize;
}
- virtual size_t FrameEntrySpillSize() const OVERRIDE;
+ size_t FrameEntrySpillSize() const OVERRIDE;
- virtual HGraphVisitor* GetLocationBuilder() OVERRIDE {
+ HGraphVisitor* GetLocationBuilder() OVERRIDE {
return &location_builder_;
}
- virtual HGraphVisitor* GetInstructionVisitor() OVERRIDE {
+ HGraphVisitor* GetInstructionVisitor() OVERRIDE {
return &instruction_visitor_;
}
- virtual X86Assembler* GetAssembler() OVERRIDE {
+ X86Assembler* GetAssembler() OVERRIDE {
return &assembler_;
}
- virtual void SetupBlockedRegisters() const OVERRIDE;
+ uintptr_t GetAddressOf(HBasicBlock* block) const OVERRIDE {
+ return GetLabelOf(block)->Position();
+ }
+
+ void SetupBlockedRegisters() const OVERRIDE;
- virtual Location AllocateFreeRegister(Primitive::Type type) const OVERRIDE;
+ Location AllocateFreeRegister(Primitive::Type type) const OVERRIDE;
- virtual Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
+ Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
- virtual void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
- virtual void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
+ void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
+ void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
// Blocks all register pairs made out of blocked core registers.
void UpdateBlockedPairRegisters() const;
@@ -181,7 +185,7 @@ class CodeGeneratorX86 : public CodeGenerator {
return &move_resolver_;
}
- virtual InstructionSet GetInstructionSet() const OVERRIDE {
+ InstructionSet GetInstructionSet() const OVERRIDE {
return InstructionSet::kX86;
}
@@ -199,7 +203,7 @@ class CodeGeneratorX86 : public CodeGenerator {
return block_labels_.GetRawStorage() + block->GetBlockId();
}
- virtual void Initialize() OVERRIDE {
+ void Initialize() OVERRIDE {
block_labels_.SetSize(GetGraph()->GetBlocks().Size());
}
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 40eec9b15d..8c0842cb89 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -90,6 +90,37 @@ class NullCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathX86_64);
};
+class DivZeroCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
+ public:
+ explicit DivZeroCheckSlowPathX86_64(HDivZeroCheck* instruction) : instruction_(instruction) {}
+
+ virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ __ Bind(GetEntryLabel());
+ __ gs()->call(
+ Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pThrowDivZero), true));
+ codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
+ }
+
+ private:
+ HDivZeroCheck* const instruction_;
+ DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathX86_64);
+};
+
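+// Same 0x80000000 / -1 fix-up as on x86: compute the quotient out of line
+// as a negation instead of letting idivl fault.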
+class DivMinusOneSlowPathX86_64 : public SlowPathCodeX86_64 {
+ public:
+ explicit DivMinusOneSlowPathX86_64(Register reg) : reg_(reg) {}
+
+ virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ __ Bind(GetEntryLabel());
+ __ negl(CpuRegister(reg_));
+ __ jmp(GetExitLabel());
+ }
+
+ private:
+ Register reg_;
+ DISALLOW_COPY_AND_ASSIGN(DivMinusOneSlowPathX86_64);
+};
+
class StackOverflowCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
public:
StackOverflowCheckSlowPathX86_64() {}
@@ -1071,7 +1102,10 @@ void LocationsBuilderX86_64::VisitNeg(HNeg* neg) {
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
- LOG(FATAL) << "Not yet implemented neg type " << neg->GetResultType();
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ // Output overlaps as we need a fresh (zero-initialized)
+ // register to perform subtraction from zero.
+ locations->SetOut(Location::RequiresFpuRegister());
break;
default:
@@ -1086,17 +1120,49 @@ void InstructionCodeGeneratorX86_64::VisitNeg(HNeg* neg) {
switch (neg->GetResultType()) {
case Primitive::kPrimInt:
DCHECK(in.IsRegister());
+ DCHECK(in.Equals(out));
__ negl(out.As<CpuRegister>());
break;
case Primitive::kPrimLong:
DCHECK(in.IsRegister());
+ DCHECK(in.Equals(out));
__ negq(out.As<CpuRegister>());
break;
case Primitive::kPrimFloat:
+ DCHECK(in.IsFpuRegister());
+ DCHECK(out.IsFpuRegister());
+ DCHECK(!in.Equals(out));
+ // TODO: Instead of computing negation as a subtraction from
+ // zero, implement it with an exclusive or with value 0x80000000
+ // (mask for bit 31, representing the sign of a single-precision
+ // floating-point number), fetched from a constant pool:
+ //
+ // xorps out, [RIP:...] // value at RIP is 0x80 00 00 00
+
+ // out = 0
+ __ xorps(out.As<XmmRegister>(), out.As<XmmRegister>());
+ // out = out - in
+ __ subss(out.As<XmmRegister>(), in.As<XmmRegister>());
+ break;
+
case Primitive::kPrimDouble:
- LOG(FATAL) << "Not yet implemented neg type " << neg->GetResultType();
+ DCHECK(in.IsFpuRegister());
+ DCHECK(out.IsFpuRegister());
+ DCHECK(!in.Equals(out));
+ // TODO: Instead of computing negation as a subtraction from
+ // zero, implement it with an exclusive or with value
+ // 0x8000000000000000 (mask for bit 63, representing the sign of
+ // a double-precision floating-point number), fetched from a
+ // constant pool:
+ //
+ // xorpd out, [RIP:...] // value at RIP is 0x80 00 00 00 00 00 00 00
+
+ // out = 0
+ __ xorpd(out.As<XmmRegister>(), out.As<XmmRegister>());
+ // out = out - in
+ __ subsd(out.As<XmmRegister>(), in.As<XmmRegister>());
break;
default:
@@ -1104,6 +1170,92 @@ void InstructionCodeGeneratorX86_64::VisitNeg(HNeg* neg) {
}
}
+void LocationsBuilderX86_64::VisitTypeConversion(HTypeConversion* conversion) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(conversion, LocationSummary::kNoCall);
+ Primitive::Type result_type = conversion->GetResultType();
+ Primitive::Type input_type = conversion->GetInputType();
+ switch (result_type) {
+ case Primitive::kPrimLong:
+ switch (input_type) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ // int-to-long conversion.
+ // TODO: We would benefit from a (to-be-implemented)
+ // Location::RegisterOrStackSlot requirement for this input.
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister());
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type << " to "
+ << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimInt:
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type
+ << " to " << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+}
+
+void InstructionCodeGeneratorX86_64::VisitTypeConversion(HTypeConversion* conversion) {
+ LocationSummary* locations = conversion->GetLocations();
+ Location out = locations->Out();
+ Location in = locations->InAt(0);
+ Primitive::Type result_type = conversion->GetResultType();
+ Primitive::Type input_type = conversion->GetInputType();
+ switch (result_type) {
+ case Primitive::kPrimLong:
+ switch (input_type) {
+        case Primitive::kPrimByte:
+        case Primitive::kPrimShort:
+        case Primitive::kPrimInt:
+          // int-to-long conversion.
+          DCHECK(out.IsRegister());
+          DCHECK(in.IsRegister());
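+          // movsxd sign-extends the 32-bit input register into the
+          // 64-bit output register.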
+ __ movsxd(out.As<CpuRegister>(), in.As<CpuRegister>());
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type << " to "
+ << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimInt:
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type
+ << " to " << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+}
+
void LocationsBuilderX86_64::VisitAdd(HAdd* add) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(add, LocationSummary::kNoCall);
@@ -1310,7 +1462,14 @@ void LocationsBuilderX86_64::VisitDiv(HDiv* div) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall);
switch (div->GetResultType()) {
- case Primitive::kPrimInt:
+ case Primitive::kPrimInt: {
+ locations->SetInAt(0, Location::RegisterLocation(RAX));
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::SameAsFirstInput());
+ // Intel uses edx:eax as the dividend.
+ locations->AddTemp(Location::RegisterLocation(RDX));
+ break;
+ }
case Primitive::kPrimLong: {
      LOG(FATAL) << "Not implemented div type " << div->GetResultType();
break;
@@ -1335,7 +1494,32 @@ void InstructionCodeGeneratorX86_64::VisitDiv(HDiv* div) {
DCHECK(first.Equals(locations->Out()));
switch (div->GetResultType()) {
- case Primitive::kPrimInt:
+ case Primitive::kPrimInt: {
+ CpuRegister first_reg = first.As<CpuRegister>();
+ CpuRegister second_reg = second.As<CpuRegister>();
+ DCHECK_EQ(RAX, first_reg.AsRegister());
+ DCHECK_EQ(RDX, locations->GetTemp(0).As<CpuRegister>().AsRegister());
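+      // Only the low 32 bits participate here: idivl still operates on
+      // edx:eax in 64-bit mode.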
+
+ SlowPathCodeX86_64* slow_path =
+ new (GetGraph()->GetArena()) DivMinusOneSlowPathX86_64(first_reg.AsRegister());
+ codegen_->AddSlowPath(slow_path);
+
+ // 0x80000000/-1 triggers an arithmetic exception!
+      // Dividing by -1 is actually negation, and -0x80000000 = 0x80000000, so
+ // it's safe to just use negl instead of more complex comparisons.
+
+ __ cmpl(second_reg, Immediate(-1));
+ __ j(kEqual, slow_path->GetEntryLabel());
+
+      // edx:eax <- sign extension of eax
+ __ cdq();
+ // eax = quotient, edx = remainder
+ __ idivl(second_reg);
+
+ __ Bind(slow_path->GetExitLabel());
+ break;
+ }
+
case Primitive::kPrimLong: {
      LOG(FATAL) << "Not implemented div type " << div->GetResultType();
break;
@@ -1356,6 +1540,37 @@ void InstructionCodeGeneratorX86_64::VisitDiv(HDiv* div) {
}
}
+void LocationsBuilderX86_64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::Any());
+ if (instruction->HasUses()) {
+ locations->SetOut(Location::SameAsFirstInput());
+ }
+}
+
+void InstructionCodeGeneratorX86_64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
+ SlowPathCodeX86_64* slow_path =
+ new (GetGraph()->GetArena()) DivZeroCheckSlowPathX86_64(instruction);
+ codegen_->AddSlowPath(slow_path);
+
+ LocationSummary* locations = instruction->GetLocations();
+ Location value = locations->InAt(0);
+
+ if (value.IsRegister()) {
+ __ testl(value.As<CpuRegister>(), value.As<CpuRegister>());
+ } else if (value.IsStackSlot()) {
+ __ cmpl(Address(CpuRegister(RSP), value.GetStackIndex()), Immediate(0));
+ } else {
+ DCHECK(value.IsConstant()) << value;
+ if (value.GetConstant()->AsIntConstant()->GetValue() == 0) {
+ __ jmp(slow_path->GetEntryLabel());
+ }
+ return;
+ }
+ __ j(kEqual, slow_path->GetEntryLabel());
+}
+
void LocationsBuilderX86_64::VisitNewInstance(HNewInstance* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
@@ -2369,5 +2584,31 @@ void InstructionCodeGeneratorX86_64::VisitLoadString(HLoadString* load) {
__ Bind(slow_path->GetExitLabel());
}
+void LocationsBuilderX86_64::VisitLoadException(HLoadException* load) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorX86_64::VisitLoadException(HLoadException* load) {
+ Address address = Address::Absolute(
+ Thread::ExceptionOffset<kX86_64WordSize>().Int32Value(), true);
+ __ gs()->movl(load->GetLocations()->Out().As<CpuRegister>(), address);
+ __ gs()->movl(address, Immediate(0));
+}
+
+void LocationsBuilderX86_64::VisitThrow(HThrow* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+}
+
+void InstructionCodeGeneratorX86_64::VisitThrow(HThrow* instruction) {
+ __ gs()->call(
+ Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pDeliverException), true));
+ codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+}
+
} // namespace x86_64
} // namespace art
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 0de304538f..9565b6f876 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -72,10 +72,10 @@ class ParallelMoveResolverX86_64 : public ParallelMoveResolver {
ParallelMoveResolverX86_64(ArenaAllocator* allocator, CodeGeneratorX86_64* codegen)
: ParallelMoveResolver(allocator), codegen_(codegen) {}
- virtual void EmitMove(size_t index) OVERRIDE;
- virtual void EmitSwap(size_t index) OVERRIDE;
- virtual void SpillScratch(int reg) OVERRIDE;
- virtual void RestoreScratch(int reg) OVERRIDE;
+ void EmitMove(size_t index) OVERRIDE;
+ void EmitSwap(size_t index) OVERRIDE;
+ void SpillScratch(int reg) OVERRIDE;
+ void RestoreScratch(int reg) OVERRIDE;
X86_64Assembler* GetAssembler() const;
@@ -98,7 +98,7 @@ class LocationsBuilderX86_64 : public HGraphVisitor {
: HGraphVisitor(graph), codegen_(codegen) {}
#define DECLARE_VISIT_INSTRUCTION(name, super) \
- virtual void Visit##name(H##name* instr);
+ void Visit##name(H##name* instr) OVERRIDE;
FOR_EACH_CONCRETE_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
@@ -118,7 +118,7 @@ class InstructionCodeGeneratorX86_64 : public HGraphVisitor {
InstructionCodeGeneratorX86_64(HGraph* graph, CodeGeneratorX86_64* codegen);
#define DECLARE_VISIT_INSTRUCTION(name, super) \
- virtual void Visit##name(H##name* instr);
+ void Visit##name(H##name* instr) OVERRIDE;
FOR_EACH_CONCRETE_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
@@ -144,30 +144,30 @@ class CodeGeneratorX86_64 : public CodeGenerator {
explicit CodeGeneratorX86_64(HGraph* graph);
virtual ~CodeGeneratorX86_64() {}
- virtual void GenerateFrameEntry() OVERRIDE;
- virtual void GenerateFrameExit() OVERRIDE;
- virtual void Bind(HBasicBlock* block) OVERRIDE;
- virtual void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
- virtual size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
- virtual size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
- virtual size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
- virtual size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+ void GenerateFrameEntry() OVERRIDE;
+ void GenerateFrameExit() OVERRIDE;
+ void Bind(HBasicBlock* block) OVERRIDE;
+ void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
+ size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+ size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+ size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+ size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
- virtual size_t GetWordSize() const OVERRIDE {
+ size_t GetWordSize() const OVERRIDE {
return kX86_64WordSize;
}
- virtual size_t FrameEntrySpillSize() const OVERRIDE;
+ size_t FrameEntrySpillSize() const OVERRIDE;
- virtual HGraphVisitor* GetLocationBuilder() OVERRIDE {
+ HGraphVisitor* GetLocationBuilder() OVERRIDE {
return &location_builder_;
}
- virtual HGraphVisitor* GetInstructionVisitor() OVERRIDE {
+ HGraphVisitor* GetInstructionVisitor() OVERRIDE {
return &instruction_visitor_;
}
- virtual X86_64Assembler* GetAssembler() OVERRIDE {
+ X86_64Assembler* GetAssembler() OVERRIDE {
return &assembler_;
}
@@ -175,14 +175,18 @@ class CodeGeneratorX86_64 : public CodeGenerator {
return &move_resolver_;
}
- virtual Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
+ uintptr_t GetAddressOf(HBasicBlock* block) const OVERRIDE {
+ return GetLabelOf(block)->Position();
+ }
+
+ Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
- virtual void SetupBlockedRegisters() const OVERRIDE;
- virtual Location AllocateFreeRegister(Primitive::Type type) const OVERRIDE;
- virtual void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
- virtual void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
+ void SetupBlockedRegisters() const OVERRIDE;
+ Location AllocateFreeRegister(Primitive::Type type) const OVERRIDE;
+ void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
+ void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
- virtual InstructionSet GetInstructionSet() const OVERRIDE {
+ InstructionSet GetInstructionSet() const OVERRIDE {
return InstructionSet::kX86_64;
}
@@ -198,7 +202,7 @@ class CodeGeneratorX86_64 : public CodeGenerator {
return block_labels_.GetRawStorage() + block->GetBlockId();
}
- virtual void Initialize() OVERRIDE {
+ void Initialize() OVERRIDE {
block_labels_.SetSize(GetGraph()->GetBlocks().Size());
}
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index 68fcb25036..ecee44392e 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -31,6 +31,7 @@
#include "prepare_for_register_allocation.h"
#include "register_allocator.h"
#include "ssa_liveness_analysis.h"
+#include "utils.h"
#include "gtest/gtest.h"
@@ -56,24 +57,26 @@ class InternalCodeAllocator : public CodeAllocator {
DISALLOW_COPY_AND_ASSIGN(InternalCodeAllocator);
};
+template <typename Expected>
static void Run(const InternalCodeAllocator& allocator,
const CodeGenerator& codegen,
bool has_result,
- int32_t expected) {
- typedef int32_t (*fptr)();
+ Expected expected) {
+ typedef Expected (*fptr)();
CommonCompilerTest::MakeExecutable(allocator.GetMemory(), allocator.GetSize());
fptr f = reinterpret_cast<fptr>(allocator.GetMemory());
if (codegen.GetInstructionSet() == kThumb2) {
// For thumb we need the bottom bit set.
f = reinterpret_cast<fptr>(reinterpret_cast<uintptr_t>(f) + 1);
}
- int32_t result = f();
+ Expected result = f();
if (has_result) {
ASSERT_EQ(result, expected);
}
}
-static void RunCodeBaseline(HGraph* graph, bool has_result, int32_t expected) {
+template <typename Expected>
+static void RunCodeBaseline(HGraph* graph, bool has_result, Expected expected) {
InternalCodeAllocator allocator;
x86::CodeGeneratorX86 codegenX86(graph);
@@ -103,11 +106,12 @@ static void RunCodeBaseline(HGraph* graph, bool has_result, int32_t expected) {
}
}
+template <typename Expected>
static void RunCodeOptimized(CodeGenerator* codegen,
HGraph* graph,
std::function<void(HGraph*)> hook_before_codegen,
bool has_result,
- int32_t expected) {
+ Expected expected) {
SsaLivenessAnalysis liveness(*graph, codegen);
liveness.Analyze();
@@ -120,10 +124,11 @@ static void RunCodeOptimized(CodeGenerator* codegen,
Run(allocator, *codegen, has_result, expected);
}
+template <typename Expected>
static void RunCodeOptimized(HGraph* graph,
std::function<void(HGraph*)> hook_before_codegen,
bool has_result,
- int32_t expected) {
+ Expected expected) {
if (kRuntimeISA == kX86) {
x86::CodeGeneratorX86 codegenX86(graph);
RunCodeOptimized(&codegenX86, graph, hook_before_codegen, has_result, expected);
@@ -148,6 +153,18 @@ static void TestCode(const uint16_t* data, bool has_result = false, int32_t expe
RunCodeBaseline(graph, has_result, expected);
}
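+// Variant of TestCode for methods returning a long; the graph builder is
+// given the expected return type (Primitive::kPrimLong) up front.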
+static void TestCodeLong(const uint16_t* data, bool has_result, int64_t expected) {
+ ArenaPool pool;
+ ArenaAllocator arena(&pool);
+ HGraphBuilder builder(&arena, Primitive::kPrimLong);
+ const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
+ HGraph* graph = builder.BuildGraph(*item);
+ ASSERT_NE(graph, nullptr);
+  // Remove suspend checks; they cannot be executed in this context.
+ RemoveSuspendChecks(graph);
+ RunCodeBaseline(graph, has_result, expected);
+}
+
TEST(CodegenTest, ReturnVoid) {
const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(Instruction::RETURN_VOID);
TestCode(data);
@@ -272,8 +289,8 @@ TEST(CodegenTest, ReturnIf2) {
#define NOT_INT_TEST(TEST_NAME, INPUT, EXPECTED_OUTPUT) \
TEST(CodegenTest, TEST_NAME) { \
const int32_t input = INPUT; \
- const uint16_t input_lo = input & 0x0000FFFF; \
- const uint16_t input_hi = input >> 16; \
+ const uint16_t input_lo = Low16Bits(input); \
+ const uint16_t input_hi = High16Bits(input); \
const uint16_t data[] = TWO_REGISTERS_CODE_ITEM( \
Instruction::CONST | 0 << 8, input_lo, input_hi, \
Instruction::NOT_INT | 1 << 8 | 0 << 12 , \
@@ -286,13 +303,65 @@ NOT_INT_TEST(ReturnNotIntMinus2, -2, 1)
NOT_INT_TEST(ReturnNotIntMinus1, -1, 0)
NOT_INT_TEST(ReturnNotInt0, 0, -1)
NOT_INT_TEST(ReturnNotInt1, 1, -2)
-NOT_INT_TEST(ReturnNotIntINT_MIN, -2147483648, 2147483647) // (2^31) - 1
-NOT_INT_TEST(ReturnNotIntINT_MINPlus1, -2147483647, 2147483646) // (2^31) - 2
-NOT_INT_TEST(ReturnNotIntINT_MAXMinus1, 2147483646, -2147483647) // -(2^31) - 1
-NOT_INT_TEST(ReturnNotIntINT_MAX, 2147483647, -2147483648) // -(2^31)
+NOT_INT_TEST(ReturnNotIntINT32_MIN, -2147483648, 2147483647) // (2^31) - 1
+NOT_INT_TEST(ReturnNotIntINT32_MINPlus1, -2147483647, 2147483646) // (2^31) - 2
+NOT_INT_TEST(ReturnNotIntINT32_MAXMinus1, 2147483646, -2147483647) // -(2^31) - 1
+NOT_INT_TEST(ReturnNotIntINT32_MAX, 2147483647, -2147483648) // -(2^31)
#undef NOT_INT_TEST
+// Exercise bit-wise (one's complement) not-long instruction.
+#define NOT_LONG_TEST(TEST_NAME, INPUT, EXPECTED_OUTPUT) \
+TEST(CodegenTest, TEST_NAME) { \
+ const int64_t input = INPUT; \
+ const uint16_t word0 = Low16Bits(Low32Bits(input)); /* LSW. */ \
+ const uint16_t word1 = High16Bits(Low32Bits(input)); \
+ const uint16_t word2 = Low16Bits(High32Bits(input)); \
+ const uint16_t word3 = High16Bits(High32Bits(input)); /* MSW. */ \
+ const uint16_t data[] = FOUR_REGISTERS_CODE_ITEM( \
+ Instruction::CONST_WIDE | 0 << 8, word0, word1, word2, word3, \
+ Instruction::NOT_LONG | 2 << 8 | 0 << 12, \
+ Instruction::RETURN_WIDE | 2 << 8); \
+ \
+ TestCodeLong(data, true, EXPECTED_OUTPUT); \
+}
+
+NOT_LONG_TEST(ReturnNotLongMinus2, INT64_C(-2), INT64_C(1))
+NOT_LONG_TEST(ReturnNotLongMinus1, INT64_C(-1), INT64_C(0))
+NOT_LONG_TEST(ReturnNotLong0, INT64_C(0), INT64_C(-1))
+NOT_LONG_TEST(ReturnNotLong1, INT64_C(1), INT64_C(-2))
+
+NOT_LONG_TEST(ReturnNotLongINT32_MIN,
+ INT64_C(-2147483648),
+ INT64_C(2147483647)) // (2^31) - 1
+NOT_LONG_TEST(ReturnNotLongINT32_MINPlus1,
+ INT64_C(-2147483647),
+ INT64_C(2147483646)) // (2^31) - 2
+NOT_LONG_TEST(ReturnNotLongINT32_MAXMinus1,
+ INT64_C(2147483646),
+ INT64_C(-2147483647)) // -(2^31) - 1
+NOT_LONG_TEST(ReturnNotLongINT32_MAX,
+ INT64_C(2147483647),
+ INT64_C(-2147483648)) // -(2^31)
+
+// Note that the C++ compiler won't accept
+// INT64_C(-9223372036854775808) (that is, INT64_MIN) as a valid
+// int64_t literal, so we use INT64_C(-9223372036854775807)-1 instead.
+NOT_LONG_TEST(ReturnNotLongINT64_MIN,
+              INT64_C(-9223372036854775807)-1,
+              INT64_C(9223372036854775807))  // (2^63) - 1
+NOT_LONG_TEST(ReturnNotLongINT64_MINPlus1,
+              INT64_C(-9223372036854775807),
+              INT64_C(9223372036854775806))  // (2^63) - 2
+NOT_LONG_TEST(ReturnNotLongINT64_MAXMinus1,
+              INT64_C(9223372036854775806),
+              INT64_C(-9223372036854775807))  // -(2^63) - 1
+NOT_LONG_TEST(ReturnNotLongINT64_MAX,
+              INT64_C(9223372036854775807),
+              INT64_C(-9223372036854775807)-1)  // -(2^63)
+
+#undef NOT_LONG_TEST
+
TEST(CodegenTest, ReturnAdd1) {
const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 3 << 12 | 0,
@@ -543,4 +612,31 @@ TEST(CodegenTest, MaterializedCondition2) {
}
}
+#if defined(__aarch64__)
+TEST(CodegenTest, DISABLED_ReturnDivIntLit8) {
+#else
+TEST(CodegenTest, ReturnDivIntLit8) {
+#endif
+ const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ Instruction::CONST_4 | 4 << 12 | 0 << 8,
+ Instruction::DIV_INT_LIT8, 3 << 8 | 0,
+ Instruction::RETURN);
+
+ TestCode(data, true, 1);
+}
+
+#if defined(__aarch64__)
+TEST(CodegenTest, DISABLED_ReturnDivInt2Addr) {
+#else
+TEST(CodegenTest, ReturnDivInt2Addr) {
+#endif
+ const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ Instruction::CONST_4 | 4 << 12 | 0,
+ Instruction::CONST_4 | 2 << 12 | 1 << 8,
+ Instruction::DIV_INT_2ADDR | 1 << 12,
+ Instruction::RETURN);
+
+ TestCode(data, true, 2);
+}
+
} // namespace art
diff --git a/compiler/optimizing/constant_folding_test.cc b/compiler/optimizing/constant_folding_test.cc
index 09bf2c8d7d..856c5165a3 100644
--- a/compiler/optimizing/constant_folding_test.cc
+++ b/compiler/optimizing/constant_folding_test.cc
@@ -332,9 +332,6 @@ TEST(ConstantFolding, IntConstantFoldingOnSubtraction) {
check_after_cf);
}
-#define SIX_REGISTERS_CODE_ITEM(...) \
- { 6, 0, 0, 0, 0, 0, NUM_INSTRUCTIONS(__VA_ARGS__), 0, __VA_ARGS__ }
-
/**
* Tiny three-register-pair program exercising long constant folding
* on addition.
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 79638b3545..37e5e6b9aa 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -292,7 +292,8 @@ class HBasicBlock : public ArenaObject<kArenaAllocMisc> {
block_id_(-1),
dex_pc_(dex_pc),
lifetime_start_(kNoLifetime),
- lifetime_end_(kNoLifetime) {}
+ lifetime_end_(kNoLifetime),
+ is_catch_block_(false) {}
const GrowableArray<HBasicBlock*>& GetPredecessors() const {
return predecessors_;
@@ -450,6 +451,9 @@ class HBasicBlock : public ArenaObject<kArenaAllocMisc> {
uint32_t GetDexPc() const { return dex_pc_; }
+ bool IsCatchBlock() const { return is_catch_block_; }
+ void SetIsCatchBlock() { is_catch_block_ = true; }
+
private:
HGraph* const graph_;
GrowableArray<HBasicBlock*> predecessors_;
@@ -464,6 +468,7 @@ class HBasicBlock : public ArenaObject<kArenaAllocMisc> {
const uint32_t dex_pc_;
size_t lifetime_start_;
size_t lifetime_end_;
+ bool is_catch_block_;
DISALLOW_COPY_AND_ASSIGN(HBasicBlock);
};
@@ -478,6 +483,7 @@ class HBasicBlock : public ArenaObject<kArenaAllocMisc> {
M(Compare, BinaryOperation) \
M(Condition, BinaryOperation) \
M(Div, BinaryOperation) \
+ M(DivZeroCheck, Instruction) \
M(DoubleConstant, Constant) \
M(Equal, Condition) \
M(Exit, Instruction) \
@@ -494,6 +500,7 @@ class HBasicBlock : public ArenaObject<kArenaAllocMisc> {
M(LessThan, Condition) \
M(LessThanOrEqual, Condition) \
M(LoadClass, Instruction) \
+ M(LoadException, Instruction) \
M(LoadLocal, Instruction) \
M(LoadString, Instruction) \
M(Local, Instruction) \
@@ -516,6 +523,8 @@ class HBasicBlock : public ArenaObject<kArenaAllocMisc> {
M(Sub, BinaryOperation) \
M(SuspendCheck, Instruction) \
M(Temporary, Instruction) \
+ M(Throw, Instruction) \
+ M(TypeConversion, Instruction) \
#define FOR_EACH_INSTRUCTION(M) \
FOR_EACH_CONCRETE_INSTRUCTION(M) \
@@ -1050,7 +1059,7 @@ class HReturn : public HTemplateInstruction<1> {
};
// The exit instruction is the only instruction of the exit block.
-// Instructions aborting the method (HTrow and HReturn) must branch to the
+// Instructions aborting the method (HThrow and HReturn) must branch to the
// exit block.
class HExit : public HTemplateInstruction<0> {
public:
@@ -1069,7 +1078,7 @@ class HGoto : public HTemplateInstruction<0> {
public:
HGoto() : HTemplateInstruction(SideEffects::None()) {}
- virtual bool IsControlFlow() const { return true; }
+ bool IsControlFlow() const OVERRIDE { return true; }
HBasicBlock* GetSuccessor() const {
return GetBlock()->GetSuccessors().Get(0);
@@ -1090,7 +1099,7 @@ class HIf : public HTemplateInstruction<1> {
SetRawInputAt(0, input);
}
- virtual bool IsControlFlow() const { return true; }
+ bool IsControlFlow() const OVERRIDE { return true; }
HBasicBlock* IfTrueSuccessor() const {
return GetBlock()->GetSuccessors().Get(0);
@@ -1713,7 +1722,12 @@ class HDiv : public HBinaryOperation {
HDiv(Primitive::Type result_type, HInstruction* left, HInstruction* right)
: HBinaryOperation(result_type, left, right) {}
- virtual int32_t Evaluate(int32_t x, int32_t y) const { return x / y; }
+ virtual int32_t Evaluate(int32_t x, int32_t y) const {
+ // Our graph structure ensures we never have 0 for `y` during constant folding.
+ DCHECK_NE(y, 0);
+ // Special case -1 to avoid getting a SIGFPE on x86.
+ return (y == -1) ? -x : x / y;
+ }
virtual int64_t Evaluate(int64_t x, int64_t y) const { return x / y; }
DECLARE_INSTRUCTION(Div);
@@ -1722,6 +1736,33 @@ class HDiv : public HBinaryOperation {
DISALLOW_COPY_AND_ASSIGN(HDiv);
};
+class HDivZeroCheck : public HExpression<1> {
+ public:
+ HDivZeroCheck(HInstruction* value, uint32_t dex_pc)
+ : HExpression(value->GetType(), SideEffects::None()), dex_pc_(dex_pc) {
+ SetRawInputAt(0, value);
+ }
+
+ bool CanBeMoved() const OVERRIDE { return true; }
+
+ bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
+ UNUSED(other);
+ return true;
+ }
+
+ bool NeedsEnvironment() const OVERRIDE { return true; }
+ bool CanThrow() const OVERRIDE { return true; }
+
+ uint32_t GetDexPc() const { return dex_pc_; }
+
+ DECLARE_INSTRUCTION(DivZeroCheck);
+
+ private:
+ const uint32_t dex_pc_;
+
+ DISALLOW_COPY_AND_ASSIGN(HDivZeroCheck);
+};
+
// The value of a parameter in this method. Its location depends on
// the calling convention.
class HParameterValue : public HExpression<0> {
@@ -1761,6 +1802,28 @@ class HNot : public HUnaryOperation {
DISALLOW_COPY_AND_ASSIGN(HNot);
};
+class HTypeConversion : public HExpression<1> {
+ public:
+ // Instantiate a type conversion of `input` to `result_type`.
+ HTypeConversion(Primitive::Type result_type, HInstruction* input)
+ : HExpression(result_type, SideEffects::None()) {
+ SetRawInputAt(0, input);
+ DCHECK_NE(input->GetType(), result_type);
+ }
+
+ HInstruction* GetInput() const { return InputAt(0); }
+ Primitive::Type GetInputType() const { return GetInput()->GetType(); }
+ Primitive::Type GetResultType() const { return GetType(); }
+
+ bool CanBeMoved() const OVERRIDE { return true; }
+ bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE { return true; }
+
+ DECLARE_INSTRUCTION(TypeConversion);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HTypeConversion);
+};
+
class HPhi : public HInstruction {
public:
HPhi(ArenaAllocator* arena, uint32_t reg_number, size_t number_of_inputs, Primitive::Type type)
@@ -2228,6 +2291,38 @@ class HStaticFieldSet : public HTemplateInstruction<2> {
DISALLOW_COPY_AND_ASSIGN(HStaticFieldSet);
};
+// Implement the move-exception DEX instruction.
+class HLoadException : public HExpression<0> {
+ public:
+ HLoadException() : HExpression(Primitive::kPrimNot, SideEffects::None()) {}
+
+ DECLARE_INSTRUCTION(LoadException);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HLoadException);
+};
+
+class HThrow : public HTemplateInstruction<1> {
+ public:
+ HThrow(HInstruction* exception, uint32_t dex_pc)
+ : HTemplateInstruction(SideEffects::None()), dex_pc_(dex_pc) {
+ SetRawInputAt(0, exception);
+ }
+
+ bool IsControlFlow() const OVERRIDE { return true; }
+
+ bool NeedsEnvironment() const OVERRIDE { return true; }
+
+ uint32_t GetDexPc() const { return dex_pc_; }
+
+ DECLARE_INSTRUCTION(Throw);
+
+ private:
+ uint32_t dex_pc_;
+
+ DISALLOW_COPY_AND_ASSIGN(HThrow);
+};
+
class MoveOperands : public ArenaObject<kArenaAllocMisc> {
public:
MoveOperands(Location source, Location destination, HInstruction* instruction)
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 08b74c7988..6e3653a359 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -206,6 +206,11 @@ static bool IsInstructionSetSupported(InstructionSet instruction_set) {
|| instruction_set == kX86_64;
}
+static bool CanOptimize(const DexFile::CodeItem& code_item) {
+ // TODO: We currently cannot optimize methods with try/catch.
+ return code_item.tries_size_ == 0;
+}
+
CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_item,
uint32_t access_flags,
InvokeType invoke_type,
@@ -264,7 +269,9 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite
CodeVectorAllocator allocator;
- if (run_optimizations_ && RegisterAllocator::CanAllocateRegistersFor(*graph, instruction_set)) {
+ if (run_optimizations_
+ && CanOptimize(*code_item)
+ && RegisterAllocator::CanAllocateRegistersFor(*graph, instruction_set)) {
optimized_compiled_methods_++;
graph->BuildDominatorTree();
graph->TransformToSSA();
@@ -315,17 +322,19 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite
unoptimized_compiled_methods_++;
codegen->CompileBaseline(&allocator);
- // Run these phases to get some test coverage.
- graph->BuildDominatorTree();
- graph->TransformToSSA();
- visualizer.DumpGraph("ssa");
- graph->FindNaturalLoops();
- SsaRedundantPhiElimination(graph).Run();
- SsaDeadPhiElimination(graph).Run();
- GlobalValueNumberer(graph->GetArena(), graph).Run();
- SsaLivenessAnalysis liveness(*graph, codegen);
- liveness.Analyze();
- visualizer.DumpGraph(kLivenessPassName);
+ if (CanOptimize(*code_item)) {
+ // Run these phases to get some test coverage.
+ graph->BuildDominatorTree();
+ graph->TransformToSSA();
+ visualizer.DumpGraph("ssa");
+ graph->FindNaturalLoops();
+ SsaRedundantPhiElimination(graph).Run();
+ SsaDeadPhiElimination(graph).Run();
+ GlobalValueNumberer(graph->GetArena(), graph).Run();
+ SsaLivenessAnalysis liveness(*graph, codegen);
+ liveness.Analyze();
+ visualizer.DumpGraph(kLivenessPassName);
+ }
std::vector<uint8_t> mapping_table;
SrcMap src_mapping_table;
diff --git a/compiler/optimizing/optimizing_unit_test.h b/compiler/optimizing/optimizing_unit_test.h
index aae7f9b95e..c4106b72b5 100644
--- a/compiler/optimizing/optimizing_unit_test.h
+++ b/compiler/optimizing/optimizing_unit_test.h
@@ -30,17 +30,17 @@ namespace art {
#define NUM_INSTRUCTIONS(...) \
(sizeof((uint16_t[]) {__VA_ARGS__}) /sizeof(uint16_t))
-#define ZERO_REGISTER_CODE_ITEM(...) \
- { 0, 0, 0, 0, 0, 0, NUM_INSTRUCTIONS(__VA_ARGS__), 0, __VA_ARGS__ }
+#define N_REGISTERS_CODE_ITEM(NUM_REGS, ...) \
+ { NUM_REGS, 0, 0, 0, 0, 0, NUM_INSTRUCTIONS(__VA_ARGS__), 0, __VA_ARGS__ }
-#define ONE_REGISTER_CODE_ITEM(...) \
- { 1, 0, 0, 0, 0, 0, NUM_INSTRUCTIONS(__VA_ARGS__), 0, __VA_ARGS__ }
+#define ZERO_REGISTER_CODE_ITEM(...) N_REGISTERS_CODE_ITEM(0, __VA_ARGS__)
+#define ONE_REGISTER_CODE_ITEM(...) N_REGISTERS_CODE_ITEM(1, __VA_ARGS__)
+#define TWO_REGISTERS_CODE_ITEM(...) N_REGISTERS_CODE_ITEM(2, __VA_ARGS__)
+#define THREE_REGISTERS_CODE_ITEM(...) N_REGISTERS_CODE_ITEM(3, __VA_ARGS__)
+#define FOUR_REGISTERS_CODE_ITEM(...) N_REGISTERS_CODE_ITEM(4, __VA_ARGS__)
+#define FIVE_REGISTERS_CODE_ITEM(...) N_REGISTERS_CODE_ITEM(5, __VA_ARGS__)
+#define SIX_REGISTERS_CODE_ITEM(...) N_REGISTERS_CODE_ITEM(6, __VA_ARGS__)
-#define TWO_REGISTERS_CODE_ITEM(...) \
- { 2, 0, 0, 0, 0, 0, NUM_INSTRUCTIONS(__VA_ARGS__), 0, __VA_ARGS__ }
-
-#define THREE_REGISTERS_CODE_ITEM(...) \
- { 3, 0, 0, 0, 0, 0, NUM_INSTRUCTIONS(__VA_ARGS__), 0, __VA_ARGS__ }
LiveInterval* BuildInterval(const size_t ranges[][2],
size_t number_of_ranges,
diff --git a/compiler/optimizing/prepare_for_register_allocation.cc b/compiler/optimizing/prepare_for_register_allocation.cc
index c4db840f33..7186dbe85e 100644
--- a/compiler/optimizing/prepare_for_register_allocation.cc
+++ b/compiler/optimizing/prepare_for_register_allocation.cc
@@ -34,6 +34,10 @@ void PrepareForRegisterAllocation::VisitNullCheck(HNullCheck* check) {
check->ReplaceWith(check->InputAt(0));
}
+void PrepareForRegisterAllocation::VisitDivZeroCheck(HDivZeroCheck* check) {
+ check->ReplaceWith(check->InputAt(0));
+}
+
void PrepareForRegisterAllocation::VisitBoundsCheck(HBoundsCheck* check) {
check->ReplaceWith(check->InputAt(0));
}
diff --git a/compiler/optimizing/prepare_for_register_allocation.h b/compiler/optimizing/prepare_for_register_allocation.h
index 3e63ecb4f4..0fdb65ffe0 100644
--- a/compiler/optimizing/prepare_for_register_allocation.h
+++ b/compiler/optimizing/prepare_for_register_allocation.h
@@ -34,6 +34,7 @@ class PrepareForRegisterAllocation : public HGraphDelegateVisitor {
private:
virtual void VisitNullCheck(HNullCheck* check) OVERRIDE;
+ virtual void VisitDivZeroCheck(HDivZeroCheck* check) OVERRIDE;
virtual void VisitBoundsCheck(HBoundsCheck* check) OVERRIDE;
virtual void VisitClinitCheck(HClinitCheck* check) OVERRIDE;
virtual void VisitCondition(HCondition* condition) OVERRIDE;
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index 2a9c88506d..0745f9c5da 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -257,7 +257,13 @@ void RegisterAllocator::ProcessInstruction(HInstruction* instruction) {
//
// The backwards walking ensures the ranges are ordered on increasing start positions.
Location output = locations->Out();
- if (output.IsRegister() || output.IsFpuRegister()) {
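+  // When the output must be the same as the first input and that input is
+  // pinned to a register, the interval starts in that register as well.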
+ if (output.IsUnallocated() && output.GetPolicy() == Location::kSameAsFirstInput) {
+ Location first = locations->InAt(0);
+ if (first.IsRegister() || first.IsFpuRegister()) {
+ current->SetFrom(position + 1);
+ current->SetRegister(first.reg());
+ }
+ } else if (output.IsRegister() || output.IsFpuRegister()) {
// Shift the interval's start by one to account for the blocked register.
current->SetFrom(position + 1);
current->SetRegister(output.reg());
@@ -1172,7 +1178,11 @@ void RegisterAllocator::Resolve() {
if (location.IsUnallocated()) {
if (location.GetPolicy() == Location::kSameAsFirstInput) {
- locations->SetInAt(0, source);
+ if (locations->InAt(0).IsUnallocated()) {
+ locations->SetInAt(0, source);
+ } else {
+ DCHECK(locations->InAt(0).Equals(source));
+ }
}
locations->SetOut(source);
} else {
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index 6845deacb9..9b1a121fbe 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -696,4 +696,45 @@ TEST(RegisterAllocatorTest, SameAsFirstInputHint) {
}
}
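+// Build a minimal graph containing a single HDiv, used below to check the
+// allocator's handling of a fixed-register input with a same-as-first-input
+// output.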
+static HGraph* BuildDiv(ArenaAllocator* allocator,
+ HInstruction** div) {
+ HGraph* graph = new (allocator) HGraph(allocator);
+ HBasicBlock* entry = new (allocator) HBasicBlock(graph);
+ graph->AddBlock(entry);
+ graph->SetEntryBlock(entry);
+ HInstruction* first = new (allocator) HParameterValue(0, Primitive::kPrimInt);
+ HInstruction* second = new (allocator) HParameterValue(0, Primitive::kPrimInt);
+ entry->AddInstruction(first);
+ entry->AddInstruction(second);
+
+ HBasicBlock* block = new (allocator) HBasicBlock(graph);
+ graph->AddBlock(block);
+ entry->AddSuccessor(block);
+
+ *div = new (allocator) HDiv(Primitive::kPrimInt, first, second);
+ block->AddInstruction(*div);
+
+ block->AddInstruction(new (allocator) HExit());
+ return graph;
+}
+
+TEST(RegisterAllocatorTest, ExpectedExactInRegisterAndSameOutputHint) {
+ ArenaPool pool;
+ ArenaAllocator allocator(&pool);
+  HInstruction* div;
+
+ {
+ HGraph* graph = BuildDiv(&allocator, &div);
+ x86::CodeGeneratorX86 codegen(graph);
+ SsaLivenessAnalysis liveness(*graph, &codegen);
+ liveness.Analyze();
+
+ RegisterAllocator register_allocator(&allocator, &codegen, liveness);
+ register_allocator.AllocateRegisters();
+
+    // div on x86 requires its first input in eax and its output to be the same as the first input.
+ ASSERT_EQ(div->GetLiveInterval()->GetRegister(), 0);
+ }
+}
+
} // namespace art
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index 2e951ddaea..5b706584bd 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -351,6 +351,22 @@ void X86_64Assembler::movss(XmmRegister dst, XmmRegister src) {
}
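+// MOVSXD (opcode 0x63 with a REX.W prefix) sign-extends a 32-bit source
+// into a 64-bit destination register.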
+void X86_64Assembler::movsxd(CpuRegister dst, CpuRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitRex64(dst);
+ EmitUint8(0x63);
+ EmitRegisterOperand(dst.LowBits(), src.LowBits());
+}
+
+
+void X86_64Assembler::movsxd(CpuRegister dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitRex64(dst);
+ EmitUint8(0x63);
+ EmitOperand(dst.LowBits(), src);
+}
+
+
void X86_64Assembler::movd(XmmRegister dst, CpuRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x66);
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index 5b16f0891c..42d774a558 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -300,6 +300,9 @@ class X86_64Assembler FINAL : public Assembler {
void movss(const Address& dst, XmmRegister src);
void movss(XmmRegister dst, XmmRegister src);
+ void movsxd(CpuRegister dst, CpuRegister src);
+ void movsxd(CpuRegister dst, const Address& src);
+
void movd(XmmRegister dst, CpuRegister src);
void movd(CpuRegister dst, XmmRegister src);
@@ -684,7 +687,11 @@ inline void X86_64Assembler::EmitInt32(int32_t value) {
}
inline void X86_64Assembler::EmitInt64(int64_t value) {
- buffer_.Emit<int64_t>(value);
+ // Write this 64-bit value as two 32-bit words for alignment reasons
+  // (this is essential when running on ARM, which does not allow
+ // 64-bit unaligned accesses). We assume little-endianness here.
+ EmitInt32(Low32Bits(value));
+ EmitInt32(High32Bits(value));
}
inline void X86_64Assembler::EmitRegisterOperand(uint8_t rm, uint8_t reg) {
diff --git a/runtime/arch/arm/asm_support_arm.h b/runtime/arch/arm/asm_support_arm.h
index 8cd2a27b9b..1fa566bb5c 100644
--- a/runtime/arch/arm/asm_support_arm.h
+++ b/runtime/arch/arm/asm_support_arm.h
@@ -24,6 +24,6 @@
#define FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE 112
// Flag for enabling R4 optimization in arm runtime
-#define ARM_R4_SUSPEND_FLAG
+// #define ARM_R4_SUSPEND_FLAG
#endif // ART_RUNTIME_ARCH_ARM_ASM_SUPPORT_ARM_H_
diff --git a/runtime/arch/arm/memcmp16_arm.S b/runtime/arch/arm/memcmp16_arm.S
index 37621946ef..b623a2a2cb 100644
--- a/runtime/arch/arm/memcmp16_arm.S
+++ b/runtime/arch/arm/memcmp16_arm.S
@@ -65,7 +65,7 @@ ARM_ENTRY __memcmp16
/* save registers */
-0: stmfd sp!, {r4, lr}
+0: push {r4, lr}
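+        /* push is the UAL alias of stmfd sp!; likewise pop below for ldmfd. */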
.cfi_def_cfa_offset 8
.cfi_rel_offset r4, 0
.cfi_rel_offset lr, 4
@@ -79,7 +79,7 @@ ARM_ENTRY __memcmp16
sub r2, r2, #1
subs r0, r0, ip
/* restore registers and return */
- ldmnefd sp!, {r4, lr}
+ popne {r4, lr}
bxne lr
@@ -110,25 +110,25 @@ ARM_ENTRY __memcmp16
eors r0, r0, ip
ldreq r0, [r3], #4
ldreq ip, [r1, #4]!
- eoreqs r0, r0, lr
+ eorseq r0, r0, lr
ldreq r0, [r3], #4
ldreq lr, [r1, #4]!
- eoreqs r0, r0, ip
+ eorseq r0, r0, ip
ldreq r0, [r3], #4
ldreq ip, [r1, #4]!
- eoreqs r0, r0, lr
+ eorseq r0, r0, lr
ldreq r0, [r3], #4
ldreq lr, [r1, #4]!
- eoreqs r0, r0, ip
+ eorseq r0, r0, ip
ldreq r0, [r3], #4
ldreq ip, [r1, #4]!
- eoreqs r0, r0, lr
+ eorseq r0, r0, lr
ldreq r0, [r3], #4
ldreq lr, [r1, #4]!
- eoreqs r0, r0, ip
+ eorseq r0, r0, ip
ldreq r0, [r3], #4
ldreq ip, [r1, #4]!
- eoreqs r0, r0, lr
+ eorseq r0, r0, lr
bne 2f
subs r2, r2, #16
bhs 0b
@@ -150,18 +150,24 @@ ARM_ENTRY __memcmp16
bne 8f
/* restore registers and return */
mov r0, #0
- ldmfd sp!, {r4, lr}
+ pop {r4, lr}
+ .cfi_restore r4
+ .cfi_restore lr
+ .cfi_adjust_cfa_offset -8
bx lr
2: /* the last 2 words are different, restart them */
ldrh r0, [r3, #-4]
ldrh ip, [r1, #-4]
subs r0, r0, ip
- ldreqh r0, [r3, #-2]
- ldreqh ip, [r1, #-2]
- subeqs r0, r0, ip
+ ldrheq r0, [r3, #-2]
+ ldrheq ip, [r1, #-2]
+ subseq r0, r0, ip
/* restore registers and return */
- ldmfd sp!, {r4, lr}
+ pop {r4, lr}
+ .cfi_restore r4
+ .cfi_restore lr
+ .cfi_adjust_cfa_offset -8
bx lr
/* process the last few words */
@@ -173,7 +179,10 @@ ARM_ENTRY __memcmp16
bne 8b
9: /* restore registers and return */
- ldmfd sp!, {r4, lr}
+ pop {r4, lr}
+ .cfi_restore r4
+ .cfi_restore lr
+ .cfi_adjust_cfa_offset -8
bx lr
@@ -196,17 +205,17 @@ ARM_ENTRY __memcmp16
ldreq lr, [r1], #4
ldreq r0, [r3], #4
orreq ip, ip, lr, lsl #16
- eoreqs r0, r0, ip
+ eorseq r0, r0, ip
moveq ip, lr, lsr #16
ldreq lr, [r1], #4
ldreq r0, [r3], #4
orreq ip, ip, lr, lsl #16
- eoreqs r0, r0, ip
+ eorseq r0, r0, ip
moveq ip, lr, lsr #16
ldreq lr, [r1], #4
ldreq r0, [r3], #4
orreq ip, ip, lr, lsl #16
- eoreqs r0, r0, ip
+ eorseq r0, r0, ip
bne 7f
subs r2, r2, #8
bhs 6b
diff --git a/runtime/mirror/art_method.cc b/runtime/mirror/art_method.cc
index acd104383c..a742aaad9a 100644
--- a/runtime/mirror/art_method.cc
+++ b/runtime/mirror/art_method.cc
@@ -197,7 +197,7 @@ uint32_t ArtMethod::ToDexPc(const uintptr_t pc, bool abort_on_failure) {
return DexFile::kDexNoIndex;
}
-uintptr_t ArtMethod::ToNativeQuickPc(const uint32_t dex_pc) {
+uintptr_t ArtMethod::ToNativeQuickPc(const uint32_t dex_pc, bool abort_on_failure) {
const void* entry_point = GetQuickOatEntryPoint();
MappingTable table(
entry_point != nullptr ? GetMappingTable(EntryPointToCodePointer(entry_point)) : nullptr);
@@ -219,9 +219,11 @@ uintptr_t ArtMethod::ToNativeQuickPc(const uint32_t dex_pc) {
return reinterpret_cast<uintptr_t>(entry_point) + cur.NativePcOffset();
}
}
- LOG(FATAL) << "Failed to find native offset for dex pc 0x" << std::hex << dex_pc
- << " in " << PrettyMethod(this);
- return 0;
+ if (abort_on_failure) {
+ LOG(FATAL) << "Failed to find native offset for dex pc 0x" << std::hex << dex_pc
+ << " in " << PrettyMethod(this);
+ }
+ return UINTPTR_MAX;
}
uint32_t ArtMethod::FindCatchBlock(Handle<ArtMethod> h_this, Handle<Class> exception_type,
diff --git a/runtime/mirror/art_method.h b/runtime/mirror/art_method.h
index 08c099629c..d92d00a8cb 100644
--- a/runtime/mirror/art_method.h
+++ b/runtime/mirror/art_method.h
@@ -453,7 +453,8 @@ class MANAGED ArtMethod FINAL : public Object {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Converts a dex PC to a native PC.
- uintptr_t ToNativeQuickPc(const uint32_t dex_pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ uintptr_t ToNativeQuickPc(const uint32_t dex_pc, bool abort_on_failure = true)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Find the catch block for the given exception type and dex_pc. When a catch block is found,
// indicates whether the found catch block is responsible for clearing the exception or whether
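The new abort_on_failure flag lets callers probe for a native PC without bringing the runtime down when no mapping entry exists. A minimal caller sketch (hypothetical variable names; it mirrors the CHECK_REGS_CONTAIN_REFS macro updated in the test below):

    // Tolerate a missing mapping entry instead of hitting LOG(FATAL).
    uintptr_t native_quick_pc = method->ToNativeQuickPc(dex_pc, false /* abort_on_failure */);
    if (native_quick_pc != UINTPTR_MAX) {
      // A mapping entry was found; compute the offset into the compiled code.
      uintptr_t native_pc_offset = method->NativeQuickPcOffset(native_quick_pc);
      // ... e.g. check the reference map at this offset.
    }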
diff --git a/runtime/oat.cc b/runtime/oat.cc
index d39b55cfcb..6bda6beac4 100644
--- a/runtime/oat.cc
+++ b/runtime/oat.cc
@@ -23,7 +23,7 @@
namespace art {
const uint8_t OatHeader::kOatMagic[] = { 'o', 'a', 't', '\n' };
-const uint8_t OatHeader::kOatVersion[] = { '0', '4', '4', '\0' };
+const uint8_t OatHeader::kOatVersion[] = { '0', '4', '5', '\0' };
static size_t ComputeOatHeaderSize(const SafeMap<std::string, std::string>* variable_data) {
size_t estimate = 0U;
diff --git a/test/004-ReferenceMap/stack_walk_refmap_jni.cc b/test/004-ReferenceMap/stack_walk_refmap_jni.cc
index 291b45f358..631c4be90f 100644
--- a/test/004-ReferenceMap/stack_walk_refmap_jni.cc
+++ b/test/004-ReferenceMap/stack_walk_refmap_jni.cc
@@ -19,10 +19,13 @@
namespace art {
-#define CHECK_REGS_CONTAIN_REFS(native_pc_offset, ...) do { \
+#define CHECK_REGS_CONTAIN_REFS(dex_pc, abort_if_not_found, ...) do { \
int t[] = {__VA_ARGS__}; \
int t_size = sizeof(t) / sizeof(*t); \
- CheckReferences(t, t_size, m->NativeQuickPcOffset(m->ToNativeQuickPc(native_pc_offset))); \
+ uintptr_t native_quick_pc = m->ToNativeQuickPc(dex_pc, abort_if_not_found); \
+ if (native_quick_pc != UINTPTR_MAX) { \
+ CheckReferences(t, t_size, m->NativeQuickPcOffset(native_quick_pc)); \
+ } \
} while (false);
struct ReferenceMap2Visitor : public CheckReferenceMapVisitor {
@@ -40,31 +43,33 @@ struct ReferenceMap2Visitor : public CheckReferenceMapVisitor {
// we know the Dex registers with live reference values. Assert that what we
// find is what is expected.
if (m_name.compare("f") == 0) {
- CHECK_REGS_CONTAIN_REFS(0x03U, 8); // v8: this
- CHECK_REGS_CONTAIN_REFS(0x06U, 8, 1); // v8: this, v1: x
- CHECK_REGS_CONTAIN_REFS(0x08U, 8, 3, 1); // v8: this, v3: y, v1: x
- CHECK_REGS_CONTAIN_REFS(0x0cU, 8, 3, 1); // v8: this, v3: y, v1: x
- CHECK_REGS_CONTAIN_REFS(0x0eU, 8, 3, 1); // v8: this, v3: y, v1: x
- CHECK_REGS_CONTAIN_REFS(0x10U, 8, 3, 1); // v8: this, v3: y, v1: x
+ CHECK_REGS_CONTAIN_REFS(0x03U, true, 8); // v8: this
+ CHECK_REGS_CONTAIN_REFS(0x06U, true, 8, 1); // v8: this, v1: x
+ CHECK_REGS_CONTAIN_REFS(0x08U, true, 8, 3, 1); // v8: this, v3: y, v1: x
+ CHECK_REGS_CONTAIN_REFS(0x0cU, true, 8, 3, 1); // v8: this, v3: y, v1: x
+ CHECK_REGS_CONTAIN_REFS(0x0eU, true, 8, 3, 1); // v8: this, v3: y, v1: x
+ CHECK_REGS_CONTAIN_REFS(0x10U, true, 8, 3, 1); // v8: this, v3: y, v1: x
// v2 is added because of the instruction at DexPC 0024. Merging Object with 0 yields Object. See:
// 0024: move-object v3, v2
// 0025: goto 0013
// Detailed dex instructions for ReferenceMap.java are at the end of this function.
// CHECK_REGS_CONTAIN_REFS(8, 3, 2, 1); // v8: this, v3: y, v2: y, v1: x
- // We eliminate the non-live registers at a return, so only v3 is live:
- CHECK_REGS_CONTAIN_REFS(0x13U); // v3: y
- CHECK_REGS_CONTAIN_REFS(0x18U, 8, 2, 1, 0); // v8: this, v2: y, v1: x, v0: ex
- CHECK_REGS_CONTAIN_REFS(0x1aU, 8, 5, 2, 1, 0); // v8: this, v5: x[1], v2: y, v1: x, v0: ex
- CHECK_REGS_CONTAIN_REFS(0x1dU, 8, 5, 2, 1, 0); // v8: this, v5: x[1], v2: y, v1: x, v0: ex
+ // We eliminate the non-live registers at a return, so only v3 is live.
+ // Note that it is OK for a compiler to not have a dex map at this dex PC because
+ // a return is not a safepoint.
+ CHECK_REGS_CONTAIN_REFS(0x13U, false); // v3: y
+ CHECK_REGS_CONTAIN_REFS(0x18U, true, 8, 2, 1, 0); // v8: this, v2: y, v1: x, v0: ex
+ CHECK_REGS_CONTAIN_REFS(0x1aU, true, 8, 5, 2, 1, 0); // v8: this, v5: x[1], v2: y, v1: x, v0: ex
+ CHECK_REGS_CONTAIN_REFS(0x1dU, true, 8, 5, 2, 1, 0); // v8: this, v5: x[1], v2: y, v1: x, v0: ex
// v5 is removed from the root set because there is a "merge" operation.
// See 0015: if-nez v2, 001f.
- CHECK_REGS_CONTAIN_REFS(0x1fU, 8, 2, 1, 0); // v8: this, v2: y, v1: x, v0: ex
- CHECK_REGS_CONTAIN_REFS(0x21U, 8, 2, 1, 0); // v8: this, v2: y, v1: x, v0: ex
- CHECK_REGS_CONTAIN_REFS(0x27U, 8, 4, 2, 1); // v8: this, v4: ex, v2: y, v1: x
- CHECK_REGS_CONTAIN_REFS(0x29U, 8, 4, 2, 1); // v8: this, v4: ex, v2: y, v1: x
- CHECK_REGS_CONTAIN_REFS(0x2cU, 8, 4, 2, 1); // v8: this, v4: ex, v2: y, v1: x
- CHECK_REGS_CONTAIN_REFS(0x2fU, 8, 4, 3, 2, 1); // v8: this, v4: ex, v3: y, v2: y, v1: x
- CHECK_REGS_CONTAIN_REFS(0x32U, 8, 3, 2, 1, 0); // v8: this, v3: y, v2: y, v1: x, v0: ex
+ CHECK_REGS_CONTAIN_REFS(0x1fU, true, 8, 2, 1, 0); // v8: this, v2: y, v1: x, v0: ex
+ CHECK_REGS_CONTAIN_REFS(0x21U, true, 8, 2, 1, 0); // v8: this, v2: y, v1: x, v0: ex
+ CHECK_REGS_CONTAIN_REFS(0x27U, true, 8, 4, 2, 1); // v8: this, v4: ex, v2: y, v1: x
+ CHECK_REGS_CONTAIN_REFS(0x29U, true, 8, 4, 2, 1); // v8: this, v4: ex, v2: y, v1: x
+ CHECK_REGS_CONTAIN_REFS(0x2cU, true, 8, 4, 2, 1); // v8: this, v4: ex, v2: y, v1: x
+ CHECK_REGS_CONTAIN_REFS(0x2fU, true, 8, 4, 3, 2, 1); // v8: this, v4: ex, v3: y, v2: y, v1: x
+ CHECK_REGS_CONTAIN_REFS(0x32U, true, 8, 3, 2, 1, 0); // v8: this, v3: y, v2: y, v1: x, v0: ex
}
return true;
diff --git a/test/401-optimizing-compiler/src/Main.java b/test/401-optimizing-compiler/src/Main.java
index 07c407b565..7c3fd25ea7 100644
--- a/test/401-optimizing-compiler/src/Main.java
+++ b/test/401-optimizing-compiler/src/Main.java
@@ -94,6 +94,14 @@ public class Main {
exception = e;
}
+ // Test that we do NPE checks on array length.
+ exception = null;
+ try {
+ $opt$ArrayLengthOfNull(null);
+ } catch (NullPointerException e) {
+ exception = e;
+ }
+
if (exception == null) {
throw new Error("Missing NullPointerException");
}
@@ -218,5 +226,9 @@ public class Main {
return 42;
}
+ public static int $opt$ArrayLengthOfNull(int[] array) {
+ return array.length;
+ }
+
Object o;
}
diff --git a/test/415-optimizing-arith-neg/src/Main.java b/test/415-optimizing-arith-neg/src/Main.java
index b21b998235..e2850ca760 100644
--- a/test/415-optimizing-arith-neg/src/Main.java
+++ b/test/415-optimizing-arith-neg/src/Main.java
@@ -18,34 +18,61 @@
// it does compile the method.
public class Main {
- public static void expectEquals(int expected, int result) {
+ public static void assertEquals(int expected, int result) {
if (expected != result) {
throw new Error("Expected: " + expected + ", found: " + result);
}
}
- public static void expectEquals(long expected, long result) {
+ public static void assertEquals(long expected, long result) {
if (expected != result) {
throw new Error("Expected: " + expected + ", found: " + result);
}
}
+ public static void assertEquals(float expected, float result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void assertEquals(double expected, double result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void assertIsNaN(float result) {
+ if (!Float.isNaN(result)) {
+ throw new Error("Expected NaN: " + result);
+ }
+ }
+
+ public static void assertIsNaN(double result) {
+ if (!Double.isNaN(result)) {
+ throw new Error("Expected NaN: " + result);
+ }
+ }
+
public static void main(String[] args) {
negInt();
$opt$InplaceNegOneInt(1);
negLong();
$opt$InplaceNegOneLong(1L);
+
+ negFloat();
+ negDouble();
}
private static void negInt() {
- expectEquals(-1, $opt$NegInt(1));
- expectEquals(1, $opt$NegInt(-1));
- expectEquals(0, $opt$NegInt(0));
- expectEquals(51, $opt$NegInt(-51));
- expectEquals(-51, $opt$NegInt(51));
- expectEquals(2147483647, $opt$NegInt(-2147483647)); // (2^31 - 1)
- expectEquals(-2147483647, $opt$NegInt(2147483647)); // -(2^31 - 1)
+ assertEquals(-1, $opt$NegInt(1));
+ assertEquals(1, $opt$NegInt(-1));
+ assertEquals(0, $opt$NegInt(0));
+ assertEquals(51, $opt$NegInt(-51));
+ assertEquals(-51, $opt$NegInt(51));
+ assertEquals(2147483647, $opt$NegInt(-2147483647)); // (2^31 - 1)
+ assertEquals(-2147483647, $opt$NegInt(2147483647)); // -(2^31 - 1)
// From the Java SE 7 Edition specification:
// http://docs.oracle.com/javase/specs/jls/se7/html/jls-15.html#jls-15.15.4
//
@@ -56,36 +83,84 @@ public class Main {
// int or long results in that same maximum negative number.
// Overflow occurs in this case, but no exception is thrown.
// For all integer values x, -x equals (~x)+1.''
- expectEquals(-2147483648, $opt$NegInt(-2147483648)); // -(2^31)
+ assertEquals(-2147483648, $opt$NegInt(-2147483648)); // -(2^31)
}
private static void $opt$InplaceNegOneInt(int a) {
a = -a;
- expectEquals(-1, a);
+ assertEquals(-1, a);
}
private static void negLong() {
- expectEquals(-1L, $opt$NegLong(1L));
- expectEquals(1L, $opt$NegLong(-1L));
- expectEquals(0L, $opt$NegLong(0L));
- expectEquals(51L, $opt$NegLong(-51L));
- expectEquals(-51L, $opt$NegLong(51L));
-
- expectEquals(2147483647L, $opt$NegLong(-2147483647L)); // (2^31 - 1)
- expectEquals(-2147483647L, $opt$NegLong(2147483647L)); // -(2^31 - 1)
- expectEquals(2147483648L, $opt$NegLong(-2147483648L)); // 2^31
- expectEquals(-2147483648L, $opt$NegLong(2147483648L)); // -(2^31)
-
- expectEquals(9223372036854775807L, $opt$NegLong(-9223372036854775807L)); // (2^63 - 1)
- expectEquals(-9223372036854775807L, $opt$NegLong(9223372036854775807L)); // -(2^63 - 1)
+ assertEquals(-1L, $opt$NegLong(1L));
+ assertEquals(1L, $opt$NegLong(-1L));
+ assertEquals(0L, $opt$NegLong(0L));
+ assertEquals(51L, $opt$NegLong(-51L));
+ assertEquals(-51L, $opt$NegLong(51L));
+
+ assertEquals(2147483647L, $opt$NegLong(-2147483647L)); // (2^31 - 1)
+ assertEquals(-2147483647L, $opt$NegLong(2147483647L)); // -(2^31 - 1)
+ assertEquals(2147483648L, $opt$NegLong(-2147483648L)); // 2^31
+ assertEquals(-2147483648L, $opt$NegLong(2147483648L)); // -(2^31)
+
+ assertEquals(9223372036854775807L, $opt$NegLong(-9223372036854775807L)); // (2^63 - 1)
+ assertEquals(-9223372036854775807L, $opt$NegLong(9223372036854775807L)); // -(2^63 - 1)
// See remark regarding the negation of the maximum negative
// (long) value in negInt().
- expectEquals(-9223372036854775808L, $opt$NegLong(-9223372036854775808L)); // -(2^63)
+ assertEquals(-9223372036854775808L, $opt$NegLong(-9223372036854775808L)); // -(2^63)
}
private static void $opt$InplaceNegOneLong(long a) {
a = -a;
- expectEquals(-1L, a);
+ assertEquals(-1L, a);
+ }
+
+ private static void negFloat() {
+ assertEquals(-1F, $opt$NegFloat(1F));
+ assertEquals(1F, $opt$NegFloat(-1F));
+ assertEquals(0F, $opt$NegFloat(0F));
+ assertEquals(51F, $opt$NegFloat(-51F));
+ assertEquals(-51F, $opt$NegFloat(51F));
+
+ assertEquals(-0.1F, $opt$NegFloat(0.1F));
+ assertEquals(0.1F, $opt$NegFloat(-0.1F));
+ assertEquals(343597.38362F, $opt$NegFloat(-343597.38362F));
+ assertEquals(-343597.38362F, $opt$NegFloat(343597.38362F));
+
+ assertEquals(-Float.MIN_NORMAL, $opt$NegFloat(Float.MIN_NORMAL));
+ assertEquals(Float.MIN_NORMAL, $opt$NegFloat(-Float.MIN_NORMAL));
+ assertEquals(-Float.MIN_VALUE, $opt$NegFloat(Float.MIN_VALUE));
+ assertEquals(Float.MIN_VALUE, $opt$NegFloat(-Float.MIN_VALUE));
+ assertEquals(-Float.MAX_VALUE, $opt$NegFloat(Float.MAX_VALUE));
+ assertEquals(Float.MAX_VALUE, $opt$NegFloat(-Float.MAX_VALUE));
+
+ assertEquals(Float.NEGATIVE_INFINITY, $opt$NegFloat(Float.POSITIVE_INFINITY));
+ assertEquals(Float.POSITIVE_INFINITY, $opt$NegFloat(Float.NEGATIVE_INFINITY));
+ assertIsNaN($opt$NegFloat(Float.NaN));
+ }
+
+ private static void negDouble() {
+ assertEquals(-1D, $opt$NegDouble(1D));
+ assertEquals(1D, $opt$NegDouble(-1D));
+ assertEquals(0D, $opt$NegDouble(0D));
+ assertEquals(51D, $opt$NegDouble(-51D));
+ assertEquals(-51D, $opt$NegDouble(51D));
+
+ assertEquals(-0.1D, $opt$NegDouble(0.1D));
+ assertEquals(0.1D, $opt$NegDouble(-0.1D));
+ assertEquals(343597.38362D, $opt$NegDouble(-343597.38362D));
+ assertEquals(-343597.38362D, $opt$NegDouble(343597.38362D));
+
+ assertEquals(-Double.MIN_NORMAL, $opt$NegDouble(Double.MIN_NORMAL));
+ assertEquals(Double.MIN_NORMAL, $opt$NegDouble(-Double.MIN_NORMAL));
+ assertEquals(-Double.MIN_VALUE, $opt$NegDouble(Double.MIN_VALUE));
+ assertEquals(Double.MIN_VALUE, $opt$NegDouble(-Double.MIN_VALUE));
+ assertEquals(-Double.MAX_VALUE, $opt$NegDouble(Double.MAX_VALUE));
+ assertEquals(Double.MAX_VALUE, $opt$NegDouble(-Double.MAX_VALUE));
+
+ assertEquals(Double.NEGATIVE_INFINITY, $opt$NegDouble(Double.POSITIVE_INFINITY));
+ assertEquals(Double.POSITIVE_INFINITY, $opt$NegDouble(Double.NEGATIVE_INFINITY));
+ assertIsNaN($opt$NegDouble(Double.NaN));
}
static int $opt$NegInt(int a){
@@ -95,4 +170,12 @@ public class Main {
static long $opt$NegLong(long a){
return -a;
}
+
+ static float $opt$NegFloat(float a){
+ return -a;
+ }
+
+ static double $opt$NegDouble(double a){
+ return -a;
+ }
}
diff --git a/test/417-optimizing-arith-div/src/Main.java b/test/417-optimizing-arith-div/src/Main.java
index 535cafb113..5825d24dda 100644
--- a/test/417-optimizing-arith-div/src/Main.java
+++ b/test/417-optimizing-arith-div/src/Main.java
@@ -18,6 +18,12 @@
// it does compile the method.
public class Main {
+ public static void expectEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
public static void expectEquals(float expected, float result) {
if (expected != result) {
throw new Error("Expected: " + expected + ", found: " + result);
@@ -60,15 +66,51 @@ public class Main {
}
}
+ public static void expectDivisionByZero(int value) {
+ try {
+ $opt$Div(value, 0);
+ throw new Error("Expected RuntimeException when dividing by 0");
+ } catch (java.lang.RuntimeException e) {
+ }
+ try {
+ $opt$DivZero(value);
+ throw new Error("Expected RuntimeException when dividing by 0");
+ } catch (java.lang.RuntimeException e) {
+ }
+ }
public static void main(String[] args) {
div();
}
public static void div() {
+ divInt();
divFloat();
divDouble();
}
+ private static void divInt() {
+ expectEquals(2, $opt$DivLit(6));
+ expectEquals(2, $opt$Div(6, 3));
+ expectEquals(6, $opt$Div(6, 1));
+ expectEquals(-2, $opt$Div(6, -3));
+ expectEquals(1, $opt$Div(4, 3));
+ expectEquals(-1, $opt$Div(4, -3));
+ expectEquals(5, $opt$Div(23, 4));
+ expectEquals(-5, $opt$Div(-23, 4));
+
+ expectEquals(-Integer.MAX_VALUE, $opt$Div(Integer.MAX_VALUE, -1));
+ expectEquals(Integer.MIN_VALUE, $opt$Div(Integer.MIN_VALUE, -1)); // overflow
+ expectEquals(-1073741824, $opt$Div(Integer.MIN_VALUE, 2));
+
+ expectEquals(0, $opt$Div(0, Integer.MAX_VALUE));
+ expectEquals(0, $opt$Div(0, Integer.MIN_VALUE));
+
+ expectDivisionByZero(0);
+ expectDivisionByZero(1);
+ expectDivisionByZero(Integer.MAX_VALUE);
+ expectDivisionByZero(Integer.MIN_VALUE);
+ }
+
private static void divFloat() {
expectApproxEquals(1.6666666F, $opt$Div(5F, 3F));
expectApproxEquals(0F, $opt$Div(0F, 3F));
@@ -127,6 +169,19 @@ public class Main {
expectEquals(Float.NEGATIVE_INFINITY, $opt$Div(-Float.MAX_VALUE, Float.MIN_VALUE));
}
+ static int $opt$Div(int a, int b) {
+ return a / b;
+ }
+
+ static int $opt$DivZero(int a) {
+ return a / 0;
+ }
+
+  // Division by a non-zero literal should not generate a div-by-zero check.
+ static int $opt$DivLit(int a) {
+ return a / 3;
+ }
+
static float $opt$Div(float a, float b) {
return a / b;
}
diff --git a/test/421-exceptions/expected.txt b/test/421-exceptions/expected.txt
new file mode 100644
index 0000000000..94db350a03
--- /dev/null
+++ b/test/421-exceptions/expected.txt
@@ -0,0 +1,20 @@
+1
+3
+4
+1
+4
+1
+4
+1
+4
+Caught class java.lang.RuntimeException
+1
+2
+4
+1
+4
+1
+4
+1
+4
+Caught class java.lang.NullPointerException
diff --git a/test/421-exceptions/info.txt b/test/421-exceptions/info.txt
new file mode 100644
index 0000000000..bdec67e963
--- /dev/null
+++ b/test/421-exceptions/info.txt
@@ -0,0 +1 @@
+Simple test for try/catch/throw.
diff --git a/test/421-exceptions/src/Main.java b/test/421-exceptions/src/Main.java
new file mode 100644
index 0000000000..6bf2377ceb
--- /dev/null
+++ b/test/421-exceptions/src/Main.java
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void $opt$bar() {
+ try {
+ $opt$foo(1);
+ } catch (NullPointerException e) {
+ $opt$foo(2);
+ } catch (RuntimeException e) {
+ $opt$foo(3);
+ } finally {
+ $opt$foo(4);
+ }
+ }
+
+ static int barState;
+ static int fooState;
+
+ public static void main(String[] args) {
+ fooState = 0;
+ $opt$runTest();
+ fooState = 1;
+ $opt$runTest();
+ }
+
+ public static void $opt$runTest() {
+ barState = 1;
+ $opt$bar();
+ barState = 2;
+ $opt$bar();
+ barState = 3;
+ $opt$bar();
+ barState = 4;
+ try {
+ $opt$bar();
+ } catch (RuntimeException e) {
+ System.out.println("Caught " + e.getClass());
+ }
+ }
+
+ public static void $opt$foo(int value) {
+ System.out.println(value);
+ if (value == barState) {
+ if (fooState == 0) {
+ throw new RuntimeException();
+ } else {
+ throw new NullPointerException();
+ }
+ }
+ }
+}
diff --git a/test/422-type-conversion/expected.txt b/test/422-type-conversion/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/422-type-conversion/expected.txt
diff --git a/test/422-type-conversion/info.txt b/test/422-type-conversion/info.txt
new file mode 100644
index 0000000000..e734f3213e
--- /dev/null
+++ b/test/422-type-conversion/info.txt
@@ -0,0 +1 @@
+Tests for type conversions.
diff --git a/test/422-type-conversion/src/Main.java b/test/422-type-conversion/src/Main.java
new file mode 100644
index 0000000000..d2ffc5bdfc
--- /dev/null
+++ b/test/422-type-conversion/src/Main.java
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Note that $opt$ is a marker for the optimizing compiler to ensure
+// it does compile the method.
+public class Main {
+
+ public static void assertEquals(long expected, long result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void main(String[] args) {
+ byteToLong();
+ shortToLong();
+ intToLong();
+ }
+
+ private static void byteToLong() {
+ assertEquals(1L, $opt$ByteToLong((byte)1));
+ assertEquals(0L, $opt$ByteToLong((byte)0));
+ assertEquals(-1L, $opt$ByteToLong((byte)-1));
+ assertEquals(51L, $opt$ByteToLong((byte)51));
+ assertEquals(-51L, $opt$ByteToLong((byte)-51));
+ assertEquals(127L, $opt$ByteToLong((byte)127)); // (2^7) - 1
+    assertEquals(-127L, $opt$ByteToLong((byte)-127)); // -((2^7) - 1)
+ assertEquals(-128L, $opt$ByteToLong((byte)-128)); // -(2^7)
+ }
+
+ private static void shortToLong() {
+ assertEquals(1L, $opt$ShortToLong((short)1));
+ assertEquals(0L, $opt$ShortToLong((short)0));
+ assertEquals(-1L, $opt$ShortToLong((short)-1));
+ assertEquals(51L, $opt$ShortToLong((short)51));
+ assertEquals(-51L, $opt$ShortToLong((short)-51));
+ assertEquals(32767L, $opt$ShortToLong((short)32767)); // (2^15) - 1
+    assertEquals(-32767L, $opt$ShortToLong((short)-32767)); // -((2^15) - 1)
+ assertEquals(-32768L, $opt$ShortToLong((short)-32768)); // -(2^15)
+ }
+
+ private static void intToLong() {
+ assertEquals(1L, $opt$IntToLong(1));
+ assertEquals(0L, $opt$IntToLong(0));
+ assertEquals(-1L, $opt$IntToLong(-1));
+ assertEquals(51L, $opt$IntToLong(51));
+ assertEquals(-51L, $opt$IntToLong(-51));
+ assertEquals(2147483647L, $opt$IntToLong(2147483647)); // (2^31) - 1
+    assertEquals(-2147483647L, $opt$IntToLong(-2147483647)); // -((2^31) - 1)
+ assertEquals(-2147483648L, $opt$IntToLong(-2147483648)); // -(2^31)
+ }
+
+ static long $opt$ByteToLong(byte a) {
+ // Translates to an int-to-long Dex instruction.
+ return a;
+ }
+
+ static long $opt$ShortToLong(short a) {
+ // Translates to an int-to-long Dex instruction.
+ return a;
+ }
+
+ static long $opt$IntToLong(int a) {
+ // Translates to an int-to-long Dex instruction.
+ return a;
+ }
+}
diff --git a/test/703-floating-point-div/expected.txt b/test/703-floating-point-div/expected.txt
new file mode 100644
index 0000000000..76f5a5a5aa
--- /dev/null
+++ b/test/703-floating-point-div/expected.txt
@@ -0,0 +1 @@
+Done!
diff --git a/test/703-floating-point-div/info.txt b/test/703-floating-point-div/info.txt
new file mode 100644
index 0000000000..418b831350
--- /dev/null
+++ b/test/703-floating-point-div/info.txt
@@ -0,0 +1 @@
+Simple tests to check floating point division.
diff --git a/test/703-floating-point-div/src/Main.java b/test/703-floating-point-div/src/Main.java
new file mode 100644
index 0000000000..9990a545f4
--- /dev/null
+++ b/test/703-floating-point-div/src/Main.java
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+ static double dPi = Math.PI;
+ static float fPi = (float)Math.PI;
+
+ public static void expectEquals(long expected, long result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void expectEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void divDoubleTest() {
+ double d1 = 0x1.0p1023;
+ double d2 = -2.0;
+ double d3 = 0.0;
+ double d4 = Double.MIN_NORMAL;
+ double d5 = Double.POSITIVE_INFINITY;
+ double d6 = Double.NEGATIVE_INFINITY;
+ double d7 = -0.0;
+ double d8 = Double.MAX_VALUE;
+ double d9 = Double.MIN_VALUE;
+ double d0 = Double.NaN;
+
+ expectEquals(Double.doubleToRawLongBits(dPi/d1), 0x1921fb54442d18L);
+ expectEquals(Double.doubleToRawLongBits(dPi/d2), 0xbff921fb54442d18L);
+ expectEquals(Double.doubleToRawLongBits(dPi/d3), 0x7ff0000000000000L);
+ expectEquals(Double.doubleToRawLongBits(dPi/d4), 0x7fe921fb54442d18L);
+ expectEquals(Double.doubleToRawLongBits(dPi/d5), 0x0L);
+ expectEquals(Double.doubleToRawLongBits(dPi/d6), 0x8000000000000000L);
+ expectEquals(Double.doubleToRawLongBits(dPi/d7), 0xfff0000000000000L);
+
+ expectEquals(Double.doubleToRawLongBits(dPi/d8), 0xc90fdaa22168cL);
+ expectEquals(Double.doubleToRawLongBits(dPi/d9), 0x7ff0000000000000L);
+ expectEquals(Double.doubleToRawLongBits(dPi/d0), 0x7ff8000000000000L);
+ }
+
+ public static void divFloatTest() {
+ float f1 = 0x1.0p127f;
+ float f2 = -2.0f;
+ float f3 = 0.0f;
+ float f4 = Float.MIN_NORMAL;
+ float f5 = Float.POSITIVE_INFINITY;
+ float f6 = Float.NEGATIVE_INFINITY;
+ float f7 = -0.0f;
+ float f8 = Float.MAX_VALUE;
+ float f9 = Float.MIN_VALUE;
+ float f0 = Float.NaN;
+
+ expectEquals(Float.floatToRawIntBits(fPi/f1), 0xc90fdb);
+ expectEquals(Float.floatToRawIntBits(fPi/f2), 0xbfc90fdb);
+ expectEquals(Float.floatToRawIntBits(fPi/f3), 0x7f800000);
+ expectEquals(Float.floatToRawIntBits(fPi/f4), 0x7f490fdb);
+ expectEquals(Float.floatToRawIntBits(fPi/f5), 0x0);
+ expectEquals(Float.floatToRawIntBits(fPi/f6), 0x80000000);
+ expectEquals(Float.floatToRawIntBits(fPi/f7), 0xff800000);
+
+ expectEquals(Float.floatToRawIntBits(fPi/f8), 0x6487ee);
+ expectEquals(Float.floatToRawIntBits(fPi/f9), 0x7f800000);
+ expectEquals(Float.floatToRawIntBits(fPi/f0), 0x7fc00000);
+ }
+
+ public static void main(String[] args) {
+ divDoubleTest();
+ divFloatTest();
+ System.out.println("Done!");
+ }
+
+}
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 0a1e3e1111..e460f397b9 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -308,29 +308,43 @@ TEST_ART_BROKEN_DEFAULT_RUN_TESTS :=
# Known broken tests for the arm64 optimizing compiler backend.
TEST_ART_BROKEN_OPTIMIZING_ARM64_RUN_TESTS := \
001-HelloWorld \
+ 002-sleep \
003-omnibus-opcodes \
004-InterfaceTest \
004-JniTest \
+ 004-ReferenceMap \
+ 004-SignalTest \
004-StackWalk \
004-UnsafeTest \
+ 005-annotations \
006-args \
007-count10 \
+ 008-exceptions \
011-array-copy \
013-math2 \
016-intern \
017-float \
018-stack-overflow \
+ 019-wrong-array-type \
020-string \
+ 021-string2 \
022-interface \
023-many-interfaces \
+ 024-illegal-access \
026-access \
028-array-write \
+ 029-assert \
030-bad-finalizer \
031-class-attributes \
032-concrete-sub \
+ 033-class-init-deadlock \
+ 035-enum \
036-finalizer \
037-inherit \
038-inner-null \
+ 039-join-main \
+ 040-miranda \
+ 042-new-instance \
043-privates \
044-proxy \
045-reflect-array \
@@ -341,9 +355,13 @@ TEST_ART_BROKEN_OPTIMIZING_ARM64_RUN_TESTS := \
051-thread \
052-verifier-fun \
054-uncaught \
+ 055-enum-performance \
056-const-string-jumbo \
061-out-of-memory \
063-process-manager \
+ 064-field-access \
+ 065-mismatched-implements \
+ 066-mismatched-super \
067-preemptive-unpark \
068-classloader \
069-field-type \
@@ -351,16 +369,20 @@ TEST_ART_BROKEN_OPTIMIZING_ARM64_RUN_TESTS := \
071-dexfile \
072-precise-gc \
074-gc-thrash \
+ 075-verification-error \
076-boolean-put \
077-method-override \
+ 078-polymorphic-virtual \
079-phantom \
080-oom-throw \
+ 081-hot-exceptions \
082-inline-execute \
083-compiler-regressions \
084-class-init \
085-old-style-inner-class \
086-null-super \
087-gc-after-link \
+ 088-monitor-verification \
090-loop-formation \
092-locale \
093-serialization \
@@ -369,6 +391,7 @@ TEST_ART_BROKEN_OPTIMIZING_ARM64_RUN_TESTS := \
097-duplicate-method \
098-ddmc \
100-reflect2 \
+ 101-fibonacci \
102-concurrent-gc \
103-string-append \
105-invoke \
@@ -376,20 +399,28 @@ TEST_ART_BROKEN_OPTIMIZING_ARM64_RUN_TESTS := \
107-int-math2 \
109-suspend-check \
110-field-access \
+ 111-unresolvable-exception \
112-double-math \
113-multidex \
117-nopatchoat \
+ 118-noimage-dex2oat \
+ 119-noimage-patchoat \
121-modifiers \
+ 121-simple-suspend-check \
122-npe \
123-compiler-regressions-mt \
124-missing-classes \
125-gc-and-classloading \
126-miranda-multidex \
+ 201-built-in-exception-detail-messages \
+ 202-thread-oome \
300-package-override \
301-abstract-protected \
303-verification-stress \
401-optimizing-compiler \
+ 402-optimizing-control-flow \
403-optimizing-long \
+ 404-optimizing-allocator \
405-optimizing-long-allocator \
406-fields \
407-arrays \
@@ -406,9 +437,13 @@ TEST_ART_BROKEN_OPTIMIZING_ARM64_RUN_TESTS := \
418-const-string \
419-long-parameter \
420-const-class \
+ 421-exceptions \
+ 421-large-frame \
+ 422-type-conversion \
700-LoadArgRegs \
701-easy-div-rem \
702-LargeBranchOffset \
+ 703-floating-point-div \
800-smali
ifneq (,$(filter optimizing,$(COMPILER_TYPES)))