Merge "ART: Fix warnings in memcmp16 for arm32"
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index d168fc8..e43841a 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -273,6 +273,14 @@
UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
}
+void HGraphBuilder::Conversion_12x(const Instruction& instruction,
+ Primitive::Type input_type,
+ Primitive::Type result_type) {
+ HInstruction* first = LoadLocal(instruction.VRegB(), input_type);
+ current_block_->AddInstruction(new (arena_) HTypeConversion(result_type, first));
+ UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
+}
+
template<typename T>
void HGraphBuilder::Binop_23x(const Instruction& instruction, Primitive::Type type) {
HInstruction* first = LoadLocal(instruction.VRegB(), type);
@@ -529,6 +537,27 @@
return true;
}
+void HGraphBuilder::BuildCheckedDiv(const Instruction& instruction,
+ uint32_t dex_offset,
+ Primitive::Type type,
+ bool second_is_lit) {
+ DCHECK(type == Primitive::kPrimInt);
+
+ HInstruction* first = LoadLocal(instruction.VRegB(), type);
+ HInstruction* second = second_is_lit
+ ? GetIntConstant(instruction.VRegC())
+ : LoadLocal(instruction.VRegC(), type);
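+ // Emit an explicit HDivZeroCheck only when the divisor is not a known non-zero constant.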
+ if (!second->IsIntConstant() || (second->AsIntConstant()->GetValue() == 0)) {
+ second = new (arena_) HDivZeroCheck(second, dex_offset);
+ Temporaries temps(graph_, 1);
+ current_block_->AddInstruction(second);
+ temps.Add(current_block_->GetLastInstruction());
+ }
+
+ current_block_->AddInstruction(new (arena_) HDiv(type, first, second));
+ UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
+}
+
void HGraphBuilder::BuildArrayAccess(const Instruction& instruction,
uint32_t dex_offset,
bool is_put,
@@ -609,6 +638,60 @@
}
}
+void HGraphBuilder::BuildFillArrayData(const Instruction& instruction, uint32_t dex_offset) {
+ Temporaries temps(graph_, 1);
+ HInstruction* array = LoadLocal(instruction.VRegA_31t(), Primitive::kPrimNot);
+ HNullCheck* null_check = new (arena_) HNullCheck(array, dex_offset);
+ current_block_->AddInstruction(null_check);
+ temps.Add(null_check);
+
+ HInstruction* length = new (arena_) HArrayLength(null_check);
+ current_block_->AddInstruction(length);
+
+ int32_t payload_offset = instruction.VRegB_31t() + dex_offset;
+ const Instruction::ArrayDataPayload* payload =
+ reinterpret_cast<const Instruction::ArrayDataPayload*>(code_start_ + payload_offset);
+ const uint8_t* data = payload->data;
+ uint32_t element_count = payload->element_count;
+
+ // Implementation of this DEX instruction seems to be that the bounds check is
+ // done before doing any stores.
+ HInstruction* last_index = GetIntConstant(payload->element_count - 1);
+ current_block_->AddInstruction(new (arena_) HBoundsCheck(last_index, length, dex_offset));
+
+ switch (payload->element_width) {
+ case 1:
+ BuildFillArrayData(null_check,
+ reinterpret_cast<const int8_t*>(data),
+ element_count,
+ Primitive::kPrimByte,
+ dex_offset);
+ break;
+ case 2:
+ BuildFillArrayData(null_check,
+ reinterpret_cast<const int16_t*>(data),
+ element_count,
+ Primitive::kPrimShort,
+ dex_offset);
+ break;
+ case 4:
+ BuildFillArrayData(null_check,
+ reinterpret_cast<const int32_t*>(data),
+ element_count,
+ Primitive::kPrimInt,
+ dex_offset);
+ break;
+ case 8:
+ BuildFillWideArrayData(null_check,
+ reinterpret_cast<const int64_t*>(data),
+ element_count,
+ dex_offset);
+ break;
+ default:
+ LOG(FATAL) << "Unknown element width for " << payload->element_width;
+ }
+}
+
void HGraphBuilder::BuildFillWideArrayData(HInstruction* object,
const int64_t* data,
uint32_t element_count,
@@ -823,6 +906,11 @@
break;
}
+ case Instruction::INT_TO_LONG: {
+ Conversion_12x(instruction, Primitive::kPrimInt, Primitive::kPrimLong);
+ break;
+ }
+
case Instruction::ADD_INT: {
Binop_23x<HAdd>(instruction, Primitive::kPrimInt);
break;
@@ -888,6 +976,11 @@
break;
}
+ case Instruction::DIV_INT: {
+ BuildCheckedDiv(instruction, dex_offset, Primitive::kPrimInt, false);
+ break;
+ }
+
case Instruction::DIV_FLOAT: {
Binop_23x<HDiv>(instruction, Primitive::kPrimFloat);
break;
@@ -993,6 +1086,12 @@
break;
}
+ case Instruction::DIV_INT_LIT16:
+ case Instruction::DIV_INT_LIT8: {
+ BuildCheckedDiv(instruction, dex_offset, Primitive::kPrimInt, true);
+ break;
+ }
+
case Instruction::NEW_INSTANCE: {
current_block_->AddInstruction(
new (arena_) HNewInstance(dex_offset, instruction.VRegB_21c()));
@@ -1027,57 +1126,7 @@
}
case Instruction::FILL_ARRAY_DATA: {
- Temporaries temps(graph_, 1);
- HInstruction* array = LoadLocal(instruction.VRegA_31t(), Primitive::kPrimNot);
- HNullCheck* null_check = new (arena_) HNullCheck(array, dex_offset);
- current_block_->AddInstruction(null_check);
- temps.Add(null_check);
-
- HInstruction* length = new (arena_) HArrayLength(null_check);
- current_block_->AddInstruction(length);
-
- int32_t payload_offset = instruction.VRegB_31t() + dex_offset;
- const Instruction::ArrayDataPayload* payload =
- reinterpret_cast<const Instruction::ArrayDataPayload*>(code_start_ + payload_offset);
- const uint8_t* data = payload->data;
- uint32_t element_count = payload->element_count;
-
- // Implementation of this DEX instruction seems to be that the bounds check is
- // done before doing any stores.
- HInstruction* last_index = GetIntConstant(payload->element_count - 1);
- current_block_->AddInstruction(new (arena_) HBoundsCheck(last_index, length, dex_offset));
-
- switch (payload->element_width) {
- case 1:
- BuildFillArrayData(null_check,
- reinterpret_cast<const int8_t*>(data),
- element_count,
- Primitive::kPrimByte,
- dex_offset);
- break;
- case 2:
- BuildFillArrayData(null_check,
- reinterpret_cast<const int16_t*>(data),
- element_count,
- Primitive::kPrimShort,
- dex_offset);
- break;
- case 4:
- BuildFillArrayData(null_check,
- reinterpret_cast<const int32_t*>(data),
- element_count,
- Primitive::kPrimInt,
- dex_offset);
- break;
- case 8:
- BuildFillWideArrayData(null_check,
- reinterpret_cast<const int64_t*>(data),
- element_count,
- dex_offset);
- break;
- default:
- LOG(FATAL) << "Unknown element width for " << payload->element_width;
- }
+ BuildFillArrayData(instruction, dex_offset);
break;
}
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index eea762f..f09f729 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -116,6 +116,15 @@
template<typename T> void If_21t(const Instruction& instruction, uint32_t dex_offset);
template<typename T> void If_22t(const Instruction& instruction, uint32_t dex_offset);
+ void Conversion_12x(const Instruction& instruction,
+ Primitive::Type input_type,
+ Primitive::Type result_type);
+
+ void BuildCheckedDiv(const Instruction& instruction,
+ uint32_t dex_offset,
+ Primitive::Type type,
+ bool second_is_lit);
+
void BuildReturn(const Instruction& instruction, Primitive::Type type);
// Builds an instance field access node and returns whether the instruction is supported.
@@ -146,6 +155,8 @@
uint32_t* args,
uint32_t register_index);
+ void BuildFillArrayData(const Instruction& instruction, uint32_t dex_offset);
+
// Fills the given object with data as specified in the fill-array-data
// instruction. Currently only used for non-reference and non-floating point
// arrays.
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 6e6d64c..dd595d9 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -92,6 +92,22 @@
DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathARM);
};
+class DivZeroCheckSlowPathARM : public SlowPathCodeARM {
+ public:
+ explicit DivZeroCheckSlowPathARM(HDivZeroCheck* instruction) : instruction_(instruction) {}
+
+ virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
+ __ Bind(GetEntryLabel());
+ arm_codegen->InvokeRuntime(
+ QUICK_ENTRY_POINT(pThrowDivZero), instruction_, instruction_->GetDexPc());
+ }
+
+ private:
+ HDivZeroCheck* const instruction_;
+ DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathARM);
+};
+
class StackOverflowCheckSlowPathARM : public SlowPathCodeARM {
public:
StackOverflowCheckSlowPathARM() {}
@@ -373,9 +389,6 @@
blocked_core_registers_[LR] = true;
blocked_core_registers_[PC] = true;
- // Reserve R4 for suspend check.
- blocked_core_registers_[R4] = true;
-
// Reserve thread register.
blocked_core_registers_[TR] = true;
@@ -385,6 +398,7 @@
// TODO: We currently don't use Quick's callee saved registers.
// We always save and restore R6 and R7 to make sure we can use three
// register pairs for long operations.
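+ // R4 is no longer needed for the suspend check; block it like the other callee-saves for now.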
+ blocked_core_registers_[R4] = true;
blocked_core_registers_[R5] = true;
blocked_core_registers_[R8] = true;
blocked_core_registers_[R10] = true;
@@ -785,6 +799,7 @@
DCHECK(instruction->IsSuspendCheck()
|| instruction->IsBoundsCheck()
|| instruction->IsNullCheck()
+ || instruction->IsDivZeroCheck()
|| !IsLeafMethod());
}
@@ -1223,6 +1238,94 @@
}
}
+void LocationsBuilderARM::VisitTypeConversion(HTypeConversion* conversion) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(conversion, LocationSummary::kNoCall);
+ Primitive::Type result_type = conversion->GetResultType();
+ Primitive::Type input_type = conversion->GetInputType();
+ switch (result_type) {
+ case Primitive::kPrimLong:
+ switch (input_type) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ // int-to-long conversion.
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type << " to "
+ << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimInt:
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type
+ << " to " << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+}
+
+void InstructionCodeGeneratorARM::VisitTypeConversion(HTypeConversion* conversion) {
+ LocationSummary* locations = conversion->GetLocations();
+ Location out = locations->Out();
+ Location in = locations->InAt(0);
+ Primitive::Type result_type = conversion->GetResultType();
+ Primitive::Type input_type = conversion->GetInputType();
+ switch (result_type) {
+ case Primitive::kPrimLong:
+ switch (input_type) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ // int-to-long conversion.
+ DCHECK(out.IsRegisterPair());
+ DCHECK(in.IsRegister());
+ __ Mov(out.AsRegisterPairLow<Register>(), in.As<Register>());
+ // Sign extension.
+ __ Asr(out.AsRegisterPairHigh<Register>(),
+ out.AsRegisterPairLow<Register>(),
+ 31);
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type << " to "
+ << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimInt:
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type
+ << " to " << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+}
+
void LocationsBuilderARM::VisitAdd(HAdd* add) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(add, LocationSummary::kNoCall);
@@ -1445,7 +1548,12 @@
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall);
switch (div->GetResultType()) {
- case Primitive::kPrimInt:
+ case Primitive::kPrimInt: {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+ }
case Primitive::kPrimLong: {
LOG(FATAL) << "Not implemented div type" << div->GetResultType();
break;
@@ -1470,7 +1578,11 @@
Location second = locations->InAt(1);
switch (div->GetResultType()) {
- case Primitive::kPrimInt:
+ case Primitive::kPrimInt: {
+ __ sdiv(out.As<Register>(), first.As<Register>(), second.As<Register>());
+ break;
+ }
+
case Primitive::kPrimLong: {
LOG(FATAL) << "Not implemented div type" << div->GetResultType();
break;
@@ -1493,6 +1605,27 @@
}
}
+void LocationsBuilderARM::VisitDivZeroCheck(HDivZeroCheck* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RequiresRegister());
+ if (instruction->HasUses()) {
+ locations->SetOut(Location::SameAsFirstInput());
+ }
+}
+
+void InstructionCodeGeneratorARM::VisitDivZeroCheck(HDivZeroCheck* instruction) {
+ SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) DivZeroCheckSlowPathARM(instruction);
+ codegen_->AddSlowPath(slow_path);
+
+ LocationSummary* locations = instruction->GetLocations();
+ Location value = locations->InAt(0);
+
+ DCHECK(value.IsRegister()) << value;
+ __ cmp(value.As<Register>(), ShifterOperand(0));
+ __ b(slow_path->GetEntryLabel(), EQ);
+}
+
void LocationsBuilderARM::VisitNewInstance(HNewInstance* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
@@ -2069,12 +2202,15 @@
new (GetGraph()->GetArena()) SuspendCheckSlowPathARM(instruction, successor);
codegen_->AddSlowPath(slow_path);
- __ subs(R4, R4, ShifterOperand(1));
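+ // Poll the thread's flags: a non-zero value means a suspend or checkpoint request is pending.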
+ __ LoadFromOffset(
+ kLoadUnsignedHalfword, IP, TR, Thread::ThreadFlagsOffset<kArmWordSize>().Int32Value());
+ __ cmp(IP, ShifterOperand(0));
+ // TODO: Figure out the branch offsets and use cbz/cbnz.
if (successor == nullptr) {
- __ b(slow_path->GetEntryLabel(), EQ);
+ __ b(slow_path->GetEntryLabel(), NE);
__ Bind(slow_path->GetReturnLabel());
} else {
- __ b(codegen_->GetLabelOf(successor), NE);
+ __ b(codegen_->GetLabelOf(successor), EQ);
__ b(slow_path->GetEntryLabel());
}
}
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 90d7c35..c26b0ab 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -537,6 +537,7 @@
M(ClinitCheck) \
M(DoubleConstant) \
M(Div) \
+ M(DivZeroCheck) \
M(FloatConstant) \
M(LoadClass) \
M(LoadString) \
@@ -545,6 +546,7 @@
M(ParallelMove) \
M(StaticFieldGet) \
M(StaticFieldSet) \
+ M(TypeConversion) \
#define UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name) name##UnimplementedInstructionBreakCode
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 1e37909..b2d9187 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -85,6 +85,36 @@
DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathX86);
};
+class DivZeroCheckSlowPathX86 : public SlowPathCodeX86 {
+ public:
+ explicit DivZeroCheckSlowPathX86(HDivZeroCheck* instruction) : instruction_(instruction) {}
+
+ virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ __ Bind(GetEntryLabel());
+ __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pThrowDivZero)));
+ codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
+ }
+
+ private:
+ HDivZeroCheck* const instruction_;
+ DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathX86);
+};
+
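+// Slow path taken when the divisor is -1: the quotient is then just the negated dividend,
+// which also avoids the #DE (SIGFPE) raised by idivl for 0x80000000 / -1.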
+class DivMinusOneSlowPathX86 : public SlowPathCodeX86 {
+ public:
+ explicit DivMinusOneSlowPathX86(Register reg) : reg_(reg) {}
+
+ virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ __ Bind(GetEntryLabel());
+ __ negl(reg_);
+ __ jmp(GetExitLabel());
+ }
+
+ private:
+ Register reg_;
+ DISALLOW_COPY_AND_ASSIGN(DivMinusOneSlowPathX86);
+};
+
class StackOverflowCheckSlowPathX86 : public SlowPathCodeX86 {
public:
StackOverflowCheckSlowPathX86() {}
@@ -1126,6 +1156,91 @@
}
}
+void LocationsBuilderX86::VisitTypeConversion(HTypeConversion* conversion) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(conversion, LocationSummary::kNoCall);
+ Primitive::Type result_type = conversion->GetResultType();
+ Primitive::Type input_type = conversion->GetInputType();
+ switch (result_type) {
+ case Primitive::kPrimLong:
+ switch (input_type) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ // int-to-long conversion.
+ locations->SetInAt(0, Location::RegisterLocation(EAX));
+ locations->SetOut(Location::RegisterPairLocation(EAX, EDX));
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type << " to "
+ << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimInt:
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type
+ << " to " << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+}
+
+void InstructionCodeGeneratorX86::VisitTypeConversion(HTypeConversion* conversion) {
+ LocationSummary* locations = conversion->GetLocations();
+ Location out = locations->Out();
+ Location in = locations->InAt(0);
+ Primitive::Type result_type = conversion->GetResultType();
+ Primitive::Type input_type = conversion->GetInputType();
+ switch (result_type) {
+ case Primitive::kPrimLong:
+ switch (input_type) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ // int-to-long conversion.
+ DCHECK_EQ(out.AsRegisterPairLow<Register>(), EAX);
+ DCHECK_EQ(out.AsRegisterPairHigh<Register>(), EDX);
+ DCHECK_EQ(in.As<Register>(), EAX);
+ __ cdq();
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type << " to "
+ << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimInt:
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type
+ << " to " << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+}
+
void LocationsBuilderX86::VisitAdd(HAdd* add) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(add, LocationSummary::kNoCall);
@@ -1386,7 +1501,14 @@
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall);
switch (div->GetResultType()) {
- case Primitive::kPrimInt:
+ case Primitive::kPrimInt: {
+ locations->SetInAt(0, Location::RegisterLocation(EAX));
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::SameAsFirstInput());
+ // Intel uses edx:eax as the dividend.
+ locations->AddTemp(Location::RegisterLocation(EDX));
+ break;
+ }
case Primitive::kPrimLong: {
LOG(FATAL) << "Not implemented div type" << div->GetResultType();
break;
@@ -1411,7 +1533,32 @@
DCHECK(first.Equals(locations->Out()));
switch (div->GetResultType()) {
- case Primitive::kPrimInt:
+ case Primitive::kPrimInt: {
+ Register first_reg = first.As<Register>();
+ Register second_reg = second.As<Register>();
+ DCHECK_EQ(EAX, first_reg);
+ DCHECK_EQ(EDX, locations->GetTemp(0).As<Register>());
+
+ SlowPathCodeX86* slow_path =
+ new (GetGraph()->GetArena()) DivMinusOneSlowPathX86(first_reg);
+ codegen_->AddSlowPath(slow_path);
+
+ // 0x80000000/-1 triggers an arithmetic exception!
+ // Dividing by -1 is actually a negation, and -0x80000000 = 0x80000000, so
+ // it's safe to just use negl instead of more complex comparisons.
+
+ __ cmpl(second_reg, Immediate(-1));
+ __ j(kEqual, slow_path->GetEntryLabel());
+
+ // edx:eax <- sign extension of eax
+ __ cdq();
+ // eax = quotient, edx = remainder
+ __ idivl(second_reg);
+
+ __ Bind(slow_path->GetExitLabel());
+ break;
+ }
+
case Primitive::kPrimLong: {
LOG(FATAL) << "Not implemented div type" << div->GetResultType();
break;
@@ -1432,6 +1579,36 @@
}
}
+void LocationsBuilderX86::VisitDivZeroCheck(HDivZeroCheck* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::Any());
+ if (instruction->HasUses()) {
+ locations->SetOut(Location::SameAsFirstInput());
+ }
+}
+
+void InstructionCodeGeneratorX86::VisitDivZeroCheck(HDivZeroCheck* instruction) {
+ SlowPathCodeX86* slow_path = new (GetGraph()->GetArena()) DivZeroCheckSlowPathX86(instruction);
+ codegen_->AddSlowPath(slow_path);
+
+ LocationSummary* locations = instruction->GetLocations();
+ Location value = locations->InAt(0);
+
+ if (value.IsRegister()) {
+ __ testl(value.As<Register>(), value.As<Register>());
+ } else if (value.IsStackSlot()) {
+ __ cmpl(Address(ESP, value.GetStackIndex()), Immediate(0));
+ } else {
+ DCHECK(value.IsConstant()) << value;
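+ // A constant divisor is either zero, in which case the slow path is always taken,
+ // or non-zero, in which case no check is needed.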
+ if (value.GetConstant()->AsIntConstant()->GetValue() == 0) {
+ __ jmp(slow_path->GetEntryLabel());
+ }
+ return;
+ }
+ __ j(kEqual, slow_path->GetEntryLabel());
+}
+
void LocationsBuilderX86::VisitNewInstance(HNewInstance* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 40eec9b..2bd76c1 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -90,6 +90,37 @@
DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathX86_64);
};
+class DivZeroCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
+ public:
+ explicit DivZeroCheckSlowPathX86_64(HDivZeroCheck* instruction) : instruction_(instruction) {}
+
+ virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ __ Bind(GetEntryLabel());
+ __ gs()->call(
+ Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pThrowDivZero), true));
+ codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
+ }
+
+ private:
+ HDivZeroCheck* const instruction_;
+ DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathX86_64);
+};
+
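+// Slow path taken when the divisor is -1: the quotient is then just the negated dividend,
+// which also avoids the #DE (SIGFPE) raised by idivl for 0x80000000 / -1.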
+class DivMinusOneSlowPathX86_64 : public SlowPathCodeX86_64 {
+ public:
+ explicit DivMinusOneSlowPathX86_64(Register reg) : reg_(reg) {}
+
+ virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ __ Bind(GetEntryLabel());
+ __ negl(CpuRegister(reg_));
+ __ jmp(GetExitLabel());
+ }
+
+ private:
+ Register reg_;
+ DISALLOW_COPY_AND_ASSIGN(DivMinusOneSlowPathX86_64);
+};
+
class StackOverflowCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
public:
StackOverflowCheckSlowPathX86_64() {}
@@ -1104,6 +1135,92 @@
}
}
+void LocationsBuilderX86_64::VisitTypeConversion(HTypeConversion* conversion) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(conversion, LocationSummary::kNoCall);
+ Primitive::Type result_type = conversion->GetResultType();
+ Primitive::Type input_type = conversion->GetInputType();
+ switch (result_type) {
+ case Primitive::kPrimLong:
+ switch (input_type) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ // int-to-long conversion.
+ // TODO: We would benefit from a (to-be-implemented)
+ // Location::RegisterOrStackSlot requirement for this input.
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister());
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type << " to "
+ << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimInt:
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type
+ << " to " << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+}
+
+void InstructionCodeGeneratorX86_64::VisitTypeConversion(HTypeConversion* conversion) {
+ LocationSummary* locations = conversion->GetLocations();
+ Location out = locations->Out();
+ Location in = locations->InAt(0);
+ Primitive::Type result_type = conversion->GetResultType();
+ Primitive::Type input_type = conversion->GetInputType();
+ switch (result_type) {
+ case Primitive::kPrimLong:
+ switch (input_type) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ // int-to-long conversion.
+ DCHECK(out.IsRegister());
+ DCHECK(in.IsRegister());
+ __ movsxd(out.As<CpuRegister>(), in.As<CpuRegister>());
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type << " to "
+ << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimInt:
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type
+ << " to " << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+}
+
void LocationsBuilderX86_64::VisitAdd(HAdd* add) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(add, LocationSummary::kNoCall);
@@ -1310,7 +1427,14 @@
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall);
switch (div->GetResultType()) {
- case Primitive::kPrimInt:
+ case Primitive::kPrimInt: {
+ locations->SetInAt(0, Location::RegisterLocation(RAX));
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::SameAsFirstInput());
+ // Intel uses edx:eax as the dividend.
+ locations->AddTemp(Location::RegisterLocation(RDX));
+ break;
+ }
case Primitive::kPrimLong: {
LOG(FATAL) << "Not implemented div type" << div->GetResultType();
break;
@@ -1335,7 +1459,32 @@
DCHECK(first.Equals(locations->Out()));
switch (div->GetResultType()) {
- case Primitive::kPrimInt:
+ case Primitive::kPrimInt: {
+ CpuRegister first_reg = first.As<CpuRegister>();
+ CpuRegister second_reg = second.As<CpuRegister>();
+ DCHECK_EQ(RAX, first_reg.AsRegister());
+ DCHECK_EQ(RDX, locations->GetTemp(0).As<CpuRegister>().AsRegister());
+
+ SlowPathCodeX86_64* slow_path =
+ new (GetGraph()->GetArena()) DivMinusOneSlowPathX86_64(first_reg.AsRegister());
+ codegen_->AddSlowPath(slow_path);
+
+ // 0x80000000/-1 triggers an arithmetic exception!
+ // Dividing by -1 is actually a negation, and -0x80000000 = 0x80000000, so
+ // it's safe to just use negl instead of more complex comparisons.
+
+ __ cmpl(second_reg, Immediate(-1));
+ __ j(kEqual, slow_path->GetEntryLabel());
+
+ // edx:eax <- sign extension of eax
+ __ cdq();
+ // eax = quotient, edx = remainder
+ __ idivl(second_reg);
+
+ __ Bind(slow_path->GetExitLabel());
+ break;
+ }
+
case Primitive::kPrimLong: {
LOG(FATAL) << "Not implemented div type" << div->GetResultType();
break;
@@ -1356,6 +1505,37 @@
}
}
+void LocationsBuilderX86_64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::Any());
+ if (instruction->HasUses()) {
+ locations->SetOut(Location::SameAsFirstInput());
+ }
+}
+
+void InstructionCodeGeneratorX86_64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
+ SlowPathCodeX86_64* slow_path =
+ new (GetGraph()->GetArena()) DivZeroCheckSlowPathX86_64(instruction);
+ codegen_->AddSlowPath(slow_path);
+
+ LocationSummary* locations = instruction->GetLocations();
+ Location value = locations->InAt(0);
+
+ if (value.IsRegister()) {
+ __ testl(value.As<CpuRegister>(), value.As<CpuRegister>());
+ } else if (value.IsStackSlot()) {
+ __ cmpl(Address(CpuRegister(RSP), value.GetStackIndex()), Immediate(0));
+ } else {
+ DCHECK(value.IsConstant()) << value;
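+ // A constant divisor is either zero, in which case the slow path is always taken,
+ // or non-zero, in which case no check is needed.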
+ if (value.GetConstant()->AsIntConstant()->GetValue() == 0) {
+ __ jmp(slow_path->GetEntryLabel());
+ }
+ return;
+ }
+ __ j(kEqual, slow_path->GetEntryLabel());
+}
+
void LocationsBuilderX86_64::VisitNewInstance(HNewInstance* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index 68fcb25..47e9fa4 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -543,4 +543,17 @@
}
}
+#if defined(__aarch64__)
+TEST(CodegenTest, DISABLED_ReturnDivIntLit8) {
+#else
+TEST(CodegenTest, ReturnDivIntLit8) {
+#endif
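+ // Computes 4 / 3 with div-int/lit8, so the expected return value is 1.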
+ const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ Instruction::CONST_4 | 4 << 12 | 0 << 8,
+ Instruction::DIV_INT_LIT8, 3 << 8 | 0,
+ Instruction::RETURN);
+
+ TestCode(data, true, 1);
+}
+
} // namespace art
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 79638b3..a57bbdb 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -478,6 +478,7 @@
M(Compare, BinaryOperation) \
M(Condition, BinaryOperation) \
M(Div, BinaryOperation) \
+ M(DivZeroCheck, Instruction) \
M(DoubleConstant, Constant) \
M(Equal, Condition) \
M(Exit, Instruction) \
@@ -516,6 +517,7 @@
M(Sub, BinaryOperation) \
M(SuspendCheck, Instruction) \
M(Temporary, Instruction) \
+ M(TypeConversion, Instruction) \
#define FOR_EACH_INSTRUCTION(M) \
FOR_EACH_CONCRETE_INSTRUCTION(M) \
@@ -1713,7 +1715,12 @@
HDiv(Primitive::Type result_type, HInstruction* left, HInstruction* right)
: HBinaryOperation(result_type, left, right) {}
- virtual int32_t Evaluate(int32_t x, int32_t y) const { return x / y; }
+ virtual int32_t Evaluate(int32_t x, int32_t y) const {
+ // Our graph structure ensures we never have 0 for `y` during constant folding.
+ DCHECK_NE(y, 0);
+ // Special case -1 to avoid getting a SIGFPE on x86.
+ return (y == -1) ? -x : x / y;
+ }
virtual int64_t Evaluate(int64_t x, int64_t y) const { return x / y; }
DECLARE_INSTRUCTION(Div);
@@ -1722,6 +1729,33 @@
DISALLOW_COPY_AND_ASSIGN(HDiv);
};
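+// Checks that the divisor of an integer division is not zero; code generation emits a jump
+// to a runtime slow path that throws ArithmeticException when it is.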
+class HDivZeroCheck : public HExpression<1> {
+ public:
+ HDivZeroCheck(HInstruction* value, uint32_t dex_pc)
+ : HExpression(value->GetType(), SideEffects::None()), dex_pc_(dex_pc) {
+ SetRawInputAt(0, value);
+ }
+
+ bool CanBeMoved() const OVERRIDE { return true; }
+
+ bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
+ UNUSED(other);
+ return true;
+ }
+
+ bool NeedsEnvironment() const OVERRIDE { return true; }
+ bool CanThrow() const OVERRIDE { return true; }
+
+ uint32_t GetDexPc() const { return dex_pc_; }
+
+ DECLARE_INSTRUCTION(DivZeroCheck);
+
+ private:
+ const uint32_t dex_pc_;
+
+ DISALLOW_COPY_AND_ASSIGN(HDivZeroCheck);
+};
+
// The value of a parameter in this method. Its location depends on
// the calling convention.
class HParameterValue : public HExpression<0> {
@@ -1761,6 +1795,28 @@
DISALLOW_COPY_AND_ASSIGN(HNot);
};
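+// Converts the value of its input to `result_type`; currently only built for the
+// int-to-long Dex instruction.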
+class HTypeConversion : public HExpression<1> {
+ public:
+ // Instantiate a type conversion of `input` to `result_type`.
+ HTypeConversion(Primitive::Type result_type, HInstruction* input)
+ : HExpression(result_type, SideEffects::None()) {
+ SetRawInputAt(0, input);
+ DCHECK_NE(input->GetType(), result_type);
+ }
+
+ HInstruction* GetInput() const { return InputAt(0); }
+ Primitive::Type GetInputType() const { return GetInput()->GetType(); }
+ Primitive::Type GetResultType() const { return GetType(); }
+
+ bool CanBeMoved() const OVERRIDE { return true; }
+ bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE { return true; }
+
+ DECLARE_INSTRUCTION(TypeConversion);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HTypeConversion);
+};
+
class HPhi : public HInstruction {
public:
HPhi(ArenaAllocator* arena, uint32_t reg_number, size_t number_of_inputs, Primitive::Type type)
diff --git a/compiler/optimizing/prepare_for_register_allocation.cc b/compiler/optimizing/prepare_for_register_allocation.cc
index c4db840..7186dbe 100644
--- a/compiler/optimizing/prepare_for_register_allocation.cc
+++ b/compiler/optimizing/prepare_for_register_allocation.cc
@@ -34,6 +34,10 @@
check->ReplaceWith(check->InputAt(0));
}
+void PrepareForRegisterAllocation::VisitDivZeroCheck(HDivZeroCheck* check) {
+ check->ReplaceWith(check->InputAt(0));
+}
+
void PrepareForRegisterAllocation::VisitBoundsCheck(HBoundsCheck* check) {
check->ReplaceWith(check->InputAt(0));
}
diff --git a/compiler/optimizing/prepare_for_register_allocation.h b/compiler/optimizing/prepare_for_register_allocation.h
index 3e63ecb..0fdb65f 100644
--- a/compiler/optimizing/prepare_for_register_allocation.h
+++ b/compiler/optimizing/prepare_for_register_allocation.h
@@ -34,6 +34,7 @@
private:
virtual void VisitNullCheck(HNullCheck* check) OVERRIDE;
+ virtual void VisitDivZeroCheck(HDivZeroCheck* check) OVERRIDE;
virtual void VisitBoundsCheck(HBoundsCheck* check) OVERRIDE;
virtual void VisitClinitCheck(HClinitCheck* check) OVERRIDE;
virtual void VisitCondition(HCondition* condition) OVERRIDE;
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index 2a9c885..0745f9c 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -257,7 +257,13 @@
//
// The backwards walking ensures the ranges are ordered on increasing start positions.
Location output = locations->Out();
- if (output.IsRegister() || output.IsFpuRegister()) {
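+ // An output required to be the same as the first input inherits that input's fixed register, if any.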
+ if (output.IsUnallocated() && output.GetPolicy() == Location::kSameAsFirstInput) {
+ Location first = locations->InAt(0);
+ if (first.IsRegister() || first.IsFpuRegister()) {
+ current->SetFrom(position + 1);
+ current->SetRegister(first.reg());
+ }
+ } else if (output.IsRegister() || output.IsFpuRegister()) {
// Shift the interval's start by one to account for the blocked register.
current->SetFrom(position + 1);
current->SetRegister(output.reg());
@@ -1172,7 +1178,11 @@
if (location.IsUnallocated()) {
if (location.GetPolicy() == Location::kSameAsFirstInput) {
- locations->SetInAt(0, source);
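+ // The first input may already carry a fixed register requirement set by the code
+ // generator (e.g. EAX for x86 div); it must then agree with the allocated output.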
+ if (locations->InAt(0).IsUnallocated()) {
+ locations->SetInAt(0, source);
+ } else {
+ DCHECK(locations->InAt(0).Equals(source));
+ }
}
locations->SetOut(source);
} else {
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index 6845dea..9b1a121 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -696,4 +696,45 @@
}
}
+static HGraph* BuildDiv(ArenaAllocator* allocator,
+ HInstruction** div) {
+ HGraph* graph = new (allocator) HGraph(allocator);
+ HBasicBlock* entry = new (allocator) HBasicBlock(graph);
+ graph->AddBlock(entry);
+ graph->SetEntryBlock(entry);
+ HInstruction* first = new (allocator) HParameterValue(0, Primitive::kPrimInt);
+ HInstruction* second = new (allocator) HParameterValue(1, Primitive::kPrimInt);
+ entry->AddInstruction(first);
+ entry->AddInstruction(second);
+
+ HBasicBlock* block = new (allocator) HBasicBlock(graph);
+ graph->AddBlock(block);
+ entry->AddSuccessor(block);
+
+ *div = new (allocator) HDiv(Primitive::kPrimInt, first, second);
+ block->AddInstruction(*div);
+
+ block->AddInstruction(new (allocator) HExit());
+ return graph;
+}
+
+TEST(RegisterAllocatorTest, ExpectedExactInRegisterAndSameOutputHint) {
+ ArenaPool pool;
+ ArenaAllocator allocator(&pool);
+ HInstruction *div;
+
+ {
+ HGraph* graph = BuildDiv(&allocator, &div);
+ x86::CodeGeneratorX86 codegen(graph);
+ SsaLivenessAnalysis liveness(*graph, &codegen);
+ liveness.Analyze();
+
+ RegisterAllocator register_allocator(&allocator, &codegen, liveness);
+ register_allocator.AllocateRegisters();
+
+ // div on x86 requires its first input in eax and its output to be the same as the first input.
+ ASSERT_EQ(div->GetLiveInterval()->GetRegister(), 0);
+ }
+}
+
} // namespace art
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index 2e951dd..5b70658 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -351,6 +351,22 @@
}
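+// movsxd sign-extends a 32-bit register or memory operand into a 64-bit register;
+// used here for int-to-long conversions.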
+void X86_64Assembler::movsxd(CpuRegister dst, CpuRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitRex64(dst);
+ EmitUint8(0x63);
+ EmitRegisterOperand(dst.LowBits(), src.LowBits());
+}
+
+
+void X86_64Assembler::movsxd(CpuRegister dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitRex64(dst);
+ EmitUint8(0x63);
+ EmitOperand(dst.LowBits(), src);
+}
+
+
void X86_64Assembler::movd(XmmRegister dst, CpuRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x66);
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index 5b16f08..ed80e44 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -300,6 +300,9 @@
void movss(const Address& dst, XmmRegister src);
void movss(XmmRegister dst, XmmRegister src);
+ void movsxd(CpuRegister dst, CpuRegister src);
+ void movsxd(CpuRegister dst, const Address& src);
+
void movd(XmmRegister dst, CpuRegister src);
void movd(CpuRegister dst, XmmRegister src);
diff --git a/test/417-optimizing-arith-div/src/Main.java b/test/417-optimizing-arith-div/src/Main.java
index 535cafb..5825d24 100644
--- a/test/417-optimizing-arith-div/src/Main.java
+++ b/test/417-optimizing-arith-div/src/Main.java
@@ -18,6 +18,12 @@
// it does compile the method.
public class Main {
+ public static void expectEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
public static void expectEquals(float expected, float result) {
if (expected != result) {
throw new Error("Expected: " + expected + ", found: " + result);
@@ -60,15 +66,51 @@
}
}
+ public static void expectDivisionByZero(int value) {
+ try {
+ $opt$Div(value, 0);
+ throw new Error("Expected RuntimeException when dividing by 0");
+ } catch (java.lang.RuntimeException e) {
+ }
+ try {
+ $opt$DivZero(value);
+ throw new Error("Expected RuntimeException when dividing by 0");
+ } catch (java.lang.RuntimeException e) {
+ }
+ }
public static void main(String[] args) {
div();
}
public static void div() {
+ divInt();
divFloat();
divDouble();
}
+ private static void divInt() {
+ expectEquals(2, $opt$DivLit(6));
+ expectEquals(2, $opt$Div(6, 3));
+ expectEquals(6, $opt$Div(6, 1));
+ expectEquals(-2, $opt$Div(6, -3));
+ expectEquals(1, $opt$Div(4, 3));
+ expectEquals(-1, $opt$Div(4, -3));
+ expectEquals(5, $opt$Div(23, 4));
+ expectEquals(-5, $opt$Div(-23, 4));
+
+ expectEquals(-Integer.MAX_VALUE, $opt$Div(Integer.MAX_VALUE, -1));
+ expectEquals(Integer.MIN_VALUE, $opt$Div(Integer.MIN_VALUE, -1)); // overflow
+ expectEquals(-1073741824, $opt$Div(Integer.MIN_VALUE, 2));
+
+ expectEquals(0, $opt$Div(0, Integer.MAX_VALUE));
+ expectEquals(0, $opt$Div(0, Integer.MIN_VALUE));
+
+ expectDivisionByZero(0);
+ expectDivisionByZero(1);
+ expectDivisionByZero(Integer.MAX_VALUE);
+ expectDivisionByZero(Integer.MIN_VALUE);
+ }
+
private static void divFloat() {
expectApproxEquals(1.6666666F, $opt$Div(5F, 3F));
expectApproxEquals(0F, $opt$Div(0F, 3F));
@@ -127,6 +169,19 @@
expectEquals(Float.NEGATIVE_INFINITY, $opt$Div(-Float.MAX_VALUE, Float.MIN_VALUE));
}
+ static int $opt$Div(int a, int b) {
+ return a / b;
+ }
+
+ static int $opt$DivZero(int a) {
+ return a / 0;
+ }
+
+ // Division by literals != 0 should not generate checks.
+ static int $opt$DivLit(int a) {
+ return a / 3;
+ }
+
static float $opt$Div(float a, float b) {
return a / b;
}
diff --git a/test/422-type-conversion/expected.txt b/test/422-type-conversion/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/422-type-conversion/expected.txt
diff --git a/test/422-type-conversion/info.txt b/test/422-type-conversion/info.txt
new file mode 100644
index 0000000..e734f32
--- /dev/null
+++ b/test/422-type-conversion/info.txt
@@ -0,0 +1 @@
+Tests for type conversions.
diff --git a/test/422-type-conversion/src/Main.java b/test/422-type-conversion/src/Main.java
new file mode 100644
index 0000000..d2ffc5b
--- /dev/null
+++ b/test/422-type-conversion/src/Main.java
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Note that $opt$ is a marker for the optimizing compiler to ensure
+// it does compile the method.
+public class Main {
+
+ public static void assertEquals(long expected, long result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void main(String[] args) {
+ byteToLong();
+ shortToLong();
+ intToLong();
+ }
+
+ private static void byteToLong() {
+ assertEquals(1L, $opt$ByteToLong((byte)1));
+ assertEquals(0L, $opt$ByteToLong((byte)0));
+ assertEquals(-1L, $opt$ByteToLong((byte)-1));
+ assertEquals(51L, $opt$ByteToLong((byte)51));
+ assertEquals(-51L, $opt$ByteToLong((byte)-51));
+ assertEquals(127L, $opt$ByteToLong((byte)127)); // (2^7) - 1
+ assertEquals(-127L, $opt$ByteToLong((byte)-127)); // -(2^7) + 1
+ assertEquals(-128L, $opt$ByteToLong((byte)-128)); // -(2^7)
+ }
+
+ private static void shortToLong() {
+ assertEquals(1L, $opt$ShortToLong((short)1));
+ assertEquals(0L, $opt$ShortToLong((short)0));
+ assertEquals(-1L, $opt$ShortToLong((short)-1));
+ assertEquals(51L, $opt$ShortToLong((short)51));
+ assertEquals(-51L, $opt$ShortToLong((short)-51));
+ assertEquals(32767L, $opt$ShortToLong((short)32767)); // (2^15) - 1
+ assertEquals(-32767L, $opt$ShortToLong((short)-32767)); // -(2^15) + 1
+ assertEquals(-32768L, $opt$ShortToLong((short)-32768)); // -(2^15)
+ }
+
+ private static void intToLong() {
+ assertEquals(1L, $opt$IntToLong(1));
+ assertEquals(0L, $opt$IntToLong(0));
+ assertEquals(-1L, $opt$IntToLong(-1));
+ assertEquals(51L, $opt$IntToLong(51));
+ assertEquals(-51L, $opt$IntToLong(-51));
+ assertEquals(2147483647L, $opt$IntToLong(2147483647)); // (2^31) - 1
+ assertEquals(-2147483647L, $opt$IntToLong(-2147483647)); // -(2^31) + 1
+ assertEquals(-2147483648L, $opt$IntToLong(-2147483648)); // -(2^31)
+ }
+
+ static long $opt$ByteToLong(byte a) {
+ // Translates to an int-to-long Dex instruction.
+ return a;
+ }
+
+ static long $opt$ShortToLong(short a) {
+ // Translates to an int-to-long Dex instruction.
+ return a;
+ }
+
+ static long $opt$IntToLong(int a) {
+ // Translates to an int-to-long Dex instruction.
+ return a;
+ }
+}
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 0a1e3e1..7d56271 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -308,6 +308,7 @@
# Known broken tests for the arm64 optimizing compiler backend.
TEST_ART_BROKEN_OPTIMIZING_ARM64_RUN_TESTS := \
001-HelloWorld \
+ 002-sleep \
003-omnibus-opcodes \
004-InterfaceTest \
004-JniTest \
@@ -406,9 +407,11 @@
418-const-string \
419-long-parameter \
420-const-class \
+ 422-type-conversion \
700-LoadArgRegs \
701-easy-div-rem \
702-LargeBranchOffset \
+ 703-floating-point-div \
800-smali
ifneq (,$(filter optimizing,$(COMPILER_TYPES)))