Diffstat (limited to 'compiler/optimizing')
-rw-r--r--  compiler/optimizing/builder.cc                      30
-rw-r--r--  compiler/optimizing/code_generator.cc               12
-rw-r--r--  compiler/optimizing/code_generator.h                17
-rw-r--r--  compiler/optimizing/code_generator_arm.cc          207
-rw-r--r--  compiler/optimizing/code_generator_arm.h            16
-rw-r--r--  compiler/optimizing/code_generator_x86.cc          253
-rw-r--r--  compiler/optimizing/code_generator_x86.h            16
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc       177
-rw-r--r--  compiler/optimizing/code_generator_x86_64.h         12
-rw-r--r--  compiler/optimizing/codegen_test.cc                 45
-rw-r--r--  compiler/optimizing/constant_propagation_test.cc     2
-rw-r--r--  compiler/optimizing/dead_code_elimination.cc         5
-rw-r--r--  compiler/optimizing/dead_code_elimination_test.cc    2
-rw-r--r--  compiler/optimizing/graph_checker.cc                39
-rw-r--r--  compiler/optimizing/graph_checker.h                 11
-rw-r--r--  compiler/optimizing/graph_checker_test.cc           12
-rw-r--r--  compiler/optimizing/gvn.h                            3
-rw-r--r--  compiler/optimizing/nodes.cc                         6
-rw-r--r--  compiler/optimizing/nodes.h                         28
-rw-r--r--  compiler/optimizing/register_allocator.h             4
20 files changed, 707 insertions(+), 190 deletions(-)
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 5bcc65b03b..2648d4d670 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -713,6 +713,16 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
break;
}
+ case Instruction::MUL_INT: {
+ Binop_23x<HMul>(instruction, Primitive::kPrimInt);
+ break;
+ }
+
+ case Instruction::MUL_LONG: {
+ Binop_23x<HMul>(instruction, Primitive::kPrimLong);
+ break;
+ }
+
case Instruction::ADD_LONG_2ADDR: {
Binop_12x<HAdd>(instruction, Primitive::kPrimLong);
break;
@@ -738,6 +748,16 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
break;
}
+ case Instruction::MUL_INT_2ADDR: {
+ Binop_12x<HMul>(instruction, Primitive::kPrimInt);
+ break;
+ }
+
+ case Instruction::MUL_LONG_2ADDR: {
+ Binop_12x<HMul>(instruction, Primitive::kPrimLong);
+ break;
+ }
+
case Instruction::ADD_INT_LIT16: {
Binop_22s<HAdd>(instruction, false);
break;
@@ -748,6 +768,11 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
break;
}
+ case Instruction::MUL_INT_LIT16: {
+ Binop_22s<HMul>(instruction, false);
+ break;
+ }
+
case Instruction::ADD_INT_LIT8: {
Binop_22b<HAdd>(instruction, false);
break;
@@ -758,6 +783,11 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
break;
}
+ case Instruction::MUL_INT_LIT8: {
+ Binop_22b<HMul>(instruction, false);
+ break;
+ }
+
case Instruction::NEW_INSTANCE: {
current_block_->AddInstruction(
new (arena_) HNewInstance(dex_offset, instruction.VRegB_21c()));
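
Note on the new cases: mul-int and mul-long use dex format 23x (op vAA, vBB, vCC), the 2addr forms use 12x, and the literal forms use 22s/22b, which is exactly what the existing Binop_23x/Binop_12x/Binop_22s/Binop_22b helpers decode. A rough host-side sketch of the 23x layout, with illustrative field names rather than the actual art::Instruction accessors:

    #include <cstdint>

    // dex format 23x: two 16-bit code units laid out as AA|op, CC|BB.
    struct Format23x {
      uint8_t opcode;  // e.g. 0x92 for mul-int
      uint8_t vAA;     // destination register
      uint8_t vBB;     // first source register
      uint8_t vCC;     // second source register
    };

    Format23x Decode23x(const uint16_t* insns) {
      Format23x f;
      f.opcode = static_cast<uint8_t>(insns[0] & 0xff);
      f.vAA = static_cast<uint8_t>(insns[0] >> 8);
      f.vBB = static_cast<uint8_t>(insns[1] & 0xff);
      f.vCC = static_cast<uint8_t>(insns[1] >> 8);
      return f;
    }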
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 29dbd8b33d..408e13e36d 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -36,7 +36,7 @@ void CodeGenerator::CompileBaseline(CodeAllocator* allocator, bool is_leaf) {
const GrowableArray<HBasicBlock*>& blocks = GetGraph()->GetBlocks();
DCHECK(blocks.Get(0) == GetGraph()->GetEntryBlock());
DCHECK(GoesToNextBlock(GetGraph()->GetEntryBlock(), blocks.Get(1)));
- block_labels_.SetSize(blocks.Size());
+ Initialize();
DCHECK_EQ(frame_size_, kUninitializedFrameSize);
if (!is_leaf) {
@@ -54,7 +54,7 @@ void CodeGenerator::CompileBaseline(CodeAllocator* allocator, bool is_leaf) {
HGraphVisitor* instruction_visitor = GetInstructionVisitor();
for (size_t i = 0, e = blocks.Size(); i < e; ++i) {
HBasicBlock* block = blocks.Get(i);
- Bind(GetLabelOf(block));
+ Bind(block);
for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
HInstruction* current = it.Current();
current->Accept(location_builder);
@@ -76,13 +76,13 @@ void CodeGenerator::CompileOptimized(CodeAllocator* allocator) {
const GrowableArray<HBasicBlock*>& blocks = GetGraph()->GetBlocks();
DCHECK(blocks.Get(0) == GetGraph()->GetEntryBlock());
DCHECK(GoesToNextBlock(GetGraph()->GetEntryBlock(), blocks.Get(1)));
- block_labels_.SetSize(blocks.Size());
+ Initialize();
GenerateFrameEntry();
HGraphVisitor* instruction_visitor = GetInstructionVisitor();
for (size_t i = 0, e = blocks.Size(); i < e; ++i) {
HBasicBlock* block = blocks.Get(i);
- Bind(GetLabelOf(block));
+ Bind(block);
for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
HInstruction* current = it.Current();
current->Accept(instruction_visitor);
@@ -273,10 +273,6 @@ bool CodeGenerator::GoesToNextBlock(HBasicBlock* current, HBasicBlock* next) con
return current->GetBlockId() + 1 == next->GetBlockId();
}
-Label* CodeGenerator::GetLabelOf(HBasicBlock* block) const {
- return block_labels_.GetRawStorage() + block->GetBlockId();
-}
-
CodeGenerator* CodeGenerator::Create(ArenaAllocator* allocator,
HGraph* graph,
InstructionSet instruction_set) {
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 4eba791723..7aaf99108f 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -24,13 +24,13 @@
#include "memory_region.h"
#include "nodes.h"
#include "stack_map_stream.h"
-#include "utils/assembler.h"
namespace art {
static size_t constexpr kVRegSize = 4;
static size_t constexpr kUninitializedFrameSize = 0;
+class Assembler;
class CodeGenerator;
class DexCompilationUnit;
class SrcMap;
@@ -53,18 +53,12 @@ struct PcInfo {
class SlowPathCode : public ArenaObject {
public:
- SlowPathCode() : entry_label_(), exit_label_() {}
+ SlowPathCode() {}
virtual ~SlowPathCode() {}
- Label* GetEntryLabel() { return &entry_label_; }
- Label* GetExitLabel() { return &exit_label_; }
-
virtual void EmitNativeCode(CodeGenerator* codegen) = 0;
private:
- Label entry_label_;
- Label exit_label_;
-
DISALLOW_COPY_AND_ASSIGN(SlowPathCode);
};
@@ -80,7 +74,6 @@ class CodeGenerator : public ArenaObject {
HGraph* GetGraph() const { return graph_; }
- Label* GetLabelOf(HBasicBlock* block) const;
bool GoesToNextBlock(HBasicBlock* current, HBasicBlock* next) const;
size_t GetStackSlotOfParameter(HParameterValue* parameter) const {
@@ -90,9 +83,10 @@ class CodeGenerator : public ArenaObject {
+ parameter->GetIndex() * kVRegSize;
}
+ virtual void Initialize() = 0;
virtual void GenerateFrameEntry() = 0;
virtual void GenerateFrameExit() = 0;
- virtual void Bind(Label* label) = 0;
+ virtual void Bind(HBasicBlock* block) = 0;
virtual void Move(HInstruction* instruction, Location location, HInstruction* move_for) = 0;
virtual HGraphVisitor* GetLocationBuilder() = 0;
virtual HGraphVisitor* GetInstructionVisitor() = 0;
@@ -167,7 +161,6 @@ class CodeGenerator : public ArenaObject {
number_of_fpu_registers_(number_of_fpu_registers),
number_of_register_pairs_(number_of_register_pairs),
graph_(graph),
- block_labels_(graph->GetArena(), 0),
pc_infos_(graph->GetArena(), 32),
slow_paths_(graph->GetArena(), 8),
is_leaf_(true),
@@ -205,8 +198,6 @@ class CodeGenerator : public ArenaObject {
HGraph* const graph_;
- // Labels for each block that will be compiled.
- GrowableArray<Label> block_labels_;
GrowableArray<PcInfo> pc_infos_;
GrowableArray<SlowPathCode*> slow_paths_;
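
The net effect of this header change is that code_generator.h no longer depends on utils/assembler.h: Label storage moves into each backend, and the base class reaches it only through the new Initialize()/Bind(HBasicBlock*) hooks. A stripped-down sketch of the pattern, using illustrative names rather than the art classes:

    #include <cstddef>
    #include <vector>

    struct Label {};  // stand-in for the assembler's Label type

    class CodeGen {
     public:
      virtual ~CodeGen() {}
      void Compile(size_t num_blocks) {
        num_blocks_ = num_blocks;
        Initialize();                    // backend sizes its label table
        for (size_t i = 0; i < num_blocks; ++i) {
          Bind(i);                       // bind per block, not per Label
        }
      }
     protected:
      size_t NumBlocks() const { return num_blocks_; }
      virtual void Initialize() = 0;
      virtual void Bind(size_t block_id) = 0;
     private:
      size_t num_blocks_ = 0;
    };

    class CodeGenArm : public CodeGen {
     protected:
      void Initialize() override { block_labels_.resize(NumBlocks()); }
      void Bind(size_t block_id) override { (void)&block_labels_[block_id]; }
     private:
      std::vector<Label> block_labels_;  // label storage now lives in the backend
    };

    int main() {
      CodeGenArm gen;
      gen.Compile(4);
      return 0;
    }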
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 9be780216a..a2cf670b0f 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -60,7 +60,21 @@ class InvokeRuntimeCallingConvention : public CallingConvention<Register, DRegis
#define __ reinterpret_cast<ArmAssembler*>(codegen->GetAssembler())->
-class NullCheckSlowPathARM : public SlowPathCode {
+class SlowPathCodeARM : public SlowPathCode {
+ public:
+ SlowPathCodeARM() : entry_label_(), exit_label_() {}
+
+ Label* GetEntryLabel() { return &entry_label_; }
+ Label* GetExitLabel() { return &exit_label_; }
+
+ private:
+ Label entry_label_;
+ Label exit_label_;
+
+ DISALLOW_COPY_AND_ASSIGN(SlowPathCodeARM);
+};
+
+class NullCheckSlowPathARM : public SlowPathCodeARM {
public:
explicit NullCheckSlowPathARM(HNullCheck* instruction) : instruction_(instruction) {}
@@ -77,7 +91,7 @@ class NullCheckSlowPathARM : public SlowPathCode {
DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathARM);
};
-class StackOverflowCheckSlowPathARM : public SlowPathCode {
+class StackOverflowCheckSlowPathARM : public SlowPathCodeARM {
public:
StackOverflowCheckSlowPathARM() {}
@@ -91,12 +105,13 @@ class StackOverflowCheckSlowPathARM : public SlowPathCode {
DISALLOW_COPY_AND_ASSIGN(StackOverflowCheckSlowPathARM);
};
-class SuspendCheckSlowPathARM : public SlowPathCode {
+class SuspendCheckSlowPathARM : public SlowPathCodeARM {
public:
explicit SuspendCheckSlowPathARM(HSuspendCheck* instruction, HBasicBlock* successor)
: instruction_(instruction), successor_(successor) {}
virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
__ Bind(GetEntryLabel());
codegen->SaveLiveRegisters(instruction_->GetLocations());
int32_t offset = QUICK_ENTRYPOINT_OFFSET(kArmWordSize, pTestSuspend).Int32Value();
@@ -107,7 +122,7 @@ class SuspendCheckSlowPathARM : public SlowPathCode {
if (successor_ == nullptr) {
__ b(GetReturnLabel());
} else {
- __ b(codegen->GetLabelOf(successor_));
+ __ b(arm_codegen->GetLabelOf(successor_));
}
}
@@ -127,7 +142,7 @@ class SuspendCheckSlowPathARM : public SlowPathCode {
DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathARM);
};
-class BoundsCheckSlowPathARM : public SlowPathCode {
+class BoundsCheckSlowPathARM : public SlowPathCodeARM {
public:
BoundsCheckSlowPathARM(HBoundsCheck* instruction,
Location index_location,
@@ -137,7 +152,7 @@ class BoundsCheckSlowPathARM : public SlowPathCode {
length_location_(length_location) {}
virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
- CodeGeneratorARM* arm_codegen = reinterpret_cast<CodeGeneratorARM*>(codegen);
+ CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
__ Bind(GetEntryLabel());
InvokeRuntimeCallingConvention calling_convention;
arm_codegen->Move32(Location::RegisterLocation(calling_convention.GetRegisterAt(0)), index_location_);
@@ -205,6 +220,7 @@ void CodeGeneratorARM::RestoreCoreRegister(Location stack_location, uint32_t reg
CodeGeneratorARM::CodeGeneratorARM(HGraph* graph)
: CodeGenerator(graph, kNumberOfCoreRegisters, kNumberOfDRegisters, kNumberOfRegisterPairs),
+ block_labels_(graph->GetArena(), 0),
location_builder_(graph, this),
instruction_visitor_(graph, this),
move_resolver_(graph->GetArena(), this),
@@ -220,19 +236,12 @@ Location CodeGeneratorARM::AllocateFreeRegister(Primitive::Type type) const {
size_t reg = FindFreeEntry(blocked_register_pairs_, kNumberOfRegisterPairs);
ArmManagedRegister pair =
ArmManagedRegister::FromRegisterPair(static_cast<RegisterPair>(reg));
+ DCHECK(!blocked_core_registers_[pair.AsRegisterPairLow()]);
+ DCHECK(!blocked_core_registers_[pair.AsRegisterPairHigh()]);
+
blocked_core_registers_[pair.AsRegisterPairLow()] = true;
blocked_core_registers_[pair.AsRegisterPairHigh()] = true;
- // Block all other register pairs that share a register with `pair`.
- for (int i = 0; i < kNumberOfRegisterPairs; i++) {
- ArmManagedRegister current =
- ArmManagedRegister::FromRegisterPair(static_cast<RegisterPair>(i));
- if (current.AsRegisterPairLow() == pair.AsRegisterPairLow()
- || current.AsRegisterPairLow() == pair.AsRegisterPairHigh()
- || current.AsRegisterPairHigh() == pair.AsRegisterPairLow()
- || current.AsRegisterPairHigh() == pair.AsRegisterPairHigh()) {
- blocked_register_pairs_[i] = true;
- }
- }
+ UpdateBlockedPairRegisters();
return Location::RegisterPairLocation(pair.AsRegisterPairLow(), pair.AsRegisterPairHigh());
}
@@ -278,7 +287,6 @@ void CodeGeneratorARM::SetupBlockedRegisters() const {
// Reserve R4 for suspend check.
blocked_core_registers_[R4] = true;
- blocked_register_pairs_[R4_R5] = true;
// Reserve thread register.
blocked_core_registers_[TR] = true;
@@ -302,6 +310,19 @@ void CodeGeneratorARM::SetupBlockedRegisters() const {
blocked_fpu_registers_[D13] = true;
blocked_fpu_registers_[D14] = true;
blocked_fpu_registers_[D15] = true;
+
+ UpdateBlockedPairRegisters();
+}
+
+void CodeGeneratorARM::UpdateBlockedPairRegisters() const {
+ for (int i = 0; i < kNumberOfRegisterPairs; i++) {
+ ArmManagedRegister current =
+ ArmManagedRegister::FromRegisterPair(static_cast<RegisterPair>(i));
+ if (blocked_core_registers_[current.AsRegisterPairLow()]
+ || blocked_core_registers_[current.AsRegisterPairHigh()]) {
+ blocked_register_pairs_[i] = true;
+ }
+ }
}
InstructionCodeGeneratorARM::InstructionCodeGeneratorARM(HGraph* graph, CodeGeneratorARM* codegen)
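
UpdateBlockedPairRegisters() replaces the hand-written pair enumeration: a pair is blocked iff either half is blocked, so reserving one core register (R4 for the suspend check, say) blocks every pair containing it without listing pairs explicitly. A toy version of the invariant, with a made-up three-pair table:

    #include <array>
    #include <cassert>

    constexpr int kPairs = 3;
    struct Pair { int lo, hi; };
    constexpr std::array<Pair, kPairs> kPairTable{{{0, 1}, {2, 3}, {4, 5}}};

    int main() {
      std::array<bool, 6> blocked_core{};
      std::array<bool, kPairs> blocked_pair{};
      blocked_core[4] = true;  // e.g. R4 reserved for the suspend check
      for (int i = 0; i < kPairs; ++i) {
        blocked_pair[i] = blocked_core[kPairTable[i].lo] ||
                          blocked_core[kPairTable[i].hi];
      }
      assert(blocked_pair[2] && !blocked_pair[0]);
      return 0;
    }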
@@ -313,7 +334,7 @@ void CodeGeneratorARM::GenerateFrameEntry() {
bool skip_overflow_check = IsLeafMethod() && !FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kArm);
if (!skip_overflow_check) {
if (kExplicitStackOverflowCheck) {
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) StackOverflowCheckSlowPathARM();
+ SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) StackOverflowCheckSlowPathARM();
AddSlowPath(slow_path);
__ LoadFromOffset(kLoadWord, IP, TR, Thread::StackEndOffset<kArmWordSize>().Int32Value());
@@ -339,8 +360,8 @@ void CodeGeneratorARM::GenerateFrameExit() {
__ PopList(1 << PC | 1 << R6 | 1 << R7);
}
-void CodeGeneratorARM::Bind(Label* label) {
- __ Bind(label);
+void CodeGeneratorARM::Bind(HBasicBlock* block) {
+ __ Bind(GetLabelOf(block));
}
Location CodeGeneratorARM::GetStackLocation(HLoadLocal* load) const {
@@ -641,36 +662,51 @@ void LocationsBuilderARM::VisitIf(HIf* if_instr) {
void InstructionCodeGeneratorARM::VisitIf(HIf* if_instr) {
HInstruction* cond = if_instr->InputAt(0);
- if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
- // Condition has been materialized, compare the output to 0
- DCHECK(if_instr->GetLocations()->InAt(0).IsRegister());
- __ cmp(if_instr->GetLocations()->InAt(0).As<Register>(),
- ShifterOperand(0));
- __ b(codegen_->GetLabelOf(if_instr->IfTrueSuccessor()), NE);
+ if (cond->IsIntConstant()) {
+ // Constant condition, statically compared against 1.
+ int32_t cond_value = cond->AsIntConstant()->GetValue();
+ if (cond_value == 1) {
+ if (!codegen_->GoesToNextBlock(if_instr->GetBlock(),
+ if_instr->IfTrueSuccessor())) {
+ __ b(codegen_->GetLabelOf(if_instr->IfTrueSuccessor()));
+ }
+ return;
+ } else {
+ DCHECK_EQ(cond_value, 0);
+ }
} else {
- // Condition has not been materialized, use its inputs as the comparison and its
- // condition as the branch condition.
- LocationSummary* locations = cond->GetLocations();
- if (locations->InAt(1).IsRegister()) {
- __ cmp(locations->InAt(0).As<Register>(),
- ShifterOperand(locations->InAt(1).As<Register>()));
+ if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
+ // Condition has been materialized, compare the output to 0
+ DCHECK(if_instr->GetLocations()->InAt(0).IsRegister());
+ __ cmp(if_instr->GetLocations()->InAt(0).As<Register>(),
+ ShifterOperand(0));
+ __ b(codegen_->GetLabelOf(if_instr->IfTrueSuccessor()), NE);
} else {
- DCHECK(locations->InAt(1).IsConstant());
- int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue();
- ShifterOperand operand;
- if (ShifterOperand::CanHoldArm(value, &operand)) {
- __ cmp(locations->InAt(0).As<Register>(), ShifterOperand(value));
+ // Condition has not been materialized, use its inputs as the
+ // comparison and its condition as the branch condition.
+ LocationSummary* locations = cond->GetLocations();
+ if (locations->InAt(1).IsRegister()) {
+ __ cmp(locations->InAt(0).As<Register>(),
+ ShifterOperand(locations->InAt(1).As<Register>()));
} else {
- Register temp = IP;
- __ LoadImmediate(temp, value);
- __ cmp(locations->InAt(0).As<Register>(), ShifterOperand(temp));
+ DCHECK(locations->InAt(1).IsConstant());
+ int32_t value =
+ locations->InAt(1).GetConstant()->AsIntConstant()->GetValue();
+ ShifterOperand operand;
+ if (ShifterOperand::CanHoldArm(value, &operand)) {
+ __ cmp(locations->InAt(0).As<Register>(), ShifterOperand(value));
+ } else {
+ Register temp = IP;
+ __ LoadImmediate(temp, value);
+ __ cmp(locations->InAt(0).As<Register>(), ShifterOperand(temp));
+ }
}
+ __ b(codegen_->GetLabelOf(if_instr->IfTrueSuccessor()),
+ ARMCondition(cond->AsCondition()->GetCondition()));
}
- __ b(codegen_->GetLabelOf(if_instr->IfTrueSuccessor()),
- ARMCondition(cond->AsCondition()->GetCondition()));
}
-
- if (!codegen_->GoesToNextBlock(if_instr->GetBlock(), if_instr->IfFalseSuccessor())) {
+ if (!codegen_->GoesToNextBlock(if_instr->GetBlock(),
+ if_instr->IfFalseSuccessor())) {
__ b(codegen_->GetLabelOf(if_instr->IfFalseSuccessor()));
}
}
@@ -810,6 +846,7 @@ void LocationsBuilderARM::VisitIntConstant(HIntConstant* constant) {
}
void InstructionCodeGeneratorARM::VisitIntConstant(HIntConstant* constant) {
+ // Will be generated at use site.
}
void LocationsBuilderARM::VisitLongConstant(HLongConstant* constant) {
@@ -1107,6 +1144,82 @@ void InstructionCodeGeneratorARM::VisitSub(HSub* sub) {
}
}
+void LocationsBuilderARM::VisitMul(HMul* mul) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
+ switch (mul->GetResultType()) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong: {
+ locations->SetInAt(0, Location::RequiresRegister(), Location::kDiesAtEntry);
+ locations->SetInAt(1, Location::RequiresRegister(), Location::kDiesAtEntry);
+ locations->SetOut(Location::RequiresRegister());
+ break;
+ }
+
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
+ break;
+
+ default:
+ LOG(FATAL) << "Unimplemented mul type " << mul->GetResultType();
+ }
+}
+
+void InstructionCodeGeneratorARM::VisitMul(HMul* mul) {
+ LocationSummary* locations = mul->GetLocations();
+ Location out = locations->Out();
+ Location first = locations->InAt(0);
+ Location second = locations->InAt(1);
+ switch (mul->GetResultType()) {
+ case Primitive::kPrimInt: {
+ __ mul(out.As<Register>(), first.As<Register>(), second.As<Register>());
+ break;
+ }
+ case Primitive::kPrimLong: {
+ Register out_hi = out.AsRegisterPairHigh<Register>();
+ Register out_lo = out.AsRegisterPairLow<Register>();
+ Register in1_hi = first.AsRegisterPairHigh<Register>();
+ Register in1_lo = first.AsRegisterPairLow<Register>();
+ Register in2_hi = second.AsRegisterPairHigh<Register>();
+ Register in2_lo = second.AsRegisterPairLow<Register>();
+
+ // Extra checks needed because the register pair R1_R2 exists:
+ // the algorithm below is wrong if out.hi is either in1.lo or in2.lo
+ // (e.g. in1=r0_r1, in2=r2_r3 and out=r1_r2).
+ DCHECK_NE(out_hi, in1_lo);
+ DCHECK_NE(out_hi, in2_lo);
+
+ // input: in1 - 64 bits, in2 - 64 bits
+ // output: out
+ // formula: out.hi : out.lo = (in1.lo * in2.hi + in1.hi * in2.lo) * 2^32 + in1.lo * in2.lo
+ // parts: out.hi = in1.lo * in2.hi + in1.hi * in2.lo + (in1.lo * in2.lo)[63:32]
+ // parts: out.lo = (in1.lo * in2.lo)[31:0]
+
+ // IP <- in1.lo * in2.hi
+ __ mul(IP, in1_lo, in2_hi);
+ // out.hi <- in1.lo * in2.hi + in1.hi * in2.lo
+ __ mla(out_hi, in1_hi, in2_lo, IP);
+ // out.lo <- (in1.lo * in2.lo)[31:0];
+ __ umull(out_lo, IP, in1_lo, in2_lo);
+ // out.hi <- in2.hi * in1.lo + in2.lo * in1.hi + (in1.lo * in2.lo)[63:32]
+ __ add(out_hi, out_hi, ShifterOperand(IP));
+ break;
+ }
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
+ break;
+
+ default:
+ LOG(FATAL) << "Unimplemented mul type " << mul->GetResultType();
+ }
+}
+
void LocationsBuilderARM::VisitNewInstance(HNewInstance* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
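
The long-multiply sequence above relies on the identity (a_hi*2^32 + a_lo) * (b_hi*2^32 + b_lo) = ((a_lo*b_hi + a_hi*b_lo) mod 2^32) * 2^32 + a_lo*b_lo (mod 2^64), where the a_hi*b_hi term vanishes modulo 2^64. A small host-side check of that decomposition, mirroring the emitted mul/mla/umull/add sequence (a hypothetical helper, not part of the patch):

    #include <cassert>
    #include <cstdint>

    // 64-bit multiply from 32x32 pieces, step for step like the emitted code.
    uint64_t Mul64Via32(uint64_t a, uint64_t b) {
      uint32_t a_lo = static_cast<uint32_t>(a);
      uint32_t a_hi = static_cast<uint32_t>(a >> 32);
      uint32_t b_lo = static_cast<uint32_t>(b);
      uint32_t b_hi = static_cast<uint32_t>(b >> 32);
      uint32_t ip = a_lo * b_hi;                          // mul IP, in1_lo, in2_hi
      uint32_t out_hi = a_hi * b_lo + ip;                 // mla out_hi, in1_hi, in2_lo, IP
      uint64_t low = static_cast<uint64_t>(a_lo) * b_lo;  // umull out_lo, IP, in1_lo, in2_lo
      out_hi += static_cast<uint32_t>(low >> 32);         // add out_hi, out_hi, IP
      return (static_cast<uint64_t>(out_hi) << 32) | static_cast<uint32_t>(low);
    }

    int main() {
      assert(Mul64Via32(0x0123456789abcdefull, 0xfedcba9876543210ull) ==
             0x0123456789abcdefull * 0xfedcba9876543210ull);
      assert(Mul64Via32(~0ull, ~0ull) == ~0ull * ~0ull);
      return 0;
    }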
@@ -1349,7 +1462,7 @@ void LocationsBuilderARM::VisitNullCheck(HNullCheck* instruction) {
}
void InstructionCodeGeneratorARM::VisitNullCheck(HNullCheck* instruction) {
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathARM(instruction);
+ SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathARM(instruction);
codegen_->AddSlowPath(slow_path);
LocationSummary* locations = instruction->GetLocations();
@@ -1595,7 +1708,7 @@ void LocationsBuilderARM::VisitBoundsCheck(HBoundsCheck* instruction) {
void InstructionCodeGeneratorARM::VisitBoundsCheck(HBoundsCheck* instruction) {
LocationSummary* locations = instruction->GetLocations();
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathARM(
+ SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathARM(
instruction, locations->InAt(0), locations->InAt(1));
codegen_->AddSlowPath(slow_path);
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 874db0fd54..57b289c801 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -140,7 +140,7 @@ class CodeGeneratorARM : public CodeGenerator {
virtual void GenerateFrameEntry() OVERRIDE;
virtual void GenerateFrameExit() OVERRIDE;
- virtual void Bind(Label* label) OVERRIDE;
+ virtual void Bind(HBasicBlock* block) OVERRIDE;
virtual void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
virtual void SaveCoreRegister(Location stack_location, uint32_t reg_id) OVERRIDE;
virtual void RestoreCoreRegister(Location stack_location, uint32_t reg_id) OVERRIDE;
@@ -164,6 +164,7 @@ class CodeGeneratorARM : public CodeGenerator {
}
virtual void SetupBlockedRegisters() const OVERRIDE;
+
virtual Location AllocateFreeRegister(Primitive::Type type) const OVERRIDE;
virtual Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
@@ -171,6 +172,9 @@ class CodeGeneratorARM : public CodeGenerator {
virtual void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
virtual void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
+ // Blocks all register pairs made out of blocked core registers.
+ void UpdateBlockedPairRegisters() const;
+
ParallelMoveResolverARM* GetMoveResolver() {
return &move_resolver_;
}
@@ -187,7 +191,17 @@ class CodeGeneratorARM : public CodeGenerator {
// Emit a write barrier.
void MarkGCCard(Register temp, Register card, Register object, Register value);
+ Label* GetLabelOf(HBasicBlock* block) const {
+ return block_labels_.GetRawStorage() + block->GetBlockId();
+ }
+
+ virtual void Initialize() OVERRIDE {
+ block_labels_.SetSize(GetGraph()->GetBlocks().Size());
+ }
+
private:
+ // Labels for each block that will be compiled.
+ GrowableArray<Label> block_labels_;
LocationsBuilderARM location_builder_;
InstructionCodeGeneratorARM instruction_visitor_;
ParallelMoveResolverARM move_resolver_;
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 34fa46efd0..041acdf91e 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -56,7 +56,21 @@ class InvokeRuntimeCallingConvention : public CallingConvention<Register, XmmReg
#define __ reinterpret_cast<X86Assembler*>(codegen->GetAssembler())->
-class NullCheckSlowPathX86 : public SlowPathCode {
+class SlowPathCodeX86 : public SlowPathCode {
+ public:
+ SlowPathCodeX86() : entry_label_(), exit_label_() {}
+
+ Label* GetEntryLabel() { return &entry_label_; }
+ Label* GetExitLabel() { return &exit_label_; }
+
+ private:
+ Label entry_label_;
+ Label exit_label_;
+
+ DISALLOW_COPY_AND_ASSIGN(SlowPathCodeX86);
+};
+
+class NullCheckSlowPathX86 : public SlowPathCodeX86 {
public:
explicit NullCheckSlowPathX86(HNullCheck* instruction) : instruction_(instruction) {}
@@ -71,7 +85,7 @@ class NullCheckSlowPathX86 : public SlowPathCode {
DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathX86);
};
-class StackOverflowCheckSlowPathX86 : public SlowPathCode {
+class StackOverflowCheckSlowPathX86 : public SlowPathCodeX86 {
public:
StackOverflowCheckSlowPathX86() {}
@@ -86,7 +100,7 @@ class StackOverflowCheckSlowPathX86 : public SlowPathCode {
DISALLOW_COPY_AND_ASSIGN(StackOverflowCheckSlowPathX86);
};
-class BoundsCheckSlowPathX86 : public SlowPathCode {
+class BoundsCheckSlowPathX86 : public SlowPathCodeX86 {
public:
BoundsCheckSlowPathX86(HBoundsCheck* instruction,
Location index_location,
@@ -94,7 +108,7 @@ class BoundsCheckSlowPathX86 : public SlowPathCode {
: instruction_(instruction), index_location_(index_location), length_location_(length_location) {}
virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
- CodeGeneratorX86* x86_codegen = reinterpret_cast<CodeGeneratorX86*>(codegen);
+ CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
InvokeRuntimeCallingConvention calling_convention;
x86_codegen->Move32(Location::RegisterLocation(calling_convention.GetRegisterAt(0)), index_location_);
@@ -111,12 +125,13 @@ class BoundsCheckSlowPathX86 : public SlowPathCode {
DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathX86);
};
-class SuspendCheckSlowPathX86 : public SlowPathCode {
+class SuspendCheckSlowPathX86 : public SlowPathCodeX86 {
public:
explicit SuspendCheckSlowPathX86(HSuspendCheck* instruction, HBasicBlock* successor)
: instruction_(instruction), successor_(successor) {}
virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
codegen->SaveLiveRegisters(instruction_->GetLocations());
__ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pTestSuspend)));
@@ -125,7 +140,7 @@ class SuspendCheckSlowPathX86 : public SlowPathCode {
if (successor_ == nullptr) {
__ jmp(GetReturnLabel());
} else {
- __ jmp(codegen->GetLabelOf(successor_));
+ __ jmp(x86_codegen->GetLabelOf(successor_));
}
}
@@ -177,6 +192,7 @@ void CodeGeneratorX86::RestoreCoreRegister(Location stack_location, uint32_t reg
CodeGeneratorX86::CodeGeneratorX86(HGraph* graph)
: CodeGenerator(graph, kNumberOfCpuRegisters, kNumberOfXmmRegisters, kNumberOfRegisterPairs),
+ block_labels_(graph->GetArena(), 0),
location_builder_(graph, this),
instruction_visitor_(graph, this),
move_resolver_(graph->GetArena(), this) {}
@@ -191,19 +207,11 @@ Location CodeGeneratorX86::AllocateFreeRegister(Primitive::Type type) const {
size_t reg = FindFreeEntry(blocked_register_pairs_, kNumberOfRegisterPairs);
X86ManagedRegister pair =
X86ManagedRegister::FromRegisterPair(static_cast<RegisterPair>(reg));
+ DCHECK(!blocked_core_registers_[pair.AsRegisterPairLow()]);
+ DCHECK(!blocked_core_registers_[pair.AsRegisterPairHigh()]);
blocked_core_registers_[pair.AsRegisterPairLow()] = true;
blocked_core_registers_[pair.AsRegisterPairHigh()] = true;
- // Block all other register pairs that share a register with `pair`.
- for (int i = 0; i < kNumberOfRegisterPairs; i++) {
- X86ManagedRegister current =
- X86ManagedRegister::FromRegisterPair(static_cast<RegisterPair>(i));
- if (current.AsRegisterPairLow() == pair.AsRegisterPairLow()
- || current.AsRegisterPairLow() == pair.AsRegisterPairHigh()
- || current.AsRegisterPairHigh() == pair.AsRegisterPairLow()
- || current.AsRegisterPairHigh() == pair.AsRegisterPairHigh()) {
- blocked_register_pairs_[i] = true;
- }
- }
+ UpdateBlockedPairRegisters();
return Location::RegisterPairLocation(pair.AsRegisterPairLow(), pair.AsRegisterPairHigh());
}
@@ -250,10 +258,19 @@ void CodeGeneratorX86::SetupBlockedRegisters() const {
blocked_core_registers_[EBP] = true;
blocked_core_registers_[ESI] = true;
blocked_core_registers_[EDI] = true;
- blocked_register_pairs_[EAX_EDI] = true;
- blocked_register_pairs_[EDX_EDI] = true;
- blocked_register_pairs_[ECX_EDI] = true;
- blocked_register_pairs_[EBX_EDI] = true;
+
+ UpdateBlockedPairRegisters();
+}
+
+void CodeGeneratorX86::UpdateBlockedPairRegisters() const {
+ for (int i = 0; i < kNumberOfRegisterPairs; i++) {
+ X86ManagedRegister current =
+ X86ManagedRegister::FromRegisterPair(static_cast<RegisterPair>(i));
+ if (blocked_core_registers_[current.AsRegisterPairLow()]
+ || blocked_core_registers_[current.AsRegisterPairHigh()]) {
+ blocked_register_pairs_[i] = true;
+ }
+ }
}
InstructionCodeGeneratorX86::InstructionCodeGeneratorX86(HGraph* graph, CodeGeneratorX86* codegen)
@@ -276,7 +293,7 @@ void CodeGeneratorX86::GenerateFrameEntry() {
__ subl(ESP, Immediate(GetFrameSize() - kNumberOfPushedRegistersAtEntry * kX86WordSize));
if (!skip_overflow_check && kExplicitStackOverflowCheck) {
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) StackOverflowCheckSlowPathX86();
+ SlowPathCodeX86* slow_path = new (GetGraph()->GetArena()) StackOverflowCheckSlowPathX86();
AddSlowPath(slow_path);
__ fs()->cmpl(ESP, Address::Absolute(Thread::StackEndOffset<kX86WordSize>()));
@@ -290,8 +307,8 @@ void CodeGeneratorX86::GenerateFrameExit() {
__ addl(ESP, Immediate(GetFrameSize() - kNumberOfPushedRegistersAtEntry * kX86WordSize));
}
-void CodeGeneratorX86::Bind(Label* label) {
- __ Bind(label);
+void CodeGeneratorX86::Bind(HBasicBlock* block) {
+ __ Bind(GetLabelOf(block));
}
void InstructionCodeGeneratorX86::LoadCurrentMethod(Register reg) {
@@ -577,42 +594,60 @@ void LocationsBuilderX86::VisitIf(HIf* if_instr) {
void InstructionCodeGeneratorX86::VisitIf(HIf* if_instr) {
HInstruction* cond = if_instr->InputAt(0);
- bool materialized = !cond->IsCondition() || cond->AsCondition()->NeedsMaterialization();
- // Moves do not affect the eflags register, so if the condition is evaluated
- // just before the if, we don't need to evaluate it again.
- bool eflags_set = cond->IsCondition()
- && cond->AsCondition()->IsBeforeWhenDisregardMoves(if_instr);
- if (materialized) {
- if (!eflags_set) {
- // Materialized condition, compare against 0.
- Location lhs = if_instr->GetLocations()->InAt(0);
- if (lhs.IsRegister()) {
- __ cmpl(lhs.As<Register>(), Immediate(0));
- } else {
- __ cmpl(Address(ESP, lhs.GetStackIndex()), Immediate(0));
+ if (cond->IsIntConstant()) {
+ // Constant condition, statically compared against 1.
+ int32_t cond_value = cond->AsIntConstant()->GetValue();
+ if (cond_value == 1) {
+ if (!codegen_->GoesToNextBlock(if_instr->GetBlock(),
+ if_instr->IfTrueSuccessor())) {
+ __ jmp(codegen_->GetLabelOf(if_instr->IfTrueSuccessor()));
}
- __ j(kNotEqual, codegen_->GetLabelOf(if_instr->IfTrueSuccessor()));
+ return;
} else {
- __ j(X86Condition(cond->AsCondition()->GetCondition()),
- codegen_->GetLabelOf(if_instr->IfTrueSuccessor()));
+ DCHECK_EQ(cond_value, 0);
}
} else {
- Location lhs = cond->GetLocations()->InAt(0);
- Location rhs = cond->GetLocations()->InAt(1);
- // LHS is guaranteed to be in a register (see LocationsBuilderX86::VisitCondition).
- if (rhs.IsRegister()) {
- __ cmpl(lhs.As<Register>(), rhs.As<Register>());
- } else if (rhs.IsConstant()) {
- HIntConstant* instruction = rhs.GetConstant()->AsIntConstant();
- Immediate imm(instruction->AsIntConstant()->GetValue());
- __ cmpl(lhs.As<Register>(), imm);
+ bool materialized =
+ !cond->IsCondition() || cond->AsCondition()->NeedsMaterialization();
+ // Moves do not affect the eflags register, so if the condition is
+ // evaluated just before the if, we don't need to evaluate it
+ // again.
+ bool eflags_set = cond->IsCondition()
+ && cond->AsCondition()->IsBeforeWhenDisregardMoves(if_instr);
+ if (materialized) {
+ if (!eflags_set) {
+ // Materialized condition, compare against 0.
+ Location lhs = if_instr->GetLocations()->InAt(0);
+ if (lhs.IsRegister()) {
+ __ cmpl(lhs.As<Register>(), Immediate(0));
+ } else {
+ __ cmpl(Address(ESP, lhs.GetStackIndex()), Immediate(0));
+ }
+ __ j(kNotEqual, codegen_->GetLabelOf(if_instr->IfTrueSuccessor()));
+ } else {
+ __ j(X86Condition(cond->AsCondition()->GetCondition()),
+ codegen_->GetLabelOf(if_instr->IfTrueSuccessor()));
+ }
} else {
- __ cmpl(lhs.As<Register>(), Address(ESP, rhs.GetStackIndex()));
+ Location lhs = cond->GetLocations()->InAt(0);
+ Location rhs = cond->GetLocations()->InAt(1);
+ // LHS is guaranteed to be in a register (see
+ // LocationsBuilderX86::VisitCondition).
+ if (rhs.IsRegister()) {
+ __ cmpl(lhs.As<Register>(), rhs.As<Register>());
+ } else if (rhs.IsConstant()) {
+ HIntConstant* instruction = rhs.GetConstant()->AsIntConstant();
+ Immediate imm(instruction->AsIntConstant()->GetValue());
+ __ cmpl(lhs.As<Register>(), imm);
+ } else {
+ __ cmpl(lhs.As<Register>(), Address(ESP, rhs.GetStackIndex()));
+ }
+ __ j(X86Condition(cond->AsCondition()->GetCondition()),
+ codegen_->GetLabelOf(if_instr->IfTrueSuccessor()));
}
- __ j(X86Condition(cond->AsCondition()->GetCondition()),
- codegen_->GetLabelOf(if_instr->IfTrueSuccessor()));
}
- if (!codegen_->GoesToNextBlock(if_instr->GetBlock(), if_instr->IfFalseSuccessor())) {
+ if (!codegen_->GoesToNextBlock(if_instr->GetBlock(),
+ if_instr->IfFalseSuccessor())) {
__ jmp(codegen_->GetLabelOf(if_instr->IfFalseSuccessor()));
}
}
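
Each backend's VisitIf now special-cases an IntConstant condition the same way: a statically known condition needs no compare at all, only an unconditional jump when the taken successor is not the fall-through block. A minimal sketch of that reduction, with stand-in types for the art classes:

    #include <cstdio>

    struct Block { int id; };  // stand-in for HBasicBlock

    bool GoesToNextBlock(const Block& current, const Block& next) {
      return current.id + 1 == next.id;
    }

    // Reduction applied when HIf's input is an IntConstant: no compare is
    // emitted, and a jump only when the target is not the next block.
    void EmitIfConstant(bool value, const Block& current,
                        const Block& true_succ, const Block& false_succ) {
      const Block& target = value ? true_succ : false_succ;
      if (!GoesToNextBlock(current, target)) {
        std::printf("jmp label_%d\n", target.id);
      }  // Otherwise fall through: nothing to emit.
    }

    int main() {
      Block b0{0}, b1{1}, b2{2};
      EmitIfConstant(true, b0, b1, b2);   // true successor is next: no code
      EmitIfConstant(false, b0, b1, b2);  // prints "jmp label_2"
      return 0;
    }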
@@ -747,6 +782,7 @@ void LocationsBuilderX86::VisitIntConstant(HIntConstant* constant) {
}
void InstructionCodeGeneratorX86::VisitIntConstant(HIntConstant* constant) {
+ // Will be generated at use site.
}
void LocationsBuilderX86::VisitLongConstant(HLongConstant* constant) {
@@ -1083,6 +1119,113 @@ void InstructionCodeGeneratorX86::VisitSub(HSub* sub) {
}
}
+void LocationsBuilderX86::VisitMul(HMul* mul) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
+ switch (mul->GetResultType()) {
+ case Primitive::kPrimInt:
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::Any());
+ locations->SetOut(Location::SameAsFirstInput());
+ break;
+ case Primitive::kPrimLong: {
+ locations->SetInAt(0, Location::RequiresRegister());
+ // TODO: Currently this handles only stack operands:
+ // - we don't have enough registers because we currently use the Quick ABI.
+ // - by the time we have a working register allocator we will probably change the ABI
+ // and fix the above.
+ // - we don't have a way yet to request operands on the stack, but the baseline compiler
+ // will leave the operands on the stack with Any().
+ locations->SetInAt(1, Location::Any());
+ locations->SetOut(Location::SameAsFirstInput());
+ // Needed for the widening 32-bit multiply (mull), whose 64-bit result lands in EDX:EAX.
+ locations->AddTemp(Location::RegisterLocation(EAX));
+ locations->AddTemp(Location::RegisterLocation(EDX));
+ break;
+ }
+
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
+ break;
+
+ default:
+ LOG(FATAL) << "Unimplemented mul type " << mul->GetResultType();
+ }
+}
+
+void InstructionCodeGeneratorX86::VisitMul(HMul* mul) {
+ LocationSummary* locations = mul->GetLocations();
+ Location first = locations->InAt(0);
+ Location second = locations->InAt(1);
+ DCHECK(first.Equals(locations->Out()));
+
+ switch (mul->GetResultType()) {
+ case Primitive::kPrimInt: {
+ if (second.IsRegister()) {
+ __ imull(first.As<Register>(), second.As<Register>());
+ } else if (second.IsConstant()) {
+ Immediate imm(second.GetConstant()->AsIntConstant()->GetValue());
+ __ imull(first.As<Register>(), imm);
+ } else {
+ DCHECK(second.IsStackSlot());
+ __ imull(first.As<Register>(), Address(ESP, second.GetStackIndex()));
+ }
+ break;
+ }
+
+ case Primitive::kPrimLong: {
+ DCHECK(second.IsDoubleStackSlot());
+
+ Register in1_hi = first.AsRegisterPairHigh<Register>();
+ Register in1_lo = first.AsRegisterPairLow<Register>();
+ Address in2_hi(ESP, second.GetHighStackIndex(kX86WordSize));
+ Address in2_lo(ESP, second.GetStackIndex());
+ Register eax = locations->GetTemp(0).As<Register>();
+ Register edx = locations->GetTemp(1).As<Register>();
+
+ DCHECK_EQ(EAX, eax);
+ DCHECK_EQ(EDX, edx);
+
+ // input: in1 - 64 bits, in2 - 64 bits
+ // output: in1
+ // formula: in1.hi : in1.lo = (in1.lo * in2.hi + in1.hi * in2.lo) * 2^32 + in1.lo * in2.lo
+ // parts: in1.hi = in1.lo * in2.hi + in1.hi * in2.lo + (in1.lo * in2.lo)[63:32]
+ // parts: in1.lo = (in1.lo * in2.lo)[31:0]
+
+ __ movl(eax, in2_hi);
+ // eax <- in1.lo * in2.hi
+ __ imull(eax, in1_lo);
+ // in1.hi <- in1.hi * in2.lo
+ __ imull(in1_hi, in2_lo);
+ // in1.hi <- in1.lo * in2.hi + in1.hi * in2.lo
+ __ addl(in1_hi, eax);
+ // Move in1.lo to eax in preparation for the widening 32x32->64 multiply.
+ __ movl(eax, in1_lo);
+ // edx:eax <- in1.lo * in2.lo
+ __ mull(in2_lo);
+ // in1.hi <- in2.hi * in1.lo + in2.lo * in1.hi + (in1.lo * in2.lo)[63:32]
+ __ addl(in1_hi, edx);
+ // in1.lo <- (in1.lo * in2.lo)[31:0];
+ __ movl(in1_lo, eax);
+
+ break;
+ }
+
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
+ break;
+
+ default:
+ LOG(FATAL) << "Unimplemented mul type " << mul->GetResultType();
+ }
+}
+
void LocationsBuilderX86::VisitNewInstance(HNewInstance* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
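
The temporaries pinned to EAX and EDX in the locations above exist because the one-operand mull used for the low product has implicit operands: it multiplies EAX by its source and writes the 64-bit result to EDX:EAX. A host-side model of that contract (illustrative, not the X86Assembler API):

    #include <cstdint>

    // Model of x86 "mul r/m32": edx:eax <- eax * src (unsigned, widening).
    void EmulateMull(uint32_t* eax, uint32_t* edx, uint32_t src) {
      uint64_t product = static_cast<uint64_t>(*eax) * src;
      *eax = static_cast<uint32_t>(product);        // low 32 bits of the product
      *edx = static_cast<uint32_t>(product >> 32);  // high 32 bits of the product
    }

    int main() {
      uint32_t eax = 0x89abcdefu, edx = 0;
      EmulateMull(&eax, &edx, 0x12345678u);
      uint64_t expected = 0x89abcdefull * 0x12345678ull;
      return (eax == static_cast<uint32_t>(expected) &&
              edx == static_cast<uint32_t>(expected >> 32)) ? 0 : 1;
    }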
@@ -1356,7 +1499,7 @@ void LocationsBuilderX86::VisitNullCheck(HNullCheck* instruction) {
}
void InstructionCodeGeneratorX86::VisitNullCheck(HNullCheck* instruction) {
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathX86(instruction);
+ SlowPathCodeX86* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathX86(instruction);
codegen_->AddSlowPath(slow_path);
LocationSummary* locations = instruction->GetLocations();
@@ -1658,7 +1801,7 @@ void LocationsBuilderX86::VisitBoundsCheck(HBoundsCheck* instruction) {
void InstructionCodeGeneratorX86::VisitBoundsCheck(HBoundsCheck* instruction) {
LocationSummary* locations = instruction->GetLocations();
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathX86(
+ SlowPathCodeX86* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathX86(
instruction, locations->InAt(0), locations->InAt(1));
codegen_->AddSlowPath(slow_path);
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index a1a72a2bd7..db8b9abd91 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -142,7 +142,7 @@ class CodeGeneratorX86 : public CodeGenerator {
virtual void GenerateFrameEntry() OVERRIDE;
virtual void GenerateFrameExit() OVERRIDE;
- virtual void Bind(Label* label) OVERRIDE;
+ virtual void Bind(HBasicBlock* block) OVERRIDE;
virtual void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
virtual void SaveCoreRegister(Location stack_location, uint32_t reg_id) OVERRIDE;
virtual void RestoreCoreRegister(Location stack_location, uint32_t reg_id) OVERRIDE;
@@ -166,6 +166,7 @@ class CodeGeneratorX86 : public CodeGenerator {
}
virtual void SetupBlockedRegisters() const OVERRIDE;
+
virtual Location AllocateFreeRegister(Primitive::Type type) const OVERRIDE;
virtual Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
@@ -173,6 +174,9 @@ class CodeGeneratorX86 : public CodeGenerator {
virtual void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
virtual void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
+ // Blocks all register pairs made out of blocked core registers.
+ void UpdateBlockedPairRegisters() const;
+
ParallelMoveResolverX86* GetMoveResolver() {
return &move_resolver_;
}
@@ -189,7 +193,17 @@ class CodeGeneratorX86 : public CodeGenerator {
// Emit a write barrier.
void MarkGCCard(Register temp, Register card, Register object, Register value);
+ Label* GetLabelOf(HBasicBlock* block) const {
+ return block_labels_.GetRawStorage() + block->GetBlockId();
+ }
+
+ virtual void Initialize() OVERRIDE {
+ block_labels_.SetSize(GetGraph()->GetBlocks().Size());
+ }
+
private:
+ // Labels for each block that will be compiled.
+ GrowableArray<Label> block_labels_;
LocationsBuilderX86 location_builder_;
InstructionCodeGeneratorX86 instruction_visitor_;
ParallelMoveResolverX86 move_resolver_;
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 059140d9bf..5fa930512b 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -60,7 +60,21 @@ class InvokeRuntimeCallingConvention : public CallingConvention<Register, FloatR
#define __ reinterpret_cast<X86_64Assembler*>(codegen->GetAssembler())->
-class NullCheckSlowPathX86_64 : public SlowPathCode {
+class SlowPathCodeX86_64 : public SlowPathCode {
+ public:
+ SlowPathCodeX86_64() : entry_label_(), exit_label_() {}
+
+ Label* GetEntryLabel() { return &entry_label_; }
+ Label* GetExitLabel() { return &exit_label_; }
+
+ private:
+ Label entry_label_;
+ Label exit_label_;
+
+ DISALLOW_COPY_AND_ASSIGN(SlowPathCodeX86_64);
+};
+
+class NullCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
public:
explicit NullCheckSlowPathX86_64(HNullCheck* instruction) : instruction_(instruction) {}
@@ -76,7 +90,7 @@ class NullCheckSlowPathX86_64 : public SlowPathCode {
DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathX86_64);
};
-class StackOverflowCheckSlowPathX86_64 : public SlowPathCode {
+class StackOverflowCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
public:
StackOverflowCheckSlowPathX86_64() {}
@@ -92,12 +106,13 @@ class StackOverflowCheckSlowPathX86_64 : public SlowPathCode {
DISALLOW_COPY_AND_ASSIGN(StackOverflowCheckSlowPathX86_64);
};
-class SuspendCheckSlowPathX86_64 : public SlowPathCode {
+class SuspendCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
public:
explicit SuspendCheckSlowPathX86_64(HSuspendCheck* instruction, HBasicBlock* successor)
: instruction_(instruction), successor_(successor) {}
virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
codegen->SaveLiveRegisters(instruction_->GetLocations());
__ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pTestSuspend), true));
@@ -106,7 +121,7 @@ class SuspendCheckSlowPathX86_64 : public SlowPathCode {
if (successor_ == nullptr) {
__ jmp(GetReturnLabel());
} else {
- __ jmp(codegen->GetLabelOf(successor_));
+ __ jmp(x64_codegen->GetLabelOf(successor_));
}
}
@@ -123,7 +138,7 @@ class SuspendCheckSlowPathX86_64 : public SlowPathCode {
DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathX86_64);
};
-class BoundsCheckSlowPathX86_64 : public SlowPathCode {
+class BoundsCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
public:
BoundsCheckSlowPathX86_64(HBoundsCheck* instruction,
Location index_location,
@@ -133,7 +148,7 @@ class BoundsCheckSlowPathX86_64 : public SlowPathCode {
length_location_(length_location) {}
virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
- CodeGeneratorX86_64* x64_codegen = reinterpret_cast<CodeGeneratorX86_64*>(codegen);
+ CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
InvokeRuntimeCallingConvention calling_convention;
x64_codegen->Move(Location::RegisterLocation(calling_convention.GetRegisterAt(0)), index_location_);
@@ -186,6 +201,7 @@ void CodeGeneratorX86_64::RestoreCoreRegister(Location stack_location, uint32_t
CodeGeneratorX86_64::CodeGeneratorX86_64(HGraph* graph)
: CodeGenerator(graph, kNumberOfCpuRegisters, kNumberOfFloatRegisters, 0),
+ block_labels_(graph->GetArena(), 0),
location_builder_(graph, this),
instruction_visitor_(graph, this),
move_resolver_(graph->GetArena(), this) {}
@@ -266,7 +282,7 @@ void CodeGeneratorX86_64::GenerateFrameEntry() {
Immediate(GetFrameSize() - kNumberOfPushedRegistersAtEntry * kX86_64WordSize));
if (!skip_overflow_check && kExplicitStackOverflowCheck) {
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) StackOverflowCheckSlowPathX86_64();
+ SlowPathCodeX86_64* slow_path = new (GetGraph()->GetArena()) StackOverflowCheckSlowPathX86_64();
AddSlowPath(slow_path);
__ gs()->cmpq(CpuRegister(RSP),
@@ -282,8 +298,8 @@ void CodeGeneratorX86_64::GenerateFrameExit() {
Immediate(GetFrameSize() - kNumberOfPushedRegistersAtEntry * kX86_64WordSize));
}
-void CodeGeneratorX86_64::Bind(Label* label) {
- __ Bind(label);
+void CodeGeneratorX86_64::Bind(HBasicBlock* block) {
+ __ Bind(GetLabelOf(block));
}
void InstructionCodeGeneratorX86_64::LoadCurrentMethod(CpuRegister reg) {
@@ -479,40 +495,59 @@ void LocationsBuilderX86_64::VisitIf(HIf* if_instr) {
void InstructionCodeGeneratorX86_64::VisitIf(HIf* if_instr) {
HInstruction* cond = if_instr->InputAt(0);
- bool materialized = !cond->IsCondition() || cond->AsCondition()->NeedsMaterialization();
- // Moves do not affect the eflags register, so if the condition is evaluated
- // just before the if, we don't need to evaluate it again.
- bool eflags_set = cond->IsCondition()
- && cond->AsCondition()->IsBeforeWhenDisregardMoves(if_instr);
- if (materialized) {
- if (!eflags_set) {
- // Materialized condition, compare against 0.
- Location lhs = if_instr->GetLocations()->InAt(0);
- if (lhs.IsRegister()) {
- __ cmpl(lhs.As<CpuRegister>(), Immediate(0));
- } else {
- __ cmpl(Address(CpuRegister(RSP), lhs.GetStackIndex()), Immediate(0));
+ if (cond->IsIntConstant()) {
+ // Constant condition, statically compared against 1.
+ int32_t cond_value = cond->AsIntConstant()->GetValue();
+ if (cond_value == 1) {
+ if (!codegen_->GoesToNextBlock(if_instr->GetBlock(),
+ if_instr->IfTrueSuccessor())) {
+ __ jmp(codegen_->GetLabelOf(if_instr->IfTrueSuccessor()));
}
- __ j(kNotEqual, codegen_->GetLabelOf(if_instr->IfTrueSuccessor()));
+ return;
} else {
- __ j(X86_64Condition(cond->AsCondition()->GetCondition()),
- codegen_->GetLabelOf(if_instr->IfTrueSuccessor()));
+ DCHECK_EQ(cond_value, 0);
}
} else {
- Location lhs = cond->GetLocations()->InAt(0);
- Location rhs = cond->GetLocations()->InAt(1);
- if (rhs.IsRegister()) {
- __ cmpl(lhs.As<CpuRegister>(), rhs.As<CpuRegister>());
- } else if (rhs.IsConstant()) {
- __ cmpl(lhs.As<CpuRegister>(),
- Immediate(rhs.GetConstant()->AsIntConstant()->GetValue()));
+ bool materialized =
+ !cond->IsCondition() || cond->AsCondition()->NeedsMaterialization();
+ // Moves do not affect the eflags register, so if the condition is
+ // evaluated just before the if, we don't need to evaluate it
+ // again.
+ bool eflags_set = cond->IsCondition()
+ && cond->AsCondition()->IsBeforeWhenDisregardMoves(if_instr);
+ if (materialized) {
+ if (!eflags_set) {
+ // Materialized condition, compare against 0.
+ Location lhs = if_instr->GetLocations()->InAt(0);
+ if (lhs.IsRegister()) {
+ __ cmpl(lhs.As<CpuRegister>(), Immediate(0));
+ } else {
+ __ cmpl(Address(CpuRegister(RSP), lhs.GetStackIndex()),
+ Immediate(0));
+ }
+ __ j(kNotEqual, codegen_->GetLabelOf(if_instr->IfTrueSuccessor()));
+ } else {
+ __ j(X86_64Condition(cond->AsCondition()->GetCondition()),
+ codegen_->GetLabelOf(if_instr->IfTrueSuccessor()));
+ }
} else {
- __ cmpl(lhs.As<CpuRegister>(), Address(CpuRegister(RSP), rhs.GetStackIndex()));
+ Location lhs = cond->GetLocations()->InAt(0);
+ Location rhs = cond->GetLocations()->InAt(1);
+ if (rhs.IsRegister()) {
+ __ cmpl(lhs.As<CpuRegister>(), rhs.As<CpuRegister>());
+ } else if (rhs.IsConstant()) {
+ __ cmpl(lhs.As<CpuRegister>(),
+ Immediate(rhs.GetConstant()->AsIntConstant()->GetValue()));
+ } else {
+ __ cmpl(lhs.As<CpuRegister>(),
+ Address(CpuRegister(RSP), rhs.GetStackIndex()));
+ }
+ __ j(X86_64Condition(cond->AsCondition()->GetCondition()),
+ codegen_->GetLabelOf(if_instr->IfTrueSuccessor()));
}
- __ j(X86_64Condition(cond->AsCondition()->GetCondition()),
- codegen_->GetLabelOf(if_instr->IfTrueSuccessor()));
}
- if (!codegen_->GoesToNextBlock(if_instr->GetBlock(), if_instr->IfFalseSuccessor())) {
+ if (!codegen_->GoesToNextBlock(if_instr->GetBlock(),
+ if_instr->IfFalseSuccessor())) {
__ jmp(codegen_->GetLabelOf(if_instr->IfFalseSuccessor()));
}
}
@@ -679,6 +714,7 @@ void LocationsBuilderX86_64::VisitIntConstant(HIntConstant* constant) {
}
void InstructionCodeGeneratorX86_64::VisitIntConstant(HIntConstant* constant) {
+ // Will be generated at use site.
}
void LocationsBuilderX86_64::VisitLongConstant(HLongConstant* constant) {
@@ -688,6 +724,7 @@ void LocationsBuilderX86_64::VisitLongConstant(HLongConstant* constant) {
}
void InstructionCodeGeneratorX86_64::VisitLongConstant(HLongConstant* constant) {
+ // Will be generated at use site.
}
void LocationsBuilderX86_64::VisitReturnVoid(HReturnVoid* ret) {
@@ -1043,6 +1080,70 @@ void InstructionCodeGeneratorX86_64::VisitSub(HSub* sub) {
}
}
+void LocationsBuilderX86_64::VisitMul(HMul* mul) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
+ switch (mul->GetResultType()) {
+ case Primitive::kPrimInt: {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::Any());
+ locations->SetOut(Location::SameAsFirstInput());
+ break;
+ }
+ case Primitive::kPrimLong: {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::SameAsFirstInput());
+ break;
+ }
+
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
+ break;
+
+ default:
+ LOG(FATAL) << "Unimplemented mul type " << mul->GetResultType();
+ }
+}
+
+void InstructionCodeGeneratorX86_64::VisitMul(HMul* mul) {
+ LocationSummary* locations = mul->GetLocations();
+ Location first = locations->InAt(0);
+ Location second = locations->InAt(1);
+ DCHECK(first.Equals(locations->Out()));
+ switch (mul->GetResultType()) {
+ case Primitive::kPrimInt: {
+ if (second.IsRegister()) {
+ __ imull(first.As<CpuRegister>(), second.As<CpuRegister>());
+ } else if (second.IsConstant()) {
+ Immediate imm(second.GetConstant()->AsIntConstant()->GetValue());
+ __ imull(first.As<CpuRegister>(), imm);
+ } else {
+ DCHECK(second.IsStackSlot());
+ __ imull(first.As<CpuRegister>(), Address(CpuRegister(RSP), second.GetStackIndex()));
+ }
+ break;
+ }
+ case Primitive::kPrimLong: {
+ __ imulq(first.As<CpuRegister>(), second.As<CpuRegister>());
+ break;
+ }
+
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
+ break;
+
+ default:
+ LOG(FATAL) << "Unimplemented mul type " << mul->GetResultType();
+ }
+}
+
void LocationsBuilderX86_64::VisitNewInstance(HNewInstance* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
@@ -1233,7 +1334,7 @@ void LocationsBuilderX86_64::VisitNullCheck(HNullCheck* instruction) {
}
void InstructionCodeGeneratorX86_64::VisitNullCheck(HNullCheck* instruction) {
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathX86_64(instruction);
+ SlowPathCodeX86_64* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathX86_64(instruction);
codegen_->AddSlowPath(slow_path);
LocationSummary* locations = instruction->GetLocations();
@@ -1505,7 +1606,7 @@ void LocationsBuilderX86_64::VisitBoundsCheck(HBoundsCheck* instruction) {
void InstructionCodeGeneratorX86_64::VisitBoundsCheck(HBoundsCheck* instruction) {
LocationSummary* locations = instruction->GetLocations();
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathX86_64(
+ SlowPathCodeX86_64* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathX86_64(
instruction, locations->InAt(0), locations->InAt(1));
codegen_->AddSlowPath(slow_path);
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 288f3f61f9..5ac0189b55 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -144,7 +144,7 @@ class CodeGeneratorX86_64 : public CodeGenerator {
virtual void GenerateFrameEntry() OVERRIDE;
virtual void GenerateFrameExit() OVERRIDE;
- virtual void Bind(Label* label) OVERRIDE;
+ virtual void Bind(HBasicBlock* block) OVERRIDE;
virtual void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
virtual void SaveCoreRegister(Location stack_location, uint32_t reg_id) OVERRIDE;
virtual void RestoreCoreRegister(Location stack_location, uint32_t reg_id) OVERRIDE;
@@ -188,7 +188,17 @@ class CodeGeneratorX86_64 : public CodeGenerator {
// Helper method to move a value between two locations.
void Move(Location destination, Location source);
+ Label* GetLabelOf(HBasicBlock* block) const {
+ return block_labels_.GetRawStorage() + block->GetBlockId();
+ }
+
+ virtual void Initialize() OVERRIDE {
+ block_labels_.SetSize(GetGraph()->GetBlocks().Size());
+ }
+
private:
+ // Labels for each block that will be compiled.
+ GrowableArray<Label> block_labels_;
LocationsBuilderX86_64 location_builder_;
InstructionCodeGeneratorX86_64 instruction_visitor_;
ParallelMoveResolverX86_64 move_resolver_;
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index 3037f1c2e8..8bb12de387 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -349,4 +349,49 @@ TEST(CodegenTest, NonMaterializedCondition) {
RunCodeOptimized(graph, hook_before_codegen, true, 0);
}
+#define MUL_TEST(TYPE, TEST_NAME) \
+ TEST(CodegenTest, Return ## TEST_NAME) { \
+ const uint16_t data[] = TWO_REGISTERS_CODE_ITEM( \
+ Instruction::CONST_4 | 3 << 12 | 0, \
+ Instruction::CONST_4 | 4 << 12 | 1 << 8, \
+ Instruction::MUL_ ## TYPE, 1 << 8 | 0, \
+ Instruction::RETURN); \
+ \
+ TestCode(data, true, 12); \
+ } \
+ \
+ TEST(CodegenTest, Return ## TEST_NAME ## 2addr) { \
+ const uint16_t data[] = TWO_REGISTERS_CODE_ITEM( \
+ Instruction::CONST_4 | 3 << 12 | 0, \
+ Instruction::CONST_4 | 4 << 12 | 1 << 8, \
+ Instruction::MUL_ ## TYPE ## _2ADDR | 1 << 12, \
+ Instruction::RETURN); \
+ \
+ TestCode(data, true, 12); \
+ }
+
+MUL_TEST(INT, MulInt);
+MUL_TEST(LONG, MulLong);
+// MUL_TEST(FLOAT, Float);
+// MUL_TEST(DOUBLE, Double);
+
+TEST(CodegenTest, ReturnMulIntLit8) {
+ const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ Instruction::CONST_4 | 4 << 12 | 0 << 8,
+ Instruction::MUL_INT_LIT8, 3 << 8 | 0,
+ Instruction::RETURN);
+
+ TestCode(data, true, 12);
+}
+
+TEST(CodegenTest, ReturnMulIntLit16) {
+ const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ Instruction::CONST_4 | 4 << 12 | 0 << 8,
+ Instruction::MUL_INT_LIT16, 3,
+ Instruction::RETURN);
+
+ TestCode(data, true, 12);
+}
+
+
} // namespace art
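
The expected value 12 throughout comes from the const/4 operands: CONST_4 uses dex format 11n (B|A|op), so CONST_4 | 3 << 12 | 0 is const/4 v0, #3 and CONST_4 | 4 << 12 | 1 << 8 is const/4 v1, #4, giving 3 * 4 = 12 in the MUL tests (and 4 * 3 = 12 in the literal tests). A hypothetical decoder for that format:

    #include <cstdint>
    #include <cstdio>

    // dex format 11n: one code unit laid out as B|A|op, with B a signed
    // 4-bit literal and A the destination register.
    void Decode11n(uint16_t insn) {
      unsigned opcode = insn & 0xff;
      unsigned vA = (insn >> 8) & 0xf;
      int literal = insn >> 12;
      if (literal & 0x8) literal -= 16;  // sign-extend the 4-bit immediate
      std::printf("op=0x%02x: v%u <- %d\n", opcode, vA, literal);
    }

    int main() {
      Decode11n(0x12 | 3 << 12 | 0);       // const/4 v0, #3
      Decode11n(0x12 | 4 << 12 | 1 << 8);  // const/4 v1, #4
      return 0;
    }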
diff --git a/compiler/optimizing/constant_propagation_test.cc b/compiler/optimizing/constant_propagation_test.cc
index 342777a49c..ff44805ed2 100644
--- a/compiler/optimizing/constant_propagation_test.cc
+++ b/compiler/optimizing/constant_propagation_test.cc
@@ -62,7 +62,7 @@ static void TestCode(const uint16_t* data,
ASSERT_EQ(expected_after_dce, actual_after_dce);
SSAChecker ssa_checker(&allocator, graph);
- ssa_checker.VisitInsertionOrder();
+ ssa_checker.Run();
ASSERT_TRUE(ssa_checker.IsValid());
}
diff --git a/compiler/optimizing/dead_code_elimination.cc b/compiler/optimizing/dead_code_elimination.cc
index fe2adc77d0..5655544427 100644
--- a/compiler/optimizing/dead_code_elimination.cc
+++ b/compiler/optimizing/dead_code_elimination.cc
@@ -35,7 +35,10 @@ void DeadCodeElimination::Run() {
for (i.Advance(); !i.Done(); i.Advance()) {
HInstruction* inst = i.Current();
DCHECK(!inst->IsControlFlow());
- if (!inst->HasSideEffects() && !inst->HasUses() && !inst->IsSuspendCheck()) {
+ if (!inst->HasSideEffects()
+ && !inst->CanThrow()
+ && !inst->IsSuspendCheck()
+ && !inst->HasUses()) {
block->RemoveInstruction(inst);
}
}
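
This change tightens the removability test: an unused instruction can still be observable if it may throw (a null or bounds check, say), so CanThrow() joins HasSideEffects() and IsSuspendCheck() as reasons to keep it. A toy version of the predicate with a stand-in struct:

    #include <cstdio>

    // Hypothetical stand-in for HInstruction; flags only.
    struct Inst {
      bool has_side_effects;
      bool can_throw;
      bool is_suspend_check;
      bool has_uses;
    };

    // The removability predicate as of this change.
    bool Removable(const Inst& inst) {
      return !inst.has_side_effects
          && !inst.can_throw
          && !inst.is_suspend_check
          && !inst.has_uses;
    }

    int main() {
      Inst dead_add = {false, false, false, false};
      Inst unused_null_check = {false, true, false, false};  // may throw: keep
      std::printf("%d %d\n", Removable(dead_add), Removable(unused_null_check));
      return 0;
    }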
diff --git a/compiler/optimizing/dead_code_elimination_test.cc b/compiler/optimizing/dead_code_elimination_test.cc
index 245bcb21d5..3e0ba3aee3 100644
--- a/compiler/optimizing/dead_code_elimination_test.cc
+++ b/compiler/optimizing/dead_code_elimination_test.cc
@@ -47,7 +47,7 @@ static void TestCode(const uint16_t* data,
ASSERT_EQ(actual_after, expected_after);
SSAChecker ssa_checker(&allocator, graph);
- ssa_checker.VisitInsertionOrder();
+ ssa_checker.Run();
ASSERT_TRUE(ssa_checker.IsValid());
}
diff --git a/compiler/optimizing/graph_checker.cc b/compiler/optimizing/graph_checker.cc
index 589b44a167..9f4029785a 100644
--- a/compiler/optimizing/graph_checker.cc
+++ b/compiler/optimizing/graph_checker.cc
@@ -264,21 +264,38 @@ void SSAChecker::CheckLoop(HBasicBlock* loop_header) {
void SSAChecker::VisitInstruction(HInstruction* instruction) {
super_type::VisitInstruction(instruction);
- // Ensure an instruction dominates all its uses (or in the present
- // case, that all uses of an instruction (used as input) are
- // dominated by its definition).
- for (HInputIterator input_it(instruction); !input_it.Done();
- input_it.Advance()) {
- HInstruction* input = input_it.Current();
- if (!input->Dominates(instruction)) {
+ // Ensure an instruction dominates all its uses.
+ for (HUseIterator<HInstruction> use_it(instruction->GetUses());
+ !use_it.Done(); use_it.Advance()) {
+ HInstruction* use = use_it.Current()->GetUser();
+ if (!use->IsPhi() && !instruction->Dominates(use)) {
std::stringstream error;
- error << "Instruction " << input->GetId()
- << " in block " << input->GetBlock()->GetBlockId()
- << " does not dominate use " << instruction->GetId()
- << " in block " << current_block_->GetBlockId() << ".";
+ error << "Instruction " << instruction->GetId()
+ << " in block " << current_block_->GetBlockId()
+ << " does not dominate use " << use->GetId()
+ << " in block " << use->GetBlock()->GetBlockId() << ".";
errors_.Insert(error.str());
}
}
+
+ // Ensure an instruction that has an environment is dominated by
+ // each instruction contained in that environment.
+ HEnvironment* environment = instruction->GetEnvironment();
+ if (environment != nullptr) {
+ for (size_t i = 0, e = environment->Size(); i < e; ++i) {
+ HInstruction* env_instruction = environment->GetInstructionAt(i);
+ if (env_instruction != nullptr
+ && !env_instruction->Dominates(instruction)) {
+ std::stringstream error;
+ error << "Instruction " << env_instruction->GetId()
+ << " in environment of instruction " << instruction->GetId()
+ << " from block " << current_block_->GetBlockId()
+ << " does not dominate instruction " << instruction->GetId()
+ << ".";
+ errors_.Insert(error.str());
+ }
+ }
+ }
}
void SSAChecker::VisitPhi(HPhi* phi) {
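Note the !use->IsPhi() exclusion in the rewritten loop: an input to a phi need not dominate the phi itself, only the end of the predecessor block matching that input's position, which is why phis are checked separately in VisitPhi. A rough sketch of that relaxed rule, with a hypothetical function name and an assumed block-level Dominates helper (not the checker's actual code):

    // Hypothetical: use-dominance as it applies to a phi input i.
    static bool PhiInputDominatesUse(HPhi* phi, size_t i) {
      HInstruction* input = phi->InputAt(i);
      HBasicBlock* pred = phi->GetBlock()->GetPredecessors().Get(i);
      // The definition must reach the end of the matching predecessor,
      // not the phi instruction itself.
      return input->GetBlock()->Dominates(pred);  // assumed helper
    }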
diff --git a/compiler/optimizing/graph_checker.h b/compiler/optimizing/graph_checker.h
index 34a770b5f3..862f1b600b 100644
--- a/compiler/optimizing/graph_checker.h
+++ b/compiler/optimizing/graph_checker.h
@@ -29,6 +29,9 @@ class GraphChecker : public HGraphVisitor {
allocator_(allocator),
errors_(allocator, 0) {}
+ // Check the whole graph (in insertion order).
+ virtual void Run() { VisitInsertionOrder(); }
+
// Check `block`.
virtual void VisitBasicBlock(HBasicBlock* block) OVERRIDE;
@@ -65,6 +68,14 @@ class SSAChecker : public GraphChecker {
SSAChecker(ArenaAllocator* allocator, HGraph* graph)
: GraphChecker(allocator, graph) {}
+ // Check the whole graph (in reverse post-order).
+ virtual void Run() {
+ // VisitReversePostOrder is used instead of VisitInsertionOrder,
+ // as the latter might visit dead blocks removed by the dominator
+ // computation.
+ VisitReversePostOrder();
+ }
+
// Perform SSA form checks on `block`.
virtual void VisitBasicBlock(HBasicBlock* block) OVERRIDE;
// Loop-related checks from block `loop_header`.
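With Run() as the single entry point, callers no longer pick a traversal themselves; each checker chooses the order that is safe for it. Every updated test in this patch follows the same pattern (allocator setup shown as in the existing tests):

    ArenaPool pool;
    ArenaAllocator allocator(&pool);
    SSAChecker ssa_checker(&allocator, graph);
    ssa_checker.Run();                  // reverse post-order under the hood
    ASSERT_TRUE(ssa_checker.IsValid());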
diff --git a/compiler/optimizing/graph_checker_test.cc b/compiler/optimizing/graph_checker_test.cc
index ea0692088d..39def82007 100644
--- a/compiler/optimizing/graph_checker_test.cc
+++ b/compiler/optimizing/graph_checker_test.cc
@@ -51,7 +51,7 @@ static void TestCode(const uint16_t* data) {
ASSERT_NE(graph, nullptr);
GraphChecker graph_checker(&allocator, graph);
- graph_checker.VisitInsertionOrder();
+ graph_checker.Run();
ASSERT_TRUE(graph_checker.IsValid());
}
@@ -65,7 +65,7 @@ static void TestCodeSSA(const uint16_t* data) {
graph->TransformToSSA();
SSAChecker ssa_checker(&allocator, graph);
- ssa_checker.VisitInsertionOrder();
+ ssa_checker.Run();
ASSERT_TRUE(ssa_checker.IsValid());
}
@@ -113,13 +113,13 @@ TEST(GraphChecker, InconsistentPredecessorsAndSuccessors) {
HGraph* graph = CreateSimpleCFG(&allocator);
GraphChecker graph_checker(&allocator, graph);
- graph_checker.VisitInsertionOrder();
+ graph_checker.Run();
ASSERT_TRUE(graph_checker.IsValid());
// Remove the entry block from the exit block's predecessors, to create an
// inconsistent successor/predecessor relation.
graph->GetExitBlock()->RemovePredecessor(graph->GetEntryBlock());
- graph_checker.VisitInsertionOrder();
+ graph_checker.Run();
ASSERT_FALSE(graph_checker.IsValid());
}
@@ -131,7 +131,7 @@ TEST(GraphChecker, BlockEndingWithNonBranchInstruction) {
HGraph* graph = CreateSimpleCFG(&allocator);
GraphChecker graph_checker(&allocator, graph);
- graph_checker.VisitInsertionOrder();
+ graph_checker.Run();
ASSERT_TRUE(graph_checker.IsValid());
// Remove the sole instruction of the exit block (composed of a
@@ -141,7 +141,7 @@ TEST(GraphChecker, BlockEndingWithNonBranchInstruction) {
HInstruction* last_inst = exit_block->GetLastInstruction();
exit_block->RemoveInstruction(last_inst);
- graph_checker.VisitInsertionOrder();
+ graph_checker.Run();
ASSERT_FALSE(graph_checker.IsValid());
}
diff --git a/compiler/optimizing/gvn.h b/compiler/optimizing/gvn.h
index 41b3ceb509..a98d714476 100644
--- a/compiler/optimizing/gvn.h
+++ b/compiler/optimizing/gvn.h
@@ -17,7 +17,6 @@
#ifndef ART_COMPILER_OPTIMIZING_GVN_H_
#define ART_COMPILER_OPTIMIZING_GVN_H_
-#include <gtest/gtest.h>
#include "nodes.h"
namespace art {
@@ -221,7 +220,7 @@ class GlobalValueNumberer : public ValueObject {
// Mark visited blocks. Only used for debugging.
GrowableArray<bool> visited_;
- FRIEND_TEST(GVNTest, LoopSideEffects);
+ ART_FRIEND_TEST(GVNTest, LoopSideEffects);
DISALLOW_COPY_AND_ASSIGN(GlobalValueNumberer);
};
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index a058dea6b4..aee21770b7 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -553,6 +553,12 @@ void HGraphVisitor::VisitInsertionOrder() {
}
}
+void HGraphVisitor::VisitReversePostOrder() {
+ for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
+ VisitBasicBlock(it.Current());
+ }
+}
+
void HGraphVisitor::VisitBasicBlock(HBasicBlock* block) {
for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
it.Current()->Accept(this);
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 677a4f8591..ec26c4a4dc 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -502,11 +502,12 @@ class HBasicBlock : public ArenaObject {
M(NullCheck, Instruction) \
M(Temporary, Instruction) \
M(SuspendCheck, Instruction) \
+ M(Mul, BinaryOperation) \
#define FOR_EACH_INSTRUCTION(M) \
FOR_EACH_CONCRETE_INSTRUCTION(M) \
M(Constant, Instruction) \
- M(BinaryOperation, Instruction) \
+ M(BinaryOperation, Instruction) \
M(Invoke, Instruction)
#define FORWARD_DECLARATION(type, super) class H##type;
@@ -650,6 +651,7 @@ class HInstruction : public ArenaObject {
virtual bool NeedsEnvironment() const { return false; }
virtual bool IsControlFlow() const { return false; }
+ virtual bool CanThrow() const { return false; }
bool HasSideEffects() const { return side_effects_.HasSideEffects(); }
void AddUseAt(HInstruction* user, size_t index) {
@@ -1555,6 +1557,22 @@ class HSub : public HBinaryOperation {
DISALLOW_COPY_AND_ASSIGN(HSub);
};
+class HMul : public HBinaryOperation {
+ public:
+ HMul(Primitive::Type result_type, HInstruction* left, HInstruction* right)
+ : HBinaryOperation(result_type, left, right) {}
+
+ virtual bool IsCommutative() { return true; }
+
+ virtual int32_t Evaluate(int32_t x, int32_t y) const { return x * y; }
+ virtual int64_t Evaluate(int64_t x, int64_t y) const { return x * y; }
+
+ DECLARE_INSTRUCTION(Mul);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HMul);
+};
+
// The value of a parameter in this method. Its location depends on
// the calling convention.
class HParameterValue : public HExpression<0> {
@@ -1642,6 +1660,8 @@ class HNullCheck : public HExpression<1> {
virtual bool NeedsEnvironment() const { return true; }
+ virtual bool CanThrow() const { return true; }
+
uint32_t GetDexPc() const { return dex_pc_; }
DECLARE_INSTRUCTION(NullCheck);
@@ -1802,6 +1822,8 @@ class HBoundsCheck : public HExpression<2> {
virtual bool NeedsEnvironment() const { return true; }
+ virtual bool CanThrow() const { return true; }
+
uint32_t GetDexPc() const { return dex_pc_; }
DECLARE_INSTRUCTION(BoundsCheck);
@@ -1956,8 +1978,12 @@ class HGraphVisitor : public ValueObject {
virtual void VisitInstruction(HInstruction* instruction) {}
virtual void VisitBasicBlock(HBasicBlock* block);
+ // Visit the graph following basic block insertion order.
void VisitInsertionOrder();
+ // Visit the graph following dominator tree reverse post-order.
+ void VisitReversePostOrder();
+
HGraph* GetGraph() const { return graph_; }
// Visit functions for instruction classes.
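The two Evaluate overloads on HMul let a constant-folding pass fold multiplications generically, without switching on the opcode. A sketch of such a use (TryFoldIntMul is hypothetical; IsIntConstant, AsIntConstant, and GetValue are accessors nodes.h already declares):

    // Hypothetical helper: fold an HMul whose operands are both int
    // constants, dispatching through the virtual Evaluate overload.
    static bool TryFoldIntMul(HMul* mul, int32_t* out) {
      HInstruction* left = mul->InputAt(0);
      HInstruction* right = mul->InputAt(1);
      if (left->IsIntConstant() && right->IsIntConstant()) {
        *out = mul->Evaluate(left->AsIntConstant()->GetValue(),
                             right->AsIntConstant()->GetValue());
        return true;
      }
      return false;
    }

IsCommutative() returning true additionally lets such passes and the code generators swap the operands, for instance to keep a constant on the right-hand side.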
diff --git a/compiler/optimizing/register_allocator.h b/compiler/optimizing/register_allocator.h
index d4c233a7f8..0c3a9b3818 100644
--- a/compiler/optimizing/register_allocator.h
+++ b/compiler/optimizing/register_allocator.h
@@ -21,8 +21,6 @@
#include "primitive.h"
#include "utils/growable_array.h"
-#include "gtest/gtest.h"
-
namespace art {
class CodeGenerator;
@@ -189,7 +187,7 @@ class RegisterAllocator {
// The maximum live registers at safepoints.
size_t maximum_number_of_live_registers_;
- FRIEND_TEST(RegisterAllocatorTest, FreeUntil);
+ ART_FRIEND_TEST(RegisterAllocatorTest, FreeUntil);
DISALLOW_COPY_AND_ASSIGN(RegisterAllocator);
};
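Both FRIEND_TEST replacements above (here and in gvn.h) exist so that compiler headers no longer pull in <gtest/gtest.h>. ART_FRIEND_TEST is declared in base/macros.h and expands along the lines of gtest's own macro, shown here as an approximation rather than verbatim:

    // gtest names each test class "TestSet_TestName_Test"; befriending
    // that class is all FRIEND_TEST does, so no gtest header is needed.
    #define ART_FRIEND_TEST(test_set_name, individual_test) \
      friend class test_set_name##_##individual_test##_Test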