Don't use assembler classes in code_generator.h.

The arm64 backend uses its own assembler and does not share
assembler classes such as Label with the other backends. To avoid
conflicts or unnecessary mappings, keep those classes out of the
shared part of the code generator.
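
Concretely, the shared CodeGenerator now dispatches through two
virtuals and each backend owns its label storage. A rough sketch,
condensed from the diff below (declarations abbreviated, not the
full classes):

    // Shared code generator: no Label, no assembler headers.
    class CodeGenerator {
     public:
      virtual void Initialize() = 0;              // replaces block_labels_.SetSize(...)
      virtual void Bind(HBasicBlock* block) = 0;  // was Bind(Label* label)
    };

    // Each backend (here ARM) keeps its own block labels and
    // resolves HBasicBlock -> Label privately.
    class CodeGeneratorARM : public CodeGenerator {
     public:
      Label* GetLabelOf(HBasicBlock* block) const {
        return block_labels_.GetRawStorage() + block->GetBlockId();
      }
      virtual void Initialize() OVERRIDE {
        block_labels_.SetSize(GetGraph()->GetBlocks().Size());
      }
     private:
      GrowableArray<Label> block_labels_;
    };

Slow paths follow the same pattern: the entry/exit labels move from
the shared SlowPathCode into per-backend subclasses (SlowPathCodeARM,
SlowPathCodeX86, SlowPathCodeX86_64), and callers down_cast to the
backend type when they need a label.
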
Change-Id: I9e5fa40c1021d2e83a4ef14c52cd1ccd03f2f73d
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 29dbd8b..408e13e 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -36,7 +36,7 @@
const GrowableArray<HBasicBlock*>& blocks = GetGraph()->GetBlocks();
DCHECK(blocks.Get(0) == GetGraph()->GetEntryBlock());
DCHECK(GoesToNextBlock(GetGraph()->GetEntryBlock(), blocks.Get(1)));
- block_labels_.SetSize(blocks.Size());
+ Initialize();
DCHECK_EQ(frame_size_, kUninitializedFrameSize);
if (!is_leaf) {
@@ -54,7 +54,7 @@
HGraphVisitor* instruction_visitor = GetInstructionVisitor();
for (size_t i = 0, e = blocks.Size(); i < e; ++i) {
HBasicBlock* block = blocks.Get(i);
- Bind(GetLabelOf(block));
+ Bind(block);
for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
HInstruction* current = it.Current();
current->Accept(location_builder);
@@ -76,13 +76,13 @@
const GrowableArray<HBasicBlock*>& blocks = GetGraph()->GetBlocks();
DCHECK(blocks.Get(0) == GetGraph()->GetEntryBlock());
DCHECK(GoesToNextBlock(GetGraph()->GetEntryBlock(), blocks.Get(1)));
- block_labels_.SetSize(blocks.Size());
+ Initialize();
GenerateFrameEntry();
HGraphVisitor* instruction_visitor = GetInstructionVisitor();
for (size_t i = 0, e = blocks.Size(); i < e; ++i) {
HBasicBlock* block = blocks.Get(i);
- Bind(GetLabelOf(block));
+ Bind(block);
for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
HInstruction* current = it.Current();
current->Accept(instruction_visitor);
@@ -273,10 +273,6 @@
return current->GetBlockId() + 1 == next->GetBlockId();
}
-Label* CodeGenerator::GetLabelOf(HBasicBlock* block) const {
- return block_labels_.GetRawStorage() + block->GetBlockId();
-}
-
CodeGenerator* CodeGenerator::Create(ArenaAllocator* allocator,
HGraph* graph,
InstructionSet instruction_set) {
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 4eba791..7aaf991 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -24,13 +24,13 @@
#include "memory_region.h"
#include "nodes.h"
#include "stack_map_stream.h"
-#include "utils/assembler.h"
namespace art {
static size_t constexpr kVRegSize = 4;
static size_t constexpr kUninitializedFrameSize = 0;
+class Assembler;
class CodeGenerator;
class DexCompilationUnit;
class SrcMap;
@@ -53,18 +53,12 @@
class SlowPathCode : public ArenaObject {
public:
- SlowPathCode() : entry_label_(), exit_label_() {}
+ SlowPathCode() {}
virtual ~SlowPathCode() {}
- Label* GetEntryLabel() { return &entry_label_; }
- Label* GetExitLabel() { return &exit_label_; }
-
virtual void EmitNativeCode(CodeGenerator* codegen) = 0;
private:
- Label entry_label_;
- Label exit_label_;
-
DISALLOW_COPY_AND_ASSIGN(SlowPathCode);
};
@@ -80,7 +74,6 @@
HGraph* GetGraph() const { return graph_; }
- Label* GetLabelOf(HBasicBlock* block) const;
bool GoesToNextBlock(HBasicBlock* current, HBasicBlock* next) const;
size_t GetStackSlotOfParameter(HParameterValue* parameter) const {
@@ -90,9 +83,10 @@
+ parameter->GetIndex() * kVRegSize;
}
+ virtual void Initialize() = 0;
virtual void GenerateFrameEntry() = 0;
virtual void GenerateFrameExit() = 0;
- virtual void Bind(Label* label) = 0;
+ virtual void Bind(HBasicBlock* block) = 0;
virtual void Move(HInstruction* instruction, Location location, HInstruction* move_for) = 0;
virtual HGraphVisitor* GetLocationBuilder() = 0;
virtual HGraphVisitor* GetInstructionVisitor() = 0;
@@ -167,7 +161,6 @@
number_of_fpu_registers_(number_of_fpu_registers),
number_of_register_pairs_(number_of_register_pairs),
graph_(graph),
- block_labels_(graph->GetArena(), 0),
pc_infos_(graph->GetArena(), 32),
slow_paths_(graph->GetArena(), 8),
is_leaf_(true),
@@ -205,8 +198,6 @@
HGraph* const graph_;
- // Labels for each block that will be compiled.
- GrowableArray<Label> block_labels_;
GrowableArray<PcInfo> pc_infos_;
GrowableArray<SlowPathCode*> slow_paths_;
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 9be7802..f725a6e 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -60,7 +60,21 @@
#define __ reinterpret_cast<ArmAssembler*>(codegen->GetAssembler())->
-class NullCheckSlowPathARM : public SlowPathCode {
+class SlowPathCodeARM : public SlowPathCode {
+ public:
+ SlowPathCodeARM() : entry_label_(), exit_label_() {}
+
+ Label* GetEntryLabel() { return &entry_label_; }
+ Label* GetExitLabel() { return &exit_label_; }
+
+ private:
+ Label entry_label_;
+ Label exit_label_;
+
+ DISALLOW_COPY_AND_ASSIGN(SlowPathCodeARM);
+};
+
+class NullCheckSlowPathARM : public SlowPathCodeARM {
public:
explicit NullCheckSlowPathARM(HNullCheck* instruction) : instruction_(instruction) {}
@@ -77,7 +91,7 @@
DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathARM);
};
-class StackOverflowCheckSlowPathARM : public SlowPathCode {
+class StackOverflowCheckSlowPathARM : public SlowPathCodeARM {
public:
StackOverflowCheckSlowPathARM() {}
@@ -91,12 +105,13 @@
DISALLOW_COPY_AND_ASSIGN(StackOverflowCheckSlowPathARM);
};
-class SuspendCheckSlowPathARM : public SlowPathCode {
+class SuspendCheckSlowPathARM : public SlowPathCodeARM {
public:
explicit SuspendCheckSlowPathARM(HSuspendCheck* instruction, HBasicBlock* successor)
: instruction_(instruction), successor_(successor) {}
virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
__ Bind(GetEntryLabel());
codegen->SaveLiveRegisters(instruction_->GetLocations());
int32_t offset = QUICK_ENTRYPOINT_OFFSET(kArmWordSize, pTestSuspend).Int32Value();
@@ -107,7 +122,7 @@
if (successor_ == nullptr) {
__ b(GetReturnLabel());
} else {
- __ b(codegen->GetLabelOf(successor_));
+ __ b(arm_codegen->GetLabelOf(successor_));
}
}
@@ -127,7 +142,7 @@
DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathARM);
};
-class BoundsCheckSlowPathARM : public SlowPathCode {
+class BoundsCheckSlowPathARM : public SlowPathCodeARM {
public:
BoundsCheckSlowPathARM(HBoundsCheck* instruction,
Location index_location,
@@ -137,7 +152,7 @@
length_location_(length_location) {}
virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
- CodeGeneratorARM* arm_codegen = reinterpret_cast<CodeGeneratorARM*>(codegen);
+ CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
__ Bind(GetEntryLabel());
InvokeRuntimeCallingConvention calling_convention;
arm_codegen->Move32(Location::RegisterLocation(calling_convention.GetRegisterAt(0)), index_location_);
@@ -205,6 +220,7 @@
CodeGeneratorARM::CodeGeneratorARM(HGraph* graph)
: CodeGenerator(graph, kNumberOfCoreRegisters, kNumberOfDRegisters, kNumberOfRegisterPairs),
+ block_labels_(graph->GetArena(), 0),
location_builder_(graph, this),
instruction_visitor_(graph, this),
move_resolver_(graph->GetArena(), this),
@@ -313,7 +329,7 @@
bool skip_overflow_check = IsLeafMethod() && !FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kArm);
if (!skip_overflow_check) {
if (kExplicitStackOverflowCheck) {
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) StackOverflowCheckSlowPathARM();
+ SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) StackOverflowCheckSlowPathARM();
AddSlowPath(slow_path);
__ LoadFromOffset(kLoadWord, IP, TR, Thread::StackEndOffset<kArmWordSize>().Int32Value());
@@ -339,8 +355,8 @@
__ PopList(1 << PC | 1 << R6 | 1 << R7);
}
-void CodeGeneratorARM::Bind(Label* label) {
- __ Bind(label);
+void CodeGeneratorARM::Bind(HBasicBlock* block) {
+ __ Bind(GetLabelOf(block));
}
Location CodeGeneratorARM::GetStackLocation(HLoadLocal* load) const {
@@ -1349,7 +1365,7 @@
}
void InstructionCodeGeneratorARM::VisitNullCheck(HNullCheck* instruction) {
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathARM(instruction);
+ SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathARM(instruction);
codegen_->AddSlowPath(slow_path);
LocationSummary* locations = instruction->GetLocations();
@@ -1595,7 +1611,7 @@
void InstructionCodeGeneratorARM::VisitBoundsCheck(HBoundsCheck* instruction) {
LocationSummary* locations = instruction->GetLocations();
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathARM(
+ SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathARM(
instruction, locations->InAt(0), locations->InAt(1));
codegen_->AddSlowPath(slow_path);
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 874db0f..7c063f1 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -140,7 +140,7 @@
virtual void GenerateFrameEntry() OVERRIDE;
virtual void GenerateFrameExit() OVERRIDE;
- virtual void Bind(Label* label) OVERRIDE;
+ virtual void Bind(HBasicBlock* block) OVERRIDE;
virtual void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
virtual void SaveCoreRegister(Location stack_location, uint32_t reg_id) OVERRIDE;
virtual void RestoreCoreRegister(Location stack_location, uint32_t reg_id) OVERRIDE;
@@ -187,7 +187,17 @@
// Emit a write barrier.
void MarkGCCard(Register temp, Register card, Register object, Register value);
+ Label* GetLabelOf(HBasicBlock* block) const {
+ return block_labels_.GetRawStorage() + block->GetBlockId();
+ }
+
+ virtual void Initialize() OVERRIDE {
+ block_labels_.SetSize(GetGraph()->GetBlocks().Size());
+ }
+
private:
+ // Labels for each block that will be compiled.
+ GrowableArray<Label> block_labels_;
LocationsBuilderARM location_builder_;
InstructionCodeGeneratorARM instruction_visitor_;
ParallelMoveResolverARM move_resolver_;
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 34fa46e..0f6d635 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -56,7 +56,21 @@
#define __ reinterpret_cast<X86Assembler*>(codegen->GetAssembler())->
-class NullCheckSlowPathX86 : public SlowPathCode {
+class SlowPathCodeX86 : public SlowPathCode {
+ public:
+ SlowPathCodeX86() : entry_label_(), exit_label_() {}
+
+ Label* GetEntryLabel() { return &entry_label_; }
+ Label* GetExitLabel() { return &exit_label_; }
+
+ private:
+ Label entry_label_;
+ Label exit_label_;
+
+ DISALLOW_COPY_AND_ASSIGN(SlowPathCodeX86);
+};
+
+class NullCheckSlowPathX86 : public SlowPathCodeX86 {
public:
explicit NullCheckSlowPathX86(HNullCheck* instruction) : instruction_(instruction) {}
@@ -71,7 +85,7 @@
DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathX86);
};
-class StackOverflowCheckSlowPathX86 : public SlowPathCode {
+class StackOverflowCheckSlowPathX86 : public SlowPathCodeX86 {
public:
StackOverflowCheckSlowPathX86() {}
@@ -86,7 +100,7 @@
DISALLOW_COPY_AND_ASSIGN(StackOverflowCheckSlowPathX86);
};
-class BoundsCheckSlowPathX86 : public SlowPathCode {
+class BoundsCheckSlowPathX86 : public SlowPathCodeX86 {
public:
BoundsCheckSlowPathX86(HBoundsCheck* instruction,
Location index_location,
@@ -94,7 +108,7 @@
: instruction_(instruction), index_location_(index_location), length_location_(length_location) {}
virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
- CodeGeneratorX86* x86_codegen = reinterpret_cast<CodeGeneratorX86*>(codegen);
+ CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
InvokeRuntimeCallingConvention calling_convention;
x86_codegen->Move32(Location::RegisterLocation(calling_convention.GetRegisterAt(0)), index_location_);
@@ -111,12 +125,13 @@
DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathX86);
};
-class SuspendCheckSlowPathX86 : public SlowPathCode {
+class SuspendCheckSlowPathX86 : public SlowPathCodeX86 {
public:
explicit SuspendCheckSlowPathX86(HSuspendCheck* instruction, HBasicBlock* successor)
: instruction_(instruction), successor_(successor) {}
virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
codegen->SaveLiveRegisters(instruction_->GetLocations());
__ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pTestSuspend)));
@@ -125,7 +140,7 @@
if (successor_ == nullptr) {
__ jmp(GetReturnLabel());
} else {
- __ jmp(codegen->GetLabelOf(successor_));
+ __ jmp(x86_codegen->GetLabelOf(successor_));
}
}
@@ -177,6 +192,7 @@
CodeGeneratorX86::CodeGeneratorX86(HGraph* graph)
: CodeGenerator(graph, kNumberOfCpuRegisters, kNumberOfXmmRegisters, kNumberOfRegisterPairs),
+ block_labels_(graph->GetArena(), 0),
location_builder_(graph, this),
instruction_visitor_(graph, this),
move_resolver_(graph->GetArena(), this) {}
@@ -276,7 +292,7 @@
__ subl(ESP, Immediate(GetFrameSize() - kNumberOfPushedRegistersAtEntry * kX86WordSize));
if (!skip_overflow_check && kExplicitStackOverflowCheck) {
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) StackOverflowCheckSlowPathX86();
+ SlowPathCodeX86* slow_path = new (GetGraph()->GetArena()) StackOverflowCheckSlowPathX86();
AddSlowPath(slow_path);
__ fs()->cmpl(ESP, Address::Absolute(Thread::StackEndOffset<kX86WordSize>()));
@@ -290,8 +306,8 @@
__ addl(ESP, Immediate(GetFrameSize() - kNumberOfPushedRegistersAtEntry * kX86WordSize));
}
-void CodeGeneratorX86::Bind(Label* label) {
- __ Bind(label);
+void CodeGeneratorX86::Bind(HBasicBlock* block) {
+ __ Bind(GetLabelOf(block));
}
void InstructionCodeGeneratorX86::LoadCurrentMethod(Register reg) {
@@ -1356,7 +1372,7 @@
}
void InstructionCodeGeneratorX86::VisitNullCheck(HNullCheck* instruction) {
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathX86(instruction);
+ SlowPathCodeX86* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathX86(instruction);
codegen_->AddSlowPath(slow_path);
LocationSummary* locations = instruction->GetLocations();
@@ -1658,7 +1674,7 @@
void InstructionCodeGeneratorX86::VisitBoundsCheck(HBoundsCheck* instruction) {
LocationSummary* locations = instruction->GetLocations();
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathX86(
+ SlowPathCodeX86* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathX86(
instruction, locations->InAt(0), locations->InAt(1));
codegen_->AddSlowPath(slow_path);
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index a1a72a2..aa5fee0 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -142,7 +142,7 @@
virtual void GenerateFrameEntry() OVERRIDE;
virtual void GenerateFrameExit() OVERRIDE;
- virtual void Bind(Label* label) OVERRIDE;
+ virtual void Bind(HBasicBlock* block) OVERRIDE;
virtual void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
virtual void SaveCoreRegister(Location stack_location, uint32_t reg_id) OVERRIDE;
virtual void RestoreCoreRegister(Location stack_location, uint32_t reg_id) OVERRIDE;
@@ -189,7 +189,17 @@
// Emit a write barrier.
void MarkGCCard(Register temp, Register card, Register object, Register value);
+ Label* GetLabelOf(HBasicBlock* block) const {
+ return block_labels_.GetRawStorage() + block->GetBlockId();
+ }
+
+ virtual void Initialize() OVERRIDE {
+ block_labels_.SetSize(GetGraph()->GetBlocks().Size());
+ }
+
private:
+ // Labels for each block that will be compiled.
+ GrowableArray<Label> block_labels_;
LocationsBuilderX86 location_builder_;
InstructionCodeGeneratorX86 instruction_visitor_;
ParallelMoveResolverX86 move_resolver_;
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 059140d..2b17591 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -60,7 +60,21 @@
#define __ reinterpret_cast<X86_64Assembler*>(codegen->GetAssembler())->
-class NullCheckSlowPathX86_64 : public SlowPathCode {
+class SlowPathCodeX86_64 : public SlowPathCode {
+ public:
+ SlowPathCodeX86_64() : entry_label_(), exit_label_() {}
+
+ Label* GetEntryLabel() { return &entry_label_; }
+ Label* GetExitLabel() { return &exit_label_; }
+
+ private:
+ Label entry_label_;
+ Label exit_label_;
+
+ DISALLOW_COPY_AND_ASSIGN(SlowPathCodeX86_64);
+};
+
+class NullCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
public:
explicit NullCheckSlowPathX86_64(HNullCheck* instruction) : instruction_(instruction) {}
@@ -76,7 +90,7 @@
DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathX86_64);
};
-class StackOverflowCheckSlowPathX86_64 : public SlowPathCode {
+class StackOverflowCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
public:
StackOverflowCheckSlowPathX86_64() {}
@@ -92,12 +106,13 @@
DISALLOW_COPY_AND_ASSIGN(StackOverflowCheckSlowPathX86_64);
};
-class SuspendCheckSlowPathX86_64 : public SlowPathCode {
+class SuspendCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
public:
explicit SuspendCheckSlowPathX86_64(HSuspendCheck* instruction, HBasicBlock* successor)
: instruction_(instruction), successor_(successor) {}
virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
codegen->SaveLiveRegisters(instruction_->GetLocations());
__ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pTestSuspend), true));
@@ -106,7 +121,7 @@
if (successor_ == nullptr) {
__ jmp(GetReturnLabel());
} else {
- __ jmp(codegen->GetLabelOf(successor_));
+ __ jmp(x64_codegen->GetLabelOf(successor_));
}
}
@@ -123,7 +138,7 @@
DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathX86_64);
};
-class BoundsCheckSlowPathX86_64 : public SlowPathCode {
+class BoundsCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
public:
BoundsCheckSlowPathX86_64(HBoundsCheck* instruction,
Location index_location,
@@ -133,7 +148,7 @@
length_location_(length_location) {}
virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
- CodeGeneratorX86_64* x64_codegen = reinterpret_cast<CodeGeneratorX86_64*>(codegen);
+ CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
InvokeRuntimeCallingConvention calling_convention;
x64_codegen->Move(Location::RegisterLocation(calling_convention.GetRegisterAt(0)), index_location_);
@@ -186,6 +201,7 @@
CodeGeneratorX86_64::CodeGeneratorX86_64(HGraph* graph)
: CodeGenerator(graph, kNumberOfCpuRegisters, kNumberOfFloatRegisters, 0),
+ block_labels_(graph->GetArena(), 0),
location_builder_(graph, this),
instruction_visitor_(graph, this),
move_resolver_(graph->GetArena(), this) {}
@@ -266,7 +282,7 @@
Immediate(GetFrameSize() - kNumberOfPushedRegistersAtEntry * kX86_64WordSize));
if (!skip_overflow_check && kExplicitStackOverflowCheck) {
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) StackOverflowCheckSlowPathX86_64();
+ SlowPathCodeX86_64* slow_path = new (GetGraph()->GetArena()) StackOverflowCheckSlowPathX86_64();
AddSlowPath(slow_path);
__ gs()->cmpq(CpuRegister(RSP),
@@ -282,8 +298,8 @@
Immediate(GetFrameSize() - kNumberOfPushedRegistersAtEntry * kX86_64WordSize));
}
-void CodeGeneratorX86_64::Bind(Label* label) {
- __ Bind(label);
+void CodeGeneratorX86_64::Bind(HBasicBlock* block) {
+ __ Bind(GetLabelOf(block));
}
void InstructionCodeGeneratorX86_64::LoadCurrentMethod(CpuRegister reg) {
@@ -1233,7 +1249,7 @@
}
void InstructionCodeGeneratorX86_64::VisitNullCheck(HNullCheck* instruction) {
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathX86_64(instruction);
+ SlowPathCodeX86_64* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathX86_64(instruction);
codegen_->AddSlowPath(slow_path);
LocationSummary* locations = instruction->GetLocations();
@@ -1505,7 +1521,7 @@
void InstructionCodeGeneratorX86_64::VisitBoundsCheck(HBoundsCheck* instruction) {
LocationSummary* locations = instruction->GetLocations();
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathX86_64(
+ SlowPathCodeX86_64* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathX86_64(
instruction, locations->InAt(0), locations->InAt(1));
codegen_->AddSlowPath(slow_path);
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 288f3f6..5ac0189 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -144,7 +144,7 @@
virtual void GenerateFrameEntry() OVERRIDE;
virtual void GenerateFrameExit() OVERRIDE;
- virtual void Bind(Label* label) OVERRIDE;
+ virtual void Bind(HBasicBlock* block) OVERRIDE;
virtual void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
virtual void SaveCoreRegister(Location stack_location, uint32_t reg_id) OVERRIDE;
virtual void RestoreCoreRegister(Location stack_location, uint32_t reg_id) OVERRIDE;
@@ -188,7 +188,17 @@
// Helper method to move a value between two locations.
void Move(Location destination, Location source);
+ Label* GetLabelOf(HBasicBlock* block) const {
+ return block_labels_.GetRawStorage() + block->GetBlockId();
+ }
+
+ virtual void Initialize() OVERRIDE {
+ block_labels_.SetSize(GetGraph()->GetBlocks().Size());
+ }
+
private:
+ // Labels for each block that will be compiled.
+ GrowableArray<Label> block_labels_;
LocationsBuilderX86_64 location_builder_;
InstructionCodeGeneratorX86_64 instruction_visitor_;
ParallelMoveResolverX86_64 move_resolver_;