Fix: correctly destruct VIXL labels.

VIXL labels have non-trivial destructors, but arena-allocated objects
never have their destructors called, so labels embedded in slow paths,
jump tables and the per-block label array were never destructed. Hold
the slow paths and the ARM64 jump tables through `std::unique_ptr` (as
`DeletableArenaObject`s, so `delete` runs the destructor while the
arena keeps owning the memory), and store the per-block labels in an
`ArenaDeque` whose elements are destructed together with the code
generator.

Bug: 27505766
Change-Id: I077465e3d308f4331e7a861902e05865f9d99835
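
For context on the hunks below: ART's arenas hand out raw memory and
free it in bulk, never running destructors, while `vixl::Label` has a
non-trivial destructor (in debug builds it verifies the label is no
longer linked). A minimal sketch of the failure mode, using
illustrative stand-ins rather than the real ART/VIXL types:

#include <cassert>
#include <cstddef>
#include <memory>
#include <new>
#include <vector>

// Stand-in for vixl::Label: a destructor that verifies an invariant.
struct Label {
  bool linked = false;
  ~Label() { assert(!linked && "label destroyed while still linked"); }
};

// Stand-in for an arena: a bump-style allocator that frees everything
// at once and, crucially, never calls destructors.
class Arena {
 public:
  void* Alloc(std::size_t size) {
    storage_.push_back(std::make_unique<char[]>(size));
    return storage_.back().get();
  }
 private:
  std::vector<std::unique_ptr<char[]>> storage_;
};

struct SlowPath {
  Label entry;  // Embedded label: ~Label() only runs if ~SlowPath() runs.
};

int main() {
  Arena arena;
  // Arena-style allocation: ~SlowPath() never runs, so the
  // destructor-time check on the embedded label is silently skipped.
  SlowPath* unchecked = new (arena.Alloc(sizeof(SlowPath))) SlowPath();
  unchecked->entry.linked = true;  // The bug goes unnoticed.

  // What the patch moves to: ownership through std::unique_ptr, so the
  // destructor (and its check) actually runs. In ART the memory itself
  // still belongs to the arena; see the DeletableArenaObject note below.
  auto checked = std::make_unique<SlowPath>();
  return 0;
}
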
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index a771cc1..e7fa4e4 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -187,7 +187,8 @@
void CodeGenerator::GenerateSlowPaths() {
size_t code_start = 0;
- for (SlowPathCode* slow_path : slow_paths_) {
+ for (const std::unique_ptr<SlowPathCode>& slow_path_unique_ptr : slow_paths_) {
+ SlowPathCode* slow_path = slow_path_unique_ptr.get();
current_slow_path_ = slow_path;
if (disasm_info_ != nullptr) {
code_start = GetAssembler()->CodeSize();
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 87832a2..d69c410 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -67,7 +67,7 @@
DISALLOW_COPY_AND_ASSIGN(CodeAllocator);
};
-class SlowPathCode : public ArenaObject<kArenaAllocSlowPaths> {
+class SlowPathCode : public DeletableArenaObject<kArenaAllocSlowPaths> {
public:
explicit SlowPathCode(HInstruction* instruction) : instruction_(instruction) {
for (size_t i = 0; i < kMaximumNumberOfExpectedRegisters; ++i) {
@@ -205,7 +205,7 @@
virtual const Assembler& GetAssembler() const = 0;
virtual size_t GetWordSize() const = 0;
virtual size_t GetFloatingPointSpillSlotSize() const = 0;
- virtual uintptr_t GetAddressOf(HBasicBlock* block) const = 0;
+ virtual uintptr_t GetAddressOf(HBasicBlock* block) = 0;
void InitializeCodeGeneration(size_t number_of_spill_slots,
size_t maximum_number_of_live_core_registers,
size_t maximum_number_of_live_fpu_registers,
@@ -298,8 +298,9 @@
// save live registers, which may be needed by the runtime to set catch phis.
bool IsImplicitNullCheckAllowed(HNullCheck* null_check) const;
+ // TODO: Avoid creating the `std::unique_ptr` here.
void AddSlowPath(SlowPathCode* slow_path) {
- slow_paths_.push_back(slow_path);
+ slow_paths_.push_back(std::unique_ptr<SlowPathCode>(slow_path));
}
void BuildStackMaps(MemoryRegion region, const DexFile::CodeItem& code_item);
@@ -617,7 +618,7 @@
HGraph* const graph_;
const CompilerOptions& compiler_options_;
- ArenaVector<SlowPathCode*> slow_paths_;
+ ArenaVector<std::unique_ptr<SlowPathCode>> slow_paths_;
// The current slow-path that we're generating code for.
SlowPathCode* current_slow_path_;
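
The `SlowPathCode` base-class switch in this file is what makes the
`std::unique_ptr` ownership legal. A simplified sketch of the
distinction, modelled on ART's arena_object.h (details here are
illustrative):

#include <cstddef>

// Not deletable: a delete-expression is ill-formed, so the only option
// is to drop the pointer and let the arena reclaim the memory in bulk,
// which skips the destructor.
template <typename ArenaT>
class ArenaObject {
 public:
  void* operator new(std::size_t size, ArenaT* arena) { return arena->Alloc(size); }
  void operator delete(void*, std::size_t) = delete;  // ART aborts here instead.
};

// Deletable: operator delete is a no-op. `delete p` (e.g. from a
// std::unique_ptr) runs the destructor, while the storage still belongs
// to the arena and is released wholesale when the arena is.
template <typename ArenaT>
class DeletableArenaObject {
 public:
  void* operator new(std::size_t size, ArenaT* arena) { return arena->Alloc(size); }
  void operator delete(void*, std::size_t) { /* nop: the arena owns the memory */ }
};

Deleting through the `unique_ptr` is then just a destructor call plus a
no-op deallocation.
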
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 144d58d..0020f7b 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -339,7 +339,7 @@
return assembler_;
}
- uintptr_t GetAddressOf(HBasicBlock* block) const OVERRIDE {
+ uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE {
return GetLabelOf(block)->Position();
}
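
The same `const` removal recurs for every backend in this patch. The
trigger is the ARM64 change further down: `GetLabelOf` used to index a
raw `vixl::Label*` array, which a const member function may freely do,
but `operator[]` on a const `ArenaDeque` member only yields a
`const vixl::Label&`. A sketch of the distinction, with illustrative
names:

#include <deque>

struct Label { unsigned pos = 0; };

class Codegen {
 public:
  // With a raw pointer member, a const method can still return Label*:
  // const-ness does not propagate through the pointer.
  Label* GetLabelOfRaw(int id) const { return &raw_labels_[id]; }

  // With a container member, const-ness propagates to the elements, so
  // handing out a mutable Label* forces the method to be non-const...
  Label* GetLabelOf(int id) { return &labels_[id]; }

  // ...and everything built on it, like GetAddressOf, loses const too.
  unsigned GetAddressOf(int id) { return GetLabelOf(id)->pos; }

 private:
  Label* raw_labels_ = nullptr;  // Old scheme (arena-allocated array).
  std::deque<Label> labels_;     // New scheme.
};
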
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index efe4c06..e8e6b68 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -899,7 +899,7 @@
callee_saved_fp_registers.list(),
compiler_options,
stats),
- block_labels_(nullptr),
+ block_labels_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
jump_tables_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
location_builder_(graph, this),
instruction_visitor_(graph, this),
@@ -928,7 +928,7 @@
#define __ GetVIXLAssembler()->
void CodeGeneratorARM64::EmitJumpTables() {
- for (auto jump_table : jump_tables_) {
+ for (auto&& jump_table : jump_tables_) {
jump_table->EmitTable(this);
}
}
@@ -4784,8 +4784,7 @@
__ B(codegen_->GetLabelOf(default_block));
}
} else {
- JumpTableARM64* jump_table = new (GetGraph()->GetArena()) JumpTableARM64(switch_instr);
- codegen_->AddJumpTable(jump_table);
+ JumpTableARM64* jump_table = codegen_->CreateJumpTable(switch_instr);
UseScratchRegisterScope temps(codegen_->GetVIXLAssembler());
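
The `CreateJumpTable` helper used here is defined in the header diff
below; the point of replacing `AddJumpTable` is that ownership is taken
at the moment of creation. A sketch of the pattern with stand-in types:

#include <memory>
#include <vector>

struct JumpTable { /* ... */ };

class Codegen {
 public:
  // Allocate, take ownership, and hand back a non-owning pointer in one
  // step; callers can no longer create a table the container never sees.
  JumpTable* CreateJumpTable() {
    tables_.emplace_back(new JumpTable());
    return tables_.back().get();
  }

 private:
  // Element destructors run when the Codegen goes away.
  std::vector<std::unique_ptr<JumpTable>> tables_;
};

int main() {
  Codegen cg;
  JumpTable* table = cg.CreateJumpTable();  // Lifetime tied to cg.
  (void)table;
  return 0;
}
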
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index ec46a34..422963e 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -83,7 +83,7 @@
DISALLOW_COPY_AND_ASSIGN(SlowPathCodeARM64);
};
-class JumpTableARM64 : public ArenaObject<kArenaAllocSwitchTable> {
+class JumpTableARM64 : public DeletableArenaObject<kArenaAllocSwitchTable> {
public:
explicit JumpTableARM64(HPackedSwitch* switch_instr)
: switch_instr_(switch_instr), table_start_() {}
@@ -352,8 +352,9 @@
void Bind(HBasicBlock* block) OVERRIDE;
- vixl::Label* GetLabelOf(HBasicBlock* block) const {
- return CommonGetLabelOf<vixl::Label>(block_labels_, block);
+ vixl::Label* GetLabelOf(HBasicBlock* block) {
+ block = FirstNonEmptyBlock(block);
+ return &(block_labels_[block->GetBlockId()]);
}
size_t GetWordSize() const OVERRIDE {
@@ -365,7 +366,7 @@
return kArm64WordSize;
}
- uintptr_t GetAddressOf(HBasicBlock* block) const OVERRIDE {
+ uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE {
vixl::Label* block_entry_label = GetLabelOf(block);
DCHECK(block_entry_label->IsBound());
return block_entry_label->location();
@@ -413,11 +414,12 @@
}
void Initialize() OVERRIDE {
- block_labels_ = CommonInitializeLabels<vixl::Label>();
+ block_labels_.resize(GetGraph()->GetBlocks().size());
}
- void AddJumpTable(JumpTableARM64* jump_table) {
- jump_tables_.push_back(jump_table);
+ JumpTableARM64* CreateJumpTable(HPackedSwitch* switch_instr) {
+ jump_tables_.emplace_back(new (GetGraph()->GetArena()) JumpTableARM64(switch_instr));
+ return jump_tables_.back().get();
}
void Finalize(CodeAllocator* allocator) OVERRIDE;
@@ -616,9 +618,10 @@
void EmitJumpTables();
// Labels for each block that will be compiled.
- vixl::Label* block_labels_; // Indexed by block id.
+ // We use a deque so that the `vixl::Label` objects do not move in memory.
+ ArenaDeque<vixl::Label> block_labels_; // Indexed by block id.
vixl::Label frame_entry_label_;
- ArenaVector<JumpTableARM64*> jump_tables_;
+ ArenaVector<std::unique_ptr<JumpTableARM64>> jump_tables_;
LocationsBuilderARM64 location_builder_;
InstructionCodeGeneratorARM64 instruction_visitor_;
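
The deque comment above is the crux of this hunk: the code generator
hands out pointers to these labels and VIXL links unresolved branches
through them, so the `Label` objects must never move in memory. The
property relied on is standard-library behaviour, not ART code;
references into a `std::deque` survive growth at its ends, while
references into a `std::vector` may not:

#include <cassert>
#include <deque>
#include <vector>

int main() {
  std::deque<int> d;
  d.push_back(1);
  int* stable = &d[0];
  for (int i = 0; i < 100000; ++i) d.push_back(i);  // Grows at the back.
  assert(stable == &d[0]);  // Deque growth never relocates existing elements.

  std::vector<int> v;
  v.push_back(1);
  int* unstable = &v[0];
  for (int i = 0; i < 100000; ++i) v.push_back(i);  // May reallocate,
  (void)unstable;  // ...so this pointer is almost certainly dangling now.
  return 0;
}
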
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index 5e6fec8..435a869 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -275,7 +275,7 @@
size_t GetFloatingPointSpillSlotSize() const OVERRIDE { return kMipsDoublewordSize; }
- uintptr_t GetAddressOf(HBasicBlock* block) const OVERRIDE {
+ uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE {
return assembler_.GetLabelLocation(GetLabelOf(block));
}
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index 4e15cdd..9785a2e 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -271,7 +271,7 @@
size_t GetFloatingPointSpillSlotSize() const OVERRIDE { return kMips64DoublewordSize; }
- uintptr_t GetAddressOf(HBasicBlock* block) const OVERRIDE {
+ uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE {
return assembler_.GetLabelLocation(GetLabelOf(block));
}
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 69a6253..1739eec 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -361,7 +361,7 @@
return assembler_;
}
- uintptr_t GetAddressOf(HBasicBlock* block) const OVERRIDE {
+ uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE {
return GetLabelOf(block)->Position();
}
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index d7ce7c6..3a211c5 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -346,7 +346,7 @@
return &move_resolver_;
}
- uintptr_t GetAddressOf(HBasicBlock* block) const OVERRIDE {
+ uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE {
return GetLabelOf(block)->Position();
}