Revert "Revert "ART: Implement literal pool for arm, fix branch fixup.""
This reverts commit fbeb4aede0ddc5b1e6a5a3a40cc6266fe8518c98.
Adjust block label positions; incorrect catch block label
positions were the reason for the original revert.
Change-Id: Ia6950d639d46b9da6b07f3ade63ab46d03d63310
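Note: the re-landed assembler may grow code after positions have been
recorded (widening out-of-range branches, emitting literal pools), so every
previously recorded offset has to be remapped to its final value. A minimal
sketch of that idea (not ART code; the names and the Adjustment list are
hypothetical):

    #include <cstdint>
    #include <vector>

    struct Adjustment {
      uint32_t position;  // Pre-finalization offset where code grew.
      uint32_t delta;     // Number of bytes inserted at that offset.
    };

    // Rough analogue of the GetAdjustedPosition() call in the diff below:
    // add up all insertions that landed at or before old_position.
    uint32_t GetAdjustedPosition(const std::vector<Adjustment>& adjustments,
                                 uint32_t old_position) {
      uint32_t new_position = old_position;
      for (const Adjustment& a : adjustments) {
        if (a.position > old_position) break;  // List is sorted by position.
        new_position += a.delta;
      }
      return new_position;
    }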
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index e6b1f7c..5b0abd7 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -358,15 +358,15 @@
number_of_register_pairs_(number_of_register_pairs),
core_callee_save_mask_(core_callee_save_mask),
fpu_callee_save_mask_(fpu_callee_save_mask),
+ stack_map_stream_(graph->GetArena()),
+ block_order_(nullptr),
is_baseline_(false),
graph_(graph),
compiler_options_(compiler_options),
slow_paths_(graph->GetArena(), 8),
- block_order_(nullptr),
current_block_index_(0),
is_leaf_(true),
- requires_current_method_(false),
- stack_map_stream_(graph->GetArena()) {}
+ requires_current_method_(false) {}
// Register allocation logic.
void AllocateRegistersLocally(HInstruction* instruction) const;
@@ -436,6 +436,11 @@
const uint32_t core_callee_save_mask_;
const uint32_t fpu_callee_save_mask_;
+ StackMapStream stack_map_stream_;
+
+ // The order to use for code generation.
+ const GrowableArray<HBasicBlock*>* block_order_;
+
// Whether we are using baseline.
bool is_baseline_;
@@ -451,9 +456,6 @@
GrowableArray<SlowPathCode*> slow_paths_;
- // The order to use for code generation.
- const GrowableArray<HBasicBlock*>* block_order_;
-
// The current block index in `block_order_` of the block
// we are generating code for.
size_t current_block_index_;
@@ -464,8 +466,6 @@
// Whether an instruction in the graph accesses the current method.
bool requires_current_method_;
- StackMapStream stack_map_stream_;
-
friend class OptimizingCFITest;
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
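The initializer-list reshuffle above is required, not cosmetic: C++
initializes non-static members in declaration order, so moving
stack_map_stream_ and block_order_ up in the class body means the
mem-initializer list must move with them to stay truthful. A minimal
illustration, not ART code:

    struct Example {
      int first_;   // Members are initialized in declaration order:
      int second_;  // first_, then second_, whatever the list says.
      // -Wreorder warns here (an error under -Werror): the list names
      // second_ before first_, but first_ is initialized first anyway.
      Example() : second_(2), first_(1) {}
    };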
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 3d3e35d..f6ae452 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -392,12 +392,38 @@
location_builder_(graph, this),
instruction_visitor_(graph, this),
move_resolver_(graph->GetArena(), this),
- assembler_(false /* can_relocate_branches */),
+ assembler_(),
isa_features_(isa_features) {
// Save the PC register to mimic Quick.
AddAllocatedRegister(Location::RegisterLocation(PC));
}
+void CodeGeneratorARM::Finalize(CodeAllocator* allocator) {
+ // Ensure that we fix up branches and literal loads and emit the literal pool.
+ __ FinalizeCode();
+
+ // Adjust native pc offsets in stack maps.
+ for (size_t i = 0, num = stack_map_stream_.GetNumberOfStackMaps(); i != num; ++i) {
+ uint32_t old_position = stack_map_stream_.GetStackMap(i).native_pc_offset;
+ uint32_t new_position = __ GetAdjustedPosition(old_position);
+ stack_map_stream_.SetStackMapNativePcOffset(i, new_position);
+ }
+ // Adjust native pc offsets of block labels.
+ for (size_t block_idx = 0u, end = block_order_->Size(); block_idx != end; ++block_idx) {
+ HBasicBlock* block = block_order_->Get(block_idx);
+ // Get the label directly from block_labels_ rather than through GetLabelOf() to avoid
+ // FirstNonEmptyBlock(), which could lead to adjusting a label more than once.
+ DCHECK_LT(static_cast<size_t>(block->GetBlockId()), block_labels_.Size());
+ Label* block_label = &block_labels_.GetRawStorage()[block->GetBlockId()];
+ DCHECK_EQ(block_label->IsBound(), !block->IsSingleGoto());
+ if (block_label->IsBound()) {
+ __ AdjustLabelPosition(block_label);
+ }
+ }
+
+ CodeGenerator::Finalize(allocator);
+}
+
Location CodeGeneratorARM::AllocateFreeRegister(Primitive::Type type) const {
switch (type) {
case Primitive::kPrimLong: {
@@ -2831,7 +2857,7 @@
Location left = locations->InAt(0);
Location right = locations->InAt(1);
- NearLabel less, greater, done;
+ Label less, greater, done;
Primitive::Type type = compare->InputAt(0)->GetType();
switch (type) {
case Primitive::kPrimLong: {
@@ -2927,7 +2953,7 @@
Register temp1,
Register temp2,
HInstruction* instruction) {
- NearLabel fail;
+ Label fail;
if (offset != 0) {
__ LoadImmediate(temp1, offset);
__ add(IP, addr, ShifterOperand(temp1));
@@ -3607,7 +3633,7 @@
Register object,
Register value,
bool can_be_null) {
- NearLabel is_null;
+ Label is_null;
if (can_be_null) {
__ CompareAndBranchIfZero(value, &is_null);
}
@@ -4036,7 +4062,7 @@
Register cls = locations->InAt(1).AsRegister<Register>();
Register out = locations->Out().AsRegister<Register>();
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
- NearLabel done, zero;
+ Label done, zero;
SlowPathCodeARM* slow_path = nullptr;
// Return 0 if `obj` is null.
@@ -4093,19 +4119,15 @@
instruction, locations->InAt(1), locations->GetTemp(0), instruction->GetDexPc());
codegen_->AddSlowPath(slow_path);
- NearLabel done;
// avoid null check if we know obj is not null.
if (instruction->MustDoNullCheck()) {
- __ CompareAndBranchIfZero(obj, &done);
+ __ CompareAndBranchIfZero(obj, slow_path->GetExitLabel());
}
// Compare the class of `obj` with `cls`.
__ LoadFromOffset(kLoadWord, temp, obj, class_offset);
__ cmp(temp, ShifterOperand(cls));
__ b(slow_path->GetEntryLabel(), NE);
__ Bind(slow_path->GetExitLabel());
- if (instruction->MustDoNullCheck()) {
- __ Bind(&done);
- }
}
void LocationsBuilderARM::VisitMonitorOperation(HMonitorOperation* instruction) {
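The NearLabel-to-Label conversions in this file follow from the literal
pool: a pool emitted between a branch and its target can push a "near"
16-bit Thumb branch out of range, so with the re-landed assembler doing its
own branch fixup there is no need for callers to promise short distances.
For reference, a rough range check for the narrow conditional form (a
sketch, not the assembler's actual code):

    #include <cstdint>

    // A 16-bit Thumb B<cond> (encoding T1) carries an 8-bit signed,
    // halfword-aligned offset: [-256, 254] bytes relative to PC, where
    // PC reads as the branch address + 4.
    bool FitsNarrowConditionalBranch(uint32_t branch_address, uint32_t target) {
      int64_t offset = static_cast<int64_t>(target) -
                       (static_cast<int64_t>(branch_address) + 4);
      return offset >= -256 && offset <= 254 && (offset % 2) == 0;
    }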
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 824e48c..1599a23 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -298,6 +298,8 @@
block_labels_.SetSize(GetGraph()->GetBlocks().Size());
}
+ void Finalize(CodeAllocator* allocator) OVERRIDE;
+
const ArmInstructionSetFeatures& GetInstructionSetFeatures() const {
return isa_features_;
}
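Finalize() here is a virtual hook on the base CodeGenerator: the ARM
override runs assembler fixups and position adjustment first, then
delegates to the shared implementation. The shape of that pattern, in a
simplified sketch (not the real class hierarchy):

    class CodeAllocator;  // Opaque stand-in for ART's allocator interface.

    class CodeGeneratorBase {
     public:
      virtual ~CodeGeneratorBase() {}
      // Default finalization: emit the finished code via the allocator.
      virtual void Finalize(CodeAllocator* allocator) {}
    };

    class CodeGeneratorArmSketch : public CodeGeneratorBase {
     public:
      void Finalize(CodeAllocator* allocator) override {
        // 1. Fix up branches / emit the literal pool (FinalizeCode()).
        // 2. Rewrite stack map and label positions to adjusted offsets.
        CodeGeneratorBase::Finalize(allocator);  // 3. Shared emission.
      }
    };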
diff --git a/compiler/optimizing/optimizing_cfi_test.cc b/compiler/optimizing/optimizing_cfi_test.cc
index b0d1433..fe3bb1a 100644
--- a/compiler/optimizing/optimizing_cfi_test.cc
+++ b/compiler/optimizing/optimizing_cfi_test.cc
@@ -71,6 +71,8 @@
}
}
}
+ GrowableArray<HBasicBlock*> blocks(&allocator, 0);
+ code_gen->block_order_ = &blocks;
code_gen->ComputeSpillMask();
code_gen->SetFrameSize(frame_size);
code_gen->GenerateFrameEntry();
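These two added lines are needed because CodeGeneratorARM::Finalize() now
walks block_order_: the test emits only a frame entry/exit sequence with no
real block layout, so an empty GrowableArray makes the new label-adjustment
loop iterate zero times. The direct write to the private block_order_ field
compiles because CodeGenerator declares friend class OptimizingCFITest;
(visible in the code_generator.h hunk above).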
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index bc3653d..550ed70 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -134,6 +134,11 @@
return stack_maps_.GetRawStorage()[i];
}
+ void SetStackMapNativePcOffset(size_t i, uint32_t native_pc_offset) {
+ DCHECK_LT(i, stack_maps_.Size());
+ stack_maps_.GetRawStorage()[i].native_pc_offset = native_pc_offset;
+ }
+
uint32_t ComputeMaxNativePcOffset() const;
// Prepares the stream to fill in a memory region. Must be called before FillIn.