summaryrefslogtreecommitdiff
path: root/compiler/optimizing
diff options
context:
space:
mode:
Diffstat (limited to 'compiler/optimizing')
-rw-r--r--compiler/optimizing/boolean_simplifier.cc8
-rw-r--r--compiler/optimizing/bounds_check_elimination.cc105
-rw-r--r--compiler/optimizing/bounds_check_elimination.h9
-rw-r--r--compiler/optimizing/bounds_check_elimination_test.cc781
-rw-r--r--compiler/optimizing/builder.cc9
-rw-r--r--compiler/optimizing/code_generator.cc94
-rw-r--r--compiler/optimizing/code_generator.h11
-rw-r--r--compiler/optimizing/code_generator_arm.cc52
-rw-r--r--compiler/optimizing/code_generator_arm64.cc32
-rw-r--r--compiler/optimizing/code_generator_mips64.cc32
-rw-r--r--compiler/optimizing/code_generator_x86.cc60
-rw-r--r--compiler/optimizing/code_generator_x86_64.cc58
-rw-r--r--compiler/optimizing/codegen_test.cc6
-rw-r--r--compiler/optimizing/dead_code_elimination.cc10
-rw-r--r--compiler/optimizing/graph_checker.cc133
-rw-r--r--compiler/optimizing/graph_test.cc32
-rw-r--r--compiler/optimizing/graph_visualizer.cc9
-rw-r--r--compiler/optimizing/gvn.cc14
-rw-r--r--compiler/optimizing/induction_var_analysis.cc80
-rw-r--r--compiler/optimizing/induction_var_range.cc16
-rw-r--r--compiler/optimizing/induction_var_range.h15
-rw-r--r--compiler/optimizing/inliner.cc6
-rw-r--r--compiler/optimizing/intrinsics.cc32
-rw-r--r--compiler/optimizing/intrinsics_arm.cc221
-rw-r--r--compiler/optimizing/intrinsics_arm64.cc127
-rw-r--r--compiler/optimizing/intrinsics_list.h6
-rw-r--r--compiler/optimizing/intrinsics_x86.cc207
-rw-r--r--compiler/optimizing/intrinsics_x86_64.cc195
-rw-r--r--compiler/optimizing/licm_test.cc21
-rw-r--r--compiler/optimizing/nodes.cc207
-rw-r--r--compiler/optimizing/nodes.h183
-rw-r--r--compiler/optimizing/optimizing_compiler.cc75
-rw-r--r--compiler/optimizing/pretty_printer.h22
-rw-r--r--compiler/optimizing/reference_type_propagation.cc4
-rw-r--r--compiler/optimizing/register_allocator.cc159
-rw-r--r--compiler/optimizing/register_allocator.h18
-rw-r--r--compiler/optimizing/ssa_builder.cc18
-rw-r--r--compiler/optimizing/ssa_liveness_analysis.cc52
-rw-r--r--compiler/optimizing/ssa_liveness_analysis.h3
-rw-r--r--compiler/optimizing/stack_map_stream.cc2
-rw-r--r--compiler/optimizing/suspend_check_test.cc2
41 files changed, 2074 insertions, 1052 deletions
diff --git a/compiler/optimizing/boolean_simplifier.cc b/compiler/optimizing/boolean_simplifier.cc
index 84201c39a7..b0e83b0058 100644
--- a/compiler/optimizing/boolean_simplifier.cc
+++ b/compiler/optimizing/boolean_simplifier.cc
@@ -42,9 +42,9 @@ void HBooleanSimplifier::TryRemovingNegatedCondition(HBasicBlock* block) {
// successor and the successor can only be reached from them.
static bool BlocksDoMergeTogether(HBasicBlock* block1, HBasicBlock* block2) {
if (!block1->IsSingleGoto() || !block2->IsSingleGoto()) return false;
- HBasicBlock* succ1 = block1->GetSuccessors().Get(0);
- HBasicBlock* succ2 = block2->GetSuccessors().Get(0);
- return succ1 == succ2 && succ1->GetPredecessors().Size() == 2u;
+ HBasicBlock* succ1 = block1->GetSuccessor(0);
+ HBasicBlock* succ2 = block2->GetSuccessor(0);
+ return succ1 == succ2 && succ1->GetPredecessors().size() == 2u;
}
// Returns true if the outcome of the branching matches the boolean value of
@@ -108,7 +108,7 @@ void HBooleanSimplifier::TryRemovingBooleanSelection(HBasicBlock* block) {
if (!BlocksDoMergeTogether(true_block, false_block)) {
return;
}
- HBasicBlock* merge_block = true_block->GetSuccessors().Get(0);
+ HBasicBlock* merge_block = true_block->GetSuccessor(0);
if (!merge_block->HasSinglePhi()) {
return;
}
diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc
index ebc0adc64a..0d953900a9 100644
--- a/compiler/optimizing/bounds_check_elimination.cc
+++ b/compiler/optimizing/bounds_check_elimination.cc
@@ -16,6 +16,7 @@
#include "base/arena_containers.h"
#include "bounds_check_elimination.h"
+#include "induction_var_range.h"
#include "nodes.h"
namespace art {
@@ -126,14 +127,17 @@ class ValueBound : public ValueObject {
return instruction_ == bound.instruction_ && constant_ == bound.constant_;
}
- static HInstruction* FromArrayLengthToArray(HInstruction* instruction) {
- DCHECK(instruction->IsArrayLength() || instruction->IsNewArray());
- if (instruction->IsArrayLength()) {
- HInstruction* input = instruction->InputAt(0);
- if (input->IsNullCheck()) {
- input = input->AsNullCheck()->InputAt(0);
- }
- return input;
+ /*
+ * Hunt "under the hood" of array lengths (leading to array references),
+ * null checks (also leading to array references), and new arrays
+ * (leading to the actual length). This makes it more likely related
+ * instructions become actually comparable.
+ */
+ static HInstruction* HuntForDeclaration(HInstruction* instruction) {
+ while (instruction->IsArrayLength() ||
+ instruction->IsNullCheck() ||
+ instruction->IsNewArray()) {
+ instruction = instruction->InputAt(0);
}
return instruction;
}
@@ -142,16 +146,11 @@ class ValueBound : public ValueObject {
if (instruction1 == instruction2) {
return true;
}
-
if (instruction1 == nullptr || instruction2 == nullptr) {
return false;
}
-
- // Some bounds are created with HNewArray* as the instruction instead
- // of HArrayLength*. They are treated the same.
- // HArrayLength with the same array input are considered equal also.
- instruction1 = FromArrayLengthToArray(instruction1);
- instruction2 = FromArrayLengthToArray(instruction2);
+ instruction1 = HuntForDeclaration(instruction1);
+ instruction2 = HuntForDeclaration(instruction2);
return instruction1 == instruction2;
}
@@ -275,9 +274,8 @@ class ArrayAccessInsideLoopFinder : public ValueObject {
// Loop header of loop_info. Exiting loop is normal.
return false;
}
- const GrowableArray<HBasicBlock*>& successors = block->GetSuccessors();
- for (size_t i = 0; i < successors.Size(); i++) {
- if (!loop_info->Contains(*successors.Get(i))) {
+ for (HBasicBlock* successor : block->GetSuccessors()) {
+ if (!loop_info->Contains(*successor)) {
// One of the successors exits the loop.
return true;
}
@@ -797,8 +795,8 @@ class MonotonicValueRange : public ValueRange {
HBasicBlock* new_pre_header = header->GetDominator();
DCHECK(new_pre_header == header->GetLoopInformation()->GetPreHeader());
HBasicBlock* if_block = new_pre_header->GetDominator();
- HBasicBlock* dummy_block = if_block->GetSuccessors().Get(0); // True successor.
- HBasicBlock* deopt_block = if_block->GetSuccessors().Get(1); // False successor.
+ HBasicBlock* dummy_block = if_block->GetSuccessor(0); // True successor.
+ HBasicBlock* deopt_block = if_block->GetSuccessor(1); // False successor.
dummy_block->AddInstruction(new (graph->GetArena()) HGoto());
deopt_block->AddInstruction(new (graph->GetArena()) HGoto());
@@ -845,14 +843,14 @@ class MonotonicValueRange : public ValueRange {
DCHECK(header->IsLoopHeader());
HBasicBlock* pre_header = header->GetDominator();
if (loop_entry_test_block_added) {
- DCHECK(deopt_block->GetSuccessors().Get(0) == pre_header);
+ DCHECK(deopt_block->GetSuccessor(0) == pre_header);
} else {
DCHECK(deopt_block == pre_header);
}
HGraph* graph = header->GetGraph();
HSuspendCheck* suspend_check = header->GetLoopInformation()->GetSuspendCheck();
if (loop_entry_test_block_added) {
- DCHECK_EQ(deopt_block, header->GetDominator()->GetDominator()->GetSuccessors().Get(1));
+ DCHECK_EQ(deopt_block, header->GetDominator()->GetDominator()->GetSuccessor(1));
}
HIntConstant* const_instr = graph->GetIntConstant(constant);
@@ -926,7 +924,7 @@ class MonotonicValueRange : public ValueRange {
DCHECK(header->IsLoopHeader());
HBasicBlock* pre_header = header->GetDominator();
if (loop_entry_test_block_added) {
- DCHECK(deopt_block->GetSuccessors().Get(0) == pre_header);
+ DCHECK(deopt_block->GetSuccessor(0) == pre_header);
} else {
DCHECK(deopt_block == pre_header);
}
@@ -1109,9 +1107,12 @@ class BCEVisitor : public HGraphVisitor {
return block->GetBlockId() >= initial_block_size_;
}
- explicit BCEVisitor(HGraph* graph)
- : HGraphVisitor(graph), maps_(graph->GetBlocks().Size()),
- need_to_revisit_block_(false), initial_block_size_(graph->GetBlocks().Size()) {}
+ BCEVisitor(HGraph* graph, HInductionVarAnalysis* induction_analysis)
+ : HGraphVisitor(graph),
+ maps_(graph->GetBlocks().Size()),
+ need_to_revisit_block_(false),
+ initial_block_size_(graph->GetBlocks().Size()),
+ induction_range_(induction_analysis) {}
void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
DCHECK(!IsAddedBlock(block));
@@ -1160,6 +1161,23 @@ class BCEVisitor : public HGraphVisitor {
return nullptr;
}
+ // Return the range resulting from induction variable analysis of "instruction" when the value
+ // is used from "context", for example, an index used from a bounds-check inside a loop body.
+ ValueRange* LookupInductionRange(HInstruction* context, HInstruction* instruction) {
+ InductionVarRange::Value v1 = induction_range_.GetMinInduction(context, instruction);
+ InductionVarRange::Value v2 = induction_range_.GetMaxInduction(context, instruction);
+ if ((v1.a_constant == 0 || v1.a_constant == 1) && v1.b_constant != INT_MIN &&
+ (v2.a_constant == 0 || v2.a_constant == 1) && v2.b_constant != INT_MAX) {
+ DCHECK(v1.a_constant == 1 || v1.instruction == nullptr);
+ DCHECK(v2.a_constant == 1 || v2.instruction == nullptr);
+ ValueBound low = ValueBound(v1.instruction, v1.b_constant);
+ ValueBound up = ValueBound(v2.instruction, v2.b_constant);
+ return new (GetGraph()->GetArena()) ValueRange(GetGraph()->GetArena(), low, up);
+ }
+ // Didn't find anything useful.
+ return nullptr;
+ }
+
// Narrow the value range of `instruction` at the end of `basic_block` with `range`,
// and push the narrowed value range to `successor`.
void ApplyRangeFromComparison(HInstruction* instruction, HBasicBlock* basic_block,
@@ -1256,11 +1274,11 @@ class BCEVisitor : public HGraphVisitor {
HBasicBlock* true_successor = instruction->IfTrueSuccessor();
// There should be no critical edge at this point.
- DCHECK_EQ(true_successor->GetPredecessors().Size(), 1u);
+ DCHECK_EQ(true_successor->GetPredecessors().size(), 1u);
HBasicBlock* false_successor = instruction->IfFalseSuccessor();
// There should be no critical edge at this point.
- DCHECK_EQ(false_successor->GetPredecessors().Size(), 1u);
+ DCHECK_EQ(false_successor->GetPredecessors().size(), 1u);
ValueRange* left_range = LookupValueRange(left, block);
MonotonicValueRange* left_monotonic_range = nullptr;
@@ -1391,16 +1409,20 @@ class BCEVisitor : public HGraphVisitor {
}
if (!index->IsIntConstant()) {
+ ValueBound lower = ValueBound(nullptr, 0); // constant 0
+ ValueBound upper = ValueBound(array_length, -1); // array_length - 1
+ ValueRange array_range(GetGraph()->GetArena(), lower, upper);
+ // Try range obtained by local analysis.
ValueRange* index_range = LookupValueRange(index, block);
- if (index_range != nullptr) {
- ValueBound lower = ValueBound(nullptr, 0); // constant 0
- ValueBound upper = ValueBound(array_length, -1); // array_length - 1
- ValueRange* array_range = new (GetGraph()->GetArena())
- ValueRange(GetGraph()->GetArena(), lower, upper);
- if (index_range->FitsIn(array_range)) {
- ReplaceBoundsCheck(bounds_check, index);
- return;
- }
+ if (index_range != nullptr && index_range->FitsIn(&array_range)) {
+ ReplaceBoundsCheck(bounds_check, index);
+ return;
+ }
+ // Try range obtained by induction variable analysis.
+ index_range = LookupInductionRange(bounds_check, index);
+ if (index_range != nullptr && index_range->FitsIn(&array_range)) {
+ ReplaceBoundsCheck(bounds_check, index);
+ return;
}
} else {
int32_t constant = index->AsIntConstant()->GetValue();
@@ -1468,10 +1490,10 @@ class BCEVisitor : public HGraphVisitor {
// Start with input 1. Input 0 is from the incoming block.
HInstruction* input1 = phi->InputAt(1);
DCHECK(phi->GetBlock()->GetLoopInformation()->IsBackEdge(
- *phi->GetBlock()->GetPredecessors().Get(1)));
+ *phi->GetBlock()->GetPredecessor(1)));
for (size_t i = 2, e = phi->InputCount(); i < e; ++i) {
DCHECK(phi->GetBlock()->GetLoopInformation()->IsBackEdge(
- *phi->GetBlock()->GetPredecessors().Get(i)));
+ *phi->GetBlock()->GetPredecessor(i)));
if (input1 != phi->InputAt(i)) {
return false;
}
@@ -1832,6 +1854,9 @@ class BCEVisitor : public HGraphVisitor {
// Initial number of blocks.
int32_t initial_block_size_;
+ // Range analysis based on induction variables.
+ InductionVarRange induction_range_;
+
DISALLOW_COPY_AND_ASSIGN(BCEVisitor);
};
@@ -1840,7 +1865,7 @@ void BoundsCheckElimination::Run() {
return;
}
- BCEVisitor visitor(graph_);
+ BCEVisitor visitor(graph_, induction_analysis_);
// Reverse post order guarantees a node's dominators are visited first.
// We want to visit in the dominator-based order since if a value is known to
// be bounded by a range at one instruction, it must be true that all uses of
diff --git a/compiler/optimizing/bounds_check_elimination.h b/compiler/optimizing/bounds_check_elimination.h
index 24b8ea7c56..cdff3ca0ba 100644
--- a/compiler/optimizing/bounds_check_elimination.h
+++ b/compiler/optimizing/bounds_check_elimination.h
@@ -21,16 +21,21 @@
namespace art {
+class HInductionVarAnalysis;
+
class BoundsCheckElimination : public HOptimization {
public:
- explicit BoundsCheckElimination(HGraph* graph)
- : HOptimization(graph, kBoundsCheckEliminiationPassName) {}
+ BoundsCheckElimination(HGraph* graph, HInductionVarAnalysis* induction_analysis)
+ : HOptimization(graph, kBoundsCheckEliminiationPassName),
+ induction_analysis_(induction_analysis) {}
void Run() OVERRIDE;
static constexpr const char* kBoundsCheckEliminiationPassName = "BCE";
private:
+ HInductionVarAnalysis* induction_analysis_;
+
DISALLOW_COPY_AND_ASSIGN(BoundsCheckElimination);
};
diff --git a/compiler/optimizing/bounds_check_elimination_test.cc b/compiler/optimizing/bounds_check_elimination_test.cc
index 4701bddd48..08e1e3682b 100644
--- a/compiler/optimizing/bounds_check_elimination_test.cc
+++ b/compiler/optimizing/bounds_check_elimination_test.cc
@@ -18,6 +18,7 @@
#include "bounds_check_elimination.h"
#include "builder.h"
#include "gvn.h"
+#include "induction_var_analysis.h"
#include "instruction_simplifier.h"
#include "nodes.h"
#include "optimizing_unit_test.h"
@@ -27,101 +28,122 @@
namespace art {
-static void RunSimplifierAndGvn(HGraph* graph) {
- InstructionSimplifier simplify(graph);
- simplify.Run();
- SideEffectsAnalysis side_effects(graph);
- side_effects.Run();
- GVNOptimization(graph, side_effects).Run();
-}
+/**
+ * Fixture class for the BoundsCheckElimination tests.
+ */
+class BoundsCheckEliminationTest : public testing::Test {
+ public:
+ BoundsCheckEliminationTest() : pool_(), allocator_(&pool_) {
+ graph_ = CreateGraph(&allocator_);
+ graph_->SetHasBoundsChecks(true);
+ }
+
+ ~BoundsCheckEliminationTest() { }
+
+ void RunBCE() {
+ graph_->BuildDominatorTree();
+ graph_->AnalyzeNaturalLoops();
+
+ InstructionSimplifier(graph_).Run();
+
+ SideEffectsAnalysis side_effects(graph_);
+ side_effects.Run();
+
+ GVNOptimization(graph_, side_effects).Run();
+
+ HInductionVarAnalysis induction(graph_);
+ induction.Run();
+
+ BoundsCheckElimination(graph_, &induction).Run();
+ }
+
+ ArenaPool pool_;
+ ArenaAllocator allocator_;
+ HGraph* graph_;
+};
+
// if (i < 0) { array[i] = 1; // Can't eliminate. }
// else if (i >= array.length) { array[i] = 1; // Can't eliminate. }
// else { array[i] = 1; // Can eliminate. }
-TEST(BoundsCheckEliminationTest, NarrowingRangeArrayBoundsElimination) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateGraph(&allocator);
- graph->SetHasBoundsChecks(true);
-
- HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(entry);
- graph->SetEntryBlock(entry);
- HInstruction* parameter1 = new (&allocator)
+TEST_F(BoundsCheckEliminationTest, NarrowingRangeArrayBoundsElimination) {
+ HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(entry);
+ graph_->SetEntryBlock(entry);
+ HInstruction* parameter1 = new (&allocator_)
HParameterValue(0, Primitive::kPrimNot); // array
- HInstruction* parameter2 = new (&allocator)
+ HInstruction* parameter2 = new (&allocator_)
HParameterValue(0, Primitive::kPrimInt); // i
entry->AddInstruction(parameter1);
entry->AddInstruction(parameter2);
- HInstruction* constant_1 = graph->GetIntConstant(1);
- HInstruction* constant_0 = graph->GetIntConstant(0);
+ HInstruction* constant_1 = graph_->GetIntConstant(1);
+ HInstruction* constant_0 = graph_->GetIntConstant(0);
- HBasicBlock* block1 = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(block1);
- HInstruction* cmp = new (&allocator) HGreaterThanOrEqual(parameter2, constant_0);
- HIf* if_inst = new (&allocator) HIf(cmp);
+ HBasicBlock* block1 = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(block1);
+ HInstruction* cmp = new (&allocator_) HGreaterThanOrEqual(parameter2, constant_0);
+ HIf* if_inst = new (&allocator_) HIf(cmp);
block1->AddInstruction(cmp);
block1->AddInstruction(if_inst);
entry->AddSuccessor(block1);
- HBasicBlock* block2 = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(block2);
- HNullCheck* null_check = new (&allocator) HNullCheck(parameter1, 0);
- HArrayLength* array_length = new (&allocator) HArrayLength(null_check);
- HBoundsCheck* bounds_check2 = new (&allocator)
+ HBasicBlock* block2 = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(block2);
+ HNullCheck* null_check = new (&allocator_) HNullCheck(parameter1, 0);
+ HArrayLength* array_length = new (&allocator_) HArrayLength(null_check);
+ HBoundsCheck* bounds_check2 = new (&allocator_)
HBoundsCheck(parameter2, array_length, 0);
- HArraySet* array_set = new (&allocator) HArraySet(
+ HArraySet* array_set = new (&allocator_) HArraySet(
null_check, bounds_check2, constant_1, Primitive::kPrimInt, 0);
block2->AddInstruction(null_check);
block2->AddInstruction(array_length);
block2->AddInstruction(bounds_check2);
block2->AddInstruction(array_set);
- HBasicBlock* block3 = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(block3);
- null_check = new (&allocator) HNullCheck(parameter1, 0);
- array_length = new (&allocator) HArrayLength(null_check);
- cmp = new (&allocator) HLessThan(parameter2, array_length);
- if_inst = new (&allocator) HIf(cmp);
+ HBasicBlock* block3 = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(block3);
+ null_check = new (&allocator_) HNullCheck(parameter1, 0);
+ array_length = new (&allocator_) HArrayLength(null_check);
+ cmp = new (&allocator_) HLessThan(parameter2, array_length);
+ if_inst = new (&allocator_) HIf(cmp);
block3->AddInstruction(null_check);
block3->AddInstruction(array_length);
block3->AddInstruction(cmp);
block3->AddInstruction(if_inst);
- HBasicBlock* block4 = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(block4);
- null_check = new (&allocator) HNullCheck(parameter1, 0);
- array_length = new (&allocator) HArrayLength(null_check);
- HBoundsCheck* bounds_check4 = new (&allocator)
+ HBasicBlock* block4 = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(block4);
+ null_check = new (&allocator_) HNullCheck(parameter1, 0);
+ array_length = new (&allocator_) HArrayLength(null_check);
+ HBoundsCheck* bounds_check4 = new (&allocator_)
HBoundsCheck(parameter2, array_length, 0);
- array_set = new (&allocator) HArraySet(
+ array_set = new (&allocator_) HArraySet(
null_check, bounds_check4, constant_1, Primitive::kPrimInt, 0);
block4->AddInstruction(null_check);
block4->AddInstruction(array_length);
block4->AddInstruction(bounds_check4);
block4->AddInstruction(array_set);
- HBasicBlock* block5 = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(block5);
- null_check = new (&allocator) HNullCheck(parameter1, 0);
- array_length = new (&allocator) HArrayLength(null_check);
- HBoundsCheck* bounds_check5 = new (&allocator)
+ HBasicBlock* block5 = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(block5);
+ null_check = new (&allocator_) HNullCheck(parameter1, 0);
+ array_length = new (&allocator_) HArrayLength(null_check);
+ HBoundsCheck* bounds_check5 = new (&allocator_)
HBoundsCheck(parameter2, array_length, 0);
- array_set = new (&allocator) HArraySet(
+ array_set = new (&allocator_) HArraySet(
null_check, bounds_check5, constant_1, Primitive::kPrimInt, 0);
block5->AddInstruction(null_check);
block5->AddInstruction(array_length);
block5->AddInstruction(bounds_check5);
block5->AddInstruction(array_set);
- HBasicBlock* exit = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(exit);
+ HBasicBlock* exit = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(exit);
block2->AddSuccessor(exit);
block4->AddSuccessor(exit);
block5->AddSuccessor(exit);
- exit->AddInstruction(new (&allocator) HExit());
+ exit->AddInstruction(new (&allocator_) HExit());
block1->AddSuccessor(block3); // True successor
block1->AddSuccessor(block2); // False successor
@@ -129,10 +151,8 @@ TEST(BoundsCheckEliminationTest, NarrowingRangeArrayBoundsElimination) {
block3->AddSuccessor(block5); // True successor
block3->AddSuccessor(block4); // False successor
- graph->BuildDominatorTree();
- RunSimplifierAndGvn(graph);
- BoundsCheckElimination bounds_check_elimination(graph);
- bounds_check_elimination.Run();
+ RunBCE();
+
ASSERT_FALSE(IsRemoved(bounds_check2));
ASSERT_FALSE(IsRemoved(bounds_check4));
ASSERT_TRUE(IsRemoved(bounds_check5));
@@ -143,230 +163,203 @@ TEST(BoundsCheckEliminationTest, NarrowingRangeArrayBoundsElimination) {
// int j = i + Integer.MAX_VALUE;
// if (j < array.length) array[j] = 1; // Can't eliminate.
// }
-TEST(BoundsCheckEliminationTest, OverflowArrayBoundsElimination) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateGraph(&allocator);
- graph->SetHasBoundsChecks(true);
-
- HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(entry);
- graph->SetEntryBlock(entry);
- HInstruction* parameter1 = new (&allocator)
+TEST_F(BoundsCheckEliminationTest, OverflowArrayBoundsElimination) {
+ HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(entry);
+ graph_->SetEntryBlock(entry);
+ HInstruction* parameter1 = new (&allocator_)
HParameterValue(0, Primitive::kPrimNot); // array
- HInstruction* parameter2 = new (&allocator)
+ HInstruction* parameter2 = new (&allocator_)
HParameterValue(0, Primitive::kPrimInt); // i
entry->AddInstruction(parameter1);
entry->AddInstruction(parameter2);
- HInstruction* constant_1 = graph->GetIntConstant(1);
- HInstruction* constant_0 = graph->GetIntConstant(0);
- HInstruction* constant_max_int = graph->GetIntConstant(INT_MAX);
+ HInstruction* constant_1 = graph_->GetIntConstant(1);
+ HInstruction* constant_0 = graph_->GetIntConstant(0);
+ HInstruction* constant_max_int = graph_->GetIntConstant(INT_MAX);
- HBasicBlock* block1 = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(block1);
- HInstruction* cmp = new (&allocator) HLessThanOrEqual(parameter2, constant_0);
- HIf* if_inst = new (&allocator) HIf(cmp);
+ HBasicBlock* block1 = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(block1);
+ HInstruction* cmp = new (&allocator_) HLessThanOrEqual(parameter2, constant_0);
+ HIf* if_inst = new (&allocator_) HIf(cmp);
block1->AddInstruction(cmp);
block1->AddInstruction(if_inst);
entry->AddSuccessor(block1);
- HBasicBlock* block2 = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(block2);
- HInstruction* add = new (&allocator) HAdd(Primitive::kPrimInt, parameter2, constant_max_int);
- HNullCheck* null_check = new (&allocator) HNullCheck(parameter1, 0);
- HArrayLength* array_length = new (&allocator) HArrayLength(null_check);
- HInstruction* cmp2 = new (&allocator) HGreaterThanOrEqual(add, array_length);
- if_inst = new (&allocator) HIf(cmp2);
+ HBasicBlock* block2 = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(block2);
+ HInstruction* add = new (&allocator_) HAdd(Primitive::kPrimInt, parameter2, constant_max_int);
+ HNullCheck* null_check = new (&allocator_) HNullCheck(parameter1, 0);
+ HArrayLength* array_length = new (&allocator_) HArrayLength(null_check);
+ HInstruction* cmp2 = new (&allocator_) HGreaterThanOrEqual(add, array_length);
+ if_inst = new (&allocator_) HIf(cmp2);
block2->AddInstruction(add);
block2->AddInstruction(null_check);
block2->AddInstruction(array_length);
block2->AddInstruction(cmp2);
block2->AddInstruction(if_inst);
- HBasicBlock* block3 = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(block3);
- HBoundsCheck* bounds_check = new (&allocator)
+ HBasicBlock* block3 = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(block3);
+ HBoundsCheck* bounds_check = new (&allocator_)
HBoundsCheck(add, array_length, 0);
- HArraySet* array_set = new (&allocator) HArraySet(
+ HArraySet* array_set = new (&allocator_) HArraySet(
null_check, bounds_check, constant_1, Primitive::kPrimInt, 0);
block3->AddInstruction(bounds_check);
block3->AddInstruction(array_set);
- HBasicBlock* exit = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(exit);
- exit->AddInstruction(new (&allocator) HExit());
+ HBasicBlock* exit = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(exit);
+ exit->AddInstruction(new (&allocator_) HExit());
block1->AddSuccessor(exit); // true successor
block1->AddSuccessor(block2); // false successor
block2->AddSuccessor(exit); // true successor
block2->AddSuccessor(block3); // false successor
block3->AddSuccessor(exit);
- graph->BuildDominatorTree();
- RunSimplifierAndGvn(graph);
- BoundsCheckElimination bounds_check_elimination(graph);
- bounds_check_elimination.Run();
+ RunBCE();
+
ASSERT_FALSE(IsRemoved(bounds_check));
}
// if (i < array.length) {
// int j = i - Integer.MAX_VALUE;
-// j = j - Integer.MAX_VALUE; // j is (i+2) after substracting MAX_INT twice
+// j = j - Integer.MAX_VALUE; // j is (i+2) after subtracting MAX_INT twice
// if (j > 0) array[j] = 1; // Can't eliminate.
// }
-TEST(BoundsCheckEliminationTest, UnderflowArrayBoundsElimination) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateGraph(&allocator);
- graph->SetHasBoundsChecks(true);
-
- HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(entry);
- graph->SetEntryBlock(entry);
- HInstruction* parameter1 = new (&allocator)
+TEST_F(BoundsCheckEliminationTest, UnderflowArrayBoundsElimination) {
+ HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(entry);
+ graph_->SetEntryBlock(entry);
+ HInstruction* parameter1 = new (&allocator_)
HParameterValue(0, Primitive::kPrimNot); // array
- HInstruction* parameter2 = new (&allocator)
+ HInstruction* parameter2 = new (&allocator_)
HParameterValue(0, Primitive::kPrimInt); // i
entry->AddInstruction(parameter1);
entry->AddInstruction(parameter2);
- HInstruction* constant_1 = graph->GetIntConstant(1);
- HInstruction* constant_0 = graph->GetIntConstant(0);
- HInstruction* constant_max_int = graph->GetIntConstant(INT_MAX);
-
- HBasicBlock* block1 = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(block1);
- HNullCheck* null_check = new (&allocator) HNullCheck(parameter1, 0);
- HArrayLength* array_length = new (&allocator) HArrayLength(null_check);
- HInstruction* cmp = new (&allocator) HGreaterThanOrEqual(parameter2, array_length);
- HIf* if_inst = new (&allocator) HIf(cmp);
+ HInstruction* constant_1 = graph_->GetIntConstant(1);
+ HInstruction* constant_0 = graph_->GetIntConstant(0);
+ HInstruction* constant_max_int = graph_->GetIntConstant(INT_MAX);
+
+ HBasicBlock* block1 = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(block1);
+ HNullCheck* null_check = new (&allocator_) HNullCheck(parameter1, 0);
+ HArrayLength* array_length = new (&allocator_) HArrayLength(null_check);
+ HInstruction* cmp = new (&allocator_) HGreaterThanOrEqual(parameter2, array_length);
+ HIf* if_inst = new (&allocator_) HIf(cmp);
block1->AddInstruction(null_check);
block1->AddInstruction(array_length);
block1->AddInstruction(cmp);
block1->AddInstruction(if_inst);
entry->AddSuccessor(block1);
- HBasicBlock* block2 = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(block2);
- HInstruction* sub1 = new (&allocator) HSub(Primitive::kPrimInt, parameter2, constant_max_int);
- HInstruction* sub2 = new (&allocator) HSub(Primitive::kPrimInt, sub1, constant_max_int);
- HInstruction* cmp2 = new (&allocator) HLessThanOrEqual(sub2, constant_0);
- if_inst = new (&allocator) HIf(cmp2);
+ HBasicBlock* block2 = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(block2);
+ HInstruction* sub1 = new (&allocator_) HSub(Primitive::kPrimInt, parameter2, constant_max_int);
+ HInstruction* sub2 = new (&allocator_) HSub(Primitive::kPrimInt, sub1, constant_max_int);
+ HInstruction* cmp2 = new (&allocator_) HLessThanOrEqual(sub2, constant_0);
+ if_inst = new (&allocator_) HIf(cmp2);
block2->AddInstruction(sub1);
block2->AddInstruction(sub2);
block2->AddInstruction(cmp2);
block2->AddInstruction(if_inst);
- HBasicBlock* block3 = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(block3);
- HBoundsCheck* bounds_check = new (&allocator)
+ HBasicBlock* block3 = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(block3);
+ HBoundsCheck* bounds_check = new (&allocator_)
HBoundsCheck(sub2, array_length, 0);
- HArraySet* array_set = new (&allocator) HArraySet(
+ HArraySet* array_set = new (&allocator_) HArraySet(
null_check, bounds_check, constant_1, Primitive::kPrimInt, 0);
block3->AddInstruction(bounds_check);
block3->AddInstruction(array_set);
- HBasicBlock* exit = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(exit);
- exit->AddInstruction(new (&allocator) HExit());
+ HBasicBlock* exit = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(exit);
+ exit->AddInstruction(new (&allocator_) HExit());
block1->AddSuccessor(exit); // true successor
block1->AddSuccessor(block2); // false successor
block2->AddSuccessor(exit); // true successor
block2->AddSuccessor(block3); // false successor
block3->AddSuccessor(exit);
- graph->BuildDominatorTree();
- RunSimplifierAndGvn(graph);
- BoundsCheckElimination bounds_check_elimination(graph);
- bounds_check_elimination.Run();
+ RunBCE();
+
ASSERT_FALSE(IsRemoved(bounds_check));
}
// array[6] = 1; // Can't eliminate.
// array[5] = 1; // Can eliminate.
// array[4] = 1; // Can eliminate.
-TEST(BoundsCheckEliminationTest, ConstantArrayBoundsElimination) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateGraph(&allocator);
- graph->SetHasBoundsChecks(true);
-
- HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(entry);
- graph->SetEntryBlock(entry);
- HInstruction* parameter = new (&allocator) HParameterValue(0, Primitive::kPrimNot);
+TEST_F(BoundsCheckEliminationTest, ConstantArrayBoundsElimination) {
+ HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(entry);
+ graph_->SetEntryBlock(entry);
+ HInstruction* parameter = new (&allocator_) HParameterValue(0, Primitive::kPrimNot);
entry->AddInstruction(parameter);
- HInstruction* constant_5 = graph->GetIntConstant(5);
- HInstruction* constant_4 = graph->GetIntConstant(4);
- HInstruction* constant_6 = graph->GetIntConstant(6);
- HInstruction* constant_1 = graph->GetIntConstant(1);
+ HInstruction* constant_5 = graph_->GetIntConstant(5);
+ HInstruction* constant_4 = graph_->GetIntConstant(4);
+ HInstruction* constant_6 = graph_->GetIntConstant(6);
+ HInstruction* constant_1 = graph_->GetIntConstant(1);
- HBasicBlock* block = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(block);
+ HBasicBlock* block = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(block);
entry->AddSuccessor(block);
- HNullCheck* null_check = new (&allocator) HNullCheck(parameter, 0);
- HArrayLength* array_length = new (&allocator) HArrayLength(null_check);
- HBoundsCheck* bounds_check6 = new (&allocator)
+ HNullCheck* null_check = new (&allocator_) HNullCheck(parameter, 0);
+ HArrayLength* array_length = new (&allocator_) HArrayLength(null_check);
+ HBoundsCheck* bounds_check6 = new (&allocator_)
HBoundsCheck(constant_6, array_length, 0);
- HInstruction* array_set = new (&allocator) HArraySet(
+ HInstruction* array_set = new (&allocator_) HArraySet(
null_check, bounds_check6, constant_1, Primitive::kPrimInt, 0);
block->AddInstruction(null_check);
block->AddInstruction(array_length);
block->AddInstruction(bounds_check6);
block->AddInstruction(array_set);
- null_check = new (&allocator) HNullCheck(parameter, 0);
- array_length = new (&allocator) HArrayLength(null_check);
- HBoundsCheck* bounds_check5 = new (&allocator)
+ null_check = new (&allocator_) HNullCheck(parameter, 0);
+ array_length = new (&allocator_) HArrayLength(null_check);
+ HBoundsCheck* bounds_check5 = new (&allocator_)
HBoundsCheck(constant_5, array_length, 0);
- array_set = new (&allocator) HArraySet(
+ array_set = new (&allocator_) HArraySet(
null_check, bounds_check5, constant_1, Primitive::kPrimInt, 0);
block->AddInstruction(null_check);
block->AddInstruction(array_length);
block->AddInstruction(bounds_check5);
block->AddInstruction(array_set);
- null_check = new (&allocator) HNullCheck(parameter, 0);
- array_length = new (&allocator) HArrayLength(null_check);
- HBoundsCheck* bounds_check4 = new (&allocator)
+ null_check = new (&allocator_) HNullCheck(parameter, 0);
+ array_length = new (&allocator_) HArrayLength(null_check);
+ HBoundsCheck* bounds_check4 = new (&allocator_)
HBoundsCheck(constant_4, array_length, 0);
- array_set = new (&allocator) HArraySet(
+ array_set = new (&allocator_) HArraySet(
null_check, bounds_check4, constant_1, Primitive::kPrimInt, 0);
block->AddInstruction(null_check);
block->AddInstruction(array_length);
block->AddInstruction(bounds_check4);
block->AddInstruction(array_set);
- block->AddInstruction(new (&allocator) HGoto());
+ block->AddInstruction(new (&allocator_) HGoto());
- HBasicBlock* exit = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(exit);
+ HBasicBlock* exit = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(exit);
block->AddSuccessor(exit);
- exit->AddInstruction(new (&allocator) HExit());
+ exit->AddInstruction(new (&allocator_) HExit());
+
+ RunBCE();
- graph->BuildDominatorTree();
- RunSimplifierAndGvn(graph);
- BoundsCheckElimination bounds_check_elimination(graph);
- bounds_check_elimination.Run();
ASSERT_FALSE(IsRemoved(bounds_check6));
ASSERT_TRUE(IsRemoved(bounds_check5));
ASSERT_TRUE(IsRemoved(bounds_check4));
}
// for (int i=initial; i<array.length; i+=increment) { array[i] = 10; }
-static HGraph* BuildSSAGraph1(ArenaAllocator* allocator,
- HInstruction** bounds_check,
- int initial,
- int increment,
- IfCondition cond = kCondGE) {
- HGraph* graph = CreateGraph(allocator);
- graph->SetHasBoundsChecks(true);
-
+static HInstruction* BuildSSAGraph1(HGraph* graph,
+ ArenaAllocator* allocator,
+ int initial,
+ int increment,
+ IfCondition cond = kCondGE) {
HBasicBlock* entry = new (allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
@@ -414,14 +407,14 @@ static HGraph* BuildSSAGraph1(ArenaAllocator* allocator,
null_check = new (allocator) HNullCheck(parameter, 0);
array_length = new (allocator) HArrayLength(null_check);
- *bounds_check = new (allocator) HBoundsCheck(phi, array_length, 0);
+ HInstruction* bounds_check = new (allocator) HBoundsCheck(phi, array_length, 0);
HInstruction* array_set = new (allocator) HArraySet(
- null_check, *bounds_check, constant_10, Primitive::kPrimInt, 0);
+ null_check, bounds_check, constant_10, Primitive::kPrimInt, 0);
HInstruction* add = new (allocator) HAdd(Primitive::kPrimInt, phi, constant_increment);
loop_body->AddInstruction(null_check);
loop_body->AddInstruction(array_length);
- loop_body->AddInstruction(*bounds_check);
+ loop_body->AddInstruction(bounds_check);
loop_body->AddInstruction(array_set);
loop_body->AddInstruction(add);
loop_body->AddInstruction(new (allocator) HGoto());
@@ -429,79 +422,58 @@ static HGraph* BuildSSAGraph1(ArenaAllocator* allocator,
exit->AddInstruction(new (allocator) HExit());
- return graph;
+ return bounds_check;
}
-TEST(BoundsCheckEliminationTest, LoopArrayBoundsElimination1) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
+TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination1a) {
// for (int i=0; i<array.length; i++) { array[i] = 10; // Can eliminate with gvn. }
- HInstruction* bounds_check = nullptr;
- HGraph* graph = BuildSSAGraph1(&allocator, &bounds_check, 0, 1);
- graph->BuildDominatorTree();
- graph->AnalyzeNaturalLoops();
- RunSimplifierAndGvn(graph);
- BoundsCheckElimination bounds_check_elimination(graph);
- bounds_check_elimination.Run();
+ HInstruction* bounds_check = BuildSSAGraph1(graph_, &allocator_, 0, 1);
+ RunBCE();
ASSERT_TRUE(IsRemoved(bounds_check));
+}
+TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination1b) {
// for (int i=1; i<array.length; i++) { array[i] = 10; // Can eliminate. }
- graph = BuildSSAGraph1(&allocator, &bounds_check, 1, 1);
- graph->BuildDominatorTree();
- graph->AnalyzeNaturalLoops();
- RunSimplifierAndGvn(graph);
- BoundsCheckElimination bounds_check_elimination_with_initial_1(graph);
- bounds_check_elimination_with_initial_1.Run();
+ HInstruction* bounds_check = BuildSSAGraph1(graph_, &allocator_, 1, 1);
+ RunBCE();
ASSERT_TRUE(IsRemoved(bounds_check));
+}
+TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination1c) {
// for (int i=-1; i<array.length; i++) { array[i] = 10; // Can't eliminate. }
- graph = BuildSSAGraph1(&allocator, &bounds_check, -1, 1);
- graph->BuildDominatorTree();
- graph->AnalyzeNaturalLoops();
- RunSimplifierAndGvn(graph);
- BoundsCheckElimination bounds_check_elimination_with_initial_minus_1(graph);
- bounds_check_elimination_with_initial_minus_1.Run();
+ HInstruction* bounds_check = BuildSSAGraph1(graph_, &allocator_, -1, 1);
+ RunBCE();
ASSERT_FALSE(IsRemoved(bounds_check));
+}
+TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination1d) {
// for (int i=0; i<=array.length; i++) { array[i] = 10; // Can't eliminate. }
- graph = BuildSSAGraph1(&allocator, &bounds_check, 0, 1, kCondGT);
- graph->BuildDominatorTree();
- graph->AnalyzeNaturalLoops();
- RunSimplifierAndGvn(graph);
- BoundsCheckElimination bounds_check_elimination_with_greater_than(graph);
- bounds_check_elimination_with_greater_than.Run();
+ HInstruction* bounds_check = BuildSSAGraph1(graph_, &allocator_, 0, 1, kCondGT);
+ RunBCE();
ASSERT_FALSE(IsRemoved(bounds_check));
+}
+TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination1e) {
// for (int i=0; i<array.length; i += 2) {
// array[i] = 10; // Can't eliminate due to overflow concern. }
- graph = BuildSSAGraph1(&allocator, &bounds_check, 0, 2);
- graph->BuildDominatorTree();
- graph->AnalyzeNaturalLoops();
- RunSimplifierAndGvn(graph);
- BoundsCheckElimination bounds_check_elimination_with_increment_2(graph);
- bounds_check_elimination_with_increment_2.Run();
+ HInstruction* bounds_check = BuildSSAGraph1(graph_, &allocator_, 0, 2);
+ RunBCE();
ASSERT_FALSE(IsRemoved(bounds_check));
+}
+TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination1f) {
// for (int i=1; i<array.length; i += 2) { array[i] = 10; // Can eliminate. }
- graph = BuildSSAGraph1(&allocator, &bounds_check, 1, 2);
- graph->BuildDominatorTree();
- graph->AnalyzeNaturalLoops();
- RunSimplifierAndGvn(graph);
- BoundsCheckElimination bounds_check_elimination_with_increment_2_from_1(graph);
- bounds_check_elimination_with_increment_2_from_1.Run();
+ HInstruction* bounds_check = BuildSSAGraph1(graph_, &allocator_, 1, 2);
+ RunBCE();
ASSERT_TRUE(IsRemoved(bounds_check));
}
// for (int i=array.length; i>0; i+=increment) { array[i-1] = 10; }
-static HGraph* BuildSSAGraph2(ArenaAllocator* allocator,
- HInstruction** bounds_check,
- int initial,
- int increment = -1,
- IfCondition cond = kCondLE) {
- HGraph* graph = CreateGraph(allocator);
- graph->SetHasBoundsChecks(true);
-
+static HInstruction* BuildSSAGraph2(HGraph *graph,
+ ArenaAllocator* allocator,
+ int initial,
+ int increment = -1,
+ IfCondition cond = kCondLE) {
HBasicBlock* entry = new (allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
@@ -551,14 +523,14 @@ static HGraph* BuildSSAGraph2(ArenaAllocator* allocator,
HInstruction* add = new (allocator) HAdd(Primitive::kPrimInt, phi, constant_minus_1);
null_check = new (allocator) HNullCheck(parameter, 0);
array_length = new (allocator) HArrayLength(null_check);
- *bounds_check = new (allocator) HBoundsCheck(add, array_length, 0);
+ HInstruction* bounds_check = new (allocator) HBoundsCheck(add, array_length, 0);
HInstruction* array_set = new (allocator) HArraySet(
- null_check, *bounds_check, constant_10, Primitive::kPrimInt, 0);
+ null_check, bounds_check, constant_10, Primitive::kPrimInt, 0);
HInstruction* add_phi = new (allocator) HAdd(Primitive::kPrimInt, phi, constant_increment);
loop_body->AddInstruction(add);
loop_body->AddInstruction(null_check);
loop_body->AddInstruction(array_length);
- loop_body->AddInstruction(*bounds_check);
+ loop_body->AddInstruction(bounds_check);
loop_body->AddInstruction(array_set);
loop_body->AddInstruction(add_phi);
loop_body->AddInstruction(new (allocator) HGoto());
@@ -566,70 +538,51 @@ static HGraph* BuildSSAGraph2(ArenaAllocator* allocator,
exit->AddInstruction(new (allocator) HExit());
- return graph;
+ return bounds_check;
}
-TEST(BoundsCheckEliminationTest, LoopArrayBoundsElimination2) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
+TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination2a) {
// for (int i=array.length; i>0; i--) { array[i-1] = 10; // Can eliminate with gvn. }
- HInstruction* bounds_check = nullptr;
- HGraph* graph = BuildSSAGraph2(&allocator, &bounds_check, 0);
- graph->BuildDominatorTree();
- graph->AnalyzeNaturalLoops();
- RunSimplifierAndGvn(graph);
- BoundsCheckElimination bounds_check_elimination(graph);
- bounds_check_elimination.Run();
+ HInstruction* bounds_check = BuildSSAGraph2(graph_, &allocator_, 0);
+ RunBCE();
ASSERT_TRUE(IsRemoved(bounds_check));
+}
+TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination2b) {
// for (int i=array.length; i>1; i--) { array[i-1] = 10; // Can eliminate. }
- graph = BuildSSAGraph2(&allocator, &bounds_check, 1);
- graph->BuildDominatorTree();
- graph->AnalyzeNaturalLoops();
- RunSimplifierAndGvn(graph);
- BoundsCheckElimination bounds_check_elimination_with_initial_1(graph);
- bounds_check_elimination_with_initial_1.Run();
+ HInstruction* bounds_check = BuildSSAGraph2(graph_, &allocator_, 1);
+ RunBCE();
ASSERT_TRUE(IsRemoved(bounds_check));
+}
+TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination2c) {
// for (int i=array.length; i>-1; i--) { array[i-1] = 10; // Can't eliminate. }
- graph = BuildSSAGraph2(&allocator, &bounds_check, -1);
- graph->BuildDominatorTree();
- graph->AnalyzeNaturalLoops();
- RunSimplifierAndGvn(graph);
- BoundsCheckElimination bounds_check_elimination_with_initial_minus_1(graph);
- bounds_check_elimination_with_initial_minus_1.Run();
+ HInstruction* bounds_check = BuildSSAGraph2(graph_, &allocator_, -1);
+ RunBCE();
ASSERT_FALSE(IsRemoved(bounds_check));
+}
+TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination2d) {
// for (int i=array.length; i>=0; i--) { array[i-1] = 10; // Can't eliminate. }
- graph = BuildSSAGraph2(&allocator, &bounds_check, 0, -1, kCondLT);
- graph->BuildDominatorTree();
- graph->AnalyzeNaturalLoops();
- RunSimplifierAndGvn(graph);
- BoundsCheckElimination bounds_check_elimination_with_less_than(graph);
- bounds_check_elimination_with_less_than.Run();
+ HInstruction* bounds_check = BuildSSAGraph2(graph_, &allocator_, 0, -1, kCondLT);
+ RunBCE();
ASSERT_FALSE(IsRemoved(bounds_check));
+}
+TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination2e) {
// for (int i=array.length; i>0; i-=2) { array[i-1] = 10; // Can eliminate. }
- graph = BuildSSAGraph2(&allocator, &bounds_check, 0, -2);
- graph->BuildDominatorTree();
- graph->AnalyzeNaturalLoops();
- RunSimplifierAndGvn(graph);
- BoundsCheckElimination bounds_check_elimination_increment_minus_2(graph);
- bounds_check_elimination_increment_minus_2.Run();
+ HInstruction* bounds_check = BuildSSAGraph2(graph_, &allocator_, 0, -2);
+ RunBCE();
ASSERT_TRUE(IsRemoved(bounds_check));
}
// int[] array = new int[10];
// for (int i=0; i<10; i+=increment) { array[i] = 10; }
-static HGraph* BuildSSAGraph3(ArenaAllocator* allocator,
- HInstruction** bounds_check,
- int initial,
- int increment,
- IfCondition cond) {
- HGraph* graph = CreateGraph(allocator);
- graph->SetHasBoundsChecks(true);
-
+static HInstruction* BuildSSAGraph3(HGraph* graph,
+ ArenaAllocator* allocator,
+ int initial,
+ int increment,
+ IfCondition cond) {
HBasicBlock* entry = new (allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
@@ -679,13 +632,13 @@ static HGraph* BuildSSAGraph3(ArenaAllocator* allocator,
HNullCheck* null_check = new (allocator) HNullCheck(new_array, 0);
HArrayLength* array_length = new (allocator) HArrayLength(null_check);
- *bounds_check = new (allocator) HBoundsCheck(phi, array_length, 0);
+ HInstruction* bounds_check = new (allocator) HBoundsCheck(phi, array_length, 0);
HInstruction* array_set = new (allocator) HArraySet(
- null_check, *bounds_check, constant_10, Primitive::kPrimInt, 0);
+ null_check, bounds_check, constant_10, Primitive::kPrimInt, 0);
HInstruction* add = new (allocator) HAdd(Primitive::kPrimInt, phi, constant_increment);
loop_body->AddInstruction(null_check);
loop_body->AddInstruction(array_length);
- loop_body->AddInstruction(*bounds_check);
+ loop_body->AddInstruction(bounds_check);
loop_body->AddInstruction(array_set);
loop_body->AddInstruction(add);
loop_body->AddInstruction(new (allocator) HGoto());
@@ -693,63 +646,46 @@ static HGraph* BuildSSAGraph3(ArenaAllocator* allocator,
exit->AddInstruction(new (allocator) HExit());
- return graph;
+ return bounds_check;
}
-TEST(BoundsCheckEliminationTest, LoopArrayBoundsElimination3) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
+TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination3a) {
// int[] array = new int[10];
// for (int i=0; i<10; i++) { array[i] = 10; // Can eliminate. }
- HInstruction* bounds_check = nullptr;
- HGraph* graph = BuildSSAGraph3(&allocator, &bounds_check, 0, 1, kCondGE);
- graph->BuildDominatorTree();
- graph->AnalyzeNaturalLoops();
- RunSimplifierAndGvn(graph);
- BoundsCheckElimination bounds_check_elimination(graph);
- bounds_check_elimination.Run();
+ HInstruction* bounds_check = BuildSSAGraph3(graph_, &allocator_, 0, 1, kCondGE);
+ RunBCE();
ASSERT_TRUE(IsRemoved(bounds_check));
+}
+TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination3b) {
// int[] array = new int[10];
// for (int i=1; i<10; i++) { array[i] = 10; // Can eliminate. }
- graph = BuildSSAGraph3(&allocator, &bounds_check, 1, 1, kCondGE);
- graph->BuildDominatorTree();
- graph->AnalyzeNaturalLoops();
- RunSimplifierAndGvn(graph);
- BoundsCheckElimination bounds_check_elimination_with_initial_1(graph);
- bounds_check_elimination_with_initial_1.Run();
+ HInstruction* bounds_check = BuildSSAGraph3(graph_, &allocator_, 1, 1, kCondGE);
+ RunBCE();
ASSERT_TRUE(IsRemoved(bounds_check));
+}
+TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination3c) {
// int[] array = new int[10];
// for (int i=0; i<=10; i++) { array[i] = 10; // Can't eliminate. }
- graph = BuildSSAGraph3(&allocator, &bounds_check, 0, 1, kCondGT);
- graph->BuildDominatorTree();
- graph->AnalyzeNaturalLoops();
- RunSimplifierAndGvn(graph);
- BoundsCheckElimination bounds_check_elimination_with_greater_than(graph);
- bounds_check_elimination_with_greater_than.Run();
+ HInstruction* bounds_check = BuildSSAGraph3(graph_, &allocator_, 0, 1, kCondGT);
+ RunBCE();
ASSERT_FALSE(IsRemoved(bounds_check));
+}
+TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination3d) {
// int[] array = new int[10];
// for (int i=1; i<10; i+=8) { array[i] = 10; // Can eliminate. }
- graph = BuildSSAGraph3(&allocator, &bounds_check, 1, 8, kCondGE);
- graph->BuildDominatorTree();
- graph->AnalyzeNaturalLoops();
- RunSimplifierAndGvn(graph);
- BoundsCheckElimination bounds_check_elimination_increment_8(graph);
- bounds_check_elimination_increment_8.Run();
+ HInstruction* bounds_check = BuildSSAGraph3(graph_, &allocator_, 1, 8, kCondGE);
+ RunBCE();
ASSERT_TRUE(IsRemoved(bounds_check));
}
// for (int i=initial; i<array.length; i++) { array[array.length-i-1] = 10; }
-static HGraph* BuildSSAGraph4(ArenaAllocator* allocator,
- HInstruction** bounds_check,
- int initial,
- IfCondition cond = kCondGE) {
- HGraph* graph = CreateGraph(allocator);
- graph->SetHasBoundsChecks(true);
-
+static HInstruction* BuildSSAGraph4(HGraph* graph,
+ ArenaAllocator* allocator,
+ int initial,
+ IfCondition cond = kCondGE) {
HBasicBlock* entry = new (allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
@@ -800,15 +736,15 @@ static HGraph* BuildSSAGraph4(ArenaAllocator* allocator,
HInstruction* sub = new (allocator) HSub(Primitive::kPrimInt, array_length, phi);
HInstruction* add_minus_1 = new (allocator)
HAdd(Primitive::kPrimInt, sub, constant_minus_1);
- *bounds_check = new (allocator) HBoundsCheck(add_minus_1, array_length, 0);
+ HInstruction* bounds_check = new (allocator) HBoundsCheck(add_minus_1, array_length, 0);
HInstruction* array_set = new (allocator) HArraySet(
- null_check, *bounds_check, constant_10, Primitive::kPrimInt, 0);
+ null_check, bounds_check, constant_10, Primitive::kPrimInt, 0);
HInstruction* add = new (allocator) HAdd(Primitive::kPrimInt, phi, constant_1);
loop_body->AddInstruction(null_check);
loop_body->AddInstruction(array_length);
loop_body->AddInstruction(sub);
loop_body->AddInstruction(add_minus_1);
- loop_body->AddInstruction(*bounds_check);
+ loop_body->AddInstruction(bounds_check);
loop_body->AddInstruction(array_set);
loop_body->AddInstruction(add);
loop_body->AddInstruction(new (allocator) HGoto());
@@ -816,39 +752,27 @@ static HGraph* BuildSSAGraph4(ArenaAllocator* allocator,
exit->AddInstruction(new (allocator) HExit());
- return graph;
+ return bounds_check;
}
-TEST(BoundsCheckEliminationTest, LoopArrayBoundsElimination4) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
+TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination4a) {
// for (int i=0; i<array.length; i++) { array[array.length-i-1] = 10; // Can eliminate with gvn. }
- HInstruction* bounds_check = nullptr;
- HGraph* graph = BuildSSAGraph4(&allocator, &bounds_check, 0);
- graph->BuildDominatorTree();
- graph->AnalyzeNaturalLoops();
- RunSimplifierAndGvn(graph);
- BoundsCheckElimination bounds_check_elimination(graph);
- bounds_check_elimination.Run();
+ HInstruction* bounds_check = BuildSSAGraph4(graph_, &allocator_, 0);
+ RunBCE();
ASSERT_TRUE(IsRemoved(bounds_check));
+}
+TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination4b) {
// for (int i=1; i<array.length; i++) { array[array.length-i-1] = 10; // Can eliminate. }
- graph = BuildSSAGraph4(&allocator, &bounds_check, 1);
- graph->BuildDominatorTree();
- graph->AnalyzeNaturalLoops();
- RunSimplifierAndGvn(graph);
- BoundsCheckElimination bounds_check_elimination_with_initial_1(graph);
- bounds_check_elimination_with_initial_1.Run();
+ HInstruction* bounds_check = BuildSSAGraph4(graph_, &allocator_, 1);
+ RunBCE();
ASSERT_TRUE(IsRemoved(bounds_check));
+}
+TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination4c) {
// for (int i=0; i<=array.length; i++) { array[array.length-i] = 10; // Can't eliminate. }
- graph = BuildSSAGraph4(&allocator, &bounds_check, 0, kCondGT);
- graph->BuildDominatorTree();
- graph->AnalyzeNaturalLoops();
- RunSimplifierAndGvn(graph);
- BoundsCheckElimination bounds_check_elimination_with_greater_than(graph);
- bounds_check_elimination_with_greater_than.Run();
+ HInstruction* bounds_check = BuildSSAGraph4(graph_, &allocator_, 0, kCondGT);
+ RunBCE();
ASSERT_FALSE(IsRemoved(bounds_check));
}
@@ -863,40 +787,34 @@ TEST(BoundsCheckEliminationTest, LoopArrayBoundsElimination4) {
// }
// }
// }
-TEST(BoundsCheckEliminationTest, BubbleSortArrayBoundsElimination) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateGraph(&allocator);
- graph->SetHasBoundsChecks(true);
-
- HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(entry);
- graph->SetEntryBlock(entry);
- HInstruction* parameter = new (&allocator) HParameterValue(0, Primitive::kPrimNot);
+TEST_F(BoundsCheckEliminationTest, BubbleSortArrayBoundsElimination) {
+ HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(entry);
+ graph_->SetEntryBlock(entry);
+ HInstruction* parameter = new (&allocator_) HParameterValue(0, Primitive::kPrimNot);
entry->AddInstruction(parameter);
- HInstruction* constant_0 = graph->GetIntConstant(0);
- HInstruction* constant_minus_1 = graph->GetIntConstant(-1);
- HInstruction* constant_1 = graph->GetIntConstant(1);
+ HInstruction* constant_0 = graph_->GetIntConstant(0);
+ HInstruction* constant_minus_1 = graph_->GetIntConstant(-1);
+ HInstruction* constant_1 = graph_->GetIntConstant(1);
- HBasicBlock* block = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(block);
+ HBasicBlock* block = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(block);
entry->AddSuccessor(block);
- block->AddInstruction(new (&allocator) HGoto());
-
- HBasicBlock* exit = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(exit);
- exit->AddInstruction(new (&allocator) HExit());
-
- HBasicBlock* outer_header = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(outer_header);
- HPhi* phi_i = new (&allocator) HPhi(&allocator, 0, 0, Primitive::kPrimInt);
- HNullCheck* null_check = new (&allocator) HNullCheck(parameter, 0);
- HArrayLength* array_length = new (&allocator) HArrayLength(null_check);
- HAdd* add = new (&allocator) HAdd(Primitive::kPrimInt, array_length, constant_minus_1);
- HInstruction* cmp = new (&allocator) HGreaterThanOrEqual(phi_i, add);
- HIf* if_inst = new (&allocator) HIf(cmp);
+ block->AddInstruction(new (&allocator_) HGoto());
+
+ HBasicBlock* exit = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(exit);
+ exit->AddInstruction(new (&allocator_) HExit());
+
+ HBasicBlock* outer_header = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(outer_header);
+ HPhi* phi_i = new (&allocator_) HPhi(&allocator_, 0, 0, Primitive::kPrimInt);
+ HNullCheck* null_check = new (&allocator_) HNullCheck(parameter, 0);
+ HArrayLength* array_length = new (&allocator_) HArrayLength(null_check);
+ HAdd* add = new (&allocator_) HAdd(Primitive::kPrimInt, array_length, constant_minus_1);
+ HInstruction* cmp = new (&allocator_) HGreaterThanOrEqual(phi_i, add);
+ HIf* if_inst = new (&allocator_) HIf(cmp);
outer_header->AddPhi(phi_i);
outer_header->AddInstruction(null_check);
outer_header->AddInstruction(array_length);
@@ -905,15 +823,15 @@ TEST(BoundsCheckEliminationTest, BubbleSortArrayBoundsElimination) {
outer_header->AddInstruction(if_inst);
phi_i->AddInput(constant_0);
- HBasicBlock* inner_header = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(inner_header);
- HPhi* phi_j = new (&allocator) HPhi(&allocator, 0, 0, Primitive::kPrimInt);
- null_check = new (&allocator) HNullCheck(parameter, 0);
- array_length = new (&allocator) HArrayLength(null_check);
- HSub* sub = new (&allocator) HSub(Primitive::kPrimInt, array_length, phi_i);
- add = new (&allocator) HAdd(Primitive::kPrimInt, sub, constant_minus_1);
- cmp = new (&allocator) HGreaterThanOrEqual(phi_j, add);
- if_inst = new (&allocator) HIf(cmp);
+ HBasicBlock* inner_header = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(inner_header);
+ HPhi* phi_j = new (&allocator_) HPhi(&allocator_, 0, 0, Primitive::kPrimInt);
+ null_check = new (&allocator_) HNullCheck(parameter, 0);
+ array_length = new (&allocator_) HArrayLength(null_check);
+ HSub* sub = new (&allocator_) HSub(Primitive::kPrimInt, array_length, phi_i);
+ add = new (&allocator_) HAdd(Primitive::kPrimInt, sub, constant_minus_1);
+ cmp = new (&allocator_) HGreaterThanOrEqual(phi_j, add);
+ if_inst = new (&allocator_) HIf(cmp);
inner_header->AddPhi(phi_j);
inner_header->AddInstruction(null_check);
inner_header->AddInstruction(array_length);
@@ -923,25 +841,25 @@ TEST(BoundsCheckEliminationTest, BubbleSortArrayBoundsElimination) {
inner_header->AddInstruction(if_inst);
phi_j->AddInput(constant_0);
- HBasicBlock* inner_body_compare = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(inner_body_compare);
- null_check = new (&allocator) HNullCheck(parameter, 0);
- array_length = new (&allocator) HArrayLength(null_check);
- HBoundsCheck* bounds_check1 = new (&allocator) HBoundsCheck(phi_j, array_length, 0);
- HArrayGet* array_get_j = new (&allocator)
+ HBasicBlock* inner_body_compare = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(inner_body_compare);
+ null_check = new (&allocator_) HNullCheck(parameter, 0);
+ array_length = new (&allocator_) HArrayLength(null_check);
+ HBoundsCheck* bounds_check1 = new (&allocator_) HBoundsCheck(phi_j, array_length, 0);
+ HArrayGet* array_get_j = new (&allocator_)
HArrayGet(null_check, bounds_check1, Primitive::kPrimInt);
inner_body_compare->AddInstruction(null_check);
inner_body_compare->AddInstruction(array_length);
inner_body_compare->AddInstruction(bounds_check1);
inner_body_compare->AddInstruction(array_get_j);
- HInstruction* j_plus_1 = new (&allocator) HAdd(Primitive::kPrimInt, phi_j, constant_1);
- null_check = new (&allocator) HNullCheck(parameter, 0);
- array_length = new (&allocator) HArrayLength(null_check);
- HBoundsCheck* bounds_check2 = new (&allocator) HBoundsCheck(j_plus_1, array_length, 0);
- HArrayGet* array_get_j_plus_1 = new (&allocator)
+ HInstruction* j_plus_1 = new (&allocator_) HAdd(Primitive::kPrimInt, phi_j, constant_1);
+ null_check = new (&allocator_) HNullCheck(parameter, 0);
+ array_length = new (&allocator_) HArrayLength(null_check);
+ HBoundsCheck* bounds_check2 = new (&allocator_) HBoundsCheck(j_plus_1, array_length, 0);
+ HArrayGet* array_get_j_plus_1 = new (&allocator_)
HArrayGet(null_check, bounds_check2, Primitive::kPrimInt);
- cmp = new (&allocator) HGreaterThanOrEqual(array_get_j, array_get_j_plus_1);
- if_inst = new (&allocator) HIf(cmp);
+ cmp = new (&allocator_) HGreaterThanOrEqual(array_get_j, array_get_j_plus_1);
+ if_inst = new (&allocator_) HIf(cmp);
inner_body_compare->AddInstruction(j_plus_1);
inner_body_compare->AddInstruction(null_check);
inner_body_compare->AddInstruction(array_length);
@@ -950,14 +868,14 @@ TEST(BoundsCheckEliminationTest, BubbleSortArrayBoundsElimination) {
inner_body_compare->AddInstruction(cmp);
inner_body_compare->AddInstruction(if_inst);
- HBasicBlock* inner_body_swap = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(inner_body_swap);
- j_plus_1 = new (&allocator) HAdd(Primitive::kPrimInt, phi_j, constant_1);
+ HBasicBlock* inner_body_swap = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(inner_body_swap);
+ j_plus_1 = new (&allocator_) HAdd(Primitive::kPrimInt, phi_j, constant_1);
// temp = array[j+1]
- null_check = new (&allocator) HNullCheck(parameter, 0);
- array_length = new (&allocator) HArrayLength(null_check);
- HInstruction* bounds_check3 = new (&allocator) HBoundsCheck(j_plus_1, array_length, 0);
- array_get_j_plus_1 = new (&allocator)
+ null_check = new (&allocator_) HNullCheck(parameter, 0);
+ array_length = new (&allocator_) HArrayLength(null_check);
+ HInstruction* bounds_check3 = new (&allocator_) HBoundsCheck(j_plus_1, array_length, 0);
+ array_get_j_plus_1 = new (&allocator_)
HArrayGet(null_check, bounds_check3, Primitive::kPrimInt);
inner_body_swap->AddInstruction(j_plus_1);
inner_body_swap->AddInstruction(null_check);
@@ -965,48 +883,48 @@ TEST(BoundsCheckEliminationTest, BubbleSortArrayBoundsElimination) {
inner_body_swap->AddInstruction(bounds_check3);
inner_body_swap->AddInstruction(array_get_j_plus_1);
// array[j+1] = array[j]
- null_check = new (&allocator) HNullCheck(parameter, 0);
- array_length = new (&allocator) HArrayLength(null_check);
- HInstruction* bounds_check4 = new (&allocator) HBoundsCheck(phi_j, array_length, 0);
- array_get_j = new (&allocator)
+ null_check = new (&allocator_) HNullCheck(parameter, 0);
+ array_length = new (&allocator_) HArrayLength(null_check);
+ HInstruction* bounds_check4 = new (&allocator_) HBoundsCheck(phi_j, array_length, 0);
+ array_get_j = new (&allocator_)
HArrayGet(null_check, bounds_check4, Primitive::kPrimInt);
inner_body_swap->AddInstruction(null_check);
inner_body_swap->AddInstruction(array_length);
inner_body_swap->AddInstruction(bounds_check4);
inner_body_swap->AddInstruction(array_get_j);
- null_check = new (&allocator) HNullCheck(parameter, 0);
- array_length = new (&allocator) HArrayLength(null_check);
- HInstruction* bounds_check5 = new (&allocator) HBoundsCheck(j_plus_1, array_length, 0);
- HArraySet* array_set_j_plus_1 = new (&allocator)
+ null_check = new (&allocator_) HNullCheck(parameter, 0);
+ array_length = new (&allocator_) HArrayLength(null_check);
+ HInstruction* bounds_check5 = new (&allocator_) HBoundsCheck(j_plus_1, array_length, 0);
+ HArraySet* array_set_j_plus_1 = new (&allocator_)
HArraySet(null_check, bounds_check5, array_get_j, Primitive::kPrimInt, 0);
inner_body_swap->AddInstruction(null_check);
inner_body_swap->AddInstruction(array_length);
inner_body_swap->AddInstruction(bounds_check5);
inner_body_swap->AddInstruction(array_set_j_plus_1);
// array[j] = temp
- null_check = new (&allocator) HNullCheck(parameter, 0);
- array_length = new (&allocator) HArrayLength(null_check);
- HInstruction* bounds_check6 = new (&allocator) HBoundsCheck(phi_j, array_length, 0);
- HArraySet* array_set_j = new (&allocator)
+ null_check = new (&allocator_) HNullCheck(parameter, 0);
+ array_length = new (&allocator_) HArrayLength(null_check);
+ HInstruction* bounds_check6 = new (&allocator_) HBoundsCheck(phi_j, array_length, 0);
+ HArraySet* array_set_j = new (&allocator_)
HArraySet(null_check, bounds_check6, array_get_j_plus_1, Primitive::kPrimInt, 0);
inner_body_swap->AddInstruction(null_check);
inner_body_swap->AddInstruction(array_length);
inner_body_swap->AddInstruction(bounds_check6);
inner_body_swap->AddInstruction(array_set_j);
- inner_body_swap->AddInstruction(new (&allocator) HGoto());
+ inner_body_swap->AddInstruction(new (&allocator_) HGoto());
- HBasicBlock* inner_body_add = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(inner_body_add);
- add = new (&allocator) HAdd(Primitive::kPrimInt, phi_j, constant_1);
+ HBasicBlock* inner_body_add = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(inner_body_add);
+ add = new (&allocator_) HAdd(Primitive::kPrimInt, phi_j, constant_1);
inner_body_add->AddInstruction(add);
- inner_body_add->AddInstruction(new (&allocator) HGoto());
+ inner_body_add->AddInstruction(new (&allocator_) HGoto());
phi_j->AddInput(add);
- HBasicBlock* outer_body_add = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(outer_body_add);
- add = new (&allocator) HAdd(Primitive::kPrimInt, phi_i, constant_1);
+ HBasicBlock* outer_body_add = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(outer_body_add);
+ add = new (&allocator_) HAdd(Primitive::kPrimInt, phi_i, constant_1);
outer_body_add->AddInstruction(add);
- outer_body_add->AddInstruction(new (&allocator) HGoto());
+ outer_body_add->AddInstruction(new (&allocator_) HGoto());
phi_i->AddInput(add);
block->AddSuccessor(outer_header);
@@ -1020,19 +938,8 @@ TEST(BoundsCheckEliminationTest, BubbleSortArrayBoundsElimination) {
inner_body_add->AddSuccessor(inner_header);
outer_body_add->AddSuccessor(outer_header);
- graph->BuildDominatorTree();
- graph->AnalyzeNaturalLoops();
- RunSimplifierAndGvn(graph);
- // gvn should remove the same bounds check.
- ASSERT_FALSE(IsRemoved(bounds_check1));
- ASSERT_FALSE(IsRemoved(bounds_check2));
- ASSERT_TRUE(IsRemoved(bounds_check3));
- ASSERT_TRUE(IsRemoved(bounds_check4));
- ASSERT_TRUE(IsRemoved(bounds_check5));
- ASSERT_TRUE(IsRemoved(bounds_check6));
+ RunBCE(); // gvn removes same bounds check already
- BoundsCheckElimination bounds_check_elimination(graph);
- bounds_check_elimination.Run();
ASSERT_TRUE(IsRemoved(bounds_check1));
ASSERT_TRUE(IsRemoved(bounds_check2));
ASSERT_TRUE(IsRemoved(bounds_check3));
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 7a3aa58149..0a3f083e10 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -398,8 +398,8 @@ void HGraphBuilder::InsertTryBoundaryBlocks(const DexFile::CodeItem& code_item)
// Find predecessors which are not covered by the same TryItem range. Such
// edges enter the try block and will have a TryBoundary inserted.
- for (size_t i = 0; i < try_block->GetPredecessors().Size(); ++i) {
- HBasicBlock* predecessor = try_block->GetPredecessors().Get(i);
+ for (size_t i = 0; i < try_block->GetPredecessors().size(); ++i) {
+ HBasicBlock* predecessor = try_block->GetPredecessor(i);
if (predecessor->IsSingleTryBoundary()) {
// The edge was already split because of an exit from a neighbouring
// TryItem. We split it again and insert an entry point.
@@ -426,8 +426,7 @@ void HGraphBuilder::InsertTryBoundaryBlocks(const DexFile::CodeItem& code_item)
// Find successors which are not covered by the same TryItem range. Such
// edges exit the try block and will have a TryBoundary inserted.
- for (size_t i = 0; i < try_block->GetSuccessors().Size(); ++i) {
- HBasicBlock* successor = try_block->GetSuccessors().Get(i);
+ for (HBasicBlock* successor : try_block->GetSuccessors()) {
if (successor->IsCatchBlock()) {
// A catch block is always considered an entry point into its TryItem.
// We therefore assume this is an exit point, regardless of whether
@@ -479,6 +478,8 @@ bool HGraphBuilder::BuildGraph(const DexFile::CodeItem& code_item) {
graph_->SetEntryBlock(entry_block_);
graph_->SetExitBlock(exit_block_);
+ graph_->SetHasTryCatch(code_item.tries_size_ != 0);
+
InitializeLocals(code_item.registers_size_);
graph_->SetMaximumNumberOfOutVRegs(code_item.outs_size_);
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 1097adbaf3..3bbff6ae17 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -171,7 +171,7 @@ HBasicBlock* CodeGenerator::GetNextBlockToEmit() const {
HBasicBlock* CodeGenerator::FirstNonEmptyBlock(HBasicBlock* block) const {
while (block->IsSingleJump()) {
- block = block->GetSuccessors().Get(0);
+ block = block->GetSuccessor(0);
}
return block;
}
@@ -248,6 +248,12 @@ void CodeGenerator::CompileInternal(CodeAllocator* allocator, bool is_baseline)
GenerateSlowPaths();
+ // Emit catch stack maps at the end of the stack map stream as expected by the
+ // runtime exception handler.
+ if (!is_baseline && graph_->HasTryCatch()) {
+ RecordCatchBlockInfo();
+ }
+
// Finalize instructions in assember;
Finalize(allocator);
}
@@ -805,6 +811,73 @@ void CodeGenerator::RecordPcInfo(HInstruction* instruction,
stack_map_stream_.EndStackMapEntry();
}
+void CodeGenerator::RecordCatchBlockInfo() {
+ ArenaAllocator* arena = graph_->GetArena();
+
+ for (size_t i = 0, e = block_order_->Size(); i < e; ++i) {
+ HBasicBlock* block = block_order_->Get(i);
+ if (!block->IsCatchBlock()) {
+ continue;
+ }
+
+ uint32_t dex_pc = block->GetDexPc();
+ uint32_t num_vregs = graph_->GetNumberOfVRegs();
+ uint32_t inlining_depth = 0; // Inlining of catch blocks is not supported at the moment.
+ uint32_t native_pc = GetAddressOf(block);
+ uint32_t register_mask = 0; // Not used.
+
+ // The stack mask is not used, so we leave it empty.
+ ArenaBitVector* stack_mask = new (arena) ArenaBitVector(arena, 0, /* expandable */ true);
+
+ stack_map_stream_.BeginStackMapEntry(dex_pc,
+ native_pc,
+ register_mask,
+ stack_mask,
+ num_vregs,
+ inlining_depth);
+
+ HInstruction* current_phi = block->GetFirstPhi();
+ for (size_t vreg = 0; vreg < num_vregs; ++vreg) {
+ while (current_phi != nullptr && current_phi->AsPhi()->GetRegNumber() < vreg) {
+ HInstruction* next_phi = current_phi->GetNext();
+ DCHECK(next_phi == nullptr ||
+ current_phi->AsPhi()->GetRegNumber() <= next_phi->AsPhi()->GetRegNumber())
+ << "Phis need to be sorted by vreg number to keep this a linear-time loop.";
+ current_phi = next_phi;
+ }
+
+ if (current_phi == nullptr || current_phi->AsPhi()->GetRegNumber() != vreg) {
+ stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
+ } else {
+ Location location = current_phi->GetLiveInterval()->ToLocation();
+ switch (location.GetKind()) {
+ case Location::kStackSlot: {
+ stack_map_stream_.AddDexRegisterEntry(
+ DexRegisterLocation::Kind::kInStack, location.GetStackIndex());
+ break;
+ }
+ case Location::kDoubleStackSlot: {
+ stack_map_stream_.AddDexRegisterEntry(
+ DexRegisterLocation::Kind::kInStack, location.GetStackIndex());
+ stack_map_stream_.AddDexRegisterEntry(
+ DexRegisterLocation::Kind::kInStack, location.GetHighStackIndex(kVRegSize));
+ ++vreg;
+ DCHECK_LT(vreg, num_vregs);
+ break;
+ }
+ default: {
+ // All catch phis must be allocated to a stack slot.
+ LOG(FATAL) << "Unexpected kind " << location.GetKind();
+ UNREACHABLE();
+ }
+ }
+ }
+ }
+
+ stack_map_stream_.EndStackMapEntry();
+ }
+}
+
void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slow_path) {
if (environment == nullptr) return;
@@ -975,6 +1048,13 @@ void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slo
}
}
+bool CodeGenerator::IsImplicitNullCheckAllowed(HNullCheck* null_check) const {
+ return compiler_options_.GetImplicitNullChecks() &&
+ // Null checks which might throw into a catch block need to save live
+ // registers and therefore cannot be done implicitly.
+ !null_check->CanThrowIntoCatchBlock();
+}
+
bool CodeGenerator::CanMoveNullCheckToUser(HNullCheck* null_check) {
HInstruction* first_next_not_move = null_check->GetNextDisregardingMoves();
@@ -990,10 +1070,6 @@ void CodeGenerator::MaybeRecordImplicitNullCheck(HInstruction* instr) {
return;
}
- if (!compiler_options_.GetImplicitNullChecks()) {
- return;
- }
-
if (!instr->CanDoImplicitNullCheckOn(instr->InputAt(0))) {
return;
}
@@ -1005,9 +1081,11 @@ void CodeGenerator::MaybeRecordImplicitNullCheck(HInstruction* instr) {
// and needs to record the pc.
if (first_prev_not_move != nullptr && first_prev_not_move->IsNullCheck()) {
HNullCheck* null_check = first_prev_not_move->AsNullCheck();
- // TODO: The parallel moves modify the environment. Their changes need to be reverted
- // otherwise the stack maps at the throw point will not be correct.
- RecordPcInfo(null_check, null_check->GetDexPc());
+ if (IsImplicitNullCheckAllowed(null_check)) {
+ // TODO: The parallel moves modify the environment. Their changes need to be
+ // reverted otherwise the stack maps at the throw point will not be correct.
+ RecordPcInfo(null_check, null_check->GetDexPc());
+ }
}
}
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index b3c4d727e0..a93d07ad50 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -237,6 +237,17 @@ class CodeGenerator {
bool CanMoveNullCheckToUser(HNullCheck* null_check);
void MaybeRecordImplicitNullCheck(HInstruction* instruction);
+ // Records a stack map which the runtime might use to set catch phi values
+ // during exception delivery.
+ // TODO: Replace with a catch-entering instruction that records the environment.
+ void RecordCatchBlockInfo();
+
+ // Returns true if implicit null checks are allowed in the compiler options
+ // and if the null check is not inside a try block. We currently cannot do
+ // implicit null checks in that case because we need the NullCheckSlowPath to
+ // save live registers, which may be needed by the runtime to set catch phis.
+ bool IsImplicitNullCheckAllowed(HNullCheck* null_check) const;
+
void AddSlowPath(SlowPathCode* slow_path) {
slow_paths_.Add(slow_path);
}
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 438ef694bf..b3e38f0946 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -48,7 +48,7 @@ static constexpr Register kMethodRegisterArgument = R0;
// with baseline.
static constexpr Register kCoreSavedRegisterForBaseline = R5;
static constexpr Register kCoreCalleeSaves[] =
- { R5, R6, R7, R8, R10, R11, PC };
+ { R5, R6, R7, R8, R10, R11, LR };
static constexpr SRegister kFpuCalleeSaves[] =
{ S16, S17, S18, S19, S20, S21, S22, S23, S24, S25, S26, S27, S28, S29, S30, S31 };
@@ -66,6 +66,10 @@ class NullCheckSlowPathARM : public SlowPathCodeARM {
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
__ Bind(GetEntryLabel());
+ if (instruction_->CanThrowIntoCatchBlock()) {
+ // Live registers will be restored in the catch block if caught.
+ SaveLiveRegisters(codegen, instruction_->GetLocations());
+ }
arm_codegen->InvokeRuntime(
QUICK_ENTRY_POINT(pThrowNullPointer), instruction_, instruction_->GetDexPc(), this);
}
@@ -86,6 +90,10 @@ class DivZeroCheckSlowPathARM : public SlowPathCodeARM {
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
__ Bind(GetEntryLabel());
+ if (instruction_->CanThrowIntoCatchBlock()) {
+ // Live registers will be restored in the catch block if caught.
+ SaveLiveRegisters(codegen, instruction_->GetLocations());
+ }
arm_codegen->InvokeRuntime(
QUICK_ENTRY_POINT(pThrowDivZero), instruction_, instruction_->GetDexPc(), this);
}
@@ -150,6 +158,10 @@ class BoundsCheckSlowPathARM : public SlowPathCodeARM {
LocationSummary* locations = instruction_->GetLocations();
__ Bind(GetEntryLabel());
+ if (instruction_->CanThrowIntoCatchBlock()) {
+ // Live registers will be restored in the catch block if caught.
+ SaveLiveRegisters(codegen, instruction_->GetLocations());
+ }
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
InvokeRuntimeCallingConvention calling_convention;
@@ -409,8 +421,8 @@ CodeGeneratorARM::CodeGeneratorARM(HGraph* graph,
method_patches_(MethodReferenceComparator(), graph->GetArena()->Adapter()),
call_patches_(MethodReferenceComparator(), graph->GetArena()->Adapter()),
relative_call_patches_(graph->GetArena()->Adapter()) {
- // Save the PC register to mimic Quick.
- AddAllocatedRegister(Location::RegisterLocation(PC));
+ // Always save the LR register to mimic Quick.
+ AddAllocatedRegister(Location::RegisterLocation(LR));
}
void CodeGeneratorARM::Finalize(CodeAllocator* allocator) {
@@ -599,12 +611,9 @@ void CodeGeneratorARM::GenerateFrameEntry() {
RecordPcInfo(nullptr, 0);
}
- // PC is in the list of callee-save to mimic Quick, but we need to push
- // LR at entry instead.
- uint32_t push_mask = (core_spill_mask_ & (~(1 << PC))) | 1 << LR;
- __ PushList(push_mask);
- __ cfi().AdjustCFAOffset(kArmWordSize * POPCOUNT(push_mask));
- __ cfi().RelOffsetForMany(DWARFReg(kMethodRegisterArgument), 0, push_mask, kArmWordSize);
+ __ PushList(core_spill_mask_);
+ __ cfi().AdjustCFAOffset(kArmWordSize * POPCOUNT(core_spill_mask_));
+ __ cfi().RelOffsetForMany(DWARFReg(kMethodRegisterArgument), 0, core_spill_mask_, kArmWordSize);
if (fpu_spill_mask_ != 0) {
SRegister start_register = SRegister(LeastSignificantBit(fpu_spill_mask_));
__ vpushs(start_register, POPCOUNT(fpu_spill_mask_));
@@ -632,7 +641,10 @@ void CodeGeneratorARM::GenerateFrameExit() {
__ cfi().AdjustCFAOffset(-kArmPointerSize * POPCOUNT(fpu_spill_mask_));
__ cfi().RestoreMany(DWARFReg(SRegister(0)), fpu_spill_mask_);
}
- __ PopList(core_spill_mask_);
+ // Pop LR into PC to return.
+ DCHECK_NE(core_spill_mask_ & (1 << LR), 0U);
+ uint32_t pop_mask = (core_spill_mask_ & (~(1 << LR))) | 1 << PC;
+ __ PopList(pop_mask);
__ cfi().RestoreState();
__ cfi().DefCFAOffset(GetFrameSize());
}
@@ -2741,8 +2753,10 @@ void InstructionCodeGeneratorARM::VisitRem(HRem* rem) {
}
void LocationsBuilderARM::VisitDivZeroCheck(HDivZeroCheck* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
if (instruction->HasUses()) {
locations->SetOut(Location::SameAsFirstInput());
@@ -3495,8 +3509,10 @@ void InstructionCodeGeneratorARM::VisitStaticFieldSet(HStaticFieldSet* instructi
}
void LocationsBuilderARM::VisitNullCheck(HNullCheck* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RequiresRegister());
if (instruction->HasUses()) {
locations->SetOut(Location::SameAsFirstInput());
@@ -3524,7 +3540,7 @@ void InstructionCodeGeneratorARM::GenerateExplicitNullCheck(HNullCheck* instruct
}
void InstructionCodeGeneratorARM::VisitNullCheck(HNullCheck* instruction) {
- if (codegen_->GetCompilerOptions().GetImplicitNullChecks()) {
+ if (codegen_->IsImplicitNullCheckAllowed(instruction)) {
GenerateImplicitNullCheck(instruction);
} else {
GenerateExplicitNullCheck(instruction);
@@ -3863,8 +3879,10 @@ void InstructionCodeGeneratorARM::VisitArrayLength(HArrayLength* instruction) {
}
void LocationsBuilderARM::VisitBoundsCheck(HBoundsCheck* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
if (instruction->HasUses()) {
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 6b1457bc31..5094f6761a 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -198,6 +198,10 @@ class BoundsCheckSlowPathARM64 : public SlowPathCodeARM64 {
CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
__ Bind(GetEntryLabel());
+ if (instruction_->CanThrowIntoCatchBlock()) {
+ // Live registers will be restored in the catch block if caught.
+ SaveLiveRegisters(codegen, instruction_->GetLocations());
+ }
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
InvokeRuntimeCallingConvention calling_convention;
@@ -226,6 +230,10 @@ class DivZeroCheckSlowPathARM64 : public SlowPathCodeARM64 {
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
__ Bind(GetEntryLabel());
+ if (instruction_->CanThrowIntoCatchBlock()) {
+ // Live registers will be restored in the catch block if caught.
+ SaveLiveRegisters(codegen, instruction_->GetLocations());
+ }
arm64_codegen->InvokeRuntime(
QUICK_ENTRY_POINT(pThrowDivZero), instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
@@ -338,6 +346,10 @@ class NullCheckSlowPathARM64 : public SlowPathCodeARM64 {
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
__ Bind(GetEntryLabel());
+ if (instruction_->CanThrowIntoCatchBlock()) {
+ // Live registers will be restored in the catch block if caught.
+ SaveLiveRegisters(codegen, instruction_->GetLocations());
+ }
arm64_codegen->InvokeRuntime(
QUICK_ENTRY_POINT(pThrowNullPointer), instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
@@ -1580,8 +1592,10 @@ void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) {
}
void LocationsBuilderARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, ARM64EncodableConstantOrRegister(instruction->InputAt(1), instruction));
if (instruction->HasUses()) {
@@ -1977,8 +1991,10 @@ void InstructionCodeGeneratorARM64::VisitDiv(HDiv* div) {
}
void LocationsBuilderARM64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
if (instruction->HasUses()) {
locations->SetOut(Location::SameAsFirstInput());
@@ -2875,8 +2891,10 @@ void InstructionCodeGeneratorARM64::VisitBooleanNot(HBooleanNot* instruction) {
}
void LocationsBuilderARM64::VisitNullCheck(HNullCheck* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RequiresRegister());
if (instruction->HasUses()) {
locations->SetOut(Location::SameAsFirstInput());
@@ -2905,7 +2923,7 @@ void InstructionCodeGeneratorARM64::GenerateExplicitNullCheck(HNullCheck* instru
}
void InstructionCodeGeneratorARM64::VisitNullCheck(HNullCheck* instruction) {
- if (codegen_->GetCompilerOptions().GetImplicitNullChecks()) {
+ if (codegen_->IsImplicitNullCheckAllowed(instruction)) {
GenerateImplicitNullCheck(instruction);
} else {
GenerateExplicitNullCheck(instruction);
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 10942ef3a5..8d60026b41 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -118,6 +118,10 @@ class BoundsCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
LocationSummary* locations = instruction_->GetLocations();
CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
__ Bind(GetEntryLabel());
+ if (instruction_->CanThrowIntoCatchBlock()) {
+ // Live registers will be restored in the catch block if caught.
+ SaveLiveRegisters(codegen, instruction_->GetLocations());
+ }
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
InvokeRuntimeCallingConvention calling_convention;
@@ -151,6 +155,10 @@ class DivZeroCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
__ Bind(GetEntryLabel());
+ if (instruction_->CanThrowIntoCatchBlock()) {
+ // Live registers will be restored in the catch block if caught.
+ SaveLiveRegisters(codegen, instruction_->GetLocations());
+ }
mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowDivZero),
instruction_,
instruction_->GetDexPc(),
@@ -269,6 +277,10 @@ class NullCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
__ Bind(GetEntryLabel());
+ if (instruction_->CanThrowIntoCatchBlock()) {
+ // Live registers will be restored in the catch block if caught.
+ SaveLiveRegisters(codegen, instruction_->GetLocations());
+ }
mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowNullPointer),
instruction_,
instruction_->GetDexPc(),
@@ -1566,8 +1578,10 @@ void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) {
}
void LocationsBuilderMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
if (instruction->HasUses()) {
@@ -1862,8 +1876,10 @@ void InstructionCodeGeneratorMIPS64::VisitDiv(HDiv* instruction) {
}
void LocationsBuilderMIPS64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
if (instruction->HasUses()) {
locations->SetOut(Location::SameAsFirstInput());
@@ -2824,8 +2840,10 @@ void InstructionCodeGeneratorMIPS64::VisitBooleanNot(HBooleanNot* instruction) {
}
void LocationsBuilderMIPS64::VisitNullCheck(HNullCheck* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RequiresRegister());
if (instruction->HasUses()) {
locations->SetOut(Location::SameAsFirstInput());
@@ -2852,7 +2870,7 @@ void InstructionCodeGeneratorMIPS64::GenerateExplicitNullCheck(HNullCheck* instr
}
void InstructionCodeGeneratorMIPS64::VisitNullCheck(HNullCheck* instruction) {
- if (codegen_->GetCompilerOptions().GetImplicitNullChecks()) {
+ if (codegen_->IsImplicitNullCheckAllowed(instruction)) {
GenerateImplicitNullCheck(instruction);
} else {
GenerateExplicitNullCheck(instruction);
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index a5ad226e0b..9713d6a9c9 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -56,6 +56,10 @@ class NullCheckSlowPathX86 : public SlowPathCodeX86 {
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
+ if (instruction_->CanThrowIntoCatchBlock()) {
+ // Live registers will be restored in the catch block if caught.
+ SaveLiveRegisters(codegen, instruction_->GetLocations());
+ }
x86_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowNullPointer),
instruction_,
instruction_->GetDexPc(),
@@ -78,6 +82,10 @@ class DivZeroCheckSlowPathX86 : public SlowPathCodeX86 {
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
+ if (instruction_->CanThrowIntoCatchBlock()) {
+ // Live registers will be restored in the catch block if caught.
+ SaveLiveRegisters(codegen, instruction_->GetLocations());
+ }
x86_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowDivZero),
instruction_,
instruction_->GetDexPc(),
@@ -125,6 +133,10 @@ class BoundsCheckSlowPathX86 : public SlowPathCodeX86 {
__ Bind(GetEntryLabel());
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
+ if (instruction_->CanThrowIntoCatchBlock()) {
+ // Live registers will be restored in the catch block if caught.
+ SaveLiveRegisters(codegen, instruction_->GetLocations());
+ }
InvokeRuntimeCallingConvention calling_convention;
x86_codegen->EmitParallelMoves(
locations->InAt(0),
@@ -1300,7 +1312,7 @@ void InstructionCodeGeneratorX86::VisitCondition(HCondition* cond) {
}
// Convert the jumps into the result.
- Label done_label;
+ NearLabel done_label;
// False case: result = 0.
__ Bind(&false_label);
@@ -1956,7 +1968,7 @@ void InstructionCodeGeneratorX86::VisitTypeConversion(HTypeConversion* conversio
XmmRegister input = in.AsFpuRegister<XmmRegister>();
Register output = out.AsRegister<Register>();
XmmRegister temp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
- Label done, nan;
+ NearLabel done, nan;
__ movl(output, Immediate(kPrimIntMax));
// temp = int-to-float(output)
@@ -1981,7 +1993,7 @@ void InstructionCodeGeneratorX86::VisitTypeConversion(HTypeConversion* conversio
XmmRegister input = in.AsFpuRegister<XmmRegister>();
Register output = out.AsRegister<Register>();
XmmRegister temp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
- Label done, nan;
+ NearLabel done, nan;
__ movl(output, Immediate(kPrimIntMax));
// temp = int-to-double(output)
@@ -2640,7 +2652,7 @@ void InstructionCodeGeneratorX86::GenerateRemFP(HRem *rem) {
PushOntoFPStack(first, 0, 2 * elem_size, /* is_fp */ true, is_wide);
// Loop doing FPREM until we stabilize.
- Label retry;
+ NearLabel retry;
__ Bind(&retry);
__ fprem();
@@ -2754,8 +2766,8 @@ void InstructionCodeGeneratorX86::GenerateDivRemWithAnyConstant(HBinaryOperation
int shift;
CalculateMagicAndShiftForDivRem(imm, false /* is_long */, &magic, &shift);
- Label ndiv;
- Label end;
+ NearLabel ndiv;
+ NearLabel end;
// If numerator is 0, the result is 0, no computation needed.
__ testl(eax, eax);
__ j(kNotEqual, &ndiv);
@@ -3039,8 +3051,10 @@ void InstructionCodeGeneratorX86::VisitRem(HRem* rem) {
}
void LocationsBuilderX86::VisitDivZeroCheck(HDivZeroCheck* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
switch (instruction->GetType()) {
case Primitive::kPrimByte:
case Primitive::kPrimChar:
@@ -3229,7 +3243,7 @@ void InstructionCodeGeneratorX86::GenerateShlLong(const Location& loc, int shift
}
void InstructionCodeGeneratorX86::GenerateShlLong(const Location& loc, Register shifter) {
- Label done;
+ NearLabel done;
__ shld(loc.AsRegisterPairHigh<Register>(), loc.AsRegisterPairLow<Register>(), shifter);
__ shll(loc.AsRegisterPairLow<Register>(), shifter);
__ testl(shifter, Immediate(32));
@@ -3261,7 +3275,7 @@ void InstructionCodeGeneratorX86::GenerateShrLong(const Location& loc, int shift
}
void InstructionCodeGeneratorX86::GenerateShrLong(const Location& loc, Register shifter) {
- Label done;
+ NearLabel done;
__ shrd(loc.AsRegisterPairLow<Register>(), loc.AsRegisterPairHigh<Register>(), shifter);
__ sarl(loc.AsRegisterPairHigh<Register>(), shifter);
__ testl(shifter, Immediate(32));
@@ -3296,7 +3310,7 @@ void InstructionCodeGeneratorX86::GenerateUShrLong(const Location& loc, int shif
}
void InstructionCodeGeneratorX86::GenerateUShrLong(const Location& loc, Register shifter) {
- Label done;
+ NearLabel done;
__ shrd(loc.AsRegisterPairLow<Register>(), loc.AsRegisterPairHigh<Register>(), shifter);
__ shrl(loc.AsRegisterPairHigh<Register>(), shifter);
__ testl(shifter, Immediate(32));
@@ -3471,7 +3485,7 @@ void InstructionCodeGeneratorX86::VisitCompare(HCompare* compare) {
Location left = locations->InAt(0);
Location right = locations->InAt(1);
- Label less, greater, done;
+ NearLabel less, greater, done;
switch (compare->InputAt(0)->GetType()) {
case Primitive::kPrimLong: {
Register left_low = left.AsRegisterPairLow<Register>();
@@ -3695,7 +3709,7 @@ void CodeGeneratorX86::MarkGCCard(Register temp,
Register object,
Register value,
bool value_can_be_null) {
- Label is_null;
+ NearLabel is_null;
if (value_can_be_null) {
__ testl(value, value);
__ j(kEqual, &is_null);
@@ -3984,9 +3998,11 @@ void InstructionCodeGeneratorX86::VisitInstanceFieldGet(HInstanceFieldGet* instr
}
void LocationsBuilderX86::VisitNullCheck(HNullCheck* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
- Location loc = codegen_->GetCompilerOptions().GetImplicitNullChecks()
+ LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ Location loc = codegen_->IsImplicitNullCheckAllowed(instruction)
? Location::RequiresRegister()
: Location::Any();
locations->SetInAt(0, loc);
@@ -4019,7 +4035,7 @@ void InstructionCodeGeneratorX86::GenerateExplicitNullCheck(HNullCheck* instruct
__ cmpl(Address(ESP, obj.GetStackIndex()), Immediate(0));
} else {
DCHECK(obj.IsConstant()) << obj;
- DCHECK_EQ(obj.GetConstant()->AsIntConstant()->GetValue(), 0);
+ DCHECK(obj.GetConstant()->IsNullConstant());
__ jmp(slow_path->GetEntryLabel());
return;
}
@@ -4027,7 +4043,7 @@ void InstructionCodeGeneratorX86::GenerateExplicitNullCheck(HNullCheck* instruct
}
void InstructionCodeGeneratorX86::VisitNullCheck(HNullCheck* instruction) {
- if (codegen_->GetCompilerOptions().GetImplicitNullChecks()) {
+ if (codegen_->IsImplicitNullCheckAllowed(instruction)) {
GenerateImplicitNullCheck(instruction);
} else {
GenerateExplicitNullCheck(instruction);
@@ -4432,8 +4448,10 @@ void InstructionCodeGeneratorX86::VisitArrayLength(HArrayLength* instruction) {
}
void LocationsBuilderX86::VisitBoundsCheck(HBoundsCheck* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
if (instruction->HasUses()) {
@@ -4928,7 +4946,7 @@ void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) {
Location cls = locations->InAt(1);
Register out = locations->Out().AsRegister<Register>();
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
- Label done, zero;
+ NearLabel done, zero;
SlowPathCodeX86* slow_path = nullptr;
// Return 0 if `obj` is null.
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 0f3eb74c64..43a3e52a7f 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -57,6 +57,10 @@ class NullCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
+ if (instruction_->CanThrowIntoCatchBlock()) {
+ // Live registers will be restored in the catch block if caught.
+ SaveLiveRegisters(codegen, instruction_->GetLocations());
+ }
x64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowNullPointer),
instruction_,
instruction_->GetDexPc(),
@@ -79,6 +83,10 @@ class DivZeroCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
+ if (instruction_->CanThrowIntoCatchBlock()) {
+ // Live registers will be restored in the catch block if caught.
+ SaveLiveRegisters(codegen, instruction_->GetLocations());
+ }
x64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowDivZero),
instruction_,
instruction_->GetDexPc(),
@@ -177,6 +185,10 @@ class BoundsCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
LocationSummary* locations = instruction_->GetLocations();
CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
+ if (instruction_->CanThrowIntoCatchBlock()) {
+ // Live registers will be restored in the catch block if caught.
+ SaveLiveRegisters(codegen, instruction_->GetLocations());
+ }
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
InvokeRuntimeCallingConvention calling_convention;
@@ -1312,7 +1324,7 @@ void InstructionCodeGeneratorX86_64::VisitCondition(HCondition* cond) {
}
// Convert the jumps into the result.
- Label done_label;
+ NearLabel done_label;
// False case: result = 0.
__ Bind(&false_label);
@@ -1401,7 +1413,7 @@ void InstructionCodeGeneratorX86_64::VisitCompare(HCompare* compare) {
Location left = locations->InAt(0);
Location right = locations->InAt(1);
- Label less, greater, done;
+ NearLabel less, greater, done;
Primitive::Type type = compare->InputAt(0)->GetType();
switch (type) {
case Primitive::kPrimLong: {
@@ -2111,7 +2123,7 @@ void InstructionCodeGeneratorX86_64::VisitTypeConversion(HTypeConversion* conver
// Processing a Dex `float-to-int' instruction.
XmmRegister input = in.AsFpuRegister<XmmRegister>();
CpuRegister output = out.AsRegister<CpuRegister>();
- Label done, nan;
+ NearLabel done, nan;
__ movl(output, Immediate(kPrimIntMax));
// if input >= (float)INT_MAX goto done
@@ -2133,7 +2145,7 @@ void InstructionCodeGeneratorX86_64::VisitTypeConversion(HTypeConversion* conver
// Processing a Dex `double-to-int' instruction.
XmmRegister input = in.AsFpuRegister<XmmRegister>();
CpuRegister output = out.AsRegister<CpuRegister>();
- Label done, nan;
+ NearLabel done, nan;
__ movl(output, Immediate(kPrimIntMax));
// if input >= (double)INT_MAX goto done
@@ -2175,7 +2187,7 @@ void InstructionCodeGeneratorX86_64::VisitTypeConversion(HTypeConversion* conver
// Processing a Dex `float-to-long' instruction.
XmmRegister input = in.AsFpuRegister<XmmRegister>();
CpuRegister output = out.AsRegister<CpuRegister>();
- Label done, nan;
+ NearLabel done, nan;
codegen_->Load64BitValue(output, kPrimLongMax);
// if input >= (float)LONG_MAX goto done
@@ -2197,7 +2209,7 @@ void InstructionCodeGeneratorX86_64::VisitTypeConversion(HTypeConversion* conver
// Processing a Dex `double-to-long' instruction.
XmmRegister input = in.AsFpuRegister<XmmRegister>();
CpuRegister output = out.AsRegister<CpuRegister>();
- Label done, nan;
+ NearLabel done, nan;
codegen_->Load64BitValue(output, kPrimLongMax);
// if input >= (double)LONG_MAX goto done
@@ -2760,7 +2772,7 @@ void InstructionCodeGeneratorX86_64::GenerateRemFP(HRem *rem) {
PushOntoFPStack(first, 0, 2 * elem_size, is_float);
// Loop doing FPREM until we stabilize.
- Label retry;
+ NearLabel retry;
__ Bind(&retry);
__ fprem();
@@ -2914,8 +2926,8 @@ void InstructionCodeGeneratorX86_64::GenerateDivRemWithAnyConstant(HBinaryOperat
__ movl(numerator, eax);
- Label no_div;
- Label end;
+ NearLabel no_div;
+ NearLabel end;
__ testl(eax, eax);
__ j(kNotEqual, &no_div);
@@ -3194,8 +3206,10 @@ void InstructionCodeGeneratorX86_64::VisitRem(HRem* rem) {
}
void LocationsBuilderX86_64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::Any());
if (instruction->HasUses()) {
locations->SetOut(Location::SameAsFirstInput());
@@ -3748,9 +3762,11 @@ void InstructionCodeGeneratorX86_64::VisitStaticFieldSet(HStaticFieldSet* instru
}
void LocationsBuilderX86_64::VisitNullCheck(HNullCheck* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
- Location loc = codegen_->GetCompilerOptions().GetImplicitNullChecks()
+ LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ Location loc = codegen_->IsImplicitNullCheckAllowed(instruction)
? Location::RequiresRegister()
: Location::Any();
locations->SetInAt(0, loc);
@@ -3783,7 +3799,7 @@ void InstructionCodeGeneratorX86_64::GenerateExplicitNullCheck(HNullCheck* instr
__ cmpl(Address(CpuRegister(RSP), obj.GetStackIndex()), Immediate(0));
} else {
DCHECK(obj.IsConstant()) << obj;
- DCHECK_EQ(obj.GetConstant()->AsIntConstant()->GetValue(), 0);
+ DCHECK(obj.GetConstant()->IsNullConstant());
__ jmp(slow_path->GetEntryLabel());
return;
}
@@ -3791,7 +3807,7 @@ void InstructionCodeGeneratorX86_64::GenerateExplicitNullCheck(HNullCheck* instr
}
void InstructionCodeGeneratorX86_64::VisitNullCheck(HNullCheck* instruction) {
- if (codegen_->GetCompilerOptions().GetImplicitNullChecks()) {
+ if (codegen_->IsImplicitNullCheckAllowed(instruction)) {
GenerateImplicitNullCheck(instruction);
} else {
GenerateExplicitNullCheck(instruction);
@@ -4175,8 +4191,10 @@ void InstructionCodeGeneratorX86_64::VisitArrayLength(HArrayLength* instruction)
}
void LocationsBuilderX86_64::VisitBoundsCheck(HBoundsCheck* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
if (instruction->HasUses()) {
@@ -4229,7 +4247,7 @@ void CodeGeneratorX86_64::MarkGCCard(CpuRegister temp,
CpuRegister object,
CpuRegister value,
bool value_can_be_null) {
- Label is_null;
+ NearLabel is_null;
if (value_can_be_null) {
__ testl(value, value);
__ j(kEqual, &is_null);
@@ -4656,7 +4674,7 @@ void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) {
Location cls = locations->InAt(1);
CpuRegister out = locations->Out().AsRegister<CpuRegister>();
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
- Label done, zero;
+ NearLabel done, zero;
SlowPathCodeX86_64* slow_path = nullptr;
// Return 0 if `obj` is null.
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index 4fbb51d43c..72c67f5651 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -561,7 +561,7 @@ TEST(CodegenTest, NonMaterializedCondition) {
ASSERT_FALSE(equal->NeedsMaterialization());
auto hook_before_codegen = [](HGraph* graph_in) {
- HBasicBlock* block = graph_in->GetEntryBlock()->GetSuccessors().Get(0);
+ HBasicBlock* block = graph_in->GetEntryBlock()->GetSuccessor(0);
HParallelMove* move = new (graph_in->GetArena()) HParallelMove(graph_in->GetArena());
block->InsertInstructionBefore(move, block->GetLastInstruction());
};
@@ -667,7 +667,7 @@ TEST(CodegenTest, MaterializedCondition1) {
code_block->AddInstruction(&ret);
auto hook_before_codegen = [](HGraph* graph_in) {
- HBasicBlock* block = graph_in->GetEntryBlock()->GetSuccessors().Get(0);
+ HBasicBlock* block = graph_in->GetEntryBlock()->GetSuccessor(0);
HParallelMove* move = new (graph_in->GetArena()) HParallelMove(graph_in->GetArena());
block->InsertInstructionBefore(move, block->GetLastInstruction());
};
@@ -733,7 +733,7 @@ TEST(CodegenTest, MaterializedCondition2) {
if_false_block->AddInstruction(&ret_ge);
auto hook_before_codegen = [](HGraph* graph_in) {
- HBasicBlock* block = graph_in->GetEntryBlock()->GetSuccessors().Get(0);
+ HBasicBlock* block = graph_in->GetEntryBlock()->GetSuccessor(0);
HParallelMove* move = new (graph_in->GetArena()) HParallelMove(graph_in->GetArena());
block->InsertInstructionBefore(move, block->GetLastInstruction());
};
diff --git a/compiler/optimizing/dead_code_elimination.cc b/compiler/optimizing/dead_code_elimination.cc
index 50cbf5ca77..509478cfad 100644
--- a/compiler/optimizing/dead_code_elimination.cc
+++ b/compiler/optimizing/dead_code_elimination.cc
@@ -42,8 +42,8 @@ static void MarkReachableBlocks(HBasicBlock* block, ArenaBitVector* visited) {
MarkReachableBlocks(if_instruction->IfFalseSuccessor(), visited);
}
} else {
- for (size_t i = 0, e = block->GetSuccessors().Size(); i < e; ++i) {
- MarkReachableBlocks(block->GetSuccessors().Get(i), visited);
+ for (HBasicBlock* successor : block->GetSuccessors()) {
+ MarkReachableBlocks(successor, visited);
}
}
}
@@ -99,12 +99,12 @@ void HDeadCodeElimination::RemoveDeadBlocks() {
// Connect successive blocks created by dead branches. Order does not matter.
for (HReversePostOrderIterator it(*graph_); !it.Done();) {
HBasicBlock* block = it.Current();
- if (block->IsEntryBlock() || block->GetSuccessors().Size() != 1u) {
+ if (block->IsEntryBlock() || block->GetSuccessors().size() != 1u) {
it.Advance();
continue;
}
- HBasicBlock* successor = block->GetSuccessors().Get(0);
- if (successor->IsExitBlock() || successor->GetPredecessors().Size() != 1u) {
+ HBasicBlock* successor = block->GetSuccessor(0);
+ if (successor->IsExitBlock() || successor->GetPredecessors().size() != 1u) {
it.Advance();
continue;
}
diff --git a/compiler/optimizing/graph_checker.cc b/compiler/optimizing/graph_checker.cc
index 847d5a4e9e..074ed71025 100644
--- a/compiler/optimizing/graph_checker.cc
+++ b/compiler/optimizing/graph_checker.cc
@@ -29,19 +29,16 @@ void GraphChecker::VisitBasicBlock(HBasicBlock* block) {
current_block_ = block;
// Check consistency with respect to predecessors of `block`.
- const GrowableArray<HBasicBlock*>& predecessors = block->GetPredecessors();
std::map<HBasicBlock*, size_t> predecessors_count;
- for (size_t i = 0, e = predecessors.Size(); i < e; ++i) {
- HBasicBlock* p = predecessors.Get(i);
+ for (HBasicBlock* p : block->GetPredecessors()) {
++predecessors_count[p];
}
for (auto& pc : predecessors_count) {
HBasicBlock* p = pc.first;
size_t p_count_in_block_predecessors = pc.second;
- const GrowableArray<HBasicBlock*>& p_successors = p->GetSuccessors();
size_t block_count_in_p_successors = 0;
- for (size_t j = 0, f = p_successors.Size(); j < f; ++j) {
- if (p_successors.Get(j) == block) {
+ for (HBasicBlock* p_successor : p->GetSuccessors()) {
+ if (p_successor == block) {
++block_count_in_p_successors;
}
}
@@ -55,19 +52,16 @@ void GraphChecker::VisitBasicBlock(HBasicBlock* block) {
}
// Check consistency with respect to successors of `block`.
- const GrowableArray<HBasicBlock*>& successors = block->GetSuccessors();
std::map<HBasicBlock*, size_t> successors_count;
- for (size_t i = 0, e = successors.Size(); i < e; ++i) {
- HBasicBlock* s = successors.Get(i);
+ for (HBasicBlock* s : block->GetSuccessors()) {
++successors_count[s];
}
for (auto& sc : successors_count) {
HBasicBlock* s = sc.first;
size_t s_count_in_block_successors = sc.second;
- const GrowableArray<HBasicBlock*>& s_predecessors = s->GetPredecessors();
size_t block_count_in_s_predecessors = 0;
- for (size_t j = 0, f = s_predecessors.Size(); j < f; ++j) {
- if (s_predecessors.Get(j) == block) {
+ for (HBasicBlock* s_predecessor : s->GetPredecessors()) {
+ if (s_predecessor == block) {
++block_count_in_s_predecessors;
}
}
@@ -92,8 +86,7 @@ void GraphChecker::VisitBasicBlock(HBasicBlock* block) {
// Ensure that only Return(Void) and Throw jump to Exit. An exiting
// TryBoundary may be between a Throw and the Exit if the Throw is in a try.
if (block->IsExitBlock()) {
- for (size_t i = 0, e = block->GetPredecessors().Size(); i < e; ++i) {
- HBasicBlock* predecessor = block->GetPredecessors().Get(i);
+ for (HBasicBlock* predecessor : block->GetPredecessors()) {
if (predecessor->IsSingleTryBoundary()
&& !predecessor->GetLastInstruction()->AsTryBoundary()->IsEntry()) {
HBasicBlock* real_predecessor = predecessor->GetSinglePredecessor();
@@ -178,8 +171,7 @@ void GraphChecker::VisitTryBoundary(HTryBoundary* try_boundary) {
try_boundary->GetId(),
handler->GetBlockId()));
}
- if (current_block_->GetSuccessors().Contains(
- handler, /* start_from */ it.CurrentSuccessorIndex() + 1)) {
+ if (current_block_->HasSuccessor(handler, it.CurrentSuccessorIndex() + 1)) {
AddError(StringPrintf("Exception handler block %d of %s:%d is listed multiple times.",
handler->GetBlockId(),
try_boundary->DebugName(),
@@ -359,15 +351,15 @@ void SSAChecker::VisitBasicBlock(HBasicBlock* block) {
// never exceptional successors.
const size_t num_normal_successors = block->NumberOfNormalSuccessors();
for (size_t j = 0; j < num_normal_successors; ++j) {
- HBasicBlock* successor = block->GetSuccessors().Get(j);
+ HBasicBlock* successor = block->GetSuccessor(j);
if (successor->IsCatchBlock()) {
AddError(StringPrintf("Catch block %d is a normal successor of block %d.",
successor->GetBlockId(),
block->GetBlockId()));
}
}
- for (size_t j = num_normal_successors, e = block->GetSuccessors().Size(); j < e; ++j) {
- HBasicBlock* successor = block->GetSuccessors().Get(j);
+ for (size_t j = num_normal_successors, e = block->GetSuccessors().size(); j < e; ++j) {
+ HBasicBlock* successor = block->GetSuccessor(j);
if (!successor->IsCatchBlock()) {
AddError(StringPrintf("Normal block %d is an exceptional successor of block %d.",
successor->GetBlockId(),
@@ -381,8 +373,8 @@ void SSAChecker::VisitBasicBlock(HBasicBlock* block) {
// not accounted for.
if (block->NumberOfNormalSuccessors() > 1) {
for (size_t j = 0, e = block->NumberOfNormalSuccessors(); j < e; ++j) {
- HBasicBlock* successor = block->GetSuccessors().Get(j);
- if (successor->GetPredecessors().Size() > 1) {
+ HBasicBlock* successor = block->GetSuccessor(j);
+ if (successor->GetPredecessors().size() > 1) {
AddError(StringPrintf("Critical edge between blocks %d and %d.",
block->GetBlockId(),
successor->GetBlockId()));
@@ -390,17 +382,6 @@ void SSAChecker::VisitBasicBlock(HBasicBlock* block) {
}
}
- // Check Phi uniqueness (no two Phis with the same type refer to the same register).
- for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
- HPhi* phi = it.Current()->AsPhi();
- if (phi->GetNextEquivalentPhiWithSameType() != nullptr) {
- std::stringstream type_str;
- type_str << phi->GetType();
- AddError(StringPrintf("Equivalent phi (%d) found for VReg %d with type: %s",
- phi->GetId(), phi->GetRegNumber(), type_str.str().c_str()));
- }
- }
-
// Ensure try membership information is consistent.
if (block->IsCatchBlock()) {
if (block->IsTryBlock()) {
@@ -417,8 +398,7 @@ void SSAChecker::VisitBasicBlock(HBasicBlock* block) {
block->GetBlockId()));
}
} else {
- for (size_t i = 0; i < block->GetPredecessors().Size(); ++i) {
- HBasicBlock* predecessor = block->GetPredecessors().Get(i);
+ for (HBasicBlock* predecessor : block->GetPredecessors()) {
const HTryBoundary* incoming_try_entry = predecessor->ComputeTryEntryOfSuccessors();
if (block->IsTryBlock()) {
const HTryBoundary& stored_try_entry = block->GetTryCatchInformation()->GetTryEntry();
@@ -469,21 +449,21 @@ void SSAChecker::CheckLoop(HBasicBlock* loop_header) {
// Ensure the loop header has only one incoming branch and the remaining
// predecessors are back edges.
- size_t num_preds = loop_header->GetPredecessors().Size();
+ size_t num_preds = loop_header->GetPredecessors().size();
if (num_preds < 2) {
AddError(StringPrintf(
"Loop header %d has less than two predecessors: %zu.",
id,
num_preds));
} else {
- HBasicBlock* first_predecessor = loop_header->GetPredecessors().Get(0);
+ HBasicBlock* first_predecessor = loop_header->GetPredecessor(0);
if (loop_information->IsBackEdge(*first_predecessor)) {
AddError(StringPrintf(
"First predecessor of loop header %d is a back edge.",
id));
}
- for (size_t i = 1, e = loop_header->GetPredecessors().Size(); i < e; ++i) {
- HBasicBlock* predecessor = loop_header->GetPredecessors().Get(i);
+ for (size_t i = 1, e = loop_header->GetPredecessors().size(); i < e; ++i) {
+ HBasicBlock* predecessor = loop_header->GetPredecessor(i);
if (!loop_information->IsBackEdge(*predecessor)) {
AddError(StringPrintf(
"Loop header %d has multiple incoming (non back edge) blocks.",
@@ -586,6 +566,35 @@ static Primitive::Type PrimitiveKind(Primitive::Type type) {
}
}
+static bool IsSameSizeConstant(HInstruction* insn1, HInstruction* insn2) {
+ return insn1->IsConstant()
+ && insn2->IsConstant()
+ && Primitive::Is64BitType(insn1->GetType()) == Primitive::Is64BitType(insn2->GetType());
+}
+
+static bool IsConstantEquivalent(HInstruction* insn1, HInstruction* insn2, BitVector* visited) {
+ if (insn1->IsPhi() &&
+ insn1->AsPhi()->IsVRegEquivalentOf(insn2) &&
+ insn1->InputCount() == insn2->InputCount()) {
+ // Testing only one of the two inputs for recursion is sufficient.
+ if (visited->IsBitSet(insn1->GetId())) {
+ return true;
+ }
+ visited->SetBit(insn1->GetId());
+
+ for (size_t i = 0, e = insn1->InputCount(); i < e; ++i) {
+ if (!IsConstantEquivalent(insn1->InputAt(i), insn2->InputAt(i), visited)) {
+ return false;
+ }
+ }
+ return true;
+ } else if (IsSameSizeConstant(insn1, insn2)) {
+ return insn1->AsConstant()->GetValueAsUint64() == insn2->AsConstant()->GetValueAsUint64();
+ } else {
+ return false;
+ }
+}
+
void SSAChecker::VisitPhi(HPhi* phi) {
VisitInstruction(phi);
@@ -621,20 +630,19 @@ void SSAChecker::VisitPhi(HPhi* phi) {
} else {
// Ensure the number of inputs of a non-catch phi is the same as the number
// of its predecessors.
- const GrowableArray<HBasicBlock*>& predecessors =
- phi->GetBlock()->GetPredecessors();
- if (phi->InputCount() != predecessors.Size()) {
+ const ArenaVector<HBasicBlock*>& predecessors = phi->GetBlock()->GetPredecessors();
+ if (phi->InputCount() != predecessors.size()) {
AddError(StringPrintf(
"Phi %d in block %d has %zu inputs, "
"but block %d has %zu predecessors.",
phi->GetId(), phi->GetBlock()->GetBlockId(), phi->InputCount(),
- phi->GetBlock()->GetBlockId(), predecessors.Size()));
+ phi->GetBlock()->GetBlockId(), predecessors.size()));
} else {
// Ensure phi input at index I either comes from the Ith
// predecessor or from a block that dominates this predecessor.
for (size_t i = 0, e = phi->InputCount(); i < e; ++i) {
HInstruction* input = phi->InputAt(i);
- HBasicBlock* predecessor = predecessors.Get(i);
+ HBasicBlock* predecessor = predecessors[i];
if (!(input->GetBlock() == predecessor
|| input->GetBlock()->Dominates(predecessor))) {
AddError(StringPrintf(
@@ -646,6 +654,45 @@ void SSAChecker::VisitPhi(HPhi* phi) {
}
}
}
+
+ // Ensure that catch phis are sorted by their vreg number, as required by
+ // the register allocator and code generator. This does not apply to normal
+ // phis which can be constructed artificially.
+ if (phi->IsCatchPhi()) {
+ HInstruction* next_phi = phi->GetNext();
+ if (next_phi != nullptr && phi->GetRegNumber() > next_phi->AsPhi()->GetRegNumber()) {
+ AddError(StringPrintf("Catch phis %d and %d in block %d are not sorted by their "
+ "vreg numbers.",
+ phi->GetId(),
+ next_phi->GetId(),
+ phi->GetBlock()->GetBlockId()));
+ }
+ }
+
+ // Test phi equivalents. There should not be two of the same type and they
+ // should only be created for constants which were untyped in DEX.
+ for (HInstructionIterator phi_it(phi->GetBlock()->GetPhis()); !phi_it.Done(); phi_it.Advance()) {
+ HPhi* other_phi = phi_it.Current()->AsPhi();
+ if (phi != other_phi && phi->GetRegNumber() == other_phi->GetRegNumber()) {
+ if (phi->GetType() == other_phi->GetType()) {
+ std::stringstream type_str;
+ type_str << phi->GetType();
+ AddError(StringPrintf("Equivalent phi (%d) found for VReg %d with type: %s.",
+ phi->GetId(),
+ phi->GetRegNumber(),
+ type_str.str().c_str()));
+ } else {
+ ArenaBitVector visited(GetGraph()->GetArena(), 0, /* expandable */ true);
+ if (!IsConstantEquivalent(phi, other_phi, &visited)) {
+ AddError(StringPrintf("Two phis (%d and %d) found for VReg %d but they "
+ "are not equivalents of constants.",
+ phi->GetId(),
+ other_phi->GetId(),
+ phi->GetRegNumber()));
+ }
+ }
+ }
+ }
}
void SSAChecker::HandleBooleanInput(HInstruction* instruction, size_t input_index) {
diff --git a/compiler/optimizing/graph_test.cc b/compiler/optimizing/graph_test.cc
index 59d50926ad..7968e88117 100644
--- a/compiler/optimizing/graph_test.cc
+++ b/compiler/optimizing/graph_test.cc
@@ -99,7 +99,7 @@ TEST(GraphTest, IfSuccessorSimpleJoinBlock1) {
ASSERT_NE(false_block, return_block);
// Ensure the new block branches to the join block.
- ASSERT_EQ(false_block->GetSuccessors().Get(0), return_block);
+ ASSERT_EQ(false_block->GetSuccessor(0), return_block);
}
// Test that the successors of an if block stay consistent after a SimplifyCFG.
@@ -134,7 +134,7 @@ TEST(GraphTest, IfSuccessorSimpleJoinBlock2) {
ASSERT_NE(true_block, return_block);
// Ensure the new block branches to the join block.
- ASSERT_EQ(true_block->GetSuccessors().Get(0), return_block);
+ ASSERT_EQ(true_block->GetSuccessor(0), return_block);
}
// Test that the successors of an if block stay consistent after a SimplifyCFG.
@@ -163,12 +163,12 @@ TEST(GraphTest, IfSuccessorMultipleBackEdges1) {
ASSERT_EQ(if_block->GetLastInstruction()->AsIf()->IfFalseSuccessor(), return_block);
// Ensure there is only one back edge.
- ASSERT_EQ(if_block->GetPredecessors().Size(), 2u);
- ASSERT_EQ(if_block->GetPredecessors().Get(0), entry_block);
- ASSERT_NE(if_block->GetPredecessors().Get(1), if_block);
+ ASSERT_EQ(if_block->GetPredecessors().size(), 2u);
+ ASSERT_EQ(if_block->GetPredecessor(0), entry_block);
+ ASSERT_NE(if_block->GetPredecessor(1), if_block);
// Ensure the new block is the back edge.
- ASSERT_EQ(if_block->GetPredecessors().Get(1),
+ ASSERT_EQ(if_block->GetPredecessor(1),
if_block->GetLastInstruction()->AsIf()->IfTrueSuccessor());
}
@@ -198,12 +198,12 @@ TEST(GraphTest, IfSuccessorMultipleBackEdges2) {
ASSERT_EQ(if_block->GetLastInstruction()->AsIf()->IfTrueSuccessor(), return_block);
// Ensure there is only one back edge.
- ASSERT_EQ(if_block->GetPredecessors().Size(), 2u);
- ASSERT_EQ(if_block->GetPredecessors().Get(0), entry_block);
- ASSERT_NE(if_block->GetPredecessors().Get(1), if_block);
+ ASSERT_EQ(if_block->GetPredecessors().size(), 2u);
+ ASSERT_EQ(if_block->GetPredecessor(0), entry_block);
+ ASSERT_NE(if_block->GetPredecessor(1), if_block);
// Ensure the new block is the back edge.
- ASSERT_EQ(if_block->GetPredecessors().Get(1),
+ ASSERT_EQ(if_block->GetPredecessor(1),
if_block->GetLastInstruction()->AsIf()->IfFalseSuccessor());
}
@@ -238,11 +238,11 @@ TEST(GraphTest, IfSuccessorMultiplePreHeaders1) {
ASSERT_EQ(if_instr->IfFalseSuccessor(), return_block);
// Ensure there is only one pre header..
- ASSERT_EQ(loop_block->GetPredecessors().Size(), 2u);
+ ASSERT_EQ(loop_block->GetPredecessors().size(), 2u);
// Ensure the new block is the successor of the true block.
- ASSERT_EQ(if_instr->IfTrueSuccessor()->GetSuccessors().Size(), 1u);
- ASSERT_EQ(if_instr->IfTrueSuccessor()->GetSuccessors().Get(0),
+ ASSERT_EQ(if_instr->IfTrueSuccessor()->GetSuccessors().size(), 1u);
+ ASSERT_EQ(if_instr->IfTrueSuccessor()->GetSuccessor(0),
loop_block->GetLoopInformation()->GetPreHeader());
}
@@ -276,11 +276,11 @@ TEST(GraphTest, IfSuccessorMultiplePreHeaders2) {
ASSERT_EQ(if_instr->IfTrueSuccessor(), return_block);
// Ensure there is only one pre header..
- ASSERT_EQ(loop_block->GetPredecessors().Size(), 2u);
+ ASSERT_EQ(loop_block->GetPredecessors().size(), 2u);
// Ensure the new block is the successor of the false block.
- ASSERT_EQ(if_instr->IfFalseSuccessor()->GetSuccessors().Size(), 1u);
- ASSERT_EQ(if_instr->IfFalseSuccessor()->GetSuccessors().Get(0),
+ ASSERT_EQ(if_instr->IfFalseSuccessor()->GetSuccessors().size(), 1u);
+ ASSERT_EQ(if_instr->IfFalseSuccessor()->GetSuccessor(0),
loop_block->GetLoopInformation()->GetPreHeader());
}
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index 069a7a460b..5b8e386fae 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -240,8 +240,7 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
void PrintPredecessors(HBasicBlock* block) {
AddIndent();
output_ << "predecessors";
- for (size_t i = 0, e = block->GetPredecessors().Size(); i < e; ++i) {
- HBasicBlock* predecessor = block->GetPredecessors().Get(i);
+ for (HBasicBlock* predecessor : block->GetPredecessors()) {
output_ << " \"B" << predecessor->GetBlockId() << "\" ";
}
if (block->IsEntryBlock() && (disasm_info_ != nullptr)) {
@@ -254,7 +253,7 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
AddIndent();
output_ << "successors";
for (size_t i = 0; i < block->NumberOfNormalSuccessors(); ++i) {
- HBasicBlock* successor = block->GetSuccessors().Get(i);
+ HBasicBlock* successor = block->GetSuccessor(i);
output_ << " \"B" << successor->GetBlockId() << "\" ";
}
output_<< std::endl;
@@ -263,8 +262,8 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
void PrintExceptionHandlers(HBasicBlock* block) {
AddIndent();
output_ << "xhandlers";
- for (size_t i = block->NumberOfNormalSuccessors(); i < block->GetSuccessors().Size(); ++i) {
- HBasicBlock* handler = block->GetSuccessors().Get(i);
+ for (size_t i = block->NumberOfNormalSuccessors(); i < block->GetSuccessors().size(); ++i) {
+ HBasicBlock* handler = block->GetSuccessor(i);
output_ << " \"B" << handler->GetBlockId() << "\" ";
}
if (block->IsExitBlock() &&
diff --git a/compiler/optimizing/gvn.cc b/compiler/optimizing/gvn.cc
index 833dfb00a1..5bb4e8e099 100644
--- a/compiler/optimizing/gvn.cc
+++ b/compiler/optimizing/gvn.cc
@@ -340,8 +340,8 @@ void GlobalValueNumberer::Run() {
void GlobalValueNumberer::VisitBasicBlock(HBasicBlock* block) {
ValueSet* set = nullptr;
- const GrowableArray<HBasicBlock*>& predecessors = block->GetPredecessors();
- if (predecessors.Size() == 0 || predecessors.Get(0)->IsEntryBlock()) {
+ const ArenaVector<HBasicBlock*>& predecessors = block->GetPredecessors();
+ if (predecessors.size() == 0 || predecessors[0]->IsEntryBlock()) {
// The entry block should only accumulate constant instructions, and
// the builder puts constants only in the entry block.
// Therefore, there is no need to propagate the value set to the next block.
@@ -349,8 +349,8 @@ void GlobalValueNumberer::VisitBasicBlock(HBasicBlock* block) {
} else {
HBasicBlock* dominator = block->GetDominator();
ValueSet* dominator_set = sets_.Get(dominator->GetBlockId());
- if (dominator->GetSuccessors().Size() == 1) {
- DCHECK_EQ(dominator->GetSuccessors().Get(0), block);
+ if (dominator->GetSuccessors().size() == 1) {
+ DCHECK_EQ(dominator->GetSuccessor(0), block);
set = dominator_set;
} else {
// We have to copy if the dominator has other successors, or `block` is not a successor
@@ -361,9 +361,9 @@ void GlobalValueNumberer::VisitBasicBlock(HBasicBlock* block) {
if (block->IsLoopHeader()) {
DCHECK_EQ(block->GetDominator(), block->GetLoopInformation()->GetPreHeader());
set->Kill(side_effects_.GetLoopEffects(block));
- } else if (predecessors.Size() > 1) {
- for (size_t i = 0, e = predecessors.Size(); i < e; ++i) {
- set->IntersectWith(sets_.Get(predecessors.Get(i)->GetBlockId()));
+ } else if (predecessors.size() > 1) {
+ for (HBasicBlock* predecessor : predecessors) {
+ set->IntersectWith(sets_.Get(predecessor->GetBlockId()));
if (set->IsEmpty()) {
break;
}
diff --git a/compiler/optimizing/induction_var_analysis.cc b/compiler/optimizing/induction_var_analysis.cc
index 3f5a6e7993..92c732c0c3 100644
--- a/compiler/optimizing/induction_var_analysis.cc
+++ b/compiler/optimizing/induction_var_analysis.cc
@@ -15,6 +15,7 @@
*/
#include "induction_var_analysis.h"
+#include "induction_var_range.h"
namespace art {
@@ -42,6 +43,40 @@ static bool IsEntryPhi(HLoopInformation* loop, HInstruction* instruction) {
instruction->GetBlock() == loop->GetHeader();
}
+/**
+ * Since graph traversal may enter a SCC at any position, an initial representation may be rotated,
+ * along dependences, viz. any of (a, b, c, d), (d, a, b, c), (c, d, a, b), (b, c, d, a) assuming
+ * a chain of dependences (mutually independent items may occur in arbitrary order). For proper
+ * classification, the lexicographically first entry-phi is rotated to the front.
+ */
+static void RotateEntryPhiFirst(HLoopInformation* loop,
+ ArenaVector<HInstruction*>* scc,
+ ArenaVector<HInstruction*>* new_scc) {
+ // Find very first entry-phi.
+ const HInstructionList& phis = loop->GetHeader()->GetPhis();
+ HInstruction* phi = nullptr;
+ size_t phi_pos = -1;
+ const size_t size = scc->size();
+ for (size_t i = 0; i < size; i++) {
+ if (IsEntryPhi(loop, scc->at(i)) && (phi == nullptr || phis.FoundBefore(scc->at(i), phi))) {
+ phi = scc->at(i);
+ phi_pos = i;
+ }
+ }
+
+ // If found, bring that entry-phi to front.
+ if (phi != nullptr) {
+ new_scc->clear();
+ for (size_t i = 0; i < size; i++) {
+ DCHECK_LT(phi_pos, size);
+ new_scc->push_back(scc->at(phi_pos));
+ if (++phi_pos >= size) phi_pos = 0;
+ }
+ DCHECK_EQ(size, new_scc->size());
+ scc->swap(*new_scc);
+ }
+}
+
//
// Class methods.
//
@@ -203,7 +238,15 @@ void HInductionVarAnalysis::ClassifyTrivial(HLoopInformation* loop, HInstruction
void HInductionVarAnalysis::ClassifyNonTrivial(HLoopInformation* loop) {
const size_t size = scc_.size();
DCHECK_GE(size, 1u);
- HInstruction* phi = scc_[size - 1];
+
+ // Rotate proper entry-phi to front.
+ if (size > 1) {
+ ArenaVector<HInstruction*> other(graph_->GetArena()->Adapter());
+ RotateEntryPhiFirst(loop, &scc_, &other);
+ }
+
+ // Analyze from phi onwards.
+ HInstruction* phi = scc_[0];
if (!IsEntryPhi(loop, phi)) {
return;
}
@@ -225,7 +268,7 @@ void HInductionVarAnalysis::ClassifyNonTrivial(HLoopInformation* loop) {
// Inspect remainder of the cycle that resides in scc_. The cycle_ mapping assigns
// temporary meaning to its nodes, seeded from the phi instruction and back.
- for (size_t i = 0; i < size - 1; i++) {
+ for (size_t i = 1; i < size; i++) {
HInstruction* instruction = scc_[i];
InductionInfo* update = nullptr;
if (instruction->IsPhi()) {
@@ -249,19 +292,19 @@ void HInductionVarAnalysis::ClassifyNonTrivial(HLoopInformation* loop) {
InductionInfo* induction = it->second;
switch (induction->induction_class) {
case kInvariant:
- // Classify phi (last element in scc_) and then the rest of the cycle "on-demand".
- // Statements are scanned in the Tarjan SCC order, with phi first.
+ // Classify first phi and then the rest of the cycle "on-demand".
+ // Statements are scanned in order.
AssignInfo(loop, phi, CreateInduction(kLinear, induction, initial));
- for (size_t i = 0; i < size - 1; i++) {
+ for (size_t i = 1; i < size; i++) {
ClassifyTrivial(loop, scc_[i]);
}
break;
case kPeriodic:
- // Classify all elements in the cycle with the found periodic induction while rotating
- // each first element to the end. Lastly, phi (last element in scc_) is classified.
- // Statements are scanned in the reverse Tarjan SCC order, with phi last.
- for (size_t i = 2; i <= size; i++) {
- AssignInfo(loop, scc_[size - i], induction);
+ // Classify all elements in the cycle with the found periodic induction while
+ // rotating each first element to the end. Lastly, phi is classified.
+ // Statements are scanned in reverse order.
+ for (size_t i = size - 1; i >= 1; i--) {
+ AssignInfo(loop, scc_[i], induction);
induction = RotatePeriodicInduction(induction->op_b, induction->op_a);
}
AssignInfo(loop, phi, induction);
@@ -511,12 +554,15 @@ void HInductionVarAnalysis::VisitCondition(HLoopInformation* loop,
InductionInfo* stride = a->op_a;
InductionInfo* lo_val = a->op_b;
InductionInfo* hi_val = b;
- int64_t value = -1;
- if (IsIntAndGet(stride, &value)) {
- if ((value > 0 && (cmp == kCondLT || cmp == kCondLE)) ||
- (value < 0 && (cmp == kCondGT || cmp == kCondGE))) {
+ // Analyze the stride thoroughly, since its representation may be compound at this point.
+ InductionVarRange::Value v1 = InductionVarRange::GetMin(stride, nullptr);
+ InductionVarRange::Value v2 = InductionVarRange::GetMax(stride, nullptr);
+ if (v1.a_constant == 0 && v2.a_constant == 0 && v1.b_constant == v2.b_constant) {
+ const int32_t stride_value = v1.b_constant;
+ if ((stride_value > 0 && (cmp == kCondLT || cmp == kCondLE)) ||
+ (stride_value < 0 && (cmp == kCondGT || cmp == kCondGE))) {
bool is_strict = cmp == kCondLT || cmp == kCondGT;
- VisitTripCount(loop, lo_val, hi_val, stride, value, type, is_strict);
+ VisitTripCount(loop, lo_val, hi_val, stride, stride_value, type, is_strict);
}
}
}
@@ -544,7 +590,7 @@ void HInductionVarAnalysis::VisitTripCount(HLoopInformation* loop,
// least once. Otherwise TC is 0. Also, the expression assumes the loop does not
// have any early-exits. Otherwise, TC is an upper bound.
//
- bool cancels = is_strict && abs(stride_value) == 1; // compensation cancels conversion?
+ bool cancels = is_strict && std::abs(stride_value) == 1; // compensation cancels conversion?
if (!cancels) {
// Convert exclusive integral inequality into inclusive integral inequality,
// viz. condition i < U is i <= U - 1 and condition i > U is i >= U + 1.
@@ -557,7 +603,7 @@ void HInductionVarAnalysis::VisitTripCount(HLoopInformation* loop,
}
// Assign the trip-count expression to the loop control. Clients that use the information
- // should be aware that due to the L <= U assumption, the expression is only valid in the
+ // should be aware that due to the top-test assumption, the expression is only valid in the
// loop-body proper, and not yet in the loop-header. If the loop has any early exits, the
// trip-count forms a conservative upper bound on the number of loop iterations.
InductionInfo* trip_count =
diff --git a/compiler/optimizing/induction_var_range.cc b/compiler/optimizing/induction_var_range.cc
index bd903340ad..486e904bd1 100644
--- a/compiler/optimizing/induction_var_range.cc
+++ b/compiler/optimizing/induction_var_range.cc
@@ -126,6 +126,7 @@ HInductionVarAnalysis::InductionInfo* InductionVarRange::GetTripCount(HLoopInfor
}
InductionVarRange::Value InductionVarRange::GetFetch(HInstruction* instruction,
+ HInductionVarAnalysis::InductionInfo* trip,
int32_t fail_value) {
// Detect constants and chase the fetch a bit deeper into the HIR tree, so that it becomes
// more likely range analysis will compare the same instructions as terminal nodes.
@@ -134,9 +135,16 @@ InductionVarRange::Value InductionVarRange::GetFetch(HInstruction* instruction,
return Value(value);
} else if (instruction->IsAdd()) {
if (IsIntAndGet(instruction->InputAt(0), &value)) {
- return AddValue(Value(value), GetFetch(instruction->InputAt(1), fail_value), fail_value);
+ return AddValue(Value(value),
+ GetFetch(instruction->InputAt(1), trip, fail_value), fail_value);
} else if (IsIntAndGet(instruction->InputAt(1), &value)) {
- return AddValue(GetFetch(instruction->InputAt(0), fail_value), Value(value), fail_value);
+ return AddValue(GetFetch(instruction->InputAt(0), trip, fail_value),
+ Value(value), fail_value);
+ }
+ } else if (fail_value < 0) {
+ // Special case: within the loop-body, minimum of trip-count is 1.
+ if (trip != nullptr && instruction == trip->op_b->fetch) {
+ return Value(1);
}
}
return Value(instruction, 1, 0);
@@ -163,7 +171,7 @@ InductionVarRange::Value InductionVarRange::GetMin(HInductionVarAnalysis::Induct
case HInductionVarAnalysis::kDiv:
return GetDiv(info->op_a, info->op_b, trip, INT_MIN);
case HInductionVarAnalysis::kFetch:
- return GetFetch(info->fetch, INT_MIN);
+ return GetFetch(info->fetch, trip, INT_MIN);
}
break;
case HInductionVarAnalysis::kLinear:
@@ -200,7 +208,7 @@ InductionVarRange::Value InductionVarRange::GetMax(HInductionVarAnalysis::Induct
case HInductionVarAnalysis::kDiv:
return GetDiv(info->op_a, info->op_b, trip, INT_MAX);
case HInductionVarAnalysis::kFetch:
- return GetFetch(info->fetch, INT_MAX);
+ return GetFetch(info->fetch, trip, INT_MAX);
}
break;
case HInductionVarAnalysis::kLinear:
diff --git a/compiler/optimizing/induction_var_range.h b/compiler/optimizing/induction_var_range.h
index b079076852..e002e5ff6c 100644
--- a/compiler/optimizing/induction_var_range.h
+++ b/compiler/optimizing/induction_var_range.h
@@ -27,7 +27,7 @@ namespace art {
* API to obtain a conservative lower and upper bound value on each instruction in the HIR.
*
* For example, given a linear induction 2 * i + x where 0 <= i <= 10, range analysis yields lower
- * bound value x and upper bound value x + 20 for the expression, thus, the range [0, x + 20].
+ * bound value x and upper bound value x + 20 for the expression, thus, the range [x, x + 20].
*/
class InductionVarRange {
public:
@@ -39,7 +39,7 @@ class InductionVarRange {
*/
struct Value {
Value(HInstruction* i, int32_t a, int32_t b)
- : instruction(a ? i : nullptr),
+ : instruction(a != 0 ? i : nullptr),
a_constant(a),
b_constant(b) {}
explicit Value(int32_t b) : Value(nullptr, 0, b) {}
@@ -70,7 +70,9 @@ class InductionVarRange {
HInductionVarAnalysis::InductionInfo* GetTripCount(HLoopInformation* loop,
HInstruction* context);
- static Value GetFetch(HInstruction* instruction, int32_t fail_value);
+ static Value GetFetch(HInstruction* instruction,
+ HInductionVarAnalysis::InductionInfo* trip,
+ int32_t fail_value);
static Value GetMin(HInductionVarAnalysis::InductionInfo* info,
HInductionVarAnalysis::InductionInfo* trip);
@@ -78,10 +80,12 @@ class InductionVarRange {
HInductionVarAnalysis::InductionInfo* trip);
static Value GetMul(HInductionVarAnalysis::InductionInfo* info1,
HInductionVarAnalysis::InductionInfo* info2,
- HInductionVarAnalysis::InductionInfo* trip, int32_t fail_value);
+ HInductionVarAnalysis::InductionInfo* trip,
+ int32_t fail_value);
static Value GetDiv(HInductionVarAnalysis::InductionInfo* info1,
HInductionVarAnalysis::InductionInfo* info2,
- HInductionVarAnalysis::InductionInfo* trip, int32_t fail_value);
+ HInductionVarAnalysis::InductionInfo* trip,
+ int32_t fail_value);
static Value AddValue(Value v1, Value v2, int32_t fail_value);
static Value SubValue(Value v1, Value v2, int32_t fail_value);
@@ -93,6 +97,7 @@ class InductionVarRange {
/** Results of prior induction variable analysis. */
HInductionVarAnalysis *induction_analysis_;
+ friend class HInductionVarAnalysis;
friend class InductionVarRangeTest;
DISALLOW_COPY_AND_ASSIGN(InductionVarRange);
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 0547ce832d..efd4fcfc79 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -423,8 +423,8 @@ bool HInliner::TryBuildAndInline(ArtMethod* resolved_method,
}
bool has_throw_predecessor = false;
- for (size_t i = 0, e = exit_block->GetPredecessors().Size(); i < e; ++i) {
- if (exit_block->GetPredecessors().Get(i)->GetLastInstruction()->IsThrow()) {
+ for (HBasicBlock* predecessor : exit_block->GetPredecessors()) {
+ if (predecessor->GetLastInstruction()->IsThrow()) {
has_throw_predecessor = true;
break;
}
@@ -506,7 +506,7 @@ bool HInliner::TryBuildAndInline(ArtMethod* resolved_method,
ReferenceTypeInfo::TypeHandle return_handle =
handles_->NewHandle(resolved_method->GetReturnType(true /* resolve */, pointer_size));
return_replacement->SetReferenceTypeInfo(ReferenceTypeInfo::Create(
- return_handle, return_handle->IsFinal() /* is_exact */));
+ return_handle, return_handle->CannotBeAssignedFromOtherTypes() /* is_exact */));
}
}
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index 41c239d60b..b71fdb8f1d 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -125,6 +125,28 @@ static Intrinsics GetIntrinsic(InlineMethod method, InstructionSet instruction_s
LOG(FATAL) << "Unknown/unsupported op size " << method.d.data;
UNREACHABLE();
}
+ case kIntrinsicRotateRight:
+ switch (GetType(method.d.data, true)) {
+ case Primitive::kPrimInt:
+ return Intrinsics::kIntegerRotateRight;
+ case Primitive::kPrimLong:
+ return Intrinsics::kLongRotateRight;
+ default:
+ LOG(FATAL) << "Unknown/unsupported op size " << method.d.data;
+ UNREACHABLE();
+ }
+ case kIntrinsicRotateLeft:
+ switch (GetType(method.d.data, true)) {
+ case Primitive::kPrimInt:
+ return Intrinsics::kIntegerRotateLeft;
+ case Primitive::kPrimLong:
+ return Intrinsics::kLongRotateLeft;
+ default:
+ LOG(FATAL) << "Unknown/unsupported op size " << method.d.data;
+ UNREACHABLE();
+ }
+
+ // Misc data processing.
case kIntrinsicNumberOfLeadingZeros:
switch (GetType(method.d.data, true)) {
case Primitive::kPrimInt:
@@ -135,6 +157,16 @@ static Intrinsics GetIntrinsic(InlineMethod method, InstructionSet instruction_s
LOG(FATAL) << "Unknown/unsupported op size " << method.d.data;
UNREACHABLE();
}
+ case kIntrinsicNumberOfTrailingZeros:
+ switch (GetType(method.d.data, true)) {
+ case Primitive::kPrimInt:
+ return Intrinsics::kIntegerNumberOfTrailingZeros;
+ case Primitive::kPrimLong:
+ return Intrinsics::kLongNumberOfTrailingZeros;
+ default:
+ LOG(FATAL) << "Unknown/unsupported op size " << method.d.data;
+ UNREACHABLE();
+ }
// Abs.
case kIntrinsicAbsDouble:
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 6040a407d8..cc8ddb6299 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -266,6 +266,227 @@ void IntrinsicCodeGeneratorARM::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
GenNumberOfLeadingZeros(invoke->GetLocations(), Primitive::kPrimLong, GetAssembler());
}
+static void GenNumberOfTrailingZeros(LocationSummary* locations,
+ Primitive::Type type,
+ ArmAssembler* assembler) {
+ DCHECK((type == Primitive::kPrimInt) || (type == Primitive::kPrimLong));
+
+ Register out = locations->Out().AsRegister<Register>();
+
+ if (type == Primitive::kPrimLong) {
+ Register in_reg_lo = locations->InAt(0).AsRegisterPairLow<Register>();
+ Register in_reg_hi = locations->InAt(0).AsRegisterPairHigh<Register>();
+ Label end;
+ __ rbit(out, in_reg_lo);
+ __ clz(out, out);
+ __ CompareAndBranchIfNonZero(in_reg_lo, &end);
+ __ rbit(out, in_reg_hi);
+ __ clz(out, out);
+ __ AddConstant(out, 32);
+ __ Bind(&end);
+ } else {
+ Register in = locations->InAt(0).AsRegister<Register>();
+ __ rbit(out, in);
+ __ clz(out, out);
+ }
+}
+
+void IntrinsicLocationsBuilderARM::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void IntrinsicCodeGeneratorARM::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
+ GenNumberOfTrailingZeros(invoke->GetLocations(), Primitive::kPrimInt, GetAssembler());
+}
+
+void IntrinsicLocationsBuilderARM::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+}
+
+void IntrinsicCodeGeneratorARM::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
+ GenNumberOfTrailingZeros(invoke->GetLocations(), Primitive::kPrimLong, GetAssembler());
+}
+
+static void GenIntegerRotate(LocationSummary* locations,
+ ArmAssembler* assembler,
+ bool is_left) {
+ Register in = locations->InAt(0).AsRegister<Register>();
+ Location rhs = locations->InAt(1);
+ Register out = locations->Out().AsRegister<Register>();
+
+ if (rhs.IsConstant()) {
+ // Arm32 and Thumb2 assemblers require a rotation on the interval [1,31],
+ // so map all rotations to a +ve. equivalent in that range.
+ // (e.g. left *or* right by -2 bits == 30 bits in the same direction.)
+ uint32_t rot = rhs.GetConstant()->AsIntConstant()->GetValue() & 0x1F;
+ if (rot) {
+ // Rotate, mapping left rotations to right equivalents if necessary.
+ // (e.g. left by 2 bits == right by 30.)
+ __ Ror(out, in, is_left ? (0x20 - rot) : rot);
+ } else if (out != in) {
+ __ Mov(out, in);
+ }
+ } else {
+ if (is_left) {
+ __ rsb(out, rhs.AsRegister<Register>(), ShifterOperand(0));
+ __ Ror(out, in, out);
+ } else {
+ __ Ror(out, in, rhs.AsRegister<Register>());
+ }
+ }
+}
+
+// Gain some speed by mapping all Long rotates onto equivalent pairs of Integer
+// rotates by swapping input regs (effectively rotating by the first 32-bits of
+// a larger rotation) or flipping direction (thus treating larger right/left
+// rotations as sub-word sized rotations in the other direction) as appropriate.
+static void GenLongRotate(LocationSummary* locations,
+ ArmAssembler* assembler,
+ bool is_left) {
+ Register in_reg_lo = locations->InAt(0).AsRegisterPairLow<Register>();
+ Register in_reg_hi = locations->InAt(0).AsRegisterPairHigh<Register>();
+ Location rhs = locations->InAt(1);
+ Register out_reg_lo = locations->Out().AsRegisterPairLow<Register>();
+ Register out_reg_hi = locations->Out().AsRegisterPairHigh<Register>();
+
+ if (rhs.IsConstant()) {
+ uint32_t rot = rhs.GetConstant()->AsIntConstant()->GetValue();
+ // Map all left rotations to right equivalents.
+ if (is_left) {
+ rot = 0x40 - rot;
+ }
+ // Map all rotations to +ve. equivalents on the interval [0,63].
+ rot &= 0x3F;
+ // For rotates over a word in size, 'pre-rotate' by 32-bits to keep rotate
+ // logic below to a simple pair of binary orr.
+ // (e.g. 34 bits == in_reg swap + 2 bits right.)
+ if (rot >= 0x20) {
+ rot -= 0x20;
+ std::swap(in_reg_hi, in_reg_lo);
+ }
+ // Rotate, or mov to out for zero or word size rotations.
+ if (rot) {
+ __ Lsr(out_reg_hi, in_reg_hi, rot);
+ __ orr(out_reg_hi, out_reg_hi, ShifterOperand(in_reg_lo, arm::LSL, 0x20 - rot));
+ __ Lsr(out_reg_lo, in_reg_lo, rot);
+ __ orr(out_reg_lo, out_reg_lo, ShifterOperand(in_reg_hi, arm::LSL, 0x20 - rot));
+ } else {
+ __ Mov(out_reg_lo, in_reg_lo);
+ __ Mov(out_reg_hi, in_reg_hi);
+ }
+ } else {
+ Register shift_left = locations->GetTemp(0).AsRegister<Register>();
+ Register shift_right = locations->GetTemp(1).AsRegister<Register>();
+ Label end;
+ Label right;
+
+ __ and_(shift_left, rhs.AsRegister<Register>(), ShifterOperand(0x1F));
+ __ Lsrs(shift_right, rhs.AsRegister<Register>(), 6);
+ __ rsb(shift_right, shift_left, ShifterOperand(0x20), AL, kCcKeep);
+
+ if (is_left) {
+ __ b(&right, CS);
+ } else {
+ __ b(&right, CC);
+ std::swap(shift_left, shift_right);
+ }
+
+ // out_reg_hi = (reg_hi << shift_left) | (reg_lo >> shift_right).
+ // out_reg_lo = (reg_lo << shift_left) | (reg_hi >> shift_right).
+ __ Lsl(out_reg_hi, in_reg_hi, shift_left);
+ __ Lsr(out_reg_lo, in_reg_lo, shift_right);
+ __ add(out_reg_hi, out_reg_hi, ShifterOperand(out_reg_lo));
+ __ Lsl(out_reg_lo, in_reg_lo, shift_left);
+ __ Lsr(shift_left, in_reg_hi, shift_right);
+ __ add(out_reg_lo, out_reg_lo, ShifterOperand(shift_left));
+ __ b(&end);
+
+ // out_reg_hi = (reg_hi >> shift_right) | (reg_lo << shift_left).
+ // out_reg_lo = (reg_lo >> shift_right) | (reg_hi << shift_left).
+ __ Bind(&right);
+ __ Lsr(out_reg_hi, in_reg_hi, shift_right);
+ __ Lsl(out_reg_lo, in_reg_lo, shift_left);
+ __ add(out_reg_hi, out_reg_hi, ShifterOperand(out_reg_lo));
+ __ Lsr(out_reg_lo, in_reg_lo, shift_right);
+ __ Lsl(shift_right, in_reg_hi, shift_left);
+ __ add(out_reg_lo, out_reg_lo, ShifterOperand(shift_right));
+
+ __ Bind(&end);
+ }
+}
+
+void IntrinsicLocationsBuilderARM::VisitIntegerRotateRight(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void IntrinsicCodeGeneratorARM::VisitIntegerRotateRight(HInvoke* invoke) {
+ GenIntegerRotate(invoke->GetLocations(), GetAssembler(), false /* is_left */);
+}
+
+void IntrinsicLocationsBuilderARM::VisitLongRotateRight(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ if (invoke->InputAt(1)->IsConstant()) {
+ locations->SetInAt(1, Location::ConstantLocation(invoke->InputAt(1)->AsConstant()));
+ } else {
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+ }
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+}
+
+void IntrinsicCodeGeneratorARM::VisitLongRotateRight(HInvoke* invoke) {
+ GenLongRotate(invoke->GetLocations(), GetAssembler(), false /* is_left */);
+}
+
+void IntrinsicLocationsBuilderARM::VisitIntegerRotateLeft(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+}
+
+void IntrinsicCodeGeneratorARM::VisitIntegerRotateLeft(HInvoke* invoke) {
+ GenIntegerRotate(invoke->GetLocations(), GetAssembler(), true /* is_left */);
+}
+
+void IntrinsicLocationsBuilderARM::VisitLongRotateLeft(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ if (invoke->InputAt(1)->IsConstant()) {
+ locations->SetInAt(1, Location::ConstantLocation(invoke->InputAt(1)->AsConstant()));
+ } else {
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+ }
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+}
+
+void IntrinsicCodeGeneratorARM::VisitLongRotateLeft(HInvoke* invoke) {
+ GenLongRotate(invoke->GetLocations(), GetAssembler(), true /* is_left */);
+}
+
static void MathAbsFP(LocationSummary* locations, bool is64bit, ArmAssembler* assembler) {
Location in = locations->InAt(0);
Location out = locations->Out();
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 1dbca345b5..b0cfd0d1bc 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -41,12 +41,12 @@ using helpers::DRegisterFrom;
using helpers::FPRegisterFrom;
using helpers::HeapOperand;
using helpers::LocationFrom;
+using helpers::OperandFrom;
using helpers::RegisterFrom;
using helpers::SRegisterFrom;
using helpers::WRegisterFrom;
using helpers::XRegisterFrom;
-
namespace {
ALWAYS_INLINE inline MemOperand AbsoluteHeapOperandFrom(Location location, size_t offset = 0) {
@@ -286,6 +286,131 @@ void IntrinsicCodeGeneratorARM64::VisitLongNumberOfLeadingZeros(HInvoke* invoke)
GenNumberOfLeadingZeros(invoke->GetLocations(), Primitive::kPrimLong, GetVIXLAssembler());
}
+static void GenNumberOfTrailingZeros(LocationSummary* locations,
+ Primitive::Type type,
+ vixl::MacroAssembler* masm) {
+ DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong);
+
+ Location in = locations->InAt(0);
+ Location out = locations->Out();
+
+ __ Rbit(RegisterFrom(out, type), RegisterFrom(in, type));
+ __ Clz(RegisterFrom(out, type), RegisterFrom(out, type));
+}
+
+void IntrinsicLocationsBuilderARM64::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
+ CreateIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM64::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
+ GenNumberOfTrailingZeros(invoke->GetLocations(), Primitive::kPrimInt, GetVIXLAssembler());
+}
+
+void IntrinsicLocationsBuilderARM64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
+ CreateIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
+ GenNumberOfTrailingZeros(invoke->GetLocations(), Primitive::kPrimLong, GetVIXLAssembler());
+}
+
+static void GenRotateRight(LocationSummary* locations,
+ Primitive::Type type,
+ vixl::MacroAssembler* masm) {
+ DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong);
+
+ Location in = locations->InAt(0);
+ Location out = locations->Out();
+ Operand rhs = OperandFrom(locations->InAt(1), type);
+
+ if (rhs.IsImmediate()) {
+ uint32_t shift = rhs.immediate() & (RegisterFrom(in, type).SizeInBits() - 1);
+ __ Ror(RegisterFrom(out, type),
+ RegisterFrom(in, type),
+ shift);
+ } else {
+ DCHECK(rhs.shift() == vixl::LSL && rhs.shift_amount() == 0);
+ __ Ror(RegisterFrom(out, type),
+ RegisterFrom(in, type),
+ rhs.reg());
+ }
+}
+
+void IntrinsicLocationsBuilderARM64::VisitIntegerRotateRight(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void IntrinsicCodeGeneratorARM64::VisitIntegerRotateRight(HInvoke* invoke) {
+ GenRotateRight(invoke->GetLocations(), Primitive::kPrimInt, GetVIXLAssembler());
+}
+
+void IntrinsicLocationsBuilderARM64::VisitLongRotateRight(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void IntrinsicCodeGeneratorARM64::VisitLongRotateRight(HInvoke* invoke) {
+ GenRotateRight(invoke->GetLocations(), Primitive::kPrimLong, GetVIXLAssembler());
+}
+
+static void GenRotateLeft(LocationSummary* locations,
+ Primitive::Type type,
+ vixl::MacroAssembler* masm) {
+ DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong);
+
+ Location in = locations->InAt(0);
+ Location out = locations->Out();
+ Operand rhs = OperandFrom(locations->InAt(1), type);
+
+ if (rhs.IsImmediate()) {
+ uint32_t regsize = RegisterFrom(in, type).SizeInBits();
+ uint32_t shift = (regsize - rhs.immediate()) & (regsize - 1);
+ __ Ror(RegisterFrom(out, type), RegisterFrom(in, type), shift);
+ } else {
+ DCHECK(rhs.shift() == vixl::LSL && rhs.shift_amount() == 0);
+ __ Neg(RegisterFrom(out, type),
+ Operand(RegisterFrom(locations->InAt(1), type)));
+ __ Ror(RegisterFrom(out, type),
+ RegisterFrom(in, type),
+ RegisterFrom(out, type));
+ }
+}
+
+void IntrinsicLocationsBuilderARM64::VisitIntegerRotateLeft(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+}
+
+void IntrinsicCodeGeneratorARM64::VisitIntegerRotateLeft(HInvoke* invoke) {
+ GenRotateLeft(invoke->GetLocations(), Primitive::kPrimInt, GetVIXLAssembler());
+}
+
+void IntrinsicLocationsBuilderARM64::VisitLongRotateLeft(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+}
+
+void IntrinsicCodeGeneratorARM64::VisitLongRotateLeft(HInvoke* invoke) {
+ GenRotateLeft(invoke->GetLocations(), Primitive::kPrimLong, GetVIXLAssembler());
+}
+
static void GenReverse(LocationSummary* locations,
Primitive::Type type,
vixl::MacroAssembler* masm) {
diff --git a/compiler/optimizing/intrinsics_list.h b/compiler/optimizing/intrinsics_list.h
index 7e5339ec21..bfe5e55c56 100644
--- a/compiler/optimizing/intrinsics_list.h
+++ b/compiler/optimizing/intrinsics_list.h
@@ -29,9 +29,15 @@
V(IntegerReverse, kStatic, kNeedsEnvironmentOrCache) \
V(IntegerReverseBytes, kStatic, kNeedsEnvironmentOrCache) \
V(IntegerNumberOfLeadingZeros, kStatic, kNeedsEnvironmentOrCache) \
+ V(IntegerNumberOfTrailingZeros, kStatic, kNeedsEnvironmentOrCache) \
+ V(IntegerRotateRight, kStatic, kNeedsEnvironmentOrCache) \
+ V(IntegerRotateLeft, kStatic, kNeedsEnvironmentOrCache) \
V(LongReverse, kStatic, kNeedsEnvironmentOrCache) \
V(LongReverseBytes, kStatic, kNeedsEnvironmentOrCache) \
V(LongNumberOfLeadingZeros, kStatic, kNeedsEnvironmentOrCache) \
+ V(LongNumberOfTrailingZeros, kStatic, kNeedsEnvironmentOrCache) \
+ V(LongRotateRight, kStatic, kNeedsEnvironmentOrCache) \
+ V(LongRotateLeft, kStatic, kNeedsEnvironmentOrCache) \
V(ShortReverseBytes, kStatic, kNeedsEnvironmentOrCache) \
V(MathAbsDouble, kStatic, kNeedsEnvironmentOrCache) \
V(MathAbsFloat, kStatic, kNeedsEnvironmentOrCache) \
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index daf56d05ef..e302317d14 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -507,7 +507,7 @@ static void GenMinMaxFP(LocationSummary* locations, bool is_min, bool is_double,
XmmRegister op2 = op2_loc.AsFpuRegister<XmmRegister>();
- Label nan, done, op2_label;
+ NearLabel nan, done, op2_label;
if (is_double) {
__ ucomisd(out, op2);
} else {
@@ -841,7 +841,7 @@ void IntrinsicCodeGeneratorX86::VisitMathRoundFloat(HInvoke* invoke) {
Register out = locations->Out().AsRegister<Register>();
XmmRegister maxInt = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
XmmRegister inPlusPointFive = locations->GetTemp(1).AsFpuRegister<XmmRegister>();
- Label done, nan;
+ NearLabel done, nan;
X86Assembler* assembler = GetAssembler();
// Generate 0.5 into inPlusPointFive.
@@ -888,9 +888,9 @@ void IntrinsicLocationsBuilderX86::VisitStringCharAt(HInvoke* invoke) {
void IntrinsicCodeGeneratorX86::VisitStringCharAt(HInvoke* invoke) {
LocationSummary* locations = invoke->GetLocations();
- // Location of reference to data array
+ // Location of reference to data array.
const int32_t value_offset = mirror::String::ValueOffset().Int32Value();
- // Location of count
+ // Location of count.
const int32_t count_offset = mirror::String::CountOffset().Int32Value();
Register obj = locations->InAt(0).AsRegister<Register>();
@@ -917,6 +917,183 @@ void IntrinsicCodeGeneratorX86::VisitStringCharAt(HInvoke* invoke) {
__ Bind(slow_path->GetExitLabel());
}
+void IntrinsicLocationsBuilderX86::VisitSystemArrayCopyChar(HInvoke* invoke) {
+ // We need at least two of the positions or length to be an integer constant,
+ // or else we won't have enough free registers.
+ HIntConstant* src_pos = invoke->InputAt(1)->AsIntConstant();
+ HIntConstant* dest_pos = invoke->InputAt(3)->AsIntConstant();
+ HIntConstant* length = invoke->InputAt(4)->AsIntConstant();
+
+ int num_constants =
+ ((src_pos != nullptr) ? 1 : 0)
+ + ((dest_pos != nullptr) ? 1 : 0)
+ + ((length != nullptr) ? 1 : 0);
+
+ if (num_constants < 2) {
+ // Not enough free registers.
+ return;
+ }
+
+ // As long as we are checking, we might as well check to see if the src and dest
+ // positions are >= 0.
+ if ((src_pos != nullptr && src_pos->GetValue() < 0) ||
+ (dest_pos != nullptr && dest_pos->GetValue() < 0)) {
+ // We will have to fail anyways.
+ return;
+ }
+
+ // And since we are already checking, check the length too.
+ if (length != nullptr) {
+ int32_t len = length->GetValue();
+ if (len < 0) {
+ // Just call as normal.
+ return;
+ }
+ }
+
+ // Okay, it is safe to generate inline code.
+ LocationSummary* locations =
+ new (arena_) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
+ // arraycopy(Object src, int srcPos, Object dest, int destPos, int length).
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
+ locations->SetInAt(2, Location::RequiresRegister());
+ locations->SetInAt(3, Location::RegisterOrConstant(invoke->InputAt(3)));
+ locations->SetInAt(4, Location::RegisterOrConstant(invoke->InputAt(4)));
+
+ // And we need some temporaries. We will use REP MOVSW, so we need fixed registers.
+ locations->AddTemp(Location::RegisterLocation(ESI));
+ locations->AddTemp(Location::RegisterLocation(EDI));
+ locations->AddTemp(Location::RegisterLocation(ECX));
+}
+
+static void CheckPosition(X86Assembler* assembler,
+ Location pos,
+ Register input,
+ Register length,
+ SlowPathCodeX86* slow_path,
+ Register input_len,
+ Register temp) {
+ // Where is the length in the array?
+ const uint32_t length_offset = mirror::Array::LengthOffset().Uint32Value();
+
+ if (pos.IsConstant()) {
+ int32_t pos_const = pos.GetConstant()->AsIntConstant()->GetValue();
+ if (pos_const == 0) {
+ // Check that length(input) >= length.
+ __ cmpl(Address(input, length_offset), length);
+ __ j(kLess, slow_path->GetEntryLabel());
+ } else {
+ // Check that length(input) >= pos.
+ __ movl(input_len, Address(input, length_offset));
+ __ cmpl(input_len, Immediate(pos_const));
+ __ j(kLess, slow_path->GetEntryLabel());
+
+ // Check that (length(input) - pos) >= length.
+ __ leal(temp, Address(input_len, -pos_const));
+ __ cmpl(temp, length);
+ __ j(kLess, slow_path->GetEntryLabel());
+ }
+ } else {
+ // Check that pos >= 0.
+ Register pos_reg = pos.AsRegister<Register>();
+ __ testl(pos_reg, pos_reg);
+ __ j(kLess, slow_path->GetEntryLabel());
+
+ // Check that pos <= length(input).
+ __ cmpl(Address(input, length_offset), pos_reg);
+ __ j(kLess, slow_path->GetEntryLabel());
+
+ // Check that (length(input) - pos) >= length.
+ __ movl(temp, Address(input, length_offset));
+ __ subl(temp, pos_reg);
+ __ cmpl(temp, length);
+ __ j(kLess, slow_path->GetEntryLabel());
+ }
+}
+
+void IntrinsicCodeGeneratorX86::VisitSystemArrayCopyChar(HInvoke* invoke) {
+ X86Assembler* assembler = GetAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+
+ Register src = locations->InAt(0).AsRegister<Register>();
+ Location srcPos = locations->InAt(1);
+ Register dest = locations->InAt(2).AsRegister<Register>();
+ Location destPos = locations->InAt(3);
+ Location length = locations->InAt(4);
+
+ // Temporaries that we need for MOVSW.
+ Register src_base = locations->GetTemp(0).AsRegister<Register>();
+ DCHECK_EQ(src_base, ESI);
+ Register dest_base = locations->GetTemp(1).AsRegister<Register>();
+ DCHECK_EQ(dest_base, EDI);
+ Register count = locations->GetTemp(2).AsRegister<Register>();
+ DCHECK_EQ(count, ECX);
+
+ SlowPathCodeX86* slow_path = new (GetAllocator()) IntrinsicSlowPathX86(invoke);
+ codegen_->AddSlowPath(slow_path);
+
+ // Bail out if the source and destination are the same (to handle overlap).
+ __ cmpl(src, dest);
+ __ j(kEqual, slow_path->GetEntryLabel());
+
+ // Bail out if the source is null.
+ __ testl(src, src);
+ __ j(kEqual, slow_path->GetEntryLabel());
+
+ // Bail out if the destination is null.
+ __ testl(dest, dest);
+ __ j(kEqual, slow_path->GetEntryLabel());
+
+ // If the length is negative, bail out.
+ // We have already checked in the LocationsBuilder for the constant case.
+ if (!length.IsConstant()) {
+ __ testl(length.AsRegister<Register>(), length.AsRegister<Register>());
+ __ j(kLess, slow_path->GetEntryLabel());
+ }
+
+ // We need the count in ECX.
+ if (length.IsConstant()) {
+ __ movl(count, Immediate(length.GetConstant()->AsIntConstant()->GetValue()));
+ } else {
+ __ movl(count, length.AsRegister<Register>());
+ }
+
+ // Validity checks: source.
+ CheckPosition(assembler, srcPos, src, count, slow_path, src_base, dest_base);
+
+ // Validity checks: dest.
+ CheckPosition(assembler, destPos, dest, count, slow_path, src_base, dest_base);
+
+ // Okay, everything checks out. Finally time to do the copy.
+ // Check assumption that sizeof(Char) is 2 (used in scaling below).
+ const size_t char_size = Primitive::ComponentSize(Primitive::kPrimChar);
+ DCHECK_EQ(char_size, 2u);
+
+ const uint32_t data_offset = mirror::Array::DataOffset(char_size).Uint32Value();
+
+ if (srcPos.IsConstant()) {
+ int32_t srcPos_const = srcPos.GetConstant()->AsIntConstant()->GetValue();
+ __ leal(src_base, Address(src, char_size * srcPos_const + data_offset));
+ } else {
+ __ leal(src_base, Address(src, srcPos.AsRegister<Register>(),
+ ScaleFactor::TIMES_2, data_offset));
+ }
+ if (destPos.IsConstant()) {
+ int32_t destPos_const = destPos.GetConstant()->AsIntConstant()->GetValue();
+
+ __ leal(dest_base, Address(dest, char_size * destPos_const + data_offset));
+ } else {
+ __ leal(dest_base, Address(dest, destPos.AsRegister<Register>(),
+ ScaleFactor::TIMES_2, data_offset));
+ }
+
+ // Do the move.
+ __ rep_movsw();
+
+ __ Bind(slow_path->GetExitLabel());
+}
+
void IntrinsicLocationsBuilderX86::VisitStringCompareTo(HInvoke* invoke) {
// The inputs plus one temp.
LocationSummary* locations = new (arena_) LocationSummary(invoke,
@@ -970,9 +1147,7 @@ void IntrinsicCodeGeneratorX86::VisitStringEquals(HInvoke* invoke) {
Register edi = locations->GetTemp(1).AsRegister<Register>();
Register esi = locations->Out().AsRegister<Register>();
- Label end;
- Label return_true;
- Label return_false;
+ NearLabel end, return_true, return_false;
// Get offsets of count, value, and class fields within a string object.
const uint32_t count_offset = mirror::String::CountOffset().Uint32Value();
@@ -1004,8 +1179,7 @@ void IntrinsicCodeGeneratorX86::VisitStringEquals(HInvoke* invoke) {
__ cmpl(ecx, Address(arg, count_offset));
__ j(kNotEqual, &return_false);
// Return true if both strings are empty.
- __ testl(ecx, ecx);
- __ j(kEqual, &return_true);
+ __ jecxz(&return_true);
// Load starting addresses of string values into ESI/EDI as required for repe_cmpsl instruction.
__ leal(esi, Address(str, value_offset));
@@ -1115,7 +1289,7 @@ static void GenerateStringIndexOf(HInvoke* invoke,
// Do a zero-length check.
// TODO: Support jecxz.
- Label not_found_label;
+ NearLabel not_found_label;
__ testl(string_length, string_length);
__ j(kEqual, &not_found_label);
@@ -1158,7 +1332,7 @@ static void GenerateStringIndexOf(HInvoke* invoke,
__ subl(string_length, counter);
__ leal(out, Address(string_length, -1));
- Label done;
+ NearLabel done;
__ jmp(&done);
// Failed to match; return -1.
@@ -1878,7 +2052,7 @@ static void GenLeadingZeros(X86Assembler* assembler, HInvoke* invoke, bool is_lo
}
// BSR sets ZF if the input was zero, and the output is undefined.
- Label all_zeroes, done;
+ NearLabel all_zeroes, done;
__ j(kEqual, &all_zeroes);
// Correct the result from BSR to get the final CLZ result.
@@ -1897,7 +2071,7 @@ static void GenLeadingZeros(X86Assembler* assembler, HInvoke* invoke, bool is_lo
DCHECK(src.IsRegisterPair());
Register src_lo = src.AsRegisterPairLow<Register>();
Register src_hi = src.AsRegisterPairHigh<Register>();
- Label handle_low, done, all_zeroes;
+ NearLabel handle_low, done, all_zeroes;
// Is the high word zero?
__ testl(src_hi, src_hi);
@@ -1954,8 +2128,13 @@ void IntrinsicCodeGeneratorX86::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED)
UNIMPLEMENTED_INTRINSIC(MathRoundDouble)
UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck)
-UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar)
UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
+UNIMPLEMENTED_INTRINSIC(IntegerNumberOfTrailingZeros)
+UNIMPLEMENTED_INTRINSIC(LongNumberOfTrailingZeros)
+UNIMPLEMENTED_INTRINSIC(IntegerRotateRight)
+UNIMPLEMENTED_INTRINSIC(LongRotateRight)
+UNIMPLEMENTED_INTRINSIC(IntegerRotateLeft)
+UNIMPLEMENTED_INTRINSIC(LongRotateLeft)
#undef UNIMPLEMENTED_INTRINSIC
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index f78a7268c1..51980af36d 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -405,7 +405,7 @@ static void GenMinMaxFP(LocationSummary* locations,
XmmRegister op2 = op2_loc.AsFpuRegister<XmmRegister>();
- Label nan, done, op2_label;
+ NearLabel nan, done, op2_label;
if (is_double) {
__ ucomisd(out, op2);
} else {
@@ -702,7 +702,7 @@ void IntrinsicCodeGeneratorX86_64::VisitMathRoundFloat(HInvoke* invoke) {
XmmRegister in = locations->InAt(0).AsFpuRegister<XmmRegister>();
CpuRegister out = locations->Out().AsRegister<CpuRegister>();
XmmRegister inPlusPointFive = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
- Label done, nan;
+ NearLabel done, nan;
X86_64Assembler* assembler = GetAssembler();
// Load 0.5 into inPlusPointFive.
@@ -750,7 +750,7 @@ void IntrinsicCodeGeneratorX86_64::VisitMathRoundDouble(HInvoke* invoke) {
XmmRegister in = locations->InAt(0).AsFpuRegister<XmmRegister>();
CpuRegister out = locations->Out().AsRegister<CpuRegister>();
XmmRegister inPlusPointFive = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
- Label done, nan;
+ NearLabel done, nan;
X86_64Assembler* assembler = GetAssembler();
// Load 0.5 into inPlusPointFive.
@@ -797,9 +797,9 @@ void IntrinsicLocationsBuilderX86_64::VisitStringCharAt(HInvoke* invoke) {
void IntrinsicCodeGeneratorX86_64::VisitStringCharAt(HInvoke* invoke) {
LocationSummary* locations = invoke->GetLocations();
- // Location of reference to data array
+ // Location of reference to data array.
const int32_t value_offset = mirror::String::ValueOffset().Int32Value();
- // Location of count
+ // Location of count.
const int32_t count_offset = mirror::String::CountOffset().Int32Value();
CpuRegister obj = locations->InAt(0).AsRegister<CpuRegister>();
@@ -826,6 +826,171 @@ void IntrinsicCodeGeneratorX86_64::VisitStringCharAt(HInvoke* invoke) {
__ Bind(slow_path->GetExitLabel());
}
+void IntrinsicLocationsBuilderX86_64::VisitSystemArrayCopyChar(HInvoke* invoke) {
+ // Check to see if we have known failures that will cause us to have to bail out
+ // to the runtime, and just generate the runtime call directly.
+ HIntConstant* src_pos = invoke->InputAt(1)->AsIntConstant();
+ HIntConstant* dest_pos = invoke->InputAt(3)->AsIntConstant();
+
+ // The positions must be non-negative.
+ if ((src_pos != nullptr && src_pos->GetValue() < 0) ||
+ (dest_pos != nullptr && dest_pos->GetValue() < 0)) {
+ // We will have to fail anyways.
+ return;
+ }
+
+ // The length must be >= 0 (a negative length forces the runtime call).
+ HIntConstant* length = invoke->InputAt(4)->AsIntConstant();
+ if (length != nullptr) {
+ int32_t len = length->GetValue();
+ if (len < 0) {
+ // Just call as normal.
+ return;
+ }
+ }
+
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCallOnSlowPath,
+ kIntrinsified);
+ // arraycopy(Object src, int srcPos, Object dest, int destPos, int length).
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
+ locations->SetInAt(2, Location::RequiresRegister());
+ locations->SetInAt(3, Location::RegisterOrConstant(invoke->InputAt(3)));
+ locations->SetInAt(4, Location::RegisterOrConstant(invoke->InputAt(4)));
+
+ // And we need some temporaries. We will use REP MOVSW, so we need fixed registers.
+ locations->AddTemp(Location::RegisterLocation(RSI));
+ locations->AddTemp(Location::RegisterLocation(RDI));
+ locations->AddTemp(Location::RegisterLocation(RCX));
+}
+
+static void CheckPosition(X86_64Assembler* assembler,
+ Location pos,
+ CpuRegister input,
+ CpuRegister length,
+ SlowPathCodeX86_64* slow_path,
+ CpuRegister input_len,
+ CpuRegister temp) {
+ // Where is the length in the array?
+ const uint32_t length_offset = mirror::Array::LengthOffset().Uint32Value();
+
+ if (pos.IsConstant()) {
+ int32_t pos_const = pos.GetConstant()->AsIntConstant()->GetValue();
+ if (pos_const == 0) {
+ // Check that length(input) >= length.
+ __ cmpl(Address(input, length_offset), length);
+ __ j(kLess, slow_path->GetEntryLabel());
+ } else {
+ // Check that length(input) >= pos.
+ __ movl(input_len, Address(input, length_offset));
+ __ cmpl(input_len, Immediate(pos_const));
+ __ j(kLess, slow_path->GetEntryLabel());
+
+ // Check that (length(input) - pos) >= length.
+ __ leal(temp, Address(input_len, -pos_const));
+ __ cmpl(temp, length);
+ __ j(kLess, slow_path->GetEntryLabel());
+ }
+ } else {
+ // Check that pos >= 0.
+ CpuRegister pos_reg = pos.AsRegister<CpuRegister>();
+ __ testl(pos_reg, pos_reg);
+ __ j(kLess, slow_path->GetEntryLabel());
+
+ // Check that pos <= length(input).
+ __ cmpl(Address(input, length_offset), pos_reg);
+ __ j(kLess, slow_path->GetEntryLabel());
+
+ // Check that (length(input) - pos) >= length.
+ __ movl(temp, Address(input, length_offset));
+ __ subl(temp, pos_reg);
+ __ cmpl(temp, length);
+ __ j(kLess, slow_path->GetEntryLabel());
+ }
+}
+
+void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopyChar(HInvoke* invoke) {
+ X86_64Assembler* assembler = GetAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+
+ CpuRegister src = locations->InAt(0).AsRegister<CpuRegister>();
+ Location srcPos = locations->InAt(1);
+ CpuRegister dest = locations->InAt(2).AsRegister<CpuRegister>();
+ Location destPos = locations->InAt(3);
+ Location length = locations->InAt(4);
+
+ // Temporaries that we need for MOVSW.
+ CpuRegister src_base = locations->GetTemp(0).AsRegister<CpuRegister>();
+ DCHECK_EQ(src_base.AsRegister(), RSI);
+ CpuRegister dest_base = locations->GetTemp(1).AsRegister<CpuRegister>();
+ DCHECK_EQ(dest_base.AsRegister(), RDI);
+ CpuRegister count = locations->GetTemp(2).AsRegister<CpuRegister>();
+ DCHECK_EQ(count.AsRegister(), RCX);
+
+ SlowPathCodeX86_64* slow_path = new (GetAllocator()) IntrinsicSlowPathX86_64(invoke);
+ codegen_->AddSlowPath(slow_path);
+
+ // Bail out if the source and destination are the same.
+ __ cmpl(src, dest);
+ __ j(kEqual, slow_path->GetEntryLabel());
+
+ // Bail out if the source is null.
+ __ testl(src, src);
+ __ j(kEqual, slow_path->GetEntryLabel());
+
+ // Bail out if the destination is null.
+ __ testl(dest, dest);
+ __ j(kEqual, slow_path->GetEntryLabel());
+
+ // If the length is negative, bail out.
+ // We have already checked in the LocationsBuilder for the constant case.
+ if (!length.IsConstant()) {
+ __ testl(length.AsRegister<CpuRegister>(), length.AsRegister<CpuRegister>());
+ __ j(kLess, slow_path->GetEntryLabel());
+ }
+
+ // We need the count in RCX.
+ if (length.IsConstant()) {
+ __ movl(count, Immediate(length.GetConstant()->AsIntConstant()->GetValue()));
+ } else {
+ __ movl(count, length.AsRegister<CpuRegister>());
+ }
+
+ // Validity checks: source.
+ CheckPosition(assembler, srcPos, src, count, slow_path, src_base, dest_base);
+
+ // Validity checks: dest.
+ CheckPosition(assembler, destPos, dest, count, slow_path, src_base, dest_base);
+
+ // Okay, everything checks out. Finally time to do the copy.
+ // Check assumption that sizeof(Char) is 2 (used in scaling below).
+ const size_t char_size = Primitive::ComponentSize(Primitive::kPrimChar);
+ DCHECK_EQ(char_size, 2u);
+
+ const uint32_t data_offset = mirror::Array::DataOffset(char_size).Uint32Value();
+
+ if (srcPos.IsConstant()) {
+ int32_t srcPos_const = srcPos.GetConstant()->AsIntConstant()->GetValue();
+ __ leal(src_base, Address(src, char_size * srcPos_const + data_offset));
+ } else {
+ __ leal(src_base, Address(src, srcPos.AsRegister<CpuRegister>(),
+ ScaleFactor::TIMES_2, data_offset));
+ }
+ if (destPos.IsConstant()) {
+ int32_t destPos_const = destPos.GetConstant()->AsIntConstant()->GetValue();
+ __ leal(dest_base, Address(dest, char_size * destPos_const + data_offset));
+ } else {
+ __ leal(dest_base, Address(dest, destPos.AsRegister<CpuRegister>(),
+ ScaleFactor::TIMES_2, data_offset));
+ }
+
+ // Do the move.
+ __ rep_movsw();
+
+ __ Bind(slow_path->GetExitLabel());
+}
+
void IntrinsicLocationsBuilderX86_64::VisitStringCompareTo(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
LocationSummary::kCall,
@@ -879,9 +1044,7 @@ void IntrinsicCodeGeneratorX86_64::VisitStringEquals(HInvoke* invoke) {
CpuRegister rdi = locations->GetTemp(1).AsRegister<CpuRegister>();
CpuRegister rsi = locations->Out().AsRegister<CpuRegister>();
- Label end;
- Label return_true;
- Label return_false;
+ NearLabel end, return_true, return_false;
// Get offsets of count, value, and class fields within a string object.
const uint32_t count_offset = mirror::String::CountOffset().Uint32Value();
@@ -913,8 +1076,7 @@ void IntrinsicCodeGeneratorX86_64::VisitStringEquals(HInvoke* invoke) {
__ cmpl(rcx, Address(arg, count_offset));
__ j(kNotEqual, &return_false);
// Return true if both strings are empty.
- __ testl(rcx, rcx);
- __ j(kEqual, &return_true);
+ __ jrcxz(&return_true);
// Load starting addresses of string values into RSI/RDI as required for repe_cmpsq instruction.
__ leal(rsi, Address(str, value_offset));
@@ -1024,7 +1186,7 @@ static void GenerateStringIndexOf(HInvoke* invoke,
// Do a length check.
// TODO: Support jecxz.
- Label not_found_label;
+ NearLabel not_found_label;
__ testl(string_length, string_length);
__ j(kEqual, &not_found_label);
@@ -1066,7 +1228,7 @@ static void GenerateStringIndexOf(HInvoke* invoke,
__ subl(string_length, counter);
__ leal(out, Address(string_length, -1));
- Label done;
+ NearLabel done;
__ jmp(&done);
// Failed to match; return -1.
@@ -1731,7 +1893,7 @@ static void GenLeadingZeros(X86_64Assembler* assembler, HInvoke* invoke, bool is
}
// BSR sets ZF if the input was zero, and the output is undefined.
- Label is_zero, done;
+ NearLabel is_zero, done;
__ j(kEqual, &is_zero);
// Correct the result from BSR to get the CLZ result.
@@ -1772,8 +1934,13 @@ void IntrinsicCodeGeneratorX86_64::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSE
}
UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck)
-UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar)
UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
+UNIMPLEMENTED_INTRINSIC(IntegerNumberOfTrailingZeros)
+UNIMPLEMENTED_INTRINSIC(LongNumberOfTrailingZeros)
+UNIMPLEMENTED_INTRINSIC(IntegerRotateRight)
+UNIMPLEMENTED_INTRINSIC(LongRotateRight)
+UNIMPLEMENTED_INTRINSIC(IntegerRotateLeft)
+UNIMPLEMENTED_INTRINSIC(LongRotateLeft)
#undef UNIMPLEMENTED_INTRINSIC
diff --git a/compiler/optimizing/licm_test.cc b/compiler/optimizing/licm_test.cc
index bc4a663b86..ec4a9ec916 100644
--- a/compiler/optimizing/licm_test.cc
+++ b/compiler/optimizing/licm_test.cc
@@ -63,8 +63,8 @@ class LICMTest : public testing::Test {
// Provide boiler-plate instructions.
parameter_ = new (&allocator_) HParameterValue(0, Primitive::kPrimNot);
entry_->AddInstruction(parameter_);
- constant_ = new (&allocator_) HConstant(Primitive::kPrimInt);
- loop_preheader_->AddInstruction(constant_);
+ constant_ = graph_->GetIntConstant(42);
+ loop_preheader_->AddInstruction(new (&allocator_) HGoto());
loop_header_->AddInstruction(new (&allocator_) HIf(parameter_));
loop_body_->AddInstruction(new (&allocator_) HGoto());
exit_->AddInstruction(new (&allocator_) HExit());
@@ -99,23 +99,6 @@ class LICMTest : public testing::Test {
// The actual LICM tests.
//
-TEST_F(LICMTest, ConstantHoisting) {
- BuildLoop();
-
- // Populate the loop with instructions: set array to constant.
- HInstruction* constant = new (&allocator_) HConstant(Primitive::kPrimDouble);
- loop_body_->InsertInstructionBefore(constant, loop_body_->GetLastInstruction());
- HInstruction* set_array = new (&allocator_) HArraySet(
- parameter_, constant_, constant, Primitive::kPrimDouble, 0);
- loop_body_->InsertInstructionBefore(set_array, loop_body_->GetLastInstruction());
-
- EXPECT_EQ(constant->GetBlock(), loop_body_);
- EXPECT_EQ(set_array->GetBlock(), loop_body_);
- PerformLICM();
- EXPECT_EQ(constant->GetBlock(), loop_preheader_);
- EXPECT_EQ(set_array->GetBlock(), loop_body_);
-}
-
TEST_F(LICMTest, FieldHoisting) {
BuildLoop();
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 650c8e5fed..cc12a10354 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -20,6 +20,7 @@
#include "ssa_builder.h"
#include "base/bit_vector-inl.h"
#include "base/bit_utils.h"
+#include "mirror/class-inl.h"
#include "utils/growable_array.h"
#include "scoped_thread_state_change.h"
@@ -68,8 +69,8 @@ void HGraph::RemoveDeadBlocks(const ArenaBitVector& visited) {
if (!visited.IsBitSet(i)) {
HBasicBlock* block = blocks_.Get(i);
// We only need to update the successor, which might be live.
- for (size_t j = 0; j < block->GetSuccessors().Size(); ++j) {
- block->GetSuccessors().Get(j)->RemovePredecessor(block);
+ for (HBasicBlock* successor : block->GetSuccessors()) {
+ successor->RemovePredecessor(block);
}
// Remove the block from the list of blocks, so that further analyses
// never see it.
@@ -86,8 +87,7 @@ void HGraph::VisitBlockForBackEdges(HBasicBlock* block,
visited->SetBit(id);
visiting->SetBit(id);
- for (size_t i = 0; i < block->GetSuccessors().Size(); i++) {
- HBasicBlock* successor = block->GetSuccessors().Get(i);
+ for (HBasicBlock* successor : block->GetSuccessors()) {
if (visiting->IsBitSet(successor->GetBlockId())) {
successor->AddBackEdge(block);
} else {
@@ -134,7 +134,7 @@ void HGraph::ClearDominanceInformation() {
}
void HBasicBlock::ClearDominanceInformation() {
- dominated_blocks_.Reset();
+ dominated_blocks_.clear();
dominator_ = nullptr;
}
@@ -143,8 +143,8 @@ void HGraph::ComputeDominanceInformation() {
GrowableArray<size_t> visits(arena_, blocks_.Size());
visits.SetSize(blocks_.Size());
reverse_post_order_.Add(entry_block_);
- for (size_t i = 0; i < entry_block_->GetSuccessors().Size(); i++) {
- VisitBlockForDominatorTree(entry_block_->GetSuccessors().Get(i), entry_block_, &visits);
+ for (HBasicBlock* successor : entry_block_->GetSuccessors()) {
+ VisitBlockForDominatorTree(successor, entry_block_, &visits);
}
}
@@ -179,11 +179,11 @@ void HGraph::VisitBlockForDominatorTree(HBasicBlock* block,
// Once all the forward edges have been visited, we know the immediate
// dominator of the block. We can then start visiting its successors.
if (visits->Get(block->GetBlockId()) ==
- block->GetPredecessors().Size() - block->NumberOfBackEdges()) {
+ block->GetPredecessors().size() - block->NumberOfBackEdges()) {
block->GetDominator()->AddDominatedBlock(block);
reverse_post_order_.Add(block);
- for (size_t i = 0; i < block->GetSuccessors().Size(); i++) {
- VisitBlockForDominatorTree(block->GetSuccessors().Get(i), block, visits);
+ for (HBasicBlock* successor : block->GetSuccessors()) {
+ VisitBlockForDominatorTree(successor, block, visits);
}
}
}
@@ -224,14 +224,14 @@ void HGraph::SimplifyLoop(HBasicBlock* header) {
// Make sure the loop has only one pre header. This simplifies SSA building by having
// to just look at the pre header to know which locals are initialized at entry of the
// loop.
- size_t number_of_incomings = header->GetPredecessors().Size() - info->NumberOfBackEdges();
+ size_t number_of_incomings = header->GetPredecessors().size() - info->NumberOfBackEdges();
if (number_of_incomings != 1) {
HBasicBlock* pre_header = new (arena_) HBasicBlock(this, header->GetDexPc());
AddBlock(pre_header);
pre_header->AddInstruction(new (arena_) HGoto(header->GetDexPc()));
- for (size_t pred = 0; pred < header->GetPredecessors().Size(); ++pred) {
- HBasicBlock* predecessor = header->GetPredecessors().Get(pred);
+ for (size_t pred = 0; pred < header->GetPredecessors().size(); ++pred) {
+ HBasicBlock* predecessor = header->GetPredecessor(pred);
if (!info->IsBackEdge(*predecessor)) {
predecessor->ReplaceSuccessor(header, pre_header);
pred--;
@@ -241,13 +241,13 @@ void HGraph::SimplifyLoop(HBasicBlock* header) {
}
// Make sure the first predecessor of a loop header is the incoming block.
- if (info->IsBackEdge(*header->GetPredecessors().Get(0))) {
- HBasicBlock* to_swap = header->GetPredecessors().Get(0);
- for (size_t pred = 1, e = header->GetPredecessors().Size(); pred < e; ++pred) {
- HBasicBlock* predecessor = header->GetPredecessors().Get(pred);
+ if (info->IsBackEdge(*header->GetPredecessor(0))) {
+ HBasicBlock* to_swap = header->GetPredecessor(0);
+ for (size_t pred = 1, e = header->GetPredecessors().size(); pred < e; ++pred) {
+ HBasicBlock* predecessor = header->GetPredecessor(pred);
if (!info->IsBackEdge(*predecessor)) {
- header->predecessors_.Put(pred, to_swap);
- header->predecessors_.Put(0, predecessor);
+ header->predecessors_[pred] = to_swap;
+ header->predecessors_[0] = predecessor;
break;
}
}
@@ -267,7 +267,7 @@ void HGraph::SimplifyLoop(HBasicBlock* header) {
}
static bool CheckIfPredecessorAtIsExceptional(const HBasicBlock& block, size_t pred_idx) {
- HBasicBlock* predecessor = block.GetPredecessors().Get(pred_idx);
+ HBasicBlock* predecessor = block.GetPredecessor(pred_idx);
if (!predecessor->EndsWithTryBoundary()) {
// Only edges from HTryBoundary can be exceptional.
return false;
@@ -296,7 +296,7 @@ void HGraph::SimplifyCatchBlocks() {
}
bool exceptional_predecessors_only = true;
- for (size_t j = 0; j < catch_block->GetPredecessors().Size(); ++j) {
+ for (size_t j = 0; j < catch_block->GetPredecessors().size(); ++j) {
if (!CheckIfPredecessorAtIsExceptional(*catch_block, j)) {
exceptional_predecessors_only = false;
break;
@@ -313,9 +313,9 @@ void HGraph::SimplifyCatchBlocks() {
// a MOVE_EXCEPTION instruction, as guaranteed by the verifier.
DCHECK(!catch_block->GetFirstInstruction()->IsLoadException());
HBasicBlock* normal_block = catch_block->SplitBefore(catch_block->GetFirstInstruction());
- for (size_t j = 0; j < catch_block->GetPredecessors().Size(); ++j) {
+ for (size_t j = 0; j < catch_block->GetPredecessors().size(); ++j) {
if (!CheckIfPredecessorAtIsExceptional(*catch_block, j)) {
- catch_block->GetPredecessors().Get(j)->ReplaceSuccessor(catch_block, normal_block);
+ catch_block->GetPredecessor(j)->ReplaceSuccessor(catch_block, normal_block);
--j;
}
}
@@ -337,7 +337,7 @@ void HGraph::ComputeTryBlockInformation() {
// Infer try membership from the first predecessor. Having simplified loops,
// the first predecessor can never be a back edge and therefore it must have
// been visited already and had its try membership set.
- HBasicBlock* first_predecessor = block->GetPredecessors().Get(0);
+ HBasicBlock* first_predecessor = block->GetPredecessor(0);
DCHECK(!block->IsLoopHeader() || !block->GetLoopInformation()->IsBackEdge(*first_predecessor));
const HTryBoundary* try_entry = first_predecessor->ComputeTryEntryOfSuccessors();
if (try_entry != nullptr) {
@@ -346,16 +346,6 @@ void HGraph::ComputeTryBlockInformation() {
}
}
-bool HGraph::HasTryCatch() const {
- for (size_t i = 0, e = blocks_.Size(); i < e; ++i) {
- HBasicBlock* block = blocks_.Get(i);
- if (block != nullptr && (block->IsTryBlock() || block->IsCatchBlock())) {
- return true;
- }
- }
- return false;
-}
-
void HGraph::SimplifyCFG() {
// Simplify the CFG for future analysis, and code generation:
// (1): Split critical edges.
@@ -364,10 +354,10 @@ void HGraph::SimplifyCFG() {
HBasicBlock* block = blocks_.Get(i);
if (block == nullptr) continue;
if (block->NumberOfNormalSuccessors() > 1) {
- for (size_t j = 0; j < block->GetSuccessors().Size(); ++j) {
- HBasicBlock* successor = block->GetSuccessors().Get(j);
+ for (size_t j = 0; j < block->GetSuccessors().size(); ++j) {
+ HBasicBlock* successor = block->GetSuccessor(j);
DCHECK(!successor->IsCatchBlock());
- if (successor->GetPredecessors().Size() > 1) {
+ if (successor->GetPredecessors().size() > 1) {
SplitCriticalEdge(block, successor);
--j;
}
@@ -486,8 +476,8 @@ void HLoopInformation::PopulateRecursive(HBasicBlock* block) {
blocks_.SetBit(block->GetBlockId());
block->SetInLoop(this);
- for (size_t i = 0, e = block->GetPredecessors().Size(); i < e; ++i) {
- PopulateRecursive(block->GetPredecessors().Get(i));
+ for (HBasicBlock* predecessor : block->GetPredecessors()) {
+ PopulateRecursive(predecessor);
}
}
@@ -1138,12 +1128,11 @@ HBasicBlock* HBasicBlock::SplitBefore(HInstruction* cursor) {
new_block->instructions_.SetBlockOfInstructions(new_block);
AddInstruction(new (GetGraph()->GetArena()) HGoto(new_block->GetDexPc()));
- for (size_t i = 0, e = GetSuccessors().Size(); i < e; ++i) {
- HBasicBlock* successor = GetSuccessors().Get(i);
- new_block->successors_.Add(successor);
- successor->predecessors_.Put(successor->GetPredecessorIndexOf(this), new_block);
+ for (HBasicBlock* successor : GetSuccessors()) {
+ new_block->successors_.push_back(successor);
+ successor->predecessors_[successor->GetPredecessorIndexOf(this)] = new_block;
}
- successors_.Reset();
+ successors_.clear();
AddSuccessor(new_block);
GetGraph()->AddBlock(new_block);
@@ -1163,19 +1152,17 @@ HBasicBlock* HBasicBlock::SplitAfter(HInstruction* cursor) {
instructions_.last_instruction_ = cursor;
new_block->instructions_.SetBlockOfInstructions(new_block);
- for (size_t i = 0, e = GetSuccessors().Size(); i < e; ++i) {
- HBasicBlock* successor = GetSuccessors().Get(i);
- new_block->successors_.Add(successor);
- successor->predecessors_.Put(successor->GetPredecessorIndexOf(this), new_block);
+ for (HBasicBlock* successor : GetSuccessors()) {
+ new_block->successors_.push_back(successor);
+ successor->predecessors_[successor->GetPredecessorIndexOf(this)] = new_block;
}
- successors_.Reset();
+ successors_.clear();
- for (size_t i = 0, e = GetDominatedBlocks().Size(); i < e; ++i) {
- HBasicBlock* dominated = GetDominatedBlocks().Get(i);
+ for (HBasicBlock* dominated : GetDominatedBlocks()) {
dominated->dominator_ = new_block;
- new_block->dominated_blocks_.Add(dominated);
+ new_block->dominated_blocks_.push_back(dominated);
}
- dominated_blocks_.Reset();
+ dominated_blocks_.clear();
return new_block;
}
@@ -1228,7 +1215,7 @@ bool HBasicBlock::HasSinglePhi() const {
}
bool HTryBoundary::HasSameExceptionHandlersAs(const HTryBoundary& other) const {
- if (GetBlock()->GetSuccessors().Size() != other.GetBlock()->GetSuccessors().Size()) {
+ if (GetBlock()->GetSuccessors().size() != other.GetBlock()->GetSuccessors().size()) {
return false;
}
@@ -1288,7 +1275,7 @@ void HBasicBlock::DisconnectAndDelete() {
// Dominators must be removed after all the blocks they dominate. This way
// a loop header is removed last, a requirement for correct loop information
// iteration.
- DCHECK(dominated_blocks_.IsEmpty());
+ DCHECK(dominated_blocks_.empty());
// Remove the block from all loops it is included in.
for (HLoopInformationOutwardIterator it(*this); !it.Done(); it.Advance()) {
@@ -1304,36 +1291,34 @@ void HBasicBlock::DisconnectAndDelete() {
// Disconnect the block from its predecessors and update their control-flow
// instructions.
- for (size_t i = 0, e = predecessors_.Size(); i < e; ++i) {
- HBasicBlock* predecessor = predecessors_.Get(i);
+ for (HBasicBlock* predecessor : predecessors_) {
HInstruction* last_instruction = predecessor->GetLastInstruction();
predecessor->RemoveInstruction(last_instruction);
predecessor->RemoveSuccessor(this);
- if (predecessor->GetSuccessors().Size() == 1u) {
+ if (predecessor->GetSuccessors().size() == 1u) {
DCHECK(last_instruction->IsIf());
predecessor->AddInstruction(new (graph_->GetArena()) HGoto(last_instruction->GetDexPc()));
} else {
// The predecessor has no remaining successors and therefore must be dead.
// We deliberately leave it without a control-flow instruction so that the
// SSAChecker fails unless it is not removed during the pass too.
- DCHECK_EQ(predecessor->GetSuccessors().Size(), 0u);
+ DCHECK_EQ(predecessor->GetSuccessors().size(), 0u);
}
}
- predecessors_.Reset();
+ predecessors_.clear();
// Disconnect the block from its successors and update their phis.
- for (size_t i = 0, e = successors_.Size(); i < e; ++i) {
- HBasicBlock* successor = successors_.Get(i);
+ for (HBasicBlock* successor : successors_) {
// Delete this block from the list of predecessors.
size_t this_index = successor->GetPredecessorIndexOf(this);
- successor->predecessors_.DeleteAt(this_index);
+ successor->predecessors_.erase(successor->predecessors_.begin() + this_index);
// Check that `successor` has other predecessors, otherwise `this` is the
// dominator of `successor` which violates the order DCHECKed at the top.
- DCHECK(!successor->predecessors_.IsEmpty());
+ DCHECK(!successor->predecessors_.empty());
// Remove this block's entries in the successor's phis.
- if (successor->predecessors_.Size() == 1u) {
+ if (successor->predecessors_.size() == 1u) {
// The successor has just one predecessor left. Replace phis with the only
// remaining input.
for (HInstructionIterator phi_it(successor->GetPhis()); !phi_it.Done(); phi_it.Advance()) {
@@ -1347,7 +1332,7 @@ void HBasicBlock::DisconnectAndDelete() {
}
}
}
- successors_.Reset();
+ successors_.clear();
// Disconnect from the dominator.
dominator_->RemoveDominatedBlock(this);
@@ -1361,11 +1346,9 @@ void HBasicBlock::DisconnectAndDelete() {
void HBasicBlock::MergeWith(HBasicBlock* other) {
DCHECK_EQ(GetGraph(), other->GetGraph());
- DCHECK(GetDominatedBlocks().Contains(other));
- DCHECK_EQ(GetSuccessors().Size(), 1u);
- DCHECK_EQ(GetSuccessors().Get(0), other);
- DCHECK_EQ(other->GetPredecessors().Size(), 1u);
- DCHECK_EQ(other->GetPredecessors().Get(0), this);
+ DCHECK(ContainsElement(dominated_blocks_, other));
+ DCHECK_EQ(GetSingleSuccessor(), other);
+ DCHECK_EQ(other->GetSinglePredecessor(), this);
DCHECK(other->GetPhis().IsEmpty());
// Move instructions from `other` to `this`.
@@ -1385,24 +1368,23 @@ void HBasicBlock::MergeWith(HBasicBlock* other) {
}
// Update links to the successors of `other`.
- successors_.Reset();
- while (!other->successors_.IsEmpty()) {
- HBasicBlock* successor = other->successors_.Get(0);
+ successors_.clear();
+ while (!other->successors_.empty()) {
+ HBasicBlock* successor = other->GetSuccessor(0);
successor->ReplacePredecessor(other, this);
}
// Update the dominator tree.
- dominated_blocks_.Delete(other);
- for (size_t i = 0, e = other->GetDominatedBlocks().Size(); i < e; ++i) {
- HBasicBlock* dominated = other->GetDominatedBlocks().Get(i);
- dominated_blocks_.Add(dominated);
+ RemoveDominatedBlock(other);
+ for (HBasicBlock* dominated : other->GetDominatedBlocks()) {
+ dominated_blocks_.push_back(dominated);
dominated->SetDominator(this);
}
- other->dominated_blocks_.Reset();
+ other->dominated_blocks_.clear();
other->dominator_ = nullptr;
// Clear the list of predecessors of `other` in preparation of deleting it.
- other->predecessors_.Reset();
+ other->predecessors_.clear();
// Delete `other` from the graph. The function updates reverse post order.
graph_->DeleteDeadBlock(other);
@@ -1411,11 +1393,10 @@ void HBasicBlock::MergeWith(HBasicBlock* other) {
void HBasicBlock::MergeWithInlined(HBasicBlock* other) {
DCHECK_NE(GetGraph(), other->GetGraph());
- DCHECK(GetDominatedBlocks().IsEmpty());
- DCHECK(GetSuccessors().IsEmpty());
+ DCHECK(GetDominatedBlocks().empty());
+ DCHECK(GetSuccessors().empty());
DCHECK(!EndsWithControlFlowInstruction());
- DCHECK_EQ(other->GetPredecessors().Size(), 1u);
- DCHECK(other->GetPredecessors().Get(0)->IsEntryBlock());
+ DCHECK(other->GetSinglePredecessor()->IsEntryBlock());
DCHECK(other->GetPhis().IsEmpty());
DCHECK(!other->IsInLoop());
@@ -1424,34 +1405,33 @@ void HBasicBlock::MergeWithInlined(HBasicBlock* other) {
other->instructions_.SetBlockOfInstructions(this);
// Update links to the successors of `other`.
- successors_.Reset();
- while (!other->successors_.IsEmpty()) {
- HBasicBlock* successor = other->successors_.Get(0);
+ successors_.clear();
+ while (!other->successors_.empty()) {
+ HBasicBlock* successor = other->GetSuccessor(0);
successor->ReplacePredecessor(other, this);
}
// Update the dominator tree.
- for (size_t i = 0, e = other->GetDominatedBlocks().Size(); i < e; ++i) {
- HBasicBlock* dominated = other->GetDominatedBlocks().Get(i);
- dominated_blocks_.Add(dominated);
+ for (HBasicBlock* dominated : other->GetDominatedBlocks()) {
+ dominated_blocks_.push_back(dominated);
dominated->SetDominator(this);
}
- other->dominated_blocks_.Reset();
+ other->dominated_blocks_.clear();
other->dominator_ = nullptr;
other->graph_ = nullptr;
}
void HBasicBlock::ReplaceWith(HBasicBlock* other) {
- while (!GetPredecessors().IsEmpty()) {
- HBasicBlock* predecessor = GetPredecessors().Get(0);
+ while (!GetPredecessors().empty()) {
+ HBasicBlock* predecessor = GetPredecessor(0);
predecessor->ReplaceSuccessor(this, other);
}
- while (!GetSuccessors().IsEmpty()) {
- HBasicBlock* successor = GetSuccessors().Get(0);
+ while (!GetSuccessors().empty()) {
+ HBasicBlock* successor = GetSuccessor(0);
successor->ReplacePredecessor(this, other);
}
- for (size_t i = 0; i < dominated_blocks_.Size(); ++i) {
- other->AddDominatedBlock(dominated_blocks_.Get(i));
+ for (HBasicBlock* dominated : GetDominatedBlocks()) {
+ other->AddDominatedBlock(dominated);
}
GetDominator()->ReplaceDominatedBlock(this, other);
other->SetDominator(GetDominator());
@@ -1474,9 +1454,9 @@ static void MakeRoomFor(GrowableArray<HBasicBlock*>* blocks,
void HGraph::DeleteDeadBlock(HBasicBlock* block) {
DCHECK_EQ(block->GetGraph(), this);
- DCHECK(block->GetSuccessors().IsEmpty());
- DCHECK(block->GetPredecessors().IsEmpty());
- DCHECK(block->GetDominatedBlocks().IsEmpty());
+ DCHECK(block->GetSuccessors().empty());
+ DCHECK(block->GetPredecessors().empty());
+ DCHECK(block->GetDominatedBlocks().empty());
DCHECK(block->GetDominator() == nullptr);
for (HBackwardInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
@@ -1550,16 +1530,16 @@ HInstruction* HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
HBasicBlock* at = invoke->GetBlock();
HBasicBlock* to = at->SplitAfter(invoke);
- HBasicBlock* first = entry_block_->GetSuccessors().Get(0);
+ HBasicBlock* first = entry_block_->GetSuccessor(0);
DCHECK(!first->IsInLoop());
at->MergeWithInlined(first);
exit_block_->ReplaceWith(to);
// Update all predecessors of the exit block (now the `to` block)
// to not `HReturn` but `HGoto` instead.
- bool returns_void = to->GetPredecessors().Get(0)->GetLastInstruction()->IsReturnVoid();
- if (to->GetPredecessors().Size() == 1) {
- HBasicBlock* predecessor = to->GetPredecessors().Get(0);
+ bool returns_void = to->GetPredecessor(0)->GetLastInstruction()->IsReturnVoid();
+ if (to->GetPredecessors().size() == 1) {
+ HBasicBlock* predecessor = to->GetPredecessor(0);
HInstruction* last = predecessor->GetLastInstruction();
if (!returns_void) {
return_value = last->InputAt(0);
@@ -1573,8 +1553,7 @@ HInstruction* HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
allocator, kNoRegNumber, 0, HPhi::ToPhiType(invoke->GetType()), to->GetDexPc());
to->AddPhi(return_value->AsPhi());
}
- for (size_t i = 0, e = to->GetPredecessors().Size(); i < e; ++i) {
- HBasicBlock* predecessor = to->GetPredecessors().Get(i);
+ for (HBasicBlock* predecessor : to->GetPredecessors()) {
HInstruction* last = predecessor->GetLastInstruction();
if (!returns_void) {
return_value->AsPhi()->AddInput(last->InputAt(0));
@@ -1726,8 +1705,8 @@ void HGraph::TransformLoopHeaderForBCE(HBasicBlock* header) {
AddBlock(new_pre_header);
header->ReplacePredecessor(pre_header, new_pre_header);
- pre_header->successors_.Reset();
- pre_header->dominated_blocks_.Reset();
+ pre_header->successors_.clear();
+ pre_header->dominated_blocks_.clear();
pre_header->AddSuccessor(if_block);
if_block->AddSuccessor(dummy_block); // True successor
@@ -1735,15 +1714,15 @@ void HGraph::TransformLoopHeaderForBCE(HBasicBlock* header) {
dummy_block->AddSuccessor(new_pre_header);
deopt_block->AddSuccessor(new_pre_header);
- pre_header->dominated_blocks_.Add(if_block);
+ pre_header->dominated_blocks_.push_back(if_block);
if_block->SetDominator(pre_header);
- if_block->dominated_blocks_.Add(dummy_block);
+ if_block->dominated_blocks_.push_back(dummy_block);
dummy_block->SetDominator(if_block);
- if_block->dominated_blocks_.Add(deopt_block);
+ if_block->dominated_blocks_.push_back(deopt_block);
deopt_block->SetDominator(if_block);
- if_block->dominated_blocks_.Add(new_pre_header);
+ if_block->dominated_blocks_.push_back(new_pre_header);
new_pre_header->SetDominator(if_block);
- new_pre_header->dominated_blocks_.Add(header);
+ new_pre_header->dominated_blocks_.push_back(header);
header->SetDominator(new_pre_header);
size_t index_of_header = 0;
@@ -1785,7 +1764,7 @@ void HInstruction::SetReferenceTypeInfo(ReferenceTypeInfo rti) {
DCHECK(upper_bound_rti.IsSupertypeOf(rti))
<< " upper_bound_rti: " << upper_bound_rti
<< " rti: " << rti;
- DCHECK(!upper_bound_rti.GetTypeHandle()->IsFinal() || rti.IsExact());
+ DCHECK(!upper_bound_rti.GetTypeHandle()->CannotBeAssignedFromOtherTypes() || rti.IsExact());
}
}
reference_type_info_ = rti;
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 23d605b7b5..d52a4f7575 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -17,11 +17,13 @@
#ifndef ART_COMPILER_OPTIMIZING_NODES_H_
#define ART_COMPILER_OPTIMIZING_NODES_H_
+#include <algorithm>
#include <array>
#include <type_traits>
#include "base/arena_containers.h"
#include "base/arena_object.h"
+#include "base/stl_util.h"
#include "dex/compiler_enums.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "handle.h"
@@ -155,6 +157,7 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
number_of_in_vregs_(0),
temporaries_vreg_slots_(0),
has_bounds_checks_(false),
+ has_try_catch_(false),
debuggable_(debuggable),
current_instruction_id_(start_instruction_id),
dex_file_(dex_file),
@@ -280,7 +283,6 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
}
uint16_t GetNumberOfVRegs() const {
- DCHECK(!in_ssa_form_);
return number_of_vregs_;
}
@@ -358,8 +360,8 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
return instruction_set_;
}
- // TODO: Remove once the full compilation pipeline is enabled for try/catch.
- bool HasTryCatch() const;
+ bool HasTryCatch() const { return has_try_catch_; }
+ void SetHasTryCatch(bool value) { has_try_catch_ = value; }
private:
void VisitBlockForDominatorTree(HBasicBlock* block,
@@ -431,6 +433,10 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
// Has bounds checks. We can totally skip BCE if it's false.
bool has_bounds_checks_;
+ // Flag whether there are any try/catch blocks in the graph. We will skip
+ // try/catch-related passes if false.
+ bool has_try_catch_;
+
// Indicates whether the graph should be compiled in a way that
// ensures full debuggability. If false, we can apply more
// aggressive optimizations that may limit the level of debugging.
@@ -630,26 +636,44 @@ class HBasicBlock : public ArenaObject<kArenaAllocBasicBlock> {
public:
HBasicBlock(HGraph* graph, uint32_t dex_pc = kNoDexPc)
: graph_(graph),
- predecessors_(graph->GetArena(), kDefaultNumberOfPredecessors),
- successors_(graph->GetArena(), kDefaultNumberOfSuccessors),
+ predecessors_(graph->GetArena()->Adapter(kArenaAllocPredecessors)),
+ successors_(graph->GetArena()->Adapter(kArenaAllocSuccessors)),
loop_information_(nullptr),
dominator_(nullptr),
- dominated_blocks_(graph->GetArena(), kDefaultNumberOfDominatedBlocks),
+ dominated_blocks_(graph->GetArena()->Adapter(kArenaAllocDominated)),
block_id_(-1),
dex_pc_(dex_pc),
lifetime_start_(kNoLifetime),
lifetime_end_(kNoLifetime),
- try_catch_information_(nullptr) {}
+ try_catch_information_(nullptr) {
+ predecessors_.reserve(kDefaultNumberOfPredecessors);
+ successors_.reserve(kDefaultNumberOfSuccessors);
+ dominated_blocks_.reserve(kDefaultNumberOfDominatedBlocks);
+ }
- const GrowableArray<HBasicBlock*>& GetPredecessors() const {
+ const ArenaVector<HBasicBlock*>& GetPredecessors() const {
return predecessors_;
}
- const GrowableArray<HBasicBlock*>& GetSuccessors() const {
+ HBasicBlock* GetPredecessor(size_t pred_idx) const {
+ DCHECK_LT(pred_idx, predecessors_.size());
+ return predecessors_[pred_idx];
+ }
+
+ const ArenaVector<HBasicBlock*>& GetSuccessors() const {
return successors_;
}
- const GrowableArray<HBasicBlock*>& GetDominatedBlocks() const {
+ HBasicBlock* GetSuccessor(size_t succ_idx) const {
+ DCHECK_LT(succ_idx, successors_.size());
+ return successors_[succ_idx];
+ }
+
+ bool HasSuccessor(const HBasicBlock* block, size_t start_from = 0u) {
+ return ContainsElement(successors_, block, start_from);
+ }
+
+ const ArenaVector<HBasicBlock*>& GetDominatedBlocks() const {
return dominated_blocks_;
}
@@ -689,18 +713,16 @@ class HBasicBlock : public ArenaObject<kArenaAllocBasicBlock> {
HBasicBlock* GetDominator() const { return dominator_; }
void SetDominator(HBasicBlock* dominator) { dominator_ = dominator; }
- void AddDominatedBlock(HBasicBlock* block) { dominated_blocks_.Add(block); }
- void RemoveDominatedBlock(HBasicBlock* block) { dominated_blocks_.Delete(block); }
+ void AddDominatedBlock(HBasicBlock* block) { dominated_blocks_.push_back(block); }
+
+ void RemoveDominatedBlock(HBasicBlock* block) {
+ RemoveElement(dominated_blocks_, block);
+ }
+
void ReplaceDominatedBlock(HBasicBlock* existing, HBasicBlock* new_block) {
- for (size_t i = 0, e = dominated_blocks_.Size(); i < e; ++i) {
- if (dominated_blocks_.Get(i) == existing) {
- dominated_blocks_.Put(i, new_block);
- return;
- }
- }
- LOG(FATAL) << "Unreachable";
- UNREACHABLE();
+ ReplaceElement(dominated_blocks_, existing, new_block);
}
+
void ClearDominanceInformation();
int NumberOfBackEdges() const {
@@ -715,24 +737,22 @@ class HBasicBlock : public ArenaObject<kArenaAllocBasicBlock> {
const HInstructionList& GetPhis() const { return phis_; }
void AddSuccessor(HBasicBlock* block) {
- successors_.Add(block);
- block->predecessors_.Add(this);
+ successors_.push_back(block);
+ block->predecessors_.push_back(this);
}
void ReplaceSuccessor(HBasicBlock* existing, HBasicBlock* new_block) {
size_t successor_index = GetSuccessorIndexOf(existing);
- DCHECK_NE(successor_index, static_cast<size_t>(-1));
existing->RemovePredecessor(this);
- new_block->predecessors_.Add(this);
- successors_.Put(successor_index, new_block);
+ new_block->predecessors_.push_back(this);
+ successors_[successor_index] = new_block;
}
void ReplacePredecessor(HBasicBlock* existing, HBasicBlock* new_block) {
size_t predecessor_index = GetPredecessorIndexOf(existing);
- DCHECK_NE(predecessor_index, static_cast<size_t>(-1));
existing->RemoveSuccessor(this);
- new_block->successors_.Add(this);
- predecessors_.Put(predecessor_index, new_block);
+ new_block->successors_.push_back(this);
+ predecessors_[predecessor_index] = new_block;
}
// Insert `this` between `predecessor` and `successor. This method
@@ -740,85 +760,69 @@ class HBasicBlock : public ArenaObject<kArenaAllocBasicBlock> {
// `predecessor` and `successor`.
void InsertBetween(HBasicBlock* predecessor, HBasicBlock* successor) {
size_t predecessor_index = successor->GetPredecessorIndexOf(predecessor);
- DCHECK_NE(predecessor_index, static_cast<size_t>(-1));
size_t successor_index = predecessor->GetSuccessorIndexOf(successor);
- DCHECK_NE(successor_index, static_cast<size_t>(-1));
- successor->predecessors_.Put(predecessor_index, this);
- predecessor->successors_.Put(successor_index, this);
- successors_.Add(successor);
- predecessors_.Add(predecessor);
+ successor->predecessors_[predecessor_index] = this;
+ predecessor->successors_[successor_index] = this;
+ successors_.push_back(successor);
+ predecessors_.push_back(predecessor);
}
void RemovePredecessor(HBasicBlock* block) {
- predecessors_.Delete(block);
+ predecessors_.erase(predecessors_.begin() + GetPredecessorIndexOf(block));
}
void RemoveSuccessor(HBasicBlock* block) {
- successors_.Delete(block);
+ successors_.erase(successors_.begin() + GetSuccessorIndexOf(block));
}
void ClearAllPredecessors() {
- predecessors_.Reset();
+ predecessors_.clear();
}
void AddPredecessor(HBasicBlock* block) {
- predecessors_.Add(block);
- block->successors_.Add(this);
+ predecessors_.push_back(block);
+ block->successors_.push_back(this);
}
void SwapPredecessors() {
- DCHECK_EQ(predecessors_.Size(), 2u);
- HBasicBlock* temp = predecessors_.Get(0);
- predecessors_.Put(0, predecessors_.Get(1));
- predecessors_.Put(1, temp);
+ DCHECK_EQ(predecessors_.size(), 2u);
+ std::swap(predecessors_[0], predecessors_[1]);
}
void SwapSuccessors() {
- DCHECK_EQ(successors_.Size(), 2u);
- HBasicBlock* temp = successors_.Get(0);
- successors_.Put(0, successors_.Get(1));
- successors_.Put(1, temp);
+ DCHECK_EQ(successors_.size(), 2u);
+ std::swap(successors_[0], successors_[1]);
}
size_t GetPredecessorIndexOf(HBasicBlock* predecessor) const {
- for (size_t i = 0, e = predecessors_.Size(); i < e; ++i) {
- if (predecessors_.Get(i) == predecessor) {
- return i;
- }
- }
- return -1;
+ return IndexOfElement(predecessors_, predecessor);
}
size_t GetSuccessorIndexOf(HBasicBlock* successor) const {
- for (size_t i = 0, e = successors_.Size(); i < e; ++i) {
- if (successors_.Get(i) == successor) {
- return i;
- }
- }
- return -1;
+ return IndexOfElement(successors_, successor);
}
HBasicBlock* GetSinglePredecessor() const {
- DCHECK_EQ(GetPredecessors().Size(), 1u);
- return GetPredecessors().Get(0);
+ DCHECK_EQ(GetPredecessors().size(), 1u);
+ return GetPredecessor(0);
}
HBasicBlock* GetSingleSuccessor() const {
- DCHECK_EQ(GetSuccessors().Size(), 1u);
- return GetSuccessors().Get(0);
+ DCHECK_EQ(GetSuccessors().size(), 1u);
+ return GetSuccessor(0);
}
// Returns whether the first occurrence of `predecessor` in the list of
// predecessors is at index `idx`.
bool IsFirstIndexOfPredecessor(HBasicBlock* predecessor, size_t idx) const {
- DCHECK_EQ(GetPredecessors().Get(idx), predecessor);
+ DCHECK_EQ(GetPredecessor(idx), predecessor);
return GetPredecessorIndexOf(predecessor) == idx;
}
// Returns the number of non-exceptional successors. SsaChecker ensures that
// these are stored at the beginning of the successor list.
size_t NumberOfNormalSuccessors() const {
- return EndsWithTryBoundary() ? 1 : GetSuccessors().Size();
+ return EndsWithTryBoundary() ? 1 : GetSuccessors().size();
}
// Split the block into two blocks just before `cursor`. Returns the newly
@@ -883,8 +887,7 @@ class HBasicBlock : public ArenaObject<kArenaAllocBasicBlock> {
bool IsLoopPreHeaderFirstPredecessor() const {
DCHECK(IsLoopHeader());
- DCHECK(!GetPredecessors().IsEmpty());
- return GetPredecessors().Get(0) == GetLoopInformation()->GetPreHeader();
+ return GetPredecessor(0) == GetLoopInformation()->GetPreHeader();
}
HLoopInformation* GetLoopInformation() const {
@@ -954,13 +957,13 @@ class HBasicBlock : public ArenaObject<kArenaAllocBasicBlock> {
private:
HGraph* graph_;
- GrowableArray<HBasicBlock*> predecessors_;
- GrowableArray<HBasicBlock*> successors_;
+ ArenaVector<HBasicBlock*> predecessors_;
+ ArenaVector<HBasicBlock*> successors_;
HInstructionList instructions_;
HInstructionList phis_;
HLoopInformation* loop_information_;
HBasicBlock* dominator_;
- GrowableArray<HBasicBlock*> dominated_blocks_;
+ ArenaVector<HBasicBlock*> dominated_blocks_;
int block_id_;
// The dex program counter of the first instruction of this block.
const uint32_t dex_pc_;
@@ -2188,6 +2191,8 @@ class HConstant : public HExpression<0> {
virtual bool IsZero() const { return false; }
virtual bool IsOne() const { return false; }
+ virtual uint64_t GetValueAsUint64() const = 0;
+
DECLARE_INSTRUCTION(Constant);
private:
@@ -2200,6 +2205,8 @@ class HNullConstant : public HConstant {
return true;
}
+ uint64_t GetValueAsUint64() const OVERRIDE { return 0; }
+
size_t ComputeHashCode() const OVERRIDE { return 0; }
DECLARE_INSTRUCTION(NullConstant);
@@ -2217,6 +2224,8 @@ class HIntConstant : public HConstant {
public:
int32_t GetValue() const { return value_; }
+ uint64_t GetValueAsUint64() const OVERRIDE { return static_cast<uint64_t>(value_); }
+
bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
DCHECK(other->IsIntConstant());
return other->AsIntConstant()->value_ == value_;
@@ -2248,6 +2257,8 @@ class HLongConstant : public HConstant {
public:
int64_t GetValue() const { return value_; }
+ uint64_t GetValueAsUint64() const OVERRIDE { return value_; }
+
bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
DCHECK(other->IsLongConstant());
return other->AsLongConstant()->value_ == value_;
@@ -2283,11 +2294,11 @@ class HIf : public HTemplateInstruction<1> {
bool IsControlFlow() const OVERRIDE { return true; }
HBasicBlock* IfTrueSuccessor() const {
- return GetBlock()->GetSuccessors().Get(0);
+ return GetBlock()->GetSuccessor(0);
}
HBasicBlock* IfFalseSuccessor() const {
- return GetBlock()->GetSuccessors().Get(1);
+ return GetBlock()->GetSuccessor(1);
}
DECLARE_INSTRUCTION(If);
@@ -2315,14 +2326,13 @@ class HTryBoundary : public HTemplateInstruction<0> {
bool IsControlFlow() const OVERRIDE { return true; }
// Returns the block's non-exceptional successor (index zero).
- HBasicBlock* GetNormalFlowSuccessor() const { return GetBlock()->GetSuccessors().Get(0); }
+ HBasicBlock* GetNormalFlowSuccessor() const { return GetBlock()->GetSuccessor(0); }
// Returns whether `handler` is among its exception handlers (non-zero index
// successors).
bool HasExceptionHandler(const HBasicBlock& handler) const {
DCHECK(handler.IsCatchBlock());
- return GetBlock()->GetSuccessors().Contains(
- const_cast<HBasicBlock*>(&handler), /* start_from */ 1);
+ return GetBlock()->HasSuccessor(&handler, 1u /* Skip first successor. */);
}
// If not present already, adds `handler` to its block's list of exception
@@ -2352,8 +2362,8 @@ class HExceptionHandlerIterator : public ValueObject {
explicit HExceptionHandlerIterator(const HTryBoundary& try_boundary)
: block_(*try_boundary.GetBlock()), index_(block_.NumberOfNormalSuccessors()) {}
- bool Done() const { return index_ == block_.GetSuccessors().Size(); }
- HBasicBlock* Current() const { return block_.GetSuccessors().Get(index_); }
+ bool Done() const { return index_ == block_.GetSuccessors().size(); }
+ HBasicBlock* Current() const { return block_.GetSuccessor(index_); }
size_t CurrentSuccessorIndex() const { return index_; }
void Advance() { ++index_; }
@@ -2868,10 +2878,13 @@ class HFloatConstant : public HConstant {
public:
float GetValue() const { return value_; }
+ uint64_t GetValueAsUint64() const OVERRIDE {
+ return static_cast<uint64_t>(bit_cast<uint32_t, float>(value_));
+ }
+
bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
DCHECK(other->IsFloatConstant());
- return bit_cast<uint32_t, float>(other->AsFloatConstant()->value_) ==
- bit_cast<uint32_t, float>(value_);
+ return other->AsFloatConstant()->GetValueAsUint64() == GetValueAsUint64();
}
size_t ComputeHashCode() const OVERRIDE { return static_cast<size_t>(GetValue()); }
@@ -2909,10 +2922,11 @@ class HDoubleConstant : public HConstant {
public:
double GetValue() const { return value_; }
+ uint64_t GetValueAsUint64() const OVERRIDE { return bit_cast<uint64_t, double>(value_); }
+
bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
DCHECK(other->IsDoubleConstant());
- return bit_cast<uint64_t, double>(other->AsDoubleConstant()->value_) ==
- bit_cast<uint64_t, double>(value_);
+ return other->AsDoubleConstant()->GetValueAsUint64() == GetValueAsUint64();
}
size_t ComputeHashCode() const OVERRIDE { return static_cast<size_t>(GetValue()); }
@@ -4005,6 +4019,13 @@ class HPhi : public HInstruction {
bool IsDead() const { return !is_live_; }
bool IsLive() const { return is_live_; }
+ bool IsVRegEquivalentOf(HInstruction* other) const {
+ return other != nullptr
+ && other->IsPhi()
+ && other->AsPhi()->GetBlock() == GetBlock()
+ && other->AsPhi()->GetRegNumber() == GetRegNumber();
+ }
+
// Returns the next equivalent phi (starting from the current one) or null if there is none.
// An equivalent phi is a phi having the same dex register and type.
// It assumes that phis with the same dex register are adjacent.
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index f549ba8391..8fc1e4e47d 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -51,6 +51,7 @@
#include "graph_checker.h"
#include "graph_visualizer.h"
#include "gvn.h"
+#include "induction_var_analysis.h"
#include "inliner.h"
#include "instruction_simplifier.h"
#include "intrinsics.h"
@@ -462,7 +463,8 @@ static void RunOptimizations(HGraph* graph,
SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph);
GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects);
LICM* licm = new (arena) LICM(graph, *side_effects);
- BoundsCheckElimination* bce = new (arena) BoundsCheckElimination(graph);
+ HInductionVarAnalysis* induction = new (arena) HInductionVarAnalysis(graph);
+ BoundsCheckElimination* bce = new (arena) BoundsCheckElimination(graph, induction);
ReferenceTypePropagation* type_propagation =
new (arena) ReferenceTypePropagation(graph, handles);
InstructionSimplifier* simplify2 = new (arena) InstructionSimplifier(
@@ -485,34 +487,44 @@ static void RunOptimizations(HGraph* graph,
RunOptimizations(optimizations1, arraysize(optimizations1), pass_observer);
+ // TODO: Update passes incompatible with try/catch so we have the same
+ // pipeline for all methods.
if (graph->HasTryCatch()) {
- // TODO: Update the optimizations below to work correctly under try/catch
- // semantics. The optimizations above suffice for running codegen
- // in the meanwhile.
- return;
+ HOptimization* optimizations2[] = {
+ side_effects,
+ gvn,
+ dce2,
+ // The codegen has a few assumptions that only the instruction simplifier
+ // can satisfy. For example, the code generator does not expect to see a
+ // HTypeConversion from a type to the same type.
+ simplify4,
+ };
+
+ RunOptimizations(optimizations2, arraysize(optimizations2), pass_observer);
+ } else {
+ MaybeRunInliner(graph, driver, stats, dex_compilation_unit, pass_observer, handles);
+
+ HOptimization* optimizations2[] = {
+ // BooleanSimplifier depends on the InstructionSimplifier removing
+ // redundant suspend checks to recognize empty blocks.
+ boolean_simplify,
+ fold2, // TODO: if we don't inline we can also skip fold2.
+ side_effects,
+ gvn,
+ licm,
+ induction,
+ bce,
+ simplify3,
+ dce2,
+ // The codegen has a few assumptions that only the instruction simplifier
+ // can satisfy. For example, the code generator does not expect to see a
+ // HTypeConversion from a type to the same type.
+ simplify4,
+ };
+
+ RunOptimizations(optimizations2, arraysize(optimizations2), pass_observer);
}
- MaybeRunInliner(graph, driver, stats, dex_compilation_unit, pass_observer, handles);
-
- HOptimization* optimizations2[] = {
- // BooleanSimplifier depends on the InstructionSimplifier removing redundant
- // suspend checks to recognize empty blocks.
- boolean_simplify,
- fold2, // TODO: if we don't inline we can also skip fold2.
- side_effects,
- gvn,
- licm,
- bce,
- simplify3,
- dce2,
- // The codegen has a few assumptions that only the instruction simplifier can
- // satisfy. For example, the code generator does not expect to see a
- // HTypeConversion from a type to the same type.
- simplify4,
- };
-
- RunOptimizations(optimizations2, arraysize(optimizations2), pass_observer);
-
RunArchOptimizations(driver->GetInstructionSet(), graph, stats, pass_observer);
}
@@ -560,17 +572,18 @@ CompiledMethod* OptimizingCompiler::CompileOptimized(HGraph* graph,
CompilerDriver* compiler_driver,
const DexCompilationUnit& dex_compilation_unit,
PassObserver* pass_observer) const {
+ if (graph->HasTryCatch() && graph->IsDebuggable()) {
+ // TODO: b/24054676, stop creating catch phis eagerly to avoid special cases like phis without
+ // inputs.
+ return nullptr;
+ }
+
ScopedObjectAccess soa(Thread::Current());
StackHandleScopeCollection handles(soa.Self());
soa.Self()->TransitionFromRunnableToSuspended(kNative);
RunOptimizations(graph, compiler_driver, compilation_stats_.get(),
dex_compilation_unit, pass_observer, &handles);
- if (graph->HasTryCatch()) {
- soa.Self()->TransitionFromSuspendedToRunnable();
- return nullptr;
- }
-
AllocateRegisters(graph, codegen, pass_observer);
ArenaAllocator* arena = graph->GetArena();
diff --git a/compiler/optimizing/pretty_printer.h b/compiler/optimizing/pretty_printer.h
index 934514eeda..34850a564c 100644
--- a/compiler/optimizing/pretty_printer.h
+++ b/compiler/optimizing/pretty_printer.h
@@ -71,23 +71,23 @@ class HPrettyPrinter : public HGraphVisitor {
void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
PrintString("BasicBlock ");
PrintInt(block->GetBlockId());
- const GrowableArray<HBasicBlock*>& predecessors = block->GetPredecessors();
- if (!predecessors.IsEmpty()) {
+ const ArenaVector<HBasicBlock*>& predecessors = block->GetPredecessors();
+ if (!predecessors.empty()) {
PrintString(", pred: ");
- for (size_t i = 0; i < predecessors.Size() -1; i++) {
- PrintInt(predecessors.Get(i)->GetBlockId());
+ for (size_t i = 0; i < predecessors.size() -1; i++) {
+ PrintInt(predecessors[i]->GetBlockId());
PrintString(", ");
}
- PrintInt(predecessors.Peek()->GetBlockId());
+ PrintInt(predecessors.back()->GetBlockId());
}
- const GrowableArray<HBasicBlock*>& successors = block->GetSuccessors();
- if (!successors.IsEmpty()) {
+ const ArenaVector<HBasicBlock*>& successors = block->GetSuccessors();
+ if (!successors.empty()) {
PrintString(", succ: ");
- for (size_t i = 0; i < successors.Size() - 1; i++) {
- PrintInt(successors.Get(i)->GetBlockId());
+ for (size_t i = 0; i < successors.size() - 1; i++) {
+ PrintInt(successors[i]->GetBlockId());
PrintString(", ");
}
- PrintInt(successors.Peek()->GetBlockId());
+ PrintInt(successors.back()->GetBlockId());
}
PrintNewLine();
HGraphVisitor::VisitBasicBlock(block);
@@ -131,7 +131,7 @@ class StringPrettyPrinter : public HPrettyPrinter {
PrintString(" ");
PrintInt(gota->GetId());
PrintString(": Goto ");
- PrintInt(current_block_->GetSuccessors().Get(0)->GetBlockId());
+ PrintInt(current_block_->GetSuccessor(0)->GetBlockId());
PrintNewLine();
}
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index 0384e46671..a88c5431c5 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -167,7 +167,7 @@ static HBoundType* CreateBoundType(ArenaAllocator* arena,
ReferenceTypeInfo class_rti = load_class->GetLoadedClassRTI();
HBoundType* bound_type = new (arena) HBoundType(obj, class_rti, upper_can_be_null);
// Narrow the type as much as possible.
- if (class_rti.GetTypeHandle()->IsFinal()) {
+ if (class_rti.GetTypeHandle()->CannotBeAssignedFromOtherTypes()) {
bound_type->SetReferenceTypeInfo(
ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), /* is_exact */ true));
} else if (obj_rti.IsValid() && class_rti.IsSupertypeOf(obj_rti)) {
@@ -380,7 +380,7 @@ void RTPVisitor::SetClassAsTypeInfo(HInstruction* instr,
} else if (klass != nullptr) {
ScopedObjectAccess soa(Thread::Current());
ReferenceTypeInfo::TypeHandle handle = handles_->NewHandle(klass);
- is_exact = is_exact || klass->IsFinal();
+ is_exact = is_exact || klass->CannotBeAssignedFromOtherTypes();
instr->SetReferenceTypeInfo(ReferenceTypeInfo::Create(handle, is_exact));
} else {
instr->SetReferenceTypeInfo(
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index 37c8bc5f51..a4f1f458fd 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -56,6 +56,7 @@ RegisterAllocator::RegisterAllocator(ArenaAllocator* allocator,
long_spill_slots_(allocator, kDefaultNumberOfSpillSlots),
float_spill_slots_(allocator, kDefaultNumberOfSpillSlots),
double_spill_slots_(allocator, kDefaultNumberOfSpillSlots),
+ catch_phi_spill_slots_(0),
safepoints_(allocator, 0),
processing_core_registers_(false),
number_of_registers_(-1),
@@ -124,9 +125,7 @@ void RegisterAllocator::AllocateRegisters() {
}
}
-void RegisterAllocator::BlockRegister(Location location,
- size_t start,
- size_t end) {
+void RegisterAllocator::BlockRegister(Location location, size_t start, size_t end) {
int reg = location.reg();
DCHECK(location.IsRegister() || location.IsFpuRegister());
LiveInterval* interval = location.IsRegister()
@@ -147,6 +146,19 @@ void RegisterAllocator::BlockRegister(Location location,
interval->AddRange(start, end);
}
+void RegisterAllocator::BlockRegisters(size_t start, size_t end, bool caller_save_only) {
+ for (size_t i = 0; i < codegen_->GetNumberOfCoreRegisters(); ++i) {
+ if (!caller_save_only || !codegen_->IsCoreCalleeSaveRegister(i)) {
+ BlockRegister(Location::RegisterLocation(i), start, end);
+ }
+ }
+ for (size_t i = 0; i < codegen_->GetNumberOfFloatingPointRegisters(); ++i) {
+ if (!caller_save_only || !codegen_->IsFloatingPointCalleeSaveRegister(i)) {
+ BlockRegister(Location::FpuRegisterLocation(i), start, end);
+ }
+ }
+}
+
void RegisterAllocator::AllocateRegistersInternal() {
// Iterate post-order, to ensure the list is sorted, and the last added interval
// is the one with the lowest start position.
@@ -159,6 +171,13 @@ void RegisterAllocator::AllocateRegistersInternal() {
for (HInstructionIterator inst_it(block->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
ProcessInstruction(inst_it.Current());
}
+
+ if (block->IsCatchBlock()) {
+ // By blocking all registers at the top of each catch block, we force
+ // intervals used after catch to spill.
+ size_t position = block->GetLifetimeStart();
+ BlockRegisters(position, position + 1);
+ }
}
number_of_registers_ = codegen_->GetNumberOfCoreRegisters();
@@ -275,21 +294,7 @@ void RegisterAllocator::ProcessInstruction(HInstruction* instruction) {
}
if (locations->WillCall()) {
- // Block all registers.
- for (size_t i = 0; i < codegen_->GetNumberOfCoreRegisters(); ++i) {
- if (!codegen_->IsCoreCalleeSaveRegister(i)) {
- BlockRegister(Location::RegisterLocation(i),
- position,
- position + 1);
- }
- }
- for (size_t i = 0; i < codegen_->GetNumberOfFloatingPointRegisters(); ++i) {
- if (!codegen_->IsFloatingPointCalleeSaveRegister(i)) {
- BlockRegister(Location::FpuRegisterLocation(i),
- position,
- position + 1);
- }
- }
+ BlockRegisters(position, position + 1, /* caller_save_only */ true);
}
for (size_t i = 0; i < instruction->InputCount(); ++i) {
@@ -378,6 +383,10 @@ void RegisterAllocator::ProcessInstruction(HInstruction* instruction) {
DCHECK(output.IsUnallocated() || output.IsConstant());
}
+ if (instruction->IsPhi() && instruction->AsPhi()->IsCatchPhi()) {
+ AllocateSpillSlotForCatchPhi(instruction->AsPhi());
+ }
+
// If needed, add interval to the list of unhandled intervals.
if (current->HasSpillSlot() || instruction->IsConstant()) {
// Split just before first register use.
@@ -1212,14 +1221,13 @@ LiveInterval* RegisterAllocator::SplitBetween(LiveInterval* interval, size_t fro
* moves in B3.
*/
if (block_from->GetDominator() != nullptr) {
- const GrowableArray<HBasicBlock*>& dominated = block_from->GetDominator()->GetDominatedBlocks();
- for (size_t i = 0; i < dominated.Size(); ++i) {
- size_t position = dominated.Get(i)->GetLifetimeStart();
+ for (HBasicBlock* dominated : block_from->GetDominator()->GetDominatedBlocks()) {
+ size_t position = dominated->GetLifetimeStart();
if ((position > from) && (block_to->GetLifetimeStart() > position)) {
// Even if we found a better block, we continue iterating in case
// a dominated block is closer.
// Note that dominated blocks are not sorted in liveness order.
- block_to = dominated.Get(i);
+ block_to = dominated;
DCHECK_NE(block_to, block_from);
}
}
@@ -1283,6 +1291,8 @@ void RegisterAllocator::AllocateSpillSlotFor(LiveInterval* interval) {
}
HInstruction* defined_by = parent->GetDefinedBy();
+ DCHECK(!defined_by->IsPhi() || !defined_by->AsPhi()->IsCatchPhi());
+
if (defined_by->IsParameterValue()) {
// Parameters have their own stack slot.
parent->SetSpillSlot(codegen_->GetStackSlotOfParameter(defined_by->AsParameterValue()));
@@ -1299,12 +1309,6 @@ void RegisterAllocator::AllocateSpillSlotFor(LiveInterval* interval) {
return;
}
- LiveInterval* last_sibling = interval;
- while (last_sibling->GetNextSibling() != nullptr) {
- last_sibling = last_sibling->GetNextSibling();
- }
- size_t end = last_sibling->GetEnd();
-
GrowableArray<size_t>* spill_slots = nullptr;
switch (interval->GetType()) {
case Primitive::kPrimDouble:
@@ -1337,6 +1341,7 @@ void RegisterAllocator::AllocateSpillSlotFor(LiveInterval* interval) {
}
}
+ size_t end = interval->GetLastSibling()->GetEnd();
if (parent->NeedsTwoSpillSlots()) {
if (slot == spill_slots->Size()) {
// We need a new spill slot.
@@ -1372,6 +1377,28 @@ static bool IsValidDestination(Location destination) {
|| destination.IsDoubleStackSlot();
}
+void RegisterAllocator::AllocateSpillSlotForCatchPhi(HPhi* phi) {
+ LiveInterval* interval = phi->GetLiveInterval();
+
+ HInstruction* previous_phi = phi->GetPrevious();
+ DCHECK(previous_phi == nullptr ||
+ previous_phi->AsPhi()->GetRegNumber() <= phi->GetRegNumber())
+ << "Phis expected to be sorted by vreg number, so that equivalent phis are adjacent.";
+
+ if (phi->IsVRegEquivalentOf(previous_phi)) {
+ // This is an equivalent of the previous phi. We need to assign the same
+ // catch phi slot.
+ DCHECK(previous_phi->GetLiveInterval()->HasSpillSlot());
+ interval->SetSpillSlot(previous_phi->GetLiveInterval()->GetSpillSlot());
+ } else {
+ // Allocate a new spill slot for this catch phi.
+ // TODO: Reuse spill slots when intervals of phis from different catch
+ // blocks do not overlap.
+ interval->SetSpillSlot(catch_phi_spill_slots_);
+ catch_phi_spill_slots_ += interval->NeedsTwoSpillSlots() ? 2 : 1;
+ }
+}
+
void RegisterAllocator::AddMove(HParallelMove* move,
Location source,
Location destination,
@@ -1498,7 +1525,7 @@ void RegisterAllocator::InsertParallelMoveAtExitOf(HBasicBlock* block,
DCHECK(IsValidDestination(destination)) << destination;
if (source.Equals(destination)) return;
- DCHECK_EQ(block->GetSuccessors().Size(), 1u);
+ DCHECK_EQ(block->NumberOfNormalSuccessors(), 1u);
HInstruction* last = block->GetLastInstruction();
// We insert moves at exit for phi predecessors and connecting blocks.
// A block ending with an if cannot branch to a block with phis because
@@ -1725,13 +1752,13 @@ void RegisterAllocator::ConnectSplitSiblings(LiveInterval* interval,
// If `from` has only one successor, we can put the moves at the exit of it. Otherwise
// we need to put the moves at the entry of `to`.
- if (from->GetSuccessors().Size() == 1) {
+ if (from->NumberOfNormalSuccessors() == 1) {
InsertParallelMoveAtExitOf(from,
interval->GetParent()->GetDefinedBy(),
source->ToLocation(),
destination->ToLocation());
} else {
- DCHECK_EQ(to->GetPredecessors().Size(), 1u);
+ DCHECK_EQ(to->GetPredecessors().size(), 1u);
InsertParallelMoveAtEntryOf(to,
interval->GetParent()->GetDefinedBy(),
source->ToLocation(),
@@ -1769,17 +1796,25 @@ void RegisterAllocator::Resolve() {
} else if (instruction->IsCurrentMethod()) {
// The current method is always at offset 0.
DCHECK(!current->HasSpillSlot() || (current->GetSpillSlot() == 0));
+ } else if (instruction->IsPhi() && instruction->AsPhi()->IsCatchPhi()) {
+ DCHECK(current->HasSpillSlot());
+ size_t slot = current->GetSpillSlot()
+ + GetNumberOfSpillSlots()
+ + reserved_out_slots_
+ - catch_phi_spill_slots_;
+ current->SetSpillSlot(slot * kVRegSize);
} else if (current->HasSpillSlot()) {
// Adjust the stack slot, now that we know the number of them for each type.
// The way this implementation lays out the stack is the following:
- // [parameter slots ]
- // [double spill slots ]
- // [long spill slots ]
- // [float spill slots ]
- // [int/ref values ]
- // [maximum out values ] (number of arguments for calls)
- // [art method ].
- uint32_t slot = current->GetSpillSlot();
+ // [parameter slots ]
+ // [catch phi spill slots ]
+ // [double spill slots ]
+ // [long spill slots ]
+ // [float spill slots ]
+ // [int/ref values ]
+ // [maximum out values ] (number of arguments for calls)
+ // [art method ].
+ size_t slot = current->GetSpillSlot();
switch (current->GetType()) {
case Primitive::kPrimDouble:
slot += long_spill_slots_.Size();
@@ -1829,12 +1864,22 @@ void RegisterAllocator::Resolve() {
// Resolve non-linear control flow across branches. Order does not matter.
for (HLinearOrderIterator it(*codegen_->GetGraph()); !it.Done(); it.Advance()) {
HBasicBlock* block = it.Current();
- BitVector* live = liveness_.GetLiveInSet(*block);
- for (uint32_t idx : live->Indexes()) {
- HInstruction* current = liveness_.GetInstructionFromSsaIndex(idx);
- LiveInterval* interval = current->GetLiveInterval();
- for (size_t i = 0, e = block->GetPredecessors().Size(); i < e; ++i) {
- ConnectSplitSiblings(interval, block->GetPredecessors().Get(i), block);
+ if (block->IsCatchBlock()) {
+ // Instructions live at the top of catch blocks were forced to spill.
+ if (kIsDebugBuild) {
+ BitVector* live = liveness_.GetLiveInSet(*block);
+ for (uint32_t idx : live->Indexes()) {
+ LiveInterval* interval = liveness_.GetInstructionFromSsaIndex(idx)->GetLiveInterval();
+ DCHECK(!interval->GetSiblingAt(block->GetLifetimeStart())->HasRegister());
+ }
+ }
+ } else {
+ BitVector* live = liveness_.GetLiveInSet(*block);
+ for (uint32_t idx : live->Indexes()) {
+ LiveInterval* interval = liveness_.GetInstructionFromSsaIndex(idx)->GetLiveInterval();
+ for (HBasicBlock* predecessor : block->GetPredecessors()) {
+ ConnectSplitSiblings(interval, predecessor, block);
+ }
}
}
}
@@ -1842,16 +1887,20 @@ void RegisterAllocator::Resolve() {
// Resolve phi inputs. Order does not matter.
for (HLinearOrderIterator it(*codegen_->GetGraph()); !it.Done(); it.Advance()) {
HBasicBlock* current = it.Current();
- for (HInstructionIterator inst_it(current->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
- HInstruction* phi = inst_it.Current();
- for (size_t i = 0, e = current->GetPredecessors().Size(); i < e; ++i) {
- HBasicBlock* predecessor = current->GetPredecessors().Get(i);
- DCHECK_EQ(predecessor->GetSuccessors().Size(), 1u);
- HInstruction* input = phi->InputAt(i);
- Location source = input->GetLiveInterval()->GetLocationAt(
- predecessor->GetLifetimeEnd() - 1);
- Location destination = phi->GetLiveInterval()->ToLocation();
- InsertParallelMoveAtExitOf(predecessor, phi, source, destination);
+ if (current->IsCatchBlock()) {
+ // Catch phi values are set at runtime by the exception delivery mechanism.
+ } else {
+ for (HInstructionIterator inst_it(current->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
+ HInstruction* phi = inst_it.Current();
+ for (size_t i = 0, e = current->GetPredecessors().size(); i < e; ++i) {
+ HBasicBlock* predecessor = current->GetPredecessor(i);
+ DCHECK_EQ(predecessor->NumberOfNormalSuccessors(), 1u);
+ HInstruction* input = phi->InputAt(i);
+ Location source = input->GetLiveInterval()->GetLocationAt(
+ predecessor->GetLifetimeEnd() - 1);
+ Location destination = phi->GetLiveInterval()->ToLocation();
+ InsertParallelMoveAtExitOf(predecessor, phi, source, destination);
+ }
}
}
}
diff --git a/compiler/optimizing/register_allocator.h b/compiler/optimizing/register_allocator.h
index c29fe75921..e0304643e6 100644
--- a/compiler/optimizing/register_allocator.h
+++ b/compiler/optimizing/register_allocator.h
@@ -29,6 +29,7 @@ class HBasicBlock;
class HGraph;
class HInstruction;
class HParallelMove;
+class HPhi;
class LiveInterval;
class Location;
class SsaLivenessAnalysis;
@@ -72,7 +73,8 @@ class RegisterAllocator {
return int_spill_slots_.Size()
+ long_spill_slots_.Size()
+ float_spill_slots_.Size()
- + double_spill_slots_.Size();
+ + double_spill_slots_.Size()
+ + catch_phi_spill_slots_;
}
static constexpr const char* kRegisterAllocatorPassName = "register";
@@ -99,10 +101,17 @@ class RegisterAllocator {
// Update the interval for the register in `location` to cover [start, end).
void BlockRegister(Location location, size_t start, size_t end);
+ void BlockRegisters(size_t start, size_t end, bool caller_save_only = false);
- // Allocate a spill slot for the given interval.
+ // Allocate a spill slot for the given interval. Should be called in linear
+ // order of interval starting positions.
void AllocateSpillSlotFor(LiveInterval* interval);
+ // Allocate a spill slot for the given catch phi. Will allocate the same slot
+ // for phis which share the same vreg. Must be called in reverse linear order
+ // of lifetime positions and ascending vreg numbers for correctness.
+ void AllocateSpillSlotForCatchPhi(HPhi* phi);
+
// Connect adjacent siblings within blocks.
void ConnectSiblings(LiveInterval* interval);
@@ -202,6 +211,11 @@ class RegisterAllocator {
GrowableArray<size_t> float_spill_slots_;
GrowableArray<size_t> double_spill_slots_;
+ // Spill slots allocated to catch phis. This category is special-cased because
+ // (1) slots are allocated prior to linear scan and in reverse linear order,
+ // (2) equivalent phis need to share slots despite having different types.
+ size_t catch_phi_spill_slots_;
+
// Instructions that need a safepoint.
GrowableArray<HInstruction*> safepoints_;
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index 561c3b4964..e6209b9583 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -241,8 +241,8 @@ void SsaBuilder::BuildSsa() {
HBasicBlock* block = loop_headers_.Get(i);
for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
HPhi* phi = it.Current()->AsPhi();
- for (size_t pred = 0; pred < block->GetPredecessors().Size(); pred++) {
- HInstruction* input = ValueOfLocal(block->GetPredecessors().Get(pred), phi->GetRegNumber());
+ for (HBasicBlock* predecessor : block->GetPredecessors()) {
+ HInstruction* input = ValueOfLocal(predecessor, phi->GetRegNumber());
phi->AddInput(input);
}
}
@@ -369,16 +369,16 @@ void SsaBuilder::VisitBasicBlock(HBasicBlock* block) {
// Save the loop header so that the last phase of the analysis knows which
// blocks need to be updated.
loop_headers_.Add(block);
- } else if (block->GetPredecessors().Size() > 0) {
+ } else if (block->GetPredecessors().size() > 0) {
// All predecessors have already been visited because we are visiting in reverse post order.
// We merge the values of all locals, creating phis if those values differ.
for (size_t local = 0; local < current_locals_->Size(); local++) {
bool one_predecessor_has_no_value = false;
bool is_different = false;
- HInstruction* value = ValueOfLocal(block->GetPredecessors().Get(0), local);
+ HInstruction* value = ValueOfLocal(block->GetPredecessor(0), local);
- for (size_t i = 0, e = block->GetPredecessors().Size(); i < e; ++i) {
- HInstruction* current = ValueOfLocal(block->GetPredecessors().Get(i), local);
+ for (HBasicBlock* predecessor : block->GetPredecessors()) {
+ HInstruction* current = ValueOfLocal(predecessor, local);
if (current == nullptr) {
one_predecessor_has_no_value = true;
break;
@@ -395,9 +395,9 @@ void SsaBuilder::VisitBasicBlock(HBasicBlock* block) {
if (is_different) {
HPhi* phi = new (GetGraph()->GetArena()) HPhi(
- GetGraph()->GetArena(), local, block->GetPredecessors().Size(), Primitive::kPrimVoid);
- for (size_t i = 0; i < block->GetPredecessors().Size(); i++) {
- HInstruction* pred_value = ValueOfLocal(block->GetPredecessors().Get(i), local);
+ GetGraph()->GetArena(), local, block->GetPredecessors().size(), Primitive::kPrimVoid);
+ for (size_t i = 0; i < block->GetPredecessors().size(); i++) {
+ HInstruction* pred_value = ValueOfLocal(block->GetPredecessor(i), local);
phi->SetRawInputAt(i, pred_value);
}
block->AddPhi(phi);
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index 40502c173b..63635f3127 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -73,7 +73,7 @@ void SsaLivenessAnalysis::LinearizeGraph() {
forward_predecessors.SetSize(graph_->GetBlocks().Size());
for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
HBasicBlock* block = it.Current();
- size_t number_of_forward_predecessors = block->GetPredecessors().Size();
+ size_t number_of_forward_predecessors = block->GetPredecessors().size();
if (block->IsLoopHeader()) {
number_of_forward_predecessors -= block->GetLoopInformation()->NumberOfBackEdges();
}
@@ -89,8 +89,7 @@ void SsaLivenessAnalysis::LinearizeGraph() {
do {
HBasicBlock* current = worklist.Pop();
graph_->linear_order_.Add(current);
- for (size_t i = 0, e = current->GetSuccessors().Size(); i < e; ++i) {
- HBasicBlock* successor = current->GetSuccessors().Get(i);
+ for (HBasicBlock* successor : current->GetSuccessors()) {
int block_id = successor->GetBlockId();
size_t number_of_remaining_predecessors = forward_predecessors.Get(block_id);
if (number_of_remaining_predecessors == 1) {
@@ -185,17 +184,27 @@ void SsaLivenessAnalysis::ComputeLiveRanges() {
// Set phi inputs of successors of this block corresponding to this block
// as live_in.
- for (size_t i = 0, e = block->GetSuccessors().Size(); i < e; ++i) {
- HBasicBlock* successor = block->GetSuccessors().Get(i);
+ for (HBasicBlock* successor : block->GetSuccessors()) {
live_in->Union(GetLiveInSet(*successor));
- size_t phi_input_index = successor->GetPredecessorIndexOf(block);
- for (HInstructionIterator inst_it(successor->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
- HInstruction* phi = inst_it.Current();
- HInstruction* input = phi->InputAt(phi_input_index);
- input->GetLiveInterval()->AddPhiUse(phi, phi_input_index, block);
- // A phi input whose last user is the phi dies at the end of the predecessor block,
- // and not at the phi's lifetime position.
- live_in->SetBit(input->GetSsaIndex());
+ if (successor->IsCatchBlock()) {
+ // Inputs of catch phis will be kept alive through their environment
+ // uses, allowing the runtime to copy their values to the corresponding
+ // catch phi spill slots when an exception is thrown.
+ // The only instructions which may not be recorded in the environments
+ // are constants created by the SSA builder as typed equivalents of
+ // untyped constants from the bytecode, or phis with only such constants
+ // as inputs (verified by SSAChecker). Their raw binary value must
+ // therefore be the same and we only need to keep alive one.
+ } else {
+ size_t phi_input_index = successor->GetPredecessorIndexOf(block);
+ for (HInstructionIterator phi_it(successor->GetPhis()); !phi_it.Done(); phi_it.Advance()) {
+ HInstruction* phi = phi_it.Current();
+ HInstruction* input = phi->InputAt(phi_input_index);
+ input->GetLiveInterval()->AddPhiUse(phi, phi_input_index, block);
+ // A phi input whose last user is the phi dies at the end of the predecessor block,
+ // and not at the phi's lifetime position.
+ live_in->SetBit(input->GetSsaIndex());
+ }
}
}
@@ -296,8 +305,7 @@ bool SsaLivenessAnalysis::UpdateLiveOut(const HBasicBlock& block) {
BitVector* live_out = GetLiveOutSet(block);
bool changed = false;
// The live_out set of a block is the union of live_in sets of its successors.
- for (size_t i = 0, e = block.GetSuccessors().Size(); i < e; ++i) {
- HBasicBlock* successor = block.GetSuccessors().Get(i);
+ for (HBasicBlock* successor : block.GetSuccessors()) {
if (live_out->Union(GetLiveInSet(*successor))) {
changed = true;
}
@@ -342,8 +350,8 @@ int LiveInterval::FindFirstRegisterHint(size_t* free_until,
// will avoid a move between the two blocks.
HBasicBlock* block = liveness.GetBlockFromPosition(GetStart() / 2);
size_t next_register_use = FirstRegisterUse();
- for (size_t i = 0; i < block->GetPredecessors().Size(); ++i) {
- size_t position = block->GetPredecessors().Get(i)->GetLifetimeEnd() - 1;
+ for (HBasicBlock* predecessor : block->GetPredecessors()) {
+ size_t position = predecessor->GetLifetimeEnd() - 1;
// We know positions above GetStart() do not have a location yet.
if (position < GetStart()) {
LiveInterval* existing = GetParent()->GetSiblingAt(position);
@@ -376,17 +384,16 @@ int LiveInterval::FindFirstRegisterHint(size_t* free_until,
return reg;
}
}
- const GrowableArray<HBasicBlock*>& predecessors = user->GetBlock()->GetPredecessors();
// If the instruction dies at the phi assignment, we can try having the
// same register.
- if (end == predecessors.Get(input_index)->GetLifetimeEnd()) {
+ if (end == user->GetBlock()->GetPredecessor(input_index)->GetLifetimeEnd()) {
for (size_t i = 0, e = user->InputCount(); i < e; ++i) {
if (i == input_index) {
continue;
}
HInstruction* input = user->InputAt(i);
Location location = input->GetLiveInterval()->GetLocationAt(
- predecessors.Get(i)->GetLifetimeEnd() - 1);
+ user->GetBlock()->GetPredecessor(i)->GetLifetimeEnd() - 1);
if (location.IsRegisterKind()) {
int reg = RegisterOrLowRegister(location);
if (free_until[reg] >= use_position) {
@@ -420,10 +427,11 @@ int LiveInterval::FindFirstRegisterHint(size_t* free_until,
int LiveInterval::FindHintAtDefinition() const {
if (defined_by_->IsPhi()) {
// Try to use the same register as one of the inputs.
- const GrowableArray<HBasicBlock*>& predecessors = defined_by_->GetBlock()->GetPredecessors();
+ const ArenaVector<HBasicBlock*>& predecessors = defined_by_->GetBlock()->GetPredecessors();
for (size_t i = 0, e = defined_by_->InputCount(); i < e; ++i) {
HInstruction* input = defined_by_->InputAt(i);
- size_t end = predecessors.Get(i)->GetLifetimeEnd();
+ DCHECK_LT(i, predecessors.size());
+ size_t end = predecessors[i]->GetLifetimeEnd();
LiveInterval* input_interval = input->GetLiveInterval()->GetSiblingAt(end - 1);
if (input_interval->GetEnd() == end) {
// If the input dies at the end of the predecessor, we know its register can
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index a7044de850..ef396cbdba 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -1209,6 +1209,9 @@ class SsaLivenessAnalysis : public ValueObject {
// A value that's not live in compiled code may still be needed in interpreter,
// due to code motion, etc.
if (env_holder->IsDeoptimize()) return true;
+ // A value live at a throwing instruction in a try block may be copied by
+ // the exception handler to its location at the top of the catch block.
+ if (env_holder->CanThrowIntoCatchBlock()) return true;
if (instruction->GetBlock()->GetGraph()->IsDebuggable()) return true;
return instruction->GetType() == Primitive::kPrimNot;
}
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
index 1f1530fa1e..1f0bac59e0 100644
--- a/compiler/optimizing/stack_map_stream.cc
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -286,7 +286,7 @@ void StackMapStream::FillIn(MemoryRegion region) {
stack_map.SetDexRegisterMapOffset(
stack_map_encoding_,
code_info.GetStackMapAt(entry.same_dex_register_map_as_, stack_map_encoding_)
- .GetDexRegisterMapOffset(stack_map_encoding_));
+ .GetDexRegisterMapOffset(stack_map_encoding_));
} else {
// New dex registers maps should be added to the stack map.
MemoryRegion register_region = dex_register_locations_region.Subregion(
diff --git a/compiler/optimizing/suspend_check_test.cc b/compiler/optimizing/suspend_check_test.cc
index 5ca66a1de6..e745d94b89 100644
--- a/compiler/optimizing/suspend_check_test.cc
+++ b/compiler/optimizing/suspend_check_test.cc
@@ -36,7 +36,7 @@ static void TestCode(const uint16_t* data) {
bool graph_built = builder.BuildGraph(*item);
ASSERT_TRUE(graph_built);
- HBasicBlock* first_block = graph->GetEntryBlock()->GetSuccessors().Get(0);
+ HBasicBlock* first_block = graph->GetEntryBlock()->GetSuccessor(0);
HInstruction* first_instruction = first_block->GetFirstInstruction();
// Account for some tests having a store local as first instruction.
ASSERT_TRUE(first_instruction->IsSuspendCheck()