author Vladimir Marko <vmarko@google.com> 2017-10-03 14:49:14 +0100
committer Vladimir Marko <vmarko@google.com> 2017-10-06 17:53:50 +0100
commit ca6fff898afcb62491458ae8bcd428bfb3043da1 (patch)
tree 195a6b16d3a4b34acc2faf91ce56f448efb15e07 /compiler/optimizing
parent aa7273e56fbafc2692c8d20a31b50d2f4bdd2aa1 (diff)
ART: Use ScopedArenaAllocator for pass-local data.
Passes using local ArenaAllocator were hiding their memory usage from the allocation counting, making it difficult to track down where memory was used. Using ScopedArenaAllocator reveals the memory usage.

This changes the HGraph constructor which requires a lot of changes in tests. Refactor these tests to limit the amount of work needed the next time we change that constructor.

Test: m test-art-host-gtest
Test: testrunner.py --host
Test: Build with kArenaAllocatorCountAllocations = true.
Bug: 64312607
Change-Id: I34939e4086b500d6e827ff3ef2211d1a421ac91a
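The hunks below replace graph->GetArena() with graph->GetAllocator() and route pass-local containers through GetAllocator()->Adapter(kArenaAlloc...), so each pass's scratch memory is charged to a named allocation kind instead of disappearing into a private allocator. The following is a minimal, self-contained toy sketch of that counting idea only; it is not ART's ArenaAllocator/ScopedArenaAllocator API, and every name in it is made up for illustration.

// Toy sketch: pass-local allocations come from a shared arena that attributes
// bytes to an allocation kind, so the pass's footprint is visible in the
// statistics. All names are illustrative, not ART code.
#include <cstddef>
#include <cstdio>
#include <vector>

enum AllocKind { kAllocBoundsCheckElimination, kAllocGvn, kNumAllocKinds };

class CountingArena {
 public:
  ~CountingArena() {
    for (char* block : blocks_) delete[] block;
  }
  // Bump-style allocation; the bytes are charged to the given kind so the
  // per-pass usage shows up when the counters are dumped.
  void* Alloc(size_t bytes, AllocKind kind) {
    bytes_by_kind_[kind] += bytes;
    blocks_.push_back(new char[bytes]);
    return blocks_.back();
  }
  void Dump() const {
    std::printf("BCE scratch: %zu bytes, GVN scratch: %zu bytes\n",
                bytes_by_kind_[kAllocBoundsCheckElimination],
                bytes_by_kind_[kAllocGvn]);
  }

 private:
  size_t bytes_by_kind_[kNumAllocKinds] = {};
  std::vector<char*> blocks_;
};

// A "pass" whose scratch data is taken from the shared arena rather than a
// hidden local allocator of its own.
void RunBoundsCheckElimination(CountingArena* arena) {
  int* visited = static_cast<int*>(
      arena->Alloc(128 * sizeof(int), kAllocBoundsCheckElimination));
  visited[0] = 1;  // ... pass-local bookkeeping would go here ...
}

int main() {
  CountingArena arena;
  RunBoundsCheckElimination(&arena);
  arena.Dump();  // The pass's memory is now visible in the counters.
  return 0;
}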
Diffstat (limited to 'compiler/optimizing')
-rw-r--r--  compiler/optimizing/block_builder.h | 2
-rw-r--r--  compiler/optimizing/bounds_check_elimination.cc | 139
-rw-r--r--  compiler/optimizing/bounds_check_elimination_test.cc | 404
-rw-r--r--  compiler/optimizing/builder.cc | 7
-rw-r--r--  compiler/optimizing/builder.h | 7
-rw-r--r--  compiler/optimizing/cha_guard_optimization.cc | 6
-rw-r--r--  compiler/optimizing/code_generator.cc | 22
-rw-r--r--  compiler/optimizing/code_generator.h | 28
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc | 226
-rw-r--r--  compiler/optimizing/code_generator_arm64.h | 2
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc | 213
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.h | 4
-rw-r--r--  compiler/optimizing/code_generator_mips.cc | 178
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc | 173
-rw-r--r--  compiler/optimizing/code_generator_vector_arm64.cc | 50
-rw-r--r--  compiler/optimizing/code_generator_vector_arm_vixl.cc | 48
-rw-r--r--  compiler/optimizing/code_generator_vector_mips.cc | 48
-rw-r--r--  compiler/optimizing/code_generator_vector_mips64.cc | 48
-rw-r--r--  compiler/optimizing/code_generator_vector_x86.cc | 52
-rw-r--r--  compiler/optimizing/code_generator_vector_x86_64.cc | 52
-rw-r--r--  compiler/optimizing/code_generator_x86.cc | 213
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc | 196
-rw-r--r--  compiler/optimizing/code_sinking.cc | 22
-rw-r--r--  compiler/optimizing/codegen_test.cc | 182
-rw-r--r--  compiler/optimizing/codegen_test_utils.h | 15
-rw-r--r--  compiler/optimizing/constant_folding_test.cc | 60
-rw-r--r--  compiler/optimizing/constructor_fence_redundancy_elimination.cc | 13
-rw-r--r--  compiler/optimizing/dead_code_elimination.cc | 4
-rw-r--r--  compiler/optimizing/dead_code_elimination_test.cc | 19
-rw-r--r--  compiler/optimizing/dominator_test.cc | 11
-rw-r--r--  compiler/optimizing/emit_swap_mips_test.cc | 18
-rw-r--r--  compiler/optimizing/find_loops_test.cc | 58
-rw-r--r--  compiler/optimizing/graph_checker.h | 8
-rw-r--r--  compiler/optimizing/graph_checker_test.cc | 36
-rw-r--r--  compiler/optimizing/graph_test.cc | 150
-rw-r--r--  compiler/optimizing/gvn.cc | 2
-rw-r--r--  compiler/optimizing/gvn_test.cc | 450
-rw-r--r--  compiler/optimizing/induction_var_analysis.cc | 21
-rw-r--r--  compiler/optimizing/induction_var_analysis.h | 6
-rw-r--r--  compiler/optimizing/induction_var_analysis_test.cc | 232
-rw-r--r--  compiler/optimizing/induction_var_range.cc | 57
-rw-r--r--  compiler/optimizing/induction_var_range_test.cc | 68
-rw-r--r--  compiler/optimizing/inliner.cc | 60
-rw-r--r--  compiler/optimizing/instruction_builder.cc | 336
-rw-r--r--  compiler/optimizing/instruction_builder.h | 14
-rw-r--r--  compiler/optimizing/instruction_simplifier.cc | 83
-rw-r--r--  compiler/optimizing/instruction_simplifier_arm.cc | 12
-rw-r--r--  compiler/optimizing/instruction_simplifier_arm64.cc | 12
-rw-r--r--  compiler/optimizing/instruction_simplifier_mips.cc | 4
-rw-r--r--  compiler/optimizing/instruction_simplifier_shared.cc | 10
-rw-r--r--  compiler/optimizing/intrinsics.cc | 2
-rw-r--r--  compiler/optimizing/intrinsics.h | 2
-rw-r--r--  compiler/optimizing/intrinsics_arm64.cc | 339
-rw-r--r--  compiler/optimizing/intrinsics_arm64.h | 6
-rw-r--r--  compiler/optimizing/intrinsics_arm_vixl.cc | 338
-rw-r--r--  compiler/optimizing/intrinsics_arm_vixl.h | 2
-rw-r--r--  compiler/optimizing/intrinsics_mips.cc | 303
-rw-r--r--  compiler/optimizing/intrinsics_mips.h | 2
-rw-r--r--  compiler/optimizing/intrinsics_mips64.cc | 331
-rw-r--r--  compiler/optimizing/intrinsics_mips64.h | 2
-rw-r--r--  compiler/optimizing/intrinsics_x86.cc | 390
-rw-r--r--  compiler/optimizing/intrinsics_x86.h | 2
-rw-r--r--  compiler/optimizing/intrinsics_x86_64.cc | 386
-rw-r--r--  compiler/optimizing/intrinsics_x86_64.h | 2
-rw-r--r--  compiler/optimizing/licm.cc | 8
-rw-r--r--  compiler/optimizing/licm_test.cc | 106
-rw-r--r--  compiler/optimizing/linear_order.cc | 29
-rw-r--r--  compiler/optimizing/linear_order.h | 16
-rw-r--r--  compiler/optimizing/linearize_test.cc | 13
-rw-r--r--  compiler/optimizing/live_ranges_test.cc | 33
-rw-r--r--  compiler/optimizing/liveness_test.cc | 11
-rw-r--r--  compiler/optimizing/load_store_analysis.h | 10
-rw-r--r--  compiler/optimizing/load_store_analysis_test.cc | 165
-rw-r--r--  compiler/optimizing/load_store_elimination.cc | 14
-rw-r--r--  compiler/optimizing/locations.cc | 17
-rw-r--r--  compiler/optimizing/locations.h | 5
-rw-r--r--  compiler/optimizing/loop_optimization.cc | 31
-rw-r--r--  compiler/optimizing/loop_optimization.h | 14
-rw-r--r--  compiler/optimizing/loop_optimization_test.cc | 53
-rw-r--r--  compiler/optimizing/nodes.cc | 72
-rw-r--r--  compiler/optimizing/nodes.h | 56
-rw-r--r--  compiler/optimizing/nodes_test.cc | 92
-rw-r--r--  compiler/optimizing/nodes_vector_test.cc | 300
-rw-r--r--  compiler/optimizing/optimizing_cfi_test.cc | 12
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc | 121
-rw-r--r--  compiler/optimizing/optimizing_unit_test.h | 74
-rw-r--r--  compiler/optimizing/pc_relative_fixups_mips.cc | 4
-rw-r--r--  compiler/optimizing/pc_relative_fixups_x86.cc | 8
-rw-r--r--  compiler/optimizing/prepare_for_register_allocation.cc | 2
-rw-r--r--  compiler/optimizing/pretty_printer_test.cc | 13
-rw-r--r--  compiler/optimizing/reference_type_propagation.cc | 4
-rw-r--r--  compiler/optimizing/reference_type_propagation_test.cc | 20
-rw-r--r--  compiler/optimizing/register_allocator_test.cc | 407
-rw-r--r--  compiler/optimizing/scheduler.cc | 16
-rw-r--r--  compiler/optimizing/scheduler.h | 56
-rw-r--r--  compiler/optimizing/scheduler_arm.h | 2
-rw-r--r--  compiler/optimizing/scheduler_arm64.h | 2
-rw-r--r--  compiler/optimizing/scheduler_test.cc | 144
-rw-r--r--  compiler/optimizing/select_generator.cc | 8
-rw-r--r--  compiler/optimizing/side_effects_analysis.h | 4
-rw-r--r--  compiler/optimizing/ssa_builder.cc | 12
-rw-r--r--  compiler/optimizing/ssa_builder.h | 6
-rw-r--r--  compiler/optimizing/ssa_liveness_analysis.cc | 10
-rw-r--r--  compiler/optimizing/ssa_liveness_analysis.h | 7
-rw-r--r--  compiler/optimizing/ssa_liveness_analysis_test.cc | 102
-rw-r--r--  compiler/optimizing/ssa_phi_elimination.cc | 6
-rw-r--r--  compiler/optimizing/ssa_phi_elimination.h | 4
-rw-r--r--  compiler/optimizing/ssa_test.cc | 11
-rw-r--r--  compiler/optimizing/suspend_check_test.cc | 13
109 files changed, 4096 insertions, 4205 deletions
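The test refactoring mentioned in the commit message is visible in bounds_check_elimination_test.cc below: fixtures drop their own ArenaPool/ArenaAllocator members, derive from OptimizingUnitTest, and obtain the graph and allocator from the base class. A rough sketch of that shape follows, assuming only the OptimizingUnitTest helpers that appear in the diff (CreateGraph(), GetAllocator()); the fixture name is hypothetical and this is not a standalone, buildable test.

// Sketch of the refactored fixture shape; relies on ART's gtest-based test
// infrastructure, names other than those visible in the diff are hypothetical.
class MyPassTest : public OptimizingUnitTest {        // was: public testing::Test
 public:
  MyPassTest() : graph_(CreateGraph()) {}             // was: pool_(), allocator_(&pool_),
                                                      //      graph_ = CreateGraph(&allocator_)
 protected:
  HGraph* graph_;
};

TEST_F(MyPassTest, BuildsEntryBlock) {
  // Allocations go through the base-class allocator instead of &allocator_.
  HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_);
  graph_->AddBlock(entry);
  graph_->SetEntryBlock(entry);
}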
diff --git a/compiler/optimizing/block_builder.h b/compiler/optimizing/block_builder.h
index 6adce815f4..4a0f78ce3d 100644
--- a/compiler/optimizing/block_builder.h
+++ b/compiler/optimizing/block_builder.h
@@ -29,7 +29,7 @@ class HBasicBlockBuilder : public ValueObject {
HBasicBlockBuilder(HGraph* graph,
const DexFile* const dex_file,
const DexFile::CodeItem& code_item)
- : arena_(graph->GetArena()),
+ : arena_(graph->GetAllocator()),
graph_(graph),
dex_file_(dex_file),
code_item_(code_item),
diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc
index a7f7bce07a..0255e7302c 100644
--- a/compiler/optimizing/bounds_check_elimination.cc
+++ b/compiler/optimizing/bounds_check_elimination.cc
@@ -513,18 +513,18 @@ class BCEVisitor : public HGraphVisitor {
maps_(graph->GetBlocks().size(),
ArenaSafeMap<int, ValueRange*>(
std::less<int>(),
- graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
- graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
+ graph->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination)),
+ graph->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination)),
first_index_bounds_check_map_(
std::less<int>(),
- graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
+ graph->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination)),
early_exit_loop_(
std::less<uint32_t>(),
- graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
+ graph->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination)),
taken_test_loop_(
std::less<uint32_t>(),
- graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
- finite_loop_(graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
+ graph->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination)),
+ finite_loop_(graph->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination)),
has_dom_based_dynamic_bce_(false),
initial_block_size_(graph->GetBlocks().size()),
side_effects_(side_effects),
@@ -668,8 +668,8 @@ class BCEVisitor : public HGraphVisitor {
if (successor != nullptr) {
bool overflow;
bool underflow;
- ValueRange* new_left_range = new (GetGraph()->GetArena()) ValueRange(
- GetGraph()->GetArena(),
+ ValueRange* new_left_range = new (GetGraph()->GetAllocator()) ValueRange(
+ GetGraph()->GetAllocator(),
left_range->GetBound(),
right_range->GetBound().Add(left_compensation, &overflow, &underflow));
if (!overflow && !underflow) {
@@ -677,8 +677,8 @@ class BCEVisitor : public HGraphVisitor {
new_left_range);
}
- ValueRange* new_right_range = new (GetGraph()->GetArena()) ValueRange(
- GetGraph()->GetArena(),
+ ValueRange* new_right_range = new (GetGraph()->GetAllocator()) ValueRange(
+ GetGraph()->GetAllocator(),
left_range->GetBound().Add(right_compensation, &overflow, &underflow),
right_range->GetBound());
if (!overflow && !underflow) {
@@ -750,8 +750,8 @@ class BCEVisitor : public HGraphVisitor {
if (overflow || underflow) {
return;
}
- ValueRange* new_range = new (GetGraph()->GetArena())
- ValueRange(GetGraph()->GetArena(), ValueBound::Min(), new_upper);
+ ValueRange* new_range = new (GetGraph()->GetAllocator())
+ ValueRange(GetGraph()->GetAllocator(), ValueBound::Min(), new_upper);
ApplyRangeFromComparison(left, block, true_successor, new_range);
}
@@ -762,8 +762,8 @@ class BCEVisitor : public HGraphVisitor {
if (overflow || underflow) {
return;
}
- ValueRange* new_range = new (GetGraph()->GetArena())
- ValueRange(GetGraph()->GetArena(), new_lower, ValueBound::Max());
+ ValueRange* new_range = new (GetGraph()->GetAllocator())
+ ValueRange(GetGraph()->GetAllocator(), new_lower, ValueBound::Max());
ApplyRangeFromComparison(left, block, false_successor, new_range);
}
} else if (cond == kCondGT || cond == kCondGE) {
@@ -774,8 +774,8 @@ class BCEVisitor : public HGraphVisitor {
if (overflow || underflow) {
return;
}
- ValueRange* new_range = new (GetGraph()->GetArena())
- ValueRange(GetGraph()->GetArena(), new_lower, ValueBound::Max());
+ ValueRange* new_range = new (GetGraph()->GetAllocator())
+ ValueRange(GetGraph()->GetAllocator(), new_lower, ValueBound::Max());
ApplyRangeFromComparison(left, block, true_successor, new_range);
}
@@ -785,8 +785,8 @@ class BCEVisitor : public HGraphVisitor {
if (overflow || underflow) {
return;
}
- ValueRange* new_range = new (GetGraph()->GetArena())
- ValueRange(GetGraph()->GetArena(), ValueBound::Min(), new_upper);
+ ValueRange* new_range = new (GetGraph()->GetAllocator())
+ ValueRange(GetGraph()->GetAllocator(), ValueBound::Min(), new_upper);
ApplyRangeFromComparison(left, block, false_successor, new_range);
}
} else if (cond == kCondNE || cond == kCondEQ) {
@@ -795,8 +795,8 @@ class BCEVisitor : public HGraphVisitor {
// length == [c,d] yields [c, d] along true
// length != [c,d] yields [c, d] along false
if (!lower.Equals(ValueBound::Min()) || !upper.Equals(ValueBound::Max())) {
- ValueRange* new_range = new (GetGraph()->GetArena())
- ValueRange(GetGraph()->GetArena(), lower, upper);
+ ValueRange* new_range = new (GetGraph()->GetAllocator())
+ ValueRange(GetGraph()->GetAllocator(), lower, upper);
ApplyRangeFromComparison(
left, block, cond == kCondEQ ? true_successor : false_successor, new_range);
}
@@ -804,8 +804,8 @@ class BCEVisitor : public HGraphVisitor {
// length == 0 yields [1, max] along false
// length != 0 yields [1, max] along true
if (lower.GetConstant() == 0 && upper.GetConstant() == 0) {
- ValueRange* new_range = new (GetGraph()->GetArena())
- ValueRange(GetGraph()->GetArena(), ValueBound(nullptr, 1), ValueBound::Max());
+ ValueRange* new_range = new (GetGraph()->GetAllocator())
+ ValueRange(GetGraph()->GetAllocator(), ValueBound(nullptr, 1), ValueBound::Max());
ApplyRangeFromComparison(
left, block, cond == kCondEQ ? false_successor : true_successor, new_range);
}
@@ -826,7 +826,7 @@ class BCEVisitor : public HGraphVisitor {
// Non-constant index.
ValueBound lower = ValueBound(nullptr, 0); // constant 0
ValueBound upper = ValueBound(array_length, -1); // array_length - 1
- ValueRange array_range(GetGraph()->GetArena(), lower, upper);
+ ValueRange array_range(GetGraph()->GetAllocator(), lower, upper);
// Try index range obtained by dominator-based analysis.
ValueRange* index_range = LookupValueRange(index, block);
if (index_range != nullptr && index_range->FitsIn(&array_range)) {
@@ -875,8 +875,8 @@ class BCEVisitor : public HGraphVisitor {
} else {
ValueBound lower = ValueBound(nullptr, constant + 1);
ValueBound upper = ValueBound::Max();
- ValueRange* range = new (GetGraph()->GetArena())
- ValueRange(GetGraph()->GetArena(), lower, upper);
+ ValueRange* range = new (GetGraph()->GetAllocator())
+ ValueRange(GetGraph()->GetAllocator(), lower, upper);
AssignRange(block, array_length, range);
}
}
@@ -938,8 +938,8 @@ class BCEVisitor : public HGraphVisitor {
ValueRange* range = nullptr;
if (increment == 0) {
// Add constant 0. It's really a fixed value.
- range = new (GetGraph()->GetArena()) ValueRange(
- GetGraph()->GetArena(),
+ range = new (GetGraph()->GetAllocator()) ValueRange(
+ GetGraph()->GetAllocator(),
ValueBound(initial_value, 0),
ValueBound(initial_value, 0));
} else {
@@ -959,8 +959,8 @@ class BCEVisitor : public HGraphVisitor {
bound = increment > 0 ? ValueBound::Min() : ValueBound::Max();
}
}
- range = new (GetGraph()->GetArena()) MonotonicValueRange(
- GetGraph()->GetArena(),
+ range = new (GetGraph()->GetAllocator()) MonotonicValueRange(
+ GetGraph()->GetAllocator(),
phi,
initial_value,
increment,
@@ -1039,8 +1039,8 @@ class BCEVisitor : public HGraphVisitor {
!ValueBound::WouldAddOverflowOrUnderflow(c0, -c1)) {
if ((c0 - c1) <= 0) {
// array.length + (c0 - c1) won't overflow/underflow.
- ValueRange* range = new (GetGraph()->GetArena()) ValueRange(
- GetGraph()->GetArena(),
+ ValueRange* range = new (GetGraph()->GetAllocator()) ValueRange(
+ GetGraph()->GetAllocator(),
ValueBound(nullptr, right_const - upper.GetConstant()),
ValueBound(array_length, right_const - lower.GetConstant()));
AssignRange(sub->GetBlock(), sub, range);
@@ -1087,8 +1087,8 @@ class BCEVisitor : public HGraphVisitor {
// than array_length.
return;
}
- ValueRange* range = new (GetGraph()->GetArena()) ValueRange(
- GetGraph()->GetArena(),
+ ValueRange* range = new (GetGraph()->GetAllocator()) ValueRange(
+ GetGraph()->GetAllocator(),
ValueBound(nullptr, std::numeric_limits<int32_t>::min()),
ValueBound(left, 0));
AssignRange(instruction->GetBlock(), instruction, range);
@@ -1113,8 +1113,8 @@ class BCEVisitor : public HGraphVisitor {
if (constant > 0) {
// constant serves as a mask so any number masked with it
// gets a [0, constant] value range.
- ValueRange* range = new (GetGraph()->GetArena()) ValueRange(
- GetGraph()->GetArena(),
+ ValueRange* range = new (GetGraph()->GetAllocator()) ValueRange(
+ GetGraph()->GetAllocator(),
ValueBound(nullptr, 0),
ValueBound(nullptr, constant));
AssignRange(instruction->GetBlock(), instruction, range);
@@ -1139,8 +1139,8 @@ class BCEVisitor : public HGraphVisitor {
// array[i % 10]; // index value range [0, 9]
// array[i % -10]; // index value range [0, 9]
// }
- ValueRange* right_range = new (GetGraph()->GetArena()) ValueRange(
- GetGraph()->GetArena(),
+ ValueRange* right_range = new (GetGraph()->GetAllocator()) ValueRange(
+ GetGraph()->GetAllocator(),
ValueBound(nullptr, 1 - right_const),
ValueBound(nullptr, right_const - 1));
@@ -1169,8 +1169,8 @@ class BCEVisitor : public HGraphVisitor {
if (right->IsArrayLength()) {
ValueBound lower = ValueBound::Min(); // ideally, lower should be '1-array_length'.
ValueBound upper = ValueBound(right, -1); // array_length - 1
- ValueRange* right_range = new (GetGraph()->GetArena()) ValueRange(
- GetGraph()->GetArena(),
+ ValueRange* right_range = new (GetGraph()->GetAllocator()) ValueRange(
+ GetGraph()->GetAllocator(),
lower,
upper);
ValueRange* left_range = LookupValueRange(left, instruction->GetBlock());
@@ -1195,8 +1195,8 @@ class BCEVisitor : public HGraphVisitor {
// which isn't available as an instruction yet. new_array will
// be treated the same as new_array.length when it's used in a ValueBound.
ValueBound upper = ValueBound(new_array, -right_const);
- ValueRange* range = new (GetGraph()->GetArena())
- ValueRange(GetGraph()->GetArena(), lower, upper);
+ ValueRange* range = new (GetGraph()->GetAllocator())
+ ValueRange(GetGraph()->GetAllocator(), lower, upper);
ValueRange* existing_range = LookupValueRange(left, new_array->GetBlock());
if (existing_range != nullptr) {
range = existing_range->Narrow(range);
@@ -1260,14 +1260,15 @@ class BCEVisitor : public HGraphVisitor {
if (base == nullptr) {
DCHECK_GE(min_c, 0);
} else {
- HInstruction* lower = new (GetGraph()->GetArena())
+ HInstruction* lower = new (GetGraph()->GetAllocator())
HAdd(DataType::Type::kInt32, base, GetGraph()->GetIntConstant(min_c));
- upper = new (GetGraph()->GetArena()) HAdd(DataType::Type::kInt32, base, upper);
+ upper = new (GetGraph()->GetAllocator()) HAdd(DataType::Type::kInt32, base, upper);
block->InsertInstructionBefore(lower, bounds_check);
block->InsertInstructionBefore(upper, bounds_check);
- InsertDeoptInBlock(bounds_check, new (GetGraph()->GetArena()) HAbove(lower, upper));
+ InsertDeoptInBlock(bounds_check, new (GetGraph()->GetAllocator()) HAbove(lower, upper));
}
- InsertDeoptInBlock(bounds_check, new (GetGraph()->GetArena()) HAboveOrEqual(upper, array_length));
+ InsertDeoptInBlock(
+ bounds_check, new (GetGraph()->GetAllocator()) HAboveOrEqual(upper, array_length));
// Flag that this kind of deoptimization has occurred.
has_dom_based_dynamic_bce_ = true;
}
@@ -1291,9 +1292,9 @@ class BCEVisitor : public HGraphVisitor {
int32_t min_c = base == nullptr ? 0 : value.GetConstant();
int32_t max_c = value.GetConstant();
ArenaVector<HBoundsCheck*> candidates(
- GetGraph()->GetArena()->Adapter(kArenaAllocBoundsCheckElimination));
+ GetGraph()->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination));
ArenaVector<HBoundsCheck*> standby(
- GetGraph()->GetArena()->Adapter(kArenaAllocBoundsCheckElimination));
+ GetGraph()->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination));
for (const HUseListNode<HInstruction*>& use : array_length->GetUses()) {
// Another bounds check in same or dominated block?
HInstruction* user = use.GetUser();
@@ -1377,7 +1378,7 @@ class BCEVisitor : public HGraphVisitor {
v2.is_known && (v2.a_constant == 0 || v2.a_constant == 1)) {
DCHECK(v1.a_constant == 1 || v1.instruction == nullptr);
DCHECK(v2.a_constant == 1 || v2.instruction == nullptr);
- ValueRange index_range(GetGraph()->GetArena(),
+ ValueRange index_range(GetGraph()->GetAllocator(),
ValueBound(v1.instruction, v1.b_constant),
ValueBound(v2.instruction, v2.b_constant));
// If analysis reveals a certain OOB, disable dynamic BCE. Otherwise,
@@ -1410,9 +1411,9 @@ class BCEVisitor : public HGraphVisitor {
int32_t min_c = base == nullptr ? 0 : value.GetConstant();
int32_t max_c = value.GetConstant();
ArenaVector<HBoundsCheck*> candidates(
- GetGraph()->GetArena()->Adapter(kArenaAllocBoundsCheckElimination));
+ GetGraph()->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination));
ArenaVector<HBoundsCheck*> standby(
- GetGraph()->GetArena()->Adapter(kArenaAllocBoundsCheckElimination));
+ GetGraph()->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination));
for (const HUseListNode<HInstruction*>& use : array_length->GetUses()) {
HInstruction* user = use.GetUser();
if (user->IsBoundsCheck() && loop == user->GetBlock()->GetLoopInformation()) {
@@ -1498,7 +1499,8 @@ class BCEVisitor : public HGraphVisitor {
if (min_c != max_c) {
DCHECK(min_lower == nullptr && min_upper != nullptr &&
max_lower == nullptr && max_upper != nullptr);
- InsertDeoptInLoop(loop, block, new (GetGraph()->GetArena()) HAbove(min_upper, max_upper));
+ InsertDeoptInLoop(
+ loop, block, new (GetGraph()->GetAllocator()) HAbove(min_upper, max_upper));
} else {
DCHECK(min_lower == nullptr && min_upper == nullptr &&
max_lower == nullptr && max_upper != nullptr);
@@ -1508,15 +1510,17 @@ class BCEVisitor : public HGraphVisitor {
if (min_c != max_c) {
DCHECK(min_lower != nullptr && min_upper != nullptr &&
max_lower != nullptr && max_upper != nullptr);
- InsertDeoptInLoop(loop, block, new (GetGraph()->GetArena()) HAbove(min_lower, max_lower));
+ InsertDeoptInLoop(
+ loop, block, new (GetGraph()->GetAllocator()) HAbove(min_lower, max_lower));
} else {
DCHECK(min_lower == nullptr && min_upper == nullptr &&
max_lower != nullptr && max_upper != nullptr);
}
- InsertDeoptInLoop(loop, block, new (GetGraph()->GetArena()) HAbove(max_lower, max_upper));
+ InsertDeoptInLoop(
+ loop, block, new (GetGraph()->GetAllocator()) HAbove(max_lower, max_upper));
}
InsertDeoptInLoop(
- loop, block, new (GetGraph()->GetArena()) HAboveOrEqual(max_upper, array_length));
+ loop, block, new (GetGraph()->GetAllocator()) HAboveOrEqual(max_upper, array_length));
} else {
// TODO: if rejected, avoid doing this again for subsequent instructions in this set?
}
@@ -1610,7 +1614,7 @@ class BCEVisitor : public HGraphVisitor {
TransformLoopForDeoptimizationIfNeeded(loop, needs_taken_test);
HBasicBlock* block = GetPreHeader(loop, check);
HInstruction* cond =
- new (GetGraph()->GetArena()) HEqual(array, GetGraph()->GetNullConstant());
+ new (GetGraph()->GetAllocator()) HEqual(array, GetGraph()->GetNullConstant());
InsertDeoptInLoop(loop, block, cond, /* is_null_check */ true);
ReplaceInstruction(check, array);
return true;
@@ -1685,8 +1689,8 @@ class BCEVisitor : public HGraphVisitor {
block->InsertInstructionBefore(condition, block->GetLastInstruction());
DeoptimizationKind kind =
is_null_check ? DeoptimizationKind::kLoopNullBCE : DeoptimizationKind::kLoopBoundsBCE;
- HDeoptimize* deoptimize = new (GetGraph()->GetArena()) HDeoptimize(
- GetGraph()->GetArena(), condition, kind, suspend->GetDexPc());
+ HDeoptimize* deoptimize = new (GetGraph()->GetAllocator()) HDeoptimize(
+ GetGraph()->GetAllocator(), condition, kind, suspend->GetDexPc());
block->InsertInstructionBefore(deoptimize, block->GetLastInstruction());
if (suspend->HasEnvironment()) {
deoptimize->CopyEnvironmentFromWithLoopPhiAdjustment(
@@ -1698,8 +1702,11 @@ class BCEVisitor : public HGraphVisitor {
void InsertDeoptInBlock(HBoundsCheck* bounds_check, HInstruction* condition) {
HBasicBlock* block = bounds_check->GetBlock();
block->InsertInstructionBefore(condition, bounds_check);
- HDeoptimize* deoptimize = new (GetGraph()->GetArena()) HDeoptimize(
- GetGraph()->GetArena(), condition, DeoptimizationKind::kBlockBCE, bounds_check->GetDexPc());
+ HDeoptimize* deoptimize = new (GetGraph()->GetAllocator()) HDeoptimize(
+ GetGraph()->GetAllocator(),
+ condition,
+ DeoptimizationKind::kBlockBCE,
+ bounds_check->GetDexPc());
block->InsertInstructionBefore(deoptimize, bounds_check);
deoptimize->CopyEnvironmentFrom(bounds_check->GetEnvironment());
}
@@ -1763,18 +1770,18 @@ class BCEVisitor : public HGraphVisitor {
HBasicBlock* false_block = if_block->GetSuccessors()[1]; // False successor.
// Goto instructions.
- true_block->AddInstruction(new (GetGraph()->GetArena()) HGoto());
- false_block->AddInstruction(new (GetGraph()->GetArena()) HGoto());
- new_preheader->AddInstruction(new (GetGraph()->GetArena()) HGoto());
+ true_block->AddInstruction(new (GetGraph()->GetAllocator()) HGoto());
+ false_block->AddInstruction(new (GetGraph()->GetAllocator()) HGoto());
+ new_preheader->AddInstruction(new (GetGraph()->GetAllocator()) HGoto());
// Insert the taken-test to see if the loop body is entered. If the
// loop isn't entered at all, it jumps around the deoptimization block.
- if_block->AddInstruction(new (GetGraph()->GetArena()) HGoto()); // placeholder
+ if_block->AddInstruction(new (GetGraph()->GetAllocator()) HGoto()); // placeholder
HInstruction* condition = induction_range_.GenerateTakenTest(
header->GetLastInstruction(), GetGraph(), if_block);
DCHECK(condition != nullptr);
if_block->RemoveInstruction(if_block->GetLastInstruction());
- if_block->AddInstruction(new (GetGraph()->GetArena()) HIf(condition));
+ if_block->AddInstruction(new (GetGraph()->GetAllocator()) HIf(condition));
taken_test_loop_.Put(loop_id, true_block);
}
@@ -1853,8 +1860,8 @@ class BCEVisitor : public HGraphVisitor {
case DataType::Type::kFloat64: zero = graph->GetDoubleConstant(0); break;
default: zero = graph->GetConstant(type, 0); break;
}
- HPhi* phi = new (graph->GetArena())
- HPhi(graph->GetArena(), kNoRegNumber, /*number_of_inputs*/ 2, HPhi::ToPhiType(type));
+ HPhi* phi = new (graph->GetAllocator())
+ HPhi(graph->GetAllocator(), kNoRegNumber, /*number_of_inputs*/ 2, HPhi::ToPhiType(type));
phi->SetRawInputAt(0, instruction);
phi->SetRawInputAt(1, zero);
if (type == DataType::Type::kReference) {
diff --git a/compiler/optimizing/bounds_check_elimination_test.cc b/compiler/optimizing/bounds_check_elimination_test.cc
index 851838c4b8..1523478613 100644
--- a/compiler/optimizing/bounds_check_elimination_test.cc
+++ b/compiler/optimizing/bounds_check_elimination_test.cc
@@ -32,10 +32,9 @@ namespace art {
/**
* Fixture class for the BoundsCheckElimination tests.
*/
-class BoundsCheckEliminationTest : public testing::Test {
+class BoundsCheckEliminationTest : public OptimizingUnitTest {
public:
- BoundsCheckEliminationTest() : pool_(), allocator_(&pool_) {
- graph_ = CreateGraph(&allocator_);
+ BoundsCheckEliminationTest() : graph_(CreateGraph()) {
graph_->SetHasBoundsChecks(true);
}
@@ -57,8 +56,6 @@ class BoundsCheckEliminationTest : public testing::Test {
BoundsCheckElimination(graph_, side_effects, &induction).Run();
}
- ArenaPool pool_;
- ArenaAllocator allocator_;
HGraph* graph_;
};
@@ -67,12 +64,12 @@ class BoundsCheckEliminationTest : public testing::Test {
// else if (i >= array.length) { array[i] = 1; // Can't eliminate. }
// else { array[i] = 1; // Can eliminate. }
TEST_F(BoundsCheckEliminationTest, NarrowingRangeArrayBoundsElimination) {
- HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry);
graph_->SetEntryBlock(entry);
- HInstruction* parameter1 = new (&allocator_) HParameterValue(
+ HInstruction* parameter1 = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference); // array
- HInstruction* parameter2 = new (&allocator_) HParameterValue(
+ HInstruction* parameter2 = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32); // i
entry->AddInstruction(parameter1);
entry->AddInstruction(parameter2);
@@ -80,70 +77,70 @@ TEST_F(BoundsCheckEliminationTest, NarrowingRangeArrayBoundsElimination) {
HInstruction* constant_1 = graph_->GetIntConstant(1);
HInstruction* constant_0 = graph_->GetIntConstant(0);
- HBasicBlock* block1 = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* block1 = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(block1);
- HInstruction* cmp = new (&allocator_) HGreaterThanOrEqual(parameter2, constant_0);
- HIf* if_inst = new (&allocator_) HIf(cmp);
+ HInstruction* cmp = new (GetAllocator()) HGreaterThanOrEqual(parameter2, constant_0);
+ HIf* if_inst = new (GetAllocator()) HIf(cmp);
block1->AddInstruction(cmp);
block1->AddInstruction(if_inst);
entry->AddSuccessor(block1);
- HBasicBlock* block2 = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* block2 = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(block2);
- HNullCheck* null_check = new (&allocator_) HNullCheck(parameter1, 0);
- HArrayLength* array_length = new (&allocator_) HArrayLength(null_check, 0);
- HBoundsCheck* bounds_check2 = new (&allocator_)
+ HNullCheck* null_check = new (GetAllocator()) HNullCheck(parameter1, 0);
+ HArrayLength* array_length = new (GetAllocator()) HArrayLength(null_check, 0);
+ HBoundsCheck* bounds_check2 = new (GetAllocator())
HBoundsCheck(parameter2, array_length, 0);
- HArraySet* array_set = new (&allocator_) HArraySet(
+ HArraySet* array_set = new (GetAllocator()) HArraySet(
null_check, bounds_check2, constant_1, DataType::Type::kInt32, 0);
block2->AddInstruction(null_check);
block2->AddInstruction(array_length);
block2->AddInstruction(bounds_check2);
block2->AddInstruction(array_set);
- HBasicBlock* block3 = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* block3 = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(block3);
- null_check = new (&allocator_) HNullCheck(parameter1, 0);
- array_length = new (&allocator_) HArrayLength(null_check, 0);
- cmp = new (&allocator_) HLessThan(parameter2, array_length);
- if_inst = new (&allocator_) HIf(cmp);
+ null_check = new (GetAllocator()) HNullCheck(parameter1, 0);
+ array_length = new (GetAllocator()) HArrayLength(null_check, 0);
+ cmp = new (GetAllocator()) HLessThan(parameter2, array_length);
+ if_inst = new (GetAllocator()) HIf(cmp);
block3->AddInstruction(null_check);
block3->AddInstruction(array_length);
block3->AddInstruction(cmp);
block3->AddInstruction(if_inst);
- HBasicBlock* block4 = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* block4 = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(block4);
- null_check = new (&allocator_) HNullCheck(parameter1, 0);
- array_length = new (&allocator_) HArrayLength(null_check, 0);
- HBoundsCheck* bounds_check4 = new (&allocator_)
+ null_check = new (GetAllocator()) HNullCheck(parameter1, 0);
+ array_length = new (GetAllocator()) HArrayLength(null_check, 0);
+ HBoundsCheck* bounds_check4 = new (GetAllocator())
HBoundsCheck(parameter2, array_length, 0);
- array_set = new (&allocator_) HArraySet(
+ array_set = new (GetAllocator()) HArraySet(
null_check, bounds_check4, constant_1, DataType::Type::kInt32, 0);
block4->AddInstruction(null_check);
block4->AddInstruction(array_length);
block4->AddInstruction(bounds_check4);
block4->AddInstruction(array_set);
- HBasicBlock* block5 = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* block5 = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(block5);
- null_check = new (&allocator_) HNullCheck(parameter1, 0);
- array_length = new (&allocator_) HArrayLength(null_check, 0);
- HBoundsCheck* bounds_check5 = new (&allocator_)
+ null_check = new (GetAllocator()) HNullCheck(parameter1, 0);
+ array_length = new (GetAllocator()) HArrayLength(null_check, 0);
+ HBoundsCheck* bounds_check5 = new (GetAllocator())
HBoundsCheck(parameter2, array_length, 0);
- array_set = new (&allocator_) HArraySet(
+ array_set = new (GetAllocator()) HArraySet(
null_check, bounds_check5, constant_1, DataType::Type::kInt32, 0);
block5->AddInstruction(null_check);
block5->AddInstruction(array_length);
block5->AddInstruction(bounds_check5);
block5->AddInstruction(array_set);
- HBasicBlock* exit = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* exit = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(exit);
block2->AddSuccessor(exit);
block4->AddSuccessor(exit);
block5->AddSuccessor(exit);
- exit->AddInstruction(new (&allocator_) HExit());
+ exit->AddInstruction(new (GetAllocator()) HExit());
block1->AddSuccessor(block3); // True successor
block1->AddSuccessor(block2); // False successor
@@ -164,12 +161,12 @@ TEST_F(BoundsCheckEliminationTest, NarrowingRangeArrayBoundsElimination) {
// if (j < array.length) array[j] = 1; // Can't eliminate.
// }
TEST_F(BoundsCheckEliminationTest, OverflowArrayBoundsElimination) {
- HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry);
graph_->SetEntryBlock(entry);
- HInstruction* parameter1 = new (&allocator_) HParameterValue(
+ HInstruction* parameter1 = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference); // array
- HInstruction* parameter2 = new (&allocator_) HParameterValue(
+ HInstruction* parameter2 = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32); // i
entry->AddInstruction(parameter1);
entry->AddInstruction(parameter2);
@@ -178,39 +175,40 @@ TEST_F(BoundsCheckEliminationTest, OverflowArrayBoundsElimination) {
HInstruction* constant_0 = graph_->GetIntConstant(0);
HInstruction* constant_max_int = graph_->GetIntConstant(INT_MAX);
- HBasicBlock* block1 = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* block1 = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(block1);
- HInstruction* cmp = new (&allocator_) HLessThanOrEqual(parameter2, constant_0);
- HIf* if_inst = new (&allocator_) HIf(cmp);
+ HInstruction* cmp = new (GetAllocator()) HLessThanOrEqual(parameter2, constant_0);
+ HIf* if_inst = new (GetAllocator()) HIf(cmp);
block1->AddInstruction(cmp);
block1->AddInstruction(if_inst);
entry->AddSuccessor(block1);
- HBasicBlock* block2 = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* block2 = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(block2);
- HInstruction* add = new (&allocator_) HAdd(DataType::Type::kInt32, parameter2, constant_max_int);
- HNullCheck* null_check = new (&allocator_) HNullCheck(parameter1, 0);
- HArrayLength* array_length = new (&allocator_) HArrayLength(null_check, 0);
- HInstruction* cmp2 = new (&allocator_) HGreaterThanOrEqual(add, array_length);
- if_inst = new (&allocator_) HIf(cmp2);
+ HInstruction* add =
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, parameter2, constant_max_int);
+ HNullCheck* null_check = new (GetAllocator()) HNullCheck(parameter1, 0);
+ HArrayLength* array_length = new (GetAllocator()) HArrayLength(null_check, 0);
+ HInstruction* cmp2 = new (GetAllocator()) HGreaterThanOrEqual(add, array_length);
+ if_inst = new (GetAllocator()) HIf(cmp2);
block2->AddInstruction(add);
block2->AddInstruction(null_check);
block2->AddInstruction(array_length);
block2->AddInstruction(cmp2);
block2->AddInstruction(if_inst);
- HBasicBlock* block3 = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* block3 = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(block3);
- HBoundsCheck* bounds_check = new (&allocator_)
+ HBoundsCheck* bounds_check = new (GetAllocator())
HBoundsCheck(add, array_length, 0);
- HArraySet* array_set = new (&allocator_) HArraySet(
+ HArraySet* array_set = new (GetAllocator()) HArraySet(
null_check, bounds_check, constant_1, DataType::Type::kInt32, 0);
block3->AddInstruction(bounds_check);
block3->AddInstruction(array_set);
- HBasicBlock* exit = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* exit = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(exit);
- exit->AddInstruction(new (&allocator_) HExit());
+ exit->AddInstruction(new (GetAllocator()) HExit());
block1->AddSuccessor(exit); // true successor
block1->AddSuccessor(block2); // false successor
block2->AddSuccessor(exit); // true successor
@@ -228,12 +226,12 @@ TEST_F(BoundsCheckEliminationTest, OverflowArrayBoundsElimination) {
// if (j > 0) array[j] = 1; // Can't eliminate.
// }
TEST_F(BoundsCheckEliminationTest, UnderflowArrayBoundsElimination) {
- HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry);
graph_->SetEntryBlock(entry);
- HInstruction* parameter1 = new (&allocator_) HParameterValue(
+ HInstruction* parameter1 = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference); // array
- HInstruction* parameter2 = new (&allocator_) HParameterValue(
+ HInstruction* parameter2 = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32); // i
entry->AddInstruction(parameter1);
entry->AddInstruction(parameter2);
@@ -242,41 +240,42 @@ TEST_F(BoundsCheckEliminationTest, UnderflowArrayBoundsElimination) {
HInstruction* constant_0 = graph_->GetIntConstant(0);
HInstruction* constant_max_int = graph_->GetIntConstant(INT_MAX);
- HBasicBlock* block1 = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* block1 = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(block1);
- HNullCheck* null_check = new (&allocator_) HNullCheck(parameter1, 0);
- HArrayLength* array_length = new (&allocator_) HArrayLength(null_check, 0);
- HInstruction* cmp = new (&allocator_) HGreaterThanOrEqual(parameter2, array_length);
- HIf* if_inst = new (&allocator_) HIf(cmp);
+ HNullCheck* null_check = new (GetAllocator()) HNullCheck(parameter1, 0);
+ HArrayLength* array_length = new (GetAllocator()) HArrayLength(null_check, 0);
+ HInstruction* cmp = new (GetAllocator()) HGreaterThanOrEqual(parameter2, array_length);
+ HIf* if_inst = new (GetAllocator()) HIf(cmp);
block1->AddInstruction(null_check);
block1->AddInstruction(array_length);
block1->AddInstruction(cmp);
block1->AddInstruction(if_inst);
entry->AddSuccessor(block1);
- HBasicBlock* block2 = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* block2 = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(block2);
- HInstruction* sub1 = new (&allocator_) HSub(DataType::Type::kInt32, parameter2, constant_max_int);
- HInstruction* sub2 = new (&allocator_) HSub(DataType::Type::kInt32, sub1, constant_max_int);
- HInstruction* cmp2 = new (&allocator_) HLessThanOrEqual(sub2, constant_0);
- if_inst = new (&allocator_) HIf(cmp2);
+ HInstruction* sub1 =
+ new (GetAllocator()) HSub(DataType::Type::kInt32, parameter2, constant_max_int);
+ HInstruction* sub2 = new (GetAllocator()) HSub(DataType::Type::kInt32, sub1, constant_max_int);
+ HInstruction* cmp2 = new (GetAllocator()) HLessThanOrEqual(sub2, constant_0);
+ if_inst = new (GetAllocator()) HIf(cmp2);
block2->AddInstruction(sub1);
block2->AddInstruction(sub2);
block2->AddInstruction(cmp2);
block2->AddInstruction(if_inst);
- HBasicBlock* block3 = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* block3 = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(block3);
- HBoundsCheck* bounds_check = new (&allocator_)
+ HBoundsCheck* bounds_check = new (GetAllocator())
HBoundsCheck(sub2, array_length, 0);
- HArraySet* array_set = new (&allocator_) HArraySet(
+ HArraySet* array_set = new (GetAllocator()) HArraySet(
null_check, bounds_check, constant_1, DataType::Type::kInt32, 0);
block3->AddInstruction(bounds_check);
block3->AddInstruction(array_set);
- HBasicBlock* exit = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* exit = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(exit);
- exit->AddInstruction(new (&allocator_) HExit());
+ exit->AddInstruction(new (GetAllocator()) HExit());
block1->AddSuccessor(exit); // true successor
block1->AddSuccessor(block2); // false successor
block2->AddSuccessor(exit); // true successor
@@ -292,10 +291,10 @@ TEST_F(BoundsCheckEliminationTest, UnderflowArrayBoundsElimination) {
// array[5] = 1; // Can eliminate.
// array[4] = 1; // Can eliminate.
TEST_F(BoundsCheckEliminationTest, ConstantArrayBoundsElimination) {
- HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry);
graph_->SetEntryBlock(entry);
- HInstruction* parameter = new (&allocator_) HParameterValue(
+ HInstruction* parameter = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference);
entry->AddInstruction(parameter);
@@ -304,49 +303,49 @@ TEST_F(BoundsCheckEliminationTest, ConstantArrayBoundsElimination) {
HInstruction* constant_6 = graph_->GetIntConstant(6);
HInstruction* constant_1 = graph_->GetIntConstant(1);
- HBasicBlock* block = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(block);
entry->AddSuccessor(block);
- HNullCheck* null_check = new (&allocator_) HNullCheck(parameter, 0);
- HArrayLength* array_length = new (&allocator_) HArrayLength(null_check, 0);
- HBoundsCheck* bounds_check6 = new (&allocator_)
+ HNullCheck* null_check = new (GetAllocator()) HNullCheck(parameter, 0);
+ HArrayLength* array_length = new (GetAllocator()) HArrayLength(null_check, 0);
+ HBoundsCheck* bounds_check6 = new (GetAllocator())
HBoundsCheck(constant_6, array_length, 0);
- HInstruction* array_set = new (&allocator_) HArraySet(
+ HInstruction* array_set = new (GetAllocator()) HArraySet(
null_check, bounds_check6, constant_1, DataType::Type::kInt32, 0);
block->AddInstruction(null_check);
block->AddInstruction(array_length);
block->AddInstruction(bounds_check6);
block->AddInstruction(array_set);
- null_check = new (&allocator_) HNullCheck(parameter, 0);
- array_length = new (&allocator_) HArrayLength(null_check, 0);
- HBoundsCheck* bounds_check5 = new (&allocator_)
+ null_check = new (GetAllocator()) HNullCheck(parameter, 0);
+ array_length = new (GetAllocator()) HArrayLength(null_check, 0);
+ HBoundsCheck* bounds_check5 = new (GetAllocator())
HBoundsCheck(constant_5, array_length, 0);
- array_set = new (&allocator_) HArraySet(
+ array_set = new (GetAllocator()) HArraySet(
null_check, bounds_check5, constant_1, DataType::Type::kInt32, 0);
block->AddInstruction(null_check);
block->AddInstruction(array_length);
block->AddInstruction(bounds_check5);
block->AddInstruction(array_set);
- null_check = new (&allocator_) HNullCheck(parameter, 0);
- array_length = new (&allocator_) HArrayLength(null_check, 0);
- HBoundsCheck* bounds_check4 = new (&allocator_)
+ null_check = new (GetAllocator()) HNullCheck(parameter, 0);
+ array_length = new (GetAllocator()) HArrayLength(null_check, 0);
+ HBoundsCheck* bounds_check4 = new (GetAllocator())
HBoundsCheck(constant_4, array_length, 0);
- array_set = new (&allocator_) HArraySet(
+ array_set = new (GetAllocator()) HArraySet(
null_check, bounds_check4, constant_1, DataType::Type::kInt32, 0);
block->AddInstruction(null_check);
block->AddInstruction(array_length);
block->AddInstruction(bounds_check4);
block->AddInstruction(array_set);
- block->AddInstruction(new (&allocator_) HGoto());
+ block->AddInstruction(new (GetAllocator()) HGoto());
- HBasicBlock* exit = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* exit = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(exit);
block->AddSuccessor(exit);
- exit->AddInstruction(new (&allocator_) HExit());
+ exit->AddInstruction(new (GetAllocator()) HExit());
RunBCE();
@@ -429,28 +428,28 @@ static HInstruction* BuildSSAGraph1(HGraph* graph,
TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination1a) {
// for (int i=0; i<array.length; i++) { array[i] = 10; // Can eliminate with gvn. }
- HInstruction* bounds_check = BuildSSAGraph1(graph_, &allocator_, 0, 1);
+ HInstruction* bounds_check = BuildSSAGraph1(graph_, GetAllocator(), 0, 1);
RunBCE();
ASSERT_TRUE(IsRemoved(bounds_check));
}
TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination1b) {
// for (int i=1; i<array.length; i++) { array[i] = 10; // Can eliminate. }
- HInstruction* bounds_check = BuildSSAGraph1(graph_, &allocator_, 1, 1);
+ HInstruction* bounds_check = BuildSSAGraph1(graph_, GetAllocator(), 1, 1);
RunBCE();
ASSERT_TRUE(IsRemoved(bounds_check));
}
TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination1c) {
// for (int i=-1; i<array.length; i++) { array[i] = 10; // Can't eliminate. }
- HInstruction* bounds_check = BuildSSAGraph1(graph_, &allocator_, -1, 1);
+ HInstruction* bounds_check = BuildSSAGraph1(graph_, GetAllocator(), -1, 1);
RunBCE();
ASSERT_FALSE(IsRemoved(bounds_check));
}
TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination1d) {
// for (int i=0; i<=array.length; i++) { array[i] = 10; // Can't eliminate. }
- HInstruction* bounds_check = BuildSSAGraph1(graph_, &allocator_, 0, 1, kCondGT);
+ HInstruction* bounds_check = BuildSSAGraph1(graph_, GetAllocator(), 0, 1, kCondGT);
RunBCE();
ASSERT_FALSE(IsRemoved(bounds_check));
}
@@ -458,14 +457,14 @@ TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination1d) {
TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination1e) {
// for (int i=0; i<array.length; i += 2) {
// array[i] = 10; // Can't eliminate due to overflow concern. }
- HInstruction* bounds_check = BuildSSAGraph1(graph_, &allocator_, 0, 2);
+ HInstruction* bounds_check = BuildSSAGraph1(graph_, GetAllocator(), 0, 2);
RunBCE();
ASSERT_FALSE(IsRemoved(bounds_check));
}
TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination1f) {
// for (int i=1; i<array.length; i += 2) { array[i] = 10; // Can eliminate. }
- HInstruction* bounds_check = BuildSSAGraph1(graph_, &allocator_, 1, 2);
+ HInstruction* bounds_check = BuildSSAGraph1(graph_, GetAllocator(), 1, 2);
RunBCE();
ASSERT_TRUE(IsRemoved(bounds_check));
}
@@ -546,35 +545,35 @@ static HInstruction* BuildSSAGraph2(HGraph *graph,
TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination2a) {
// for (int i=array.length; i>0; i--) { array[i-1] = 10; // Can eliminate with gvn. }
- HInstruction* bounds_check = BuildSSAGraph2(graph_, &allocator_, 0);
+ HInstruction* bounds_check = BuildSSAGraph2(graph_, GetAllocator(), 0);
RunBCE();
ASSERT_TRUE(IsRemoved(bounds_check));
}
TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination2b) {
// for (int i=array.length; i>1; i--) { array[i-1] = 10; // Can eliminate. }
- HInstruction* bounds_check = BuildSSAGraph2(graph_, &allocator_, 1);
+ HInstruction* bounds_check = BuildSSAGraph2(graph_, GetAllocator(), 1);
RunBCE();
ASSERT_TRUE(IsRemoved(bounds_check));
}
TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination2c) {
// for (int i=array.length; i>-1; i--) { array[i-1] = 10; // Can't eliminate. }
- HInstruction* bounds_check = BuildSSAGraph2(graph_, &allocator_, -1);
+ HInstruction* bounds_check = BuildSSAGraph2(graph_, GetAllocator(), -1);
RunBCE();
ASSERT_FALSE(IsRemoved(bounds_check));
}
TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination2d) {
// for (int i=array.length; i>=0; i--) { array[i-1] = 10; // Can't eliminate. }
- HInstruction* bounds_check = BuildSSAGraph2(graph_, &allocator_, 0, -1, kCondLT);
+ HInstruction* bounds_check = BuildSSAGraph2(graph_, GetAllocator(), 0, -1, kCondLT);
RunBCE();
ASSERT_FALSE(IsRemoved(bounds_check));
}
TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination2e) {
// for (int i=array.length; i>0; i-=2) { array[i-1] = 10; // Can eliminate. }
- HInstruction* bounds_check = BuildSSAGraph2(graph_, &allocator_, 0, -2);
+ HInstruction* bounds_check = BuildSSAGraph2(graph_, GetAllocator(), 0, -2);
RunBCE();
ASSERT_TRUE(IsRemoved(bounds_check));
}
@@ -653,7 +652,7 @@ static HInstruction* BuildSSAGraph3(HGraph* graph,
TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination3a) {
// int[] array = new int[10];
// for (int i=0; i<10; i++) { array[i] = 10; // Can eliminate. }
- HInstruction* bounds_check = BuildSSAGraph3(graph_, &allocator_, 0, 1, kCondGE);
+ HInstruction* bounds_check = BuildSSAGraph3(graph_, GetAllocator(), 0, 1, kCondGE);
RunBCE();
ASSERT_TRUE(IsRemoved(bounds_check));
}
@@ -661,7 +660,7 @@ TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination3a) {
TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination3b) {
// int[] array = new int[10];
// for (int i=1; i<10; i++) { array[i] = 10; // Can eliminate. }
- HInstruction* bounds_check = BuildSSAGraph3(graph_, &allocator_, 1, 1, kCondGE);
+ HInstruction* bounds_check = BuildSSAGraph3(graph_, GetAllocator(), 1, 1, kCondGE);
RunBCE();
ASSERT_TRUE(IsRemoved(bounds_check));
}
@@ -669,7 +668,7 @@ TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination3b) {
TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination3c) {
// int[] array = new int[10];
// for (int i=0; i<=10; i++) { array[i] = 10; // Can't eliminate. }
- HInstruction* bounds_check = BuildSSAGraph3(graph_, &allocator_, 0, 1, kCondGT);
+ HInstruction* bounds_check = BuildSSAGraph3(graph_, GetAllocator(), 0, 1, kCondGT);
RunBCE();
ASSERT_FALSE(IsRemoved(bounds_check));
}
@@ -677,7 +676,7 @@ TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination3c) {
TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination3d) {
// int[] array = new int[10];
// for (int i=1; i<10; i+=8) { array[i] = 10; // Can eliminate. }
- HInstruction* bounds_check = BuildSSAGraph3(graph_, &allocator_, 1, 8, kCondGE);
+ HInstruction* bounds_check = BuildSSAGraph3(graph_, GetAllocator(), 1, 8, kCondGE);
RunBCE();
ASSERT_TRUE(IsRemoved(bounds_check));
}
@@ -759,21 +758,21 @@ static HInstruction* BuildSSAGraph4(HGraph* graph,
TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination4a) {
// for (int i=0; i<array.length; i++) { array[array.length-i-1] = 10; // Can eliminate with gvn. }
- HInstruction* bounds_check = BuildSSAGraph4(graph_, &allocator_, 0);
+ HInstruction* bounds_check = BuildSSAGraph4(graph_, GetAllocator(), 0);
RunBCE();
ASSERT_TRUE(IsRemoved(bounds_check));
}
TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination4b) {
// for (int i=1; i<array.length; i++) { array[array.length-i-1] = 10; // Can eliminate. }
- HInstruction* bounds_check = BuildSSAGraph4(graph_, &allocator_, 1);
+ HInstruction* bounds_check = BuildSSAGraph4(graph_, GetAllocator(), 1);
RunBCE();
ASSERT_TRUE(IsRemoved(bounds_check));
}
TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination4c) {
// for (int i=0; i<=array.length; i++) { array[array.length-i] = 10; // Can't eliminate. }
- HInstruction* bounds_check = BuildSSAGraph4(graph_, &allocator_, 0, kCondGT);
+ HInstruction* bounds_check = BuildSSAGraph4(graph_, GetAllocator(), 0, kCondGT);
RunBCE();
ASSERT_FALSE(IsRemoved(bounds_check));
}
@@ -790,10 +789,10 @@ TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination4c) {
// }
// }
TEST_F(BoundsCheckEliminationTest, BubbleSortArrayBoundsElimination) {
- HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry);
graph_->SetEntryBlock(entry);
- HInstruction* parameter = new (&allocator_) HParameterValue(
+ HInstruction* parameter = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference);
entry->AddInstruction(parameter);
@@ -801,23 +800,23 @@ TEST_F(BoundsCheckEliminationTest, BubbleSortArrayBoundsElimination) {
HInstruction* constant_minus_1 = graph_->GetIntConstant(-1);
HInstruction* constant_1 = graph_->GetIntConstant(1);
- HBasicBlock* block = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(block);
entry->AddSuccessor(block);
- block->AddInstruction(new (&allocator_) HGoto());
+ block->AddInstruction(new (GetAllocator()) HGoto());
- HBasicBlock* exit = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* exit = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(exit);
- exit->AddInstruction(new (&allocator_) HExit());
+ exit->AddInstruction(new (GetAllocator()) HExit());
- HBasicBlock* outer_header = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* outer_header = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(outer_header);
- HPhi* phi_i = new (&allocator_) HPhi(&allocator_, 0, 0, DataType::Type::kInt32);
- HNullCheck* null_check = new (&allocator_) HNullCheck(parameter, 0);
- HArrayLength* array_length = new (&allocator_) HArrayLength(null_check, 0);
- HAdd* add = new (&allocator_) HAdd(DataType::Type::kInt32, array_length, constant_minus_1);
- HInstruction* cmp = new (&allocator_) HGreaterThanOrEqual(phi_i, add);
- HIf* if_inst = new (&allocator_) HIf(cmp);
+ HPhi* phi_i = new (GetAllocator()) HPhi(GetAllocator(), 0, 0, DataType::Type::kInt32);
+ HNullCheck* null_check = new (GetAllocator()) HNullCheck(parameter, 0);
+ HArrayLength* array_length = new (GetAllocator()) HArrayLength(null_check, 0);
+ HAdd* add = new (GetAllocator()) HAdd(DataType::Type::kInt32, array_length, constant_minus_1);
+ HInstruction* cmp = new (GetAllocator()) HGreaterThanOrEqual(phi_i, add);
+ HIf* if_inst = new (GetAllocator()) HIf(cmp);
outer_header->AddPhi(phi_i);
outer_header->AddInstruction(null_check);
outer_header->AddInstruction(array_length);
@@ -826,15 +825,15 @@ TEST_F(BoundsCheckEliminationTest, BubbleSortArrayBoundsElimination) {
outer_header->AddInstruction(if_inst);
phi_i->AddInput(constant_0);
- HBasicBlock* inner_header = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* inner_header = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(inner_header);
- HPhi* phi_j = new (&allocator_) HPhi(&allocator_, 0, 0, DataType::Type::kInt32);
- null_check = new (&allocator_) HNullCheck(parameter, 0);
- array_length = new (&allocator_) HArrayLength(null_check, 0);
- HSub* sub = new (&allocator_) HSub(DataType::Type::kInt32, array_length, phi_i);
- add = new (&allocator_) HAdd(DataType::Type::kInt32, sub, constant_minus_1);
- cmp = new (&allocator_) HGreaterThanOrEqual(phi_j, add);
- if_inst = new (&allocator_) HIf(cmp);
+ HPhi* phi_j = new (GetAllocator()) HPhi(GetAllocator(), 0, 0, DataType::Type::kInt32);
+ null_check = new (GetAllocator()) HNullCheck(parameter, 0);
+ array_length = new (GetAllocator()) HArrayLength(null_check, 0);
+ HSub* sub = new (GetAllocator()) HSub(DataType::Type::kInt32, array_length, phi_i);
+ add = new (GetAllocator()) HAdd(DataType::Type::kInt32, sub, constant_minus_1);
+ cmp = new (GetAllocator()) HGreaterThanOrEqual(phi_j, add);
+ if_inst = new (GetAllocator()) HIf(cmp);
inner_header->AddPhi(phi_j);
inner_header->AddInstruction(null_check);
inner_header->AddInstruction(array_length);
@@ -844,25 +843,25 @@ TEST_F(BoundsCheckEliminationTest, BubbleSortArrayBoundsElimination) {
inner_header->AddInstruction(if_inst);
phi_j->AddInput(constant_0);
- HBasicBlock* inner_body_compare = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* inner_body_compare = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(inner_body_compare);
- null_check = new (&allocator_) HNullCheck(parameter, 0);
- array_length = new (&allocator_) HArrayLength(null_check, 0);
- HBoundsCheck* bounds_check1 = new (&allocator_) HBoundsCheck(phi_j, array_length, 0);
- HArrayGet* array_get_j = new (&allocator_)
+ null_check = new (GetAllocator()) HNullCheck(parameter, 0);
+ array_length = new (GetAllocator()) HArrayLength(null_check, 0);
+ HBoundsCheck* bounds_check1 = new (GetAllocator()) HBoundsCheck(phi_j, array_length, 0);
+ HArrayGet* array_get_j = new (GetAllocator())
HArrayGet(null_check, bounds_check1, DataType::Type::kInt32, 0);
inner_body_compare->AddInstruction(null_check);
inner_body_compare->AddInstruction(array_length);
inner_body_compare->AddInstruction(bounds_check1);
inner_body_compare->AddInstruction(array_get_j);
- HInstruction* j_plus_1 = new (&allocator_) HAdd(DataType::Type::kInt32, phi_j, constant_1);
- null_check = new (&allocator_) HNullCheck(parameter, 0);
- array_length = new (&allocator_) HArrayLength(null_check, 0);
- HBoundsCheck* bounds_check2 = new (&allocator_) HBoundsCheck(j_plus_1, array_length, 0);
- HArrayGet* array_get_j_plus_1 = new (&allocator_)
+ HInstruction* j_plus_1 = new (GetAllocator()) HAdd(DataType::Type::kInt32, phi_j, constant_1);
+ null_check = new (GetAllocator()) HNullCheck(parameter, 0);
+ array_length = new (GetAllocator()) HArrayLength(null_check, 0);
+ HBoundsCheck* bounds_check2 = new (GetAllocator()) HBoundsCheck(j_plus_1, array_length, 0);
+ HArrayGet* array_get_j_plus_1 = new (GetAllocator())
HArrayGet(null_check, bounds_check2, DataType::Type::kInt32, 0);
- cmp = new (&allocator_) HGreaterThanOrEqual(array_get_j, array_get_j_plus_1);
- if_inst = new (&allocator_) HIf(cmp);
+ cmp = new (GetAllocator()) HGreaterThanOrEqual(array_get_j, array_get_j_plus_1);
+ if_inst = new (GetAllocator()) HIf(cmp);
inner_body_compare->AddInstruction(j_plus_1);
inner_body_compare->AddInstruction(null_check);
inner_body_compare->AddInstruction(array_length);
@@ -871,14 +870,14 @@ TEST_F(BoundsCheckEliminationTest, BubbleSortArrayBoundsElimination) {
inner_body_compare->AddInstruction(cmp);
inner_body_compare->AddInstruction(if_inst);
- HBasicBlock* inner_body_swap = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* inner_body_swap = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(inner_body_swap);
- j_plus_1 = new (&allocator_) HAdd(DataType::Type::kInt32, phi_j, constant_1);
+ j_plus_1 = new (GetAllocator()) HAdd(DataType::Type::kInt32, phi_j, constant_1);
// temp = array[j+1]
- null_check = new (&allocator_) HNullCheck(parameter, 0);
- array_length = new (&allocator_) HArrayLength(null_check, 0);
- HInstruction* bounds_check3 = new (&allocator_) HBoundsCheck(j_plus_1, array_length, 0);
- array_get_j_plus_1 = new (&allocator_)
+ null_check = new (GetAllocator()) HNullCheck(parameter, 0);
+ array_length = new (GetAllocator()) HArrayLength(null_check, 0);
+ HInstruction* bounds_check3 = new (GetAllocator()) HBoundsCheck(j_plus_1, array_length, 0);
+ array_get_j_plus_1 = new (GetAllocator())
HArrayGet(null_check, bounds_check3, DataType::Type::kInt32, 0);
inner_body_swap->AddInstruction(j_plus_1);
inner_body_swap->AddInstruction(null_check);
@@ -886,48 +885,48 @@ TEST_F(BoundsCheckEliminationTest, BubbleSortArrayBoundsElimination) {
inner_body_swap->AddInstruction(bounds_check3);
inner_body_swap->AddInstruction(array_get_j_plus_1);
// array[j+1] = array[j]
- null_check = new (&allocator_) HNullCheck(parameter, 0);
- array_length = new (&allocator_) HArrayLength(null_check, 0);
- HInstruction* bounds_check4 = new (&allocator_) HBoundsCheck(phi_j, array_length, 0);
- array_get_j = new (&allocator_)
+ null_check = new (GetAllocator()) HNullCheck(parameter, 0);
+ array_length = new (GetAllocator()) HArrayLength(null_check, 0);
+ HInstruction* bounds_check4 = new (GetAllocator()) HBoundsCheck(phi_j, array_length, 0);
+ array_get_j = new (GetAllocator())
HArrayGet(null_check, bounds_check4, DataType::Type::kInt32, 0);
inner_body_swap->AddInstruction(null_check);
inner_body_swap->AddInstruction(array_length);
inner_body_swap->AddInstruction(bounds_check4);
inner_body_swap->AddInstruction(array_get_j);
- null_check = new (&allocator_) HNullCheck(parameter, 0);
- array_length = new (&allocator_) HArrayLength(null_check, 0);
- HInstruction* bounds_check5 = new (&allocator_) HBoundsCheck(j_plus_1, array_length, 0);
- HArraySet* array_set_j_plus_1 = new (&allocator_)
+ null_check = new (GetAllocator()) HNullCheck(parameter, 0);
+ array_length = new (GetAllocator()) HArrayLength(null_check, 0);
+ HInstruction* bounds_check5 = new (GetAllocator()) HBoundsCheck(j_plus_1, array_length, 0);
+ HArraySet* array_set_j_plus_1 = new (GetAllocator())
HArraySet(null_check, bounds_check5, array_get_j, DataType::Type::kInt32, 0);
inner_body_swap->AddInstruction(null_check);
inner_body_swap->AddInstruction(array_length);
inner_body_swap->AddInstruction(bounds_check5);
inner_body_swap->AddInstruction(array_set_j_plus_1);
// array[j] = temp
- null_check = new (&allocator_) HNullCheck(parameter, 0);
- array_length = new (&allocator_) HArrayLength(null_check, 0);
- HInstruction* bounds_check6 = new (&allocator_) HBoundsCheck(phi_j, array_length, 0);
- HArraySet* array_set_j = new (&allocator_)
+ null_check = new (GetAllocator()) HNullCheck(parameter, 0);
+ array_length = new (GetAllocator()) HArrayLength(null_check, 0);
+ HInstruction* bounds_check6 = new (GetAllocator()) HBoundsCheck(phi_j, array_length, 0);
+ HArraySet* array_set_j = new (GetAllocator())
HArraySet(null_check, bounds_check6, array_get_j_plus_1, DataType::Type::kInt32, 0);
inner_body_swap->AddInstruction(null_check);
inner_body_swap->AddInstruction(array_length);
inner_body_swap->AddInstruction(bounds_check6);
inner_body_swap->AddInstruction(array_set_j);
- inner_body_swap->AddInstruction(new (&allocator_) HGoto());
+ inner_body_swap->AddInstruction(new (GetAllocator()) HGoto());
- HBasicBlock* inner_body_add = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* inner_body_add = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(inner_body_add);
- add = new (&allocator_) HAdd(DataType::Type::kInt32, phi_j, constant_1);
+ add = new (GetAllocator()) HAdd(DataType::Type::kInt32, phi_j, constant_1);
inner_body_add->AddInstruction(add);
- inner_body_add->AddInstruction(new (&allocator_) HGoto());
+ inner_body_add->AddInstruction(new (GetAllocator()) HGoto());
phi_j->AddInput(add);
- HBasicBlock* outer_body_add = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* outer_body_add = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(outer_body_add);
- add = new (&allocator_) HAdd(DataType::Type::kInt32, phi_i, constant_1);
+ add = new (GetAllocator()) HAdd(DataType::Type::kInt32, phi_i, constant_1);
outer_body_add->AddInstruction(add);
- outer_body_add->AddInstruction(new (&allocator_) HGoto());
+ outer_body_add->AddInstruction(new (GetAllocator()) HGoto());
phi_i->AddInput(add);
block->AddSuccessor(outer_header);
@@ -961,10 +960,10 @@ TEST_F(BoundsCheckEliminationTest, BubbleSortArrayBoundsElimination) {
// array[param_i%10] = 10; // Can't eliminate, when param_i < 0
// }
TEST_F(BoundsCheckEliminationTest, ModArrayBoundsElimination) {
- HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry);
graph_->SetEntryBlock(entry);
- HInstruction* param_i = new (&allocator_)
+ HInstruction* param_i = new (GetAllocator())
HParameterValue(graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32);
entry->AddInstruction(param_i);
@@ -974,17 +973,17 @@ TEST_F(BoundsCheckEliminationTest, ModArrayBoundsElimination) {
HInstruction* constant_200 = graph_->GetIntConstant(200);
HInstruction* constant_minus_10 = graph_->GetIntConstant(-10);
- HBasicBlock* block = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(block);
entry->AddSuccessor(block);
// We pass a bogus constant for the class to avoid mocking one.
- HInstruction* new_array = new (&allocator_) HNewArray(constant_10, constant_10, 0);
+ HInstruction* new_array = new (GetAllocator()) HNewArray(constant_10, constant_10, 0);
block->AddInstruction(new_array);
- block->AddInstruction(new (&allocator_) HGoto());
+ block->AddInstruction(new (GetAllocator()) HGoto());
- HBasicBlock* loop_header = new (&allocator_) HBasicBlock(graph_);
- HBasicBlock* loop_body = new (&allocator_) HBasicBlock(graph_);
- HBasicBlock* exit = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* loop_header = new (GetAllocator()) HBasicBlock(graph_);
+ HBasicBlock* loop_body = new (GetAllocator()) HBasicBlock(graph_);
+ HBasicBlock* exit = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(loop_header);
graph_->AddBlock(loop_body);
@@ -994,9 +993,9 @@ TEST_F(BoundsCheckEliminationTest, ModArrayBoundsElimination) {
loop_header->AddSuccessor(loop_body); // false successor
loop_body->AddSuccessor(loop_header);
- HPhi* phi = new (&allocator_) HPhi(&allocator_, 0, 0, DataType::Type::kInt32);
- HInstruction* cmp = new (&allocator_) HGreaterThanOrEqual(phi, constant_200);
- HInstruction* if_inst = new (&allocator_) HIf(cmp);
+ HPhi* phi = new (GetAllocator()) HPhi(GetAllocator(), 0, 0, DataType::Type::kInt32);
+ HInstruction* cmp = new (GetAllocator()) HGreaterThanOrEqual(phi, constant_200);
+ HInstruction* if_inst = new (GetAllocator()) HIf(cmp);
loop_header->AddPhi(phi);
loop_header->AddInstruction(cmp);
loop_header->AddInstruction(if_inst);
@@ -1005,49 +1004,52 @@ TEST_F(BoundsCheckEliminationTest, ModArrayBoundsElimination) {
//////////////////////////////////////////////////////////////////////////////////
// LOOP BODY:
// array[i % 10] = 10;
- HRem* i_mod_10 = new (&allocator_) HRem(DataType::Type::kInt32, phi, constant_10, 0);
- HBoundsCheck* bounds_check_i_mod_10 = new (&allocator_) HBoundsCheck(i_mod_10, constant_10, 0);
- HInstruction* array_set = new (&allocator_) HArraySet(
+ HRem* i_mod_10 = new (GetAllocator()) HRem(DataType::Type::kInt32, phi, constant_10, 0);
+ HBoundsCheck* bounds_check_i_mod_10 = new (GetAllocator()) HBoundsCheck(i_mod_10, constant_10, 0);
+ HInstruction* array_set = new (GetAllocator()) HArraySet(
new_array, bounds_check_i_mod_10, constant_10, DataType::Type::kInt32, 0);
loop_body->AddInstruction(i_mod_10);
loop_body->AddInstruction(bounds_check_i_mod_10);
loop_body->AddInstruction(array_set);
// array[i % 1] = 10;
- HRem* i_mod_1 = new (&allocator_) HRem(DataType::Type::kInt32, phi, constant_1, 0);
- HBoundsCheck* bounds_check_i_mod_1 = new (&allocator_) HBoundsCheck(i_mod_1, constant_10, 0);
- array_set = new (&allocator_) HArraySet(
+ HRem* i_mod_1 = new (GetAllocator()) HRem(DataType::Type::kInt32, phi, constant_1, 0);
+ HBoundsCheck* bounds_check_i_mod_1 = new (GetAllocator()) HBoundsCheck(i_mod_1, constant_10, 0);
+ array_set = new (GetAllocator()) HArraySet(
new_array, bounds_check_i_mod_1, constant_10, DataType::Type::kInt32, 0);
loop_body->AddInstruction(i_mod_1);
loop_body->AddInstruction(bounds_check_i_mod_1);
loop_body->AddInstruction(array_set);
// array[i % 200] = 10;
- HRem* i_mod_200 = new (&allocator_) HRem(DataType::Type::kInt32, phi, constant_200, 0);
- HBoundsCheck* bounds_check_i_mod_200 = new (&allocator_) HBoundsCheck(i_mod_200, constant_10, 0);
- array_set = new (&allocator_) HArraySet(
+ HRem* i_mod_200 = new (GetAllocator()) HRem(DataType::Type::kInt32, phi, constant_200, 0);
+ HBoundsCheck* bounds_check_i_mod_200 = new (GetAllocator()) HBoundsCheck(
+ i_mod_200, constant_10, 0);
+ array_set = new (GetAllocator()) HArraySet(
new_array, bounds_check_i_mod_200, constant_10, DataType::Type::kInt32, 0);
loop_body->AddInstruction(i_mod_200);
loop_body->AddInstruction(bounds_check_i_mod_200);
loop_body->AddInstruction(array_set);
// array[i % -10] = 10;
- HRem* i_mod_minus_10 = new (&allocator_) HRem(DataType::Type::kInt32, phi, constant_minus_10, 0);
- HBoundsCheck* bounds_check_i_mod_minus_10 = new (&allocator_) HBoundsCheck(
+ HRem* i_mod_minus_10 = new (GetAllocator()) HRem(
+ DataType::Type::kInt32, phi, constant_minus_10, 0);
+ HBoundsCheck* bounds_check_i_mod_minus_10 = new (GetAllocator()) HBoundsCheck(
i_mod_minus_10, constant_10, 0);
- array_set = new (&allocator_) HArraySet(
+ array_set = new (GetAllocator()) HArraySet(
new_array, bounds_check_i_mod_minus_10, constant_10, DataType::Type::kInt32, 0);
loop_body->AddInstruction(i_mod_minus_10);
loop_body->AddInstruction(bounds_check_i_mod_minus_10);
loop_body->AddInstruction(array_set);
// array[i%array.length] = 10;
- HNullCheck* null_check = new (&allocator_) HNullCheck(new_array, 0);
- HArrayLength* array_length = new (&allocator_) HArrayLength(null_check, 0);
- HRem* i_mod_array_length = new (&allocator_) HRem(DataType::Type::kInt32, phi, array_length, 0);
- HBoundsCheck* bounds_check_i_mod_array_len = new (&allocator_) HBoundsCheck(
+ HNullCheck* null_check = new (GetAllocator()) HNullCheck(new_array, 0);
+ HArrayLength* array_length = new (GetAllocator()) HArrayLength(null_check, 0);
+ HRem* i_mod_array_length = new (GetAllocator()) HRem(
+ DataType::Type::kInt32, phi, array_length, 0);
+ HBoundsCheck* bounds_check_i_mod_array_len = new (GetAllocator()) HBoundsCheck(
i_mod_array_length, array_length, 0);
- array_set = new (&allocator_) HArraySet(
+ array_set = new (GetAllocator()) HArraySet(
null_check, bounds_check_i_mod_array_len, constant_10, DataType::Type::kInt32, 0);
loop_body->AddInstruction(null_check);
loop_body->AddInstruction(array_length);
@@ -1056,23 +1058,23 @@ TEST_F(BoundsCheckEliminationTest, ModArrayBoundsElimination) {
loop_body->AddInstruction(array_set);
// array[param_i % 10] = 10;
- HRem* param_i_mod_10 = new (&allocator_) HRem(DataType::Type::kInt32, param_i, constant_10, 0);
- HBoundsCheck* bounds_check_param_i_mod_10 = new (&allocator_) HBoundsCheck(
+ HRem* param_i_mod_10 = new (GetAllocator()) HRem(DataType::Type::kInt32, param_i, constant_10, 0);
+ HBoundsCheck* bounds_check_param_i_mod_10 = new (GetAllocator()) HBoundsCheck(
param_i_mod_10, constant_10, 0);
- array_set = new (&allocator_) HArraySet(
+ array_set = new (GetAllocator()) HArraySet(
new_array, bounds_check_param_i_mod_10, constant_10, DataType::Type::kInt32, 0);
loop_body->AddInstruction(param_i_mod_10);
loop_body->AddInstruction(bounds_check_param_i_mod_10);
loop_body->AddInstruction(array_set);
// array[param_i%array.length] = 10;
- null_check = new (&allocator_) HNullCheck(new_array, 0);
- array_length = new (&allocator_) HArrayLength(null_check, 0);
- HRem* param_i_mod_array_length = new (&allocator_) HRem(
+ null_check = new (GetAllocator()) HNullCheck(new_array, 0);
+ array_length = new (GetAllocator()) HArrayLength(null_check, 0);
+ HRem* param_i_mod_array_length = new (GetAllocator()) HRem(
DataType::Type::kInt32, param_i, array_length, 0);
- HBoundsCheck* bounds_check_param_i_mod_array_len = new (&allocator_) HBoundsCheck(
+ HBoundsCheck* bounds_check_param_i_mod_array_len = new (GetAllocator()) HBoundsCheck(
param_i_mod_array_length, array_length, 0);
- array_set = new (&allocator_) HArraySet(
+ array_set = new (GetAllocator()) HArraySet(
null_check, bounds_check_param_i_mod_array_len, constant_10, DataType::Type::kInt32, 0);
loop_body->AddInstruction(null_check);
loop_body->AddInstruction(array_length);
@@ -1081,13 +1083,13 @@ TEST_F(BoundsCheckEliminationTest, ModArrayBoundsElimination) {
loop_body->AddInstruction(array_set);
// i++;
- HInstruction* add = new (&allocator_) HAdd(DataType::Type::kInt32, phi, constant_1);
+ HInstruction* add = new (GetAllocator()) HAdd(DataType::Type::kInt32, phi, constant_1);
loop_body->AddInstruction(add);
- loop_body->AddInstruction(new (&allocator_) HGoto());
+ loop_body->AddInstruction(new (GetAllocator()) HGoto());
phi->AddInput(add);
//////////////////////////////////////////////////////////////////////////////////
- exit->AddInstruction(new (&allocator_) HExit());
+ exit->AddInstruction(new (GetAllocator()) HExit());
RunBCE();
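The test hunks above construct every block and instruction with the form new (GetAllocator()) H...(...). For readers not used to that spelling, here is a stand-alone sketch of how an extra-argument placement operator new can route such allocations into a bump arena. BumpArena, Node and the 4 KiB buffer are illustrative assumptions for this sketch only, not ART classes or sizes.

#include <cstddef>
#include <cstdint>
#include <new>

// Tiny fixed-capacity bump arena (illustrative only; not ART's ArenaAllocator).
class BumpArena {
 public:
  void* Alloc(std::size_t bytes) {
    std::size_t aligned =
        (used_ + alignof(std::max_align_t) - 1) & ~(alignof(std::max_align_t) - 1);
    if (aligned + bytes > sizeof(buffer_)) throw std::bad_alloc();
    used_ = aligned + bytes;
    return buffer_ + aligned;
  }
 private:
  alignas(std::max_align_t) std::uint8_t buffer_[4096];
  std::size_t used_ = 0;
};

// Extra-argument placement form, so `new (arena) Node(...)` draws from the arena,
// mirroring the `new (GetAllocator()) H...(...)` calls in the hunks above.
void* operator new(std::size_t bytes, BumpArena* arena) { return arena->Alloc(bytes); }
// Matching placement delete; only invoked if the constructor throws.
void operator delete(void*, BumpArena*) noexcept {}

struct Node {
  explicit Node(int v) : value(v) {}
  int value;
};

int main() {
  BumpArena arena;
  Node* n = new (&arena) Node(42);  // No per-node delete; storage lives and dies with the arena.
  return n->value == 42 ? 0 : 1;
}

The point of the idiom is visible in main: nodes are never freed one by one, the arena's storage is dropped as a whole.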
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 0e708ed408..76350a6d55 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -32,13 +32,12 @@
namespace art {
HGraphBuilder::HGraphBuilder(HGraph* graph,
- DexCompilationUnit* dex_compilation_unit,
- const DexCompilationUnit* const outer_compilation_unit,
+ const DexCompilationUnit* dex_compilation_unit,
+ const DexCompilationUnit* outer_compilation_unit,
CompilerDriver* driver,
CodeGenerator* code_generator,
OptimizingCompilerStats* compiler_stats,
const uint8_t* interpreter_metadata,
- Handle<mirror::DexCache> dex_cache,
VariableSizedHandleScope* handles)
: graph_(graph),
dex_file_(&graph->GetDexFile()),
@@ -63,7 +62,7 @@ HGraphBuilder::HGraphBuilder(HGraph* graph,
code_generator,
interpreter_metadata,
compiler_stats,
- dex_cache,
+ dex_compilation_unit->GetDexCache(),
handles) {}
bool HGraphBuilder::SkipCompilation(size_t number_of_branches) {
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index 9524fe2534..6c5985a3de 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -36,13 +36,12 @@ class CodeGenerator;
class HGraphBuilder : public ValueObject {
public:
HGraphBuilder(HGraph* graph,
- DexCompilationUnit* dex_compilation_unit,
- const DexCompilationUnit* const outer_compilation_unit,
+ const DexCompilationUnit* dex_compilation_unit,
+ const DexCompilationUnit* outer_compilation_unit,
CompilerDriver* driver,
CodeGenerator* code_generator,
OptimizingCompilerStats* compiler_stats,
const uint8_t* interpreter_metadata,
- Handle<mirror::DexCache> dex_cache,
VariableSizedHandleScope* handles);
// Only for unit testing.
@@ -89,7 +88,7 @@ class HGraphBuilder : public ValueObject {
// The compilation unit of the current method being compiled. Note that
// it can be an inlined method.
- DexCompilationUnit* const dex_compilation_unit_;
+ const DexCompilationUnit* const dex_compilation_unit_;
CompilerDriver* const compiler_driver_;
diff --git a/compiler/optimizing/cha_guard_optimization.cc b/compiler/optimizing/cha_guard_optimization.cc
index c806dbfef6..3addaeecd9 100644
--- a/compiler/optimizing/cha_guard_optimization.cc
+++ b/compiler/optimizing/cha_guard_optimization.cc
@@ -36,7 +36,7 @@ class CHAGuardVisitor : HGraphVisitor {
: HGraphVisitor(graph),
block_has_cha_guard_(GetGraph()->GetBlocks().size(),
0,
- graph->GetArena()->Adapter(kArenaAllocCHA)),
+ graph->GetAllocator()->Adapter(kArenaAllocCHA)),
instruction_iterator_(nullptr) {
number_of_guards_to_visit_ = GetGraph()->GetNumberOfCHAGuards();
DCHECK_NE(number_of_guards_to_visit_, 0u);
@@ -202,8 +202,8 @@ bool CHAGuardVisitor::HoistGuard(HShouldDeoptimizeFlag* flag,
HInstruction* suspend = loop_info->GetSuspendCheck();
// Need a new deoptimize instruction that copies the environment
// of the suspend instruction for the loop.
- HDeoptimize* deoptimize = new (GetGraph()->GetArena()) HDeoptimize(
- GetGraph()->GetArena(), compare, DeoptimizationKind::kCHA, suspend->GetDexPc());
+ HDeoptimize* deoptimize = new (GetGraph()->GetAllocator()) HDeoptimize(
+ GetGraph()->GetAllocator(), compare, DeoptimizationKind::kCHA, suspend->GetDexPc());
pre_header->InsertInstructionBefore(deoptimize, pre_header->GetLastInstruction());
deoptimize->CopyEnvironmentFromWithLoopPhiAdjustment(
suspend->GetEnvironment(), loop_info->GetHeader());
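The hunk above sizes block_has_cha_guard_ with graph->GetAllocator()->Adapter(kArenaAllocCHA), and later hunks pass the same Adapter(...) result to vectors and maps. As a rough, self-contained illustration of what such an adapter does, here is a minimal standard-library-style allocator that forwards every request to one arena; MiniArena and ArenaAdapter are invented names for this sketch and do not reflect ART's actual implementation.

#include <cstddef>
#include <cstdint>
#include <new>
#include <vector>

// Illustrative bump arena (assumption; not ART's ArenaAllocator).
class MiniArena {
 public:
  explicit MiniArena(std::size_t capacity)
      : buffer_(new std::uint8_t[capacity]), capacity_(capacity) {}
  ~MiniArena() { delete[] buffer_; }
  void* Alloc(std::size_t bytes, std::size_t align) {
    std::size_t aligned = (used_ + align - 1) & ~(align - 1);
    if (aligned + bytes > capacity_) throw std::bad_alloc();
    used_ = aligned + bytes;
    return buffer_ + aligned;
  }
 private:
  std::uint8_t* buffer_;
  std::size_t capacity_;
  std::size_t used_ = 0;
};

// Minimal standard-conforming allocator that hands everything to the arena,
// playing the role the Adapter(kArenaAlloc...) calls play in the diff.
template <typename T>
class ArenaAdapter {
 public:
  using value_type = T;
  explicit ArenaAdapter(MiniArena* arena) : arena_(arena) {}
  template <typename U>
  ArenaAdapter(const ArenaAdapter<U>& other) : arena_(other.arena_) {}
  T* allocate(std::size_t n) {
    return static_cast<T*>(arena_->Alloc(n * sizeof(T), alignof(T)));
  }
  void deallocate(T*, std::size_t) {}  // Individual frees are no-ops; the arena goes away as a whole.
  MiniArena* arena_;
};

template <typename T, typename U>
bool operator==(const ArenaAdapter<T>& a, const ArenaAdapter<U>& b) { return a.arena_ == b.arena_; }
template <typename T, typename U>
bool operator!=(const ArenaAdapter<T>& a, const ArenaAdapter<U>& b) { return !(a == b); }

int main() {
  MiniArena arena(1 << 16);
  ArenaAdapter<std::uint8_t> adapter(&arena);
  // Rough counterpart of block_has_cha_guard_: one flag per block, memory owned by the arena.
  std::vector<std::uint8_t, ArenaAdapter<std::uint8_t>> block_has_guard(32u, 0, adapter);
  block_has_guard[5] = 1;
  return block_has_guard[5];
}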
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 3cb37926af..ba26cfc70f 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -322,7 +322,7 @@ void CodeGenerator::InitializeCodeGeneration(size_t number_of_spill_slots,
void CodeGenerator::CreateCommonInvokeLocationSummary(
HInvoke* invoke, InvokeDexCallingConventionVisitor* visitor) {
- ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetArena();
+ ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
LocationSummary* locations = new (allocator) LocationSummary(invoke,
LocationSummary::kCallOnMainOnly);
@@ -420,7 +420,7 @@ void CodeGenerator::CreateUnresolvedFieldLocationSummary(
bool is_get = field_access->IsUnresolvedInstanceFieldGet()
|| field_access->IsUnresolvedStaticFieldGet();
- ArenaAllocator* allocator = field_access->GetBlock()->GetGraph()->GetArena();
+ ArenaAllocator* allocator = field_access->GetBlock()->GetGraph()->GetAllocator();
LocationSummary* locations =
new (allocator) LocationSummary(field_access, LocationSummary::kCallOnMainOnly);
@@ -541,7 +541,7 @@ void CodeGenerator::CreateLoadClassRuntimeCallLocationSummary(HLoadClass* cls,
Location runtime_return_location) {
DCHECK_EQ(cls->GetLoadKind(), HLoadClass::LoadKind::kRuntimeCall);
DCHECK_EQ(cls->InputCount(), 1u);
- LocationSummary* locations = new (cls->GetBlock()->GetGraph()->GetArena()) LocationSummary(
+ LocationSummary* locations = new (cls->GetBlock()->GetGraph()->GetAllocator()) LocationSummary(
cls, LocationSummary::kCallOnMainOnly);
locations->SetInAt(0, Location::NoLocation());
locations->AddTemp(runtime_type_index_location);
@@ -617,7 +617,7 @@ std::unique_ptr<CodeGenerator> CodeGenerator::Create(HGraph* graph,
const InstructionSetFeatures& isa_features,
const CompilerOptions& compiler_options,
OptimizingCompilerStats* stats) {
- ArenaAllocator* arena = graph->GetArena();
+ ArenaAllocator* arena = graph->GetAllocator();
switch (instruction_set) {
#ifdef ART_ENABLE_CODEGEN_arm
case kArm:
@@ -712,7 +712,7 @@ static void CheckLoopEntriesCanBeUsedForOsr(const HGraph& graph,
// One can write loops through try/catch, which we do not support for OSR anyway.
return;
}
- ArenaVector<HSuspendCheck*> loop_headers(graph.GetArena()->Adapter(kArenaAllocMisc));
+ ArenaVector<HSuspendCheck*> loop_headers(graph.GetAllocator()->Adapter(kArenaAllocMisc));
for (HBasicBlock* block : graph.GetReversePostOrder()) {
if (block->IsLoopHeader()) {
HSuspendCheck* suspend_check = block->GetLoopInformation()->GetSuspendCheck();
@@ -721,7 +721,8 @@ static void CheckLoopEntriesCanBeUsedForOsr(const HGraph& graph,
}
}
}
- ArenaVector<size_t> covered(loop_headers.size(), 0, graph.GetArena()->Adapter(kArenaAllocMisc));
+ ArenaVector<size_t> covered(
+ loop_headers.size(), 0, graph.GetAllocator()->Adapter(kArenaAllocMisc));
IterationRange<DexInstructionIterator> instructions = code_item.Instructions();
for (auto it = instructions.begin(); it != instructions.end(); ++it) {
const uint32_t dex_pc = it.GetDexPC(instructions.begin());
@@ -909,7 +910,7 @@ void CodeGenerator::MaybeRecordNativeDebugInfo(HInstruction* instruction,
}
void CodeGenerator::RecordCatchBlockInfo() {
- ArenaAllocator* arena = graph_->GetArena();
+ ArenaAllocator* arena = graph_->GetAllocator();
for (HBasicBlock* block : *block_order_) {
if (!block->IsCatchBlock()) {
@@ -1194,7 +1195,8 @@ LocationSummary* CodeGenerator::CreateThrowingSlowPathLocations(HInstruction* in
if (can_throw_into_catch_block) {
call_kind = LocationSummary::kCallOnSlowPath;
}
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations =
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
if (can_throw_into_catch_block && compiler_options_.GetImplicitNullChecks()) {
locations->SetCustomSlowPathCallerSaves(caller_saves); // Default: no caller-save registers.
}
@@ -1237,7 +1239,7 @@ void CodeGenerator::EmitParallelMoves(Location from1,
Location from2,
Location to2,
DataType::Type type2) {
- HParallelMove parallel_move(GetGraph()->GetArena());
+ HParallelMove parallel_move(GetGraph()->GetAllocator());
parallel_move.AddMove(from1, to1, type1, nullptr);
parallel_move.AddMove(from2, to2, type2, nullptr);
GetMoveResolver()->EmitNativeCode(&parallel_move);
@@ -1400,7 +1402,7 @@ void CodeGenerator::CreateSystemArrayCopyLocationSummary(HInvoke* invoke) {
return;
}
- ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetArena();
+ ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
LocationSummary* locations = new (allocator) LocationSummary(invoke,
LocationSummary::kCallOnSlowPath,
kIntrinsified);
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index ac3c8394e6..2c3cf262b1 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -605,26 +605,26 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
fpu_spill_mask_(0),
first_register_slot_in_slow_path_(0),
allocated_registers_(RegisterSet::Empty()),
- blocked_core_registers_(graph->GetArena()->AllocArray<bool>(number_of_core_registers,
- kArenaAllocCodeGenerator)),
- blocked_fpu_registers_(graph->GetArena()->AllocArray<bool>(number_of_fpu_registers,
- kArenaAllocCodeGenerator)),
+ blocked_core_registers_(graph->GetAllocator()->AllocArray<bool>(number_of_core_registers,
+ kArenaAllocCodeGenerator)),
+ blocked_fpu_registers_(graph->GetAllocator()->AllocArray<bool>(number_of_fpu_registers,
+ kArenaAllocCodeGenerator)),
number_of_core_registers_(number_of_core_registers),
number_of_fpu_registers_(number_of_fpu_registers),
number_of_register_pairs_(number_of_register_pairs),
core_callee_save_mask_(core_callee_save_mask),
fpu_callee_save_mask_(fpu_callee_save_mask),
- stack_map_stream_(graph->GetArena(), graph->GetInstructionSet()),
+ stack_map_stream_(graph->GetAllocator(), graph->GetInstructionSet()),
block_order_(nullptr),
jit_string_roots_(StringReferenceValueComparator(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
jit_class_roots_(TypeReferenceValueComparator(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
disasm_info_(nullptr),
stats_(stats),
graph_(graph),
compiler_options_(compiler_options),
- slow_paths_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ slow_paths_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
current_slow_path_(nullptr),
current_block_index_(0),
is_leaf_(true),
@@ -668,8 +668,8 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
// We use raw array allocations instead of ArenaVector<> because Labels are
// non-constructible and non-movable and as such cannot be held in a vector.
size_t size = GetGraph()->GetBlocks().size();
- LabelType* labels = GetGraph()->GetArena()->AllocArray<LabelType>(size,
- kArenaAllocCodeGenerator);
+ LabelType* labels =
+ GetGraph()->GetAllocator()->AllocArray<LabelType>(size, kArenaAllocCodeGenerator);
for (size_t i = 0; i != size; ++i) {
new(labels + i) LabelType();
}
@@ -823,7 +823,8 @@ class SlowPathGenerator {
SlowPathGenerator(HGraph* graph, CodeGenerator* codegen)
: graph_(graph),
codegen_(codegen),
- slow_path_map_(std::less<uint32_t>(), graph->GetArena()->Adapter(kArenaAllocSlowPaths)) {}
+ slow_path_map_(std::less<uint32_t>(),
+ graph->GetAllocator()->Adapter(kArenaAllocSlowPaths)) {}
// Creates and adds a new slow-path, if needed, or returns existing one otherwise.
// Templating the method (rather than the whole class) on the slow-path type enables
@@ -857,10 +858,11 @@ class SlowPathGenerator {
}
} else {
// First time this dex-pc is seen.
- iter = slow_path_map_.Put(dex_pc, {{}, {graph_->GetArena()->Adapter(kArenaAllocSlowPaths)}});
+ iter = slow_path_map_.Put(dex_pc,
+ {{}, {graph_->GetAllocator()->Adapter(kArenaAllocSlowPaths)}});
}
// Cannot share: create and add new slow-path for this particular dex-pc.
- SlowPathCodeType* slow_path = new (graph_->GetArena()) SlowPathCodeType(instruction);
+ SlowPathCodeType* slow_path = new (graph_->GetAllocator()) SlowPathCodeType(instruction);
iter->second.emplace_back(std::make_pair(instruction, slow_path));
codegen_->AddSlowPath(slow_path);
return slow_path;
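One hunk above keeps the comment explaining that raw array allocation is used because Label is non-constructible and non-movable, and then builds each element with new(labels + i) LabelType(). The sketch below shows that idiom in isolation: raw aligned storage plus in-place construction for a type a vector cannot hold. FixedLabel and the fixed count are assumptions made up for this sketch, which also relies on FixedLabel being trivially destructible so the skipped destructor calls are harmless.

#include <cstddef>
#include <cstdint>
#include <new>

// Stand-in for a non-copyable, non-movable label type (assumption, not ART's Label).
class FixedLabel {
 public:
  FixedLabel() = default;
  FixedLabel(const FixedLabel&) = delete;
  FixedLabel& operator=(const FixedLabel&) = delete;
  FixedLabel(FixedLabel&&) = delete;
  void Bind(std::size_t pc) { pc_ = pc; bound_ = true; }
  bool IsBound() const { return bound_; }
 private:
  std::size_t pc_ = 0;
  bool bound_ = false;
};

int main() {
  const std::size_t count = 8;
  // Raw, suitably aligned storage; a std::vector<FixedLabel> would not compile
  // because the element type is neither copyable nor movable.
  alignas(FixedLabel) std::uint8_t storage[sizeof(FixedLabel) * count];
  FixedLabel* labels = reinterpret_cast<FixedLabel*>(storage);
  for (std::size_t i = 0; i != count; ++i) {
    new (labels + i) FixedLabel();  // Construct in place, mirroring `new(labels + i) LabelType()`.
  }
  labels[3].Bind(0x40);
  // FixedLabel has a trivial destructor, so no explicit destruction pass is needed here.
  return labels[3].IsBound() ? 0 : 1;
}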
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 468e93a8c0..9be9117967 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -620,7 +620,7 @@ class ArraySetSlowPathARM64 : public SlowPathCodeARM64 {
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConvention calling_convention;
- HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+ HParallelMove parallel_move(codegen->GetGraph()->GetAllocator());
parallel_move.AddMove(
locations->InAt(0),
LocationFrom(calling_convention.GetRegisterAt(0)),
@@ -1294,7 +1294,7 @@ class ReadBarrierForHeapReferenceSlowPathARM64 : public SlowPathCodeARM64 {
// We're moving two or three locations to locations that could
// overlap, so we need a parallel move resolver.
InvokeRuntimeCallingConvention calling_convention;
- HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+ HParallelMove parallel_move(codegen->GetGraph()->GetAllocator());
parallel_move.AddMove(ref_,
LocationFrom(calling_convention.GetRegisterAt(0)),
type,
@@ -1453,28 +1453,28 @@ CodeGeneratorARM64::CodeGeneratorARM64(HGraph* graph,
callee_saved_fp_registers.GetList(),
compiler_options,
stats),
- block_labels_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- jump_tables_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ block_labels_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ jump_tables_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
location_builder_(graph, this),
instruction_visitor_(graph, this),
- move_resolver_(graph->GetArena(), this),
- assembler_(graph->GetArena()),
+ move_resolver_(graph->GetAllocator(), this),
+ assembler_(graph->GetAllocator()),
isa_features_(isa_features),
uint32_literals_(std::less<uint32_t>(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
uint64_literals_(std::less<uint64_t>(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- pc_relative_method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- method_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- pc_relative_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- string_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- baker_read_barrier_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ pc_relative_method_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ method_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ pc_relative_type_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ type_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ pc_relative_string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ string_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ baker_read_barrier_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
jit_string_patches_(StringReferenceValueComparator(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
jit_class_patches_(TypeReferenceValueComparator(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
+ graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)) {
// Save the link register (containing the return address) to mimic Quick.
AddAllocatedRegister(LocationFrom(lr));
}
@@ -2204,7 +2204,7 @@ void InstructionCodeGeneratorARM64::GenerateSuspendCheck(HSuspendCheck* instruct
SuspendCheckSlowPathARM64* slow_path =
down_cast<SuspendCheckSlowPathARM64*>(instruction->GetSlowPath());
if (slow_path == nullptr) {
- slow_path = new (GetGraph()->GetArena()) SuspendCheckSlowPathARM64(instruction, successor);
+ slow_path = new (GetGraph()->GetAllocator()) SuspendCheckSlowPathARM64(instruction, successor);
instruction->SetSlowPath(slow_path);
codegen_->AddSlowPath(slow_path);
if (successor != nullptr) {
@@ -2235,36 +2235,9 @@ InstructionCodeGeneratorARM64::InstructionCodeGeneratorARM64(HGraph* graph,
assembler_(codegen->GetAssembler()),
codegen_(codegen) {}
-#define FOR_EACH_UNIMPLEMENTED_INSTRUCTION(M) \
- /* No unimplemented IR. */
-
-#define UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name) name##UnimplementedInstructionBreakCode
-
-enum UnimplementedInstructionBreakCode {
- // Using a base helps identify when we hit such breakpoints.
- UnimplementedInstructionBreakCodeBaseCode = 0x900,
-#define ENUM_UNIMPLEMENTED_INSTRUCTION(name) UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name),
- FOR_EACH_UNIMPLEMENTED_INSTRUCTION(ENUM_UNIMPLEMENTED_INSTRUCTION)
-#undef ENUM_UNIMPLEMENTED_INSTRUCTION
-};
-
-#define DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS(name) \
- void InstructionCodeGeneratorARM64::Visit##name(H##name* instr ATTRIBUTE_UNUSED) { \
- __ Brk(UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name)); \
- } \
- void LocationsBuilderARM64::Visit##name(H##name* instr) { \
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr); \
- locations->SetOut(Location::Any()); \
- }
- FOR_EACH_UNIMPLEMENTED_INSTRUCTION(DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS)
-#undef DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS
-
-#undef UNIMPLEMENTED_INSTRUCTION_BREAK_CODE
-#undef FOR_EACH_UNIMPLEMENTED_INSTRUCTION
-
void LocationsBuilderARM64::HandleBinaryOp(HBinaryOperation* instr) {
DCHECK_EQ(instr->InputCount(), 2U);
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instr);
DataType::Type type = instr->GetResultType();
switch (type) {
case DataType::Type::kInt32:
@@ -2293,10 +2266,10 @@ void LocationsBuilderARM64::HandleFieldGet(HInstruction* instruction,
bool object_field_get_with_read_barrier =
kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference);
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction,
- object_field_get_with_read_barrier ?
- LocationSummary::kCallOnSlowPath :
- LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction,
+ object_field_get_with_read_barrier
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall);
if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
// We need a temporary register for the read barrier marking slow
@@ -2378,7 +2351,7 @@ void InstructionCodeGeneratorARM64::HandleFieldGet(HInstruction* instruction,
void LocationsBuilderARM64::HandleFieldSet(HInstruction* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
if (IsConstantZeroBitPattern(instruction->InputAt(1))) {
locations->SetInAt(1, Location::ConstantLocation(instruction->InputAt(1)->AsConstant()));
@@ -2485,7 +2458,7 @@ void InstructionCodeGeneratorARM64::HandleBinaryOp(HBinaryOperation* instr) {
void LocationsBuilderARM64::HandleShift(HBinaryOperation* instr) {
DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instr);
DataType::Type type = instr->GetResultType();
switch (type) {
case DataType::Type::kInt32:
@@ -2556,7 +2529,7 @@ void InstructionCodeGeneratorARM64::VisitAnd(HAnd* instruction) {
void LocationsBuilderARM64::VisitBitwiseNegatedRight(HBitwiseNegatedRight* instr) {
DCHECK(DataType::IsIntegralType(instr->GetType())) << instr->GetType();
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instr);
locations->SetInAt(0, Location::RequiresRegister());
// There is no immediate variant of negated bitwise instructions in AArch64.
locations->SetInAt(1, Location::RequiresRegister());
@@ -2588,7 +2561,7 @@ void LocationsBuilderARM64::VisitDataProcWithShifterOp(
DCHECK(instruction->GetType() == DataType::Type::kInt32 ||
instruction->GetType() == DataType::Type::kInt64);
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
if (instruction->GetInstrKind() == HInstruction::kNeg) {
locations->SetInAt(0, Location::ConstantLocation(instruction->InputAt(0)->AsConstant()));
} else {
@@ -2659,7 +2632,7 @@ void InstructionCodeGeneratorARM64::VisitDataProcWithShifterOp(
void LocationsBuilderARM64::VisitIntermediateAddress(HIntermediateAddress* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, ARM64EncodableConstantOrRegister(instruction->GetOffset(), instruction));
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
@@ -2673,7 +2646,7 @@ void InstructionCodeGeneratorARM64::VisitIntermediateAddress(HIntermediateAddres
void LocationsBuilderARM64::VisitIntermediateAddressIndex(HIntermediateAddressIndex* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
HIntConstant* shift = instruction->GetShift()->AsIntConstant();
@@ -2705,7 +2678,7 @@ void InstructionCodeGeneratorARM64::VisitIntermediateAddressIndex(
void LocationsBuilderARM64::VisitMultiplyAccumulate(HMultiplyAccumulate* instr) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instr, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instr, LocationSummary::kNoCall);
HInstruction* accumulator = instr->InputAt(HMultiplyAccumulate::kInputAccumulatorIndex);
if (instr->GetOpKind() == HInstruction::kSub &&
accumulator->IsConstant() &&
@@ -2759,10 +2732,10 @@ void LocationsBuilderARM64::VisitArrayGet(HArrayGet* instruction) {
bool object_array_get_with_read_barrier =
kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference);
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction,
- object_array_get_with_read_barrier ?
- LocationSummary::kCallOnSlowPath :
- LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction,
+ object_array_get_with_read_barrier
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall);
if (object_array_get_with_read_barrier && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
// We need a temporary register for the read barrier marking slow
@@ -2929,7 +2902,7 @@ void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) {
}
void LocationsBuilderARM64::VisitArrayLength(HArrayLength* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
@@ -2953,7 +2926,7 @@ void LocationsBuilderARM64::VisitArraySet(HArraySet* instruction) {
DataType::Type value_type = instruction->GetComponentType();
bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck();
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
instruction,
may_need_runtime_call_for_type_check ?
LocationSummary::kCallOnSlowPath :
@@ -3039,7 +3012,7 @@ void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) {
uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
if (may_need_runtime_call_for_type_check) {
- slow_path = new (GetGraph()->GetArena()) ArraySetSlowPathARM64(instruction);
+ slow_path = new (GetGraph()->GetAllocator()) ArraySetSlowPathARM64(instruction);
codegen_->AddSlowPath(slow_path);
if (instruction->GetValueCanBeNull()) {
vixl::aarch64::Label non_zero;
@@ -3154,7 +3127,7 @@ void LocationsBuilderARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
void InstructionCodeGeneratorARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
BoundsCheckSlowPathARM64* slow_path =
- new (GetGraph()->GetArena()) BoundsCheckSlowPathARM64(instruction);
+ new (GetGraph()->GetAllocator()) BoundsCheckSlowPathARM64(instruction);
codegen_->AddSlowPath(slow_path);
__ Cmp(InputRegisterAt(instruction, 0), InputOperandAt(instruction, 1));
__ B(slow_path->GetEntryLabel(), hs);
@@ -3162,7 +3135,7 @@ void InstructionCodeGeneratorARM64::VisitBoundsCheck(HBoundsCheck* instruction)
void LocationsBuilderARM64::VisitClinitCheck(HClinitCheck* check) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
+ new (GetGraph()->GetAllocator()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
locations->SetInAt(0, Location::RequiresRegister());
if (check->HasUses()) {
locations->SetOut(Location::SameAsFirstInput());
@@ -3171,7 +3144,7 @@ void LocationsBuilderARM64::VisitClinitCheck(HClinitCheck* check) {
void InstructionCodeGeneratorARM64::VisitClinitCheck(HClinitCheck* check) {
// We assume the class is not null.
- SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM64(
+ SlowPathCodeARM64* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathARM64(
check->GetLoadClass(), check, check->GetDexPc(), true);
codegen_->AddSlowPath(slow_path);
GenerateClassInitializationCheck(slow_path, InputRegisterAt(check, 0));
@@ -3210,7 +3183,7 @@ void InstructionCodeGeneratorARM64::GenerateFcmp(HInstruction* instruction) {
void LocationsBuilderARM64::VisitCompare(HCompare* compare) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(compare, LocationSummary::kNoCall);
DataType::Type in_type = compare->InputAt(0)->GetType();
switch (in_type) {
case DataType::Type::kBool:
@@ -3276,7 +3249,7 @@ void InstructionCodeGeneratorARM64::VisitCompare(HCompare* compare) {
}
void LocationsBuilderARM64::HandleCondition(HCondition* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
if (DataType::IsFloatingPointType(instruction->InputAt(0)->GetType())) {
locations->SetInAt(0, Location::RequiresFpuRegister());
@@ -3482,7 +3455,7 @@ void InstructionCodeGeneratorARM64::GenerateDivRemIntegral(HBinaryOperation* ins
void LocationsBuilderARM64::VisitDiv(HDiv* div) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(div, LocationSummary::kNoCall);
switch (div->GetResultType()) {
case DataType::Type::kInt32:
case DataType::Type::kInt64:
@@ -3528,7 +3501,7 @@ void LocationsBuilderARM64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
void InstructionCodeGeneratorARM64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
SlowPathCodeARM64* slow_path =
- new (GetGraph()->GetArena()) DivZeroCheckSlowPathARM64(instruction);
+ new (GetGraph()->GetAllocator()) DivZeroCheckSlowPathARM64(instruction);
codegen_->AddSlowPath(slow_path);
Location value = instruction->GetLocations()->InAt(0);
@@ -3554,7 +3527,7 @@ void InstructionCodeGeneratorARM64::VisitDivZeroCheck(HDivZeroCheck* instruction
void LocationsBuilderARM64::VisitDoubleConstant(HDoubleConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -3572,7 +3545,7 @@ void InstructionCodeGeneratorARM64::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
void LocationsBuilderARM64::VisitFloatConstant(HFloatConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -3726,7 +3699,7 @@ void InstructionCodeGeneratorARM64::GenerateTestAndBranch(HInstruction* instruct
}
void LocationsBuilderARM64::VisitIf(HIf* if_instr) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(if_instr);
if (IsBooleanValueOrMaterializedCondition(if_instr->InputAt(0))) {
locations->SetInAt(0, Location::RequiresRegister());
}
@@ -3747,7 +3720,7 @@ void InstructionCodeGeneratorARM64::VisitIf(HIf* if_instr) {
}
void LocationsBuilderARM64::VisitDeoptimize(HDeoptimize* deoptimize) {
- LocationSummary* locations = new (GetGraph()->GetArena())
+ LocationSummary* locations = new (GetGraph()->GetAllocator())
LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
InvokeRuntimeCallingConvention calling_convention;
RegisterSet caller_saves = RegisterSet::Empty();
@@ -3768,7 +3741,7 @@ void InstructionCodeGeneratorARM64::VisitDeoptimize(HDeoptimize* deoptimize) {
}
void LocationsBuilderARM64::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
- LocationSummary* locations = new (GetGraph()->GetArena())
+ LocationSummary* locations = new (GetGraph()->GetAllocator())
LocationSummary(flag, LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
}
@@ -3790,7 +3763,7 @@ static inline Condition GetConditionForSelect(HCondition* condition) {
}
void LocationsBuilderARM64::VisitSelect(HSelect* select) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(select);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(select);
if (DataType::IsFloatingPointType(select->GetType())) {
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetInAt(1, Location::RequiresFpuRegister());
@@ -3859,7 +3832,7 @@ void InstructionCodeGeneratorARM64::VisitSelect(HSelect* select) {
}
void LocationsBuilderARM64::VisitNativeDebugInfo(HNativeDebugInfo* info) {
- new (GetGraph()->GetArena()) LocationSummary(info);
+ new (GetGraph()->GetAllocator()) LocationSummary(info);
}
void InstructionCodeGeneratorARM64::VisitNativeDebugInfo(HNativeDebugInfo*) {
@@ -3928,7 +3901,8 @@ void LocationsBuilderARM64::VisitInstanceOf(HInstanceOf* instruction) {
break;
}
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations =
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
if (baker_read_barrier_slow_path) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -4083,8 +4057,8 @@ void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) {
kWithoutReadBarrier);
__ Cmp(out, cls);
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(instruction,
- /* is_fatal */ false);
+ slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathARM64(instruction,
+ /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ B(ne, slow_path->GetEntryLabel());
__ Mov(out, 1);
@@ -4115,8 +4089,8 @@ void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) {
// call to the runtime not using a type checking slow path).
// This should also be beneficial for the other cases above.
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(instruction,
- /* is_fatal */ false);
+ slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathARM64(instruction,
+ /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ B(slow_path->GetEntryLabel());
if (zero.IsLinked()) {
@@ -4161,7 +4135,8 @@ void LocationsBuilderARM64::VisitCheckCast(HCheckCast* instruction) {
break;
}
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations =
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
// Add temps for read barriers and other uses. One is used by TypeCheckSlowPathARM64.
@@ -4203,8 +4178,8 @@ void InstructionCodeGeneratorARM64::VisitCheckCast(HCheckCast* instruction) {
!instruction->CanThrowIntoCatchBlock();
}
SlowPathCodeARM64* type_check_slow_path =
- new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(instruction,
- is_type_check_slow_path_fatal);
+ new (GetGraph()->GetAllocator()) TypeCheckSlowPathARM64(instruction,
+ is_type_check_slow_path_fatal);
codegen_->AddSlowPath(type_check_slow_path);
vixl::aarch64::Label done;
@@ -4372,7 +4347,7 @@ void InstructionCodeGeneratorARM64::VisitCheckCast(HCheckCast* instruction) {
}
void LocationsBuilderARM64::VisitIntConstant(HIntConstant* constant) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(constant);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -4381,7 +4356,7 @@ void InstructionCodeGeneratorARM64::VisitIntConstant(HIntConstant* constant ATTR
}
void LocationsBuilderARM64::VisitNullConstant(HNullConstant* constant) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(constant);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -4472,7 +4447,7 @@ void InstructionCodeGeneratorARM64::VisitInvokeInterface(HInvokeInterface* invok
}
void LocationsBuilderARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
- IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetArena(), codegen_);
+ IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetAllocator(), codegen_);
if (intrinsic.TryDispatch(invoke)) {
return;
}
@@ -4485,7 +4460,7 @@ void LocationsBuilderARM64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* inv
// art::PrepareForRegisterAllocation.
DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
- IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetArena(), codegen_);
+ IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetAllocator(), codegen_);
if (intrinsic.TryDispatch(invoke)) {
return;
}
@@ -4896,7 +4871,7 @@ void LocationsBuilderARM64::VisitLoadClass(HLoadClass* cls) {
LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(cls, call_kind);
if (kUseBakerReadBarrier && requires_read_barrier && !cls->NeedsEnvironment()) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -5037,7 +5012,7 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SA
bool do_clinit = cls->MustGenerateClinitCheck();
if (generate_null_check || do_clinit) {
DCHECK(cls->CanCallRuntime());
- SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM64(
+ SlowPathCodeARM64* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathARM64(
cls, cls, cls->GetDexPc(), do_clinit, bss_entry_temp, bss_entry_adrp_label);
codegen_->AddSlowPath(slow_path);
if (generate_null_check) {
@@ -5058,7 +5033,7 @@ static MemOperand GetExceptionTlsAddress() {
void LocationsBuilderARM64::VisitLoadException(HLoadException* load) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(load, LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
}
@@ -5067,7 +5042,7 @@ void InstructionCodeGeneratorARM64::VisitLoadException(HLoadException* instructi
}
void LocationsBuilderARM64::VisitClearException(HClearException* clear) {
- new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(clear, LocationSummary::kNoCall);
}
void InstructionCodeGeneratorARM64::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
@@ -5094,7 +5069,7 @@ HLoadString::LoadKind CodeGeneratorARM64::GetSupportedLoadStringKind(
void LocationsBuilderARM64::VisitLoadString(HLoadString* load) {
LocationSummary::CallKind call_kind = CodeGenerator::GetLoadStringCallKind(load);
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(load, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(load, call_kind);
if (load->GetLoadKind() == HLoadString::LoadKind::kRuntimeCall) {
InvokeRuntimeCallingConvention calling_convention;
locations->SetOut(calling_convention.GetReturnLocation(load->GetType()));
@@ -5177,7 +5152,7 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) NO_THREAD
ldr_label,
kCompilerReadBarrierOption);
SlowPathCodeARM64* slow_path =
- new (GetGraph()->GetArena()) LoadStringSlowPathARM64(load, temp, adrp_label);
+ new (GetGraph()->GetAllocator()) LoadStringSlowPathARM64(load, temp, adrp_label);
codegen_->AddSlowPath(slow_path);
__ Cbz(out.X(), slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
@@ -5210,7 +5185,7 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) NO_THREAD
}
void LocationsBuilderARM64::VisitLongConstant(HLongConstant* constant) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(constant);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -5219,8 +5194,8 @@ void InstructionCodeGeneratorARM64::VisitLongConstant(HLongConstant* constant AT
}
void LocationsBuilderARM64::VisitMonitorOperation(HMonitorOperation* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
}
@@ -5239,7 +5214,7 @@ void InstructionCodeGeneratorARM64::VisitMonitorOperation(HMonitorOperation* ins
void LocationsBuilderARM64::VisitMul(HMul* mul) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(mul, LocationSummary::kNoCall);
switch (mul->GetResultType()) {
case DataType::Type::kInt32:
case DataType::Type::kInt64:
@@ -5279,7 +5254,7 @@ void InstructionCodeGeneratorARM64::VisitMul(HMul* mul) {
void LocationsBuilderARM64::VisitNeg(HNeg* neg) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(neg, LocationSummary::kNoCall);
switch (neg->GetResultType()) {
case DataType::Type::kInt32:
case DataType::Type::kInt64:
@@ -5316,8 +5291,8 @@ void InstructionCodeGeneratorARM64::VisitNeg(HNeg* neg) {
}
void LocationsBuilderARM64::VisitNewArray(HNewArray* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
locations->SetOut(LocationFrom(x0));
locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
@@ -5335,8 +5310,8 @@ void InstructionCodeGeneratorARM64::VisitNewArray(HNewArray* instruction) {
}
void LocationsBuilderARM64::VisitNewInstance(HNewInstance* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
if (instruction->IsStringAlloc()) {
locations->AddTemp(LocationFrom(kArtMethodRegister));
@@ -5372,7 +5347,7 @@ void InstructionCodeGeneratorARM64::VisitNewInstance(HNewInstance* instruction)
}
void LocationsBuilderARM64::VisitNot(HNot* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
@@ -5390,7 +5365,7 @@ void InstructionCodeGeneratorARM64::VisitNot(HNot* instruction) {
}
void LocationsBuilderARM64::VisitBooleanNot(HBooleanNot* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
@@ -5418,7 +5393,8 @@ void CodeGeneratorARM64::GenerateImplicitNullCheck(HNullCheck* instruction) {
}
void CodeGeneratorARM64::GenerateExplicitNullCheck(HNullCheck* instruction) {
- SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathARM64(instruction);
+ SlowPathCodeARM64* slow_path =
+ new (GetGraph()->GetAllocator()) NullCheckSlowPathARM64(instruction);
AddSlowPath(slow_path);
LocationSummary* locations = instruction->GetLocations();
@@ -5448,7 +5424,7 @@ void InstructionCodeGeneratorARM64::VisitParallelMove(HParallelMove* instruction
}
void LocationsBuilderARM64::VisitParameterValue(HParameterValue* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
if (location.IsStackSlot()) {
location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
@@ -5465,7 +5441,7 @@ void InstructionCodeGeneratorARM64::VisitParameterValue(
void LocationsBuilderARM64::VisitCurrentMethod(HCurrentMethod* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetOut(LocationFrom(kArtMethodRegister));
}
@@ -5475,7 +5451,7 @@ void InstructionCodeGeneratorARM64::VisitCurrentMethod(
}
void LocationsBuilderARM64::VisitPhi(HPhi* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) {
locations->SetInAt(i, Location::Any());
}
@@ -5491,7 +5467,7 @@ void LocationsBuilderARM64::VisitRem(HRem* rem) {
LocationSummary::CallKind call_kind =
DataType::IsFloatingPointType(type) ? LocationSummary::kCallOnMainOnly
: LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(rem, call_kind);
switch (type) {
case DataType::Type::kInt32:
@@ -5563,7 +5539,7 @@ void InstructionCodeGeneratorARM64::VisitMemoryBarrier(HMemoryBarrier* memory_ba
}
void LocationsBuilderARM64::VisitReturn(HReturn* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
DataType::Type return_type = instruction->InputAt(0)->GetType();
locations->SetInAt(0, ARM64ReturnLocation(return_type));
}
@@ -5697,8 +5673,8 @@ void InstructionCodeGeneratorARM64::VisitUnresolvedStaticFieldSet(
}
void LocationsBuilderARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnSlowPath);
// In suspend check slow path, usually there are no caller-save registers at all.
// If SIMD instructions are present, however, we force spilling all live SIMD
// registers in full width (since the runtime only saves/restores lower part).
@@ -5722,8 +5698,8 @@ void InstructionCodeGeneratorARM64::VisitSuspendCheck(HSuspendCheck* instruction
}
void LocationsBuilderARM64::VisitThrow(HThrow* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
}
@@ -5735,7 +5711,7 @@ void InstructionCodeGeneratorARM64::VisitThrow(HThrow* instruction) {
void LocationsBuilderARM64::VisitTypeConversion(HTypeConversion* conversion) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(conversion, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(conversion, LocationSummary::kNoCall);
DataType::Type input_type = conversion->GetInputType();
DataType::Type result_type = conversion->GetResultType();
DCHECK(!DataType::IsTypeConversionImplicit(input_type, result_type))
@@ -5829,7 +5805,7 @@ void InstructionCodeGeneratorARM64::VisitBoundType(HBoundType* instruction ATTRI
// Simple implementation of packed switch - generate cascaded compare/jumps.
void LocationsBuilderARM64::VisitPackedSwitch(HPackedSwitch* switch_instr) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(switch_instr, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
}
@@ -6053,7 +6029,7 @@ void InstructionCodeGeneratorARM64::GenerateGcRootFieldLoad(
// Slow path marking the GC root `root`. The entrypoint will
// be loaded by the slow path code.
SlowPathCodeARM64* slow_path =
- new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathARM64(instruction, root);
+ new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathARM64(instruction, root);
codegen_->AddSlowPath(slow_path);
// /* GcRoot<mirror::Object> */ root = *(obj + offset)
@@ -6312,7 +6288,7 @@ void CodeGeneratorARM64::GenerateReferenceLoadWithBakerReadBarrier(HInstruction*
// Slow path marking the object `ref` when the GC is marking. The
// entrypoint will be loaded by the slow path code.
SlowPathCodeARM64* slow_path =
- new (GetGraph()->GetArena()) LoadReferenceWithBakerReadBarrierSlowPathARM64(
+ new (GetGraph()->GetAllocator()) LoadReferenceWithBakerReadBarrierSlowPathARM64(
instruction,
ref,
obj,
@@ -6370,7 +6346,7 @@ void CodeGeneratorARM64::UpdateReferenceFieldWithBakerReadBarrier(HInstruction*
// Slow path updating the object reference at address `obj + field_offset`
// when the GC is marking. The entrypoint will be loaded by the slow path code.
SlowPathCodeARM64* slow_path =
- new (GetGraph()->GetArena()) LoadReferenceWithBakerReadBarrierAndUpdateFieldSlowPathARM64(
+ new (GetGraph()->GetAllocator()) LoadReferenceWithBakerReadBarrierAndUpdateFieldSlowPathARM64(
instruction,
ref,
obj,
@@ -6497,7 +6473,7 @@ void CodeGeneratorARM64::GenerateReadBarrierSlow(HInstruction* instruction,
// not used by the artReadBarrierSlow entry point.
//
// TODO: Unpoison `ref` when it is used by artReadBarrierSlow.
- SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena())
+ SlowPathCodeARM64* slow_path = new (GetGraph()->GetAllocator())
ReadBarrierForHeapReferenceSlowPathARM64(instruction, out, ref, obj, offset, index);
AddSlowPath(slow_path);
@@ -6533,7 +6509,7 @@ void CodeGeneratorARM64::GenerateReadBarrierForRootSlow(HInstruction* instructio
// Note that GC roots are not affected by heap poisoning, so we do
// not need to do anything special for this here.
SlowPathCodeARM64* slow_path =
- new (GetGraph()->GetArena()) ReadBarrierForRootSlowPathARM64(instruction, out, root);
+ new (GetGraph()->GetAllocator()) ReadBarrierForRootSlowPathARM64(instruction, out, root);
AddSlowPath(slow_path);
__ B(slow_path->GetEntryLabel());
@@ -6542,7 +6518,7 @@ void CodeGeneratorARM64::GenerateReadBarrierForRootSlow(HInstruction* instructio
void LocationsBuilderARM64::VisitClassTableGet(HClassTableGet* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister());
}
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 21da9557e5..e53773c73d 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -489,7 +489,7 @@ class CodeGeneratorARM64 : public CodeGenerator {
uint32_t GetPreferredSlotsAlignment() const OVERRIDE { return vixl::aarch64::kXRegSizeInBytes; }
JumpTableARM64* CreateJumpTable(HPackedSwitch* switch_instr) {
- jump_tables_.emplace_back(new (GetGraph()->GetArena()) JumpTableARM64(switch_instr));
+ jump_tables_.emplace_back(new (GetGraph()->GetAllocator()) JumpTableARM64(switch_instr));
return jump_tables_.back().get();
}
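
The hunks above (and the ones that follow) all apply the same shape of change: pass-local objects such as LocationSummary instances and the slow-path objects are placement-new'd on the graph's allocator rather than its raw arena, so the memory they take is visible to the allocator's bookkeeping and is released in bulk with it. A minimal, standalone C++ sketch of that shape is shown below; TrackingAllocator and LocationSummaryLike are illustrative stand-ins only, not ART's ArenaAllocator or ScopedArenaAllocator API.

#include <cstddef>
#include <iostream>
#include <new>
#include <vector>

// Hypothetical stand-in for an arena-style allocator whose allocations are
// counted and released in bulk when the allocator itself is destroyed.
class TrackingAllocator {
 public:
  ~TrackingAllocator() {
    for (void* block : blocks_) {
      ::operator delete(block);
    }
  }

  void* Alloc(size_t bytes) {
    void* block = ::operator new(bytes);
    blocks_.push_back(block);
    allocated_bytes_ += bytes;
    return block;
  }

  size_t AllocatedBytes() const { return allocated_bytes_; }

 private:
  std::vector<void*> blocks_;
  size_t allocated_bytes_ = 0;
};

// Placement new routed through the allocator, mirroring the shape of
// `new (GetGraph()->GetAllocator()) LocationSummary(...)` in the hunks above.
void* operator new(size_t bytes, TrackingAllocator* allocator) {
  return allocator->Alloc(bytes);
}
// Matching placement delete, called only if the constructor throws.
void operator delete(void* /*ptr*/, TrackingAllocator* /*allocator*/) {}

struct LocationSummaryLike {  // Hypothetical payload; owned by the allocator.
  explicit LocationSummaryLike(int kind) : call_kind(kind) {}
  int call_kind;
};

int main() {
  TrackingAllocator allocator;
  LocationSummaryLike* summary = new (&allocator) LocationSummaryLike(/*kind=*/0);
  std::cout << "kind=" << summary->call_kind
            << " bytes=" << allocator.AllocatedBytes() << "\n";
  return 0;  // No per-object delete: everything is freed with `allocator`.
}
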
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index d4fb064107..d7137a3b28 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -763,7 +763,7 @@ class ArraySetSlowPathARMVIXL : public SlowPathCodeARMVIXL {
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConventionARMVIXL calling_convention;
- HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+ HParallelMove parallel_move(codegen->GetGraph()->GetAllocator());
parallel_move.AddMove(
locations->InAt(0),
LocationFrom(calling_convention.GetRegisterAt(0)),
@@ -1414,7 +1414,7 @@ class ReadBarrierForHeapReferenceSlowPathARMVIXL : public SlowPathCodeARMVIXL {
// We're moving two or three locations to locations that could
// overlap, so we need a parallel move resolver.
InvokeRuntimeCallingConventionARMVIXL calling_convention;
- HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+ HParallelMove parallel_move(codegen->GetGraph()->GetAllocator());
parallel_move.AddMove(ref_,
LocationFrom(calling_convention.GetRegisterAt(0)),
DataType::Type::kReference,
@@ -2421,26 +2421,26 @@ CodeGeneratorARMVIXL::CodeGeneratorARMVIXL(HGraph* graph,
ComputeSRegisterListMask(kFpuCalleeSaves),
compiler_options,
stats),
- block_labels_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- jump_tables_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ block_labels_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ jump_tables_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
location_builder_(graph, this),
instruction_visitor_(graph, this),
- move_resolver_(graph->GetArena(), this),
- assembler_(graph->GetArena()),
+ move_resolver_(graph->GetAllocator(), this),
+ assembler_(graph->GetAllocator()),
isa_features_(isa_features),
uint32_literals_(std::less<uint32_t>(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- pc_relative_method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- method_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- pc_relative_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- string_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- baker_read_barrier_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ pc_relative_method_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ method_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ pc_relative_type_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ type_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ pc_relative_string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ string_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ baker_read_barrier_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
jit_string_patches_(StringReferenceValueComparator(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
jit_class_patches_(TypeReferenceValueComparator(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
+ graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)) {
// Always save the LR register to mimic Quick.
AddAllocatedRegister(Location::RegisterLocation(LR));
// Give D30 and D31 as scratch register to VIXL. The register allocator only works on
@@ -2810,7 +2810,7 @@ void CodeGeneratorARMVIXL::MoveConstant(Location location, int32_t value) {
void CodeGeneratorARMVIXL::MoveLocation(Location dst, Location src, DataType::Type dst_type) {
// TODO(VIXL): Maybe refactor to have the 'move' implementation here and use it in
// `ParallelMoveResolverARMVIXL::EmitMove`, as is done in the `arm64` backend.
- HParallelMove move(GetGraph()->GetArena());
+ HParallelMove move(GetGraph()->GetAllocator());
move.AddMove(src, dst, dst_type, nullptr);
GetMoveResolver()->EmitNativeCode(&move);
}
@@ -3030,7 +3030,7 @@ void InstructionCodeGeneratorARMVIXL::GenerateTestAndBranch(HInstruction* instru
}
void LocationsBuilderARMVIXL::VisitIf(HIf* if_instr) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(if_instr);
if (IsBooleanValueOrMaterializedCondition(if_instr->InputAt(0))) {
locations->SetInAt(0, Location::RequiresRegister());
}
@@ -3047,7 +3047,7 @@ void InstructionCodeGeneratorARMVIXL::VisitIf(HIf* if_instr) {
}
void LocationsBuilderARMVIXL::VisitDeoptimize(HDeoptimize* deoptimize) {
- LocationSummary* locations = new (GetGraph()->GetArena())
+ LocationSummary* locations = new (GetGraph()->GetAllocator())
LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
InvokeRuntimeCallingConventionARMVIXL calling_convention;
RegisterSet caller_saves = RegisterSet::Empty();
@@ -3068,7 +3068,7 @@ void InstructionCodeGeneratorARMVIXL::VisitDeoptimize(HDeoptimize* deoptimize) {
}
void LocationsBuilderARMVIXL::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
- LocationSummary* locations = new (GetGraph()->GetArena())
+ LocationSummary* locations = new (GetGraph()->GetAllocator())
LocationSummary(flag, LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
}
@@ -3081,7 +3081,7 @@ void InstructionCodeGeneratorARMVIXL::VisitShouldDeoptimizeFlag(HShouldDeoptimiz
}
void LocationsBuilderARMVIXL::VisitSelect(HSelect* select) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(select);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(select);
const bool is_floating_point = DataType::IsFloatingPointType(select->GetType());
if (is_floating_point) {
@@ -3222,7 +3222,7 @@ void InstructionCodeGeneratorARMVIXL::VisitSelect(HSelect* select) {
}
void LocationsBuilderARMVIXL::VisitNativeDebugInfo(HNativeDebugInfo* info) {
- new (GetGraph()->GetArena()) LocationSummary(info);
+ new (GetGraph()->GetAllocator()) LocationSummary(info);
}
void InstructionCodeGeneratorARMVIXL::VisitNativeDebugInfo(HNativeDebugInfo*) {
@@ -3312,7 +3312,7 @@ void CodeGeneratorARMVIXL::GenerateConditionWithZero(IfCondition condition,
void LocationsBuilderARMVIXL::HandleCondition(HCondition* cond) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(cond, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(cond, LocationSummary::kNoCall);
// Handle the long/FP comparisons made in instruction simplification.
switch (cond->InputAt(0)->GetType()) {
case DataType::Type::kInt64:
@@ -3471,7 +3471,7 @@ void InstructionCodeGeneratorARMVIXL::VisitAboveOrEqual(HAboveOrEqual* comp) {
void LocationsBuilderARMVIXL::VisitIntConstant(HIntConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -3481,7 +3481,7 @@ void InstructionCodeGeneratorARMVIXL::VisitIntConstant(HIntConstant* constant AT
void LocationsBuilderARMVIXL::VisitNullConstant(HNullConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -3491,7 +3491,7 @@ void InstructionCodeGeneratorARMVIXL::VisitNullConstant(HNullConstant* constant
void LocationsBuilderARMVIXL::VisitLongConstant(HLongConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -3501,7 +3501,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLongConstant(HLongConstant* constant
void LocationsBuilderARMVIXL::VisitFloatConstant(HFloatConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -3512,7 +3512,7 @@ void InstructionCodeGeneratorARMVIXL::VisitFloatConstant(
void LocationsBuilderARMVIXL::VisitDoubleConstant(HDoubleConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -3548,7 +3548,7 @@ void InstructionCodeGeneratorARMVIXL::VisitReturnVoid(HReturnVoid* ret ATTRIBUTE
void LocationsBuilderARMVIXL::VisitReturn(HReturn* ret) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(ret, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(ret, LocationSummary::kNoCall);
locations->SetInAt(0, parameter_visitor_.GetReturnLocation(ret->InputAt(0)->GetType()));
}
@@ -3722,7 +3722,7 @@ void InstructionCodeGeneratorARMVIXL::VisitInvokePolymorphic(HInvokePolymorphic*
void LocationsBuilderARMVIXL::VisitNeg(HNeg* neg) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(neg, LocationSummary::kNoCall);
switch (neg->GetResultType()) {
case DataType::Type::kInt32: {
locations->SetInAt(0, Location::RequiresRegister());
@@ -3794,7 +3794,7 @@ void LocationsBuilderARMVIXL::VisitTypeConversion(HTypeConversion* conversion) {
? LocationSummary::kCallOnMainOnly
: LocationSummary::kNoCall;
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(conversion, call_kind);
+ new (GetGraph()->GetAllocator()) LocationSummary(conversion, call_kind);
switch (result_type) {
case DataType::Type::kUint8:
@@ -4158,7 +4158,7 @@ void InstructionCodeGeneratorARMVIXL::VisitTypeConversion(HTypeConversion* conve
void LocationsBuilderARMVIXL::VisitAdd(HAdd* add) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(add, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(add, LocationSummary::kNoCall);
switch (add->GetResultType()) {
case DataType::Type::kInt32: {
locations->SetInAt(0, Location::RequiresRegister());
@@ -4223,7 +4223,7 @@ void InstructionCodeGeneratorARMVIXL::VisitAdd(HAdd* add) {
void LocationsBuilderARMVIXL::VisitSub(HSub* sub) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(sub, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(sub, LocationSummary::kNoCall);
switch (sub->GetResultType()) {
case DataType::Type::kInt32: {
locations->SetInAt(0, Location::RequiresRegister());
@@ -4285,7 +4285,7 @@ void InstructionCodeGeneratorARMVIXL::VisitSub(HSub* sub) {
void LocationsBuilderARMVIXL::VisitMul(HMul* mul) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(mul, LocationSummary::kNoCall);
switch (mul->GetResultType()) {
case DataType::Type::kInt32:
case DataType::Type::kInt64: {
@@ -4494,7 +4494,7 @@ void LocationsBuilderARMVIXL::VisitDiv(HDiv* div) {
call_kind = LocationSummary::kCallOnMainOnly;
}
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(div, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(div, call_kind);
switch (div->GetResultType()) {
case DataType::Type::kInt32: {
@@ -4607,7 +4607,7 @@ void LocationsBuilderARMVIXL::VisitRem(HRem* rem) {
call_kind = LocationSummary::kNoCall;
}
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(rem, call_kind);
switch (type) {
case DataType::Type::kInt32: {
@@ -4734,7 +4734,7 @@ void LocationsBuilderARMVIXL::VisitDivZeroCheck(HDivZeroCheck* instruction) {
void InstructionCodeGeneratorARMVIXL::VisitDivZeroCheck(HDivZeroCheck* instruction) {
DivZeroCheckSlowPathARMVIXL* slow_path =
- new (GetGraph()->GetArena()) DivZeroCheckSlowPathARMVIXL(instruction);
+ new (GetGraph()->GetAllocator()) DivZeroCheckSlowPathARMVIXL(instruction);
codegen_->AddSlowPath(slow_path);
LocationSummary* locations = instruction->GetLocations();
@@ -4872,7 +4872,7 @@ void InstructionCodeGeneratorARMVIXL::HandleLongRotate(HRor* ror) {
void LocationsBuilderARMVIXL::VisitRor(HRor* ror) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(ror, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(ror, LocationSummary::kNoCall);
switch (ror->GetResultType()) {
case DataType::Type::kInt32: {
locations->SetInAt(0, Location::RequiresRegister());
@@ -4918,7 +4918,7 @@ void LocationsBuilderARMVIXL::HandleShift(HBinaryOperation* op) {
DCHECK(op->IsShl() || op->IsShr() || op->IsUShr());
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(op, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(op, LocationSummary::kNoCall);
switch (op->GetResultType()) {
case DataType::Type::kInt32: {
@@ -5148,8 +5148,8 @@ void InstructionCodeGeneratorARMVIXL::VisitUShr(HUShr* ushr) {
}
void LocationsBuilderARMVIXL::VisitNewInstance(HNewInstance* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
if (instruction->IsStringAlloc()) {
locations->AddTemp(LocationFrom(kMethodRegister));
} else {
@@ -5182,8 +5182,8 @@ void InstructionCodeGeneratorARMVIXL::VisitNewInstance(HNewInstance* instruction
}
void LocationsBuilderARMVIXL::VisitNewArray(HNewArray* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConventionARMVIXL calling_convention;
locations->SetOut(LocationFrom(r0));
locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
@@ -5203,7 +5203,7 @@ void InstructionCodeGeneratorARMVIXL::VisitNewArray(HNewArray* instruction) {
void LocationsBuilderARMVIXL::VisitParameterValue(HParameterValue* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
if (location.IsStackSlot()) {
location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
@@ -5220,7 +5220,7 @@ void InstructionCodeGeneratorARMVIXL::VisitParameterValue(
void LocationsBuilderARMVIXL::VisitCurrentMethod(HCurrentMethod* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetOut(LocationFrom(kMethodRegister));
}
@@ -5231,7 +5231,7 @@ void InstructionCodeGeneratorARMVIXL::VisitCurrentMethod(
void LocationsBuilderARMVIXL::VisitNot(HNot* not_) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(not_, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(not_, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
@@ -5257,7 +5257,7 @@ void InstructionCodeGeneratorARMVIXL::VisitNot(HNot* not_) {
void LocationsBuilderARMVIXL::VisitBooleanNot(HBooleanNot* bool_not) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(bool_not, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(bool_not, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
@@ -5268,7 +5268,7 @@ void InstructionCodeGeneratorARMVIXL::VisitBooleanNot(HBooleanNot* bool_not) {
void LocationsBuilderARMVIXL::VisitCompare(HCompare* compare) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(compare, LocationSummary::kNoCall);
switch (compare->InputAt(0)->GetType()) {
case DataType::Type::kBool:
case DataType::Type::kUint8:
@@ -5359,7 +5359,7 @@ void InstructionCodeGeneratorARMVIXL::VisitCompare(HCompare* compare) {
void LocationsBuilderARMVIXL::VisitPhi(HPhi* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) {
locations->SetInAt(i, Location::Any());
}
@@ -5437,7 +5437,7 @@ void LocationsBuilderARMVIXL::HandleFieldSet(
DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
DataType::Type field_type = field_info.GetFieldType();
@@ -5600,10 +5600,10 @@ void LocationsBuilderARMVIXL::HandleFieldGet(HInstruction* instruction,
bool object_field_get_with_read_barrier =
kEmitCompilerReadBarrier && (field_info.GetFieldType() == DataType::Type::kReference);
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction,
- object_field_get_with_read_barrier ?
- LocationSummary::kCallOnSlowPath :
- LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction,
+ object_field_get_with_read_barrier
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall);
if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -5960,7 +5960,7 @@ void CodeGeneratorARMVIXL::GenerateImplicitNullCheck(HNullCheck* instruction) {
void CodeGeneratorARMVIXL::GenerateExplicitNullCheck(HNullCheck* instruction) {
NullCheckSlowPathARMVIXL* slow_path =
- new (GetGraph()->GetArena()) NullCheckSlowPathARMVIXL(instruction);
+ new (GetGraph()->GetAllocator()) NullCheckSlowPathARMVIXL(instruction);
AddSlowPath(slow_path);
__ CompareAndBranchIfZero(InputRegisterAt(instruction, 0), slow_path->GetEntryLabel());
}
@@ -6041,10 +6041,10 @@ void LocationsBuilderARMVIXL::VisitArrayGet(HArrayGet* instruction) {
bool object_array_get_with_read_barrier =
kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference);
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction,
- object_array_get_with_read_barrier ?
- LocationSummary::kCallOnSlowPath :
- LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction,
+ object_array_get_with_read_barrier
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall);
if (object_array_get_with_read_barrier && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -6325,7 +6325,7 @@ void LocationsBuilderARMVIXL::VisitArraySet(HArraySet* instruction) {
CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck();
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
instruction,
may_need_runtime_call_for_type_check ?
LocationSummary::kCallOnSlowPath :
@@ -6433,7 +6433,7 @@ void InstructionCodeGeneratorARMVIXL::VisitArraySet(HArraySet* instruction) {
SlowPathCodeARMVIXL* slow_path = nullptr;
if (may_need_runtime_call_for_type_check) {
- slow_path = new (GetGraph()->GetArena()) ArraySetSlowPathARMVIXL(instruction);
+ slow_path = new (GetGraph()->GetAllocator()) ArraySetSlowPathARMVIXL(instruction);
codegen_->AddSlowPath(slow_path);
if (instruction->GetValueCanBeNull()) {
vixl32::Label non_zero;
@@ -6607,7 +6607,7 @@ void InstructionCodeGeneratorARMVIXL::VisitArraySet(HArraySet* instruction) {
void LocationsBuilderARMVIXL::VisitArrayLength(HArrayLength* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
@@ -6631,7 +6631,7 @@ void InstructionCodeGeneratorARMVIXL::VisitArrayLength(HArrayLength* instruction
void LocationsBuilderARMVIXL::VisitIntermediateAddress(HIntermediateAddress* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(instruction->GetOffset()));
@@ -6694,7 +6694,7 @@ void InstructionCodeGeneratorARMVIXL::VisitBoundsCheck(HBoundsCheck* instruction
int32_t index = Int32ConstantFrom(index_loc);
if (index < 0 || index >= length) {
SlowPathCodeARMVIXL* slow_path =
- new (GetGraph()->GetArena()) BoundsCheckSlowPathARMVIXL(instruction);
+ new (GetGraph()->GetAllocator()) BoundsCheckSlowPathARMVIXL(instruction);
codegen_->AddSlowPath(slow_path);
__ B(slow_path->GetEntryLabel());
} else {
@@ -6705,13 +6705,13 @@ void InstructionCodeGeneratorARMVIXL::VisitBoundsCheck(HBoundsCheck* instruction
}
SlowPathCodeARMVIXL* slow_path =
- new (GetGraph()->GetArena()) BoundsCheckSlowPathARMVIXL(instruction);
+ new (GetGraph()->GetAllocator()) BoundsCheckSlowPathARMVIXL(instruction);
__ Cmp(RegisterFrom(index_loc), length);
codegen_->AddSlowPath(slow_path);
__ B(hs, slow_path->GetEntryLabel());
} else {
SlowPathCodeARMVIXL* slow_path =
- new (GetGraph()->GetArena()) BoundsCheckSlowPathARMVIXL(instruction);
+ new (GetGraph()->GetAllocator()) BoundsCheckSlowPathARMVIXL(instruction);
__ Cmp(RegisterFrom(length_loc), InputOperandAt(instruction, 0));
codegen_->AddSlowPath(slow_path);
__ B(ls, slow_path->GetEntryLabel());
@@ -6745,8 +6745,8 @@ void InstructionCodeGeneratorARMVIXL::VisitParallelMove(HParallelMove* instructi
}
void LocationsBuilderARMVIXL::VisitSuspendCheck(HSuspendCheck* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnSlowPath);
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -6770,7 +6770,8 @@ void InstructionCodeGeneratorARMVIXL::GenerateSuspendCheck(HSuspendCheck* instru
SuspendCheckSlowPathARMVIXL* slow_path =
down_cast<SuspendCheckSlowPathARMVIXL*>(instruction->GetSlowPath());
if (slow_path == nullptr) {
- slow_path = new (GetGraph()->GetArena()) SuspendCheckSlowPathARMVIXL(instruction, successor);
+ slow_path =
+ new (GetGraph()->GetAllocator()) SuspendCheckSlowPathARMVIXL(instruction, successor);
instruction->SetSlowPath(slow_path);
codegen_->AddSlowPath(slow_path);
if (successor != nullptr) {
@@ -7085,7 +7086,7 @@ void LocationsBuilderARMVIXL::VisitLoadClass(HLoadClass* cls) {
LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(cls, call_kind);
if (kUseBakerReadBarrier && requires_read_barrier && !cls->NeedsEnvironment()) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -7208,7 +7209,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) NO_THREAD_
if (generate_null_check || cls->MustGenerateClinitCheck()) {
DCHECK(cls->CanCallRuntime());
- LoadClassSlowPathARMVIXL* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARMVIXL(
+ LoadClassSlowPathARMVIXL* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathARMVIXL(
cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
codegen_->AddSlowPath(slow_path);
if (generate_null_check) {
@@ -7225,7 +7226,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) NO_THREAD_
void LocationsBuilderARMVIXL::VisitClinitCheck(HClinitCheck* check) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
+ new (GetGraph()->GetAllocator()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
locations->SetInAt(0, Location::RequiresRegister());
if (check->HasUses()) {
locations->SetOut(Location::SameAsFirstInput());
@@ -7235,10 +7236,10 @@ void LocationsBuilderARMVIXL::VisitClinitCheck(HClinitCheck* check) {
void InstructionCodeGeneratorARMVIXL::VisitClinitCheck(HClinitCheck* check) {
// We assume the class is not null.
LoadClassSlowPathARMVIXL* slow_path =
- new (GetGraph()->GetArena()) LoadClassSlowPathARMVIXL(check->GetLoadClass(),
- check,
- check->GetDexPc(),
- /* do_clinit */ true);
+ new (GetGraph()->GetAllocator()) LoadClassSlowPathARMVIXL(check->GetLoadClass(),
+ check,
+ check->GetDexPc(),
+ /* do_clinit */ true);
codegen_->AddSlowPath(slow_path);
GenerateClassInitializationCheck(slow_path, InputRegisterAt(check, 0));
}
@@ -7279,7 +7280,7 @@ HLoadString::LoadKind CodeGeneratorARMVIXL::GetSupportedLoadStringKind(
void LocationsBuilderARMVIXL::VisitLoadString(HLoadString* load) {
LocationSummary::CallKind call_kind = CodeGenerator::GetLoadStringCallKind(load);
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(load, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(load, call_kind);
HLoadString::LoadKind load_kind = load->GetLoadKind();
if (load_kind == HLoadString::LoadKind::kRuntimeCall) {
locations->SetOut(LocationFrom(r0));
@@ -7348,7 +7349,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) NO_THRE
codegen_->EmitMovwMovtPlaceholder(labels, temp);
GenerateGcRootFieldLoad(load, out_loc, temp, /* offset */ 0, kCompilerReadBarrierOption);
LoadStringSlowPathARMVIXL* slow_path =
- new (GetGraph()->GetArena()) LoadStringSlowPathARMVIXL(load);
+ new (GetGraph()->GetAllocator()) LoadStringSlowPathARMVIXL(load);
codegen_->AddSlowPath(slow_path);
__ CompareAndBranchIfZero(out, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
@@ -7382,7 +7383,7 @@ static int32_t GetExceptionTlsOffset() {
void LocationsBuilderARMVIXL::VisitLoadException(HLoadException* load) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(load, LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
}
@@ -7393,7 +7394,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadException(HLoadException* load) {
void LocationsBuilderARMVIXL::VisitClearException(HClearException* clear) {
- new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(clear, LocationSummary::kNoCall);
}
void InstructionCodeGeneratorARMVIXL::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
@@ -7404,8 +7405,8 @@ void InstructionCodeGeneratorARMVIXL::VisitClearException(HClearException* clear
}
void LocationsBuilderARMVIXL::VisitThrow(HThrow* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConventionARMVIXL calling_convention;
locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
}
@@ -7457,7 +7458,8 @@ void LocationsBuilderARMVIXL::VisitInstanceOf(HInstanceOf* instruction) {
break;
}
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations =
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
if (baker_read_barrier_slow_path) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -7674,8 +7676,8 @@ void InstructionCodeGeneratorARMVIXL::VisitInstanceOf(HInstanceOf* instruction)
kWithoutReadBarrier);
__ Cmp(out, cls);
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARMVIXL(instruction,
- /* is_fatal */ false);
+ slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathARMVIXL(instruction,
+ /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ B(ne, slow_path->GetEntryLabel());
__ Mov(out, 1);
@@ -7703,8 +7705,8 @@ void InstructionCodeGeneratorARMVIXL::VisitInstanceOf(HInstanceOf* instruction)
// call to the runtime not using a type checking slow path).
// This should also be beneficial for the other cases above.
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARMVIXL(instruction,
- /* is_fatal */ false);
+ slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathARMVIXL(instruction,
+ /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ B(slow_path->GetEntryLabel());
break;
@@ -7741,7 +7743,8 @@ void LocationsBuilderARMVIXL::VisitCheckCast(HCheckCast* instruction) {
break;
}
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations =
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->AddRegisterTemps(NumberOfCheckCastTemps(type_check_kind));
@@ -7781,8 +7784,8 @@ void InstructionCodeGeneratorARMVIXL::VisitCheckCast(HCheckCast* instruction) {
!instruction->CanThrowIntoCatchBlock();
}
SlowPathCodeARMVIXL* type_check_slow_path =
- new (GetGraph()->GetArena()) TypeCheckSlowPathARMVIXL(instruction,
- is_type_check_slow_path_fatal);
+ new (GetGraph()->GetAllocator()) TypeCheckSlowPathARMVIXL(instruction,
+ is_type_check_slow_path_fatal);
codegen_->AddSlowPath(type_check_slow_path);
vixl32::Label done;
@@ -7957,8 +7960,8 @@ void InstructionCodeGeneratorARMVIXL::VisitCheckCast(HCheckCast* instruction) {
}
void LocationsBuilderARMVIXL::VisitMonitorOperation(HMonitorOperation* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConventionARMVIXL calling_convention;
locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
}
@@ -7989,7 +7992,7 @@ void LocationsBuilderARMVIXL::VisitXor(HXor* instruction) {
void LocationsBuilderARMVIXL::HandleBitwiseOperation(HBinaryOperation* instruction, Opcode opcode) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
DCHECK(instruction->GetResultType() == DataType::Type::kInt32
|| instruction->GetResultType() == DataType::Type::kInt64);
// Note: GVN reorders commutative operations to have the constant on the right hand side.
@@ -8012,7 +8015,7 @@ void InstructionCodeGeneratorARMVIXL::VisitXor(HXor* instruction) {
void LocationsBuilderARMVIXL::VisitBitwiseNegatedRight(HBitwiseNegatedRight* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
DCHECK(instruction->GetResultType() == DataType::Type::kInt32
|| instruction->GetResultType() == DataType::Type::kInt64);
@@ -8079,7 +8082,7 @@ void LocationsBuilderARMVIXL::VisitDataProcWithShifterOp(
DCHECK(instruction->GetType() == DataType::Type::kInt32 ||
instruction->GetType() == DataType::Type::kInt64);
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
const bool overlap = instruction->GetType() == DataType::Type::kInt64 &&
HDataProcWithShifterOp::IsExtensionOp(instruction->GetOpKind());
@@ -8443,7 +8446,7 @@ void InstructionCodeGeneratorARMVIXL::GenerateGcRootFieldLoad(
// Slow path marking the GC root `root`. The entrypoint will
// be loaded by the slow path code.
SlowPathCodeARMVIXL* slow_path =
- new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathARMVIXL(instruction, root);
+ new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathARMVIXL(instruction, root);
codegen_->AddSlowPath(slow_path);
// /* GcRoot<mirror::Object> */ root = *(obj + offset)
@@ -8692,7 +8695,7 @@ void CodeGeneratorARMVIXL::GenerateReferenceLoadWithBakerReadBarrier(HInstructio
// Slow path marking the object `ref` when the GC is marking. The
// entrypoint will be loaded by the slow path code.
SlowPathCodeARMVIXL* slow_path =
- new (GetGraph()->GetArena()) LoadReferenceWithBakerReadBarrierSlowPathARMVIXL(
+ new (GetGraph()->GetAllocator()) LoadReferenceWithBakerReadBarrierSlowPathARMVIXL(
instruction, ref, obj, offset, index, scale_factor, needs_null_check, temp_reg);
AddSlowPath(slow_path);
@@ -8738,8 +8741,8 @@ void CodeGeneratorARMVIXL::UpdateReferenceFieldWithBakerReadBarrier(HInstruction
// Slow path updating the object reference at address `obj + field_offset`
// when the GC is marking. The entrypoint will be loaded by the slow path code.
- SlowPathCodeARMVIXL* slow_path =
- new (GetGraph()->GetArena()) LoadReferenceWithBakerReadBarrierAndUpdateFieldSlowPathARMVIXL(
+ SlowPathCodeARMVIXL* slow_path = new (GetGraph()->GetAllocator())
+ LoadReferenceWithBakerReadBarrierAndUpdateFieldSlowPathARMVIXL(
instruction,
ref,
obj,
@@ -8850,7 +8853,7 @@ void CodeGeneratorARMVIXL::GenerateReadBarrierSlow(HInstruction* instruction,
// not used by the artReadBarrierSlow entry point.
//
// TODO: Unpoison `ref` when it is used by artReadBarrierSlow.
- SlowPathCodeARMVIXL* slow_path = new (GetGraph()->GetArena())
+ SlowPathCodeARMVIXL* slow_path = new (GetGraph()->GetAllocator())
ReadBarrierForHeapReferenceSlowPathARMVIXL(instruction, out, ref, obj, offset, index);
AddSlowPath(slow_path);
@@ -8886,7 +8889,7 @@ void CodeGeneratorARMVIXL::GenerateReadBarrierForRootSlow(HInstruction* instruct
// Note that GC roots are not affected by heap poisoning, so we do
// not need to do anything special for this here.
SlowPathCodeARMVIXL* slow_path =
- new (GetGraph()->GetArena()) ReadBarrierForRootSlowPathARMVIXL(instruction, out, root);
+ new (GetGraph()->GetAllocator()) ReadBarrierForRootSlowPathARMVIXL(instruction, out, root);
AddSlowPath(slow_path);
__ B(slow_path->GetEntryLabel());
@@ -9191,7 +9194,7 @@ VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateUint32Literal(
void LocationsBuilderARMVIXL::VisitMultiplyAccumulate(HMultiplyAccumulate* instr) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instr, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instr, LocationSummary::kNoCall);
locations->SetInAt(HMultiplyAccumulate::kInputAccumulatorIndex,
Location::RequiresRegister());
locations->SetInAt(HMultiplyAccumulate::kInputMulLeftIndex, Location::RequiresRegister());
@@ -9228,7 +9231,7 @@ void InstructionCodeGeneratorARMVIXL::VisitBoundType(HBoundType* instruction ATT
// Simple implementation of packed switch - generate cascaded compare/jumps.
void LocationsBuilderARMVIXL::VisitPackedSwitch(HPackedSwitch* switch_instr) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(switch_instr, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
if (switch_instr->GetNumEntries() > kPackedSwitchCompareJumpThreshold &&
codegen_->GetAssembler()->GetVIXLAssembler()->IsUsingT32()) {
@@ -9342,7 +9345,7 @@ void CodeGeneratorARMVIXL::MoveFromReturnRegister(Location trg, DataType::Type t
TODO_VIXL32(FATAL);
} else {
// Let the parallel move resolver take care of all of this.
- HParallelMove parallel_move(GetGraph()->GetArena());
+ HParallelMove parallel_move(GetGraph()->GetAllocator());
parallel_move.AddMove(return_loc, trg, type, nullptr);
GetMoveResolver()->EmitNativeCode(&parallel_move);
}
@@ -9350,7 +9353,7 @@ void CodeGeneratorARMVIXL::MoveFromReturnRegister(Location trg, DataType::Type t
void LocationsBuilderARMVIXL::VisitClassTableGet(HClassTableGet* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister());
}
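
The CodeGeneratorARMVIXL constructor in the hunks above rebuilds each of its containers with graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator), so every container allocation is tagged with a category and can be attributed when memory usage is counted. The sketch below is a minimal STL-style stand-in for that adapter idea, assuming simple names (CountingAdapter, AllocStats, kAllocCodeGenerator) rather than ART's real ArenaAllocatorAdapter and ArenaAllocKind.

#include <cstddef>
#include <iostream>
#include <new>
#include <vector>

// Hypothetical allocation categories, standing in for arena allocation kinds.
enum AllocKind { kAllocCodeGenerator, kAllocOther, kNumAllocKinds };

// Per-category byte counts; a stand-in for the allocator's statistics.
struct AllocStats {
  size_t bytes[kNumAllocKinds] = {};
};

// Minimal STL-compatible adapter that tags every allocation with a category,
// mirroring the role of `allocator->Adapter(kArenaAllocCodeGenerator)`.
template <typename T>
class CountingAdapter {
 public:
  using value_type = T;

  CountingAdapter(AllocStats* stats, AllocKind kind) : stats_(stats), kind_(kind) {}
  template <typename U>
  CountingAdapter(const CountingAdapter<U>& other)
      : stats_(other.stats_), kind_(other.kind_) {}

  T* allocate(size_t n) {
    stats_->bytes[kind_] += n * sizeof(T);
    return static_cast<T*>(::operator new(n * sizeof(T)));
  }
  void deallocate(T* p, size_t /*n*/) { ::operator delete(p); }

  AllocStats* stats_;
  AllocKind kind_;
};

template <typename T, typename U>
bool operator==(const CountingAdapter<T>& a, const CountingAdapter<U>& b) {
  return a.stats_ == b.stats_ && a.kind_ == b.kind_;
}
template <typename T, typename U>
bool operator!=(const CountingAdapter<T>& a, const CountingAdapter<U>& b) {
  return !(a == b);
}

int main() {
  AllocStats stats;
  std::vector<int, CountingAdapter<int>> jump_tables(
      CountingAdapter<int>(&stats, kAllocCodeGenerator));
  jump_tables.assign(16, 0);  // Attributed to the code-generator category.
  std::cout << "code generator bytes: " << stats.bytes[kAllocCodeGenerator] << "\n";
  return 0;
}
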
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index 58b85259e7..c46d17ccec 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -119,7 +119,7 @@ class JumpTableARMVIXL : public DeletableArenaObject<kArenaAllocSwitchTable> {
explicit JumpTableARMVIXL(HPackedSwitch* switch_instr)
: switch_instr_(switch_instr),
table_start_(),
- bb_addresses_(switch_instr->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
+ bb_addresses_(switch_instr->GetAllocator()->Adapter(kArenaAllocCodeGenerator)) {
uint32_t num_entries = switch_instr_->GetNumEntries();
for (uint32_t i = 0; i < num_entries; i++) {
VIXLInt32Literal *lit = new VIXLInt32Literal(0, vixl32::RawLiteral::kManuallyPlaced);
@@ -739,7 +739,7 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE;
JumpTableARMVIXL* CreateJumpTable(HPackedSwitch* switch_instr) {
- jump_tables_.emplace_back(new (GetGraph()->GetArena()) JumpTableARMVIXL(switch_instr));
+ jump_tables_.emplace_back(new (GetGraph()->GetAllocator()) JumpTableARMVIXL(switch_instr));
return jump_tables_.back().get();
}
void EmitJumpTables();
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 3c592e7e37..7ea7b9cee2 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -557,7 +557,7 @@ class ArraySetSlowPathMIPS : public SlowPathCodeMIPS {
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConvention calling_convention;
- HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+ HParallelMove parallel_move(codegen->GetGraph()->GetAllocator());
parallel_move.AddMove(
locations->InAt(0),
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
@@ -968,7 +968,7 @@ class ReadBarrierForHeapReferenceSlowPathMIPS : public SlowPathCodeMIPS {
// We're moving two or three locations to locations that could
// overlap, so we need a parallel move resolver.
InvokeRuntimeCallingConvention calling_convention;
- HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+ HParallelMove parallel_move(codegen->GetGraph()->GetAllocator());
parallel_move.AddMove(ref_,
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
DataType::Type::kReference,
@@ -1100,19 +1100,19 @@ CodeGeneratorMIPS::CodeGeneratorMIPS(HGraph* graph,
block_labels_(nullptr),
location_builder_(graph, this),
instruction_visitor_(graph, this),
- move_resolver_(graph->GetArena(), this),
- assembler_(graph->GetArena(), &isa_features),
+ move_resolver_(graph->GetAllocator(), this),
+ assembler_(graph->GetAllocator(), &isa_features),
isa_features_(isa_features),
uint32_literals_(std::less<uint32_t>(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- pc_relative_method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- method_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- pc_relative_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- string_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- jit_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- jit_class_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ pc_relative_method_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ method_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ pc_relative_type_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ type_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ pc_relative_string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ string_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ jit_string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ jit_class_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
clobbered_ra_(false) {
// Save RA (containing the return address) to mimic Quick.
AddAllocatedRegister(Location::RegisterLocation(RA));
@@ -1998,7 +1998,7 @@ void InstructionCodeGeneratorMIPS::GenerateMemoryBarrier(MemBarrierKind kind ATT
void InstructionCodeGeneratorMIPS::GenerateSuspendCheck(HSuspendCheck* instruction,
HBasicBlock* successor) {
SuspendCheckSlowPathMIPS* slow_path =
- new (GetGraph()->GetArena()) SuspendCheckSlowPathMIPS(instruction, successor);
+ new (GetGraph()->GetAllocator()) SuspendCheckSlowPathMIPS(instruction, successor);
codegen_->AddSlowPath(slow_path);
__ LoadFromOffset(kLoadUnsignedHalfword,
@@ -2023,7 +2023,7 @@ InstructionCodeGeneratorMIPS::InstructionCodeGeneratorMIPS(HGraph* graph,
void LocationsBuilderMIPS::HandleBinaryOp(HBinaryOperation* instruction) {
DCHECK_EQ(instruction->InputCount(), 2U);
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
DataType::Type type = instruction->GetResultType();
switch (type) {
case DataType::Type::kInt32: {
@@ -2289,7 +2289,7 @@ void InstructionCodeGeneratorMIPS::HandleBinaryOp(HBinaryOperation* instruction)
void LocationsBuilderMIPS::HandleShift(HBinaryOperation* instr) {
DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr() || instr->IsRor());
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instr);
DataType::Type type = instr->GetResultType();
switch (type) {
case DataType::Type::kInt32:
@@ -2542,10 +2542,10 @@ void LocationsBuilderMIPS::VisitArrayGet(HArrayGet* instruction) {
bool object_array_get_with_read_barrier =
kEmitCompilerReadBarrier && (type == DataType::Type::kReference);
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction,
- object_array_get_with_read_barrier
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction,
+ object_array_get_with_read_barrier
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall);
if (object_array_get_with_read_barrier && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -2824,7 +2824,7 @@ void InstructionCodeGeneratorMIPS::VisitArrayGet(HArrayGet* instruction) {
}
void LocationsBuilderMIPS::VisitArrayLength(HArrayLength* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
@@ -2868,7 +2868,7 @@ void LocationsBuilderMIPS::VisitArraySet(HArraySet* instruction) {
CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck();
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
instruction,
may_need_runtime_call_for_type_check ?
LocationSummary::kCallOnSlowPath :
@@ -2986,7 +2986,7 @@ void InstructionCodeGeneratorMIPS::VisitArraySet(HArraySet* instruction) {
SlowPathCodeMIPS* slow_path = nullptr;
if (may_need_runtime_call_for_type_check) {
- slow_path = new (GetGraph()->GetArena()) ArraySetSlowPathMIPS(instruction);
+ slow_path = new (GetGraph()->GetAllocator()) ArraySetSlowPathMIPS(instruction);
codegen_->AddSlowPath(slow_path);
if (instruction->GetValueCanBeNull()) {
MipsLabel non_zero;
@@ -3141,7 +3141,7 @@ void InstructionCodeGeneratorMIPS::VisitArraySet(HArraySet* instruction) {
void LocationsBuilderMIPS::VisitIntermediateArrayAddressIndex(
HIntermediateArrayAddressIndex* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
HIntConstant* shift = instruction->GetShift()->AsIntConstant();
@@ -3171,7 +3171,7 @@ void LocationsBuilderMIPS::VisitBoundsCheck(HBoundsCheck* instruction) {
void InstructionCodeGeneratorMIPS::VisitBoundsCheck(HBoundsCheck* instruction) {
LocationSummary* locations = instruction->GetLocations();
BoundsCheckSlowPathMIPS* slow_path =
- new (GetGraph()->GetArena()) BoundsCheckSlowPathMIPS(instruction);
+ new (GetGraph()->GetAllocator()) BoundsCheckSlowPathMIPS(instruction);
codegen_->AddSlowPath(slow_path);
Register index = locations->InAt(0).AsRegister<Register>();
@@ -3222,7 +3222,8 @@ void LocationsBuilderMIPS::VisitCheckCast(HCheckCast* instruction) {
break;
}
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations =
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->AddRegisterTemps(NumberOfCheckCastTemps(type_check_kind));
@@ -3262,8 +3263,8 @@ void InstructionCodeGeneratorMIPS::VisitCheckCast(HCheckCast* instruction) {
!instruction->CanThrowIntoCatchBlock();
}
SlowPathCodeMIPS* slow_path =
- new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS(instruction,
- is_type_check_slow_path_fatal);
+ new (GetGraph()->GetAllocator()) TypeCheckSlowPathMIPS(instruction,
+ is_type_check_slow_path_fatal);
codegen_->AddSlowPath(slow_path);
// Avoid this check if we know `obj` is not null.
@@ -3417,7 +3418,7 @@ void InstructionCodeGeneratorMIPS::VisitCheckCast(HCheckCast* instruction) {
void LocationsBuilderMIPS::VisitClinitCheck(HClinitCheck* check) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
+ new (GetGraph()->GetAllocator()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
locations->SetInAt(0, Location::RequiresRegister());
if (check->HasUses()) {
locations->SetOut(Location::SameAsFirstInput());
@@ -3426,7 +3427,7 @@ void LocationsBuilderMIPS::VisitClinitCheck(HClinitCheck* check) {
void InstructionCodeGeneratorMIPS::VisitClinitCheck(HClinitCheck* check) {
// We assume the class is not null.
- SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathMIPS(
+ SlowPathCodeMIPS* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathMIPS(
check->GetLoadClass(),
check,
check->GetDexPc(),
@@ -3440,7 +3441,7 @@ void LocationsBuilderMIPS::VisitCompare(HCompare* compare) {
DataType::Type in_type = compare->InputAt(0)->GetType();
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(compare, LocationSummary::kNoCall);
switch (in_type) {
case DataType::Type::kBool:
@@ -3601,7 +3602,7 @@ void InstructionCodeGeneratorMIPS::VisitCompare(HCompare* instruction) {
}
void LocationsBuilderMIPS::HandleCondition(HCondition* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
switch (instruction->InputAt(0)->GetType()) {
default:
case DataType::Type::kInt64:
@@ -3815,7 +3816,7 @@ void LocationsBuilderMIPS::VisitDiv(HDiv* div) {
? LocationSummary::kCallOnMainOnly
: LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(div, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(div, call_kind);
switch (type) {
case DataType::Type::kInt32:
@@ -3882,7 +3883,8 @@ void LocationsBuilderMIPS::VisitDivZeroCheck(HDivZeroCheck* instruction) {
}
void InstructionCodeGeneratorMIPS::VisitDivZeroCheck(HDivZeroCheck* instruction) {
- SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) DivZeroCheckSlowPathMIPS(instruction);
+ SlowPathCodeMIPS* slow_path =
+ new (GetGraph()->GetAllocator()) DivZeroCheckSlowPathMIPS(instruction);
codegen_->AddSlowPath(slow_path);
Location value = instruction->GetLocations()->InAt(0);
DataType::Type type = instruction->GetType();
@@ -3929,7 +3931,7 @@ void InstructionCodeGeneratorMIPS::VisitDivZeroCheck(HDivZeroCheck* instruction)
void LocationsBuilderMIPS::VisitDoubleConstant(HDoubleConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -3946,7 +3948,7 @@ void InstructionCodeGeneratorMIPS::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
void LocationsBuilderMIPS::VisitFloatConstant(HFloatConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -5526,7 +5528,7 @@ void InstructionCodeGeneratorMIPS::GenerateTestAndBranch(HInstruction* instructi
}
void LocationsBuilderMIPS::VisitIf(HIf* if_instr) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(if_instr);
if (IsBooleanValueOrMaterializedCondition(if_instr->InputAt(0))) {
locations->SetInAt(0, Location::RequiresRegister());
}
@@ -5543,7 +5545,7 @@ void InstructionCodeGeneratorMIPS::VisitIf(HIf* if_instr) {
}
void LocationsBuilderMIPS::VisitDeoptimize(HDeoptimize* deoptimize) {
- LocationSummary* locations = new (GetGraph()->GetArena())
+ LocationSummary* locations = new (GetGraph()->GetAllocator())
LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
InvokeRuntimeCallingConvention calling_convention;
RegisterSet caller_saves = RegisterSet::Empty();
@@ -6098,7 +6100,7 @@ void InstructionCodeGeneratorMIPS::GenConditionalMoveR6(HSelect* select) {
}
void LocationsBuilderMIPS::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
- LocationSummary* locations = new (GetGraph()->GetArena())
+ LocationSummary* locations = new (GetGraph()->GetAllocator())
LocationSummary(flag, LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
}
@@ -6111,7 +6113,7 @@ void InstructionCodeGeneratorMIPS::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFl
}
void LocationsBuilderMIPS::VisitSelect(HSelect* select) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(select);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(select);
CanMoveConditionally(select, codegen_->GetInstructionSetFeatures().IsR6(), locations);
}
@@ -6136,7 +6138,7 @@ void InstructionCodeGeneratorMIPS::VisitSelect(HSelect* select) {
}
void LocationsBuilderMIPS::VisitNativeDebugInfo(HNativeDebugInfo* info) {
- new (GetGraph()->GetArena()) LocationSummary(info);
+ new (GetGraph()->GetAllocator()) LocationSummary(info);
}
void InstructionCodeGeneratorMIPS::VisitNativeDebugInfo(HNativeDebugInfo*) {
@@ -6153,7 +6155,7 @@ void LocationsBuilderMIPS::HandleFieldGet(HInstruction* instruction, const Field
bool generate_volatile = field_info.IsVolatile() && is_wide;
bool object_field_get_with_read_barrier =
kEmitCompilerReadBarrier && (field_type == DataType::Type::kReference);
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
instruction,
generate_volatile
? LocationSummary::kCallOnMainOnly
@@ -6327,7 +6329,7 @@ void LocationsBuilderMIPS::HandleFieldSet(HInstruction* instruction, const Field
DataType::Type field_type = field_info.GetFieldType();
bool is_wide = (field_type == DataType::Type::kInt64) || (field_type == DataType::Type::kFloat64);
bool generate_volatile = field_info.IsVolatile() && is_wide;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
instruction, generate_volatile ? LocationSummary::kCallOnMainOnly : LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
@@ -6691,7 +6693,7 @@ void InstructionCodeGeneratorMIPS::GenerateGcRootFieldLoad(HInstruction* instruc
// Slow path marking the GC root `root`.
Location temp = Location::RegisterLocation(T9);
SlowPathCodeMIPS* slow_path =
- new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathMIPS(
+ new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathMIPS(
instruction,
root,
/*entrypoint*/ temp);
@@ -7018,14 +7020,14 @@ void CodeGeneratorMIPS::GenerateReferenceLoadWithBakerReadBarrier(HInstruction*
// to be null in this code path.
DCHECK_EQ(offset, 0u);
DCHECK_EQ(scale_factor, ScaleFactor::TIMES_1);
- slow_path = new (GetGraph()->GetArena())
+ slow_path = new (GetGraph()->GetAllocator())
ReadBarrierMarkAndUpdateFieldSlowPathMIPS(instruction,
ref,
obj,
/* field_offset */ index,
temp_reg);
} else {
- slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathMIPS(instruction, ref);
+ slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathMIPS(instruction, ref);
}
AddSlowPath(slow_path);
@@ -7061,7 +7063,7 @@ void CodeGeneratorMIPS::GenerateReadBarrierSlow(HInstruction* instruction,
// not used by the artReadBarrierSlow entry point.
//
// TODO: Unpoison `ref` when it is used by artReadBarrierSlow.
- SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena())
+ SlowPathCodeMIPS* slow_path = new (GetGraph()->GetAllocator())
ReadBarrierForHeapReferenceSlowPathMIPS(instruction, out, ref, obj, offset, index);
AddSlowPath(slow_path);
@@ -7097,7 +7099,7 @@ void CodeGeneratorMIPS::GenerateReadBarrierForRootSlow(HInstruction* instruction
// Note that GC roots are not affected by heap poisoning, so we do
// not need to do anything special for this here.
SlowPathCodeMIPS* slow_path =
- new (GetGraph()->GetArena()) ReadBarrierForRootSlowPathMIPS(instruction, out, root);
+ new (GetGraph()->GetAllocator()) ReadBarrierForRootSlowPathMIPS(instruction, out, root);
AddSlowPath(slow_path);
__ B(slow_path->GetEntryLabel());
@@ -7124,7 +7126,8 @@ void LocationsBuilderMIPS::VisitInstanceOf(HInstanceOf* instruction) {
break;
}
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations =
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
if (baker_read_barrier_slow_path) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -7266,8 +7269,8 @@ void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
maybe_temp_loc,
kWithoutReadBarrier);
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS(instruction,
- /* is_fatal */ false);
+ slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathMIPS(instruction,
+ /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ Bne(out, cls, slow_path->GetEntryLabel());
__ LoadConst32(out, 1);
@@ -7295,8 +7298,8 @@ void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
// call to the runtime not using a type checking slow path).
// This should also be beneficial for the other cases above.
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS(instruction,
- /* is_fatal */ false);
+ slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathMIPS(instruction,
+ /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ B(slow_path->GetEntryLabel());
break;
@@ -7311,7 +7314,7 @@ void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
}
void LocationsBuilderMIPS::VisitIntConstant(HIntConstant* constant) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(constant);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -7320,7 +7323,7 @@ void InstructionCodeGeneratorMIPS::VisitIntConstant(HIntConstant* constant ATTRI
}
void LocationsBuilderMIPS::VisitNullConstant(HNullConstant* constant) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(constant);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -7661,7 +7664,7 @@ void LocationsBuilderMIPS::VisitLoadClass(HLoadClass* cls) {
LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(cls, call_kind);
if (kUseBakerReadBarrier && requires_read_barrier && !cls->NeedsEnvironment()) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -7839,7 +7842,7 @@ void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAF
if (generate_null_check || cls->MustGenerateClinitCheck()) {
DCHECK(cls->CanCallRuntime());
- SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathMIPS(
+ SlowPathCodeMIPS* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathMIPS(
cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck(), bss_info_high);
codegen_->AddSlowPath(slow_path);
if (generate_null_check) {
@@ -7859,7 +7862,7 @@ static int32_t GetExceptionTlsOffset() {
void LocationsBuilderMIPS::VisitLoadException(HLoadException* load) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(load, LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
}
@@ -7869,7 +7872,7 @@ void InstructionCodeGeneratorMIPS::VisitLoadException(HLoadException* load) {
}
void LocationsBuilderMIPS::VisitClearException(HClearException* clear) {
- new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(clear, LocationSummary::kNoCall);
}
void InstructionCodeGeneratorMIPS::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
@@ -7878,7 +7881,7 @@ void InstructionCodeGeneratorMIPS::VisitClearException(HClearException* clear AT
void LocationsBuilderMIPS::VisitLoadString(HLoadString* load) {
LocationSummary::CallKind call_kind = CodeGenerator::GetLoadStringCallKind(load);
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(load, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(load, call_kind);
HLoadString::LoadKind load_kind = load->GetLoadKind();
const bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
const bool has_irreducible_loops = codegen_->GetGraph()->HasIrreducibleLoops();
@@ -8004,7 +8007,7 @@ void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) NO_THREAD_
kCompilerReadBarrierOption,
&info_low->label);
SlowPathCodeMIPS* slow_path =
- new (GetGraph()->GetArena()) LoadStringSlowPathMIPS(load, info_high);
+ new (GetGraph()->GetAllocator()) LoadStringSlowPathMIPS(load, info_high);
codegen_->AddSlowPath(slow_path);
__ Beqz(out, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
@@ -8041,7 +8044,7 @@ void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) NO_THREAD_
}
void LocationsBuilderMIPS::VisitLongConstant(HLongConstant* constant) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(constant);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -8050,8 +8053,8 @@ void InstructionCodeGeneratorMIPS::VisitLongConstant(HLongConstant* constant ATT
}
void LocationsBuilderMIPS::VisitMonitorOperation(HMonitorOperation* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
}
@@ -8068,7 +8071,7 @@ void InstructionCodeGeneratorMIPS::VisitMonitorOperation(HMonitorOperation* inst
void LocationsBuilderMIPS::VisitMul(HMul* mul) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(mul, LocationSummary::kNoCall);
switch (mul->GetResultType()) {
case DataType::Type::kInt32:
case DataType::Type::kInt64:
@@ -8163,7 +8166,7 @@ void InstructionCodeGeneratorMIPS::VisitMul(HMul* instruction) {
void LocationsBuilderMIPS::VisitNeg(HNeg* neg) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(neg, LocationSummary::kNoCall);
switch (neg->GetResultType()) {
case DataType::Type::kInt32:
case DataType::Type::kInt64:
@@ -8221,8 +8224,8 @@ void InstructionCodeGeneratorMIPS::VisitNeg(HNeg* instruction) {
}
void LocationsBuilderMIPS::VisitNewArray(HNewArray* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
locations->SetOut(calling_convention.GetReturnLocation(DataType::Type::kReference));
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
@@ -8240,8 +8243,8 @@ void InstructionCodeGeneratorMIPS::VisitNewArray(HNewArray* instruction) {
}
void LocationsBuilderMIPS::VisitNewInstance(HNewInstance* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
if (instruction->IsStringAlloc()) {
locations->AddTemp(Location::RegisterLocation(kMethodRegisterArgument));
@@ -8270,7 +8273,7 @@ void InstructionCodeGeneratorMIPS::VisitNewInstance(HNewInstance* instruction) {
}
void LocationsBuilderMIPS::VisitNot(HNot* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
@@ -8303,7 +8306,7 @@ void InstructionCodeGeneratorMIPS::VisitNot(HNot* instruction) {
}
void LocationsBuilderMIPS::VisitBooleanNot(HBooleanNot* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
@@ -8331,7 +8334,7 @@ void CodeGeneratorMIPS::GenerateImplicitNullCheck(HNullCheck* instruction) {
}
void CodeGeneratorMIPS::GenerateExplicitNullCheck(HNullCheck* instruction) {
- SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathMIPS(instruction);
+ SlowPathCodeMIPS* slow_path = new (GetGraph()->GetAllocator()) NullCheckSlowPathMIPS(instruction);
AddSlowPath(slow_path);
Location obj = instruction->GetLocations()->InAt(0);
@@ -8360,7 +8363,7 @@ void InstructionCodeGeneratorMIPS::VisitParallelMove(HParallelMove* instruction)
}
void LocationsBuilderMIPS::VisitParameterValue(HParameterValue* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
if (location.IsStackSlot()) {
location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
@@ -8377,7 +8380,7 @@ void InstructionCodeGeneratorMIPS::VisitParameterValue(HParameterValue* instruct
void LocationsBuilderMIPS::VisitCurrentMethod(HCurrentMethod* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetOut(Location::RegisterLocation(kMethodRegisterArgument));
}
@@ -8387,7 +8390,7 @@ void InstructionCodeGeneratorMIPS::VisitCurrentMethod(HCurrentMethod* instructio
}
void LocationsBuilderMIPS::VisitPhi(HPhi* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) {
locations->SetInAt(i, Location::Any());
}
@@ -8403,7 +8406,7 @@ void LocationsBuilderMIPS::VisitRem(HRem* rem) {
LocationSummary::CallKind call_kind = (type == DataType::Type::kInt32)
? LocationSummary::kNoCall
: LocationSummary::kCallOnMainOnly;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(rem, call_kind);
switch (type) {
case DataType::Type::kInt32:
@@ -8481,7 +8484,7 @@ void InstructionCodeGeneratorMIPS::VisitMemoryBarrier(HMemoryBarrier* memory_bar
}
void LocationsBuilderMIPS::VisitReturn(HReturn* ret) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(ret);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(ret);
DataType::Type return_type = ret->InputAt(0)->GetType();
locations->SetInAt(0, MipsReturnLocation(return_type));
}
@@ -8622,8 +8625,8 @@ void InstructionCodeGeneratorMIPS::VisitUnresolvedStaticFieldSet(
}
void LocationsBuilderMIPS::VisitSuspendCheck(HSuspendCheck* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnSlowPath);
// In suspend check slow path, usually there are no caller-save registers at all.
// If SIMD instructions are present, however, we force spilling all live SIMD
// registers in full width (since the runtime only saves/restores lower part).
@@ -8646,8 +8649,8 @@ void InstructionCodeGeneratorMIPS::VisitSuspendCheck(HSuspendCheck* instruction)
}
void LocationsBuilderMIPS::VisitThrow(HThrow* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
}
@@ -8676,7 +8679,8 @@ void LocationsBuilderMIPS::VisitTypeConversion(HTypeConversion* conversion) {
call_kind = LocationSummary::kCallOnMainOnly;
}
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(conversion, call_kind);
+ LocationSummary* locations =
+ new (GetGraph()->GetAllocator()) LocationSummary(conversion, call_kind);
if (call_kind == LocationSummary::kNoCall) {
if (DataType::IsFloatingPointType(input_type)) {
@@ -9014,7 +9018,7 @@ void InstructionCodeGeneratorMIPS::VisitAboveOrEqual(HAboveOrEqual* comp) {
void LocationsBuilderMIPS::VisitPackedSwitch(HPackedSwitch* switch_instr) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(switch_instr, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
}
@@ -9123,7 +9127,7 @@ void InstructionCodeGeneratorMIPS::VisitPackedSwitch(HPackedSwitch* switch_instr
void LocationsBuilderMIPS::VisitMipsPackedSwitch(HMipsPackedSwitch* switch_instr) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(switch_instr, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
// Constant area pointer (HMipsComputeBaseMethodAddress).
locations->SetInAt(1, Location::RequiresRegister());
@@ -9152,7 +9156,7 @@ void InstructionCodeGeneratorMIPS::VisitMipsPackedSwitch(HMipsPackedSwitch* swit
void LocationsBuilderMIPS::VisitMipsComputeBaseMethodAddress(
HMipsComputeBaseMethodAddress* insn) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(insn, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(insn, LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
}
@@ -9185,7 +9189,7 @@ void InstructionCodeGeneratorMIPS::VisitInvokeUnresolved(HInvokeUnresolved* invo
void LocationsBuilderMIPS::VisitClassTableGet(HClassTableGet* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister());
}
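
The MIPS32 hunks above are one repeated substitution: every placement-new that used to read GetGraph()->GetArena() now reads GetGraph()->GetAllocator(), whether it builds a LocationSummary or a slow-path object. The standalone C++ sketch below only illustrates that placement-new-on-an-arena shape; BumpArena, GraphLike and LocationSummaryLike are made-up names for this illustration, not ART classes, and the byte counter exists purely to show that allocations routed through the graph's allocator are observable in the sketch.

#include <cstddef>
#include <vector>

// Hypothetical stand-in for an arena allocator: bump-style storage that is
// released all at once and that records how much was requested.
class BumpArena {
 public:
  void* Alloc(std::size_t bytes) {
    bytes_allocated_ += bytes;
    blocks_.push_back(new char[bytes]);
    return blocks_.back();
  }
  std::size_t BytesAllocated() const { return bytes_allocated_; }
  ~BumpArena() {
    for (char* block : blocks_) delete[] block;
  }
 private:
  std::vector<char*> blocks_;
  std::size_t bytes_allocated_ = 0;
};

// Lets call sites write `new (arena) T(...)`, mirroring the diff's
// `new (GetGraph()->GetAllocator()) LocationSummary(...)`.
inline void* operator new(std::size_t bytes, BumpArena* arena) { return arena->Alloc(bytes); }
inline void operator delete(void*, BumpArena*) {}  // matching form for exception unwinding

// Hypothetical graph exposing its allocator, like GetGraph()->GetAllocator().
struct GraphLike {
  BumpArena* GetAllocator() { return &arena_; }
  BumpArena arena_;
};

// Trivially destructible pass-local data, standing in for LocationSummary.
struct LocationSummaryLike {
  explicit LocationSummaryLike(int call_kind) : call_kind_(call_kind) {}
  int call_kind_;
};

int main() {
  GraphLike graph;
  LocationSummaryLike* locations =
      new (graph.GetAllocator()) LocationSummaryLike(/*call_kind=*/0);
  (void)locations;
  // No `delete locations`: the arena reclaims everything when it is destroyed,
  // and the request is visible through BytesAllocated().
  return 0;
}
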
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 687700380b..fad0fe74e5 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -512,7 +512,7 @@ class ArraySetSlowPathMIPS64 : public SlowPathCodeMIPS64 {
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConvention calling_convention;
- HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+ HParallelMove parallel_move(codegen->GetGraph()->GetAllocator());
parallel_move.AddMove(
locations->InAt(0),
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
@@ -910,7 +910,7 @@ class ReadBarrierForHeapReferenceSlowPathMIPS64 : public SlowPathCodeMIPS64 {
// We're moving two or three locations to locations that could
// overlap, so we need a parallel move resolver.
InvokeRuntimeCallingConvention calling_convention;
- HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+ HParallelMove parallel_move(codegen->GetGraph()->GetAllocator());
parallel_move.AddMove(ref_,
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
DataType::Type::kReference,
@@ -1041,23 +1041,23 @@ CodeGeneratorMIPS64::CodeGeneratorMIPS64(HGraph* graph,
block_labels_(nullptr),
location_builder_(graph, this),
instruction_visitor_(graph, this),
- move_resolver_(graph->GetArena(), this),
- assembler_(graph->GetArena(), &isa_features),
+ move_resolver_(graph->GetAllocator(), this),
+ assembler_(graph->GetAllocator(), &isa_features),
isa_features_(isa_features),
uint32_literals_(std::less<uint32_t>(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
uint64_literals_(std::less<uint64_t>(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- pc_relative_method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- method_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- pc_relative_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- string_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ pc_relative_method_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ method_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ pc_relative_type_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ type_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ pc_relative_string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ string_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
jit_string_patches_(StringReferenceValueComparator(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
jit_class_patches_(TypeReferenceValueComparator(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
+ graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)) {
// Save RA (containing the return address) to mimic Quick.
AddAllocatedRegister(Location::RegisterLocation(RA));
}
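
In the constructor hunk just above, the code generator's arena-backed containers (literal maps, patch tables, JIT patch maps) switch from graph->GetArena()->Adapter(kArenaAllocCodeGenerator) to graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator), so their storage comes from the allocator the graph now exposes. As a loose standard-library analogue only (ART does not use std::pmr), the sketch below shows containers whose backing memory is tied to an externally supplied, arena-style resource.

#include <cstdint>
#include <map>
#include <memory_resource>
#include <vector>

int main() {
  // Arena-style resource: bump allocation, everything released together.
  std::pmr::monotonic_buffer_resource arena;

  // Containers adapted to the arena, loosely analogous to the diff's
  //   uint32_literals_(std::less<uint32_t>(), graph->GetAllocator()->Adapter(...))
  //   pc_relative_method_patches_(graph->GetAllocator()->Adapter(...))
  std::pmr::map<std::uint32_t, int> uint32_literals(&arena);
  std::pmr::vector<int> method_patches(&arena);

  uint32_literals[0x1234u] = 1;
  method_patches.push_back(42);

  // No per-container bookkeeping of the arena is needed; the resource frees
  // its buffers when it goes out of scope.
  return 0;
}
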
@@ -1835,7 +1835,7 @@ void InstructionCodeGeneratorMIPS64::GenerateMemoryBarrier(MemBarrierKind kind A
void InstructionCodeGeneratorMIPS64::GenerateSuspendCheck(HSuspendCheck* instruction,
HBasicBlock* successor) {
SuspendCheckSlowPathMIPS64* slow_path =
- new (GetGraph()->GetArena()) SuspendCheckSlowPathMIPS64(instruction, successor);
+ new (GetGraph()->GetAllocator()) SuspendCheckSlowPathMIPS64(instruction, successor);
codegen_->AddSlowPath(slow_path);
__ LoadFromOffset(kLoadUnsignedHalfword,
@@ -1860,7 +1860,7 @@ InstructionCodeGeneratorMIPS64::InstructionCodeGeneratorMIPS64(HGraph* graph,
void LocationsBuilderMIPS64::HandleBinaryOp(HBinaryOperation* instruction) {
DCHECK_EQ(instruction->InputCount(), 2U);
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
DataType::Type type = instruction->GetResultType();
switch (type) {
case DataType::Type::kInt32:
@@ -1990,7 +1990,7 @@ void InstructionCodeGeneratorMIPS64::HandleBinaryOp(HBinaryOperation* instructio
void LocationsBuilderMIPS64::HandleShift(HBinaryOperation* instr) {
DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr() || instr->IsRor());
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instr);
DataType::Type type = instr->GetResultType();
switch (type) {
case DataType::Type::kInt32:
@@ -2119,10 +2119,10 @@ void LocationsBuilderMIPS64::VisitArrayGet(HArrayGet* instruction) {
bool object_array_get_with_read_barrier =
kEmitCompilerReadBarrier && (type == DataType::Type::kReference);
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction,
- object_array_get_with_read_barrier
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction,
+ object_array_get_with_read_barrier
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall);
if (object_array_get_with_read_barrier && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -2385,7 +2385,7 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
}
void LocationsBuilderMIPS64::VisitArrayLength(HArrayLength* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
@@ -2429,7 +2429,7 @@ void LocationsBuilderMIPS64::VisitArraySet(HArraySet* instruction) {
CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck();
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
instruction,
may_need_runtime_call_for_type_check ?
LocationSummary::kCallOnSlowPath :
@@ -2543,7 +2543,7 @@ void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) {
SlowPathCodeMIPS64* slow_path = nullptr;
if (may_need_runtime_call_for_type_check) {
- slow_path = new (GetGraph()->GetArena()) ArraySetSlowPathMIPS64(instruction);
+ slow_path = new (GetGraph()->GetAllocator()) ArraySetSlowPathMIPS64(instruction);
codegen_->AddSlowPath(slow_path);
if (instruction->GetValueCanBeNull()) {
Mips64Label non_zero;
@@ -2700,7 +2700,7 @@ void LocationsBuilderMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) {
void InstructionCodeGeneratorMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) {
LocationSummary* locations = instruction->GetLocations();
BoundsCheckSlowPathMIPS64* slow_path =
- new (GetGraph()->GetArena()) BoundsCheckSlowPathMIPS64(instruction);
+ new (GetGraph()->GetAllocator()) BoundsCheckSlowPathMIPS64(instruction);
codegen_->AddSlowPath(slow_path);
GpuRegister index = locations->InAt(0).AsRegister<GpuRegister>();
@@ -2751,7 +2751,8 @@ void LocationsBuilderMIPS64::VisitCheckCast(HCheckCast* instruction) {
break;
}
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations =
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->AddRegisterTemps(NumberOfCheckCastTemps(type_check_kind));
@@ -2791,8 +2792,8 @@ void InstructionCodeGeneratorMIPS64::VisitCheckCast(HCheckCast* instruction) {
!instruction->CanThrowIntoCatchBlock();
}
SlowPathCodeMIPS64* slow_path =
- new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS64(instruction,
- is_type_check_slow_path_fatal);
+ new (GetGraph()->GetAllocator()) TypeCheckSlowPathMIPS64(instruction,
+ is_type_check_slow_path_fatal);
codegen_->AddSlowPath(slow_path);
// Avoid this check if we know `obj` is not null.
@@ -2946,7 +2947,7 @@ void InstructionCodeGeneratorMIPS64::VisitCheckCast(HCheckCast* instruction) {
void LocationsBuilderMIPS64::VisitClinitCheck(HClinitCheck* check) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
+ new (GetGraph()->GetAllocator()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
locations->SetInAt(0, Location::RequiresRegister());
if (check->HasUses()) {
locations->SetOut(Location::SameAsFirstInput());
@@ -2955,7 +2956,7 @@ void LocationsBuilderMIPS64::VisitClinitCheck(HClinitCheck* check) {
void InstructionCodeGeneratorMIPS64::VisitClinitCheck(HClinitCheck* check) {
// We assume the class is not null.
- SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathMIPS64(
+ SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathMIPS64(
check->GetLoadClass(),
check,
check->GetDexPc(),
@@ -2968,7 +2969,7 @@ void InstructionCodeGeneratorMIPS64::VisitClinitCheck(HClinitCheck* check) {
void LocationsBuilderMIPS64::VisitCompare(HCompare* compare) {
DataType::Type in_type = compare->InputAt(0)->GetType();
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(compare);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(compare);
switch (in_type) {
case DataType::Type::kBool:
@@ -3088,7 +3089,7 @@ void InstructionCodeGeneratorMIPS64::VisitCompare(HCompare* instruction) {
}
void LocationsBuilderMIPS64::HandleCondition(HCondition* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
switch (instruction->InputAt(0)->GetType()) {
default:
case DataType::Type::kInt64:
@@ -3376,7 +3377,7 @@ void InstructionCodeGeneratorMIPS64::GenerateDivRemIntegral(HBinaryOperation* in
void LocationsBuilderMIPS64::VisitDiv(HDiv* div) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(div, LocationSummary::kNoCall);
switch (div->GetResultType()) {
case DataType::Type::kInt32:
case DataType::Type::kInt64:
@@ -3429,7 +3430,7 @@ void LocationsBuilderMIPS64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
void InstructionCodeGeneratorMIPS64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
SlowPathCodeMIPS64* slow_path =
- new (GetGraph()->GetArena()) DivZeroCheckSlowPathMIPS64(instruction);
+ new (GetGraph()->GetAllocator()) DivZeroCheckSlowPathMIPS64(instruction);
codegen_->AddSlowPath(slow_path);
Location value = instruction->GetLocations()->InAt(0);
@@ -3455,7 +3456,7 @@ void InstructionCodeGeneratorMIPS64::VisitDivZeroCheck(HDivZeroCheck* instructio
void LocationsBuilderMIPS64::VisitDoubleConstant(HDoubleConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -3472,7 +3473,7 @@ void InstructionCodeGeneratorMIPS64::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
void LocationsBuilderMIPS64::VisitFloatConstant(HFloatConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -4255,7 +4256,7 @@ void InstructionCodeGeneratorMIPS64::GenerateTestAndBranch(HInstruction* instruc
}
void LocationsBuilderMIPS64::VisitIf(HIf* if_instr) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(if_instr);
if (IsBooleanValueOrMaterializedCondition(if_instr->InputAt(0))) {
locations->SetInAt(0, Location::RequiresRegister());
}
@@ -4272,7 +4273,7 @@ void InstructionCodeGeneratorMIPS64::VisitIf(HIf* if_instr) {
}
void LocationsBuilderMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) {
- LocationSummary* locations = new (GetGraph()->GetArena())
+ LocationSummary* locations = new (GetGraph()->GetAllocator())
LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
InvokeRuntimeCallingConvention calling_convention;
RegisterSet caller_saves = RegisterSet::Empty();
@@ -4594,7 +4595,7 @@ void InstructionCodeGeneratorMIPS64::GenConditionalMove(HSelect* select) {
}
void LocationsBuilderMIPS64::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
- LocationSummary* locations = new (GetGraph()->GetArena())
+ LocationSummary* locations = new (GetGraph()->GetAllocator())
LocationSummary(flag, LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
}
@@ -4607,7 +4608,7 @@ void InstructionCodeGeneratorMIPS64::VisitShouldDeoptimizeFlag(HShouldDeoptimize
}
void LocationsBuilderMIPS64::VisitSelect(HSelect* select) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(select);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(select);
CanMoveConditionally(select, locations);
}
@@ -4627,7 +4628,7 @@ void InstructionCodeGeneratorMIPS64::VisitSelect(HSelect* select) {
}
void LocationsBuilderMIPS64::VisitNativeDebugInfo(HNativeDebugInfo* info) {
- new (GetGraph()->GetArena()) LocationSummary(info);
+ new (GetGraph()->GetAllocator()) LocationSummary(info);
}
void InstructionCodeGeneratorMIPS64::VisitNativeDebugInfo(HNativeDebugInfo*) {
@@ -4643,7 +4644,7 @@ void LocationsBuilderMIPS64::HandleFieldGet(HInstruction* instruction,
DataType::Type field_type = field_info.GetFieldType();
bool object_field_get_with_read_barrier =
kEmitCompilerReadBarrier && (field_type == DataType::Type::kReference);
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
instruction,
object_field_get_with_read_barrier
? LocationSummary::kCallOnSlowPath
@@ -4761,7 +4762,7 @@ void InstructionCodeGeneratorMIPS64::HandleFieldGet(HInstruction* instruction,
void LocationsBuilderMIPS64::HandleFieldSet(HInstruction* instruction,
const FieldInfo& field_info ATTRIBUTE_UNUSED) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
if (DataType::IsFloatingPointType(instruction->InputAt(1)->GetType())) {
locations->SetInAt(1, FpuRegisterOrConstantForStore(instruction->InputAt(1)));
@@ -5050,7 +5051,7 @@ void InstructionCodeGeneratorMIPS64::GenerateGcRootFieldLoad(HInstruction* instr
// Slow path marking the GC root `root`.
Location temp = Location::RegisterLocation(T9);
SlowPathCodeMIPS64* slow_path =
- new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathMIPS64(
+ new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathMIPS64(
instruction,
root,
/*entrypoint*/ temp);
@@ -5335,14 +5336,14 @@ void CodeGeneratorMIPS64::GenerateReferenceLoadWithBakerReadBarrier(HInstruction
// above are expected to be null in this code path.
DCHECK_EQ(offset, 0u);
DCHECK_EQ(scale_factor, ScaleFactor::TIMES_1);
- slow_path = new (GetGraph()->GetArena())
+ slow_path = new (GetGraph()->GetAllocator())
ReadBarrierMarkAndUpdateFieldSlowPathMIPS64(instruction,
ref,
obj,
/* field_offset */ index,
temp_reg);
} else {
- slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathMIPS64(instruction, ref);
+ slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathMIPS64(instruction, ref);
}
AddSlowPath(slow_path);
@@ -5378,7 +5379,7 @@ void CodeGeneratorMIPS64::GenerateReadBarrierSlow(HInstruction* instruction,
// not used by the artReadBarrierSlow entry point.
//
// TODO: Unpoison `ref` when it is used by artReadBarrierSlow.
- SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena())
+ SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetAllocator())
ReadBarrierForHeapReferenceSlowPathMIPS64(instruction, out, ref, obj, offset, index);
AddSlowPath(slow_path);
@@ -5414,7 +5415,7 @@ void CodeGeneratorMIPS64::GenerateReadBarrierForRootSlow(HInstruction* instructi
// Note that GC roots are not affected by heap poisoning, so we do
// not need to do anything special for this here.
SlowPathCodeMIPS64* slow_path =
- new (GetGraph()->GetArena()) ReadBarrierForRootSlowPathMIPS64(instruction, out, root);
+ new (GetGraph()->GetAllocator()) ReadBarrierForRootSlowPathMIPS64(instruction, out, root);
AddSlowPath(slow_path);
__ Bc(slow_path->GetEntryLabel());
@@ -5441,7 +5442,8 @@ void LocationsBuilderMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
break;
}
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations =
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
if (baker_read_barrier_slow_path) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -5583,8 +5585,8 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
maybe_temp_loc,
kWithoutReadBarrier);
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS64(instruction,
- /* is_fatal */ false);
+ slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathMIPS64(instruction,
+ /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ Bnec(out, cls, slow_path->GetEntryLabel());
__ LoadConst32(out, 1);
@@ -5612,8 +5614,8 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
// call to the runtime not using a type checking slow path).
// This should also be beneficial for the other cases above.
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS64(instruction,
- /* is_fatal */ false);
+ slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathMIPS64(instruction,
+ /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ Bc(slow_path->GetEntryLabel());
break;
@@ -5628,7 +5630,7 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
}
void LocationsBuilderMIPS64::VisitIntConstant(HIntConstant* constant) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(constant);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -5637,7 +5639,7 @@ void InstructionCodeGeneratorMIPS64::VisitIntConstant(HIntConstant* constant ATT
}
void LocationsBuilderMIPS64::VisitNullConstant(HNullConstant* constant) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(constant);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -5952,7 +5954,7 @@ void LocationsBuilderMIPS64::VisitLoadClass(HLoadClass* cls) {
LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(cls, call_kind);
if (kUseBakerReadBarrier && requires_read_barrier && !cls->NeedsEnvironment()) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -6081,7 +6083,7 @@ void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) NO_THREAD_S
if (generate_null_check || cls->MustGenerateClinitCheck()) {
DCHECK(cls->CanCallRuntime());
- SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathMIPS64(
+ SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathMIPS64(
cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck(), bss_info_high);
codegen_->AddSlowPath(slow_path);
if (generate_null_check) {
@@ -6101,7 +6103,7 @@ static int32_t GetExceptionTlsOffset() {
void LocationsBuilderMIPS64::VisitLoadException(HLoadException* load) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(load, LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
}
@@ -6111,7 +6113,7 @@ void InstructionCodeGeneratorMIPS64::VisitLoadException(HLoadException* load) {
}
void LocationsBuilderMIPS64::VisitClearException(HClearException* clear) {
- new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(clear, LocationSummary::kNoCall);
}
void InstructionCodeGeneratorMIPS64::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
@@ -6121,7 +6123,7 @@ void InstructionCodeGeneratorMIPS64::VisitClearException(HClearException* clear
void LocationsBuilderMIPS64::VisitLoadString(HLoadString* load) {
HLoadString::LoadKind load_kind = load->GetLoadKind();
LocationSummary::CallKind call_kind = CodeGenerator::GetLoadStringCallKind(load);
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(load, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(load, call_kind);
if (load_kind == HLoadString::LoadKind::kRuntimeCall) {
InvokeRuntimeCallingConvention calling_convention;
locations->SetOut(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
@@ -6199,7 +6201,7 @@ void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) NO_THREA
kCompilerReadBarrierOption,
&info_low->label);
SlowPathCodeMIPS64* slow_path =
- new (GetGraph()->GetArena()) LoadStringSlowPathMIPS64(load, info_high);
+ new (GetGraph()->GetAllocator()) LoadStringSlowPathMIPS64(load, info_high);
codegen_->AddSlowPath(slow_path);
__ Beqzc(out, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
@@ -6227,7 +6229,7 @@ void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) NO_THREA
}
void LocationsBuilderMIPS64::VisitLongConstant(HLongConstant* constant) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(constant);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -6236,8 +6238,8 @@ void InstructionCodeGeneratorMIPS64::VisitLongConstant(HLongConstant* constant A
}
void LocationsBuilderMIPS64::VisitMonitorOperation(HMonitorOperation* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
}
@@ -6255,7 +6257,7 @@ void InstructionCodeGeneratorMIPS64::VisitMonitorOperation(HMonitorOperation* in
void LocationsBuilderMIPS64::VisitMul(HMul* mul) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(mul, LocationSummary::kNoCall);
switch (mul->GetResultType()) {
case DataType::Type::kInt32:
case DataType::Type::kInt64:
@@ -6310,7 +6312,7 @@ void InstructionCodeGeneratorMIPS64::VisitMul(HMul* instruction) {
void LocationsBuilderMIPS64::VisitNeg(HNeg* neg) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(neg, LocationSummary::kNoCall);
switch (neg->GetResultType()) {
case DataType::Type::kInt32:
case DataType::Type::kInt64:
@@ -6360,8 +6362,8 @@ void InstructionCodeGeneratorMIPS64::VisitNeg(HNeg* instruction) {
}
void LocationsBuilderMIPS64::VisitNewArray(HNewArray* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
locations->SetOut(calling_convention.GetReturnLocation(DataType::Type::kReference));
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
@@ -6379,8 +6381,8 @@ void InstructionCodeGeneratorMIPS64::VisitNewArray(HNewArray* instruction) {
}
void LocationsBuilderMIPS64::VisitNewInstance(HNewInstance* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
if (instruction->IsStringAlloc()) {
locations->AddTemp(Location::RegisterLocation(kMethodRegisterArgument));
@@ -6410,7 +6412,7 @@ void InstructionCodeGeneratorMIPS64::VisitNewInstance(HNewInstance* instruction)
}
void LocationsBuilderMIPS64::VisitNot(HNot* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
@@ -6434,7 +6436,7 @@ void InstructionCodeGeneratorMIPS64::VisitNot(HNot* instruction) {
}
void LocationsBuilderMIPS64::VisitBooleanNot(HBooleanNot* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
@@ -6462,7 +6464,8 @@ void CodeGeneratorMIPS64::GenerateImplicitNullCheck(HNullCheck* instruction) {
}
void CodeGeneratorMIPS64::GenerateExplicitNullCheck(HNullCheck* instruction) {
- SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathMIPS64(instruction);
+ SlowPathCodeMIPS64* slow_path =
+ new (GetGraph()->GetAllocator()) NullCheckSlowPathMIPS64(instruction);
AddSlowPath(slow_path);
Location obj = instruction->GetLocations()->InAt(0);
@@ -6491,7 +6494,7 @@ void InstructionCodeGeneratorMIPS64::VisitParallelMove(HParallelMove* instructio
}
void LocationsBuilderMIPS64::VisitParameterValue(HParameterValue* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
if (location.IsStackSlot()) {
location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
@@ -6508,7 +6511,7 @@ void InstructionCodeGeneratorMIPS64::VisitParameterValue(HParameterValue* instru
void LocationsBuilderMIPS64::VisitCurrentMethod(HCurrentMethod* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetOut(Location::RegisterLocation(kMethodRegisterArgument));
}
@@ -6518,7 +6521,7 @@ void InstructionCodeGeneratorMIPS64::VisitCurrentMethod(HCurrentMethod* instruct
}
void LocationsBuilderMIPS64::VisitPhi(HPhi* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) {
locations->SetInAt(i, Location::Any());
}
@@ -6534,7 +6537,7 @@ void LocationsBuilderMIPS64::VisitRem(HRem* rem) {
LocationSummary::CallKind call_kind =
DataType::IsFloatingPointType(type) ? LocationSummary::kCallOnMainOnly
: LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(rem, call_kind);
switch (type) {
case DataType::Type::kInt32:
@@ -6602,7 +6605,7 @@ void InstructionCodeGeneratorMIPS64::VisitMemoryBarrier(HMemoryBarrier* memory_b
}
void LocationsBuilderMIPS64::VisitReturn(HReturn* ret) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(ret);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(ret);
DataType::Type return_type = ret->InputAt(0)->GetType();
locations->SetInAt(0, Mips64ReturnLocation(return_type));
}
@@ -6736,8 +6739,8 @@ void InstructionCodeGeneratorMIPS64::VisitUnresolvedStaticFieldSet(
}
void LocationsBuilderMIPS64::VisitSuspendCheck(HSuspendCheck* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnSlowPath);
// In suspend check slow path, usually there are no caller-save registers at all.
// If SIMD instructions are present, however, we force spilling all live SIMD
// registers in full width (since the runtime only saves/restores lower part).
@@ -6760,8 +6763,8 @@ void InstructionCodeGeneratorMIPS64::VisitSuspendCheck(HSuspendCheck* instructio
}
void LocationsBuilderMIPS64::VisitThrow(HThrow* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
}
@@ -6782,7 +6785,7 @@ void LocationsBuilderMIPS64::VisitTypeConversion(HTypeConversion* conversion) {
LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type;
}
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(conversion);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(conversion);
if (DataType::IsFloatingPointType(input_type)) {
locations->SetInAt(0, Location::RequiresFpuRegister());
@@ -7014,7 +7017,7 @@ void InstructionCodeGeneratorMIPS64::VisitAboveOrEqual(HAboveOrEqual* comp) {
// Simple implementation of packed switch - generate cascaded compare/jumps.
void LocationsBuilderMIPS64::VisitPackedSwitch(HPackedSwitch* switch_instr) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(switch_instr, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
}
@@ -7110,7 +7113,7 @@ void InstructionCodeGeneratorMIPS64::VisitPackedSwitch(HPackedSwitch* switch_ins
void LocationsBuilderMIPS64::VisitClassTableGet(HClassTableGet* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister());
}
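
Across the MIPS64 hunks the slow-path call sites keep the same two-step shape: the object is placement-new'd out of GetGraph()->GetAllocator() and then handed to AddSlowPath(), with no matching delete anywhere in the file. The standalone sketch below mirrors that ownership shape; ArenaLike, SlowPathLike and CodeGenLike are hypothetical names for this illustration, and the assumption that the objects are trivially destructible is the sketch's own, not a claim about the real SlowPathCode hierarchy.

#include <cstddef>
#include <vector>

// Hypothetical arena: hands out raw storage and frees it in one shot.
class ArenaLike {
 public:
  void* Alloc(std::size_t bytes) {
    blocks_.push_back(new char[bytes]);
    return blocks_.back();
  }
  ~ArenaLike() {
    for (char* block : blocks_) delete[] block;
  }
 private:
  std::vector<char*> blocks_;
};

inline void* operator new(std::size_t bytes, ArenaLike* arena) { return arena->Alloc(bytes); }
inline void operator delete(void*, ArenaLike*) {}

// Stand-in for a slow path; assumed trivially destructible so skipping
// individual destruction at arena teardown is harmless in this sketch.
struct SlowPathLike {
  explicit SlowPathLike(int dex_pc) : dex_pc_(dex_pc) {}
  int dex_pc_;
};

// Stand-in for the code-generator side of call sites such as
//   slow_path = new (GetGraph()->GetAllocator()) BoundsCheckSlowPathMIPS64(instruction);
//   codegen_->AddSlowPath(slow_path);
class CodeGenLike {
 public:
  explicit CodeGenLike(ArenaLike* allocator) : allocator_(allocator) {}
  ArenaLike* GetAllocator() { return allocator_; }
  void AddSlowPath(SlowPathLike* path) { slow_paths_.push_back(path); }  // keeps a raw pointer
 private:
  ArenaLike* allocator_;
  std::vector<SlowPathLike*> slow_paths_;
};

int main() {
  ArenaLike arena;
  CodeGenLike codegen(&arena);
  SlowPathLike* slow_path = new (codegen.GetAllocator()) SlowPathLike(/*dex_pc=*/7);
  codegen.AddSlowPath(slow_path);
  // No `delete slow_path`: it lives exactly as long as the arena that backs it.
  return 0;
}
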
diff --git a/compiler/optimizing/code_generator_vector_arm64.cc b/compiler/optimizing/code_generator_vector_arm64.cc
index b2aec1e66d..bcf35d9030 100644
--- a/compiler/optimizing/code_generator_vector_arm64.cc
+++ b/compiler/optimizing/code_generator_vector_arm64.cc
@@ -38,7 +38,7 @@ using helpers::XRegisterFrom;
#define __ GetVIXLAssembler()->
void LocationsBuilderARM64::VisitVecReplicateScalar(HVecReplicateScalar* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
HInstruction* input = instruction->InputAt(0);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
@@ -131,7 +131,7 @@ void InstructionCodeGeneratorARM64::VisitVecReplicateScalar(HVecReplicateScalar*
}
void LocationsBuilderARM64::VisitVecExtractScalar(HVecExtractScalar* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
case DataType::Type::kUint8:
@@ -206,7 +206,7 @@ static void CreateVecUnOpLocations(ArenaAllocator* arena, HVecUnaryOperation* in
}
void LocationsBuilderARM64::VisitVecReduce(HVecReduce* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARM64::VisitVecReduce(HVecReduce* instruction) {
@@ -246,7 +246,7 @@ void InstructionCodeGeneratorARM64::VisitVecReduce(HVecReduce* instruction) {
}
void LocationsBuilderARM64::VisitVecCnv(HVecCnv* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARM64::VisitVecCnv(HVecCnv* instruction) {
@@ -264,7 +264,7 @@ void InstructionCodeGeneratorARM64::VisitVecCnv(HVecCnv* instruction) {
}
void LocationsBuilderARM64::VisitVecNeg(HVecNeg* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARM64::VisitVecNeg(HVecNeg* instruction) {
@@ -305,7 +305,7 @@ void InstructionCodeGeneratorARM64::VisitVecNeg(HVecNeg* instruction) {
}
void LocationsBuilderARM64::VisitVecAbs(HVecAbs* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARM64::VisitVecAbs(HVecAbs* instruction) {
@@ -344,7 +344,7 @@ void InstructionCodeGeneratorARM64::VisitVecAbs(HVecAbs* instruction) {
}
void LocationsBuilderARM64::VisitVecNot(HVecNot* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARM64::VisitVecNot(HVecNot* instruction) {
@@ -395,7 +395,7 @@ static void CreateVecBinOpLocations(ArenaAllocator* arena, HVecBinaryOperation*
}
void LocationsBuilderARM64::VisitVecAdd(HVecAdd* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARM64::VisitVecAdd(HVecAdd* instruction) {
@@ -437,7 +437,7 @@ void InstructionCodeGeneratorARM64::VisitVecAdd(HVecAdd* instruction) {
}
void LocationsBuilderARM64::VisitVecHalvingAdd(HVecHalvingAdd* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARM64::VisitVecHalvingAdd(HVecHalvingAdd* instruction) {
@@ -477,7 +477,7 @@ void InstructionCodeGeneratorARM64::VisitVecHalvingAdd(HVecHalvingAdd* instructi
}
void LocationsBuilderARM64::VisitVecSub(HVecSub* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARM64::VisitVecSub(HVecSub* instruction) {
@@ -519,7 +519,7 @@ void InstructionCodeGeneratorARM64::VisitVecSub(HVecSub* instruction) {
}
void LocationsBuilderARM64::VisitVecMul(HVecMul* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARM64::VisitVecMul(HVecMul* instruction) {
@@ -557,7 +557,7 @@ void InstructionCodeGeneratorARM64::VisitVecMul(HVecMul* instruction) {
}
void LocationsBuilderARM64::VisitVecDiv(HVecDiv* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARM64::VisitVecDiv(HVecDiv* instruction) {
@@ -581,7 +581,7 @@ void InstructionCodeGeneratorARM64::VisitVecDiv(HVecDiv* instruction) {
}
void LocationsBuilderARM64::VisitVecMin(HVecMin* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARM64::VisitVecMin(HVecMin* instruction) {
@@ -631,7 +631,7 @@ void InstructionCodeGeneratorARM64::VisitVecMin(HVecMin* instruction) {
}
void LocationsBuilderARM64::VisitVecMax(HVecMax* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARM64::VisitVecMax(HVecMax* instruction) {
@@ -682,7 +682,7 @@ void InstructionCodeGeneratorARM64::VisitVecMax(HVecMax* instruction) {
void LocationsBuilderARM64::VisitVecAnd(HVecAnd* instruction) {
// TODO: Allow constants supported by BIC (vector, immediate).
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARM64::VisitVecAnd(HVecAnd* instruction) {
@@ -718,7 +718,7 @@ void InstructionCodeGeneratorARM64::VisitVecAndNot(HVecAndNot* instruction) {
}
void LocationsBuilderARM64::VisitVecOr(HVecOr* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARM64::VisitVecOr(HVecOr* instruction) {
@@ -745,7 +745,7 @@ void InstructionCodeGeneratorARM64::VisitVecOr(HVecOr* instruction) {
}
void LocationsBuilderARM64::VisitVecXor(HVecXor* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARM64::VisitVecXor(HVecXor* instruction) {
@@ -792,7 +792,7 @@ static void CreateVecShiftLocations(ArenaAllocator* arena, HVecBinaryOperation*
}
void LocationsBuilderARM64::VisitVecShl(HVecShl* instruction) {
- CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+ CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARM64::VisitVecShl(HVecShl* instruction) {
@@ -826,7 +826,7 @@ void InstructionCodeGeneratorARM64::VisitVecShl(HVecShl* instruction) {
}
void LocationsBuilderARM64::VisitVecShr(HVecShr* instruction) {
- CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+ CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARM64::VisitVecShr(HVecShr* instruction) {
@@ -860,7 +860,7 @@ void InstructionCodeGeneratorARM64::VisitVecShr(HVecShr* instruction) {
}
void LocationsBuilderARM64::VisitVecUShr(HVecUShr* instruction) {
- CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+ CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARM64::VisitVecUShr(HVecUShr* instruction) {
@@ -894,7 +894,7 @@ void InstructionCodeGeneratorARM64::VisitVecUShr(HVecUShr* instruction) {
}
void LocationsBuilderARM64::VisitVecSetScalars(HVecSetScalars* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
DCHECK_EQ(1u, instruction->InputCount()); // only one input currently implemented
@@ -988,7 +988,7 @@ static void CreateVecAccumLocations(ArenaAllocator* arena, HVecOperation* instru
}
void LocationsBuilderARM64::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) {
- CreateVecAccumLocations(GetGraph()->GetArena(), instruction);
+ CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction);
}
// Some early revisions of the Cortex-A53 have an erratum (835769) whereby it is possible for a
@@ -1036,7 +1036,7 @@ void InstructionCodeGeneratorARM64::VisitVecMultiplyAccumulate(HVecMultiplyAccum
}
void LocationsBuilderARM64::VisitVecSADAccumulate(HVecSADAccumulate* instruction) {
- CreateVecAccumLocations(GetGraph()->GetArena(), instruction);
+ CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction);
// Some conversions require temporary registers.
LocationSummary* locations = instruction->GetLocations();
HVecOperation* a = instruction->InputAt(1)->AsVecOperation();
@@ -1281,7 +1281,7 @@ MemOperand InstructionCodeGeneratorARM64::VecAddress(
}
void LocationsBuilderARM64::VisitVecLoad(HVecLoad* instruction) {
- CreateVecMemLocations(GetGraph()->GetArena(), instruction, /*is_load*/ true);
+ CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /*is_load*/ true);
}
void InstructionCodeGeneratorARM64::VisitVecLoad(HVecLoad* instruction) {
@@ -1339,7 +1339,7 @@ void InstructionCodeGeneratorARM64::VisitVecLoad(HVecLoad* instruction) {
}
void LocationsBuilderARM64::VisitVecStore(HVecStore* instruction) {
- CreateVecMemLocations(GetGraph()->GetArena(), instruction, /*is_load*/ false);
+ CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /*is_load*/ false);
}
void InstructionCodeGeneratorARM64::VisitVecStore(HVecStore* instruction) {
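
The ARM64 vector visitors above delegate to small file-static helpers (CreateVecUnOpLocations, CreateVecBinOpLocations, CreateVecShiftLocations, ...) that take the allocator explicitly, which is why only the argument changes in these hunks. For orientation, an illustrative shape for the binary-op helper; the real per-backend versions switch over GetPackedType() and may differ in detail:

    static void CreateVecBinOpLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) {
      LocationSummary* locations = new (arena) LocationSummary(instruction);
      // Both packed inputs and the packed result are kept in SIMD (FPU) registers.
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
    }
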
diff --git a/compiler/optimizing/code_generator_vector_arm_vixl.cc b/compiler/optimizing/code_generator_vector_arm_vixl.cc
index df757524a1..605c936f52 100644
--- a/compiler/optimizing/code_generator_vector_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_vector_arm_vixl.cc
@@ -33,7 +33,7 @@ using helpers::RegisterFrom;
#define __ GetVIXLAssembler()->
void LocationsBuilderARMVIXL::VisitVecReplicateScalar(HVecReplicateScalar* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
case DataType::Type::kUint8:
@@ -108,7 +108,7 @@ static void CreateVecUnOpLocations(ArenaAllocator* arena, HVecUnaryOperation* in
}
void LocationsBuilderARMVIXL::VisitVecReduce(HVecReduce* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecReduce(HVecReduce* instruction) {
@@ -116,7 +116,7 @@ void InstructionCodeGeneratorARMVIXL::VisitVecReduce(HVecReduce* instruction) {
}
void LocationsBuilderARMVIXL::VisitVecCnv(HVecCnv* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecCnv(HVecCnv* instruction) {
@@ -124,7 +124,7 @@ void InstructionCodeGeneratorARMVIXL::VisitVecCnv(HVecCnv* instruction) {
}
void LocationsBuilderARMVIXL::VisitVecNeg(HVecNeg* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecNeg(HVecNeg* instruction) {
@@ -153,7 +153,7 @@ void InstructionCodeGeneratorARMVIXL::VisitVecNeg(HVecNeg* instruction) {
}
void LocationsBuilderARMVIXL::VisitVecAbs(HVecAbs* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecAbs(HVecAbs* instruction) {
@@ -180,7 +180,7 @@ void InstructionCodeGeneratorARMVIXL::VisitVecAbs(HVecAbs* instruction) {
}
void LocationsBuilderARMVIXL::VisitVecNot(HVecNot* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecNot(HVecNot* instruction) {
@@ -227,7 +227,7 @@ static void CreateVecBinOpLocations(ArenaAllocator* arena, HVecBinaryOperation*
}
void LocationsBuilderARMVIXL::VisitVecAdd(HVecAdd* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecAdd(HVecAdd* instruction) {
@@ -257,7 +257,7 @@ void InstructionCodeGeneratorARMVIXL::VisitVecAdd(HVecAdd* instruction) {
}
void LocationsBuilderARMVIXL::VisitVecHalvingAdd(HVecHalvingAdd* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecHalvingAdd(HVecHalvingAdd* instruction) {
@@ -297,7 +297,7 @@ void InstructionCodeGeneratorARMVIXL::VisitVecHalvingAdd(HVecHalvingAdd* instruc
}
void LocationsBuilderARMVIXL::VisitVecSub(HVecSub* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecSub(HVecSub* instruction) {
@@ -327,7 +327,7 @@ void InstructionCodeGeneratorARMVIXL::VisitVecSub(HVecSub* instruction) {
}
void LocationsBuilderARMVIXL::VisitVecMul(HVecMul* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecMul(HVecMul* instruction) {
@@ -357,7 +357,7 @@ void InstructionCodeGeneratorARMVIXL::VisitVecMul(HVecMul* instruction) {
}
void LocationsBuilderARMVIXL::VisitVecDiv(HVecDiv* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecDiv(HVecDiv* instruction) {
@@ -365,7 +365,7 @@ void InstructionCodeGeneratorARMVIXL::VisitVecDiv(HVecDiv* instruction) {
}
void LocationsBuilderARMVIXL::VisitVecMin(HVecMin* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecMin(HVecMin* instruction) {
@@ -405,7 +405,7 @@ void InstructionCodeGeneratorARMVIXL::VisitVecMin(HVecMin* instruction) {
}
void LocationsBuilderARMVIXL::VisitVecMax(HVecMax* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecMax(HVecMax* instruction) {
@@ -446,7 +446,7 @@ void InstructionCodeGeneratorARMVIXL::VisitVecMax(HVecMax* instruction) {
void LocationsBuilderARMVIXL::VisitVecAnd(HVecAnd* instruction) {
// TODO: Allow constants supported by VAND (immediate).
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecAnd(HVecAnd* instruction) {
@@ -470,7 +470,7 @@ void InstructionCodeGeneratorARMVIXL::VisitVecAnd(HVecAnd* instruction) {
}
void LocationsBuilderARMVIXL::VisitVecAndNot(HVecAndNot* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecAndNot(HVecAndNot* instruction) {
@@ -478,7 +478,7 @@ void InstructionCodeGeneratorARMVIXL::VisitVecAndNot(HVecAndNot* instruction) {
}
void LocationsBuilderARMVIXL::VisitVecOr(HVecOr* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecOr(HVecOr* instruction) {
@@ -502,7 +502,7 @@ void InstructionCodeGeneratorARMVIXL::VisitVecOr(HVecOr* instruction) {
}
void LocationsBuilderARMVIXL::VisitVecXor(HVecXor* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecXor(HVecXor* instruction) {
@@ -545,7 +545,7 @@ static void CreateVecShiftLocations(ArenaAllocator* arena, HVecBinaryOperation*
}
void LocationsBuilderARMVIXL::VisitVecShl(HVecShl* instruction) {
- CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+ CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecShl(HVecShl* instruction) {
@@ -575,7 +575,7 @@ void InstructionCodeGeneratorARMVIXL::VisitVecShl(HVecShl* instruction) {
}
void LocationsBuilderARMVIXL::VisitVecShr(HVecShr* instruction) {
- CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+ CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecShr(HVecShr* instruction) {
@@ -605,7 +605,7 @@ void InstructionCodeGeneratorARMVIXL::VisitVecShr(HVecShr* instruction) {
}
void LocationsBuilderARMVIXL::VisitVecUShr(HVecUShr* instruction) {
- CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+ CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecUShr(HVecUShr* instruction) {
@@ -664,7 +664,7 @@ static void CreateVecAccumLocations(ArenaAllocator* arena, HVecOperation* instru
}
void LocationsBuilderARMVIXL::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) {
- CreateVecAccumLocations(GetGraph()->GetArena(), instruction);
+ CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) {
@@ -672,7 +672,7 @@ void InstructionCodeGeneratorARMVIXL::VisitVecMultiplyAccumulate(HVecMultiplyAcc
}
void LocationsBuilderARMVIXL::VisitVecSADAccumulate(HVecSADAccumulate* instruction) {
- CreateVecAccumLocations(GetGraph()->GetArena(), instruction);
+ CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecSADAccumulate(HVecSADAccumulate* instruction) {
@@ -767,7 +767,7 @@ AlignedMemOperand InstructionCodeGeneratorARMVIXL::VecAddressUnaligned(
}
void LocationsBuilderARMVIXL::VisitVecLoad(HVecLoad* instruction) {
- CreateVecMemLocations(GetGraph()->GetArena(), instruction, /*is_load*/ true);
+ CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /*is_load*/ true);
}
void InstructionCodeGeneratorARMVIXL::VisitVecLoad(HVecLoad* instruction) {
@@ -818,7 +818,7 @@ void InstructionCodeGeneratorARMVIXL::VisitVecLoad(HVecLoad* instruction) {
}
void LocationsBuilderARMVIXL::VisitVecStore(HVecStore* instruction) {
- CreateVecMemLocations(GetGraph()->GetArena(), instruction, /*is_load*/ false);
+ CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /*is_load*/ false);
}
void InstructionCodeGeneratorARMVIXL::VisitVecStore(HVecStore* instruction) {
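
The same pattern covers memory operations: VisitVecLoad and VisitVecStore above both call CreateVecMemLocations and differ only in the is_load flag. A sketch of how such a helper typically splits on that flag (approximate shape; the exact operand constraints vary per backend):

    static void CreateVecMemLocations(ArenaAllocator* arena,
                                      HVecMemoryOperation* instruction,
                                      bool is_load) {
      LocationSummary* locations = new (arena) LocationSummary(instruction);
      locations->SetInAt(0, Location::RequiresRegister());  // array base
      locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));  // index
      if (is_load) {
        locations->SetOut(Location::RequiresFpuRegister());  // loaded packed value
      } else {
        locations->SetInAt(2, Location::RequiresFpuRegister());  // packed value to store
      }
    }
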
diff --git a/compiler/optimizing/code_generator_vector_mips.cc b/compiler/optimizing/code_generator_vector_mips.cc
index e8c515761c..82d90e0367 100644
--- a/compiler/optimizing/code_generator_vector_mips.cc
+++ b/compiler/optimizing/code_generator_vector_mips.cc
@@ -24,7 +24,7 @@ namespace mips {
#define __ down_cast<MipsAssembler*>(GetAssembler())-> // NOLINT
void LocationsBuilderMIPS::VisitVecReplicateScalar(HVecReplicateScalar* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
case DataType::Type::kUint8:
@@ -129,7 +129,7 @@ static void CreateVecUnOpLocations(ArenaAllocator* arena, HVecUnaryOperation* in
}
void LocationsBuilderMIPS::VisitVecReduce(HVecReduce* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecReduce(HVecReduce* instruction) {
@@ -137,7 +137,7 @@ void InstructionCodeGeneratorMIPS::VisitVecReduce(HVecReduce* instruction) {
}
void LocationsBuilderMIPS::VisitVecCnv(HVecCnv* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecCnv(HVecCnv* instruction) {
@@ -155,7 +155,7 @@ void InstructionCodeGeneratorMIPS::VisitVecCnv(HVecCnv* instruction) {
}
void LocationsBuilderMIPS::VisitVecNeg(HVecNeg* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecNeg(HVecNeg* instruction) {
@@ -202,7 +202,7 @@ void InstructionCodeGeneratorMIPS::VisitVecNeg(HVecNeg* instruction) {
}
void LocationsBuilderMIPS::VisitVecAbs(HVecAbs* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecAbs(HVecAbs* instruction) {
@@ -249,7 +249,7 @@ void InstructionCodeGeneratorMIPS::VisitVecAbs(HVecAbs* instruction) {
}
void LocationsBuilderMIPS::VisitVecNot(HVecNot* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecNot(HVecNot* instruction) {
@@ -304,7 +304,7 @@ static void CreateVecBinOpLocations(ArenaAllocator* arena, HVecBinaryOperation*
}
void LocationsBuilderMIPS::VisitVecAdd(HVecAdd* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecAdd(HVecAdd* instruction) {
@@ -346,7 +346,7 @@ void InstructionCodeGeneratorMIPS::VisitVecAdd(HVecAdd* instruction) {
}
void LocationsBuilderMIPS::VisitVecHalvingAdd(HVecHalvingAdd* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecHalvingAdd(HVecHalvingAdd* instruction) {
@@ -386,7 +386,7 @@ void InstructionCodeGeneratorMIPS::VisitVecHalvingAdd(HVecHalvingAdd* instructio
}
void LocationsBuilderMIPS::VisitVecSub(HVecSub* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecSub(HVecSub* instruction) {
@@ -428,7 +428,7 @@ void InstructionCodeGeneratorMIPS::VisitVecSub(HVecSub* instruction) {
}
void LocationsBuilderMIPS::VisitVecMul(HVecMul* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecMul(HVecMul* instruction) {
@@ -470,7 +470,7 @@ void InstructionCodeGeneratorMIPS::VisitVecMul(HVecMul* instruction) {
}
void LocationsBuilderMIPS::VisitVecDiv(HVecDiv* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecDiv(HVecDiv* instruction) {
@@ -494,7 +494,7 @@ void InstructionCodeGeneratorMIPS::VisitVecDiv(HVecDiv* instruction) {
}
void LocationsBuilderMIPS::VisitVecMin(HVecMin* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecMin(HVecMin* instruction) {
@@ -554,7 +554,7 @@ void InstructionCodeGeneratorMIPS::VisitVecMin(HVecMin* instruction) {
}
void LocationsBuilderMIPS::VisitVecMax(HVecMax* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecMax(HVecMax* instruction) {
@@ -614,7 +614,7 @@ void InstructionCodeGeneratorMIPS::VisitVecMax(HVecMax* instruction) {
}
void LocationsBuilderMIPS::VisitVecAnd(HVecAnd* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecAnd(HVecAnd* instruction) {
@@ -643,7 +643,7 @@ void InstructionCodeGeneratorMIPS::VisitVecAnd(HVecAnd* instruction) {
}
void LocationsBuilderMIPS::VisitVecAndNot(HVecAndNot* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecAndNot(HVecAndNot* instruction) {
@@ -651,7 +651,7 @@ void InstructionCodeGeneratorMIPS::VisitVecAndNot(HVecAndNot* instruction) {
}
void LocationsBuilderMIPS::VisitVecOr(HVecOr* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecOr(HVecOr* instruction) {
@@ -680,7 +680,7 @@ void InstructionCodeGeneratorMIPS::VisitVecOr(HVecOr* instruction) {
}
void LocationsBuilderMIPS::VisitVecXor(HVecXor* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecXor(HVecXor* instruction) {
@@ -729,7 +729,7 @@ static void CreateVecShiftLocations(ArenaAllocator* arena, HVecBinaryOperation*
}
void LocationsBuilderMIPS::VisitVecShl(HVecShl* instruction) {
- CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+ CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecShl(HVecShl* instruction) {
@@ -763,7 +763,7 @@ void InstructionCodeGeneratorMIPS::VisitVecShl(HVecShl* instruction) {
}
void LocationsBuilderMIPS::VisitVecShr(HVecShr* instruction) {
- CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+ CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecShr(HVecShr* instruction) {
@@ -797,7 +797,7 @@ void InstructionCodeGeneratorMIPS::VisitVecShr(HVecShr* instruction) {
}
void LocationsBuilderMIPS::VisitVecUShr(HVecUShr* instruction) {
- CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+ CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecUShr(HVecUShr* instruction) {
@@ -860,7 +860,7 @@ static void CreateVecAccumLocations(ArenaAllocator* arena, HVecOperation* instru
}
void LocationsBuilderMIPS::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) {
- CreateVecAccumLocations(GetGraph()->GetArena(), instruction);
+ CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) {
@@ -910,7 +910,7 @@ void InstructionCodeGeneratorMIPS::VisitVecMultiplyAccumulate(HVecMultiplyAccumu
}
void LocationsBuilderMIPS::VisitVecSADAccumulate(HVecSADAccumulate* instruction) {
- CreateVecAccumLocations(GetGraph()->GetArena(), instruction);
+ CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecSADAccumulate(HVecSADAccumulate* instruction) {
@@ -980,7 +980,7 @@ int32_t InstructionCodeGeneratorMIPS::VecAddress(LocationSummary* locations,
}
void LocationsBuilderMIPS::VisitVecLoad(HVecLoad* instruction) {
- CreateVecMemLocations(GetGraph()->GetArena(), instruction, /* is_load */ true);
+ CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load */ true);
}
void InstructionCodeGeneratorMIPS::VisitVecLoad(HVecLoad* instruction) {
@@ -1023,7 +1023,7 @@ void InstructionCodeGeneratorMIPS::VisitVecLoad(HVecLoad* instruction) {
}
void LocationsBuilderMIPS::VisitVecStore(HVecStore* instruction) {
- CreateVecMemLocations(GetGraph()->GetArena(), instruction, /* is_load */ false);
+ CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load */ false);
}
void InstructionCodeGeneratorMIPS::VisitVecStore(HVecStore* instruction) {
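
Every call site in these files reaches the allocator through the graph; as the helper signatures in the hunk headers show, GetGraph()->GetAllocator() hands back an ArenaAllocator*. The accessor itself is trivial; a sketch with an assumed member name:

    class HGraph {
     public:
      ArenaAllocator* GetAllocator() const { return allocator_; }
      // other members elided
     private:
      ArenaAllocator* const allocator_;  // arena for objects living as long as the graph
    };
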
diff --git a/compiler/optimizing/code_generator_vector_mips64.cc b/compiler/optimizing/code_generator_vector_mips64.cc
index 7d69773ae6..6b0162a375 100644
--- a/compiler/optimizing/code_generator_vector_mips64.cc
+++ b/compiler/optimizing/code_generator_vector_mips64.cc
@@ -29,7 +29,7 @@ VectorRegister VectorRegisterFrom(Location location) {
}
void LocationsBuilderMIPS64::VisitVecReplicateScalar(HVecReplicateScalar* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
case DataType::Type::kUint8:
@@ -132,7 +132,7 @@ static void CreateVecUnOpLocations(ArenaAllocator* arena, HVecUnaryOperation* in
}
void LocationsBuilderMIPS64::VisitVecReduce(HVecReduce* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecReduce(HVecReduce* instruction) {
@@ -140,7 +140,7 @@ void InstructionCodeGeneratorMIPS64::VisitVecReduce(HVecReduce* instruction) {
}
void LocationsBuilderMIPS64::VisitVecCnv(HVecCnv* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecCnv(HVecCnv* instruction) {
@@ -159,7 +159,7 @@ void InstructionCodeGeneratorMIPS64::VisitVecCnv(HVecCnv* instruction) {
}
void LocationsBuilderMIPS64::VisitVecNeg(HVecNeg* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecNeg(HVecNeg* instruction) {
@@ -206,7 +206,7 @@ void InstructionCodeGeneratorMIPS64::VisitVecNeg(HVecNeg* instruction) {
}
void LocationsBuilderMIPS64::VisitVecAbs(HVecAbs* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecAbs(HVecAbs* instruction) {
@@ -253,7 +253,7 @@ void InstructionCodeGeneratorMIPS64::VisitVecAbs(HVecAbs* instruction) {
}
void LocationsBuilderMIPS64::VisitVecNot(HVecNot* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecNot(HVecNot* instruction) {
@@ -308,7 +308,7 @@ static void CreateVecBinOpLocations(ArenaAllocator* arena, HVecBinaryOperation*
}
void LocationsBuilderMIPS64::VisitVecAdd(HVecAdd* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecAdd(HVecAdd* instruction) {
@@ -350,7 +350,7 @@ void InstructionCodeGeneratorMIPS64::VisitVecAdd(HVecAdd* instruction) {
}
void LocationsBuilderMIPS64::VisitVecHalvingAdd(HVecHalvingAdd* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecHalvingAdd(HVecHalvingAdd* instruction) {
@@ -390,7 +390,7 @@ void InstructionCodeGeneratorMIPS64::VisitVecHalvingAdd(HVecHalvingAdd* instruct
}
void LocationsBuilderMIPS64::VisitVecSub(HVecSub* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecSub(HVecSub* instruction) {
@@ -432,7 +432,7 @@ void InstructionCodeGeneratorMIPS64::VisitVecSub(HVecSub* instruction) {
}
void LocationsBuilderMIPS64::VisitVecMul(HVecMul* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecMul(HVecMul* instruction) {
@@ -474,7 +474,7 @@ void InstructionCodeGeneratorMIPS64::VisitVecMul(HVecMul* instruction) {
}
void LocationsBuilderMIPS64::VisitVecDiv(HVecDiv* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecDiv(HVecDiv* instruction) {
@@ -498,7 +498,7 @@ void InstructionCodeGeneratorMIPS64::VisitVecDiv(HVecDiv* instruction) {
}
void LocationsBuilderMIPS64::VisitVecMin(HVecMin* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecMin(HVecMin* instruction) {
@@ -558,7 +558,7 @@ void InstructionCodeGeneratorMIPS64::VisitVecMin(HVecMin* instruction) {
}
void LocationsBuilderMIPS64::VisitVecMax(HVecMax* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecMax(HVecMax* instruction) {
@@ -618,7 +618,7 @@ void InstructionCodeGeneratorMIPS64::VisitVecMax(HVecMax* instruction) {
}
void LocationsBuilderMIPS64::VisitVecAnd(HVecAnd* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecAnd(HVecAnd* instruction) {
@@ -647,7 +647,7 @@ void InstructionCodeGeneratorMIPS64::VisitVecAnd(HVecAnd* instruction) {
}
void LocationsBuilderMIPS64::VisitVecAndNot(HVecAndNot* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecAndNot(HVecAndNot* instruction) {
@@ -655,7 +655,7 @@ void InstructionCodeGeneratorMIPS64::VisitVecAndNot(HVecAndNot* instruction) {
}
void LocationsBuilderMIPS64::VisitVecOr(HVecOr* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecOr(HVecOr* instruction) {
@@ -684,7 +684,7 @@ void InstructionCodeGeneratorMIPS64::VisitVecOr(HVecOr* instruction) {
}
void LocationsBuilderMIPS64::VisitVecXor(HVecXor* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecXor(HVecXor* instruction) {
@@ -733,7 +733,7 @@ static void CreateVecShiftLocations(ArenaAllocator* arena, HVecBinaryOperation*
}
void LocationsBuilderMIPS64::VisitVecShl(HVecShl* instruction) {
- CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+ CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecShl(HVecShl* instruction) {
@@ -767,7 +767,7 @@ void InstructionCodeGeneratorMIPS64::VisitVecShl(HVecShl* instruction) {
}
void LocationsBuilderMIPS64::VisitVecShr(HVecShr* instruction) {
- CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+ CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecShr(HVecShr* instruction) {
@@ -801,7 +801,7 @@ void InstructionCodeGeneratorMIPS64::VisitVecShr(HVecShr* instruction) {
}
void LocationsBuilderMIPS64::VisitVecUShr(HVecUShr* instruction) {
- CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+ CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecUShr(HVecUShr* instruction) {
@@ -864,7 +864,7 @@ static void CreateVecAccumLocations(ArenaAllocator* arena, HVecOperation* instru
}
void LocationsBuilderMIPS64::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) {
- CreateVecAccumLocations(GetGraph()->GetArena(), instruction);
+ CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) {
@@ -914,7 +914,7 @@ void InstructionCodeGeneratorMIPS64::VisitVecMultiplyAccumulate(HVecMultiplyAccu
}
void LocationsBuilderMIPS64::VisitVecSADAccumulate(HVecSADAccumulate* instruction) {
- CreateVecAccumLocations(GetGraph()->GetArena(), instruction);
+ CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecSADAccumulate(HVecSADAccumulate* instruction) {
@@ -984,7 +984,7 @@ int32_t InstructionCodeGeneratorMIPS64::VecAddress(LocationSummary* locations,
}
void LocationsBuilderMIPS64::VisitVecLoad(HVecLoad* instruction) {
- CreateVecMemLocations(GetGraph()->GetArena(), instruction, /* is_load */ true);
+ CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load */ true);
}
void InstructionCodeGeneratorMIPS64::VisitVecLoad(HVecLoad* instruction) {
@@ -1027,7 +1027,7 @@ void InstructionCodeGeneratorMIPS64::VisitVecLoad(HVecLoad* instruction) {
}
void LocationsBuilderMIPS64::VisitVecStore(HVecStore* instruction) {
- CreateVecMemLocations(GetGraph()->GetArena(), instruction, /* is_load */ false);
+ CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load */ false);
}
void InstructionCodeGeneratorMIPS64::VisitVecStore(HVecStore* instruction) {
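
The accumulate forms above (VisitVecMultiplyAccumulate, VisitVecSADAccumulate) share CreateVecAccumLocations, which records three packed operands. An illustrative shape, assuming the usual convention that the accumulator is updated in place (backends may add temporaries on top of this):

    static void CreateVecAccumLocations(ArenaAllocator* arena, HVecOperation* instruction) {
      LocationSummary* locations = new (arena) LocationSummary(instruction);
      locations->SetInAt(0, Location::RequiresFpuRegister());  // accumulator
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetInAt(2, Location::RequiresFpuRegister());
      locations->SetOut(Location::SameAsFirstInput());  // result reuses the accumulator register
    }
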
diff --git a/compiler/optimizing/code_generator_vector_x86.cc b/compiler/optimizing/code_generator_vector_x86.cc
index a2ef1b1be9..699c02fb1e 100644
--- a/compiler/optimizing/code_generator_vector_x86.cc
+++ b/compiler/optimizing/code_generator_vector_x86.cc
@@ -26,7 +26,7 @@ namespace x86 {
#define __ down_cast<X86Assembler*>(GetAssembler())-> // NOLINT
void LocationsBuilderX86::VisitVecReplicateScalar(HVecReplicateScalar* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
HInstruction* input = instruction->InputAt(0);
bool is_zero = IsZeroBitPattern(input);
switch (instruction->GetPackedType()) {
@@ -117,7 +117,7 @@ void InstructionCodeGeneratorX86::VisitVecReplicateScalar(HVecReplicateScalar* i
}
void LocationsBuilderX86::VisitVecExtractScalar(HVecExtractScalar* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kInt64:
// Long needs extra temporary to store into the register pair.
@@ -202,7 +202,7 @@ static void CreateVecUnOpLocations(ArenaAllocator* arena, HVecUnaryOperation* in
}
void LocationsBuilderX86::VisitVecReduce(HVecReduce* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
// Long reduction or min/max require a temporary.
if (instruction->GetPackedType() == DataType::Type::kInt64 ||
instruction->GetKind() == HVecReduce::kMin ||
@@ -269,7 +269,7 @@ void InstructionCodeGeneratorX86::VisitVecReduce(HVecReduce* instruction) {
}
void LocationsBuilderX86::VisitVecCnv(HVecCnv* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86::VisitVecCnv(HVecCnv* instruction) {
@@ -287,7 +287,7 @@ void InstructionCodeGeneratorX86::VisitVecCnv(HVecCnv* instruction) {
}
void LocationsBuilderX86::VisitVecNeg(HVecNeg* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86::VisitVecNeg(HVecNeg* instruction) {
@@ -334,7 +334,7 @@ void InstructionCodeGeneratorX86::VisitVecNeg(HVecNeg* instruction) {
}
void LocationsBuilderX86::VisitVecAbs(HVecAbs* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
// Integral-abs requires a temporary for the comparison.
if (instruction->GetPackedType() == DataType::Type::kInt32) {
instruction->GetLocations()->AddTemp(Location::RequiresFpuRegister());
@@ -375,7 +375,7 @@ void InstructionCodeGeneratorX86::VisitVecAbs(HVecAbs* instruction) {
}
void LocationsBuilderX86::VisitVecNot(HVecNot* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
// Boolean-not requires a temporary to construct the 16 x one.
if (instruction->GetPackedType() == DataType::Type::kBool) {
instruction->GetLocations()->AddTemp(Location::RequiresFpuRegister());
@@ -447,7 +447,7 @@ static void CreateVecBinOpLocations(ArenaAllocator* arena, HVecBinaryOperation*
}
void LocationsBuilderX86::VisitVecAdd(HVecAdd* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86::VisitVecAdd(HVecAdd* instruction) {
@@ -489,7 +489,7 @@ void InstructionCodeGeneratorX86::VisitVecAdd(HVecAdd* instruction) {
}
void LocationsBuilderX86::VisitVecHalvingAdd(HVecHalvingAdd* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86::VisitVecHalvingAdd(HVecHalvingAdd* instruction) {
@@ -516,7 +516,7 @@ void InstructionCodeGeneratorX86::VisitVecHalvingAdd(HVecHalvingAdd* instruction
}
void LocationsBuilderX86::VisitVecSub(HVecSub* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86::VisitVecSub(HVecSub* instruction) {
@@ -558,7 +558,7 @@ void InstructionCodeGeneratorX86::VisitVecSub(HVecSub* instruction) {
}
void LocationsBuilderX86::VisitVecMul(HVecMul* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86::VisitVecMul(HVecMul* instruction) {
@@ -591,7 +591,7 @@ void InstructionCodeGeneratorX86::VisitVecMul(HVecMul* instruction) {
}
void LocationsBuilderX86::VisitVecDiv(HVecDiv* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86::VisitVecDiv(HVecDiv* instruction) {
@@ -615,7 +615,7 @@ void InstructionCodeGeneratorX86::VisitVecDiv(HVecDiv* instruction) {
}
void LocationsBuilderX86::VisitVecMin(HVecMin* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86::VisitVecMin(HVecMin* instruction) {
@@ -666,7 +666,7 @@ void InstructionCodeGeneratorX86::VisitVecMin(HVecMin* instruction) {
}
void LocationsBuilderX86::VisitVecMax(HVecMax* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86::VisitVecMax(HVecMax* instruction) {
@@ -717,7 +717,7 @@ void InstructionCodeGeneratorX86::VisitVecMax(HVecMax* instruction) {
}
void LocationsBuilderX86::VisitVecAnd(HVecAnd* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86::VisitVecAnd(HVecAnd* instruction) {
@@ -752,7 +752,7 @@ void InstructionCodeGeneratorX86::VisitVecAnd(HVecAnd* instruction) {
}
void LocationsBuilderX86::VisitVecAndNot(HVecAndNot* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86::VisitVecAndNot(HVecAndNot* instruction) {
@@ -787,7 +787,7 @@ void InstructionCodeGeneratorX86::VisitVecAndNot(HVecAndNot* instruction) {
}
void LocationsBuilderX86::VisitVecOr(HVecOr* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86::VisitVecOr(HVecOr* instruction) {
@@ -822,7 +822,7 @@ void InstructionCodeGeneratorX86::VisitVecOr(HVecOr* instruction) {
}
void LocationsBuilderX86::VisitVecXor(HVecXor* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86::VisitVecXor(HVecXor* instruction) {
@@ -875,7 +875,7 @@ static void CreateVecShiftLocations(ArenaAllocator* arena, HVecBinaryOperation*
}
void LocationsBuilderX86::VisitVecShl(HVecShl* instruction) {
- CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+ CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86::VisitVecShl(HVecShl* instruction) {
@@ -904,7 +904,7 @@ void InstructionCodeGeneratorX86::VisitVecShl(HVecShl* instruction) {
}
void LocationsBuilderX86::VisitVecShr(HVecShr* instruction) {
- CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+ CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86::VisitVecShr(HVecShr* instruction) {
@@ -929,7 +929,7 @@ void InstructionCodeGeneratorX86::VisitVecShr(HVecShr* instruction) {
}
void LocationsBuilderX86::VisitVecUShr(HVecUShr* instruction) {
- CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+ CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86::VisitVecUShr(HVecUShr* instruction) {
@@ -958,7 +958,7 @@ void InstructionCodeGeneratorX86::VisitVecUShr(HVecUShr* instruction) {
}
void LocationsBuilderX86::VisitVecSetScalars(HVecSetScalars* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
DCHECK_EQ(1u, instruction->InputCount()); // only one input currently implemented
@@ -1066,7 +1066,7 @@ static void CreateVecAccumLocations(ArenaAllocator* arena, HVecOperation* instru
}
void LocationsBuilderX86::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) {
- CreateVecAccumLocations(GetGraph()->GetArena(), instruction);
+ CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) {
@@ -1075,7 +1075,7 @@ void InstructionCodeGeneratorX86::VisitVecMultiplyAccumulate(HVecMultiplyAccumul
}
void LocationsBuilderX86::VisitVecSADAccumulate(HVecSADAccumulate* instruction) {
- CreateVecAccumLocations(GetGraph()->GetArena(), instruction);
+ CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86::VisitVecSADAccumulate(HVecSADAccumulate* instruction) {
@@ -1131,7 +1131,7 @@ static Address VecAddress(LocationSummary* locations, size_t size, bool is_strin
}
void LocationsBuilderX86::VisitVecLoad(HVecLoad* instruction) {
- CreateVecMemLocations(GetGraph()->GetArena(), instruction, /*is_load*/ true);
+ CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /*is_load*/ true);
// String load requires a temporary for the compressed load.
if (mirror::kUseStringCompression && instruction->IsStringCharAt()) {
instruction->GetLocations()->AddTemp(Location::RequiresFpuRegister());
@@ -1194,7 +1194,7 @@ void InstructionCodeGeneratorX86::VisitVecLoad(HVecLoad* instruction) {
}
void LocationsBuilderX86::VisitVecStore(HVecStore* instruction) {
- CreateVecMemLocations(GetGraph()->GetArena(), instruction, /*is_load*/ false);
+ CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /*is_load*/ false);
}
void InstructionCodeGeneratorX86::VisitVecStore(HVecStore* instruction) {
diff --git a/compiler/optimizing/code_generator_vector_x86_64.cc b/compiler/optimizing/code_generator_vector_x86_64.cc
index 2270f6b9c8..d0c4320411 100644
--- a/compiler/optimizing/code_generator_vector_x86_64.cc
+++ b/compiler/optimizing/code_generator_vector_x86_64.cc
@@ -26,7 +26,7 @@ namespace x86_64 {
#define __ down_cast<X86_64Assembler*>(GetAssembler())-> // NOLINT
void LocationsBuilderX86_64::VisitVecReplicateScalar(HVecReplicateScalar* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
HInstruction* input = instruction->InputAt(0);
bool is_zero = IsZeroBitPattern(input);
switch (instruction->GetPackedType()) {
@@ -108,7 +108,7 @@ void InstructionCodeGeneratorX86_64::VisitVecReplicateScalar(HVecReplicateScalar
}
void LocationsBuilderX86_64::VisitVecExtractScalar(HVecExtractScalar* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
case DataType::Type::kUint8:
@@ -185,7 +185,7 @@ static void CreateVecUnOpLocations(ArenaAllocator* arena, HVecUnaryOperation* in
}
void LocationsBuilderX86_64::VisitVecReduce(HVecReduce* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
// Long reduction or min/max require a temporary.
if (instruction->GetPackedType() == DataType::Type::kInt64 ||
instruction->GetKind() == HVecReduce::kMin ||
@@ -252,7 +252,7 @@ void InstructionCodeGeneratorX86_64::VisitVecReduce(HVecReduce* instruction) {
}
void LocationsBuilderX86_64::VisitVecCnv(HVecCnv* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86_64::VisitVecCnv(HVecCnv* instruction) {
@@ -270,7 +270,7 @@ void InstructionCodeGeneratorX86_64::VisitVecCnv(HVecCnv* instruction) {
}
void LocationsBuilderX86_64::VisitVecNeg(HVecNeg* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86_64::VisitVecNeg(HVecNeg* instruction) {
@@ -317,7 +317,7 @@ void InstructionCodeGeneratorX86_64::VisitVecNeg(HVecNeg* instruction) {
}
void LocationsBuilderX86_64::VisitVecAbs(HVecAbs* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
// Integral-abs requires a temporary for the comparison.
if (instruction->GetPackedType() == DataType::Type::kInt32) {
instruction->GetLocations()->AddTemp(Location::RequiresFpuRegister());
@@ -358,7 +358,7 @@ void InstructionCodeGeneratorX86_64::VisitVecAbs(HVecAbs* instruction) {
}
void LocationsBuilderX86_64::VisitVecNot(HVecNot* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
// Boolean-not requires a temporary to construct the 16 x one.
if (instruction->GetPackedType() == DataType::Type::kBool) {
instruction->GetLocations()->AddTemp(Location::RequiresFpuRegister());
@@ -430,7 +430,7 @@ static void CreateVecBinOpLocations(ArenaAllocator* arena, HVecBinaryOperation*
}
void LocationsBuilderX86_64::VisitVecAdd(HVecAdd* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86_64::VisitVecAdd(HVecAdd* instruction) {
@@ -472,7 +472,7 @@ void InstructionCodeGeneratorX86_64::VisitVecAdd(HVecAdd* instruction) {
}
void LocationsBuilderX86_64::VisitVecHalvingAdd(HVecHalvingAdd* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86_64::VisitVecHalvingAdd(HVecHalvingAdd* instruction) {
@@ -499,7 +499,7 @@ void InstructionCodeGeneratorX86_64::VisitVecHalvingAdd(HVecHalvingAdd* instruct
}
void LocationsBuilderX86_64::VisitVecSub(HVecSub* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86_64::VisitVecSub(HVecSub* instruction) {
@@ -541,7 +541,7 @@ void InstructionCodeGeneratorX86_64::VisitVecSub(HVecSub* instruction) {
}
void LocationsBuilderX86_64::VisitVecMul(HVecMul* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86_64::VisitVecMul(HVecMul* instruction) {
@@ -574,7 +574,7 @@ void InstructionCodeGeneratorX86_64::VisitVecMul(HVecMul* instruction) {
}
void LocationsBuilderX86_64::VisitVecDiv(HVecDiv* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86_64::VisitVecDiv(HVecDiv* instruction) {
@@ -598,7 +598,7 @@ void InstructionCodeGeneratorX86_64::VisitVecDiv(HVecDiv* instruction) {
}
void LocationsBuilderX86_64::VisitVecMin(HVecMin* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86_64::VisitVecMin(HVecMin* instruction) {
@@ -649,7 +649,7 @@ void InstructionCodeGeneratorX86_64::VisitVecMin(HVecMin* instruction) {
}
void LocationsBuilderX86_64::VisitVecMax(HVecMax* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86_64::VisitVecMax(HVecMax* instruction) {
@@ -700,7 +700,7 @@ void InstructionCodeGeneratorX86_64::VisitVecMax(HVecMax* instruction) {
}
void LocationsBuilderX86_64::VisitVecAnd(HVecAnd* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86_64::VisitVecAnd(HVecAnd* instruction) {
@@ -735,7 +735,7 @@ void InstructionCodeGeneratorX86_64::VisitVecAnd(HVecAnd* instruction) {
}
void LocationsBuilderX86_64::VisitVecAndNot(HVecAndNot* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86_64::VisitVecAndNot(HVecAndNot* instruction) {
@@ -770,7 +770,7 @@ void InstructionCodeGeneratorX86_64::VisitVecAndNot(HVecAndNot* instruction) {
}
void LocationsBuilderX86_64::VisitVecOr(HVecOr* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86_64::VisitVecOr(HVecOr* instruction) {
@@ -805,7 +805,7 @@ void InstructionCodeGeneratorX86_64::VisitVecOr(HVecOr* instruction) {
}
void LocationsBuilderX86_64::VisitVecXor(HVecXor* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86_64::VisitVecXor(HVecXor* instruction) {
@@ -858,7 +858,7 @@ static void CreateVecShiftLocations(ArenaAllocator* arena, HVecBinaryOperation*
}
void LocationsBuilderX86_64::VisitVecShl(HVecShl* instruction) {
- CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+ CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86_64::VisitVecShl(HVecShl* instruction) {
@@ -887,7 +887,7 @@ void InstructionCodeGeneratorX86_64::VisitVecShl(HVecShl* instruction) {
}
void LocationsBuilderX86_64::VisitVecShr(HVecShr* instruction) {
- CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+ CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86_64::VisitVecShr(HVecShr* instruction) {
@@ -912,7 +912,7 @@ void InstructionCodeGeneratorX86_64::VisitVecShr(HVecShr* instruction) {
}
void LocationsBuilderX86_64::VisitVecUShr(HVecUShr* instruction) {
- CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+ CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86_64::VisitVecUShr(HVecUShr* instruction) {
@@ -941,7 +941,7 @@ void InstructionCodeGeneratorX86_64::VisitVecUShr(HVecUShr* instruction) {
}
void LocationsBuilderX86_64::VisitVecSetScalars(HVecSetScalars* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
DCHECK_EQ(1u, instruction->InputCount()); // only one input currently implemented
@@ -1039,7 +1039,7 @@ static void CreateVecAccumLocations(ArenaAllocator* arena, HVecOperation* instru
}
void LocationsBuilderX86_64::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) {
- CreateVecAccumLocations(GetGraph()->GetArena(), instruction);
+ CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86_64::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) {
@@ -1048,7 +1048,7 @@ void InstructionCodeGeneratorX86_64::VisitVecMultiplyAccumulate(HVecMultiplyAccu
}
void LocationsBuilderX86_64::VisitVecSADAccumulate(HVecSADAccumulate* instruction) {
- CreateVecAccumLocations(GetGraph()->GetArena(), instruction);
+ CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86_64::VisitVecSADAccumulate(HVecSADAccumulate* instruction) {
@@ -1104,7 +1104,7 @@ static Address VecAddress(LocationSummary* locations, size_t size, bool is_strin
}
void LocationsBuilderX86_64::VisitVecLoad(HVecLoad* instruction) {
- CreateVecMemLocations(GetGraph()->GetArena(), instruction, /*is_load*/ true);
+ CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /*is_load*/ true);
// String load requires a temporary for the compressed load.
if (mirror::kUseStringCompression && instruction->IsStringCharAt()) {
instruction->GetLocations()->AddTemp(Location::RequiresFpuRegister());
@@ -1167,7 +1167,7 @@ void InstructionCodeGeneratorX86_64::VisitVecLoad(HVecLoad* instruction) {
}
void LocationsBuilderX86_64::VisitVecStore(HVecStore* instruction) {
- CreateVecMemLocations(GetGraph()->GetArena(), instruction, /*is_load*/ false);
+ CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /*is_load*/ false);
}
void InstructionCodeGeneratorX86_64::VisitVecStore(HVecStore* instruction) {
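The vector code-generator hunks above all lean on one placement-new pattern: a LocationSummary (or fixup) is constructed directly inside the graph's allocator via new (GetGraph()->GetAllocator()) ..., so every byte of pass-local data is attributed to that allocator. The short self-contained C++ sketch below is only a rough model of that mechanism under invented names (ToyArenaAllocator and ToyLocationSummary are illustrative stand-ins, not ART's ArenaAllocator or LocationSummary).

// Hypothetical sketch: how "new (allocator) T(...)" can route construction
// through an arena-style allocator that counts what it hands out.
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <memory>
#include <vector>

class ToyArenaAllocator {
 public:
  // Hands out raw storage and remembers how much was requested,
  // so per-pass memory usage can be reported.
  void* Alloc(size_t bytes) {
    blocks_.emplace_back(new uint8_t[bytes]);
    bytes_allocated_ += bytes;
    return blocks_.back().get();
  }
  size_t BytesAllocated() const { return bytes_allocated_; }

 private:
  std::vector<std::unique_ptr<uint8_t[]>> blocks_;
  size_t bytes_allocated_ = 0;
};

// Placement operator new: "new (&allocator) T(...)" ends up here.
void* operator new(size_t bytes, ToyArenaAllocator* allocator) {
  return allocator->Alloc(bytes);
}
// Matching placement delete, used only if the constructor throws.
void operator delete(void* /*ptr*/, ToyArenaAllocator* /*allocator*/) {}

struct ToyLocationSummary {
  explicit ToyLocationSummary(int inputs) : input_count(inputs) {}
  int input_count;
};

int main() {
  ToyArenaAllocator allocator;
  // Mirrors the call sites above: construct the object directly in the arena.
  ToyLocationSummary* locations = new (&allocator) ToyLocationSummary(2);
  std::cout << "inputs: " << locations->input_count
            << ", arena bytes: " << allocator.BytesAllocated() << "\n";
  // No delete: arena memory is released wholesale when the allocator dies.
}

Because the arena owns every block, the call sites never delete individual objects; all of a pass's memory goes away when its allocator does, which is also what makes the per-allocation accounting cheap.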
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 35156491e8..d8a47fa1ea 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -414,7 +414,7 @@ class ArraySetSlowPathX86 : public SlowPathCode {
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConvention calling_convention;
- HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+ HParallelMove parallel_move(codegen->GetGraph()->GetAllocator());
parallel_move.AddMove(
locations->InAt(0),
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
@@ -811,7 +811,7 @@ class ReadBarrierForHeapReferenceSlowPathX86 : public SlowPathCode {
// We're moving two or three locations to locations that could
// overlap, so we need a parallel move resolver.
InvokeRuntimeCallingConvention calling_convention;
- HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+ HParallelMove parallel_move(codegen->GetGraph()->GetAllocator());
parallel_move.AddMove(ref_,
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
DataType::Type::kReference,
@@ -1030,21 +1030,21 @@ CodeGeneratorX86::CodeGeneratorX86(HGraph* graph,
block_labels_(nullptr),
location_builder_(graph, this),
instruction_visitor_(graph, this),
- move_resolver_(graph->GetArena(), this),
- assembler_(graph->GetArena()),
+ move_resolver_(graph->GetAllocator(), this),
+ assembler_(graph->GetAllocator()),
isa_features_(isa_features),
- boot_image_method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- method_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- boot_image_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- string_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- jit_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- jit_class_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ boot_image_method_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ method_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ boot_image_type_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ type_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ string_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ jit_string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ jit_class_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
constant_area_start_(-1),
- fixups_to_jump_tables_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ fixups_to_jump_tables_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
method_address_offset_(std::less<uint32_t>(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
+ graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)) {
// Use a fake return address register to mimic Quick.
AddAllocatedRegister(Location::RegisterLocation(kFakeReturnRegister));
}
@@ -1333,7 +1333,7 @@ void CodeGeneratorX86::MoveConstant(Location location, int32_t value) {
}
void CodeGeneratorX86::MoveLocation(Location dst, Location src, DataType::Type dst_type) {
- HParallelMove move(GetGraph()->GetArena());
+ HParallelMove move(GetGraph()->GetAllocator());
if (dst_type == DataType::Type::kInt64 && !src.IsConstant() && !src.IsFpuRegister()) {
move.AddMove(src.ToLow(), dst.ToLow(), DataType::Type::kInt32, nullptr);
move.AddMove(src.ToHigh(), dst.ToHigh(), DataType::Type::kInt32, nullptr);
@@ -1681,7 +1681,7 @@ void InstructionCodeGeneratorX86::GenerateTestAndBranch(HInstruction* instructio
}
void LocationsBuilderX86::VisitIf(HIf* if_instr) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(if_instr);
if (IsBooleanValueOrMaterializedCondition(if_instr->InputAt(0))) {
locations->SetInAt(0, Location::Any());
}
@@ -1698,7 +1698,7 @@ void InstructionCodeGeneratorX86::VisitIf(HIf* if_instr) {
}
void LocationsBuilderX86::VisitDeoptimize(HDeoptimize* deoptimize) {
- LocationSummary* locations = new (GetGraph()->GetArena())
+ LocationSummary* locations = new (GetGraph()->GetAllocator())
LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
InvokeRuntimeCallingConvention calling_convention;
RegisterSet caller_saves = RegisterSet::Empty();
@@ -1718,7 +1718,7 @@ void InstructionCodeGeneratorX86::VisitDeoptimize(HDeoptimize* deoptimize) {
}
void LocationsBuilderX86::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
- LocationSummary* locations = new (GetGraph()->GetArena())
+ LocationSummary* locations = new (GetGraph()->GetAllocator())
LocationSummary(flag, LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
}
@@ -1750,7 +1750,7 @@ static bool SelectCanUseCMOV(HSelect* select) {
}
void LocationsBuilderX86::VisitSelect(HSelect* select) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(select);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(select);
if (DataType::IsFloatingPointType(select->GetType())) {
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetInAt(1, Location::Any());
@@ -1844,7 +1844,7 @@ void InstructionCodeGeneratorX86::VisitSelect(HSelect* select) {
}
void LocationsBuilderX86::VisitNativeDebugInfo(HNativeDebugInfo* info) {
- new (GetGraph()->GetArena()) LocationSummary(info);
+ new (GetGraph()->GetAllocator()) LocationSummary(info);
}
void InstructionCodeGeneratorX86::VisitNativeDebugInfo(HNativeDebugInfo*) {
@@ -1857,7 +1857,7 @@ void CodeGeneratorX86::GenerateNop() {
void LocationsBuilderX86::HandleCondition(HCondition* cond) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(cond, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(cond, LocationSummary::kNoCall);
// Handle the long/FP comparisons made in instruction simplification.
switch (cond->InputAt(0)->GetType()) {
case DataType::Type::kInt64: {
@@ -2024,7 +2024,7 @@ void InstructionCodeGeneratorX86::VisitAboveOrEqual(HAboveOrEqual* comp) {
void LocationsBuilderX86::VisitIntConstant(HIntConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -2034,7 +2034,7 @@ void InstructionCodeGeneratorX86::VisitIntConstant(HIntConstant* constant ATTRIB
void LocationsBuilderX86::VisitNullConstant(HNullConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -2044,7 +2044,7 @@ void InstructionCodeGeneratorX86::VisitNullConstant(HNullConstant* constant ATTR
void LocationsBuilderX86::VisitLongConstant(HLongConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -2054,7 +2054,7 @@ void InstructionCodeGeneratorX86::VisitLongConstant(HLongConstant* constant ATTR
void LocationsBuilderX86::VisitFloatConstant(HFloatConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -2064,7 +2064,7 @@ void InstructionCodeGeneratorX86::VisitFloatConstant(HFloatConstant* constant AT
void LocationsBuilderX86::VisitDoubleConstant(HDoubleConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -2099,7 +2099,7 @@ void InstructionCodeGeneratorX86::VisitReturnVoid(HReturnVoid* ret ATTRIBUTE_UNU
void LocationsBuilderX86::VisitReturn(HReturn* ret) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(ret, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(ret, LocationSummary::kNoCall);
switch (ret->InputAt(0)->GetType()) {
case DataType::Type::kReference:
case DataType::Type::kBool:
@@ -2300,7 +2300,7 @@ void InstructionCodeGeneratorX86::VisitInvokePolymorphic(HInvokePolymorphic* inv
void LocationsBuilderX86::VisitNeg(HNeg* neg) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(neg, LocationSummary::kNoCall);
switch (neg->GetResultType()) {
case DataType::Type::kInt32:
case DataType::Type::kInt64:
@@ -2381,7 +2381,7 @@ void InstructionCodeGeneratorX86::VisitNeg(HNeg* neg) {
void LocationsBuilderX86::VisitX86FPNeg(HX86FPNeg* neg) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(neg, LocationSummary::kNoCall);
DCHECK(DataType::IsFloatingPointType(neg->GetType()));
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetInAt(1, Location::RequiresRegister());
@@ -2423,7 +2423,7 @@ void LocationsBuilderX86::VisitTypeConversion(HTypeConversion* conversion) {
? LocationSummary::kCallOnMainOnly
: LocationSummary::kNoCall;
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(conversion, call_kind);
+ new (GetGraph()->GetAllocator()) LocationSummary(conversion, call_kind);
switch (result_type) {
case DataType::Type::kUint8:
@@ -2921,7 +2921,7 @@ void InstructionCodeGeneratorX86::VisitTypeConversion(HTypeConversion* conversio
void LocationsBuilderX86::VisitAdd(HAdd* add) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(add, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(add, LocationSummary::kNoCall);
switch (add->GetResultType()) {
case DataType::Type::kInt32: {
locations->SetInAt(0, Location::RequiresRegister());
@@ -3048,7 +3048,7 @@ void InstructionCodeGeneratorX86::VisitAdd(HAdd* add) {
void LocationsBuilderX86::VisitSub(HSub* sub) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(sub, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(sub, LocationSummary::kNoCall);
switch (sub->GetResultType()) {
case DataType::Type::kInt32:
case DataType::Type::kInt64: {
@@ -3154,7 +3154,7 @@ void InstructionCodeGeneratorX86::VisitSub(HSub* sub) {
void LocationsBuilderX86::VisitMul(HMul* mul) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(mul, LocationSummary::kNoCall);
switch (mul->GetResultType()) {
case DataType::Type::kInt32:
locations->SetInAt(0, Location::RequiresRegister());
@@ -3581,7 +3581,7 @@ void InstructionCodeGeneratorX86::GenerateDivRemIntegral(HBinaryOperation* instr
GenerateDivRemWithAnyConstant(instruction);
}
} else {
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) DivRemMinusOneSlowPathX86(
+ SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) DivRemMinusOneSlowPathX86(
instruction, out.AsRegister<Register>(), is_div);
codegen_->AddSlowPath(slow_path);
@@ -3630,7 +3630,7 @@ void LocationsBuilderX86::VisitDiv(HDiv* div) {
LocationSummary::CallKind call_kind = (div->GetResultType() == DataType::Type::kInt64)
? LocationSummary::kCallOnMainOnly
: LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(div, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(div, call_kind);
switch (div->GetResultType()) {
case DataType::Type::kInt32: {
@@ -3735,7 +3735,7 @@ void LocationsBuilderX86::VisitRem(HRem* rem) {
LocationSummary::CallKind call_kind = (rem->GetResultType() == DataType::Type::kInt64)
? LocationSummary::kCallOnMainOnly
: LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(rem, call_kind);
switch (type) {
case DataType::Type::kInt32: {
@@ -3817,7 +3817,7 @@ void LocationsBuilderX86::VisitDivZeroCheck(HDivZeroCheck* instruction) {
}
void InstructionCodeGeneratorX86::VisitDivZeroCheck(HDivZeroCheck* instruction) {
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) DivZeroCheckSlowPathX86(instruction);
+ SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) DivZeroCheckSlowPathX86(instruction);
codegen_->AddSlowPath(slow_path);
LocationSummary* locations = instruction->GetLocations();
@@ -3867,7 +3867,7 @@ void LocationsBuilderX86::HandleShift(HBinaryOperation* op) {
DCHECK(op->IsShl() || op->IsShr() || op->IsUShr());
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(op, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(op, LocationSummary::kNoCall);
switch (op->GetResultType()) {
case DataType::Type::kInt32:
@@ -4062,7 +4062,7 @@ void InstructionCodeGeneratorX86::GenerateUShrLong(const Location& loc, Register
void LocationsBuilderX86::VisitRor(HRor* ror) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(ror, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(ror, LocationSummary::kNoCall);
switch (ror->GetResultType()) {
case DataType::Type::kInt64:
@@ -4170,8 +4170,8 @@ void InstructionCodeGeneratorX86::VisitUShr(HUShr* ushr) {
}
void LocationsBuilderX86::VisitNewInstance(HNewInstance* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
locations->SetOut(Location::RegisterLocation(EAX));
if (instruction->IsStringAlloc()) {
locations->AddTemp(Location::RegisterLocation(kMethodRegisterArgument));
@@ -4199,8 +4199,8 @@ void InstructionCodeGeneratorX86::VisitNewInstance(HNewInstance* instruction) {
}
void LocationsBuilderX86::VisitNewArray(HNewArray* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
locations->SetOut(Location::RegisterLocation(EAX));
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
@@ -4219,7 +4219,7 @@ void InstructionCodeGeneratorX86::VisitNewArray(HNewArray* instruction) {
void LocationsBuilderX86::VisitParameterValue(HParameterValue* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
if (location.IsStackSlot()) {
location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
@@ -4235,7 +4235,7 @@ void InstructionCodeGeneratorX86::VisitParameterValue(
void LocationsBuilderX86::VisitCurrentMethod(HCurrentMethod* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetOut(Location::RegisterLocation(kMethodRegisterArgument));
}
@@ -4244,7 +4244,7 @@ void InstructionCodeGeneratorX86::VisitCurrentMethod(HCurrentMethod* instruction
void LocationsBuilderX86::VisitClassTableGet(HClassTableGet* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister());
}
@@ -4270,7 +4270,7 @@ void InstructionCodeGeneratorX86::VisitClassTableGet(HClassTableGet* instruction
void LocationsBuilderX86::VisitNot(HNot* not_) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(not_, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(not_, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::SameAsFirstInput());
}
@@ -4297,7 +4297,7 @@ void InstructionCodeGeneratorX86::VisitNot(HNot* not_) {
void LocationsBuilderX86::VisitBooleanNot(HBooleanNot* bool_not) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(bool_not, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(bool_not, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::SameAsFirstInput());
}
@@ -4312,7 +4312,7 @@ void InstructionCodeGeneratorX86::VisitBooleanNot(HBooleanNot* bool_not) {
void LocationsBuilderX86::VisitCompare(HCompare* compare) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(compare, LocationSummary::kNoCall);
switch (compare->InputAt(0)->GetType()) {
case DataType::Type::kBool:
case DataType::Type::kUint8:
@@ -4431,7 +4431,7 @@ void InstructionCodeGeneratorX86::VisitCompare(HCompare* compare) {
void LocationsBuilderX86::VisitPhi(HPhi* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) {
locations->SetInAt(i, Location::Any());
}
@@ -4714,10 +4714,10 @@ void LocationsBuilderX86::HandleFieldGet(HInstruction* instruction, const FieldI
bool object_field_get_with_read_barrier =
kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference);
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction,
- kEmitCompilerReadBarrier ?
- LocationSummary::kCallOnSlowPath :
- LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction,
+ kEmitCompilerReadBarrier
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall);
if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -4862,7 +4862,7 @@ void LocationsBuilderX86::HandleFieldSet(HInstruction* instruction, const FieldI
DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
bool is_volatile = field_info.IsVolatile();
DataType::Type field_type = field_info.GetFieldType();
@@ -5149,7 +5149,7 @@ void CodeGeneratorX86::GenerateImplicitNullCheck(HNullCheck* instruction) {
}
void CodeGeneratorX86::GenerateExplicitNullCheck(HNullCheck* instruction) {
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathX86(instruction);
+ SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) NullCheckSlowPathX86(instruction);
AddSlowPath(slow_path);
LocationSummary* locations = instruction->GetLocations();
@@ -5176,10 +5176,10 @@ void LocationsBuilderX86::VisitArrayGet(HArrayGet* instruction) {
bool object_array_get_with_read_barrier =
kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference);
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction,
- object_array_get_with_read_barrier ?
- LocationSummary::kCallOnSlowPath :
- LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction,
+ object_array_get_with_read_barrier
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall);
if (object_array_get_with_read_barrier && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -5332,7 +5332,7 @@ void LocationsBuilderX86::VisitArraySet(HArraySet* instruction) {
CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck();
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
instruction,
may_need_runtime_call_for_type_check ?
LocationSummary::kCallOnSlowPath :
@@ -5427,7 +5427,7 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) {
Location temp_loc = locations->GetTemp(0);
Register temp = temp_loc.AsRegister<Register>();
if (may_need_runtime_call_for_type_check) {
- slow_path = new (GetGraph()->GetArena()) ArraySetSlowPathX86(instruction);
+ slow_path = new (GetGraph()->GetAllocator()) ArraySetSlowPathX86(instruction);
codegen_->AddSlowPath(slow_path);
if (instruction->GetValueCanBeNull()) {
__ testl(register_value, register_value);
@@ -5570,7 +5570,7 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) {
}
void LocationsBuilderX86::VisitArrayLength(HArrayLength* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
locations->SetInAt(0, Location::RequiresRegister());
if (!instruction->IsEmittedAtUseSite()) {
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
@@ -5618,7 +5618,7 @@ void InstructionCodeGeneratorX86::VisitBoundsCheck(HBoundsCheck* instruction) {
Location index_loc = locations->InAt(0);
Location length_loc = locations->InAt(1);
SlowPathCode* slow_path =
- new (GetGraph()->GetArena()) BoundsCheckSlowPathX86(instruction);
+ new (GetGraph()->GetAllocator()) BoundsCheckSlowPathX86(instruction);
if (length_loc.IsConstant()) {
int32_t length = CodeGenerator::GetInt32ValueOf(length_loc.GetConstant());
@@ -5684,8 +5684,8 @@ void InstructionCodeGeneratorX86::VisitParallelMove(HParallelMove* instruction)
}
void LocationsBuilderX86::VisitSuspendCheck(HSuspendCheck* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnSlowPath);
// In suspend check slow path, usually there are no caller-save registers at all.
// If SIMD instructions are present, however, we force spilling all live SIMD
// registers in full width (since the runtime only saves/restores lower part).
@@ -5712,7 +5712,7 @@ void InstructionCodeGeneratorX86::GenerateSuspendCheck(HSuspendCheck* instructio
SuspendCheckSlowPathX86* slow_path =
down_cast<SuspendCheckSlowPathX86*>(instruction->GetSlowPath());
if (slow_path == nullptr) {
- slow_path = new (GetGraph()->GetArena()) SuspendCheckSlowPathX86(instruction, successor);
+ slow_path = new (GetGraph()->GetAllocator()) SuspendCheckSlowPathX86(instruction, successor);
instruction->SetSlowPath(slow_path);
codegen_->AddSlowPath(slow_path);
if (successor != nullptr) {
@@ -6044,7 +6044,7 @@ void LocationsBuilderX86::VisitLoadClass(HLoadClass* cls) {
LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(cls, call_kind);
if (kUseBakerReadBarrier && requires_read_barrier && !cls->NeedsEnvironment()) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -6165,7 +6165,7 @@ void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFE
if (generate_null_check || cls->MustGenerateClinitCheck()) {
DCHECK(cls->CanCallRuntime());
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathX86(
+ SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathX86(
cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
codegen_->AddSlowPath(slow_path);
@@ -6184,7 +6184,7 @@ void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFE
void LocationsBuilderX86::VisitClinitCheck(HClinitCheck* check) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
+ new (GetGraph()->GetAllocator()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
locations->SetInAt(0, Location::RequiresRegister());
if (check->HasUses()) {
locations->SetOut(Location::SameAsFirstInput());
@@ -6193,7 +6193,7 @@ void LocationsBuilderX86::VisitClinitCheck(HClinitCheck* check) {
void InstructionCodeGeneratorX86::VisitClinitCheck(HClinitCheck* check) {
// We assume the class to not be null.
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathX86(
+ SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathX86(
check->GetLoadClass(), check, check->GetDexPc(), true);
codegen_->AddSlowPath(slow_path);
GenerateClassInitializationCheck(slow_path,
@@ -6229,7 +6229,7 @@ HLoadString::LoadKind CodeGeneratorX86::GetSupportedLoadStringKind(
void LocationsBuilderX86::VisitLoadString(HLoadString* load) {
LocationSummary::CallKind call_kind = CodeGenerator::GetLoadStringCallKind(load);
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(load, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(load, call_kind);
HLoadString::LoadKind load_kind = load->GetLoadKind();
if (load_kind == HLoadString::LoadKind::kBootImageLinkTimePcRelative ||
load_kind == HLoadString::LoadKind::kBootImageInternTable ||
@@ -6300,7 +6300,7 @@ void InstructionCodeGeneratorX86::VisitLoadString(HLoadString* load) NO_THREAD_S
Label* fixup_label = codegen_->NewStringBssEntryPatch(load);
// /* GcRoot<mirror::String> */ out = *address /* PC-relative */
GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, kCompilerReadBarrierOption);
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathX86(load);
+ SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) LoadStringSlowPathX86(load);
codegen_->AddSlowPath(slow_path);
__ testl(out, out);
__ j(kEqual, slow_path->GetEntryLabel());
@@ -6333,7 +6333,7 @@ static Address GetExceptionTlsAddress() {
void LocationsBuilderX86::VisitLoadException(HLoadException* load) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(load, LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
}
@@ -6342,7 +6342,7 @@ void InstructionCodeGeneratorX86::VisitLoadException(HLoadException* load) {
}
void LocationsBuilderX86::VisitClearException(HClearException* clear) {
- new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(clear, LocationSummary::kNoCall);
}
void InstructionCodeGeneratorX86::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
@@ -6350,8 +6350,8 @@ void InstructionCodeGeneratorX86::VisitClearException(HClearException* clear ATT
}
void LocationsBuilderX86::VisitThrow(HThrow* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
}
@@ -6403,7 +6403,8 @@ void LocationsBuilderX86::VisitInstanceOf(HInstanceOf* instruction) {
break;
}
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations =
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
if (baker_read_barrier_slow_path) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -6580,8 +6581,8 @@ void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) {
__ cmpl(out, Address(ESP, cls.GetStackIndex()));
}
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86(instruction,
- /* is_fatal */ false);
+ slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathX86(instruction,
+ /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ j(kNotEqual, slow_path->GetEntryLabel());
__ movl(out, Immediate(1));
@@ -6612,8 +6613,8 @@ void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) {
// call to the runtime not using a type checking slow path).
// This should also be beneficial for the other cases above.
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86(instruction,
- /* is_fatal */ false);
+ slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathX86(instruction,
+ /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ jmp(slow_path->GetEntryLabel());
if (zero.IsLinked()) {
@@ -6661,7 +6662,8 @@ void LocationsBuilderX86::VisitCheckCast(HCheckCast* instruction) {
IsTypeCheckSlowPathFatal(type_check_kind, throws_into_catch)
? LocationSummary::kNoCall
: LocationSummary::kCallOnSlowPath;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations =
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RequiresRegister());
if (type_check_kind == TypeCheckKind::kInterfaceCheck) {
// Require a register for the interface check since there is a loop that compares the class to
@@ -6704,8 +6706,8 @@ void InstructionCodeGeneratorX86::VisitCheckCast(HCheckCast* instruction) {
IsTypeCheckSlowPathFatal(type_check_kind, instruction->CanThrowIntoCatchBlock());
SlowPathCode* type_check_slow_path =
- new (GetGraph()->GetArena()) TypeCheckSlowPathX86(instruction,
- is_type_check_slow_path_fatal);
+ new (GetGraph()->GetAllocator()) TypeCheckSlowPathX86(instruction,
+ is_type_check_slow_path_fatal);
codegen_->AddSlowPath(type_check_slow_path);
NearLabel done;
@@ -6902,8 +6904,8 @@ void InstructionCodeGeneratorX86::VisitCheckCast(HCheckCast* instruction) {
}
void LocationsBuilderX86::VisitMonitorOperation(HMonitorOperation* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
}
@@ -6926,7 +6928,7 @@ void LocationsBuilderX86::VisitXor(HXor* instruction) { HandleBitwiseOperation(i
void LocationsBuilderX86::HandleBitwiseOperation(HBinaryOperation* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
DCHECK(instruction->GetResultType() == DataType::Type::kInt32
|| instruction->GetResultType() == DataType::Type::kInt64);
locations->SetInAt(0, Location::RequiresRegister());
@@ -7148,7 +7150,7 @@ void InstructionCodeGeneratorX86::GenerateGcRootFieldLoad(
"have different sizes.");
// Slow path marking the GC root `root`.
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathX86(
+ SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathX86(
instruction, root, /* unpoison_ref_before_marking */ false);
codegen_->AddSlowPath(slow_path);
@@ -7278,10 +7280,10 @@ void CodeGeneratorX86::GenerateReferenceLoadWithBakerReadBarrier(HInstruction* i
SlowPathCode* slow_path;
if (always_update_field) {
DCHECK(temp != nullptr);
- slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkAndUpdateFieldSlowPathX86(
+ slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkAndUpdateFieldSlowPathX86(
instruction, ref, obj, src, /* unpoison_ref_before_marking */ true, *temp);
} else {
- slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathX86(
+ slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathX86(
instruction, ref, /* unpoison_ref_before_marking */ true);
}
AddSlowPath(slow_path);
@@ -7314,7 +7316,7 @@ void CodeGeneratorX86::GenerateReadBarrierSlow(HInstruction* instruction,
// not used by the artReadBarrierSlow entry point.
//
// TODO: Unpoison `ref` when it is used by artReadBarrierSlow.
- SlowPathCode* slow_path = new (GetGraph()->GetArena())
+ SlowPathCode* slow_path = new (GetGraph()->GetAllocator())
ReadBarrierForHeapReferenceSlowPathX86(instruction, out, ref, obj, offset, index);
AddSlowPath(slow_path);
@@ -7350,7 +7352,7 @@ void CodeGeneratorX86::GenerateReadBarrierForRootSlow(HInstruction* instruction,
// Note that GC roots are not affected by heap poisoning, so we do
// not need to do anything special for this here.
SlowPathCode* slow_path =
- new (GetGraph()->GetArena()) ReadBarrierForRootSlowPathX86(instruction, out, root);
+ new (GetGraph()->GetAllocator()) ReadBarrierForRootSlowPathX86(instruction, out, root);
AddSlowPath(slow_path);
__ jmp(slow_path->GetEntryLabel());
@@ -7370,7 +7372,7 @@ void InstructionCodeGeneratorX86::VisitBoundType(HBoundType* instruction ATTRIBU
// Simple implementation of packed switch - generate cascaded compare/jumps.
void LocationsBuilderX86::VisitPackedSwitch(HPackedSwitch* switch_instr) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(switch_instr, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
}
@@ -7437,7 +7439,7 @@ void InstructionCodeGeneratorX86::VisitPackedSwitch(HPackedSwitch* switch_instr)
void LocationsBuilderX86::VisitX86PackedSwitch(HX86PackedSwitch* switch_instr) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(switch_instr, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
// Constant area pointer.
@@ -7492,7 +7494,7 @@ void InstructionCodeGeneratorX86::VisitX86PackedSwitch(HX86PackedSwitch* switch_
void LocationsBuilderX86::VisitX86ComputeBaseMethodAddress(
HX86ComputeBaseMethodAddress* insn) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(insn, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(insn, LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
}
@@ -7516,7 +7518,7 @@ void InstructionCodeGeneratorX86::VisitX86ComputeBaseMethodAddress(
void LocationsBuilderX86::VisitX86LoadFromConstantTable(
HX86LoadFromConstantTable* insn) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(insn, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(insn, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::ConstantLocation(insn->GetConstant()));
@@ -7676,28 +7678,31 @@ Address CodeGeneratorX86::LiteralDoubleAddress(double v,
HX86ComputeBaseMethodAddress* method_base,
Register reg) {
AssemblerFixup* fixup =
- new (GetGraph()->GetArena()) RIPFixup(*this, method_base, __ AddDouble(v));
+ new (GetGraph()->GetAllocator()) RIPFixup(*this, method_base, __ AddDouble(v));
return Address(reg, kDummy32BitOffset, fixup);
}
Address CodeGeneratorX86::LiteralFloatAddress(float v,
HX86ComputeBaseMethodAddress* method_base,
Register reg) {
- AssemblerFixup* fixup = new (GetGraph()->GetArena()) RIPFixup(*this, method_base, __ AddFloat(v));
+ AssemblerFixup* fixup =
+ new (GetGraph()->GetAllocator()) RIPFixup(*this, method_base, __ AddFloat(v));
return Address(reg, kDummy32BitOffset, fixup);
}
Address CodeGeneratorX86::LiteralInt32Address(int32_t v,
HX86ComputeBaseMethodAddress* method_base,
Register reg) {
- AssemblerFixup* fixup = new (GetGraph()->GetArena()) RIPFixup(*this, method_base, __ AddInt32(v));
+ AssemblerFixup* fixup =
+ new (GetGraph()->GetAllocator()) RIPFixup(*this, method_base, __ AddInt32(v));
return Address(reg, kDummy32BitOffset, fixup);
}
Address CodeGeneratorX86::LiteralInt64Address(int64_t v,
HX86ComputeBaseMethodAddress* method_base,
Register reg) {
- AssemblerFixup* fixup = new (GetGraph()->GetArena()) RIPFixup(*this, method_base, __ AddInt64(v));
+ AssemblerFixup* fixup =
+ new (GetGraph()->GetAllocator()) RIPFixup(*this, method_base, __ AddInt64(v));
return Address(reg, kDummy32BitOffset, fixup);
}
@@ -7747,7 +7752,7 @@ Address CodeGeneratorX86::LiteralCaseTable(HX86PackedSwitch* switch_instr,
Register value) {
// Create a fixup to be used to create and address the jump table.
JumpTableRIPFixup* table_fixup =
- new (GetGraph()->GetArena()) JumpTableRIPFixup(*this, switch_instr);
+ new (GetGraph()->GetAllocator()) JumpTableRIPFixup(*this, switch_instr);
// We have to populate the jump tables.
fixups_to_jump_tables_.push_back(table_fixup);
@@ -7773,13 +7778,13 @@ void CodeGeneratorX86::MoveFromReturnRegister(Location target, DataType::Type ty
// TODO: Consider pairs in the parallel move resolver, then this could be nicely merged
// with the else branch.
if (type == DataType::Type::kInt64) {
- HParallelMove parallel_move(GetGraph()->GetArena());
+ HParallelMove parallel_move(GetGraph()->GetAllocator());
parallel_move.AddMove(return_loc.ToLow(), target.ToLow(), DataType::Type::kInt32, nullptr);
parallel_move.AddMove(return_loc.ToHigh(), target.ToHigh(), DataType::Type::kInt32, nullptr);
GetMoveResolver()->EmitNativeCode(&parallel_move);
} else {
// Let the parallel move resolver take care of all of this.
- HParallelMove parallel_move(GetGraph()->GetArena());
+ HParallelMove parallel_move(GetGraph()->GetAllocator());
parallel_move.AddMove(return_loc, target, type, nullptr);
GetMoveResolver()->EmitNativeCode(&parallel_move);
}
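Besides placement new, the CodeGeneratorX86 constructor hunk above routes container storage through the allocator with Adapter(kArenaAllocCodeGenerator), so the patch tables' growth is booked under a named arena bucket. The sketch below is a hypothetical stand-in (TaggedArena and ArenaAdapter are invented for illustration, not ART's ArenaAllocatorAdapter) showing how an STL-style allocator can forward to an arena and tag each request with an allocation kind.

// Hypothetical sketch: a minimal STL-compatible allocator that forwards to an
// arena and attributes each allocation to a kind, in the spirit of
// allocator->Adapter(kArenaAllocCodeGenerator) in the constructor above.
#include <cstddef>
#include <iostream>
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>

class TaggedArena {
 public:
  void* Alloc(size_t bytes, const std::string& kind) {
    usage_[kind] += bytes;
    blocks_.emplace_back(new unsigned char[bytes]);
    return blocks_.back().get();
  }
  void Report() const {
    for (const auto& [kind, bytes] : usage_) {
      std::cout << kind << ": " << bytes << " bytes\n";
    }
  }

 private:
  std::map<std::string, size_t> usage_;
  std::vector<std::unique_ptr<unsigned char[]>> blocks_;
};

template <typename T>
class ArenaAdapter {
 public:
  using value_type = T;
  ArenaAdapter(TaggedArena* arena, std::string kind)
      : arena_(arena), kind_(std::move(kind)) {}
  template <typename U>
  ArenaAdapter(const ArenaAdapter<U>& other)  // rebind support
      : arena_(other.arena_), kind_(other.kind_) {}

  T* allocate(size_t n) {
    return static_cast<T*>(arena_->Alloc(n * sizeof(T), kind_));
  }
  void deallocate(T*, size_t) {}  // Arena memory is reclaimed in bulk.

  TaggedArena* arena_;
  std::string kind_;
};

template <typename T, typename U>
bool operator==(const ArenaAdapter<T>& a, const ArenaAdapter<U>& b) {
  return a.arena_ == b.arena_;
}
template <typename T, typename U>
bool operator!=(const ArenaAdapter<T>& a, const ArenaAdapter<U>& b) {
  return !(a == b);
}

int main() {
  TaggedArena arena;
  // Mirrors e.g. string_patches_(allocator->Adapter(kArenaAllocCodeGenerator)).
  ArenaAdapter<int> adapter(&arena, "CodeGenerator");
  std::vector<int, ArenaAdapter<int>> patches(adapter);
  for (int i = 0; i < 100; ++i) patches.push_back(i);
  arena.Report();  // All of the vector's growth is attributed to "CodeGenerator".
}

A container built with such an adapter charges every reallocation to the kind it was constructed with, which is the effect the kArenaAllocCodeGenerator argument is after in the patch-table initializers.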
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index e8bfa66a58..b6aa110f2d 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -427,7 +427,7 @@ class ArraySetSlowPathX86_64 : public SlowPathCode {
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConvention calling_convention;
- HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+ HParallelMove parallel_move(codegen->GetGraph()->GetAllocator());
parallel_move.AddMove(
locations->InAt(0),
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
@@ -831,7 +831,7 @@ class ReadBarrierForHeapReferenceSlowPathX86_64 : public SlowPathCode {
// We're moving two or three locations to locations that could
// overlap, so we need a parallel move resolver.
InvokeRuntimeCallingConvention calling_convention;
- HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+ HParallelMove parallel_move(codegen->GetGraph()->GetAllocator());
parallel_move.AddMove(ref_,
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
DataType::Type::kReference,
@@ -1230,19 +1230,19 @@ CodeGeneratorX86_64::CodeGeneratorX86_64(HGraph* graph,
block_labels_(nullptr),
location_builder_(graph, this),
instruction_visitor_(graph, this),
- move_resolver_(graph->GetArena(), this),
- assembler_(graph->GetArena()),
+ move_resolver_(graph->GetAllocator(), this),
+ assembler_(graph->GetAllocator()),
isa_features_(isa_features),
constant_area_start_(0),
- boot_image_method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- method_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- boot_image_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- string_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- jit_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- jit_class_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- fixups_to_jump_tables_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
+ boot_image_method_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ method_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ boot_image_type_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ type_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ string_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ jit_string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ jit_class_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ fixups_to_jump_tables_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)) {
AddAllocatedRegister(Location::RegisterLocation(kFakeReturnRegister));
}
@@ -1702,7 +1702,7 @@ void InstructionCodeGeneratorX86_64::GenerateTestAndBranch(HInstruction* instruc
}
void LocationsBuilderX86_64::VisitIf(HIf* if_instr) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(if_instr);
if (IsBooleanValueOrMaterializedCondition(if_instr->InputAt(0))) {
locations->SetInAt(0, Location::Any());
}
@@ -1719,7 +1719,7 @@ void InstructionCodeGeneratorX86_64::VisitIf(HIf* if_instr) {
}
void LocationsBuilderX86_64::VisitDeoptimize(HDeoptimize* deoptimize) {
- LocationSummary* locations = new (GetGraph()->GetArena())
+ LocationSummary* locations = new (GetGraph()->GetAllocator())
LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
InvokeRuntimeCallingConvention calling_convention;
RegisterSet caller_saves = RegisterSet::Empty();
@@ -1739,7 +1739,7 @@ void InstructionCodeGeneratorX86_64::VisitDeoptimize(HDeoptimize* deoptimize) {
}
void LocationsBuilderX86_64::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
- LocationSummary* locations = new (GetGraph()->GetArena())
+ LocationSummary* locations = new (GetGraph()->GetAllocator())
LocationSummary(flag, LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
}
@@ -1767,7 +1767,7 @@ static bool SelectCanUseCMOV(HSelect* select) {
}
void LocationsBuilderX86_64::VisitSelect(HSelect* select) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(select);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(select);
if (DataType::IsFloatingPointType(select->GetType())) {
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetInAt(1, Location::Any());
@@ -1847,7 +1847,7 @@ void InstructionCodeGeneratorX86_64::VisitSelect(HSelect* select) {
}
void LocationsBuilderX86_64::VisitNativeDebugInfo(HNativeDebugInfo* info) {
- new (GetGraph()->GetArena()) LocationSummary(info);
+ new (GetGraph()->GetAllocator()) LocationSummary(info);
}
void InstructionCodeGeneratorX86_64::VisitNativeDebugInfo(HNativeDebugInfo*) {
@@ -1860,7 +1860,7 @@ void CodeGeneratorX86_64::GenerateNop() {
void LocationsBuilderX86_64::HandleCondition(HCondition* cond) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(cond, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(cond, LocationSummary::kNoCall);
// Handle the long/FP comparisons made in instruction simplification.
switch (cond->InputAt(0)->GetType()) {
case DataType::Type::kInt64:
@@ -2034,7 +2034,7 @@ void InstructionCodeGeneratorX86_64::VisitAboveOrEqual(HAboveOrEqual* comp) {
void LocationsBuilderX86_64::VisitCompare(HCompare* compare) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(compare, LocationSummary::kNoCall);
switch (compare->InputAt(0)->GetType()) {
case DataType::Type::kBool:
case DataType::Type::kUint8:
@@ -2132,7 +2132,7 @@ void InstructionCodeGeneratorX86_64::VisitCompare(HCompare* compare) {
void LocationsBuilderX86_64::VisitIntConstant(HIntConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -2142,7 +2142,7 @@ void InstructionCodeGeneratorX86_64::VisitIntConstant(HIntConstant* constant ATT
void LocationsBuilderX86_64::VisitNullConstant(HNullConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -2152,7 +2152,7 @@ void InstructionCodeGeneratorX86_64::VisitNullConstant(HNullConstant* constant A
void LocationsBuilderX86_64::VisitLongConstant(HLongConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -2162,7 +2162,7 @@ void InstructionCodeGeneratorX86_64::VisitLongConstant(HLongConstant* constant A
void LocationsBuilderX86_64::VisitFloatConstant(HFloatConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -2172,7 +2172,7 @@ void InstructionCodeGeneratorX86_64::VisitFloatConstant(HFloatConstant* constant
void LocationsBuilderX86_64::VisitDoubleConstant(HDoubleConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -2208,7 +2208,7 @@ void InstructionCodeGeneratorX86_64::VisitReturnVoid(HReturnVoid* ret ATTRIBUTE_
void LocationsBuilderX86_64::VisitReturn(HReturn* ret) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(ret, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(ret, LocationSummary::kNoCall);
switch (ret->InputAt(0)->GetType()) {
case DataType::Type::kReference:
case DataType::Type::kBool:
@@ -2474,7 +2474,7 @@ void InstructionCodeGeneratorX86_64::VisitInvokePolymorphic(HInvokePolymorphic*
void LocationsBuilderX86_64::VisitNeg(HNeg* neg) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(neg, LocationSummary::kNoCall);
switch (neg->GetResultType()) {
case DataType::Type::kInt32:
case DataType::Type::kInt64:
@@ -2540,7 +2540,7 @@ void InstructionCodeGeneratorX86_64::VisitNeg(HNeg* neg) {
void LocationsBuilderX86_64::VisitTypeConversion(HTypeConversion* conversion) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(conversion, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(conversion, LocationSummary::kNoCall);
DataType::Type result_type = conversion->GetResultType();
DataType::Type input_type = conversion->GetInputType();
DCHECK(!DataType::IsTypeConversionImplicit(input_type, result_type))
@@ -3010,7 +3010,7 @@ void InstructionCodeGeneratorX86_64::VisitTypeConversion(HTypeConversion* conver
void LocationsBuilderX86_64::VisitAdd(HAdd* add) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(add, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(add, LocationSummary::kNoCall);
switch (add->GetResultType()) {
case DataType::Type::kInt32: {
locations->SetInAt(0, Location::RequiresRegister());
@@ -3134,7 +3134,7 @@ void InstructionCodeGeneratorX86_64::VisitAdd(HAdd* add) {
void LocationsBuilderX86_64::VisitSub(HSub* sub) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(sub, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(sub, LocationSummary::kNoCall);
switch (sub->GetResultType()) {
case DataType::Type::kInt32: {
locations->SetInAt(0, Location::RequiresRegister());
@@ -3225,7 +3225,7 @@ void InstructionCodeGeneratorX86_64::VisitSub(HSub* sub) {
void LocationsBuilderX86_64::VisitMul(HMul* mul) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(mul, LocationSummary::kNoCall);
switch (mul->GetResultType()) {
case DataType::Type::kInt32: {
locations->SetInAt(0, Location::RequiresRegister());
@@ -3649,7 +3649,7 @@ void InstructionCodeGeneratorX86_64::GenerateDivRemIntegral(HBinaryOperation* in
}
} else {
SlowPathCode* slow_path =
- new (GetGraph()->GetArena()) DivRemMinusOneSlowPathX86_64(
+ new (GetGraph()->GetAllocator()) DivRemMinusOneSlowPathX86_64(
instruction, out.AsRegister(), type, is_div);
codegen_->AddSlowPath(slow_path);
@@ -3678,7 +3678,7 @@ void InstructionCodeGeneratorX86_64::GenerateDivRemIntegral(HBinaryOperation* in
void LocationsBuilderX86_64::VisitDiv(HDiv* div) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(div, LocationSummary::kNoCall);
switch (div->GetResultType()) {
case DataType::Type::kInt32:
case DataType::Type::kInt64: {
@@ -3761,7 +3761,7 @@ void InstructionCodeGeneratorX86_64::VisitDiv(HDiv* div) {
void LocationsBuilderX86_64::VisitRem(HRem* rem) {
DataType::Type type = rem->GetResultType();
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(rem, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(rem, LocationSummary::kNoCall);
switch (type) {
case DataType::Type::kInt32:
@@ -3818,7 +3818,7 @@ void LocationsBuilderX86_64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
void InstructionCodeGeneratorX86_64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
SlowPathCode* slow_path =
- new (GetGraph()->GetArena()) DivZeroCheckSlowPathX86_64(instruction);
+ new (GetGraph()->GetAllocator()) DivZeroCheckSlowPathX86_64(instruction);
codegen_->AddSlowPath(slow_path);
LocationSummary* locations = instruction->GetLocations();
@@ -3869,7 +3869,7 @@ void LocationsBuilderX86_64::HandleShift(HBinaryOperation* op) {
DCHECK(op->IsShl() || op->IsShr() || op->IsUShr());
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(op, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(op, LocationSummary::kNoCall);
switch (op->GetResultType()) {
case DataType::Type::kInt32:
@@ -3945,7 +3945,7 @@ void InstructionCodeGeneratorX86_64::HandleShift(HBinaryOperation* op) {
void LocationsBuilderX86_64::VisitRor(HRor* ror) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(ror, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(ror, LocationSummary::kNoCall);
switch (ror->GetResultType()) {
case DataType::Type::kInt32:
@@ -4017,8 +4017,8 @@ void InstructionCodeGeneratorX86_64::VisitUShr(HUShr* ushr) {
}
void LocationsBuilderX86_64::VisitNewInstance(HNewInstance* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
if (instruction->IsStringAlloc()) {
locations->AddTemp(Location::RegisterLocation(kMethodRegisterArgument));
@@ -4046,8 +4046,8 @@ void InstructionCodeGeneratorX86_64::VisitNewInstance(HNewInstance* instruction)
}
void LocationsBuilderX86_64::VisitNewArray(HNewArray* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
locations->SetOut(Location::RegisterLocation(RAX));
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
@@ -4066,7 +4066,7 @@ void InstructionCodeGeneratorX86_64::VisitNewArray(HNewArray* instruction) {
void LocationsBuilderX86_64::VisitParameterValue(HParameterValue* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
if (location.IsStackSlot()) {
location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
@@ -4083,7 +4083,7 @@ void InstructionCodeGeneratorX86_64::VisitParameterValue(
void LocationsBuilderX86_64::VisitCurrentMethod(HCurrentMethod* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetOut(Location::RegisterLocation(kMethodRegisterArgument));
}
@@ -4094,7 +4094,7 @@ void InstructionCodeGeneratorX86_64::VisitCurrentMethod(
void LocationsBuilderX86_64::VisitClassTableGet(HClassTableGet* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister());
}
@@ -4119,7 +4119,7 @@ void InstructionCodeGeneratorX86_64::VisitClassTableGet(HClassTableGet* instruct
void LocationsBuilderX86_64::VisitNot(HNot* not_) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(not_, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(not_, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::SameAsFirstInput());
}
@@ -4145,7 +4145,7 @@ void InstructionCodeGeneratorX86_64::VisitNot(HNot* not_) {
void LocationsBuilderX86_64::VisitBooleanNot(HBooleanNot* bool_not) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(bool_not, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(bool_not, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::SameAsFirstInput());
}
@@ -4160,7 +4160,7 @@ void InstructionCodeGeneratorX86_64::VisitBooleanNot(HBooleanNot* bool_not) {
void LocationsBuilderX86_64::VisitPhi(HPhi* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) {
locations->SetInAt(i, Location::Any());
}
@@ -4201,10 +4201,10 @@ void LocationsBuilderX86_64::HandleFieldGet(HInstruction* instruction) {
bool object_field_get_with_read_barrier =
kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference);
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction,
- object_field_get_with_read_barrier ?
- LocationSummary::kCallOnSlowPath :
- LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction,
+ object_field_get_with_read_barrier
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall);
if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -4326,7 +4326,7 @@ void LocationsBuilderX86_64::HandleFieldSet(HInstruction* instruction,
DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
DataType::Type field_type = field_info.GetFieldType();
bool is_volatile = field_info.IsVolatile();
bool needs_write_barrier =
@@ -4602,7 +4602,7 @@ void CodeGeneratorX86_64::GenerateImplicitNullCheck(HNullCheck* instruction) {
}
void CodeGeneratorX86_64::GenerateExplicitNullCheck(HNullCheck* instruction) {
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathX86_64(instruction);
+ SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) NullCheckSlowPathX86_64(instruction);
AddSlowPath(slow_path);
LocationSummary* locations = instruction->GetLocations();
@@ -4629,10 +4629,10 @@ void LocationsBuilderX86_64::VisitArrayGet(HArrayGet* instruction) {
bool object_array_get_with_read_barrier =
kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference);
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction,
- object_array_get_with_read_barrier ?
- LocationSummary::kCallOnSlowPath :
- LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction,
+ object_array_get_with_read_barrier
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall);
if (object_array_get_with_read_barrier && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -4775,7 +4775,7 @@ void LocationsBuilderX86_64::VisitArraySet(HArraySet* instruction) {
CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck();
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
instruction,
may_need_runtime_call_for_type_check ?
LocationSummary::kCallOnSlowPath :
@@ -4864,7 +4864,7 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) {
Location temp_loc = locations->GetTemp(0);
CpuRegister temp = temp_loc.AsRegister<CpuRegister>();
if (may_need_runtime_call_for_type_check) {
- slow_path = new (GetGraph()->GetArena()) ArraySetSlowPathX86_64(instruction);
+ slow_path = new (GetGraph()->GetAllocator()) ArraySetSlowPathX86_64(instruction);
codegen_->AddSlowPath(slow_path);
if (instruction->GetValueCanBeNull()) {
__ testl(register_value, register_value);
@@ -5002,7 +5002,7 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) {
void LocationsBuilderX86_64::VisitArrayLength(HArrayLength* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
if (!instruction->IsEmittedAtUseSite()) {
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
@@ -5043,7 +5043,7 @@ void InstructionCodeGeneratorX86_64::VisitBoundsCheck(HBoundsCheck* instruction)
LocationSummary* locations = instruction->GetLocations();
Location index_loc = locations->InAt(0);
Location length_loc = locations->InAt(1);
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathX86_64(instruction);
+ SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) BoundsCheckSlowPathX86_64(instruction);
if (length_loc.IsConstant()) {
int32_t length = CodeGenerator::GetInt32ValueOf(length_loc.GetConstant());
@@ -5129,8 +5129,8 @@ void InstructionCodeGeneratorX86_64::VisitParallelMove(HParallelMove* instructio
}
void LocationsBuilderX86_64::VisitSuspendCheck(HSuspendCheck* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnSlowPath);
// In suspend check slow path, usually there are no caller-save registers at all.
// If SIMD instructions are present, however, we force spilling all live SIMD
// registers in full width (since the runtime only saves/restores lower part).
@@ -5157,7 +5157,7 @@ void InstructionCodeGeneratorX86_64::GenerateSuspendCheck(HSuspendCheck* instruc
SuspendCheckSlowPathX86_64* slow_path =
down_cast<SuspendCheckSlowPathX86_64*>(instruction->GetSlowPath());
if (slow_path == nullptr) {
- slow_path = new (GetGraph()->GetArena()) SuspendCheckSlowPathX86_64(instruction, successor);
+ slow_path = new (GetGraph()->GetAllocator()) SuspendCheckSlowPathX86_64(instruction, successor);
instruction->SetSlowPath(slow_path);
codegen_->AddSlowPath(slow_path);
if (successor != nullptr) {
@@ -5439,7 +5439,7 @@ void LocationsBuilderX86_64::VisitLoadClass(HLoadClass* cls) {
LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(cls, call_kind);
if (kUseBakerReadBarrier && requires_read_barrier && !cls->NeedsEnvironment()) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -5555,7 +5555,7 @@ void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) NO_THREAD_S
if (generate_null_check || cls->MustGenerateClinitCheck()) {
DCHECK(cls->CanCallRuntime());
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathX86_64(
+ SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathX86_64(
cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
codegen_->AddSlowPath(slow_path);
if (generate_null_check) {
@@ -5572,7 +5572,7 @@ void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) NO_THREAD_S
void LocationsBuilderX86_64::VisitClinitCheck(HClinitCheck* check) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
+ new (GetGraph()->GetAllocator()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
locations->SetInAt(0, Location::RequiresRegister());
if (check->HasUses()) {
locations->SetOut(Location::SameAsFirstInput());
@@ -5581,7 +5581,7 @@ void LocationsBuilderX86_64::VisitClinitCheck(HClinitCheck* check) {
void InstructionCodeGeneratorX86_64::VisitClinitCheck(HClinitCheck* check) {
// We assume the class to not be null.
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathX86_64(
+ SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathX86_64(
check->GetLoadClass(), check, check->GetDexPc(), true);
codegen_->AddSlowPath(slow_path);
GenerateClassInitializationCheck(slow_path,
@@ -5608,7 +5608,7 @@ HLoadString::LoadKind CodeGeneratorX86_64::GetSupportedLoadStringKind(
void LocationsBuilderX86_64::VisitLoadString(HLoadString* load) {
LocationSummary::CallKind call_kind = CodeGenerator::GetLoadStringCallKind(load);
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(load, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(load, call_kind);
if (load->GetLoadKind() == HLoadString::LoadKind::kRuntimeCall) {
locations->SetOut(Location::RegisterLocation(RAX));
} else {
@@ -5671,7 +5671,7 @@ void InstructionCodeGeneratorX86_64::VisitLoadString(HLoadString* load) NO_THREA
Label* fixup_label = codegen_->NewStringBssEntryPatch(load);
// /* GcRoot<mirror::Class> */ out = *address /* PC-relative */
GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, kCompilerReadBarrierOption);
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathX86_64(load);
+ SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) LoadStringSlowPathX86_64(load);
codegen_->AddSlowPath(slow_path);
__ testl(out, out);
__ j(kEqual, slow_path->GetEntryLabel());
@@ -5707,7 +5707,7 @@ static Address GetExceptionTlsAddress() {
void LocationsBuilderX86_64::VisitLoadException(HLoadException* load) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(load, LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
}
@@ -5716,7 +5716,7 @@ void InstructionCodeGeneratorX86_64::VisitLoadException(HLoadException* load) {
}
void LocationsBuilderX86_64::VisitClearException(HClearException* clear) {
- new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(clear, LocationSummary::kNoCall);
}
void InstructionCodeGeneratorX86_64::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
@@ -5724,8 +5724,8 @@ void InstructionCodeGeneratorX86_64::VisitClearException(HClearException* clear
}
void LocationsBuilderX86_64::VisitThrow(HThrow* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
}
@@ -5775,7 +5775,8 @@ void LocationsBuilderX86_64::VisitInstanceOf(HInstanceOf* instruction) {
break;
}
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations =
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
if (baker_read_barrier_slow_path) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -5960,8 +5961,8 @@ void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) {
__ cmpl(out, Address(CpuRegister(RSP), cls.GetStackIndex()));
}
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86_64(instruction,
- /* is_fatal */ false);
+ slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathX86_64(instruction,
+ /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ j(kNotEqual, slow_path->GetEntryLabel());
__ movl(out, Immediate(1));
@@ -5992,8 +5993,8 @@ void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) {
// call to the runtime not using a type checking slow path).
// This should also be beneficial for the other cases above.
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86_64(instruction,
- /* is_fatal */ false);
+ slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathX86_64(instruction,
+ /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ jmp(slow_path->GetEntryLabel());
if (zero.IsLinked()) {
@@ -6041,7 +6042,8 @@ void LocationsBuilderX86_64::VisitCheckCast(HCheckCast* instruction) {
LocationSummary::CallKind call_kind = is_fatal_slow_path
? LocationSummary::kNoCall
: LocationSummary::kCallOnSlowPath;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations =
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RequiresRegister());
if (type_check_kind == TypeCheckKind::kInterfaceCheck) {
// Require a register for the interface check since there is a loop that compares the class to
@@ -6086,8 +6088,8 @@ void InstructionCodeGeneratorX86_64::VisitCheckCast(HCheckCast* instruction) {
bool is_type_check_slow_path_fatal =
IsTypeCheckSlowPathFatal(type_check_kind, instruction->CanThrowIntoCatchBlock());
SlowPathCode* type_check_slow_path =
- new (GetGraph()->GetArena()) TypeCheckSlowPathX86_64(instruction,
- is_type_check_slow_path_fatal);
+ new (GetGraph()->GetAllocator()) TypeCheckSlowPathX86_64(instruction,
+ is_type_check_slow_path_fatal);
codegen_->AddSlowPath(type_check_slow_path);
@@ -6285,8 +6287,8 @@ void InstructionCodeGeneratorX86_64::VisitCheckCast(HCheckCast* instruction) {
}
void LocationsBuilderX86_64::VisitMonitorOperation(HMonitorOperation* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
}
@@ -6308,7 +6310,7 @@ void LocationsBuilderX86_64::VisitXor(HXor* instruction) { HandleBitwiseOperatio
void LocationsBuilderX86_64::HandleBitwiseOperation(HBinaryOperation* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
DCHECK(instruction->GetResultType() == DataType::Type::kInt32
|| instruction->GetResultType() == DataType::Type::kInt64);
locations->SetInAt(0, Location::RequiresRegister());
@@ -6512,7 +6514,7 @@ void InstructionCodeGeneratorX86_64::GenerateGcRootFieldLoad(
"have different sizes.");
// Slow path marking the GC root `root`.
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathX86_64(
+ SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathX86_64(
instruction, root, /* unpoison_ref_before_marking */ false);
codegen_->AddSlowPath(slow_path);
@@ -6644,10 +6646,10 @@ void CodeGeneratorX86_64::GenerateReferenceLoadWithBakerReadBarrier(HInstruction
if (always_update_field) {
DCHECK(temp1 != nullptr);
DCHECK(temp2 != nullptr);
- slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkAndUpdateFieldSlowPathX86_64(
+ slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkAndUpdateFieldSlowPathX86_64(
instruction, ref, obj, src, /* unpoison_ref_before_marking */ true, *temp1, *temp2);
} else {
- slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathX86_64(
+ slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathX86_64(
instruction, ref, /* unpoison_ref_before_marking */ true);
}
AddSlowPath(slow_path);
@@ -6680,7 +6682,7 @@ void CodeGeneratorX86_64::GenerateReadBarrierSlow(HInstruction* instruction,
// not used by the artReadBarrierSlow entry point.
//
// TODO: Unpoison `ref` when it is used by artReadBarrierSlow.
- SlowPathCode* slow_path = new (GetGraph()->GetArena())
+ SlowPathCode* slow_path = new (GetGraph()->GetAllocator())
ReadBarrierForHeapReferenceSlowPathX86_64(instruction, out, ref, obj, offset, index);
AddSlowPath(slow_path);
@@ -6716,7 +6718,7 @@ void CodeGeneratorX86_64::GenerateReadBarrierForRootSlow(HInstruction* instructi
// Note that GC roots are not affected by heap poisoning, so we do
// not need to do anything special for this here.
SlowPathCode* slow_path =
- new (GetGraph()->GetArena()) ReadBarrierForRootSlowPathX86_64(instruction, out, root);
+ new (GetGraph()->GetAllocator()) ReadBarrierForRootSlowPathX86_64(instruction, out, root);
AddSlowPath(slow_path);
__ jmp(slow_path->GetEntryLabel());
@@ -6736,7 +6738,7 @@ void InstructionCodeGeneratorX86_64::VisitBoundType(HBoundType* instruction ATTR
// Simple implementation of packed switch - generate cascaded compare/jumps.
void LocationsBuilderX86_64::VisitPackedSwitch(HPackedSwitch* switch_instr) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(switch_instr, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->AddTemp(Location::RequiresRegister());
locations->AddTemp(Location::RequiresRegister());
@@ -7024,22 +7026,22 @@ void CodeGeneratorX86_64::Finalize(CodeAllocator* allocator) {
}
Address CodeGeneratorX86_64::LiteralDoubleAddress(double v) {
- AssemblerFixup* fixup = new (GetGraph()->GetArena()) RIPFixup(*this, __ AddDouble(v));
+ AssemblerFixup* fixup = new (GetGraph()->GetAllocator()) RIPFixup(*this, __ AddDouble(v));
return Address::RIP(fixup);
}
Address CodeGeneratorX86_64::LiteralFloatAddress(float v) {
- AssemblerFixup* fixup = new (GetGraph()->GetArena()) RIPFixup(*this, __ AddFloat(v));
+ AssemblerFixup* fixup = new (GetGraph()->GetAllocator()) RIPFixup(*this, __ AddFloat(v));
return Address::RIP(fixup);
}
Address CodeGeneratorX86_64::LiteralInt32Address(int32_t v) {
- AssemblerFixup* fixup = new (GetGraph()->GetArena()) RIPFixup(*this, __ AddInt32(v));
+ AssemblerFixup* fixup = new (GetGraph()->GetAllocator()) RIPFixup(*this, __ AddInt32(v));
return Address::RIP(fixup);
}
Address CodeGeneratorX86_64::LiteralInt64Address(int64_t v) {
- AssemblerFixup* fixup = new (GetGraph()->GetArena()) RIPFixup(*this, __ AddInt64(v));
+ AssemblerFixup* fixup = new (GetGraph()->GetAllocator()) RIPFixup(*this, __ AddInt64(v));
return Address::RIP(fixup);
}
@@ -7058,7 +7060,7 @@ void CodeGeneratorX86_64::MoveFromReturnRegister(Location trg, DataType::Type ty
}
// Let the parallel move resolver take care of all of this.
- HParallelMove parallel_move(GetGraph()->GetArena());
+ HParallelMove parallel_move(GetGraph()->GetAllocator());
parallel_move.AddMove(return_loc, trg, type, nullptr);
GetMoveResolver()->EmitNativeCode(&parallel_move);
}
@@ -7066,7 +7068,7 @@ void CodeGeneratorX86_64::MoveFromReturnRegister(Location trg, DataType::Type ty
Address CodeGeneratorX86_64::LiteralCaseTable(HPackedSwitch* switch_instr) {
// Create a fixup to be used to create and address the jump table.
JumpTableRIPFixup* table_fixup =
- new (GetGraph()->GetArena()) JumpTableRIPFixup(*this, switch_instr);
+ new (GetGraph()->GetAllocator()) JumpTableRIPFixup(*this, switch_instr);
// We have to populate the jump tables.
fixups_to_jump_tables_.push_back(table_fixup);
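
The hunks above all switch the allocator handle passed to placement new, relying on the arena-object idiom where a class overloads `operator new` to take an allocator so `new (alloc) T(...)` carves storage out of the arena and the object is never individually deleted. The following is a minimal, self-contained sketch of that idiom, not ART code; `BumpArena`, `ArenaObject` and `LocationSummaryLike` are illustrative names only.

```cpp
#include <cstddef>
#include <cstdint>
#include <deque>
#include <vector>

// Toy arena: every allocation stays alive until the arena itself is destroyed,
// and the arena can report how much it handed out (the accounting this change cares about).
class BumpArena {
 public:
  void* Alloc(size_t bytes) {
    chunks_.emplace_back(bytes);               // block lives until the arena dies
    bytes_allocated_ += bytes;                 // per-arena accounting
    return chunks_.back().data();              // vector buffers are max-aligned in practice
  }
  size_t BytesAllocated() const { return bytes_allocated_; }

 private:
  std::deque<std::vector<uint8_t>> chunks_;    // deque: growth never moves existing blocks
  size_t bytes_allocated_ = 0;
};

// Base class that gives subclasses the `new (arena) T(...)` spelling seen throughout the diff
// (ART has its own arena-object base classes; this one is illustrative only).
struct ArenaObject {
  static void* operator new(size_t size, BumpArena* arena) { return arena->Alloc(size); }
  static void operator delete(void*, BumpArena*) {}  // matching form, used only if a ctor throws
  static void operator delete(void*) {}              // arena memory is never freed per object
};

struct LocationSummaryLike : ArenaObject {
  explicit LocationSummaryLike(int call_kind) : kind(call_kind) {}
  int kind;
};

int main() {
  BumpArena arena;
  auto* locations = new (&arena) LocationSummaryLike(/*call_kind=*/0);  // no delete, ever
  return (locations->kind == 0 && arena.BytesAllocated() > 0) ? 0 : 1;
}
```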
diff --git a/compiler/optimizing/code_sinking.cc b/compiler/optimizing/code_sinking.cc
index b558eb17a7..d8ebac95a8 100644
--- a/compiler/optimizing/code_sinking.cc
+++ b/compiler/optimizing/code_sinking.cc
@@ -16,6 +16,10 @@
#include "code_sinking.h"
+#include "base/arena_bit_vector.h"
+#include "base/bit_vector-inl.h"
+#include "base/scoped_arena_allocator.h"
+#include "base/scoped_arena_containers.h"
#include "common_dominator.h"
#include "nodes.h"
@@ -115,7 +119,7 @@ static bool IsInterestingInstruction(HInstruction* instruction) {
static void AddInstruction(HInstruction* instruction,
const ArenaBitVector& processed_instructions,
const ArenaBitVector& discard_blocks,
- ArenaVector<HInstruction*>* worklist) {
+ ScopedArenaVector<HInstruction*>* worklist) {
// Add to the work list if the instruction is not in the list of blocks
// to discard, hasn't been already processed and is of interest.
if (!discard_blocks.IsBitSet(instruction->GetBlock()->GetBlockId()) &&
@@ -128,7 +132,7 @@ static void AddInstruction(HInstruction* instruction,
static void AddInputs(HInstruction* instruction,
const ArenaBitVector& processed_instructions,
const ArenaBitVector& discard_blocks,
- ArenaVector<HInstruction*>* worklist) {
+ ScopedArenaVector<HInstruction*>* worklist) {
for (HInstruction* input : instruction->GetInputs()) {
AddInstruction(input, processed_instructions, discard_blocks, worklist);
}
@@ -137,7 +141,7 @@ static void AddInputs(HInstruction* instruction,
static void AddInputs(HBasicBlock* block,
const ArenaBitVector& processed_instructions,
const ArenaBitVector& discard_blocks,
- ArenaVector<HInstruction*>* worklist) {
+ ScopedArenaVector<HInstruction*>* worklist) {
for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
AddInputs(it.Current(), processed_instructions, discard_blocks, worklist);
}
@@ -242,17 +246,19 @@ static HInstruction* FindIdealPosition(HInstruction* instruction,
void CodeSinking::SinkCodeToUncommonBranch(HBasicBlock* end_block) {
- // Local allocator to discard data structures created below at the end of
- // this optimization.
- ArenaAllocator allocator(graph_->GetArena()->GetArenaPool());
+ // Local allocator to discard data structures created below at the end of this optimization.
+ ScopedArenaAllocator allocator(graph_->GetArenaStack());
size_t number_of_instructions = graph_->GetCurrentInstructionId();
- ArenaVector<HInstruction*> worklist(allocator.Adapter(kArenaAllocMisc));
+ ScopedArenaVector<HInstruction*> worklist(allocator.Adapter(kArenaAllocMisc));
ArenaBitVector processed_instructions(&allocator, number_of_instructions, /* expandable */ false);
+ processed_instructions.ClearAllBits();
ArenaBitVector post_dominated(&allocator, graph_->GetBlocks().size(), /* expandable */ false);
+ post_dominated.ClearAllBits();
ArenaBitVector instructions_that_can_move(
&allocator, number_of_instructions, /* expandable */ false);
- ArenaVector<HInstruction*> move_in_order(allocator.Adapter(kArenaAllocMisc));
+ instructions_that_can_move.ClearAllBits();
+ ScopedArenaVector<HInstruction*> move_in_order(allocator.Adapter(kArenaAllocMisc));
// Step (1): Visit post order to get a subset of blocks post dominated by `end_block`.
// TODO(ngeoffray): Getting the full set of post-dominated shoud be done by
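
The code_sinking hunk above replaces a pass-private `ArenaAllocator` with a `ScopedArenaAllocator` drawn from the graph's arena stack, and switches the worklists to scoped containers; it also adds explicit `ClearAllBits()` calls, presumably because the scoped allocator does not hand back zero-filled memory the way the standalone allocator did. Below is a minimal sketch, under illustrative names (`ArenaStack`, `ScopedArena`, `Worklist`), of the underlying idea: pass-local allocations come from a shared stack so they show up in central accounting, and an RAII scope rewinds the stack when the pass returns.

```cpp
#include <cstddef>
#include <cstdint>
#include <new>
#include <vector>

// Shared stack of arena memory; every pass draws from it, so one counter sees everything.
class ArenaStack {
 public:
  explicit ArenaStack(size_t capacity) : storage_(capacity) {}
  size_t Top() const { return top_; }
  void Rewind(size_t mark) { top_ = mark; }
  void* Alloc(size_t bytes) {
    bytes = (bytes + 7) & ~size_t{7};                     // keep 8-byte alignment
    if (top_ + bytes > storage_.size()) return nullptr;   // a real arena would grow instead
    void* p = storage_.data() + top_;
    top_ += bytes;
    bytes_ever_allocated_ += bytes;                       // central accounting, visible to stats
    return p;
  }
  size_t BytesEverAllocated() const { return bytes_ever_allocated_; }

 private:
  std::vector<uint8_t> storage_;
  size_t top_ = 0;
  size_t bytes_ever_allocated_ = 0;
};

// RAII scope: remembers the stack mark on entry and rewinds it on exit, so everything a
// pass allocates through it is reclaimed when the pass returns.
class ScopedArena {
 public:
  explicit ScopedArena(ArenaStack* stack) : stack_(stack), mark_(stack->Top()) {}
  ~ScopedArena() { stack_->Rewind(mark_); }
  void* Alloc(size_t bytes) { return stack_->Alloc(bytes); }

 private:
  ArenaStack* const stack_;
  const size_t mark_;
};

struct Worklist { int items[64]; size_t count = 0; };

void RunPass(ArenaStack* stack) {
  ScopedArena allocator(stack);                 // cf. ScopedArenaAllocator(graph_->GetArenaStack())
  auto* worklist = new (allocator.Alloc(sizeof(Worklist))) Worklist{};
  worklist->items[worklist->count++] = 42;      // pass-local data lives only for this call
}                                               // memory rewound here, but still counted above

int main() {
  ArenaStack stack(1 << 16);
  RunPass(&stack);
  return (stack.Top() == 0 && stack.BytesEverAllocated() > 0) ? 0 : 1;
}
```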
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index 896fcfa20d..e35c7c734b 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -72,34 +72,37 @@ static ::std::vector<CodegenTargetConfig> GetTargetConfigs() {
return v;
}
-static void TestCode(const uint16_t* data,
- bool has_result = false,
- int32_t expected = 0) {
+class CodegenTest : public OptimizingUnitTest {
+ protected:
+ void TestCode(const uint16_t* data, bool has_result = false, int32_t expected = 0);
+ void TestCodeLong(const uint16_t* data, bool has_result, int64_t expected);
+ void TestComparison(IfCondition condition,
+ int64_t i,
+ int64_t j,
+ DataType::Type type,
+ const CodegenTargetConfig target_config);
+};
+
+void CodegenTest::TestCode(const uint16_t* data, bool has_result, int32_t expected) {
for (const CodegenTargetConfig& target_config : GetTargetConfigs()) {
- ArenaPool pool;
- ArenaAllocator arena(&pool);
- HGraph* graph = CreateCFG(&arena, data);
+ ResetPoolAndAllocator();
+ HGraph* graph = CreateCFG(data);
// Remove suspend checks, they cannot be executed in this context.
RemoveSuspendChecks(graph);
RunCode(target_config, graph, [](HGraph*) {}, has_result, expected);
}
}
-static void TestCodeLong(const uint16_t* data,
- bool has_result,
- int64_t expected) {
+void CodegenTest::TestCodeLong(const uint16_t* data, bool has_result, int64_t expected) {
for (const CodegenTargetConfig& target_config : GetTargetConfigs()) {
- ArenaPool pool;
- ArenaAllocator arena(&pool);
- HGraph* graph = CreateCFG(&arena, data, DataType::Type::kInt64);
+ ResetPoolAndAllocator();
+ HGraph* graph = CreateCFG(data, DataType::Type::kInt64);
// Remove suspend checks, they cannot be executed in this context.
RemoveSuspendChecks(graph);
RunCode(target_config, graph, [](HGraph*) {}, has_result, expected);
}
}
-class CodegenTest : public CommonCompilerTest {};
-
TEST_F(CodegenTest, ReturnVoid) {
const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(Instruction::RETURN_VOID);
TestCode(data);
@@ -412,28 +415,25 @@ TEST_F(CodegenTest, ReturnMulIntLit16) {
TEST_F(CodegenTest, NonMaterializedCondition) {
for (CodegenTargetConfig target_config : GetTargetConfigs()) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateGraph(&allocator);
+ HGraph* graph = CreateGraph();
- HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- entry->AddInstruction(new (&allocator) HGoto());
+ entry->AddInstruction(new (GetAllocator()) HGoto());
- HBasicBlock* first_block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* first_block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(first_block);
entry->AddSuccessor(first_block);
HIntConstant* constant0 = graph->GetIntConstant(0);
HIntConstant* constant1 = graph->GetIntConstant(1);
- HEqual* equal = new (&allocator) HEqual(constant0, constant0);
+ HEqual* equal = new (GetAllocator()) HEqual(constant0, constant0);
first_block->AddInstruction(equal);
- first_block->AddInstruction(new (&allocator) HIf(equal));
+ first_block->AddInstruction(new (GetAllocator()) HIf(equal));
- HBasicBlock* then_block = new (&allocator) HBasicBlock(graph);
- HBasicBlock* else_block = new (&allocator) HBasicBlock(graph);
- HBasicBlock* exit_block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* then_block = new (GetAllocator()) HBasicBlock(graph);
+ HBasicBlock* else_block = new (GetAllocator()) HBasicBlock(graph);
+ HBasicBlock* exit_block = new (GetAllocator()) HBasicBlock(graph);
graph->SetExitBlock(exit_block);
graph->AddBlock(then_block);
@@ -444,9 +444,9 @@ TEST_F(CodegenTest, NonMaterializedCondition) {
then_block->AddSuccessor(exit_block);
else_block->AddSuccessor(exit_block);
- exit_block->AddInstruction(new (&allocator) HExit());
- then_block->AddInstruction(new (&allocator) HReturn(constant0));
- else_block->AddInstruction(new (&allocator) HReturn(constant1));
+ exit_block->AddInstruction(new (GetAllocator()) HExit());
+ then_block->AddInstruction(new (GetAllocator()) HReturn(constant0));
+ else_block->AddInstruction(new (GetAllocator()) HReturn(constant1));
ASSERT_FALSE(equal->IsEmittedAtUseSite());
graph->BuildDominatorTree();
@@ -455,7 +455,7 @@ TEST_F(CodegenTest, NonMaterializedCondition) {
auto hook_before_codegen = [](HGraph* graph_in) {
HBasicBlock* block = graph_in->GetEntryBlock()->GetSuccessors()[0];
- HParallelMove* move = new (graph_in->GetArena()) HParallelMove(graph_in->GetArena());
+ HParallelMove* move = new (graph_in->GetAllocator()) HParallelMove(graph_in->GetAllocator());
block->InsertInstructionBefore(move, block->GetLastInstruction());
};
@@ -475,19 +475,17 @@ TEST_F(CodegenTest, MaterializedCondition1) {
int rhs[] = {2, 1, 2, -1, 0xabc};
for (size_t i = 0; i < arraysize(lhs); i++) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateGraph(&allocator);
+ HGraph* graph = CreateGraph();
- HBasicBlock* entry_block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* entry_block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(entry_block);
graph->SetEntryBlock(entry_block);
- entry_block->AddInstruction(new (&allocator) HGoto());
- HBasicBlock* code_block = new (&allocator) HBasicBlock(graph);
+ entry_block->AddInstruction(new (GetAllocator()) HGoto());
+ HBasicBlock* code_block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(code_block);
- HBasicBlock* exit_block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* exit_block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(exit_block);
- exit_block->AddInstruction(new (&allocator) HExit());
+ exit_block->AddInstruction(new (GetAllocator()) HExit());
entry_block->AddSuccessor(code_block);
code_block->AddSuccessor(exit_block);
@@ -503,7 +501,8 @@ TEST_F(CodegenTest, MaterializedCondition1) {
graph->BuildDominatorTree();
auto hook_before_codegen = [](HGraph* graph_in) {
HBasicBlock* block = graph_in->GetEntryBlock()->GetSuccessors()[0];
- HParallelMove* move = new (graph_in->GetArena()) HParallelMove(graph_in->GetArena());
+ HParallelMove* move =
+ new (graph_in->GetAllocator()) HParallelMove(graph_in->GetAllocator());
block->InsertInstructionBefore(move, block->GetLastInstruction());
};
RunCode(target_config, graph, hook_before_codegen, true, lhs[i] < rhs[i]);
@@ -523,24 +522,22 @@ TEST_F(CodegenTest, MaterializedCondition2) {
for (size_t i = 0; i < arraysize(lhs); i++) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateGraph(&allocator);
+ HGraph* graph = CreateGraph();
- HBasicBlock* entry_block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* entry_block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(entry_block);
graph->SetEntryBlock(entry_block);
- entry_block->AddInstruction(new (&allocator) HGoto());
+ entry_block->AddInstruction(new (GetAllocator()) HGoto());
- HBasicBlock* if_block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* if_block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(if_block);
- HBasicBlock* if_true_block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* if_true_block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(if_true_block);
- HBasicBlock* if_false_block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* if_false_block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(if_false_block);
- HBasicBlock* exit_block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* exit_block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(exit_block);
- exit_block->AddInstruction(new (&allocator) HExit());
+ exit_block->AddInstruction(new (GetAllocator()) HExit());
graph->SetEntryBlock(entry_block);
entry_block->AddSuccessor(if_block);
@@ -571,7 +568,8 @@ TEST_F(CodegenTest, MaterializedCondition2) {
graph->BuildDominatorTree();
auto hook_before_codegen = [](HGraph* graph_in) {
HBasicBlock* block = graph_in->GetEntryBlock()->GetSuccessors()[0];
- HParallelMove* move = new (graph_in->GetArena()) HParallelMove(graph_in->GetArena());
+ HParallelMove* move =
+ new (graph_in->GetAllocator()) HParallelMove(graph_in->GetAllocator());
block->InsertInstructionBefore(move, block->GetLastInstruction());
};
RunCode(target_config, graph, hook_before_codegen, true, lhs[i] < rhs[i]);
@@ -599,27 +597,25 @@ TEST_F(CodegenTest, ReturnDivInt2Addr) {
}
// Helper method.
-static void TestComparison(IfCondition condition,
- int64_t i,
- int64_t j,
- DataType::Type type,
- const CodegenTargetConfig target_config) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateGraph(&allocator);
-
- HBasicBlock* entry_block = new (&allocator) HBasicBlock(graph);
+void CodegenTest::TestComparison(IfCondition condition,
+ int64_t i,
+ int64_t j,
+ DataType::Type type,
+ const CodegenTargetConfig target_config) {
+ HGraph* graph = CreateGraph();
+
+ HBasicBlock* entry_block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(entry_block);
graph->SetEntryBlock(entry_block);
- entry_block->AddInstruction(new (&allocator) HGoto());
+ entry_block->AddInstruction(new (GetAllocator()) HGoto());
- HBasicBlock* block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(block);
- HBasicBlock* exit_block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* exit_block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(exit_block);
graph->SetExitBlock(exit_block);
- exit_block->AddInstruction(new (&allocator) HExit());
+ exit_block->AddInstruction(new (GetAllocator()) HExit());
entry_block->AddSuccessor(block);
block->AddSuccessor(exit_block);
@@ -641,48 +637,48 @@ static void TestComparison(IfCondition condition,
const uint64_t y = j;
switch (condition) {
case kCondEQ:
- comparison = new (&allocator) HEqual(op1, op2);
+ comparison = new (GetAllocator()) HEqual(op1, op2);
expected_result = (i == j);
break;
case kCondNE:
- comparison = new (&allocator) HNotEqual(op1, op2);
+ comparison = new (GetAllocator()) HNotEqual(op1, op2);
expected_result = (i != j);
break;
case kCondLT:
- comparison = new (&allocator) HLessThan(op1, op2);
+ comparison = new (GetAllocator()) HLessThan(op1, op2);
expected_result = (i < j);
break;
case kCondLE:
- comparison = new (&allocator) HLessThanOrEqual(op1, op2);
+ comparison = new (GetAllocator()) HLessThanOrEqual(op1, op2);
expected_result = (i <= j);
break;
case kCondGT:
- comparison = new (&allocator) HGreaterThan(op1, op2);
+ comparison = new (GetAllocator()) HGreaterThan(op1, op2);
expected_result = (i > j);
break;
case kCondGE:
- comparison = new (&allocator) HGreaterThanOrEqual(op1, op2);
+ comparison = new (GetAllocator()) HGreaterThanOrEqual(op1, op2);
expected_result = (i >= j);
break;
case kCondB:
- comparison = new (&allocator) HBelow(op1, op2);
+ comparison = new (GetAllocator()) HBelow(op1, op2);
expected_result = (x < y);
break;
case kCondBE:
- comparison = new (&allocator) HBelowOrEqual(op1, op2);
+ comparison = new (GetAllocator()) HBelowOrEqual(op1, op2);
expected_result = (x <= y);
break;
case kCondA:
- comparison = new (&allocator) HAbove(op1, op2);
+ comparison = new (GetAllocator()) HAbove(op1, op2);
expected_result = (x > y);
break;
case kCondAE:
- comparison = new (&allocator) HAboveOrEqual(op1, op2);
+ comparison = new (GetAllocator()) HAboveOrEqual(op1, op2);
expected_result = (x >= y);
break;
}
block->AddInstruction(comparison);
- block->AddInstruction(new (&allocator) HReturn(comparison));
+ block->AddInstruction(new (GetAllocator()) HReturn(comparison));
graph->BuildDominatorTree();
RunCode(target_config, graph, [](HGraph*) {}, true, expected_result);
@@ -718,9 +714,7 @@ TEST_F(CodegenTest, ComparisonsLong) {
TEST_F(CodegenTest, ARMVIXLParallelMoveResolver) {
std::unique_ptr<const ArmInstructionSetFeatures> features(
ArmInstructionSetFeatures::FromCppDefines());
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateGraph(&allocator);
+ HGraph* graph = CreateGraph();
arm::CodeGeneratorARMVIXL codegen(graph, *features.get(), CompilerOptions());
codegen.Initialize();
@@ -729,7 +723,7 @@ TEST_F(CodegenTest, ARMVIXLParallelMoveResolver) {
// int mem2) which was faulty (before the fix). So previously GPR and FP scratch registers were
// used as temps; however GPR scratch register is required for big stack offsets which don't fit
// LDR encoding. So the following code is a regression test for that situation.
- HParallelMove* move = new (graph->GetArena()) HParallelMove(graph->GetArena());
+ HParallelMove* move = new (graph->GetAllocator()) HParallelMove(graph->GetAllocator());
move->AddMove(Location::StackSlot(0), Location::StackSlot(8192), DataType::Type::kInt32, nullptr);
move->AddMove(Location::StackSlot(8192), Location::StackSlot(0), DataType::Type::kInt32, nullptr);
codegen.GetMoveResolver()->EmitNativeCode(move);
@@ -744,9 +738,7 @@ TEST_F(CodegenTest, ARMVIXLParallelMoveResolver) {
TEST_F(CodegenTest, ARM64ParallelMoveResolverB34760542) {
std::unique_ptr<const Arm64InstructionSetFeatures> features(
Arm64InstructionSetFeatures::FromCppDefines());
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateGraph(&allocator);
+ HGraph* graph = CreateGraph();
arm64::CodeGeneratorARM64 codegen(graph, *features.get(), CompilerOptions());
codegen.Initialize();
@@ -777,7 +769,7 @@ TEST_F(CodegenTest, ARM64ParallelMoveResolverB34760542) {
// The solution used so far is to use a floating-point temp register
// (D31) in step #2, so that IP1 is available for step #3.
- HParallelMove* move = new (graph->GetArena()) HParallelMove(graph->GetArena());
+ HParallelMove* move = new (graph->GetAllocator()) HParallelMove(graph->GetAllocator());
move->AddMove(Location::DoubleStackSlot(0),
Location::DoubleStackSlot(257),
DataType::Type::kFloat64,
@@ -796,16 +788,14 @@ TEST_F(CodegenTest, ARM64ParallelMoveResolverB34760542) {
TEST_F(CodegenTest, ARM64ParallelMoveResolverSIMD) {
std::unique_ptr<const Arm64InstructionSetFeatures> features(
Arm64InstructionSetFeatures::FromCppDefines());
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateGraph(&allocator);
+ HGraph* graph = CreateGraph();
arm64::CodeGeneratorARM64 codegen(graph, *features.get(), CompilerOptions());
codegen.Initialize();
graph->SetHasSIMD(true);
for (int i = 0; i < 2; i++) {
- HParallelMove* move = new (graph->GetArena()) HParallelMove(graph->GetArena());
+ HParallelMove* move = new (graph->GetAllocator()) HParallelMove(graph->GetAllocator());
move->AddMove(Location::SIMDStackSlot(0),
Location::SIMDStackSlot(257),
DataType::Type::kFloat64,
@@ -841,33 +831,31 @@ TEST_F(CodegenTest, MipsClobberRA) {
return;
}
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateGraph(&allocator);
+ HGraph* graph = CreateGraph();
- HBasicBlock* entry_block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* entry_block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(entry_block);
graph->SetEntryBlock(entry_block);
- entry_block->AddInstruction(new (&allocator) HGoto());
+ entry_block->AddInstruction(new (GetAllocator()) HGoto());
- HBasicBlock* block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(block);
- HBasicBlock* exit_block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* exit_block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(exit_block);
graph->SetExitBlock(exit_block);
- exit_block->AddInstruction(new (&allocator) HExit());
+ exit_block->AddInstruction(new (GetAllocator()) HExit());
entry_block->AddSuccessor(block);
block->AddSuccessor(exit_block);
// To simplify matters, don't create PC-relative HLoadClass or HLoadString.
// Instead, generate HMipsComputeBaseMethodAddress directly.
- HMipsComputeBaseMethodAddress* base = new (&allocator) HMipsComputeBaseMethodAddress();
+ HMipsComputeBaseMethodAddress* base = new (GetAllocator()) HMipsComputeBaseMethodAddress();
block->AddInstruction(base);
// HMipsComputeBaseMethodAddress is defined as int, so just make the
// compiled method return it.
- block->AddInstruction(new (&allocator) HReturn(base));
+ block->AddInstruction(new (GetAllocator()) HReturn(base));
graph->BuildDominatorTree();
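
The codegen_test.cc changes above move free helper functions into a `CodegenTest` fixture so each test no longer builds its own pool and allocator, and future constructor changes only touch the fixture. The sketch below shows the same refactoring shape with googletest; `GraphHandle`, `OptimizingUnitTestSketch`, `BuildGraph` and `ResetState` are placeholders, not the ART test API, and the snippet assumes linking against `gtest_main`.

```cpp
#include <gtest/gtest.h>
#include <memory>
#include <vector>

struct GraphHandle {                        // stand-in for an HGraph-like object
  std::vector<int> blocks;
};

class OptimizingUnitTestSketch : public ::testing::Test {
 protected:
  void ResetState() { graphs_.clear(); }    // analogous to ResetPoolAndAllocator()

  GraphHandle* BuildGraph() {               // analogous to CreateGraph()/CreateCFG(data)
    graphs_.push_back(std::make_unique<GraphHandle>());
    return graphs_.back().get();
  }

 private:
  std::vector<std::unique_ptr<GraphHandle>> graphs_;  // owned by the fixture, fresh per test
};

class CodegenTestSketch : public OptimizingUnitTestSketch {
 protected:
  // Former free function, now a member so it can use the fixture's shared state.
  void TestCode(int block_count) {
    ResetState();
    GraphHandle* graph = BuildGraph();
    graph->blocks.resize(block_count);
    EXPECT_EQ(graph->blocks.size(), static_cast<size_t>(block_count));
  }
};

TEST_F(CodegenTestSketch, ReturnVoidShape) {
  TestCode(/*block_count=*/3);              // no per-test pool/allocator boilerplate
}
```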
diff --git a/compiler/optimizing/codegen_test_utils.h b/compiler/optimizing/codegen_test_utils.h
index aa4f5da3f0..5f4593ff0e 100644
--- a/compiler/optimizing/codegen_test_utils.h
+++ b/compiler/optimizing/codegen_test_utils.h
@@ -298,7 +298,7 @@ static void RunCodeNoCheck(CodeGenerator* codegen,
SsaLivenessAnalysis liveness(graph, codegen);
PrepareForRegisterAllocation(graph).Run();
liveness.Analyze();
- RegisterAllocator::Create(graph->GetArena(), codegen, liveness)->AllocateRegisters();
+ RegisterAllocator::Create(graph->GetAllocator(), codegen, liveness)->AllocateRegisters();
hook_before_codegen(graph);
InternalCodeAllocator allocator;
codegen->Compile(&allocator);
@@ -331,7 +331,7 @@ static void RunCode(CodegenTargetConfig target_config,
CodeGenerator* create_codegen_arm_vixl32(HGraph* graph, const CompilerOptions& compiler_options) {
std::unique_ptr<const ArmInstructionSetFeatures> features_arm(
ArmInstructionSetFeatures::FromCppDefines());
- return new (graph->GetArena())
+ return new (graph->GetAllocator())
TestCodeGeneratorARMVIXL(graph, *features_arm.get(), compiler_options);
}
#endif
@@ -340,7 +340,7 @@ CodeGenerator* create_codegen_arm_vixl32(HGraph* graph, const CompilerOptions& c
CodeGenerator* create_codegen_arm64(HGraph* graph, const CompilerOptions& compiler_options) {
std::unique_ptr<const Arm64InstructionSetFeatures> features_arm64(
Arm64InstructionSetFeatures::FromCppDefines());
- return new (graph->GetArena())
+ return new (graph->GetAllocator())
TestCodeGeneratorARM64(graph, *features_arm64.get(), compiler_options);
}
#endif
@@ -349,7 +349,8 @@ CodeGenerator* create_codegen_arm64(HGraph* graph, const CompilerOptions& compil
CodeGenerator* create_codegen_x86(HGraph* graph, const CompilerOptions& compiler_options) {
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
- return new (graph->GetArena()) TestCodeGeneratorX86(graph, *features_x86.get(), compiler_options);
+ return new (graph->GetAllocator()) TestCodeGeneratorX86(
+ graph, *features_x86.get(), compiler_options);
}
#endif
@@ -357,7 +358,7 @@ CodeGenerator* create_codegen_x86(HGraph* graph, const CompilerOptions& compiler
CodeGenerator* create_codegen_x86_64(HGraph* graph, const CompilerOptions& compiler_options) {
std::unique_ptr<const X86_64InstructionSetFeatures> features_x86_64(
X86_64InstructionSetFeatures::FromCppDefines());
- return new (graph->GetArena())
+ return new (graph->GetAllocator())
x86_64::CodeGeneratorX86_64(graph, *features_x86_64.get(), compiler_options);
}
#endif
@@ -366,7 +367,7 @@ CodeGenerator* create_codegen_x86_64(HGraph* graph, const CompilerOptions& compi
CodeGenerator* create_codegen_mips(HGraph* graph, const CompilerOptions& compiler_options) {
std::unique_ptr<const MipsInstructionSetFeatures> features_mips(
MipsInstructionSetFeatures::FromCppDefines());
- return new (graph->GetArena())
+ return new (graph->GetAllocator())
mips::CodeGeneratorMIPS(graph, *features_mips.get(), compiler_options);
}
#endif
@@ -375,7 +376,7 @@ CodeGenerator* create_codegen_mips(HGraph* graph, const CompilerOptions& compile
CodeGenerator* create_codegen_mips64(HGraph* graph, const CompilerOptions& compiler_options) {
std::unique_ptr<const Mips64InstructionSetFeatures> features_mips64(
Mips64InstructionSetFeatures::FromCppDefines());
- return new (graph->GetArena())
+ return new (graph->GetAllocator())
mips64::CodeGeneratorMIPS64(graph, *features_mips64.get(), compiler_options);
}
#endif
diff --git a/compiler/optimizing/constant_folding_test.cc b/compiler/optimizing/constant_folding_test.cc
index c85a2e3e70..e1980e080e 100644
--- a/compiler/optimizing/constant_folding_test.cc
+++ b/compiler/optimizing/constant_folding_test.cc
@@ -32,11 +32,9 @@ namespace art {
/**
* Fixture class for the constant folding and dce tests.
*/
-class ConstantFoldingTest : public CommonCompilerTest {
+class ConstantFoldingTest : public OptimizingUnitTest {
public:
- ConstantFoldingTest() : pool_(), allocator_(&pool_) {
- graph_ = CreateGraph(&allocator_);
- }
+ ConstantFoldingTest() : graph_(nullptr) { }
void TestCode(const uint16_t* data,
const std::string& expected_before,
@@ -44,7 +42,7 @@ class ConstantFoldingTest : public CommonCompilerTest {
const std::string& expected_after_dce,
const std::function<void(HGraph*)>& check_after_cf,
DataType::Type return_type = DataType::Type::kInt32) {
- graph_ = CreateCFG(&allocator_, data, return_type);
+ graph_ = CreateCFG(data, return_type);
TestCodeOnReadyGraph(expected_before,
expected_after_cf,
expected_after_dce,
@@ -88,8 +86,6 @@ class ConstantFoldingTest : public CommonCompilerTest {
EXPECT_EQ(expected_after_dce, actual_after_dce);
}
- ArenaPool pool_;
- ArenaAllocator allocator_;
HGraph* graph_;
};
@@ -742,46 +738,46 @@ TEST_F(ConstantFoldingTest, ConstantCondition) {
* in the bytecode, we need to set up the graph explicitly.
*/
TEST_F(ConstantFoldingTest, UnsignedComparisonsWithZero) {
- graph_ = CreateGraph(&allocator_);
- HBasicBlock* entry_block = new (&allocator_) HBasicBlock(graph_);
+ graph_ = CreateGraph();
+ HBasicBlock* entry_block = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry_block);
graph_->SetEntryBlock(entry_block);
- HBasicBlock* block = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(block);
- HBasicBlock* exit_block = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* exit_block = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(exit_block);
graph_->SetExitBlock(exit_block);
entry_block->AddSuccessor(block);
block->AddSuccessor(exit_block);
// Make various unsigned comparisons with zero against a parameter.
- HInstruction* parameter = new (&allocator_) HParameterValue(
+ HInstruction* parameter = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32, true);
entry_block->AddInstruction(parameter);
- entry_block->AddInstruction(new (&allocator_) HGoto());
+ entry_block->AddInstruction(new (GetAllocator()) HGoto());
HInstruction* zero = graph_->GetIntConstant(0);
HInstruction* last;
- block->AddInstruction(last = new (&allocator_) HAbove(zero, parameter));
- block->AddInstruction(new (&allocator_) HSelect(last, parameter, parameter, 0));
- block->AddInstruction(last = new (&allocator_) HAbove(parameter, zero));
- block->AddInstruction(new (&allocator_) HSelect(last, parameter, parameter, 0));
- block->AddInstruction(last = new (&allocator_) HAboveOrEqual(zero, parameter));
- block->AddInstruction(new (&allocator_) HSelect(last, parameter, parameter, 0));
- block->AddInstruction(last = new (&allocator_) HAboveOrEqual(parameter, zero));
- block->AddInstruction(new (&allocator_) HSelect(last, parameter, parameter, 0));
- block->AddInstruction(last = new (&allocator_) HBelow(zero, parameter));
- block->AddInstruction(new (&allocator_) HSelect(last, parameter, parameter, 0));
- block->AddInstruction(last = new (&allocator_) HBelow(parameter, zero));
- block->AddInstruction(new (&allocator_) HSelect(last, parameter, parameter, 0));
- block->AddInstruction(last = new (&allocator_) HBelowOrEqual(zero, parameter));
- block->AddInstruction(new (&allocator_) HSelect(last, parameter, parameter, 0));
- block->AddInstruction(last = new (&allocator_) HBelowOrEqual(parameter, zero));
- block->AddInstruction(new (&allocator_) HSelect(last, parameter, parameter, 0));
- block->AddInstruction(new (&allocator_) HReturn(zero));
-
- exit_block->AddInstruction(new (&allocator_) HExit());
+ block->AddInstruction(last = new (GetAllocator()) HAbove(zero, parameter));
+ block->AddInstruction(new (GetAllocator()) HSelect(last, parameter, parameter, 0));
+ block->AddInstruction(last = new (GetAllocator()) HAbove(parameter, zero));
+ block->AddInstruction(new (GetAllocator()) HSelect(last, parameter, parameter, 0));
+ block->AddInstruction(last = new (GetAllocator()) HAboveOrEqual(zero, parameter));
+ block->AddInstruction(new (GetAllocator()) HSelect(last, parameter, parameter, 0));
+ block->AddInstruction(last = new (GetAllocator()) HAboveOrEqual(parameter, zero));
+ block->AddInstruction(new (GetAllocator()) HSelect(last, parameter, parameter, 0));
+ block->AddInstruction(last = new (GetAllocator()) HBelow(zero, parameter));
+ block->AddInstruction(new (GetAllocator()) HSelect(last, parameter, parameter, 0));
+ block->AddInstruction(last = new (GetAllocator()) HBelow(parameter, zero));
+ block->AddInstruction(new (GetAllocator()) HSelect(last, parameter, parameter, 0));
+ block->AddInstruction(last = new (GetAllocator()) HBelowOrEqual(zero, parameter));
+ block->AddInstruction(new (GetAllocator()) HSelect(last, parameter, parameter, 0));
+ block->AddInstruction(last = new (GetAllocator()) HBelowOrEqual(parameter, zero));
+ block->AddInstruction(new (GetAllocator()) HSelect(last, parameter, parameter, 0));
+ block->AddInstruction(new (GetAllocator()) HReturn(zero));
+
+ exit_block->AddInstruction(new (GetAllocator()) HExit());
graph_->BuildDominatorTree();
diff --git a/compiler/optimizing/constructor_fence_redundancy_elimination.cc b/compiler/optimizing/constructor_fence_redundancy_elimination.cc
index ff7ce60905..4a66cd2265 100644
--- a/compiler/optimizing/constructor_fence_redundancy_elimination.cc
+++ b/compiler/optimizing/constructor_fence_redundancy_elimination.cc
@@ -17,6 +17,8 @@
#include "constructor_fence_redundancy_elimination.h"
#include "base/arena_allocator.h"
+#include "base/scoped_arena_allocator.h"
+#include "base/scoped_arena_containers.h"
namespace art {
@@ -27,7 +29,7 @@ class CFREVisitor : public HGraphVisitor {
public:
CFREVisitor(HGraph* graph, OptimizingCompilerStats* stats)
: HGraphVisitor(graph),
- scoped_allocator_(graph->GetArena()->GetArenaPool()),
+ scoped_allocator_(graph->GetArenaStack()),
candidate_fences_(scoped_allocator_.Adapter(kArenaAllocCFRE)),
candidate_fence_targets_(scoped_allocator_.Adapter(kArenaAllocCFRE)),
stats_(stats) {}
@@ -227,9 +229,8 @@ class CFREVisitor : public HGraphVisitor {
MaybeRecordStat(stats_, MethodCompilationStat::kConstructorFenceRemovedCFRE);
}
- // Phase-local heap memory allocator for CFRE optimizer. Storage obtained
- // through this allocator is immediately released when the CFRE optimizer is done.
- ArenaAllocator scoped_allocator_;
+ // Phase-local heap memory allocator for CFRE optimizer.
+ ScopedArenaAllocator scoped_allocator_;
// Set of constructor fences that we've seen in the current block.
// Each constructor fences acts as a guard for one or more `targets`.
@@ -237,11 +238,11 @@ class CFREVisitor : public HGraphVisitor {
//
// Fences are in succession order (e.g. fence[i] succeeds fence[i-1]
// within the same basic block).
- ArenaVector<HConstructorFence*> candidate_fences_;
+ ScopedArenaVector<HConstructorFence*> candidate_fences_;
// Stores a set of the fence targets, to allow faster lookup of whether
// a detected publish is a target of one of the candidate fences.
- ArenaHashSet<HInstruction*> candidate_fence_targets_;
+ ScopedArenaHashSet<HInstruction*> candidate_fence_targets_;
// Used to record stats about the optimization.
OptimizingCompilerStats* const stats_;
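
The CFREVisitor hunk above keeps a phase-local allocator as a member and builds its containers through that allocator's adapter, so all of the pass's bookkeeping is released together. As a loose analogy only, not the ART classes, the same shape can be sketched with the standard C++17 `<memory_resource>` facilities: the visitor owns a monotonic buffer and its `pmr` containers draw from it; `FenceVisitorSketch` and its members are invented names.

```cpp
#include <memory_resource>
#include <unordered_set>
#include <vector>

class FenceVisitorSketch {
 public:
  FenceVisitorSketch()
      : buffer_(4096),                       // phase-local resource, freed with the visitor
        candidate_fences_(&buffer_),
        candidate_fence_targets_(&buffer_) {}

  void VisitFence(int fence_id, int target_id) {
    candidate_fences_.push_back(fence_id);
    candidate_fence_targets_.insert(target_id);
  }

  bool IsTarget(int id) const { return candidate_fence_targets_.count(id) != 0; }

 private:
  // Plays the role scoped_allocator_ plays in the hunk above: one arena, torn down wholesale.
  std::pmr::monotonic_buffer_resource buffer_;
  std::pmr::vector<int> candidate_fences_;
  std::pmr::unordered_set<int> candidate_fence_targets_;
};
```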
diff --git a/compiler/optimizing/dead_code_elimination.cc b/compiler/optimizing/dead_code_elimination.cc
index 9b094e989e..5117e07a12 100644
--- a/compiler/optimizing/dead_code_elimination.cc
+++ b/compiler/optimizing/dead_code_elimination.cc
@@ -24,7 +24,7 @@
namespace art {
static void MarkReachableBlocks(HGraph* graph, ArenaBitVector* visited) {
- ArenaVector<HBasicBlock*> worklist(graph->GetArena()->Adapter(kArenaAllocDCE));
+ ArenaVector<HBasicBlock*> worklist(graph->GetAllocator()->Adapter(kArenaAllocDCE));
constexpr size_t kDefaultWorlistSize = 8;
worklist.reserve(kDefaultWorlistSize);
visited->SetBit(graph->GetEntryBlock()->GetBlockId());
@@ -306,7 +306,7 @@ void HDeadCodeElimination::ConnectSuccessiveBlocks() {
bool HDeadCodeElimination::RemoveDeadBlocks() {
// Classify blocks as reachable/unreachable.
- ArenaAllocator* allocator = graph_->GetArena();
+ ArenaAllocator* allocator = graph_->GetAllocator();
ArenaBitVector live_blocks(allocator, graph_->GetBlocks().size(), false, kArenaAllocDCE);
MarkReachableBlocks(graph_, &live_blocks);
diff --git a/compiler/optimizing/dead_code_elimination_test.cc b/compiler/optimizing/dead_code_elimination_test.cc
index 96fa5406b2..929572ee3b 100644
--- a/compiler/optimizing/dead_code_elimination_test.cc
+++ b/compiler/optimizing/dead_code_elimination_test.cc
@@ -27,14 +27,17 @@
namespace art {
-class DeadCodeEliminationTest : public CommonCompilerTest {};
-
-static void TestCode(const uint16_t* data,
- const std::string& expected_before,
- const std::string& expected_after) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateCFG(&allocator, data);
+class DeadCodeEliminationTest : public OptimizingUnitTest {
+ protected:
+ void TestCode(const uint16_t* data,
+ const std::string& expected_before,
+ const std::string& expected_after);
+};
+
+void DeadCodeEliminationTest::TestCode(const uint16_t* data,
+ const std::string& expected_before,
+ const std::string& expected_after) {
+ HGraph* graph = CreateCFG(data);
ASSERT_NE(graph, nullptr);
StringPrettyPrinter printer_before(graph);
diff --git a/compiler/optimizing/dominator_test.cc b/compiler/optimizing/dominator_test.cc
index 50c677adf5..6bf3a5943f 100644
--- a/compiler/optimizing/dominator_test.cc
+++ b/compiler/optimizing/dominator_test.cc
@@ -24,12 +24,13 @@
namespace art {
-class OptimizerTest : public CommonCompilerTest {};
+class OptimizerTest : public OptimizingUnitTest {
+ protected:
+ void TestCode(const uint16_t* data, const uint32_t* blocks, size_t blocks_length);
+};
-static void TestCode(const uint16_t* data, const uint32_t* blocks, size_t blocks_length) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateCFG(&allocator, data);
+void OptimizerTest::TestCode(const uint16_t* data, const uint32_t* blocks, size_t blocks_length) {
+ HGraph* graph = CreateCFG(data);
ASSERT_EQ(graph->GetBlocks().size(), blocks_length);
for (size_t i = 0, e = blocks_length; i < e; ++i) {
if (blocks[i] == kInvalidBlockId) {
diff --git a/compiler/optimizing/emit_swap_mips_test.cc b/compiler/optimizing/emit_swap_mips_test.cc
index 0e9c81dae3..36e932c67a 100644
--- a/compiler/optimizing/emit_swap_mips_test.cc
+++ b/compiler/optimizing/emit_swap_mips_test.cc
@@ -25,16 +25,15 @@
namespace art {
-class EmitSwapMipsTest : public ::testing::Test {
+class EmitSwapMipsTest : public OptimizingUnitTest {
public:
void SetUp() OVERRIDE {
- allocator_.reset(new ArenaAllocator(&pool_));
- graph_ = CreateGraph(allocator_.get());
+ graph_ = CreateGraph();
isa_features_ = MipsInstructionSetFeatures::FromCppDefines();
- codegen_ = new (graph_->GetArena()) mips::CodeGeneratorMIPS(graph_,
- *isa_features_.get(),
- CompilerOptions());
- moves_ = new (allocator_.get()) HParallelMove(allocator_.get());
+ codegen_ = new (graph_->GetAllocator()) mips::CodeGeneratorMIPS(graph_,
+ *isa_features_.get(),
+ CompilerOptions());
+ moves_ = new (GetAllocator()) HParallelMove(GetAllocator());
test_helper_.reset(
new AssemblerTestInfrastructure(GetArchitectureString(),
GetAssemblerCmdName(),
@@ -47,8 +46,9 @@ class EmitSwapMipsTest : public ::testing::Test {
}
void TearDown() OVERRIDE {
- allocator_.reset();
test_helper_.reset();
+ isa_features_.reset();
+ ResetPoolAndAllocator();
}
// Get the typically used name for this architecture.
@@ -104,12 +104,10 @@ class EmitSwapMipsTest : public ::testing::Test {
}
protected:
- ArenaPool pool_;
HGraph* graph_;
HParallelMove* moves_;
mips::CodeGeneratorMIPS* codegen_;
mips::MipsAssembler* assembler_;
- std::unique_ptr<ArenaAllocator> allocator_;
std::unique_ptr<AssemblerTestInfrastructure> test_helper_;
std::unique_ptr<const MipsInstructionSetFeatures> isa_features_;
};
diff --git a/compiler/optimizing/find_loops_test.cc b/compiler/optimizing/find_loops_test.cc
index bbd28f5c46..c91752855b 100644
--- a/compiler/optimizing/find_loops_test.cc
+++ b/compiler/optimizing/find_loops_test.cc
@@ -27,7 +27,7 @@
namespace art {
-class FindLoopsTest : public CommonCompilerTest {};
+class FindLoopsTest : public OptimizingUnitTest {};
TEST_F(FindLoopsTest, CFG1) {
// Constant is not used.
@@ -35,9 +35,7 @@ TEST_F(FindLoopsTest, CFG1) {
Instruction::CONST_4 | 0 | 0,
Instruction::RETURN_VOID);
- ArenaPool arena;
- ArenaAllocator allocator(&arena);
- HGraph* graph = CreateCFG(&allocator, data);
+ HGraph* graph = CreateCFG(data);
for (HBasicBlock* block : graph->GetBlocks()) {
ASSERT_EQ(block->GetLoopInformation(), nullptr);
}
@@ -48,9 +46,7 @@ TEST_F(FindLoopsTest, CFG2) {
Instruction::CONST_4 | 0 | 0,
Instruction::RETURN);
- ArenaPool arena;
- ArenaAllocator allocator(&arena);
- HGraph* graph = CreateCFG(&allocator, data);
+ HGraph* graph = CreateCFG(data);
for (HBasicBlock* block : graph->GetBlocks()) {
ASSERT_EQ(block->GetLoopInformation(), nullptr);
}
@@ -64,9 +60,7 @@ TEST_F(FindLoopsTest, CFG3) {
Instruction::GOTO | 0x100,
Instruction::RETURN);
- ArenaPool arena;
- ArenaAllocator allocator(&arena);
- HGraph* graph = CreateCFG(&allocator, data);
+ HGraph* graph = CreateCFG(data);
for (HBasicBlock* block : graph->GetBlocks()) {
ASSERT_EQ(block->GetLoopInformation(), nullptr);
}
@@ -81,9 +75,7 @@ TEST_F(FindLoopsTest, CFG4) {
Instruction::CONST_4 | 5 << 12 | 0,
Instruction::RETURN | 0 << 8);
- ArenaPool arena;
- ArenaAllocator allocator(&arena);
- HGraph* graph = CreateCFG(&allocator, data);
+ HGraph* graph = CreateCFG(data);
for (HBasicBlock* block : graph->GetBlocks()) {
ASSERT_EQ(block->GetLoopInformation(), nullptr);
}
@@ -96,9 +88,7 @@ TEST_F(FindLoopsTest, CFG5) {
Instruction::CONST_4 | 4 << 12 | 0,
Instruction::RETURN | 0 << 8);
- ArenaPool arena;
- ArenaAllocator allocator(&arena);
- HGraph* graph = CreateCFG(&allocator, data);
+ HGraph* graph = CreateCFG(data);
for (HBasicBlock* block : graph->GetBlocks()) {
ASSERT_EQ(block->GetLoopInformation(), nullptr);
}
@@ -142,9 +132,7 @@ TEST_F(FindLoopsTest, Loop1) {
Instruction::GOTO | 0xFE00,
Instruction::RETURN_VOID);
- ArenaPool arena;
- ArenaAllocator allocator(&arena);
- HGraph* graph = CreateCFG(&allocator, data);
+ HGraph* graph = CreateCFG(data);
TestBlock(graph, 0, false, kInvalidBlockId); // entry block
TestBlock(graph, 1, false, kInvalidBlockId); // pre header
@@ -170,9 +158,7 @@ TEST_F(FindLoopsTest, Loop2) {
Instruction::GOTO | 0xFD00,
Instruction::RETURN | 0 << 8);
- ArenaPool arena;
- ArenaAllocator allocator(&arena);
- HGraph* graph = CreateCFG(&allocator, data);
+ HGraph* graph = CreateCFG(data);
TestBlock(graph, 0, false, kInvalidBlockId); // entry block
TestBlock(graph, 1, false, kInvalidBlockId); // goto block
@@ -195,9 +181,7 @@ TEST_F(FindLoopsTest, Loop3) {
Instruction::GOTO | 0xFE00,
Instruction::RETURN | 0 << 8);
- ArenaPool arena;
- ArenaAllocator allocator(&arena);
- HGraph* graph = CreateCFG(&allocator, data);
+ HGraph* graph = CreateCFG(data);
TestBlock(graph, 0, false, kInvalidBlockId); // entry block
TestBlock(graph, 1, false, kInvalidBlockId); // goto block
@@ -221,9 +205,7 @@ TEST_F(FindLoopsTest, Loop4) {
Instruction::GOTO | 0xFB00,
Instruction::RETURN | 0 << 8);
- ArenaPool arena;
- ArenaAllocator allocator(&arena);
- HGraph* graph = CreateCFG(&allocator, data);
+ HGraph* graph = CreateCFG(data);
TestBlock(graph, 0, false, kInvalidBlockId); // entry block
TestBlock(graph, 1, false, kInvalidBlockId); // pre header
@@ -247,9 +229,7 @@ TEST_F(FindLoopsTest, Loop5) {
Instruction::GOTO | 0xFB00,
Instruction::RETURN | 0 << 8);
- ArenaPool arena;
- ArenaAllocator allocator(&arena);
- HGraph* graph = CreateCFG(&allocator, data);
+ HGraph* graph = CreateCFG(data);
TestBlock(graph, 0, false, kInvalidBlockId); // entry block
TestBlock(graph, 1, false, kInvalidBlockId); // pre header
@@ -272,9 +252,7 @@ TEST_F(FindLoopsTest, InnerLoop) {
Instruction::GOTO | 0xFB00,
Instruction::RETURN | 0 << 8);
- ArenaPool arena;
- ArenaAllocator allocator(&arena);
- HGraph* graph = CreateCFG(&allocator, data);
+ HGraph* graph = CreateCFG(data);
TestBlock(graph, 0, false, kInvalidBlockId); // entry block
TestBlock(graph, 1, false, kInvalidBlockId); // pre header of outer loop
@@ -303,9 +281,7 @@ TEST_F(FindLoopsTest, TwoLoops) {
Instruction::GOTO | 0xFE00, // second loop
Instruction::RETURN | 0 << 8);
- ArenaPool arena;
- ArenaAllocator allocator(&arena);
- HGraph* graph = CreateCFG(&allocator, data);
+ HGraph* graph = CreateCFG(data);
TestBlock(graph, 0, false, kInvalidBlockId); // entry block
TestBlock(graph, 1, false, kInvalidBlockId); // pre header of first loop
@@ -333,9 +309,7 @@ TEST_F(FindLoopsTest, NonNaturalLoop) {
Instruction::GOTO | 0xFD00,
Instruction::RETURN | 0 << 8);
- ArenaPool arena;
- ArenaAllocator allocator(&arena);
- HGraph* graph = CreateCFG(&allocator, data);
+ HGraph* graph = CreateCFG(data);
ASSERT_TRUE(graph->GetBlocks()[3]->IsLoopHeader());
HLoopInformation* info = graph->GetBlocks()[3]->GetLoopInformation();
ASSERT_EQ(1u, info->NumberOfBackEdges());
@@ -349,9 +323,7 @@ TEST_F(FindLoopsTest, DoWhileLoop) {
Instruction::IF_EQ, 0xFFFF,
Instruction::RETURN | 0 << 8);
- ArenaPool arena;
- ArenaAllocator allocator(&arena);
- HGraph* graph = CreateCFG(&allocator, data);
+ HGraph* graph = CreateCFG(data);
TestBlock(graph, 0, false, kInvalidBlockId); // entry block
TestBlock(graph, 1, false, kInvalidBlockId); // pre header of first loop
diff --git a/compiler/optimizing/graph_checker.h b/compiler/optimizing/graph_checker.h
index 3060c80073..6af7b429f7 100644
--- a/compiler/optimizing/graph_checker.h
+++ b/compiler/optimizing/graph_checker.h
@@ -28,14 +28,14 @@ class GraphChecker : public HGraphDelegateVisitor {
public:
explicit GraphChecker(HGraph* graph, const char* dump_prefix = "art::GraphChecker: ")
: HGraphDelegateVisitor(graph),
- errors_(graph->GetArena()->Adapter(kArenaAllocGraphChecker)),
+ errors_(graph->GetAllocator()->Adapter(kArenaAllocGraphChecker)),
dump_prefix_(dump_prefix),
- seen_ids_(graph->GetArena(),
+ seen_ids_(graph->GetAllocator(),
graph->GetCurrentInstructionId(),
false,
kArenaAllocGraphChecker),
- blocks_storage_(graph->GetArena()->Adapter(kArenaAllocGraphChecker)),
- visited_storage_(graph->GetArena(), 0u, true, kArenaAllocGraphChecker) {}
+ blocks_storage_(graph->GetAllocator()->Adapter(kArenaAllocGraphChecker)),
+ visited_storage_(graph->GetAllocator(), 0u, true, kArenaAllocGraphChecker) {}
// Check the whole graph (in reverse post-order).
void Run() {
diff --git a/compiler/optimizing/graph_checker_test.cc b/compiler/optimizing/graph_checker_test.cc
index 2b8231942b..9ca3e4953a 100644
--- a/compiler/optimizing/graph_checker_test.cc
+++ b/compiler/optimizing/graph_checker_test.cc
@@ -19,6 +19,12 @@
namespace art {
+class GraphCheckerTest : public OptimizingUnitTest {
+ protected:
+ HGraph* CreateSimpleCFG();
+ void TestCode(const uint16_t* data);
+};
+
/**
* Create a simple control-flow graph composed of two blocks:
*
@@ -27,14 +33,14 @@ namespace art {
* BasicBlock 1, pred: 0
* 1: Exit
*/
-HGraph* CreateSimpleCFG(ArenaAllocator* allocator) {
- HGraph* graph = CreateGraph(allocator);
- HBasicBlock* entry_block = new (allocator) HBasicBlock(graph);
- entry_block->AddInstruction(new (allocator) HReturnVoid());
+HGraph* GraphCheckerTest::CreateSimpleCFG() {
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry_block = new (GetAllocator()) HBasicBlock(graph);
+ entry_block->AddInstruction(new (GetAllocator()) HReturnVoid());
graph->AddBlock(entry_block);
graph->SetEntryBlock(entry_block);
- HBasicBlock* exit_block = new (allocator) HBasicBlock(graph);
- exit_block->AddInstruction(new (allocator) HExit());
+ HBasicBlock* exit_block = new (GetAllocator()) HBasicBlock(graph);
+ exit_block->AddInstruction(new (GetAllocator()) HExit());
graph->AddBlock(exit_block);
graph->SetExitBlock(exit_block);
entry_block->AddSuccessor(exit_block);
@@ -42,10 +48,8 @@ HGraph* CreateSimpleCFG(ArenaAllocator* allocator) {
return graph;
}
-static void TestCode(const uint16_t* data) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateCFG(&allocator, data);
+void GraphCheckerTest::TestCode(const uint16_t* data) {
+ HGraph* graph = CreateCFG(data);
ASSERT_NE(graph, nullptr);
GraphChecker graph_checker(graph);
@@ -53,8 +57,6 @@ static void TestCode(const uint16_t* data) {
ASSERT_TRUE(graph_checker.IsValid());
}
-class GraphCheckerTest : public CommonCompilerTest {};
-
TEST_F(GraphCheckerTest, ReturnVoid) {
const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
Instruction::RETURN_VOID);
@@ -93,10 +95,7 @@ TEST_F(GraphCheckerTest, CFG3) {
// Test case with an invalid graph containing inconsistent
// predecessor/successor arcs in CFG.
TEST_F(GraphCheckerTest, InconsistentPredecessorsAndSuccessors) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateSimpleCFG(&allocator);
+ HGraph* graph = CreateSimpleCFG();
GraphChecker graph_checker(graph);
graph_checker.Run();
ASSERT_TRUE(graph_checker.IsValid());
@@ -111,10 +110,7 @@ TEST_F(GraphCheckerTest, InconsistentPredecessorsAndSuccessors) {
// Test case with an invalid graph containing a non-branch last
// instruction in a block.
TEST_F(GraphCheckerTest, BlockEndingWithNonBranchInstruction) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateSimpleCFG(&allocator);
+ HGraph* graph = CreateSimpleCFG();
GraphChecker graph_checker(graph);
graph_checker.Run();
ASSERT_TRUE(graph_checker.IsValid());
diff --git a/compiler/optimizing/graph_test.cc b/compiler/optimizing/graph_test.cc
index 28ee3a5e8b..29af808731 100644
--- a/compiler/optimizing/graph_test.cc
+++ b/compiler/optimizing/graph_test.cc
@@ -24,43 +24,52 @@
namespace art {
-static HBasicBlock* createIfBlock(HGraph* graph, ArenaAllocator* allocator) {
- HBasicBlock* if_block = new (allocator) HBasicBlock(graph);
+class GraphTest : public OptimizingUnitTest {
+ protected:
+ HBasicBlock* CreateIfBlock(HGraph* graph);
+ HBasicBlock* CreateGotoBlock(HGraph* graph);
+ HBasicBlock* CreateEntryBlock(HGraph* graph);
+ HBasicBlock* CreateReturnBlock(HGraph* graph);
+ HBasicBlock* CreateExitBlock(HGraph* graph);
+};
+
+HBasicBlock* GraphTest::CreateIfBlock(HGraph* graph) {
+ HBasicBlock* if_block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(if_block);
HInstruction* instr = graph->GetIntConstant(4);
- HInstruction* equal = new (allocator) HEqual(instr, instr);
+ HInstruction* equal = new (GetAllocator()) HEqual(instr, instr);
if_block->AddInstruction(equal);
- instr = new (allocator) HIf(equal);
+ instr = new (GetAllocator()) HIf(equal);
if_block->AddInstruction(instr);
return if_block;
}
-static HBasicBlock* createGotoBlock(HGraph* graph, ArenaAllocator* allocator) {
- HBasicBlock* block = new (allocator) HBasicBlock(graph);
+HBasicBlock* GraphTest::CreateGotoBlock(HGraph* graph) {
+ HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(block);
- HInstruction* got = new (allocator) HGoto();
+ HInstruction* got = new (GetAllocator()) HGoto();
block->AddInstruction(got);
return block;
}
-static HBasicBlock* createEntryBlock(HGraph* graph, ArenaAllocator* allocator) {
- HBasicBlock* block = createGotoBlock(graph, allocator);
+HBasicBlock* GraphTest::CreateEntryBlock(HGraph* graph) {
+ HBasicBlock* block = CreateGotoBlock(graph);
graph->SetEntryBlock(block);
return block;
}
-static HBasicBlock* createReturnBlock(HGraph* graph, ArenaAllocator* allocator) {
- HBasicBlock* block = new (allocator) HBasicBlock(graph);
+HBasicBlock* GraphTest::CreateReturnBlock(HGraph* graph) {
+ HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(block);
- HInstruction* return_instr = new (allocator) HReturnVoid();
+ HInstruction* return_instr = new (GetAllocator()) HReturnVoid();
block->AddInstruction(return_instr);
return block;
}
-static HBasicBlock* createExitBlock(HGraph* graph, ArenaAllocator* allocator) {
- HBasicBlock* block = new (allocator) HBasicBlock(graph);
+HBasicBlock* GraphTest::CreateExitBlock(HGraph* graph) {
+ HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(block);
- HInstruction* exit_instr = new (allocator) HExit();
+ HInstruction* exit_instr = new (GetAllocator()) HExit();
block->AddInstruction(exit_instr);
return block;
}
@@ -68,16 +77,13 @@ static HBasicBlock* createExitBlock(HGraph* graph, ArenaAllocator* allocator) {
// Test that the successors of an if block stay consistent after a SimplifyCFG.
// This test sets the false block to be the return block.
-TEST(GraphTest, IfSuccessorSimpleJoinBlock1) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateGraph(&allocator);
- HBasicBlock* entry_block = createEntryBlock(graph, &allocator);
- HBasicBlock* if_block = createIfBlock(graph, &allocator);
- HBasicBlock* if_true = createGotoBlock(graph, &allocator);
- HBasicBlock* return_block = createReturnBlock(graph, &allocator);
- HBasicBlock* exit_block = createExitBlock(graph, &allocator);
+TEST_F(GraphTest, IfSuccessorSimpleJoinBlock1) {
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry_block = CreateEntryBlock(graph);
+ HBasicBlock* if_block = CreateIfBlock(graph);
+ HBasicBlock* if_true = CreateGotoBlock(graph);
+ HBasicBlock* return_block = CreateReturnBlock(graph);
+ HBasicBlock* exit_block = CreateExitBlock(graph);
entry_block->AddSuccessor(if_block);
if_block->AddSuccessor(if_true);
@@ -103,16 +109,13 @@ TEST(GraphTest, IfSuccessorSimpleJoinBlock1) {
// Test that the successors of an if block stay consistent after a SimplifyCFG.
// This test sets the true block to be the return block.
-TEST(GraphTest, IfSuccessorSimpleJoinBlock2) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateGraph(&allocator);
- HBasicBlock* entry_block = createEntryBlock(graph, &allocator);
- HBasicBlock* if_block = createIfBlock(graph, &allocator);
- HBasicBlock* if_false = createGotoBlock(graph, &allocator);
- HBasicBlock* return_block = createReturnBlock(graph, &allocator);
- HBasicBlock* exit_block = createExitBlock(graph, &allocator);
+TEST_F(GraphTest, IfSuccessorSimpleJoinBlock2) {
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry_block = CreateEntryBlock(graph);
+ HBasicBlock* if_block = CreateIfBlock(graph);
+ HBasicBlock* if_false = CreateGotoBlock(graph);
+ HBasicBlock* return_block = CreateReturnBlock(graph);
+ HBasicBlock* exit_block = CreateExitBlock(graph);
entry_block->AddSuccessor(if_block);
if_block->AddSuccessor(return_block);
@@ -138,15 +141,12 @@ TEST(GraphTest, IfSuccessorSimpleJoinBlock2) {
// Test that the successors of an if block stay consistent after a SimplifyCFG.
// This test sets the true block to be the loop header.
-TEST(GraphTest, IfSuccessorMultipleBackEdges1) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateGraph(&allocator);
- HBasicBlock* entry_block = createEntryBlock(graph, &allocator);
- HBasicBlock* if_block = createIfBlock(graph, &allocator);
- HBasicBlock* return_block = createReturnBlock(graph, &allocator);
- HBasicBlock* exit_block = createExitBlock(graph, &allocator);
+TEST_F(GraphTest, IfSuccessorMultipleBackEdges1) {
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry_block = CreateEntryBlock(graph);
+ HBasicBlock* if_block = CreateIfBlock(graph);
+ HBasicBlock* return_block = CreateReturnBlock(graph);
+ HBasicBlock* exit_block = CreateExitBlock(graph);
entry_block->AddSuccessor(if_block);
if_block->AddSuccessor(if_block);
@@ -173,15 +173,12 @@ TEST(GraphTest, IfSuccessorMultipleBackEdges1) {
// Test that the successors of an if block stay consistent after a SimplifyCFG.
// This test sets the false block to be the loop header.
-TEST(GraphTest, IfSuccessorMultipleBackEdges2) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateGraph(&allocator);
- HBasicBlock* entry_block = createEntryBlock(graph, &allocator);
- HBasicBlock* if_block = createIfBlock(graph, &allocator);
- HBasicBlock* return_block = createReturnBlock(graph, &allocator);
- HBasicBlock* exit_block = createExitBlock(graph, &allocator);
+TEST_F(GraphTest, IfSuccessorMultipleBackEdges2) {
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry_block = CreateEntryBlock(graph);
+ HBasicBlock* if_block = CreateIfBlock(graph);
+ HBasicBlock* return_block = CreateReturnBlock(graph);
+ HBasicBlock* exit_block = CreateExitBlock(graph);
entry_block->AddSuccessor(if_block);
if_block->AddSuccessor(return_block);
@@ -208,16 +205,13 @@ TEST(GraphTest, IfSuccessorMultipleBackEdges2) {
// Test that the successors of an if block stay consistent after a SimplifyCFG.
// This test sets the true block to be a loop header with multiple pre headers.
-TEST(GraphTest, IfSuccessorMultiplePreHeaders1) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateGraph(&allocator);
- HBasicBlock* entry_block = createEntryBlock(graph, &allocator);
- HBasicBlock* first_if_block = createIfBlock(graph, &allocator);
- HBasicBlock* if_block = createIfBlock(graph, &allocator);
- HBasicBlock* loop_block = createGotoBlock(graph, &allocator);
- HBasicBlock* return_block = createReturnBlock(graph, &allocator);
+TEST_F(GraphTest, IfSuccessorMultiplePreHeaders1) {
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry_block = CreateEntryBlock(graph);
+ HBasicBlock* first_if_block = CreateIfBlock(graph);
+ HBasicBlock* if_block = CreateIfBlock(graph);
+ HBasicBlock* loop_block = CreateGotoBlock(graph);
+ HBasicBlock* return_block = CreateReturnBlock(graph);
entry_block->AddSuccessor(first_if_block);
first_if_block->AddSuccessor(if_block);
@@ -247,16 +241,13 @@ TEST(GraphTest, IfSuccessorMultiplePreHeaders1) {
// Test that the successors of an if block stay consistent after a SimplifyCFG.
// This test sets the false block to be a loop header with multiple pre headers.
-TEST(GraphTest, IfSuccessorMultiplePreHeaders2) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateGraph(&allocator);
- HBasicBlock* entry_block = createEntryBlock(graph, &allocator);
- HBasicBlock* first_if_block = createIfBlock(graph, &allocator);
- HBasicBlock* if_block = createIfBlock(graph, &allocator);
- HBasicBlock* loop_block = createGotoBlock(graph, &allocator);
- HBasicBlock* return_block = createReturnBlock(graph, &allocator);
+TEST_F(GraphTest, IfSuccessorMultiplePreHeaders2) {
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry_block = CreateEntryBlock(graph);
+ HBasicBlock* first_if_block = CreateIfBlock(graph);
+ HBasicBlock* if_block = CreateIfBlock(graph);
+ HBasicBlock* loop_block = CreateGotoBlock(graph);
+ HBasicBlock* return_block = CreateReturnBlock(graph);
entry_block->AddSuccessor(first_if_block);
first_if_block->AddSuccessor(if_block);
@@ -283,17 +274,14 @@ TEST(GraphTest, IfSuccessorMultiplePreHeaders2) {
loop_block->GetLoopInformation()->GetPreHeader());
}
-TEST(GraphTest, InsertInstructionBefore) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateGraph(&allocator);
- HBasicBlock* block = createGotoBlock(graph, &allocator);
+TEST_F(GraphTest, InsertInstructionBefore) {
+ HGraph* graph = CreateGraph();
+ HBasicBlock* block = CreateGotoBlock(graph);
HInstruction* got = block->GetLastInstruction();
ASSERT_TRUE(got->IsControlFlow());
// Test at the beginning of the block.
- HInstruction* first_instruction = new (&allocator) HIntConstant(4);
+ HInstruction* first_instruction = new (GetAllocator()) HIntConstant(4);
block->InsertInstructionBefore(first_instruction, got);
ASSERT_NE(first_instruction->GetId(), -1);
@@ -306,7 +294,7 @@ TEST(GraphTest, InsertInstructionBefore) {
ASSERT_EQ(got->GetPrevious(), first_instruction);
// Test in the middle of the block.
- HInstruction* second_instruction = new (&allocator) HIntConstant(4);
+ HInstruction* second_instruction = new (GetAllocator()) HIntConstant(4);
block->InsertInstructionBefore(second_instruction, got);
ASSERT_NE(second_instruction->GetId(), -1);
diff --git a/compiler/optimizing/gvn.cc b/compiler/optimizing/gvn.cc
index 8ea312d0ea..c09e5df1c0 100644
--- a/compiler/optimizing/gvn.cc
+++ b/compiler/optimizing/gvn.cc
@@ -566,7 +566,7 @@ HBasicBlock* GlobalValueNumberer::FindVisitedBlockWithRecyclableSet(
}
void GVNOptimization::Run() {
- GlobalValueNumberer gvn(graph_->GetArena(), graph_, side_effects_);
+ GlobalValueNumberer gvn(graph_->GetAllocator(), graph_, side_effects_);
gvn.Run();
}
diff --git a/compiler/optimizing/gvn_test.cc b/compiler/optimizing/gvn_test.cc
index ac0dbee2c5..3bf4cc35ba 100644
--- a/compiler/optimizing/gvn_test.cc
+++ b/compiler/optimizing/gvn_test.cc
@@ -24,77 +24,74 @@
namespace art {
-class GVNTest : public CommonCompilerTest {};
+class GVNTest : public OptimizingUnitTest {};
TEST_F(GVNTest, LocalFieldElimination) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateGraph(&allocator);
- HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter = new (&allocator) HParameterValue(graph->GetDexFile(),
- dex::TypeIndex(0),
- 0,
- DataType::Type::kReference);
+ HInstruction* parameter = new (GetAllocator()) HParameterValue(graph->GetDexFile(),
+ dex::TypeIndex(0),
+ 0,
+ DataType::Type::kReference);
entry->AddInstruction(parameter);
- HBasicBlock* block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(block);
entry->AddSuccessor(block);
- block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
- nullptr,
- DataType::Type::kReference,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0));
- block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
- nullptr,
- DataType::Type::kReference,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0));
+ block->AddInstruction(new (GetAllocator()) HInstanceFieldGet(parameter,
+ nullptr,
+ DataType::Type::kReference,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0));
+ block->AddInstruction(new (GetAllocator()) HInstanceFieldGet(parameter,
+ nullptr,
+ DataType::Type::kReference,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0));
HInstruction* to_remove = block->GetLastInstruction();
- block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
- nullptr,
- DataType::Type::kReference,
- MemberOffset(43),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0));
+ block->AddInstruction(new (GetAllocator()) HInstanceFieldGet(parameter,
+ nullptr,
+ DataType::Type::kReference,
+ MemberOffset(43),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0));
HInstruction* different_offset = block->GetLastInstruction();
// Kill the value.
- block->AddInstruction(new (&allocator) HInstanceFieldSet(parameter,
- parameter,
- nullptr,
- DataType::Type::kReference,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0));
- block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
- nullptr,
- DataType::Type::kReference,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0));
+ block->AddInstruction(new (GetAllocator()) HInstanceFieldSet(parameter,
+ parameter,
+ nullptr,
+ DataType::Type::kReference,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0));
+ block->AddInstruction(new (GetAllocator()) HInstanceFieldGet(parameter,
+ nullptr,
+ DataType::Type::kReference,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0));
HInstruction* use_after_kill = block->GetLastInstruction();
- block->AddInstruction(new (&allocator) HExit());
+ block->AddInstruction(new (GetAllocator()) HExit());
ASSERT_EQ(to_remove->GetBlock(), block);
ASSERT_EQ(different_offset->GetBlock(), block);
@@ -111,36 +108,33 @@ TEST_F(GVNTest, LocalFieldElimination) {
}
TEST_F(GVNTest, GlobalFieldElimination) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateGraph(&allocator);
- HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter = new (&allocator) HParameterValue(graph->GetDexFile(),
- dex::TypeIndex(0),
- 0,
- DataType::Type::kReference);
+ HInstruction* parameter = new (GetAllocator()) HParameterValue(graph->GetDexFile(),
+ dex::TypeIndex(0),
+ 0,
+ DataType::Type::kReference);
entry->AddInstruction(parameter);
- HBasicBlock* block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(block);
entry->AddSuccessor(block);
- block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
- nullptr,
- DataType::Type::kBool,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0));
-
- block->AddInstruction(new (&allocator) HIf(block->GetLastInstruction()));
- HBasicBlock* then = new (&allocator) HBasicBlock(graph);
- HBasicBlock* else_ = new (&allocator) HBasicBlock(graph);
- HBasicBlock* join = new (&allocator) HBasicBlock(graph);
+ block->AddInstruction(new (GetAllocator()) HInstanceFieldGet(parameter,
+ nullptr,
+ DataType::Type::kBool,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0));
+
+ block->AddInstruction(new (GetAllocator()) HIf(block->GetLastInstruction()));
+ HBasicBlock* then = new (GetAllocator()) HBasicBlock(graph);
+ HBasicBlock* else_ = new (GetAllocator()) HBasicBlock(graph);
+ HBasicBlock* join = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(then);
graph->AddBlock(else_);
graph->AddBlock(join);
@@ -150,36 +144,36 @@ TEST_F(GVNTest, GlobalFieldElimination) {
then->AddSuccessor(join);
else_->AddSuccessor(join);
- then->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
- nullptr,
- DataType::Type::kBool,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0));
- then->AddInstruction(new (&allocator) HGoto());
- else_->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
- nullptr,
- DataType::Type::kBool,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0));
- else_->AddInstruction(new (&allocator) HGoto());
- join->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
- nullptr,
- DataType::Type::kBool,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0));
- join->AddInstruction(new (&allocator) HExit());
+ then->AddInstruction(new (GetAllocator()) HInstanceFieldGet(parameter,
+ nullptr,
+ DataType::Type::kBool,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0));
+ then->AddInstruction(new (GetAllocator()) HGoto());
+ else_->AddInstruction(new (GetAllocator()) HInstanceFieldGet(parameter,
+ nullptr,
+ DataType::Type::kBool,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0));
+ else_->AddInstruction(new (GetAllocator()) HGoto());
+ join->AddInstruction(new (GetAllocator()) HInstanceFieldGet(parameter,
+ nullptr,
+ DataType::Type::kBool,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0));
+ join->AddInstruction(new (GetAllocator()) HExit());
graph->BuildDominatorTree();
SideEffectsAnalysis side_effects(graph);
@@ -193,37 +187,34 @@ TEST_F(GVNTest, GlobalFieldElimination) {
}
TEST_F(GVNTest, LoopFieldElimination) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateGraph(&allocator);
- HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter = new (&allocator) HParameterValue(graph->GetDexFile(),
- dex::TypeIndex(0),
- 0,
- DataType::Type::kReference);
+ HInstruction* parameter = new (GetAllocator()) HParameterValue(graph->GetDexFile(),
+ dex::TypeIndex(0),
+ 0,
+ DataType::Type::kReference);
entry->AddInstruction(parameter);
- HBasicBlock* block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(block);
entry->AddSuccessor(block);
- block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
- nullptr,
- DataType::Type::kBool,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0));
- block->AddInstruction(new (&allocator) HGoto());
-
- HBasicBlock* loop_header = new (&allocator) HBasicBlock(graph);
- HBasicBlock* loop_body = new (&allocator) HBasicBlock(graph);
- HBasicBlock* exit = new (&allocator) HBasicBlock(graph);
+ block->AddInstruction(new (GetAllocator()) HInstanceFieldGet(parameter,
+ nullptr,
+ DataType::Type::kBool,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0));
+ block->AddInstruction(new (GetAllocator()) HGoto());
+
+ HBasicBlock* loop_header = new (GetAllocator()) HBasicBlock(graph);
+ HBasicBlock* loop_body = new (GetAllocator()) HBasicBlock(graph);
+ HBasicBlock* exit = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(loop_header);
graph->AddBlock(loop_body);
@@ -233,54 +224,54 @@ TEST_F(GVNTest, LoopFieldElimination) {
loop_header->AddSuccessor(exit);
loop_body->AddSuccessor(loop_header);
- loop_header->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
- nullptr,
- DataType::Type::kBool,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0));
+ loop_header->AddInstruction(new (GetAllocator()) HInstanceFieldGet(parameter,
+ nullptr,
+ DataType::Type::kBool,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0));
HInstruction* field_get_in_loop_header = loop_header->GetLastInstruction();
- loop_header->AddInstruction(new (&allocator) HIf(block->GetLastInstruction()));
+ loop_header->AddInstruction(new (GetAllocator()) HIf(block->GetLastInstruction()));
  // Kill inside the loop body to prevent the field gets in the loop header
  // and in the body from being GVN'ed.
- loop_body->AddInstruction(new (&allocator) HInstanceFieldSet(parameter,
- parameter,
- nullptr,
- DataType::Type::kBool,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0));
+ loop_body->AddInstruction(new (GetAllocator()) HInstanceFieldSet(parameter,
+ parameter,
+ nullptr,
+ DataType::Type::kBool,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0));
HInstruction* field_set = loop_body->GetLastInstruction();
- loop_body->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
- nullptr,
- DataType::Type::kBool,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0));
+ loop_body->AddInstruction(new (GetAllocator()) HInstanceFieldGet(parameter,
+ nullptr,
+ DataType::Type::kBool,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0));
HInstruction* field_get_in_loop_body = loop_body->GetLastInstruction();
- loop_body->AddInstruction(new (&allocator) HGoto());
-
- exit->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
- nullptr,
- DataType::Type::kBool,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0));
+ loop_body->AddInstruction(new (GetAllocator()) HGoto());
+
+ exit->AddInstruction(new (GetAllocator()) HInstanceFieldGet(parameter,
+ nullptr,
+ DataType::Type::kBool,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0));
HInstruction* field_get_in_exit = exit->GetLastInstruction();
- exit->AddInstruction(new (&allocator) HExit());
+ exit->AddInstruction(new (GetAllocator()) HExit());
ASSERT_EQ(field_get_in_loop_header->GetBlock(), loop_header);
ASSERT_EQ(field_get_in_loop_body->GetBlock(), loop_body);
@@ -315,22 +306,19 @@ TEST_F(GVNTest, LoopFieldElimination) {
// Test that inner loops affect the side effects of the outer loop.
TEST_F(GVNTest, LoopSideEffects) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
static const SideEffects kCanTriggerGC = SideEffects::CanTriggerGC();
- HGraph* graph = CreateGraph(&allocator);
- HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HBasicBlock* outer_loop_header = new (&allocator) HBasicBlock(graph);
- HBasicBlock* outer_loop_body = new (&allocator) HBasicBlock(graph);
- HBasicBlock* outer_loop_exit = new (&allocator) HBasicBlock(graph);
- HBasicBlock* inner_loop_header = new (&allocator) HBasicBlock(graph);
- HBasicBlock* inner_loop_body = new (&allocator) HBasicBlock(graph);
- HBasicBlock* inner_loop_exit = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* outer_loop_header = new (GetAllocator()) HBasicBlock(graph);
+ HBasicBlock* outer_loop_body = new (GetAllocator()) HBasicBlock(graph);
+ HBasicBlock* outer_loop_exit = new (GetAllocator()) HBasicBlock(graph);
+ HBasicBlock* inner_loop_header = new (GetAllocator()) HBasicBlock(graph);
+ HBasicBlock* inner_loop_body = new (GetAllocator()) HBasicBlock(graph);
+ HBasicBlock* inner_loop_exit = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(outer_loop_header);
graph->AddBlock(outer_loop_body);
@@ -348,20 +336,20 @@ TEST_F(GVNTest, LoopSideEffects) {
inner_loop_body->AddSuccessor(inner_loop_header);
inner_loop_exit->AddSuccessor(outer_loop_header);
- HInstruction* parameter = new (&allocator) HParameterValue(graph->GetDexFile(),
- dex::TypeIndex(0),
- 0,
- DataType::Type::kBool);
+ HInstruction* parameter = new (GetAllocator()) HParameterValue(graph->GetDexFile(),
+ dex::TypeIndex(0),
+ 0,
+ DataType::Type::kBool);
entry->AddInstruction(parameter);
- entry->AddInstruction(new (&allocator) HGoto());
- outer_loop_header->AddInstruction(new (&allocator) HSuspendCheck());
- outer_loop_header->AddInstruction(new (&allocator) HIf(parameter));
- outer_loop_body->AddInstruction(new (&allocator) HGoto());
- inner_loop_header->AddInstruction(new (&allocator) HSuspendCheck());
- inner_loop_header->AddInstruction(new (&allocator) HIf(parameter));
- inner_loop_body->AddInstruction(new (&allocator) HGoto());
- inner_loop_exit->AddInstruction(new (&allocator) HGoto());
- outer_loop_exit->AddInstruction(new (&allocator) HExit());
+ entry->AddInstruction(new (GetAllocator()) HGoto());
+ outer_loop_header->AddInstruction(new (GetAllocator()) HSuspendCheck());
+ outer_loop_header->AddInstruction(new (GetAllocator()) HIf(parameter));
+ outer_loop_body->AddInstruction(new (GetAllocator()) HGoto());
+ inner_loop_header->AddInstruction(new (GetAllocator()) HSuspendCheck());
+ inner_loop_header->AddInstruction(new (GetAllocator()) HIf(parameter));
+ inner_loop_body->AddInstruction(new (GetAllocator()) HGoto());
+ inner_loop_exit->AddInstruction(new (GetAllocator()) HGoto());
+ outer_loop_exit->AddInstruction(new (GetAllocator()) HExit());
graph->BuildDominatorTree();
@@ -371,16 +359,16 @@ TEST_F(GVNTest, LoopSideEffects) {
// Check that the only side effect of loops is to potentially trigger GC.
{
// Make one block with a side effect.
- entry->AddInstruction(new (&allocator) HInstanceFieldSet(parameter,
- parameter,
- nullptr,
- DataType::Type::kReference,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0));
+ entry->AddInstruction(new (GetAllocator()) HInstanceFieldSet(parameter,
+ parameter,
+ nullptr,
+ DataType::Type::kReference,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0));
SideEffectsAnalysis side_effects(graph);
side_effects.Run();
@@ -396,16 +384,16 @@ TEST_F(GVNTest, LoopSideEffects) {
  // Check that the side effects of the outer loop do not affect the inner loop.
{
outer_loop_body->InsertInstructionBefore(
- new (&allocator) HInstanceFieldSet(parameter,
- parameter,
- nullptr,
- DataType::Type::kReference,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0),
+ new (GetAllocator()) HInstanceFieldSet(parameter,
+ parameter,
+ nullptr,
+ DataType::Type::kReference,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0),
outer_loop_body->GetLastInstruction());
SideEffectsAnalysis side_effects(graph);
@@ -422,16 +410,16 @@ TEST_F(GVNTest, LoopSideEffects) {
{
outer_loop_body->RemoveInstruction(outer_loop_body->GetFirstInstruction());
inner_loop_body->InsertInstructionBefore(
- new (&allocator) HInstanceFieldSet(parameter,
- parameter,
- nullptr,
- DataType::Type::kReference,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0),
+ new (GetAllocator()) HInstanceFieldSet(parameter,
+ parameter,
+ nullptr,
+ DataType::Type::kReference,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0),
inner_loop_body->GetLastInstruction());
SideEffectsAnalysis side_effects(graph);
diff --git a/compiler/optimizing/induction_var_analysis.cc b/compiler/optimizing/induction_var_analysis.cc
index eab17aad31..0987293e4e 100644
--- a/compiler/optimizing/induction_var_analysis.cc
+++ b/compiler/optimizing/induction_var_analysis.cc
@@ -100,17 +100,17 @@ static DataType::Type ImplicitConversion(DataType::Type type) {
HInductionVarAnalysis::HInductionVarAnalysis(HGraph* graph)
: HOptimization(graph, kInductionPassName),
global_depth_(0),
- stack_(graph->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)),
+ stack_(graph->GetAllocator()->Adapter(kArenaAllocInductionVarAnalysis)),
map_(std::less<HInstruction*>(),
- graph->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)),
- scc_(graph->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)),
+ graph->GetAllocator()->Adapter(kArenaAllocInductionVarAnalysis)),
+ scc_(graph->GetAllocator()->Adapter(kArenaAllocInductionVarAnalysis)),
cycle_(std::less<HInstruction*>(),
- graph->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)),
+ graph->GetAllocator()->Adapter(kArenaAllocInductionVarAnalysis)),
type_(DataType::Type::kVoid),
induction_(std::less<HLoopInformation*>(),
- graph->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)),
+ graph->GetAllocator()->Adapter(kArenaAllocInductionVarAnalysis)),
cycles_(std::less<HPhi*>(),
- graph->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)) {
+ graph->GetAllocator()->Adapter(kArenaAllocInductionVarAnalysis)) {
}
void HInductionVarAnalysis::Run() {
@@ -265,7 +265,8 @@ void HInductionVarAnalysis::ClassifyNonTrivial(HLoopInformation* loop) {
// Rotate proper loop-phi to front.
if (size > 1) {
- ArenaVector<HInstruction*> other(graph_->GetArena()->Adapter(kArenaAllocInductionVarAnalysis));
+ ArenaVector<HInstruction*> other(
+ graph_->GetAllocator()->Adapter(kArenaAllocInductionVarAnalysis));
RotateEntryPhiFirst(loop, &scc_, &other);
}
@@ -991,7 +992,7 @@ void HInductionVarAnalysis::AssignInfo(HLoopInformation* loop,
it = induction_.Put(loop,
ArenaSafeMap<HInstruction*, InductionInfo*>(
std::less<HInstruction*>(),
- graph_->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)));
+ graph_->GetAllocator()->Adapter(kArenaAllocInductionVarAnalysis)));
}
it->second.Put(instruction, info);
}
@@ -1082,7 +1083,7 @@ HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::CreateSimplifiedInv
return CreateSimplifiedInvariant(kSub, b->op_b, b->op_a);
}
}
- return new (graph_->GetArena()) InductionInfo(
+ return new (graph_->GetAllocator()) InductionInfo(
kInvariant, op, a, b, nullptr, ImplicitConversion(b->type));
}
@@ -1119,7 +1120,7 @@ HInstruction* HInductionVarAnalysis::GetShiftConstant(HLoopInformation* loop,
void HInductionVarAnalysis::AssignCycle(HPhi* phi) {
ArenaSet<HInstruction*>* set = &cycles_.Put(phi, ArenaSet<HInstruction*>(
- graph_->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)))->second;
+ graph_->GetAllocator()->Adapter(kArenaAllocInductionVarAnalysis)))->second;
for (HInstruction* i : scc_) {
set->insert(i);
}
diff --git a/compiler/optimizing/induction_var_analysis.h b/compiler/optimizing/induction_var_analysis.h
index 421b3ab9d0..a2d302ae81 100644
--- a/compiler/optimizing/induction_var_analysis.h
+++ b/compiler/optimizing/induction_var_analysis.h
@@ -129,7 +129,7 @@ class HInductionVarAnalysis : public HOptimization {
InductionInfo* CreateInvariantFetch(HInstruction* f) {
DCHECK(f != nullptr);
- return new (graph_->GetArena())
+ return new (graph_->GetAllocator())
InductionInfo(kInvariant, kFetch, nullptr, nullptr, f, f->GetType());
}
@@ -138,7 +138,7 @@ class HInductionVarAnalysis : public HOptimization {
InductionInfo* b,
DataType::Type type) {
DCHECK(a != nullptr && b != nullptr);
- return new (graph_->GetArena()) InductionInfo(kInvariant, op, a, b, nullptr, type);
+ return new (graph_->GetAllocator()) InductionInfo(kInvariant, op, a, b, nullptr, type);
}
InductionInfo* CreateInduction(InductionClass ic,
@@ -148,7 +148,7 @@ class HInductionVarAnalysis : public HOptimization {
HInstruction* f,
DataType::Type type) {
DCHECK(a != nullptr && b != nullptr);
- return new (graph_->GetArena()) InductionInfo(ic, op, a, b, f, type);
+ return new (graph_->GetAllocator()) InductionInfo(ic, op, a, b, f, type);
}
// Methods for analysis.
diff --git a/compiler/optimizing/induction_var_analysis_test.cc b/compiler/optimizing/induction_var_analysis_test.cc
index 53c8044a0b..4c11ad4643 100644
--- a/compiler/optimizing/induction_var_analysis_test.cc
+++ b/compiler/optimizing/induction_var_analysis_test.cc
@@ -27,12 +27,10 @@ namespace art {
/**
* Fixture class for the InductionVarAnalysis tests.
*/
-class InductionVarAnalysisTest : public CommonCompilerTest {
+class InductionVarAnalysisTest : public OptimizingUnitTest {
public:
InductionVarAnalysisTest()
- : pool_(),
- allocator_(&pool_),
- iva_(nullptr),
+ : iva_(nullptr),
entry_(nullptr),
return_(nullptr),
exit_(nullptr),
@@ -44,7 +42,7 @@ class InductionVarAnalysisTest : public CommonCompilerTest {
constant100_(nullptr),
constantm1_(nullptr),
float_constant0_(nullptr) {
- graph_ = CreateGraph(&allocator_);
+ graph_ = CreateGraph();
}
~InductionVarAnalysisTest() { }
@@ -52,15 +50,15 @@ class InductionVarAnalysisTest : public CommonCompilerTest {
// Builds single for-loop at depth d.
void BuildForLoop(int d, int n) {
ASSERT_LT(d, n);
- loop_preheader_[d] = new (&allocator_) HBasicBlock(graph_);
+ loop_preheader_[d] = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(loop_preheader_[d]);
- loop_header_[d] = new (&allocator_) HBasicBlock(graph_);
+ loop_header_[d] = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(loop_header_[d]);
loop_preheader_[d]->AddSuccessor(loop_header_[d]);
if (d < (n - 1)) {
BuildForLoop(d + 1, n);
}
- loop_body_[d] = new (&allocator_) HBasicBlock(graph_);
+ loop_body_[d] = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(loop_body_[d]);
loop_body_[d]->AddSuccessor(loop_header_[d]);
if (d < (n - 1)) {
@@ -79,12 +77,12 @@ class InductionVarAnalysisTest : public CommonCompilerTest {
graph_->SetNumberOfVRegs(n + 3);
// Build basic blocks with entry, nested loop, exit.
- entry_ = new (&allocator_) HBasicBlock(graph_);
+ entry_ = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry_);
BuildForLoop(0, n);
- return_ = new (&allocator_) HBasicBlock(graph_);
+ return_ = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(return_);
- exit_ = new (&allocator_) HBasicBlock(graph_);
+ exit_ = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(exit_);
entry_->AddSuccessor(loop_preheader_[0]);
loop_header_[0]->AddSuccessor(return_);
@@ -93,7 +91,7 @@ class InductionVarAnalysisTest : public CommonCompilerTest {
graph_->SetExitBlock(exit_);
// Provide entry and exit instructions.
- parameter_ = new (&allocator_) HParameterValue(
+ parameter_ = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference, true);
entry_->AddInstruction(parameter_);
constant0_ = graph_->GetIntConstant(0);
@@ -103,20 +101,20 @@ class InductionVarAnalysisTest : public CommonCompilerTest {
constant100_ = graph_->GetIntConstant(100);
constantm1_ = graph_->GetIntConstant(-1);
float_constant0_ = graph_->GetFloatConstant(0.0f);
- return_->AddInstruction(new (&allocator_) HReturnVoid());
- exit_->AddInstruction(new (&allocator_) HExit());
+ return_->AddInstruction(new (GetAllocator()) HReturnVoid());
+ exit_->AddInstruction(new (GetAllocator()) HExit());
// Provide loop instructions.
for (int d = 0; d < n; d++) {
- basic_[d] = new (&allocator_) HPhi(&allocator_, d, 0, DataType::Type::kInt32);
- loop_preheader_[d]->AddInstruction(new (&allocator_) HGoto());
+ basic_[d] = new (GetAllocator()) HPhi(GetAllocator(), d, 0, DataType::Type::kInt32);
+ loop_preheader_[d]->AddInstruction(new (GetAllocator()) HGoto());
loop_header_[d]->AddPhi(basic_[d]);
- HInstruction* compare = new (&allocator_) HLessThan(basic_[d], constant100_);
+ HInstruction* compare = new (GetAllocator()) HLessThan(basic_[d], constant100_);
loop_header_[d]->AddInstruction(compare);
- loop_header_[d]->AddInstruction(new (&allocator_) HIf(compare));
- increment_[d] = new (&allocator_) HAdd(DataType::Type::kInt32, basic_[d], constant1_);
+ loop_header_[d]->AddInstruction(new (GetAllocator()) HIf(compare));
+ increment_[d] = new (GetAllocator()) HAdd(DataType::Type::kInt32, basic_[d], constant1_);
loop_body_[d]->AddInstruction(increment_[d]);
- loop_body_[d]->AddInstruction(new (&allocator_) HGoto());
+ loop_body_[d]->AddInstruction(new (GetAllocator()) HGoto());
basic_[d]->AddInput(constant0_);
basic_[d]->AddInput(increment_[d]);
@@ -125,9 +123,9 @@ class InductionVarAnalysisTest : public CommonCompilerTest {
// Builds if-statement at depth d.
HPhi* BuildIf(int d, HBasicBlock** ifT, HBasicBlock** ifF) {
- HBasicBlock* cond = new (&allocator_) HBasicBlock(graph_);
- HBasicBlock* ifTrue = new (&allocator_) HBasicBlock(graph_);
- HBasicBlock* ifFalse = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* cond = new (GetAllocator()) HBasicBlock(graph_);
+ HBasicBlock* ifTrue = new (GetAllocator()) HBasicBlock(graph_);
+ HBasicBlock* ifFalse = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(cond);
graph_->AddBlock(ifTrue);
graph_->AddBlock(ifFalse);
@@ -137,11 +135,11 @@ class InductionVarAnalysisTest : public CommonCompilerTest {
cond->AddSuccessor(ifFalse);
ifTrue->AddSuccessor(loop_body_[d]);
ifFalse->AddSuccessor(loop_body_[d]);
- cond->AddInstruction(new (&allocator_) HIf(parameter_));
+ cond->AddInstruction(new (GetAllocator()) HIf(parameter_));
*ifT = ifTrue;
*ifF = ifFalse;
- HPhi* select_phi = new (&allocator_) HPhi(&allocator_, -1, 0, DataType::Type::kInt32);
+ HPhi* select_phi = new (GetAllocator()) HPhi(GetAllocator(), -1, 0, DataType::Type::kInt32);
loop_body_[d]->AddPhi(select_phi);
return select_phi;
}
@@ -154,7 +152,7 @@ class InductionVarAnalysisTest : public CommonCompilerTest {
// Inserts a phi to loop header at depth d and returns it.
HPhi* InsertLoopPhi(int vreg, int d) {
- HPhi* phi = new (&allocator_) HPhi(&allocator_, vreg, 0, DataType::Type::kInt32);
+ HPhi* phi = new (GetAllocator()) HPhi(GetAllocator(), vreg, 0, DataType::Type::kInt32);
loop_header_[d]->AddPhi(phi);
return phi;
}
@@ -164,7 +162,7 @@ class InductionVarAnalysisTest : public CommonCompilerTest {
HInstruction* InsertArrayStore(HInstruction* subscript, int d) {
// ArraySet is given a float value in order to avoid SsaBuilder typing
// it from the array's non-existent reference type info.
- return InsertInstruction(new (&allocator_) HArraySet(
+ return InsertInstruction(new (GetAllocator()) HArraySet(
parameter_, subscript, float_constant0_, DataType::Type::kFloat32, 0), d);
}
@@ -197,13 +195,11 @@ class InductionVarAnalysisTest : public CommonCompilerTest {
// Performs InductionVarAnalysis (after proper set up).
void PerformInductionVarAnalysis() {
graph_->BuildDominatorTree();
- iva_ = new (&allocator_) HInductionVarAnalysis(graph_);
+ iva_ = new (GetAllocator()) HInductionVarAnalysis(graph_);
iva_->Run();
}
// General building fields.
- ArenaPool pool_;
- ArenaAllocator allocator_;
HGraph* graph_;
HInductionVarAnalysis* iva_;
@@ -286,15 +282,15 @@ TEST_F(InductionVarAnalysisTest, FindDerivedInduction) {
// }
BuildLoopNest(1);
HInstruction* add = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, constant100_, basic_[0]), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, constant100_, basic_[0]), 0);
HInstruction* sub = InsertInstruction(
- new (&allocator_) HSub(DataType::Type::kInt32, constant100_, basic_[0]), 0);
+ new (GetAllocator()) HSub(DataType::Type::kInt32, constant100_, basic_[0]), 0);
HInstruction* mul = InsertInstruction(
- new (&allocator_) HMul(DataType::Type::kInt32, constant100_, basic_[0]), 0);
+ new (GetAllocator()) HMul(DataType::Type::kInt32, constant100_, basic_[0]), 0);
HInstruction* shl = InsertInstruction(
- new (&allocator_) HShl(DataType::Type::kInt32, basic_[0], constant1_), 0);
+ new (GetAllocator()) HShl(DataType::Type::kInt32, basic_[0], constant1_), 0);
HInstruction* neg = InsertInstruction(
- new (&allocator_) HNeg(DataType::Type::kInt32, basic_[0]), 0);
+ new (GetAllocator()) HNeg(DataType::Type::kInt32, basic_[0]), 0);
PerformInductionVarAnalysis();
EXPECT_STREQ("((1) * i + (100)):Int32", GetInductionInfo(add, 0).c_str());
@@ -318,10 +314,10 @@ TEST_F(InductionVarAnalysisTest, FindChainInduction) {
k_header->AddInput(constant0_);
HInstruction* add = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, k_header, constant100_), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, k_header, constant100_), 0);
HInstruction* store1 = InsertArrayStore(add, 0);
HInstruction* sub = InsertInstruction(
- new (&allocator_) HSub(DataType::Type::kInt32, add, constant1_), 0);
+ new (GetAllocator()) HSub(DataType::Type::kInt32, add, constant1_), 0);
HInstruction* store2 = InsertArrayStore(sub, 0);
k_header->AddInput(sub);
PerformInductionVarAnalysis();
@@ -351,11 +347,11 @@ TEST_F(InductionVarAnalysisTest, FindTwoWayBasicInduction) {
HPhi* k_body = BuildIf(0, &ifTrue, &ifFalse);
// True-branch.
- HInstruction* inc1 = new (&allocator_) HAdd(DataType::Type::kInt32, k_header, constant1_);
+ HInstruction* inc1 = new (GetAllocator()) HAdd(DataType::Type::kInt32, k_header, constant1_);
ifTrue->AddInstruction(inc1);
k_body->AddInput(inc1);
// False-branch.
- HInstruction* inc2 = new (&allocator_) HAdd(DataType::Type::kInt32, k_header, constant1_);
+ HInstruction* inc2 = new (GetAllocator()) HAdd(DataType::Type::kInt32, k_header, constant1_);
ifFalse->AddInstruction(inc2);
k_body->AddInput(inc2);
// Merge over a phi.
@@ -384,11 +380,11 @@ TEST_F(InductionVarAnalysisTest, FindTwoWayDerivedInduction) {
HPhi* k = BuildIf(0, &ifTrue, &ifFalse);
// True-branch.
- HInstruction* inc1 = new (&allocator_) HAdd(DataType::Type::kInt32, basic_[0], constant1_);
+ HInstruction* inc1 = new (GetAllocator()) HAdd(DataType::Type::kInt32, basic_[0], constant1_);
ifTrue->AddInstruction(inc1);
k->AddInput(inc1);
// False-branch.
- HInstruction* inc2 = new (&allocator_) HAdd(DataType::Type::kInt32, basic_[0], constant1_);
+ HInstruction* inc2 = new (GetAllocator()) HAdd(DataType::Type::kInt32, basic_[0], constant1_);
ifFalse->AddInstruction(inc2);
k->AddInput(inc2);
// Merge over a phi.
@@ -412,11 +408,11 @@ TEST_F(InductionVarAnalysisTest, AddLinear) {
BuildLoopNest(1);
HInstruction* add1 = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, basic_[0], basic_[0]), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, basic_[0], basic_[0]), 0);
HInstruction* add2 = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, constant7_, basic_[0]), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, constant7_, basic_[0]), 0);
HInstruction* add3 = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, add1, add2), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, add1, add2), 0);
PerformInductionVarAnalysis();
EXPECT_STREQ("((1) * i + (0)):Int32", GetInductionInfo(basic_[0], 0).c_str());
@@ -438,11 +434,11 @@ TEST_F(InductionVarAnalysisTest, FindPolynomialInduction) {
k_header->AddInput(constant1_);
HInstruction* mul = InsertInstruction(
- new (&allocator_) HMul(DataType::Type::kInt32, basic_[0], constant2_), 0);
+ new (GetAllocator()) HMul(DataType::Type::kInt32, basic_[0], constant2_), 0);
HInstruction* add = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, constant100_, mul), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, constant100_, mul), 0);
HInstruction* pol = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, add, k_header), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, add, k_header), 0);
k_header->AddInput(pol);
PerformInductionVarAnalysis();
@@ -469,17 +465,17 @@ TEST_F(InductionVarAnalysisTest, FindPolynomialInductionAndDerived) {
k_header->AddInput(constant1_);
HInstruction* add = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, k_header, constant100_), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, k_header, constant100_), 0);
HInstruction* sub = InsertInstruction(
- new (&allocator_) HSub(DataType::Type::kInt32, k_header, constant1_), 0);
+ new (GetAllocator()) HSub(DataType::Type::kInt32, k_header, constant1_), 0);
HInstruction* neg = InsertInstruction(
- new (&allocator_) HNeg(DataType::Type::kInt32, sub), 0);
+ new (GetAllocator()) HNeg(DataType::Type::kInt32, sub), 0);
HInstruction* mul = InsertInstruction(
- new (&allocator_) HMul(DataType::Type::kInt32, k_header, constant2_), 0);
+ new (GetAllocator()) HMul(DataType::Type::kInt32, k_header, constant2_), 0);
HInstruction* shl = InsertInstruction(
- new (&allocator_) HShl(DataType::Type::kInt32, k_header, constant2_), 0);
+ new (GetAllocator()) HShl(DataType::Type::kInt32, k_header, constant2_), 0);
HInstruction* pol = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, k_header, basic_[0]), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, k_header, basic_[0]), 0);
k_header->AddInput(pol);
PerformInductionVarAnalysis();
@@ -512,11 +508,11 @@ TEST_F(InductionVarAnalysisTest, AddPolynomial) {
k_header->AddInput(constant7_);
HInstruction* add1 = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, k_header, k_header), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, k_header, k_header), 0);
HInstruction* add2 = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, add1, k_header), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, add1, k_header), 0);
HInstruction* add3 = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, k_header, basic_[0]), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, k_header, basic_[0]), 0);
k_header->AddInput(add3);
PerformInductionVarAnalysis();
@@ -542,7 +538,7 @@ TEST_F(InductionVarAnalysisTest, FindGeometricMulInduction) {
k_header->AddInput(constant1_);
HInstruction* mul = InsertInstruction(
- new (&allocator_) HMul(DataType::Type::kInt32, k_header, constant100_), 0);
+ new (GetAllocator()) HMul(DataType::Type::kInt32, k_header, constant100_), 0);
k_header->AddInput(mul);
PerformInductionVarAnalysis();
@@ -567,19 +563,19 @@ TEST_F(InductionVarAnalysisTest, FindGeometricShlInductionAndDerived) {
k_header->AddInput(constant1_);
HInstruction* add1 = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, k_header, constant1_), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, k_header, constant1_), 0);
HInstruction* shl1 = InsertInstruction(
- new (&allocator_) HShl(DataType::Type::kInt32, k_header, constant1_), 0);
+ new (GetAllocator()) HShl(DataType::Type::kInt32, k_header, constant1_), 0);
HInstruction* add2 = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, shl1, constant100_), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, shl1, constant100_), 0);
HInstruction* sub = InsertInstruction(
- new (&allocator_) HSub(DataType::Type::kInt32, shl1, constant1_), 0);
+ new (GetAllocator()) HSub(DataType::Type::kInt32, shl1, constant1_), 0);
HInstruction* neg = InsertInstruction(
- new (&allocator_) HNeg(DataType::Type::kInt32, sub), 0);
+ new (GetAllocator()) HNeg(DataType::Type::kInt32, sub), 0);
HInstruction* mul = InsertInstruction(
- new (&allocator_) HMul(DataType::Type::kInt32, shl1, constant2_), 0);
+ new (GetAllocator()) HMul(DataType::Type::kInt32, shl1, constant2_), 0);
HInstruction* shl2 = InsertInstruction(
- new (&allocator_) HShl(DataType::Type::kInt32, shl1, constant2_), 0);
+ new (GetAllocator()) HShl(DataType::Type::kInt32, shl1, constant2_), 0);
k_header->AddInput(shl1);
PerformInductionVarAnalysis();
@@ -610,17 +606,17 @@ TEST_F(InductionVarAnalysisTest, FindGeometricDivInductionAndDerived) {
k_header->AddInput(constant1_);
HInstruction* add = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, k_header, constant100_), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, k_header, constant100_), 0);
HInstruction* sub = InsertInstruction(
- new (&allocator_) HSub(DataType::Type::kInt32, k_header, constant1_), 0);
+ new (GetAllocator()) HSub(DataType::Type::kInt32, k_header, constant1_), 0);
HInstruction* neg = InsertInstruction(
- new (&allocator_) HNeg(DataType::Type::kInt32, sub), 0);
+ new (GetAllocator()) HNeg(DataType::Type::kInt32, sub), 0);
HInstruction* mul = InsertInstruction(
- new (&allocator_) HMul(DataType::Type::kInt32, k_header, constant2_), 0);
+ new (GetAllocator()) HMul(DataType::Type::kInt32, k_header, constant2_), 0);
HInstruction* shl = InsertInstruction(
- new (&allocator_) HShl(DataType::Type::kInt32, k_header, constant2_), 0);
+ new (GetAllocator()) HShl(DataType::Type::kInt32, k_header, constant2_), 0);
HInstruction* div = InsertInstruction(
- new (&allocator_) HDiv(DataType::Type::kInt32, k_header, constant100_, kNoDexPc), 0);
+ new (GetAllocator()) HDiv(DataType::Type::kInt32, k_header, constant100_, kNoDexPc), 0);
k_header->AddInput(div);
PerformInductionVarAnalysis();
@@ -645,7 +641,7 @@ TEST_F(InductionVarAnalysisTest, FindGeometricShrInduction) {
k_header->AddInput(constant100_);
HInstruction* shr = InsertInstruction(
- new (&allocator_) HShr(DataType::Type::kInt32, k_header, constant1_), 0);
+ new (GetAllocator()) HShr(DataType::Type::kInt32, k_header, constant1_), 0);
k_header->AddInput(shr);
PerformInductionVarAnalysis();
@@ -665,7 +661,7 @@ TEST_F(InductionVarAnalysisTest, FindNotGeometricShrInduction) {
k_header->AddInput(constantm1_);
HInstruction* shr = InsertInstruction(
- new (&allocator_) HShr(DataType::Type::kInt32, k_header, constant1_), 0);
+ new (GetAllocator()) HShr(DataType::Type::kInt32, k_header, constant1_), 0);
k_header->AddInput(shr);
PerformInductionVarAnalysis();
@@ -689,17 +685,17 @@ TEST_F(InductionVarAnalysisTest, FindRemWrapAroundInductionAndDerived) {
k_header->AddInput(constant100_);
HInstruction* add = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, k_header, constant100_), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, k_header, constant100_), 0);
HInstruction* sub = InsertInstruction(
- new (&allocator_) HSub(DataType::Type::kInt32, k_header, constant1_), 0);
+ new (GetAllocator()) HSub(DataType::Type::kInt32, k_header, constant1_), 0);
HInstruction* neg = InsertInstruction(
- new (&allocator_) HNeg(DataType::Type::kInt32, sub), 0);
+ new (GetAllocator()) HNeg(DataType::Type::kInt32, sub), 0);
HInstruction* mul = InsertInstruction(
- new (&allocator_) HMul(DataType::Type::kInt32, k_header, constant2_), 0);
+ new (GetAllocator()) HMul(DataType::Type::kInt32, k_header, constant2_), 0);
HInstruction* shl = InsertInstruction(
- new (&allocator_) HShl(DataType::Type::kInt32, k_header, constant2_), 0);
+ new (GetAllocator()) HShl(DataType::Type::kInt32, k_header, constant2_), 0);
HInstruction* rem = InsertInstruction(
- new (&allocator_) HRem(DataType::Type::kInt32, k_header, constant7_, kNoDexPc), 0);
+ new (GetAllocator()) HRem(DataType::Type::kInt32, k_header, constant7_, kNoDexPc), 0);
k_header->AddInput(rem);
PerformInductionVarAnalysis();
@@ -731,7 +727,7 @@ TEST_F(InductionVarAnalysisTest, FindFirstOrderWrapAroundInduction) {
HInstruction* store = InsertArrayStore(k_header, 0);
HInstruction* sub = InsertInstruction(
- new (&allocator_) HSub(DataType::Type::kInt32, constant100_, basic_[0]), 0);
+ new (GetAllocator()) HSub(DataType::Type::kInt32, constant100_, basic_[0]), 0);
k_header->AddInput(sub);
PerformInductionVarAnalysis();
@@ -760,7 +756,7 @@ TEST_F(InductionVarAnalysisTest, FindSecondOrderWrapAroundInduction) {
HInstruction* store = InsertArrayStore(k_header, 0);
k_header->AddInput(t);
HInstruction* sub = InsertInstruction(
- new (&allocator_) HSub(DataType::Type::kInt32, constant100_, basic_[0], 0), 0);
+ new (GetAllocator()) HSub(DataType::Type::kInt32, constant100_, basic_[0], 0), 0);
t->AddInput(sub);
PerformInductionVarAnalysis();
@@ -785,19 +781,19 @@ TEST_F(InductionVarAnalysisTest, FindWrapAroundDerivedInduction) {
k_header->AddInput(constant0_);
HInstruction* add = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, k_header, constant100_), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, k_header, constant100_), 0);
HInstruction* sub = InsertInstruction(
- new (&allocator_) HSub(DataType::Type::kInt32, k_header, constant100_), 0);
+ new (GetAllocator()) HSub(DataType::Type::kInt32, k_header, constant100_), 0);
HInstruction* mul = InsertInstruction(
- new (&allocator_) HMul(DataType::Type::kInt32, k_header, constant100_), 0);
+ new (GetAllocator()) HMul(DataType::Type::kInt32, k_header, constant100_), 0);
HInstruction* shl1 = InsertInstruction(
- new (&allocator_) HShl(DataType::Type::kInt32, k_header, constant1_), 0);
+ new (GetAllocator()) HShl(DataType::Type::kInt32, k_header, constant1_), 0);
HInstruction* neg1 = InsertInstruction(
- new (&allocator_) HNeg(DataType::Type::kInt32, k_header), 0);
+ new (GetAllocator()) HNeg(DataType::Type::kInt32, k_header), 0);
HInstruction* shl2 = InsertInstruction(
- new (&allocator_) HShl(DataType::Type::kInt32, basic_[0], constant1_), 0);
+ new (GetAllocator()) HShl(DataType::Type::kInt32, basic_[0], constant1_), 0);
HInstruction* neg2 = InsertInstruction(
- new (&allocator_) HNeg(DataType::Type::kInt32, shl2), 0);
+ new (GetAllocator()) HNeg(DataType::Type::kInt32, shl2), 0);
k_header->AddInput(shl2);
PerformInductionVarAnalysis();
@@ -856,7 +852,7 @@ TEST_F(InductionVarAnalysisTest, FindIdiomaticPeriodicInduction) {
HInstruction* store = InsertArrayStore(k_header, 0);
HInstruction* sub = InsertInstruction(
- new (&allocator_) HSub(DataType::Type::kInt32, constant1_, k_header), 0);
+ new (GetAllocator()) HSub(DataType::Type::kInt32, constant1_, k_header), 0);
k_header->AddInput(sub);
PerformInductionVarAnalysis();
@@ -877,7 +873,7 @@ TEST_F(InductionVarAnalysisTest, FindXorPeriodicInduction) {
HInstruction* store = InsertArrayStore(k_header, 0);
HInstruction* x = InsertInstruction(
- new (&allocator_) HXor(DataType::Type::kInt32, k_header, constant1_), 0);
+ new (GetAllocator()) HXor(DataType::Type::kInt32, k_header, constant1_), 0);
k_header->AddInput(x);
PerformInductionVarAnalysis();
@@ -896,7 +892,7 @@ TEST_F(InductionVarAnalysisTest, FindXorConstantLeftPeriodicInduction) {
k_header->AddInput(constant1_);
HInstruction* x = InsertInstruction(
- new (&allocator_) HXor(DataType::Type::kInt32, constant1_, k_header), 0);
+ new (GetAllocator()) HXor(DataType::Type::kInt32, constant1_, k_header), 0);
k_header->AddInput(x);
PerformInductionVarAnalysis();
@@ -915,7 +911,7 @@ TEST_F(InductionVarAnalysisTest, FindXor100PeriodicInduction) {
k_header->AddInput(constant1_);
HInstruction* x = InsertInstruction(
- new (&allocator_) HXor(DataType::Type::kInt32, k_header, constant100_), 0);
+ new (GetAllocator()) HXor(DataType::Type::kInt32, k_header, constant100_), 0);
k_header->AddInput(x);
PerformInductionVarAnalysis();
@@ -933,7 +929,7 @@ TEST_F(InductionVarAnalysisTest, FindBooleanEqPeriodicInduction) {
HPhi* k_header = InsertLoopPhi(0, 0);
k_header->AddInput(constant0_);
- HInstruction* x = InsertInstruction(new (&allocator_) HEqual(k_header, constant0_), 0);
+ HInstruction* x = InsertInstruction(new (GetAllocator()) HEqual(k_header, constant0_), 0);
k_header->AddInput(x);
PerformInductionVarAnalysis();
@@ -951,7 +947,7 @@ TEST_F(InductionVarAnalysisTest, FindBooleanEqConstantLeftPeriodicInduction) {
HPhi* k_header = InsertLoopPhi(0, 0);
k_header->AddInput(constant0_);
- HInstruction* x = InsertInstruction(new (&allocator_) HEqual(constant0_, k_header), 0);
+ HInstruction* x = InsertInstruction(new (GetAllocator()) HEqual(constant0_, k_header), 0);
k_header->AddInput(x);
PerformInductionVarAnalysis();
@@ -969,7 +965,7 @@ TEST_F(InductionVarAnalysisTest, FindBooleanNePeriodicInduction) {
HPhi* k_header = InsertLoopPhi(0, 0);
k_header->AddInput(constant0_);
- HInstruction* x = InsertInstruction(new (&allocator_) HNotEqual(k_header, constant1_), 0);
+ HInstruction* x = InsertInstruction(new (GetAllocator()) HNotEqual(k_header, constant1_), 0);
k_header->AddInput(x);
PerformInductionVarAnalysis();
@@ -987,7 +983,7 @@ TEST_F(InductionVarAnalysisTest, FindBooleanNeConstantLeftPeriodicInduction) {
HPhi* k_header = InsertLoopPhi(0, 0);
k_header->AddInput(constant0_);
- HInstruction* x = InsertInstruction(new (&allocator_) HNotEqual(constant1_, k_header), 0);
+ HInstruction* x = InsertInstruction(new (GetAllocator()) HNotEqual(constant1_, k_header), 0);
k_header->AddInput(x);
PerformInductionVarAnalysis();
@@ -1012,19 +1008,19 @@ TEST_F(InductionVarAnalysisTest, FindDerivedPeriodicInduction) {
k_header->AddInput(constant0_);
HInstruction* neg1 = InsertInstruction(
- new (&allocator_) HNeg(DataType::Type::kInt32, k_header), 0);
+ new (GetAllocator()) HNeg(DataType::Type::kInt32, k_header), 0);
HInstruction* idiom = InsertInstruction(
- new (&allocator_) HSub(DataType::Type::kInt32, constant1_, k_header), 0);
+ new (GetAllocator()) HSub(DataType::Type::kInt32, constant1_, k_header), 0);
HInstruction* add = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, idiom, constant100_), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, idiom, constant100_), 0);
HInstruction* sub = InsertInstruction(
- new (&allocator_) HSub(DataType::Type::kInt32, idiom, constant100_), 0);
+ new (GetAllocator()) HSub(DataType::Type::kInt32, idiom, constant100_), 0);
HInstruction* mul = InsertInstruction(
- new (&allocator_) HMul(DataType::Type::kInt32, idiom, constant100_), 0);
+ new (GetAllocator()) HMul(DataType::Type::kInt32, idiom, constant100_), 0);
HInstruction* shl = InsertInstruction(
- new (&allocator_) HShl(DataType::Type::kInt32, idiom, constant1_), 0);
+ new (GetAllocator()) HShl(DataType::Type::kInt32, idiom, constant1_), 0);
HInstruction* neg2 = InsertInstruction(
- new (&allocator_) HNeg(DataType::Type::kInt32, idiom), 0);
+ new (GetAllocator()) HNeg(DataType::Type::kInt32, idiom), 0);
k_header->AddInput(idiom);
PerformInductionVarAnalysis();
@@ -1057,7 +1053,7 @@ TEST_F(InductionVarAnalysisTest, FindDeepLoopInduction) {
}
HInstruction* inc = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, constant1_, k_header[9]), 9);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, constant1_, k_header[9]), 9);
HInstruction* store = InsertArrayStore(inc, 9);
for (int d = 0; d < 10; d++) {
@@ -1091,7 +1087,7 @@ TEST_F(InductionVarAnalysisTest, ByteInductionIntLoopControl) {
// }
BuildLoopNest(1);
HInstruction* conv = InsertInstruction(
- new (&allocator_) HTypeConversion(DataType::Type::kInt8, basic_[0], kNoDexPc), 0);
+ new (GetAllocator()) HTypeConversion(DataType::Type::kInt8, basic_[0], kNoDexPc), 0);
HInstruction* store1 = InsertArrayStore(conv, 0);
HInstruction* store2 = InsertArrayStore(basic_[0], 0);
PerformInductionVarAnalysis();
@@ -1122,10 +1118,10 @@ TEST_F(InductionVarAnalysisTest, ByteInductionDerivedIntLoopControl) {
// }
BuildLoopNest(1);
HInstruction* conv = InsertInstruction(
- new (&allocator_) HTypeConversion(DataType::Type::kInt8, basic_[0], kNoDexPc), 0);
+ new (GetAllocator()) HTypeConversion(DataType::Type::kInt8, basic_[0], kNoDexPc), 0);
HInstruction* store1 = InsertArrayStore(conv, 0);
HInstruction* add = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, conv, constant1_), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, conv, constant1_), 0);
HInstruction* store2 = InsertArrayStore(add, 0);
PerformInductionVarAnalysis();
@@ -1152,9 +1148,9 @@ TEST_F(InductionVarAnalysisTest, ByteInduction) {
k_header->AddInput(graph_->GetIntConstant(-128));
HInstruction* add = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, k_header, constant1_), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, k_header, constant1_), 0);
HInstruction* conv = InsertInstruction(
- new (&allocator_) HTypeConversion(DataType::Type::kInt8, add, kNoDexPc), 0);
+ new (GetAllocator()) HTypeConversion(DataType::Type::kInt8, add, kNoDexPc), 0);
k_header->AddInput(conv);
PerformInductionVarAnalysis();
@@ -1180,9 +1176,9 @@ TEST_F(InductionVarAnalysisTest, NoByteInduction1) {
k_header->AddInput(graph_->GetIntConstant(-129));
HInstruction* add = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, k_header, constant1_), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, k_header, constant1_), 0);
HInstruction* conv = InsertInstruction(
- new (&allocator_) HTypeConversion(DataType::Type::kInt8, add, kNoDexPc), 0);
+ new (GetAllocator()) HTypeConversion(DataType::Type::kInt8, add, kNoDexPc), 0);
k_header->AddInput(conv);
PerformInductionVarAnalysis();
@@ -1202,9 +1198,9 @@ TEST_F(InductionVarAnalysisTest, NoByteInduction2) {
k_header->AddInput(constant0_);
HInstruction* conv = InsertInstruction(
- new (&allocator_) HTypeConversion(DataType::Type::kInt8, k_header, kNoDexPc), 0);
+ new (GetAllocator()) HTypeConversion(DataType::Type::kInt8, k_header, kNoDexPc), 0);
HInstruction* add = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, conv, constant1_), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, conv, constant1_), 0);
k_header->AddInput(add);
PerformInductionVarAnalysis();
@@ -1221,7 +1217,7 @@ TEST_F(InductionVarAnalysisTest, ByteLoopControl1) {
HInstruction* ifs = loop_header_[0]->GetLastInstruction()->GetPrevious();
ifs->ReplaceInput(graph_->GetIntConstant(127), 1);
HInstruction* conv =
- new (&allocator_) HTypeConversion(DataType::Type::kInt8, increment_[0], kNoDexPc);
+ new (GetAllocator()) HTypeConversion(DataType::Type::kInt8, increment_[0], kNoDexPc);
loop_body_[0]->InsertInstructionBefore(conv, increment_[0]->GetNext());
basic_[0]->ReplaceInput(conv, 1);
PerformInductionVarAnalysis();
@@ -1247,7 +1243,7 @@ TEST_F(InductionVarAnalysisTest, ByteLoopControl2) {
HInstruction* ifs = loop_header_[0]->GetLastInstruction()->GetPrevious();
ifs->ReplaceInput(graph_->GetIntConstant(128), 1);
HInstruction* conv =
- new (&allocator_) HTypeConversion(DataType::Type::kInt8, increment_[0], kNoDexPc);
+ new (GetAllocator()) HTypeConversion(DataType::Type::kInt8, increment_[0], kNoDexPc);
loop_body_[0]->InsertInstructionBefore(conv, increment_[0]->GetNext());
basic_[0]->ReplaceInput(conv, 1);
PerformInductionVarAnalysis();
@@ -1273,7 +1269,7 @@ TEST_F(InductionVarAnalysisTest, ShortLoopControl1) {
HInstruction* ifs = loop_header_[0]->GetLastInstruction()->GetPrevious();
ifs->ReplaceInput(graph_->GetIntConstant(32767), 1);
HInstruction* conv =
- new (&allocator_) HTypeConversion(DataType::Type::kInt16, increment_[0], kNoDexPc);
+ new (GetAllocator()) HTypeConversion(DataType::Type::kInt16, increment_[0], kNoDexPc);
loop_body_[0]->InsertInstructionBefore(conv, increment_[0]->GetNext());
basic_[0]->ReplaceInput(conv, 1);
PerformInductionVarAnalysis();
@@ -1299,7 +1295,7 @@ TEST_F(InductionVarAnalysisTest, ShortLoopControl2) {
HInstruction* ifs = loop_header_[0]->GetLastInstruction()->GetPrevious();
ifs->ReplaceInput(graph_->GetIntConstant(32768), 1);
HInstruction* conv =
- new (&allocator_) HTypeConversion(DataType::Type::kInt16, increment_[0], kNoDexPc);
+ new (GetAllocator()) HTypeConversion(DataType::Type::kInt16, increment_[0], kNoDexPc);
loop_body_[0]->InsertInstructionBefore(conv, increment_[0]->GetNext());
basic_[0]->ReplaceInput(conv, 1);
PerformInductionVarAnalysis();
@@ -1324,7 +1320,7 @@ TEST_F(InductionVarAnalysisTest, CharLoopControl1) {
HInstruction* ifs = loop_header_[0]->GetLastInstruction()->GetPrevious();
ifs->ReplaceInput(graph_->GetIntConstant(65535), 1);
HInstruction* conv =
- new (&allocator_) HTypeConversion(DataType::Type::kUint16, increment_[0], kNoDexPc);
+ new (GetAllocator()) HTypeConversion(DataType::Type::kUint16, increment_[0], kNoDexPc);
loop_body_[0]->InsertInstructionBefore(conv, increment_[0]->GetNext());
basic_[0]->ReplaceInput(conv, 1);
PerformInductionVarAnalysis();
@@ -1349,7 +1345,7 @@ TEST_F(InductionVarAnalysisTest, CharLoopControl2) {
HInstruction* ifs = loop_header_[0]->GetLastInstruction()->GetPrevious();
ifs->ReplaceInput(graph_->GetIntConstant(65536), 1);
HInstruction* conv =
- new (&allocator_) HTypeConversion(DataType::Type::kUint16, increment_[0], kNoDexPc);
+ new (GetAllocator()) HTypeConversion(DataType::Type::kUint16, increment_[0], kNoDexPc);
loop_body_[0]->InsertInstructionBefore(conv, increment_[0]->GetNext());
basic_[0]->ReplaceInput(conv, 1);
PerformInductionVarAnalysis();
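
Editorial note: every test hunk above is the same mechanical substitution — the test-local `&allocator_` placement-new target becomes the fixture's `GetAllocator()`. The snippet below is a minimal standalone analogue in plain standard C++ (CountingArena and Node are illustrative stand-ins, not ART types) showing the shape of that pattern: placement-new routed through one allocator object owned by the fixture.

#include <cstddef>
#include <iostream>
#include <vector>

// Illustrative stand-in for an arena-style allocator owned by a test fixture.
class CountingArena {
 public:
  ~CountingArena() {
    for (char* chunk : chunks_) delete[] chunk;
  }
  void* Alloc(std::size_t bytes) {
    chunks_.push_back(new char[bytes]);
    allocated_ += bytes;
    return chunks_.back();
  }
  std::size_t BytesAllocated() const { return allocated_; }

 private:
  std::vector<char*> chunks_;
  std::size_t allocated_ = 0;
};

// Placement-new overload targeting the arena, mirroring `new (GetAllocator()) H...`.
void* operator new(std::size_t bytes, CountingArena* arena) { return arena->Alloc(bytes); }
void operator delete(void*, CountingArena*) noexcept {}  // matching placement delete

struct Node {
  explicit Node(int v) : value(v) {}  // trivially destructible, so no dtor call needed here
  int value;
};

int main() {
  CountingArena arena;                 // owned by the "fixture"
  Node* n = new (&arena) Node(42);     // every node goes through the same arena
  std::cout << "node value " << n->value
            << ", arena bytes " << arena.BytesAllocated() << std::endl;
  return 0;
}
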
diff --git a/compiler/optimizing/induction_var_range.cc b/compiler/optimizing/induction_var_range.cc
index ab6fbae248..99dec11240 100644
--- a/compiler/optimizing/induction_var_range.cc
+++ b/compiler/optimizing/induction_var_range.cc
@@ -418,7 +418,8 @@ HInstruction* InductionVarRange::GenerateTripCount(HLoopInformation* loop,
if (GenerateCode(trip->op_a, nullptr, graph, block, &trip_expr, false, false)) {
if (taken_test != nullptr) {
HInstruction* zero = graph->GetConstant(trip->type, 0);
- trip_expr = Insert(block, new (graph->GetArena()) HSelect(taken_test, trip_expr, zero, kNoDexPc));
+ ArenaAllocator* allocator = graph->GetAllocator();
+ trip_expr = Insert(block, new (allocator) HSelect(taken_test, trip_expr, zero, kNoDexPc));
}
return trip_expr;
}
@@ -1059,7 +1060,7 @@ bool InductionVarRange::GenerateLastValuePolynomial(HInductionVarAnalysis::Induc
sum = static_cast<int32_t>(sum); // okay to truncate
}
*result =
- Insert(block, new (graph->GetArena()) HAdd(type, graph->GetConstant(type, sum), c));
+ Insert(block, new (graph->GetAllocator()) HAdd(type, graph->GetConstant(type, sum), c));
}
return true;
}
@@ -1104,12 +1105,13 @@ bool InductionVarRange::GenerateLastValueGeometric(HInductionVarAnalysis::Induct
} else {
// Last value: a * f ^ m + b or a * f ^ -m + b.
HInstruction* e = nullptr;
+ ArenaAllocator* allocator = graph->GetAllocator();
if (info->operation == HInductionVarAnalysis::kMul) {
- e = new (graph->GetArena()) HMul(type, opa, graph->GetConstant(type, fpow));
+ e = new (allocator) HMul(type, opa, graph->GetConstant(type, fpow));
} else {
- e = new (graph->GetArena()) HDiv(type, opa, graph->GetConstant(type, fpow), kNoDexPc);
+ e = new (allocator) HDiv(type, opa, graph->GetConstant(type, fpow), kNoDexPc);
}
- *result = Insert(block, new (graph->GetArena()) HAdd(type, Insert(block, e), opb));
+ *result = Insert(block, new (allocator) HAdd(type, Insert(block, e), opb));
}
}
return true;
@@ -1190,18 +1192,20 @@ bool InductionVarRange::GenerateLastValuePeriodic(HInductionVarAnalysis::Inducti
// During actual code generation (graph != nullptr), generate is_even ? x : y.
if (graph != nullptr) {
DataType::Type type = trip->type;
+ ArenaAllocator* allocator = graph->GetAllocator();
HInstruction* msk =
- Insert(block, new (graph->GetArena()) HAnd(type, t, graph->GetConstant(type, 1)));
+ Insert(block, new (allocator) HAnd(type, t, graph->GetConstant(type, 1)));
HInstruction* is_even =
- Insert(block, new (graph->GetArena()) HEqual(msk, graph->GetConstant(type, 0), kNoDexPc));
- *result = Insert(block, new (graph->GetArena()) HSelect(is_even, x, y, kNoDexPc));
+ Insert(block, new (allocator) HEqual(msk, graph->GetConstant(type, 0), kNoDexPc));
+ *result = Insert(block, new (allocator) HSelect(is_even, x, y, kNoDexPc));
}
// Guard select with taken test if needed.
if (*needs_taken_test) {
HInstruction* is_taken = nullptr;
if (GenerateCode(trip->op_b, nullptr, graph, block, graph ? &is_taken : nullptr, false, false)) {
if (graph != nullptr) {
- *result = Insert(block, new (graph->GetArena()) HSelect(is_taken, *result, x, kNoDexPc));
+ ArenaAllocator* allocator = graph->GetAllocator();
+ *result = Insert(block, new (allocator) HSelect(is_taken, *result, x, kNoDexPc));
}
*needs_taken_test = false; // taken care of
} else {
@@ -1250,25 +1254,25 @@ bool InductionVarRange::GenerateCode(HInductionVarAnalysis::InductionInfo* info,
HInstruction* operation = nullptr;
switch (info->operation) {
case HInductionVarAnalysis::kAdd:
- operation = new (graph->GetArena()) HAdd(type, opa, opb); break;
+ operation = new (graph->GetAllocator()) HAdd(type, opa, opb); break;
case HInductionVarAnalysis::kSub:
- operation = new (graph->GetArena()) HSub(type, opa, opb); break;
+ operation = new (graph->GetAllocator()) HSub(type, opa, opb); break;
case HInductionVarAnalysis::kMul:
- operation = new (graph->GetArena()) HMul(type, opa, opb, kNoDexPc); break;
+ operation = new (graph->GetAllocator()) HMul(type, opa, opb, kNoDexPc); break;
case HInductionVarAnalysis::kDiv:
- operation = new (graph->GetArena()) HDiv(type, opa, opb, kNoDexPc); break;
+ operation = new (graph->GetAllocator()) HDiv(type, opa, opb, kNoDexPc); break;
case HInductionVarAnalysis::kRem:
- operation = new (graph->GetArena()) HRem(type, opa, opb, kNoDexPc); break;
+ operation = new (graph->GetAllocator()) HRem(type, opa, opb, kNoDexPc); break;
case HInductionVarAnalysis::kXor:
- operation = new (graph->GetArena()) HXor(type, opa, opb); break;
+ operation = new (graph->GetAllocator()) HXor(type, opa, opb); break;
case HInductionVarAnalysis::kLT:
- operation = new (graph->GetArena()) HLessThan(opa, opb); break;
+ operation = new (graph->GetAllocator()) HLessThan(opa, opb); break;
case HInductionVarAnalysis::kLE:
- operation = new (graph->GetArena()) HLessThanOrEqual(opa, opb); break;
+ operation = new (graph->GetAllocator()) HLessThanOrEqual(opa, opb); break;
case HInductionVarAnalysis::kGT:
- operation = new (graph->GetArena()) HGreaterThan(opa, opb); break;
+ operation = new (graph->GetAllocator()) HGreaterThan(opa, opb); break;
case HInductionVarAnalysis::kGE:
- operation = new (graph->GetArena()) HGreaterThanOrEqual(opa, opb); break;
+ operation = new (graph->GetAllocator()) HGreaterThanOrEqual(opa, opb); break;
default:
LOG(FATAL) << "unknown operation";
}
@@ -1280,7 +1284,7 @@ bool InductionVarRange::GenerateCode(HInductionVarAnalysis::InductionInfo* info,
case HInductionVarAnalysis::kNeg:
if (GenerateCode(info->op_b, trip, graph, block, &opb, in_body, !is_min)) {
if (graph != nullptr) {
- *result = Insert(block, new (graph->GetArena()) HNeg(type, opb));
+ *result = Insert(block, new (graph->GetAllocator()) HNeg(type, opb));
}
return true;
}
@@ -1306,9 +1310,9 @@ bool InductionVarRange::GenerateCode(HInductionVarAnalysis::InductionInfo* info,
} else if (in_body) {
if (GenerateCode(info->op_a, trip, graph, block, &opb, in_body, is_min)) {
if (graph != nullptr) {
+ ArenaAllocator* allocator = graph->GetAllocator();
*result =
- Insert(block,
- new (graph->GetArena()) HSub(type, opb, graph->GetConstant(type, 1)));
+ Insert(block, new (allocator) HSub(type, opb, graph->GetConstant(type, 1)));
}
return true;
}
@@ -1333,15 +1337,16 @@ bool InductionVarRange::GenerateCode(HInductionVarAnalysis::InductionInfo* info,
if (GenerateCode(trip, trip, graph, block, &opa, in_body, is_min_a) &&
GenerateCode(info->op_b, trip, graph, block, &opb, in_body, is_min)) {
if (graph != nullptr) {
+ ArenaAllocator* allocator = graph->GetAllocator();
HInstruction* oper;
if (stride_value == 1) {
- oper = new (graph->GetArena()) HAdd(type, opa, opb);
+ oper = new (allocator) HAdd(type, opa, opb);
} else if (stride_value == -1) {
- oper = new (graph->GetArena()) HSub(type, opb, opa);
+ oper = new (allocator) HSub(type, opb, opa);
} else {
HInstruction* mul =
- new (graph->GetArena()) HMul(type, graph->GetConstant(type, stride_value), opa);
- oper = new (graph->GetArena()) HAdd(type, Insert(block, mul), opb);
+ new (allocator) HMul(type, graph->GetConstant(type, stride_value), opa);
+ oper = new (allocator) HAdd(type, Insert(block, mul), opb);
}
*result = Insert(block, oper);
}
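
Editorial note: the range-generation hunks above also hoist the allocator into a local when several nodes are built back to back. A condensed sketch of that call-site shape is below; the HSelect arguments and the GetAllocator()/InsertInstructionBefore/GetLastInstruction calls are taken from the hunks themselves, while GuardWithTakenTest is a hypothetical wrapper name and the ART types (HGraph, HBasicBlock, HInstruction, HSelect, kNoDexPc) are assumed to come from compiler/optimizing/nodes.h.

// Hypothetical helper condensing the rewritten call sites.
static HInstruction* GuardWithTakenTest(HGraph* graph,
                                        HBasicBlock* block,
                                        HInstruction* taken_test,
                                        HInstruction* trip_expr,
                                        HInstruction* zero) {
  // Fetch the allocator once instead of re-reading it for every node built here.
  ArenaAllocator* allocator = graph->GetAllocator();
  HInstruction* select = new (allocator) HSelect(taken_test, trip_expr, zero, kNoDexPc);
  block->InsertInstructionBefore(select, block->GetLastInstruction());
  return select;
}
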
diff --git a/compiler/optimizing/induction_var_range_test.cc b/compiler/optimizing/induction_var_range_test.cc
index 1c8426954b..e5bc6ef22c 100644
--- a/compiler/optimizing/induction_var_range_test.cc
+++ b/compiler/optimizing/induction_var_range_test.cc
@@ -29,13 +29,11 @@ using Value = InductionVarRange::Value;
/**
* Fixture class for the InductionVarRange tests.
*/
-class InductionVarRangeTest : public CommonCompilerTest {
+class InductionVarRangeTest : public OptimizingUnitTest {
public:
InductionVarRangeTest()
- : pool_(),
- allocator_(&pool_),
- graph_(CreateGraph(&allocator_)),
- iva_(new (&allocator_) HInductionVarAnalysis(graph_)),
+ : graph_(CreateGraph()),
+ iva_(new (GetAllocator()) HInductionVarAnalysis(graph_)),
range_(iva_) {
BuildGraph();
}
@@ -61,22 +59,22 @@ class InductionVarRangeTest : public CommonCompilerTest {
/** Constructs bare minimum graph. */
void BuildGraph() {
graph_->SetNumberOfVRegs(1);
- entry_block_ = new (&allocator_) HBasicBlock(graph_);
- exit_block_ = new (&allocator_) HBasicBlock(graph_);
+ entry_block_ = new (GetAllocator()) HBasicBlock(graph_);
+ exit_block_ = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry_block_);
graph_->AddBlock(exit_block_);
graph_->SetEntryBlock(entry_block_);
graph_->SetExitBlock(exit_block_);
// Two parameters.
- x_ = new (&allocator_) HParameterValue(graph_->GetDexFile(),
- dex::TypeIndex(0),
- 0,
- DataType::Type::kInt32);
+ x_ = new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
+ dex::TypeIndex(0),
+ 0,
+ DataType::Type::kInt32);
entry_block_->AddInstruction(x_);
- y_ = new (&allocator_) HParameterValue(graph_->GetDexFile(),
- dex::TypeIndex(0),
- 0,
- DataType::Type::kInt32);
+ y_ = new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
+ dex::TypeIndex(0),
+ 0,
+ DataType::Type::kInt32);
entry_block_->AddInstruction(y_);
// Set arbitrary range analysis hint while testing private methods.
SetHint(x_);
@@ -85,13 +83,13 @@ class InductionVarRangeTest : public CommonCompilerTest {
/** Constructs loop with given upper bound. */
void BuildLoop(int32_t lower, HInstruction* upper, int32_t stride) {
// Control flow.
- loop_preheader_ = new (&allocator_) HBasicBlock(graph_);
+ loop_preheader_ = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(loop_preheader_);
- loop_header_ = new (&allocator_) HBasicBlock(graph_);
+ loop_header_ = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(loop_header_);
- loop_body_ = new (&allocator_) HBasicBlock(graph_);
+ loop_body_ = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(loop_body_);
- HBasicBlock* return_block = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* return_block = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(return_block);
entry_block_->AddSuccessor(loop_preheader_);
loop_preheader_->AddSuccessor(loop_header_);
@@ -100,24 +98,24 @@ class InductionVarRangeTest : public CommonCompilerTest {
loop_body_->AddSuccessor(loop_header_);
return_block->AddSuccessor(exit_block_);
// Instructions.
- loop_preheader_->AddInstruction(new (&allocator_) HGoto());
- HPhi* phi = new (&allocator_) HPhi(&allocator_, 0, 0, DataType::Type::kInt32);
+ loop_preheader_->AddInstruction(new (GetAllocator()) HGoto());
+ HPhi* phi = new (GetAllocator()) HPhi(GetAllocator(), 0, 0, DataType::Type::kInt32);
loop_header_->AddPhi(phi);
phi->AddInput(graph_->GetIntConstant(lower)); // i = l
if (stride > 0) {
- condition_ = new (&allocator_) HLessThan(phi, upper); // i < u
+ condition_ = new (GetAllocator()) HLessThan(phi, upper); // i < u
} else {
- condition_ = new (&allocator_) HGreaterThan(phi, upper); // i > u
+ condition_ = new (GetAllocator()) HGreaterThan(phi, upper); // i > u
}
loop_header_->AddInstruction(condition_);
- loop_header_->AddInstruction(new (&allocator_) HIf(condition_));
+ loop_header_->AddInstruction(new (GetAllocator()) HIf(condition_));
increment_ =
- new (&allocator_) HAdd(DataType::Type::kInt32, phi, graph_->GetIntConstant(stride));
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, phi, graph_->GetIntConstant(stride));
loop_body_->AddInstruction(increment_); // i += s
phi->AddInput(increment_);
- loop_body_->AddInstruction(new (&allocator_) HGoto());
- return_block->AddInstruction(new (&allocator_) HReturnVoid());
- exit_block_->AddInstruction(new (&allocator_) HExit());
+ loop_body_->AddInstruction(new (GetAllocator()) HGoto());
+ return_block->AddInstruction(new (GetAllocator()) HReturnVoid());
+ exit_block_->AddInstruction(new (GetAllocator()) HExit());
}
/** Constructs SSA and performs induction variable analysis. */
@@ -304,8 +302,6 @@ class InductionVarRangeTest : public CommonCompilerTest {
Value MaxValue(Value v1, Value v2) { return range_.MergeVal(v1, v2, false); }
// General building fields.
- ArenaPool pool_;
- ArenaAllocator allocator_;
HGraph* graph_;
HBasicBlock* entry_block_;
HBasicBlock* exit_block_;
@@ -705,9 +701,9 @@ TEST_F(InductionVarRangeTest, MaxValue) {
TEST_F(InductionVarRangeTest, ArrayLengthAndHints) {
// We pass a bogus constant for the class to avoid mocking one.
- HInstruction* new_array = new (&allocator_) HNewArray(x_, x_, 0);
+ HInstruction* new_array = new (GetAllocator()) HNewArray(x_, x_, 0);
entry_block_->AddInstruction(new_array);
- HInstruction* array_length = new (&allocator_) HArrayLength(new_array, 0);
+ HInstruction* array_length = new (GetAllocator()) HArrayLength(new_array, 0);
entry_block_->AddInstruction(array_length);
// With null hint: yields extreme constants.
const int32_t max_value = std::numeric_limits<int32_t>::max();
@@ -725,13 +721,13 @@ TEST_F(InductionVarRangeTest, ArrayLengthAndHints) {
}
TEST_F(InductionVarRangeTest, AddOrSubAndConstant) {
- HInstruction* add = new (&allocator_)
+ HInstruction* add = new (GetAllocator())
HAdd(DataType::Type::kInt32, x_, graph_->GetIntConstant(-1));
- HInstruction* alt = new (&allocator_)
+ HInstruction* alt = new (GetAllocator())
HAdd(DataType::Type::kInt32, graph_->GetIntConstant(-1), x_);
- HInstruction* sub = new (&allocator_)
+ HInstruction* sub = new (GetAllocator())
HSub(DataType::Type::kInt32, x_, graph_->GetIntConstant(1));
- HInstruction* rev = new (&allocator_)
+ HInstruction* rev = new (GetAllocator())
HSub(DataType::Type::kInt32, graph_->GetIntConstant(1), x_);
entry_block_->AddInstruction(add);
entry_block_->AddInstruction(alt);
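
Editorial note: condensed, the fixture refactoring in this test file boils down to the sketch below. OptimizingUnitTest, CreateGraph() and GetAllocator() are the base-class helpers the hunks above switch to; ExampleRangeTest is a hypothetical name used only for illustration.

// Hypothetical condensed fixture mirroring the hunks above: the ArenaPool and
// ArenaAllocator members are gone, and the graph and analysis objects are
// built from the OptimizingUnitTest helpers instead.
class ExampleRangeTest : public OptimizingUnitTest {
 public:
  ExampleRangeTest()
      : graph_(CreateGraph()),
        iva_(new (GetAllocator()) HInductionVarAnalysis(graph_)) {}

 protected:
  HGraph* graph_;
  HInductionVarAnalysis* iva_;
};
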
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 90e3d2ade7..4d846fa4ed 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -705,7 +705,7 @@ HInstanceFieldGet* HInliner::BuildGetReceiverClass(ClassLinker* class_linker,
uint32_t dex_pc) const {
ArtField* field = class_linker->GetClassRoot(ClassLinker::kJavaLangObject)->GetInstanceField(0);
DCHECK_EQ(std::string(field->GetName()), "shadow$_klass_");
- HInstanceFieldGet* result = new (graph_->GetArena()) HInstanceFieldGet(
+ HInstanceFieldGet* result = new (graph_->GetAllocator()) HInstanceFieldGet(
receiver,
field,
DataType::Type::kReference,
@@ -812,12 +812,12 @@ void HInliner::AddCHAGuard(HInstruction* invoke_instruction,
uint32_t dex_pc,
HInstruction* cursor,
HBasicBlock* bb_cursor) {
- HShouldDeoptimizeFlag* deopt_flag = new (graph_->GetArena())
- HShouldDeoptimizeFlag(graph_->GetArena(), dex_pc);
- HInstruction* compare = new (graph_->GetArena()) HNotEqual(
+ HShouldDeoptimizeFlag* deopt_flag = new (graph_->GetAllocator())
+ HShouldDeoptimizeFlag(graph_->GetAllocator(), dex_pc);
+ HInstruction* compare = new (graph_->GetAllocator()) HNotEqual(
deopt_flag, graph_->GetIntConstant(0, dex_pc));
- HInstruction* deopt = new (graph_->GetArena()) HDeoptimize(
- graph_->GetArena(), compare, DeoptimizationKind::kCHA, dex_pc);
+ HInstruction* deopt = new (graph_->GetAllocator()) HDeoptimize(
+ graph_->GetAllocator(), compare, DeoptimizationKind::kCHA, dex_pc);
if (cursor != nullptr) {
bb_cursor->InsertInstructionAfter(deopt_flag, cursor);
@@ -865,13 +865,13 @@ HInstruction* HInliner::AddTypeGuard(HInstruction* receiver,
// Note that we will just compare the classes, so we don't need Java semantics access checks.
// Note that the type index and the dex file are relative to the method this type guard is
// inlined into.
- HLoadClass* load_class = new (graph_->GetArena()) HLoadClass(graph_->GetCurrentMethod(),
- class_index,
- caller_dex_file,
- klass,
- is_referrer,
- invoke_instruction->GetDexPc(),
- /* needs_access_check */ false);
+ HLoadClass* load_class = new (graph_->GetAllocator()) HLoadClass(graph_->GetCurrentMethod(),
+ class_index,
+ caller_dex_file,
+ klass,
+ is_referrer,
+ invoke_instruction->GetDexPc(),
+ /* needs_access_check */ false);
HLoadClass::LoadKind kind = HSharpening::ComputeLoadClassKind(
load_class, codegen_, compiler_driver_, caller_compilation_unit_);
DCHECK(kind != HLoadClass::LoadKind::kInvalid)
@@ -887,11 +887,11 @@ HInstruction* HInliner::AddTypeGuard(HInstruction* receiver,
load_class->CopyEnvironmentFrom(invoke_instruction->GetEnvironment());
}
- HNotEqual* compare = new (graph_->GetArena()) HNotEqual(load_class, receiver_class);
+ HNotEqual* compare = new (graph_->GetAllocator()) HNotEqual(load_class, receiver_class);
bb_cursor->InsertInstructionAfter(compare, load_class);
if (with_deoptimization) {
- HDeoptimize* deoptimize = new (graph_->GetArena()) HDeoptimize(
- graph_->GetArena(),
+ HDeoptimize* deoptimize = new (graph_->GetAllocator()) HDeoptimize(
+ graph_->GetAllocator(),
compare,
receiver,
Runtime::Current()->IsAotCompiler()
@@ -1012,7 +1012,7 @@ void HInliner::CreateDiamondPatternForPolymorphicInline(HInstruction* compare,
uint32_t dex_pc = invoke_instruction->GetDexPc();
HBasicBlock* cursor_block = compare->GetBlock();
HBasicBlock* original_invoke_block = invoke_instruction->GetBlock();
- ArenaAllocator* allocator = graph_->GetArena();
+ ArenaAllocator* allocator = graph_->GetAllocator();
// Spit the block after the compare: `cursor_block` will now be the start of the diamond,
// and the returned block is the start of the then branch (that could contain multiple blocks).
@@ -1147,7 +1147,7 @@ bool HInliner::TryInlinePolymorphicCallToSameTarget(
DataType::Type type = Is64BitInstructionSet(graph_->GetInstructionSet())
? DataType::Type::kInt64
: DataType::Type::kInt32;
- HClassTableGet* class_table_get = new (graph_->GetArena()) HClassTableGet(
+ HClassTableGet* class_table_get = new (graph_->GetAllocator()) HClassTableGet(
receiver_class,
type,
invoke_instruction->IsInvokeVirtual() ? HClassTableGet::TableKind::kVTable
@@ -1164,7 +1164,7 @@ bool HInliner::TryInlinePolymorphicCallToSameTarget(
reinterpret_cast<intptr_t>(actual_method), invoke_instruction->GetDexPc());
}
- HNotEqual* compare = new (graph_->GetArena()) HNotEqual(class_table_get, constant);
+ HNotEqual* compare = new (graph_->GetAllocator()) HNotEqual(class_table_get, constant);
if (cursor != nullptr) {
bb_cursor->InsertInstructionAfter(receiver_class, cursor);
} else {
@@ -1176,8 +1176,8 @@ bool HInliner::TryInlinePolymorphicCallToSameTarget(
if (outermost_graph_->IsCompilingOsr()) {
CreateDiamondPatternForPolymorphicInline(compare, return_replacement, invoke_instruction);
} else {
- HDeoptimize* deoptimize = new (graph_->GetArena()) HDeoptimize(
- graph_->GetArena(),
+ HDeoptimize* deoptimize = new (graph_->GetAllocator()) HDeoptimize(
+ graph_->GetAllocator(),
compare,
receiver,
DeoptimizationKind::kJitSameTarget,
@@ -1240,8 +1240,8 @@ bool HInliner::TryInlineAndReplace(HInvoke* invoke_instruction,
if (dex_method_index == dex::kDexNoIndex) {
return false;
}
- HInvokeVirtual* new_invoke = new (graph_->GetArena()) HInvokeVirtual(
- graph_->GetArena(),
+ HInvokeVirtual* new_invoke = new (graph_->GetAllocator()) HInvokeVirtual(
+ graph_->GetAllocator(),
invoke_instruction->GetNumberOfArguments(),
invoke_instruction->GetType(),
invoke_instruction->GetDexPc(),
@@ -1517,7 +1517,7 @@ bool HInliner::TryPatternSubstitution(HInvoke* invoke_instruction,
DCHECK(obj != nullptr) << "only non-static methods can have a constructor fence";
HConstructorFence* constructor_fence =
- new (graph_->GetArena()) HConstructorFence(obj, kNoDexPc, graph_->GetArena());
+ new (graph_->GetAllocator()) HConstructorFence(obj, kNoDexPc, graph_->GetAllocator());
invoke_instruction->GetBlock()->InsertInstructionBefore(constructor_fence,
invoke_instruction);
}
@@ -1539,7 +1539,7 @@ HInstanceFieldGet* HInliner::CreateInstanceFieldGet(uint32_t field_index,
ArtField* resolved_field =
class_linker->LookupResolvedField(field_index, referrer, /* is_static */ false);
DCHECK(resolved_field != nullptr);
- HInstanceFieldGet* iget = new (graph_->GetArena()) HInstanceFieldGet(
+ HInstanceFieldGet* iget = new (graph_->GetAllocator()) HInstanceFieldGet(
obj,
resolved_field,
DataType::FromShorty(resolved_field->GetTypeDescriptor()[0]),
@@ -1579,7 +1579,7 @@ HInstanceFieldSet* HInliner::CreateInstanceFieldSet(uint32_t field_index,
DCHECK(referrer->IsConstructor());
*is_final = resolved_field->IsFinal();
}
- HInstanceFieldSet* iput = new (graph_->GetArena()) HInstanceFieldSet(
+ HInstanceFieldSet* iput = new (graph_->GetAllocator()) HInstanceFieldSet(
obj,
value,
resolved_field,
@@ -1641,8 +1641,9 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
}
const int32_t caller_instruction_counter = graph_->GetCurrentInstructionId();
- HGraph* callee_graph = new (graph_->GetArena()) HGraph(
- graph_->GetArena(),
+ HGraph* callee_graph = new (graph_->GetAllocator()) HGraph(
+ graph_->GetAllocator(),
+ graph_->GetArenaStack(),
callee_dex_file,
method_index,
compiler_driver_->GetInstructionSet(),
@@ -1659,7 +1660,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
if (stats_ != nullptr) {
// Reuse one object for all inline attempts from this caller to keep Arena memory usage low.
if (inline_stats_ == nullptr) {
- void* storage = graph_->GetArena()->Alloc<OptimizingCompilerStats>(kArenaAllocMisc);
+ void* storage = graph_->GetAllocator()->Alloc<OptimizingCompilerStats>(kArenaAllocMisc);
inline_stats_ = new (storage) OptimizingCompilerStats;
} else {
inline_stats_->Reset();
@@ -1672,7 +1673,6 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
codegen_,
inline_stats_,
resolved_method->GetQuickenedInfo(class_linker->GetImagePointerSize()),
- dex_cache,
handles_);
if (builder.BuildGraph() != kAnalysisSuccess) {
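
Editorial note: the inliner hunks repeat the same substitution at each node-creation site and, for the callee graph, additionally pass graph_->GetArenaStack() to the new HGraph constructor. Below is a condensed sketch of the CHA-guard shape with constructor arguments copied from the hunks above; BuildChaGuardSketch is a hypothetical wrapper, the real code places the instructions relative to a cursor rather than simply appending them, and the ART types are assumed from nodes.h.

// Hypothetical wrapper condensing the CHA-guard creation shown above.
void BuildChaGuardSketch(HGraph* graph, HBasicBlock* bb_cursor, uint32_t dex_pc) {
  ArenaAllocator* allocator = graph->GetAllocator();  // was graph->GetArena()
  HShouldDeoptimizeFlag* deopt_flag =
      new (allocator) HShouldDeoptimizeFlag(allocator, dex_pc);
  HInstruction* compare =
      new (allocator) HNotEqual(deopt_flag, graph->GetIntConstant(0, dex_pc));
  HInstruction* deopt =
      new (allocator) HDeoptimize(allocator, compare, DeoptimizationKind::kCHA, dex_pc);
  bb_cursor->AddInstruction(deopt_flag);
  bb_cursor->AddInstruction(compare);
  bb_cursor->AddInstruction(deopt);
}
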
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index 6ad8036870..b06d91c823 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -59,8 +59,8 @@ ArenaVector<HInstruction*>* HInstructionBuilder::GetLocalsForWithAllocation(
// the first throwing instruction.
HInstruction* current_local_value = (*current_locals_)[i];
if (current_local_value != nullptr) {
- HPhi* phi = new (arena_) HPhi(
- arena_,
+ HPhi* phi = new (allocator_) HPhi(
+ allocator_,
i,
0,
current_local_value->GetType());
@@ -109,8 +109,8 @@ void HInstructionBuilder::InitializeBlockLocals() {
HInstruction* incoming =
ValueOfLocalAt(current_block_->GetLoopInformation()->GetPreHeader(), local);
if (incoming != nullptr) {
- HPhi* phi = new (arena_) HPhi(
- arena_,
+ HPhi* phi = new (allocator_) HPhi(
+ allocator_,
local,
0,
incoming->GetType());
@@ -148,8 +148,8 @@ void HInstructionBuilder::InitializeBlockLocals() {
if (is_different) {
HInstruction* first_input = ValueOfLocalAt(current_block_->GetPredecessors()[0], local);
- HPhi* phi = new (arena_) HPhi(
- arena_,
+ HPhi* phi = new (allocator_) HPhi(
+ allocator_,
local,
current_block_->GetPredecessors().size(),
first_input->GetType());
@@ -210,8 +210,8 @@ void HInstructionBuilder::InsertInstructionAtTop(HInstruction* instruction) {
void HInstructionBuilder::InitializeInstruction(HInstruction* instruction) {
if (instruction->NeedsEnvironment()) {
- HEnvironment* environment = new (arena_) HEnvironment(
- arena_,
+ HEnvironment* environment = new (allocator_) HEnvironment(
+ allocator_,
current_locals_->size(),
graph_->GetArtMethod(),
instruction->GetDexPc(),
@@ -227,7 +227,7 @@ HInstruction* HInstructionBuilder::LoadNullCheckedLocal(uint32_t register_index,
return ref;
}
- HNullCheck* null_check = new (arena_) HNullCheck(ref, dex_pc);
+ HNullCheck* null_check = new (allocator_) HNullCheck(ref, dex_pc);
AppendInstruction(null_check);
return null_check;
}
@@ -265,7 +265,7 @@ static bool IsBlockPopulated(HBasicBlock* block) {
bool HInstructionBuilder::Build() {
locals_for_.resize(graph_->GetBlocks().size(),
- ArenaVector<HInstruction*>(arena_->Adapter(kArenaAllocGraphBuilder)));
+ ArenaVector<HInstruction*>(allocator_->Adapter(kArenaAllocGraphBuilder)));
// Find locations where we want to generate extra stackmaps for native debugging.
// This allows us to generate the info only at interesting points (for example,
@@ -275,7 +275,8 @@ bool HInstructionBuilder::Build() {
ArenaBitVector* native_debug_info_locations = nullptr;
if (native_debuggable) {
const uint32_t num_instructions = code_item_.insns_size_in_code_units_;
- native_debug_info_locations = new (arena_) ArenaBitVector (arena_, num_instructions, false);
+ native_debug_info_locations =
+ new (allocator_) ArenaBitVector(allocator_, num_instructions, false);
FindNativeDebugInfoLocations(native_debug_info_locations);
}
@@ -287,14 +288,14 @@ bool HInstructionBuilder::Build() {
if (current_block_->IsEntryBlock()) {
InitializeParameters();
- AppendInstruction(new (arena_) HSuspendCheck(0u));
- AppendInstruction(new (arena_) HGoto(0u));
+ AppendInstruction(new (allocator_) HSuspendCheck(0u));
+ AppendInstruction(new (allocator_) HGoto(0u));
continue;
} else if (current_block_->IsExitBlock()) {
- AppendInstruction(new (arena_) HExit());
+ AppendInstruction(new (allocator_) HExit());
continue;
} else if (current_block_->IsLoopHeader()) {
- HSuspendCheck* suspend_check = new (arena_) HSuspendCheck(current_block_->GetDexPc());
+ HSuspendCheck* suspend_check = new (allocator_) HSuspendCheck(current_block_->GetDexPc());
current_block_->GetLoopInformation()->SetSuspendCheck(suspend_check);
// This is slightly odd because the loop header might not be empty (TryBoundary).
// But we're still creating the environment with locals from the top of the block.
@@ -331,7 +332,7 @@ bool HInstructionBuilder::Build() {
}
if (native_debuggable && native_debug_info_locations->IsBitSet(dex_pc)) {
- AppendInstruction(new (arena_) HNativeDebugInfo(dex_pc));
+ AppendInstruction(new (allocator_) HNativeDebugInfo(dex_pc));
}
if (!ProcessDexInstruction(it.CurrentInstruction(), dex_pc, quicken_index)) {
@@ -348,7 +349,7 @@ bool HInstructionBuilder::Build() {
// instruction of the current block is not a branching instruction.
// We add an unconditional Goto to the next block.
DCHECK_EQ(current_block_->GetSuccessors().size(), 1u);
- AppendInstruction(new (arena_) HGoto());
+ AppendInstruction(new (allocator_) HGoto());
}
}
@@ -452,7 +453,7 @@ void HInstructionBuilder::InitializeParameters() {
dex_file_->GetMethodId(dex_compilation_unit_->GetDexMethodIndex());
if (!dex_compilation_unit_->IsStatic()) {
// Add the implicit 'this' argument, not expressed in the signature.
- HParameterValue* parameter = new (arena_) HParameterValue(*dex_file_,
+ HParameterValue* parameter = new (allocator_) HParameterValue(*dex_file_,
referrer_method_id.class_idx_,
parameter_index++,
DataType::Type::kReference,
@@ -468,7 +469,7 @@ void HInstructionBuilder::InitializeParameters() {
const DexFile::ProtoId& proto = dex_file_->GetMethodPrototype(referrer_method_id);
const DexFile::TypeList* arg_types = dex_file_->GetProtoParameters(proto);
for (int i = 0, shorty_pos = 1; i < number_of_parameters; i++) {
- HParameterValue* parameter = new (arena_) HParameterValue(
+ HParameterValue* parameter = new (allocator_) HParameterValue(
*dex_file_,
arg_types->GetTypeItem(shorty_pos - 1).type_idx_,
parameter_index++,
@@ -491,18 +492,18 @@ template<typename T>
void HInstructionBuilder::If_22t(const Instruction& instruction, uint32_t dex_pc) {
HInstruction* first = LoadLocal(instruction.VRegA(), DataType::Type::kInt32);
HInstruction* second = LoadLocal(instruction.VRegB(), DataType::Type::kInt32);
- T* comparison = new (arena_) T(first, second, dex_pc);
+ T* comparison = new (allocator_) T(first, second, dex_pc);
AppendInstruction(comparison);
- AppendInstruction(new (arena_) HIf(comparison, dex_pc));
+ AppendInstruction(new (allocator_) HIf(comparison, dex_pc));
current_block_ = nullptr;
}
template<typename T>
void HInstructionBuilder::If_21t(const Instruction& instruction, uint32_t dex_pc) {
HInstruction* value = LoadLocal(instruction.VRegA(), DataType::Type::kInt32);
- T* comparison = new (arena_) T(value, graph_->GetIntConstant(0, dex_pc), dex_pc);
+ T* comparison = new (allocator_) T(value, graph_->GetIntConstant(0, dex_pc), dex_pc);
AppendInstruction(comparison);
- AppendInstruction(new (arena_) HIf(comparison, dex_pc));
+ AppendInstruction(new (allocator_) HIf(comparison, dex_pc));
current_block_ = nullptr;
}
@@ -511,7 +512,7 @@ void HInstructionBuilder::Unop_12x(const Instruction& instruction,
DataType::Type type,
uint32_t dex_pc) {
HInstruction* first = LoadLocal(instruction.VRegB(), type);
- AppendInstruction(new (arena_) T(type, first, dex_pc));
+ AppendInstruction(new (allocator_) T(type, first, dex_pc));
UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
}
@@ -520,7 +521,7 @@ void HInstructionBuilder::Conversion_12x(const Instruction& instruction,
DataType::Type result_type,
uint32_t dex_pc) {
HInstruction* first = LoadLocal(instruction.VRegB(), input_type);
- AppendInstruction(new (arena_) HTypeConversion(result_type, first, dex_pc));
+ AppendInstruction(new (allocator_) HTypeConversion(result_type, first, dex_pc));
UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
}
@@ -530,7 +531,7 @@ void HInstructionBuilder::Binop_23x(const Instruction& instruction,
uint32_t dex_pc) {
HInstruction* first = LoadLocal(instruction.VRegB(), type);
HInstruction* second = LoadLocal(instruction.VRegC(), type);
- AppendInstruction(new (arena_) T(type, first, second, dex_pc));
+ AppendInstruction(new (allocator_) T(type, first, second, dex_pc));
UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
}
@@ -540,7 +541,7 @@ void HInstructionBuilder::Binop_23x_shift(const Instruction& instruction,
uint32_t dex_pc) {
HInstruction* first = LoadLocal(instruction.VRegB(), type);
HInstruction* second = LoadLocal(instruction.VRegC(), DataType::Type::kInt32);
- AppendInstruction(new (arena_) T(type, first, second, dex_pc));
+ AppendInstruction(new (allocator_) T(type, first, second, dex_pc));
UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
}
@@ -550,7 +551,7 @@ void HInstructionBuilder::Binop_23x_cmp(const Instruction& instruction,
uint32_t dex_pc) {
HInstruction* first = LoadLocal(instruction.VRegB(), type);
HInstruction* second = LoadLocal(instruction.VRegC(), type);
- AppendInstruction(new (arena_) HCompare(type, first, second, bias, dex_pc));
+ AppendInstruction(new (allocator_) HCompare(type, first, second, bias, dex_pc));
UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
}
@@ -560,7 +561,7 @@ void HInstructionBuilder::Binop_12x_shift(const Instruction& instruction,
uint32_t dex_pc) {
HInstruction* first = LoadLocal(instruction.VRegA(), type);
HInstruction* second = LoadLocal(instruction.VRegB(), DataType::Type::kInt32);
- AppendInstruction(new (arena_) T(type, first, second, dex_pc));
+ AppendInstruction(new (allocator_) T(type, first, second, dex_pc));
UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
}
@@ -570,7 +571,7 @@ void HInstructionBuilder::Binop_12x(const Instruction& instruction,
uint32_t dex_pc) {
HInstruction* first = LoadLocal(instruction.VRegA(), type);
HInstruction* second = LoadLocal(instruction.VRegB(), type);
- AppendInstruction(new (arena_) T(type, first, second, dex_pc));
+ AppendInstruction(new (allocator_) T(type, first, second, dex_pc));
UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
}
@@ -581,7 +582,7 @@ void HInstructionBuilder::Binop_22s(const Instruction& instruction, bool reverse
if (reverse) {
std::swap(first, second);
}
- AppendInstruction(new (arena_) T(DataType::Type::kInt32, first, second, dex_pc));
+ AppendInstruction(new (allocator_) T(DataType::Type::kInt32, first, second, dex_pc));
UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
}
@@ -592,7 +593,7 @@ void HInstructionBuilder::Binop_22b(const Instruction& instruction, bool reverse
if (reverse) {
std::swap(first, second);
}
- AppendInstruction(new (arena_) T(DataType::Type::kInt32, first, second, dex_pc));
+ AppendInstruction(new (allocator_) T(DataType::Type::kInt32, first, second, dex_pc));
UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
}
@@ -630,13 +631,13 @@ void HInstructionBuilder::BuildSwitch(const Instruction& instruction, uint32_t d
if (table.GetNumEntries() == 0) {
// Empty Switch. Code falls through to the next block.
DCHECK(IsFallthroughInstruction(instruction, dex_pc, current_block_));
- AppendInstruction(new (arena_) HGoto(dex_pc));
+ AppendInstruction(new (allocator_) HGoto(dex_pc));
} else if (table.ShouldBuildDecisionTree()) {
for (DexSwitchTableIterator it(table); !it.Done(); it.Advance()) {
HInstruction* case_value = graph_->GetIntConstant(it.CurrentKey(), dex_pc);
- HEqual* comparison = new (arena_) HEqual(value, case_value, dex_pc);
+ HEqual* comparison = new (allocator_) HEqual(value, case_value, dex_pc);
AppendInstruction(comparison);
- AppendInstruction(new (arena_) HIf(comparison, dex_pc));
+ AppendInstruction(new (allocator_) HIf(comparison, dex_pc));
if (!it.IsLast()) {
current_block_ = FindBlockStartingAt(it.GetDexPcForCurrentIndex());
@@ -644,7 +645,7 @@ void HInstructionBuilder::BuildSwitch(const Instruction& instruction, uint32_t d
}
} else {
AppendInstruction(
- new (arena_) HPackedSwitch(table.GetEntryAt(0), table.GetNumEntries(), value, dex_pc));
+ new (allocator_) HPackedSwitch(table.GetEntryAt(0), table.GetNumEntries(), value, dex_pc));
}
current_block_ = nullptr;
@@ -664,16 +665,16 @@ void HInstructionBuilder::BuildReturn(const Instruction& instruction,
HInstruction* fence_target = current_this_parameter_;
DCHECK(fence_target != nullptr);
- AppendInstruction(new (arena_) HConstructorFence(fence_target, dex_pc, arena_));
+ AppendInstruction(new (allocator_) HConstructorFence(fence_target, dex_pc, allocator_));
MaybeRecordStat(
compilation_stats_,
MethodCompilationStat::kConstructorFenceGeneratedFinal);
}
- AppendInstruction(new (arena_) HReturnVoid(dex_pc));
+ AppendInstruction(new (allocator_) HReturnVoid(dex_pc));
} else {
DCHECK(!RequiresConstructorBarrier(dex_compilation_unit_, compiler_driver_));
HInstruction* value = LoadLocal(instruction.VRegA(), type);
- AppendInstruction(new (arena_) HReturn(value, dex_pc));
+ AppendInstruction(new (allocator_) HReturn(value, dex_pc));
}
current_block_ = nullptr;
}
@@ -816,12 +817,12 @@ bool HInstructionBuilder::BuildInvoke(const Instruction& instruction,
if (UNLIKELY(resolved_method == nullptr)) {
MaybeRecordStat(compilation_stats_,
MethodCompilationStat::kUnresolvedMethod);
- HInvoke* invoke = new (arena_) HInvokeUnresolved(arena_,
- number_of_arguments,
- return_type,
- dex_pc,
- method_idx,
- invoke_type);
+ HInvoke* invoke = new (allocator_) HInvokeUnresolved(allocator_,
+ number_of_arguments,
+ return_type,
+ dex_pc,
+ method_idx,
+ invoke_type);
return HandleInvoke(invoke,
number_of_vreg_arguments,
args,
@@ -841,8 +842,8 @@ bool HInstructionBuilder::BuildInvoke(const Instruction& instruction,
dchecked_integral_cast<uint64_t>(string_init_entry_point)
};
MethodReference target_method(dex_file_, method_idx);
- HInvoke* invoke = new (arena_) HInvokeStaticOrDirect(
- arena_,
+ HInvoke* invoke = new (allocator_) HInvokeStaticOrDirect(
+ allocator_,
number_of_arguments - 1,
DataType::Type::kReference /*return_type */,
dex_pc,
@@ -887,35 +888,35 @@ bool HInstructionBuilder::BuildInvoke(const Instruction& instruction,
};
MethodReference target_method(resolved_method->GetDexFile(),
resolved_method->GetDexMethodIndex());
- invoke = new (arena_) HInvokeStaticOrDirect(arena_,
- number_of_arguments,
- return_type,
- dex_pc,
- method_idx,
- resolved_method,
- dispatch_info,
- invoke_type,
- target_method,
- clinit_check_requirement);
+ invoke = new (allocator_) HInvokeStaticOrDirect(allocator_,
+ number_of_arguments,
+ return_type,
+ dex_pc,
+ method_idx,
+ resolved_method,
+ dispatch_info,
+ invoke_type,
+ target_method,
+ clinit_check_requirement);
} else if (invoke_type == kVirtual) {
ScopedObjectAccess soa(Thread::Current()); // Needed for the method index
- invoke = new (arena_) HInvokeVirtual(arena_,
- number_of_arguments,
- return_type,
- dex_pc,
- method_idx,
- resolved_method,
- resolved_method->GetMethodIndex());
+ invoke = new (allocator_) HInvokeVirtual(allocator_,
+ number_of_arguments,
+ return_type,
+ dex_pc,
+ method_idx,
+ resolved_method,
+ resolved_method->GetMethodIndex());
} else {
DCHECK_EQ(invoke_type, kInterface);
ScopedObjectAccess soa(Thread::Current()); // Needed for the IMT index.
- invoke = new (arena_) HInvokeInterface(arena_,
- number_of_arguments,
- return_type,
- dex_pc,
- method_idx,
- resolved_method,
- ImTable::GetImtIndex(resolved_method));
+ invoke = new (allocator_) HInvokeInterface(allocator_,
+ number_of_arguments,
+ return_type,
+ dex_pc,
+ method_idx,
+ resolved_method,
+ ImTable::GetImtIndex(resolved_method));
}
return HandleInvoke(invoke,
@@ -940,11 +941,11 @@ bool HInstructionBuilder::BuildInvokePolymorphic(const Instruction& instruction
DCHECK_EQ(1 + ArtMethod::NumArgRegisters(descriptor), number_of_vreg_arguments);
DataType::Type return_type = DataType::FromShorty(descriptor[0]);
size_t number_of_arguments = strlen(descriptor);
- HInvoke* invoke = new (arena_) HInvokePolymorphic(arena_,
- number_of_arguments,
- return_type,
- dex_pc,
- method_idx);
+ HInvoke* invoke = new (allocator_) HInvokePolymorphic(allocator_,
+ number_of_arguments,
+ return_type,
+ dex_pc,
+ method_idx);
return HandleInvoke(invoke,
number_of_vreg_arguments,
args,
@@ -964,7 +965,7 @@ HNewInstance* HInstructionBuilder::BuildNewInstance(dex::TypeIndex type_index, u
Handle<mirror::Class> klass = load_class->GetClass();
if (!IsInitialized(klass)) {
- cls = new (arena_) HClinitCheck(load_class, dex_pc);
+ cls = new (allocator_) HClinitCheck(load_class, dex_pc);
AppendInstruction(cls);
}
@@ -979,7 +980,7 @@ HNewInstance* HInstructionBuilder::BuildNewInstance(dex::TypeIndex type_index, u
// Consider classes we haven't resolved as potentially finalizable.
bool finalizable = (klass == nullptr) || klass->IsFinalizable();
- HNewInstance* new_instance = new (arena_) HNewInstance(
+ HNewInstance* new_instance = new (allocator_) HNewInstance(
cls,
dex_pc,
type_index,
@@ -1036,7 +1037,7 @@ void HInstructionBuilder::BuildConstructorFenceForAllocation(HInstruction* alloc
// (and in theory the 0-initializing, but that happens automatically
// when new memory pages are mapped in by the OS).
HConstructorFence* ctor_fence =
- new (arena_) HConstructorFence(allocation, allocation->GetDexPc(), arena_);
+ new (allocator_) HConstructorFence(allocation, allocation->GetDexPc(), allocator_);
AppendInstruction(ctor_fence);
MaybeRecordStat(
compilation_stats_,
@@ -1090,7 +1091,7 @@ HClinitCheck* HInstructionBuilder::ProcessClinitCheckForInvoke(
/* needs_access_check */ false);
if (cls != nullptr) {
*clinit_check_requirement = HInvokeStaticOrDirect::ClinitCheckRequirement::kExplicit;
- clinit_check = new (arena_) HClinitCheck(cls, dex_pc);
+ clinit_check = new (allocator_) HClinitCheck(cls, dex_pc);
AppendInstruction(clinit_check);
}
}
@@ -1290,23 +1291,23 @@ bool HInstructionBuilder::BuildInstanceFieldAccess(const Instruction& instructio
if (resolved_field == nullptr) {
MaybeRecordStat(compilation_stats_,
MethodCompilationStat::kUnresolvedField);
- field_set = new (arena_) HUnresolvedInstanceFieldSet(object,
- value,
- field_type,
- field_index,
- dex_pc);
+ field_set = new (allocator_) HUnresolvedInstanceFieldSet(object,
+ value,
+ field_type,
+ field_index,
+ dex_pc);
} else {
uint16_t class_def_index = resolved_field->GetDeclaringClass()->GetDexClassDefIndex();
- field_set = new (arena_) HInstanceFieldSet(object,
- value,
- resolved_field,
- field_type,
- resolved_field->GetOffset(),
- resolved_field->IsVolatile(),
- field_index,
- class_def_index,
- *dex_file_,
- dex_pc);
+ field_set = new (allocator_) HInstanceFieldSet(object,
+ value,
+ resolved_field,
+ field_type,
+ resolved_field->GetOffset(),
+ resolved_field->IsVolatile(),
+ field_index,
+ class_def_index,
+ *dex_file_,
+ dex_pc);
}
AppendInstruction(field_set);
} else {
@@ -1314,21 +1315,21 @@ bool HInstructionBuilder::BuildInstanceFieldAccess(const Instruction& instructio
if (resolved_field == nullptr) {
MaybeRecordStat(compilation_stats_,
MethodCompilationStat::kUnresolvedField);
- field_get = new (arena_) HUnresolvedInstanceFieldGet(object,
- field_type,
- field_index,
- dex_pc);
+ field_get = new (allocator_) HUnresolvedInstanceFieldGet(object,
+ field_type,
+ field_index,
+ dex_pc);
} else {
uint16_t class_def_index = resolved_field->GetDeclaringClass()->GetDexClassDefIndex();
- field_get = new (arena_) HInstanceFieldGet(object,
- resolved_field,
- field_type,
- resolved_field->GetOffset(),
- resolved_field->IsVolatile(),
- field_index,
- class_def_index,
- *dex_file_,
- dex_pc);
+ field_get = new (allocator_) HInstanceFieldGet(object,
+ resolved_field,
+ field_type,
+ resolved_field->GetOffset(),
+ resolved_field->IsVolatile(),
+ field_index,
+ class_def_index,
+ *dex_file_,
+ dex_pc);
}
AppendInstruction(field_get);
UpdateLocal(source_or_dest_reg, field_get);
@@ -1382,9 +1383,9 @@ void HInstructionBuilder::BuildUnresolvedStaticFieldAccess(const Instruction& in
if (is_put) {
HInstruction* value = LoadLocal(source_or_dest_reg, field_type);
AppendInstruction(
- new (arena_) HUnresolvedStaticFieldSet(value, field_type, field_index, dex_pc));
+ new (allocator_) HUnresolvedStaticFieldSet(value, field_type, field_index, dex_pc));
} else {
- AppendInstruction(new (arena_) HUnresolvedStaticFieldGet(field_type, field_index, dex_pc));
+ AppendInstruction(new (allocator_) HUnresolvedStaticFieldGet(field_type, field_index, dex_pc));
UpdateLocal(source_or_dest_reg, current_block_->GetLastInstruction());
}
}
@@ -1475,7 +1476,7 @@ bool HInstructionBuilder::BuildStaticFieldAccess(const Instruction& instruction,
HInstruction* cls = constant;
if (!IsInitialized(klass)) {
- cls = new (arena_) HClinitCheck(constant, dex_pc);
+ cls = new (allocator_) HClinitCheck(constant, dex_pc);
AppendInstruction(cls);
}
@@ -1484,38 +1485,38 @@ bool HInstructionBuilder::BuildStaticFieldAccess(const Instruction& instruction,
// We need to keep the class alive before loading the value.
HInstruction* value = LoadLocal(source_or_dest_reg, field_type);
DCHECK_EQ(HPhi::ToPhiType(value->GetType()), HPhi::ToPhiType(field_type));
- AppendInstruction(new (arena_) HStaticFieldSet(cls,
- value,
- resolved_field,
- field_type,
- resolved_field->GetOffset(),
- resolved_field->IsVolatile(),
- field_index,
- class_def_index,
- *dex_file_,
- dex_pc));
+ AppendInstruction(new (allocator_) HStaticFieldSet(cls,
+ value,
+ resolved_field,
+ field_type,
+ resolved_field->GetOffset(),
+ resolved_field->IsVolatile(),
+ field_index,
+ class_def_index,
+ *dex_file_,
+ dex_pc));
} else {
- AppendInstruction(new (arena_) HStaticFieldGet(cls,
- resolved_field,
- field_type,
- resolved_field->GetOffset(),
- resolved_field->IsVolatile(),
- field_index,
- class_def_index,
- *dex_file_,
- dex_pc));
+ AppendInstruction(new (allocator_) HStaticFieldGet(cls,
+ resolved_field,
+ field_type,
+ resolved_field->GetOffset(),
+ resolved_field->IsVolatile(),
+ field_index,
+ class_def_index,
+ *dex_file_,
+ dex_pc));
UpdateLocal(source_or_dest_reg, current_block_->GetLastInstruction());
}
return true;
}
void HInstructionBuilder::BuildCheckedDivRem(uint16_t out_vreg,
- uint16_t first_vreg,
- int64_t second_vreg_or_constant,
- uint32_t dex_pc,
- DataType::Type type,
- bool second_is_constant,
- bool isDiv) {
+ uint16_t first_vreg,
+ int64_t second_vreg_or_constant,
+ uint32_t dex_pc,
+ DataType::Type type,
+ bool second_is_constant,
+ bool isDiv) {
DCHECK(type == DataType::Type::kInt32 || type == DataType::Type::kInt64);
HInstruction* first = LoadLocal(first_vreg, type);
@@ -1533,14 +1534,14 @@ void HInstructionBuilder::BuildCheckedDivRem(uint16_t out_vreg,
if (!second_is_constant
|| (type == DataType::Type::kInt32 && second->AsIntConstant()->GetValue() == 0)
|| (type == DataType::Type::kInt64 && second->AsLongConstant()->GetValue() == 0)) {
- second = new (arena_) HDivZeroCheck(second, dex_pc);
+ second = new (allocator_) HDivZeroCheck(second, dex_pc);
AppendInstruction(second);
}
if (isDiv) {
- AppendInstruction(new (arena_) HDiv(type, first, second, dex_pc));
+ AppendInstruction(new (allocator_) HDiv(type, first, second, dex_pc));
} else {
- AppendInstruction(new (arena_) HRem(type, first, second, dex_pc));
+ AppendInstruction(new (allocator_) HRem(type, first, second, dex_pc));
}
UpdateLocal(out_vreg, current_block_->GetLastInstruction());
}
@@ -1554,19 +1555,19 @@ void HInstructionBuilder::BuildArrayAccess(const Instruction& instruction,
uint8_t index_reg = instruction.VRegC_23x();
HInstruction* object = LoadNullCheckedLocal(array_reg, dex_pc);
- HInstruction* length = new (arena_) HArrayLength(object, dex_pc);
+ HInstruction* length = new (allocator_) HArrayLength(object, dex_pc);
AppendInstruction(length);
HInstruction* index = LoadLocal(index_reg, DataType::Type::kInt32);
- index = new (arena_) HBoundsCheck(index, length, dex_pc);
+ index = new (allocator_) HBoundsCheck(index, length, dex_pc);
AppendInstruction(index);
if (is_put) {
HInstruction* value = LoadLocal(source_or_dest_reg, anticipated_type);
// TODO: Insert a type check node if the type is Object.
- HArraySet* aset = new (arena_) HArraySet(object, index, value, anticipated_type, dex_pc);
+ HArraySet* aset = new (allocator_) HArraySet(object, index, value, anticipated_type, dex_pc);
ssa_builder_->MaybeAddAmbiguousArraySet(aset);
AppendInstruction(aset);
} else {
- HArrayGet* aget = new (arena_) HArrayGet(object, index, anticipated_type, dex_pc);
+ HArrayGet* aget = new (allocator_) HArrayGet(object, index, anticipated_type, dex_pc);
ssa_builder_->MaybeAddAmbiguousArrayGet(aget);
AppendInstruction(aget);
UpdateLocal(source_or_dest_reg, current_block_->GetLastInstruction());
@@ -1582,7 +1583,7 @@ HNewArray* HInstructionBuilder::BuildFilledNewArray(uint32_t dex_pc,
uint32_t register_index) {
HInstruction* length = graph_->GetIntConstant(number_of_vreg_arguments, dex_pc);
HLoadClass* cls = BuildLoadClass(type_index, dex_pc);
- HNewArray* const object = new (arena_) HNewArray(cls, length, dex_pc);
+ HNewArray* const object = new (allocator_) HNewArray(cls, length, dex_pc);
AppendInstruction(object);
const char* descriptor = dex_file_->StringByTypeIdx(type_index);
@@ -1597,7 +1598,7 @@ HNewArray* HInstructionBuilder::BuildFilledNewArray(uint32_t dex_pc,
for (size_t i = 0; i < number_of_vreg_arguments; ++i) {
HInstruction* value = LoadLocal(is_range ? register_index + i : args[i], type);
HInstruction* index = graph_->GetIntConstant(i, dex_pc);
- HArraySet* aset = new (arena_) HArraySet(object, index, value, type, dex_pc);
+ HArraySet* aset = new (allocator_) HArraySet(object, index, value, type, dex_pc);
ssa_builder_->MaybeAddAmbiguousArraySet(aset);
AppendInstruction(aset);
}
@@ -1615,7 +1616,7 @@ void HInstructionBuilder::BuildFillArrayData(HInstruction* object,
for (uint32_t i = 0; i < element_count; ++i) {
HInstruction* index = graph_->GetIntConstant(i, dex_pc);
HInstruction* value = graph_->GetIntConstant(data[i], dex_pc);
- HArraySet* aset = new (arena_) HArraySet(object, index, value, anticipated_type, dex_pc);
+ HArraySet* aset = new (allocator_) HArraySet(object, index, value, anticipated_type, dex_pc);
ssa_builder_->MaybeAddAmbiguousArraySet(aset);
AppendInstruction(aset);
}
@@ -1635,13 +1636,13 @@ void HInstructionBuilder::BuildFillArrayData(const Instruction& instruction, uin
return;
}
- HInstruction* length = new (arena_) HArrayLength(array, dex_pc);
+ HInstruction* length = new (allocator_) HArrayLength(array, dex_pc);
AppendInstruction(length);
// Implementation of this DEX instruction seems to be that the bounds check is
// done before doing any stores.
HInstruction* last_index = graph_->GetIntConstant(payload->element_count - 1, dex_pc);
- AppendInstruction(new (arena_) HBoundsCheck(last_index, length, dex_pc));
+ AppendInstruction(new (allocator_) HBoundsCheck(last_index, length, dex_pc));
switch (payload->element_width) {
case 1:
@@ -1684,7 +1685,8 @@ void HInstructionBuilder::BuildFillWideArrayData(HInstruction* object,
for (uint32_t i = 0; i < element_count; ++i) {
HInstruction* index = graph_->GetIntConstant(i, dex_pc);
HInstruction* value = graph_->GetLongConstant(data[i], dex_pc);
- HArraySet* aset = new (arena_) HArraySet(object, index, value, DataType::Type::kInt64, dex_pc);
+ HArraySet* aset =
+ new (allocator_) HArraySet(object, index, value, DataType::Type::kInt64, dex_pc);
ssa_builder_->MaybeAddAmbiguousArraySet(aset);
AppendInstruction(aset);
}
@@ -1752,7 +1754,7 @@ HLoadClass* HInstructionBuilder::BuildLoadClass(dex::TypeIndex type_index,
}
// Note: `klass` must be from `handles_`.
- HLoadClass* load_class = new (arena_) HLoadClass(
+ HLoadClass* load_class = new (allocator_) HLoadClass(
graph_->GetCurrentMethod(),
type_index,
*actual_dex_file,
@@ -1787,15 +1789,15 @@ void HInstructionBuilder::BuildTypeCheck(const Instruction& instruction,
ScopedObjectAccess soa(Thread::Current());
TypeCheckKind check_kind = ComputeTypeCheckKind(cls->GetClass());
if (instruction.Opcode() == Instruction::INSTANCE_OF) {
- AppendInstruction(new (arena_) HInstanceOf(object, cls, check_kind, dex_pc));
+ AppendInstruction(new (allocator_) HInstanceOf(object, cls, check_kind, dex_pc));
UpdateLocal(destination, current_block_->GetLastInstruction());
} else {
DCHECK_EQ(instruction.Opcode(), Instruction::CHECK_CAST);
// We emit a CheckCast followed by a BoundType. CheckCast is a statement
// which may throw. If it succeeds BoundType sets the new type of `object`
// for all subsequent uses.
- AppendInstruction(new (arena_) HCheckCast(object, cls, check_kind, dex_pc));
- AppendInstruction(new (arena_) HBoundType(object, dex_pc));
+ AppendInstruction(new (allocator_) HCheckCast(object, cls, check_kind, dex_pc));
+ AppendInstruction(new (allocator_) HBoundType(object, dex_pc));
UpdateLocal(reference, current_block_->GetLastInstruction());
}
}
@@ -1943,7 +1945,7 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction,
case Instruction::GOTO:
case Instruction::GOTO_16:
case Instruction::GOTO_32: {
- AppendInstruction(new (arena_) HGoto(dex_pc));
+ AppendInstruction(new (allocator_) HGoto(dex_pc));
current_block_ = nullptr;
break;
}
@@ -2580,7 +2582,7 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction,
HInstruction* length = LoadLocal(instruction.VRegB_22c(), DataType::Type::kInt32);
HLoadClass* cls = BuildLoadClass(type_index, dex_pc);
- HNewArray* new_array = new (arena_) HNewArray(cls, length, dex_pc);
+ HNewArray* new_array = new (allocator_) HNewArray(cls, length, dex_pc);
AppendInstruction(new_array);
UpdateLocal(instruction.VRegA_22c(), current_block_->GetLastInstruction());
BuildConstructorFenceForAllocation(new_array);
@@ -2744,23 +2746,27 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction,
case Instruction::ARRAY_LENGTH: {
HInstruction* object = LoadNullCheckedLocal(instruction.VRegB_12x(), dex_pc);
- AppendInstruction(new (arena_) HArrayLength(object, dex_pc));
+ AppendInstruction(new (allocator_) HArrayLength(object, dex_pc));
UpdateLocal(instruction.VRegA_12x(), current_block_->GetLastInstruction());
break;
}
case Instruction::CONST_STRING: {
dex::StringIndex string_index(instruction.VRegB_21c());
- AppendInstruction(
- new (arena_) HLoadString(graph_->GetCurrentMethod(), string_index, *dex_file_, dex_pc));
+ AppendInstruction(new (allocator_) HLoadString(graph_->GetCurrentMethod(),
+ string_index,
+ *dex_file_,
+ dex_pc));
UpdateLocal(instruction.VRegA_21c(), current_block_->GetLastInstruction());
break;
}
case Instruction::CONST_STRING_JUMBO: {
dex::StringIndex string_index(instruction.VRegB_31c());
- AppendInstruction(
- new (arena_) HLoadString(graph_->GetCurrentMethod(), string_index, *dex_file_, dex_pc));
+ AppendInstruction(new (allocator_) HLoadString(graph_->GetCurrentMethod(),
+ string_index,
+ *dex_file_,
+ dex_pc));
UpdateLocal(instruction.VRegA_31c(), current_block_->GetLastInstruction());
break;
}
@@ -2773,15 +2779,15 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction,
}
case Instruction::MOVE_EXCEPTION: {
- AppendInstruction(new (arena_) HLoadException(dex_pc));
+ AppendInstruction(new (allocator_) HLoadException(dex_pc));
UpdateLocal(instruction.VRegA_11x(), current_block_->GetLastInstruction());
- AppendInstruction(new (arena_) HClearException(dex_pc));
+ AppendInstruction(new (allocator_) HClearException(dex_pc));
break;
}
case Instruction::THROW: {
HInstruction* exception = LoadLocal(instruction.VRegA_11x(), DataType::Type::kReference);
- AppendInstruction(new (arena_) HThrow(exception, dex_pc));
+ AppendInstruction(new (allocator_) HThrow(exception, dex_pc));
// We finished building this block. Set the current block to null to avoid
// adding dead instructions to it.
current_block_ = nullptr;
@@ -2804,7 +2810,7 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction,
}
case Instruction::MONITOR_ENTER: {
- AppendInstruction(new (arena_) HMonitorOperation(
+ AppendInstruction(new (allocator_) HMonitorOperation(
LoadLocal(instruction.VRegA_11x(), DataType::Type::kReference),
HMonitorOperation::OperationKind::kEnter,
dex_pc));
@@ -2812,7 +2818,7 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction,
}
case Instruction::MONITOR_EXIT: {
- AppendInstruction(new (arena_) HMonitorOperation(
+ AppendInstruction(new (allocator_) HMonitorOperation(
LoadLocal(instruction.VRegA_11x(), DataType::Type::kReference),
HMonitorOperation::OperationKind::kExit,
dex_pc));
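
The instruction-builder hunks above are a mechanical rename (arena_ to allocator_, GetArena() to GetAllocator()); the allocation idiom itself is unchanged: every IR node is created with placement new on an arena-style allocator owned by the graph, and nodes are never freed individually. A minimal, self-contained sketch of that idiom, using toy names (BumpArena, Node) rather than the real ART classes:

    #include <cstddef>
    #include <cstdint>
    #include <new>
    #include <vector>

    // Toy bump allocator standing in for an arena-style allocator (illustrative only).
    class BumpArena {
     public:
      explicit BumpArena(std::size_t capacity) : buffer_(capacity), used_(0) {}

      // Hand out raw, suitably aligned storage; individual frees are never performed.
      void* Alloc(std::size_t bytes, std::size_t align = alignof(std::max_align_t)) {
        std::size_t offset = (used_ + align - 1) & ~(align - 1);
        if (offset + bytes > buffer_.size()) return nullptr;  // toy: no growth, assumed never to run out here
        used_ = offset + bytes;
        return buffer_.data() + offset;
      }

     private:
      std::vector<std::byte> buffer_;
      std::size_t used_;
    };

    // Placement-allocation overload so call sites can read `new (arena) Node(...)`,
    // matching the `new (allocator_) HFoo(...)` shape in the hunks above.
    inline void* operator new(std::size_t bytes, BumpArena* arena) { return arena->Alloc(bytes); }
    inline void operator delete(void*, BumpArena*) noexcept {}  // matching form, only reached on exceptions

    struct Node {
      explicit Node(std::uint32_t dex_pc_in) : dex_pc(dex_pc_in) {}
      std::uint32_t dex_pc;
    };

    int main() {
      BumpArena arena(1 << 10);
      Node* goto_like = new (&arena) Node(/* dex_pc= */ 42);  // cf. `new (allocator_) HGoto(dex_pc)`
      return goto_like->dex_pc == 42 ? 0 : 1;
    }

The placement-new overload is what lets the call sites read `new (allocator_) HGoto(dex_pc)`; ART presumably provides an equivalent hook for its allocator type, so the rename in the diff changes only which member supplies the storage, not how nodes are constructed.
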
diff --git a/compiler/optimizing/instruction_builder.h b/compiler/optimizing/instruction_builder.h
index a684bf40e6..79d6ddc87d 100644
--- a/compiler/optimizing/instruction_builder.h
+++ b/compiler/optimizing/instruction_builder.h
@@ -43,15 +43,15 @@ class HInstructionBuilder : public ValueObject {
const DexFile* dex_file,
const DexFile::CodeItem& code_item,
DataType::Type return_type,
- DexCompilationUnit* dex_compilation_unit,
- const DexCompilationUnit* const outer_compilation_unit,
+ const DexCompilationUnit* dex_compilation_unit,
+ const DexCompilationUnit* outer_compilation_unit,
CompilerDriver* driver,
CodeGenerator* code_generator,
const uint8_t* interpreter_metadata,
OptimizingCompilerStats* compiler_stats,
Handle<mirror::DexCache> dex_cache,
VariableSizedHandleScope* handles)
- : arena_(graph->GetArena()),
+ : allocator_(graph->GetAllocator()),
graph_(graph),
handles_(handles),
dex_file_(dex_file),
@@ -59,7 +59,7 @@ class HInstructionBuilder : public ValueObject {
return_type_(return_type),
block_builder_(block_builder),
ssa_builder_(ssa_builder),
- locals_for_(arena_->Adapter(kArenaAllocGraphBuilder)),
+ locals_for_(allocator_->Adapter(kArenaAllocGraphBuilder)),
current_block_(nullptr),
current_locals_(nullptr),
latest_result_(nullptr),
@@ -71,7 +71,7 @@ class HInstructionBuilder : public ValueObject {
quicken_info_(interpreter_metadata),
compilation_stats_(compiler_stats),
dex_cache_(dex_cache),
- loop_headers_(graph->GetArena()->Adapter(kArenaAllocGraphBuilder)) {
+ loop_headers_(allocator_->Adapter(kArenaAllocGraphBuilder)) {
loop_headers_.reserve(kDefaultNumberOfLoops);
}
@@ -312,7 +312,7 @@ class HInstructionBuilder : public ValueObject {
ObjPtr<mirror::Class> LookupReferrerClass() const REQUIRES_SHARED(Locks::mutator_lock_);
- ArenaAllocator* const arena_;
+ ArenaAllocator* const allocator_;
HGraph* const graph_;
VariableSizedHandleScope* handles_;
@@ -342,7 +342,7 @@ class HInstructionBuilder : public ValueObject {
// The compilation unit of the current method being compiled. Note that
// it can be an inlined method.
- DexCompilationUnit* const dex_compilation_unit_;
+ const DexCompilationUnit* const dex_compilation_unit_;
// The compilation unit of the outermost method being compiled. That is the
// method being compiled (and not inlined), and potentially inlining other
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 6610bcc713..1cebeb5bd7 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -186,7 +186,7 @@ bool InstructionSimplifierVisitor::TryMoveNegOnInputsAfterBinop(HBinaryOperation
binop->ReplaceInput(right_neg->GetInput(), 1);
left_neg->GetBlock()->RemoveInstruction(left_neg);
right_neg->GetBlock()->RemoveInstruction(right_neg);
- HNeg* neg = new (GetGraph()->GetArena()) HNeg(binop->GetType(), binop);
+ HNeg* neg = new (GetGraph()->GetAllocator()) HNeg(binop->GetType(), binop);
binop->GetBlock()->InsertInstructionBefore(neg, binop->GetNext());
binop->ReplaceWithExceptInReplacementAtIndex(neg, 0);
RecordSimplification();
@@ -225,15 +225,15 @@ bool InstructionSimplifierVisitor::TryDeMorganNegationFactoring(HBinaryOperation
// Replace the `HAnd` or `HOr`.
HBinaryOperation* hbin;
if (op->IsAnd()) {
- hbin = new (GetGraph()->GetArena()) HOr(type, src_left, src_right, dex_pc);
+ hbin = new (GetGraph()->GetAllocator()) HOr(type, src_left, src_right, dex_pc);
} else {
- hbin = new (GetGraph()->GetArena()) HAnd(type, src_left, src_right, dex_pc);
+ hbin = new (GetGraph()->GetAllocator()) HAnd(type, src_left, src_right, dex_pc);
}
HInstruction* hnot;
if (left->IsBooleanNot()) {
- hnot = new (GetGraph()->GetArena()) HBooleanNot(hbin, dex_pc);
+ hnot = new (GetGraph()->GetAllocator()) HBooleanNot(hbin, dex_pc);
} else {
- hnot = new (GetGraph()->GetArena()) HNot(type, hbin, dex_pc);
+ hnot = new (GetGraph()->GetAllocator()) HNot(type, hbin, dex_pc);
}
op->GetBlock()->InsertInstructionBefore(hbin, op);
@@ -274,7 +274,7 @@ bool InstructionSimplifierVisitor::TryCombineVecMultiplyAccumulate(HVecMul* mul)
return false;
}
- ArenaAllocator* arena = mul->GetBlock()->GetGraph()->GetArena();
+ ArenaAllocator* arena = mul->GetBlock()->GetGraph()->GetAllocator();
if (mul->HasOnlyOneNonEnvironmentUse()) {
HInstruction* use = mul->GetUses().front().GetUser();
@@ -407,7 +407,8 @@ bool InstructionSimplifierVisitor::ReplaceRotateWithRor(HBinaryOperation* op,
HUShr* ushr,
HShl* shl) {
DCHECK(op->IsAdd() || op->IsXor() || op->IsOr()) << op->DebugName();
- HRor* ror = new (GetGraph()->GetArena()) HRor(ushr->GetType(), ushr->GetLeft(), ushr->GetRight());
+ HRor* ror =
+ new (GetGraph()->GetAllocator()) HRor(ushr->GetType(), ushr->GetLeft(), ushr->GetRight());
op->GetBlock()->ReplaceAndRemoveInstructionWith(op, ror);
if (!ushr->HasUses()) {
ushr->GetBlock()->RemoveInstruction(ushr);
@@ -667,7 +668,7 @@ void InstructionSimplifierVisitor::VisitInstanceOf(HInstanceOf* instruction) {
MaybeRecordStat(stats_, kRemovedInstanceOf);
if (outcome && can_be_null) {
// Type test will succeed, we just need a null test.
- HNotEqual* test = new (graph->GetArena()) HNotEqual(graph->GetNullConstant(), object);
+ HNotEqual* test = new (graph->GetAllocator()) HNotEqual(graph->GetNullConstant(), object);
instruction->GetBlock()->InsertInstructionBefore(test, instruction);
instruction->ReplaceWith(test);
} else {
@@ -939,14 +940,14 @@ void InstructionSimplifierVisitor::VisitSelect(HSelect* select) {
if ((cmp == kCondLT || cmp == kCondLE) &&
(a == negated && a == false_value && IsInt64Value(b, 0))) {
// Found a < 0 ? -a : a which can be replaced by ABS(a).
- replace_with = NewIntegralAbs(GetGraph()->GetArena(), false_value, select);
+ replace_with = NewIntegralAbs(GetGraph()->GetAllocator(), false_value, select);
}
} else if (false_value->IsNeg()) {
HInstruction* negated = false_value->InputAt(0);
if ((cmp == kCondGT || cmp == kCondGE) &&
(a == true_value && a == negated && IsInt64Value(b, 0))) {
// Found a > 0 ? a : -a which can be replaced by ABS(a).
- replace_with = NewIntegralAbs(GetGraph()->GetArena(), true_value, select);
+ replace_with = NewIntegralAbs(GetGraph()->GetAllocator(), true_value, select);
}
} else if (true_value->IsSub() && false_value->IsSub()) {
HInstruction* true_sub1 = true_value->InputAt(0);
@@ -961,7 +962,7 @@ void InstructionSimplifierVisitor::VisitSelect(HSelect* select) {
// Found a > b ? a - b : b - a or
// a < b ? b - a : a - b
// which can be replaced by ABS(a - b) for lower precision operands a, b.
- replace_with = NewIntegralAbs(GetGraph()->GetArena(), true_value, select);
+ replace_with = NewIntegralAbs(GetGraph()->GetAllocator(), true_value, select);
}
}
}
@@ -1173,7 +1174,8 @@ void InstructionSimplifierVisitor::VisitAdd(HAdd* instruction) {
// particular, we do not want the live range of `b` to be extended if we are
// not sure the initial 'NEG' instruction can be removed.
HInstruction* other = left_is_neg ? right : left;
- HSub* sub = new(GetGraph()->GetArena()) HSub(instruction->GetType(), other, neg->GetInput());
+ HSub* sub =
+ new(GetGraph()->GetAllocator()) HSub(instruction->GetType(), other, neg->GetInput());
instruction->GetBlock()->ReplaceAndRemoveInstructionWith(instruction, sub);
RecordSimplification();
neg->GetBlock()->RemoveInstruction(neg);
@@ -1251,10 +1253,10 @@ void InstructionSimplifierVisitor::VisitAnd(HAnd* instruction) {
DCHECK_NE(new_and_input->GetType(), DataType::Type::kInt64);
HConstant* new_const = GetGraph()->GetConstant(DataType::Type::kInt32, value);
HAnd* new_and =
- new (GetGraph()->GetArena()) HAnd(DataType::Type::kInt32, new_and_input, new_const);
+ new (GetGraph()->GetAllocator()) HAnd(DataType::Type::kInt32, new_and_input, new_const);
instruction->GetBlock()->InsertInstructionBefore(new_and, instruction);
HTypeConversion* new_conversion =
- new (GetGraph()->GetArena()) HTypeConversion(DataType::Type::kInt64, new_and);
+ new (GetGraph()->GetAllocator()) HTypeConversion(DataType::Type::kInt64, new_and);
instruction->GetBlock()->ReplaceAndRemoveInstructionWith(instruction, new_conversion);
input_other->GetBlock()->RemoveInstruction(input_other);
RecordSimplification();
@@ -1279,7 +1281,7 @@ void InstructionSimplifierVisitor::VisitAnd(HAnd* instruction) {
input_other->HasOnlyOneNonEnvironmentUse()) {
DCHECK(input_other->IsShr()); // For UShr, we would have taken the branch above.
// Replace SHR+AND with USHR, for example "(x >> 24) & 0xff" -> "x >>> 24".
- HUShr* ushr = new (GetGraph()->GetArena()) HUShr(instruction->GetType(),
+ HUShr* ushr = new (GetGraph()->GetAllocator()) HUShr(instruction->GetType(),
input_other->InputAt(0),
input_other->InputAt(1),
input_other->GetDexPc());
@@ -1410,7 +1412,8 @@ void InstructionSimplifierVisitor::VisitCondition(HCondition* condition) {
// on the right hand side.
if (condition->GetLeft()->IsConstant() && !condition->GetRight()->IsConstant()) {
HBasicBlock* block = condition->GetBlock();
- HCondition* replacement = GetOppositeConditionSwapOps(block->GetGraph()->GetArena(), condition);
+ HCondition* replacement =
+ GetOppositeConditionSwapOps(block->GetGraph()->GetAllocator(), condition);
// If it is a fp we must set the opposite bias.
if (replacement != nullptr) {
if (condition->IsLtBias()) {
@@ -1506,7 +1509,7 @@ void InstructionSimplifierVisitor::VisitDiv(HDiv* instruction) {
// with
// NEG dst, src
instruction->GetBlock()->ReplaceAndRemoveInstructionWith(
- instruction, new (GetGraph()->GetArena()) HNeg(type, input_other));
+ instruction, new (GetGraph()->GetAllocator()) HNeg(type, input_other));
RecordSimplification();
return;
}
@@ -1532,7 +1535,7 @@ void InstructionSimplifierVisitor::VisitDiv(HDiv* instruction) {
if (reciprocal != nullptr) {
instruction->GetBlock()->ReplaceAndRemoveInstructionWith(
- instruction, new (GetGraph()->GetArena()) HMul(type, input_other, reciprocal));
+ instruction, new (GetGraph()->GetAllocator()) HMul(type, input_other, reciprocal));
RecordSimplification();
return;
}
@@ -1544,7 +1547,7 @@ void InstructionSimplifierVisitor::VisitMul(HMul* instruction) {
HInstruction* input_other = instruction->GetLeastConstantLeft();
DataType::Type type = instruction->GetType();
HBasicBlock* block = instruction->GetBlock();
- ArenaAllocator* allocator = GetGraph()->GetArena();
+ ArenaAllocator* allocator = GetGraph()->GetAllocator();
if (input_cst == nullptr) {
return;
@@ -1683,8 +1686,8 @@ void InstructionSimplifierVisitor::VisitNeg(HNeg* instruction) {
// removed.
// We do not perform optimization for fp because we could lose the sign of zero.
HSub* sub = input->AsSub();
- HSub* new_sub =
- new (GetGraph()->GetArena()) HSub(instruction->GetType(), sub->GetRight(), sub->GetLeft());
+ HSub* new_sub = new (GetGraph()->GetAllocator()) HSub(
+ instruction->GetType(), sub->GetRight(), sub->GetLeft());
instruction->GetBlock()->ReplaceAndRemoveInstructionWith(instruction, new_sub);
if (!sub->HasUses()) {
sub->GetBlock()->RemoveInstruction(sub);
@@ -1786,7 +1789,7 @@ void InstructionSimplifierVisitor::VisitSub(HSub* instruction) {
}
HBasicBlock* block = instruction->GetBlock();
- ArenaAllocator* allocator = GetGraph()->GetArena();
+ ArenaAllocator* allocator = GetGraph()->GetAllocator();
HInstruction* left = instruction->GetLeft();
HInstruction* right = instruction->GetRight();
@@ -1818,7 +1821,7 @@ void InstructionSimplifierVisitor::VisitSub(HSub* instruction) {
// SUB dst, a, tmp
// with
// ADD dst, a, b
- HAdd* add = new(GetGraph()->GetArena()) HAdd(type, left, right->AsNeg()->GetInput());
+ HAdd* add = new(GetGraph()->GetAllocator()) HAdd(type, left, right->AsNeg()->GetInput());
instruction->GetBlock()->ReplaceAndRemoveInstructionWith(instruction, add);
RecordSimplification();
right->GetBlock()->RemoveInstruction(right);
@@ -1834,9 +1837,9 @@ void InstructionSimplifierVisitor::VisitSub(HSub* instruction) {
// NEG dst, tmp
// The second version is not intrinsically better, but enables more
// transformations.
- HAdd* add = new(GetGraph()->GetArena()) HAdd(type, left->AsNeg()->GetInput(), right);
+ HAdd* add = new(GetGraph()->GetAllocator()) HAdd(type, left->AsNeg()->GetInput(), right);
instruction->GetBlock()->InsertInstructionBefore(add, instruction);
- HNeg* neg = new (GetGraph()->GetArena()) HNeg(instruction->GetType(), add);
+ HNeg* neg = new (GetGraph()->GetAllocator()) HNeg(instruction->GetType(), add);
instruction->GetBlock()->InsertInstructionBefore(neg, instruction);
instruction->ReplaceWith(neg);
instruction->GetBlock()->RemoveInstruction(instruction);
@@ -1898,7 +1901,7 @@ void InstructionSimplifierVisitor::VisitXor(HXor* instruction) {
// XOR dst, src, 1
// with
// BOOLEAN_NOT dst, src
- HBooleanNot* boolean_not = new (GetGraph()->GetArena()) HBooleanNot(input_other);
+ HBooleanNot* boolean_not = new (GetGraph()->GetAllocator()) HBooleanNot(input_other);
instruction->GetBlock()->ReplaceAndRemoveInstructionWith(instruction, boolean_not);
RecordSimplification();
return;
@@ -1909,7 +1912,7 @@ void InstructionSimplifierVisitor::VisitXor(HXor* instruction) {
// XOR dst, src, 0xFFF...FF
// with
// NOT dst, src
- HNot* bitwise_not = new (GetGraph()->GetArena()) HNot(instruction->GetType(), input_other);
+ HNot* bitwise_not = new (GetGraph()->GetAllocator()) HNot(instruction->GetType(), input_other);
instruction->GetBlock()->ReplaceAndRemoveInstructionWith(instruction, bitwise_not);
RecordSimplification();
return;
@@ -1980,10 +1983,10 @@ void InstructionSimplifierVisitor::SimplifyRotate(HInvoke* invoke,
// Unconditionally set the type of the negated distance to `int`,
// as shift and rotate operations expect a 32-bit (or narrower)
// value for their distance input.
- distance = new (GetGraph()->GetArena()) HNeg(DataType::Type::kInt32, distance);
+ distance = new (GetGraph()->GetAllocator()) HNeg(DataType::Type::kInt32, distance);
invoke->GetBlock()->InsertInstructionBefore(distance, invoke);
}
- HRor* ror = new (GetGraph()->GetArena()) HRor(type, value, distance);
+ HRor* ror = new (GetGraph()->GetAllocator()) HRor(type, value, distance);
invoke->GetBlock()->ReplaceAndRemoveInstructionWith(invoke, ror);
// Remove ClinitCheck and LoadClass, if possible.
HInstruction* clinit = invoke->GetInputs().back();
@@ -2127,7 +2130,7 @@ void InstructionSimplifierVisitor::SimplifyCompare(HInvoke* invoke,
} else {
right = GetGraph()->GetIntConstant(0);
}
- HCompare* compare = new (GetGraph()->GetArena())
+ HCompare* compare = new (GetGraph()->GetAllocator())
HCompare(type, left, right, ComparisonBias::kNoBias, dex_pc);
invoke->GetBlock()->ReplaceAndRemoveInstructionWith(invoke, compare);
}
@@ -2137,7 +2140,7 @@ void InstructionSimplifierVisitor::SimplifyIsNaN(HInvoke* invoke) {
uint32_t dex_pc = invoke->GetDexPc();
// IsNaN(x) is the same as x != x.
HInstruction* x = invoke->InputAt(0);
- HCondition* condition = new (GetGraph()->GetArena()) HNotEqual(x, x, dex_pc);
+ HCondition* condition = new (GetGraph()->GetAllocator()) HNotEqual(x, x, dex_pc);
condition->SetBias(ComparisonBias::kLtBias);
invoke->GetBlock()->ReplaceAndRemoveInstructionWith(invoke, condition);
}
@@ -2164,11 +2167,11 @@ void InstructionSimplifierVisitor::SimplifyFP2Int(HInvoke* invoke) {
kNoThrow);
}
// Test IsNaN(x), which is the same as x != x.
- HCondition* condition = new (GetGraph()->GetArena()) HNotEqual(x, x, dex_pc);
+ HCondition* condition = new (GetGraph()->GetAllocator()) HNotEqual(x, x, dex_pc);
condition->SetBias(ComparisonBias::kLtBias);
invoke->GetBlock()->InsertInstructionBefore(condition, invoke->GetNext());
// Select between the two.
- HInstruction* select = new (GetGraph()->GetArena()) HSelect(condition, nan, invoke, dex_pc);
+ HInstruction* select = new (GetGraph()->GetAllocator()) HSelect(condition, nan, invoke, dex_pc);
invoke->GetBlock()->InsertInstructionBefore(select, condition->GetNext());
invoke->ReplaceWithExceptInReplacementAtIndex(select, 0); // false at index 0
}
@@ -2177,7 +2180,7 @@ void InstructionSimplifierVisitor::SimplifyStringCharAt(HInvoke* invoke) {
HInstruction* str = invoke->InputAt(0);
HInstruction* index = invoke->InputAt(1);
uint32_t dex_pc = invoke->GetDexPc();
- ArenaAllocator* arena = GetGraph()->GetArena();
+ ArenaAllocator* arena = GetGraph()->GetAllocator();
// We treat String as an array to allow DCE and BCE to seamlessly work on strings,
// so create the HArrayLength, HBoundsCheck and HArrayGet.
HArrayLength* length = new (arena) HArrayLength(str, dex_pc, /* is_string_length */ true);
@@ -2202,13 +2205,13 @@ void InstructionSimplifierVisitor::SimplifyStringIsEmptyOrLength(HInvoke* invoke
// We treat String as an array to allow DCE and BCE to seamlessly work on strings,
// so create the HArrayLength.
HArrayLength* length =
- new (GetGraph()->GetArena()) HArrayLength(str, dex_pc, /* is_string_length */ true);
+ new (GetGraph()->GetAllocator()) HArrayLength(str, dex_pc, /* is_string_length */ true);
HInstruction* replacement;
if (invoke->GetIntrinsic() == Intrinsics::kStringIsEmpty) {
// For String.isEmpty(), create the `HEqual` representing the `length == 0`.
invoke->GetBlock()->InsertInstructionBefore(length, invoke);
HIntConstant* zero = GetGraph()->GetIntConstant(0);
- HEqual* equal = new (GetGraph()->GetArena()) HEqual(length, zero, dex_pc);
+ HEqual* equal = new (GetGraph()->GetAllocator()) HEqual(length, zero, dex_pc);
replacement = equal;
} else {
DCHECK_EQ(invoke->GetIntrinsic(), Intrinsics::kStringLength);
@@ -2278,9 +2281,11 @@ void InstructionSimplifierVisitor::SimplifyAllocationIntrinsic(HInvoke* invoke)
}
}
-void InstructionSimplifierVisitor::SimplifyMemBarrier(HInvoke* invoke, MemBarrierKind barrier_kind) {
+void InstructionSimplifierVisitor::SimplifyMemBarrier(HInvoke* invoke,
+ MemBarrierKind barrier_kind) {
uint32_t dex_pc = invoke->GetDexPc();
- HMemoryBarrier* mem_barrier = new (GetGraph()->GetArena()) HMemoryBarrier(barrier_kind, dex_pc);
+ HMemoryBarrier* mem_barrier =
+ new (GetGraph()->GetAllocator()) HMemoryBarrier(barrier_kind, dex_pc);
invoke->GetBlock()->ReplaceAndRemoveInstructionWith(invoke, mem_barrier);
}
@@ -2519,7 +2524,7 @@ bool InstructionSimplifierVisitor::TrySubtractionChainSimplification(
int64_t const3_val = ComputeAddition(type, const1_val, const2_val);
HBasicBlock* block = instruction->GetBlock();
HConstant* const3 = block->GetGraph()->GetConstant(type, const3_val);
- ArenaAllocator* arena = instruction->GetArena();
+ ArenaAllocator* arena = instruction->GetAllocator();
HInstruction* z;
if (is_x_negated) {
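
The simplifier changes above likewise only swap the allocator accessor; the rewrite idiom they sit inside is: build a replacement node from the graph's allocator, splice it in (ReplaceAndRemoveInstructionWith, or InsertInstructionBefore followed by ReplaceWith), then RecordSimplification(). A hedged sketch of that shape on a toy IR, using the `XOR dst, src, 1` to `BOOLEAN_NOT dst, src` rewrite from VisitXor as the running example; all type names here are invented, and the real pass performs additional type checks that are omitted:

    #include <memory>
    #include <vector>

    // Toy IR node; the Block's vector stands in for the arena that owns all nodes.
    struct Node {
      enum class Kind { kConst, kXor, kBoolNot };
      Kind kind;
      int value = 0;        // payload for kConst
      Node* lhs = nullptr;  // operand(s) for kXor / kBoolNot
      Node* rhs = nullptr;
    };

    struct Block {
      std::vector<std::unique_ptr<Node>> nodes;
      Node* Add(Node n) {
        nodes.push_back(std::make_unique<Node>(n));
        return nodes.back().get();
      }
    };

    // Rewrite `src ^ 1` into `!src`, mirroring the shape of the VisitXor hunk:
    // allocate the replacement from the block's storage and return it so the
    // caller can splice it in where the XOR used to be.
    Node* SimplifyXor(Block* block, Node* xor_node) {
      if (xor_node->kind != Node::Kind::kXor) return xor_node;
      Node* cst = xor_node->rhs;
      if (cst != nullptr && cst->kind == Node::Kind::kConst && cst->value == 1) {
        return block->Add(Node{Node::Kind::kBoolNot, 0, xor_node->lhs, nullptr});
      }
      return xor_node;
    }

    int main() {
      Block block;
      Node* src = block.Add(Node{Node::Kind::kConst, 0, nullptr, nullptr});
      Node* one = block.Add(Node{Node::Kind::kConst, 1, nullptr, nullptr});
      Node* xor_node = block.Add(Node{Node::Kind::kXor, 0, src, one});
      Node* simplified = SimplifyXor(&block, xor_node);
      return (simplified->kind == Node::Kind::kBoolNot && simplified->lhs == src) ? 0 : 1;
    }
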
diff --git a/compiler/optimizing/instruction_simplifier_arm.cc b/compiler/optimizing/instruction_simplifier_arm.cc
index 7439893787..9422f9f30c 100644
--- a/compiler/optimizing/instruction_simplifier_arm.cc
+++ b/compiler/optimizing/instruction_simplifier_arm.cc
@@ -137,12 +137,12 @@ bool InstructionSimplifierArmVisitor::TryMergeIntoShifterOperand(HInstruction* u
if (do_merge) {
HDataProcWithShifterOp* alu_with_op =
- new (GetGraph()->GetArena()) HDataProcWithShifterOp(use,
- other_input,
- bitfield_op->InputAt(0),
- op_kind,
- shift_amount,
- use->GetDexPc());
+ new (GetGraph()->GetAllocator()) HDataProcWithShifterOp(use,
+ other_input,
+ bitfield_op->InputAt(0),
+ op_kind,
+ shift_amount,
+ use->GetDexPc());
use->GetBlock()->ReplaceAndRemoveInstructionWith(use, alu_with_op);
if (bitfield_op->GetUses().empty()) {
bitfield_op->GetBlock()->RemoveInstruction(bitfield_op);
diff --git a/compiler/optimizing/instruction_simplifier_arm64.cc b/compiler/optimizing/instruction_simplifier_arm64.cc
index c639953536..c0ab68fec2 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.cc
+++ b/compiler/optimizing/instruction_simplifier_arm64.cc
@@ -141,12 +141,12 @@ bool InstructionSimplifierArm64Visitor::TryMergeIntoShifterOperand(HInstruction*
if (do_merge) {
HDataProcWithShifterOp* alu_with_op =
- new (GetGraph()->GetArena()) HDataProcWithShifterOp(use,
- other_input,
- bitfield_op->InputAt(0),
- op_kind,
- shift_amount,
- use->GetDexPc());
+ new (GetGraph()->GetAllocator()) HDataProcWithShifterOp(use,
+ other_input,
+ bitfield_op->InputAt(0),
+ op_kind,
+ shift_amount,
+ use->GetDexPc());
use->GetBlock()->ReplaceAndRemoveInstructionWith(use, alu_with_op);
if (bitfield_op->GetUses().empty()) {
bitfield_op->GetBlock()->RemoveInstruction(bitfield_op);
diff --git a/compiler/optimizing/instruction_simplifier_mips.cc b/compiler/optimizing/instruction_simplifier_mips.cc
index 4bf1bfb9f3..6a0d8a60c4 100644
--- a/compiler/optimizing/instruction_simplifier_mips.cc
+++ b/compiler/optimizing/instruction_simplifier_mips.cc
@@ -74,7 +74,7 @@ bool InstructionSimplifierMipsVisitor::TryExtractArrayAccessIndex(HInstruction*
}
HGraph* graph = access->GetBlock()->GetGraph();
- ArenaAllocator* arena = graph->GetArena();
+ ArenaAllocator* allocator = graph->GetAllocator();
size_t component_shift = DataType::SizeShift(packed_type);
bool is_extracting_beneficial = false;
@@ -113,7 +113,7 @@ bool InstructionSimplifierMipsVisitor::TryExtractArrayAccessIndex(HInstruction*
HIntConstant* shift = graph->GetIntConstant(component_shift);
HIntermediateArrayAddressIndex* address =
- new (arena) HIntermediateArrayAddressIndex(index, shift, kNoDexPc);
+ new (allocator) HIntermediateArrayAddressIndex(index, shift, kNoDexPc);
access->GetBlock()->InsertInstructionBefore(address, access);
access->ReplaceInput(address, 1);
return true;
diff --git a/compiler/optimizing/instruction_simplifier_shared.cc b/compiler/optimizing/instruction_simplifier_shared.cc
index 73d866fbea..037e98c3bf 100644
--- a/compiler/optimizing/instruction_simplifier_shared.cc
+++ b/compiler/optimizing/instruction_simplifier_shared.cc
@@ -75,7 +75,7 @@ bool TrySimpleMultiplyAccumulatePatterns(HMul* mul,
return false;
}
- ArenaAllocator* arena = mul->GetBlock()->GetGraph()->GetArena();
+ ArenaAllocator* arena = mul->GetBlock()->GetGraph()->GetAllocator();
HMultiplyAccumulate* mulacc = new(arena) HMultiplyAccumulate(
mul->GetType(), op_kind, input_a, input_a, input_b, mul->GetDexPc());
@@ -105,7 +105,7 @@ bool TryCombineMultiplyAccumulate(HMul* mul, InstructionSet isa) {
return false;
}
- ArenaAllocator* arena = mul->GetBlock()->GetGraph()->GetArena();
+ ArenaAllocator* arena = mul->GetBlock()->GetGraph()->GetAllocator();
if (mul->HasOnlyOneNonEnvironmentUse()) {
HInstruction* use = mul->GetUses().front().GetUser();
@@ -216,7 +216,7 @@ bool TryMergeNegatedInput(HBinaryOperation* op) {
// BIC dst, src, mask (respectively ORN, EON)
HInstruction* src = hnot->AsNot()->GetInput();
- HBitwiseNegatedRight* neg_op = new (hnot->GetBlock()->GetGraph()->GetArena())
+ HBitwiseNegatedRight* neg_op = new (hnot->GetBlock()->GetGraph()->GetAllocator())
HBitwiseNegatedRight(op->GetType(), op->GetKind(), hother, src, op->GetDexPc());
op->GetBlock()->ReplaceAndRemoveInstructionWith(op, neg_op);
@@ -255,7 +255,7 @@ bool TryExtractArrayAccessAddress(HInstruction* access,
// Proceed to extract the base address computation.
HGraph* graph = access->GetBlock()->GetGraph();
- ArenaAllocator* arena = graph->GetArena();
+ ArenaAllocator* arena = graph->GetAllocator();
HIntConstant* offset = graph->GetIntConstant(data_offset);
HIntermediateAddress* address = new (arena) HIntermediateAddress(array, offset, kNoDexPc);
@@ -289,7 +289,7 @@ bool TryExtractVecArrayAccessAddress(HVecMemoryOperation* access, HInstruction*
}
HGraph* graph = access->GetBlock()->GetGraph();
- ArenaAllocator* arena = graph->GetArena();
+ ArenaAllocator* arena = graph->GetAllocator();
DataType::Type packed_type = access->GetPackedType();
uint32_t data_offset = mirror::Array::DataOffset(
DataType::Size(packed_type)).Uint32Value();
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index 0f14d2728b..dfae534555 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -220,7 +220,7 @@ void IntrinsicVisitor::ComputeIntegerValueOfLocations(HInvoke* invoke,
}
// The intrinsic will call if it needs to allocate a j.l.Integer.
- LocationSummary* locations = new (invoke->GetBlock()->GetGraph()->GetArena()) LocationSummary(
+ LocationSummary* locations = new (invoke->GetBlock()->GetGraph()->GetAllocator()) LocationSummary(
invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
if (!invoke->InputAt(0)->IsConstant()) {
locations->SetInAt(0, Location::RequiresRegister());
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index 7abfd5b74e..4429e6e5b7 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -100,7 +100,7 @@ class IntrinsicVisitor : public ValueObject {
// We're moving potentially two or more locations to locations that could overlap, so we need
// a parallel move resolver.
- HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+ HParallelMove parallel_move(codegen->GetGraph()->GetAllocator());
for (size_t i = 0; i < invoke->GetNumberOfArguments(); i++) {
HInstruction* input = invoke->InputAt(i);
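
The intrinsics files that follow repeat one refactoring shape: small CreateXxxLocations(ArenaAllocator*, HInvoke*) helpers allocate a LocationSummary from the allocator they are handed and declare register constraints, and each VisitXxx method simply forwards the builder's allocator_ member. A rough sketch of that helper-plus-forwarder structure with invented stand-in types (Arena, Summary, Invoke), not the real ART signatures:

    #include <cstddef>
    #include <memory>
    #include <new>
    #include <vector>

    // Toy stand-ins for the real types (invented names, not ART classes).
    struct Arena {
      std::vector<std::unique_ptr<unsigned char[]>> blocks;
      void* Alloc(std::size_t bytes) {
        blocks.emplace_back(new unsigned char[bytes]);  // suitably aligned for fundamental types
        return blocks.back().get();
      }
    };

    struct Invoke {};  // placeholder for the instruction receiving a location summary

    struct Summary {
      enum class Loc { kNone, kRegister, kFpuRegister };
      std::vector<Loc> inputs;
      Loc out = Loc::kNone;
      void SetInAt(std::size_t i, Loc loc) {
        if (inputs.size() <= i) inputs.resize(i + 1, Loc::kNone);
        inputs[i] = loc;
      }
      void SetOut(Loc loc) { out = loc; }
    };

    // Helper in the style of CreateIntToIntLocations(allocator, invoke): allocate the
    // summary from the allocator passed in, then describe operand constraints.
    // (Arena objects are never destroyed individually; the toy mirrors that and leaks them.)
    Summary* CreateIntToIntLocations(Arena* allocator, Invoke* /* invoke */) {
      Summary* locations = new (allocator->Alloc(sizeof(Summary))) Summary();
      locations->SetInAt(0, Summary::Loc::kRegister);
      locations->SetOut(Summary::Loc::kRegister);
      return locations;
    }

    // Visitor in the style of VisitIntegerReverseBytes: it only forwards allocator_.
    struct LocationsBuilder {
      Arena* allocator_;
      Summary* VisitIntegerReverseBytes(Invoke* invoke) {
        return CreateIntToIntLocations(allocator_, invoke);
      }
    };

    int main() {
      Arena arena;
      LocationsBuilder builder{&arena};
      Invoke invoke;
      Summary* s = builder.VisitIntegerReverseBytes(&invoke);
      return (s->inputs.size() == 1 && s->out == Summary::Loc::kRegister) ? 0 : 1;
    }

Because the visitors only forward a member, the patch can switch every helper's parameter name and every call site in lockstep without touching the constraint logic itself, which is why these hunks are so uniform.
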
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 75a1ce7e6f..ee07c4f65c 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -70,7 +70,7 @@ MacroAssembler* IntrinsicCodeGeneratorARM64::GetVIXLAssembler() {
}
ArenaAllocator* IntrinsicCodeGeneratorARM64::GetAllocator() {
- return codegen_->GetGraph()->GetArena();
+ return codegen_->GetGraph()->GetAllocator();
}
#define __ codegen->GetVIXLAssembler()->
@@ -236,18 +236,16 @@ bool IntrinsicLocationsBuilderARM64::TryDispatch(HInvoke* invoke) {
#define __ masm->
-static void CreateFPToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateFPToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresRegister());
}
-static void CreateIntToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresFpuRegister());
}
@@ -267,10 +265,10 @@ static void MoveIntToFP(LocationSummary* locations, bool is64bit, MacroAssembler
}
void IntrinsicLocationsBuilderARM64::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke);
+ CreateFPToIntLocations(allocator_, invoke);
}
void IntrinsicLocationsBuilderARM64::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
- CreateIntToFPLocations(arena_, invoke);
+ CreateIntToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
@@ -281,10 +279,10 @@ void IntrinsicCodeGeneratorARM64::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARM64::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke);
+ CreateFPToIntLocations(allocator_, invoke);
}
void IntrinsicLocationsBuilderARM64::VisitFloatIntBitsToFloat(HInvoke* invoke) {
- CreateIntToFPLocations(arena_, invoke);
+ CreateIntToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
@@ -294,10 +292,9 @@ void IntrinsicCodeGeneratorARM64::VisitFloatIntBitsToFloat(HInvoke* invoke) {
MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetVIXLAssembler());
}
-static void CreateIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
@@ -324,7 +321,7 @@ static void GenReverseBytes(LocationSummary* locations,
}
void IntrinsicLocationsBuilderARM64::VisitIntegerReverseBytes(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitIntegerReverseBytes(HInvoke* invoke) {
@@ -332,7 +329,7 @@ void IntrinsicCodeGeneratorARM64::VisitIntegerReverseBytes(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARM64::VisitLongReverseBytes(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitLongReverseBytes(HInvoke* invoke) {
@@ -340,17 +337,16 @@ void IntrinsicCodeGeneratorARM64::VisitLongReverseBytes(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARM64::VisitShortReverseBytes(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitShortReverseBytes(HInvoke* invoke) {
GenReverseBytes(invoke->GetLocations(), DataType::Type::kInt16, GetVIXLAssembler());
}
-static void CreateIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
@@ -368,7 +364,7 @@ static void GenNumberOfLeadingZeros(LocationSummary* locations,
}
void IntrinsicLocationsBuilderARM64::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
@@ -376,7 +372,7 @@ void IntrinsicCodeGeneratorARM64::VisitIntegerNumberOfLeadingZeros(HInvoke* invo
}
void IntrinsicLocationsBuilderARM64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
@@ -396,7 +392,7 @@ static void GenNumberOfTrailingZeros(LocationSummary* locations,
}
void IntrinsicLocationsBuilderARM64::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
@@ -404,7 +400,7 @@ void IntrinsicCodeGeneratorARM64::VisitIntegerNumberOfTrailingZeros(HInvoke* inv
}
void IntrinsicLocationsBuilderARM64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
@@ -423,7 +419,7 @@ static void GenReverse(LocationSummary* locations,
}
void IntrinsicLocationsBuilderARM64::VisitIntegerReverse(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitIntegerReverse(HInvoke* invoke) {
@@ -431,7 +427,7 @@ void IntrinsicCodeGeneratorARM64::VisitIntegerReverse(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARM64::VisitLongReverse(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitLongReverse(HInvoke* invoke) {
@@ -456,7 +452,7 @@ static void GenBitCount(HInvoke* instr, DataType::Type type, MacroAssembler* mas
}
void IntrinsicLocationsBuilderARM64::VisitLongBitCount(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitLongBitCount(HInvoke* invoke) {
@@ -464,7 +460,7 @@ void IntrinsicCodeGeneratorARM64::VisitLongBitCount(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARM64::VisitIntegerBitCount(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitIntegerBitCount(HInvoke* invoke) {
@@ -489,7 +485,7 @@ static void GenHighestOneBit(HInvoke* invoke, DataType::Type type, MacroAssemble
}
void IntrinsicLocationsBuilderARM64::VisitIntegerHighestOneBit(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitIntegerHighestOneBit(HInvoke* invoke) {
@@ -497,7 +493,7 @@ void IntrinsicCodeGeneratorARM64::VisitIntegerHighestOneBit(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARM64::VisitLongHighestOneBit(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitLongHighestOneBit(HInvoke* invoke) {
@@ -518,7 +514,7 @@ static void GenLowestOneBit(HInvoke* invoke, DataType::Type type, MacroAssembler
}
void IntrinsicLocationsBuilderARM64::VisitIntegerLowestOneBit(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitIntegerLowestOneBit(HInvoke* invoke) {
@@ -526,17 +522,16 @@ void IntrinsicCodeGeneratorARM64::VisitIntegerLowestOneBit(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARM64::VisitLongLowestOneBit(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitLongLowestOneBit(HInvoke* invoke) {
GenLowestOneBit(invoke, DataType::Type::kInt64, GetVIXLAssembler());
}
-static void CreateFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
}
@@ -552,7 +547,7 @@ static void MathAbsFP(LocationSummary* locations, bool is64bit, MacroAssembler*
}
void IntrinsicLocationsBuilderARM64::VisitMathAbsDouble(HInvoke* invoke) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathAbsDouble(HInvoke* invoke) {
@@ -560,7 +555,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathAbsDouble(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARM64::VisitMathAbsFloat(HInvoke* invoke) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathAbsFloat(HInvoke* invoke) {
@@ -581,7 +576,7 @@ static void GenAbsInteger(LocationSummary* locations,
}
void IntrinsicLocationsBuilderARM64::VisitMathAbsInt(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathAbsInt(HInvoke* invoke) {
@@ -589,7 +584,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathAbsInt(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARM64::VisitMathAbsLong(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathAbsLong(HInvoke* invoke) {
@@ -614,17 +609,16 @@ static void GenMinMaxFP(LocationSummary* locations,
}
}
-static void CreateFPFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateFPFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetInAt(1, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
}
void IntrinsicLocationsBuilderARM64::VisitMathMinDoubleDouble(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathMinDoubleDouble(HInvoke* invoke) {
@@ -632,7 +626,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathMinDoubleDouble(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARM64::VisitMathMinFloatFloat(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathMinFloatFloat(HInvoke* invoke) {
@@ -640,7 +634,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathMinFloatFloat(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARM64::VisitMathMaxDoubleDouble(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathMaxDoubleDouble(HInvoke* invoke) {
@@ -648,7 +642,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathMaxDoubleDouble(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARM64::VisitMathMaxFloatFloat(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathMaxFloatFloat(HInvoke* invoke) {
@@ -673,7 +667,7 @@ static void GenMinMax(LocationSummary* locations,
}
void IntrinsicLocationsBuilderARM64::VisitMathMinIntInt(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathMinIntInt(HInvoke* invoke) {
@@ -681,7 +675,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathMinIntInt(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARM64::VisitMathMinLongLong(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathMinLongLong(HInvoke* invoke) {
@@ -689,7 +683,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathMinLongLong(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARM64::VisitMathMaxIntInt(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathMaxIntInt(HInvoke* invoke) {
@@ -697,7 +691,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathMaxIntInt(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARM64::VisitMathMaxLongLong(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathMaxLongLong(HInvoke* invoke) {
@@ -705,7 +699,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathMaxLongLong(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARM64::VisitMathSqrt(HInvoke* invoke) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathSqrt(HInvoke* invoke) {
@@ -715,7 +709,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathSqrt(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARM64::VisitMathCeil(HInvoke* invoke) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathCeil(HInvoke* invoke) {
@@ -725,7 +719,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathCeil(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARM64::VisitMathFloor(HInvoke* invoke) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathFloor(HInvoke* invoke) {
@@ -735,7 +729,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathFloor(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARM64::VisitMathRint(HInvoke* invoke) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathRint(HInvoke* invoke) {
@@ -744,10 +738,9 @@ void IntrinsicCodeGeneratorARM64::VisitMathRint(HInvoke* invoke) {
__ Frintn(DRegisterFrom(locations->Out()), DRegisterFrom(locations->InAt(0)));
}
-static void CreateFPToIntPlusFPTempLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateFPToIntPlusFPTempLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresRegister());
locations->AddTemp(Location::RequiresFpuRegister());
@@ -791,7 +784,7 @@ static void GenMathRound(HInvoke* invoke, bool is_double, vixl::aarch64::MacroAs
}
void IntrinsicLocationsBuilderARM64::VisitMathRoundDouble(HInvoke* invoke) {
- CreateFPToIntPlusFPTempLocations(arena_, invoke);
+ CreateFPToIntPlusFPTempLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathRoundDouble(HInvoke* invoke) {
@@ -799,7 +792,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathRoundDouble(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARM64::VisitMathRoundFloat(HInvoke* invoke) {
- CreateFPToIntPlusFPTempLocations(arena_, invoke);
+ CreateFPToIntPlusFPTempLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathRoundFloat(HInvoke* invoke) {
@@ -807,7 +800,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathRoundFloat(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARM64::VisitMemoryPeekByte(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMemoryPeekByte(HInvoke* invoke) {
@@ -817,7 +810,7 @@ void IntrinsicCodeGeneratorARM64::VisitMemoryPeekByte(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARM64::VisitMemoryPeekIntNative(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMemoryPeekIntNative(HInvoke* invoke) {
@@ -827,7 +820,7 @@ void IntrinsicCodeGeneratorARM64::VisitMemoryPeekIntNative(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARM64::VisitMemoryPeekLongNative(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMemoryPeekLongNative(HInvoke* invoke) {
@@ -837,7 +830,7 @@ void IntrinsicCodeGeneratorARM64::VisitMemoryPeekLongNative(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARM64::VisitMemoryPeekShortNative(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMemoryPeekShortNative(HInvoke* invoke) {
@@ -846,16 +839,15 @@ void IntrinsicCodeGeneratorARM64::VisitMemoryPeekShortNative(HInvoke* invoke) {
AbsoluteHeapOperandFrom(invoke->GetLocations()->InAt(0), 0));
}
-static void CreateIntIntToVoidLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntIntToVoidLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
}
void IntrinsicLocationsBuilderARM64::VisitMemoryPokeByte(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMemoryPokeByte(HInvoke* invoke) {
@@ -865,7 +857,7 @@ void IntrinsicCodeGeneratorARM64::VisitMemoryPokeByte(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARM64::VisitMemoryPokeIntNative(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMemoryPokeIntNative(HInvoke* invoke) {
@@ -875,7 +867,7 @@ void IntrinsicCodeGeneratorARM64::VisitMemoryPokeIntNative(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARM64::VisitMemoryPokeLongNative(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMemoryPokeLongNative(HInvoke* invoke) {
@@ -885,7 +877,7 @@ void IntrinsicCodeGeneratorARM64::VisitMemoryPokeLongNative(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARM64::VisitMemoryPokeShortNative(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMemoryPokeShortNative(HInvoke* invoke) {
@@ -895,9 +887,8 @@ void IntrinsicCodeGeneratorARM64::VisitMemoryPokeShortNative(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARM64::VisitThreadCurrentThread(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetOut(Location::RequiresRegister());
}
@@ -949,15 +940,16 @@ static void GenUnsafeGet(HInvoke* invoke,
}
}
-static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
+static void CreateIntIntIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
bool can_call = kEmitCompilerReadBarrier &&
(invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObject ||
invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile);
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- (can_call
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall),
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke,
+ can_call
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall,
+ kIntrinsified);
if (can_call && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
// We need a temporary register for the read barrier marking slow
@@ -972,22 +964,22 @@ static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke
}
void IntrinsicLocationsBuilderARM64::VisitUnsafeGet(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicLocationsBuilderARM64::VisitUnsafeGetVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicLocationsBuilderARM64::VisitUnsafeGetLong(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicLocationsBuilderARM64::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicLocationsBuilderARM64::VisitUnsafeGetObject(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicLocationsBuilderARM64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafeGet(HInvoke* invoke) {
@@ -1009,10 +1001,9 @@ void IntrinsicCodeGeneratorARM64::VisitUnsafeGetObjectVolatile(HInvoke* invoke)
GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, codegen_);
}
-static void CreateIntIntIntIntToVoid(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntIntIntIntToVoid(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
@@ -1020,31 +1011,31 @@ static void CreateIntIntIntIntToVoid(ArenaAllocator* arena, HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARM64::VisitUnsafePut(HInvoke* invoke) {
- CreateIntIntIntIntToVoid(arena_, invoke);
+ CreateIntIntIntIntToVoid(allocator_, invoke);
}
void IntrinsicLocationsBuilderARM64::VisitUnsafePutOrdered(HInvoke* invoke) {
- CreateIntIntIntIntToVoid(arena_, invoke);
+ CreateIntIntIntIntToVoid(allocator_, invoke);
}
void IntrinsicLocationsBuilderARM64::VisitUnsafePutVolatile(HInvoke* invoke) {
- CreateIntIntIntIntToVoid(arena_, invoke);
+ CreateIntIntIntIntToVoid(allocator_, invoke);
}
void IntrinsicLocationsBuilderARM64::VisitUnsafePutObject(HInvoke* invoke) {
- CreateIntIntIntIntToVoid(arena_, invoke);
+ CreateIntIntIntIntToVoid(allocator_, invoke);
}
void IntrinsicLocationsBuilderARM64::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
- CreateIntIntIntIntToVoid(arena_, invoke);
+ CreateIntIntIntIntToVoid(allocator_, invoke);
}
void IntrinsicLocationsBuilderARM64::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
- CreateIntIntIntIntToVoid(arena_, invoke);
+ CreateIntIntIntIntToVoid(allocator_, invoke);
}
void IntrinsicLocationsBuilderARM64::VisitUnsafePutLong(HInvoke* invoke) {
- CreateIntIntIntIntToVoid(arena_, invoke);
+ CreateIntIntIntIntToVoid(allocator_, invoke);
}
void IntrinsicLocationsBuilderARM64::VisitUnsafePutLongOrdered(HInvoke* invoke) {
- CreateIntIntIntIntToVoid(arena_, invoke);
+ CreateIntIntIntIntToVoid(allocator_, invoke);
}
void IntrinsicLocationsBuilderARM64::VisitUnsafePutLongVolatile(HInvoke* invoke) {
- CreateIntIntIntIntToVoid(arena_, invoke);
+ CreateIntIntIntIntToVoid(allocator_, invoke);
}
static void GenUnsafePut(HInvoke* invoke,
@@ -1151,17 +1142,18 @@ void IntrinsicCodeGeneratorARM64::VisitUnsafePutLongVolatile(HInvoke* invoke) {
codegen_);
}
-static void CreateIntIntIntIntIntToInt(ArenaAllocator* arena,
+static void CreateIntIntIntIntIntToInt(ArenaAllocator* allocator,
HInvoke* invoke,
DataType::Type type) {
bool can_call = kEmitCompilerReadBarrier &&
kUseBakerReadBarrier &&
(invoke->GetIntrinsic() == Intrinsics::kUnsafeCASObject);
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- (can_call
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall),
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke,
+ can_call
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall,
+ kIntrinsified);
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
@@ -1265,10 +1257,10 @@ static void GenCas(HInvoke* invoke, DataType::Type type, CodeGeneratorARM64* cod
}
void IntrinsicLocationsBuilderARM64::VisitUnsafeCASInt(HInvoke* invoke) {
- CreateIntIntIntIntIntToInt(arena_, invoke, DataType::Type::kInt32);
+ CreateIntIntIntIntIntToInt(allocator_, invoke, DataType::Type::kInt32);
}
void IntrinsicLocationsBuilderARM64::VisitUnsafeCASLong(HInvoke* invoke) {
- CreateIntIntIntIntIntToInt(arena_, invoke, DataType::Type::kInt64);
+ CreateIntIntIntIntIntToInt(allocator_, invoke, DataType::Type::kInt64);
}
void IntrinsicLocationsBuilderARM64::VisitUnsafeCASObject(HInvoke* invoke) {
// The only read barrier implementation supporting the
@@ -1277,7 +1269,7 @@ void IntrinsicLocationsBuilderARM64::VisitUnsafeCASObject(HInvoke* invoke) {
return;
}
- CreateIntIntIntIntIntToInt(arena_, invoke, DataType::Type::kReference);
+ CreateIntIntIntIntIntToInt(allocator_, invoke, DataType::Type::kReference);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafeCASInt(HInvoke* invoke) {
@@ -1295,11 +1287,12 @@ void IntrinsicCodeGeneratorARM64::VisitUnsafeCASObject(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARM64::VisitStringCompareTo(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- invoke->InputAt(1)->CanBeNull()
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke,
+ invoke->InputAt(1)->CanBeNull()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall,
+ kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->AddTemp(Location::RequiresRegister());
@@ -1526,9 +1519,8 @@ static const char* GetConstString(HInstruction* candidate, uint32_t* utf16_lengt
}
void IntrinsicLocationsBuilderARM64::VisitStringEquals(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
@@ -1754,9 +1746,8 @@ static void GenerateVisitStringIndexOf(HInvoke* invoke,
}
void IntrinsicLocationsBuilderARM64::VisitStringIndexOf(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
// We have a hand-crafted assembly stub that follows the runtime calling convention. So it's
// best to align the inputs accordingly.
InvokeRuntimeCallingConvention calling_convention;
@@ -1774,9 +1765,8 @@ void IntrinsicCodeGeneratorARM64::VisitStringIndexOf(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARM64::VisitStringIndexOfAfter(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
// We have a hand-crafted assembly stub that follows the runtime calling convention. So it's
// best to align the inputs accordingly.
InvokeRuntimeCallingConvention calling_convention;
@@ -1792,9 +1782,8 @@ void IntrinsicCodeGeneratorARM64::VisitStringIndexOfAfter(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARM64::VisitStringNewStringFromBytes(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
@@ -1819,9 +1808,8 @@ void IntrinsicCodeGeneratorARM64::VisitStringNewStringFromBytes(HInvoke* invoke)
}
void IntrinsicLocationsBuilderARM64::VisitStringNewStringFromChars(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
@@ -1841,9 +1829,8 @@ void IntrinsicCodeGeneratorARM64::VisitStringNewStringFromChars(HInvoke* invoke)
}
void IntrinsicLocationsBuilderARM64::VisitStringNewStringFromString(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
locations->SetOut(calling_convention.GetReturnLocation(DataType::Type::kReference));
@@ -1864,29 +1851,27 @@ void IntrinsicCodeGeneratorARM64::VisitStringNewStringFromString(HInvoke* invoke
__ Bind(slow_path->GetExitLabel());
}
-static void CreateFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) {
+static void CreateFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) {
DCHECK_EQ(invoke->GetNumberOfArguments(), 1U);
DCHECK(DataType::IsFloatingPointType(invoke->InputAt(0)->GetType()));
DCHECK(DataType::IsFloatingPointType(invoke->GetType()));
- LocationSummary* const locations = new (arena) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
- kIntrinsified);
+ LocationSummary* const locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, LocationFrom(calling_convention.GetFpuRegisterAt(0)));
locations->SetOut(calling_convention.GetReturnLocation(invoke->GetType()));
}
-static void CreateFPFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) {
+static void CreateFPFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) {
DCHECK_EQ(invoke->GetNumberOfArguments(), 2U);
DCHECK(DataType::IsFloatingPointType(invoke->InputAt(0)->GetType()));
DCHECK(DataType::IsFloatingPointType(invoke->InputAt(1)->GetType()));
DCHECK(DataType::IsFloatingPointType(invoke->GetType()));
- LocationSummary* const locations = new (arena) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
- kIntrinsified);
+ LocationSummary* const locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, LocationFrom(calling_convention.GetFpuRegisterAt(0)));
@@ -1901,7 +1886,7 @@ static void GenFPToFPCall(HInvoke* invoke,
}
void IntrinsicLocationsBuilderARM64::VisitMathCos(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathCos(HInvoke* invoke) {
@@ -1909,7 +1894,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathCos(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARM64::VisitMathSin(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathSin(HInvoke* invoke) {
@@ -1917,7 +1902,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathSin(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARM64::VisitMathAcos(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathAcos(HInvoke* invoke) {
@@ -1925,7 +1910,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathAcos(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARM64::VisitMathAsin(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathAsin(HInvoke* invoke) {
@@ -1933,7 +1918,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathAsin(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARM64::VisitMathAtan(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathAtan(HInvoke* invoke) {
@@ -1941,7 +1926,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathAtan(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARM64::VisitMathCbrt(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathCbrt(HInvoke* invoke) {
@@ -1949,7 +1934,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathCbrt(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARM64::VisitMathCosh(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathCosh(HInvoke* invoke) {
@@ -1957,7 +1942,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathCosh(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARM64::VisitMathExp(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathExp(HInvoke* invoke) {
@@ -1965,7 +1950,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathExp(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARM64::VisitMathExpm1(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathExpm1(HInvoke* invoke) {
@@ -1973,7 +1958,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathExpm1(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARM64::VisitMathLog(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathLog(HInvoke* invoke) {
@@ -1981,7 +1966,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathLog(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARM64::VisitMathLog10(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathLog10(HInvoke* invoke) {
@@ -1989,7 +1974,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathLog10(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARM64::VisitMathSinh(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathSinh(HInvoke* invoke) {
@@ -1997,7 +1982,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathSinh(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARM64::VisitMathTan(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathTan(HInvoke* invoke) {
@@ -2005,7 +1990,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathTan(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARM64::VisitMathTanh(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathTanh(HInvoke* invoke) {
@@ -2013,7 +1998,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathTanh(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARM64::VisitMathAtan2(HInvoke* invoke) {
- CreateFPFPToFPCallLocations(arena_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathAtan2(HInvoke* invoke) {
@@ -2021,7 +2006,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathAtan2(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARM64::VisitMathHypot(HInvoke* invoke) {
- CreateFPFPToFPCallLocations(arena_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathHypot(HInvoke* invoke) {
@@ -2029,7 +2014,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathHypot(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARM64::VisitMathNextAfter(HInvoke* invoke) {
- CreateFPFPToFPCallLocations(arena_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathNextAfter(HInvoke* invoke) {
@@ -2037,9 +2022,8 @@ void IntrinsicCodeGeneratorARM64::VisitMathNextAfter(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARM64::VisitStringGetCharsNoCheck(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
@@ -2189,10 +2173,9 @@ void IntrinsicLocationsBuilderARM64::VisitSystemArrayCopyChar(HInvoke* invoke) {
}
}
- ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetArena();
- LocationSummary* locations = new (allocator) LocationSummary(invoke,
- LocationSummary::kCallOnSlowPath,
- kIntrinsified);
+ ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
// arraycopy(char[] src, int src_pos, char[] dst, int dst_pos, int length).
locations->SetInAt(0, Location::RequiresRegister());
SetSystemArrayCopyLocationRequires(locations, 1, invoke->InputAt(1));
@@ -2428,10 +2411,9 @@ void IntrinsicLocationsBuilderARM64::VisitSystemArrayCopy(HInvoke* invoke) {
return;
}
- ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetArena();
- LocationSummary* locations = new (allocator) LocationSummary(invoke,
- LocationSummary::kCallOnSlowPath,
- kIntrinsified);
+ ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
// arraycopy(Object src, int src_pos, Object dest, int dest_pos, int length).
locations->SetInAt(0, Location::RequiresRegister());
SetSystemArrayCopyLocationRequires(locations, 1, invoke->InputAt(1));
@@ -2937,7 +2919,7 @@ static void GenIsInfinite(LocationSummary* locations,
}
void IntrinsicLocationsBuilderARM64::VisitFloatIsInfinite(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke);
+ CreateFPToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitFloatIsInfinite(HInvoke* invoke) {
@@ -2945,7 +2927,7 @@ void IntrinsicCodeGeneratorARM64::VisitFloatIsInfinite(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARM64::VisitDoubleIsInfinite(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke);
+ CreateFPToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitDoubleIsInfinite(HInvoke* invoke) {
@@ -3026,9 +3008,8 @@ void IntrinsicCodeGeneratorARM64::VisitIntegerValueOf(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARM64::VisitThreadInterrupted(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetOut(Location::RequiresRegister());
}
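
The recurring `new (allocator_) LocationSummary(invoke, ..., kIntrinsified)` expression in the hunks above is C++ placement new routed into an arena allocator. A minimal, self-contained sketch of that idiom follows; the `ArenaAllocator` and `LocationSummary` here are simplified stand-ins for illustration, not the real ART classes.

    #include <cstddef>
    #include <cstdint>
    #include <memory>
    #include <vector>

    // Simplified stand-in for an arena: hands out raw storage and frees
    // everything at once when the arena itself is destroyed.
    class ArenaAllocator {
     public:
      void* Alloc(std::size_t bytes) {
        blocks_.push_back(std::make_unique<std::uint8_t[]>(bytes));
        return blocks_.back().get();
      }
     private:
      std::vector<std::unique_ptr<std::uint8_t[]>> blocks_;
    };

    // Placement operator new that takes its storage from the arena, enabling
    // the `new (allocator) T(...)` spelling used throughout the diff.
    inline void* operator new(std::size_t size, ArenaAllocator* allocator) {
      return allocator->Alloc(size);
    }

    // Stand-in for the real LocationSummary; only the construction matters here.
    struct LocationSummary {
      explicit LocationSummary(int call_kind) : call_kind(call_kind) {}
      int call_kind;
    };

    int main() {
      ArenaAllocator allocator;
      // Object lives in the arena; no matching delete is ever written,
      // the arena releases the memory in bulk when it goes out of scope.
      LocationSummary* locations = new (&allocator) LocationSummary(/* call_kind */ 0);
      return locations->call_kind;
    }

The point of the idiom is that pass-local objects are bump-allocated and released in bulk with the arena, which is why no per-object delete appears anywhere in the visitor methods above.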
diff --git a/compiler/optimizing/intrinsics_arm64.h b/compiler/optimizing/intrinsics_arm64.h
index 5a6d180ed6..3533c88c67 100644
--- a/compiler/optimizing/intrinsics_arm64.h
+++ b/compiler/optimizing/intrinsics_arm64.h
@@ -39,8 +39,8 @@ class CodeGeneratorARM64;
class IntrinsicLocationsBuilderARM64 FINAL : public IntrinsicVisitor {
public:
- explicit IntrinsicLocationsBuilderARM64(ArenaAllocator* arena, CodeGeneratorARM64* codegen)
- : arena_(arena), codegen_(codegen) {}
+ explicit IntrinsicLocationsBuilderARM64(ArenaAllocator* allocator, CodeGeneratorARM64* codegen)
+ : allocator_(allocator), codegen_(codegen) {}
// Define visitor methods.
@@ -57,7 +57,7 @@ class IntrinsicLocationsBuilderARM64 FINAL : public IntrinsicVisitor {
bool TryDispatch(HInvoke* invoke);
private:
- ArenaAllocator* arena_;
+ ArenaAllocator* allocator_;
CodeGeneratorARM64* codegen_;
DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderARM64);
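
The header hunk above only renames the cached pointer, but the wiring it describes is easy to lose in the noise: the builder takes the graph's allocator once and forwards it to every static Create*Locations helper. A compact sketch of that structure is below, with every type reduced to a stand-in rather than the real ART class.

    #include <cassert>

    class ArenaAllocator {};   // Stand-in; the real class hands out arena memory.
    class HInvoke {};          // Stand-in for an intrinsic call node.

    class HGraph {             // Stand-in exposing the renamed accessor.
     public:
      explicit HGraph(ArenaAllocator* allocator) : allocator_(allocator) {}
      ArenaAllocator* GetAllocator() const { return allocator_; }  // formerly GetArena()
     private:
      ArenaAllocator* allocator_;
    };

    // Helper in the style of CreateFPToFPLocations(allocator, invoke); here it
    // only checks the wiring instead of building a LocationSummary.
    static void CreateFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
      assert(allocator != nullptr && invoke != nullptr);
    }

    class IntrinsicLocationsBuilderARM64 {  // Simplified stand-in for the builder.
     public:
      explicit IntrinsicLocationsBuilderARM64(ArenaAllocator* allocator)
          : allocator_(allocator) {}
      void VisitMathFloor(HInvoke* invoke) { CreateFPToFPLocations(allocator_, invoke); }
     private:
      ArenaAllocator* allocator_;  // The cached pointer the header renames.
    };

    int main() {
      ArenaAllocator arena;
      HGraph graph(&arena);
      IntrinsicLocationsBuilderARM64 builder(graph.GetAllocator());
      HInvoke invoke;
      builder.VisitMathFloor(&invoke);
      return 0;
    }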
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index 7ce576c307..332306bebf 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -65,7 +65,7 @@ ArmVIXLAssembler* IntrinsicCodeGeneratorARMVIXL::GetAssembler() {
}
ArenaAllocator* IntrinsicCodeGeneratorARMVIXL::GetAllocator() {
- return codegen_->GetGraph()->GetArena();
+ return codegen_->GetGraph()->GetAllocator();
}
// Default slow-path for fallback (calling the managed code to handle the intrinsic) in an
@@ -246,7 +246,7 @@ class ReadBarrierSystemArrayCopySlowPathARMVIXL : public SlowPathCodeARMVIXL {
};
IntrinsicLocationsBuilderARMVIXL::IntrinsicLocationsBuilderARMVIXL(CodeGeneratorARMVIXL* codegen)
- : arena_(codegen->GetGraph()->GetArena()),
+ : allocator_(codegen->GetGraph()->GetAllocator()),
codegen_(codegen),
assembler_(codegen->GetAssembler()),
features_(codegen->GetInstructionSetFeatures()) {}
@@ -260,18 +260,16 @@ bool IntrinsicLocationsBuilderARMVIXL::TryDispatch(HInvoke* invoke) {
return res->Intrinsified();
}
-static void CreateFPToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateFPToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresRegister());
}
-static void CreateIntToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresFpuRegister());
}
@@ -297,10 +295,10 @@ static void MoveIntToFP(LocationSummary* locations, bool is64bit, ArmVIXLAssembl
}
void IntrinsicLocationsBuilderARMVIXL::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke);
+ CreateFPToIntLocations(allocator_, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
- CreateIntToFPLocations(arena_, invoke);
+ CreateIntToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
@@ -311,10 +309,10 @@ void IntrinsicCodeGeneratorARMVIXL::VisitDoubleLongBitsToDouble(HInvoke* invoke)
}
void IntrinsicLocationsBuilderARMVIXL::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke);
+ CreateFPToIntLocations(allocator_, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitFloatIntBitsToFloat(HInvoke* invoke) {
- CreateIntToFPLocations(arena_, invoke);
+ CreateIntToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
@@ -324,26 +322,23 @@ void IntrinsicCodeGeneratorARMVIXL::VisitFloatIntBitsToFloat(HInvoke* invoke) {
MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
}
-static void CreateIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
-static void CreateLongToLongLocationsWithOverlap(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateLongToLongLocationsWithOverlap(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
}
-static void CreateFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
}
@@ -376,7 +371,7 @@ static void GenNumberOfLeadingZeros(HInvoke* invoke,
}
void IntrinsicLocationsBuilderARMVIXL::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
@@ -384,7 +379,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitIntegerNumberOfLeadingZeros(HInvoke* in
}
void IntrinsicLocationsBuilderARMVIXL::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
- CreateLongToLongLocationsWithOverlap(arena_, invoke);
+ CreateLongToLongLocationsWithOverlap(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
@@ -422,7 +417,7 @@ static void GenNumberOfTrailingZeros(HInvoke* invoke,
}
void IntrinsicLocationsBuilderARMVIXL::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
@@ -430,7 +425,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitIntegerNumberOfTrailingZeros(HInvoke* i
}
void IntrinsicLocationsBuilderARMVIXL::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
- CreateLongToLongLocationsWithOverlap(arena_, invoke);
+ CreateLongToLongLocationsWithOverlap(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
@@ -442,7 +437,7 @@ static void MathAbsFP(HInvoke* invoke, ArmVIXLAssembler* assembler) {
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathAbsDouble(HInvoke* invoke) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathAbsDouble(HInvoke* invoke) {
@@ -450,17 +445,16 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathAbsDouble(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathAbsFloat(HInvoke* invoke) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathAbsFloat(HInvoke* invoke) {
MathAbsFP(invoke, GetAssembler());
}
-static void CreateIntToIntPlusTemp(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntToIntPlusTemp(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
@@ -499,7 +493,7 @@ static void GenAbsInteger(LocationSummary* locations,
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathAbsInt(HInvoke* invoke) {
- CreateIntToIntPlusTemp(arena_, invoke);
+ CreateIntToIntPlusTemp(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathAbsInt(HInvoke* invoke) {
@@ -508,7 +502,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathAbsInt(HInvoke* invoke) {
void IntrinsicLocationsBuilderARMVIXL::VisitMathAbsLong(HInvoke* invoke) {
- CreateIntToIntPlusTemp(arena_, invoke);
+ CreateIntToIntPlusTemp(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathAbsLong(HInvoke* invoke) {
@@ -575,17 +569,16 @@ static void GenMinMaxFloat(HInvoke* invoke, bool is_min, CodeGeneratorARMVIXL* c
}
}
-static void CreateFPFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateFPFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetInAt(1, Location::RequiresFpuRegister());
locations->SetOut(Location::SameAsFirstInput());
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathMinFloatFloat(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
invoke->GetLocations()->AddTemp(Location::RequiresRegister());
}
@@ -594,7 +587,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathMinFloatFloat(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathMaxFloatFloat(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
invoke->GetLocations()->AddTemp(Location::RequiresRegister());
}
@@ -654,7 +647,7 @@ static void GenMinMaxDouble(HInvoke* invoke, bool is_min, CodeGeneratorARMVIXL*
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathMinDoubleDouble(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathMinDoubleDouble(HInvoke* invoke) {
@@ -662,7 +655,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathMinDoubleDouble(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathMaxDoubleDouble(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathMaxDoubleDouble(HInvoke* invoke) {
@@ -708,17 +701,16 @@ static void GenMinMaxLong(HInvoke* invoke, bool is_min, ArmVIXLAssembler* assemb
}
}
-static void CreateLongLongToLongLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateLongLongToLongLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->SetOut(Location::SameAsFirstInput());
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathMinLongLong(HInvoke* invoke) {
- CreateLongLongToLongLocations(arena_, invoke);
+ CreateLongLongToLongLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathMinLongLong(HInvoke* invoke) {
@@ -726,7 +718,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathMinLongLong(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathMaxLongLong(HInvoke* invoke) {
- CreateLongLongToLongLocations(arena_, invoke);
+ CreateLongLongToLongLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathMaxLongLong(HInvoke* invoke) {
@@ -751,17 +743,16 @@ static void GenMinMax(HInvoke* invoke, bool is_min, ArmVIXLAssembler* assembler)
}
}
-static void CreateIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathMinIntInt(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathMinIntInt(HInvoke* invoke) {
@@ -769,7 +760,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathMinIntInt(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathMaxIntInt(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathMaxIntInt(HInvoke* invoke) {
@@ -777,7 +768,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathMaxIntInt(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathSqrt(HInvoke* invoke) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathSqrt(HInvoke* invoke) {
@@ -787,7 +778,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathSqrt(HInvoke* invoke) {
void IntrinsicLocationsBuilderARMVIXL::VisitMathRint(HInvoke* invoke) {
if (features_.HasARMv8AInstructions()) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
}
@@ -799,9 +790,8 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathRint(HInvoke* invoke) {
void IntrinsicLocationsBuilderARMVIXL::VisitMathRoundFloat(HInvoke* invoke) {
if (features_.HasARMv8AInstructions()) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresRegister());
locations->AddTemp(Location::RequiresFpuRegister());
@@ -850,7 +840,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathRoundFloat(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPeekByte(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPeekByte(HInvoke* invoke) {
@@ -860,7 +850,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPeekByte(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPeekIntNative(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPeekIntNative(HInvoke* invoke) {
@@ -870,7 +860,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPeekIntNative(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPeekLongNative(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPeekLongNative(HInvoke* invoke) {
@@ -891,7 +881,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPeekLongNative(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPeekShortNative(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPeekShortNative(HInvoke* invoke) {
@@ -900,16 +890,15 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPeekShortNative(HInvoke* invoke)
__ Ldrsh(OutputRegister(invoke), MemOperand(LowRegisterFrom(invoke->GetLocations()->InAt(0))));
}
-static void CreateIntIntToVoidLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntIntToVoidLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
}
void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPokeByte(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPokeByte(HInvoke* invoke) {
@@ -918,7 +907,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPokeByte(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPokeIntNative(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPokeIntNative(HInvoke* invoke) {
@@ -927,7 +916,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPokeIntNative(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPokeLongNative(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPokeLongNative(HInvoke* invoke) {
@@ -941,7 +930,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPokeLongNative(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPokeShortNative(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPokeShortNative(HInvoke* invoke) {
@@ -950,9 +939,8 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPokeShortNative(HInvoke* invoke)
}
void IntrinsicLocationsBuilderARMVIXL::VisitThreadCurrentThread(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetOut(Location::RequiresRegister());
}
@@ -1034,17 +1022,18 @@ static void GenUnsafeGet(HInvoke* invoke,
}
}
-static void CreateIntIntIntToIntLocations(ArenaAllocator* arena,
+static void CreateIntIntIntToIntLocations(ArenaAllocator* allocator,
HInvoke* invoke,
DataType::Type type) {
bool can_call = kEmitCompilerReadBarrier &&
(invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObject ||
invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile);
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- (can_call
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall),
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke,
+ can_call
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall,
+ kIntrinsified);
if (can_call && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -1061,22 +1050,22 @@ static void CreateIntIntIntToIntLocations(ArenaAllocator* arena,
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeGet(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt32);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt32);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeGetVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt32);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt32);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeGetLong(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt64);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt64);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt64);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt64);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeGetObject(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kReference);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kReference);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kReference);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kReference);
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGet(HInvoke* invoke) {
@@ -1098,14 +1087,13 @@ void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGetObjectVolatile(HInvoke* invoke
GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, codegen_);
}
-static void CreateIntIntIntIntToVoid(ArenaAllocator* arena,
+static void CreateIntIntIntIntToVoid(ArenaAllocator* allocator,
const ArmInstructionSetFeatures& features,
DataType::Type type,
bool is_volatile,
HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
@@ -1126,39 +1114,39 @@ static void CreateIntIntIntIntToVoid(ArenaAllocator* arena,
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePut(HInvoke* invoke) {
CreateIntIntIntIntToVoid(
- arena_, features_, DataType::Type::kInt32, /* is_volatile */ false, invoke);
+ allocator_, features_, DataType::Type::kInt32, /* is_volatile */ false, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutOrdered(HInvoke* invoke) {
CreateIntIntIntIntToVoid(
- arena_, features_, DataType::Type::kInt32, /* is_volatile */ false, invoke);
+ allocator_, features_, DataType::Type::kInt32, /* is_volatile */ false, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutVolatile(HInvoke* invoke) {
CreateIntIntIntIntToVoid(
- arena_, features_, DataType::Type::kInt32, /* is_volatile */ true, invoke);
+ allocator_, features_, DataType::Type::kInt32, /* is_volatile */ true, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutObject(HInvoke* invoke) {
CreateIntIntIntIntToVoid(
- arena_, features_, DataType::Type::kReference, /* is_volatile */ false, invoke);
+ allocator_, features_, DataType::Type::kReference, /* is_volatile */ false, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
CreateIntIntIntIntToVoid(
- arena_, features_, DataType::Type::kReference, /* is_volatile */ false, invoke);
+ allocator_, features_, DataType::Type::kReference, /* is_volatile */ false, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
CreateIntIntIntIntToVoid(
- arena_, features_, DataType::Type::kReference, /* is_volatile */ true, invoke);
+ allocator_, features_, DataType::Type::kReference, /* is_volatile */ true, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutLong(HInvoke* invoke) {
CreateIntIntIntIntToVoid(
- arena_, features_, DataType::Type::kInt64, /* is_volatile */ false, invoke);
+ allocator_, features_, DataType::Type::kInt64, /* is_volatile */ false, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutLongOrdered(HInvoke* invoke) {
CreateIntIntIntIntToVoid(
- arena_, features_, DataType::Type::kInt64, /* is_volatile */ false, invoke);
+ allocator_, features_, DataType::Type::kInt64, /* is_volatile */ false, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutLongVolatile(HInvoke* invoke) {
CreateIntIntIntIntToVoid(
- arena_, features_, DataType::Type::kInt64, /* is_volatile */ true, invoke);
+ allocator_, features_, DataType::Type::kInt64, /* is_volatile */ true, invoke);
}
static void GenUnsafePut(LocationSummary* locations,
@@ -1284,17 +1272,18 @@ void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutLongVolatile(HInvoke* invoke)
codegen_);
}
-static void CreateIntIntIntIntIntToIntPlusTemps(ArenaAllocator* arena,
+static void CreateIntIntIntIntIntToIntPlusTemps(ArenaAllocator* allocator,
HInvoke* invoke,
DataType::Type type) {
bool can_call = kEmitCompilerReadBarrier &&
kUseBakerReadBarrier &&
(invoke->GetIntrinsic() == Intrinsics::kUnsafeCASObject);
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- (can_call
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall),
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke,
+ can_call
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall,
+ kIntrinsified);
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
@@ -1427,7 +1416,7 @@ static void GenCas(HInvoke* invoke, DataType::Type type, CodeGeneratorARMVIXL* c
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeCASInt(HInvoke* invoke) {
- CreateIntIntIntIntIntToIntPlusTemps(arena_, invoke, DataType::Type::kInt32);
+ CreateIntIntIntIntIntToIntPlusTemps(allocator_, invoke, DataType::Type::kInt32);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeCASObject(HInvoke* invoke) {
// The only read barrier implementation supporting the
@@ -1436,7 +1425,7 @@ void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeCASObject(HInvoke* invoke) {
return;
}
- CreateIntIntIntIntIntToIntPlusTemps(arena_, invoke, DataType::Type::kReference);
+ CreateIntIntIntIntIntToIntPlusTemps(allocator_, invoke, DataType::Type::kReference);
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeCASInt(HInvoke* invoke) {
GenCas(invoke, DataType::Type::kInt32, codegen_);
@@ -1451,11 +1440,12 @@ void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeCASObject(HInvoke* invoke) {
void IntrinsicLocationsBuilderARMVIXL::VisitStringCompareTo(HInvoke* invoke) {
// The inputs plus one temp.
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- invoke->InputAt(1)->CanBeNull()
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke,
+ invoke->InputAt(1)->CanBeNull()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall,
+ kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->AddTemp(Location::RequiresRegister());
@@ -1733,9 +1723,8 @@ static const char* GetConstString(HInstruction* candidate, uint32_t* utf16_lengt
}
void IntrinsicLocationsBuilderARMVIXL::VisitStringEquals(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
InvokeRuntimeCallingConventionARMVIXL calling_convention;
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
@@ -1974,9 +1963,8 @@ static void GenerateVisitStringIndexOf(HInvoke* invoke,
}
void IntrinsicLocationsBuilderARMVIXL::VisitStringIndexOf(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
// We have a hand-crafted assembly stub that follows the runtime calling convention. So it's
// best to align the inputs accordingly.
InvokeRuntimeCallingConventionARMVIXL calling_convention;
@@ -1994,9 +1982,8 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringIndexOf(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARMVIXL::VisitStringIndexOfAfter(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
// We have a hand-crafted assembly stub that follows the runtime calling convention. So it's
// best to align the inputs accordingly.
InvokeRuntimeCallingConventionARMVIXL calling_convention;
@@ -2012,9 +1999,8 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringIndexOfAfter(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARMVIXL::VisitStringNewStringFromBytes(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
InvokeRuntimeCallingConventionARMVIXL calling_convention;
locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
@@ -2037,9 +2023,8 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringNewStringFromBytes(HInvoke* invok
}
void IntrinsicLocationsBuilderARMVIXL::VisitStringNewStringFromChars(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
InvokeRuntimeCallingConventionARMVIXL calling_convention;
locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
@@ -2059,9 +2044,8 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringNewStringFromChars(HInvoke* invok
}
void IntrinsicLocationsBuilderARMVIXL::VisitStringNewStringFromString(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
InvokeRuntimeCallingConventionARMVIXL calling_convention;
locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
locations->SetOut(LocationFrom(r0));
@@ -2571,7 +2555,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) {
__ Bind(intrinsic_slow_path->GetExitLabel());
}
-static void CreateFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) {
+static void CreateFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) {
// If the graph is debuggable, all callee-saved floating-point registers are blocked by
// the code generator. Furthermore, the register allocator creates fixed live intervals
// for all caller-saved registers because we are doing a function call. As a result, if
@@ -2585,9 +2569,8 @@ static void CreateFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) {
DCHECK_EQ(invoke->InputAt(0)->GetType(), DataType::Type::kFloat64);
DCHECK_EQ(invoke->GetType(), DataType::Type::kFloat64);
- LocationSummary* const locations = new (arena) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
- kIntrinsified);
+ LocationSummary* const locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
const InvokeRuntimeCallingConventionARMVIXL calling_convention;
locations->SetInAt(0, Location::RequiresFpuRegister());
@@ -2597,7 +2580,7 @@ static void CreateFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) {
locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(1)));
}
-static void CreateFPFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) {
+static void CreateFPFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) {
// If the graph is debuggable, all callee-saved floating-point registers are blocked by
// the code generator. Furthermore, the register allocator creates fixed live intervals
// for all caller-saved registers because we are doing a function call. As a result, if
@@ -2612,9 +2595,8 @@ static void CreateFPFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke)
DCHECK_EQ(invoke->InputAt(1)->GetType(), DataType::Type::kFloat64);
DCHECK_EQ(invoke->GetType(), DataType::Type::kFloat64);
- LocationSummary* const locations = new (arena) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
- kIntrinsified);
+ LocationSummary* const locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
const InvokeRuntimeCallingConventionARMVIXL calling_convention;
locations->SetInAt(0, Location::RequiresFpuRegister());
@@ -2669,7 +2651,7 @@ static void GenFPFPToFPCall(HInvoke* invoke,
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathCos(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathCos(HInvoke* invoke) {
@@ -2677,7 +2659,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathCos(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathSin(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathSin(HInvoke* invoke) {
@@ -2685,7 +2667,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathSin(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathAcos(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathAcos(HInvoke* invoke) {
@@ -2693,7 +2675,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathAcos(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathAsin(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathAsin(HInvoke* invoke) {
@@ -2701,7 +2683,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathAsin(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathAtan(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathAtan(HInvoke* invoke) {
@@ -2709,7 +2691,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathAtan(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathCbrt(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathCbrt(HInvoke* invoke) {
@@ -2717,7 +2699,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathCbrt(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathCosh(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathCosh(HInvoke* invoke) {
@@ -2725,7 +2707,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathCosh(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathExp(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathExp(HInvoke* invoke) {
@@ -2733,7 +2715,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathExp(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathExpm1(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathExpm1(HInvoke* invoke) {
@@ -2741,7 +2723,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathExpm1(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathLog(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathLog(HInvoke* invoke) {
@@ -2749,7 +2731,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathLog(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathLog10(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathLog10(HInvoke* invoke) {
@@ -2757,7 +2739,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathLog10(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathSinh(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathSinh(HInvoke* invoke) {
@@ -2765,7 +2747,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathSinh(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathTan(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathTan(HInvoke* invoke) {
@@ -2773,7 +2755,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathTan(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathTanh(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathTanh(HInvoke* invoke) {
@@ -2781,7 +2763,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathTanh(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathAtan2(HInvoke* invoke) {
- CreateFPFPToFPCallLocations(arena_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathAtan2(HInvoke* invoke) {
@@ -2789,7 +2771,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathAtan2(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathHypot(HInvoke* invoke) {
- CreateFPFPToFPCallLocations(arena_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathHypot(HInvoke* invoke) {
@@ -2797,7 +2779,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathHypot(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathNextAfter(HInvoke* invoke) {
- CreateFPFPToFPCallLocations(arena_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathNextAfter(HInvoke* invoke) {
@@ -2805,7 +2787,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathNextAfter(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARMVIXL::VisitIntegerReverse(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitIntegerReverse(HInvoke* invoke) {
@@ -2814,7 +2796,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitIntegerReverse(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARMVIXL::VisitLongReverse(HInvoke* invoke) {
- CreateLongToLongLocationsWithOverlap(arena_, invoke);
+ CreateLongToLongLocationsWithOverlap(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitLongReverse(HInvoke* invoke) {
@@ -2831,7 +2813,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitLongReverse(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARMVIXL::VisitIntegerReverseBytes(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitIntegerReverseBytes(HInvoke* invoke) {
@@ -2840,7 +2822,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitIntegerReverseBytes(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARMVIXL::VisitLongReverseBytes(HInvoke* invoke) {
- CreateLongToLongLocationsWithOverlap(arena_, invoke);
+ CreateLongToLongLocationsWithOverlap(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitLongReverseBytes(HInvoke* invoke) {
@@ -2857,7 +2839,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitLongReverseBytes(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARMVIXL::VisitShortReverseBytes(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitShortReverseBytes(HInvoke* invoke) {
@@ -2894,7 +2876,7 @@ static void GenBitCount(HInvoke* instr, DataType::Type type, ArmVIXLAssembler* a
}
void IntrinsicLocationsBuilderARMVIXL::VisitIntegerBitCount(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
invoke->GetLocations()->AddTemp(Location::RequiresFpuRegister());
}
@@ -2961,7 +2943,7 @@ static void GenHighestOneBit(HInvoke* invoke,
}
void IntrinsicLocationsBuilderARMVIXL::VisitIntegerHighestOneBit(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitIntegerHighestOneBit(HInvoke* invoke) {
@@ -2969,7 +2951,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitIntegerHighestOneBit(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARMVIXL::VisitLongHighestOneBit(HInvoke* invoke) {
- CreateLongToLongLocationsWithOverlap(arena_, invoke);
+ CreateLongToLongLocationsWithOverlap(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitLongHighestOneBit(HInvoke* invoke) {
@@ -3026,7 +3008,7 @@ static void GenLowestOneBit(HInvoke* invoke,
}
void IntrinsicLocationsBuilderARMVIXL::VisitIntegerLowestOneBit(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitIntegerLowestOneBit(HInvoke* invoke) {
@@ -3034,7 +3016,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitIntegerLowestOneBit(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARMVIXL::VisitLongLowestOneBit(HInvoke* invoke) {
- CreateLongToLongLocationsWithOverlap(arena_, invoke);
+ CreateLongToLongLocationsWithOverlap(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitLongLowestOneBit(HInvoke* invoke) {
@@ -3042,9 +3024,8 @@ void IntrinsicCodeGeneratorARMVIXL::VisitLongLowestOneBit(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARMVIXL::VisitStringGetCharsNoCheck(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
@@ -3170,7 +3151,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringGetCharsNoCheck(HInvoke* invoke)
}
void IntrinsicLocationsBuilderARMVIXL::VisitFloatIsInfinite(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke);
+ CreateFPToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitFloatIsInfinite(HInvoke* invoke) {
@@ -3188,7 +3169,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitFloatIsInfinite(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARMVIXL::VisitDoubleIsInfinite(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke);
+ CreateFPToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitDoubleIsInfinite(HInvoke* invoke) {
@@ -3215,7 +3196,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitDoubleIsInfinite(HInvoke* invoke) {
void IntrinsicLocationsBuilderARMVIXL::VisitMathCeil(HInvoke* invoke) {
if (features_.HasARMv8AInstructions()) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
}
@@ -3227,7 +3208,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathCeil(HInvoke* invoke) {
void IntrinsicLocationsBuilderARMVIXL::VisitMathFloor(HInvoke* invoke) {
if (features_.HasARMv8AInstructions()) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
}
@@ -3309,9 +3290,8 @@ void IntrinsicCodeGeneratorARMVIXL::VisitIntegerValueOf(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderARMVIXL::VisitThreadInterrupted(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetOut(Location::RequiresRegister());
}
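All of the LocationSummary changes above share one shape: the summary is placement-new'd on an arena owned by the graph, so the records are released in bulk when the arena goes away rather than deleted one by one. Below is a minimal, self-contained sketch of that shape; ToyArena and ToyLocationSummary are invented stand-ins for illustration only, not ART's real ArenaAllocator or LocationSummary.

#include <cstddef>
#include <cstdint>
#include <memory>
#include <new>
#include <vector>

// Toy bump-style arena standing in for ART's ArenaAllocator (invented here).
class ToyArena {
 public:
  void* Alloc(size_t bytes) {
    storage_.push_back(std::make_unique<uint8_t[]>(bytes));
    return storage_.back().get();
  }
 private:
  // Every allocation lives until the arena itself is destroyed; nothing is freed per object.
  std::vector<std::unique_ptr<uint8_t[]>> storage_;
};

// Toy stand-in for LocationSummary with the same placement-new hook shape.
struct ToyLocationSummary {
  enum CallKind { kNoCall, kCallOnMainOnly, kCallOnMainAndSlowPath };
  explicit ToyLocationSummary(CallKind call_kind) : call_kind_(call_kind) {}
  static void* operator new(size_t size, ToyArena* arena) { return arena->Alloc(size); }
  CallKind call_kind_;
};

int main() {
  ToyArena arena;
  // Mirrors `new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified)`:
  // the object is carved out of the arena and is never individually deleted.
  ToyLocationSummary* locations = new (&arena) ToyLocationSummary(ToyLocationSummary::kNoCall);
  return locations->call_kind_ == ToyLocationSummary::kNoCall ? 0 : 1;
}

The same allocation call sites work unchanged whichever arena-backed allocator is passed in, which is why the hunks only touch the expression in front of the placement new.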
diff --git a/compiler/optimizing/intrinsics_arm_vixl.h b/compiler/optimizing/intrinsics_arm_vixl.h
index a4a2830211..4f18ca3fc1 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.h
+++ b/compiler/optimizing/intrinsics_arm_vixl.h
@@ -46,7 +46,7 @@ class IntrinsicLocationsBuilderARMVIXL FINAL : public IntrinsicVisitor {
bool TryDispatch(HInvoke* invoke);
private:
- ArenaAllocator* arena_;
+ ArenaAllocator* allocator_;
CodeGenerator* codegen_;
ArmVIXLAssembler* assembler_;
const ArmInstructionSetFeatures& features_;
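The header hunk above only renames the cached member; the matching constructors (see the MIPS builder below) initialize it from codegen->GetGraph()->GetAllocator(). A toy sketch of that wiring follows, with ToyCodeGenerator, ToyGraph, and ToyArenaAllocator as invented stand-ins for the real classes.

// Invented stand-ins; ART's real CodeGenerator/HGraph/ArenaAllocator are far richer.
struct ToyArenaAllocator {};

struct ToyGraph {
  ToyArenaAllocator* GetAllocator() { return &allocator_; }
  ToyArenaAllocator allocator_;
};

struct ToyCodeGenerator {
  explicit ToyCodeGenerator(ToyGraph* graph) : graph_(graph) {}
  ToyGraph* GetGraph() { return graph_; }
  ToyGraph* graph_;
};

// Mirrors the builders in this diff: cache the graph's allocator once at construction,
// then hand it to every Create*Locations helper instead of re-fetching it per intrinsic.
class ToyIntrinsicLocationsBuilder {
 public:
  explicit ToyIntrinsicLocationsBuilder(ToyCodeGenerator* codegen)
      : codegen_(codegen), allocator_(codegen->GetGraph()->GetAllocator()) {}
  ToyArenaAllocator* allocator() const { return allocator_; }
 private:
  ToyCodeGenerator* codegen_;
  ToyArenaAllocator* allocator_;
};

int main() {
  ToyGraph graph;
  ToyCodeGenerator codegen(&graph);
  ToyIntrinsicLocationsBuilder builder(&codegen);
  return builder.allocator() == graph.GetAllocator() ? 0 : 1;
}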
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index 8847256532..5f2f71bd4d 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -35,7 +35,7 @@ namespace art {
namespace mips {
IntrinsicLocationsBuilderMIPS::IntrinsicLocationsBuilderMIPS(CodeGeneratorMIPS* codegen)
- : codegen_(codegen), arena_(codegen->GetGraph()->GetArena()) {
+ : codegen_(codegen), allocator_(codegen->GetGraph()->GetAllocator()) {
}
MipsAssembler* IntrinsicCodeGeneratorMIPS::GetAssembler() {
@@ -43,7 +43,7 @@ MipsAssembler* IntrinsicCodeGeneratorMIPS::GetAssembler() {
}
ArenaAllocator* IntrinsicCodeGeneratorMIPS::GetAllocator() {
- return codegen_->GetGraph()->GetArena();
+ return codegen_->GetGraph()->GetAllocator();
}
inline bool IntrinsicCodeGeneratorMIPS::IsR2OrNewer() const {
@@ -152,10 +152,9 @@ bool IntrinsicLocationsBuilderMIPS::TryDispatch(HInvoke* invoke) {
#define __ assembler->
-static void CreateFPToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateFPToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresRegister());
}
@@ -178,7 +177,7 @@ static void MoveFPToInt(LocationSummary* locations, bool is64bit, MipsAssembler*
// long java.lang.Double.doubleToRawLongBits(double)
void IntrinsicLocationsBuilderMIPS::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke);
+ CreateFPToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
@@ -187,17 +186,16 @@ void IntrinsicCodeGeneratorMIPS::VisitDoubleDoubleToRawLongBits(HInvoke* invoke)
// int java.lang.Float.floatToRawIntBits(float)
void IntrinsicLocationsBuilderMIPS::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke);
+ CreateFPToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
MoveFPToInt(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
}
-static void CreateIntToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresFpuRegister());
}
@@ -220,7 +218,7 @@ static void MoveIntToFP(LocationSummary* locations, bool is64bit, MipsAssembler*
// double java.lang.Double.longBitsToDouble(long)
void IntrinsicLocationsBuilderMIPS::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
- CreateIntToFPLocations(arena_, invoke);
+ CreateIntToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
@@ -229,19 +227,18 @@ void IntrinsicCodeGeneratorMIPS::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
// float java.lang.Float.intBitsToFloat(int)
void IntrinsicLocationsBuilderMIPS::VisitFloatIntBitsToFloat(HInvoke* invoke) {
- CreateIntToFPLocations(arena_, invoke);
+ CreateIntToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitFloatIntBitsToFloat(HInvoke* invoke) {
MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
}
-static void CreateIntToIntLocations(ArenaAllocator* arena,
+static void CreateIntToIntLocations(ArenaAllocator* allocator,
HInvoke* invoke,
Location::OutputOverlap overlaps = Location::kNoOutputOverlap) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), overlaps);
}
@@ -402,7 +399,7 @@ static void GenReverse(LocationSummary* locations,
// int java.lang.Integer.reverseBytes(int)
void IntrinsicLocationsBuilderMIPS::VisitIntegerReverseBytes(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitIntegerReverseBytes(HInvoke* invoke) {
@@ -416,7 +413,7 @@ void IntrinsicCodeGeneratorMIPS::VisitIntegerReverseBytes(HInvoke* invoke) {
// long java.lang.Long.reverseBytes(long)
void IntrinsicLocationsBuilderMIPS::VisitLongReverseBytes(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitLongReverseBytes(HInvoke* invoke) {
@@ -430,7 +427,7 @@ void IntrinsicCodeGeneratorMIPS::VisitLongReverseBytes(HInvoke* invoke) {
// short java.lang.Short.reverseBytes(short)
void IntrinsicLocationsBuilderMIPS::VisitShortReverseBytes(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitShortReverseBytes(HInvoke* invoke) {
@@ -474,7 +471,7 @@ static void GenNumberOfLeadingZeroes(LocationSummary* locations,
// int java.lang.Integer.numberOfLeadingZeros(int i)
void IntrinsicLocationsBuilderMIPS::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
@@ -483,7 +480,7 @@ void IntrinsicCodeGeneratorMIPS::VisitIntegerNumberOfLeadingZeros(HInvoke* invok
// int java.lang.Long.numberOfLeadingZeros(long i)
void IntrinsicLocationsBuilderMIPS::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
@@ -561,7 +558,7 @@ static void GenNumberOfTrailingZeroes(LocationSummary* locations,
// int java.lang.Integer.numberOfTrailingZeros(int i)
void IntrinsicLocationsBuilderMIPS::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke, Location::kOutputOverlap);
+ CreateIntToIntLocations(allocator_, invoke, Location::kOutputOverlap);
}
void IntrinsicCodeGeneratorMIPS::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
@@ -570,7 +567,7 @@ void IntrinsicCodeGeneratorMIPS::VisitIntegerNumberOfTrailingZeros(HInvoke* invo
// int java.lang.Long.numberOfTrailingZeros(long i)
void IntrinsicLocationsBuilderMIPS::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke, Location::kOutputOverlap);
+ CreateIntToIntLocations(allocator_, invoke, Location::kOutputOverlap);
}
void IntrinsicCodeGeneratorMIPS::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
@@ -579,7 +576,7 @@ void IntrinsicCodeGeneratorMIPS::VisitLongNumberOfTrailingZeros(HInvoke* invoke)
// int java.lang.Integer.reverse(int)
void IntrinsicLocationsBuilderMIPS::VisitIntegerReverse(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitIntegerReverse(HInvoke* invoke) {
@@ -593,7 +590,7 @@ void IntrinsicCodeGeneratorMIPS::VisitIntegerReverse(HInvoke* invoke) {
// long java.lang.Long.reverse(long)
void IntrinsicLocationsBuilderMIPS::VisitLongReverse(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitLongReverse(HInvoke* invoke) {
@@ -605,10 +602,9 @@ void IntrinsicCodeGeneratorMIPS::VisitLongReverse(HInvoke* invoke) {
GetAssembler());
}
-static void CreateFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
}
@@ -725,7 +721,7 @@ static void GenBitCount(LocationSummary* locations,
// int java.lang.Integer.bitCount(int)
void IntrinsicLocationsBuilderMIPS::VisitIntegerBitCount(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitIntegerBitCount(HInvoke* invoke) {
@@ -734,9 +730,8 @@ void IntrinsicCodeGeneratorMIPS::VisitIntegerBitCount(HInvoke* invoke) {
// int java.lang.Long.bitCount(long)

void IntrinsicLocationsBuilderMIPS::VisitLongBitCount(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister());
locations->AddTemp(Location::RequiresRegister());
@@ -801,7 +796,7 @@ static void MathAbsFP(LocationSummary* locations,
// double java.lang.Math.abs(double)
void IntrinsicLocationsBuilderMIPS::VisitMathAbsDouble(HInvoke* invoke) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathAbsDouble(HInvoke* invoke) {
@@ -810,7 +805,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMathAbsDouble(HInvoke* invoke) {
// float java.lang.Math.abs(float)
void IntrinsicLocationsBuilderMIPS::VisitMathAbsFloat(HInvoke* invoke) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathAbsFloat(HInvoke* invoke) {
@@ -847,7 +842,7 @@ static void GenAbsInteger(LocationSummary* locations, bool is64bit, MipsAssemble
// int java.lang.Math.abs(int)
void IntrinsicLocationsBuilderMIPS::VisitMathAbsInt(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathAbsInt(HInvoke* invoke) {
@@ -856,7 +851,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMathAbsInt(HInvoke* invoke) {
// long java.lang.Math.abs(long)
void IntrinsicLocationsBuilderMIPS::VisitMathAbsLong(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathAbsLong(HInvoke* invoke) {
@@ -1026,10 +1021,9 @@ static void GenMinMaxFP(LocationSummary* locations,
}
}
-static void CreateFPFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateFPFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetInAt(1, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresFpuRegister(), Location::kOutputOverlap);
@@ -1037,7 +1031,7 @@ static void CreateFPFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
// double java.lang.Math.min(double, double)
void IntrinsicLocationsBuilderMIPS::VisitMathMinDoubleDouble(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathMinDoubleDouble(HInvoke* invoke) {
@@ -1050,7 +1044,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMathMinDoubleDouble(HInvoke* invoke) {
// float java.lang.Math.min(float, float)
void IntrinsicLocationsBuilderMIPS::VisitMathMinFloatFloat(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathMinFloatFloat(HInvoke* invoke) {
@@ -1063,7 +1057,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMathMinFloatFloat(HInvoke* invoke) {
// double java.lang.Math.max(double, double)
void IntrinsicLocationsBuilderMIPS::VisitMathMaxDoubleDouble(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathMaxDoubleDouble(HInvoke* invoke) {
@@ -1076,7 +1070,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMathMaxDoubleDouble(HInvoke* invoke) {
// float java.lang.Math.max(float, float)
void IntrinsicLocationsBuilderMIPS::VisitMathMaxFloatFloat(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathMaxFloatFloat(HInvoke* invoke) {
@@ -1087,10 +1081,9 @@ void IntrinsicCodeGeneratorMIPS::VisitMathMaxFloatFloat(HInvoke* invoke) {
GetAssembler());
}
-static void CreateIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
@@ -1267,7 +1260,7 @@ static void GenMinMax(LocationSummary* locations,
// int java.lang.Math.min(int, int)
void IntrinsicLocationsBuilderMIPS::VisitMathMinIntInt(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathMinIntInt(HInvoke* invoke) {
@@ -1280,7 +1273,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMathMinIntInt(HInvoke* invoke) {
// long java.lang.Math.min(long, long)
void IntrinsicLocationsBuilderMIPS::VisitMathMinLongLong(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathMinLongLong(HInvoke* invoke) {
@@ -1293,7 +1286,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMathMinLongLong(HInvoke* invoke) {
// int java.lang.Math.max(int, int)
void IntrinsicLocationsBuilderMIPS::VisitMathMaxIntInt(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathMaxIntInt(HInvoke* invoke) {
@@ -1306,7 +1299,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMathMaxIntInt(HInvoke* invoke) {
// long java.lang.Math.max(long, long)
void IntrinsicLocationsBuilderMIPS::VisitMathMaxLongLong(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathMaxLongLong(HInvoke* invoke) {
@@ -1319,7 +1312,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMathMaxLongLong(HInvoke* invoke) {
// double java.lang.Math.sqrt(double)
void IntrinsicLocationsBuilderMIPS::VisitMathSqrt(HInvoke* invoke) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathSqrt(HInvoke* invoke) {
@@ -1333,7 +1326,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMathSqrt(HInvoke* invoke) {
// byte libcore.io.Memory.peekByte(long address)
void IntrinsicLocationsBuilderMIPS::VisitMemoryPeekByte(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMemoryPeekByte(HInvoke* invoke) {
@@ -1346,7 +1339,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMemoryPeekByte(HInvoke* invoke) {
// short libcore.io.Memory.peekShort(long address)
void IntrinsicLocationsBuilderMIPS::VisitMemoryPeekShortNative(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMemoryPeekShortNative(HInvoke* invoke) {
@@ -1378,7 +1371,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMemoryPeekShortNative(HInvoke* invoke) {
// int libcore.io.Memory.peekInt(long address)
void IntrinsicLocationsBuilderMIPS::VisitMemoryPeekIntNative(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke, Location::kOutputOverlap);
+ CreateIntToIntLocations(allocator_, invoke, Location::kOutputOverlap);
}
void IntrinsicCodeGeneratorMIPS::VisitMemoryPeekIntNative(HInvoke* invoke) {
@@ -1396,7 +1389,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMemoryPeekIntNative(HInvoke* invoke) {
// long libcore.io.Memory.peekLong(long address)
void IntrinsicLocationsBuilderMIPS::VisitMemoryPeekLongNative(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke, Location::kOutputOverlap);
+ CreateIntToIntLocations(allocator_, invoke, Location::kOutputOverlap);
}
void IntrinsicCodeGeneratorMIPS::VisitMemoryPeekLongNative(HInvoke* invoke) {
@@ -1416,17 +1409,16 @@ void IntrinsicCodeGeneratorMIPS::VisitMemoryPeekLongNative(HInvoke* invoke) {
}
}
-static void CreateIntIntToVoidLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntIntToVoidLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
}
// void libcore.io.Memory.pokeByte(long address, byte value)
void IntrinsicLocationsBuilderMIPS::VisitMemoryPokeByte(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMemoryPokeByte(HInvoke* invoke) {
@@ -1439,7 +1431,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMemoryPokeByte(HInvoke* invoke) {
// void libcore.io.Memory.pokeShort(long address, short value)
void IntrinsicLocationsBuilderMIPS::VisitMemoryPokeShortNative(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMemoryPokeShortNative(HInvoke* invoke) {
@@ -1461,7 +1453,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMemoryPokeShortNative(HInvoke* invoke) {
// void libcore.io.Memory.pokeInt(long address, int value)
void IntrinsicLocationsBuilderMIPS::VisitMemoryPokeIntNative(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMemoryPokeIntNative(HInvoke* invoke) {
@@ -1479,7 +1471,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMemoryPokeIntNative(HInvoke* invoke) {
// void libcore.io.Memory.pokeLong(long address, long value)
void IntrinsicLocationsBuilderMIPS::VisitMemoryPokeLongNative(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMemoryPokeLongNative(HInvoke* invoke) {
@@ -1501,9 +1493,8 @@ void IntrinsicCodeGeneratorMIPS::VisitMemoryPokeLongNative(HInvoke* invoke) {
// Thread java.lang.Thread.currentThread()
void IntrinsicLocationsBuilderMIPS::VisitThreadCurrentThread(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetOut(Location::RequiresRegister());
}
@@ -1517,17 +1508,18 @@ void IntrinsicCodeGeneratorMIPS::VisitThreadCurrentThread(HInvoke* invoke) {
Thread::PeerOffset<kMipsPointerSize>().Int32Value());
}
-static void CreateIntIntIntToIntLocations(ArenaAllocator* arena,
+static void CreateIntIntIntToIntLocations(ArenaAllocator* allocator,
HInvoke* invoke,
DataType::Type type) {
bool can_call = kEmitCompilerReadBarrier &&
(invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObject ||
invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile);
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- (can_call
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall),
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke,
+ can_call
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall,
+ kIntrinsified);
if (can_call && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -1657,7 +1649,7 @@ static void GenUnsafeGet(HInvoke* invoke,
// int sun.misc.Unsafe.getInt(Object o, long offset)
void IntrinsicLocationsBuilderMIPS::VisitUnsafeGet(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt32);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt32);
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafeGet(HInvoke* invoke) {
@@ -1666,7 +1658,7 @@ void IntrinsicCodeGeneratorMIPS::VisitUnsafeGet(HInvoke* invoke) {
// int sun.misc.Unsafe.getIntVolatile(Object o, long offset)
void IntrinsicLocationsBuilderMIPS::VisitUnsafeGetVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt32);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt32);
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetVolatile(HInvoke* invoke) {
@@ -1675,7 +1667,7 @@ void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetVolatile(HInvoke* invoke) {
// long sun.misc.Unsafe.getLong(Object o, long offset)
void IntrinsicLocationsBuilderMIPS::VisitUnsafeGetLong(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt64);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt64);
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetLong(HInvoke* invoke) {
@@ -1684,7 +1676,7 @@ void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetLong(HInvoke* invoke) {
// Object sun.misc.Unsafe.getObject(Object o, long offset)
void IntrinsicLocationsBuilderMIPS::VisitUnsafeGetObject(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kReference);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kReference);
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetObject(HInvoke* invoke) {
@@ -1693,17 +1685,16 @@ void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetObject(HInvoke* invoke) {
// Object sun.misc.Unsafe.getObjectVolatile(Object o, long offset)
void IntrinsicLocationsBuilderMIPS::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kReference);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kReference);
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, IsR6(), codegen_);
}
-static void CreateIntIntIntIntToVoidLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntIntIntIntToVoidLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
@@ -1774,7 +1765,7 @@ static void GenUnsafePut(LocationSummary* locations,
// void sun.misc.Unsafe.putInt(Object o, long offset, int x)
void IntrinsicLocationsBuilderMIPS::VisitUnsafePut(HInvoke* invoke) {
- CreateIntIntIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafePut(HInvoke* invoke) {
@@ -1788,7 +1779,7 @@ void IntrinsicCodeGeneratorMIPS::VisitUnsafePut(HInvoke* invoke) {
// void sun.misc.Unsafe.putOrderedInt(Object o, long offset, int x)
void IntrinsicLocationsBuilderMIPS::VisitUnsafePutOrdered(HInvoke* invoke) {
- CreateIntIntIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafePutOrdered(HInvoke* invoke) {
@@ -1802,7 +1793,7 @@ void IntrinsicCodeGeneratorMIPS::VisitUnsafePutOrdered(HInvoke* invoke) {
// void sun.misc.Unsafe.putIntVolatile(Object o, long offset, int x)
void IntrinsicLocationsBuilderMIPS::VisitUnsafePutVolatile(HInvoke* invoke) {
- CreateIntIntIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafePutVolatile(HInvoke* invoke) {
@@ -1816,7 +1807,7 @@ void IntrinsicCodeGeneratorMIPS::VisitUnsafePutVolatile(HInvoke* invoke) {
// void sun.misc.Unsafe.putObject(Object o, long offset, Object x)
void IntrinsicLocationsBuilderMIPS::VisitUnsafePutObject(HInvoke* invoke) {
- CreateIntIntIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafePutObject(HInvoke* invoke) {
@@ -1830,7 +1821,7 @@ void IntrinsicCodeGeneratorMIPS::VisitUnsafePutObject(HInvoke* invoke) {
// void sun.misc.Unsafe.putOrderedObject(Object o, long offset, Object x)
void IntrinsicLocationsBuilderMIPS::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
- CreateIntIntIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
@@ -1844,7 +1835,7 @@ void IntrinsicCodeGeneratorMIPS::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
// void sun.misc.Unsafe.putObjectVolatile(Object o, long offset, Object x)
void IntrinsicLocationsBuilderMIPS::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
- CreateIntIntIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
@@ -1858,7 +1849,7 @@ void IntrinsicCodeGeneratorMIPS::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
// void sun.misc.Unsafe.putLong(Object o, long offset, long x)
void IntrinsicLocationsBuilderMIPS::VisitUnsafePutLong(HInvoke* invoke) {
- CreateIntIntIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafePutLong(HInvoke* invoke) {
@@ -1872,7 +1863,7 @@ void IntrinsicCodeGeneratorMIPS::VisitUnsafePutLong(HInvoke* invoke) {
// void sun.misc.Unsafe.putOrderedLong(Object o, long offset, long x)
void IntrinsicLocationsBuilderMIPS::VisitUnsafePutLongOrdered(HInvoke* invoke) {
- CreateIntIntIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafePutLongOrdered(HInvoke* invoke) {
@@ -1884,15 +1875,16 @@ void IntrinsicCodeGeneratorMIPS::VisitUnsafePutLongOrdered(HInvoke* invoke) {
codegen_);
}
-static void CreateIntIntIntIntIntToIntPlusTemps(ArenaAllocator* arena, HInvoke* invoke) {
+static void CreateIntIntIntIntIntToIntPlusTemps(ArenaAllocator* allocator, HInvoke* invoke) {
bool can_call = kEmitCompilerReadBarrier &&
kUseBakerReadBarrier &&
(invoke->GetIntrinsic() == Intrinsics::kUnsafeCASObject);
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- (can_call
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall),
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke,
+ can_call
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall,
+ kIntrinsified);
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
@@ -2016,7 +2008,7 @@ static void GenCas(HInvoke* invoke, DataType::Type type, CodeGeneratorMIPS* code
// boolean sun.misc.Unsafe.compareAndSwapInt(Object o, long offset, int expected, int x)
void IntrinsicLocationsBuilderMIPS::VisitUnsafeCASInt(HInvoke* invoke) {
- CreateIntIntIntIntIntToIntPlusTemps(arena_, invoke);
+ CreateIntIntIntIntIntToIntPlusTemps(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafeCASInt(HInvoke* invoke) {
@@ -2031,7 +2023,7 @@ void IntrinsicLocationsBuilderMIPS::VisitUnsafeCASObject(HInvoke* invoke) {
return;
}
- CreateIntIntIntIntIntToIntPlusTemps(arena_, invoke);
+ CreateIntIntIntIntIntToIntPlusTemps(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafeCASObject(HInvoke* invoke) {
@@ -2044,9 +2036,8 @@ void IntrinsicCodeGeneratorMIPS::VisitUnsafeCASObject(HInvoke* invoke) {
// int java.lang.String.compareTo(String anotherString)
void IntrinsicLocationsBuilderMIPS::VisitStringCompareTo(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
@@ -2071,9 +2062,8 @@ void IntrinsicCodeGeneratorMIPS::VisitStringCompareTo(HInvoke* invoke) {
// boolean java.lang.String.equals(Object anObject)
void IntrinsicLocationsBuilderMIPS::VisitStringEquals(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister());
@@ -2248,9 +2238,8 @@ static void GenerateStringIndexOf(HInvoke* invoke,
// int java.lang.String.indexOf(int ch)
void IntrinsicLocationsBuilderMIPS::VisitStringIndexOf(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
// We have a hand-crafted assembly stub that follows the runtime
// calling convention. So it's best to align the inputs accordingly.
InvokeRuntimeCallingConvention calling_convention;
@@ -2273,9 +2262,8 @@ void IntrinsicCodeGeneratorMIPS::VisitStringIndexOf(HInvoke* invoke) {
// int java.lang.String.indexOf(int ch, int fromIndex)
void IntrinsicLocationsBuilderMIPS::VisitStringIndexOfAfter(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
// We have a hand-crafted assembly stub that follows the runtime
// calling convention. So it's best to align the inputs accordingly.
InvokeRuntimeCallingConvention calling_convention;
@@ -2299,9 +2287,8 @@ void IntrinsicCodeGeneratorMIPS::VisitStringIndexOfAfter(HInvoke* invoke) {
// java.lang.StringFactory.newStringFromBytes(byte[] data, int high, int offset, int byteCount)
void IntrinsicLocationsBuilderMIPS::VisitStringNewStringFromBytes(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
@@ -2325,9 +2312,8 @@ void IntrinsicCodeGeneratorMIPS::VisitStringNewStringFromBytes(HInvoke* invoke)
// java.lang.StringFactory.newStringFromChars(int offset, int charCount, char[] data)
void IntrinsicLocationsBuilderMIPS::VisitStringNewStringFromChars(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
@@ -2348,9 +2334,8 @@ void IntrinsicCodeGeneratorMIPS::VisitStringNewStringFromChars(HInvoke* invoke)
// java.lang.StringFactory.newStringFromString(String toCopy)
void IntrinsicLocationsBuilderMIPS::VisitStringNewStringFromString(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
Location outLocation = calling_convention.GetReturnLocation(DataType::Type::kInt32);
@@ -2411,7 +2396,7 @@ static void GenIsInfinite(LocationSummary* locations,
// boolean java.lang.Float.isInfinite(float)
void IntrinsicLocationsBuilderMIPS::VisitFloatIsInfinite(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke);
+ CreateFPToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitFloatIsInfinite(HInvoke* invoke) {
@@ -2420,7 +2405,7 @@ void IntrinsicCodeGeneratorMIPS::VisitFloatIsInfinite(HInvoke* invoke) {
// boolean java.lang.Double.isInfinite(double)
void IntrinsicLocationsBuilderMIPS::VisitDoubleIsInfinite(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke);
+ CreateFPToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitDoubleIsInfinite(HInvoke* invoke) {
@@ -2476,7 +2461,7 @@ static void GenHighestOneBit(LocationSummary* locations,
// int java.lang.Integer.highestOneBit(int)
void IntrinsicLocationsBuilderMIPS::VisitIntegerHighestOneBit(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitIntegerHighestOneBit(HInvoke* invoke) {
@@ -2485,7 +2470,7 @@ void IntrinsicCodeGeneratorMIPS::VisitIntegerHighestOneBit(HInvoke* invoke) {
// long java.lang.Long.highestOneBit(long)
void IntrinsicLocationsBuilderMIPS::VisitLongHighestOneBit(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke, Location::kOutputOverlap);
+ CreateIntToIntLocations(allocator_, invoke, Location::kOutputOverlap);
}
void IntrinsicCodeGeneratorMIPS::VisitLongHighestOneBit(HInvoke* invoke) {
@@ -2524,7 +2509,7 @@ static void GenLowestOneBit(LocationSummary* locations,
// int java.lang.Integer.lowestOneBit(int)
void IntrinsicLocationsBuilderMIPS::VisitIntegerLowestOneBit(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitIntegerLowestOneBit(HInvoke* invoke) {
@@ -2533,7 +2518,7 @@ void IntrinsicCodeGeneratorMIPS::VisitIntegerLowestOneBit(HInvoke* invoke) {
// long java.lang.Long.lowestOneBit(long)
void IntrinsicLocationsBuilderMIPS::VisitLongLowestOneBit(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitLongLowestOneBit(HInvoke* invoke) {
@@ -2542,9 +2527,8 @@ void IntrinsicCodeGeneratorMIPS::VisitLongLowestOneBit(HInvoke* invoke) {
// int java.lang.Math.round(float)
void IntrinsicLocationsBuilderMIPS::VisitMathRoundFloat(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->AddTemp(Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresRegister());
@@ -2667,9 +2651,8 @@ void IntrinsicCodeGeneratorMIPS::VisitMathRoundFloat(HInvoke* invoke) {
// void java.lang.String.getChars(int srcBegin, int srcEnd, char[] dst, int dstBegin)
void IntrinsicLocationsBuilderMIPS::VisitStringGetCharsNoCheck(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
@@ -2757,20 +2740,18 @@ void IntrinsicCodeGeneratorMIPS::VisitStringGetCharsNoCheck(HInvoke* invoke) {
__ Bind(&done);
}
-static void CreateFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
- kIntrinsified);
+static void CreateFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
locations->SetOut(calling_convention.GetReturnLocation(DataType::Type::kFloat64));
}
-static void CreateFPFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
- kIntrinsified);
+static void CreateFPFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
@@ -2804,7 +2785,7 @@ static void GenFPFPToFPCall(HInvoke* invoke,
// static double java.lang.Math.cos(double a)
void IntrinsicLocationsBuilderMIPS::VisitMathCos(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathCos(HInvoke* invoke) {
@@ -2813,7 +2794,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMathCos(HInvoke* invoke) {
// static double java.lang.Math.sin(double a)
void IntrinsicLocationsBuilderMIPS::VisitMathSin(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathSin(HInvoke* invoke) {
@@ -2822,7 +2803,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMathSin(HInvoke* invoke) {
// static double java.lang.Math.acos(double a)
void IntrinsicLocationsBuilderMIPS::VisitMathAcos(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathAcos(HInvoke* invoke) {
@@ -2831,7 +2812,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMathAcos(HInvoke* invoke) {
// static double java.lang.Math.asin(double a)
void IntrinsicLocationsBuilderMIPS::VisitMathAsin(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathAsin(HInvoke* invoke) {
@@ -2840,7 +2821,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMathAsin(HInvoke* invoke) {
// static double java.lang.Math.atan(double a)
void IntrinsicLocationsBuilderMIPS::VisitMathAtan(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathAtan(HInvoke* invoke) {
@@ -2849,7 +2830,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMathAtan(HInvoke* invoke) {
// static double java.lang.Math.atan2(double y, double x)
void IntrinsicLocationsBuilderMIPS::VisitMathAtan2(HInvoke* invoke) {
- CreateFPFPToFPCallLocations(arena_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathAtan2(HInvoke* invoke) {
@@ -2858,7 +2839,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMathAtan2(HInvoke* invoke) {
// static double java.lang.Math.cbrt(double a)
void IntrinsicLocationsBuilderMIPS::VisitMathCbrt(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathCbrt(HInvoke* invoke) {
@@ -2867,7 +2848,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMathCbrt(HInvoke* invoke) {
// static double java.lang.Math.cosh(double x)
void IntrinsicLocationsBuilderMIPS::VisitMathCosh(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathCosh(HInvoke* invoke) {
@@ -2876,7 +2857,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMathCosh(HInvoke* invoke) {
// static double java.lang.Math.exp(double a)
void IntrinsicLocationsBuilderMIPS::VisitMathExp(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathExp(HInvoke* invoke) {
@@ -2885,7 +2866,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMathExp(HInvoke* invoke) {
// static double java.lang.Math.expm1(double x)
void IntrinsicLocationsBuilderMIPS::VisitMathExpm1(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathExpm1(HInvoke* invoke) {
@@ -2894,7 +2875,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMathExpm1(HInvoke* invoke) {
// static double java.lang.Math.hypot(double x, double y)
void IntrinsicLocationsBuilderMIPS::VisitMathHypot(HInvoke* invoke) {
- CreateFPFPToFPCallLocations(arena_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathHypot(HInvoke* invoke) {
@@ -2903,7 +2884,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMathHypot(HInvoke* invoke) {
// static double java.lang.Math.log(double a)
void IntrinsicLocationsBuilderMIPS::VisitMathLog(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathLog(HInvoke* invoke) {
@@ -2912,7 +2893,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMathLog(HInvoke* invoke) {
// static double java.lang.Math.log10(double x)
void IntrinsicLocationsBuilderMIPS::VisitMathLog10(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathLog10(HInvoke* invoke) {
@@ -2921,7 +2902,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMathLog10(HInvoke* invoke) {
// static double java.lang.Math.nextAfter(double start, double direction)
void IntrinsicLocationsBuilderMIPS::VisitMathNextAfter(HInvoke* invoke) {
- CreateFPFPToFPCallLocations(arena_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathNextAfter(HInvoke* invoke) {
@@ -2930,7 +2911,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMathNextAfter(HInvoke* invoke) {
// static double java.lang.Math.sinh(double x)
void IntrinsicLocationsBuilderMIPS::VisitMathSinh(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathSinh(HInvoke* invoke) {
@@ -2939,7 +2920,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMathSinh(HInvoke* invoke) {
// static double java.lang.Math.tan(double a)
void IntrinsicLocationsBuilderMIPS::VisitMathTan(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathTan(HInvoke* invoke) {
@@ -2948,7 +2929,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMathTan(HInvoke* invoke) {
// static double java.lang.Math.tanh(double x)
void IntrinsicLocationsBuilderMIPS::VisitMathTanh(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathTanh(HInvoke* invoke) {
@@ -2982,7 +2963,7 @@ void IntrinsicLocationsBuilderMIPS::VisitSystemArrayCopyChar(HInvoke* invoke) {
// Okay, it is safe to generate inline code.
LocationSummary* locations =
- new (arena_) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
+ new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
// arraycopy(Object src, int srcPos, Object dest, int destPos, int length).
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
diff --git a/compiler/optimizing/intrinsics_mips.h b/compiler/optimizing/intrinsics_mips.h
index 05d1aa284a..afd9548a60 100644
--- a/compiler/optimizing/intrinsics_mips.h
+++ b/compiler/optimizing/intrinsics_mips.h
@@ -50,7 +50,7 @@ class IntrinsicLocationsBuilderMIPS FINAL : public IntrinsicVisitor {
private:
CodeGeneratorMIPS* codegen_;
- ArenaAllocator* arena_;
+ ArenaAllocator* allocator_;
DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderMIPS);
};
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index d0234d8271..8d5be80202 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -35,7 +35,7 @@ namespace art {
namespace mips64 {
IntrinsicLocationsBuilderMIPS64::IntrinsicLocationsBuilderMIPS64(CodeGeneratorMIPS64* codegen)
- : codegen_(codegen), arena_(codegen->GetGraph()->GetArena()) {
+ : codegen_(codegen), allocator_(codegen->GetGraph()->GetAllocator()) {
}
Mips64Assembler* IntrinsicCodeGeneratorMIPS64::GetAssembler() {
@@ -43,7 +43,7 @@ Mips64Assembler* IntrinsicCodeGeneratorMIPS64::GetAssembler() {
}
ArenaAllocator* IntrinsicCodeGeneratorMIPS64::GetAllocator() {
- return codegen_->GetGraph()->GetArena();
+ return codegen_->GetGraph()->GetAllocator();
}
#define __ codegen->GetAssembler()->
@@ -141,10 +141,9 @@ bool IntrinsicLocationsBuilderMIPS64::TryDispatch(HInvoke* invoke) {
#define __ assembler->
-static void CreateFPToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateFPToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresRegister());
}
@@ -162,7 +161,7 @@ static void MoveFPToInt(LocationSummary* locations, bool is64bit, Mips64Assemble
// long java.lang.Double.doubleToRawLongBits(double)
void IntrinsicLocationsBuilderMIPS64::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke);
+ CreateFPToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
@@ -171,17 +170,16 @@ void IntrinsicCodeGeneratorMIPS64::VisitDoubleDoubleToRawLongBits(HInvoke* invok
// int java.lang.Float.floatToRawIntBits(float)
void IntrinsicLocationsBuilderMIPS64::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke);
+ CreateFPToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
MoveFPToInt(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
}
-static void CreateIntToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresFpuRegister());
}
@@ -199,7 +197,7 @@ static void MoveIntToFP(LocationSummary* locations, bool is64bit, Mips64Assemble
// double java.lang.Double.longBitsToDouble(long)
void IntrinsicLocationsBuilderMIPS64::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
- CreateIntToFPLocations(arena_, invoke);
+ CreateIntToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
@@ -208,17 +206,16 @@ void IntrinsicCodeGeneratorMIPS64::VisitDoubleLongBitsToDouble(HInvoke* invoke)
// float java.lang.Float.intBitsToFloat(int)
void IntrinsicLocationsBuilderMIPS64::VisitFloatIntBitsToFloat(HInvoke* invoke) {
- CreateIntToFPLocations(arena_, invoke);
+ CreateIntToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitFloatIntBitsToFloat(HInvoke* invoke) {
MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
}
-static void CreateIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
@@ -250,7 +247,7 @@ static void GenReverseBytes(LocationSummary* locations,
// int java.lang.Integer.reverseBytes(int)
void IntrinsicLocationsBuilderMIPS64::VisitIntegerReverseBytes(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitIntegerReverseBytes(HInvoke* invoke) {
@@ -259,7 +256,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitIntegerReverseBytes(HInvoke* invoke) {
// long java.lang.Long.reverseBytes(long)
void IntrinsicLocationsBuilderMIPS64::VisitLongReverseBytes(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitLongReverseBytes(HInvoke* invoke) {
@@ -268,7 +265,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitLongReverseBytes(HInvoke* invoke) {
// short java.lang.Short.reverseBytes(short)
void IntrinsicLocationsBuilderMIPS64::VisitShortReverseBytes(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitShortReverseBytes(HInvoke* invoke) {
@@ -290,7 +287,7 @@ static void GenNumberOfLeadingZeroes(LocationSummary* locations,
// int java.lang.Integer.numberOfLeadingZeros(int i)
void IntrinsicLocationsBuilderMIPS64::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
@@ -299,7 +296,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitIntegerNumberOfLeadingZeros(HInvoke* inv
// int java.lang.Long.numberOfLeadingZeros(long i)
void IntrinsicLocationsBuilderMIPS64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
@@ -327,7 +324,7 @@ static void GenNumberOfTrailingZeroes(LocationSummary* locations,
// int java.lang.Integer.numberOfTrailingZeros(int i)
void IntrinsicLocationsBuilderMIPS64::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
@@ -336,7 +333,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitIntegerNumberOfTrailingZeros(HInvoke* in
// int java.lang.Long.numberOfTrailingZeros(long i)
void IntrinsicLocationsBuilderMIPS64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
@@ -364,7 +361,7 @@ static void GenReverse(LocationSummary* locations,
// int java.lang.Integer.reverse(int)
void IntrinsicLocationsBuilderMIPS64::VisitIntegerReverse(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitIntegerReverse(HInvoke* invoke) {
@@ -373,17 +370,16 @@ void IntrinsicCodeGeneratorMIPS64::VisitIntegerReverse(HInvoke* invoke) {
// long java.lang.Long.reverse(long)
void IntrinsicLocationsBuilderMIPS64::VisitLongReverse(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitLongReverse(HInvoke* invoke) {
GenReverse(invoke->GetLocations(), DataType::Type::kInt64, GetAssembler());
}
-static void CreateFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
}
@@ -458,7 +454,7 @@ static void GenBitCount(LocationSummary* locations,
// int java.lang.Integer.bitCount(int)
void IntrinsicLocationsBuilderMIPS64::VisitIntegerBitCount(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitIntegerBitCount(HInvoke* invoke) {
@@ -467,7 +463,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitIntegerBitCount(HInvoke* invoke) {
// int java.lang.Long.bitCount(long)
void IntrinsicLocationsBuilderMIPS64::VisitLongBitCount(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitLongBitCount(HInvoke* invoke) {
@@ -487,7 +483,7 @@ static void MathAbsFP(LocationSummary* locations, bool is64bit, Mips64Assembler*
// double java.lang.Math.abs(double)
void IntrinsicLocationsBuilderMIPS64::VisitMathAbsDouble(HInvoke* invoke) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathAbsDouble(HInvoke* invoke) {
@@ -496,17 +492,16 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathAbsDouble(HInvoke* invoke) {
// float java.lang.Math.abs(float)
void IntrinsicLocationsBuilderMIPS64::VisitMathAbsFloat(HInvoke* invoke) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathAbsFloat(HInvoke* invoke) {
MathAbsFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
}
-static void CreateIntToInt(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntToInt(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
@@ -528,7 +523,7 @@ static void GenAbsInteger(LocationSummary* locations, bool is64bit, Mips64Assemb
// int java.lang.Math.abs(int)
void IntrinsicLocationsBuilderMIPS64::VisitMathAbsInt(HInvoke* invoke) {
- CreateIntToInt(arena_, invoke);
+ CreateIntToInt(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathAbsInt(HInvoke* invoke) {
@@ -537,7 +532,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathAbsInt(HInvoke* invoke) {
// long java.lang.Math.abs(long)
void IntrinsicLocationsBuilderMIPS64::VisitMathAbsLong(HInvoke* invoke) {
- CreateIntToInt(arena_, invoke);
+ CreateIntToInt(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathAbsLong(HInvoke* invoke) {
@@ -613,10 +608,9 @@ static void GenMinMaxFP(LocationSummary* locations,
__ Bind(&done);
}
-static void CreateFPFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateFPFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetInAt(1, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
@@ -624,7 +618,7 @@ static void CreateFPFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
// double java.lang.Math.min(double, double)
void IntrinsicLocationsBuilderMIPS64::VisitMathMinDoubleDouble(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathMinDoubleDouble(HInvoke* invoke) {
@@ -633,7 +627,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathMinDoubleDouble(HInvoke* invoke) {
// float java.lang.Math.min(float, float)
void IntrinsicLocationsBuilderMIPS64::VisitMathMinFloatFloat(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathMinFloatFloat(HInvoke* invoke) {
@@ -642,7 +636,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathMinFloatFloat(HInvoke* invoke) {
// double java.lang.Math.max(double, double)
void IntrinsicLocationsBuilderMIPS64::VisitMathMaxDoubleDouble(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathMaxDoubleDouble(HInvoke* invoke) {
@@ -651,7 +645,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathMaxDoubleDouble(HInvoke* invoke) {
// float java.lang.Math.max(float, float)
void IntrinsicLocationsBuilderMIPS64::VisitMathMaxFloatFloat(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathMaxFloatFloat(HInvoke* invoke) {
@@ -716,10 +710,9 @@ static void GenMinMax(LocationSummary* locations,
}
}
-static void CreateIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
@@ -727,7 +720,7 @@ static void CreateIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
// int java.lang.Math.min(int, int)
void IntrinsicLocationsBuilderMIPS64::VisitMathMinIntInt(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathMinIntInt(HInvoke* invoke) {
@@ -736,7 +729,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathMinIntInt(HInvoke* invoke) {
// long java.lang.Math.min(long, long)
void IntrinsicLocationsBuilderMIPS64::VisitMathMinLongLong(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathMinLongLong(HInvoke* invoke) {
@@ -745,7 +738,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathMinLongLong(HInvoke* invoke) {
// int java.lang.Math.max(int, int)
void IntrinsicLocationsBuilderMIPS64::VisitMathMaxIntInt(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathMaxIntInt(HInvoke* invoke) {
@@ -754,7 +747,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathMaxIntInt(HInvoke* invoke) {
// long java.lang.Math.max(long, long)
void IntrinsicLocationsBuilderMIPS64::VisitMathMaxLongLong(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathMaxLongLong(HInvoke* invoke) {
@@ -763,7 +756,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathMaxLongLong(HInvoke* invoke) {
// double java.lang.Math.sqrt(double)
void IntrinsicLocationsBuilderMIPS64::VisitMathSqrt(HInvoke* invoke) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathSqrt(HInvoke* invoke) {
@@ -775,19 +768,18 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathSqrt(HInvoke* invoke) {
__ SqrtD(out, in);
}
-static void CreateFPToFP(ArenaAllocator* arena,
+static void CreateFPToFP(ArenaAllocator* allocator,
HInvoke* invoke,
Location::OutputOverlap overlaps = Location::kOutputOverlap) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresFpuRegister(), overlaps);
}
// double java.lang.Math.rint(double)
void IntrinsicLocationsBuilderMIPS64::VisitMathRint(HInvoke* invoke) {
- CreateFPToFP(arena_, invoke, Location::kNoOutputOverlap);
+ CreateFPToFP(allocator_, invoke, Location::kNoOutputOverlap);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathRint(HInvoke* invoke) {
@@ -801,7 +793,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathRint(HInvoke* invoke) {
// double java.lang.Math.floor(double)
void IntrinsicLocationsBuilderMIPS64::VisitMathFloor(HInvoke* invoke) {
- CreateFPToFP(arena_, invoke);
+ CreateFPToFP(allocator_, invoke);
}
const constexpr uint16_t kFPLeaveUnchanged = kPositiveZero |
@@ -878,7 +870,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathFloor(HInvoke* invoke) {
// double java.lang.Math.ceil(double)
void IntrinsicLocationsBuilderMIPS64::VisitMathCeil(HInvoke* invoke) {
- CreateFPToFP(arena_, invoke);
+ CreateFPToFP(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathCeil(HInvoke* invoke) {
@@ -961,9 +953,8 @@ static void GenRound(LocationSummary* locations, Mips64Assembler* assembler, Dat
// int java.lang.Math.round(float)
void IntrinsicLocationsBuilderMIPS64::VisitMathRoundFloat(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->AddTemp(Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresRegister());
@@ -975,9 +966,8 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathRoundFloat(HInvoke* invoke) {
// long java.lang.Math.round(double)
void IntrinsicLocationsBuilderMIPS64::VisitMathRoundDouble(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->AddTemp(Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresRegister());
@@ -989,7 +979,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathRoundDouble(HInvoke* invoke) {
// byte libcore.io.Memory.peekByte(long address)
void IntrinsicLocationsBuilderMIPS64::VisitMemoryPeekByte(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMemoryPeekByte(HInvoke* invoke) {
@@ -1002,7 +992,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMemoryPeekByte(HInvoke* invoke) {
// short libcore.io.Memory.peekShort(long address)
void IntrinsicLocationsBuilderMIPS64::VisitMemoryPeekShortNative(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMemoryPeekShortNative(HInvoke* invoke) {
@@ -1015,7 +1005,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMemoryPeekShortNative(HInvoke* invoke) {
// int libcore.io.Memory.peekInt(long address)
void IntrinsicLocationsBuilderMIPS64::VisitMemoryPeekIntNative(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMemoryPeekIntNative(HInvoke* invoke) {
@@ -1028,7 +1018,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMemoryPeekIntNative(HInvoke* invoke) {
// long libcore.io.Memory.peekLong(long address)
void IntrinsicLocationsBuilderMIPS64::VisitMemoryPeekLongNative(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMemoryPeekLongNative(HInvoke* invoke) {
@@ -1039,17 +1029,16 @@ void IntrinsicCodeGeneratorMIPS64::VisitMemoryPeekLongNative(HInvoke* invoke) {
__ Ld(out, adr, 0);
}
-static void CreateIntIntToVoidLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntIntToVoidLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
}
// void libcore.io.Memory.pokeByte(long address, byte value)
void IntrinsicLocationsBuilderMIPS64::VisitMemoryPokeByte(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMemoryPokeByte(HInvoke* invoke) {
@@ -1062,7 +1051,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMemoryPokeByte(HInvoke* invoke) {
// void libcore.io.Memory.pokeShort(long address, short value)
void IntrinsicLocationsBuilderMIPS64::VisitMemoryPokeShortNative(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMemoryPokeShortNative(HInvoke* invoke) {
@@ -1075,7 +1064,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMemoryPokeShortNative(HInvoke* invoke) {
// void libcore.io.Memory.pokeInt(long address, int value)
void IntrinsicLocationsBuilderMIPS64::VisitMemoryPokeIntNative(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMemoryPokeIntNative(HInvoke* invoke) {
@@ -1088,7 +1077,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMemoryPokeIntNative(HInvoke* invoke) {
// void libcore.io.Memory.pokeLong(long address, long value)
void IntrinsicLocationsBuilderMIPS64::VisitMemoryPokeLongNative(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMemoryPokeLongNative(HInvoke* invoke) {
@@ -1101,9 +1090,8 @@ void IntrinsicCodeGeneratorMIPS64::VisitMemoryPokeLongNative(HInvoke* invoke) {
// Thread java.lang.Thread.currentThread()
void IntrinsicLocationsBuilderMIPS64::VisitThreadCurrentThread(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetOut(Location::RequiresRegister());
}
@@ -1117,17 +1105,18 @@ void IntrinsicCodeGeneratorMIPS64::VisitThreadCurrentThread(HInvoke* invoke) {
Thread::PeerOffset<kMips64PointerSize>().Int32Value());
}
-static void CreateIntIntIntToIntLocations(ArenaAllocator* arena,
+static void CreateIntIntIntToIntLocations(ArenaAllocator* allocator,
HInvoke* invoke,
DataType::Type type) {
bool can_call = kEmitCompilerReadBarrier &&
(invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObject ||
invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile);
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- (can_call
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall),
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke,
+ can_call
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall,
+ kIntrinsified);
if (can_call && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -1227,7 +1216,7 @@ static void GenUnsafeGet(HInvoke* invoke,
// int sun.misc.Unsafe.getInt(Object o, long offset)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGet(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt32);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt32);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGet(HInvoke* invoke) {
@@ -1236,7 +1225,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGet(HInvoke* invoke) {
// int sun.misc.Unsafe.getIntVolatile(Object o, long offset)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt32);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt32);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetVolatile(HInvoke* invoke) {
@@ -1245,7 +1234,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetVolatile(HInvoke* invoke) {
// long sun.misc.Unsafe.getLong(Object o, long offset)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetLong(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt64);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt64);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetLong(HInvoke* invoke) {
@@ -1254,7 +1243,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetLong(HInvoke* invoke) {
// long sun.misc.Unsafe.getLongVolatile(Object o, long offset)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt64);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt64);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
@@ -1263,7 +1252,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
// Object sun.misc.Unsafe.getObject(Object o, long offset)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetObject(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kReference);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kReference);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetObject(HInvoke* invoke) {
@@ -1272,17 +1261,16 @@ void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetObject(HInvoke* invoke) {
// Object sun.misc.Unsafe.getObjectVolatile(Object o, long offset)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kReference);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kReference);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, codegen_);
}
-static void CreateIntIntIntIntToVoid(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntIntIntIntToVoid(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
@@ -1341,7 +1329,7 @@ static void GenUnsafePut(LocationSummary* locations,
// void sun.misc.Unsafe.putInt(Object o, long offset, int x)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafePut(HInvoke* invoke) {
- CreateIntIntIntIntToVoid(arena_, invoke);
+ CreateIntIntIntIntToVoid(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafePut(HInvoke* invoke) {
@@ -1354,7 +1342,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitUnsafePut(HInvoke* invoke) {
// void sun.misc.Unsafe.putOrderedInt(Object o, long offset, int x)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutOrdered(HInvoke* invoke) {
- CreateIntIntIntIntToVoid(arena_, invoke);
+ CreateIntIntIntIntToVoid(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutOrdered(HInvoke* invoke) {
@@ -1367,7 +1355,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutOrdered(HInvoke* invoke) {
// void sun.misc.Unsafe.putIntVolatile(Object o, long offset, int x)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutVolatile(HInvoke* invoke) {
- CreateIntIntIntIntToVoid(arena_, invoke);
+ CreateIntIntIntIntToVoid(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutVolatile(HInvoke* invoke) {
@@ -1380,7 +1368,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutVolatile(HInvoke* invoke) {
// void sun.misc.Unsafe.putObject(Object o, long offset, Object x)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutObject(HInvoke* invoke) {
- CreateIntIntIntIntToVoid(arena_, invoke);
+ CreateIntIntIntIntToVoid(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutObject(HInvoke* invoke) {
@@ -1393,7 +1381,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutObject(HInvoke* invoke) {
// void sun.misc.Unsafe.putOrderedObject(Object o, long offset, Object x)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
- CreateIntIntIntIntToVoid(arena_, invoke);
+ CreateIntIntIntIntToVoid(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
@@ -1406,7 +1394,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutObjectOrdered(HInvoke* invoke)
// void sun.misc.Unsafe.putObjectVolatile(Object o, long offset, Object x)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
- CreateIntIntIntIntToVoid(arena_, invoke);
+ CreateIntIntIntIntToVoid(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
@@ -1419,7 +1407,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutObjectVolatile(HInvoke* invoke)
// void sun.misc.Unsafe.putLong(Object o, long offset, long x)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutLong(HInvoke* invoke) {
- CreateIntIntIntIntToVoid(arena_, invoke);
+ CreateIntIntIntIntToVoid(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutLong(HInvoke* invoke) {
@@ -1432,7 +1420,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutLong(HInvoke* invoke) {
// void sun.misc.Unsafe.putOrderedLong(Object o, long offset, long x)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutLongOrdered(HInvoke* invoke) {
- CreateIntIntIntIntToVoid(arena_, invoke);
+ CreateIntIntIntIntToVoid(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutLongOrdered(HInvoke* invoke) {
@@ -1445,7 +1433,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutLongOrdered(HInvoke* invoke) {
// void sun.misc.Unsafe.putLongVolatile(Object o, long offset, long x)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutLongVolatile(HInvoke* invoke) {
- CreateIntIntIntIntToVoid(arena_, invoke);
+ CreateIntIntIntIntToVoid(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutLongVolatile(HInvoke* invoke) {
@@ -1456,15 +1444,16 @@ void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutLongVolatile(HInvoke* invoke) {
codegen_);
}
-static void CreateIntIntIntIntIntToIntPlusTemps(ArenaAllocator* arena, HInvoke* invoke) {
+static void CreateIntIntIntIntIntToIntPlusTemps(ArenaAllocator* allocator, HInvoke* invoke) {
bool can_call = kEmitCompilerReadBarrier &&
kUseBakerReadBarrier &&
(invoke->GetIntrinsic() == Intrinsics::kUnsafeCASObject);
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- (can_call
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall),
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke,
+ can_call
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall,
+ kIntrinsified);
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
@@ -1583,7 +1572,7 @@ static void GenCas(HInvoke* invoke, DataType::Type type, CodeGeneratorMIPS64* co
// boolean sun.misc.Unsafe.compareAndSwapInt(Object o, long offset, int expected, int x)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafeCASInt(HInvoke* invoke) {
- CreateIntIntIntIntIntToIntPlusTemps(arena_, invoke);
+ CreateIntIntIntIntIntToIntPlusTemps(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafeCASInt(HInvoke* invoke) {
@@ -1592,7 +1581,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitUnsafeCASInt(HInvoke* invoke) {
// boolean sun.misc.Unsafe.compareAndSwapLong(Object o, long offset, long expected, long x)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafeCASLong(HInvoke* invoke) {
- CreateIntIntIntIntIntToIntPlusTemps(arena_, invoke);
+ CreateIntIntIntIntIntToIntPlusTemps(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafeCASLong(HInvoke* invoke) {
@@ -1607,7 +1596,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitUnsafeCASObject(HInvoke* invoke) {
return;
}
- CreateIntIntIntIntIntToIntPlusTemps(arena_, invoke);
+ CreateIntIntIntIntIntToIntPlusTemps(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafeCASObject(HInvoke* invoke) {
@@ -1620,9 +1609,8 @@ void IntrinsicCodeGeneratorMIPS64::VisitUnsafeCASObject(HInvoke* invoke) {
// int java.lang.String.compareTo(String anotherString)
void IntrinsicLocationsBuilderMIPS64::VisitStringCompareTo(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
@@ -1648,9 +1636,8 @@ void IntrinsicCodeGeneratorMIPS64::VisitStringCompareTo(HInvoke* invoke) {
// boolean java.lang.String.equals(Object anObject)
void IntrinsicLocationsBuilderMIPS64::VisitStringEquals(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister());
@@ -1814,9 +1801,8 @@ static void GenerateStringIndexOf(HInvoke* invoke,
// int java.lang.String.indexOf(int ch)
void IntrinsicLocationsBuilderMIPS64::VisitStringIndexOf(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
// We have a hand-crafted assembly stub that follows the runtime
// calling convention. So it's best to align the inputs accordingly.
InvokeRuntimeCallingConvention calling_convention;
@@ -1835,9 +1821,8 @@ void IntrinsicCodeGeneratorMIPS64::VisitStringIndexOf(HInvoke* invoke) {
// int java.lang.String.indexOf(int ch, int fromIndex)
void IntrinsicLocationsBuilderMIPS64::VisitStringIndexOfAfter(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
// We have a hand-crafted assembly stub that follows the runtime
// calling convention. So it's best to align the inputs accordingly.
InvokeRuntimeCallingConvention calling_convention;
@@ -1855,9 +1840,8 @@ void IntrinsicCodeGeneratorMIPS64::VisitStringIndexOfAfter(HInvoke* invoke) {
// java.lang.StringFactory.newStringFromBytes(byte[] data, int high, int offset, int byteCount)
void IntrinsicLocationsBuilderMIPS64::VisitStringNewStringFromBytes(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
@@ -1883,9 +1867,8 @@ void IntrinsicCodeGeneratorMIPS64::VisitStringNewStringFromBytes(HInvoke* invoke
// java.lang.StringFactory.newStringFromChars(int offset, int charCount, char[] data)
void IntrinsicLocationsBuilderMIPS64::VisitStringNewStringFromChars(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
@@ -1907,9 +1890,8 @@ void IntrinsicCodeGeneratorMIPS64::VisitStringNewStringFromChars(HInvoke* invoke
// java.lang.StringFactory.newStringFromString(String toCopy)
void IntrinsicLocationsBuilderMIPS64::VisitStringNewStringFromString(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
Location outLocation = calling_convention.GetReturnLocation(DataType::Type::kInt32);
@@ -1948,7 +1930,7 @@ static void GenIsInfinite(LocationSummary* locations,
// boolean java.lang.Float.isInfinite(float)
void IntrinsicLocationsBuilderMIPS64::VisitFloatIsInfinite(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke);
+ CreateFPToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitFloatIsInfinite(HInvoke* invoke) {
@@ -1957,7 +1939,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitFloatIsInfinite(HInvoke* invoke) {
// boolean java.lang.Double.isInfinite(double)
void IntrinsicLocationsBuilderMIPS64::VisitDoubleIsInfinite(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke);
+ CreateFPToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitDoubleIsInfinite(HInvoke* invoke) {
@@ -1966,9 +1948,8 @@ void IntrinsicCodeGeneratorMIPS64::VisitDoubleIsInfinite(HInvoke* invoke) {
// void java.lang.String.getChars(int srcBegin, int srcEnd, char[] dst, int dstBegin)
void IntrinsicLocationsBuilderMIPS64::VisitStringGetCharsNoCheck(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
@@ -2083,7 +2064,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitSystemArrayCopyChar(HInvoke* invoke)
// Okay, it is safe to generate inline code.
LocationSummary* locations =
- new (arena_) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
+ new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
// arraycopy(Object src, int srcPos, Object dest, int destPos, int length).
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
@@ -2277,7 +2258,7 @@ static void GenHighestOneBit(LocationSummary* locations,
// int java.lang.Integer.highestOneBit(int)
void IntrinsicLocationsBuilderMIPS64::VisitIntegerHighestOneBit(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitIntegerHighestOneBit(HInvoke* invoke) {
@@ -2286,7 +2267,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitIntegerHighestOneBit(HInvoke* invoke) {
// long java.lang.Long.highestOneBit(long)
void IntrinsicLocationsBuilderMIPS64::VisitLongHighestOneBit(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitLongHighestOneBit(HInvoke* invoke) {
@@ -2311,7 +2292,7 @@ static void GenLowestOneBit(LocationSummary* locations,
// int java.lang.Integer.lowestOneBit(int)
void IntrinsicLocationsBuilderMIPS64::VisitIntegerLowestOneBit(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitIntegerLowestOneBit(HInvoke* invoke) {
@@ -2320,27 +2301,25 @@ void IntrinsicCodeGeneratorMIPS64::VisitIntegerLowestOneBit(HInvoke* invoke) {
// long java.lang.Long.lowestOneBit(long)
void IntrinsicLocationsBuilderMIPS64::VisitLongLowestOneBit(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitLongLowestOneBit(HInvoke* invoke) {
GenLowestOneBit(invoke->GetLocations(), DataType::Type::kInt64, GetAssembler());
}
-static void CreateFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
- kIntrinsified);
+static void CreateFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
locations->SetOut(calling_convention.GetReturnLocation(DataType::Type::kFloat64));
}
-static void CreateFPFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
- kIntrinsified);
+static void CreateFPFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
@@ -2376,7 +2355,7 @@ static void GenFPFPToFPCall(HInvoke* invoke,
// static double java.lang.Math.cos(double a)
void IntrinsicLocationsBuilderMIPS64::VisitMathCos(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathCos(HInvoke* invoke) {
@@ -2385,7 +2364,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathCos(HInvoke* invoke) {
// static double java.lang.Math.sin(double a)
void IntrinsicLocationsBuilderMIPS64::VisitMathSin(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathSin(HInvoke* invoke) {
@@ -2394,7 +2373,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathSin(HInvoke* invoke) {
// static double java.lang.Math.acos(double a)
void IntrinsicLocationsBuilderMIPS64::VisitMathAcos(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathAcos(HInvoke* invoke) {
@@ -2403,7 +2382,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathAcos(HInvoke* invoke) {
// static double java.lang.Math.asin(double a)
void IntrinsicLocationsBuilderMIPS64::VisitMathAsin(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathAsin(HInvoke* invoke) {
@@ -2412,7 +2391,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathAsin(HInvoke* invoke) {
// static double java.lang.Math.atan(double a)
void IntrinsicLocationsBuilderMIPS64::VisitMathAtan(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathAtan(HInvoke* invoke) {
@@ -2421,7 +2400,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathAtan(HInvoke* invoke) {
// static double java.lang.Math.atan2(double y, double x)
void IntrinsicLocationsBuilderMIPS64::VisitMathAtan2(HInvoke* invoke) {
- CreateFPFPToFPCallLocations(arena_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathAtan2(HInvoke* invoke) {
@@ -2430,7 +2409,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathAtan2(HInvoke* invoke) {
// static double java.lang.Math.cbrt(double a)
void IntrinsicLocationsBuilderMIPS64::VisitMathCbrt(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathCbrt(HInvoke* invoke) {
@@ -2439,7 +2418,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathCbrt(HInvoke* invoke) {
// static double java.lang.Math.cosh(double x)
void IntrinsicLocationsBuilderMIPS64::VisitMathCosh(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathCosh(HInvoke* invoke) {
@@ -2448,7 +2427,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathCosh(HInvoke* invoke) {
// static double java.lang.Math.exp(double a)
void IntrinsicLocationsBuilderMIPS64::VisitMathExp(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathExp(HInvoke* invoke) {
@@ -2457,7 +2436,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathExp(HInvoke* invoke) {
// static double java.lang.Math.expm1(double x)
void IntrinsicLocationsBuilderMIPS64::VisitMathExpm1(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathExpm1(HInvoke* invoke) {
@@ -2466,7 +2445,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathExpm1(HInvoke* invoke) {
// static double java.lang.Math.hypot(double x, double y)
void IntrinsicLocationsBuilderMIPS64::VisitMathHypot(HInvoke* invoke) {
- CreateFPFPToFPCallLocations(arena_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathHypot(HInvoke* invoke) {
@@ -2475,7 +2454,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathHypot(HInvoke* invoke) {
// static double java.lang.Math.log(double a)
void IntrinsicLocationsBuilderMIPS64::VisitMathLog(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathLog(HInvoke* invoke) {
@@ -2484,7 +2463,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathLog(HInvoke* invoke) {
// static double java.lang.Math.log10(double x)
void IntrinsicLocationsBuilderMIPS64::VisitMathLog10(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathLog10(HInvoke* invoke) {
@@ -2493,7 +2472,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathLog10(HInvoke* invoke) {
// static double java.lang.Math.nextAfter(double start, double direction)
void IntrinsicLocationsBuilderMIPS64::VisitMathNextAfter(HInvoke* invoke) {
- CreateFPFPToFPCallLocations(arena_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathNextAfter(HInvoke* invoke) {
@@ -2502,7 +2481,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathNextAfter(HInvoke* invoke) {
// static double java.lang.Math.sinh(double x)
void IntrinsicLocationsBuilderMIPS64::VisitMathSinh(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathSinh(HInvoke* invoke) {
@@ -2511,7 +2490,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathSinh(HInvoke* invoke) {
// static double java.lang.Math.tan(double a)
void IntrinsicLocationsBuilderMIPS64::VisitMathTan(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathTan(HInvoke* invoke) {
@@ -2520,7 +2499,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathTan(HInvoke* invoke) {
// static double java.lang.Math.tanh(double x)
void IntrinsicLocationsBuilderMIPS64::VisitMathTanh(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathTanh(HInvoke* invoke) {
diff --git a/compiler/optimizing/intrinsics_mips64.h b/compiler/optimizing/intrinsics_mips64.h
index 6880a255c3..6085c7b29c 100644
--- a/compiler/optimizing/intrinsics_mips64.h
+++ b/compiler/optimizing/intrinsics_mips64.h
@@ -50,7 +50,7 @@ class IntrinsicLocationsBuilderMIPS64 FINAL : public IntrinsicVisitor {
private:
CodeGeneratorMIPS64* codegen_;
- ArenaAllocator* arena_;
+ ArenaAllocator* allocator_;
DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderMIPS64);
};
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index a5916228a8..8b389ba876 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -46,7 +46,7 @@ static constexpr int64_t kDoubleNaN = INT64_C(0x7FF8000000000000);
static constexpr int32_t kFloatNaN = INT32_C(0x7FC00000);
IntrinsicLocationsBuilderX86::IntrinsicLocationsBuilderX86(CodeGeneratorX86* codegen)
- : arena_(codegen->GetGraph()->GetArena()),
+ : allocator_(codegen->GetGraph()->GetAllocator()),
codegen_(codegen) {
}
@@ -56,7 +56,7 @@ X86Assembler* IntrinsicCodeGeneratorX86::GetAssembler() {
}
ArenaAllocator* IntrinsicCodeGeneratorX86::GetAllocator() {
- return codegen_->GetGraph()->GetArena();
+ return codegen_->GetGraph()->GetAllocator();
}
bool IntrinsicLocationsBuilderX86::TryDispatch(HInvoke* invoke) {
@@ -175,10 +175,9 @@ class ReadBarrierSystemArrayCopySlowPathX86 : public SlowPathCode {
#define __ assembler->
-static void CreateFPToIntLocations(ArenaAllocator* arena, HInvoke* invoke, bool is64bit) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateFPToIntLocations(ArenaAllocator* allocator, HInvoke* invoke, bool is64bit) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresRegister());
if (is64bit) {
@@ -186,10 +185,9 @@ static void CreateFPToIntLocations(ArenaAllocator* arena, HInvoke* invoke, bool
}
}
-static void CreateIntToFPLocations(ArenaAllocator* arena, HInvoke* invoke, bool is64bit) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntToFPLocations(ArenaAllocator* allocator, HInvoke* invoke, bool is64bit) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresFpuRegister());
if (is64bit) {
@@ -230,10 +228,10 @@ static void MoveIntToFP(LocationSummary* locations, bool is64bit, X86Assembler*
}
void IntrinsicLocationsBuilderX86::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke, /* is64bit */ true);
+ CreateFPToIntLocations(allocator_, invoke, /* is64bit */ true);
}
void IntrinsicLocationsBuilderX86::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
- CreateIntToFPLocations(arena_, invoke, /* is64bit */ true);
+ CreateIntToFPLocations(allocator_, invoke, /* is64bit */ true);
}
void IntrinsicCodeGeneratorX86::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
@@ -244,10 +242,10 @@ void IntrinsicCodeGeneratorX86::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke, /* is64bit */ false);
+ CreateFPToIntLocations(allocator_, invoke, /* is64bit */ false);
}
void IntrinsicLocationsBuilderX86::VisitFloatIntBitsToFloat(HInvoke* invoke) {
- CreateIntToFPLocations(arena_, invoke, /* is64bit */ false);
+ CreateIntToFPLocations(allocator_, invoke, /* is64bit */ false);
}
void IntrinsicCodeGeneratorX86::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
@@ -257,26 +255,23 @@ void IntrinsicCodeGeneratorX86::VisitFloatIntBitsToFloat(HInvoke* invoke) {
MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
}
-static void CreateIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::SameAsFirstInput());
}
-static void CreateLongToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateLongToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister());
}
-static void CreateLongToLongLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateLongToLongLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
}
@@ -302,7 +297,7 @@ static void GenReverseBytes(LocationSummary* locations,
}
void IntrinsicLocationsBuilderX86::VisitIntegerReverseBytes(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitIntegerReverseBytes(HInvoke* invoke) {
@@ -310,7 +305,7 @@ void IntrinsicCodeGeneratorX86::VisitIntegerReverseBytes(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86::VisitLongReverseBytes(HInvoke* invoke) {
- CreateLongToLongLocations(arena_, invoke);
+ CreateLongToLongLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitLongReverseBytes(HInvoke* invoke) {
@@ -331,7 +326,7 @@ void IntrinsicCodeGeneratorX86::VisitLongReverseBytes(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86::VisitShortReverseBytes(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitShortReverseBytes(HInvoke* invoke) {
@@ -342,11 +337,10 @@ void IntrinsicCodeGeneratorX86::VisitShortReverseBytes(HInvoke* invoke) {
// TODO: Consider Quick's way of doing Double abs through integer operations, as the immediate we
// need is 64b.
-static void CreateFloatToFloat(ArenaAllocator* arena, HInvoke* invoke) {
+static void CreateFloatToFloat(ArenaAllocator* allocator, HInvoke* invoke) {
// TODO: Enable memory operations when the assembler supports them.
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::SameAsFirstInput());
HInvokeStaticOrDirect* static_or_direct = invoke->AsInvokeStaticOrDirect();
@@ -401,7 +395,7 @@ static void MathAbsFP(HInvoke* invoke,
}
void IntrinsicLocationsBuilderX86::VisitMathAbsDouble(HInvoke* invoke) {
- CreateFloatToFloat(arena_, invoke);
+ CreateFloatToFloat(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathAbsDouble(HInvoke* invoke) {
@@ -409,17 +403,16 @@ void IntrinsicCodeGeneratorX86::VisitMathAbsDouble(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86::VisitMathAbsFloat(HInvoke* invoke) {
- CreateFloatToFloat(arena_, invoke);
+ CreateFloatToFloat(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathAbsFloat(HInvoke* invoke) {
MathAbsFP(invoke, /* is64bit */ false, GetAssembler(), codegen_);
}
-static void CreateAbsIntLocation(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateAbsIntLocation(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RegisterLocation(EAX));
locations->SetOut(Location::SameAsFirstInput());
locations->AddTemp(Location::RegisterLocation(EDX));
@@ -444,10 +437,9 @@ static void GenAbsInteger(LocationSummary* locations, X86Assembler* assembler) {
// The result is in EAX.
}
-static void CreateAbsLongLocation(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateAbsLongLocation(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
locations->AddTemp(Location::RequiresRegister());
@@ -480,7 +472,7 @@ static void GenAbsLong(LocationSummary* locations, X86Assembler* assembler) {
}
void IntrinsicLocationsBuilderX86::VisitMathAbsInt(HInvoke* invoke) {
- CreateAbsIntLocation(arena_, invoke);
+ CreateAbsIntLocation(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathAbsInt(HInvoke* invoke) {
@@ -488,7 +480,7 @@ void IntrinsicCodeGeneratorX86::VisitMathAbsInt(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86::VisitMathAbsLong(HInvoke* invoke) {
- CreateAbsLongLocation(arena_, invoke);
+ CreateAbsLongLocation(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathAbsLong(HInvoke* invoke) {
@@ -598,10 +590,9 @@ static void GenMinMaxFP(HInvoke* invoke,
__ Bind(&done);
}
-static void CreateFPFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateFPFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetInAt(1, Location::RequiresFpuRegister());
// The following is sub-optimal, but all we can do for now. It would be fine to also accept
@@ -616,7 +607,7 @@ static void CreateFPFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86::VisitMathMinDoubleDouble(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathMinDoubleDouble(HInvoke* invoke) {
@@ -628,7 +619,7 @@ void IntrinsicCodeGeneratorX86::VisitMathMinDoubleDouble(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86::VisitMathMinFloatFloat(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathMinFloatFloat(HInvoke* invoke) {
@@ -640,7 +631,7 @@ void IntrinsicCodeGeneratorX86::VisitMathMinFloatFloat(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86::VisitMathMaxDoubleDouble(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathMaxDoubleDouble(HInvoke* invoke) {
@@ -652,7 +643,7 @@ void IntrinsicCodeGeneratorX86::VisitMathMaxDoubleDouble(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86::VisitMathMaxFloatFloat(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathMaxFloatFloat(HInvoke* invoke) {
@@ -718,19 +709,17 @@ static void GenMinMax(LocationSummary* locations, bool is_min, bool is_long,
}
}
-static void CreateIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->SetOut(Location::SameAsFirstInput());
}
-static void CreateLongLongToLongLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateLongLongToLongLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->SetOut(Location::SameAsFirstInput());
@@ -739,7 +728,7 @@ static void CreateLongLongToLongLocations(ArenaAllocator* arena, HInvoke* invoke
}
void IntrinsicLocationsBuilderX86::VisitMathMinIntInt(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathMinIntInt(HInvoke* invoke) {
@@ -747,7 +736,7 @@ void IntrinsicCodeGeneratorX86::VisitMathMinIntInt(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86::VisitMathMinLongLong(HInvoke* invoke) {
- CreateLongLongToLongLocations(arena_, invoke);
+ CreateLongLongToLongLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathMinLongLong(HInvoke* invoke) {
@@ -755,7 +744,7 @@ void IntrinsicCodeGeneratorX86::VisitMathMinLongLong(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86::VisitMathMaxIntInt(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathMaxIntInt(HInvoke* invoke) {
@@ -763,23 +752,22 @@ void IntrinsicCodeGeneratorX86::VisitMathMaxIntInt(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86::VisitMathMaxLongLong(HInvoke* invoke) {
- CreateLongLongToLongLocations(arena_, invoke);
+ CreateLongLongToLongLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathMaxLongLong(HInvoke* invoke) {
GenMinMax(invoke->GetLocations(), /* is_min */ false, /* is_long */ true, GetAssembler());
}
-static void CreateFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresFpuRegister());
}
void IntrinsicLocationsBuilderX86::VisitMathSqrt(HInvoke* invoke) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathSqrt(HInvoke* invoke) {
@@ -805,18 +793,18 @@ static void InvokeOutOfLineIntrinsic(CodeGeneratorX86* codegen, HInvoke* invoke)
}
}
-static void CreateSSE41FPToFPLocations(ArenaAllocator* arena,
- HInvoke* invoke,
- CodeGeneratorX86* codegen) {
+static void CreateSSE41FPToFPLocations(ArenaAllocator* allocator,
+ HInvoke* invoke,
+ CodeGeneratorX86* codegen) {
// Do we have instruction support?
if (codegen->GetInstructionSetFeatures().HasSSE4_1()) {
- CreateFPToFPLocations(arena, invoke);
+ CreateFPToFPLocations(allocator, invoke);
return;
}
// We have to fall back to a call to the intrinsic.
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetFpuRegisterAt(0)));
locations->SetOut(Location::FpuRegisterLocation(XMM0));
@@ -839,7 +827,7 @@ static void GenSSE41FPToFPIntrinsic(CodeGeneratorX86* codegen,
}
void IntrinsicLocationsBuilderX86::VisitMathCeil(HInvoke* invoke) {
- CreateSSE41FPToFPLocations(arena_, invoke, codegen_);
+ CreateSSE41FPToFPLocations(allocator_, invoke, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitMathCeil(HInvoke* invoke) {
@@ -847,7 +835,7 @@ void IntrinsicCodeGeneratorX86::VisitMathCeil(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86::VisitMathFloor(HInvoke* invoke) {
- CreateSSE41FPToFPLocations(arena_, invoke, codegen_);
+ CreateSSE41FPToFPLocations(allocator_, invoke, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitMathFloor(HInvoke* invoke) {
@@ -855,7 +843,7 @@ void IntrinsicCodeGeneratorX86::VisitMathFloor(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86::VisitMathRint(HInvoke* invoke) {
- CreateSSE41FPToFPLocations(arena_, invoke, codegen_);
+ CreateSSE41FPToFPLocations(allocator_, invoke, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitMathRint(HInvoke* invoke) {
@@ -867,9 +855,8 @@ void IntrinsicLocationsBuilderX86::VisitMathRoundFloat(HInvoke* invoke) {
if (codegen_->GetInstructionSetFeatures().HasSSE4_1()) {
HInvokeStaticOrDirect* static_or_direct = invoke->AsInvokeStaticOrDirect();
DCHECK(static_or_direct != nullptr);
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
if (static_or_direct->HasSpecialInput() &&
invoke->InputAt(
@@ -883,8 +870,8 @@ void IntrinsicLocationsBuilderX86::VisitMathRoundFloat(HInvoke* invoke) {
}
// We have to fall back to a call to the intrinsic.
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetFpuRegisterAt(0)));
locations->SetOut(Location::RegisterLocation(EAX));
@@ -951,11 +938,9 @@ void IntrinsicCodeGeneratorX86::VisitMathRoundFloat(HInvoke* invoke) {
__ Bind(&done);
}
-static void CreateFPToFPCallLocations(ArenaAllocator* arena,
- HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
- kIntrinsified);
+static void CreateFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
locations->SetOut(Location::FpuRegisterLocation(XMM0));
@@ -992,7 +977,7 @@ static void GenFPToFPCall(HInvoke* invoke, CodeGeneratorX86* codegen, QuickEntry
}
void IntrinsicLocationsBuilderX86::VisitMathCos(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathCos(HInvoke* invoke) {
@@ -1000,7 +985,7 @@ void IntrinsicCodeGeneratorX86::VisitMathCos(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86::VisitMathSin(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathSin(HInvoke* invoke) {
@@ -1008,7 +993,7 @@ void IntrinsicCodeGeneratorX86::VisitMathSin(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86::VisitMathAcos(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathAcos(HInvoke* invoke) {
@@ -1016,7 +1001,7 @@ void IntrinsicCodeGeneratorX86::VisitMathAcos(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86::VisitMathAsin(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathAsin(HInvoke* invoke) {
@@ -1024,7 +1009,7 @@ void IntrinsicCodeGeneratorX86::VisitMathAsin(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86::VisitMathAtan(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathAtan(HInvoke* invoke) {
@@ -1032,7 +1017,7 @@ void IntrinsicCodeGeneratorX86::VisitMathAtan(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86::VisitMathCbrt(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathCbrt(HInvoke* invoke) {
@@ -1040,7 +1025,7 @@ void IntrinsicCodeGeneratorX86::VisitMathCbrt(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86::VisitMathCosh(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathCosh(HInvoke* invoke) {
@@ -1048,7 +1033,7 @@ void IntrinsicCodeGeneratorX86::VisitMathCosh(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86::VisitMathExp(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathExp(HInvoke* invoke) {
@@ -1056,7 +1041,7 @@ void IntrinsicCodeGeneratorX86::VisitMathExp(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86::VisitMathExpm1(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathExpm1(HInvoke* invoke) {
@@ -1064,7 +1049,7 @@ void IntrinsicCodeGeneratorX86::VisitMathExpm1(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86::VisitMathLog(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathLog(HInvoke* invoke) {
@@ -1072,7 +1057,7 @@ void IntrinsicCodeGeneratorX86::VisitMathLog(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86::VisitMathLog10(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathLog10(HInvoke* invoke) {
@@ -1080,7 +1065,7 @@ void IntrinsicCodeGeneratorX86::VisitMathLog10(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86::VisitMathSinh(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathSinh(HInvoke* invoke) {
@@ -1088,7 +1073,7 @@ void IntrinsicCodeGeneratorX86::VisitMathSinh(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86::VisitMathTan(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathTan(HInvoke* invoke) {
@@ -1096,18 +1081,16 @@ void IntrinsicCodeGeneratorX86::VisitMathTan(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86::VisitMathTanh(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathTanh(HInvoke* invoke) {
GenFPToFPCall(invoke, codegen_, kQuickTanh);
}
-static void CreateFPFPToFPCallLocations(ArenaAllocator* arena,
- HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
- kIntrinsified);
+static void CreateFPFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
@@ -1115,7 +1098,7 @@ static void CreateFPFPToFPCallLocations(ArenaAllocator* arena,
}
void IntrinsicLocationsBuilderX86::VisitMathAtan2(HInvoke* invoke) {
- CreateFPFPToFPCallLocations(arena_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathAtan2(HInvoke* invoke) {
@@ -1123,7 +1106,7 @@ void IntrinsicCodeGeneratorX86::VisitMathAtan2(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86::VisitMathHypot(HInvoke* invoke) {
- CreateFPFPToFPCallLocations(arena_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathHypot(HInvoke* invoke) {
@@ -1131,7 +1114,7 @@ void IntrinsicCodeGeneratorX86::VisitMathHypot(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86::VisitMathNextAfter(HInvoke* invoke) {
- CreateFPFPToFPCallLocations(arena_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathNextAfter(HInvoke* invoke) {
@@ -1174,7 +1157,7 @@ void IntrinsicLocationsBuilderX86::VisitSystemArrayCopyChar(HInvoke* invoke) {
// Okay, it is safe to generate inline code.
LocationSummary* locations =
- new (arena_) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
+ new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
// arraycopy(Object src, int srcPos, Object dest, int destPos, int length).
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
@@ -1336,9 +1319,8 @@ void IntrinsicCodeGeneratorX86::VisitSystemArrayCopyChar(HInvoke* invoke) {
void IntrinsicLocationsBuilderX86::VisitStringCompareTo(HInvoke* invoke) {
// The inputs plus one temp.
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
@@ -1363,9 +1345,8 @@ void IntrinsicCodeGeneratorX86::VisitStringCompareTo(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86::VisitStringEquals(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
@@ -1655,7 +1636,7 @@ static void GenerateStringIndexOf(HInvoke* invoke,
}
void IntrinsicLocationsBuilderX86::VisitStringIndexOf(HInvoke* invoke) {
- CreateStringIndexOfLocations(invoke, arena_, /* start_at_zero */ true);
+ CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero */ true);
}
void IntrinsicCodeGeneratorX86::VisitStringIndexOf(HInvoke* invoke) {
@@ -1663,7 +1644,7 @@ void IntrinsicCodeGeneratorX86::VisitStringIndexOf(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86::VisitStringIndexOfAfter(HInvoke* invoke) {
- CreateStringIndexOfLocations(invoke, arena_, /* start_at_zero */ false);
+ CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero */ false);
}
void IntrinsicCodeGeneratorX86::VisitStringIndexOfAfter(HInvoke* invoke) {
@@ -1672,9 +1653,8 @@ void IntrinsicCodeGeneratorX86::VisitStringIndexOfAfter(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86::VisitStringNewStringFromBytes(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
@@ -1699,9 +1679,8 @@ void IntrinsicCodeGeneratorX86::VisitStringNewStringFromBytes(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86::VisitStringNewStringFromChars(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
@@ -1721,9 +1700,8 @@ void IntrinsicCodeGeneratorX86::VisitStringNewStringFromChars(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86::VisitStringNewStringFromString(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
locations->SetOut(Location::RegisterLocation(EAX));
@@ -1746,9 +1724,8 @@ void IntrinsicCodeGeneratorX86::VisitStringNewStringFromString(HInvoke* invoke)
void IntrinsicLocationsBuilderX86::VisitStringGetCharsNoCheck(HInvoke* invoke) {
// public void getChars(int srcBegin, int srcEnd, char[] dst, int dstBegin);
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
// Place srcEnd in ECX to save a move below.
@@ -1875,7 +1852,7 @@ static void GenPeek(LocationSummary* locations, DataType::Type size, X86Assemble
}
void IntrinsicLocationsBuilderX86::VisitMemoryPeekByte(HInvoke* invoke) {
- CreateLongToIntLocations(arena_, invoke);
+ CreateLongToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMemoryPeekByte(HInvoke* invoke) {
@@ -1883,7 +1860,7 @@ void IntrinsicCodeGeneratorX86::VisitMemoryPeekByte(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86::VisitMemoryPeekIntNative(HInvoke* invoke) {
- CreateLongToIntLocations(arena_, invoke);
+ CreateLongToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMemoryPeekIntNative(HInvoke* invoke) {
@@ -1891,7 +1868,7 @@ void IntrinsicCodeGeneratorX86::VisitMemoryPeekIntNative(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86::VisitMemoryPeekLongNative(HInvoke* invoke) {
- CreateLongToLongLocations(arena_, invoke);
+ CreateLongToLongLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMemoryPeekLongNative(HInvoke* invoke) {
@@ -1899,18 +1876,18 @@ void IntrinsicCodeGeneratorX86::VisitMemoryPeekLongNative(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86::VisitMemoryPeekShortNative(HInvoke* invoke) {
- CreateLongToIntLocations(arena_, invoke);
+ CreateLongToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMemoryPeekShortNative(HInvoke* invoke) {
GenPeek(invoke->GetLocations(), DataType::Type::kInt16, GetAssembler());
}
-static void CreateLongIntToVoidLocations(ArenaAllocator* arena, DataType::Type size,
+static void CreateLongIntToVoidLocations(ArenaAllocator* allocator,
+ DataType::Type size,
HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
HInstruction* value = invoke->InputAt(1);
if (size == DataType::Type::kInt8) {
@@ -1967,7 +1944,7 @@ static void GenPoke(LocationSummary* locations, DataType::Type size, X86Assemble
}
void IntrinsicLocationsBuilderX86::VisitMemoryPokeByte(HInvoke* invoke) {
- CreateLongIntToVoidLocations(arena_, DataType::Type::kInt8, invoke);
+ CreateLongIntToVoidLocations(allocator_, DataType::Type::kInt8, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMemoryPokeByte(HInvoke* invoke) {
@@ -1975,7 +1952,7 @@ void IntrinsicCodeGeneratorX86::VisitMemoryPokeByte(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86::VisitMemoryPokeIntNative(HInvoke* invoke) {
- CreateLongIntToVoidLocations(arena_, DataType::Type::kInt32, invoke);
+ CreateLongIntToVoidLocations(allocator_, DataType::Type::kInt32, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMemoryPokeIntNative(HInvoke* invoke) {
@@ -1983,7 +1960,7 @@ void IntrinsicCodeGeneratorX86::VisitMemoryPokeIntNative(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86::VisitMemoryPokeLongNative(HInvoke* invoke) {
- CreateLongIntToVoidLocations(arena_, DataType::Type::kInt64, invoke);
+ CreateLongIntToVoidLocations(allocator_, DataType::Type::kInt64, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMemoryPokeLongNative(HInvoke* invoke) {
@@ -1991,7 +1968,7 @@ void IntrinsicCodeGeneratorX86::VisitMemoryPokeLongNative(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86::VisitMemoryPokeShortNative(HInvoke* invoke) {
- CreateLongIntToVoidLocations(arena_, DataType::Type::kInt16, invoke);
+ CreateLongIntToVoidLocations(allocator_, DataType::Type::kInt16, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMemoryPokeShortNative(HInvoke* invoke) {
@@ -1999,9 +1976,8 @@ void IntrinsicCodeGeneratorX86::VisitMemoryPokeShortNative(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86::VisitThreadCurrentThread(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetOut(Location::RequiresRegister());
}
@@ -2071,18 +2047,19 @@ static void GenUnsafeGet(HInvoke* invoke,
}
}
-static void CreateIntIntIntToIntLocations(ArenaAllocator* arena,
+static void CreateIntIntIntToIntLocations(ArenaAllocator* allocator,
HInvoke* invoke,
DataType::Type type,
bool is_volatile) {
bool can_call = kEmitCompilerReadBarrier &&
(invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObject ||
invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile);
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- (can_call
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall),
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke,
+ can_call
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall,
+ kIntrinsified);
if (can_call && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -2104,23 +2081,26 @@ static void CreateIntIntIntToIntLocations(ArenaAllocator* arena,
}
void IntrinsicLocationsBuilderX86::VisitUnsafeGet(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt32, /* is_volatile */ false);
+ CreateIntIntIntToIntLocations(
+ allocator_, invoke, DataType::Type::kInt32, /* is_volatile */ false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafeGetVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt32, /* is_volatile */ true);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt32, /* is_volatile */ true);
}
void IntrinsicLocationsBuilderX86::VisitUnsafeGetLong(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt64, /* is_volatile */ false);
+ CreateIntIntIntToIntLocations(
+ allocator_, invoke, DataType::Type::kInt64, /* is_volatile */ false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt64, /* is_volatile */ true);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt64, /* is_volatile */ true);
}
void IntrinsicLocationsBuilderX86::VisitUnsafeGetObject(HInvoke* invoke) {
CreateIntIntIntToIntLocations(
- arena_, invoke, DataType::Type::kReference, /* is_volatile */ false);
+ allocator_, invoke, DataType::Type::kReference, /* is_volatile */ false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kReference, /* is_volatile */ true);
+ CreateIntIntIntToIntLocations(
+ allocator_, invoke, DataType::Type::kReference, /* is_volatile */ true);
}
@@ -2144,13 +2124,12 @@ void IntrinsicCodeGeneratorX86::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
}
-static void CreateIntIntIntIntToVoidPlusTempsLocations(ArenaAllocator* arena,
+static void CreateIntIntIntIntToVoidPlusTempsLocations(ArenaAllocator* allocator,
DataType::Type type,
HInvoke* invoke,
bool is_volatile) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
@@ -2168,39 +2147,39 @@ static void CreateIntIntIntIntToVoidPlusTempsLocations(ArenaAllocator* arena,
void IntrinsicLocationsBuilderX86::VisitUnsafePut(HInvoke* invoke) {
CreateIntIntIntIntToVoidPlusTempsLocations(
- arena_, DataType::Type::kInt32, invoke, /* is_volatile */ false);
+ allocator_, DataType::Type::kInt32, invoke, /* is_volatile */ false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutOrdered(HInvoke* invoke) {
CreateIntIntIntIntToVoidPlusTempsLocations(
- arena_, DataType::Type::kInt32, invoke, /* is_volatile */ false);
+ allocator_, DataType::Type::kInt32, invoke, /* is_volatile */ false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutVolatile(HInvoke* invoke) {
CreateIntIntIntIntToVoidPlusTempsLocations(
- arena_, DataType::Type::kInt32, invoke, /* is_volatile */ true);
+ allocator_, DataType::Type::kInt32, invoke, /* is_volatile */ true);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutObject(HInvoke* invoke) {
CreateIntIntIntIntToVoidPlusTempsLocations(
- arena_, DataType::Type::kReference, invoke, /* is_volatile */ false);
+ allocator_, DataType::Type::kReference, invoke, /* is_volatile */ false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
CreateIntIntIntIntToVoidPlusTempsLocations(
- arena_, DataType::Type::kReference, invoke, /* is_volatile */ false);
+ allocator_, DataType::Type::kReference, invoke, /* is_volatile */ false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
CreateIntIntIntIntToVoidPlusTempsLocations(
- arena_, DataType::Type::kReference, invoke, /* is_volatile */ true);
+ allocator_, DataType::Type::kReference, invoke, /* is_volatile */ true);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutLong(HInvoke* invoke) {
CreateIntIntIntIntToVoidPlusTempsLocations(
- arena_, DataType::Type::kInt64, invoke, /* is_volatile */ false);
+ allocator_, DataType::Type::kInt64, invoke, /* is_volatile */ false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutLongOrdered(HInvoke* invoke) {
CreateIntIntIntIntToVoidPlusTempsLocations(
- arena_, DataType::Type::kInt64, invoke, /* is_volatile */ false);
+ allocator_, DataType::Type::kInt64, invoke, /* is_volatile */ false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutLongVolatile(HInvoke* invoke) {
CreateIntIntIntIntToVoidPlusTempsLocations(
- arena_, DataType::Type::kInt64, invoke, /* is_volatile */ true);
+ allocator_, DataType::Type::kInt64, invoke, /* is_volatile */ true);
}
// We don't care for ordered: it requires an AnyStore barrier, which is already given by the x86
@@ -2282,17 +2261,18 @@ void IntrinsicCodeGeneratorX86::VisitUnsafePutLongVolatile(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile */ true, codegen_);
}
-static void CreateIntIntIntIntIntToInt(ArenaAllocator* arena,
+static void CreateIntIntIntIntIntToInt(ArenaAllocator* allocator,
DataType::Type type,
HInvoke* invoke) {
bool can_call = kEmitCompilerReadBarrier &&
kUseBakerReadBarrier &&
(invoke->GetIntrinsic() == Intrinsics::kUnsafeCASObject);
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- (can_call
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall),
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke,
+ can_call
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall,
+ kIntrinsified);
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
// Offset is a long, but in 32 bit mode, we only need the low word.
@@ -2320,11 +2300,11 @@ static void CreateIntIntIntIntIntToInt(ArenaAllocator* arena,
}
void IntrinsicLocationsBuilderX86::VisitUnsafeCASInt(HInvoke* invoke) {
- CreateIntIntIntIntIntToInt(arena_, DataType::Type::kInt32, invoke);
+ CreateIntIntIntIntIntToInt(allocator_, DataType::Type::kInt32, invoke);
}
void IntrinsicLocationsBuilderX86::VisitUnsafeCASLong(HInvoke* invoke) {
- CreateIntIntIntIntIntToInt(arena_, DataType::Type::kInt64, invoke);
+ CreateIntIntIntIntIntToInt(allocator_, DataType::Type::kInt64, invoke);
}
void IntrinsicLocationsBuilderX86::VisitUnsafeCASObject(HInvoke* invoke) {
@@ -2334,7 +2314,7 @@ void IntrinsicLocationsBuilderX86::VisitUnsafeCASObject(HInvoke* invoke) {
return;
}
- CreateIntIntIntIntIntToInt(arena_, DataType::Type::kReference, invoke);
+ CreateIntIntIntIntIntToInt(allocator_, DataType::Type::kReference, invoke);
}
static void GenCAS(DataType::Type type, HInvoke* invoke, CodeGeneratorX86* codegen) {
@@ -2473,9 +2453,8 @@ void IntrinsicCodeGeneratorX86::VisitUnsafeCASObject(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86::VisitIntegerReverse(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::SameAsFirstInput());
locations->AddTemp(Location::RequiresRegister());
@@ -2516,9 +2495,8 @@ void IntrinsicCodeGeneratorX86::VisitIntegerReverse(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86::VisitLongReverse(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::SameAsFirstInput());
locations->AddTemp(Location::RequiresRegister());
@@ -2553,15 +2531,14 @@ void IntrinsicCodeGeneratorX86::VisitLongReverse(HInvoke* invoke) {
}
static void CreateBitCountLocations(
- ArenaAllocator* arena, CodeGeneratorX86* codegen, HInvoke* invoke, bool is_long) {
+ ArenaAllocator* allocator, CodeGeneratorX86* codegen, HInvoke* invoke, bool is_long) {
if (!codegen->GetInstructionSetFeatures().HasPopCnt()) {
// Do nothing if there is no popcnt support. This results in generating
// a call for the intrinsic rather than direct code.
return;
}
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
if (is_long) {
locations->AddTemp(Location::RequiresRegister());
}
@@ -2610,7 +2587,7 @@ static void GenBitCount(X86Assembler* assembler,
}
void IntrinsicLocationsBuilderX86::VisitIntegerBitCount(HInvoke* invoke) {
- CreateBitCountLocations(arena_, codegen_, invoke, /* is_long */ false);
+ CreateBitCountLocations(allocator_, codegen_, invoke, /* is_long */ false);
}
void IntrinsicCodeGeneratorX86::VisitIntegerBitCount(HInvoke* invoke) {
@@ -2618,17 +2595,16 @@ void IntrinsicCodeGeneratorX86::VisitIntegerBitCount(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86::VisitLongBitCount(HInvoke* invoke) {
- CreateBitCountLocations(arena_, codegen_, invoke, /* is_long */ true);
+ CreateBitCountLocations(allocator_, codegen_, invoke, /* is_long */ true);
}
void IntrinsicCodeGeneratorX86::VisitLongBitCount(HInvoke* invoke) {
GenBitCount(GetAssembler(), codegen_, invoke, /* is_long */ true);
}
-static void CreateLeadingZeroLocations(ArenaAllocator* arena, HInvoke* invoke, bool is_long) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateLeadingZeroLocations(ArenaAllocator* allocator, HInvoke* invoke, bool is_long) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
if (is_long) {
locations->SetInAt(0, Location::RequiresRegister());
} else {
@@ -2715,7 +2691,7 @@ static void GenLeadingZeros(X86Assembler* assembler,
}
void IntrinsicLocationsBuilderX86::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
- CreateLeadingZeroLocations(arena_, invoke, /* is_long */ false);
+ CreateLeadingZeroLocations(allocator_, invoke, /* is_long */ false);
}
void IntrinsicCodeGeneratorX86::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
@@ -2723,17 +2699,16 @@ void IntrinsicCodeGeneratorX86::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke
}
void IntrinsicLocationsBuilderX86::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
- CreateLeadingZeroLocations(arena_, invoke, /* is_long */ true);
+ CreateLeadingZeroLocations(allocator_, invoke, /* is_long */ true);
}
void IntrinsicCodeGeneratorX86::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long */ true);
}
-static void CreateTrailingZeroLocations(ArenaAllocator* arena, HInvoke* invoke, bool is_long) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateTrailingZeroLocations(ArenaAllocator* allocator, HInvoke* invoke, bool is_long) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
if (is_long) {
locations->SetInAt(0, Location::RequiresRegister());
} else {
@@ -2807,7 +2782,7 @@ static void GenTrailingZeros(X86Assembler* assembler,
}
void IntrinsicLocationsBuilderX86::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
- CreateTrailingZeroLocations(arena_, invoke, /* is_long */ false);
+ CreateTrailingZeroLocations(allocator_, invoke, /* is_long */ false);
}
void IntrinsicCodeGeneratorX86::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
@@ -2815,7 +2790,7 @@ void IntrinsicCodeGeneratorX86::VisitIntegerNumberOfTrailingZeros(HInvoke* invok
}
void IntrinsicLocationsBuilderX86::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
- CreateTrailingZeroLocations(arena_, invoke, /* is_long */ true);
+ CreateTrailingZeroLocations(allocator_, invoke, /* is_long */ true);
}
void IntrinsicCodeGeneratorX86::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
@@ -3352,9 +3327,8 @@ void IntrinsicCodeGeneratorX86::VisitIntegerValueOf(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86::VisitThreadInterrupted(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetOut(Location::RequiresRegister());
}
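Every hunk in this file follows the same shape: the helper's ArenaAllocator parameter (and the builder field it is fed from) is renamed from arena to allocator, and the placement-new of the LocationSummary is reflowed onto two lines. The snippet below is a generic, self-contained sketch of the placement-new-into-an-arena idiom these helpers rely on; the toy Arena and Summary types are invented for illustration and are not ART's ArenaAllocator or LocationSummary API.

#include <cstddef>
#include <cstdio>
#include <vector>

// Toy arena: hands out raw storage that is only reclaimed when the arena dies.
class Arena {
 public:
  void* Alloc(std::size_t bytes) {
    blocks_.emplace_back(bytes);  // one block per request keeps the sketch simple
    return blocks_.back().data();
  }

 private:
  std::vector<std::vector<unsigned char>> blocks_;
};

// Classes meant to live in the arena declare an operator new taking the arena;
// that is what makes the "new (allocator) Summary(...)" spelling legal.
struct Summary {
  explicit Summary(int kind_in) : kind(kind_in) {}
  static void* operator new(std::size_t size, Arena* arena) { return arena->Alloc(size); }
  static void operator delete(void*, Arena*) {}  // matching placement delete
  int kind;
};

int main() {
  Arena arena;
  // Same shape as new (allocator_) LocationSummary(...) in the hunks above.
  Summary* summary = new (&arena) Summary(/* kind */ 7);
  std::printf("kind=%d\n", summary->kind);
  // No explicit delete: the object is trivially destructible and its storage
  // belongs to the arena.
  return 0;
}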
diff --git a/compiler/optimizing/intrinsics_x86.h b/compiler/optimizing/intrinsics_x86.h
index 22f11b1d34..ba3ca0a410 100644
--- a/compiler/optimizing/intrinsics_x86.h
+++ b/compiler/optimizing/intrinsics_x86.h
@@ -49,7 +49,7 @@ class IntrinsicLocationsBuilderX86 FINAL : public IntrinsicVisitor {
bool TryDispatch(HInvoke* invoke);
private:
- ArenaAllocator* arena_;
+ ArenaAllocator* allocator_;
CodeGeneratorX86* codegen_;
DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderX86);
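The header hunk above renames only the cached field; the x86-64 file that follows opens with the matching constructor change, where the builder captures the graph's allocator once and then hands it to each static Create*Locations() helper. A minimal, self-contained sketch of that wiring follows; Graph, Codegen, and the builder name are invented stand-ins, not the ART classes.

#include <cstdio>

struct Allocator {};

struct Graph {
  Allocator* GetAllocator() { return &allocator; }
  Allocator allocator;
};

struct Codegen {
  explicit Codegen(Graph* g) : graph(g) {}
  Graph* GetGraph() { return graph; }
  Graph* graph;
};

class LocationsBuilderSketch {
 public:
  // Caches the graph's allocator under the new field name (previously arena_).
  explicit LocationsBuilderSketch(Codegen* codegen)
      : allocator_(codegen->GetGraph()->GetAllocator()), codegen_(codegen) {}

  void VisitSomething() {
    // allocator_ is simply a cached copy of what the codegen's graph hands out.
    std::printf("cached=%p fresh=%p\n",
                static_cast<void*>(allocator_),
                static_cast<void*>(codegen_->GetGraph()->GetAllocator()));
    CreateSomethingLocations(allocator_);
  }

 private:
  static void CreateSomethingLocations(Allocator* allocator) {
    std::printf("building locations with allocator %p\n", static_cast<void*>(allocator));
  }

  Allocator* allocator_;  // formerly arena_
  Codegen* codegen_;
};

int main() {
  Graph graph;
  Codegen codegen(&graph);
  LocationsBuilderSketch builder(&codegen);
  builder.VisitSomething();
  return 0;
}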
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index a2545ee3d8..6337900b71 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -41,7 +41,7 @@ namespace art {
namespace x86_64 {
IntrinsicLocationsBuilderX86_64::IntrinsicLocationsBuilderX86_64(CodeGeneratorX86_64* codegen)
- : arena_(codegen->GetGraph()->GetArena()), codegen_(codegen) {
+ : allocator_(codegen->GetGraph()->GetAllocator()), codegen_(codegen) {
}
X86_64Assembler* IntrinsicCodeGeneratorX86_64::GetAssembler() {
@@ -49,7 +49,7 @@ X86_64Assembler* IntrinsicCodeGeneratorX86_64::GetAssembler() {
}
ArenaAllocator* IntrinsicCodeGeneratorX86_64::GetAllocator() {
- return codegen_->GetGraph()->GetArena();
+ return codegen_->GetGraph()->GetAllocator();
}
bool IntrinsicLocationsBuilderX86_64::TryDispatch(HInvoke* invoke) {
@@ -128,18 +128,16 @@ class ReadBarrierSystemArrayCopySlowPathX86_64 : public SlowPathCode {
#define __ assembler->
-static void CreateFPToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateFPToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresRegister());
}
-static void CreateIntToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresFpuRegister());
}
@@ -157,10 +155,10 @@ static void MoveIntToFP(LocationSummary* locations, bool is64bit, X86_64Assemble
}
void IntrinsicLocationsBuilderX86_64::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke);
+ CreateFPToIntLocations(allocator_, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
- CreateIntToFPLocations(arena_, invoke);
+ CreateIntToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
@@ -171,10 +169,10 @@ void IntrinsicCodeGeneratorX86_64::VisitDoubleLongBitsToDouble(HInvoke* invoke)
}
void IntrinsicLocationsBuilderX86_64::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke);
+ CreateFPToIntLocations(allocator_, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitFloatIntBitsToFloat(HInvoke* invoke) {
- CreateIntToFPLocations(arena_, invoke);
+ CreateIntToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
@@ -184,10 +182,9 @@ void IntrinsicCodeGeneratorX86_64::VisitFloatIntBitsToFloat(HInvoke* invoke) {
MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
}
-static void CreateIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::SameAsFirstInput());
}
@@ -216,7 +213,7 @@ static void GenReverseBytes(LocationSummary* locations,
}
void IntrinsicLocationsBuilderX86_64::VisitIntegerReverseBytes(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitIntegerReverseBytes(HInvoke* invoke) {
@@ -224,7 +221,7 @@ void IntrinsicCodeGeneratorX86_64::VisitIntegerReverseBytes(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86_64::VisitLongReverseBytes(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitLongReverseBytes(HInvoke* invoke) {
@@ -232,7 +229,7 @@ void IntrinsicCodeGeneratorX86_64::VisitLongReverseBytes(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86_64::VisitShortReverseBytes(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitShortReverseBytes(HInvoke* invoke) {
@@ -243,11 +240,10 @@ void IntrinsicCodeGeneratorX86_64::VisitShortReverseBytes(HInvoke* invoke) {
// TODO: Consider Quick's way of doing Double abs through integer operations, as the immediate we
// need is 64b.
-static void CreateFloatToFloatPlusTemps(ArenaAllocator* arena, HInvoke* invoke) {
+static void CreateFloatToFloatPlusTemps(ArenaAllocator* allocator, HInvoke* invoke) {
// TODO: Enable memory operations when the assembler supports them.
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::SameAsFirstInput());
locations->AddTemp(Location::RequiresFpuRegister()); // FP reg to hold mask.
@@ -275,7 +271,7 @@ static void MathAbsFP(LocationSummary* locations,
}
void IntrinsicLocationsBuilderX86_64::VisitMathAbsDouble(HInvoke* invoke) {
- CreateFloatToFloatPlusTemps(arena_, invoke);
+ CreateFloatToFloatPlusTemps(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathAbsDouble(HInvoke* invoke) {
@@ -283,17 +279,16 @@ void IntrinsicCodeGeneratorX86_64::VisitMathAbsDouble(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86_64::VisitMathAbsFloat(HInvoke* invoke) {
- CreateFloatToFloatPlusTemps(arena_, invoke);
+ CreateFloatToFloatPlusTemps(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathAbsFloat(HInvoke* invoke) {
MathAbsFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler(), codegen_);
}
-static void CreateIntToIntPlusTemp(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntToIntPlusTemp(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::SameAsFirstInput());
locations->AddTemp(Location::RequiresRegister());
@@ -322,7 +317,7 @@ static void GenAbsInteger(LocationSummary* locations, bool is64bit, X86_64Assemb
}
void IntrinsicLocationsBuilderX86_64::VisitMathAbsInt(HInvoke* invoke) {
- CreateIntToIntPlusTemp(arena_, invoke);
+ CreateIntToIntPlusTemp(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathAbsInt(HInvoke* invoke) {
@@ -330,7 +325,7 @@ void IntrinsicCodeGeneratorX86_64::VisitMathAbsInt(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86_64::VisitMathAbsLong(HInvoke* invoke) {
- CreateIntToIntPlusTemp(arena_, invoke);
+ CreateIntToIntPlusTemp(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathAbsLong(HInvoke* invoke) {
@@ -421,10 +416,9 @@ static void GenMinMaxFP(LocationSummary* locations,
__ Bind(&done);
}
-static void CreateFPFPToFP(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateFPFPToFP(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetInAt(1, Location::RequiresFpuRegister());
// The following is sub-optimal, but all we can do for now. It would be fine to also accept
@@ -433,7 +427,7 @@ static void CreateFPFPToFP(ArenaAllocator* arena, HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86_64::VisitMathMinDoubleDouble(HInvoke* invoke) {
- CreateFPFPToFP(arena_, invoke);
+ CreateFPFPToFP(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathMinDoubleDouble(HInvoke* invoke) {
@@ -442,7 +436,7 @@ void IntrinsicCodeGeneratorX86_64::VisitMathMinDoubleDouble(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86_64::VisitMathMinFloatFloat(HInvoke* invoke) {
- CreateFPFPToFP(arena_, invoke);
+ CreateFPFPToFP(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathMinFloatFloat(HInvoke* invoke) {
@@ -451,7 +445,7 @@ void IntrinsicCodeGeneratorX86_64::VisitMathMinFloatFloat(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86_64::VisitMathMaxDoubleDouble(HInvoke* invoke) {
- CreateFPFPToFP(arena_, invoke);
+ CreateFPFPToFP(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathMaxDoubleDouble(HInvoke* invoke) {
@@ -460,7 +454,7 @@ void IntrinsicCodeGeneratorX86_64::VisitMathMaxDoubleDouble(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86_64::VisitMathMaxFloatFloat(HInvoke* invoke) {
- CreateFPFPToFP(arena_, invoke);
+ CreateFPFPToFP(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathMaxFloatFloat(HInvoke* invoke) {
@@ -500,17 +494,16 @@ static void GenMinMax(LocationSummary* locations, bool is_min, bool is_long,
__ cmov(is_min ? Condition::kGreater : Condition::kLess, out, op2, is_long);
}
-static void CreateIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->SetOut(Location::SameAsFirstInput());
}
void IntrinsicLocationsBuilderX86_64::VisitMathMinIntInt(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathMinIntInt(HInvoke* invoke) {
@@ -518,7 +511,7 @@ void IntrinsicCodeGeneratorX86_64::VisitMathMinIntInt(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86_64::VisitMathMinLongLong(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathMinLongLong(HInvoke* invoke) {
@@ -526,7 +519,7 @@ void IntrinsicCodeGeneratorX86_64::VisitMathMinLongLong(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86_64::VisitMathMaxIntInt(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathMaxIntInt(HInvoke* invoke) {
@@ -534,23 +527,22 @@ void IntrinsicCodeGeneratorX86_64::VisitMathMaxIntInt(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86_64::VisitMathMaxLongLong(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathMaxLongLong(HInvoke* invoke) {
GenMinMax(invoke->GetLocations(), /* is_min */ false, /* is_long */ true, GetAssembler());
}
-static void CreateFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresFpuRegister());
}
void IntrinsicLocationsBuilderX86_64::VisitMathSqrt(HInvoke* invoke) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathSqrt(HInvoke* invoke) {
@@ -576,18 +568,18 @@ static void InvokeOutOfLineIntrinsic(CodeGeneratorX86_64* codegen, HInvoke* invo
}
}
-static void CreateSSE41FPToFPLocations(ArenaAllocator* arena,
- HInvoke* invoke,
- CodeGeneratorX86_64* codegen) {
+static void CreateSSE41FPToFPLocations(ArenaAllocator* allocator,
+ HInvoke* invoke,
+ CodeGeneratorX86_64* codegen) {
// Do we have instruction support?
if (codegen->GetInstructionSetFeatures().HasSSE4_1()) {
- CreateFPToFPLocations(arena, invoke);
+ CreateFPToFPLocations(allocator, invoke);
return;
}
// We have to fall back to a call to the intrinsic.
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetFpuRegisterAt(0)));
locations->SetOut(Location::FpuRegisterLocation(XMM0));
@@ -610,7 +602,7 @@ static void GenSSE41FPToFPIntrinsic(CodeGeneratorX86_64* codegen,
}
void IntrinsicLocationsBuilderX86_64::VisitMathCeil(HInvoke* invoke) {
- CreateSSE41FPToFPLocations(arena_, invoke, codegen_);
+ CreateSSE41FPToFPLocations(allocator_, invoke, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitMathCeil(HInvoke* invoke) {
@@ -618,7 +610,7 @@ void IntrinsicCodeGeneratorX86_64::VisitMathCeil(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86_64::VisitMathFloor(HInvoke* invoke) {
- CreateSSE41FPToFPLocations(arena_, invoke, codegen_);
+ CreateSSE41FPToFPLocations(allocator_, invoke, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitMathFloor(HInvoke* invoke) {
@@ -626,21 +618,20 @@ void IntrinsicCodeGeneratorX86_64::VisitMathFloor(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86_64::VisitMathRint(HInvoke* invoke) {
- CreateSSE41FPToFPLocations(arena_, invoke, codegen_);
+ CreateSSE41FPToFPLocations(allocator_, invoke, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitMathRint(HInvoke* invoke) {
GenSSE41FPToFPIntrinsic(codegen_, invoke, GetAssembler(), 0);
}
-static void CreateSSE41FPToIntLocations(ArenaAllocator* arena,
- HInvoke* invoke,
- CodeGeneratorX86_64* codegen) {
+static void CreateSSE41FPToIntLocations(ArenaAllocator* allocator,
+ HInvoke* invoke,
+ CodeGeneratorX86_64* codegen) {
// Do we have instruction support?
if (codegen->GetInstructionSetFeatures().HasSSE4_1()) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresRegister());
locations->AddTemp(Location::RequiresFpuRegister());
@@ -649,8 +640,8 @@ static void CreateSSE41FPToIntLocations(ArenaAllocator* arena,
}
// We have to fall back to a call to the intrinsic.
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetFpuRegisterAt(0)));
locations->SetOut(Location::RegisterLocation(RAX));
@@ -659,7 +650,7 @@ static void CreateSSE41FPToIntLocations(ArenaAllocator* arena,
}
void IntrinsicLocationsBuilderX86_64::VisitMathRoundFloat(HInvoke* invoke) {
- CreateSSE41FPToIntLocations(arena_, invoke, codegen_);
+ CreateSSE41FPToIntLocations(allocator_, invoke, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitMathRoundFloat(HInvoke* invoke) {
@@ -703,7 +694,7 @@ void IntrinsicCodeGeneratorX86_64::VisitMathRoundFloat(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86_64::VisitMathRoundDouble(HInvoke* invoke) {
- CreateSSE41FPToIntLocations(arena_, invoke, codegen_);
+ CreateSSE41FPToIntLocations(allocator_, invoke, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitMathRoundDouble(HInvoke* invoke) {
@@ -746,11 +737,9 @@ void IntrinsicCodeGeneratorX86_64::VisitMathRoundDouble(HInvoke* invoke) {
__ Bind(&done);
}
-static void CreateFPToFPCallLocations(ArenaAllocator* arena,
- HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
- kIntrinsified);
+static void CreateFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
locations->SetOut(Location::FpuRegisterLocation(XMM0));
@@ -773,7 +762,7 @@ static void GenFPToFPCall(HInvoke* invoke, CodeGeneratorX86_64* codegen,
}
void IntrinsicLocationsBuilderX86_64::VisitMathCos(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathCos(HInvoke* invoke) {
@@ -781,7 +770,7 @@ void IntrinsicCodeGeneratorX86_64::VisitMathCos(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86_64::VisitMathSin(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathSin(HInvoke* invoke) {
@@ -789,7 +778,7 @@ void IntrinsicCodeGeneratorX86_64::VisitMathSin(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86_64::VisitMathAcos(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathAcos(HInvoke* invoke) {
@@ -797,7 +786,7 @@ void IntrinsicCodeGeneratorX86_64::VisitMathAcos(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86_64::VisitMathAsin(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathAsin(HInvoke* invoke) {
@@ -805,7 +794,7 @@ void IntrinsicCodeGeneratorX86_64::VisitMathAsin(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86_64::VisitMathAtan(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathAtan(HInvoke* invoke) {
@@ -813,7 +802,7 @@ void IntrinsicCodeGeneratorX86_64::VisitMathAtan(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86_64::VisitMathCbrt(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathCbrt(HInvoke* invoke) {
@@ -821,7 +810,7 @@ void IntrinsicCodeGeneratorX86_64::VisitMathCbrt(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86_64::VisitMathCosh(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathCosh(HInvoke* invoke) {
@@ -829,7 +818,7 @@ void IntrinsicCodeGeneratorX86_64::VisitMathCosh(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86_64::VisitMathExp(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathExp(HInvoke* invoke) {
@@ -837,7 +826,7 @@ void IntrinsicCodeGeneratorX86_64::VisitMathExp(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86_64::VisitMathExpm1(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathExpm1(HInvoke* invoke) {
@@ -845,7 +834,7 @@ void IntrinsicCodeGeneratorX86_64::VisitMathExpm1(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86_64::VisitMathLog(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathLog(HInvoke* invoke) {
@@ -853,7 +842,7 @@ void IntrinsicCodeGeneratorX86_64::VisitMathLog(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86_64::VisitMathLog10(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathLog10(HInvoke* invoke) {
@@ -861,7 +850,7 @@ void IntrinsicCodeGeneratorX86_64::VisitMathLog10(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86_64::VisitMathSinh(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathSinh(HInvoke* invoke) {
@@ -869,7 +858,7 @@ void IntrinsicCodeGeneratorX86_64::VisitMathSinh(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86_64::VisitMathTan(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathTan(HInvoke* invoke) {
@@ -877,18 +866,16 @@ void IntrinsicCodeGeneratorX86_64::VisitMathTan(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86_64::VisitMathTanh(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathTanh(HInvoke* invoke) {
GenFPToFPCall(invoke, codegen_, kQuickTanh);
}
-static void CreateFPFPToFPCallLocations(ArenaAllocator* arena,
- HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
- kIntrinsified);
+static void CreateFPFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
@@ -903,7 +890,7 @@ static void CreateFPFPToFPCallLocations(ArenaAllocator* arena,
}
void IntrinsicLocationsBuilderX86_64::VisitMathAtan2(HInvoke* invoke) {
- CreateFPFPToFPCallLocations(arena_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathAtan2(HInvoke* invoke) {
@@ -911,7 +898,7 @@ void IntrinsicCodeGeneratorX86_64::VisitMathAtan2(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86_64::VisitMathHypot(HInvoke* invoke) {
- CreateFPFPToFPCallLocations(arena_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathHypot(HInvoke* invoke) {
@@ -919,7 +906,7 @@ void IntrinsicCodeGeneratorX86_64::VisitMathHypot(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86_64::VisitMathNextAfter(HInvoke* invoke) {
- CreateFPFPToFPCallLocations(arena_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathNextAfter(HInvoke* invoke) {
@@ -949,9 +936,8 @@ void IntrinsicLocationsBuilderX86_64::VisitSystemArrayCopyChar(HInvoke* invoke)
}
}
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnSlowPath,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
// arraycopy(Object src, int src_pos, Object dest, int dest_pos, int length).
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
@@ -1507,9 +1493,8 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86_64::VisitStringCompareTo(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
@@ -1534,9 +1519,8 @@ void IntrinsicCodeGeneratorX86_64::VisitStringCompareTo(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86_64::VisitStringEquals(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
@@ -1812,7 +1796,7 @@ static void GenerateStringIndexOf(HInvoke* invoke,
}
void IntrinsicLocationsBuilderX86_64::VisitStringIndexOf(HInvoke* invoke) {
- CreateStringIndexOfLocations(invoke, arena_, /* start_at_zero */ true);
+ CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero */ true);
}
void IntrinsicCodeGeneratorX86_64::VisitStringIndexOf(HInvoke* invoke) {
@@ -1820,7 +1804,7 @@ void IntrinsicCodeGeneratorX86_64::VisitStringIndexOf(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86_64::VisitStringIndexOfAfter(HInvoke* invoke) {
- CreateStringIndexOfLocations(invoke, arena_, /* start_at_zero */ false);
+ CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero */ false);
}
void IntrinsicCodeGeneratorX86_64::VisitStringIndexOfAfter(HInvoke* invoke) {
@@ -1829,9 +1813,8 @@ void IntrinsicCodeGeneratorX86_64::VisitStringIndexOfAfter(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86_64::VisitStringNewStringFromBytes(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
@@ -1856,9 +1839,8 @@ void IntrinsicCodeGeneratorX86_64::VisitStringNewStringFromBytes(HInvoke* invoke
}
void IntrinsicLocationsBuilderX86_64::VisitStringNewStringFromChars(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
@@ -1878,9 +1860,8 @@ void IntrinsicCodeGeneratorX86_64::VisitStringNewStringFromChars(HInvoke* invoke
}
void IntrinsicLocationsBuilderX86_64::VisitStringNewStringFromString(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
locations->SetOut(Location::RegisterLocation(RAX));
@@ -1903,9 +1884,8 @@ void IntrinsicCodeGeneratorX86_64::VisitStringNewStringFromString(HInvoke* invok
void IntrinsicLocationsBuilderX86_64::VisitStringGetCharsNoCheck(HInvoke* invoke) {
// public void getChars(int srcBegin, int srcEnd, char[] dst, int dstBegin);
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
locations->SetInAt(2, Location::RequiresRegister());
@@ -2018,7 +1998,7 @@ static void GenPeek(LocationSummary* locations, DataType::Type size, X86_64Assem
}
void IntrinsicLocationsBuilderX86_64::VisitMemoryPeekByte(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMemoryPeekByte(HInvoke* invoke) {
@@ -2026,7 +2006,7 @@ void IntrinsicCodeGeneratorX86_64::VisitMemoryPeekByte(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86_64::VisitMemoryPeekIntNative(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMemoryPeekIntNative(HInvoke* invoke) {
@@ -2034,7 +2014,7 @@ void IntrinsicCodeGeneratorX86_64::VisitMemoryPeekIntNative(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86_64::VisitMemoryPeekLongNative(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMemoryPeekLongNative(HInvoke* invoke) {
@@ -2042,17 +2022,16 @@ void IntrinsicCodeGeneratorX86_64::VisitMemoryPeekLongNative(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86_64::VisitMemoryPeekShortNative(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMemoryPeekShortNative(HInvoke* invoke) {
GenPeek(invoke->GetLocations(), DataType::Type::kInt16, GetAssembler());
}
-static void CreateIntIntToVoidLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntIntToVoidLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrInt32Constant(invoke->InputAt(1)));
}
@@ -2104,7 +2083,7 @@ static void GenPoke(LocationSummary* locations, DataType::Type size, X86_64Assem
}
void IntrinsicLocationsBuilderX86_64::VisitMemoryPokeByte(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMemoryPokeByte(HInvoke* invoke) {
@@ -2112,7 +2091,7 @@ void IntrinsicCodeGeneratorX86_64::VisitMemoryPokeByte(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86_64::VisitMemoryPokeIntNative(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMemoryPokeIntNative(HInvoke* invoke) {
@@ -2120,7 +2099,7 @@ void IntrinsicCodeGeneratorX86_64::VisitMemoryPokeIntNative(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86_64::VisitMemoryPokeLongNative(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMemoryPokeLongNative(HInvoke* invoke) {
@@ -2128,7 +2107,7 @@ void IntrinsicCodeGeneratorX86_64::VisitMemoryPokeLongNative(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86_64::VisitMemoryPokeShortNative(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMemoryPokeShortNative(HInvoke* invoke) {
@@ -2136,9 +2115,8 @@ void IntrinsicCodeGeneratorX86_64::VisitMemoryPokeShortNative(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86_64::VisitThreadCurrentThread(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetOut(Location::RequiresRegister());
}
@@ -2194,15 +2172,16 @@ static void GenUnsafeGet(HInvoke* invoke,
}
}
-static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
+static void CreateIntIntIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
bool can_call = kEmitCompilerReadBarrier &&
(invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObject ||
invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile);
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- (can_call
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall),
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke,
+ can_call
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall,
+ kIntrinsified);
if (can_call && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -2214,22 +2193,22 @@ static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafeGet(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafeGetVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafeGetLong(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafeGetObject(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(allocator_, invoke);
}
@@ -2253,12 +2232,11 @@ void IntrinsicCodeGeneratorX86_64::VisitUnsafeGetObjectVolatile(HInvoke* invoke)
}
-static void CreateIntIntIntIntToVoidPlusTempsLocations(ArenaAllocator* arena,
+static void CreateIntIntIntIntToVoidPlusTempsLocations(ArenaAllocator* allocator,
DataType::Type type,
HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
@@ -2271,31 +2249,31 @@ static void CreateIntIntIntIntToVoidPlusTempsLocations(ArenaAllocator* arena,
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafePut(HInvoke* invoke) {
- CreateIntIntIntIntToVoidPlusTempsLocations(arena_, DataType::Type::kInt32, invoke);
+ CreateIntIntIntIntToVoidPlusTempsLocations(allocator_, DataType::Type::kInt32, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafePutOrdered(HInvoke* invoke) {
- CreateIntIntIntIntToVoidPlusTempsLocations(arena_, DataType::Type::kInt32, invoke);
+ CreateIntIntIntIntToVoidPlusTempsLocations(allocator_, DataType::Type::kInt32, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafePutVolatile(HInvoke* invoke) {
- CreateIntIntIntIntToVoidPlusTempsLocations(arena_, DataType::Type::kInt32, invoke);
+ CreateIntIntIntIntToVoidPlusTempsLocations(allocator_, DataType::Type::kInt32, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafePutObject(HInvoke* invoke) {
- CreateIntIntIntIntToVoidPlusTempsLocations(arena_, DataType::Type::kReference, invoke);
+ CreateIntIntIntIntToVoidPlusTempsLocations(allocator_, DataType::Type::kReference, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
- CreateIntIntIntIntToVoidPlusTempsLocations(arena_, DataType::Type::kReference, invoke);
+ CreateIntIntIntIntToVoidPlusTempsLocations(allocator_, DataType::Type::kReference, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
- CreateIntIntIntIntToVoidPlusTempsLocations(arena_, DataType::Type::kReference, invoke);
+ CreateIntIntIntIntToVoidPlusTempsLocations(allocator_, DataType::Type::kReference, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafePutLong(HInvoke* invoke) {
- CreateIntIntIntIntToVoidPlusTempsLocations(arena_, DataType::Type::kInt64, invoke);
+ CreateIntIntIntIntToVoidPlusTempsLocations(allocator_, DataType::Type::kInt64, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafePutLongOrdered(HInvoke* invoke) {
- CreateIntIntIntIntToVoidPlusTempsLocations(arena_, DataType::Type::kInt64, invoke);
+ CreateIntIntIntIntToVoidPlusTempsLocations(allocator_, DataType::Type::kInt64, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafePutLongVolatile(HInvoke* invoke) {
- CreateIntIntIntIntToVoidPlusTempsLocations(arena_, DataType::Type::kInt64, invoke);
+ CreateIntIntIntIntToVoidPlusTempsLocations(allocator_, DataType::Type::kInt64, invoke);
}
// We don't care for ordered: it requires an AnyStore barrier, which is already given by the x86
@@ -2363,17 +2341,18 @@ void IntrinsicCodeGeneratorX86_64::VisitUnsafePutLongVolatile(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile */ true, codegen_);
}
-static void CreateIntIntIntIntIntToInt(ArenaAllocator* arena,
+static void CreateIntIntIntIntIntToInt(ArenaAllocator* allocator,
DataType::Type type,
HInvoke* invoke) {
bool can_call = kEmitCompilerReadBarrier &&
kUseBakerReadBarrier &&
(invoke->GetIntrinsic() == Intrinsics::kUnsafeCASObject);
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- (can_call
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall),
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke,
+ can_call
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall,
+ kIntrinsified);
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
@@ -2391,11 +2370,11 @@ static void CreateIntIntIntIntIntToInt(ArenaAllocator* arena,
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafeCASInt(HInvoke* invoke) {
- CreateIntIntIntIntIntToInt(arena_, DataType::Type::kInt32, invoke);
+ CreateIntIntIntIntIntToInt(allocator_, DataType::Type::kInt32, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafeCASLong(HInvoke* invoke) {
- CreateIntIntIntIntIntToInt(arena_, DataType::Type::kInt64, invoke);
+ CreateIntIntIntIntIntToInt(allocator_, DataType::Type::kInt64, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafeCASObject(HInvoke* invoke) {
@@ -2405,7 +2384,7 @@ void IntrinsicLocationsBuilderX86_64::VisitUnsafeCASObject(HInvoke* invoke) {
return;
}
- CreateIntIntIntIntIntToInt(arena_, DataType::Type::kReference, invoke);
+ CreateIntIntIntIntIntToInt(allocator_, DataType::Type::kReference, invoke);
}
static void GenCAS(DataType::Type type, HInvoke* invoke, CodeGeneratorX86_64* codegen) {
@@ -2537,9 +2516,8 @@ void IntrinsicCodeGeneratorX86_64::VisitUnsafeCASObject(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86_64::VisitIntegerReverse(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::SameAsFirstInput());
locations->AddTemp(Location::RequiresRegister());
@@ -2580,9 +2558,8 @@ void IntrinsicCodeGeneratorX86_64::VisitIntegerReverse(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86_64::VisitLongReverse(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::SameAsFirstInput());
locations->AddTemp(Location::RequiresRegister());
@@ -2625,15 +2602,14 @@ void IntrinsicCodeGeneratorX86_64::VisitLongReverse(HInvoke* invoke) {
}
static void CreateBitCountLocations(
- ArenaAllocator* arena, CodeGeneratorX86_64* codegen, HInvoke* invoke) {
+ ArenaAllocator* allocator, CodeGeneratorX86_64* codegen, HInvoke* invoke) {
if (!codegen->GetInstructionSetFeatures().HasPopCnt()) {
// Do nothing if there is no popcnt support. This results in generating
// a call for the intrinsic rather than direct code.
return;
}
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::Any());
locations->SetOut(Location::RequiresRegister());
}
@@ -2672,7 +2648,7 @@ static void GenBitCount(X86_64Assembler* assembler,
}
void IntrinsicLocationsBuilderX86_64::VisitIntegerBitCount(HInvoke* invoke) {
- CreateBitCountLocations(arena_, codegen_, invoke);
+ CreateBitCountLocations(allocator_, codegen_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitIntegerBitCount(HInvoke* invoke) {
@@ -2680,17 +2656,16 @@ void IntrinsicCodeGeneratorX86_64::VisitIntegerBitCount(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86_64::VisitLongBitCount(HInvoke* invoke) {
- CreateBitCountLocations(arena_, codegen_, invoke);
+ CreateBitCountLocations(allocator_, codegen_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitLongBitCount(HInvoke* invoke) {
GenBitCount(GetAssembler(), codegen_, invoke, /* is_long */ true);
}
-static void CreateOneBitLocations(ArenaAllocator* arena, HInvoke* invoke, bool is_high) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateOneBitLocations(ArenaAllocator* allocator, HInvoke* invoke, bool is_high) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::Any());
locations->SetOut(Location::RequiresRegister());
locations->AddTemp(is_high ? Location::RegisterLocation(RCX) // needs CL
@@ -2787,7 +2762,7 @@ static void GenOneBit(X86_64Assembler* assembler,
}
void IntrinsicLocationsBuilderX86_64::VisitIntegerHighestOneBit(HInvoke* invoke) {
- CreateOneBitLocations(arena_, invoke, /* is_high */ true);
+ CreateOneBitLocations(allocator_, invoke, /* is_high */ true);
}
void IntrinsicCodeGeneratorX86_64::VisitIntegerHighestOneBit(HInvoke* invoke) {
@@ -2795,7 +2770,7 @@ void IntrinsicCodeGeneratorX86_64::VisitIntegerHighestOneBit(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86_64::VisitLongHighestOneBit(HInvoke* invoke) {
- CreateOneBitLocations(arena_, invoke, /* is_high */ true);
+ CreateOneBitLocations(allocator_, invoke, /* is_high */ true);
}
void IntrinsicCodeGeneratorX86_64::VisitLongHighestOneBit(HInvoke* invoke) {
@@ -2803,7 +2778,7 @@ void IntrinsicCodeGeneratorX86_64::VisitLongHighestOneBit(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86_64::VisitIntegerLowestOneBit(HInvoke* invoke) {
- CreateOneBitLocations(arena_, invoke, /* is_high */ false);
+ CreateOneBitLocations(allocator_, invoke, /* is_high */ false);
}
void IntrinsicCodeGeneratorX86_64::VisitIntegerLowestOneBit(HInvoke* invoke) {
@@ -2811,17 +2786,16 @@ void IntrinsicCodeGeneratorX86_64::VisitIntegerLowestOneBit(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86_64::VisitLongLowestOneBit(HInvoke* invoke) {
- CreateOneBitLocations(arena_, invoke, /* is_high */ false);
+ CreateOneBitLocations(allocator_, invoke, /* is_high */ false);
}
void IntrinsicCodeGeneratorX86_64::VisitLongLowestOneBit(HInvoke* invoke) {
GenOneBit(GetAssembler(), codegen_, invoke, /* is_high */ false, /* is_long */ true);
}
-static void CreateLeadingZeroLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateLeadingZeroLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::Any());
locations->SetOut(Location::RequiresRegister());
}
@@ -2877,7 +2851,7 @@ static void GenLeadingZeros(X86_64Assembler* assembler,
}
void IntrinsicLocationsBuilderX86_64::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
- CreateLeadingZeroLocations(arena_, invoke);
+ CreateLeadingZeroLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
@@ -2885,17 +2859,16 @@ void IntrinsicCodeGeneratorX86_64::VisitIntegerNumberOfLeadingZeros(HInvoke* inv
}
void IntrinsicLocationsBuilderX86_64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
- CreateLeadingZeroLocations(arena_, invoke);
+ CreateLeadingZeroLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long */ true);
}
-static void CreateTrailingZeroLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateTrailingZeroLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::Any());
locations->SetOut(Location::RequiresRegister());
}
@@ -2946,7 +2919,7 @@ static void GenTrailingZeros(X86_64Assembler* assembler,
}
void IntrinsicLocationsBuilderX86_64::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
- CreateTrailingZeroLocations(arena_, invoke);
+ CreateTrailingZeroLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
@@ -2954,7 +2927,7 @@ void IntrinsicCodeGeneratorX86_64::VisitIntegerNumberOfTrailingZeros(HInvoke* in
}
void IntrinsicLocationsBuilderX86_64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
- CreateTrailingZeroLocations(arena_, invoke);
+ CreateTrailingZeroLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
@@ -3029,9 +3002,8 @@ void IntrinsicCodeGeneratorX86_64::VisitIntegerValueOf(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86_64::VisitThreadInterrupted(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetOut(Location::RequiresRegister());
}
diff --git a/compiler/optimizing/intrinsics_x86_64.h b/compiler/optimizing/intrinsics_x86_64.h
index 4b287886af..b0fbe91a75 100644
--- a/compiler/optimizing/intrinsics_x86_64.h
+++ b/compiler/optimizing/intrinsics_x86_64.h
@@ -49,7 +49,7 @@ class IntrinsicLocationsBuilderX86_64 FINAL : public IntrinsicVisitor {
bool TryDispatch(HInvoke* invoke);
private:
- ArenaAllocator* arena_;
+ ArenaAllocator* allocator_;
CodeGeneratorX86_64* codegen_;
DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderX86_64);
diff --git a/compiler/optimizing/licm.cc b/compiler/optimizing/licm.cc
index 10524b0ae6..7af1a20f98 100644
--- a/compiler/optimizing/licm.cc
+++ b/compiler/optimizing/licm.cc
@@ -84,10 +84,10 @@ void LICM::Run() {
// Only used during debug.
ArenaBitVector* visited = nullptr;
if (kIsDebugBuild) {
- visited = new (graph_->GetArena()) ArenaBitVector(graph_->GetArena(),
- graph_->GetBlocks().size(),
- false,
- kArenaAllocLICM);
+ visited = new (graph_->GetAllocator()) ArenaBitVector(graph_->GetAllocator(),
+ graph_->GetBlocks().size(),
+ false,
+ kArenaAllocLICM);
}
// Post order visit to visit inner loops before outer loops.
diff --git a/compiler/optimizing/licm_test.cc b/compiler/optimizing/licm_test.cc
index 0617e60cfe..adc3cabe87 100644
--- a/compiler/optimizing/licm_test.cc
+++ b/compiler/optimizing/licm_test.cc
@@ -27,12 +27,10 @@ namespace art {
/**
* Fixture class for the LICM tests.
*/
-class LICMTest : public CommonCompilerTest {
+class LICMTest : public OptimizingUnitTest {
public:
LICMTest()
- : pool_(),
- allocator_(&pool_),
- entry_(nullptr),
+ : entry_(nullptr),
loop_preheader_(nullptr),
loop_header_(nullptr),
loop_body_(nullptr),
@@ -41,7 +39,7 @@ class LICMTest : public CommonCompilerTest {
parameter_(nullptr),
int_constant_(nullptr),
float_constant_(nullptr) {
- graph_ = CreateGraph(&allocator_);
+ graph_ = CreateGraph();
}
~LICMTest() { }
@@ -49,12 +47,12 @@ class LICMTest : public CommonCompilerTest {
// Builds a singly-nested loop structure in CFG. Tests can further populate
// the basic blocks with instructions to set up interesting scenarios.
void BuildLoop() {
- entry_ = new (&allocator_) HBasicBlock(graph_);
- loop_preheader_ = new (&allocator_) HBasicBlock(graph_);
- loop_header_ = new (&allocator_) HBasicBlock(graph_);
- loop_body_ = new (&allocator_) HBasicBlock(graph_);
- return_ = new (&allocator_) HBasicBlock(graph_);
- exit_ = new (&allocator_) HBasicBlock(graph_);
+ entry_ = new (GetAllocator()) HBasicBlock(graph_);
+ loop_preheader_ = new (GetAllocator()) HBasicBlock(graph_);
+ loop_header_ = new (GetAllocator()) HBasicBlock(graph_);
+ loop_body_ = new (GetAllocator()) HBasicBlock(graph_);
+ return_ = new (GetAllocator()) HBasicBlock(graph_);
+ exit_ = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry_);
graph_->AddBlock(loop_preheader_);
@@ -75,18 +73,18 @@ class LICMTest : public CommonCompilerTest {
return_->AddSuccessor(exit_);
// Provide boiler-plate instructions.
- parameter_ = new (&allocator_) HParameterValue(graph_->GetDexFile(),
- dex::TypeIndex(0),
- 0,
- DataType::Type::kReference);
+ parameter_ = new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
+ dex::TypeIndex(0),
+ 0,
+ DataType::Type::kReference);
entry_->AddInstruction(parameter_);
int_constant_ = graph_->GetIntConstant(42);
float_constant_ = graph_->GetFloatConstant(42.0f);
- loop_preheader_->AddInstruction(new (&allocator_) HGoto());
- loop_header_->AddInstruction(new (&allocator_) HIf(parameter_));
- loop_body_->AddInstruction(new (&allocator_) HGoto());
- return_->AddInstruction(new (&allocator_) HReturnVoid());
- exit_->AddInstruction(new (&allocator_) HExit());
+ loop_preheader_->AddInstruction(new (GetAllocator()) HGoto());
+ loop_header_->AddInstruction(new (GetAllocator()) HIf(parameter_));
+ loop_body_->AddInstruction(new (GetAllocator()) HGoto());
+ return_->AddInstruction(new (GetAllocator()) HReturnVoid());
+ exit_->AddInstruction(new (GetAllocator()) HExit());
}
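For orientation, BuildLoop assembles a singly-nested loop out of the six blocks above. Only the return_ -> exit_ edge is visible in this hunk, so the remaining edges in the sketch below are inferred from the block names and the HIf/HGoto/HReturnVoid instructions, not quoted from the source:

// Assumed CFG built by the fixture (edges other than return_ -> exit_ inferred):
//
//         entry_
//           |
//     loop_preheader_      (HGoto)
//           |
//   +-> loop_header_       (HIf(parameter_))
//   |      /       \
//   | loop_body_   return_ (HGoto / HReturnVoid)
//   +-----+           |
//                   exit_  (HExit)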
// Performs LICM optimizations (after proper set up).
@@ -98,8 +96,6 @@ class LICMTest : public CommonCompilerTest {
}
// General building fields.
- ArenaPool pool_;
- ArenaAllocator allocator_;
HGraph* graph_;
// Specific basic blocks.
@@ -123,17 +119,17 @@ TEST_F(LICMTest, FieldHoisting) {
BuildLoop();
// Populate the loop with instructions: set/get field with different types.
- HInstruction* get_field = new (&allocator_) HInstanceFieldGet(parameter_,
- nullptr,
- DataType::Type::kInt64,
- MemberOffset(10),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph_->GetDexFile(),
- 0);
+ HInstruction* get_field = new (GetAllocator()) HInstanceFieldGet(parameter_,
+ nullptr,
+ DataType::Type::kInt64,
+ MemberOffset(10),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph_->GetDexFile(),
+ 0);
loop_body_->InsertInstructionBefore(get_field, loop_body_->GetLastInstruction());
- HInstruction* set_field = new (&allocator_) HInstanceFieldSet(
+ HInstruction* set_field = new (GetAllocator()) HInstanceFieldSet(
parameter_, int_constant_, nullptr, DataType::Type::kInt32, MemberOffset(20),
false, kUnknownFieldIndex, kUnknownClassDefIndex, graph_->GetDexFile(), 0);
loop_body_->InsertInstructionBefore(set_field, loop_body_->GetLastInstruction());
@@ -150,26 +146,26 @@ TEST_F(LICMTest, NoFieldHoisting) {
// Populate the loop with instructions: set/get field with same types.
ScopedNullHandle<mirror::DexCache> dex_cache;
- HInstruction* get_field = new (&allocator_) HInstanceFieldGet(parameter_,
- nullptr,
- DataType::Type::kInt64,
- MemberOffset(10),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph_->GetDexFile(),
- 0);
+ HInstruction* get_field = new (GetAllocator()) HInstanceFieldGet(parameter_,
+ nullptr,
+ DataType::Type::kInt64,
+ MemberOffset(10),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph_->GetDexFile(),
+ 0);
loop_body_->InsertInstructionBefore(get_field, loop_body_->GetLastInstruction());
- HInstruction* set_field = new (&allocator_) HInstanceFieldSet(parameter_,
- get_field,
- nullptr,
- DataType::Type::kInt64,
- MemberOffset(10),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph_->GetDexFile(),
- 0);
+ HInstruction* set_field = new (GetAllocator()) HInstanceFieldSet(parameter_,
+ get_field,
+ nullptr,
+ DataType::Type::kInt64,
+ MemberOffset(10),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph_->GetDexFile(),
+ 0);
loop_body_->InsertInstructionBefore(set_field, loop_body_->GetLastInstruction());
EXPECT_EQ(get_field->GetBlock(), loop_body_);
@@ -183,10 +179,10 @@ TEST_F(LICMTest, ArrayHoisting) {
BuildLoop();
// Populate the loop with instructions: set/get array with different types.
- HInstruction* get_array = new (&allocator_) HArrayGet(
+ HInstruction* get_array = new (GetAllocator()) HArrayGet(
parameter_, int_constant_, DataType::Type::kInt32, 0);
loop_body_->InsertInstructionBefore(get_array, loop_body_->GetLastInstruction());
- HInstruction* set_array = new (&allocator_) HArraySet(
+ HInstruction* set_array = new (GetAllocator()) HArraySet(
parameter_, int_constant_, float_constant_, DataType::Type::kFloat32, 0);
loop_body_->InsertInstructionBefore(set_array, loop_body_->GetLastInstruction());
@@ -201,10 +197,10 @@ TEST_F(LICMTest, NoArrayHoisting) {
BuildLoop();
// Populate the loop with instructions: set/get array with same types.
- HInstruction* get_array = new (&allocator_) HArrayGet(
+ HInstruction* get_array = new (GetAllocator()) HArrayGet(
parameter_, int_constant_, DataType::Type::kFloat32, 0);
loop_body_->InsertInstructionBefore(get_array, loop_body_->GetLastInstruction());
- HInstruction* set_array = new (&allocator_) HArraySet(
+ HInstruction* set_array = new (GetAllocator()) HArraySet(
parameter_, get_array, float_constant_, DataType::Type::kFloat32, 0);
loop_body_->InsertInstructionBefore(set_array, loop_body_->GetLastInstruction());
diff --git a/compiler/optimizing/linear_order.cc b/compiler/optimizing/linear_order.cc
index 80cecd41dc..58e00a810d 100644
--- a/compiler/optimizing/linear_order.cc
+++ b/compiler/optimizing/linear_order.cc
@@ -16,6 +16,9 @@
#include "linear_order.h"
+#include "base/scoped_arena_allocator.h"
+#include "base/scoped_arena_containers.h"
+
namespace art {
static bool InSameLoop(HLoopInformation* first_loop, HLoopInformation* second_loop) {
@@ -34,7 +37,8 @@ static bool IsInnerLoop(HLoopInformation* outer, HLoopInformation* inner) {
}
// Helper method to update work list for linear order.
-static void AddToListForLinearization(ArenaVector<HBasicBlock*>* worklist, HBasicBlock* block) {
+static void AddToListForLinearization(ScopedArenaVector<HBasicBlock*>* worklist,
+ HBasicBlock* block) {
HLoopInformation* block_loop = block->GetLoopInformation();
auto insert_pos = worklist->rbegin(); // insert_pos.base() will be the actual position.
for (auto end = worklist->rend(); insert_pos != end; ++insert_pos) {
@@ -51,7 +55,7 @@ static void AddToListForLinearization(ArenaVector<HBasicBlock*>* worklist, HBasi
}
// Helper method to validate linear order.
-static bool IsLinearOrderWellFormed(const HGraph* graph, ArenaVector<HBasicBlock*>* linear_order) {
+static bool IsLinearOrderWellFormed(const HGraph* graph, ArrayRef<HBasicBlock*> linear_order) {
for (HBasicBlock* header : graph->GetBlocks()) {
if (header == nullptr || !header->IsLoopHeader()) {
continue;
@@ -59,7 +63,7 @@ static bool IsLinearOrderWellFormed(const HGraph* graph, ArenaVector<HBasicBlock
HLoopInformation* loop = header->GetLoopInformation();
size_t num_blocks = loop->GetBlocks().NumSetBits();
size_t found_blocks = 0u;
- for (HBasicBlock* block : *linear_order) {
+ for (HBasicBlock* block : linear_order) {
if (loop->Contains(*block)) {
found_blocks++;
if (found_blocks == 1u && block != header) {
@@ -79,10 +83,8 @@ static bool IsLinearOrderWellFormed(const HGraph* graph, ArenaVector<HBasicBlock
return true;
}
-void LinearizeGraph(const HGraph* graph,
- ArenaAllocator* allocator,
- ArenaVector<HBasicBlock*>* linear_order) {
- DCHECK(linear_order->empty());
+void LinearizeGraphInternal(const HGraph* graph, ArrayRef<HBasicBlock*> linear_order) {
+ DCHECK_EQ(linear_order.size(), graph->GetReversePostOrder().size());
// Create a reverse post ordering with the following properties:
// - Blocks in a loop are consecutive,
// - Back-edge is the last block before loop exits.
@@ -92,8 +94,9 @@ void LinearizeGraph(const HGraph* graph,
// current reverse post order in the graph, but it would require making
// order queries to a GrowableArray, which is not the best data structure
// for it.
- ArenaVector<uint32_t> forward_predecessors(graph->GetBlocks().size(),
- allocator->Adapter(kArenaAllocLinearOrder));
+ ScopedArenaAllocator allocator(graph->GetArenaStack());
+ ScopedArenaVector<uint32_t> forward_predecessors(graph->GetBlocks().size(),
+ allocator.Adapter(kArenaAllocLinearOrder));
for (HBasicBlock* block : graph->GetReversePostOrder()) {
size_t number_of_forward_predecessors = block->GetPredecessors().size();
if (block->IsLoopHeader()) {
@@ -105,13 +108,14 @@ void LinearizeGraph(const HGraph* graph,
// iterate over the successors. When all non-back edge predecessors of a
// successor block are visited, the successor block is added in the worklist
// following an order that satisfies the requirements to build our linear graph.
- linear_order->reserve(graph->GetReversePostOrder().size());
- ArenaVector<HBasicBlock*> worklist(allocator->Adapter(kArenaAllocLinearOrder));
+ ScopedArenaVector<HBasicBlock*> worklist(allocator.Adapter(kArenaAllocLinearOrder));
worklist.push_back(graph->GetEntryBlock());
+ size_t num_added = 0u;
do {
HBasicBlock* current = worklist.back();
worklist.pop_back();
- linear_order->push_back(current);
+ linear_order[num_added] = current;
+ ++num_added;
for (HBasicBlock* successor : current->GetSuccessors()) {
int block_id = successor->GetBlockId();
size_t number_of_remaining_predecessors = forward_predecessors[block_id];
@@ -121,6 +125,7 @@ void LinearizeGraph(const HGraph* graph,
forward_predecessors[block_id] = number_of_remaining_predecessors - 1;
}
} while (!worklist.empty());
+ DCHECK_EQ(num_added, linear_order.size());
DCHECK(graph->HasIrreducibleLoops() || IsLinearOrderWellFormed(graph, linear_order));
}
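The rewritten LinearizeGraphInternal keeps the previous worklist scheme but writes straight into the caller-provided ArrayRef and keeps its scratch data (the forward-predecessor counts and the worklist) in a ScopedArenaAllocator created on the graph's arena stack, so that memory is released when the pass finishes. Setting aside the loop-aware insertion done by AddToListForLinearization, the ordering loop itself is Kahn's algorithm over forward edges; here is a self-contained sketch with plain std:: containers (illustrative only, not the ART code):

#include <cstddef>
#include <vector>

// `successors` is assumed to contain forward edges only -- back edges toward
// loop headers are excluded, much as the real pass discounts NumberOfBackEdges
// when initializing the counts. `forward_preds[b]` is the number of
// forward-edge predecessors of block b; `entry` is the unique block with a
// zero count.
std::vector<int> Linearize(const std::vector<std::vector<int>>& successors,
                           std::vector<std::size_t> forward_preds,
                           int entry) {
  std::vector<int> linear_order;
  linear_order.reserve(successors.size());
  std::vector<int> worklist;
  worklist.push_back(entry);
  while (!worklist.empty()) {
    int current = worklist.back();
    worklist.pop_back();
    linear_order.push_back(current);
    for (int succ : successors[current]) {
      // A successor becomes ready once its last forward predecessor is placed.
      if (--forward_preds[succ] == 0u) {
        worklist.push_back(succ);
      }
    }
  }
  return linear_order;
}

The real pass differs in one important way: instead of a plain push_back, AddToListForLinearization picks an insertion point in the worklist so that blocks of the same loop come out contiguous, which is the property IsLinearOrderWellFormed later verifies.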
diff --git a/compiler/optimizing/linear_order.h b/compiler/optimizing/linear_order.h
index 7122d67be9..151db001e1 100644
--- a/compiler/optimizing/linear_order.h
+++ b/compiler/optimizing/linear_order.h
@@ -17,10 +17,14 @@
#ifndef ART_COMPILER_OPTIMIZING_LINEAR_ORDER_H_
#define ART_COMPILER_OPTIMIZING_LINEAR_ORDER_H_
+#include <type_traits>
+
#include "nodes.h"
namespace art {
+void LinearizeGraphInternal(const HGraph* graph, ArrayRef<HBasicBlock*> linear_order);
+
// Linearizes the 'graph' such that:
// (1): a block is always after its dominator,
// (2): blocks of loops are contiguous.
@@ -32,9 +36,15 @@ namespace art {
//
// for (HBasicBlock* block : ReverseRange(linear_order)) // linear post order
//
-void LinearizeGraph(const HGraph* graph,
- ArenaAllocator* allocator,
- ArenaVector<HBasicBlock*>* linear_order);
+template <typename Vector>
+void LinearizeGraph(const HGraph* graph, Vector* linear_order) {
+ static_assert(std::is_same<HBasicBlock*, typename Vector::value_type>::value,
+ "Vector::value_type must be HBasicBlock*.");
+ // Resize the vector and pass an ArrayRef<> to internal implementation which is shared
+ // for all kinds of vectors, i.e. ArenaVector<> or ScopedArenaVector<>.
+ linear_order->resize(graph->GetReversePostOrder().size());
+ LinearizeGraphInternal(graph, ArrayRef<HBasicBlock*>(*linear_order));
+}
} // namespace art
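The new LinearizeGraph is deliberately a thin header template: it only resizes the caller's vector and hands a type-erased ArrayRef to the single out-of-line LinearizeGraphInternal, so ArenaVector<> and ScopedArenaVector<> callers share one compiled implementation. A small sketch of the same wrapper pattern with stand-in types (Span, Block and LinearizeInternal are made up for illustration):

#include <cstddef>
#include <type_traits>
#include <vector>

struct Block {};

// Minimal non-owning view standing in for ArrayRef<>.
template <typename T>
struct Span {
  T* data;
  std::size_t size;
};

// One shared out-of-line implementation, compiled once for every vector type.
inline void LinearizeInternal(Span<Block*> out) {
  for (std::size_t i = 0; i != out.size; ++i) {
    out.data[i] = nullptr;  // placeholder for the real ordering work
  }
}

template <typename Vector>
void Linearize(std::size_t num_blocks, Vector* out) {
  static_assert(std::is_same<Block*, typename Vector::value_type>::value,
                "Vector::value_type must be Block*.");
  out->resize(num_blocks);
  LinearizeInternal(Span<Block*>{out->data(), out->size()});
}

// Usage: any contiguous vector of Block* works, e.g.
//   std::vector<Block*> order;
//   Linearize(8, &order);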
diff --git a/compiler/optimizing/linearize_test.cc b/compiler/optimizing/linearize_test.cc
index 3831aa6c91..e82fab9b46 100644
--- a/compiler/optimizing/linearize_test.cc
+++ b/compiler/optimizing/linearize_test.cc
@@ -32,13 +32,16 @@
namespace art {
-class LinearizeTest : public CommonCompilerTest {};
+class LinearizeTest : public OptimizingUnitTest {
+ protected:
+ template <size_t number_of_blocks>
+ void TestCode(const uint16_t* data, const uint32_t (&expected_order)[number_of_blocks]);
+};
template <size_t number_of_blocks>
-static void TestCode(const uint16_t* data, const uint32_t (&expected_order)[number_of_blocks]) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateCFG(&allocator, data);
+void LinearizeTest::TestCode(const uint16_t* data,
+ const uint32_t (&expected_order)[number_of_blocks]) {
+ HGraph* graph = CreateCFG(data);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
diff --git a/compiler/optimizing/live_ranges_test.cc b/compiler/optimizing/live_ranges_test.cc
index f9a955fb0a..8087fe0988 100644
--- a/compiler/optimizing/live_ranges_test.cc
+++ b/compiler/optimizing/live_ranges_test.cc
@@ -29,10 +29,13 @@
namespace art {
-class LiveRangesTest : public CommonCompilerTest {};
+class LiveRangesTest : public OptimizingUnitTest {
+ public:
+ HGraph* BuildGraph(const uint16_t* data);
+};
-static HGraph* BuildGraph(const uint16_t* data, ArenaAllocator* allocator) {
- HGraph* graph = CreateCFG(allocator, data);
+HGraph* LiveRangesTest::BuildGraph(const uint16_t* data) {
+ HGraph* graph = CreateCFG(data);
// Suspend checks implementation may change in the future, and this test relies
// on how instructions are ordered.
RemoveSuspendChecks(graph);
@@ -58,9 +61,7 @@ TEST_F(LiveRangesTest, CFG1) {
Instruction::CONST_4 | 0 | 0,
Instruction::RETURN);
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = BuildGraph(data, &allocator);
+ HGraph* graph = BuildGraph(data);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
@@ -107,9 +108,7 @@ TEST_F(LiveRangesTest, CFG2) {
Instruction::GOTO | 0x100,
Instruction::RETURN | 0 << 8);
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = BuildGraph(data, &allocator);
+ HGraph* graph = BuildGraph(data);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
@@ -158,9 +157,7 @@ TEST_F(LiveRangesTest, CFG3) {
Instruction::CONST_4 | 4 << 12 | 0,
Instruction::RETURN | 0 << 8);
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = BuildGraph(data, &allocator);
+ HGraph* graph = BuildGraph(data);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
@@ -236,9 +233,7 @@ TEST_F(LiveRangesTest, Loop1) {
Instruction::CONST_4 | 5 << 12 | 1 << 8,
Instruction::RETURN | 1 << 8);
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = BuildGraph(data, &allocator);
+ HGraph* graph = BuildGraph(data);
RemoveSuspendChecks(graph);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
@@ -316,9 +311,7 @@ TEST_F(LiveRangesTest, Loop2) {
Instruction::GOTO | 0xFB00,
Instruction::RETURN | 0 << 8);
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = BuildGraph(data, &allocator);
+ HGraph* graph = BuildGraph(data);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
@@ -394,9 +387,7 @@ TEST_F(LiveRangesTest, CFG4) {
Instruction::ADD_INT, 1 << 8,
Instruction::RETURN);
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = BuildGraph(data, &allocator);
+ HGraph* graph = BuildGraph(data);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
diff --git a/compiler/optimizing/liveness_test.cc b/compiler/optimizing/liveness_test.cc
index 37b58ded59..7793965148 100644
--- a/compiler/optimizing/liveness_test.cc
+++ b/compiler/optimizing/liveness_test.cc
@@ -29,7 +29,10 @@
namespace art {
-class LivenessTest : public CommonCompilerTest {};
+class LivenessTest : public OptimizingUnitTest {
+ protected:
+ void TestCode(const uint16_t* data, const char* expected);
+};
static void DumpBitVector(BitVector* vector,
std::ostream& buffer,
@@ -43,10 +46,8 @@ static void DumpBitVector(BitVector* vector,
buffer << ")\n";
}
-static void TestCode(const uint16_t* data, const char* expected) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateCFG(&allocator, data);
+void LivenessTest::TestCode(const uint16_t* data, const char* expected) {
+ HGraph* graph = CreateCFG(data);
// `Inline` conditions into ifs.
PrepareForRegisterAllocation(graph).Run();
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
diff --git a/compiler/optimizing/load_store_analysis.h b/compiler/optimizing/load_store_analysis.h
index d46b904c9e..6a25da3cfd 100644
--- a/compiler/optimizing/load_store_analysis.h
+++ b/compiler/optimizing/load_store_analysis.h
@@ -172,9 +172,9 @@ class HeapLocationCollector : public HGraphVisitor {
explicit HeapLocationCollector(HGraph* graph)
: HGraphVisitor(graph),
- ref_info_array_(graph->GetArena()->Adapter(kArenaAllocLSE)),
- heap_locations_(graph->GetArena()->Adapter(kArenaAllocLSE)),
- aliasing_matrix_(graph->GetArena(),
+ ref_info_array_(graph->GetAllocator()->Adapter(kArenaAllocLSE)),
+ heap_locations_(graph->GetAllocator()->Adapter(kArenaAllocLSE)),
+ aliasing_matrix_(graph->GetAllocator(),
kInitialAliasingMatrixBitVectorSize,
true,
kArenaAllocLSE),
@@ -362,7 +362,7 @@ class HeapLocationCollector : public HGraphVisitor {
ReferenceInfo* ref_info = FindReferenceInfoOf(instruction);
if (ref_info == nullptr) {
size_t pos = ref_info_array_.size();
- ref_info = new (GetGraph()->GetArena()) ReferenceInfo(instruction, pos);
+ ref_info = new (GetGraph()->GetAllocator()) ReferenceInfo(instruction, pos);
ref_info_array_.push_back(ref_info);
}
return ref_info;
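GetOrCreateReferenceInfo (and the heap-location lookup below it) follow a simple find-or-create convention: a record's position is its index in the backing vector, and new records are allocated from the graph's allocator so they outlive the collection pass. A toy version of the same convention (RefInfo/Collector are illustrative names, and std::unique_ptr stands in for arena allocation):

#include <cstddef>
#include <memory>
#include <string>
#include <vector>

struct RefInfo {
  std::string name;
  std::size_t position;  // index in the owning vector, usable as an id elsewhere
};

class Collector {
 public:
  // Returns the existing record for `name`, or creates one whose position is
  // its index in ref_infos_. Storing pointers keeps returned records stable
  // while the vector grows (the real code stores arena-allocated pointers).
  RefInfo* GetOrCreate(const std::string& name) {
    for (const auto& info : ref_infos_) {
      if (info->name == name) {
        return info.get();
      }
    }
    ref_infos_.push_back(std::make_unique<RefInfo>(RefInfo{name, ref_infos_.size()}));
    return ref_infos_.back().get();
  }

 private:
  std::vector<std::unique_ptr<RefInfo>> ref_infos_;
};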
@@ -385,7 +385,7 @@ class HeapLocationCollector : public HGraphVisitor {
size_t heap_location_idx = FindHeapLocationIndex(
ref_info, offset, index, declaring_class_def_index);
if (heap_location_idx == kHeapLocationNotFound) {
- HeapLocation* heap_loc = new (GetGraph()->GetArena())
+ HeapLocation* heap_loc = new (GetGraph()->GetAllocator())
HeapLocation(ref_info, offset, index, declaring_class_def_index);
heap_locations_.push_back(heap_loc);
return heap_loc;
diff --git a/compiler/optimizing/load_store_analysis_test.cc b/compiler/optimizing/load_store_analysis_test.cc
index 0df2f27e82..86696d02a1 100644
--- a/compiler/optimizing/load_store_analysis_test.cc
+++ b/compiler/optimizing/load_store_analysis_test.cc
@@ -22,19 +22,15 @@
namespace art {
-class LoadStoreAnalysisTest : public CommonCompilerTest {
+class LoadStoreAnalysisTest : public OptimizingUnitTest {
public:
- LoadStoreAnalysisTest() : pool_(), allocator_(&pool_) {
- graph_ = CreateGraph(&allocator_);
- }
+ LoadStoreAnalysisTest() : graph_(CreateGraph()) { }
- ArenaPool pool_;
- ArenaAllocator allocator_;
HGraph* graph_;
};
TEST_F(LoadStoreAnalysisTest, ArrayHeapLocations) {
- HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry);
graph_->SetEntryBlock(entry);
@@ -48,18 +44,19 @@ TEST_F(LoadStoreAnalysisTest, ArrayHeapLocations) {
// array_get2 ArrayGet [array, c2]
// array_set1 ArraySet [array, c1, c3]
// array_set2 ArraySet [array, index, c3]
- HInstruction* array = new (&allocator_) HParameterValue(
+ HInstruction* array = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference);
- HInstruction* index = new (&allocator_) HParameterValue(
+ HInstruction* index = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(1), 1, DataType::Type::kInt32);
HInstruction* c1 = graph_->GetIntConstant(1);
HInstruction* c2 = graph_->GetIntConstant(2);
HInstruction* c3 = graph_->GetIntConstant(3);
- HInstruction* array_get1 = new (&allocator_) HArrayGet(array, c1, DataType::Type::kInt32, 0);
- HInstruction* array_get2 = new (&allocator_) HArrayGet(array, c2, DataType::Type::kInt32, 0);
- HInstruction* array_set1 = new (&allocator_) HArraySet(array, c1, c3, DataType::Type::kInt32, 0);
+ HInstruction* array_get1 = new (GetAllocator()) HArrayGet(array, c1, DataType::Type::kInt32, 0);
+ HInstruction* array_get2 = new (GetAllocator()) HArrayGet(array, c2, DataType::Type::kInt32, 0);
+ HInstruction* array_set1 =
+ new (GetAllocator()) HArraySet(array, c1, c3, DataType::Type::kInt32, 0);
HInstruction* array_set2 =
- new (&allocator_) HArraySet(array, index, c3, DataType::Type::kInt32, 0);
+ new (GetAllocator()) HArraySet(array, index, c3, DataType::Type::kInt32, 0);
entry->AddInstruction(array);
entry->AddInstruction(index);
entry->AddInstruction(array_get1);
@@ -107,7 +104,7 @@ TEST_F(LoadStoreAnalysisTest, ArrayHeapLocations) {
}
TEST_F(LoadStoreAnalysisTest, FieldHeapLocations) {
- HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry);
graph_->SetEntryBlock(entry);
@@ -119,38 +116,38 @@ TEST_F(LoadStoreAnalysisTest, FieldHeapLocations) {
// get_field20 InstanceFieldGet [object, 20]
HInstruction* c1 = graph_->GetIntConstant(1);
- HInstruction* object = new (&allocator_) HParameterValue(graph_->GetDexFile(),
- dex::TypeIndex(0),
- 0,
- DataType::Type::kReference);
- HInstanceFieldSet* set_field10 = new (&allocator_) HInstanceFieldSet(object,
- c1,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(10),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph_->GetDexFile(),
- 0);
- HInstanceFieldGet* get_field10 = new (&allocator_) HInstanceFieldGet(object,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(10),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph_->GetDexFile(),
- 0);
- HInstanceFieldGet* get_field20 = new (&allocator_) HInstanceFieldGet(object,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(20),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph_->GetDexFile(),
- 0);
+ HInstruction* object = new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
+ dex::TypeIndex(0),
+ 0,
+ DataType::Type::kReference);
+ HInstanceFieldSet* set_field10 = new (GetAllocator()) HInstanceFieldSet(object,
+ c1,
+ nullptr,
+ DataType::Type::kInt32,
+ MemberOffset(10),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph_->GetDexFile(),
+ 0);
+ HInstanceFieldGet* get_field10 = new (GetAllocator()) HInstanceFieldGet(object,
+ nullptr,
+ DataType::Type::kInt32,
+ MemberOffset(10),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph_->GetDexFile(),
+ 0);
+ HInstanceFieldGet* get_field20 = new (GetAllocator()) HInstanceFieldGet(object,
+ nullptr,
+ DataType::Type::kInt32,
+ MemberOffset(20),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph_->GetDexFile(),
+ 0);
entry->AddInstruction(object);
entry->AddInstruction(set_field10);
entry->AddInstruction(get_field10);
@@ -186,34 +183,38 @@ TEST_F(LoadStoreAnalysisTest, FieldHeapLocations) {
}
TEST_F(LoadStoreAnalysisTest, ArrayIndexAliasingTest) {
- HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry);
graph_->SetEntryBlock(entry);
graph_->BuildDominatorTree();
- HInstruction* array = new (&allocator_) HParameterValue(
+ HInstruction* array = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference);
- HInstruction* index = new (&allocator_) HParameterValue(
+ HInstruction* index = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(1), 1, DataType::Type::kInt32);
HInstruction* c0 = graph_->GetIntConstant(0);
HInstruction* c1 = graph_->GetIntConstant(1);
HInstruction* c_neg1 = graph_->GetIntConstant(-1);
- HInstruction* add0 = new (&allocator_) HAdd(DataType::Type::kInt32, index, c0);
- HInstruction* add1 = new (&allocator_) HAdd(DataType::Type::kInt32, index, c1);
- HInstruction* sub0 = new (&allocator_) HSub(DataType::Type::kInt32, index, c0);
- HInstruction* sub1 = new (&allocator_) HSub(DataType::Type::kInt32, index, c1);
- HInstruction* sub_neg1 = new (&allocator_) HSub(DataType::Type::kInt32, index, c_neg1);
- HInstruction* rev_sub1 = new (&allocator_) HSub(DataType::Type::kInt32, c1, index);
- HInstruction* arr_set1 = new (&allocator_) HArraySet(array, c0, c0, DataType::Type::kInt32, 0);
- HInstruction* arr_set2 = new (&allocator_) HArraySet(array, c1, c0, DataType::Type::kInt32, 0);
- HInstruction* arr_set3 = new (&allocator_) HArraySet(array, add0, c0, DataType::Type::kInt32, 0);
- HInstruction* arr_set4 = new (&allocator_) HArraySet(array, add1, c0, DataType::Type::kInt32, 0);
- HInstruction* arr_set5 = new (&allocator_) HArraySet(array, sub0, c0, DataType::Type::kInt32, 0);
- HInstruction* arr_set6 = new (&allocator_) HArraySet(array, sub1, c0, DataType::Type::kInt32, 0);
+ HInstruction* add0 = new (GetAllocator()) HAdd(DataType::Type::kInt32, index, c0);
+ HInstruction* add1 = new (GetAllocator()) HAdd(DataType::Type::kInt32, index, c1);
+ HInstruction* sub0 = new (GetAllocator()) HSub(DataType::Type::kInt32, index, c0);
+ HInstruction* sub1 = new (GetAllocator()) HSub(DataType::Type::kInt32, index, c1);
+ HInstruction* sub_neg1 = new (GetAllocator()) HSub(DataType::Type::kInt32, index, c_neg1);
+ HInstruction* rev_sub1 = new (GetAllocator()) HSub(DataType::Type::kInt32, c1, index);
+ HInstruction* arr_set1 = new (GetAllocator()) HArraySet(array, c0, c0, DataType::Type::kInt32, 0);
+ HInstruction* arr_set2 = new (GetAllocator()) HArraySet(array, c1, c0, DataType::Type::kInt32, 0);
+ HInstruction* arr_set3 =
+ new (GetAllocator()) HArraySet(array, add0, c0, DataType::Type::kInt32, 0);
+ HInstruction* arr_set4 =
+ new (GetAllocator()) HArraySet(array, add1, c0, DataType::Type::kInt32, 0);
+ HInstruction* arr_set5 =
+ new (GetAllocator()) HArraySet(array, sub0, c0, DataType::Type::kInt32, 0);
+ HInstruction* arr_set6 =
+ new (GetAllocator()) HArraySet(array, sub1, c0, DataType::Type::kInt32, 0);
HInstruction* arr_set7 =
- new (&allocator_) HArraySet(array, rev_sub1, c0, DataType::Type::kInt32, 0);
+ new (GetAllocator()) HArraySet(array, rev_sub1, c0, DataType::Type::kInt32, 0);
HInstruction* arr_set8 =
- new (&allocator_) HArraySet(array, sub_neg1, c0, DataType::Type::kInt32, 0);
+ new (GetAllocator()) HArraySet(array, sub_neg1, c0, DataType::Type::kInt32, 0);
entry->AddInstruction(array);
entry->AddInstruction(index);
@@ -272,14 +273,14 @@ TEST_F(LoadStoreAnalysisTest, ArrayIndexAliasingTest) {
}
TEST_F(LoadStoreAnalysisTest, ArrayIndexCalculationOverflowTest) {
- HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry);
graph_->SetEntryBlock(entry);
graph_->BuildDominatorTree();
- HInstruction* array = new (&allocator_) HParameterValue(
+ HInstruction* array = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference);
- HInstruction* index = new (&allocator_) HParameterValue(
+ HInstruction* index = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(1), 1, DataType::Type::kInt32);
HInstruction* c0 = graph_->GetIntConstant(0);
@@ -290,40 +291,40 @@ TEST_F(LoadStoreAnalysisTest, ArrayIndexCalculationOverflowTest) {
HInstruction* c_0x80000001 = graph_->GetIntConstant(0x80000001);
// `index+0x80000000` and `index-0x80000000` array indices MAY alias.
- HInstruction* add_0x80000000 = new (&allocator_) HAdd(
+ HInstruction* add_0x80000000 = new (GetAllocator()) HAdd(
DataType::Type::kInt32, index, c_0x80000000);
- HInstruction* sub_0x80000000 = new (&allocator_) HSub(
+ HInstruction* sub_0x80000000 = new (GetAllocator()) HSub(
DataType::Type::kInt32, index, c_0x80000000);
- HInstruction* arr_set_1 = new (&allocator_) HArraySet(
+ HInstruction* arr_set_1 = new (GetAllocator()) HArraySet(
array, add_0x80000000, c0, DataType::Type::kInt32, 0);
- HInstruction* arr_set_2 = new (&allocator_) HArraySet(
+ HInstruction* arr_set_2 = new (GetAllocator()) HArraySet(
array, sub_0x80000000, c0, DataType::Type::kInt32, 0);
// `index+0x10` and `index-0xFFFFFFF0` array indices MAY alias.
- HInstruction* add_0x10 = new (&allocator_) HAdd(DataType::Type::kInt32, index, c_0x10);
- HInstruction* sub_0xFFFFFFF0 = new (&allocator_) HSub(
+ HInstruction* add_0x10 = new (GetAllocator()) HAdd(DataType::Type::kInt32, index, c_0x10);
+ HInstruction* sub_0xFFFFFFF0 = new (GetAllocator()) HSub(
DataType::Type::kInt32, index, c_0xFFFFFFF0);
- HInstruction* arr_set_3 = new (&allocator_) HArraySet(
+ HInstruction* arr_set_3 = new (GetAllocator()) HArraySet(
array, add_0x10, c0, DataType::Type::kInt32, 0);
- HInstruction* arr_set_4 = new (&allocator_) HArraySet(
+ HInstruction* arr_set_4 = new (GetAllocator()) HArraySet(
array, sub_0xFFFFFFF0, c0, DataType::Type::kInt32, 0);
// `index+0x7FFFFFFF` and `index-0x80000001` array indices MAY alias.
- HInstruction* add_0x7FFFFFFF = new (&allocator_) HAdd(
+ HInstruction* add_0x7FFFFFFF = new (GetAllocator()) HAdd(
DataType::Type::kInt32, index, c_0x7FFFFFFF);
- HInstruction* sub_0x80000001 = new (&allocator_) HSub(
+ HInstruction* sub_0x80000001 = new (GetAllocator()) HSub(
DataType::Type::kInt32, index, c_0x80000001);
- HInstruction* arr_set_5 = new (&allocator_) HArraySet(
+ HInstruction* arr_set_5 = new (GetAllocator()) HArraySet(
array, add_0x7FFFFFFF, c0, DataType::Type::kInt32, 0);
- HInstruction* arr_set_6 = new (&allocator_) HArraySet(
+ HInstruction* arr_set_6 = new (GetAllocator()) HArraySet(
array, sub_0x80000001, c0, DataType::Type::kInt32, 0);
// `index+0` and `index-0` array indices MAY alias.
- HInstruction* add_0 = new (&allocator_) HAdd(DataType::Type::kInt32, index, c0);
- HInstruction* sub_0 = new (&allocator_) HSub(DataType::Type::kInt32, index, c0);
- HInstruction* arr_set_7 = new (&allocator_) HArraySet(
+ HInstruction* add_0 = new (GetAllocator()) HAdd(DataType::Type::kInt32, index, c0);
+ HInstruction* sub_0 = new (GetAllocator()) HSub(DataType::Type::kInt32, index, c0);
+ HInstruction* arr_set_7 = new (GetAllocator()) HArraySet(
array, add_0, c0, DataType::Type::kInt32, 0);
- HInstruction* arr_set_8 = new (&allocator_) HArraySet(
+ HInstruction* arr_set_8 = new (GetAllocator()) HArraySet(
array, sub_0, c0, DataType::Type::kInt32, 0);
entry->AddInstruction(array);
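
The MAY-alias comments in ArrayIndexCalculationOverflowTest above come down to 32-bit two's-complement wrap-around: the paired add/sub indices compute the same value modulo 2^32. A small standalone check of that arithmetic (plain C++, not ART code, purely illustrative):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t index = 12345u;  // any 32-bit index value
      // i + 0x80000000 and i - 0x80000000 wrap to the same value,
      // so the two array accesses may touch the same element.
      assert(index + 0x80000000u == index - 0x80000000u);
      // Likewise for the other pairs exercised by the test.
      assert(index + 0x10u == index - 0xFFFFFFF0u);
      assert(index + 0x7FFFFFFFu == index - 0x80000001u);
      return 0;
    }
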
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index 54c2d43e9c..39bfc86432 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -49,13 +49,13 @@ class LSEVisitor : public HGraphVisitor {
ArenaVector<HInstruction*>(heap_locations_collector.
GetNumberOfHeapLocations(),
kUnknownHeapValue,
- graph->GetArena()->Adapter(kArenaAllocLSE)),
- graph->GetArena()->Adapter(kArenaAllocLSE)),
- removed_loads_(graph->GetArena()->Adapter(kArenaAllocLSE)),
- substitute_instructions_for_loads_(graph->GetArena()->Adapter(kArenaAllocLSE)),
- possibly_removed_stores_(graph->GetArena()->Adapter(kArenaAllocLSE)),
- singleton_new_instances_(graph->GetArena()->Adapter(kArenaAllocLSE)),
- singleton_new_arrays_(graph->GetArena()->Adapter(kArenaAllocLSE)) {
+ graph->GetAllocator()->Adapter(kArenaAllocLSE)),
+ graph->GetAllocator()->Adapter(kArenaAllocLSE)),
+ removed_loads_(graph->GetAllocator()->Adapter(kArenaAllocLSE)),
+ substitute_instructions_for_loads_(graph->GetAllocator()->Adapter(kArenaAllocLSE)),
+ possibly_removed_stores_(graph->GetAllocator()->Adapter(kArenaAllocLSE)),
+ singleton_new_instances_(graph->GetAllocator()->Adapter(kArenaAllocLSE)),
+ singleton_new_arrays_(graph->GetAllocator()->Adapter(kArenaAllocLSE)) {
}
void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
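
The LSEVisitor initializer list above passes graph->GetAllocator()->Adapter(kArenaAllocLSE) to every ArenaVector, so each container allocates through the graph's arena and tags its allocations with the kArenaAllocLSE kind. A minimal standard-library sketch of that adapter idiom; TaggedAllocator and kAllocLSE are illustrative names, not ART APIs, and only the tag-threading is modeled here (the real adapter also routes storage into the arena):

    #include <cstddef>
    #include <memory>
    #include <vector>

    enum AllocKind { kAllocLSE, kAllocGraph };

    // A stateful allocator that carries an allocation-kind tag, playing the
    // role of the adapter returned by Adapter(kArenaAllocLSE).
    template <typename T>
    struct TaggedAllocator {
      using value_type = T;
      explicit TaggedAllocator(AllocKind k) : kind(k) {}
      template <typename U>
      TaggedAllocator(const TaggedAllocator<U>& other) : kind(other.kind) {}
      T* allocate(std::size_t n) { return std::allocator<T>().allocate(n); }
      void deallocate(T* p, std::size_t n) { std::allocator<T>().deallocate(p, n); }
      AllocKind kind;
    };

    template <typename T, typename U>
    bool operator==(const TaggedAllocator<T>& a, const TaggedAllocator<U>& b) { return a.kind == b.kind; }
    template <typename T, typename U>
    bool operator!=(const TaggedAllocator<T>& a, const TaggedAllocator<U>& b) { return !(a == b); }

    int main() {
      TaggedAllocator<int> lse_alloc(kAllocLSE);  // adapter tagged for this pass
      std::vector<int, TaggedAllocator<int>> removed_loads(lse_alloc);
      removed_loads.push_back(7);
      return removed_loads.size() == 1u ? 0 : 1;
    }
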
diff --git a/compiler/optimizing/locations.cc b/compiler/optimizing/locations.cc
index 40fe35b31b..29dfddf4d6 100644
--- a/compiler/optimizing/locations.cc
+++ b/compiler/optimizing/locations.cc
@@ -28,10 +28,10 @@ static_assert(std::is_trivially_copyable<Location>::value, "Location should be t
LocationSummary::LocationSummary(HInstruction* instruction,
CallKind call_kind,
- bool intrinsified)
- : inputs_(instruction->InputCount(),
- instruction->GetBlock()->GetGraph()->GetArena()->Adapter(kArenaAllocLocationSummary)),
- temps_(instruction->GetBlock()->GetGraph()->GetArena()->Adapter(kArenaAllocLocationSummary)),
+ bool intrinsified,
+ ArenaAllocator* allocator)
+ : inputs_(instruction->InputCount(), allocator->Adapter(kArenaAllocLocationSummary)),
+ temps_(allocator->Adapter(kArenaAllocLocationSummary)),
call_kind_(call_kind),
intrinsified_(intrinsified),
has_custom_slow_path_calling_convention_(false),
@@ -43,11 +43,18 @@ LocationSummary::LocationSummary(HInstruction* instruction,
instruction->SetLocations(this);
if (NeedsSafepoint()) {
- ArenaAllocator* arena = instruction->GetBlock()->GetGraph()->GetArena();
+ ArenaAllocator* arena = instruction->GetBlock()->GetGraph()->GetAllocator();
stack_mask_ = ArenaBitVector::Create(arena, 0, true, kArenaAllocLocationSummary);
}
}
+LocationSummary::LocationSummary(HInstruction* instruction,
+ CallKind call_kind,
+ bool intrinsified)
+ : LocationSummary(instruction,
+ call_kind,
+ intrinsified,
+ instruction->GetBlock()->GetGraph()->GetAllocator()) {}
Location Location::RegisterOrConstant(HInstruction* instruction) {
return instruction->IsConstant()
diff --git a/compiler/optimizing/locations.h b/compiler/optimizing/locations.h
index 6f0dbce2df..d56c151748 100644
--- a/compiler/optimizing/locations.h
+++ b/compiler/optimizing/locations.h
@@ -665,6 +665,11 @@ class LocationSummary : public ArenaObject<kArenaAllocLocationSummary> {
}
private:
+ LocationSummary(HInstruction* instruction,
+ CallKind call_kind,
+ bool intrinsified,
+ ArenaAllocator* allocator);
+
ArenaVector<Location> inputs_;
ArenaVector<Location> temps_;
const CallKind call_kind_;
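
The locations.cc and locations.h changes above split LocationSummary construction in two: a new private constructor takes the ArenaAllocator explicitly, and the pre-existing public signature now delegates to it with the graph's allocator, so existing call sites are untouched. A minimal standalone sketch of that delegating-constructor shape (Summary and DefaultTag are illustrative names, not ART types):

    #include <string>

    class Summary {
     public:
      // Existing public signature: callers are unchanged...
      explicit Summary(int input_count)
          : Summary(input_count, DefaultTag()) {}  // ...it just forwards the default dependency.

      int input_count() const { return input_count_; }
      const std::string& tag() const { return tag_; }

     private:
      // New constructor takes the dependency explicitly.
      Summary(int input_count, const std::string& tag)
          : input_count_(input_count), tag_(tag) {}

      static std::string DefaultTag() { return "graph-allocator"; }

      int input_count_;
      std::string tag_;
    };

    int main() {
      Summary s(3);  // resolves through the delegation chain
      return (s.input_count() == 3 && s.tag() == "graph-allocator") ? 0 : 1;
    }
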
diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc
index c51fafa695..d87861bde0 100644
--- a/compiler/optimizing/loop_optimization.cc
+++ b/compiler/optimizing/loop_optimization.cc
@@ -429,7 +429,7 @@ static HInstruction* Insert(HBasicBlock* block, HInstruction* instruction) {
// Check that instructions from the induction sets are fully removed: have no uses
// and no other instructions use them.
-static bool CheckInductionSetFullyRemoved(ArenaSet<HInstruction*>* iset) {
+static bool CheckInductionSetFullyRemoved(ScopedArenaSet<HInstruction*>* iset) {
for (HInstruction* instr : *iset) {
if (instr->GetBlock() != nullptr ||
!instr->GetUses().empty() ||
@@ -453,7 +453,7 @@ HLoopOptimization::HLoopOptimization(HGraph* graph,
compiler_driver_(compiler_driver),
induction_range_(induction_analysis),
loop_allocator_(nullptr),
- global_allocator_(graph_->GetArena()),
+ global_allocator_(graph_->GetAllocator()),
top_loop_(nullptr),
last_loop_(nullptr),
iset_(nullptr),
@@ -465,7 +465,12 @@ HLoopOptimization::HLoopOptimization(HGraph* graph,
vector_runtime_test_a_(nullptr),
vector_runtime_test_b_(nullptr),
vector_map_(nullptr),
- vector_permanent_map_(nullptr) {
+ vector_permanent_map_(nullptr),
+ vector_mode_(kSequential),
+ vector_preheader_(nullptr),
+ vector_header_(nullptr),
+ vector_body_(nullptr),
+ vector_index_(nullptr) {
}
void HLoopOptimization::Run() {
@@ -475,10 +480,8 @@ void HLoopOptimization::Run() {
return;
}
- // Phase-local allocator that draws from the global pool. Since the allocator
- // itself resides on the stack, it is destructed on exiting Run(), which
- // implies its underlying memory is released immediately.
- ArenaAllocator allocator(global_allocator_->GetArenaPool());
+ // Phase-local allocator.
+ ScopedArenaAllocator allocator(graph_->GetArenaStack());
loop_allocator_ = &allocator;
// Perform loop optimizations.
@@ -499,8 +502,8 @@ void HLoopOptimization::Run() {
void HLoopOptimization::LocalRun() {
// Build the linear order using the phase-local allocator. This step enables building
// a loop hierarchy that properly reflects the outer-inner and previous-next relation.
- ArenaVector<HBasicBlock*> linear_order(loop_allocator_->Adapter(kArenaAllocLinearOrder));
- LinearizeGraph(graph_, loop_allocator_, &linear_order);
+ ScopedArenaVector<HBasicBlock*> linear_order(loop_allocator_->Adapter(kArenaAllocLinearOrder));
+ LinearizeGraph(graph_, &linear_order);
// Build the loop hierarchy.
for (HBasicBlock* block : linear_order) {
@@ -513,13 +516,13 @@ void HLoopOptimization::LocalRun() {
// temporary data structures using the phase-local allocator. All new HIR
// should use the global allocator.
if (top_loop_ != nullptr) {
- ArenaSet<HInstruction*> iset(loop_allocator_->Adapter(kArenaAllocLoopOptimization));
- ArenaSafeMap<HInstruction*, HInstruction*> reds(
+ ScopedArenaSet<HInstruction*> iset(loop_allocator_->Adapter(kArenaAllocLoopOptimization));
+ ScopedArenaSafeMap<HInstruction*, HInstruction*> reds(
std::less<HInstruction*>(), loop_allocator_->Adapter(kArenaAllocLoopOptimization));
- ArenaSet<ArrayReference> refs(loop_allocator_->Adapter(kArenaAllocLoopOptimization));
- ArenaSafeMap<HInstruction*, HInstruction*> map(
+ ScopedArenaSet<ArrayReference> refs(loop_allocator_->Adapter(kArenaAllocLoopOptimization));
+ ScopedArenaSafeMap<HInstruction*, HInstruction*> map(
std::less<HInstruction*>(), loop_allocator_->Adapter(kArenaAllocLoopOptimization));
- ArenaSafeMap<HInstruction*, HInstruction*> perm(
+ ScopedArenaSafeMap<HInstruction*, HInstruction*> perm(
std::less<HInstruction*>(), loop_allocator_->Adapter(kArenaAllocLoopOptimization));
// Attach.
iset_ = &iset;
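
The Run()/LocalRun() hunks above move every temporary of the loop optimizer onto a ScopedArenaAllocator built from graph_->GetArenaStack(), so that memory lives exactly as long as the pass. A standard-library analogue of that lifetime shape, runnable on its own (std::pmr stands in for the scoped arena here; this is not ART code):

    #include <memory_resource>
    #include <vector>

    void RunPhase() {
      // Arena scoped to this stack frame, like the ScopedArenaAllocator in Run().
      std::pmr::monotonic_buffer_resource phase_arena;
      std::pmr::vector<int> linear_order(&phase_arena);  // like ScopedArenaVector
      std::pmr::vector<int> worklist(&phase_arena);
      for (int i = 0; i < 8; ++i) {
        worklist.push_back(i);
        linear_order.push_back(i);
      }
    }  // phase_arena is destroyed here and its memory is released in one go.

    int main() {
      RunPhase();
      return 0;
    }

Anything that becomes part of the graph still has to outlive the pass, which is why the comment in LocalRun() keeps all new HIR on the global (graph) allocator.
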
diff --git a/compiler/optimizing/loop_optimization.h b/compiler/optimizing/loop_optimization.h
index 6e6e3873f9..b1b3d110bc 100644
--- a/compiler/optimizing/loop_optimization.h
+++ b/compiler/optimizing/loop_optimization.h
@@ -17,6 +17,8 @@
#ifndef ART_COMPILER_OPTIMIZING_LOOP_OPTIMIZATION_H_
#define ART_COMPILER_OPTIMIZING_LOOP_OPTIMIZATION_H_
+#include "base/scoped_arena_allocator.h"
+#include "base/scoped_arena_containers.h"
#include "induction_var_range.h"
#include "nodes.h"
#include "optimization.h"
@@ -220,7 +222,7 @@ class HLoopOptimization : public HOptimization {
// Phase-local heap memory allocator for the loop optimizer. Storage obtained
// through this allocator is immediately released when the loop optimizer is done.
- ArenaAllocator* loop_allocator_;
+ ScopedArenaAllocator* loop_allocator_;
// Global heap memory allocator. Used to build HIR.
ArenaAllocator* global_allocator_;
@@ -232,14 +234,14 @@ class HLoopOptimization : public HOptimization {
// Temporary bookkeeping of a set of instructions.
// Contents reside in phase-local heap memory.
- ArenaSet<HInstruction*>* iset_;
+ ScopedArenaSet<HInstruction*>* iset_;
// Temporary bookkeeping of reduction instructions. Mapping is two-fold:
// (1) reductions in the loop-body are mapped back to their phi definition,
// (2) phi definitions are mapped to their initial value (updated during
// code generation to feed the proper values into the new chain).
// Contents reside in phase-local heap memory.
- ArenaSafeMap<HInstruction*, HInstruction*>* reductions_;
+ ScopedArenaSafeMap<HInstruction*, HInstruction*>* reductions_;
// Flag that tracks if any simplifications have occurred.
bool simplified_;
@@ -249,7 +251,7 @@ class HLoopOptimization : public HOptimization {
// Set of array references in the vector loop.
// Contents reside in phase-local heap memory.
- ArenaSet<ArrayReference>* vector_refs_;
+ ScopedArenaSet<ArrayReference>* vector_refs_;
// Dynamic loop peeling candidate for alignment.
const ArrayReference* vector_peeling_candidate_;
@@ -262,11 +264,11 @@ class HLoopOptimization : public HOptimization {
// loop (mode is kSequential) and the actual vector loop (mode is kVector). The data
// structure maps original instructions into the new instructions.
// Contents reside in phase-local heap memory.
- ArenaSafeMap<HInstruction*, HInstruction*>* vector_map_;
+ ScopedArenaSafeMap<HInstruction*, HInstruction*>* vector_map_;
// Permanent mapping used during vectorization synthesis.
// Contents reside in phase-local heap memory.
- ArenaSafeMap<HInstruction*, HInstruction*>* vector_permanent_map_;
+ ScopedArenaSafeMap<HInstruction*, HInstruction*>* vector_permanent_map_;
// Temporary vectorization bookkeeping.
VectorMode vector_mode_; // synthesis mode
diff --git a/compiler/optimizing/loop_optimization_test.cc b/compiler/optimizing/loop_optimization_test.cc
index 95718ae388..4e1857df5b 100644
--- a/compiler/optimizing/loop_optimization_test.cc
+++ b/compiler/optimizing/loop_optimization_test.cc
@@ -24,14 +24,12 @@ namespace art {
* constructing the loop hierarchy. Actual optimizations are tested
* through the checker tests.
*/
-class LoopOptimizationTest : public CommonCompilerTest {
+class LoopOptimizationTest : public OptimizingUnitTest {
public:
LoopOptimizationTest()
- : pool_(),
- allocator_(&pool_),
- graph_(CreateGraph(&allocator_)),
- iva_(new (&allocator_) HInductionVarAnalysis(graph_)),
- loop_opt_(new (&allocator_) HLoopOptimization(graph_, nullptr, iva_, nullptr)) {
+ : graph_(CreateGraph()),
+ iva_(new (GetAllocator()) HInductionVarAnalysis(graph_)),
+ loop_opt_(new (GetAllocator()) HLoopOptimization(graph_, nullptr, iva_, nullptr)) {
BuildGraph();
}
@@ -40,38 +38,38 @@ class LoopOptimizationTest : public CommonCompilerTest {
/** Constructs bare minimum graph. */
void BuildGraph() {
graph_->SetNumberOfVRegs(1);
- entry_block_ = new (&allocator_) HBasicBlock(graph_);
- return_block_ = new (&allocator_) HBasicBlock(graph_);
- exit_block_ = new (&allocator_) HBasicBlock(graph_);
+ entry_block_ = new (GetAllocator()) HBasicBlock(graph_);
+ return_block_ = new (GetAllocator()) HBasicBlock(graph_);
+ exit_block_ = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry_block_);
graph_->AddBlock(return_block_);
graph_->AddBlock(exit_block_);
graph_->SetEntryBlock(entry_block_);
graph_->SetExitBlock(exit_block_);
- parameter_ = new (&allocator_) HParameterValue(graph_->GetDexFile(),
- dex::TypeIndex(0),
- 0,
- DataType::Type::kInt32);
+ parameter_ = new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
+ dex::TypeIndex(0),
+ 0,
+ DataType::Type::kInt32);
entry_block_->AddInstruction(parameter_);
- return_block_->AddInstruction(new (&allocator_) HReturnVoid());
- exit_block_->AddInstruction(new (&allocator_) HExit());
+ return_block_->AddInstruction(new (GetAllocator()) HReturnVoid());
+ exit_block_->AddInstruction(new (GetAllocator()) HExit());
entry_block_->AddSuccessor(return_block_);
return_block_->AddSuccessor(exit_block_);
}
/** Adds a loop nest at given position before successor. */
HBasicBlock* AddLoop(HBasicBlock* position, HBasicBlock* successor) {
- HBasicBlock* header = new (&allocator_) HBasicBlock(graph_);
- HBasicBlock* body = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* header = new (GetAllocator()) HBasicBlock(graph_);
+ HBasicBlock* body = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(header);
graph_->AddBlock(body);
// Control flow.
position->ReplaceSuccessor(successor, header);
header->AddSuccessor(body);
header->AddSuccessor(successor);
- header->AddInstruction(new (&allocator_) HIf(parameter_));
+ header->AddInstruction(new (GetAllocator()) HIf(parameter_));
body->AddSuccessor(header);
- body->AddInstruction(new (&allocator_) HGoto());
+ body->AddInstruction(new (GetAllocator()) HGoto());
return header;
}
@@ -80,7 +78,8 @@ class LoopOptimizationTest : public CommonCompilerTest {
graph_->BuildDominatorTree();
iva_->Run();
// Do not release the loop hierarchy.
- loop_opt_->loop_allocator_ = &allocator_;
+ ScopedArenaAllocator loop_allocator(GetArenaStack());
+ loop_opt_->loop_allocator_ = &loop_allocator;
loop_opt_->LocalRun();
}
@@ -101,8 +100,6 @@ class LoopOptimizationTest : public CommonCompilerTest {
}
// General building fields.
- ArenaPool pool_;
- ArenaAllocator allocator_;
HGraph* graph_;
HInductionVarAnalysis* iva_;
HLoopOptimization* loop_opt_;
@@ -199,8 +196,8 @@ TEST_F(LoopOptimizationTest, LoopNestWithSequence) {
// predecessors.
TEST_F(LoopOptimizationTest, SimplifyLoop) {
// Can't use AddLoop as we want special order for blocks predecessors.
- HBasicBlock* header = new (&allocator_) HBasicBlock(graph_);
- HBasicBlock* body = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* header = new (GetAllocator()) HBasicBlock(graph_);
+ HBasicBlock* body = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(header);
graph_->AddBlock(body);
@@ -213,11 +210,11 @@ TEST_F(LoopOptimizationTest, SimplifyLoop) {
DCHECK(header->GetSuccessors()[1] == return_block_);
// Data flow.
- header->AddInstruction(new (&allocator_) HIf(parameter_));
- body->AddInstruction(new (&allocator_) HGoto());
+ header->AddInstruction(new (GetAllocator()) HIf(parameter_));
+ body->AddInstruction(new (GetAllocator()) HGoto());
- HPhi* phi = new (&allocator_) HPhi(&allocator_, 0, 0, DataType::Type::kInt32);
- HInstruction* add = new (&allocator_) HAdd(DataType::Type::kInt32, phi, parameter_);
+ HPhi* phi = new (GetAllocator()) HPhi(GetAllocator(), 0, 0, DataType::Type::kInt32);
+ HInstruction* add = new (GetAllocator()) HAdd(DataType::Type::kInt32, phi, parameter_);
header->AddPhi(phi);
body->AddInstruction(add);
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index cae5054ef7..1a537ca47e 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -56,13 +56,13 @@ void HGraph::FindBackEdges(ArenaBitVector* visited) {
DCHECK_EQ(visited->GetHighestBitSet(), -1);
// Nodes that we're currently visiting, indexed by block id.
- ArenaBitVector visiting(arena_, blocks_.size(), false, kArenaAllocGraphBuilder);
+ ArenaBitVector visiting(allocator_, blocks_.size(), false, kArenaAllocGraphBuilder);
// Number of successors visited from a given node, indexed by block id.
ArenaVector<size_t> successors_visited(blocks_.size(),
0u,
- arena_->Adapter(kArenaAllocGraphBuilder));
+ allocator_->Adapter(kArenaAllocGraphBuilder));
// Stack of nodes that we're currently visiting (same as marked in "visiting" above).
- ArenaVector<HBasicBlock*> worklist(arena_->Adapter(kArenaAllocGraphBuilder));
+ ArenaVector<HBasicBlock*> worklist(allocator_->Adapter(kArenaAllocGraphBuilder));
constexpr size_t kDefaultWorklistSize = 8;
worklist.reserve(kDefaultWorklistSize);
visited->SetBit(entry_block_->GetBlockId());
@@ -173,7 +173,7 @@ void HGraph::RemoveDeadBlocks(const ArenaBitVector& visited) {
}
GraphAnalysisResult HGraph::BuildDominatorTree() {
- ArenaBitVector visited(arena_, blocks_.size(), false, kArenaAllocGraphBuilder);
+ ArenaBitVector visited(allocator_, blocks_.size(), false, kArenaAllocGraphBuilder);
// (1) Find the back edges in the graph doing a DFS traversal.
FindBackEdges(&visited);
@@ -259,13 +259,13 @@ void HGraph::ComputeDominanceInformation() {
reverse_post_order_.push_back(entry_block_);
// Number of visits of a given node, indexed by block id.
- ArenaVector<size_t> visits(blocks_.size(), 0u, arena_->Adapter(kArenaAllocGraphBuilder));
+ ArenaVector<size_t> visits(blocks_.size(), 0u, allocator_->Adapter(kArenaAllocGraphBuilder));
// Number of successors visited from a given node, indexed by block id.
ArenaVector<size_t> successors_visited(blocks_.size(),
0u,
- arena_->Adapter(kArenaAllocGraphBuilder));
+ allocator_->Adapter(kArenaAllocGraphBuilder));
// Nodes for which we need to visit successors.
- ArenaVector<HBasicBlock*> worklist(arena_->Adapter(kArenaAllocGraphBuilder));
+ ArenaVector<HBasicBlock*> worklist(allocator_->Adapter(kArenaAllocGraphBuilder));
constexpr size_t kDefaultWorklistSize = 8;
worklist.reserve(kDefaultWorklistSize);
worklist.push_back(entry_block_);
@@ -335,7 +335,7 @@ void HGraph::ComputeDominanceInformation() {
}
HBasicBlock* HGraph::SplitEdge(HBasicBlock* block, HBasicBlock* successor) {
- HBasicBlock* new_block = new (arena_) HBasicBlock(this, successor->GetDexPc());
+ HBasicBlock* new_block = new (allocator_) HBasicBlock(this, successor->GetDexPc());
AddBlock(new_block);
// Use `InsertBetween` to ensure the predecessor index and successor index of
// `block` and `successor` are preserved.
@@ -347,7 +347,7 @@ void HGraph::SplitCriticalEdge(HBasicBlock* block, HBasicBlock* successor) {
// Insert a new node between `block` and `successor` to split the
// critical edge.
HBasicBlock* new_block = SplitEdge(block, successor);
- new_block->AddInstruction(new (arena_) HGoto(successor->GetDexPc()));
+ new_block->AddInstruction(new (allocator_) HGoto(successor->GetDexPc()));
if (successor->IsLoopHeader()) {
// If we split at a back edge boundary, make the new block the back edge.
HLoopInformation* info = successor->GetLoopInformation();
@@ -396,9 +396,9 @@ void HGraph::SimplifyLoop(HBasicBlock* header) {
// this graph.
size_t number_of_incomings = header->GetPredecessors().size() - info->NumberOfBackEdges();
if (number_of_incomings != 1 || (GetEntryBlock()->GetSingleSuccessor() == header)) {
- HBasicBlock* pre_header = new (arena_) HBasicBlock(this, header->GetDexPc());
+ HBasicBlock* pre_header = new (allocator_) HBasicBlock(this, header->GetDexPc());
AddBlock(pre_header);
- pre_header->AddInstruction(new (arena_) HGoto(header->GetDexPc()));
+ pre_header->AddInstruction(new (allocator_) HGoto(header->GetDexPc()));
for (size_t pred = 0; pred < header->GetPredecessors().size(); ++pred) {
HBasicBlock* predecessor = header->GetPredecessors()[pred];
@@ -440,7 +440,7 @@ void HGraph::ComputeTryBlockInformation() {
try_entry != &block->GetTryCatchInformation()->GetTryEntry())) {
// We are either setting try block membership for the first time or it
// has changed.
- block->SetTryCatchInformation(new (arena_) TryCatchInformation(*try_entry));
+ block->SetTryCatchInformation(new (allocator_) TryCatchInformation(*try_entry));
}
}
}
@@ -547,7 +547,7 @@ HNullConstant* HGraph::GetNullConstant(uint32_t dex_pc) {
// not null and not in a block. Otherwise, we need to clear the instruction
// id and/or any invariants the graph is assuming when adding new instructions.
if ((cached_null_constant_ == nullptr) || (cached_null_constant_->GetBlock() == nullptr)) {
- cached_null_constant_ = new (arena_) HNullConstant(dex_pc);
+ cached_null_constant_ = new (allocator_) HNullConstant(dex_pc);
cached_null_constant_->SetReferenceTypeInfo(inexact_object_rti_);
InsertConstant(cached_null_constant_);
}
@@ -563,7 +563,7 @@ HCurrentMethod* HGraph::GetCurrentMethod() {
// not null and not in a block. Otherwise, we need to clear the instruction
// id and/or any invariants the graph is assuming when adding new instructions.
if ((cached_current_method_ == nullptr) || (cached_current_method_->GetBlock() == nullptr)) {
- cached_current_method_ = new (arena_) HCurrentMethod(
+ cached_current_method_ = new (allocator_) HCurrentMethod(
Is64BitInstructionSet(instruction_set_) ? DataType::Type::kInt64 : DataType::Type::kInt32,
entry_block_->GetDexPc());
if (entry_block_->GetFirstInstruction() == nullptr) {
@@ -710,7 +710,7 @@ void HLoopInformation::Populate() {
bool is_irreducible_loop = HasBackEdgeNotDominatedByHeader();
if (is_irreducible_loop) {
- ArenaBitVector visited(graph->GetArena(),
+ ArenaBitVector visited(graph->GetAllocator(),
graph->GetBlocks().size(),
/* expandable */ false,
kArenaAllocGraphBuilder);
@@ -1655,8 +1655,8 @@ HBasicBlock* HBasicBlock::SplitBefore(HInstruction* cursor) {
DCHECK(!graph_->IsInSsaForm()) << "Support for SSA form not implemented.";
DCHECK_EQ(cursor->GetBlock(), this);
- HBasicBlock* new_block = new (GetGraph()->GetArena()) HBasicBlock(GetGraph(),
- cursor->GetDexPc());
+ HBasicBlock* new_block =
+ new (GetGraph()->GetAllocator()) HBasicBlock(GetGraph(), cursor->GetDexPc());
new_block->instructions_.first_instruction_ = cursor;
new_block->instructions_.last_instruction_ = instructions_.last_instruction_;
instructions_.last_instruction_ = cursor->previous_;
@@ -1668,7 +1668,7 @@ HBasicBlock* HBasicBlock::SplitBefore(HInstruction* cursor) {
}
new_block->instructions_.SetBlockOfInstructions(new_block);
- AddInstruction(new (GetGraph()->GetArena()) HGoto(new_block->GetDexPc()));
+ AddInstruction(new (GetGraph()->GetAllocator()) HGoto(new_block->GetDexPc()));
for (HBasicBlock* successor : GetSuccessors()) {
successor->predecessors_[successor->GetPredecessorIndexOf(this)] = new_block;
@@ -1685,7 +1685,7 @@ HBasicBlock* HBasicBlock::CreateImmediateDominator() {
DCHECK(!graph_->IsInSsaForm()) << "Support for SSA form not implemented.";
DCHECK(!IsCatchBlock()) << "Support for updating try/catch information not implemented.";
- HBasicBlock* new_block = new (GetGraph()->GetArena()) HBasicBlock(GetGraph(), GetDexPc());
+ HBasicBlock* new_block = new (GetGraph()->GetAllocator()) HBasicBlock(GetGraph(), GetDexPc());
for (HBasicBlock* predecessor : GetPredecessors()) {
predecessor->successors_[predecessor->GetSuccessorIndexOf(this)] = new_block;
@@ -1701,8 +1701,8 @@ HBasicBlock* HBasicBlock::CreateImmediateDominator() {
HBasicBlock* HBasicBlock::SplitBeforeForInlining(HInstruction* cursor) {
DCHECK_EQ(cursor->GetBlock(), this);
- HBasicBlock* new_block = new (GetGraph()->GetArena()) HBasicBlock(GetGraph(),
- cursor->GetDexPc());
+ HBasicBlock* new_block =
+ new (GetGraph()->GetAllocator()) HBasicBlock(GetGraph(), cursor->GetDexPc());
new_block->instructions_.first_instruction_ = cursor;
new_block->instructions_.last_instruction_ = instructions_.last_instruction_;
instructions_.last_instruction_ = cursor->previous_;
@@ -1734,7 +1734,7 @@ HBasicBlock* HBasicBlock::SplitAfterForInlining(HInstruction* cursor) {
DCHECK_NE(instructions_.last_instruction_, cursor);
DCHECK_EQ(cursor->GetBlock(), this);
- HBasicBlock* new_block = new (GetGraph()->GetArena()) HBasicBlock(GetGraph(), GetDexPc());
+ HBasicBlock* new_block = new (GetGraph()->GetAllocator()) HBasicBlock(GetGraph(), GetDexPc());
new_block->instructions_.first_instruction_ = cursor->GetNext();
new_block->instructions_.last_instruction_ = instructions_.last_instruction_;
cursor->next_->previous_ = nullptr;
@@ -2030,7 +2030,7 @@ void HBasicBlock::DisconnectAndDelete() {
last_instruction->IsPackedSwitch() ||
(last_instruction->IsTryBoundary() && IsCatchBlock()));
predecessor->RemoveInstruction(last_instruction);
- predecessor->AddInstruction(new (graph_->GetArena()) HGoto(last_instruction->GetDexPc()));
+ predecessor->AddInstruction(new (graph_->GetAllocator()) HGoto(last_instruction->GetDexPc()));
} else if (num_pred_successors == 0u) {
// The predecessor has no remaining successors and therefore must be dead.
// We deliberately leave it without a control-flow instruction so that the
@@ -2241,7 +2241,7 @@ HInstruction* HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
if (current->NeedsEnvironment()) {
DCHECK(current->HasEnvironment());
current->GetEnvironment()->SetAndCopyParentChain(
- outer_graph->GetArena(), invoke->GetEnvironment());
+ outer_graph->GetAllocator(), invoke->GetEnvironment());
}
}
}
@@ -2294,7 +2294,7 @@ HInstruction* HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
// into two blocks, merge the first block of the inlined graph into
// the first half, and replace the exit block of the inlined graph
// with the second half.
- ArenaAllocator* allocator = outer_graph->GetArena();
+ ArenaAllocator* allocator = outer_graph->GetAllocator();
HBasicBlock* at = invoke->GetBlock();
// Note that we split before the invoke only to simplify polymorphic inlining.
HBasicBlock* to = at->SplitBeforeForInlining(invoke);
@@ -2478,10 +2478,10 @@ void HGraph::TransformLoopHeaderForBCE(HBasicBlock* header) {
HBasicBlock* old_pre_header = header->GetDominator();
// Need extra block to avoid critical edge.
- HBasicBlock* if_block = new (arena_) HBasicBlock(this, header->GetDexPc());
- HBasicBlock* true_block = new (arena_) HBasicBlock(this, header->GetDexPc());
- HBasicBlock* false_block = new (arena_) HBasicBlock(this, header->GetDexPc());
- HBasicBlock* new_pre_header = new (arena_) HBasicBlock(this, header->GetDexPc());
+ HBasicBlock* if_block = new (allocator_) HBasicBlock(this, header->GetDexPc());
+ HBasicBlock* true_block = new (allocator_) HBasicBlock(this, header->GetDexPc());
+ HBasicBlock* false_block = new (allocator_) HBasicBlock(this, header->GetDexPc());
+ HBasicBlock* new_pre_header = new (allocator_) HBasicBlock(this, header->GetDexPc());
AddBlock(if_block);
AddBlock(true_block);
AddBlock(false_block);
@@ -2536,9 +2536,9 @@ HBasicBlock* HGraph::TransformLoopForVectorization(HBasicBlock* header,
HLoopInformation* loop = header->GetLoopInformation();
// Add new loop blocks.
- HBasicBlock* new_pre_header = new (arena_) HBasicBlock(this, header->GetDexPc());
- HBasicBlock* new_header = new (arena_) HBasicBlock(this, header->GetDexPc());
- HBasicBlock* new_body = new (arena_) HBasicBlock(this, header->GetDexPc());
+ HBasicBlock* new_pre_header = new (allocator_) HBasicBlock(this, header->GetDexPc());
+ HBasicBlock* new_header = new (allocator_) HBasicBlock(this, header->GetDexPc());
+ HBasicBlock* new_body = new (allocator_) HBasicBlock(this, header->GetDexPc());
AddBlock(new_pre_header);
AddBlock(new_header);
AddBlock(new_body);
@@ -2570,10 +2570,10 @@ HBasicBlock* HGraph::TransformLoopForVectorization(HBasicBlock* header,
reverse_post_order_[index_of_body] = new_body;
// Add gotos and suspend check (client must add conditional in header).
- new_pre_header->AddInstruction(new (arena_) HGoto());
- HSuspendCheck* suspend_check = new (arena_) HSuspendCheck(header->GetDexPc());
+ new_pre_header->AddInstruction(new (allocator_) HGoto());
+ HSuspendCheck* suspend_check = new (allocator_) HSuspendCheck(header->GetDexPc());
new_header->AddInstruction(suspend_check);
- new_body->AddInstruction(new (arena_) HGoto());
+ new_body->AddInstruction(new (allocator_) HGoto());
suspend_check->CopyEnvironmentFromWithLoopPhiAdjustment(
loop->GetSuspendCheck()->GetEnvironment(), header);
@@ -2891,7 +2891,7 @@ void HInstruction::RemoveEnvironmentUsers() {
// Returns an instruction with the opposite Boolean value from 'cond'.
HInstruction* HGraph::InsertOppositeCondition(HInstruction* cond, HInstruction* cursor) {
- ArenaAllocator* allocator = GetArena();
+ ArenaAllocator* allocator = GetAllocator();
if (cond->IsCondition() &&
!DataType::IsFloatingPointType(cond->InputAt(0)->GetType())) {
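
Every allocation in nodes.cc above uses the `new (allocator_) HFoo(...)` spelling rather than plain `new`; the spelling is legal because the H* classes derive from ArenaObject<> (as the class declarations in this patch show), whose placement operator new routes the allocation into the arena. A self-contained toy version of the idiom; Arena, ArenaObjectSketch and Node are illustrative stand-ins, not ART types:

    #include <cstddef>

    // Toy bump arena; ART's ArenaAllocator is far more involved.
    class Arena {
     public:
      void* Alloc(std::size_t n) {
        void* p = &buffer_[used_];
        used_ += (n + 7u) & ~std::size_t{7};  // bump pointer, 8-byte aligned, no bounds check
        return p;
      }
     private:
      alignas(8) unsigned char buffer_[4096];
      std::size_t used_ = 0;
    };

    // Plays the role of the ArenaObject<> base: derived objects are created with
    // `new (arena) T(...)` and reclaimed in bulk when the arena goes away, which
    // is why no matching `delete` appears anywhere in this patch.
    class ArenaObjectSketch {
     public:
      void* operator new(std::size_t size, Arena* arena) { return arena->Alloc(size); }
    };

    class Node : public ArenaObjectSketch {
     public:
      explicit Node(int id) : id_(id) {}
      int id() const { return id_; }
     private:
      int id_;
    };

    int main() {
      Arena arena;
      Node* n = new (&arena) Node(42);  // same spelling as `new (allocator_) HBasicBlock(...)`
      return n->id() == 42 ? 0 : 1;
    }
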
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index fef0c865ae..3c584bd67d 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -45,6 +45,7 @@
namespace art {
+class ArenaStack;
class GraphChecker;
class HBasicBlock;
class HConstructorFence;
@@ -305,7 +306,8 @@ std::ostream& operator<<(std::ostream& os, const ReferenceTypeInfo& rhs);
// Control-flow graph of a method. Contains a list of basic blocks.
class HGraph : public ArenaObject<kArenaAllocGraph> {
public:
- HGraph(ArenaAllocator* arena,
+ HGraph(ArenaAllocator* allocator,
+ ArenaStack* arena_stack,
const DexFile& dex_file,
uint32_t method_idx,
InstructionSet instruction_set,
@@ -313,10 +315,11 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
bool debuggable = false,
bool osr = false,
int start_instruction_id = 0)
- : arena_(arena),
- blocks_(arena->Adapter(kArenaAllocBlockList)),
- reverse_post_order_(arena->Adapter(kArenaAllocReversePostOrder)),
- linear_order_(arena->Adapter(kArenaAllocLinearOrder)),
+ : allocator_(allocator),
+ arena_stack_(arena_stack),
+ blocks_(allocator->Adapter(kArenaAllocBlockList)),
+ reverse_post_order_(allocator->Adapter(kArenaAllocReversePostOrder)),
+ linear_order_(allocator->Adapter(kArenaAllocLinearOrder)),
entry_block_(nullptr),
exit_block_(nullptr),
maximum_number_of_out_vregs_(0),
@@ -337,22 +340,23 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
number_of_cha_guards_(0),
instruction_set_(instruction_set),
cached_null_constant_(nullptr),
- cached_int_constants_(std::less<int32_t>(), arena->Adapter(kArenaAllocConstantsMap)),
- cached_float_constants_(std::less<int32_t>(), arena->Adapter(kArenaAllocConstantsMap)),
- cached_long_constants_(std::less<int64_t>(), arena->Adapter(kArenaAllocConstantsMap)),
- cached_double_constants_(std::less<int64_t>(), arena->Adapter(kArenaAllocConstantsMap)),
+ cached_int_constants_(std::less<int32_t>(), allocator->Adapter(kArenaAllocConstantsMap)),
+ cached_float_constants_(std::less<int32_t>(), allocator->Adapter(kArenaAllocConstantsMap)),
+ cached_long_constants_(std::less<int64_t>(), allocator->Adapter(kArenaAllocConstantsMap)),
+ cached_double_constants_(std::less<int64_t>(), allocator->Adapter(kArenaAllocConstantsMap)),
cached_current_method_(nullptr),
art_method_(nullptr),
inexact_object_rti_(ReferenceTypeInfo::CreateInvalid()),
osr_(osr),
- cha_single_implementation_list_(arena->Adapter(kArenaAllocCHA)) {
+ cha_single_implementation_list_(allocator->Adapter(kArenaAllocCHA)) {
blocks_.reserve(kDefaultNumberOfBlocks);
}
// Acquires and stores RTI of inexact Object to be used when creating HNullConstant.
void InitializeInexactObjectRTI(VariableSizedHandleScope* handles);
- ArenaAllocator* GetArena() const { return arena_; }
+ ArenaAllocator* GetAllocator() const { return allocator_; }
+ ArenaStack* GetArenaStack() const { return arena_stack_; }
const ArenaVector<HBasicBlock*>& GetBlocks() const { return blocks_; }
bool IsInSsaForm() const { return in_ssa_form_; }
@@ -613,7 +617,7 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
// If not found or previously deleted, create and cache a new instruction.
// Don't bother reviving a previously deleted instruction, for simplicity.
if (constant == nullptr || constant->GetBlock() == nullptr) {
- constant = new (arena_) InstructionType(value, dex_pc);
+ constant = new (allocator_) InstructionType(value, dex_pc);
cache->Overwrite(value, constant);
InsertConstant(constant);
}
@@ -629,7 +633,8 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
// See CacheFloatConstant comment.
void CacheDoubleConstant(HDoubleConstant* constant);
- ArenaAllocator* const arena_;
+ ArenaAllocator* const allocator_;
+ ArenaStack* const arena_stack_;
// List of blocks in insertion order.
ArenaVector<HBasicBlock*> blocks_;
@@ -751,9 +756,12 @@ class HLoopInformation : public ArenaObject<kArenaAllocLoopInfo> {
suspend_check_(nullptr),
irreducible_(false),
contains_irreducible_loop_(false),
- back_edges_(graph->GetArena()->Adapter(kArenaAllocLoopInfoBackEdges)),
+ back_edges_(graph->GetAllocator()->Adapter(kArenaAllocLoopInfoBackEdges)),
// Make bit vector growable, as the number of blocks may change.
- blocks_(graph->GetArena(), graph->GetBlocks().size(), true, kArenaAllocLoopInfoBackEdges) {
+ blocks_(graph->GetAllocator(),
+ graph->GetBlocks().size(),
+ true,
+ kArenaAllocLoopInfoBackEdges) {
back_edges_.reserve(kDefaultNumberOfBackEdges);
}
@@ -916,11 +924,11 @@ class HBasicBlock : public ArenaObject<kArenaAllocBasicBlock> {
public:
explicit HBasicBlock(HGraph* graph, uint32_t dex_pc = kNoDexPc)
: graph_(graph),
- predecessors_(graph->GetArena()->Adapter(kArenaAllocPredecessors)),
- successors_(graph->GetArena()->Adapter(kArenaAllocSuccessors)),
+ predecessors_(graph->GetAllocator()->Adapter(kArenaAllocPredecessors)),
+ successors_(graph->GetAllocator()->Adapter(kArenaAllocSuccessors)),
loop_information_(nullptr),
dominator_(nullptr),
- dominated_blocks_(graph->GetArena()->Adapter(kArenaAllocDominated)),
+ dominated_blocks_(graph->GetAllocator()->Adapter(kArenaAllocDominated)),
block_id_(kInvalidBlockId),
dex_pc_(dex_pc),
lifetime_start_(kNoLifetime),
@@ -972,7 +980,7 @@ class HBasicBlock : public ArenaObject<kArenaAllocBasicBlock> {
void AddBackEdge(HBasicBlock* back_edge) {
if (loop_information_ == nullptr) {
- loop_information_ = new (graph_->GetArena()) HLoopInformation(this, graph_);
+ loop_information_ = new (graph_->GetAllocator()) HLoopInformation(this, graph_);
}
DCHECK_EQ(loop_information_->GetHeader(), this);
loop_information_->AddBackEdge(back_edge);
@@ -1925,7 +1933,7 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> {
HInstruction* GetPreviousDisregardingMoves() const;
HBasicBlock* GetBlock() const { return block_; }
- ArenaAllocator* GetArena() const { return block_->GetGraph()->GetArena(); }
+ ArenaAllocator* GetAllocator() const { return block_->GetGraph()->GetAllocator(); }
void SetBlock(HBasicBlock* block) { block_ = block; }
bool IsInBlock() const { return block_ != nullptr; }
bool IsInLoop() const { return block_->IsInLoop(); }
@@ -2015,7 +2023,7 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> {
// Note: fixup_end remains valid across push_front().
auto fixup_end = uses_.empty() ? uses_.begin() : ++uses_.begin();
HUseListNode<HInstruction*>* new_node =
- new (GetBlock()->GetGraph()->GetArena()) HUseListNode<HInstruction*>(user, index);
+ new (GetBlock()->GetGraph()->GetAllocator()) HUseListNode<HInstruction*>(user, index);
uses_.push_front(*new_node);
FixUpUserRecordsAfterUseInsertion(fixup_end);
}
@@ -2025,7 +2033,7 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> {
// Note: env_fixup_end remains valid across push_front().
auto env_fixup_end = env_uses_.empty() ? env_uses_.begin() : ++env_uses_.begin();
HUseListNode<HEnvironment*>* new_node =
- new (GetBlock()->GetGraph()->GetArena()) HUseListNode<HEnvironment*>(user, index);
+ new (GetBlock()->GetGraph()->GetAllocator()) HUseListNode<HEnvironment*>(user, index);
env_uses_.push_front(*new_node);
FixUpUserRecordsAfterEnvUseInsertion(env_fixup_end);
}
@@ -2108,7 +2116,7 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> {
// copying, the uses lists are being updated.
void CopyEnvironmentFrom(HEnvironment* environment) {
DCHECK(environment_ == nullptr);
- ArenaAllocator* allocator = GetBlock()->GetGraph()->GetArena();
+ ArenaAllocator* allocator = GetBlock()->GetGraph()->GetAllocator();
environment_ = new (allocator) HEnvironment(allocator, *environment, this);
environment_->CopyFrom(environment);
if (environment->GetParent() != nullptr) {
@@ -2119,7 +2127,7 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> {
void CopyEnvironmentFromWithLoopPhiAdjustment(HEnvironment* environment,
HBasicBlock* block) {
DCHECK(environment_ == nullptr);
- ArenaAllocator* allocator = GetBlock()->GetGraph()->GetArena();
+ ArenaAllocator* allocator = GetBlock()->GetGraph()->GetAllocator();
environment_ = new (allocator) HEnvironment(allocator, *environment, this);
environment_->CopyFromWithLoopPhiAdjustment(environment, block);
if (environment->GetParent() != nullptr) {
diff --git a/compiler/optimizing/nodes_test.cc b/compiler/optimizing/nodes_test.cc
index ada6177bfb..b2180d9f98 100644
--- a/compiler/optimizing/nodes_test.cc
+++ b/compiler/optimizing/nodes_test.cc
@@ -23,37 +23,36 @@
namespace art {
+class NodeTest : public OptimizingUnitTest {};
+
/**
* Test that removing instruction from the graph removes itself from user lists
* and environment lists.
*/
-TEST(Node, RemoveInstruction) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateGraph(&allocator);
- HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
+TEST_F(NodeTest, RemoveInstruction) {
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter = new (&allocator) HParameterValue(
+ HInstruction* parameter = new (GetAllocator()) HParameterValue(
graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference);
entry->AddInstruction(parameter);
- entry->AddInstruction(new (&allocator) HGoto());
+ entry->AddInstruction(new (GetAllocator()) HGoto());
- HBasicBlock* first_block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* first_block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(first_block);
entry->AddSuccessor(first_block);
- HInstruction* null_check = new (&allocator) HNullCheck(parameter, 0);
+ HInstruction* null_check = new (GetAllocator()) HNullCheck(parameter, 0);
first_block->AddInstruction(null_check);
- first_block->AddInstruction(new (&allocator) HReturnVoid());
+ first_block->AddInstruction(new (GetAllocator()) HReturnVoid());
- HBasicBlock* exit_block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* exit_block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(exit_block);
first_block->AddSuccessor(exit_block);
- exit_block->AddInstruction(new (&allocator) HExit());
+ exit_block->AddInstruction(new (GetAllocator()) HExit());
- HEnvironment* environment = new (&allocator) HEnvironment(
- &allocator, 1, graph->GetArtMethod(), 0, null_check);
+ HEnvironment* environment = new (GetAllocator()) HEnvironment(
+ GetAllocator(), 1, graph->GetArtMethod(), 0, null_check);
null_check->SetRawEnvironment(environment);
environment->SetRawEnvAt(0, parameter);
parameter->AddEnvUseAt(null_check->GetEnvironment(), 0);
@@ -70,25 +69,22 @@ TEST(Node, RemoveInstruction) {
/**
* Test that inserting an instruction in the graph updates user lists.
*/
-TEST(Node, InsertInstruction) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateGraph(&allocator);
- HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
+TEST_F(NodeTest, InsertInstruction) {
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter1 = new (&allocator) HParameterValue(
+ HInstruction* parameter1 = new (GetAllocator()) HParameterValue(
graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference);
- HInstruction* parameter2 = new (&allocator) HParameterValue(
+ HInstruction* parameter2 = new (GetAllocator()) HParameterValue(
graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference);
entry->AddInstruction(parameter1);
entry->AddInstruction(parameter2);
- entry->AddInstruction(new (&allocator) HExit());
+ entry->AddInstruction(new (GetAllocator()) HExit());
ASSERT_FALSE(parameter1->HasUses());
- HInstruction* to_insert = new (&allocator) HNullCheck(parameter1, 0);
+ HInstruction* to_insert = new (GetAllocator()) HNullCheck(parameter1, 0);
entry->InsertInstructionBefore(to_insert, parameter2);
ASSERT_TRUE(parameter1->HasUses());
@@ -98,48 +94,42 @@ TEST(Node, InsertInstruction) {
/**
* Test that adding an instruction in the graph updates user lists.
*/
-TEST(Node, AddInstruction) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateGraph(&allocator);
- HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
+TEST_F(NodeTest, AddInstruction) {
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter = new (&allocator) HParameterValue(
+ HInstruction* parameter = new (GetAllocator()) HParameterValue(
graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference);
entry->AddInstruction(parameter);
ASSERT_FALSE(parameter->HasUses());
- HInstruction* to_add = new (&allocator) HNullCheck(parameter, 0);
+ HInstruction* to_add = new (GetAllocator()) HNullCheck(parameter, 0);
entry->AddInstruction(to_add);
ASSERT_TRUE(parameter->HasUses());
ASSERT_TRUE(parameter->GetUses().HasExactlyOneElement());
}
-TEST(Node, ParentEnvironment) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateGraph(&allocator);
- HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
+TEST_F(NodeTest, ParentEnvironment) {
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter1 = new (&allocator) HParameterValue(
+ HInstruction* parameter1 = new (GetAllocator()) HParameterValue(
graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference);
- HInstruction* with_environment = new (&allocator) HNullCheck(parameter1, 0);
+ HInstruction* with_environment = new (GetAllocator()) HNullCheck(parameter1, 0);
entry->AddInstruction(parameter1);
entry->AddInstruction(with_environment);
- entry->AddInstruction(new (&allocator) HExit());
+ entry->AddInstruction(new (GetAllocator()) HExit());
ASSERT_TRUE(parameter1->HasUses());
ASSERT_TRUE(parameter1->GetUses().HasExactlyOneElement());
- HEnvironment* environment = new (&allocator) HEnvironment(
- &allocator, 1, graph->GetArtMethod(), 0, with_environment);
- ArenaVector<HInstruction*> array(allocator.Adapter());
+ HEnvironment* environment = new (GetAllocator()) HEnvironment(
+ GetAllocator(), 1, graph->GetArtMethod(), 0, with_environment);
+ ArenaVector<HInstruction*> array(GetAllocator()->Adapter());
array.push_back(parameter1);
environment->CopyFrom(array);
@@ -148,22 +138,22 @@ TEST(Node, ParentEnvironment) {
ASSERT_TRUE(parameter1->HasEnvironmentUses());
ASSERT_TRUE(parameter1->GetEnvUses().HasExactlyOneElement());
- HEnvironment* parent1 = new (&allocator) HEnvironment(
- &allocator, 1, graph->GetArtMethod(), 0, nullptr);
+ HEnvironment* parent1 = new (GetAllocator()) HEnvironment(
+ GetAllocator(), 1, graph->GetArtMethod(), 0, nullptr);
parent1->CopyFrom(array);
ASSERT_EQ(parameter1->GetEnvUses().SizeSlow(), 2u);
- HEnvironment* parent2 = new (&allocator) HEnvironment(
- &allocator, 1, graph->GetArtMethod(), 0, nullptr);
+ HEnvironment* parent2 = new (GetAllocator()) HEnvironment(
+ GetAllocator(), 1, graph->GetArtMethod(), 0, nullptr);
parent2->CopyFrom(array);
- parent1->SetAndCopyParentChain(&allocator, parent2);
+ parent1->SetAndCopyParentChain(GetAllocator(), parent2);
// One use for parent2, and one other use for the new parent of parent1.
ASSERT_EQ(parameter1->GetEnvUses().SizeSlow(), 4u);
// We have copied the parent chain. So we now have two more uses.
- environment->SetAndCopyParentChain(&allocator, parent1);
+ environment->SetAndCopyParentChain(GetAllocator(), parent1);
ASSERT_EQ(parameter1->GetEnvUses().SizeSlow(), 6u);
}
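
The nodes_test.cc changes above convert free TEST() bodies, each of which built its own ArenaPool and ArenaAllocator, into TEST_F() bodies over a shared NodeTest fixture, leaving only the logic under test in each body. The general gtest shape of that conversion, as a standalone sketch (ScratchFixture and NewBuffer are illustrative names, not ART's OptimizingUnitTest API):

    #include <list>
    #include <vector>
    #include "gtest/gtest.h"

    class ScratchFixture : public ::testing::Test {
     protected:
      // The setup the old TEST() bodies repeated now lives in the fixture.
      std::vector<int>* NewBuffer() {
        buffers_.emplace_back();
        return &buffers_.back();
      }

     private:
      std::list<std::vector<int>> buffers_;  // owned by the fixture for the test's lifetime
    };

    TEST_F(ScratchFixture, UsesFixtureProvidedState) {
      std::vector<int>* buffer = NewBuffer();  // analogous to CreateGraph()/GetAllocator()
      buffer->push_back(1);
      EXPECT_EQ(1u, buffer->size());
    }
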
diff --git a/compiler/optimizing/nodes_vector_test.cc b/compiler/optimizing/nodes_vector_test.cc
index d3a499cb31..ab9d7594d9 100644
--- a/compiler/optimizing/nodes_vector_test.cc
+++ b/compiler/optimizing/nodes_vector_test.cc
@@ -23,12 +23,10 @@ namespace art {
/**
* Fixture class for testing vector nodes.
*/
-class NodesVectorTest : public CommonCompilerTest {
+class NodesVectorTest : public OptimizingUnitTest {
public:
NodesVectorTest()
- : pool_(),
- allocator_(&pool_),
- graph_(CreateGraph(&allocator_)) {
+ : graph_(CreateGraph()) {
BuildGraph();
}
@@ -36,32 +34,30 @@ class NodesVectorTest : public CommonCompilerTest {
void BuildGraph() {
graph_->SetNumberOfVRegs(1);
- entry_block_ = new (&allocator_) HBasicBlock(graph_);
- exit_block_ = new (&allocator_) HBasicBlock(graph_);
+ entry_block_ = new (GetAllocator()) HBasicBlock(graph_);
+ exit_block_ = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry_block_);
graph_->AddBlock(exit_block_);
graph_->SetEntryBlock(entry_block_);
graph_->SetExitBlock(exit_block_);
- int8_parameter_ = new (&allocator_) HParameterValue(graph_->GetDexFile(),
- dex::TypeIndex(1),
- 0,
- DataType::Type::kInt8);
+ int8_parameter_ = new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
+ dex::TypeIndex(1),
+ 0,
+ DataType::Type::kInt8);
entry_block_->AddInstruction(int8_parameter_);
- int16_parameter_ = new (&allocator_) HParameterValue(graph_->GetDexFile(),
- dex::TypeIndex(2),
- 0,
- DataType::Type::kInt16);
+ int16_parameter_ = new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
+ dex::TypeIndex(2),
+ 0,
+ DataType::Type::kInt16);
entry_block_->AddInstruction(int16_parameter_);
- int32_parameter_ = new (&allocator_) HParameterValue(graph_->GetDexFile(),
- dex::TypeIndex(0),
- 0,
- DataType::Type::kInt32);
+ int32_parameter_ = new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
+ dex::TypeIndex(0),
+ 0,
+ DataType::Type::kInt32);
entry_block_->AddInstruction(int32_parameter_);
}
// General building fields.
- ArenaPool pool_;
- ArenaAllocator allocator_;
HGraph* graph_;
HBasicBlock* entry_block_;
@@ -134,16 +130,16 @@ TEST(NodesVector, AlignmentString) {
}
TEST_F(NodesVectorTest, VectorOperationProperties) {
- HVecOperation* v0 = new (&allocator_)
- HVecReplicateScalar(&allocator_, int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc);
- HVecOperation* v1 = new (&allocator_)
- HVecReplicateScalar(&allocator_, int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc);
- HVecOperation* v2 = new (&allocator_)
- HVecReplicateScalar(&allocator_, int32_parameter_, DataType::Type::kInt32, 2, kNoDexPc);
- HVecOperation* v3 = new (&allocator_)
- HVecReplicateScalar(&allocator_, int32_parameter_, DataType::Type::kInt16, 4, kNoDexPc);
- HVecOperation* v4 = new (&allocator_) HVecStore(
- &allocator_,
+ HVecOperation* v0 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc);
+ HVecOperation* v1 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc);
+ HVecOperation* v2 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kInt32, 2, kNoDexPc);
+ HVecOperation* v3 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kInt16, 4, kNoDexPc);
+ HVecOperation* v4 = new (GetAllocator()) HVecStore(
+ GetAllocator(),
int32_parameter_,
int32_parameter_,
v0,
@@ -198,30 +194,30 @@ TEST_F(NodesVectorTest, VectorOperationProperties) {
}
TEST_F(NodesVectorTest, VectorAlignmentAndStringCharAtMatterOnLoad) {
- HVecLoad* v0 = new (&allocator_) HVecLoad(&allocator_,
- int32_parameter_,
- int32_parameter_,
- DataType::Type::kInt32,
- SideEffects::ArrayReadOfType(DataType::Type::kInt32),
- 4,
- /*is_string_char_at*/ false,
- kNoDexPc);
- HVecLoad* v1 = new (&allocator_) HVecLoad(&allocator_,
- int32_parameter_,
- int32_parameter_,
- DataType::Type::kInt32,
- SideEffects::ArrayReadOfType(DataType::Type::kInt32),
- 4,
- /*is_string_char_at*/ false,
- kNoDexPc);
- HVecLoad* v2 = new (&allocator_) HVecLoad(&allocator_,
- int32_parameter_,
- int32_parameter_,
- DataType::Type::kInt32,
- SideEffects::ArrayReadOfType(DataType::Type::kInt32),
- 4,
- /*is_string_char_at*/ true,
- kNoDexPc);
+ HVecLoad* v0 = new (GetAllocator()) HVecLoad(GetAllocator(),
+ int32_parameter_,
+ int32_parameter_,
+ DataType::Type::kInt32,
+ SideEffects::ArrayReadOfType(DataType::Type::kInt32),
+ 4,
+ /*is_string_char_at*/ false,
+ kNoDexPc);
+ HVecLoad* v1 = new (GetAllocator()) HVecLoad(GetAllocator(),
+ int32_parameter_,
+ int32_parameter_,
+ DataType::Type::kInt32,
+ SideEffects::ArrayReadOfType(DataType::Type::kInt32),
+ 4,
+ /*is_string_char_at*/ false,
+ kNoDexPc);
+ HVecLoad* v2 = new (GetAllocator()) HVecLoad(GetAllocator(),
+ int32_parameter_,
+ int32_parameter_,
+ DataType::Type::kInt32,
+ SideEffects::ArrayReadOfType(DataType::Type::kInt32),
+ 4,
+ /*is_string_char_at*/ true,
+ kNoDexPc);
EXPECT_TRUE(v0->CanBeMoved());
EXPECT_TRUE(v1->CanBeMoved());
@@ -250,10 +246,10 @@ TEST_F(NodesVectorTest, VectorAlignmentAndStringCharAtMatterOnLoad) {
}
TEST_F(NodesVectorTest, VectorAlignmentMattersOnStore) {
- HVecOperation* p0 = new (&allocator_)
- HVecReplicateScalar(&allocator_, int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc);
- HVecStore* v0 = new (&allocator_) HVecStore(
- &allocator_,
+ HVecOperation* p0 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc);
+ HVecStore* v0 = new (GetAllocator()) HVecStore(
+ GetAllocator(),
int32_parameter_,
int32_parameter_,
p0,
@@ -261,8 +257,8 @@ TEST_F(NodesVectorTest, VectorAlignmentMattersOnStore) {
SideEffects::ArrayWriteOfType(DataType::Type::kInt32),
4,
kNoDexPc);
- HVecStore* v1 = new (&allocator_) HVecStore(
- &allocator_,
+ HVecStore* v1 = new (GetAllocator()) HVecStore(
+ GetAllocator(),
int32_parameter_,
int32_parameter_,
p0,
@@ -287,27 +283,27 @@ TEST_F(NodesVectorTest, VectorAlignmentMattersOnStore) {
}
TEST_F(NodesVectorTest, VectorSignMattersOnMin) {
- HVecOperation* p0 = new (&allocator_)
- HVecReplicateScalar(&allocator_, int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc);
- HVecOperation* p1 = new (&allocator_)
- HVecReplicateScalar(&allocator_, int8_parameter_, DataType::Type::kInt8, 4, kNoDexPc);
- HVecOperation* p2 = new (&allocator_)
- HVecReplicateScalar(&allocator_, int16_parameter_, DataType::Type::kInt16, 4, kNoDexPc);
-
- HVecMin* v0 = new (&allocator_) HVecMin(
- &allocator_, p0, p0, DataType::Type::kInt32, 4, /*is_unsigned*/ true, kNoDexPc);
- HVecMin* v1 = new (&allocator_) HVecMin(
- &allocator_, p0, p0, DataType::Type::kInt32, 4, /*is_unsigned*/ false, kNoDexPc);
- HVecMin* v2 = new (&allocator_) HVecMin(
- &allocator_, p0, p0, DataType::Type::kInt32, 2, /*is_unsigned*/ true, kNoDexPc);
- HVecMin* v3 = new (&allocator_) HVecMin(
- &allocator_, p1, p1, DataType::Type::kUint8, 16, /*is_unsigned*/ false, kNoDexPc);
- HVecMin* v4 = new (&allocator_) HVecMin(
- &allocator_, p1, p1, DataType::Type::kInt8, 16, /*is_unsigned*/ false, kNoDexPc);
- HVecMin* v5 = new (&allocator_) HVecMin(
- &allocator_, p2, p2, DataType::Type::kUint16, 8, /*is_unsigned*/ false, kNoDexPc);
- HVecMin* v6 = new (&allocator_) HVecMin(
- &allocator_, p2, p2, DataType::Type::kInt16, 8, /*is_unsigned*/ false, kNoDexPc);
+ HVecOperation* p0 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc);
+ HVecOperation* p1 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int8_parameter_, DataType::Type::kInt8, 4, kNoDexPc);
+ HVecOperation* p2 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int16_parameter_, DataType::Type::kInt16, 4, kNoDexPc);
+
+ HVecMin* v0 = new (GetAllocator()) HVecMin(
+ GetAllocator(), p0, p0, DataType::Type::kInt32, 4, /*is_unsigned*/ true, kNoDexPc);
+ HVecMin* v1 = new (GetAllocator()) HVecMin(
+ GetAllocator(), p0, p0, DataType::Type::kInt32, 4, /*is_unsigned*/ false, kNoDexPc);
+ HVecMin* v2 = new (GetAllocator()) HVecMin(
+ GetAllocator(), p0, p0, DataType::Type::kInt32, 2, /*is_unsigned*/ true, kNoDexPc);
+ HVecMin* v3 = new (GetAllocator()) HVecMin(
+ GetAllocator(), p1, p1, DataType::Type::kUint8, 16, /*is_unsigned*/ false, kNoDexPc);
+ HVecMin* v4 = new (GetAllocator()) HVecMin(
+ GetAllocator(), p1, p1, DataType::Type::kInt8, 16, /*is_unsigned*/ false, kNoDexPc);
+ HVecMin* v5 = new (GetAllocator()) HVecMin(
+ GetAllocator(), p2, p2, DataType::Type::kUint16, 8, /*is_unsigned*/ false, kNoDexPc);
+ HVecMin* v6 = new (GetAllocator()) HVecMin(
+ GetAllocator(), p2, p2, DataType::Type::kInt16, 8, /*is_unsigned*/ false, kNoDexPc);
HVecMin* min_insns[] = { v0, v1, v2, v3, v4, v5, v6 };
EXPECT_FALSE(p0->CanBeMoved());
@@ -331,27 +327,27 @@ TEST_F(NodesVectorTest, VectorSignMattersOnMin) {
}
TEST_F(NodesVectorTest, VectorSignMattersOnMax) {
- HVecOperation* p0 = new (&allocator_)
- HVecReplicateScalar(&allocator_, int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc);
- HVecOperation* p1 = new (&allocator_)
- HVecReplicateScalar(&allocator_, int8_parameter_, DataType::Type::kInt8, 4, kNoDexPc);
- HVecOperation* p2 = new (&allocator_)
- HVecReplicateScalar(&allocator_, int16_parameter_, DataType::Type::kInt16, 4, kNoDexPc);
-
- HVecMax* v0 = new (&allocator_) HVecMax(
- &allocator_, p0, p0, DataType::Type::kInt32, 4, /*is_unsigned*/ true, kNoDexPc);
- HVecMax* v1 = new (&allocator_) HVecMax(
- &allocator_, p0, p0, DataType::Type::kInt32, 4, /*is_unsigned*/ false, kNoDexPc);
- HVecMax* v2 = new (&allocator_) HVecMax(
- &allocator_, p0, p0, DataType::Type::kInt32, 2, /*is_unsigned*/ true, kNoDexPc);
- HVecMax* v3 = new (&allocator_) HVecMax(
- &allocator_, p1, p1, DataType::Type::kUint8, 16, /*is_unsigned*/ false, kNoDexPc);
- HVecMax* v4 = new (&allocator_) HVecMax(
- &allocator_, p1, p1, DataType::Type::kInt8, 16, /*is_unsigned*/ false, kNoDexPc);
- HVecMax* v5 = new (&allocator_) HVecMax(
- &allocator_, p2, p2, DataType::Type::kUint16, 8, /*is_unsigned*/ false, kNoDexPc);
- HVecMax* v6 = new (&allocator_) HVecMax(
- &allocator_, p2, p2, DataType::Type::kInt16, 8, /*is_unsigned*/ false, kNoDexPc);
+ HVecOperation* p0 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc);
+ HVecOperation* p1 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int8_parameter_, DataType::Type::kInt8, 4, kNoDexPc);
+ HVecOperation* p2 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int16_parameter_, DataType::Type::kInt16, 4, kNoDexPc);
+
+ HVecMax* v0 = new (GetAllocator()) HVecMax(
+ GetAllocator(), p0, p0, DataType::Type::kInt32, 4, /*is_unsigned*/ true, kNoDexPc);
+ HVecMax* v1 = new (GetAllocator()) HVecMax(
+ GetAllocator(), p0, p0, DataType::Type::kInt32, 4, /*is_unsigned*/ false, kNoDexPc);
+ HVecMax* v2 = new (GetAllocator()) HVecMax(
+ GetAllocator(), p0, p0, DataType::Type::kInt32, 2, /*is_unsigned*/ true, kNoDexPc);
+ HVecMax* v3 = new (GetAllocator()) HVecMax(
+ GetAllocator(), p1, p1, DataType::Type::kUint8, 16, /*is_unsigned*/ false, kNoDexPc);
+ HVecMax* v4 = new (GetAllocator()) HVecMax(
+ GetAllocator(), p1, p1, DataType::Type::kInt8, 16, /*is_unsigned*/ false, kNoDexPc);
+ HVecMax* v5 = new (GetAllocator()) HVecMax(
+ GetAllocator(), p2, p2, DataType::Type::kUint16, 8, /*is_unsigned*/ false, kNoDexPc);
+ HVecMax* v6 = new (GetAllocator()) HVecMax(
+ GetAllocator(), p2, p2, DataType::Type::kInt16, 8, /*is_unsigned*/ false, kNoDexPc);
HVecMax* max_insns[] = { v0, v1, v2, v3, v4, v5, v6 };
EXPECT_FALSE(p0->CanBeMoved());
@@ -375,51 +371,51 @@ TEST_F(NodesVectorTest, VectorSignMattersOnMax) {
}
TEST_F(NodesVectorTest, VectorAttributesMatterOnHalvingAdd) {
- HVecOperation* p0 = new (&allocator_)
- HVecReplicateScalar(&allocator_, int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc);
- HVecOperation* p1 = new (&allocator_)
- HVecReplicateScalar(&allocator_, int8_parameter_, DataType::Type::kInt8, 4, kNoDexPc);
- HVecOperation* p2 = new (&allocator_)
- HVecReplicateScalar(&allocator_, int16_parameter_, DataType::Type::kInt16, 4, kNoDexPc);
-
- HVecHalvingAdd* v0 = new (&allocator_) HVecHalvingAdd(
- &allocator_, p0, p0, DataType::Type::kInt32, 4,
+ HVecOperation* p0 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc);
+ HVecOperation* p1 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int8_parameter_, DataType::Type::kInt8, 4, kNoDexPc);
+ HVecOperation* p2 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int16_parameter_, DataType::Type::kInt16, 4, kNoDexPc);
+
+ HVecHalvingAdd* v0 = new (GetAllocator()) HVecHalvingAdd(
+ GetAllocator(), p0, p0, DataType::Type::kInt32, 4,
/*is_rounded*/ true, /*is_unsigned*/ true, kNoDexPc);
- HVecHalvingAdd* v1 = new (&allocator_) HVecHalvingAdd(
- &allocator_, p0, p0, DataType::Type::kInt32, 4,
+ HVecHalvingAdd* v1 = new (GetAllocator()) HVecHalvingAdd(
+ GetAllocator(), p0, p0, DataType::Type::kInt32, 4,
/*is_rounded*/ false, /*is_unsigned*/ true, kNoDexPc);
- HVecHalvingAdd* v2 = new (&allocator_) HVecHalvingAdd(
- &allocator_, p0, p0, DataType::Type::kInt32, 4,
+ HVecHalvingAdd* v2 = new (GetAllocator()) HVecHalvingAdd(
+ GetAllocator(), p0, p0, DataType::Type::kInt32, 4,
/*is_rounded*/ true, /*is_unsigned*/ false, kNoDexPc);
- HVecHalvingAdd* v3 = new (&allocator_) HVecHalvingAdd(
- &allocator_, p0, p0, DataType::Type::kInt32, 4,
+ HVecHalvingAdd* v3 = new (GetAllocator()) HVecHalvingAdd(
+ GetAllocator(), p0, p0, DataType::Type::kInt32, 4,
/*is_rounded*/ false, /*is_unsigned*/ false, kNoDexPc);
- HVecHalvingAdd* v4 = new (&allocator_) HVecHalvingAdd(
- &allocator_, p0, p0, DataType::Type::kInt32, 2,
+ HVecHalvingAdd* v4 = new (GetAllocator()) HVecHalvingAdd(
+ GetAllocator(), p0, p0, DataType::Type::kInt32, 2,
/*is_rounded*/ true, /*is_unsigned*/ true, kNoDexPc);
- HVecHalvingAdd* v5 = new (&allocator_) HVecHalvingAdd(
- &allocator_, p1, p1, DataType::Type::kUint8, 16,
+ HVecHalvingAdd* v5 = new (GetAllocator()) HVecHalvingAdd(
+ GetAllocator(), p1, p1, DataType::Type::kUint8, 16,
/*is_rounded*/ true, /*is_unsigned*/ false, kNoDexPc);
- HVecHalvingAdd* v6 = new (&allocator_) HVecHalvingAdd(
- &allocator_, p1, p1, DataType::Type::kUint8, 16,
+ HVecHalvingAdd* v6 = new (GetAllocator()) HVecHalvingAdd(
+ GetAllocator(), p1, p1, DataType::Type::kUint8, 16,
/*is_rounded*/ false, /*is_unsigned*/ false, kNoDexPc);
- HVecHalvingAdd* v7 = new (&allocator_) HVecHalvingAdd(
- &allocator_, p1, p1, DataType::Type::kInt8, 16,
+ HVecHalvingAdd* v7 = new (GetAllocator()) HVecHalvingAdd(
+ GetAllocator(), p1, p1, DataType::Type::kInt8, 16,
/*is_rounded*/ true, /*is_unsigned*/ false, kNoDexPc);
- HVecHalvingAdd* v8 = new (&allocator_) HVecHalvingAdd(
- &allocator_, p1, p1, DataType::Type::kInt8, 16,
+ HVecHalvingAdd* v8 = new (GetAllocator()) HVecHalvingAdd(
+ GetAllocator(), p1, p1, DataType::Type::kInt8, 16,
/*is_rounded*/ false, /*is_unsigned*/ false, kNoDexPc);
- HVecHalvingAdd* v9 = new (&allocator_) HVecHalvingAdd(
- &allocator_, p2, p2, DataType::Type::kUint16, 8,
+ HVecHalvingAdd* v9 = new (GetAllocator()) HVecHalvingAdd(
+ GetAllocator(), p2, p2, DataType::Type::kUint16, 8,
/*is_rounded*/ true, /*is_unsigned*/ false, kNoDexPc);
- HVecHalvingAdd* v10 = new (&allocator_) HVecHalvingAdd(
- &allocator_, p2, p2, DataType::Type::kUint16, 8,
+ HVecHalvingAdd* v10 = new (GetAllocator()) HVecHalvingAdd(
+ GetAllocator(), p2, p2, DataType::Type::kUint16, 8,
/*is_rounded*/ false, /*is_unsigned*/ false, kNoDexPc);
- HVecHalvingAdd* v11 = new (&allocator_) HVecHalvingAdd(
- &allocator_, p2, p2, DataType::Type::kInt16, 2,
+ HVecHalvingAdd* v11 = new (GetAllocator()) HVecHalvingAdd(
+ GetAllocator(), p2, p2, DataType::Type::kInt16, 2,
/*is_rounded*/ true, /*is_unsigned*/ false, kNoDexPc);
- HVecHalvingAdd* v12 = new (&allocator_) HVecHalvingAdd(
- &allocator_, p2, p2, DataType::Type::kInt16, 2,
+ HVecHalvingAdd* v12 = new (GetAllocator()) HVecHalvingAdd(
+ GetAllocator(), p2, p2, DataType::Type::kInt16, 2,
/*is_rounded*/ false, /*is_unsigned*/ false, kNoDexPc);
HVecHalvingAdd* hadd_insns[] = { v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12 };
@@ -460,15 +456,15 @@ TEST_F(NodesVectorTest, VectorAttributesMatterOnHalvingAdd) {
}
TEST_F(NodesVectorTest, VectorOperationMattersOnMultiplyAccumulate) {
- HVecOperation* v0 = new (&allocator_)
- HVecReplicateScalar(&allocator_, int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc);
+ HVecOperation* v0 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc);
- HVecMultiplyAccumulate* v1 = new (&allocator_) HVecMultiplyAccumulate(
- &allocator_, HInstruction::kAdd, v0, v0, v0, DataType::Type::kInt32, 4, kNoDexPc);
- HVecMultiplyAccumulate* v2 = new (&allocator_) HVecMultiplyAccumulate(
- &allocator_, HInstruction::kSub, v0, v0, v0, DataType::Type::kInt32, 4, kNoDexPc);
- HVecMultiplyAccumulate* v3 = new (&allocator_) HVecMultiplyAccumulate(
- &allocator_, HInstruction::kAdd, v0, v0, v0, DataType::Type::kInt32, 2, kNoDexPc);
+ HVecMultiplyAccumulate* v1 = new (GetAllocator()) HVecMultiplyAccumulate(
+ GetAllocator(), HInstruction::kAdd, v0, v0, v0, DataType::Type::kInt32, 4, kNoDexPc);
+ HVecMultiplyAccumulate* v2 = new (GetAllocator()) HVecMultiplyAccumulate(
+ GetAllocator(), HInstruction::kSub, v0, v0, v0, DataType::Type::kInt32, 4, kNoDexPc);
+ HVecMultiplyAccumulate* v3 = new (GetAllocator()) HVecMultiplyAccumulate(
+ GetAllocator(), HInstruction::kAdd, v0, v0, v0, DataType::Type::kInt32, 2, kNoDexPc);
EXPECT_FALSE(v0->CanBeMoved());
EXPECT_TRUE(v1->CanBeMoved());
@@ -488,15 +484,15 @@ TEST_F(NodesVectorTest, VectorOperationMattersOnMultiplyAccumulate) {
}
TEST_F(NodesVectorTest, VectorKindMattersOnReduce) {
- HVecOperation* v0 = new (&allocator_)
- HVecReplicateScalar(&allocator_, int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc);
-
- HVecReduce* v1 = new (&allocator_) HVecReduce(
- &allocator_, v0, DataType::Type::kInt32, 4, HVecReduce::kSum, kNoDexPc);
- HVecReduce* v2 = new (&allocator_) HVecReduce(
- &allocator_, v0, DataType::Type::kInt32, 4, HVecReduce::kMin, kNoDexPc);
- HVecReduce* v3 = new (&allocator_) HVecReduce(
- &allocator_, v0, DataType::Type::kInt32, 4, HVecReduce::kMax, kNoDexPc);
+ HVecOperation* v0 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc);
+
+ HVecReduce* v1 = new (GetAllocator()) HVecReduce(
+ GetAllocator(), v0, DataType::Type::kInt32, 4, HVecReduce::kSum, kNoDexPc);
+ HVecReduce* v2 = new (GetAllocator()) HVecReduce(
+ GetAllocator(), v0, DataType::Type::kInt32, 4, HVecReduce::kMin, kNoDexPc);
+ HVecReduce* v3 = new (GetAllocator()) HVecReduce(
+ GetAllocator(), v0, DataType::Type::kInt32, 4, HVecReduce::kMax, kNoDexPc);
EXPECT_FALSE(v0->CanBeMoved());
EXPECT_TRUE(v1->CanBeMoved());
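The nodes_vector_test.cc hunks above replace the fixture-local ArenaPool/ArenaAllocator pair with the allocator inherited from OptimizingUnitTest. A minimal sketch of the resulting pattern, assuming the OptimizingUnitTest base introduced later in this patch; the fixture name, the single parameter, and the test body are illustrative only, not part of the change:

class VectorNodeSketchTest : public OptimizingUnitTest {
 protected:
  VectorNodeSketchTest() {
    graph_ = CreateGraph();
    entry_block_ = new (GetAllocator()) HBasicBlock(graph_);
    graph_->AddBlock(entry_block_);
    graph_->SetEntryBlock(entry_block_);
    // Allocate IR through the fixture-owned allocator so the memory is counted.
    parameter_ = new (GetAllocator()) HParameterValue(
        graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32);
    entry_block_->AddInstruction(parameter_);
  }

  HGraph* graph_;
  HBasicBlock* entry_block_;
  HParameterValue* parameter_;
};

TEST_F(VectorNodeSketchTest, ReplicateScalarIsNotMovable) {
  // Mirrors the assertions above: a replicate-scalar node is not movable.
  HVecOperation* repl = new (GetAllocator()) HVecReplicateScalar(
      GetAllocator(), parameter_, DataType::Type::kInt32, 4, kNoDexPc);
  EXPECT_FALSE(repl->CanBeMoved());
}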
diff --git a/compiler/optimizing/optimizing_cfi_test.cc b/compiler/optimizing/optimizing_cfi_test.cc
index 99d5284714..bd65cbf25e 100644
--- a/compiler/optimizing/optimizing_cfi_test.cc
+++ b/compiler/optimizing/optimizing_cfi_test.cc
@@ -46,19 +46,20 @@ class OptimizingCFITest : public CFITest {
static constexpr bool kGenerateExpected = false;
OptimizingCFITest()
- : pool_(),
- allocator_(&pool_),
+ : pool_and_allocator_(),
opts_(),
isa_features_(),
graph_(nullptr),
code_gen_(),
- blocks_(allocator_.Adapter()) {}
+ blocks_(GetAllocator()->Adapter()) {}
+
+ ArenaAllocator* GetAllocator() { return pool_and_allocator_.GetAllocator(); }
void SetUpFrame(InstructionSet isa) {
// Setup simple context.
std::string error;
isa_features_ = InstructionSetFeatures::FromVariant(isa, "default", &error);
- graph_ = CreateGraph(&allocator_);
+ graph_ = CreateGraph(&pool_and_allocator_);
// Generate simple frame with some spills.
code_gen_ = CodeGenerator::Create(graph_, isa, *isa_features_, opts_);
code_gen_->GetAssembler()->cfi().SetEnabled(true);
@@ -142,8 +143,7 @@ class OptimizingCFITest : public CFITest {
DISALLOW_COPY_AND_ASSIGN(InternalCodeAllocator);
};
- ArenaPool pool_;
- ArenaAllocator allocator_;
+ ArenaPoolAndAllocator pool_and_allocator_;
CompilerOptions opts_;
std::unique_ptr<const InstructionSetFeatures> isa_features_;
HGraph* graph_;
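OptimizingCFITest already derives from CFITest, so instead of inheriting OptimizingUnitTest it holds the new ArenaPoolAndAllocator wrapper directly. A hedged sketch of that wrapper pattern for any fixture in the same situation; the fixture name and MakeGraph helper are made up for illustration:

class SomeCfiStyleFixture : public testing::Test {
 protected:
  // The wrapper bundles ArenaPool, ArenaAllocator and ArenaStack, so a single
  // member replaces the old pool_/allocator_ pair.
  ArenaAllocator* GetAllocator() { return pool_and_allocator_.GetAllocator(); }
  ArenaStack* GetArenaStack() { return pool_and_allocator_.GetArenaStack(); }

  HGraph* MakeGraph() { return CreateGraph(&pool_and_allocator_); }

 private:
  ArenaPoolAndAllocator pool_and_allocator_;
};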
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 1e06ea86a2..50b870ba8c 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -45,6 +45,7 @@
#include "base/dumpable.h"
#include "base/macros.h"
#include "base/mutex.h"
+#include "base/scoped_arena_allocator.h"
#include "base/timing_logger.h"
#include "bounds_check_elimination.h"
#include "builder.h"
@@ -148,7 +149,7 @@ class PassObserver : public ValueObject {
cached_method_name_(),
timing_logger_enabled_(compiler_driver->GetDumpPasses()),
timing_logger_(timing_logger_enabled_ ? GetMethodName() : "", true, true),
- disasm_info_(graph->GetArena()),
+ disasm_info_(graph->GetAllocator()),
visualizer_oss_(),
visualizer_output_(visualizer_output),
visualizer_enabled_(!compiler_driver->GetCompilerOptions().GetDumpCfgFileName().empty()),
@@ -364,7 +365,8 @@ class OptimizingCompiler FINAL : public Compiler {
// 2) Transforms the graph to SSA. Returns null if it failed.
// 3) Runs optimizations on the graph, including register allocator.
// 4) Generates code with the `code_allocator` provided.
- CodeGenerator* TryCompile(ArenaAllocator* arena,
+ CodeGenerator* TryCompile(ArenaAllocator* allocator,
+ ArenaStack* arena_stack,
CodeVectorAllocator* code_allocator,
const DexFile::CodeItem* code_item,
uint32_t access_flags,
@@ -608,7 +610,7 @@ void OptimizingCompiler::MaybeRunInliner(HGraph* graph,
return;
}
size_t number_of_dex_registers = dex_compilation_unit.GetCodeItem()->registers_size_;
- HInliner* inliner = new (graph->GetArena()) HInliner(
+ HInliner* inliner = new (graph->GetAllocator()) HInliner(
graph, // outer_graph
graph, // outermost_graph
codegen,
@@ -631,17 +633,18 @@ void OptimizingCompiler::RunArchOptimizations(InstructionSet instruction_set,
PassObserver* pass_observer) const {
UNUSED(codegen); // To avoid compilation error when compiling for svelte
OptimizingCompilerStats* stats = compilation_stats_.get();
- ArenaAllocator* arena = graph->GetArena();
+ ArenaAllocator* allocator = graph->GetAllocator();
switch (instruction_set) {
#if defined(ART_ENABLE_CODEGEN_arm)
case kThumb2:
case kArm: {
arm::InstructionSimplifierArm* simplifier =
- new (arena) arm::InstructionSimplifierArm(graph, stats);
- SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph);
- GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects, "GVN$after_arch");
+ new (allocator) arm::InstructionSimplifierArm(graph, stats);
+ SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph);
+ GVNOptimization* gvn =
+ new (allocator) GVNOptimization(graph, *side_effects, "GVN$after_arch");
HInstructionScheduling* scheduling =
- new (arena) HInstructionScheduling(graph, instruction_set, codegen);
+ new (allocator) HInstructionScheduling(graph, instruction_set, codegen);
HOptimization* arm_optimizations[] = {
simplifier,
side_effects,
@@ -655,11 +658,12 @@ void OptimizingCompiler::RunArchOptimizations(InstructionSet instruction_set,
#ifdef ART_ENABLE_CODEGEN_arm64
case kArm64: {
arm64::InstructionSimplifierArm64* simplifier =
- new (arena) arm64::InstructionSimplifierArm64(graph, stats);
- SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph);
- GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects, "GVN$after_arch");
+ new (allocator) arm64::InstructionSimplifierArm64(graph, stats);
+ SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph);
+ GVNOptimization* gvn =
+ new (allocator) GVNOptimization(graph, *side_effects, "GVN$after_arch");
HInstructionScheduling* scheduling =
- new (arena) HInstructionScheduling(graph, instruction_set);
+ new (allocator) HInstructionScheduling(graph, instruction_set);
HOptimization* arm64_optimizations[] = {
simplifier,
side_effects,
@@ -673,11 +677,12 @@ void OptimizingCompiler::RunArchOptimizations(InstructionSet instruction_set,
#ifdef ART_ENABLE_CODEGEN_mips
case kMips: {
mips::InstructionSimplifierMips* simplifier =
- new (arena) mips::InstructionSimplifierMips(graph, codegen, stats);
- SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph);
- GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects, "GVN$after_arch");
+ new (allocator) mips::InstructionSimplifierMips(graph, codegen, stats);
+ SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph);
+ GVNOptimization* gvn =
+ new (allocator) GVNOptimization(graph, *side_effects, "GVN$after_arch");
mips::PcRelativeFixups* pc_relative_fixups =
- new (arena) mips::PcRelativeFixups(graph, codegen, stats);
+ new (allocator) mips::PcRelativeFixups(graph, codegen, stats);
HOptimization* mips_optimizations[] = {
simplifier,
side_effects,
@@ -690,8 +695,9 @@ void OptimizingCompiler::RunArchOptimizations(InstructionSet instruction_set,
#endif
#ifdef ART_ENABLE_CODEGEN_mips64
case kMips64: {
- SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph);
- GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects, "GVN$after_arch");
+ SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph);
+ GVNOptimization* gvn =
+ new (allocator) GVNOptimization(graph, *side_effects, "GVN$after_arch");
HOptimization* mips64_optimizations[] = {
side_effects,
gvn,
@@ -702,12 +708,13 @@ void OptimizingCompiler::RunArchOptimizations(InstructionSet instruction_set,
#endif
#ifdef ART_ENABLE_CODEGEN_x86
case kX86: {
- SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph);
- GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects, "GVN$after_arch");
+ SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph);
+ GVNOptimization* gvn =
+ new (allocator) GVNOptimization(graph, *side_effects, "GVN$after_arch");
x86::PcRelativeFixups* pc_relative_fixups =
- new (arena) x86::PcRelativeFixups(graph, codegen, stats);
+ new (allocator) x86::PcRelativeFixups(graph, codegen, stats);
x86::X86MemoryOperandGeneration* memory_gen =
- new (arena) x86::X86MemoryOperandGeneration(graph, codegen, stats);
+ new (allocator) x86::X86MemoryOperandGeneration(graph, codegen, stats);
HOptimization* x86_optimizations[] = {
side_effects,
gvn,
@@ -720,10 +727,11 @@ void OptimizingCompiler::RunArchOptimizations(InstructionSet instruction_set,
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
case kX86_64: {
- SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph);
- GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects, "GVN$after_arch");
+ SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph);
+ GVNOptimization* gvn =
+ new (allocator) GVNOptimization(graph, *side_effects, "GVN$after_arch");
x86::X86MemoryOperandGeneration* memory_gen =
- new (arena) x86::X86MemoryOperandGeneration(graph, codegen, stats);
+ new (allocator) x86::X86MemoryOperandGeneration(graph, codegen, stats);
HOptimization* x86_64_optimizations[] = {
side_effects,
gvn,
@@ -756,7 +764,9 @@ static void AllocateRegisters(HGraph* graph,
}
{
PassScope scope(RegisterAllocator::kRegisterAllocatorPassName, pass_observer);
- RegisterAllocator::Create(graph->GetArena(), codegen, liveness, strategy)->AllocateRegisters();
+ RegisterAllocator* register_allocator =
+ RegisterAllocator::Create(graph->GetAllocator(), codegen, liveness, strategy);
+ register_allocator->AllocateRegisters();
}
}
@@ -767,7 +777,7 @@ void OptimizingCompiler::RunOptimizations(HGraph* graph,
PassObserver* pass_observer,
VariableSizedHandleScope* handles) const {
OptimizingCompilerStats* stats = compilation_stats_.get();
- ArenaAllocator* arena = graph->GetArena();
+ ArenaAllocator* arena = graph->GetAllocator();
if (driver->GetCompilerOptions().GetPassesToRun() != nullptr) {
ArenaVector<HOptimization*> optimizations = BuildOptimizations(
*driver->GetCompilerOptions().GetPassesToRun(),
@@ -865,7 +875,7 @@ void OptimizingCompiler::RunOptimizations(HGraph* graph,
}
static ArenaVector<linker::LinkerPatch> EmitAndSortLinkerPatches(CodeGenerator* codegen) {
- ArenaVector<linker::LinkerPatch> linker_patches(codegen->GetGraph()->GetArena()->Adapter());
+ ArenaVector<linker::LinkerPatch> linker_patches(codegen->GetGraph()->GetAllocator()->Adapter());
codegen->EmitLinkerPatches(&linker_patches);
// Sort patches by literal offset. Required for .oat_patches encoding.
@@ -912,7 +922,8 @@ CompiledMethod* OptimizingCompiler::Emit(ArenaAllocator* arena,
return compiled_method;
}
-CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* arena,
+CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
+ ArenaStack* arena_stack,
CodeVectorAllocator* code_allocator,
const DexFile::CodeItem* code_item,
uint32_t access_flags,
@@ -970,8 +981,9 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* arena,
/* verified_method */ nullptr,
dex_cache);
- HGraph* graph = new (arena) HGraph(
- arena,
+ HGraph* graph = new (allocator) HGraph(
+ allocator,
+ arena_stack,
dex_file,
method_idx,
compiler_driver->GetInstructionSet(),
@@ -1024,7 +1036,6 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* arena,
codegen.get(),
compilation_stats_.get(),
interpreter_metadata,
- dex_cache,
handles);
GraphAnalysisResult result = builder.BuildGraph();
if (result != kAnalysisSuccess) {
@@ -1091,11 +1102,12 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
DCHECK(Runtime::Current()->IsAotCompiler());
const VerifiedMethod* verified_method = compiler_driver->GetVerifiedMethod(&dex_file, method_idx);
DCHECK(!verified_method->HasRuntimeThrow());
- if (compiler_driver->IsMethodVerifiedWithoutFailures(method_idx, class_def_idx, dex_file)
- || verifier::CanCompilerHandleVerificationFailure(
- verified_method->GetEncounteredVerificationFailures())) {
- ArenaAllocator arena(Runtime::Current()->GetArenaPool());
- CodeVectorAllocator code_allocator(&arena);
+ if (compiler_driver->IsMethodVerifiedWithoutFailures(method_idx, class_def_idx, dex_file) ||
+ verifier::CanCompilerHandleVerificationFailure(
+ verified_method->GetEncounteredVerificationFailures())) {
+ ArenaAllocator allocator(Runtime::Current()->GetArenaPool());
+ ArenaStack arena_stack(Runtime::Current()->GetArenaPool());
+ CodeVectorAllocator code_allocator(&allocator);
std::unique_ptr<CodeGenerator> codegen;
{
ScopedObjectAccess soa(Thread::Current());
@@ -1103,7 +1115,8 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
// Go to native so that we don't block GC during compilation.
ScopedThreadSuspension sts(soa.Self(), kNative);
codegen.reset(
- TryCompile(&arena,
+ TryCompile(&allocator,
+ &arena_stack,
&code_allocator,
code_item,
access_flags,
@@ -1120,12 +1133,16 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
if (codegen.get() != nullptr) {
MaybeRecordStat(compilation_stats_.get(),
MethodCompilationStat::kCompiled);
- method = Emit(&arena, &code_allocator, codegen.get(), compiler_driver, code_item);
+ method = Emit(&allocator, &code_allocator, codegen.get(), compiler_driver, code_item);
if (kArenaAllocatorCountAllocations) {
- if (arena.BytesAllocated() > kArenaAllocatorMemoryReportThreshold) {
- MemStats mem_stats(arena.GetMemStats());
- LOG(INFO) << dex_file.PrettyMethod(method_idx) << " " << Dumpable<MemStats>(mem_stats);
+ size_t total_allocated = allocator.BytesAllocated() + arena_stack.PeakBytesAllocated();
+ if (total_allocated > kArenaAllocatorMemoryReportThreshold) {
+ MemStats mem_stats(allocator.GetMemStats());
+ MemStats peak_stats(arena_stack.GetPeakStats());
+ LOG(INFO) << dex_file.PrettyMethod(method_idx)
+ << "\n" << Dumpable<MemStats>(mem_stats)
+ << "\n" << Dumpable<MemStats>(peak_stats);
}
}
}
@@ -1200,8 +1217,9 @@ bool OptimizingCompiler::JitCompile(Thread* self,
const uint32_t access_flags = method->GetAccessFlags();
const InvokeType invoke_type = method->GetInvokeType();
- ArenaAllocator arena(Runtime::Current()->GetJitArenaPool());
- CodeVectorAllocator code_allocator(&arena);
+ ArenaAllocator allocator(Runtime::Current()->GetJitArenaPool());
+ ArenaStack arena_stack(Runtime::Current()->GetJitArenaPool());
+ CodeVectorAllocator code_allocator(&allocator);
VariableSizedHandleScope handles(self);
std::unique_ptr<CodeGenerator> codegen;
@@ -1209,7 +1227,8 @@ bool OptimizingCompiler::JitCompile(Thread* self,
// Go to native so that we don't block GC during compilation.
ScopedThreadSuspension sts(self, kNative);
codegen.reset(
- TryCompile(&arena,
+ TryCompile(&allocator,
+ &arena_stack,
&code_allocator,
code_item,
access_flags,
@@ -1227,9 +1246,13 @@ bool OptimizingCompiler::JitCompile(Thread* self,
}
if (kArenaAllocatorCountAllocations) {
- if (arena.BytesAllocated() > kArenaAllocatorMemoryReportThreshold) {
- MemStats mem_stats(arena.GetMemStats());
- LOG(INFO) << dex_file->PrettyMethod(method_idx) << " " << Dumpable<MemStats>(mem_stats);
+ size_t total_allocated = allocator.BytesAllocated() + arena_stack.PeakBytesAllocated();
+ if (total_allocated > kArenaAllocatorMemoryReportThreshold) {
+ MemStats mem_stats(allocator.GetMemStats());
+ MemStats peak_stats(arena_stack.GetPeakStats());
+ LOG(INFO) << dex_file->PrettyMethod(method_idx)
+ << "\n" << Dumpable<MemStats>(mem_stats)
+ << "\n" << Dumpable<MemStats>(peak_stats);
}
}
}
@@ -1321,7 +1344,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
CreateJITCodeEntryForAddress(code_address, std::move(elf_file));
}
- Runtime::Current()->GetJit()->AddMemoryUsage(method, arena.BytesUsed());
+ Runtime::Current()->GetJit()->AddMemoryUsage(method, allocator.BytesUsed());
if (jit_logger != nullptr) {
jit_logger->WriteLog(code, code_allocator.GetSize(), method);
}
diff --git a/compiler/optimizing/optimizing_unit_test.h b/compiler/optimizing/optimizing_unit_test.h
index 33f1a4affe..f31ad828eb 100644
--- a/compiler/optimizing/optimizing_unit_test.h
+++ b/compiler/optimizing/optimizing_unit_test.h
@@ -17,6 +17,7 @@
#ifndef ART_COMPILER_OPTIMIZING_OPTIMIZING_UNIT_TEST_H_
#define ART_COMPILER_OPTIMIZING_OPTIMIZING_UNIT_TEST_H_
+#include "base/scoped_arena_allocator.h"
#include "builder.h"
#include "common_compiler_test.h"
#include "dex_file.h"
@@ -78,30 +79,65 @@ void RemoveSuspendChecks(HGraph* graph) {
}
}
-inline HGraph* CreateGraph(ArenaAllocator* allocator) {
- return new (allocator) HGraph(
- allocator,
- *reinterpret_cast<DexFile*>(allocator->Alloc(sizeof(DexFile))),
+class ArenaPoolAndAllocator {
+ public:
+ ArenaPoolAndAllocator() : pool_(), allocator_(&pool_), arena_stack_(&pool_) { }
+
+ ArenaAllocator* GetAllocator() { return &allocator_; }
+ ArenaStack* GetArenaStack() { return &arena_stack_; }
+
+ private:
+ ArenaPool pool_;
+ ArenaAllocator allocator_;
+ ArenaStack arena_stack_;
+};
+
+inline HGraph* CreateGraph(ArenaPoolAndAllocator* pool_and_allocator) {
+ return new (pool_and_allocator->GetAllocator()) HGraph(
+ pool_and_allocator->GetAllocator(),
+ pool_and_allocator->GetArenaStack(),
+ *reinterpret_cast<DexFile*>(pool_and_allocator->GetAllocator()->Alloc(sizeof(DexFile))),
/*method_idx*/-1,
kRuntimeISA);
}
-// Create a control-flow graph from Dex instructions.
-inline HGraph* CreateCFG(ArenaAllocator* allocator,
- const uint16_t* data,
- DataType::Type return_type = DataType::Type::kInt32) {
- const DexFile::CodeItem* item =
- reinterpret_cast<const DexFile::CodeItem*>(data);
- HGraph* graph = CreateGraph(allocator);
-
- {
- ScopedObjectAccess soa(Thread::Current());
- VariableSizedHandleScope handles(soa.Self());
- HGraphBuilder builder(graph, *item, &handles, return_type);
- bool graph_built = (builder.BuildGraph() == kAnalysisSuccess);
- return graph_built ? graph : nullptr;
+class OptimizingUnitTest : public CommonCompilerTest {
+ protected:
+ OptimizingUnitTest() : pool_and_allocator_(new ArenaPoolAndAllocator()) { }
+
+ ArenaAllocator* GetAllocator() { return pool_and_allocator_->GetAllocator(); }
+ ArenaStack* GetArenaStack() { return pool_and_allocator_->GetArenaStack(); }
+
+ void ResetPoolAndAllocator() {
+ pool_and_allocator_.reset(new ArenaPoolAndAllocator());
+ handles_.reset(); // When getting rid of the old HGraph, we can also reset handles_.
}
-}
+
+ HGraph* CreateGraph() {
+ return art::CreateGraph(pool_and_allocator_.get());
+ }
+
+ // Create a control-flow graph from Dex instructions.
+ HGraph* CreateCFG(const uint16_t* data, DataType::Type return_type = DataType::Type::kInt32) {
+ const DexFile::CodeItem* item =
+ reinterpret_cast<const DexFile::CodeItem*>(data);
+ HGraph* graph = CreateGraph();
+
+ {
+ ScopedObjectAccess soa(Thread::Current());
+ if (handles_ == nullptr) {
+ handles_.reset(new VariableSizedHandleScope(soa.Self()));
+ }
+ HGraphBuilder builder(graph, *item, handles_.get(), return_type);
+ bool graph_built = (builder.BuildGraph() == kAnalysisSuccess);
+ return graph_built ? graph : nullptr;
+ }
+ }
+
+ private:
+ std::unique_ptr<ArenaPoolAndAllocator> pool_and_allocator_;
+ std::unique_ptr<VariableSizedHandleScope> handles_;
+};
// Naive string diff data type.
typedef std::list<std::pair<std::string, std::string>> diff_t;
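The new OptimizingUnitTest base also exposes GetArenaStack(), which backs the pass-local ScopedArenaAllocator this change introduces. An illustrative test-only sketch, assuming base/scoped_arena_containers.h is available; the fixture and test names are assumptions, not part of the patch:

class ScopedArenaSketchTest : public OptimizingUnitTest {};

TEST_F(ScopedArenaSketchTest, PassLocalDataIsCounted) {
  // A pass-local allocator drawing from the fixture's arena stack; its usage
  // shows up in the stack's peak statistics instead of being hidden.
  ScopedArenaAllocator scoped_allocator(GetArenaStack());
  ScopedArenaVector<int> worklist(scoped_allocator.Adapter(kArenaAllocMisc));
  worklist.push_back(42);
  EXPECT_EQ(42, worklist.back());
}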
diff --git a/compiler/optimizing/pc_relative_fixups_mips.cc b/compiler/optimizing/pc_relative_fixups_mips.cc
index e569b78c9d..9d5358514e 100644
--- a/compiler/optimizing/pc_relative_fixups_mips.cc
+++ b/compiler/optimizing/pc_relative_fixups_mips.cc
@@ -52,7 +52,7 @@ class PCRelativeHandlerVisitor : public HGraphVisitor {
}
// Insert the base at the start of the entry block, move it to a better
// position later in MoveBaseIfNeeded().
- base_ = new (GetGraph()->GetArena()) HMipsComputeBaseMethodAddress();
+ base_ = new (GetGraph()->GetAllocator()) HMipsComputeBaseMethodAddress();
HBasicBlock* entry_block = GetGraph()->GetEntryBlock();
entry_block->InsertInstructionBefore(base_, entry_block->GetFirstInstruction());
DCHECK(base_ != nullptr);
@@ -112,7 +112,7 @@ class PCRelativeHandlerVisitor : public HGraphVisitor {
InitializePCRelativeBasePointer();
HGraph* graph = GetGraph();
HBasicBlock* block = switch_insn->GetBlock();
- HMipsPackedSwitch* mips_switch = new (graph->GetArena()) HMipsPackedSwitch(
+ HMipsPackedSwitch* mips_switch = new (graph->GetAllocator()) HMipsPackedSwitch(
switch_insn->GetStartValue(),
switch_insn->GetNumEntries(),
switch_insn->InputAt(0),
diff --git a/compiler/optimizing/pc_relative_fixups_x86.cc b/compiler/optimizing/pc_relative_fixups_x86.cc
index a114e78eb4..f92f4b274a 100644
--- a/compiler/optimizing/pc_relative_fixups_x86.cc
+++ b/compiler/optimizing/pc_relative_fixups_x86.cc
@@ -137,7 +137,7 @@ class PCRelativeHandlerVisitor : public HGraphVisitor {
HX86ComputeBaseMethodAddress* method_address = GetPCRelativeBasePointer(neg);
HGraph* graph = GetGraph();
HBasicBlock* block = neg->GetBlock();
- HX86FPNeg* x86_fp_neg = new (graph->GetArena()) HX86FPNeg(
+ HX86FPNeg* x86_fp_neg = new (graph->GetAllocator()) HX86FPNeg(
neg->GetType(),
neg->InputAt(0),
method_address,
@@ -156,7 +156,7 @@ class PCRelativeHandlerVisitor : public HGraphVisitor {
HX86ComputeBaseMethodAddress* method_address = GetPCRelativeBasePointer(switch_insn);
HGraph* graph = GetGraph();
HBasicBlock* block = switch_insn->GetBlock();
- HX86PackedSwitch* x86_switch = new (graph->GetArena()) HX86PackedSwitch(
+ HX86PackedSwitch* x86_switch = new (graph->GetAllocator()) HX86PackedSwitch(
switch_insn->GetStartValue(),
switch_insn->GetNumEntries(),
switch_insn->InputAt(0),
@@ -176,7 +176,7 @@ class PCRelativeHandlerVisitor : public HGraphVisitor {
// Insert the base at the start of the entry block, move it to a better
// position later in MoveBaseIfNeeded().
HX86ComputeBaseMethodAddress* method_address =
- new (GetGraph()->GetArena()) HX86ComputeBaseMethodAddress();
+ new (GetGraph()->GetAllocator()) HX86ComputeBaseMethodAddress();
if (has_irreducible_loops) {
cursor->GetBlock()->InsertInstructionBefore(method_address, cursor);
} else {
@@ -190,7 +190,7 @@ class PCRelativeHandlerVisitor : public HGraphVisitor {
void ReplaceInput(HInstruction* insn, HConstant* value, int input_index, bool materialize) {
HX86ComputeBaseMethodAddress* method_address = GetPCRelativeBasePointer(insn);
HX86LoadFromConstantTable* load_constant =
- new (GetGraph()->GetArena()) HX86LoadFromConstantTable(method_address, value);
+ new (GetGraph()->GetAllocator()) HX86LoadFromConstantTable(method_address, value);
if (!materialize) {
load_constant->MarkEmittedAtUseSite();
}
diff --git a/compiler/optimizing/prepare_for_register_allocation.cc b/compiler/optimizing/prepare_for_register_allocation.cc
index b52de367d1..5ec76b457b 100644
--- a/compiler/optimizing/prepare_for_register_allocation.cc
+++ b/compiler/optimizing/prepare_for_register_allocation.cc
@@ -56,7 +56,7 @@ void PrepareForRegisterAllocation::VisitBoundsCheck(HBoundsCheck* check) {
// Add a fake environment for String.charAt() inline info as we want
// the exception to appear as being thrown from there.
ArtMethod* char_at_method = jni::DecodeArtMethod(WellKnownClasses::java_lang_String_charAt);
- ArenaAllocator* arena = GetGraph()->GetArena();
+ ArenaAllocator* arena = GetGraph()->GetAllocator();
HEnvironment* environment = new (arena) HEnvironment(arena,
/* number_of_vregs */ 0u,
char_at_method,
diff --git a/compiler/optimizing/pretty_printer_test.cc b/compiler/optimizing/pretty_printer_test.cc
index 14d2360392..4aec6d3999 100644
--- a/compiler/optimizing/pretty_printer_test.cc
+++ b/compiler/optimizing/pretty_printer_test.cc
@@ -27,17 +27,18 @@
namespace art {
-static void TestCode(const uint16_t* data, const char* expected) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateCFG(&allocator, data);
+class PrettyPrinterTest : public OptimizingUnitTest {
+ protected:
+ void TestCode(const uint16_t* data, const char* expected);
+};
+
+void PrettyPrinterTest::TestCode(const uint16_t* data, const char* expected) {
+ HGraph* graph = CreateCFG(data);
StringPrettyPrinter printer(graph);
printer.VisitInsertionOrder();
ASSERT_STREQ(expected, printer.str().c_str());
}
-class PrettyPrinterTest : public CommonCompilerTest {};
-
TEST_F(PrettyPrinterTest, ReturnVoid) {
const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
Instruction::RETURN_VOID);
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index f5064c3057..6d9ebc8d91 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -122,7 +122,7 @@ ReferenceTypePropagation::ReferenceTypePropagation(HGraph* graph,
class_loader_(class_loader),
hint_dex_cache_(hint_dex_cache),
handle_cache_(handles),
- worklist_(graph->GetArena()->Adapter(kArenaAllocReferenceTypePropagation)),
+ worklist_(graph->GetAllocator()->Adapter(kArenaAllocReferenceTypePropagation)),
is_first_run_(is_first_run) {
}
@@ -235,7 +235,7 @@ static void BoundTypeIn(HInstruction* receiver,
: start_block->GetFirstInstruction();
if (ShouldCreateBoundType(
insert_point, receiver, class_rti, start_instruction, start_block)) {
- bound_type = new (receiver->GetBlock()->GetGraph()->GetArena()) HBoundType(receiver);
+ bound_type = new (receiver->GetBlock()->GetGraph()->GetAllocator()) HBoundType(receiver);
bound_type->SetUpperBound(class_rti, /* bound_can_be_null */ false);
start_block->InsertInstructionBefore(bound_type, insert_point);
// To comply with the RTP algorithm, don't type the bound type just yet, it will
diff --git a/compiler/optimizing/reference_type_propagation_test.cc b/compiler/optimizing/reference_type_propagation_test.cc
index cb2af91d87..028b6d3b79 100644
--- a/compiler/optimizing/reference_type_propagation_test.cc
+++ b/compiler/optimizing/reference_type_propagation_test.cc
@@ -28,22 +28,20 @@ namespace art {
* Fixture class for unit testing the ReferenceTypePropagation phase. Used to verify the
* functionality of methods and situations that are hard to set up with checker tests.
*/
-class ReferenceTypePropagationTest : public CommonCompilerTest {
+class ReferenceTypePropagationTest : public OptimizingUnitTest {
public:
- ReferenceTypePropagationTest() : pool_(), allocator_(&pool_), propagation_(nullptr) {
- graph_ = CreateGraph(&allocator_);
- }
+ ReferenceTypePropagationTest() : graph_(CreateGraph()), propagation_(nullptr) { }
~ReferenceTypePropagationTest() { }
void SetupPropagation(VariableSizedHandleScope* handles) {
graph_->InitializeInexactObjectRTI(handles);
- propagation_ = new (&allocator_) ReferenceTypePropagation(graph_,
- Handle<mirror::ClassLoader>(),
- Handle<mirror::DexCache>(),
- handles,
- true,
- "test_prop");
+ propagation_ = new (GetAllocator()) ReferenceTypePropagation(graph_,
+ Handle<mirror::ClassLoader>(),
+ Handle<mirror::DexCache>(),
+ handles,
+ true,
+ "test_prop");
}
// Relay method to merge type in reference type propagation.
@@ -68,8 +66,6 @@ class ReferenceTypePropagationTest : public CommonCompilerTest {
}
// General building fields.
- ArenaPool pool_;
- ArenaAllocator allocator_;
HGraph* graph_;
ReferenceTypePropagation* propagation_;
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index 59987e26b6..9c8b1df1f8 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -38,12 +38,26 @@ using Strategy = RegisterAllocator::Strategy;
// Note: the register allocator tests rely on the fact that constants have live
// intervals and registers get allocated to them.
-class RegisterAllocatorTest : public CommonCompilerTest {
+class RegisterAllocatorTest : public OptimizingUnitTest {
protected:
// These functions need to access private variables of LocationSummary, so we declare it
// as a member of RegisterAllocatorTest, which we make a friend class.
- static void SameAsFirstInputHint(Strategy strategy);
- static void ExpectedInRegisterHint(Strategy strategy);
+ void SameAsFirstInputHint(Strategy strategy);
+ void ExpectedInRegisterHint(Strategy strategy);
+
+ // Helper functions that make use of the OptimizingUnitTest's members.
+ bool Check(const uint16_t* data, Strategy strategy);
+ void CFG1(Strategy strategy);
+ void Loop1(Strategy strategy);
+ void Loop2(Strategy strategy);
+ void Loop3(Strategy strategy);
+ void DeadPhi(Strategy strategy);
+ HGraph* BuildIfElseWithPhi(HPhi** phi, HInstruction** input1, HInstruction** input2);
+ void PhiHint(Strategy strategy);
+ HGraph* BuildFieldReturn(HInstruction** field, HInstruction** ret);
+ HGraph* BuildTwoSubs(HInstruction** first_sub, HInstruction** second_sub);
+ HGraph* BuildDiv(HInstruction** div);
+ void ExpectedExactInRegisterAndSameOutputHint(Strategy strategy);
};
// This macro should include all register allocation strategies that should be tested.
@@ -55,17 +69,15 @@ TEST_F(RegisterAllocatorTest, test_name##_GraphColor) {\
test_name(Strategy::kRegisterAllocatorGraphColor);\
}
-static bool Check(const uint16_t* data, Strategy strategy) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateCFG(&allocator, data);
+bool RegisterAllocatorTest::Check(const uint16_t* data, Strategy strategy) {
+ HGraph* graph = CreateCFG(data);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
SsaLivenessAnalysis liveness(graph, &codegen);
liveness.Analyze();
RegisterAllocator* register_allocator =
- RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
+ RegisterAllocator::Create(GetAllocator(), &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
return register_allocator->Validate(false);
}
@@ -75,95 +87,93 @@ static bool Check(const uint16_t* data, Strategy strategy) {
* tests are based on this validation method.
*/
TEST_F(RegisterAllocatorTest, ValidateIntervals) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateGraph(&allocator);
+ HGraph* graph = CreateGraph();
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- ArenaVector<LiveInterval*> intervals(allocator.Adapter());
+ ArenaVector<LiveInterval*> intervals(GetAllocator()->Adapter());
// Test with two intervals of the same range.
{
static constexpr size_t ranges[][2] = {{0, 42}};
- intervals.push_back(BuildInterval(ranges, arraysize(ranges), &allocator, 0));
- intervals.push_back(BuildInterval(ranges, arraysize(ranges), &allocator, 1));
+ intervals.push_back(BuildInterval(ranges, arraysize(ranges), GetAllocator(), 0));
+ intervals.push_back(BuildInterval(ranges, arraysize(ranges), GetAllocator(), 1));
ASSERT_TRUE(RegisterAllocator::ValidateIntervals(
- intervals, 0, 0, codegen, &allocator, true, false));
+ intervals, 0, 0, codegen, GetAllocator(), true, false));
intervals[1]->SetRegister(0);
ASSERT_FALSE(RegisterAllocator::ValidateIntervals(
- intervals, 0, 0, codegen, &allocator, true, false));
+ intervals, 0, 0, codegen, GetAllocator(), true, false));
intervals.clear();
}
// Test with two non-intersecting intervals.
{
static constexpr size_t ranges1[][2] = {{0, 42}};
- intervals.push_back(BuildInterval(ranges1, arraysize(ranges1), &allocator, 0));
+ intervals.push_back(BuildInterval(ranges1, arraysize(ranges1), GetAllocator(), 0));
static constexpr size_t ranges2[][2] = {{42, 43}};
- intervals.push_back(BuildInterval(ranges2, arraysize(ranges2), &allocator, 1));
+ intervals.push_back(BuildInterval(ranges2, arraysize(ranges2), GetAllocator(), 1));
ASSERT_TRUE(RegisterAllocator::ValidateIntervals(
- intervals, 0, 0, codegen, &allocator, true, false));
+ intervals, 0, 0, codegen, GetAllocator(), true, false));
intervals[1]->SetRegister(0);
ASSERT_TRUE(RegisterAllocator::ValidateIntervals(
- intervals, 0, 0, codegen, &allocator, true, false));
+ intervals, 0, 0, codegen, GetAllocator(), true, false));
intervals.clear();
}
// Test with two non-intersecting intervals, with one with a lifetime hole.
{
static constexpr size_t ranges1[][2] = {{0, 42}, {45, 48}};
- intervals.push_back(BuildInterval(ranges1, arraysize(ranges1), &allocator, 0));
+ intervals.push_back(BuildInterval(ranges1, arraysize(ranges1), GetAllocator(), 0));
static constexpr size_t ranges2[][2] = {{42, 43}};
- intervals.push_back(BuildInterval(ranges2, arraysize(ranges2), &allocator, 1));
+ intervals.push_back(BuildInterval(ranges2, arraysize(ranges2), GetAllocator(), 1));
ASSERT_TRUE(RegisterAllocator::ValidateIntervals(
- intervals, 0, 0, codegen, &allocator, true, false));
+ intervals, 0, 0, codegen, GetAllocator(), true, false));
intervals[1]->SetRegister(0);
ASSERT_TRUE(RegisterAllocator::ValidateIntervals(
- intervals, 0, 0, codegen, &allocator, true, false));
+ intervals, 0, 0, codegen, GetAllocator(), true, false));
intervals.clear();
}
// Test with intersecting intervals.
{
static constexpr size_t ranges1[][2] = {{0, 42}, {44, 48}};
- intervals.push_back(BuildInterval(ranges1, arraysize(ranges1), &allocator, 0));
+ intervals.push_back(BuildInterval(ranges1, arraysize(ranges1), GetAllocator(), 0));
static constexpr size_t ranges2[][2] = {{42, 47}};
- intervals.push_back(BuildInterval(ranges2, arraysize(ranges2), &allocator, 1));
+ intervals.push_back(BuildInterval(ranges2, arraysize(ranges2), GetAllocator(), 1));
ASSERT_TRUE(RegisterAllocator::ValidateIntervals(
- intervals, 0, 0, codegen, &allocator, true, false));
+ intervals, 0, 0, codegen, GetAllocator(), true, false));
intervals[1]->SetRegister(0);
ASSERT_FALSE(RegisterAllocator::ValidateIntervals(
- intervals, 0, 0, codegen, &allocator, true, false));
+ intervals, 0, 0, codegen, GetAllocator(), true, false));
intervals.clear();
}
// Test with siblings.
{
static constexpr size_t ranges1[][2] = {{0, 42}, {44, 48}};
- intervals.push_back(BuildInterval(ranges1, arraysize(ranges1), &allocator, 0));
+ intervals.push_back(BuildInterval(ranges1, arraysize(ranges1), GetAllocator(), 0));
intervals[0]->SplitAt(43);
static constexpr size_t ranges2[][2] = {{42, 47}};
- intervals.push_back(BuildInterval(ranges2, arraysize(ranges2), &allocator, 1));
+ intervals.push_back(BuildInterval(ranges2, arraysize(ranges2), GetAllocator(), 1));
ASSERT_TRUE(RegisterAllocator::ValidateIntervals(
- intervals, 0, 0, codegen, &allocator, true, false));
+ intervals, 0, 0, codegen, GetAllocator(), true, false));
intervals[1]->SetRegister(0);
// Sibling of the first interval has no register allocated to it.
ASSERT_TRUE(RegisterAllocator::ValidateIntervals(
- intervals, 0, 0, codegen, &allocator, true, false));
+ intervals, 0, 0, codegen, GetAllocator(), true, false));
intervals[0]->GetNextSibling()->SetRegister(0);
ASSERT_FALSE(RegisterAllocator::ValidateIntervals(
- intervals, 0, 0, codegen, &allocator, true, false));
+ intervals, 0, 0, codegen, GetAllocator(), true, false));
}
}
-static void CFG1(Strategy strategy) {
+void RegisterAllocatorTest::CFG1(Strategy strategy) {
/*
* Test the following snippet:
* return 0;
@@ -185,7 +195,7 @@ static void CFG1(Strategy strategy) {
TEST_ALL_STRATEGIES(CFG1);
-static void Loop1(Strategy strategy) {
+void RegisterAllocatorTest::Loop1(Strategy strategy) {
/*
* Test the following snippet:
* int a = 0;
@@ -226,7 +236,7 @@ static void Loop1(Strategy strategy) {
TEST_ALL_STRATEGIES(Loop1);
-static void Loop2(Strategy strategy) {
+void RegisterAllocatorTest::Loop2(Strategy strategy) {
/*
* Test the following snippet:
* int a = 0;
@@ -277,7 +287,7 @@ static void Loop2(Strategy strategy) {
TEST_ALL_STRATEGIES(Loop2);
-static void Loop3(Strategy strategy) {
+void RegisterAllocatorTest::Loop3(Strategy strategy) {
/*
* Test the following snippet:
* int a = 0
@@ -314,16 +324,14 @@ static void Loop3(Strategy strategy) {
Instruction::MOVE | 1 << 12 | 0 << 8,
Instruction::GOTO | 0xF900);
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateCFG(&allocator, data);
+ HGraph* graph = CreateCFG(data);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
SsaLivenessAnalysis liveness(graph, &codegen);
liveness.Analyze();
RegisterAllocator* register_allocator =
- RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
+ RegisterAllocator::Create(GetAllocator(), &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
ASSERT_TRUE(register_allocator->Validate(false));
@@ -351,9 +359,7 @@ TEST_F(RegisterAllocatorTest, FirstRegisterUse) {
Instruction::XOR_INT_LIT8 | 1 << 8, 1 << 8 | 1,
Instruction::RETURN_VOID);
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateCFG(&allocator, data);
+ HGraph* graph = CreateCFG(data);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
@@ -383,7 +389,7 @@ TEST_F(RegisterAllocatorTest, FirstRegisterUse) {
ASSERT_EQ(new_interval->FirstRegisterUse(), last_xor->GetLifetimePosition());
}
-static void DeadPhi(Strategy strategy) {
+void RegisterAllocatorTest::DeadPhi(Strategy strategy) {
/* Test for a dead loop phi taking as back-edge input a phi that also has
* this loop phi as input. Walking backwards in SsaDeadPhiElimination
* does not solve the problem because the loop phi will be visited last.
@@ -405,9 +411,7 @@ static void DeadPhi(Strategy strategy) {
Instruction::GOTO | 0xFD00,
Instruction::RETURN_VOID);
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateCFG(&allocator, data);
+ HGraph* graph = CreateCFG(data);
SsaDeadPhiElimination(graph).Run();
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
@@ -415,7 +419,7 @@ static void DeadPhi(Strategy strategy) {
SsaLivenessAnalysis liveness(graph, &codegen);
liveness.Analyze();
RegisterAllocator* register_allocator =
- RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
+ RegisterAllocator::Create(GetAllocator(), &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
ASSERT_TRUE(register_allocator->Validate(false));
}
@@ -433,16 +437,14 @@ TEST_F(RegisterAllocatorTest, FreeUntil) {
Instruction::CONST_4 | 0 | 0,
Instruction::RETURN);
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateCFG(&allocator, data);
+ HGraph* graph = CreateCFG(data);
SsaDeadPhiElimination(graph).Run();
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
SsaLivenessAnalysis liveness(graph, &codegen);
liveness.Analyze();
- RegisterAllocatorLinearScan register_allocator(&allocator, &codegen, liveness);
+ RegisterAllocatorLinearScan register_allocator(GetAllocator(), &codegen, liveness);
// Add an artificial range to cover the temps that will be put in the unhandled list.
LiveInterval* unhandled = graph->GetEntryBlock()->GetFirstInstruction()->GetLiveInterval();
@@ -461,20 +463,21 @@ TEST_F(RegisterAllocatorTest, FreeUntil) {
// Add three temps holding the same register, and starting at different positions.
// Put the one that should be picked in the middle of the inactive list to ensure
// we do not depend on an order.
- LiveInterval* interval = LiveInterval::MakeFixedInterval(&allocator, 0, DataType::Type::kInt32);
+ LiveInterval* interval =
+ LiveInterval::MakeFixedInterval(GetAllocator(), 0, DataType::Type::kInt32);
interval->AddRange(40, 50);
register_allocator.inactive_.push_back(interval);
- interval = LiveInterval::MakeFixedInterval(&allocator, 0, DataType::Type::kInt32);
+ interval = LiveInterval::MakeFixedInterval(GetAllocator(), 0, DataType::Type::kInt32);
interval->AddRange(20, 30);
register_allocator.inactive_.push_back(interval);
- interval = LiveInterval::MakeFixedInterval(&allocator, 0, DataType::Type::kInt32);
+ interval = LiveInterval::MakeFixedInterval(GetAllocator(), 0, DataType::Type::kInt32);
interval->AddRange(60, 70);
register_allocator.inactive_.push_back(interval);
register_allocator.number_of_registers_ = 1;
- register_allocator.registers_array_ = allocator.AllocArray<size_t>(1);
+ register_allocator.registers_array_ = GetAllocator()->AllocArray<size_t>(1);
register_allocator.processing_core_registers_ = true;
register_allocator.unhandled_ = &register_allocator.unhandled_core_intervals_;
@@ -487,36 +490,35 @@ TEST_F(RegisterAllocatorTest, FreeUntil) {
ASSERT_EQ(20u, register_allocator.unhandled_->front()->GetStart());
}
-static HGraph* BuildIfElseWithPhi(ArenaAllocator* allocator,
- HPhi** phi,
- HInstruction** input1,
- HInstruction** input2) {
- HGraph* graph = CreateGraph(allocator);
- HBasicBlock* entry = new (allocator) HBasicBlock(graph);
+HGraph* RegisterAllocatorTest::BuildIfElseWithPhi(HPhi** phi,
+ HInstruction** input1,
+ HInstruction** input2) {
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter = new (allocator) HParameterValue(
+ HInstruction* parameter = new (GetAllocator()) HParameterValue(
graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference);
entry->AddInstruction(parameter);
- HBasicBlock* block = new (allocator) HBasicBlock(graph);
+ HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(block);
entry->AddSuccessor(block);
- HInstruction* test = new (allocator) HInstanceFieldGet(parameter,
- nullptr,
- DataType::Type::kBool,
- MemberOffset(22),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0);
+ HInstruction* test = new (GetAllocator()) HInstanceFieldGet(parameter,
+ nullptr,
+ DataType::Type::kBool,
+ MemberOffset(22),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0);
block->AddInstruction(test);
- block->AddInstruction(new (allocator) HIf(test));
- HBasicBlock* then = new (allocator) HBasicBlock(graph);
- HBasicBlock* else_ = new (allocator) HBasicBlock(graph);
- HBasicBlock* join = new (allocator) HBasicBlock(graph);
+ block->AddInstruction(new (GetAllocator()) HIf(test));
+ HBasicBlock* then = new (GetAllocator()) HBasicBlock(graph);
+ HBasicBlock* else_ = new (GetAllocator()) HBasicBlock(graph);
+ HBasicBlock* join = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(then);
graph->AddBlock(else_);
graph->AddBlock(join);
@@ -525,32 +527,32 @@ static HGraph* BuildIfElseWithPhi(ArenaAllocator* allocator,
block->AddSuccessor(else_);
then->AddSuccessor(join);
else_->AddSuccessor(join);
- then->AddInstruction(new (allocator) HGoto());
- else_->AddInstruction(new (allocator) HGoto());
+ then->AddInstruction(new (GetAllocator()) HGoto());
+ else_->AddInstruction(new (GetAllocator()) HGoto());
- *phi = new (allocator) HPhi(allocator, 0, 0, DataType::Type::kInt32);
+ *phi = new (GetAllocator()) HPhi(GetAllocator(), 0, 0, DataType::Type::kInt32);
join->AddPhi(*phi);
- *input1 = new (allocator) HInstanceFieldGet(parameter,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0);
- *input2 = new (allocator) HInstanceFieldGet(parameter,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0);
+ *input1 = new (GetAllocator()) HInstanceFieldGet(parameter,
+ nullptr,
+ DataType::Type::kInt32,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0);
+ *input2 = new (GetAllocator()) HInstanceFieldGet(parameter,
+ nullptr,
+ DataType::Type::kInt32,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0);
then->AddInstruction(*input1);
else_->AddInstruction(*input2);
- join->AddInstruction(new (allocator) HExit());
+ join->AddInstruction(new (GetAllocator()) HExit());
(*phi)->AddInput(*input1);
(*phi)->AddInput(*input2);
@@ -559,14 +561,12 @@ static HGraph* BuildIfElseWithPhi(ArenaAllocator* allocator,
return graph;
}
-static void PhiHint(Strategy strategy) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
+void RegisterAllocatorTest::PhiHint(Strategy strategy) {
HPhi *phi;
HInstruction *input1, *input2;
{
- HGraph* graph = BuildIfElseWithPhi(&allocator, &phi, &input1, &input2);
+ HGraph* graph = BuildIfElseWithPhi(&phi, &input1, &input2);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
@@ -575,7 +575,7 @@ static void PhiHint(Strategy strategy) {
// Check that the register allocator is deterministic.
RegisterAllocator* register_allocator =
- RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
+ RegisterAllocator::Create(GetAllocator(), &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
ASSERT_EQ(input1->GetLiveInterval()->GetRegister(), 0);
@@ -584,7 +584,7 @@ static void PhiHint(Strategy strategy) {
}
{
- HGraph* graph = BuildIfElseWithPhi(&allocator, &phi, &input1, &input2);
+ HGraph* graph = BuildIfElseWithPhi(&phi, &input1, &input2);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
@@ -595,7 +595,7 @@ static void PhiHint(Strategy strategy) {
// the same register.
phi->GetLocations()->UpdateOut(Location::RegisterLocation(2));
RegisterAllocator* register_allocator =
- RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
+ RegisterAllocator::Create(GetAllocator(), &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
ASSERT_EQ(input1->GetLiveInterval()->GetRegister(), 2);
@@ -604,7 +604,7 @@ static void PhiHint(Strategy strategy) {
}
{
- HGraph* graph = BuildIfElseWithPhi(&allocator, &phi, &input1, &input2);
+ HGraph* graph = BuildIfElseWithPhi(&phi, &input1, &input2);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
@@ -615,7 +615,7 @@ static void PhiHint(Strategy strategy) {
// the same register.
input1->GetLocations()->UpdateOut(Location::RegisterLocation(2));
RegisterAllocator* register_allocator =
- RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
+ RegisterAllocator::Create(GetAllocator(), &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
ASSERT_EQ(input1->GetLiveInterval()->GetRegister(), 2);
@@ -624,7 +624,7 @@ static void PhiHint(Strategy strategy) {
}
{
- HGraph* graph = BuildIfElseWithPhi(&allocator, &phi, &input1, &input2);
+ HGraph* graph = BuildIfElseWithPhi(&phi, &input1, &input2);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
@@ -635,7 +635,7 @@ static void PhiHint(Strategy strategy) {
// the same register.
input2->GetLocations()->UpdateOut(Location::RegisterLocation(2));
RegisterAllocator* register_allocator =
- RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
+ RegisterAllocator::Create(GetAllocator(), &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
ASSERT_EQ(input1->GetLiveInterval()->GetRegister(), 2);
@@ -650,50 +650,46 @@ TEST_F(RegisterAllocatorTest, PhiHint_LinearScan) {
PhiHint(Strategy::kRegisterAllocatorLinearScan);
}
-static HGraph* BuildFieldReturn(ArenaAllocator* allocator,
- HInstruction** field,
- HInstruction** ret) {
- HGraph* graph = CreateGraph(allocator);
- HBasicBlock* entry = new (allocator) HBasicBlock(graph);
+HGraph* RegisterAllocatorTest::BuildFieldReturn(HInstruction** field, HInstruction** ret) {
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter = new (allocator) HParameterValue(
+ HInstruction* parameter = new (GetAllocator()) HParameterValue(
graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference);
entry->AddInstruction(parameter);
- HBasicBlock* block = new (allocator) HBasicBlock(graph);
+ HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(block);
entry->AddSuccessor(block);
- *field = new (allocator) HInstanceFieldGet(parameter,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0);
+ *field = new (GetAllocator()) HInstanceFieldGet(parameter,
+ nullptr,
+ DataType::Type::kInt32,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0);
block->AddInstruction(*field);
- *ret = new (allocator) HReturn(*field);
+ *ret = new (GetAllocator()) HReturn(*field);
block->AddInstruction(*ret);
- HBasicBlock* exit = new (allocator) HBasicBlock(graph);
+ HBasicBlock* exit = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(exit);
block->AddSuccessor(exit);
- exit->AddInstruction(new (allocator) HExit());
+ exit->AddInstruction(new (GetAllocator()) HExit());
graph->BuildDominatorTree();
return graph;
}
void RegisterAllocatorTest::ExpectedInRegisterHint(Strategy strategy) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
HInstruction *field, *ret;
{
- HGraph* graph = BuildFieldReturn(&allocator, &field, &ret);
+ HGraph* graph = BuildFieldReturn(&field, &ret);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
@@ -701,7 +697,7 @@ void RegisterAllocatorTest::ExpectedInRegisterHint(Strategy strategy) {
liveness.Analyze();
RegisterAllocator* register_allocator =
- RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
+ RegisterAllocator::Create(GetAllocator(), &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
// Sanity check that in normal conditions, the register should be hinted to 0 (EAX).
@@ -709,7 +705,7 @@ void RegisterAllocatorTest::ExpectedInRegisterHint(Strategy strategy) {
}
{
- HGraph* graph = BuildFieldReturn(&allocator, &field, &ret);
+ HGraph* graph = BuildFieldReturn(&field, &ret);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
@@ -721,7 +717,7 @@ void RegisterAllocatorTest::ExpectedInRegisterHint(Strategy strategy) {
ret->GetLocations()->inputs_[0] = Location::RegisterLocation(2);
RegisterAllocator* register_allocator =
- RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
+ RegisterAllocator::Create(GetAllocator(), &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
ASSERT_EQ(field->GetLiveInterval()->GetRegister(), 2);
@@ -734,42 +730,38 @@ TEST_F(RegisterAllocatorTest, ExpectedInRegisterHint_LinearScan) {
ExpectedInRegisterHint(Strategy::kRegisterAllocatorLinearScan);
}
-static HGraph* BuildTwoSubs(ArenaAllocator* allocator,
- HInstruction** first_sub,
- HInstruction** second_sub) {
- HGraph* graph = CreateGraph(allocator);
- HBasicBlock* entry = new (allocator) HBasicBlock(graph);
+HGraph* RegisterAllocatorTest::BuildTwoSubs(HInstruction** first_sub, HInstruction** second_sub) {
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter = new (allocator) HParameterValue(
+ HInstruction* parameter = new (GetAllocator()) HParameterValue(
graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32);
entry->AddInstruction(parameter);
HInstruction* constant1 = graph->GetIntConstant(1);
HInstruction* constant2 = graph->GetIntConstant(2);
- HBasicBlock* block = new (allocator) HBasicBlock(graph);
+ HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(block);
entry->AddSuccessor(block);
- *first_sub = new (allocator) HSub(DataType::Type::kInt32, parameter, constant1);
+ *first_sub = new (GetAllocator()) HSub(DataType::Type::kInt32, parameter, constant1);
block->AddInstruction(*first_sub);
- *second_sub = new (allocator) HSub(DataType::Type::kInt32, *first_sub, constant2);
+ *second_sub = new (GetAllocator()) HSub(DataType::Type::kInt32, *first_sub, constant2);
block->AddInstruction(*second_sub);
- block->AddInstruction(new (allocator) HExit());
+ block->AddInstruction(new (GetAllocator()) HExit());
graph->BuildDominatorTree();
return graph;
}
void RegisterAllocatorTest::SameAsFirstInputHint(Strategy strategy) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
HInstruction *first_sub, *second_sub;
{
- HGraph* graph = BuildTwoSubs(&allocator, &first_sub, &second_sub);
+ HGraph* graph = BuildTwoSubs(&first_sub, &second_sub);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
@@ -777,7 +769,7 @@ void RegisterAllocatorTest::SameAsFirstInputHint(Strategy strategy) {
liveness.Analyze();
RegisterAllocator* register_allocator =
- RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
+ RegisterAllocator::Create(GetAllocator(), &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
// Sanity check that in normal conditions, the registers are the same.
@@ -786,7 +778,7 @@ void RegisterAllocatorTest::SameAsFirstInputHint(Strategy strategy) {
}
{
- HGraph* graph = BuildTwoSubs(&allocator, &first_sub, &second_sub);
+ HGraph* graph = BuildTwoSubs(&first_sub, &second_sub);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
@@ -800,7 +792,7 @@ void RegisterAllocatorTest::SameAsFirstInputHint(Strategy strategy) {
ASSERT_EQ(second_sub->GetLocations()->Out().GetPolicy(), Location::kSameAsFirstInput);
RegisterAllocator* register_allocator =
- RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
+ RegisterAllocator::Create(GetAllocator(), &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
ASSERT_EQ(first_sub->GetLiveInterval()->GetRegister(), 2);
@@ -814,53 +806,47 @@ TEST_F(RegisterAllocatorTest, SameAsFirstInputHint_LinearScan) {
SameAsFirstInputHint(Strategy::kRegisterAllocatorLinearScan);
}
-static HGraph* BuildDiv(ArenaAllocator* allocator,
- HInstruction** div) {
- HGraph* graph = CreateGraph(allocator);
- HBasicBlock* entry = new (allocator) HBasicBlock(graph);
+HGraph* RegisterAllocatorTest::BuildDiv(HInstruction** div) {
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* first = new (allocator) HParameterValue(
+ HInstruction* first = new (GetAllocator()) HParameterValue(
graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32);
- HInstruction* second = new (allocator) HParameterValue(
+ HInstruction* second = new (GetAllocator()) HParameterValue(
graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32);
entry->AddInstruction(first);
entry->AddInstruction(second);
- HBasicBlock* block = new (allocator) HBasicBlock(graph);
+ HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(block);
entry->AddSuccessor(block);
- *div =
- new (allocator) HDiv(DataType::Type::kInt32, first, second, 0); // don't care about dex_pc.
+ *div = new (GetAllocator()) HDiv(
+ DataType::Type::kInt32, first, second, 0); // don't care about dex_pc.
block->AddInstruction(*div);
- block->AddInstruction(new (allocator) HExit());
+ block->AddInstruction(new (GetAllocator()) HExit());
graph->BuildDominatorTree();
return graph;
}
-static void ExpectedExactInRegisterAndSameOutputHint(Strategy strategy) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
+void RegisterAllocatorTest::ExpectedExactInRegisterAndSameOutputHint(Strategy strategy) {
HInstruction *div;
+ HGraph* graph = BuildDiv(&div);
+ std::unique_ptr<const X86InstructionSetFeatures> features_x86(
+ X86InstructionSetFeatures::FromCppDefines());
+ x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
+ SsaLivenessAnalysis liveness(graph, &codegen);
+ liveness.Analyze();
- {
- HGraph* graph = BuildDiv(&allocator, &div);
- std::unique_ptr<const X86InstructionSetFeatures> features_x86(
- X86InstructionSetFeatures::FromCppDefines());
- x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
- liveness.Analyze();
-
- RegisterAllocator* register_allocator =
- RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
- register_allocator->AllocateRegisters();
+ RegisterAllocator* register_allocator =
+ RegisterAllocator::Create(GetAllocator(), &codegen, liveness, strategy);
+ register_allocator->AllocateRegisters();
- // div on x86 requires its first input in eax and the output be the same as the first input.
- ASSERT_EQ(div->GetLiveInterval()->GetRegister(), 0);
- }
+ // div on x86 requires its first input in eax and the output be the same as the first input.
+ ASSERT_EQ(div->GetLiveInterval()->GetRegister(), 0);
}
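The assertion above leans on the x86 division constraint: the backend pins the first input of an integer HDiv to EAX and gives the output the Location::kSameAsFirstInput policy, so the allocator has no choice but register 0. A minimal sketch of that style of location setup follows; it is illustrative only, not the actual code_generator_x86 visitor, and SetUpDivLocations is a hypothetical helper name.

// Illustrative sketch: expressing "first input fixed in EAX, output same as
// first input" in a LocationSummary, the constraint the assertion relies on.
static void SetUpDivLocations(HDiv* div, ArenaAllocator* allocator) {  // hypothetical helper
  LocationSummary* locations =
      new (allocator) LocationSummary(div, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RegisterLocation(0));  // EAX on x86
  locations->SetInAt(1, Location::RequiresRegister());   // divisor in any core register
  locations->SetOut(Location::SameAsFirstInput());       // result reuses the EAX input
}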
// TODO: Enable this test for graph coloring register allocation when iterative move
@@ -874,59 +860,57 @@ TEST_F(RegisterAllocatorTest, ExpectedExactInRegisterAndSameOutputHint_LinearSca
// position.
// This test only applies to the linear scan allocator.
TEST_F(RegisterAllocatorTest, SpillInactive) {
- ArenaPool pool;
-
// Create a synthesized graph to please the register_allocator and
// ssa_liveness_analysis code.
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateGraph(&allocator);
- HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* one = new (&allocator) HParameterValue(
+ HInstruction* one = new (GetAllocator()) HParameterValue(
graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32);
- HInstruction* two = new (&allocator) HParameterValue(
+ HInstruction* two = new (GetAllocator()) HParameterValue(
graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32);
- HInstruction* three = new (&allocator) HParameterValue(
+ HInstruction* three = new (GetAllocator()) HParameterValue(
graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32);
- HInstruction* four = new (&allocator) HParameterValue(
+ HInstruction* four = new (GetAllocator()) HParameterValue(
graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32);
entry->AddInstruction(one);
entry->AddInstruction(two);
entry->AddInstruction(three);
entry->AddInstruction(four);
- HBasicBlock* block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(block);
entry->AddSuccessor(block);
- block->AddInstruction(new (&allocator) HExit());
+ block->AddInstruction(new (GetAllocator()) HExit());
// We create a synthesized user requesting a register, to avoid just spilling the
// intervals.
- HPhi* user = new (&allocator) HPhi(&allocator, 0, 1, DataType::Type::kInt32);
+ HPhi* user = new (GetAllocator()) HPhi(GetAllocator(), 0, 1, DataType::Type::kInt32);
user->AddInput(one);
user->SetBlock(block);
- LocationSummary* locations = new (&allocator) LocationSummary(user, LocationSummary::kNoCall);
+ LocationSummary* locations = new (GetAllocator()) LocationSummary(user, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
static constexpr size_t phi_ranges[][2] = {{20, 30}};
- BuildInterval(phi_ranges, arraysize(phi_ranges), &allocator, -1, user);
+ BuildInterval(phi_ranges, arraysize(phi_ranges), GetAllocator(), -1, user);
// Create an interval with lifetime holes.
static constexpr size_t ranges1[][2] = {{0, 2}, {4, 6}, {8, 10}};
- LiveInterval* first = BuildInterval(ranges1, arraysize(ranges1), &allocator, -1, one);
- first->uses_.push_front(*new(&allocator) UsePosition(user, false, 8));
- first->uses_.push_front(*new(&allocator) UsePosition(user, false, 7));
- first->uses_.push_front(*new(&allocator) UsePosition(user, false, 6));
+ LiveInterval* first = BuildInterval(ranges1, arraysize(ranges1), GetAllocator(), -1, one);
+ first->uses_.push_front(*new(GetAllocator()) UsePosition(user, false, 8));
+ first->uses_.push_front(*new(GetAllocator()) UsePosition(user, false, 7));
+ first->uses_.push_front(*new(GetAllocator()) UsePosition(user, false, 6));
- locations = new (&allocator) LocationSummary(first->GetDefinedBy(), LocationSummary::kNoCall);
+ locations = new (GetAllocator()) LocationSummary(first->GetDefinedBy(), LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
first = first->SplitAt(1);
// Create an interval that conflicts with the next interval, to force the next
// interval to call `AllocateBlockedReg`.
static constexpr size_t ranges2[][2] = {{2, 4}};
- LiveInterval* second = BuildInterval(ranges2, arraysize(ranges2), &allocator, -1, two);
- locations = new (&allocator) LocationSummary(second->GetDefinedBy(), LocationSummary::kNoCall);
+ LiveInterval* second = BuildInterval(ranges2, arraysize(ranges2), GetAllocator(), -1, two);
+ locations =
+ new (GetAllocator()) LocationSummary(second->GetDefinedBy(), LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
  // Create an interval that will lead to splitting the first interval. The bug occurred
@@ -935,19 +919,20 @@ TEST_F(RegisterAllocatorTest, SpillInactive) {
// "[0, 2(, [4, 6(" in the list of handled intervals, even though we haven't processed intervals
// before lifetime position 6 yet.
static constexpr size_t ranges3[][2] = {{2, 4}, {8, 10}};
- LiveInterval* third = BuildInterval(ranges3, arraysize(ranges3), &allocator, -1, three);
- third->uses_.push_front(*new(&allocator) UsePosition(user, false, 8));
- third->uses_.push_front(*new(&allocator) UsePosition(user, false, 4));
- third->uses_.push_front(*new(&allocator) UsePosition(user, false, 3));
- locations = new (&allocator) LocationSummary(third->GetDefinedBy(), LocationSummary::kNoCall);
+ LiveInterval* third = BuildInterval(ranges3, arraysize(ranges3), GetAllocator(), -1, three);
+ third->uses_.push_front(*new(GetAllocator()) UsePosition(user, false, 8));
+ third->uses_.push_front(*new(GetAllocator()) UsePosition(user, false, 4));
+ third->uses_.push_front(*new(GetAllocator()) UsePosition(user, false, 3));
+ locations = new (GetAllocator()) LocationSummary(third->GetDefinedBy(), LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
third = third->SplitAt(3);
// Because the first part of the split interval was considered handled, this interval
// was free to allocate the same register, even though it conflicts with it.
static constexpr size_t ranges4[][2] = {{4, 6}};
- LiveInterval* fourth = BuildInterval(ranges4, arraysize(ranges4), &allocator, -1, four);
- locations = new (&allocator) LocationSummary(fourth->GetDefinedBy(), LocationSummary::kNoCall);
+ LiveInterval* fourth = BuildInterval(ranges4, arraysize(ranges4), GetAllocator(), -1, four);
+ locations =
+ new (GetAllocator()) LocationSummary(fourth->GetDefinedBy(), LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
@@ -959,7 +944,7 @@ TEST_F(RegisterAllocatorTest, SpillInactive) {
liveness.instructions_from_lifetime_position_.push_back(user);
}
- RegisterAllocatorLinearScan register_allocator(&allocator, &codegen, liveness);
+ RegisterAllocatorLinearScan register_allocator(GetAllocator(), &codegen, liveness);
register_allocator.unhandled_core_intervals_.push_back(fourth);
register_allocator.unhandled_core_intervals_.push_back(third);
register_allocator.unhandled_core_intervals_.push_back(second);
@@ -967,19 +952,19 @@ TEST_F(RegisterAllocatorTest, SpillInactive) {
  // Set just one register available to make all intervals compete for the same register.
register_allocator.number_of_registers_ = 1;
- register_allocator.registers_array_ = allocator.AllocArray<size_t>(1);
+ register_allocator.registers_array_ = GetAllocator()->AllocArray<size_t>(1);
register_allocator.processing_core_registers_ = true;
register_allocator.unhandled_ = &register_allocator.unhandled_core_intervals_;
register_allocator.LinearScan();
  // Test that there are no conflicts between intervals.
- ArenaVector<LiveInterval*> intervals(allocator.Adapter());
+ ArenaVector<LiveInterval*> intervals(GetAllocator()->Adapter());
intervals.push_back(first);
intervals.push_back(second);
intervals.push_back(third);
intervals.push_back(fourth);
ASSERT_TRUE(RegisterAllocator::ValidateIntervals(
- intervals, 0, 0, codegen, &allocator, true, false));
+ intervals, 0, 0, codegen, GetAllocator(), true, false));
}
} // namespace art
diff --git a/compiler/optimizing/scheduler.cc b/compiler/optimizing/scheduler.cc
index 5212e866cf..9acf6d2906 100644
--- a/compiler/optimizing/scheduler.cc
+++ b/compiler/optimizing/scheduler.cc
@@ -18,6 +18,8 @@
#include "scheduler.h"
+#include "base/scoped_arena_allocator.h"
+#include "base/scoped_arena_containers.h"
#include "data_type-inl.h"
#include "prepare_for_register_allocation.h"
@@ -442,7 +444,7 @@ static void DumpAsDotNode(std::ostream& output, const SchedulingNode* node) {
}
void SchedulingGraph::DumpAsDotGraph(const std::string& description,
- const ArenaVector<SchedulingNode*>& initial_candidates) {
+ const ScopedArenaVector<SchedulingNode*>& initial_candidates) {
// TODO(xueliang): ideally we should move scheduling information into HInstruction, after that
// we should move this dotty graph dump feature to visualizer, and have a compiler option for it.
std::ofstream output("scheduling_graphs.dot", std::ofstream::out | std::ofstream::app);
@@ -451,7 +453,7 @@ void SchedulingGraph::DumpAsDotGraph(const std::string& description,
// Start the dot graph. Use an increasing index for easier differentiation.
output << "digraph G {\n";
for (const auto& entry : nodes_map_) {
- SchedulingNode* node = entry.second;
+ SchedulingNode* node = entry.second.get();
DumpAsDotNode(output, node);
}
// Create a fake 'end_of_scheduling' node to help visualization of critical_paths.
@@ -466,7 +468,7 @@ void SchedulingGraph::DumpAsDotGraph(const std::string& description,
}
SchedulingNode* CriticalPathSchedulingNodeSelector::SelectMaterializedCondition(
- ArenaVector<SchedulingNode*>* nodes, const SchedulingGraph& graph) const {
+ ScopedArenaVector<SchedulingNode*>* nodes, const SchedulingGraph& graph) const {
// Schedule condition inputs that can be materialized immediately before their use.
 // In the following example, after we've scheduled HSelect, we want LessThan to be scheduled
// immediately, because it is a materialized condition, and will be emitted right before HSelect
@@ -506,7 +508,7 @@ SchedulingNode* CriticalPathSchedulingNodeSelector::SelectMaterializedCondition(
}
SchedulingNode* CriticalPathSchedulingNodeSelector::PopHighestPriorityNode(
- ArenaVector<SchedulingNode*>* nodes, const SchedulingGraph& graph) {
+ ScopedArenaVector<SchedulingNode*>* nodes, const SchedulingGraph& graph) {
DCHECK(!nodes->empty());
SchedulingNode* select_node = nullptr;
@@ -562,7 +564,7 @@ void HScheduler::Schedule(HGraph* graph) {
}
void HScheduler::Schedule(HBasicBlock* block) {
- ArenaVector<SchedulingNode*> scheduling_nodes(arena_->Adapter(kArenaAllocScheduler));
+ ScopedArenaVector<SchedulingNode*> scheduling_nodes(arena_->Adapter(kArenaAllocScheduler));
// Build the scheduling graph.
scheduling_graph_.Clear();
@@ -593,7 +595,7 @@ void HScheduler::Schedule(HBasicBlock* block) {
}
}
- ArenaVector<SchedulingNode*> initial_candidates(arena_->Adapter(kArenaAllocScheduler));
+ ScopedArenaVector<SchedulingNode*> initial_candidates(arena_->Adapter(kArenaAllocScheduler));
if (kDumpDotSchedulingGraphs) {
// Remember the list of initial candidates for debug output purposes.
initial_candidates.assign(candidates_.begin(), candidates_.end());
@@ -779,7 +781,7 @@ void HInstructionScheduling::Run(bool only_optimize_loop_blocks,
#if defined(ART_ENABLE_CODEGEN_arm64) || defined(ART_ENABLE_CODEGEN_arm)
// Phase-local allocator that allocates scheduler internal data structures like
 // scheduling nodes, internal nodes map, dependencies, etc.
- ArenaAllocator arena_allocator(graph_->GetArena()->GetArenaPool());
+ ScopedArenaAllocator arena_allocator(graph_->GetArenaStack());
CriticalPathSchedulingNodeSelector critical_path_selector;
RandomSchedulingNodeSelector random_selector;
SchedulingNodeSelector* selector = schedule_randomly
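The hunk above is the core of the change for the scheduler: the phase-local ArenaAllocator is replaced by a ScopedArenaAllocator built on the graph's ArenaStack, so scheduler temporaries are both counted by the arena statistics and released as soon as the pass returns. A minimal sketch of the pattern, assuming a hypothetical pass entry point named RunPass:

// Sketch of the pass-local allocation pattern adopted here: temporaries live
// on the graph's ArenaStack and are reclaimed when `allocator` goes out of scope.
void RunPass(HGraph* graph) {  // hypothetical entry point, for illustration
  ScopedArenaAllocator allocator(graph->GetArenaStack());
  ScopedArenaVector<HInstruction*> worklist(allocator.Adapter(kArenaAllocScheduler));
  // ... fill and process `worklist`; no explicit cleanup is needed ...
}  // all scheduler-tagged allocations made through `allocator` are released here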
diff --git a/compiler/optimizing/scheduler.h b/compiler/optimizing/scheduler.h
index 66ffac5b7d..493ec0b07b 100644
--- a/compiler/optimizing/scheduler.h
+++ b/compiler/optimizing/scheduler.h
@@ -19,6 +19,8 @@
#include <fstream>
+#include "base/scoped_arena_allocator.h"
+#include "base/scoped_arena_containers.h"
#include "base/time_utils.h"
#include "code_generator.h"
#include "driver/compiler_driver.h"
@@ -152,9 +154,9 @@ class HScheduler;
/**
* A node representing an `HInstruction` in the `SchedulingGraph`.
*/
-class SchedulingNode : public ArenaObject<kArenaAllocScheduler> {
+class SchedulingNode : public DeletableArenaObject<kArenaAllocScheduler> {
public:
- SchedulingNode(HInstruction* instr, ArenaAllocator* arena, bool is_scheduling_barrier)
+ SchedulingNode(HInstruction* instr, ScopedArenaAllocator* arena, bool is_scheduling_barrier)
: latency_(0),
internal_latency_(0),
critical_path_(0),
@@ -171,11 +173,19 @@ class SchedulingNode : public ArenaObject<kArenaAllocScheduler> {
predecessor->num_unscheduled_successors_++;
}
+ const ScopedArenaVector<SchedulingNode*>& GetDataPredecessors() const {
+ return data_predecessors_;
+ }
+
void AddOtherPredecessor(SchedulingNode* predecessor) {
other_predecessors_.push_back(predecessor);
predecessor->num_unscheduled_successors_++;
}
+ const ScopedArenaVector<SchedulingNode*>& GetOtherPredecessors() const {
+ return other_predecessors_;
+ }
+
void DecrementNumberOfUnscheduledSuccessors() {
num_unscheduled_successors_--;
}
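The two predecessor lists and the unscheduled-successor counter are the usual list-scheduling bookkeeping: registering a predecessor bumps its counter, and scheduling a node lets its predecessors' counters drop. A sketch of that update step, using the accessors above; HasUnscheduledSuccessors is assumed to test the counter for zero, and `candidates` stands in for the scheduler's own candidate list.

// Illustrative sketch of the readiness update: once a node has been scheduled,
// every predecessor loses one unscheduled successor, and a predecessor whose
// count reaches zero can be handed to the node selector.
void OnNodeScheduled(SchedulingNode* scheduled,
                     ScopedArenaVector<SchedulingNode*>* candidates) {
  auto update = [candidates](const ScopedArenaVector<SchedulingNode*>& predecessors) {
    for (SchedulingNode* predecessor : predecessors) {
      predecessor->DecrementNumberOfUnscheduledSuccessors();
      if (!predecessor->HasUnscheduledSuccessors()) {  // assumed accessor on the counter
        candidates->push_back(predecessor);
      }
    }
  };
  update(scheduled->GetDataPredecessors());
  update(scheduled->GetOtherPredecessors());
}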
@@ -195,8 +205,6 @@ class SchedulingNode : public ArenaObject<kArenaAllocScheduler> {
void SetInternalLatency(uint32_t internal_latency) { internal_latency_ = internal_latency; }
uint32_t GetCriticalPath() const { return critical_path_; }
bool IsSchedulingBarrier() const { return is_scheduling_barrier_; }
- const ArenaVector<SchedulingNode*>& GetDataPredecessors() const { return data_predecessors_; }
- const ArenaVector<SchedulingNode*>& GetOtherPredecessors() const { return other_predecessors_; }
private:
// The latency of this node. It represents the latency between the moment the
@@ -227,8 +235,8 @@ class SchedulingNode : public ArenaObject<kArenaAllocScheduler> {
// Predecessors in `data_predecessors_` are data dependencies. Those in
// `other_predecessors_` contain side-effect dependencies, environment
// dependencies, and scheduling barrier dependencies.
- ArenaVector<SchedulingNode*> data_predecessors_;
- ArenaVector<SchedulingNode*> other_predecessors_;
+ ScopedArenaVector<SchedulingNode*> data_predecessors_;
+ ScopedArenaVector<SchedulingNode*> other_predecessors_;
// The number of unscheduled successors for this node. This number is
// decremented as successors are scheduled. When it reaches zero this node
@@ -243,7 +251,7 @@ class SchedulingNode : public ArenaObject<kArenaAllocScheduler> {
*/
class SchedulingGraph : public ValueObject {
public:
- SchedulingGraph(const HScheduler* scheduler, ArenaAllocator* arena)
+ SchedulingGraph(const HScheduler* scheduler, ScopedArenaAllocator* arena)
: scheduler_(scheduler),
arena_(arena),
contains_scheduling_barrier_(false),
@@ -251,11 +259,13 @@ class SchedulingGraph : public ValueObject {
heap_location_collector_(nullptr) {}
SchedulingNode* AddNode(HInstruction* instr, bool is_scheduling_barrier = false) {
- SchedulingNode* node = new (arena_) SchedulingNode(instr, arena_, is_scheduling_barrier);
- nodes_map_.Insert(std::make_pair(instr, node));
+ std::unique_ptr<SchedulingNode> node(
+ new (arena_) SchedulingNode(instr, arena_, is_scheduling_barrier));
+ SchedulingNode* result = node.get();
+ nodes_map_.Insert(std::make_pair(instr, std::move(node)));
contains_scheduling_barrier_ |= is_scheduling_barrier;
AddDependencies(instr, is_scheduling_barrier);
- return node;
+ return result;
}
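Storing the nodes as std::unique_ptr in nodes_map_ means their destructors run when the map is cleared, which matters now that SchedulingNode is a DeletableArenaObject holding ScopedArenaVector members. The ownership shape, stripped of the ART types, is the familiar one below; NodeRegistry and its members are purely illustrative.

#include <memory>
#include <unordered_map>
#include <utility>

// Generic sketch of the ownership pattern: the map owns the nodes through
// unique_ptr while callers keep working with raw pointers, just as AddNode
// above returns `result` while the map keeps `node`.
template <typename Key, typename Node>
class NodeRegistry {
 public:
  template <typename... Args>
  Node* Add(const Key* key, Args&&... args) {
    auto node = std::make_unique<Node>(std::forward<Args>(args)...);
    Node* result = node.get();
    nodes_[key] = std::move(node);
    return result;
  }

  Node* Find(const Key* key) const {
    auto it = nodes_.find(key);
    return it == nodes_.end() ? nullptr : it->second.get();
  }

 private:
  std::unordered_map<const Key*, std::unique_ptr<Node>> nodes_;
};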
void Clear() {
@@ -272,7 +282,7 @@ class SchedulingGraph : public ValueObject {
if (it == nodes_map_.end()) {
return nullptr;
} else {
- return it->second;
+ return it->second.get();
}
}
@@ -290,7 +300,7 @@ class SchedulingGraph : public ValueObject {
// Dump the scheduling graph, in dot file format, appending it to the file
// `scheduling_graphs.dot`.
void DumpAsDotGraph(const std::string& description,
- const ArenaVector<SchedulingNode*>& initial_candidates);
+ const ScopedArenaVector<SchedulingNode*>& initial_candidates);
protected:
void AddDependency(SchedulingNode* node, SchedulingNode* dependency, bool is_data_dependency);
@@ -313,11 +323,11 @@ class SchedulingGraph : public ValueObject {
const HScheduler* const scheduler_;
- ArenaAllocator* const arena_;
+ ScopedArenaAllocator* const arena_;
bool contains_scheduling_barrier_;
- ArenaHashMap<const HInstruction*, SchedulingNode*> nodes_map_;
+ ScopedArenaHashMap<const HInstruction*, std::unique_ptr<SchedulingNode>> nodes_map_;
const HeapLocationCollector* heap_location_collector_;
};
@@ -367,11 +377,11 @@ class SchedulingLatencyVisitor : public HGraphDelegateVisitor {
class SchedulingNodeSelector : public ArenaObject<kArenaAllocScheduler> {
public:
- virtual SchedulingNode* PopHighestPriorityNode(ArenaVector<SchedulingNode*>* nodes,
+ virtual SchedulingNode* PopHighestPriorityNode(ScopedArenaVector<SchedulingNode*>* nodes,
const SchedulingGraph& graph) = 0;
virtual ~SchedulingNodeSelector() {}
protected:
- static void DeleteNodeAtIndex(ArenaVector<SchedulingNode*>* nodes, size_t index) {
+ static void DeleteNodeAtIndex(ScopedArenaVector<SchedulingNode*>* nodes, size_t index) {
(*nodes)[index] = nodes->back();
nodes->pop_back();
}
@@ -387,7 +397,7 @@ class RandomSchedulingNodeSelector : public SchedulingNodeSelector {
srand(seed_);
}
- SchedulingNode* PopHighestPriorityNode(ArenaVector<SchedulingNode*>* nodes,
+ SchedulingNode* PopHighestPriorityNode(ScopedArenaVector<SchedulingNode*>* nodes,
const SchedulingGraph& graph) OVERRIDE {
UNUSED(graph);
DCHECK(!nodes->empty());
@@ -408,15 +418,15 @@ class CriticalPathSchedulingNodeSelector : public SchedulingNodeSelector {
public:
CriticalPathSchedulingNodeSelector() : prev_select_(nullptr) {}
- SchedulingNode* PopHighestPriorityNode(ArenaVector<SchedulingNode*>* nodes,
+ SchedulingNode* PopHighestPriorityNode(ScopedArenaVector<SchedulingNode*>* nodes,
const SchedulingGraph& graph) OVERRIDE;
protected:
SchedulingNode* GetHigherPrioritySchedulingNode(SchedulingNode* candidate,
SchedulingNode* check) const;
- SchedulingNode* SelectMaterializedCondition(ArenaVector<SchedulingNode*>* nodes,
- const SchedulingGraph& graph) const;
+ SchedulingNode* SelectMaterializedCondition(ScopedArenaVector<SchedulingNode*>* nodes,
+ const SchedulingGraph& graph) const;
private:
const SchedulingNode* prev_select_;
@@ -424,7 +434,7 @@ class CriticalPathSchedulingNodeSelector : public SchedulingNodeSelector {
class HScheduler {
public:
- HScheduler(ArenaAllocator* arena,
+ HScheduler(ScopedArenaAllocator* arena,
SchedulingLatencyVisitor* latency_visitor,
SchedulingNodeSelector* selector)
: arena_(arena),
@@ -461,7 +471,7 @@ class HScheduler {
node->SetInternalLatency(latency_visitor_->GetLastVisitedInternalLatency());
}
- ArenaAllocator* const arena_;
+ ScopedArenaAllocator* const arena_;
SchedulingLatencyVisitor* const latency_visitor_;
SchedulingNodeSelector* const selector_;
bool only_optimize_loop_blocks_;
@@ -473,7 +483,7 @@ class HScheduler {
HInstruction* cursor_;
// The list of candidates for scheduling. A node becomes a candidate when all
// its predecessors have been scheduled.
- ArenaVector<SchedulingNode*> candidates_;
+ ScopedArenaVector<SchedulingNode*> candidates_;
private:
DISALLOW_COPY_AND_ASSIGN(HScheduler);
diff --git a/compiler/optimizing/scheduler_arm.h b/compiler/optimizing/scheduler_arm.h
index fe274d29f9..62cd75cf59 100644
--- a/compiler/optimizing/scheduler_arm.h
+++ b/compiler/optimizing/scheduler_arm.h
@@ -137,7 +137,7 @@ class SchedulingLatencyVisitorARM : public SchedulingLatencyVisitor {
class HSchedulerARM : public HScheduler {
public:
- HSchedulerARM(ArenaAllocator* arena,
+ HSchedulerARM(ScopedArenaAllocator* arena,
SchedulingNodeSelector* selector,
SchedulingLatencyVisitorARM* arm_latency_visitor)
: HScheduler(arena, arm_latency_visitor, selector) {}
diff --git a/compiler/optimizing/scheduler_arm64.h b/compiler/optimizing/scheduler_arm64.h
index e1a80ec6fb..6682c66ea6 100644
--- a/compiler/optimizing/scheduler_arm64.h
+++ b/compiler/optimizing/scheduler_arm64.h
@@ -131,7 +131,7 @@ class SchedulingLatencyVisitorARM64 : public SchedulingLatencyVisitor {
class HSchedulerARM64 : public HScheduler {
public:
- HSchedulerARM64(ArenaAllocator* arena, SchedulingNodeSelector* selector)
+ HSchedulerARM64(ScopedArenaAllocator* arena, SchedulingNodeSelector* selector)
: HScheduler(arena, &arm64_latency_visitor_, selector) {}
~HSchedulerARM64() OVERRIDE {}
diff --git a/compiler/optimizing/scheduler_test.cc b/compiler/optimizing/scheduler_test.cc
index 0e6e0c5a3d..7e1ec70789 100644
--- a/compiler/optimizing/scheduler_test.cc
+++ b/compiler/optimizing/scheduler_test.cc
@@ -71,16 +71,14 @@ static ::std::vector<CodegenTargetConfig> GetTargetConfigs() {
return v;
}
-class SchedulerTest : public CommonCompilerTest {
+class SchedulerTest : public OptimizingUnitTest {
public:
- SchedulerTest() : pool_(), allocator_(&pool_) {
- graph_ = CreateGraph(&allocator_);
- }
+ SchedulerTest() : graph_(CreateGraph()) { }
// Build scheduling graph, and run target specific scheduling on it.
void TestBuildDependencyGraphAndSchedule(HScheduler* scheduler) {
- HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_);
- HBasicBlock* block1 = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_);
+ HBasicBlock* block1 = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry);
graph_->AddBlock(block1);
graph_->SetEntryBlock(entry);
@@ -100,23 +98,25 @@ class SchedulerTest : public CommonCompilerTest {
// array_get2 ArrayGet [array, add1]
// array_set2 ArraySet [array, add1, add2]
- HInstruction* array = new (&allocator_) HParameterValue(graph_->GetDexFile(),
- dex::TypeIndex(0),
- 0,
- DataType::Type::kReference);
+ HInstruction* array = new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
+ dex::TypeIndex(0),
+ 0,
+ DataType::Type::kReference);
HInstruction* c1 = graph_->GetIntConstant(1);
HInstruction* c2 = graph_->GetIntConstant(10);
- HInstruction* add1 = new (&allocator_) HAdd(DataType::Type::kInt32, c1, c2);
- HInstruction* add2 = new (&allocator_) HAdd(DataType::Type::kInt32, add1, c2);
- HInstruction* mul = new (&allocator_) HMul(DataType::Type::kInt32, add1, add2);
- HInstruction* div_check = new (&allocator_) HDivZeroCheck(add2, 0);
- HInstruction* div = new (&allocator_) HDiv(DataType::Type::kInt32, add1, div_check, 0);
- HInstruction* array_get1 = new (&allocator_) HArrayGet(array, add1, DataType::Type::kInt32, 0);
+ HInstruction* add1 = new (GetAllocator()) HAdd(DataType::Type::kInt32, c1, c2);
+ HInstruction* add2 = new (GetAllocator()) HAdd(DataType::Type::kInt32, add1, c2);
+ HInstruction* mul = new (GetAllocator()) HMul(DataType::Type::kInt32, add1, add2);
+ HInstruction* div_check = new (GetAllocator()) HDivZeroCheck(add2, 0);
+ HInstruction* div = new (GetAllocator()) HDiv(DataType::Type::kInt32, add1, div_check, 0);
+ HInstruction* array_get1 =
+ new (GetAllocator()) HArrayGet(array, add1, DataType::Type::kInt32, 0);
HInstruction* array_set1 =
- new (&allocator_) HArraySet(array, add1, add2, DataType::Type::kInt32, 0);
- HInstruction* array_get2 = new (&allocator_) HArrayGet(array, add1, DataType::Type::kInt32, 0);
+ new (GetAllocator()) HArraySet(array, add1, add2, DataType::Type::kInt32, 0);
+ HInstruction* array_get2 =
+ new (GetAllocator()) HArrayGet(array, add1, DataType::Type::kInt32, 0);
HInstruction* array_set2 =
- new (&allocator_) HArraySet(array, add1, add2, DataType::Type::kInt32, 0);
+ new (GetAllocator()) HArraySet(array, add1, add2, DataType::Type::kInt32, 0);
DCHECK(div_check->CanThrow());
@@ -135,18 +135,19 @@ class SchedulerTest : public CommonCompilerTest {
block1->AddInstruction(instr);
}
- HEnvironment* environment = new (&allocator_) HEnvironment(&allocator_,
- 2,
- graph_->GetArtMethod(),
- 0,
- div_check);
+ HEnvironment* environment = new (GetAllocator()) HEnvironment(GetAllocator(),
+ 2,
+ graph_->GetArtMethod(),
+ 0,
+ div_check);
div_check->SetRawEnvironment(environment);
environment->SetRawEnvAt(0, add2);
add2->AddEnvUseAt(div_check->GetEnvironment(), 0);
environment->SetRawEnvAt(1, mul);
mul->AddEnvUseAt(div_check->GetEnvironment(), 1);
- SchedulingGraph scheduling_graph(scheduler, graph_->GetArena());
+ ScopedArenaAllocator allocator(graph_->GetArenaStack());
+ SchedulingGraph scheduling_graph(scheduler, &allocator);
// Instructions must be inserted in reverse order into the scheduling graph.
for (HInstruction* instr : ReverseRange(block_instructions)) {
scheduling_graph.AddNode(instr);
@@ -184,7 +185,7 @@ class SchedulerTest : public CommonCompilerTest {
void CompileWithRandomSchedulerAndRun(const uint16_t* data, bool has_result, int expected) {
for (CodegenTargetConfig target_config : GetTargetConfigs()) {
- HGraph* graph = CreateCFG(&allocator_, data);
+ HGraph* graph = CreateCFG(data);
// Schedule the graph randomly.
HInstructionScheduling scheduling(graph, target_config.GetInstructionSet());
@@ -198,55 +199,57 @@ class SchedulerTest : public CommonCompilerTest {
}
void TestDependencyGraphOnAliasingArrayAccesses(HScheduler* scheduler) {
- HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry);
graph_->SetEntryBlock(entry);
graph_->BuildDominatorTree();
- HInstruction* arr = new (&allocator_) HParameterValue(graph_->GetDexFile(),
- dex::TypeIndex(0),
- 0,
- DataType::Type::kReference);
- HInstruction* i = new (&allocator_) HParameterValue(graph_->GetDexFile(),
- dex::TypeIndex(1),
- 1,
- DataType::Type::kInt32);
- HInstruction* j = new (&allocator_) HParameterValue(graph_->GetDexFile(),
- dex::TypeIndex(1),
- 1,
- DataType::Type::kInt32);
- HInstruction* object = new (&allocator_) HParameterValue(graph_->GetDexFile(),
+ HInstruction* arr = new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
dex::TypeIndex(0),
0,
DataType::Type::kReference);
+ HInstruction* i = new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
+ dex::TypeIndex(1),
+ 1,
+ DataType::Type::kInt32);
+ HInstruction* j = new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
+ dex::TypeIndex(1),
+ 1,
+ DataType::Type::kInt32);
+ HInstruction* object = new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
+ dex::TypeIndex(0),
+ 0,
+ DataType::Type::kReference);
HInstruction* c0 = graph_->GetIntConstant(0);
HInstruction* c1 = graph_->GetIntConstant(1);
- HInstruction* add0 = new (&allocator_) HAdd(DataType::Type::kInt32, i, c0);
- HInstruction* add1 = new (&allocator_) HAdd(DataType::Type::kInt32, i, c1);
- HInstruction* sub0 = new (&allocator_) HSub(DataType::Type::kInt32, i, c0);
- HInstruction* sub1 = new (&allocator_) HSub(DataType::Type::kInt32, i, c1);
- HInstruction* arr_set_0 = new (&allocator_) HArraySet(arr, c0, c0, DataType::Type::kInt32, 0);
- HInstruction* arr_set_1 = new (&allocator_) HArraySet(arr, c1, c0, DataType::Type::kInt32, 0);
- HInstruction* arr_set_i = new (&allocator_) HArraySet(arr, i, c0, DataType::Type::kInt32, 0);
+ HInstruction* add0 = new (GetAllocator()) HAdd(DataType::Type::kInt32, i, c0);
+ HInstruction* add1 = new (GetAllocator()) HAdd(DataType::Type::kInt32, i, c1);
+ HInstruction* sub0 = new (GetAllocator()) HSub(DataType::Type::kInt32, i, c0);
+ HInstruction* sub1 = new (GetAllocator()) HSub(DataType::Type::kInt32, i, c1);
+ HInstruction* arr_set_0 =
+ new (GetAllocator()) HArraySet(arr, c0, c0, DataType::Type::kInt32, 0);
+ HInstruction* arr_set_1 =
+ new (GetAllocator()) HArraySet(arr, c1, c0, DataType::Type::kInt32, 0);
+ HInstruction* arr_set_i = new (GetAllocator()) HArraySet(arr, i, c0, DataType::Type::kInt32, 0);
HInstruction* arr_set_add0 =
- new (&allocator_) HArraySet(arr, add0, c0, DataType::Type::kInt32, 0);
+ new (GetAllocator()) HArraySet(arr, add0, c0, DataType::Type::kInt32, 0);
HInstruction* arr_set_add1 =
- new (&allocator_) HArraySet(arr, add1, c0, DataType::Type::kInt32, 0);
+ new (GetAllocator()) HArraySet(arr, add1, c0, DataType::Type::kInt32, 0);
HInstruction* arr_set_sub0 =
- new (&allocator_) HArraySet(arr, sub0, c0, DataType::Type::kInt32, 0);
+ new (GetAllocator()) HArraySet(arr, sub0, c0, DataType::Type::kInt32, 0);
HInstruction* arr_set_sub1 =
- new (&allocator_) HArraySet(arr, sub1, c0, DataType::Type::kInt32, 0);
- HInstruction* arr_set_j = new (&allocator_) HArraySet(arr, j, c0, DataType::Type::kInt32, 0);
- HInstanceFieldSet* set_field10 = new (&allocator_) HInstanceFieldSet(object,
- c1,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(10),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph_->GetDexFile(),
- 0);
+ new (GetAllocator()) HArraySet(arr, sub1, c0, DataType::Type::kInt32, 0);
+ HInstruction* arr_set_j = new (GetAllocator()) HArraySet(arr, j, c0, DataType::Type::kInt32, 0);
+ HInstanceFieldSet* set_field10 = new (GetAllocator()) HInstanceFieldSet(object,
+ c1,
+ nullptr,
+ DataType::Type::kInt32,
+ MemberOffset(10),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph_->GetDexFile(),
+ 0);
HInstruction* block_instructions[] = {arr,
i,
@@ -270,7 +273,8 @@ class SchedulerTest : public CommonCompilerTest {
entry->AddInstruction(instr);
}
- SchedulingGraph scheduling_graph(scheduler, graph_->GetArena());
+ ScopedArenaAllocator allocator(graph_->GetArenaStack());
+ SchedulingGraph scheduling_graph(scheduler, &allocator);
HeapLocationCollector heap_location_collector(graph_);
heap_location_collector.VisitBasicBlock(entry);
heap_location_collector.BuildAliasingMatrix();
@@ -342,21 +346,21 @@ class SchedulerTest : public CommonCompilerTest {
scheduler->Schedule(graph_);
}
- ArenaPool pool_;
- ArenaAllocator allocator_;
HGraph* graph_;
};
#if defined(ART_ENABLE_CODEGEN_arm64)
TEST_F(SchedulerTest, DependencyGraphAndSchedulerARM64) {
CriticalPathSchedulingNodeSelector critical_path_selector;
- arm64::HSchedulerARM64 scheduler(&allocator_, &critical_path_selector);
+ ScopedArenaAllocator allocator(GetArenaStack());
+ arm64::HSchedulerARM64 scheduler(&allocator, &critical_path_selector);
TestBuildDependencyGraphAndSchedule(&scheduler);
}
TEST_F(SchedulerTest, ArrayAccessAliasingARM64) {
CriticalPathSchedulingNodeSelector critical_path_selector;
- arm64::HSchedulerARM64 scheduler(&allocator_, &critical_path_selector);
+ ScopedArenaAllocator allocator(GetArenaStack());
+ arm64::HSchedulerARM64 scheduler(&allocator, &critical_path_selector);
TestDependencyGraphOnAliasingArrayAccesses(&scheduler);
}
#endif
@@ -365,14 +369,16 @@ TEST_F(SchedulerTest, ArrayAccessAliasingARM64) {
TEST_F(SchedulerTest, DependencyGraphAndSchedulerARM) {
CriticalPathSchedulingNodeSelector critical_path_selector;
arm::SchedulingLatencyVisitorARM arm_latency_visitor(/*CodeGenerator*/ nullptr);
- arm::HSchedulerARM scheduler(&allocator_, &critical_path_selector, &arm_latency_visitor);
+ ScopedArenaAllocator allocator(GetArenaStack());
+ arm::HSchedulerARM scheduler(&allocator, &critical_path_selector, &arm_latency_visitor);
TestBuildDependencyGraphAndSchedule(&scheduler);
}
TEST_F(SchedulerTest, ArrayAccessAliasingARM) {
CriticalPathSchedulingNodeSelector critical_path_selector;
arm::SchedulingLatencyVisitorARM arm_latency_visitor(/*CodeGenerator*/ nullptr);
- arm::HSchedulerARM scheduler(&allocator_, &critical_path_selector, &arm_latency_visitor);
+ ScopedArenaAllocator allocator(GetArenaStack());
+ arm::HSchedulerARM scheduler(&allocator, &critical_path_selector, &arm_latency_visitor);
TestDependencyGraphOnAliasingArrayAccesses(&scheduler);
}
#endif
diff --git a/compiler/optimizing/select_generator.cc b/compiler/optimizing/select_generator.cc
index 827b5913af..0e46aeca37 100644
--- a/compiler/optimizing/select_generator.cc
+++ b/compiler/optimizing/select_generator.cc
@@ -135,10 +135,10 @@ void HSelectGenerator::Run() {
DCHECK(both_successors_return || phi != nullptr);
// Create the Select instruction and insert it in front of the If.
- HSelect* select = new (graph_->GetArena()) HSelect(if_instruction->InputAt(0),
- true_value,
- false_value,
- if_instruction->GetDexPc());
+ HSelect* select = new (graph_->GetAllocator()) HSelect(if_instruction->InputAt(0),
+ true_value,
+ false_value,
+ if_instruction->GetDexPc());
if (both_successors_return) {
if (true_value->GetType() == DataType::Type::kReference) {
DCHECK(false_value->GetType() == DataType::Type::kReference);
diff --git a/compiler/optimizing/side_effects_analysis.h b/compiler/optimizing/side_effects_analysis.h
index fea47e66d9..cf00e48e24 100644
--- a/compiler/optimizing/side_effects_analysis.h
+++ b/compiler/optimizing/side_effects_analysis.h
@@ -29,9 +29,9 @@ class SideEffectsAnalysis : public HOptimization {
: HOptimization(graph, pass_name),
graph_(graph),
block_effects_(graph->GetBlocks().size(),
- graph->GetArena()->Adapter(kArenaAllocSideEffectsAnalysis)),
+ graph->GetAllocator()->Adapter(kArenaAllocSideEffectsAnalysis)),
loop_effects_(graph->GetBlocks().size(),
- graph->GetArena()->Adapter(kArenaAllocSideEffectsAnalysis)) {}
+ graph->GetAllocator()->Adapter(kArenaAllocSideEffectsAnalysis)) {}
SideEffects GetLoopEffects(HBasicBlock* block) const;
SideEffects GetBlockEffects(HBasicBlock* block) const;
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index 23563168a0..f4a8a17131 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -233,7 +233,7 @@ bool SsaBuilder::UpdatePrimitiveType(HPhi* phi, ArenaVector<HPhi*>* worklist) {
}
void SsaBuilder::RunPrimitiveTypePropagation() {
- ArenaVector<HPhi*> worklist(graph_->GetArena()->Adapter(kArenaAllocGraphBuilder));
+ ArenaVector<HPhi*> worklist(graph_->GetAllocator()->Adapter(kArenaAllocGraphBuilder));
for (HBasicBlock* block : graph_->GetReversePostOrder()) {
if (block->IsLoopHeader()) {
@@ -293,7 +293,7 @@ static HArrayGet* CreateFloatOrDoubleEquivalentOfArrayGet(HArrayGet* aget) {
DCHECK(DataType::IsIntOrLongType(type));
DCHECK(FindFloatOrDoubleEquivalentOfArrayGet(aget) == nullptr);
- HArrayGet* equivalent = new (aget->GetBlock()->GetGraph()->GetArena()) HArrayGet(
+ HArrayGet* equivalent = new (aget->GetBlock()->GetGraph()->GetAllocator()) HArrayGet(
aget->GetArray(),
aget->GetIndex(),
type == DataType::Type::kInt32 ? DataType::Type::kFloat32 : DataType::Type::kFloat64,
@@ -319,7 +319,7 @@ bool SsaBuilder::FixAmbiguousArrayOps() {
// uses (because they are untyped) and environment uses (if --debuggable).
// After resolving all ambiguous ArrayGets, we will re-run primitive type
// propagation on the Phis which need to be updated.
- ArenaVector<HPhi*> worklist(graph_->GetArena()->Adapter(kArenaAllocGraphBuilder));
+ ArenaVector<HPhi*> worklist(graph_->GetAllocator()->Adapter(kArenaAllocGraphBuilder));
{
ScopedObjectAccess soa(Thread::Current());
@@ -566,7 +566,7 @@ HFloatConstant* SsaBuilder::GetFloatEquivalent(HIntConstant* constant) {
HFloatConstant* result = constant->GetNext()->AsFloatConstant();
if (result == nullptr) {
float value = bit_cast<float, int32_t>(constant->GetValue());
- result = new (graph_->GetArena()) HFloatConstant(value);
+ result = new (graph_->GetAllocator()) HFloatConstant(value);
constant->GetBlock()->InsertInstructionBefore(result, constant->GetNext());
graph_->CacheFloatConstant(result);
} else {
@@ -588,7 +588,7 @@ HDoubleConstant* SsaBuilder::GetDoubleEquivalent(HLongConstant* constant) {
HDoubleConstant* result = constant->GetNext()->AsDoubleConstant();
if (result == nullptr) {
double value = bit_cast<double, int64_t>(constant->GetValue());
- result = new (graph_->GetArena()) HDoubleConstant(value);
+ result = new (graph_->GetAllocator()) HDoubleConstant(value);
constant->GetBlock()->InsertInstructionBefore(result, constant->GetNext());
graph_->CacheDoubleConstant(result);
} else {
@@ -621,7 +621,7 @@ HPhi* SsaBuilder::GetFloatDoubleOrReferenceEquivalentOfPhi(HPhi* phi, DataType::
if (next == nullptr
|| (next->AsPhi()->GetRegNumber() != phi->GetRegNumber())
|| (next->GetType() != type)) {
- ArenaAllocator* allocator = graph_->GetArena();
+ ArenaAllocator* allocator = graph_->GetAllocator();
HInputsRef inputs = phi->GetInputs();
HPhi* new_phi =
new (allocator) HPhi(allocator, phi->GetRegNumber(), inputs.size(), type);
diff --git a/compiler/optimizing/ssa_builder.h b/compiler/optimizing/ssa_builder.h
index 1819ee568e..509cdc1252 100644
--- a/compiler/optimizing/ssa_builder.h
+++ b/compiler/optimizing/ssa_builder.h
@@ -56,9 +56,9 @@ class SsaBuilder : public ValueObject {
dex_cache_(dex_cache),
handles_(handles),
agets_fixed_(false),
- ambiguous_agets_(graph->GetArena()->Adapter(kArenaAllocGraphBuilder)),
- ambiguous_asets_(graph->GetArena()->Adapter(kArenaAllocGraphBuilder)),
- uninitialized_strings_(graph->GetArena()->Adapter(kArenaAllocGraphBuilder)) {
+ ambiguous_agets_(graph->GetAllocator()->Adapter(kArenaAllocGraphBuilder)),
+ ambiguous_asets_(graph->GetAllocator()->Adapter(kArenaAllocGraphBuilder)),
+ uninitialized_strings_(graph->GetAllocator()->Adapter(kArenaAllocGraphBuilder)) {
graph_->InitializeInexactObjectRTI(handles);
}
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index f1f1be25d7..fd56601b36 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -26,7 +26,7 @@ namespace art {
void SsaLivenessAnalysis::Analyze() {
// Compute the linear order directly in the graph's data structure
// (there are no more following graph mutations).
- LinearizeGraph(graph_, graph_->GetArena(), &graph_->linear_order_);
+ LinearizeGraph(graph_, &graph_->linear_order_);
// Liveness analysis.
NumberInstructions();
@@ -56,7 +56,7 @@ void SsaLivenessAnalysis::NumberInstructions() {
instructions_from_ssa_index_.push_back(current);
current->SetSsaIndex(ssa_index++);
current->SetLiveInterval(
- LiveInterval::MakeInterval(graph_->GetArena(), current->GetType(), current));
+ LiveInterval::MakeInterval(graph_->GetAllocator(), current->GetType(), current));
}
current->SetLifetimePosition(lifetime_position);
}
@@ -74,7 +74,7 @@ void SsaLivenessAnalysis::NumberInstructions() {
instructions_from_ssa_index_.push_back(current);
current->SetSsaIndex(ssa_index++);
current->SetLiveInterval(
- LiveInterval::MakeInterval(graph_->GetArena(), current->GetType(), current));
+ LiveInterval::MakeInterval(graph_->GetAllocator(), current->GetType(), current));
}
instructions_from_lifetime_position_.push_back(current);
current->SetLifetimePosition(lifetime_position);
@@ -88,8 +88,8 @@ void SsaLivenessAnalysis::NumberInstructions() {
void SsaLivenessAnalysis::ComputeLiveness() {
for (HBasicBlock* block : graph_->GetLinearOrder()) {
- block_infos_[block->GetBlockId()] =
- new (graph_->GetArena()) BlockInfo(graph_->GetArena(), *block, number_of_ssa_values_);
+ block_infos_[block->GetBlockId()] = new (graph_->GetAllocator()) BlockInfo(
+ graph_->GetAllocator(), *block, number_of_ssa_values_);
}
// Compute the live ranges, as well as the initial live_in, live_out, and kill sets.
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index ec4ab31d61..0d81e9dfe7 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -1163,9 +1163,10 @@ class SsaLivenessAnalysis : public ValueObject {
codegen_(codegen),
block_infos_(graph->GetBlocks().size(),
nullptr,
- graph->GetArena()->Adapter(kArenaAllocSsaLiveness)),
- instructions_from_ssa_index_(graph->GetArena()->Adapter(kArenaAllocSsaLiveness)),
- instructions_from_lifetime_position_(graph->GetArena()->Adapter(kArenaAllocSsaLiveness)),
+ graph->GetAllocator()->Adapter(kArenaAllocSsaLiveness)),
+ instructions_from_ssa_index_(graph->GetAllocator()->Adapter(kArenaAllocSsaLiveness)),
+ instructions_from_lifetime_position_(
+ graph->GetAllocator()->Adapter(kArenaAllocSsaLiveness)),
number_of_ssa_values_(0) {
}
diff --git a/compiler/optimizing/ssa_liveness_analysis_test.cc b/compiler/optimizing/ssa_liveness_analysis_test.cc
index e89bf6d801..82ee441aa8 100644
--- a/compiler/optimizing/ssa_liveness_analysis_test.cc
+++ b/compiler/optimizing/ssa_liveness_analysis_test.cc
@@ -27,12 +27,10 @@
namespace art {
-class SsaLivenessAnalysisTest : public testing::Test {
+class SsaLivenessAnalysisTest : public OptimizingUnitTest {
public:
SsaLivenessAnalysisTest()
- : pool_(),
- allocator_(&pool_),
- graph_(CreateGraph(&allocator_)),
+ : graph_(CreateGraph()),
compiler_options_(),
instruction_set_(kRuntimeISA) {
std::string error_msg;
@@ -44,7 +42,7 @@ class SsaLivenessAnalysisTest : public testing::Test {
compiler_options_);
CHECK(codegen_ != nullptr) << instruction_set_ << " is not a supported target architecture.";
// Create entry block.
- entry_ = new (&allocator_) HBasicBlock(graph_);
+ entry_ = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry_);
graph_->SetEntryBlock(entry_);
}
@@ -52,14 +50,12 @@ class SsaLivenessAnalysisTest : public testing::Test {
protected:
HBasicBlock* CreateSuccessor(HBasicBlock* block) {
HGraph* graph = block->GetGraph();
- HBasicBlock* successor = new (&allocator_) HBasicBlock(graph);
+ HBasicBlock* successor = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(successor);
block->AddSuccessor(successor);
return successor;
}
- ArenaPool pool_;
- ArenaAllocator allocator_;
HGraph* graph_;
CompilerOptions compiler_options_;
InstructionSet instruction_set_;
@@ -69,14 +65,14 @@ class SsaLivenessAnalysisTest : public testing::Test {
};
TEST_F(SsaLivenessAnalysisTest, TestReturnArg) {
- HInstruction* arg = new (&allocator_) HParameterValue(
+ HInstruction* arg = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32);
entry_->AddInstruction(arg);
HBasicBlock* block = CreateSuccessor(entry_);
- HInstruction* ret = new (&allocator_) HReturn(arg);
+ HInstruction* ret = new (GetAllocator()) HReturn(arg);
block->AddInstruction(ret);
- block->AddInstruction(new (&allocator_) HExit());
+ block->AddInstruction(new (GetAllocator()) HExit());
graph_->BuildDominatorTree();
SsaLivenessAnalysis ssa_analysis(graph_, codegen_.get());
@@ -89,45 +85,45 @@ TEST_F(SsaLivenessAnalysisTest, TestReturnArg) {
}
TEST_F(SsaLivenessAnalysisTest, TestAput) {
- HInstruction* array = new (&allocator_) HParameterValue(
+ HInstruction* array = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference);
- HInstruction* index = new (&allocator_) HParameterValue(
+ HInstruction* index = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(1), 1, DataType::Type::kInt32);
- HInstruction* value = new (&allocator_) HParameterValue(
+ HInstruction* value = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(2), 2, DataType::Type::kInt32);
- HInstruction* extra_arg1 = new (&allocator_) HParameterValue(
+ HInstruction* extra_arg1 = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(3), 3, DataType::Type::kInt32);
- HInstruction* extra_arg2 = new (&allocator_) HParameterValue(
+ HInstruction* extra_arg2 = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(4), 4, DataType::Type::kReference);
ArenaVector<HInstruction*> args({ array, index, value, extra_arg1, extra_arg2 },
- allocator_.Adapter());
+ GetAllocator()->Adapter());
for (HInstruction* insn : args) {
entry_->AddInstruction(insn);
}
HBasicBlock* block = CreateSuccessor(entry_);
- HInstruction* null_check = new (&allocator_) HNullCheck(array, 0);
+ HInstruction* null_check = new (GetAllocator()) HNullCheck(array, 0);
block->AddInstruction(null_check);
- HEnvironment* null_check_env = new (&allocator_) HEnvironment(&allocator_,
- /* number_of_vregs */ 5,
- /* method */ nullptr,
- /* dex_pc */ 0u,
- null_check);
+ HEnvironment* null_check_env = new (GetAllocator()) HEnvironment(GetAllocator(),
+ /* number_of_vregs */ 5,
+ /* method */ nullptr,
+ /* dex_pc */ 0u,
+ null_check);
null_check_env->CopyFrom(args);
null_check->SetRawEnvironment(null_check_env);
- HInstruction* length = new (&allocator_) HArrayLength(array, 0);
+ HInstruction* length = new (GetAllocator()) HArrayLength(array, 0);
block->AddInstruction(length);
- HInstruction* bounds_check = new (&allocator_) HBoundsCheck(index, length, /* dex_pc */ 0u);
+ HInstruction* bounds_check = new (GetAllocator()) HBoundsCheck(index, length, /* dex_pc */ 0u);
block->AddInstruction(bounds_check);
- HEnvironment* bounds_check_env = new (&allocator_) HEnvironment(&allocator_,
- /* number_of_vregs */ 5,
- /* method */ nullptr,
- /* dex_pc */ 0u,
- bounds_check);
+ HEnvironment* bounds_check_env = new (GetAllocator()) HEnvironment(GetAllocator(),
+ /* number_of_vregs */ 5,
+ /* method */ nullptr,
+ /* dex_pc */ 0u,
+ bounds_check);
bounds_check_env->CopyFrom(args);
bounds_check->SetRawEnvironment(bounds_check_env);
HInstruction* array_set =
- new (&allocator_) HArraySet(array, index, value, DataType::Type::kInt32, /* dex_pc */ 0);
+ new (GetAllocator()) HArraySet(array, index, value, DataType::Type::kInt32, /* dex_pc */ 0);
block->AddInstruction(array_set);
graph_->BuildDominatorTree();
@@ -159,49 +155,49 @@ TEST_F(SsaLivenessAnalysisTest, TestAput) {
}
TEST_F(SsaLivenessAnalysisTest, TestDeoptimize) {
- HInstruction* array = new (&allocator_) HParameterValue(
+ HInstruction* array = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference);
- HInstruction* index = new (&allocator_) HParameterValue(
+ HInstruction* index = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(1), 1, DataType::Type::kInt32);
- HInstruction* value = new (&allocator_) HParameterValue(
+ HInstruction* value = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(2), 2, DataType::Type::kInt32);
- HInstruction* extra_arg1 = new (&allocator_) HParameterValue(
+ HInstruction* extra_arg1 = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(3), 3, DataType::Type::kInt32);
- HInstruction* extra_arg2 = new (&allocator_) HParameterValue(
+ HInstruction* extra_arg2 = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(4), 4, DataType::Type::kReference);
ArenaVector<HInstruction*> args({ array, index, value, extra_arg1, extra_arg2 },
- allocator_.Adapter());
+ GetAllocator()->Adapter());
for (HInstruction* insn : args) {
entry_->AddInstruction(insn);
}
HBasicBlock* block = CreateSuccessor(entry_);
- HInstruction* null_check = new (&allocator_) HNullCheck(array, 0);
+ HInstruction* null_check = new (GetAllocator()) HNullCheck(array, 0);
block->AddInstruction(null_check);
- HEnvironment* null_check_env = new (&allocator_) HEnvironment(&allocator_,
- /* number_of_vregs */ 5,
- /* method */ nullptr,
- /* dex_pc */ 0u,
- null_check);
+ HEnvironment* null_check_env = new (GetAllocator()) HEnvironment(GetAllocator(),
+ /* number_of_vregs */ 5,
+ /* method */ nullptr,
+ /* dex_pc */ 0u,
+ null_check);
null_check_env->CopyFrom(args);
null_check->SetRawEnvironment(null_check_env);
- HInstruction* length = new (&allocator_) HArrayLength(array, 0);
+ HInstruction* length = new (GetAllocator()) HArrayLength(array, 0);
block->AddInstruction(length);
// Use HAboveOrEqual+HDeoptimize as the bounds check.
- HInstruction* ae = new (&allocator_) HAboveOrEqual(index, length);
+ HInstruction* ae = new (GetAllocator()) HAboveOrEqual(index, length);
block->AddInstruction(ae);
- HInstruction* deoptimize =
- new(&allocator_) HDeoptimize(&allocator_, ae, DeoptimizationKind::kBlockBCE, /* dex_pc */ 0u);
+ HInstruction* deoptimize = new(GetAllocator()) HDeoptimize(
+ GetAllocator(), ae, DeoptimizationKind::kBlockBCE, /* dex_pc */ 0u);
block->AddInstruction(deoptimize);
- HEnvironment* deoptimize_env = new (&allocator_) HEnvironment(&allocator_,
- /* number_of_vregs */ 5,
- /* method */ nullptr,
- /* dex_pc */ 0u,
- deoptimize);
+ HEnvironment* deoptimize_env = new (GetAllocator()) HEnvironment(GetAllocator(),
+ /* number_of_vregs */ 5,
+ /* method */ nullptr,
+ /* dex_pc */ 0u,
+ deoptimize);
deoptimize_env->CopyFrom(args);
deoptimize->SetRawEnvironment(deoptimize_env);
HInstruction* array_set =
- new (&allocator_) HArraySet(array, index, value, DataType::Type::kInt32, /* dex_pc */ 0);
+ new (GetAllocator()) HArraySet(array, index, value, DataType::Type::kInt32, /* dex_pc */ 0);
block->AddInstruction(array_set);
graph_->BuildDominatorTree();
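
The test refactor above drops the per-fixture ArenaPool/ArenaAllocator pair in favour of the shared OptimizingUnitTest base and its GetAllocator(). The patch's actual base class is not reproduced here; the following is a hypothetical stand-in showing the shape of such a fixture (class name and header paths are assumptions):

// Headers assumed: gtest plus the in-tree arena allocator.
#include "base/arena_allocator.h"
#include "gtest/gtest.h"

namespace art {

// Hypothetical stand-in for a base fixture that owns the arena state, so
// derived tests only call GetAllocator() and placement-new their HIR nodes
// (the HIR classes accept an ArenaAllocator* placement argument, as seen in
// the hunks above).
class ArenaOwningTestBase : public testing::Test {
 protected:
  ArenaAllocator* GetAllocator() { return &allocator_; }

 private:
  ArenaPool pool_;
  ArenaAllocator allocator_{&pool_};
};

}  // namespace art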
diff --git a/compiler/optimizing/ssa_phi_elimination.cc b/compiler/optimizing/ssa_phi_elimination.cc
index b4f8408a76..3b95b86268 100644
--- a/compiler/optimizing/ssa_phi_elimination.cc
+++ b/compiler/optimizing/ssa_phi_elimination.cc
@@ -31,7 +31,7 @@ void SsaDeadPhiElimination::MarkDeadPhis() {
// Phis are constructed live and should not be revived if previously marked
// dead. This algorithm temporarily breaks that invariant but we DCHECK that
// only phis which were initially live are revived.
- ArenaSet<HPhi*> initially_live(graph_->GetArena()->Adapter(kArenaAllocSsaPhiElimination));
+ ArenaSet<HPhi*> initially_live(graph_->GetAllocator()->Adapter(kArenaAllocSsaPhiElimination));
// Add to the worklist phis referenced by non-phi instructions.
for (HBasicBlock* block : graph_->GetReversePostOrder()) {
@@ -123,11 +123,11 @@ void SsaRedundantPhiElimination::Run() {
}
}
- ArenaBitVector visited_phis_in_cycle(graph_->GetArena(),
+ ArenaBitVector visited_phis_in_cycle(graph_->GetAllocator(),
graph_->GetCurrentInstructionId(),
/* expandable */ false,
kArenaAllocSsaPhiElimination);
- ArenaVector<HPhi*> cycle_worklist(graph_->GetArena()->Adapter(kArenaAllocSsaPhiElimination));
+ ArenaVector<HPhi*> cycle_worklist(graph_->GetAllocator()->Adapter(kArenaAllocSsaPhiElimination));
while (!worklist_.empty()) {
HPhi* phi = worklist_.back();
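
The phi-elimination hunks follow the same GetArena()-to-GetAllocator() rename. A sketch of the pass-local container setup, restating the constructor calls shown above inside an illustrative helper (the helper itself is not from the patch):

// Sketch only: the constructor calls mirror those in the hunks above.
static void SketchPhiEliminationState(HGraph* graph) {
  // Non-expandable bit vector sized by instruction id, as in
  // SsaRedundantPhiElimination::Run().
  ArenaBitVector visited_phis_in_cycle(graph->GetAllocator(),
                                       graph->GetCurrentInstructionId(),
                                       /* expandable */ false,
                                       kArenaAllocSsaPhiElimination);
  // Guard set and worklist, both attributed to kArenaAllocSsaPhiElimination.
  ArenaSet<HPhi*> initially_live(
      graph->GetAllocator()->Adapter(kArenaAllocSsaPhiElimination));
  ArenaVector<HPhi*> cycle_worklist(
      graph->GetAllocator()->Adapter(kArenaAllocSsaPhiElimination));
  // ... the pass would push phis onto cycle_worklist and mark them in the
  // bit vector; all three containers live on the graph's arena and are not
  // freed individually.
}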
diff --git a/compiler/optimizing/ssa_phi_elimination.h b/compiler/optimizing/ssa_phi_elimination.h
index b48e8200d5..e0cde074d6 100644
--- a/compiler/optimizing/ssa_phi_elimination.h
+++ b/compiler/optimizing/ssa_phi_elimination.h
@@ -31,7 +31,7 @@ class SsaDeadPhiElimination : public HOptimization {
public:
explicit SsaDeadPhiElimination(HGraph* graph)
: HOptimization(graph, kSsaDeadPhiEliminationPassName),
- worklist_(graph->GetArena()->Adapter(kArenaAllocSsaPhiElimination)) {
+ worklist_(graph->GetAllocator()->Adapter(kArenaAllocSsaPhiElimination)) {
worklist_.reserve(kDefaultWorklistSize);
}
@@ -60,7 +60,7 @@ class SsaRedundantPhiElimination : public HOptimization {
public:
explicit SsaRedundantPhiElimination(HGraph* graph)
: HOptimization(graph, kSsaRedundantPhiEliminationPassName),
- worklist_(graph->GetArena()->Adapter(kArenaAllocSsaPhiElimination)) {
+ worklist_(graph->GetAllocator()->Adapter(kArenaAllocSsaPhiElimination)) {
worklist_.reserve(kDefaultWorklistSize);
}
diff --git a/compiler/optimizing/ssa_test.cc b/compiler/optimizing/ssa_test.cc
index ac998dbcab..e08904e84b 100644
--- a/compiler/optimizing/ssa_test.cc
+++ b/compiler/optimizing/ssa_test.cc
@@ -29,7 +29,10 @@
namespace art {
-class SsaTest : public CommonCompilerTest {};
+class SsaTest : public OptimizingUnitTest {
+ protected:
+ void TestCode(const uint16_t* data, const char* expected);
+};
class SsaPrettyPrinter : public HPrettyPrinter {
public:
@@ -77,10 +80,8 @@ static void ReNumberInstructions(HGraph* graph) {
}
}
-static void TestCode(const uint16_t* data, const char* expected) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateCFG(&allocator, data);
+void SsaTest::TestCode(const uint16_t* data, const char* expected) {
+ HGraph* graph = CreateCFG(data);
// Suspend checks implementation may change in the future, and this test relies
// on how instructions are ordered.
RemoveSuspendChecks(graph);
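
ssa_test.cc (above) and suspend_check_test.cc (below) apply the same conversion: a file-static TestCode helper that owned its own ArenaPool/ArenaAllocator becomes a fixture method on OptimizingUnitTest, so the graph comes from the fixture-provided CreateCFG(data). A hedged sketch of that pattern (only OptimizingUnitTest and CreateCFG come from the patch; the fixture name and assertion are made up for the example):

// Hypothetical fixture illustrating the TestCode conversion.
class ExamplePassTest : public OptimizingUnitTest {
 protected:
  void TestCode(const uint16_t* data) {
    HGraph* graph = CreateCFG(data);  // graph allocated on the fixture's arena
    ASSERT_NE(graph, nullptr);
    // ... pass-specific checks on `graph` would follow, as in the tests above.
  }
};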
diff --git a/compiler/optimizing/suspend_check_test.cc b/compiler/optimizing/suspend_check_test.cc
index 15cd4e8a08..88336b0009 100644
--- a/compiler/optimizing/suspend_check_test.cc
+++ b/compiler/optimizing/suspend_check_test.cc
@@ -28,10 +28,13 @@ namespace art {
* Check that the HGraphBuilder adds suspend checks to backward branches.
*/
-static void TestCode(const uint16_t* data) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateCFG(&allocator, data);
+class SuspendCheckTest : public OptimizingUnitTest {
+ protected:
+ void TestCode(const uint16_t* data);
+};
+
+void SuspendCheckTest::TestCode(const uint16_t* data) {
+ HGraph* graph = CreateCFG(data);
HBasicBlock* first_block = graph->GetEntryBlock()->GetSingleSuccessor();
HBasicBlock* loop_header = first_block->GetSingleSuccessor();
ASSERT_TRUE(loop_header->IsLoopHeader());
@@ -39,8 +42,6 @@ static void TestCode(const uint16_t* data) {
ASSERT_TRUE(loop_header->GetFirstInstruction()->IsSuspendCheck());
}
-class SuspendCheckTest : public CommonCompilerTest {};
-
TEST_F(SuspendCheckTest, CFG1) {
const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
Instruction::NOP,