Diffstat (limited to 'compiler/optimizing')
-rw-r--r--  compiler/optimizing/builder.cc                        3
-rw-r--r--  compiler/optimizing/code_generator.cc                 3
-rw-r--r--  compiler/optimizing/dead_code_elimination.cc          2
-rw-r--r--  compiler/optimizing/graph_checker.cc                 67
-rw-r--r--  compiler/optimizing/graph_checker.h                   6
-rw-r--r--  compiler/optimizing/gvn.cc                            4
-rw-r--r--  compiler/optimizing/induction_var_analysis.cc       176
-rw-r--r--  compiler/optimizing/induction_var_analysis.h         32
-rw-r--r--  compiler/optimizing/induction_var_analysis_test.cc  175
-rw-r--r--  compiler/optimizing/induction_var_range.cc          111
-rw-r--r--  compiler/optimizing/induction_var_range_test.cc      17
-rw-r--r--  compiler/optimizing/instruction_simplifier.cc         5
-rw-r--r--  compiler/optimizing/intrinsics_mips.cc              417
-rw-r--r--  compiler/optimizing/licm.cc                           2
-rw-r--r--  compiler/optimizing/load_store_elimination.cc         5
-rw-r--r--  compiler/optimizing/locations.cc                      2
-rw-r--r--  compiler/optimizing/nodes.cc                          4
-rw-r--r--  compiler/optimizing/nodes.h                           6
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc           23
-rw-r--r--  compiler/optimizing/optimizing_unit_test.h            1
-rw-r--r--  compiler/optimizing/register_allocator.cc            14
-rw-r--r--  compiler/optimizing/ssa_liveness_analysis.h           6
-rw-r--r--  compiler/optimizing/stack_map_stream.cc               6
23 files changed, 902 insertions, 185 deletions
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 57660c2623..124afbc73b 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -367,7 +367,8 @@ GraphAnalysisResult HGraphBuilder::BuildGraph(const DexFile::CodeItem& code_item
ArenaBitVector* native_debug_info_locations;
if (native_debuggable) {
const uint32_t num_instructions = code_item.insns_size_in_code_units_;
- native_debug_info_locations = new (arena_) ArenaBitVector (arena_, num_instructions, false);
+ native_debug_info_locations =
+ ArenaBitVector::Create(arena_, num_instructions, false, kArenaAllocGraphBuilder);
FindNativeDebugInfoLocations(code_item, native_debug_info_locations);
}
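
This hunk, like the similar ones in code_generator.cc, dead_code_elimination.cc, graph_checker.*, and gvn.cc below, routes ArenaBitVector construction through a factory that takes an allocation-kind tag, so the bit vector's memory can be attributed to the requesting pass in the arena's allocation statistics. A minimal standalone sketch of that tagged-arena bookkeeping idea (own names and a plain bump-style allocator, not ART's ArenaAllocator API):

#include <array>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

enum AllocKind { kAllocGraphBuilder, kAllocCodeGenerator, kAllocGvn, kNumAllocKinds };

class TaggedArena {
 public:
  // Hands out zero-initialized storage and tallies bytes per allocation kind.
  void* Alloc(std::size_t bytes, AllocKind kind) {
    bytes_per_kind_[kind] += bytes;
    blocks_.emplace_back(bytes);  // a std::vector<uint8_t> of `bytes` zeros
    return blocks_.back().data();
  }
  void DumpStats() const {
    static const char* const names[kNumAllocKinds] = {
        "GraphBuilder", "CodeGenerator", "Gvn"};
    for (int k = 0; k < kNumAllocKinds; ++k) {
      std::printf("%-13s %zu bytes\n", names[k], bytes_per_kind_[k]);
    }
  }
 private:
  std::array<std::size_t, kNumAllocKinds> bytes_per_kind_{};
  std::vector<std::vector<uint8_t>> blocks_;
};

int main() {
  TaggedArena arena;
  arena.Alloc(128, kAllocGraphBuilder);   // e.g. the native-debug-info bit vector
  arena.Alloc(64, kAllocCodeGenerator);   // e.g. the (empty) catch-block stack mask
  arena.DumpStats();
}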
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index d64a786a9e..32869ec0b4 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -856,7 +856,8 @@ void CodeGenerator::RecordCatchBlockInfo() {
uint32_t register_mask = 0; // Not used.
// The stack mask is not used, so we leave it empty.
- ArenaBitVector* stack_mask = new (arena) ArenaBitVector(arena, 0, /* expandable */ true);
+ ArenaBitVector* stack_mask =
+ ArenaBitVector::Create(arena, 0, /* expandable */ true, kArenaAllocCodeGenerator);
stack_map_stream_.BeginStackMapEntry(dex_pc,
native_pc,
diff --git a/compiler/optimizing/dead_code_elimination.cc b/compiler/optimizing/dead_code_elimination.cc
index e170e37bdd..d7bf16e0cc 100644
--- a/compiler/optimizing/dead_code_elimination.cc
+++ b/compiler/optimizing/dead_code_elimination.cc
@@ -97,7 +97,7 @@ void HDeadCodeElimination::RemoveDeadBlocks() {
}
// Classify blocks as reachable/unreachable.
ArenaAllocator* allocator = graph_->GetArena();
- ArenaBitVector live_blocks(allocator, graph_->GetBlocks().size(), false);
+ ArenaBitVector live_blocks(allocator, graph_->GetBlocks().size(), false, kArenaAllocDCE);
MarkReachableBlocks(graph_, &live_blocks);
bool removed_one_or_more_blocks = false;
diff --git a/compiler/optimizing/graph_checker.cc b/compiler/optimizing/graph_checker.cc
index 11e3689a82..0c22903602 100644
--- a/compiler/optimizing/graph_checker.cc
+++ b/compiler/optimizing/graph_checker.cc
@@ -17,7 +17,6 @@
#include "graph_checker.h"
#include <algorithm>
-#include <map>
#include <string>
#include <sstream>
@@ -32,19 +31,19 @@ void GraphChecker::VisitBasicBlock(HBasicBlock* block) {
current_block_ = block;
// Check consistency with respect to predecessors of `block`.
- ArenaSafeMap<HBasicBlock*, size_t> predecessors_count(
- std::less<HBasicBlock*>(), GetGraph()->GetArena()->Adapter(kArenaAllocGraphChecker));
- for (HBasicBlock* p : block->GetPredecessors()) {
- auto it = predecessors_count.find(p);
- if (it != predecessors_count.end()) {
- ++it->second;
- } else {
- predecessors_count.Put(p, 1u);
+ // Note: Counting duplicates with a sorted vector uses up to 6x less memory
+ // than ArenaSafeMap<HBasicBlock*, size_t>.
+ ArenaVector<HBasicBlock*> sorted_predecessors(
+ block->GetPredecessors().begin(),
+ block->GetPredecessors().end(),
+ GetGraph()->GetArena()->Adapter(kArenaAllocGraphChecker));
+ std::sort(sorted_predecessors.begin(), sorted_predecessors.end());
+ for (auto it = sorted_predecessors.begin(), end = sorted_predecessors.end(); it != end; ) {
+ HBasicBlock* p = *it++;
+ size_t p_count_in_block_predecessors = 1u;
+ for (; it != end && *it == p; ++it) {
+ ++p_count_in_block_predecessors;
}
- }
- for (auto& pc : predecessors_count) {
- HBasicBlock* p = pc.first;
- size_t p_count_in_block_predecessors = pc.second;
size_t block_count_in_p_successors =
std::count(p->GetSuccessors().begin(), p->GetSuccessors().end(), block);
if (p_count_in_block_predecessors != block_count_in_p_successors) {
@@ -57,19 +56,19 @@ void GraphChecker::VisitBasicBlock(HBasicBlock* block) {
}
// Check consistency with respect to successors of `block`.
- ArenaSafeMap<HBasicBlock*, size_t> successors_count(
- std::less<HBasicBlock*>(), GetGraph()->GetArena()->Adapter(kArenaAllocGraphChecker));
- for (HBasicBlock* s : block->GetSuccessors()) {
- auto it = successors_count.find(s);
- if (it != successors_count.end()) {
- ++it->second;
- } else {
- successors_count.Put(s, 1u);
+ // Note: Counting duplicates with a sorted vector uses up to 6x less memory
+ // than ArenaSafeMap<HBasicBlock*, size_t>.
+ ArenaVector<HBasicBlock*> sorted_successors(
+ block->GetSuccessors().begin(),
+ block->GetSuccessors().end(),
+ GetGraph()->GetArena()->Adapter(kArenaAllocGraphChecker));
+ std::sort(sorted_successors.begin(), sorted_successors.end());
+ for (auto it = sorted_successors.begin(), end = sorted_successors.end(); it != end; ) {
+ HBasicBlock* s = *it++;
+ size_t s_count_in_block_successors = 1u;
+ for (; it != end && *it == s; ++it) {
+ ++s_count_in_block_successors;
}
- }
- for (auto& sc : successors_count) {
- HBasicBlock* s = sc.first;
- size_t s_count_in_block_successors = sc.second;
size_t block_count_in_s_predecessors =
std::count(s->GetPredecessors().begin(), s->GetPredecessors().end(), block);
if (s_count_in_block_successors != block_count_in_s_predecessors) {
@@ -812,7 +811,10 @@ void GraphChecker::VisitPhi(HPhi* phi) {
phi->GetRegNumber(),
type_str.str().c_str()));
} else {
- ArenaBitVector visited(GetGraph()->GetArena(), 0, /* expandable */ true);
+ ArenaBitVector visited(GetGraph()->GetArena(),
+ 0,
+ /* expandable */ true,
+ kArenaAllocGraphChecker);
if (!IsConstantEquivalent(phi, other_phi, &visited)) {
AddError(StringPrintf("Two phis (%d and %d) found for VReg %d but they "
"are not equivalents of constants.",
@@ -917,6 +919,19 @@ void GraphChecker::VisitCondition(HCondition* op) {
}
}
+void GraphChecker::VisitNeg(HNeg* instruction) {
+ VisitInstruction(instruction);
+ Primitive::Type input_type = instruction->InputAt(0)->GetType();
+ Primitive::Type result_type = instruction->GetType();
+ if (result_type != Primitive::PrimitiveKind(input_type)) {
+ AddError(StringPrintf("Binary operation %s %d has a result type different "
+ "from its input kind: %s vs %s.",
+ instruction->DebugName(), instruction->GetId(),
+ Primitive::PrettyDescriptor(result_type),
+ Primitive::PrettyDescriptor(input_type)));
+ }
+}
+
void GraphChecker::VisitBinaryOperation(HBinaryOperation* op) {
VisitInstruction(op);
Primitive::Type lhs_type = op->InputAt(0)->GetType();
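
The sorted-vector counting above replaces a per-block map: after sorting, duplicate predecessors (or successors) sit next to each other, so a single linear scan yields each block's multiplicity. A standalone sketch of that scan with plain std containers rather than the arena-backed ones used in the checker:

#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
  // Stand-ins for a block's predecessor pointers; duplicates are legal
  // (e.g. several switch edges targeting the same block).
  std::vector<int> predecessors = {7, 3, 7, 7, 3, 9};
  std::vector<int> sorted(predecessors.begin(), predecessors.end());
  std::sort(sorted.begin(), sorted.end());
  for (auto it = sorted.begin(), end = sorted.end(); it != end; ) {
    int p = *it++;
    int count = 1;
    for (; it != end && *it == p; ++it) {
      ++count;
    }
    std::printf("predecessor %d occurs %d time(s)\n", p, count);
  }
  return 0;
}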
diff --git a/compiler/optimizing/graph_checker.h b/compiler/optimizing/graph_checker.h
index 52252cd3d4..27d5621887 100644
--- a/compiler/optimizing/graph_checker.h
+++ b/compiler/optimizing/graph_checker.h
@@ -30,7 +30,10 @@ class GraphChecker : public HGraphDelegateVisitor {
: HGraphDelegateVisitor(graph),
errors_(graph->GetArena()->Adapter(kArenaAllocGraphChecker)),
dump_prefix_(dump_prefix),
- seen_ids_(graph->GetArena(), graph->GetCurrentInstructionId(), false) {}
+ seen_ids_(graph->GetArena(),
+ graph->GetCurrentInstructionId(),
+ false,
+ kArenaAllocGraphChecker) {}
// Check the whole graph (in reverse post-order).
void Run() {
@@ -56,6 +59,7 @@ class GraphChecker : public HGraphDelegateVisitor {
void VisitInstanceOf(HInstanceOf* check) OVERRIDE;
void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE;
void VisitLoadException(HLoadException* load) OVERRIDE;
+ void VisitNeg(HNeg* instruction) OVERRIDE;
void VisitPackedSwitch(HPackedSwitch* instruction) OVERRIDE;
void VisitReturn(HReturn* ret) OVERRIDE;
void VisitReturnVoid(HReturnVoid* ret) OVERRIDE;
diff --git a/compiler/optimizing/gvn.cc b/compiler/optimizing/gvn.cc
index b4922789d4..f7eb2adc6c 100644
--- a/compiler/optimizing/gvn.cc
+++ b/compiler/optimizing/gvn.cc
@@ -40,7 +40,7 @@ class ValueSet : public ArenaObject<kArenaAllocGvn> {
: allocator_(allocator),
num_buckets_(kMinimumNumberOfBuckets),
buckets_(allocator->AllocArray<Node*>(num_buckets_, kArenaAllocGvn)),
- buckets_owned_(allocator, num_buckets_, false),
+ buckets_owned_(allocator, num_buckets_, false, kArenaAllocGvn),
num_entries_(0) {
// ArenaAllocator returns zeroed memory, so no need to set buckets to null.
DCHECK(IsPowerOfTwo(num_buckets_));
@@ -53,7 +53,7 @@ class ValueSet : public ArenaObject<kArenaAllocGvn> {
: allocator_(allocator),
num_buckets_(to_copy.IdealBucketCount()),
buckets_(allocator->AllocArray<Node*>(num_buckets_, kArenaAllocGvn)),
- buckets_owned_(allocator, num_buckets_, false),
+ buckets_owned_(allocator, num_buckets_, false, kArenaAllocGvn),
num_entries_(to_copy.num_entries_) {
// ArenaAllocator returns zeroed memory, so entries of buckets_ and
// buckets_owned_ are initialized to null and false, respectively.
diff --git a/compiler/optimizing/induction_var_analysis.cc b/compiler/optimizing/induction_var_analysis.cc
index 82a898a9f1..266cb10ab3 100644
--- a/compiler/optimizing/induction_var_analysis.cc
+++ b/compiler/optimizing/induction_var_analysis.cc
@@ -53,6 +53,32 @@ static void RotateEntryPhiFirst(HLoopInformation* loop,
}
}
+/**
+ * Returns true if the from/to types denote a narrowing, integral conversion (precision loss).
+ */
+static bool IsNarrowingIntegralConversion(Primitive::Type from, Primitive::Type to) {
+ switch (from) {
+ case Primitive::kPrimLong:
+ return to == Primitive::kPrimByte || to == Primitive::kPrimShort
+ || to == Primitive::kPrimChar || to == Primitive::kPrimInt;
+ case Primitive::kPrimInt:
+ return to == Primitive::kPrimByte || to == Primitive::kPrimShort
+ || to == Primitive::kPrimChar;
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ return to == Primitive::kPrimByte;
+ default:
+ return false;
+ }
+}
+
+/**
+ * Returns narrowest data type.
+ */
+static Primitive::Type Narrowest(Primitive::Type type1, Primitive::Type type2) {
+ return Primitive::ComponentSize(type1) <= Primitive::ComponentSize(type2) ? type1 : type2;
+}
+
//
// Class methods.
//
@@ -148,6 +174,9 @@ void HInductionVarAnalysis::VisitNode(HLoopInformation* loop, HInstruction* inst
}
}
+ // Type of induction.
+ type_ = scc_[0]->GetType();
+
// Classify the SCC.
if (scc_.size() == 1 && !scc_[0]->IsLoopHeaderPhi()) {
ClassifyTrivial(loop, scc_[0]);
@@ -197,14 +226,13 @@ void HInductionVarAnalysis::ClassifyTrivial(HLoopInformation* loop, HInstruction
instruction->InputAt(0)->GetType());
} else if (instruction->IsNeg()) {
info = TransferNeg(LookupInfo(loop, instruction->InputAt(0)));
+ } else if (instruction->IsTypeConversion()) {
+ info = TransferCnv(LookupInfo(loop, instruction->InputAt(0)),
+ instruction->AsTypeConversion()->GetInputType(),
+ instruction->AsTypeConversion()->GetResultType());
+
} else if (instruction->IsBoundsCheck()) {
info = LookupInfo(loop, instruction->InputAt(0)); // Pass-through.
- } else if (instruction->IsTypeConversion()) {
- HTypeConversion* conversion = instruction->AsTypeConversion();
- // TODO: accept different conversion scenarios.
- if (conversion->GetResultType() == conversion->GetInputType()) {
- info = LookupInfo(loop, conversion->GetInput());
- }
}
// Successfully classified?
@@ -239,7 +267,7 @@ void HInductionVarAnalysis::ClassifyNonTrivial(HLoopInformation* loop) {
if (size == 1) {
InductionInfo* update = TransferPhi(loop, phi, /* input_index */ 1);
if (update != nullptr) {
- AssignInfo(loop, phi, CreateInduction(kWrapAround, initial, update));
+ AssignInfo(loop, phi, CreateInduction(kWrapAround, initial, update, type_));
}
return;
}
@@ -257,6 +285,8 @@ void HInductionVarAnalysis::ClassifyNonTrivial(HLoopInformation* loop) {
} else if (instruction->IsSub()) {
update = SolveAddSub(
loop, phi, instruction, instruction->InputAt(0), instruction->InputAt(1), kSub, true);
+ } else if (instruction->IsTypeConversion()) {
+ update = SolveCnv(instruction->AsTypeConversion());
}
if (update == nullptr) {
return;
@@ -271,7 +301,7 @@ void HInductionVarAnalysis::ClassifyNonTrivial(HLoopInformation* loop) {
case kInvariant:
// Classify first phi and then the rest of the cycle "on-demand".
// Statements are scanned in order.
- AssignInfo(loop, phi, CreateInduction(kLinear, induction, initial));
+ AssignInfo(loop, phi, CreateInduction(kLinear, induction, initial, type_));
for (size_t i = 1; i < size; i++) {
ClassifyTrivial(loop, scc_[i]);
}
@@ -301,9 +331,10 @@ HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::RotatePeriodicInduc
// (b, c, d, e, a)
// in preparation of assigning this to the previous variable in the sequence.
if (induction->induction_class == kInvariant) {
- return CreateInduction(kPeriodic, induction, last);
+ return CreateInduction(kPeriodic, induction, last, type_);
}
- return CreateInduction(kPeriodic, induction->op_a, RotatePeriodicInduction(induction->op_b, last));
+ return CreateInduction(
+ kPeriodic, induction->op_a, RotatePeriodicInduction(induction->op_b, last), type_);
}
HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::TransferPhi(HLoopInformation* loop,
@@ -332,8 +363,10 @@ HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::TransferAddSub(Indu
if (a->induction_class == kInvariant && b->induction_class == kInvariant) {
return CreateInvariantOp(op, a, b);
} else if (a->induction_class == kLinear && b->induction_class == kLinear) {
- return CreateInduction(
- kLinear, TransferAddSub(a->op_a, b->op_a, op), TransferAddSub(a->op_b, b->op_b, op));
+ return CreateInduction(kLinear,
+ TransferAddSub(a->op_a, b->op_a, op),
+ TransferAddSub(a->op_b, b->op_b, op),
+ type_);
} else if (a->induction_class == kInvariant) {
InductionInfo* new_a = b->op_a;
InductionInfo* new_b = TransferAddSub(a, b->op_b, op);
@@ -343,7 +376,7 @@ HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::TransferAddSub(Indu
} else if (op == kSub) { // Negation required.
new_a = TransferNeg(new_a);
}
- return CreateInduction(b->induction_class, new_a, new_b);
+ return CreateInduction(b->induction_class, new_a, new_b, type_);
} else if (b->induction_class == kInvariant) {
InductionInfo* new_a = a->op_a;
InductionInfo* new_b = TransferAddSub(a->op_b, b, op);
@@ -351,7 +384,7 @@ HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::TransferAddSub(Indu
DCHECK(a->induction_class == kWrapAround || a->induction_class == kPeriodic);
new_a = TransferAddSub(new_a, b, op);
}
- return CreateInduction(a->induction_class, new_a, new_b);
+ return CreateInduction(a->induction_class, new_a, new_b, type_);
}
}
return nullptr;
@@ -366,9 +399,15 @@ HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::TransferMul(Inducti
if (a->induction_class == kInvariant && b->induction_class == kInvariant) {
return CreateInvariantOp(kMul, a, b);
} else if (a->induction_class == kInvariant) {
- return CreateInduction(b->induction_class, TransferMul(a, b->op_a), TransferMul(a, b->op_b));
+ return CreateInduction(b->induction_class,
+ TransferMul(a, b->op_a),
+ TransferMul(a, b->op_b),
+ type_);
} else if (b->induction_class == kInvariant) {
- return CreateInduction(a->induction_class, TransferMul(a->op_a, b), TransferMul(a->op_b, b));
+ return CreateInduction(a->induction_class,
+ TransferMul(a->op_a, b),
+ TransferMul(a->op_b, b),
+ type_);
}
}
return nullptr;
@@ -400,7 +439,24 @@ HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::TransferNeg(Inducti
if (a->induction_class == kInvariant) {
return CreateInvariantOp(kNeg, nullptr, a);
}
- return CreateInduction(a->induction_class, TransferNeg(a->op_a), TransferNeg(a->op_b));
+ return CreateInduction(a->induction_class, TransferNeg(a->op_a), TransferNeg(a->op_b), type_);
+ }
+ return nullptr;
+}
+
+HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::TransferCnv(InductionInfo* a,
+ Primitive::Type from,
+ Primitive::Type to) {
+ if (a != nullptr) {
+ // Allow narrowing conversion in certain cases.
+ if (IsNarrowingIntegralConversion(from, to)) {
+ if (a->induction_class == kLinear) {
+ if (a->type == to || (a->type == from && IsNarrowingIntegralConversion(from, to))) {
+ return CreateInduction(kLinear, a->op_a, a->op_b, to);
+ }
+ }
+ // TODO: other cases useful too?
+ }
}
return nullptr;
}
@@ -442,11 +498,11 @@ HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::SolvePhiAllInputs(
if (a != nullptr && a->induction_class == kInvariant) {
if (phi->InputAt(1) == entry_phi) {
InductionInfo* initial = LookupInfo(loop, entry_phi->InputAt(0));
- return CreateInduction(kPeriodic, a, initial);
+ return CreateInduction(kPeriodic, a, initial, type_);
}
InductionInfo* b = SolvePhi(phi, /* input_index */ 1);
if (b != nullptr && b->induction_class == kPeriodic) {
- return CreateInduction(kPeriodic, a, b);
+ return CreateInduction(kPeriodic, a, b, type_);
}
}
}
@@ -489,7 +545,7 @@ HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::SolveAddSub(HLoopIn
InductionInfo* a = LookupInfo(loop, x);
if (a != nullptr && a->induction_class == kInvariant) {
InductionInfo* initial = LookupInfo(loop, entry_phi->InputAt(0));
- return CreateInduction(kPeriodic, CreateInvariantOp(kSub, a, initial), initial);
+ return CreateInduction(kPeriodic, CreateInvariantOp(kSub, a, initial), initial, type_);
}
}
}
@@ -497,6 +553,21 @@ HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::SolveAddSub(HLoopIn
return nullptr;
}
+HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::SolveCnv(HTypeConversion* conversion) {
+ Primitive::Type from = conversion->GetInputType();
+ Primitive::Type to = conversion->GetResultType();
+ // A narrowing conversion is allowed within the cycle of a linear induction, provided that the
+ // narrowest encountered type is recorded with the induction to account for the precision loss.
+ if (IsNarrowingIntegralConversion(from, to)) {
+ auto it = cycle_.find(conversion->GetInput());
+ if (it != cycle_.end() && it->second->induction_class == kInvariant) {
+ type_ = Narrowest(type_, to);
+ return it->second;
+ }
+ }
+ return nullptr;
+}
+
void HInductionVarAnalysis::VisitControl(HLoopInformation* loop) {
HInstruction* control = loop->GetHeader()->GetLastInstruction();
if (control->IsIf()) {
@@ -512,12 +583,10 @@ void HInductionVarAnalysis::VisitControl(HLoopInformation* loop) {
InductionInfo* a = LookupInfo(loop, condition->InputAt(0));
InductionInfo* b = LookupInfo(loop, condition->InputAt(1));
Primitive::Type type = condition->InputAt(0)->GetType();
- // Determine if the loop control uses integral arithmetic and an if-exit (X outside) or an
- // if-iterate (X inside), always expressed as if-iterate when passing into VisitCondition().
- if (type != Primitive::kPrimInt && type != Primitive::kPrimLong) {
- // Loop control is not 32/64-bit integral.
- } else if (a == nullptr || b == nullptr) {
- // Loop control is not a sequence.
+ // Determine if the loop control uses a known sequence on an if-exit (X outside) or on
+ // an if-iterate (X inside), expressed as if-iterate when passed into VisitCondition().
+ if (a == nullptr || b == nullptr) {
+ return; // Loop control is not a sequence.
} else if (if_true->GetLoopInformation() != loop && if_false->GetLoopInformation() == loop) {
VisitCondition(loop, a, b, type, condition->GetOppositeCondition());
} else if (if_true->GetLoopInformation() == loop && if_false->GetLoopInformation() != loop) {
@@ -559,6 +628,14 @@ void HInductionVarAnalysis::VisitCondition(HLoopInformation* loop,
(stride_value == -1 && IsTaken(lower_expr, upper_expr, kCondGE)))) {
cmp = stride_value > 0 ? kCondLT : kCondGT;
}
+ // Only accept integral condition. A mismatch between the type of condition and the induction
+ // is only allowed if the, necessarily narrower, induction range fits the narrower control.
+ if (type != Primitive::kPrimInt && type != Primitive::kPrimLong) {
+ return; // not integral
+ } else if (type != a->type &&
+ !FitsNarrowerControl(lower_expr, upper_expr, stride_value, a->type, cmp)) {
+ return; // mismatched type
+ }
// Normalize a linear loop control with a nonzero stride:
// stride > 0, either i < U or i <= U
// stride < 0, either i > U or i >= U
@@ -640,7 +717,7 @@ void HInductionVarAnalysis::VisitTripCount(HLoopInformation* loop,
InductionInfo* taken_test = CreateInvariantOp(op, lower_expr, upper_expr);
AssignInfo(loop,
loop->GetHeader()->GetLastInstruction(),
- CreateTripCount(tcKind, trip_count, taken_test));
+ CreateTripCount(tcKind, trip_count, taken_test, type));
}
bool HInductionVarAnalysis::IsTaken(InductionInfo* lower_expr,
@@ -675,10 +752,8 @@ bool HInductionVarAnalysis::IsFinite(InductionInfo* upper_expr,
int64_t stride_value,
Primitive::Type type,
IfCondition cmp) {
- const int64_t min = type == Primitive::kPrimInt ? std::numeric_limits<int32_t>::min()
- : std::numeric_limits<int64_t>::min();
- const int64_t max = type == Primitive::kPrimInt ? std::numeric_limits<int32_t>::max()
- : std::numeric_limits<int64_t>::max();
+ const int64_t min = Primitive::MinValueOfIntegralType(type);
+ const int64_t max = Primitive::MaxValueOfIntegralType(type);
// Some rules under which it is certain at compile-time that the loop is finite.
int64_t value;
switch (cmp) {
@@ -698,6 +773,31 @@ bool HInductionVarAnalysis::IsFinite(InductionInfo* upper_expr,
return false; // not certain, may be infinite
}
+bool HInductionVarAnalysis::FitsNarrowerControl(InductionInfo* lower_expr,
+ InductionInfo* upper_expr,
+ int64_t stride_value,
+ Primitive::Type type,
+ IfCondition cmp) {
+ int64_t min = Primitive::MinValueOfIntegralType(type);
+ int64_t max = Primitive::MaxValueOfIntegralType(type);
+ // Inclusive test need one extra.
+ if (stride_value != 1 && stride_value != -1) {
+ return false; // non-unit stride
+ } else if (cmp == kCondLE) {
+ max--;
+ } else if (cmp == kCondGE) {
+ min++;
+ }
+ // Do both bounds fit the range?
+ // Note: The `value` is initialized to please valgrind - the compiler can reorder
+ // the return value check with the `value` check, b/27651442 .
+ int64_t value = 0;
+ return IsAtLeast(lower_expr, &value) && value >= min &&
+ IsAtMost(lower_expr, &value) && value <= max &&
+ IsAtLeast(upper_expr, &value) && value >= min &&
+ IsAtMost(upper_expr, &value) && value <= max;
+}
+
void HInductionVarAnalysis::AssignInfo(HLoopInformation* loop,
HInstruction* instruction,
InductionInfo* info) {
@@ -794,7 +894,7 @@ HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::CreateSimplifiedInv
return CreateSimplifiedInvariant(kSub, b->op_b, b->op_a);
}
}
- return new (graph_->GetArena()) InductionInfo(kInvariant, op, a, b, nullptr);
+ return new (graph_->GetArena()) InductionInfo(kInvariant, op, a, b, nullptr, b->type);
}
bool HInductionVarAnalysis::IsExact(InductionInfo* info, int64_t* value) {
@@ -856,18 +956,22 @@ std::string HInductionVarAnalysis::InductionToString(InductionInfo* info) {
case kTripCountInBodyUnsafe: inv += " (TC-body-unsafe) "; break;
}
inv += InductionToString(info->op_b);
- return inv + ")";
+ inv += ")";
+ return inv;
} else {
DCHECK(info->operation == kNop);
if (info->induction_class == kLinear) {
return "(" + InductionToString(info->op_a) + " * i + " +
- InductionToString(info->op_b) + ")";
+ InductionToString(info->op_b) + "):" +
+ Primitive::PrettyDescriptor(info->type);
} else if (info->induction_class == kWrapAround) {
return "wrap(" + InductionToString(info->op_a) + ", " +
- InductionToString(info->op_b) + ")";
+ InductionToString(info->op_b) + "):" +
+ Primitive::PrettyDescriptor(info->type);
} else if (info->induction_class == kPeriodic) {
return "periodic(" + InductionToString(info->op_a) + ", " +
- InductionToString(info->op_b) + ")";
+ InductionToString(info->op_b) + "):" +
+ Primitive::PrettyDescriptor(info->type);
}
}
}
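
For context on why the analysis now records a type per induction and checks FitsNarrowerControl: an induction that is narrowed back to byte/short/char wraps around at that type's limits, so a control bound outside the narrow range is never reached. A plain C++ illustration of the behaviour the ByteLoopControl tests below encode in HIR (an assumption-level sketch of the intent, not ART code):

#include <cstdint>
#include <cstdio>

int main() {
  int steps = 0;
  // Mirrors ByteLoopControl1: for (byte i = -128; i < 127; i++), which just fits.
  // The narrowing cast models the HTypeConversion back to kPrimByte.
  for (int32_t i = -128; i < 127; i = static_cast<int8_t>(i + 1)) {
    ++steps;
  }
  std::printf("bound 127: %d iterations\n", steps);  // prints 255

  // ByteLoopControl2 uses bound 128 instead: when i reaches 127 the narrowing
  // increment wraps to -128, so "i < 128" stays true forever and the analysis
  // must refuse to report a trip count. (Not executed here.)
  return 0;
}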
diff --git a/compiler/optimizing/induction_var_analysis.h b/compiler/optimizing/induction_var_analysis.h
index 94d2646aec..f1965f07b2 100644
--- a/compiler/optimizing/induction_var_analysis.h
+++ b/compiler/optimizing/induction_var_analysis.h
@@ -97,17 +97,20 @@ class HInductionVarAnalysis : public HOptimization {
InductionOp op,
InductionInfo* a,
InductionInfo* b,
- HInstruction* f)
+ HInstruction* f,
+ Primitive::Type t)
: induction_class(ic),
operation(op),
op_a(a),
op_b(b),
- fetch(f) {}
+ fetch(f),
+ type(t) {}
InductionClass induction_class;
InductionOp operation;
InductionInfo* op_a;
InductionInfo* op_b;
HInstruction* fetch;
+ Primitive::Type type; // precision of induction
};
bool IsVisitedNode(HInstruction* instruction) const {
@@ -121,17 +124,24 @@ class HInductionVarAnalysis : public HOptimization {
InductionInfo* CreateInvariantFetch(HInstruction* f) {
DCHECK(f != nullptr);
- return new (graph_->GetArena()) InductionInfo(kInvariant, kFetch, nullptr, nullptr, f);
+ return new (graph_->GetArena())
+ InductionInfo(kInvariant, kFetch, nullptr, nullptr, f, f->GetType());
}
- InductionInfo* CreateTripCount(InductionOp op, InductionInfo* a, InductionInfo* b) {
+ InductionInfo* CreateTripCount(InductionOp op,
+ InductionInfo* a,
+ InductionInfo* b,
+ Primitive::Type type) {
DCHECK(a != nullptr);
- return new (graph_->GetArena()) InductionInfo(kInvariant, op, a, b, nullptr);
+ return new (graph_->GetArena()) InductionInfo(kInvariant, op, a, b, nullptr, type);
}
- InductionInfo* CreateInduction(InductionClass ic, InductionInfo* a, InductionInfo* b) {
+ InductionInfo* CreateInduction(InductionClass ic,
+ InductionInfo* a,
+ InductionInfo* b,
+ Primitive::Type type) {
DCHECK(a != nullptr && b != nullptr);
- return new (graph_->GetArena()) InductionInfo(ic, kNop, a, b, nullptr);
+ return new (graph_->GetArena()) InductionInfo(ic, kNop, a, b, nullptr, type);
}
// Methods for analysis.
@@ -148,6 +158,7 @@ class HInductionVarAnalysis : public HOptimization {
InductionInfo* TransferMul(InductionInfo* a, InductionInfo* b);
InductionInfo* TransferShl(InductionInfo* a, InductionInfo* b, Primitive::Type type);
InductionInfo* TransferNeg(InductionInfo* a);
+ InductionInfo* TransferCnv(InductionInfo* a, Primitive::Type from, Primitive::Type to);
// Solvers.
InductionInfo* SolvePhi(HInstruction* phi, size_t input_index);
@@ -161,6 +172,7 @@ class HInductionVarAnalysis : public HOptimization {
HInstruction* y,
InductionOp op,
bool is_first_call);
+ InductionInfo* SolveCnv(HTypeConversion* conversion);
// Trip count information.
void VisitControl(HLoopInformation* loop);
@@ -181,6 +193,11 @@ class HInductionVarAnalysis : public HOptimization {
int64_t stride_value,
Primitive::Type type,
IfCondition cmp);
+ bool FitsNarrowerControl(InductionInfo* lower_expr,
+ InductionInfo* upper_expr,
+ int64_t stride_value,
+ Primitive::Type type,
+ IfCondition cmp);
// Assign and lookup.
void AssignInfo(HLoopInformation* loop, HInstruction* instruction, InductionInfo* info);
@@ -205,6 +222,7 @@ class HInductionVarAnalysis : public HOptimization {
ArenaVector<HInstruction*> scc_;
ArenaSafeMap<HInstruction*, NodeInfo> map_;
ArenaSafeMap<HInstruction*, InductionInfo*> cycle_;
+ Primitive::Type type_;
/**
* Maintains the results of the analysis as a mapping from loops to a mapping from instructions
diff --git a/compiler/optimizing/induction_var_analysis_test.cc b/compiler/optimizing/induction_var_analysis_test.cc
index 89e4690de2..0fbb67d0d9 100644
--- a/compiler/optimizing/induction_var_analysis_test.cc
+++ b/compiler/optimizing/induction_var_analysis_test.cc
@@ -202,6 +202,7 @@ TEST_F(InductionVarAnalysisTest, ProperLoopSetup) {
// }
BuildLoopNest(10);
graph_->BuildDominatorTree();
+
ASSERT_EQ(entry_->GetLoopInformation(), nullptr);
for (int d = 0; d < 1; d++) {
ASSERT_EQ(loop_preheader_[d]->GetLoopInformation(),
@@ -224,8 +225,8 @@ TEST_F(InductionVarAnalysisTest, FindBasicInduction) {
HInstruction* store = InsertArrayStore(basic_[0], 0);
PerformInductionVarAnalysis();
- EXPECT_STREQ("((1) * i + (0))", GetInductionInfo(store->InputAt(1), 0).c_str());
- EXPECT_STREQ("((1) * i + (1))", GetInductionInfo(increment_[0], 0).c_str());
+ EXPECT_STREQ("((1) * i + (0)):PrimInt", GetInductionInfo(store->InputAt(1), 0).c_str());
+ EXPECT_STREQ("((1) * i + (1)):PrimInt", GetInductionInfo(increment_[0], 0).c_str());
// Trip-count.
EXPECT_STREQ("((100) (TC-loop) ((0) < (100)))",
@@ -254,11 +255,11 @@ TEST_F(InductionVarAnalysisTest, FindDerivedInduction) {
new (&allocator_) HNeg(Primitive::kPrimInt, basic_[0]), 0);
PerformInductionVarAnalysis();
- EXPECT_STREQ("((1) * i + (100))", GetInductionInfo(add, 0).c_str());
- EXPECT_STREQ("(( - (1)) * i + (100))", GetInductionInfo(sub, 0).c_str());
- EXPECT_STREQ("((100) * i + (0))", GetInductionInfo(mul, 0).c_str());
- EXPECT_STREQ("((2) * i + (0))", GetInductionInfo(shl, 0).c_str());
- EXPECT_STREQ("(( - (1)) * i + (0))", GetInductionInfo(neg, 0).c_str());
+ EXPECT_STREQ("((1) * i + (100)):PrimInt", GetInductionInfo(add, 0).c_str());
+ EXPECT_STREQ("(( - (1)) * i + (100)):PrimInt", GetInductionInfo(sub, 0).c_str());
+ EXPECT_STREQ("((100) * i + (0)):PrimInt", GetInductionInfo(mul, 0).c_str());
+ EXPECT_STREQ("((2) * i + (0)):PrimInt", GetInductionInfo(shl, 0).c_str());
+ EXPECT_STREQ("(( - (1)) * i + (0)):PrimInt", GetInductionInfo(neg, 0).c_str());
}
TEST_F(InductionVarAnalysisTest, FindChainInduction) {
@@ -283,9 +284,9 @@ TEST_F(InductionVarAnalysisTest, FindChainInduction) {
k->AddInput(sub);
PerformInductionVarAnalysis();
- EXPECT_STREQ("(((100) - (1)) * i + (100))",
+ EXPECT_STREQ("(((100) - (1)) * i + (100)):PrimInt",
GetInductionInfo(store1->InputAt(1), 0).c_str());
- EXPECT_STREQ("(((100) - (1)) * i + ((100) - (1)))",
+ EXPECT_STREQ("(((100) - (1)) * i + ((100) - (1))):PrimInt",
GetInductionInfo(store2->InputAt(1), 0).c_str());
}
@@ -318,7 +319,7 @@ TEST_F(InductionVarAnalysisTest, FindTwoWayBasicInduction) {
k_header->AddInput(k_body);
PerformInductionVarAnalysis();
- EXPECT_STREQ("((1) * i + (1))", GetInductionInfo(store->InputAt(1), 0).c_str());
+ EXPECT_STREQ("((1) * i + (1)):PrimInt", GetInductionInfo(store->InputAt(1), 0).c_str());
}
TEST_F(InductionVarAnalysisTest, FindTwoWayDerivedInduction) {
@@ -345,7 +346,7 @@ TEST_F(InductionVarAnalysisTest, FindTwoWayDerivedInduction) {
HInstruction* store = InsertArrayStore(k, 0);
PerformInductionVarAnalysis();
- EXPECT_STREQ("((1) * i + (1))", GetInductionInfo(store->InputAt(1), 0).c_str());
+ EXPECT_STREQ("((1) * i + (1)):PrimInt", GetInductionInfo(store->InputAt(1), 0).c_str());
}
TEST_F(InductionVarAnalysisTest, FindFirstOrderWrapAroundInduction) {
@@ -365,7 +366,7 @@ TEST_F(InductionVarAnalysisTest, FindFirstOrderWrapAroundInduction) {
k->AddInput(sub);
PerformInductionVarAnalysis();
- EXPECT_STREQ("wrap((0), (( - (1)) * i + (100)))",
+ EXPECT_STREQ("wrap((0), (( - (1)) * i + (100)):PrimInt):PrimInt",
GetInductionInfo(store->InputAt(1), 0).c_str());
}
@@ -391,7 +392,7 @@ TEST_F(InductionVarAnalysisTest, FindSecondOrderWrapAroundInduction) {
t->AddInput(sub);
PerformInductionVarAnalysis();
- EXPECT_STREQ("wrap((0), wrap((100), (( - (1)) * i + (100))))",
+ EXPECT_STREQ("wrap((0), wrap((100), (( - (1)) * i + (100)):PrimInt):PrimInt):PrimInt",
GetInductionInfo(store->InputAt(1), 0).c_str());
}
@@ -424,11 +425,16 @@ TEST_F(InductionVarAnalysisTest, FindWrapAroundDerivedInduction) {
InsertInstruction(new (&allocator_) HShl(Primitive::kPrimInt, basic_[0], constant1_), 0));
PerformInductionVarAnalysis();
- EXPECT_STREQ("wrap((100), ((2) * i + (100)))", GetInductionInfo(add, 0).c_str());
- EXPECT_STREQ("wrap(((0) - (100)), ((2) * i + ((0) - (100))))", GetInductionInfo(sub, 0).c_str());
- EXPECT_STREQ("wrap((0), (((2) * (100)) * i + (0)))", GetInductionInfo(mul, 0).c_str());
- EXPECT_STREQ("wrap((0), (((2) * (2)) * i + (0)))", GetInductionInfo(shl, 0).c_str());
- EXPECT_STREQ("wrap((0), (( - (2)) * i + (0)))", GetInductionInfo(neg, 0).c_str());
+ EXPECT_STREQ("wrap((100), ((2) * i + (100)):PrimInt):PrimInt",
+ GetInductionInfo(add, 0).c_str());
+ EXPECT_STREQ("wrap(((0) - (100)), ((2) * i + ((0) - (100))):PrimInt):PrimInt",
+ GetInductionInfo(sub, 0).c_str());
+ EXPECT_STREQ("wrap((0), (((2) * (100)) * i + (0)):PrimInt):PrimInt",
+ GetInductionInfo(mul, 0).c_str());
+ EXPECT_STREQ("wrap((0), (((2) * (2)) * i + (0)):PrimInt):PrimInt",
+ GetInductionInfo(shl, 0).c_str());
+ EXPECT_STREQ("wrap((0), (( - (2)) * i + (0)):PrimInt):PrimInt",
+ GetInductionInfo(neg, 0).c_str());
}
TEST_F(InductionVarAnalysisTest, FindPeriodicInduction) {
@@ -455,8 +461,8 @@ TEST_F(InductionVarAnalysisTest, FindPeriodicInduction) {
t->AddInput(k);
PerformInductionVarAnalysis();
- EXPECT_STREQ("periodic((0), (100))", GetInductionInfo(store1->InputAt(1), 0).c_str());
- EXPECT_STREQ("periodic((100), (0))", GetInductionInfo(store2->InputAt(1), 0).c_str());
+ EXPECT_STREQ("periodic((0), (100)):PrimInt", GetInductionInfo(store1->InputAt(1), 0).c_str());
+ EXPECT_STREQ("periodic((100), (0)):PrimInt", GetInductionInfo(store2->InputAt(1), 0).c_str());
}
TEST_F(InductionVarAnalysisTest, FindIdiomaticPeriodicInduction) {
@@ -476,8 +482,8 @@ TEST_F(InductionVarAnalysisTest, FindIdiomaticPeriodicInduction) {
k->AddInput(sub);
PerformInductionVarAnalysis();
- EXPECT_STREQ("periodic((0), (1))", GetInductionInfo(store->InputAt(1), 0).c_str());
- EXPECT_STREQ("periodic((1), (0))", GetInductionInfo(sub, 0).c_str());
+ EXPECT_STREQ("periodic((0), (1)):PrimInt", GetInductionInfo(store->InputAt(1), 0).c_str());
+ EXPECT_STREQ("periodic((1), (0)):PrimInt", GetInductionInfo(sub, 0).c_str());
}
TEST_F(InductionVarAnalysisTest, FindDerivedPeriodicInduction) {
@@ -512,11 +518,11 @@ TEST_F(InductionVarAnalysisTest, FindDerivedPeriodicInduction) {
new (&allocator_) HNeg(Primitive::kPrimInt, k_body), 0);
PerformInductionVarAnalysis();
- EXPECT_STREQ("periodic(((1) + (100)), (100))", GetInductionInfo(add, 0).c_str());
- EXPECT_STREQ("periodic(((1) - (100)), ((0) - (100)))", GetInductionInfo(sub, 0).c_str());
- EXPECT_STREQ("periodic((100), (0))", GetInductionInfo(mul, 0).c_str());
- EXPECT_STREQ("periodic((2), (0))", GetInductionInfo(shl, 0).c_str());
- EXPECT_STREQ("periodic(( - (1)), (0))", GetInductionInfo(neg, 0).c_str());
+ EXPECT_STREQ("periodic(((1) + (100)), (100)):PrimInt", GetInductionInfo(add, 0).c_str());
+ EXPECT_STREQ("periodic(((1) - (100)), ((0) - (100))):PrimInt", GetInductionInfo(sub, 0).c_str());
+ EXPECT_STREQ("periodic((100), (0)):PrimInt", GetInductionInfo(mul, 0).c_str());
+ EXPECT_STREQ("periodic((2), (0)):PrimInt", GetInductionInfo(shl, 0).c_str());
+ EXPECT_STREQ("periodic(( - (1)), (0)):PrimInt", GetInductionInfo(neg, 0).c_str());
}
TEST_F(InductionVarAnalysisTest, FindDeepLoopInduction) {
@@ -549,7 +555,7 @@ TEST_F(InductionVarAnalysisTest, FindDeepLoopInduction) {
// Avoid exact phi number, since that depends on the SSA building phase.
std::regex r("\\(\\(1\\) \\* i \\+ "
- "\\(\\(1\\) \\+ \\(\\d+:Phi\\)\\)\\)");
+ "\\(\\(1\\) \\+ \\(\\d+:Phi\\)\\)\\):PrimInt");
for (int d = 0; d < 10; d++) {
if (d == 9) {
@@ -557,11 +563,122 @@ TEST_F(InductionVarAnalysisTest, FindDeepLoopInduction) {
} else {
EXPECT_STREQ("", GetInductionInfo(store->InputAt(1), d).c_str());
}
- EXPECT_STREQ("((1) * i + (1))", GetInductionInfo(increment_[d], d).c_str());
+ EXPECT_STREQ("((1) * i + (1)):PrimInt", GetInductionInfo(increment_[d], d).c_str());
// Trip-count.
EXPECT_STREQ("((100) (TC-loop) ((0) < (100)))",
GetInductionInfo(loop_header_[d]->GetLastInstruction(), d).c_str());
}
}
+TEST_F(InductionVarAnalysisTest, ByteLoopControl1) {
+ // Setup:
+ // for (byte i = -128; i < 127; i++) { // just fits!
+ // }
+ BuildLoopNest(1);
+ basic_[0]->ReplaceInput(graph_->GetIntConstant(-128), 0);
+ HInstruction* ifs = loop_header_[0]->GetLastInstruction()->GetPrevious();
+ ifs->ReplaceInput(graph_->GetIntConstant(127), 1);
+ HInstruction* conv = new(&allocator_) HTypeConversion(Primitive::kPrimByte, increment_[0], -1);
+ loop_body_[0]->InsertInstructionBefore(conv, increment_[0]->GetNext());
+ basic_[0]->ReplaceInput(conv, 1);
+ PerformInductionVarAnalysis();
+
+ EXPECT_STREQ("((1) * i + ((-128) + (1))):PrimByte", GetInductionInfo(increment_[0], 0).c_str());
+ // Trip-count.
+ EXPECT_STREQ("(((127) - (-128)) (TC-loop) ((-128) < (127)))",
+ GetInductionInfo(loop_header_[0]->GetLastInstruction(), 0).c_str());
+}
+
+TEST_F(InductionVarAnalysisTest, ByteLoopControl2) {
+ // Setup:
+ // for (byte i = -128; i < 128; i++) { // infinite loop!
+ // }
+ BuildLoopNest(1);
+ basic_[0]->ReplaceInput(graph_->GetIntConstant(-128), 0);
+ HInstruction* ifs = loop_header_[0]->GetLastInstruction()->GetPrevious();
+ ifs->ReplaceInput(graph_->GetIntConstant(128), 1);
+ HInstruction* conv = new(&allocator_) HTypeConversion(Primitive::kPrimByte, increment_[0], -1);
+ loop_body_[0]->InsertInstructionBefore(conv, increment_[0]->GetNext());
+ basic_[0]->ReplaceInput(conv, 1);
+ PerformInductionVarAnalysis();
+
+ EXPECT_STREQ("((1) * i + ((-128) + (1))):PrimByte", GetInductionInfo(increment_[0], 0).c_str());
+ // Trip-count undefined.
+ EXPECT_STREQ("", GetInductionInfo(loop_header_[0]->GetLastInstruction(), 0).c_str());
+}
+
+TEST_F(InductionVarAnalysisTest, ShortLoopControl1) {
+ // Setup:
+ // for (short i = -32768; i < 32767; i++) { // just fits!
+ // }
+ BuildLoopNest(1);
+ basic_[0]->ReplaceInput(graph_->GetIntConstant(-32768), 0);
+ HInstruction* ifs = loop_header_[0]->GetLastInstruction()->GetPrevious();
+ ifs->ReplaceInput(graph_->GetIntConstant(32767), 1);
+ HInstruction* conv = new(&allocator_) HTypeConversion(Primitive::kPrimShort, increment_[0], -1);
+ loop_body_[0]->InsertInstructionBefore(conv, increment_[0]->GetNext());
+ basic_[0]->ReplaceInput(conv, 1);
+ PerformInductionVarAnalysis();
+
+ EXPECT_STREQ("((1) * i + ((-32768) + (1))):PrimShort",
+ GetInductionInfo(increment_[0], 0).c_str());
+ // Trip-count.
+ EXPECT_STREQ("(((32767) - (-32768)) (TC-loop) ((-32768) < (32767)))",
+ GetInductionInfo(loop_header_[0]->GetLastInstruction(), 0).c_str());
+}
+
+TEST_F(InductionVarAnalysisTest, ShortLoopControl2) {
+ // Setup:
+ // for (short i = -32768; i < 32768; i++) { // infinite loop!
+ // }
+ BuildLoopNest(1);
+ basic_[0]->ReplaceInput(graph_->GetIntConstant(-32768), 0);
+ HInstruction* ifs = loop_header_[0]->GetLastInstruction()->GetPrevious();
+ ifs->ReplaceInput(graph_->GetIntConstant(32768), 1);
+ HInstruction* conv = new(&allocator_) HTypeConversion(Primitive::kPrimShort, increment_[0], -1);
+ loop_body_[0]->InsertInstructionBefore(conv, increment_[0]->GetNext());
+ basic_[0]->ReplaceInput(conv, 1);
+ PerformInductionVarAnalysis();
+
+ EXPECT_STREQ("((1) * i + ((-32768) + (1))):PrimShort",
+ GetInductionInfo(increment_[0], 0).c_str());
+ // Trip-count undefined.
+ EXPECT_STREQ("", GetInductionInfo(loop_header_[0]->GetLastInstruction(), 0).c_str());
+}
+
+TEST_F(InductionVarAnalysisTest, CharLoopControl1) {
+ // Setup:
+ // for (char i = 0; i < 65535; i++) { // just fits!
+ // }
+ BuildLoopNest(1);
+ HInstruction* ifs = loop_header_[0]->GetLastInstruction()->GetPrevious();
+ ifs->ReplaceInput(graph_->GetIntConstant(65535), 1);
+ HInstruction* conv = new(&allocator_) HTypeConversion(Primitive::kPrimChar, increment_[0], -1);
+ loop_body_[0]->InsertInstructionBefore(conv, increment_[0]->GetNext());
+ basic_[0]->ReplaceInput(conv, 1);
+ PerformInductionVarAnalysis();
+
+ EXPECT_STREQ("((1) * i + (1)):PrimChar", GetInductionInfo(increment_[0], 0).c_str());
+ // Trip-count.
+ EXPECT_STREQ("((65535) (TC-loop) ((0) < (65535)))",
+ GetInductionInfo(loop_header_[0]->GetLastInstruction(), 0).c_str());
+}
+
+TEST_F(InductionVarAnalysisTest, CharLoopControl2) {
+ // Setup:
+ // for (char i = 0; i < 65536; i++) { // infinite loop!
+ // }
+ BuildLoopNest(1);
+ HInstruction* ifs = loop_header_[0]->GetLastInstruction()->GetPrevious();
+ ifs->ReplaceInput(graph_->GetIntConstant(65536), 1);
+ HInstruction* conv = new(&allocator_) HTypeConversion(Primitive::kPrimChar, increment_[0], -1);
+ loop_body_[0]->InsertInstructionBefore(conv, increment_[0]->GetNext());
+ basic_[0]->ReplaceInput(conv, 1);
+ PerformInductionVarAnalysis();
+
+ EXPECT_STREQ("((1) * i + (1)):PrimChar", GetInductionInfo(increment_[0], 0).c_str());
+ // Trip-count undefined.
+ EXPECT_STREQ("", GetInductionInfo(loop_header_[0]->GetLastInstruction(), 0).c_str());
+}
+
} // namespace art
diff --git a/compiler/optimizing/induction_var_range.cc b/compiler/optimizing/induction_var_range.cc
index f9b6910acd..bc920d96b5 100644
--- a/compiler/optimizing/induction_var_range.cc
+++ b/compiler/optimizing/induction_var_range.cc
@@ -58,13 +58,13 @@ static bool IsIntAndGet(HInstruction* instruction, int64_t* value) {
}
/**
- * An upper bound a * (length / a) + b, where a > 0, can be conservatively rewritten as length + b
+ * An upper bound a * (length / a) + b, where a >= 1, can be conservatively rewritten as length + b
* because length >= 0 is true. This makes it more likely the bound is useful to clients.
*/
static InductionVarRange::Value SimplifyMax(InductionVarRange::Value v) {
int64_t value;
if (v.is_known &&
- v.a_constant > 1 &&
+ v.a_constant >= 1 &&
v.instruction->IsDiv() &&
v.instruction->InputAt(0)->IsArrayLength() &&
IsIntAndGet(v.instruction->InputAt(1), &value) && v.a_constant == value) {
@@ -73,6 +73,28 @@ static InductionVarRange::Value SimplifyMax(InductionVarRange::Value v) {
return v;
}
+/**
+ * Corrects a value for type to account for arithmetic wrap-around in lower precision.
+ */
+static InductionVarRange::Value CorrectForType(InductionVarRange::Value v, Primitive::Type type) {
+ switch (type) {
+ case Primitive::kPrimShort:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimByte: {
+ // Constants within range only.
+ // TODO: maybe some room for improvement, like allowing widening conversions
+ const int32_t min = Primitive::MinValueOfIntegralType(type);
+ const int32_t max = Primitive::MaxValueOfIntegralType(type);
+ return (v.is_known && v.a_constant == 0 && min <= v.b_constant && v.b_constant <= max)
+ ? v
+ : InductionVarRange::Value();
+ }
+ default:
+ // At int or higher.
+ return v;
+ }
+}
+
/** Helper method to test for a constant value. */
static bool IsConstantValue(InductionVarRange::Value v) {
return v.is_known && v.a_constant == 0;
@@ -114,6 +136,18 @@ bool InductionVarRange::GetInductionRange(HInstruction* context,
if (info == nullptr) {
return false; // no induction information
}
+ // Type int or lower (this is not too restrictive since intended clients, like
+ // bounds check elimination, will have truncated higher precision induction
+ // at their use point already).
+ switch (info->type) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimByte:
+ break;
+ default:
+ return false;
+ }
// Set up loop information.
HBasicBlock* header = loop->GetHeader();
bool in_body = context->GetBlock() != header;
@@ -128,25 +162,27 @@ bool InductionVarRange::GetInductionRange(HInstruction* context,
bool InductionVarRange::RefineOuter(/*in-out*/ Value* min_val,
/*in-out*/ Value* max_val) const {
- Value v1_min = RefineOuter(*min_val, /* is_min */ true);
- Value v2_max = RefineOuter(*max_val, /* is_min */ false);
- // The refined range is safe if both sides refine the same instruction. Otherwise, since two
- // different ranges are combined, the new refined range is safe to pass back to the client if
- // the extremes of the computed ranges ensure no arithmetic wrap-around anomalies occur.
- if (min_val->instruction != max_val->instruction) {
- Value v1_max = RefineOuter(*min_val, /* is_min */ false);
- Value v2_min = RefineOuter(*max_val, /* is_min */ true);
- if (!IsConstantValue(v1_max) ||
- !IsConstantValue(v2_min) ||
- v1_max.b_constant > v2_min.b_constant) {
- return false;
+ if (min_val->instruction != nullptr || max_val->instruction != nullptr) {
+ Value v1_min = RefineOuter(*min_val, /* is_min */ true);
+ Value v2_max = RefineOuter(*max_val, /* is_min */ false);
+ // The refined range is safe if both sides refine the same instruction. Otherwise, since two
+ // different ranges are combined, the new refined range is safe to pass back to the client if
+ // the extremes of the computed ranges ensure no arithmetic wrap-around anomalies occur.
+ if (min_val->instruction != max_val->instruction) {
+ Value v1_max = RefineOuter(*min_val, /* is_min */ false);
+ Value v2_min = RefineOuter(*max_val, /* is_min */ true);
+ if (!IsConstantValue(v1_max) ||
+ !IsConstantValue(v2_min) ||
+ v1_max.b_constant > v2_min.b_constant) {
+ return false;
+ }
+ }
+ // Did something change?
+ if (v1_min.instruction != min_val->instruction || v2_max.instruction != max_val->instruction) {
+ *min_val = v1_min;
+ *max_val = v2_max;
+ return true;
}
- }
- // Did something change?
- if (v1_min.instruction != min_val->instruction || v2_max.instruction != max_val->instruction) {
- *min_val = v1_min;
- *max_val = v2_max;
- return true;
}
return false;
}
@@ -277,7 +313,12 @@ InductionVarRange::Value InductionVarRange::GetLinear(HInductionVarAnalysis::Ind
if (HInductionVarAnalysis::InductionEqual(trip_expr->op_b, info->op_b)) {
// Analyze cancelled trip with just the positive operand (trip_expr->op_a).
HInductionVarAnalysis::InductionInfo cancelled_trip(
- trip->induction_class, trip->operation, trip_expr->op_a, trip->op_b, nullptr);
+ trip->induction_class,
+ trip->operation,
+ trip_expr->op_a,
+ trip->op_b,
+ nullptr,
+ trip->type);
return GetVal(&cancelled_trip, trip, in_body, is_min);
}
} else if (is_min && stride_value == -1) {
@@ -289,9 +330,10 @@ InductionVarRange::Value InductionVarRange::GetLinear(HInductionVarAnalysis::Ind
HInductionVarAnalysis::kNeg,
nullptr,
trip_expr->op_b,
- nullptr);
+ nullptr,
+ trip->type);
HInductionVarAnalysis::InductionInfo cancelled_trip(
- trip->induction_class, trip->operation, &neg, trip->op_b, nullptr);
+ trip->induction_class, trip->operation, &neg, trip->op_b, nullptr, trip->type);
return SubValue(Value(0), GetVal(&cancelled_trip, trip, in_body, !is_min));
}
}
@@ -322,6 +364,12 @@ InductionVarRange::Value InductionVarRange::GetFetch(HInstruction* instruction,
}
} else if (instruction->IsArrayLength() && instruction->InputAt(0)->IsNewArray()) {
return GetFetch(instruction->InputAt(0)->InputAt(0), trip, in_body, is_min);
+ } else if (instruction->IsTypeConversion()) {
+ // Since analysis is 32-bit (or narrower) we allow a widening along the path.
+ if (instruction->AsTypeConversion()->GetInputType() == Primitive::kPrimInt &&
+ instruction->AsTypeConversion()->GetResultType() == Primitive::kPrimLong) {
+ return GetFetch(instruction->InputAt(0), trip, in_body, is_min);
+ }
} else if (is_min) {
// Special case for finding minimum: minimum of trip-count in loop-body is 1.
if (trip != nullptr && in_body && instruction == trip->op_a->fetch) {
@@ -374,7 +422,7 @@ InductionVarRange::Value InductionVarRange::GetVal(HInductionVarAnalysis::Induct
}
break;
case HInductionVarAnalysis::kLinear: {
- return GetLinear(info, trip, in_body, is_min);
+ return CorrectForType(GetLinear(info, trip, in_body, is_min), info->type);
}
case HInductionVarAnalysis::kWrapAround:
case HInductionVarAnalysis::kPeriodic:
@@ -613,8 +661,12 @@ bool InductionVarRange::GenerateCode(HInductionVarAnalysis::InductionInfo* info,
bool in_body,
bool is_min) const {
if (info != nullptr) {
- // Handle current operation.
+ // Verify type safety.
Primitive::Type type = Primitive::kPrimInt;
+ if (info->type != type) {
+ return false;
+ }
+ // Handle current operation.
HInstruction* opa = nullptr;
HInstruction* opb = nullptr;
switch (info->induction_class) {
@@ -667,13 +719,10 @@ bool InductionVarRange::GenerateCode(HInductionVarAnalysis::InductionInfo* info,
}
break;
case HInductionVarAnalysis::kFetch:
- if (info->fetch->GetType() == type) {
- if (graph != nullptr) {
- *result = info->fetch; // already in HIR
- }
- return true;
+ if (graph != nullptr) {
+ *result = info->fetch; // already in HIR
}
- break;
+ return true;
case HInductionVarAnalysis::kTripCountInLoop:
case HInductionVarAnalysis::kTripCountInLoopUnsafe:
if (!in_body && !is_min) { // one extra!
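
The CorrectForType change above guards range results for sub-int inductions: an endpoint computed with 32-bit arithmetic is only trusted when it is a plain constant that already fits the narrow type. A small sketch of that filter with local stand-in types (the fields mirror InductionVarRange::Value, but this is not ART code):

#include <cstdio>

struct RangeValue {
  bool is_known = false;
  int a_constant = 0;   // coefficient of a symbolic instruction; 0 means pure constant
  int b_constant = 0;
};

RangeValue CorrectForByte(RangeValue v) {
  const int kMin = -128;
  const int kMax = 127;
  // Keep only plain constants that already fit the byte range; anything else
  // could have wrapped in the narrow type, so report "unknown" instead.
  if (v.is_known && v.a_constant == 0 && kMin <= v.b_constant && v.b_constant <= kMax) {
    return v;
  }
  return RangeValue();
}

int main() {
  RangeValue fits{true, 0, 100};       // kept
  RangeValue wrapped{true, 0, 200};    // 200 cannot be a byte value: dropped
  std::printf("fits: %d  wrapped: %d\n",
              CorrectForByte(fits).is_known, CorrectForByte(wrapped).is_known);
  return 0;
}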
diff --git a/compiler/optimizing/induction_var_range_test.cc b/compiler/optimizing/induction_var_range_test.cc
index c5c33bd9bc..dc04dc2c49 100644
--- a/compiler/optimizing/induction_var_range_test.cc
+++ b/compiler/optimizing/induction_var_range_test.cc
@@ -139,37 +139,40 @@ class InductionVarRangeTest : public CommonCompilerTest {
/** Constructs a trip-count. */
HInductionVarAnalysis::InductionInfo* CreateTripCount(int32_t tc, bool in_loop, bool safe) {
+ Primitive::Type type = Primitive::kPrimInt;
if (in_loop && safe) {
return iva_->CreateTripCount(
- HInductionVarAnalysis::kTripCountInLoop, CreateConst(tc), nullptr);
+ HInductionVarAnalysis::kTripCountInLoop, CreateConst(tc), nullptr, type);
} else if (in_loop) {
return iva_->CreateTripCount(
- HInductionVarAnalysis::kTripCountInLoopUnsafe, CreateConst(tc), nullptr);
+ HInductionVarAnalysis::kTripCountInLoopUnsafe, CreateConst(tc), nullptr, type);
} else if (safe) {
return iva_->CreateTripCount(
- HInductionVarAnalysis::kTripCountInBody, CreateConst(tc), nullptr);
+ HInductionVarAnalysis::kTripCountInBody, CreateConst(tc), nullptr, type);
} else {
return iva_->CreateTripCount(
- HInductionVarAnalysis::kTripCountInBodyUnsafe, CreateConst(tc), nullptr);
+ HInductionVarAnalysis::kTripCountInBodyUnsafe, CreateConst(tc), nullptr, type);
}
}
/** Constructs a linear a * i + b induction. */
HInductionVarAnalysis::InductionInfo* CreateLinear(int32_t a, int32_t b) {
- return iva_->CreateInduction(HInductionVarAnalysis::kLinear, CreateConst(a), CreateConst(b));
+ return iva_->CreateInduction(
+ HInductionVarAnalysis::kLinear, CreateConst(a), CreateConst(b), Primitive::kPrimInt);
}
/** Constructs a range [lo, hi] using a periodic induction. */
HInductionVarAnalysis::InductionInfo* CreateRange(int32_t lo, int32_t hi) {
return iva_->CreateInduction(
- HInductionVarAnalysis::kPeriodic, CreateConst(lo), CreateConst(hi));
+ HInductionVarAnalysis::kPeriodic, CreateConst(lo), CreateConst(hi), Primitive::kPrimInt);
}
/** Constructs a wrap-around induction consisting of a constant, followed info */
HInductionVarAnalysis::InductionInfo* CreateWrapAround(
int32_t initial,
HInductionVarAnalysis::InductionInfo* info) {
- return iva_->CreateInduction(HInductionVarAnalysis::kWrapAround, CreateConst(initial), info);
+ return iva_->CreateInduction(
+ HInductionVarAnalysis::kWrapAround, CreateConst(initial), info, Primitive::kPrimInt);
}
/** Constructs a wrap-around induction consisting of a constant, followed by a range. */
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 4a8186a659..820c696033 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -1538,7 +1538,10 @@ void InstructionSimplifierVisitor::SimplifyRotate(HInvoke* invoke,
HInstruction* distance = invoke->InputAt(1);
// Replace the invoke with an HRor.
if (is_left) {
- distance = new (GetGraph()->GetArena()) HNeg(distance->GetType(), distance);
+ // Unconditionally set the type of the negated distance to `int`,
+ // as shift and rotate operations expect a 32-bit (or narrower)
+ // value for their distance input.
+ distance = new (GetGraph()->GetArena()) HNeg(Primitive::kPrimInt, distance);
invoke->GetBlock()->InsertInstructionBefore(distance, invoke);
}
HRor* ror = new (GetGraph()->GetArena()) HRor(type, value, distance);
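
On the instruction_simplifier.cc change above: a rotate consumes its distance only modulo the operand width, so truncating a wider distance to int before negating (what the int-typed HNeg produces) selects the same rotation. A standalone check under that reading, using a plain helper rather than the HRor node:

#include <cstdint>
#include <cstdio>

// Rotate right by the distance modulo 32.
static uint32_t RotateRight(uint32_t x, uint32_t d) {
  d &= 31u;
  return (x >> d) | (x << ((32u - d) & 31u));
}

int main() {
  const uint32_t x = 0x12345678u;
  const int64_t distances[] = {0, 1, 31, 32, 33, 1000000007LL, -5, -64, (1LL << 40) + 3};
  for (int64_t d : distances) {
    // Rotate-left by d equals rotate-right by -d. Compare negating the full
    // 64-bit distance with negating its 32-bit truncation (the HNeg(int) case).
    uint32_t via_wide = RotateRight(x, static_cast<uint32_t>(-d));
    uint32_t via_int  = RotateRight(x, static_cast<uint32_t>(-static_cast<int32_t>(d)));
    if (via_wide != via_int) {
      std::puts("mismatch");
      return 1;
    }
  }
  std::puts("negating the truncated 32-bit distance selects the same rotation");
  return 0;
}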
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index c306cf93a1..1280587276 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -1475,6 +1475,404 @@ void IntrinsicCodeGeneratorMIPS::VisitThreadCurrentThread(HInvoke* invoke) {
Thread::PeerOffset<kMipsPointerSize>().Int32Value());
}
+static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
+ bool can_call =
+ invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObject ||
+ invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile;
+ LocationSummary* locations = new (arena) LocationSummary(invoke,
+ can_call ?
+ LocationSummary::kCallOnSlowPath :
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetInAt(2, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+static void GenUnsafeGet(HInvoke* invoke,
+ Primitive::Type type,
+ bool is_volatile,
+ bool is_R6,
+ CodeGeneratorMIPS* codegen) {
+ LocationSummary* locations = invoke->GetLocations();
+ DCHECK((type == Primitive::kPrimInt) ||
+ (type == Primitive::kPrimLong) ||
+ (type == Primitive::kPrimNot)) << type;
+ MipsAssembler* assembler = codegen->GetAssembler();
+ // Object pointer.
+ Register base = locations->InAt(1).AsRegister<Register>();
+ // The "offset" argument is passed as a "long". Since this code is for
+ // a 32-bit processor, we can only use 32-bit addresses, so we only
+ // need the low 32-bits of offset.
+ Register offset_lo = invoke->GetLocations()->InAt(2).AsRegisterPairLow<Register>();
+
+ __ Addu(TMP, base, offset_lo);
+ if (is_volatile) {
+ __ Sync(0);
+ }
+ if (type == Primitive::kPrimLong) {
+ Register trg_lo = locations->Out().AsRegisterPairLow<Register>();
+ Register trg_hi = locations->Out().AsRegisterPairHigh<Register>();
+
+ if (is_R6) {
+ __ Lw(trg_lo, TMP, 0);
+ __ Lw(trg_hi, TMP, 4);
+ } else {
+ __ Lwr(trg_lo, TMP, 0);
+ __ Lwl(trg_lo, TMP, 3);
+ __ Lwr(trg_hi, TMP, 4);
+ __ Lwl(trg_hi, TMP, 7);
+ }
+ } else {
+ Register trg = locations->Out().AsRegister<Register>();
+
+ if (is_R6) {
+ __ Lw(trg, TMP, 0);
+ } else {
+ __ Lwr(trg, TMP, 0);
+ __ Lwl(trg, TMP, 3);
+ }
+ }
+}
+
+// int sun.misc.Unsafe.getInt(Object o, long offset)
+void IntrinsicLocationsBuilderMIPS::VisitUnsafeGet(HInvoke* invoke) {
+ CreateIntIntIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitUnsafeGet(HInvoke* invoke) {
+ GenUnsafeGet(invoke, Primitive::kPrimInt, /* is_volatile */ false, IsR6(), codegen_);
+}
+
+// int sun.misc.Unsafe.getIntVolatile(Object o, long offset)
+void IntrinsicLocationsBuilderMIPS::VisitUnsafeGetVolatile(HInvoke* invoke) {
+ CreateIntIntIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetVolatile(HInvoke* invoke) {
+ GenUnsafeGet(invoke, Primitive::kPrimInt, /* is_volatile */ true, IsR6(), codegen_);
+}
+
+// long sun.misc.Unsafe.getLong(Object o, long offset)
+void IntrinsicLocationsBuilderMIPS::VisitUnsafeGetLong(HInvoke* invoke) {
+ CreateIntIntIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetLong(HInvoke* invoke) {
+ GenUnsafeGet(invoke, Primitive::kPrimLong, /* is_volatile */ false, IsR6(), codegen_);
+}
+
+// long sun.misc.Unsafe.getLongVolatile(Object o, long offset)
+void IntrinsicLocationsBuilderMIPS::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
+ CreateIntIntIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
+ GenUnsafeGet(invoke, Primitive::kPrimLong, /* is_volatile */ true, IsR6(), codegen_);
+}
+
+// Object sun.misc.Unsafe.getObject(Object o, long offset)
+void IntrinsicLocationsBuilderMIPS::VisitUnsafeGetObject(HInvoke* invoke) {
+ CreateIntIntIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetObject(HInvoke* invoke) {
+ GenUnsafeGet(invoke, Primitive::kPrimNot, /* is_volatile */ false, IsR6(), codegen_);
+}
+
+// Object sun.misc.Unsafe.getObjectVolatile(Object o, long offset)
+void IntrinsicLocationsBuilderMIPS::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
+ CreateIntIntIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
+ GenUnsafeGet(invoke, Primitive::kPrimNot, /* is_volatile */ true, IsR6(), codegen_);
+}
+
+static void CreateIntIntIntIntToVoidLocations(ArenaAllocator* arena, HInvoke* invoke) {
+ LocationSummary* locations = new (arena) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetInAt(2, Location::RequiresRegister());
+ locations->SetInAt(3, Location::RequiresRegister());
+}
+
+static void GenUnsafePut(LocationSummary* locations,
+ Primitive::Type type,
+ bool is_volatile,
+ bool is_ordered,
+ bool is_R6,
+ CodeGeneratorMIPS* codegen) {
+ DCHECK((type == Primitive::kPrimInt) ||
+ (type == Primitive::kPrimLong) ||
+ (type == Primitive::kPrimNot)) << type;
+ MipsAssembler* assembler = codegen->GetAssembler();
+ // Object pointer.
+ Register base = locations->InAt(1).AsRegister<Register>();
+ // The "offset" argument is passed as a "long", i.e., it's 64-bits in
+ // size. Since this code is for a 32-bit processor, we can only use
+ // 32-bit addresses, so we only need the low 32-bits of offset.
+ Register offset_lo = locations->InAt(2).AsRegisterPairLow<Register>();
+
+ __ Addu(TMP, base, offset_lo);
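+ // A SYNC before the store orders it after all earlier memory accesses,
+ // giving volatile and ordered puts their release semantics.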
+ if (is_volatile || is_ordered) {
+ __ Sync(0);
+ }
+ if ((type == Primitive::kPrimInt) || (type == Primitive::kPrimNot)) {
+ Register value = locations->InAt(3).AsRegister<Register>();
+
+ if (is_R6) {
+ __ Sw(value, TMP, 0);
+ } else {
+ __ Swr(value, TMP, 0);
+ __ Swl(value, TMP, 3);
+ }
+ } else {
+ Register value_lo = locations->InAt(3).AsRegisterPairLow<Register>();
+ Register value_hi = locations->InAt(3).AsRegisterPairHigh<Register>();
+
+ if (is_R6) {
+ __ Sw(value_lo, TMP, 0);
+ __ Sw(value_hi, TMP, 4);
+ } else {
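+ // As in GenUnsafeGet, Swr/Swl pairs store each half of the long to a
+ // possibly unaligned address on pre-R6 cores.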
+ __ Swr(value_lo, TMP, 0);
+ __ Swl(value_lo, TMP, 3);
+ __ Swr(value_hi, TMP, 4);
+ __ Swl(value_hi, TMP, 7);
+ }
+ }
+
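+ // For a volatile put, a second SYNC after the store keeps later
+ // accesses from being reordered ahead of it.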
+ if (is_volatile) {
+ __ Sync(0);
+ }
+
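+ // Storing a reference needs a GC write barrier: mark the card for the
+ // holding object so the collector sees the updated reference.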
+ if (type == Primitive::kPrimNot) {
+ codegen->MarkGCCard(base, locations->InAt(3).AsRegister<Register>());
+ }
+}
+
+// void sun.misc.Unsafe.putInt(Object o, long offset, int x)
+void IntrinsicLocationsBuilderMIPS::VisitUnsafePut(HInvoke* invoke) {
+ CreateIntIntIntIntToVoidLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitUnsafePut(HInvoke* invoke) {
+ GenUnsafePut(invoke->GetLocations(),
+ Primitive::kPrimInt,
+ /* is_volatile */ false,
+ /* is_ordered */ false,
+ IsR6(),
+ codegen_);
+}
+
+// void sun.misc.Unsafe.putOrderedInt(Object o, long offset, int x)
+void IntrinsicLocationsBuilderMIPS::VisitUnsafePutOrdered(HInvoke* invoke) {
+ CreateIntIntIntIntToVoidLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitUnsafePutOrdered(HInvoke* invoke) {
+ GenUnsafePut(invoke->GetLocations(),
+ Primitive::kPrimInt,
+ /* is_volatile */ false,
+ /* is_ordered */ true,
+ IsR6(),
+ codegen_);
+}
+
+// void sun.misc.Unsafe.putIntVolatile(Object o, long offset, int x)
+void IntrinsicLocationsBuilderMIPS::VisitUnsafePutVolatile(HInvoke* invoke) {
+ CreateIntIntIntIntToVoidLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitUnsafePutVolatile(HInvoke* invoke) {
+ GenUnsafePut(invoke->GetLocations(),
+ Primitive::kPrimInt,
+ /* is_volatile */ true,
+ /* is_ordered */ false,
+ IsR6(),
+ codegen_);
+}
+
+// void sun.misc.Unsafe.putObject(Object o, long offset, Object x)
+void IntrinsicLocationsBuilderMIPS::VisitUnsafePutObject(HInvoke* invoke) {
+ CreateIntIntIntIntToVoidLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitUnsafePutObject(HInvoke* invoke) {
+ GenUnsafePut(invoke->GetLocations(),
+ Primitive::kPrimNot,
+ /* is_volatile */ false,
+ /* is_ordered */ false,
+ IsR6(),
+ codegen_);
+}
+
+// void sun.misc.Unsafe.putOrderedObject(Object o, long offset, Object x)
+void IntrinsicLocationsBuilderMIPS::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
+ CreateIntIntIntIntToVoidLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
+ GenUnsafePut(invoke->GetLocations(),
+ Primitive::kPrimNot,
+ /* is_volatile */ false,
+ /* is_ordered */ true,
+ IsR6(),
+ codegen_);
+}
+
+// void sun.misc.Unsafe.putObjectVolatile(Object o, long offset, Object x)
+void IntrinsicLocationsBuilderMIPS::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
+ CreateIntIntIntIntToVoidLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
+ GenUnsafePut(invoke->GetLocations(),
+ Primitive::kPrimNot,
+ /* is_volatile */ true,
+ /* is_ordered */ false,
+ IsR6(),
+ codegen_);
+}
+
+// void sun.misc.Unsafe.putLong(Object o, long offset, long x)
+void IntrinsicLocationsBuilderMIPS::VisitUnsafePutLong(HInvoke* invoke) {
+ CreateIntIntIntIntToVoidLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitUnsafePutLong(HInvoke* invoke) {
+ GenUnsafePut(invoke->GetLocations(),
+ Primitive::kPrimLong,
+ /* is_volatile */ false,
+ /* is_ordered */ false,
+ IsR6(),
+ codegen_);
+}
+
+// void sun.misc.Unsafe.putOrderedLong(Object o, long offset, long x)
+void IntrinsicLocationsBuilderMIPS::VisitUnsafePutLongOrdered(HInvoke* invoke) {
+ CreateIntIntIntIntToVoidLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitUnsafePutLongOrdered(HInvoke* invoke) {
+ GenUnsafePut(invoke->GetLocations(),
+ Primitive::kPrimLong,
+ /* is_volatile */ false,
+ /* is_ordered */ true,
+ IsR6(),
+ codegen_);
+}
+
+// void sun.misc.Unsafe.putLongVolatile(Object o, long offset, long x)
+void IntrinsicLocationsBuilderMIPS::VisitUnsafePutLongVolatile(HInvoke* invoke) {
+ CreateIntIntIntIntToVoidLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitUnsafePutLongVolatile(HInvoke* invoke) {
+ GenUnsafePut(invoke->GetLocations(),
+ Primitive::kPrimLong,
+ /* is_volatile */ true,
+ /* is_ordered */ false,
+ IsR6(),
+ codegen_);
+}
+
+static void CreateIntIntIntIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
+ LocationSummary* locations = new (arena) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetInAt(2, Location::RequiresRegister());
+ locations->SetInAt(3, Location::RequiresRegister());
+ locations->SetInAt(4, Location::RequiresRegister());
+
+ locations->SetOut(Location::RequiresRegister());
+}
+
+static void GenCas(LocationSummary* locations, Primitive::Type type, CodeGeneratorMIPS* codegen) {
+ MipsAssembler* assembler = codegen->GetAssembler();
+ bool isR6 = codegen->GetInstructionSetFeatures().IsR6();
+ Register base = locations->InAt(1).AsRegister<Register>();
+ Register offset_lo = locations->InAt(2).AsRegisterPairLow<Register>();
+ Register expected = locations->InAt(3).AsRegister<Register>();
+ Register value = locations->InAt(4).AsRegister<Register>();
+ Register out = locations->Out().AsRegister<Register>();
+
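+ // The code below clobbers 'out' (first with the loaded value, then with
+ // the SC result), so it must not share a register with these inputs.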
+ DCHECK_NE(base, out);
+ DCHECK_NE(offset_lo, out);
+ DCHECK_NE(expected, out);
+
+ if (type == Primitive::kPrimNot) {
+ // Mark card for object assuming new value is stored.
+ codegen->MarkGCCard(base, value);
+ }
+
+ // do {
+ //   tmp_value = [tmp_ptr] - expected;
+ // } while (tmp_value == 0 && failure([tmp_ptr] <- value));
+ // result = (tmp_value == 0);  // True iff the swap actually happened.
+
+ MipsLabel loop_head, exit_loop;
+ __ Addu(TMP, base, offset_lo);
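+ // A full barrier before the LL/SC retry loop; the matching SYNC after
+ // the loop completes the fence pair around the CAS.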
+ __ Sync(0);
+ __ Bind(&loop_head);
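+ // LL (load-linked) reads the current value and starts monitoring the
+ // address; the SC (store-conditional) below succeeds only if no other
+ // write to that address happened in between.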
+ if ((type == Primitive::kPrimInt) || (type == Primitive::kPrimNot)) {
+ if (isR6) {
+ __ LlR6(out, TMP);
+ } else {
+ __ LlR2(out, TMP);
+ }
+ } else {
+ LOG(FATAL) << "Unsupported op size " << type;
+ UNREACHABLE();
+ }
+ __ Subu(out, out, expected); // If we didn't get the 'expected'
+ __ Sltiu(out, out, 1); // value, set 'out' to false, and
+ __ Beqz(out, &exit_loop); // return.
+ __ Move(out, value); // Use 'out' for the 'store conditional' instruction.
+ // If we used 'value' directly, we would lose 'value'
+ // whenever the store failed. Whether the store succeeds
+ // or fails, the store conditional leaves the correct
+ // boolean value in the 'out' register.
+ // This test isn't really necessary: we only support Primitive::kPrimInt
+ // and Primitive::kPrimNot, and we already verified above that we are
+ // working on one of those two types. It is left here in case the code
+ // needs to support other types in the future.
+ if ((type == Primitive::kPrimInt) || (type == Primitive::kPrimNot)) {
+ if (isR6) {
+ __ ScR6(out, TMP);
+ } else {
+ __ ScR2(out, TMP);
+ }
+ }
+ __ Beqz(out, &loop_head); // If we couldn't do the read-modify-write
+ // cycle atomically, then retry.
+ __ Bind(&exit_loop);
+ __ Sync(0);
+}
+
+// boolean sun.misc.Unsafe.compareAndSwapInt(Object o, long offset, int expected, int x)
+void IntrinsicLocationsBuilderMIPS::VisitUnsafeCASInt(HInvoke* invoke) {
+ CreateIntIntIntIntIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitUnsafeCASInt(HInvoke* invoke) {
+ GenCas(invoke->GetLocations(), Primitive::kPrimInt, codegen_);
+}
+
+// boolean sun.misc.Unsafe.compareAndSwapObject(Object o, long offset, Object expected, Object x)
+void IntrinsicLocationsBuilderMIPS::VisitUnsafeCASObject(HInvoke* invoke) {
+ CreateIntIntIntIntIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitUnsafeCASObject(HInvoke* invoke) {
+ GenCas(invoke->GetLocations(), Primitive::kPrimNot, codegen_);
+}
+
// char java.lang.String.charAt(int index)
void IntrinsicLocationsBuilderMIPS::VisitStringCharAt(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
@@ -1482,7 +1880,7 @@ void IntrinsicLocationsBuilderMIPS::VisitStringCharAt(HInvoke* invoke) {
kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
- // The inputs will be considered live at the last instruction and restored. This will overwrite
+ // The inputs will be considered live at the last instruction and restored. This would overwrite
// the output with kNoOutputOverlap.
locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
}
@@ -2042,24 +2440,7 @@ UNIMPLEMENTED_INTRINSIC(MIPS, MathFloor)
UNIMPLEMENTED_INTRINSIC(MIPS, MathRint)
UNIMPLEMENTED_INTRINSIC(MIPS, MathRoundDouble)
UNIMPLEMENTED_INTRINSIC(MIPS, MathRoundFloat)
-UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGet)
-UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetVolatile)
-UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetLong)
-UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetLongVolatile)
-UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetObject)
-UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetObjectVolatile)
-UNIMPLEMENTED_INTRINSIC(MIPS, UnsafePut)
-UNIMPLEMENTED_INTRINSIC(MIPS, UnsafePutOrdered)
-UNIMPLEMENTED_INTRINSIC(MIPS, UnsafePutVolatile)
-UNIMPLEMENTED_INTRINSIC(MIPS, UnsafePutObject)
-UNIMPLEMENTED_INTRINSIC(MIPS, UnsafePutObjectOrdered)
-UNIMPLEMENTED_INTRINSIC(MIPS, UnsafePutObjectVolatile)
-UNIMPLEMENTED_INTRINSIC(MIPS, UnsafePutLong)
-UNIMPLEMENTED_INTRINSIC(MIPS, UnsafePutLongOrdered)
-UNIMPLEMENTED_INTRINSIC(MIPS, UnsafePutLongVolatile)
-UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeCASInt)
UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeCASLong)
-UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeCASObject)
UNIMPLEMENTED_INTRINSIC(MIPS, ReferenceGetReferent)
UNIMPLEMENTED_INTRINSIC(MIPS, StringGetCharsNoCheck)
diff --git a/compiler/optimizing/licm.cc b/compiler/optimizing/licm.cc
index 33bb2e8f30..7a1e06b951 100644
--- a/compiler/optimizing/licm.cc
+++ b/compiler/optimizing/licm.cc
@@ -80,7 +80,7 @@ static void UpdateLoopPhisIn(HEnvironment* environment, HLoopInformation* info)
void LICM::Run() {
DCHECK(side_effects_.HasRun());
// Only used during debug.
- ArenaBitVector visited(graph_->GetArena(), graph_->GetBlocks().size(), false);
+ ArenaBitVector visited(graph_->GetArena(), graph_->GetBlocks().size(), false, kArenaAllocLICM);
// Post order visit to visit inner loops before outer loops.
for (HPostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index 9601b066e5..e1977b1798 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -186,7 +186,10 @@ class HeapLocationCollector : public HGraphVisitor {
: HGraphVisitor(graph),
ref_info_array_(graph->GetArena()->Adapter(kArenaAllocLSE)),
heap_locations_(graph->GetArena()->Adapter(kArenaAllocLSE)),
- aliasing_matrix_(graph->GetArena(), kInitialAliasingMatrixBitVectorSize, true),
+ aliasing_matrix_(graph->GetArena(),
+ kInitialAliasingMatrixBitVectorSize,
+ true,
+ kArenaAllocLSE),
has_heap_stores_(false),
has_volatile_(false),
has_monitor_operations_(false),
diff --git a/compiler/optimizing/locations.cc b/compiler/optimizing/locations.cc
index 1ab206f69e..83596da41a 100644
--- a/compiler/optimizing/locations.cc
+++ b/compiler/optimizing/locations.cc
@@ -37,7 +37,7 @@ LocationSummary::LocationSummary(HInstruction* instruction,
if (NeedsSafepoint()) {
ArenaAllocator* arena = instruction->GetBlock()->GetGraph()->GetArena();
- stack_mask_ = new (arena) ArenaBitVector(arena, 0, true);
+ stack_mask_ = ArenaBitVector::Create(arena, 0, true, kArenaAllocLocationSummary);
}
}
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 98766a31a6..c83340b1f6 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -54,7 +54,7 @@ void HGraph::FindBackEdges(ArenaBitVector* visited) {
DCHECK_EQ(visited->GetHighestBitSet(), -1);
// Nodes that we're currently visiting, indexed by block id.
- ArenaBitVector visiting(arena_, blocks_.size(), false);
+ ArenaBitVector visiting(arena_, blocks_.size(), false, kArenaAllocGraphBuilder);
// Number of successors visited from a given node, indexed by block id.
ArenaVector<size_t> successors_visited(blocks_.size(), 0u, arena_->Adapter());
// Stack of nodes that we're currently visiting (same as marked in "visiting" above).
@@ -140,7 +140,7 @@ GraphAnalysisResult HGraph::BuildDominatorTree() {
// collect both normal- and exceptional-flow values at the same time.
SimplifyCatchBlocks();
- ArenaBitVector visited(arena_, blocks_.size(), false);
+ ArenaBitVector visited(arena_, blocks_.size(), false, kArenaAllocGraphBuilder);
// (2) Find the back edges in the graph doing a DFS traversal.
FindBackEdges(&visited);
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 46377ee503..a09ad00cdd 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -645,7 +645,7 @@ class HLoopInformation : public ArenaObject<kArenaAllocLoopInfo> {
irreducible_(false),
back_edges_(graph->GetArena()->Adapter(kArenaAllocLoopInfoBackEdges)),
// Make bit vector growable, as the number of blocks may change.
- blocks_(graph->GetArena(), graph->GetBlocks().size(), true) {
+ blocks_(graph->GetArena(), graph->GetBlocks().size(), true, kArenaAllocLoopInfoBackEdges) {
back_edges_.reserve(kDefaultNumberOfBackEdges);
}
@@ -4148,7 +4148,9 @@ class HInvokeInterface : public HInvoke {
class HNeg : public HUnaryOperation {
public:
HNeg(Primitive::Type result_type, HInstruction* input, uint32_t dex_pc = kNoDexPc)
- : HUnaryOperation(result_type, input, dex_pc) {}
+ : HUnaryOperation(result_type, input, dex_pc) {
+ DCHECK_EQ(result_type, Primitive::PrimitiveKind(input->GetType()));
+ }
template <typename T> T Compute(T x) const { return -x; }
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 4d2469ca15..f1c5581c5b 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -84,6 +84,8 @@
namespace art {
+static constexpr size_t kArenaAllocatorMemoryReportThreshold = 8 * MB;
+
/**
* Used by the code generator, to allocate the code in a vector.
*/
@@ -761,13 +763,6 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* arena,
pass_observer.DumpDisassembly();
}
- if (kArenaAllocatorCountAllocations) {
- if (arena->BytesAllocated() > 4 * MB) {
- MemStats mem_stats(arena->GetMemStats());
- LOG(INFO) << PrettyMethod(method_idx, dex_file) << " " << Dumpable<MemStats>(mem_stats);
- }
- }
-
return codegen.release();
}
@@ -812,6 +807,13 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
if (codegen.get() != nullptr) {
MaybeRecordStat(MethodCompilationStat::kCompiled);
method = Emit(&arena, &code_allocator, codegen.get(), compiler_driver, code_item);
+
+ if (kArenaAllocatorCountAllocations) {
+ if (arena.BytesAllocated() > kArenaAllocatorMemoryReportThreshold) {
+ MemStats mem_stats(arena.GetMemStats());
+ LOG(INFO) << PrettyMethod(method_idx, dex_file) << " " << Dumpable<MemStats>(mem_stats);
+ }
+ }
}
} else {
if (compiler_driver->GetCompilerOptions().VerifyAtRuntime()) {
@@ -890,6 +892,13 @@ bool OptimizingCompiler::JitCompile(Thread* self,
if (codegen.get() == nullptr) {
return false;
}
+
+ if (kArenaAllocatorCountAllocations) {
+ if (arena.BytesAllocated() > kArenaAllocatorMemoryReportThreshold) {
+ MemStats mem_stats(arena.GetMemStats());
+ LOG(INFO) << PrettyMethod(method_idx, *dex_file) << " " << Dumpable<MemStats>(mem_stats);
+ }
+ }
}
size_t stack_map_size = codegen->ComputeStackMapsSize();
diff --git a/compiler/optimizing/optimizing_unit_test.h b/compiler/optimizing/optimizing_unit_test.h
index 0c7648edc2..0ca7305d13 100644
--- a/compiler/optimizing/optimizing_unit_test.h
+++ b/compiler/optimizing/optimizing_unit_test.h
@@ -20,7 +20,6 @@
#include "nodes.h"
#include "builder.h"
#include "common_compiler_test.h"
-#include "compiler/dex/pass_manager.h"
#include "dex_file.h"
#include "dex_instruction.h"
#include "handle_scope-inl.h"
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index 48b7a97b72..44bede8ac0 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -445,7 +445,7 @@ class AllRangesIterator : public ValueObject {
bool RegisterAllocator::ValidateInternal(bool log_fatal_on_failure) const {
// To simplify unit testing, we eagerly create the array of intervals, and
// call the helper method.
- ArenaVector<LiveInterval*> intervals(allocator_->Adapter(kArenaAllocRegisterAllocator));
+ ArenaVector<LiveInterval*> intervals(allocator_->Adapter(kArenaAllocRegisterAllocatorValidate));
for (size_t i = 0; i < liveness_.GetNumberOfSsaValues(); ++i) {
HInstruction* instruction = liveness_.GetInstructionFromSsaIndex(i);
if (ShouldProcess(processing_core_registers_, instruction->GetLiveInterval())) {
@@ -483,13 +483,21 @@ bool RegisterAllocator::ValidateIntervals(const ArenaVector<LiveInterval*>& inte
? codegen.GetNumberOfCoreRegisters()
: codegen.GetNumberOfFloatingPointRegisters();
ArenaVector<ArenaBitVector*> liveness_of_values(
- allocator->Adapter(kArenaAllocRegisterAllocator));
+ allocator->Adapter(kArenaAllocRegisterAllocatorValidate));
liveness_of_values.reserve(number_of_registers + number_of_spill_slots);
+ size_t max_end = 0u;
+ for (LiveInterval* start_interval : intervals) {
+ for (AllRangesIterator it(start_interval); !it.Done(); it.Advance()) {
+ max_end = std::max(max_end, it.CurrentRange()->GetEnd());
+ }
+ }
+
// Allocate a bit vector per register. A live interval that has a register
// allocated will populate the associated bit vector based on its live ranges.
for (size_t i = 0; i < number_of_registers + number_of_spill_slots; ++i) {
- liveness_of_values.push_back(new (allocator) ArenaBitVector(allocator, 0, true));
+ liveness_of_values.push_back(
+ ArenaBitVector::Create(allocator, max_end, false, kArenaAllocRegisterAllocatorValidate));
}
for (LiveInterval* start_interval : intervals) {
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index a78aedcff5..97f2aeeb1e 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -31,9 +31,9 @@ class BlockInfo : public ArenaObject<kArenaAllocSsaLiveness> {
public:
BlockInfo(ArenaAllocator* allocator, const HBasicBlock& block, size_t number_of_ssa_values)
: block_(block),
- live_in_(allocator, number_of_ssa_values, false),
- live_out_(allocator, number_of_ssa_values, false),
- kill_(allocator, number_of_ssa_values, false) {
+ live_in_(allocator, number_of_ssa_values, false, kArenaAllocSsaLiveness),
+ live_out_(allocator, number_of_ssa_values, false, kArenaAllocSsaLiveness),
+ kill_(allocator, number_of_ssa_values, false, kArenaAllocSsaLiveness) {
UNUSED(block_);
live_in_.ClearAllBits();
live_out_.ClearAllBits();
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
index 54cbdf8b66..3f41e3594e 100644
--- a/compiler/optimizing/stack_map_stream.cc
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -37,7 +37,7 @@ void StackMapStream::BeginStackMapEntry(uint32_t dex_pc,
current_entry_.same_dex_register_map_as_ = kNoSameDexMapFound;
if (num_dex_registers != 0) {
current_entry_.live_dex_registers_mask =
- new (allocator_) ArenaBitVector(allocator_, num_dex_registers, true);
+ ArenaBitVector::Create(allocator_, num_dex_registers, true, kArenaAllocStackMapStream);
} else {
current_entry_.live_dex_registers_mask = nullptr;
}
@@ -111,7 +111,7 @@ void StackMapStream::BeginInlineInfoEntry(uint32_t method_index,
current_inline_info_.dex_register_locations_start_index = dex_register_locations_.size();
if (num_dex_registers != 0) {
current_inline_info_.live_dex_registers_mask =
- new (allocator_) ArenaBitVector(allocator_, num_dex_registers, true);
+ ArenaBitVector::Create(allocator_, num_dex_registers, true, kArenaAllocStackMapStream);
} else {
current_inline_info_.live_dex_registers_mask = nullptr;
}
@@ -256,7 +256,7 @@ void StackMapStream::FillIn(MemoryRegion region) {
// Ensure we reached the end of the Dex registers location_catalog.
DCHECK_EQ(location_catalog_offset, dex_register_location_catalog_region.size());
- ArenaBitVector empty_bitmask(allocator_, 0, /* expandable */ false);
+ ArenaBitVector empty_bitmask(allocator_, 0, /* expandable */ false, kArenaAllocStackMapStream);
uintptr_t next_dex_register_map_offset = 0;
uintptr_t next_inline_info_offset = 0;
for (size_t i = 0, e = stack_maps_.size(); i < e; ++i) {