Optimizing: Tag even more arena allocations.
Tag arena allocations that previously used the catch-all "Misc"
kind with more specific allocation kinds. Also move some
native-heap allocations in BCE (bounds check elimination) into
the arena.
Bug: 23736311
Change-Id: If8ef15a8b614dc3314bdfb35caa23862c9d4d25c
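
The mechanism behind these retags, for reviewers who have not seen it:
every arena allocation carries an ArenaAllocKind tag, passed explicitly
(Alloc, AllocArray, Adapter) or baked into a class through the
ArenaObject<kKind> base, so per-pass memory use can be attributed
precisely instead of lumped under "Misc". Below is a minimal,
self-contained sketch of that scheme. It is not ART's actual
implementation: the names mirror ART's, but the heap-backed storage and
printf accounting are simplified stand-ins.

    #include <cstddef>
    #include <cstdio>
    #include <cstring>
    #include <vector>

    // Simplified stand-in for ART's ArenaAllocKind; only the kinds used
    // in this sketch are listed.
    enum ArenaAllocKind {
      kArenaAllocMisc,
      kArenaAllocBoundsCheckElimination,
      kArenaAllocCodeGenerator,
      kNumArenaAllocKinds
    };

    // Simplified stand-in for ART's ArenaAllocator. Real arenas
    // bump-allocate from large blocks; here we just tally bytes per kind.
    class ArenaAllocator {
     public:
      void* Alloc(size_t bytes, ArenaAllocKind kind) {
        bytes_used_[kind] += bytes;   // per-kind accounting is the point
        char* p = new char[bytes];
        memset(p, 0, bytes);          // arena memory is zero-initialized
        storage_.push_back(p);
        return p;
      }

      template <typename T>
      T* AllocArray(size_t length, ArenaAllocKind kind) {
        return static_cast<T*>(Alloc(length * sizeof(T), kind));
      }

      void Dump() const {
        for (int k = 0; k < kNumArenaAllocKinds; ++k) {
          printf("kind %d: %zu bytes\n", k, bytes_used_[k]);
        }
      }

      ~ArenaAllocator() {
        for (char* p : storage_) delete[] p;  // real arenas free blocks wholesale
      }

     private:
      size_t bytes_used_[kNumArenaAllocKinds] = {};
      std::vector<char*> storage_;
    };

    // The tag is a template parameter, so switching a class from
    // ArenaObject<kArenaAllocMisc> to a specific kind (as this change
    // does) retags every allocation of that class with no call-site edits.
    template <ArenaAllocKind kKind>
    class ArenaObject {
     public:
      void* operator new(size_t size, ArenaAllocator* allocator) {
        return allocator->Alloc(size, kKind);
      }
      void operator delete(void*, size_t) {}  // arena objects are never deleted singly
    };

    class ValueRange : public ArenaObject<kArenaAllocBoundsCheckElimination> {
     public:
      ValueRange(int lower, int upper) : lower_(lower), upper_(upper) {}
     private:
      int lower_;
      int upper_;
    };

    int main() {
      ArenaAllocator arena;
      ValueRange* range = new (&arena) ValueRange(0, 10);  // tagged as BCE
      bool* blocked = arena.AllocArray<bool>(16, kArenaAllocCodeGenerator);
      (void)range;
      (void)blocked;
      arena.Dump();  // shows which pass the bytes were charged to
      return 0;
    }

With tags in place, a per-kind dump shows which pass is responsible for
arena growth; this change tightens that attribution for BCE, the graph
builder, the code generators, GVN, induction variable analysis, register
allocation, and SSA liveness.
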
diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc
index 42b3541..960f4d9 100644
--- a/compiler/optimizing/bounds_check_elimination.cc
+++ b/compiler/optimizing/bounds_check_elimination.cc
@@ -410,7 +410,7 @@
* of an existing value range, NewArray or a loop phi corresponding to an
* incrementing/decrementing array index (MonotonicValueRange).
*/
-class ValueRange : public ArenaObject<kArenaAllocMisc> {
+class ValueRange : public ArenaObject<kArenaAllocBoundsCheckElimination> {
public:
ValueRange(ArenaAllocator* allocator, ValueBound lower, ValueBound upper)
: allocator_(allocator), lower_(lower), upper_(upper) {}
@@ -1112,7 +1112,14 @@
BCEVisitor(HGraph* graph, HInductionVarAnalysis* induction_analysis)
: HGraphVisitor(graph),
- maps_(graph->GetBlocks().size()),
+ maps_(graph->GetBlocks().size(),
+ ArenaSafeMap<int, ValueRange*>(
+ std::less<int>(),
+ graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
+ graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
+ first_constant_index_bounds_check_map_(
+ std::less<int>(),
+ graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
need_to_revisit_block_(false),
initial_block_size_(graph->GetBlocks().size()),
induction_range_(induction_analysis) {}
@@ -1137,14 +1144,9 @@
// Added blocks don't keep value ranges.
return nullptr;
}
- int block_id = basic_block->GetBlockId();
- if (maps_.at(block_id) == nullptr) {
- std::unique_ptr<ArenaSafeMap<int, ValueRange*>> map(
- new ArenaSafeMap<int, ValueRange*>(
- std::less<int>(), GetGraph()->GetArena()->Adapter()));
- maps_.at(block_id) = std::move(map);
- }
- return maps_.at(block_id).get();
+ uint32_t block_id = basic_block->GetBlockId();
+ DCHECK_LT(block_id, maps_.size());
+ return &maps_[block_id];
}
// Traverse up the dominator tree to look for value range info.
@@ -1842,11 +1844,11 @@
}
}
- std::vector<std::unique_ptr<ArenaSafeMap<int, ValueRange*>>> maps_;
+ ArenaVector<ArenaSafeMap<int, ValueRange*>> maps_;
// Map an HArrayLength instruction's id to the first HBoundsCheck instruction in
// a block that checks a constant index against that HArrayLength.
- SafeMap<int, HBoundsCheck*> first_constant_index_bounds_check_map_;
+ ArenaSafeMap<int, HBoundsCheck*> first_constant_index_bounds_check_map_;
// For the block, there is at least one HArrayLength instruction for which there
// is more than one bounds check instruction with constant indexing. And it's
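
Besides the retagging, the maps_ rework above moves the per-block maps
off the native heap: the old code kept a std::vector of
std::unique_ptr-wrapped maps allocated with new, while the new code
builds an ArenaVector of ArenaSafeMap values whose storage, down to the
individual map nodes, comes from the arena via Adapter(kind). The sketch
below shows how such an allocator adapter plugs standard containers into
an arena. It is a simplified stand-in for ART's ArenaAllocatorAdapter
and ArenaSafeMap, not the real thing; the kind list and sizes are
illustrative.

    #include <cstddef>
    #include <cstdlib>
    #include <functional>
    #include <map>
    #include <vector>

    // Compact stand-ins; see the sketch in the commit message above for
    // the tagged-accounting version.
    enum ArenaAllocKind { kArenaAllocBoundsCheckElimination, kNumArenaAllocKinds };

    class ArenaAllocator {
     public:
      void* Alloc(size_t bytes, ArenaAllocKind kind) {
        bytes_used_[kind] += bytes;
        blocks_.push_back(malloc(bytes));
        return blocks_.back();
      }
      ~ArenaAllocator() { for (void* p : blocks_) free(p); }
     private:
      size_t bytes_used_[kNumArenaAllocKinds] = {};
      std::vector<void*> blocks_;
    };

    // Simplified analogue of what ArenaAllocator::Adapter(kind) returns:
    // a standard-library allocator that forwards to the arena under one
    // kind. Members are public so the rebinding constructor can read them.
    template <typename T>
    class ArenaAdapter {
     public:
      using value_type = T;
      ArenaAdapter(ArenaAllocator* arena, ArenaAllocKind kind)
          : arena_(arena), kind_(kind) {}
      template <typename U>
      ArenaAdapter(const ArenaAdapter<U>& other)  // rebind support for map nodes
          : arena_(other.arena_), kind_(other.kind_) {}
      T* allocate(size_t n) {
        return static_cast<T*>(arena_->Alloc(n * sizeof(T), kind_));
      }
      void deallocate(T*, size_t) {}  // the arena releases everything at once
      ArenaAllocator* arena_;
      ArenaAllocKind kind_;
    };

    template <typename T, typename U>
    bool operator==(const ArenaAdapter<T>& a, const ArenaAdapter<U>& b) {
      return a.arena_ == b.arena_;
    }
    template <typename T, typename U>
    bool operator!=(const ArenaAdapter<T>& a, const ArenaAdapter<U>& b) {
      return !(a == b);
    }

    template <typename T>
    using ArenaVector = std::vector<T, ArenaAdapter<T>>;
    template <typename K, typename V>
    using ArenaSafeMap =
        std::map<K, V, std::less<K>, ArenaAdapter<std::pair<const K, V>>>;

    int main() {
      ArenaAllocator arena;
      ArenaAdapter<std::pair<const int, int>> map_adapter(
          &arena, kArenaAllocBoundsCheckElimination);
      ArenaAdapter<ArenaSafeMap<int, int>> vector_adapter(
          &arena, kArenaAllocBoundsCheckElimination);
      // Mirrors the new maps_ initializer: the fill constructor copies one
      // prototype arena-backed map into a slot per block, and the vector's
      // own backing store comes from the arena as well.
      ArenaVector<ArenaSafeMap<int, int>> maps(
          4,  // number of blocks (illustrative)
          ArenaSafeMap<int, int>(std::less<int>(), map_adapter),
          vector_adapter);
      maps[0].emplace(7, 42);
      return 0;
    }

The two-adapter initializer mirrors the constructor change in the
@@ -1112 hunk: one adapter goes to the prototype map copied into each
slot, the other to the vector itself, so both the vector's backing store
and every map node are charged to kArenaAllocBoundsCheckElimination and
nothing needs a native-heap new or a unique_ptr.
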
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 7ae405a..cb36f62 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -324,7 +324,7 @@
// Keep a map of all try blocks and their respective TryItems. We do not use
// the block's pointer but rather its id to ensure deterministic iteration.
ArenaSafeMap<uint32_t, const DexFile::TryItem*> try_block_info(
- std::less<uint32_t>(), arena_->Adapter());
+ std::less<uint32_t>(), arena_->Adapter(kArenaAllocGraphBuilder));
// Obtain TryItem information for blocks with throwing instructions, and split
// blocks which are both try & catch to simplify the graph.
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index b58a3ff..5da0e59 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -425,9 +425,12 @@
core_spill_mask_(0),
fpu_spill_mask_(0),
first_register_slot_in_slow_path_(0),
- blocked_core_registers_(graph->GetArena()->AllocArray<bool>(number_of_core_registers)),
- blocked_fpu_registers_(graph->GetArena()->AllocArray<bool>(number_of_fpu_registers)),
- blocked_register_pairs_(graph->GetArena()->AllocArray<bool>(number_of_register_pairs)),
+ blocked_core_registers_(graph->GetArena()->AllocArray<bool>(number_of_core_registers,
+ kArenaAllocCodeGenerator)),
+ blocked_fpu_registers_(graph->GetArena()->AllocArray<bool>(number_of_fpu_registers,
+ kArenaAllocCodeGenerator)),
+ blocked_register_pairs_(graph->GetArena()->AllocArray<bool>(number_of_register_pairs,
+ kArenaAllocCodeGenerator)),
number_of_core_registers_(number_of_core_registers),
number_of_fpu_registers_(number_of_fpu_registers),
number_of_register_pairs_(number_of_register_pairs),
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index da7a675..f29e2ba 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -438,9 +438,11 @@
move_resolver_(graph->GetArena(), this),
assembler_(),
isa_features_(isa_features),
- method_patches_(MethodReferenceComparator(), graph->GetArena()->Adapter()),
- call_patches_(MethodReferenceComparator(), graph->GetArena()->Adapter()),
- relative_call_patches_(graph->GetArena()->Adapter()) {
+ method_patches_(MethodReferenceComparator(),
+ graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ call_patches_(MethodReferenceComparator(),
+ graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ relative_call_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
// Always save the LR register to mimic Quick.
AddAllocatedRegister(Location::RegisterLocation(LR));
}
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 31900d5..c97e367 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -542,11 +542,14 @@
instruction_visitor_(graph, this),
move_resolver_(graph->GetArena(), this),
isa_features_(isa_features),
- uint64_literals_(std::less<uint64_t>(), graph->GetArena()->Adapter()),
- method_patches_(MethodReferenceComparator(), graph->GetArena()->Adapter()),
- call_patches_(MethodReferenceComparator(), graph->GetArena()->Adapter()),
- relative_call_patches_(graph->GetArena()->Adapter()),
- pc_rel_dex_cache_patches_(graph->GetArena()->Adapter()) {
+ uint64_literals_(std::less<uint64_t>(),
+ graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ method_patches_(MethodReferenceComparator(),
+ graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ call_patches_(MethodReferenceComparator(),
+ graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ relative_call_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ pc_rel_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
// Save the link register (containing the return address) to mimic Quick.
AddAllocatedRegister(LocationFrom(lr));
}
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 277f6b4..a47a95e 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -475,8 +475,8 @@
instruction_visitor_(graph, this),
move_resolver_(graph->GetArena(), this),
isa_features_(isa_features),
- method_patches_(graph->GetArena()->Adapter()),
- relative_call_patches_(graph->GetArena()->Adapter()) {
+ method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ relative_call_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
// Use a fake return address register to mimic Quick.
AddAllocatedRegister(Location::RegisterLocation(kFakeReturnRegister));
}
@@ -5623,7 +5623,7 @@
/**
* Class to handle late fixup of offsets into constant area.
*/
-class RIPFixup : public AssemblerFixup, public ArenaObject<kArenaAllocMisc> {
+class RIPFixup : public AssemblerFixup, public ArenaObject<kArenaAllocCodeGenerator> {
public:
RIPFixup(const CodeGeneratorX86& codegen, int offset)
: codegen_(codegen), offset_into_constant_area_(offset) {}
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 453c6fd..b845a27 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -626,9 +626,9 @@
move_resolver_(graph->GetArena(), this),
isa_features_(isa_features),
constant_area_start_(0),
- method_patches_(graph->GetArena()->Adapter()),
- relative_call_patches_(graph->GetArena()->Adapter()),
- pc_rel_dex_cache_patches_(graph->GetArena()->Adapter()) {
+ method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ relative_call_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ pc_rel_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
AddAllocatedRegister(Location::RegisterLocation(kFakeReturnRegister));
}
@@ -5279,7 +5279,7 @@
/**
* Class to handle late fixup of offsets into constant area.
*/
-class RIPFixup : public AssemblerFixup, public ArenaObject<kArenaAllocMisc> {
+class RIPFixup : public AssemblerFixup, public ArenaObject<kArenaAllocCodeGenerator> {
public:
RIPFixup(const CodeGeneratorX86_64& codegen, int offset)
: codegen_(codegen), offset_into_constant_area_(offset) {}
diff --git a/compiler/optimizing/gvn.cc b/compiler/optimizing/gvn.cc
index 5050e15..7cf0617 100644
--- a/compiler/optimizing/gvn.cc
+++ b/compiler/optimizing/gvn.cc
@@ -39,7 +39,7 @@
explicit ValueSet(ArenaAllocator* allocator)
: allocator_(allocator),
num_buckets_(kMinimumNumberOfBuckets),
- buckets_(allocator->AllocArray<Node*>(num_buckets_)),
+ buckets_(allocator->AllocArray<Node*>(num_buckets_, kArenaAllocGvn)),
buckets_owned_(allocator, num_buckets_, false),
num_entries_(0) {
// ArenaAllocator returns zeroed memory, so no need to set buckets to null.
@@ -52,7 +52,7 @@
ValueSet(ArenaAllocator* allocator, const ValueSet& to_copy)
: allocator_(allocator),
num_buckets_(to_copy.IdealBucketCount()),
- buckets_(allocator->AllocArray<Node*>(num_buckets_)),
+ buckets_(allocator->AllocArray<Node*>(num_buckets_, kArenaAllocGvn)),
buckets_owned_(allocator, num_buckets_, false),
num_entries_(to_copy.num_entries_) {
// ArenaAllocator returns zeroed memory, so entries of buckets_ and
diff --git a/compiler/optimizing/induction_var_analysis.cc b/compiler/optimizing/induction_var_analysis.cc
index 9fb4304..52c729d 100644
--- a/compiler/optimizing/induction_var_analysis.cc
+++ b/compiler/optimizing/induction_var_analysis.cc
@@ -74,11 +74,14 @@
HInductionVarAnalysis::HInductionVarAnalysis(HGraph* graph)
: HOptimization(graph, kInductionPassName),
global_depth_(0),
- stack_(graph->GetArena()->Adapter()),
- scc_(graph->GetArena()->Adapter()),
- map_(std::less<HInstruction*>(), graph->GetArena()->Adapter()),
- cycle_(std::less<HInstruction*>(), graph->GetArena()->Adapter()),
- induction_(std::less<HLoopInformation*>(), graph->GetArena()->Adapter()) {
+ stack_(graph->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)),
+ scc_(graph->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)),
+ map_(std::less<HInstruction*>(),
+ graph->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)),
+ cycle_(std::less<HInstruction*>(),
+ graph->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)),
+ induction_(std::less<HLoopInformation*>(),
+ graph->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)) {
}
void HInductionVarAnalysis::Run() {
@@ -228,7 +231,7 @@
// Rotate proper entry-phi to front.
if (size > 1) {
- ArenaVector<HInstruction*> other(graph_->GetArena()->Adapter());
+ ArenaVector<HInstruction*> other(graph_->GetArena()->Adapter(kArenaAllocInductionVarAnalysis));
RotateEntryPhiFirst(loop, &scc_, &other);
}
@@ -637,7 +640,8 @@
if (it == induction_.end()) {
it = induction_.Put(loop,
ArenaSafeMap<HInstruction*, InductionInfo*>(
- std::less<HInstruction*>(), graph_->GetArena()->Adapter()));
+ std::less<HInstruction*>(),
+ graph_->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)));
}
it->second.Put(instruction, info);
}
diff --git a/compiler/optimizing/induction_var_analysis.h b/compiler/optimizing/induction_var_analysis.h
index 190a0db..ba5bfc2 100644
--- a/compiler/optimizing/induction_var_analysis.h
+++ b/compiler/optimizing/induction_var_analysis.h
@@ -78,7 +78,7 @@
* (4) periodic
* nop: a, then defined by b (repeated when exhausted)
*/
- struct InductionInfo : public ArenaObject<kArenaAllocMisc> {
+ struct InductionInfo : public ArenaObject<kArenaAllocInductionVarAnalysis> {
InductionInfo(InductionClass ic,
InductionOp op,
InductionInfo* a,
diff --git a/compiler/optimizing/locations.h b/compiler/optimizing/locations.h
index 76bd595..de4fb7e 100644
--- a/compiler/optimizing/locations.h
+++ b/compiler/optimizing/locations.h
@@ -468,7 +468,7 @@
* The intent is to have the code for generating the instruction independent of
* register allocation. A register allocator just has to provide a LocationSummary.
*/
-class LocationSummary : public ArenaObject<kArenaAllocMisc> {
+class LocationSummary : public ArenaObject<kArenaAllocLocationSummary> {
public:
enum CallKind {
kNoCall,
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index c43e58f..9cdb89b 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -187,7 +187,8 @@
}
number_of_registers_ = codegen_->GetNumberOfCoreRegisters();
- registers_array_ = allocator_->AllocArray<size_t>(number_of_registers_);
+ registers_array_ = allocator_->AllocArray<size_t>(number_of_registers_,
+ kArenaAllocRegisterAllocator);
processing_core_registers_ = true;
unhandled_ = &unhandled_core_intervals_;
for (LiveInterval* fixed : physical_core_register_intervals_) {
@@ -206,7 +207,8 @@
handled_.clear();
number_of_registers_ = codegen_->GetNumberOfFloatingPointRegisters();
- registers_array_ = allocator_->AllocArray<size_t>(number_of_registers_);
+ registers_array_ = allocator_->AllocArray<size_t>(number_of_registers_,
+ kArenaAllocRegisterAllocator);
processing_core_registers_ = false;
unhandled_ = &unhandled_fp_intervals_;
for (LiveInterval* fixed : physical_fp_register_intervals_) {
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index 414cc7d..e4b0999 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -27,7 +27,7 @@
static constexpr int kNoRegister = -1;
-class BlockInfo : public ArenaObject<kArenaAllocMisc> {
+class BlockInfo : public ArenaObject<kArenaAllocSsaLiveness> {
public:
BlockInfo(ArenaAllocator* allocator, const HBasicBlock& block, size_t number_of_ssa_values)
: block_(block),
@@ -55,7 +55,7 @@
* A live range contains the start and end of a range where an instruction or a temporary
* is live.
*/
-class LiveRange FINAL : public ArenaObject<kArenaAllocMisc> {
+class LiveRange FINAL : public ArenaObject<kArenaAllocSsaLiveness> {
public:
LiveRange(size_t start, size_t end, LiveRange* next) : start_(start), end_(end), next_(next) {
DCHECK_LT(start, end);
@@ -101,7 +101,7 @@
/**
* A use position represents a live interval use at a given position.
*/
-class UsePosition : public ArenaObject<kArenaAllocMisc> {
+class UsePosition : public ArenaObject<kArenaAllocSsaLiveness> {
public:
UsePosition(HInstruction* user,
HEnvironment* environment,
@@ -169,7 +169,7 @@
DISALLOW_COPY_AND_ASSIGN(UsePosition);
};
-class SafepointPosition : public ArenaObject<kArenaAllocMisc> {
+class SafepointPosition : public ArenaObject<kArenaAllocSsaLiveness> {
public:
explicit SafepointPosition(HInstruction* instruction)
: instruction_(instruction),