author Vladimir Marko <vmarko@google.com> 2017-10-03 14:49:14 +0100
committer Vladimir Marko <vmarko@google.com> 2017-10-06 17:53:50 +0100
commit ca6fff898afcb62491458ae8bcd428bfb3043da1 (patch)
tree 195a6b16d3a4b34acc2faf91ce56f448efb15e07 /compiler/optimizing/code_generator.cc
parent aa7273e56fbafc2692c8d20a31b50d2f4bdd2aa1 (diff)
ART: Use ScopedArenaAllocator for pass-local data.
Passes using local ArenaAllocator were hiding their memory usage
from the allocation counting, making it difficult to track down
where memory was used. Using ScopedArenaAllocator reveals the
memory usage.

This changes the HGraph constructor which requires a lot of
changes in tests. Refactor these tests to limit the amount of
work needed the next time we change that constructor.

Test: m test-art-host-gtest
Test: testrunner.py --host
Test: Build with kArenaAllocatorCountAllocations = true.
Bug: 64312607
Change-Id: I34939e4086b500d6e827ff3ef2211d1a421ac91a
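To illustrate the rationale: a scoped allocator draws its memory from a shared arena and releases it in bulk when the scope ends, so a counter kept on the arena still sees everything a pass allocated, whereas a pass-local allocator destroyed with the pass takes its counters with it. The sketch below is a minimal model of that idea, assuming a simple bump-pointer arena; Arena and ScopedArena are hypothetical stand-ins, not ART's ArenaAllocator/ScopedArenaAllocator implementations.

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

// Hypothetical bump-pointer arena with a cumulative allocation counter.
class Arena {
 public:
  explicit Arena(size_t capacity) : buffer_(capacity) {}

  void* Alloc(size_t bytes) {
    bytes = (bytes + 7u) & ~size_t{7};           // 8-byte alignment
    if (top_ + bytes > buffer_.size()) return nullptr;
    void* result = buffer_.data() + top_;
    top_ += bytes;
    total_allocated_ += bytes;                   // survives scope rewinds
    return result;
  }

  size_t Top() const { return top_; }
  void Rewind(size_t mark) { top_ = mark; }
  size_t TotalAllocated() const { return total_allocated_; }

 private:
  std::vector<uint8_t> buffer_;
  size_t top_ = 0;
  size_t total_allocated_ = 0;
};

// Hypothetical scoped allocator: marks the arena top on entry and rewinds
// on exit, so pass-local data is freed in bulk while the arena's counter
// still records how much the pass allocated.
class ScopedArena {
 public:
  explicit ScopedArena(Arena* arena) : arena_(arena), mark_(arena->Top()) {}
  ~ScopedArena() { arena_->Rewind(mark_); }
  void* Alloc(size_t bytes) { return arena_->Alloc(bytes); }

 private:
  Arena* const arena_;
  const size_t mark_;
};

int main() {
  Arena arena(1 << 20);
  {
    ScopedArena pass_local(&arena);  // analogous to pass-local data
    pass_local.Alloc(4096);
  }                                  // memory released here in bulk
  // A pass-local allocator destroyed with the pass would have hidden
  // this usage; the shared arena keeps it visible:
  std::cout << "in use: " << arena.Top()
            << ", total allocated: " << arena.TotalAllocated() << "\n";
}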
Diffstat (limited to 'compiler/optimizing/code_generator.cc')
-rw-r--r-- compiler/optimizing/code_generator.cc | 22
1 file changed, 12 insertions(+), 10 deletions(-)
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 3cb37926af..ba26cfc70f 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -322,7 +322,7 @@ void CodeGenerator::InitializeCodeGeneration(size_t number_of_spill_slots,
void CodeGenerator::CreateCommonInvokeLocationSummary(
HInvoke* invoke, InvokeDexCallingConventionVisitor* visitor) {
- ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetArena();
+ ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
LocationSummary* locations = new (allocator) LocationSummary(invoke,
LocationSummary::kCallOnMainOnly);
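An aside on the new (allocator) LocationSummary(...) form used throughout this file: it relies on a class-level placement operator new that takes the allocator and carves the object's storage out of the arena. Below is a minimal, self-contained sketch of that pattern; Arena and Node are hypothetical stand-ins for ART's ArenaAllocator and LocationSummary, not the real classes.

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

// Hypothetical bump-pointer arena standing in for ART's ArenaAllocator.
class Arena {
 public:
  explicit Arena(size_t capacity) : buffer_(capacity) {}
  void* Alloc(size_t bytes) {
    bytes = (bytes + 7u) & ~size_t{7};
    void* result = buffer_.data() + top_;
    top_ += bytes;
    return result;
  }
 private:
  std::vector<uint8_t> buffer_;
  size_t top_ = 0;
};

class Node {
 public:
  explicit Node(int value) : value_(value) {}

  // Enables `new (arena) Node(...)`: storage comes from the arena.
  static void* operator new(size_t size, Arena* arena) {
    return arena->Alloc(size);
  }
  // Matching placement delete, invoked only if the constructor throws.
  static void operator delete(void* /*ptr*/, Arena* /*arena*/) {}

  int value() const { return value_; }

 private:
  int value_;
};

int main() {
  Arena arena(4096);
  Node* node = new (&arena) Node(42);  // never individually deleted;
  std::cout << node->value() << "\n";  // the arena is reclaimed in bulk
}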
@@ -420,7 +420,7 @@ void CodeGenerator::CreateUnresolvedFieldLocationSummary(
bool is_get = field_access->IsUnresolvedInstanceFieldGet()
|| field_access->IsUnresolvedStaticFieldGet();
- ArenaAllocator* allocator = field_access->GetBlock()->GetGraph()->GetArena();
+ ArenaAllocator* allocator = field_access->GetBlock()->GetGraph()->GetAllocator();
LocationSummary* locations =
new (allocator) LocationSummary(field_access, LocationSummary::kCallOnMainOnly);
@@ -541,7 +541,7 @@ void CodeGenerator::CreateLoadClassRuntimeCallLocationSummary(HLoadClass* cls,
Location runtime_return_location) {
DCHECK_EQ(cls->GetLoadKind(), HLoadClass::LoadKind::kRuntimeCall);
DCHECK_EQ(cls->InputCount(), 1u);
- LocationSummary* locations = new (cls->GetBlock()->GetGraph()->GetArena()) LocationSummary(
+ LocationSummary* locations = new (cls->GetBlock()->GetGraph()->GetAllocator()) LocationSummary(
cls, LocationSummary::kCallOnMainOnly);
locations->SetInAt(0, Location::NoLocation());
locations->AddTemp(runtime_type_index_location);
@@ -617,7 +617,7 @@ std::unique_ptr<CodeGenerator> CodeGenerator::Create(HGraph* graph,
const InstructionSetFeatures& isa_features,
const CompilerOptions& compiler_options,
OptimizingCompilerStats* stats) {
- ArenaAllocator* arena = graph->GetArena();
+ ArenaAllocator* arena = graph->GetAllocator();
switch (instruction_set) {
#ifdef ART_ENABLE_CODEGEN_arm
case kArm:
@@ -712,7 +712,7 @@ static void CheckLoopEntriesCanBeUsedForOsr(const HGraph& graph,
// One can write loops through try/catch, which we do not support for OSR anyway.
return;
}
- ArenaVector<HSuspendCheck*> loop_headers(graph.GetArena()->Adapter(kArenaAllocMisc));
+ ArenaVector<HSuspendCheck*> loop_headers(graph.GetAllocator()->Adapter(kArenaAllocMisc));
for (HBasicBlock* block : graph.GetReversePostOrder()) {
if (block->IsLoopHeader()) {
HSuspendCheck* suspend_check = block->GetLoopInformation()->GetSuspendCheck();
@@ -721,7 +721,8 @@ static void CheckLoopEntriesCanBeUsedForOsr(const HGraph& graph,
}
}
}
- ArenaVector<size_t> covered(loop_headers.size(), 0, graph.GetArena()->Adapter(kArenaAllocMisc));
+ ArenaVector<size_t> covered(
+ loop_headers.size(), 0, graph.GetAllocator()->Adapter(kArenaAllocMisc));
IterationRange<DexInstructionIterator> instructions = code_item.Instructions();
for (auto it = instructions.begin(); it != instructions.end(); ++it) {
const uint32_t dex_pc = it.GetDexPC(instructions.begin());
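The GetAllocator()->Adapter(kArenaAllocMisc) argument above hands ArenaVector a standard-allocator shim over the arena, tagged for allocation counting. A rough model of that shape, assuming ArenaVector is essentially std::vector with an arena-backed allocator; Arena and ArenaAdapter here are hypothetical stand-ins, not ART's types:

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

// Hypothetical bump-pointer arena, as in the sketches above.
class Arena {
 public:
  explicit Arena(size_t capacity) : buffer_(capacity) {}
  void* Alloc(size_t bytes) {
    bytes = (bytes + 7u) & ~size_t{7};
    void* result = buffer_.data() + top_;
    top_ += bytes;
    return result;
  }
 private:
  std::vector<uint8_t> buffer_;
  size_t top_ = 0;
};

// Standard-allocator shim over the arena, playing the role of the
// adapter returned by GetAllocator()->Adapter(kArenaAllocMisc).
template <typename T>
class ArenaAdapter {
 public:
  using value_type = T;
  explicit ArenaAdapter(Arena* arena) : arena_(arena) {}
  template <typename U>
  ArenaAdapter(const ArenaAdapter<U>& other) : arena_(other.arena_) {}
  T* allocate(size_t n) {
    return static_cast<T*>(arena_->Alloc(n * sizeof(T)));
  }
  void deallocate(T*, size_t) {}  // arena memory is reclaimed in bulk

  Arena* arena_;
};

template <typename T, typename U>
bool operator==(const ArenaAdapter<T>& a, const ArenaAdapter<U>& b) {
  return a.arena_ == b.arena_;
}
template <typename T, typename U>
bool operator!=(const ArenaAdapter<T>& a, const ArenaAdapter<U>& b) {
  return !(a == b);
}

int main() {
  Arena arena(1 << 16);
  // Shape of `ArenaVector<HSuspendCheck*> loop_headers(
  //     graph.GetAllocator()->Adapter(kArenaAllocMisc));`
  ArenaAdapter<int> adapter(&arena);
  std::vector<int, ArenaAdapter<int>> loop_headers(adapter);
  loop_headers.push_back(7);
  std::cout << loop_headers.size() << "\n";
}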
@@ -909,7 +910,7 @@ void CodeGenerator::MaybeRecordNativeDebugInfo(HInstruction* instruction,
}
void CodeGenerator::RecordCatchBlockInfo() {
- ArenaAllocator* arena = graph_->GetArena();
+ ArenaAllocator* arena = graph_->GetAllocator();
for (HBasicBlock* block : *block_order_) {
if (!block->IsCatchBlock()) {
@@ -1194,7 +1195,8 @@ LocationSummary* CodeGenerator::CreateThrowingSlowPathLocations(HInstruction* in
if (can_throw_into_catch_block) {
call_kind = LocationSummary::kCallOnSlowPath;
}
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations =
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
if (can_throw_into_catch_block && compiler_options_.GetImplicitNullChecks()) {
locations->SetCustomSlowPathCallerSaves(caller_saves); // Default: no caller-save registers.
}
@@ -1237,7 +1239,7 @@ void CodeGenerator::EmitParallelMoves(Location from1,
Location from2,
Location to2,
DataType::Type type2) {
- HParallelMove parallel_move(GetGraph()->GetArena());
+ HParallelMove parallel_move(GetGraph()->GetAllocator());
parallel_move.AddMove(from1, to1, type1, nullptr);
parallel_move.AddMove(from2, to2, type2, nullptr);
GetMoveResolver()->EmitNativeCode(&parallel_move);
@@ -1400,7 +1402,7 @@ void CodeGenerator::CreateSystemArrayCopyLocationSummary(HInvoke* invoke) {
return;
}
- ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetArena();
+ ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
LocationSummary* locations = new (allocator) LocationSummary(invoke,
LocationSummary::kCallOnSlowPath,
kIntrinsified);