Use ScopedArenaAllocator for building HGraph.
Memory needed to compile the two most expensive methods for
aosp_angler-userdebug boot image:
BatteryStats.dumpCheckinLocked(): 21.1MiB -> 20.2MiB
BatteryStats.dumpLocked(): 42.0MiB -> 40.3MiB
This is because the memory previously used by the graph builder
is now released at the end of graph building and reused by later passes.
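
For intuition, the mechanism reduces to a stack of arenas with
scope-based rewinding: a ScopedArenaAllocator records the stack top on
construction and rewinds it on destruction. A minimal standalone sketch
of the idea follows (simplified types and signatures, not ART's actual
ArenaStack/ScopedArenaAllocator API):

    #include <cstddef>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    // A bump allocator over one reusable block of memory.
    class ArenaStack {
     public:
      explicit ArenaStack(size_t capacity) : storage_(capacity), top_(0u) {}
      size_t Top() const { return top_; }
      void SetTop(size_t top) { top_ = top; }
      void* Alloc(size_t size) {
        size = (size + 7u) & ~static_cast<size_t>(7u);  // 8-byte alignment.
        if (top_ + size > storage_.size()) return nullptr;
        void* result = storage_.data() + top_;
        top_ += size;
        return result;
      }
     private:
      std::vector<uint8_t> storage_;
      size_t top_;
    };

    // RAII scope: rewinds the stack on destruction, so everything allocated
    // during one compiler phase becomes reusable by the next phase.
    class ScopedArenaAllocator {
     public:
      explicit ScopedArenaAllocator(ArenaStack* stack)
          : stack_(stack), saved_top_(stack->Top()) {}
      ~ScopedArenaAllocator() { stack_->SetTop(saved_top_); }
      void* Alloc(size_t size) { return stack_->Alloc(size); }
     private:
      ArenaStack* const stack_;
      const size_t saved_top_;
    };

    int main() {
      ArenaStack arena_stack(1024u);
      {
        ScopedArenaAllocator local_allocator(&arena_stack);
        local_allocator.Alloc(256u);  // Builder-local maps, vectors, ...
        std::cout << "during building: top = " << arena_stack.Top() << "\n";
      }
      // The scope rewound the stack; later passes start from offset 0 again.
      std::cout << "after building:  top = " << arena_stack.Top() << "\n";
      return 0;
    }
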
Also finish the "arena" -> "allocator" renaming; make renamed
allocator pointers that are class members const where appropriate
(and make a few neighboring members const as well).
Test: m test-art-host-gtest
Test: testrunner.py --host
Bug: 64312607
Change-Id: Ia50aafc80c05941ae5b96984ba4f31ed4c78255e
diff --git a/compiler/jni/jni_cfi_test.cc b/compiler/jni/jni_cfi_test.cc
index 28709a1..5b57718 100644
--- a/compiler/jni/jni_cfi_test.cc
+++ b/compiler/jni/jni_cfi_test.cc
@@ -62,23 +62,24 @@
const char* shorty = "IIFII";
ArenaPool pool;
- ArenaAllocator arena(&pool);
+ ArenaAllocator allocator(&pool);
std::unique_ptr<JniCallingConvention> jni_conv(
- JniCallingConvention::Create(&arena,
+ JniCallingConvention::Create(&allocator,
is_static,
is_synchronized,
/*is_critical_native*/false,
shorty,
isa));
std::unique_ptr<ManagedRuntimeCallingConvention> mr_conv(
- ManagedRuntimeCallingConvention::Create(&arena, is_static, is_synchronized, shorty, isa));
+ ManagedRuntimeCallingConvention::Create(
+ &allocator, is_static, is_synchronized, shorty, isa));
const int frame_size(jni_conv->FrameSize());
ArrayRef<const ManagedRegister> callee_save_regs = jni_conv->CalleeSaveRegisters();
// Assemble the method.
std::unique_ptr<JNIMacroAssembler<kPointerSize>> jni_asm(
- JNIMacroAssembler<kPointerSize>::Create(&arena, isa));
+ JNIMacroAssembler<kPointerSize>::Create(&allocator, isa));
jni_asm->cfi().SetEnabled(true);
jni_asm->BuildFrame(frame_size, mr_conv->MethodRegister(),
callee_save_regs, mr_conv->EntrySpills());
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index 92b5c4d..e32b681 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -179,11 +179,11 @@
}
ArenaPool pool;
- ArenaAllocator arena(&pool);
+ ArenaAllocator allocator(&pool);
// Calling conventions used to iterate over parameters to method
std::unique_ptr<JniCallingConvention> main_jni_conv =
- JniCallingConvention::Create(&arena,
+ JniCallingConvention::Create(&allocator,
is_static,
is_synchronized,
is_critical_native,
@@ -193,7 +193,7 @@
std::unique_ptr<ManagedRuntimeCallingConvention> mr_conv(
ManagedRuntimeCallingConvention::Create(
- &arena, is_static, is_synchronized, shorty, instruction_set));
+ &allocator, is_static, is_synchronized, shorty, instruction_set));
// Calling conventions to call into JNI method "end" possibly passing a returned reference, the
// method and the current thread.
@@ -209,7 +209,7 @@
}
std::unique_ptr<JniCallingConvention> end_jni_conv(
- JniCallingConvention::Create(&arena,
+ JniCallingConvention::Create(&allocator,
is_static,
is_synchronized,
is_critical_native,
@@ -218,7 +218,7 @@
// Assembler that holds generated instructions
std::unique_ptr<JNIMacroAssembler<kPointerSize>> jni_asm =
- GetMacroAssembler<kPointerSize>(&arena, instruction_set, instruction_set_features);
+ GetMacroAssembler<kPointerSize>(&allocator, instruction_set, instruction_set_features);
const CompilerOptions& compiler_options = driver->GetCompilerOptions();
jni_asm->cfi().SetEnabled(compiler_options.GenerateAnyDebugInfo());
jni_asm->SetEmitRunTimeChecksInDebugMode(compiler_options.EmitRunTimeChecksInDebugMode());
diff --git a/compiler/linker/arm/relative_patcher_thumb2.cc b/compiler/linker/arm/relative_patcher_thumb2.cc
index f84fea3..3d56833 100644
--- a/compiler/linker/arm/relative_patcher_thumb2.cc
+++ b/compiler/linker/arm/relative_patcher_thumb2.cc
@@ -354,8 +354,8 @@
std::vector<uint8_t> Thumb2RelativePatcher::CompileThunk(const ThunkKey& key) {
ArenaPool pool;
- ArenaAllocator arena(&pool);
- arm::ArmVIXLAssembler assembler(&arena);
+ ArenaAllocator allocator(&pool);
+ arm::ArmVIXLAssembler assembler(&allocator);
switch (key.GetType()) {
case ThunkType::kMethodCall:
diff --git a/compiler/linker/arm64/relative_patcher_arm64.cc b/compiler/linker/arm64/relative_patcher_arm64.cc
index 828c99b..663e43b 100644
--- a/compiler/linker/arm64/relative_patcher_arm64.cc
+++ b/compiler/linker/arm64/relative_patcher_arm64.cc
@@ -511,8 +511,8 @@
std::vector<uint8_t> Arm64RelativePatcher::CompileThunk(const ThunkKey& key) {
ArenaPool pool;
- ArenaAllocator arena(&pool);
- arm64::Arm64Assembler assembler(&arena);
+ ArenaAllocator allocator(&pool);
+ arm64::Arm64Assembler assembler(&allocator);
switch (key.GetType()) {
case ThunkType::kMethodCall: {
diff --git a/compiler/optimizing/block_builder.cc b/compiler/optimizing/block_builder.cc
index fe7ecd1..d7def77 100644
--- a/compiler/optimizing/block_builder.cc
+++ b/compiler/optimizing/block_builder.cc
@@ -29,7 +29,7 @@
uint32_t store_dex_pc) {
HBasicBlock* block = branch_targets_[store_dex_pc];
if (block == nullptr) {
- block = new (arena_) HBasicBlock(graph_, semantic_dex_pc);
+ block = new (allocator_) HBasicBlock(graph_, semantic_dex_pc);
branch_targets_[store_dex_pc] = block;
}
DCHECK_EQ(block->GetDexPc(), semantic_dex_pc);
@@ -200,7 +200,7 @@
// Returns the TryItem stored for `block` or nullptr if there is no info for it.
static const DexFile::TryItem* GetTryItem(
HBasicBlock* block,
- const ArenaSafeMap<uint32_t, const DexFile::TryItem*>& try_block_info) {
+ const ScopedArenaSafeMap<uint32_t, const DexFile::TryItem*>& try_block_info) {
auto iterator = try_block_info.find(block->GetBlockId());
return (iterator == try_block_info.end()) ? nullptr : iterator->second;
}
@@ -212,7 +212,7 @@
static void LinkToCatchBlocks(HTryBoundary* try_boundary,
const DexFile::CodeItem& code_item,
const DexFile::TryItem* try_item,
- const ArenaSafeMap<uint32_t, HBasicBlock*>& catch_blocks) {
+ const ScopedArenaSafeMap<uint32_t, HBasicBlock*>& catch_blocks) {
for (CatchHandlerIterator it(code_item, *try_item); it.HasNext(); it.Next()) {
try_boundary->AddExceptionHandler(catch_blocks.Get(it.GetHandlerAddress()));
}
@@ -253,8 +253,8 @@
// Keep a map of all try blocks and their respective TryItems. We do not use
// the block's pointer but rather its id to ensure deterministic iteration.
- ArenaSafeMap<uint32_t, const DexFile::TryItem*> try_block_info(
- std::less<uint32_t>(), arena_->Adapter(kArenaAllocGraphBuilder));
+ ScopedArenaSafeMap<uint32_t, const DexFile::TryItem*> try_block_info(
+ std::less<uint32_t>(), local_allocator_->Adapter(kArenaAllocGraphBuilder));
// Obtain TryItem information for blocks with throwing instructions, and split
// blocks which are both try & catch to simplify the graph.
@@ -278,8 +278,8 @@
}
// Map from a handler dex_pc to the corresponding catch block.
- ArenaSafeMap<uint32_t, HBasicBlock*> catch_blocks(
- std::less<uint32_t>(), arena_->Adapter(kArenaAllocGraphBuilder));
+ ScopedArenaSafeMap<uint32_t, HBasicBlock*> catch_blocks(
+ std::less<uint32_t>(), local_allocator_->Adapter(kArenaAllocGraphBuilder));
  // Iterate over catch blocks, create artificial landing pads if necessary to
// simplify the CFG, and set metadata.
@@ -302,8 +302,8 @@
HBasicBlock* catch_block = GetBlockAt(address);
bool is_try_block = (try_block_info.find(catch_block->GetBlockId()) != try_block_info.end());
if (is_try_block || MightHaveLiveNormalPredecessors(catch_block)) {
- HBasicBlock* new_catch_block = new (arena_) HBasicBlock(graph_, address);
- new_catch_block->AddInstruction(new (arena_) HGoto(address));
+ HBasicBlock* new_catch_block = new (allocator_) HBasicBlock(graph_, address);
+ new_catch_block->AddInstruction(new (allocator_) HGoto(address));
new_catch_block->AddSuccessor(catch_block);
graph_->AddBlock(new_catch_block);
catch_block = new_catch_block;
@@ -311,7 +311,7 @@
catch_blocks.Put(address, catch_block);
catch_block->SetTryCatchInformation(
- new (arena_) TryCatchInformation(iterator.GetHandlerTypeIndex(), *dex_file_));
+ new (allocator_) TryCatchInformation(iterator.GetHandlerTypeIndex(), *dex_file_));
}
handlers_ptr = iterator.EndDataPointer();
}
@@ -328,8 +328,8 @@
if (GetTryItem(predecessor, try_block_info) != try_item) {
// Found a predecessor not covered by the same TryItem. Insert entering
// boundary block.
- HTryBoundary* try_entry =
- new (arena_) HTryBoundary(HTryBoundary::BoundaryKind::kEntry, try_block->GetDexPc());
+ HTryBoundary* try_entry = new (allocator_) HTryBoundary(
+ HTryBoundary::BoundaryKind::kEntry, try_block->GetDexPc());
try_block->CreateImmediateDominator()->AddInstruction(try_entry);
LinkToCatchBlocks(try_entry, code_item_, try_item, catch_blocks);
break;
@@ -357,7 +357,7 @@
// Insert TryBoundary and link to catch blocks.
HTryBoundary* try_exit =
- new (arena_) HTryBoundary(HTryBoundary::BoundaryKind::kExit, successor->GetDexPc());
+ new (allocator_) HTryBoundary(HTryBoundary::BoundaryKind::kExit, successor->GetDexPc());
graph_->SplitEdge(try_block, successor)->AddInstruction(try_exit);
LinkToCatchBlocks(try_exit, code_item_, try_item, catch_blocks);
}
@@ -367,8 +367,8 @@
bool HBasicBlockBuilder::Build() {
DCHECK(graph_->GetBlocks().empty());
- graph_->SetEntryBlock(new (arena_) HBasicBlock(graph_, kNoDexPc));
- graph_->SetExitBlock(new (arena_) HBasicBlock(graph_, kNoDexPc));
+ graph_->SetEntryBlock(new (allocator_) HBasicBlock(graph_, kNoDexPc));
+ graph_->SetExitBlock(new (allocator_) HBasicBlock(graph_, kNoDexPc));
// TODO(dbrazdil): Do CreateBranchTargets and ConnectBasicBlocks in one pass.
if (!CreateBranchTargets()) {
diff --git a/compiler/optimizing/block_builder.h b/compiler/optimizing/block_builder.h
index 4a0f78c..79f7a7b 100644
--- a/compiler/optimizing/block_builder.h
+++ b/compiler/optimizing/block_builder.h
@@ -17,8 +17,8 @@
#ifndef ART_COMPILER_OPTIMIZING_BLOCK_BUILDER_H_
#define ART_COMPILER_OPTIMIZING_BLOCK_BUILDER_H_
-#include "base/arena_containers.h"
-#include "base/arena_object.h"
+#include "base/scoped_arena_allocator.h"
+#include "base/scoped_arena_containers.h"
#include "dex_file.h"
#include "nodes.h"
@@ -28,17 +28,21 @@
public:
HBasicBlockBuilder(HGraph* graph,
const DexFile* const dex_file,
- const DexFile::CodeItem& code_item)
- : arena_(graph->GetAllocator()),
+ const DexFile::CodeItem& code_item,
+ ScopedArenaAllocator* local_allocator)
+ : allocator_(graph->GetAllocator()),
graph_(graph),
dex_file_(dex_file),
code_item_(code_item),
+ local_allocator_(local_allocator),
branch_targets_(code_item.insns_size_in_code_units_,
nullptr,
- arena_->Adapter(kArenaAllocGraphBuilder)),
- throwing_blocks_(kDefaultNumberOfThrowingBlocks, arena_->Adapter(kArenaAllocGraphBuilder)),
+ local_allocator->Adapter(kArenaAllocGraphBuilder)),
+ throwing_blocks_(kDefaultNumberOfThrowingBlocks,
+ local_allocator->Adapter(kArenaAllocGraphBuilder)),
number_of_branches_(0u),
- quicken_index_for_dex_pc_(std::less<uint32_t>(), arena_->Adapter()) {}
+ quicken_index_for_dex_pc_(std::less<uint32_t>(),
+ local_allocator->Adapter(kArenaAllocGraphBuilder)) {}
// Creates basic blocks in `graph_` at branch target dex_pc positions of the
// `code_item_`. Blocks are connected but left unpopulated with instructions.
@@ -71,18 +75,19 @@
// handler dex_pcs.
bool MightHaveLiveNormalPredecessors(HBasicBlock* catch_block);
- ArenaAllocator* const arena_;
+ ArenaAllocator* const allocator_;
HGraph* const graph_;
const DexFile* const dex_file_;
const DexFile::CodeItem& code_item_;
- ArenaVector<HBasicBlock*> branch_targets_;
- ArenaVector<HBasicBlock*> throwing_blocks_;
+ ScopedArenaAllocator* const local_allocator_;
+ ScopedArenaVector<HBasicBlock*> branch_targets_;
+ ScopedArenaVector<HBasicBlock*> throwing_blocks_;
size_t number_of_branches_;
// A table to quickly find the quicken index for the first instruction of a basic block.
- ArenaSafeMap<uint32_t, uint32_t> quicken_index_for_dex_pc_;
+ ScopedArenaSafeMap<uint32_t, uint32_t> quicken_index_for_dex_pc_;
static constexpr size_t kDefaultNumberOfThrowingBlocks = 2u;
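
For readers unfamiliar with the Adapter(kArenaAllocGraphBuilder) calls
above: they bind each container to the phase-local allocator and tag its
allocations for memory accounting. The closest standard-library analogue
is C++17 polymorphic allocators; a sketch of the same binding idea using
std::pmr (an analogy only, not ART's API):

    #include <cstddef>
    #include <iostream>
    #include <memory_resource>
    #include <vector>

    int main() {
      // One phase-local buffer; every container below allocates from it.
      std::byte buffer[1024];
      std::pmr::monotonic_buffer_resource phase_local(buffer, sizeof(buffer));

      // Analogous to:
      //   ScopedArenaVector<HBasicBlock*> branch_targets_(
      //       size, nullptr, local_allocator->Adapter(kArenaAllocGraphBuilder));
      std::pmr::vector<int> branch_targets(&phase_local);
      branch_targets.resize(4u, 0);
      std::cout << branch_targets.size() << "\n";
      return 0;
    }  // Dropping the resource releases all containers' memory at once.
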
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 76350a6..4ed1612 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -20,12 +20,16 @@
#include "base/arena_bit_vector.h"
#include "base/bit_vector-inl.h"
#include "base/logging.h"
+#include "block_builder.h"
#include "data_type-inl.h"
#include "dex/verified_method.h"
#include "driver/compiler_options.h"
+#include "instruction_builder.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "nodes.h"
+#include "optimizing_compiler_stats.h"
+#include "ssa_builder.h"
#include "thread.h"
#include "utils/dex_cache_arrays_layout-inl.h"
@@ -43,27 +47,13 @@
dex_file_(&graph->GetDexFile()),
code_item_(*dex_compilation_unit->GetCodeItem()),
dex_compilation_unit_(dex_compilation_unit),
+ outer_compilation_unit_(outer_compilation_unit),
compiler_driver_(driver),
+ code_generator_(code_generator),
compilation_stats_(compiler_stats),
- block_builder_(graph, dex_file_, code_item_),
- ssa_builder_(graph,
- dex_compilation_unit->GetClassLoader(),
- dex_compilation_unit->GetDexCache(),
- handles),
- instruction_builder_(graph,
- &block_builder_,
- &ssa_builder_,
- dex_file_,
- code_item_,
- DataType::FromShorty(dex_compilation_unit_->GetShorty()[0]),
- dex_compilation_unit,
- outer_compilation_unit,
- driver,
- code_generator,
- interpreter_metadata,
- compiler_stats,
- dex_compilation_unit->GetDexCache(),
- handles) {}
+ interpreter_metadata_(interpreter_metadata),
+ handles_(handles),
+ return_type_(DataType::FromShorty(dex_compilation_unit_->GetShorty()[0])) {}
bool HGraphBuilder::SkipCompilation(size_t number_of_branches) {
if (compiler_driver_ == nullptr) {
@@ -108,15 +98,38 @@
graph_->SetMaximumNumberOfOutVRegs(code_item_.outs_size_);
graph_->SetHasTryCatch(code_item_.tries_size_ != 0);
+ // Use ScopedArenaAllocator for all local allocations.
+ ScopedArenaAllocator local_allocator(graph_->GetArenaStack());
+ HBasicBlockBuilder block_builder(graph_, dex_file_, code_item_, &local_allocator);
+ SsaBuilder ssa_builder(graph_,
+ dex_compilation_unit_->GetClassLoader(),
+ dex_compilation_unit_->GetDexCache(),
+ handles_,
+ &local_allocator);
+ HInstructionBuilder instruction_builder(graph_,
+ &block_builder,
+ &ssa_builder,
+ dex_file_,
+ code_item_,
+ return_type_,
+ dex_compilation_unit_,
+ outer_compilation_unit_,
+ compiler_driver_,
+ code_generator_,
+ interpreter_metadata_,
+ compilation_stats_,
+ handles_,
+ &local_allocator);
+
// 1) Create basic blocks and link them together. Basic blocks are left
// unpopulated with the exception of synthetic blocks, e.g. HTryBoundaries.
- if (!block_builder_.Build()) {
+ if (!block_builder.Build()) {
return kAnalysisInvalidBytecode;
}
// 2) Decide whether to skip this method based on its code size and number
// of branches.
- if (SkipCompilation(block_builder_.GetNumberOfBranches())) {
+ if (SkipCompilation(block_builder.GetNumberOfBranches())) {
return kAnalysisSkipped;
}
@@ -127,12 +140,12 @@
}
// 4) Populate basic blocks with instructions.
- if (!instruction_builder_.Build()) {
+ if (!instruction_builder.Build()) {
return kAnalysisInvalidBytecode;
}
// 5) Type the graph and eliminate dead/redundant phis.
- return ssa_builder_.BuildSsa();
+ return ssa_builder.BuildSsa();
}
} // namespace art
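
Reduced to a hypothetical minimum, the restructuring of BuildGraph()
above amounts to this: the sub-builders move from long-lived members to
locals that share one phase-local allocator, so all of their allocations
are reclaimed together when BuildGraph() returns (names simplified; not
the real classes):

    // Hypothetical reduction of the BuildGraph() change.
    struct ScopedAlloc {};  // Stands in for ScopedArenaAllocator.

    struct BlockBuilder {
      explicit BlockBuilder(ScopedAlloc* allocator) : allocator_(allocator) {}
      bool Build() { return true; }
      ScopedAlloc* const allocator_;
    };

    struct InstructionBuilder {
      explicit InstructionBuilder(ScopedAlloc* allocator) : allocator_(allocator) {}
      bool Build() { return true; }
      ScopedAlloc* const allocator_;
    };

    bool BuildGraph() {
      ScopedAlloc local_allocator;  // Lives only for this call.
      BlockBuilder block_builder(&local_allocator);
      InstructionBuilder instruction_builder(&local_allocator);
      if (!block_builder.Build()) {
        return false;
      }
      return instruction_builder.Build();
    }  // local_allocator dies here; all builder memory is reusable again.

    int main() { return BuildGraph() ? 0 : 1; }
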
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index 6c5985a..5a860f1 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -17,21 +17,17 @@
#ifndef ART_COMPILER_OPTIMIZING_BUILDER_H_
#define ART_COMPILER_OPTIMIZING_BUILDER_H_
-#include "base/arena_containers.h"
#include "base/arena_object.h"
-#include "block_builder.h"
#include "dex_file-inl.h"
#include "dex_file.h"
#include "driver/compiler_driver.h"
#include "driver/dex_compilation_unit.h"
-#include "instruction_builder.h"
#include "nodes.h"
-#include "optimizing_compiler_stats.h"
-#include "ssa_builder.h"
namespace art {
class CodeGenerator;
+class OptimizingCompilerStats;
class HGraphBuilder : public ValueObject {
public:
@@ -46,34 +42,21 @@
// Only for unit testing.
HGraphBuilder(HGraph* graph,
+ const DexCompilationUnit* dex_compilation_unit,
const DexFile::CodeItem& code_item,
VariableSizedHandleScope* handles,
DataType::Type return_type = DataType::Type::kInt32)
: graph_(graph),
- dex_file_(nullptr),
+ dex_file_(dex_compilation_unit->GetDexFile()),
code_item_(code_item),
- dex_compilation_unit_(nullptr),
+ dex_compilation_unit_(dex_compilation_unit),
+ outer_compilation_unit_(nullptr),
compiler_driver_(nullptr),
+ code_generator_(nullptr),
compilation_stats_(nullptr),
- block_builder_(graph, nullptr, code_item),
- ssa_builder_(graph,
- handles->NewHandle<mirror::ClassLoader>(nullptr),
- handles->NewHandle<mirror::DexCache>(nullptr),
- handles),
- instruction_builder_(graph,
- &block_builder_,
- &ssa_builder_,
- /* dex_file */ nullptr,
- code_item_,
- return_type,
- /* dex_compilation_unit */ nullptr,
- /* outer_compilation_unit */ nullptr,
- /* compiler_driver */ nullptr,
- /* code_generator */ nullptr,
- /* interpreter_metadata */ nullptr,
- /* compiler_stats */ nullptr,
- handles->NewHandle<mirror::DexCache>(nullptr),
- handles) {}
+ interpreter_metadata_(nullptr),
+ handles_(handles),
+ return_type_(return_type) {}
GraphAnalysisResult BuildGraph();
@@ -90,13 +73,16 @@
// it can be an inlined method.
const DexCompilationUnit* const dex_compilation_unit_;
+ // The compilation unit of the enclosing method being compiled.
+ const DexCompilationUnit* const outer_compilation_unit_;
+
CompilerDriver* const compiler_driver_;
+ CodeGenerator* const code_generator_;
- OptimizingCompilerStats* compilation_stats_;
-
- HBasicBlockBuilder block_builder_;
- SsaBuilder ssa_builder_;
- HInstructionBuilder instruction_builder_;
+ OptimizingCompilerStats* const compilation_stats_;
+ const uint8_t* const interpreter_metadata_;
+ VariableSizedHandleScope* const handles_;
+ const DataType::Type return_type_;
DISALLOW_COPY_AND_ASSIGN(HGraphBuilder);
};
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index eccdccf..3851877 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -21,6 +21,7 @@
#include <cctype>
#include <sstream>
+#include "art_method.h"
#include "bounds_check_elimination.h"
#include "builder.h"
#include "code_generator.h"
@@ -33,6 +34,7 @@
#include "optimization.h"
#include "reference_type_propagation.h"
#include "register_allocator_linear_scan.h"
+#include "scoped_thread_state_change-inl.h"
#include "ssa_liveness_analysis.h"
#include "utils/assembler.h"
#include "utils/intrusive_forward_list.h"
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index b06d91c..902985e 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -17,15 +17,23 @@
#include "instruction_builder.h"
#include "art_method-inl.h"
+#include "base/arena_bit_vector.h"
+#include "base/bit_vector-inl.h"
+#include "block_builder.h"
#include "bytecode_utils.h"
#include "class_linker.h"
#include "data_type-inl.h"
#include "dex_instruction-inl.h"
+#include "driver/compiler_driver-inl.h"
+#include "driver/dex_compilation_unit.h"
#include "driver/compiler_options.h"
#include "imtable-inl.h"
+#include "mirror/dex_cache.h"
+#include "optimizing_compiler_stats.h"
#include "quicken_info.h"
#include "scoped_thread_state_change-inl.h"
#include "sharpening.h"
+#include "ssa_builder.h"
#include "well_known_classes.h"
namespace art {
@@ -34,8 +42,8 @@
return block_builder_->GetBlockAt(dex_pc);
}
-inline ArenaVector<HInstruction*>* HInstructionBuilder::GetLocalsFor(HBasicBlock* block) {
- ArenaVector<HInstruction*>* locals = &locals_for_[block->GetBlockId()];
+inline ScopedArenaVector<HInstruction*>* HInstructionBuilder::GetLocalsFor(HBasicBlock* block) {
+ ScopedArenaVector<HInstruction*>* locals = &locals_for_[block->GetBlockId()];
const size_t vregs = graph_->GetNumberOfVRegs();
if (locals->size() == vregs) {
return locals;
@@ -43,9 +51,9 @@
return GetLocalsForWithAllocation(block, locals, vregs);
}
-ArenaVector<HInstruction*>* HInstructionBuilder::GetLocalsForWithAllocation(
+ScopedArenaVector<HInstruction*>* HInstructionBuilder::GetLocalsForWithAllocation(
HBasicBlock* block,
- ArenaVector<HInstruction*>* locals,
+ ScopedArenaVector<HInstruction*>* locals,
const size_t vregs) {
DCHECK_NE(locals->size(), vregs);
locals->resize(vregs, nullptr);
@@ -73,7 +81,7 @@
}
inline HInstruction* HInstructionBuilder::ValueOfLocalAt(HBasicBlock* block, size_t local) {
- ArenaVector<HInstruction*>* locals = GetLocalsFor(block);
+ ScopedArenaVector<HInstruction*>* locals = GetLocalsFor(block);
return (*locals)[local];
}
@@ -168,7 +176,7 @@
void HInstructionBuilder::PropagateLocalsToCatchBlocks() {
const HTryBoundary& try_entry = current_block_->GetTryCatchInformation()->GetTryEntry();
for (HBasicBlock* catch_block : try_entry.GetExceptionHandlers()) {
- ArenaVector<HInstruction*>* handler_locals = GetLocalsFor(catch_block);
+ ScopedArenaVector<HInstruction*>* handler_locals = GetLocalsFor(catch_block);
DCHECK_EQ(handler_locals->size(), current_locals_->size());
for (size_t vreg = 0, e = current_locals_->size(); vreg < e; ++vreg) {
HInstruction* handler_value = (*handler_locals)[vreg];
@@ -216,7 +224,7 @@
graph_->GetArtMethod(),
instruction->GetDexPc(),
instruction);
- environment->CopyFrom(*current_locals_);
+ environment->CopyFrom(ArrayRef<HInstruction* const>(*current_locals_));
instruction->SetRawEnvironment(environment);
}
}
@@ -264,8 +272,9 @@
}
bool HInstructionBuilder::Build() {
- locals_for_.resize(graph_->GetBlocks().size(),
- ArenaVector<HInstruction*>(allocator_->Adapter(kArenaAllocGraphBuilder)));
+ locals_for_.resize(
+ graph_->GetBlocks().size(),
+ ScopedArenaVector<HInstruction*>(local_allocator_->Adapter(kArenaAllocGraphBuilder)));
// Find locations where we want to generate extra stackmaps for native debugging.
// This allows us to generate the info only at interesting points (for example,
@@ -274,10 +283,7 @@
compiler_driver_->GetCompilerOptions().GetNativeDebuggable();
ArenaBitVector* native_debug_info_locations = nullptr;
if (native_debuggable) {
- const uint32_t num_instructions = code_item_.insns_size_in_code_units_;
- native_debug_info_locations =
- new (allocator_) ArenaBitVector (allocator_, num_instructions, false);
- FindNativeDebugInfoLocations(native_debug_info_locations);
+ native_debug_info_locations = FindNativeDebugInfoLocations();
}
for (HBasicBlock* block : graph_->GetReversePostOrder()) {
@@ -358,7 +364,7 @@
return true;
}
-void HInstructionBuilder::FindNativeDebugInfoLocations(ArenaBitVector* locations) {
+ArenaBitVector* HInstructionBuilder::FindNativeDebugInfoLocations() {
// The callback gets called when the line number changes.
  // In other words, it marks the start of a new Java statement.
struct Callback {
@@ -367,6 +373,12 @@
return false;
}
};
+ const uint32_t num_instructions = code_item_.insns_size_in_code_units_;
+ ArenaBitVector* locations = ArenaBitVector::Create(local_allocator_,
+ num_instructions,
+ /* expandable */ false,
+ kArenaAllocGraphBuilder);
+ locations->ClearAllBits();
dex_file_->DecodeDebugPositionInfo(&code_item_, Callback::Position, locations);
// Instruction-specific tweaks.
IterationRange<DexInstructionIterator> instructions = code_item_.Instructions();
@@ -387,6 +399,7 @@
break;
}
}
+ return locations;
}
HInstruction* HInstructionBuilder::LoadLocal(uint32_t reg_number, DataType::Type type) const {
@@ -439,8 +452,8 @@
void HInstructionBuilder::InitializeParameters() {
DCHECK(current_block_->IsEntryBlock());
- // dex_compilation_unit_ is null only when unit testing.
- if (dex_compilation_unit_ == nullptr) {
+ // outer_compilation_unit_ is null only when unit testing.
+ if (outer_compilation_unit_ == nullptr) {
return;
}
diff --git a/compiler/optimizing/instruction_builder.h b/compiler/optimizing/instruction_builder.h
index 79d6ddc..058b711 100644
--- a/compiler/optimizing/instruction_builder.h
+++ b/compiler/optimizing/instruction_builder.h
@@ -17,23 +17,32 @@
#ifndef ART_COMPILER_OPTIMIZING_INSTRUCTION_BUILDER_H_
#define ART_COMPILER_OPTIMIZING_INSTRUCTION_BUILDER_H_
-#include "base/arena_containers.h"
-#include "base/arena_object.h"
-#include "block_builder.h"
+#include "base/scoped_arena_allocator.h"
+#include "base/scoped_arena_containers.h"
+#include "data_type.h"
+#include "dex_file.h"
#include "dex_file_types.h"
-#include "driver/compiler_driver-inl.h"
-#include "driver/compiler_driver.h"
-#include "driver/dex_compilation_unit.h"
-#include "mirror/dex_cache.h"
+#include "handle.h"
#include "nodes.h"
-#include "optimizing_compiler_stats.h"
#include "quicken_info.h"
-#include "ssa_builder.h"
namespace art {
+class ArenaBitVector;
+class ArtField;
+class ArtMethod;
class CodeGenerator;
+class CompilerDriver;
+class DexCompilationUnit;
+class HBasicBlockBuilder;
class Instruction;
+class OptimizingCompilerStats;
+class SsaBuilder;
+class VariableSizedHandleScope;
+
+namespace mirror {
+class Class;
+} // namespace mirror
class HInstructionBuilder : public ValueObject {
public:
@@ -45,12 +54,12 @@
DataType::Type return_type,
const DexCompilationUnit* dex_compilation_unit,
const DexCompilationUnit* outer_compilation_unit,
- CompilerDriver* driver,
+ CompilerDriver* compiler_driver,
CodeGenerator* code_generator,
const uint8_t* interpreter_metadata,
OptimizingCompilerStats* compiler_stats,
- Handle<mirror::DexCache> dex_cache,
- VariableSizedHandleScope* handles)
+ VariableSizedHandleScope* handles,
+ ScopedArenaAllocator* local_allocator)
: allocator_(graph->GetAllocator()),
graph_(graph),
handles_(handles),
@@ -59,19 +68,19 @@
return_type_(return_type),
block_builder_(block_builder),
ssa_builder_(ssa_builder),
- locals_for_(allocator_->Adapter(kArenaAllocGraphBuilder)),
- current_block_(nullptr),
- current_locals_(nullptr),
- latest_result_(nullptr),
- current_this_parameter_(nullptr),
- compiler_driver_(driver),
+ compiler_driver_(compiler_driver),
code_generator_(code_generator),
dex_compilation_unit_(dex_compilation_unit),
outer_compilation_unit_(outer_compilation_unit),
quicken_info_(interpreter_metadata),
compilation_stats_(compiler_stats),
- dex_cache_(dex_cache),
- loop_headers_(allocator_->Adapter(kArenaAllocGraphBuilder)) {
+ local_allocator_(local_allocator),
+ locals_for_(local_allocator->Adapter(kArenaAllocGraphBuilder)),
+ current_block_(nullptr),
+ current_locals_(nullptr),
+ latest_result_(nullptr),
+ current_this_parameter_(nullptr),
+ loop_headers_(local_allocator->Adapter(kArenaAllocGraphBuilder)) {
loop_headers_.reserve(kDefaultNumberOfLoops);
}
@@ -83,18 +92,18 @@
void SetLoopHeaderPhiInputs();
bool ProcessDexInstruction(const Instruction& instruction, uint32_t dex_pc, size_t quicken_index);
- void FindNativeDebugInfoLocations(ArenaBitVector* locations);
+ ArenaBitVector* FindNativeDebugInfoLocations();
bool CanDecodeQuickenedInfo() const;
uint16_t LookupQuickenedInfo(uint32_t quicken_index);
HBasicBlock* FindBlockStartingAt(uint32_t dex_pc) const;
- ArenaVector<HInstruction*>* GetLocalsFor(HBasicBlock* block);
+ ScopedArenaVector<HInstruction*>* GetLocalsFor(HBasicBlock* block);
// Out of line version of GetLocalsFor(), which has a fast path that is
// beneficial to get inlined by callers.
- ArenaVector<HInstruction*>* GetLocalsForWithAllocation(
- HBasicBlock* block, ArenaVector<HInstruction*>* locals, const size_t vregs);
+ ScopedArenaVector<HInstruction*>* GetLocalsForWithAllocation(
+ HBasicBlock* block, ScopedArenaVector<HInstruction*>* locals, const size_t vregs);
HInstruction* ValueOfLocalAt(HBasicBlock* block, size_t local);
HInstruction* LoadLocal(uint32_t register_index, DataType::Type type) const;
HInstruction* LoadNullCheckedLocal(uint32_t register_index, uint32_t dex_pc);
@@ -314,7 +323,7 @@
ArenaAllocator* const allocator_;
HGraph* const graph_;
- VariableSizedHandleScope* handles_;
+ VariableSizedHandleScope* const handles_;
// The dex file where the method being compiled is, and the bytecode data.
const DexFile* const dex_file_;
@@ -323,18 +332,8 @@
// The return type of the method being compiled.
const DataType::Type return_type_;
- HBasicBlockBuilder* block_builder_;
- SsaBuilder* ssa_builder_;
-
- ArenaVector<ArenaVector<HInstruction*>> locals_for_;
- HBasicBlock* current_block_;
- ArenaVector<HInstruction*>* current_locals_;
- HInstruction* latest_result_;
- // Current "this" parameter.
- // Valid only after InitializeParameters() finishes.
- // * Null for static methods.
- // * Non-null for instance methods.
- HParameterValue* current_this_parameter_;
+ HBasicBlockBuilder* const block_builder_;
+ SsaBuilder* const ssa_builder_;
CompilerDriver* const compiler_driver_;
@@ -352,10 +351,20 @@
// Original values kept after instruction quickening.
QuickenInfoTable quicken_info_;
- OptimizingCompilerStats* compilation_stats_;
- Handle<mirror::DexCache> dex_cache_;
+ OptimizingCompilerStats* const compilation_stats_;
- ArenaVector<HBasicBlock*> loop_headers_;
+ ScopedArenaAllocator* const local_allocator_;
+ ScopedArenaVector<ScopedArenaVector<HInstruction*>> locals_for_;
+ HBasicBlock* current_block_;
+ ScopedArenaVector<HInstruction*>* current_locals_;
+ HInstruction* latest_result_;
+ // Current "this" parameter.
+ // Valid only after InitializeParameters() finishes.
+ // * Null for static methods.
+ // * Non-null for instance methods.
+ HParameterValue* current_this_parameter_;
+
+ ScopedArenaVector<HBasicBlock*> loop_headers_;
static constexpr int kDefaultNumberOfLoops = 2;
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index f39acab..afe7484 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -1284,9 +1284,9 @@
DCHECK(input_other->IsShr()); // For UShr, we would have taken the branch above.
// Replace SHR+AND with USHR, for example "(x >> 24) & 0xff" -> "x >>> 24".
HUShr* ushr = new (GetGraph()->GetAllocator()) HUShr(instruction->GetType(),
- input_other->InputAt(0),
- input_other->InputAt(1),
- input_other->GetDexPc());
+ input_other->InputAt(0),
+ input_other->InputAt(1),
+ input_other->GetDexPc());
instruction->GetBlock()->ReplaceAndRemoveInstructionWith(instruction, ushr);
input_other->GetBlock()->RemoveInstruction(input_other);
RecordSimplification();
diff --git a/compiler/optimizing/intrinsics_arm64.h b/compiler/optimizing/intrinsics_arm64.h
index 3533c88..033a644 100644
--- a/compiler/optimizing/intrinsics_arm64.h
+++ b/compiler/optimizing/intrinsics_arm64.h
@@ -57,8 +57,8 @@
bool TryDispatch(HInvoke* invoke);
private:
- ArenaAllocator* allocator_;
- CodeGeneratorARM64* codegen_;
+ ArenaAllocator* const allocator_;
+ CodeGeneratorARM64* const codegen_;
DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderARM64);
};
@@ -81,7 +81,7 @@
ArenaAllocator* GetAllocator();
- CodeGeneratorARM64* codegen_;
+ CodeGeneratorARM64* const codegen_;
DISALLOW_COPY_AND_ASSIGN(IntrinsicCodeGeneratorARM64);
};
diff --git a/compiler/optimizing/intrinsics_arm_vixl.h b/compiler/optimizing/intrinsics_arm_vixl.h
index 4f18ca3..9c02d0a 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.h
+++ b/compiler/optimizing/intrinsics_arm_vixl.h
@@ -46,9 +46,9 @@
bool TryDispatch(HInvoke* invoke);
private:
- ArenaAllocator* allocator_;
- CodeGenerator* codegen_;
- ArmVIXLAssembler* assembler_;
+ ArenaAllocator* const allocator_;
+ CodeGenerator* const codegen_;
+ ArmVIXLAssembler* const assembler_;
const ArmInstructionSetFeatures& features_;
DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderARMVIXL);
@@ -71,7 +71,7 @@
ArenaAllocator* GetAllocator();
ArmVIXLAssembler* GetAssembler();
- CodeGeneratorARMVIXL* codegen_;
+ CodeGeneratorARMVIXL* const codegen_;
DISALLOW_COPY_AND_ASSIGN(IntrinsicCodeGeneratorARMVIXL);
};
diff --git a/compiler/optimizing/intrinsics_mips.h b/compiler/optimizing/intrinsics_mips.h
index afd9548..13397f1 100644
--- a/compiler/optimizing/intrinsics_mips.h
+++ b/compiler/optimizing/intrinsics_mips.h
@@ -49,8 +49,8 @@
bool TryDispatch(HInvoke* invoke);
private:
- CodeGeneratorMIPS* codegen_;
- ArenaAllocator* allocator_;
+ CodeGeneratorMIPS* const codegen_;
+ ArenaAllocator* const allocator_;
DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderMIPS);
};
@@ -77,7 +77,7 @@
ArenaAllocator* GetAllocator();
- CodeGeneratorMIPS* codegen_;
+ CodeGeneratorMIPS* const codegen_;
DISALLOW_COPY_AND_ASSIGN(IntrinsicCodeGeneratorMIPS);
};
diff --git a/compiler/optimizing/intrinsics_mips64.h b/compiler/optimizing/intrinsics_mips64.h
index 6085c7b..6f40d90 100644
--- a/compiler/optimizing/intrinsics_mips64.h
+++ b/compiler/optimizing/intrinsics_mips64.h
@@ -49,8 +49,8 @@
bool TryDispatch(HInvoke* invoke);
private:
- CodeGeneratorMIPS64* codegen_;
- ArenaAllocator* allocator_;
+ CodeGeneratorMIPS64* const codegen_;
+ ArenaAllocator* const allocator_;
DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderMIPS64);
};
@@ -73,7 +73,7 @@
ArenaAllocator* GetAllocator();
- CodeGeneratorMIPS64* codegen_;
+ CodeGeneratorMIPS64* const codegen_;
DISALLOW_COPY_AND_ASSIGN(IntrinsicCodeGeneratorMIPS64);
};
diff --git a/compiler/optimizing/intrinsics_x86.h b/compiler/optimizing/intrinsics_x86.h
index ba3ca0a..e3555e7 100644
--- a/compiler/optimizing/intrinsics_x86.h
+++ b/compiler/optimizing/intrinsics_x86.h
@@ -49,8 +49,8 @@
bool TryDispatch(HInvoke* invoke);
private:
- ArenaAllocator* allocator_;
- CodeGeneratorX86* codegen_;
+ ArenaAllocator* const allocator_;
+ CodeGeneratorX86* const codegen_;
DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderX86);
};
@@ -73,7 +73,7 @@
ArenaAllocator* GetAllocator();
- CodeGeneratorX86* codegen_;
+ CodeGeneratorX86* const codegen_;
DISALLOW_COPY_AND_ASSIGN(IntrinsicCodeGeneratorX86);
};
diff --git a/compiler/optimizing/intrinsics_x86_64.h b/compiler/optimizing/intrinsics_x86_64.h
index b0fbe91..5cb601e 100644
--- a/compiler/optimizing/intrinsics_x86_64.h
+++ b/compiler/optimizing/intrinsics_x86_64.h
@@ -49,8 +49,8 @@
bool TryDispatch(HInvoke* invoke);
private:
- ArenaAllocator* allocator_;
- CodeGeneratorX86_64* codegen_;
+ ArenaAllocator* const allocator_;
+ CodeGeneratorX86_64* const codegen_;
DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderX86_64);
};
@@ -73,7 +73,7 @@
ArenaAllocator* GetAllocator();
- CodeGeneratorX86_64* codegen_;
+ CodeGeneratorX86_64* const codegen_;
DISALLOW_COPY_AND_ASSIGN(IntrinsicCodeGeneratorX86_64);
};
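
The repeated member changes in the intrinsics headers above all apply
the `T* const` pattern: the pointer itself can no longer be reseated
after construction, while the pointed-to object stays mutable. A minimal
sketch with hypothetical names:

    #include <cassert>

    struct Allocator { int allocations = 0; };

    class Builder {
     public:
      explicit Builder(Allocator* allocator) : allocator_(allocator) {}
      void Use() { ++allocator_->allocations; }  // OK: the pointee stays mutable.
      // void Rebind(Allocator* other) { allocator_ = other; }  // Would not compile.
     private:
      Allocator* const allocator_;  // The pointer can never be reseated.
    };

    int main() {
      Allocator allocator;
      Builder builder(&allocator);
      builder.Use();
      assert(allocator.allocations == 1);
      return 0;
    }
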
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 1a537ca..f4f6434 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -55,14 +55,18 @@
// "visited" must be empty on entry, it's an output argument for all visited (i.e. live) blocks.
DCHECK_EQ(visited->GetHighestBitSet(), -1);
+ // Allocate memory from local ScopedArenaAllocator.
+ ScopedArenaAllocator allocator(GetArenaStack());
// Nodes that we're currently visiting, indexed by block id.
- ArenaBitVector visiting(allocator_, blocks_.size(), false, kArenaAllocGraphBuilder);
+ ArenaBitVector visiting(
+ &allocator, blocks_.size(), /* expandable */ false, kArenaAllocGraphBuilder);
+ visiting.ClearAllBits();
// Number of successors visited from a given node, indexed by block id.
- ArenaVector<size_t> successors_visited(blocks_.size(),
- 0u,
- allocator_->Adapter(kArenaAllocGraphBuilder));
+ ScopedArenaVector<size_t> successors_visited(blocks_.size(),
+ 0u,
+ allocator.Adapter(kArenaAllocGraphBuilder));
// Stack of nodes that we're currently visiting (same as marked in "visiting" above).
- ArenaVector<HBasicBlock*> worklist(allocator_->Adapter(kArenaAllocGraphBuilder));
+ ScopedArenaVector<HBasicBlock*> worklist(allocator.Adapter(kArenaAllocGraphBuilder));
constexpr size_t kDefaultWorklistSize = 8;
worklist.reserve(kDefaultWorklistSize);
visited->SetBit(entry_block_->GetBlockId());
@@ -173,7 +177,11 @@
}
GraphAnalysisResult HGraph::BuildDominatorTree() {
- ArenaBitVector visited(allocator_, blocks_.size(), false, kArenaAllocGraphBuilder);
+ // Allocate memory from local ScopedArenaAllocator.
+ ScopedArenaAllocator allocator(GetArenaStack());
+
+ ArenaBitVector visited(&allocator, blocks_.size(), false, kArenaAllocGraphBuilder);
+ visited.ClearAllBits();
// (1) Find the back edges in the graph doing a DFS traversal.
FindBackEdges(&visited);
@@ -258,14 +266,16 @@
reverse_post_order_.reserve(blocks_.size());
reverse_post_order_.push_back(entry_block_);
+ // Allocate memory from local ScopedArenaAllocator.
+ ScopedArenaAllocator allocator(GetArenaStack());
// Number of visits of a given node, indexed by block id.
- ArenaVector<size_t> visits(blocks_.size(), 0u, allocator_->Adapter(kArenaAllocGraphBuilder));
+ ScopedArenaVector<size_t> visits(blocks_.size(), 0u, allocator.Adapter(kArenaAllocGraphBuilder));
// Number of successors visited from a given node, indexed by block id.
- ArenaVector<size_t> successors_visited(blocks_.size(),
- 0u,
- allocator_->Adapter(kArenaAllocGraphBuilder));
+ ScopedArenaVector<size_t> successors_visited(blocks_.size(),
+ 0u,
+ allocator.Adapter(kArenaAllocGraphBuilder));
// Nodes for which we need to visit successors.
- ArenaVector<HBasicBlock*> worklist(allocator_->Adapter(kArenaAllocGraphBuilder));
+ ScopedArenaVector<HBasicBlock*> worklist(allocator.Adapter(kArenaAllocGraphBuilder));
constexpr size_t kDefaultWorklistSize = 8;
worklist.reserve(kDefaultWorklistSize);
worklist.push_back(entry_block_);
@@ -710,10 +720,13 @@
bool is_irreducible_loop = HasBackEdgeNotDominatedByHeader();
if (is_irreducible_loop) {
- ArenaBitVector visited(graph->GetAllocator(),
+ // Allocate memory from local ScopedArenaAllocator.
+ ScopedArenaAllocator allocator(graph->GetArenaStack());
+ ArenaBitVector visited(&allocator,
graph->GetBlocks().size(),
/* expandable */ false,
kArenaAllocGraphBuilder);
+ visited.ClearAllBits();
// Stop marking blocks at the loop header.
visited.SetBit(header_->GetBlockId());
@@ -942,7 +955,7 @@
}
}
-void HEnvironment::CopyFrom(const ArenaVector<HInstruction*>& locals) {
+void HEnvironment::CopyFrom(ArrayRef<HInstruction* const> locals) {
for (size_t i = 0; i < locals.size(); i++) {
HInstruction* instruction = locals[i];
SetRawEnvAt(i, instruction);
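
The explicit ClearAllBits() calls added above go together with moving
the bit vectors to the scoped allocator: a scoped arena reuses memory
from earlier phases and, unlike ArenaAllocator, does not guarantee
zero-initialized storage. A toy sketch of the hazard (invented types,
not ART's allocators):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    struct ScopedArena {
      uint8_t buffer[64];
      void* Alloc(size_t /* size */) { return buffer; }  // Reused, NOT zeroed.
    };

    struct BitVector {
      BitVector(void* storage, size_t bits)
          : bits_(static_cast<uint32_t*>(storage)), words_((bits + 31u) / 32u) {}
      void ClearAllBits() { std::memset(bits_, 0, words_ * sizeof(uint32_t)); }
      bool IsBitSet(size_t i) const { return (bits_[i / 32u] >> (i % 32u)) & 1u; }
      uint32_t* const bits_;
      const size_t words_;
    };

    int main() {
      ScopedArena arena;
      std::memset(arena.buffer, 0xFF, sizeof(arena.buffer));  // Stale prior-phase data.
      BitVector visited(arena.Alloc(8u), 64u);
      visited.ClearAllBits();  // Without this, every bit would read as set.
      assert(!visited.IsBitSet(3u));
      return 0;
    }
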
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 99fde75..75cdb3e 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1839,7 +1839,7 @@
}
}
- void CopyFrom(const ArenaVector<HInstruction*>& locals);
+ void CopyFrom(ArrayRef<HInstruction* const> locals);
void CopyFrom(HEnvironment* environment);
// Copy from `env`. If it's a loop phi for `loop_header`, copy the first
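
Changing CopyFrom() to take ArrayRef<HInstruction* const> lets a single
signature accept an ArenaVector, a ScopedArenaVector, or a plain C
array, which is what the updated tests below rely on. A minimal
ArrayRef-like view, simplified from (and not identical to) ART's
ArrayRef:

    #include <cstddef>
    #include <iostream>
    #include <vector>

    template <typename T>
    class ArrayRef {
     public:
      template <size_t N>
      explicit ArrayRef(const T (&array)[N]) : data_(array), size_(N) {}
      explicit ArrayRef(const std::vector<T>& v) : data_(v.data()), size_(v.size()) {}
      const T* begin() const { return data_; }
      const T* end() const { return data_ + size_; }
      size_t size() const { return size_; }
     private:
      const T* const data_;
      const size_t size_;
    };

    // One CopyFrom() signature serves both containers and plain arrays.
    void CopyFrom(ArrayRef<int> locals) {
      for (int value : locals) std::cout << value << ' ';
      std::cout << '\n';
    }

    int main() {
      int args[] = {1, 2, 3};               // Plain array, as in the tests.
      std::vector<int> locals = {4, 5, 6};  // Stand-in for ScopedArenaVector.
      CopyFrom(ArrayRef<int>(args));
      CopyFrom(ArrayRef<int>(locals));
      return 0;
    }
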
diff --git a/compiler/optimizing/nodes_test.cc b/compiler/optimizing/nodes_test.cc
index b2180d9..9bfd250 100644
--- a/compiler/optimizing/nodes_test.cc
+++ b/compiler/optimizing/nodes_test.cc
@@ -129,10 +129,9 @@
HEnvironment* environment = new (GetAllocator()) HEnvironment(
GetAllocator(), 1, graph->GetArtMethod(), 0, with_environment);
- ArenaVector<HInstruction*> array(GetAllocator()->Adapter());
- array.push_back(parameter1);
+ HInstruction* const array[] = { parameter1 };
- environment->CopyFrom(array);
+ environment->CopyFrom(ArrayRef<HInstruction* const>(array));
with_environment->SetRawEnvironment(environment);
ASSERT_TRUE(parameter1->HasEnvironmentUses());
@@ -140,13 +139,13 @@
HEnvironment* parent1 = new (GetAllocator()) HEnvironment(
GetAllocator(), 1, graph->GetArtMethod(), 0, nullptr);
- parent1->CopyFrom(array);
+ parent1->CopyFrom(ArrayRef<HInstruction* const>(array));
ASSERT_EQ(parameter1->GetEnvUses().SizeSlow(), 2u);
HEnvironment* parent2 = new (GetAllocator()) HEnvironment(
GetAllocator(), 1, graph->GetArtMethod(), 0, nullptr);
- parent2->CopyFrom(array);
+ parent2->CopyFrom(ArrayRef<HInstruction* const>(array));
parent1->SetAndCopyParentChain(GetAllocator(), parent2);
// One use for parent2, and one other use for the new parent of parent1.
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 9bfb7a5..42f32b7 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -1146,7 +1146,8 @@
if (total_allocated > kArenaAllocatorMemoryReportThreshold) {
MemStats mem_stats(allocator.GetMemStats());
MemStats peak_stats(arena_stack.GetPeakStats());
- LOG(INFO) << dex_file.PrettyMethod(method_idx)
+ LOG(INFO) << "Used " << total_allocated << " bytes of arena memory for compiling "
+ << dex_file.PrettyMethod(method_idx)
<< "\n" << Dumpable<MemStats>(mem_stats)
<< "\n" << Dumpable<MemStats>(peak_stats);
}
@@ -1256,7 +1257,8 @@
if (total_allocated > kArenaAllocatorMemoryReportThreshold) {
MemStats mem_stats(allocator.GetMemStats());
MemStats peak_stats(arena_stack.GetPeakStats());
- LOG(INFO) << dex_file->PrettyMethod(method_idx)
+ LOG(INFO) << "Used " << total_allocated << " bytes of arena memory for compiling "
+ << dex_file->PrettyMethod(method_idx)
<< "\n" << Dumpable<MemStats>(mem_stats)
<< "\n" << Dumpable<MemStats>(peak_stats);
}
diff --git a/compiler/optimizing/optimizing_unit_test.h b/compiler/optimizing/optimizing_unit_test.h
index 5632f9a..9aba912 100644
--- a/compiler/optimizing/optimizing_unit_test.h
+++ b/compiler/optimizing/optimizing_unit_test.h
@@ -22,7 +22,9 @@
#include "common_compiler_test.h"
#include "dex_file.h"
#include "dex_instruction.h"
-#include "handle_scope.h"
+#include "handle_scope-inl.h"
+#include "mirror/class_loader.h"
+#include "mirror/dex_cache.h"
#include "nodes.h"
#include "scoped_thread_state_change.h"
#include "ssa_builder.h"
@@ -123,8 +125,7 @@
// Create a control-flow graph from Dex instructions.
HGraph* CreateCFG(const uint16_t* data, DataType::Type return_type = DataType::Type::kInt32) {
- const DexFile::CodeItem* item =
- reinterpret_cast<const DexFile::CodeItem*>(data);
+ const DexFile::CodeItem* code_item = reinterpret_cast<const DexFile::CodeItem*>(data);
HGraph* graph = CreateGraph();
{
@@ -132,7 +133,19 @@
if (handles_ == nullptr) {
handles_.reset(new VariableSizedHandleScope(soa.Self()));
}
- HGraphBuilder builder(graph, *item, handles_.get(), return_type);
+ const DexFile* dex_file = graph->GetAllocator()->Alloc<DexFile>();
+ const DexCompilationUnit* dex_compilation_unit =
+ new (graph->GetAllocator()) DexCompilationUnit(
+ handles_->NewHandle<mirror::ClassLoader>(nullptr),
+ /* class_linker */ nullptr,
+ *dex_file,
+ code_item,
+ /* class_def_index */ DexFile::kDexNoIndex16,
+ /* method_idx */ dex::kDexNoIndex,
+ /* access_flags */ 0u,
+ /* verified_method */ nullptr,
+ handles_->NewHandle<mirror::DexCache>(nullptr));
+ HGraphBuilder builder(graph, dex_compilation_unit, *code_item, handles_.get(), return_type);
bool graph_built = (builder.BuildGraph() == kAnalysisSuccess);
return graph_built ? graph : nullptr;
}
diff --git a/compiler/optimizing/register_allocator.h b/compiler/optimizing/register_allocator.h
index eaeec3b..55a8a38 100644
--- a/compiler/optimizing/register_allocator.h
+++ b/compiler/optimizing/register_allocator.h
@@ -87,7 +87,7 @@
// to find an optimal split position.
LiveInterval* SplitBetween(LiveInterval* interval, size_t from, size_t to);
- ScopedArenaAllocator* allocator_;
+ ScopedArenaAllocator* const allocator_;
CodeGenerator* const codegen_;
const SsaLivenessAnalysis& liveness_;
};
diff --git a/compiler/optimizing/scheduler.cc b/compiler/optimizing/scheduler.cc
index c673d54..57eb762 100644
--- a/compiler/optimizing/scheduler.cc
+++ b/compiler/optimizing/scheduler.cc
@@ -781,7 +781,7 @@
#if defined(ART_ENABLE_CODEGEN_arm64) || defined(ART_ENABLE_CODEGEN_arm)
// Phase-local allocator that allocates scheduler internal data structures like
  // scheduling nodes, internal nodes map, dependencies, etc.
- ScopedArenaAllocator arena_allocator(graph_->GetArenaStack());
+ ScopedArenaAllocator allocator(graph_->GetArenaStack());
CriticalPathSchedulingNodeSelector critical_path_selector;
RandomSchedulingNodeSelector random_selector;
SchedulingNodeSelector* selector = schedule_randomly
@@ -797,7 +797,7 @@
switch (instruction_set_) {
#ifdef ART_ENABLE_CODEGEN_arm64
case kArm64: {
- arm64::HSchedulerARM64 scheduler(&arena_allocator, selector);
+ arm64::HSchedulerARM64 scheduler(&allocator, selector);
scheduler.SetOnlyOptimizeLoopBlocks(only_optimize_loop_blocks);
scheduler.Schedule(graph_);
break;
@@ -807,7 +807,7 @@
case kThumb2:
case kArm: {
arm::SchedulingLatencyVisitorARM arm_latency_visitor(codegen_);
- arm::HSchedulerARM scheduler(&arena_allocator, selector, &arm_latency_visitor);
+ arm::HSchedulerARM scheduler(&allocator, selector, &arm_latency_visitor);
scheduler.SetOnlyOptimizeLoopBlocks(only_optimize_loop_blocks);
scheduler.Schedule(graph_);
break;
diff --git a/compiler/optimizing/scheduler.h b/compiler/optimizing/scheduler.h
index 3efd26a..afdf6f1 100644
--- a/compiler/optimizing/scheduler.h
+++ b/compiler/optimizing/scheduler.h
@@ -253,14 +253,14 @@
public:
SchedulingGraph(const HScheduler* scheduler, ScopedArenaAllocator* allocator)
: scheduler_(scheduler),
- arena_(allocator),
+ allocator_(allocator),
contains_scheduling_barrier_(false),
- nodes_map_(arena_->Adapter(kArenaAllocScheduler)),
+ nodes_map_(allocator_->Adapter(kArenaAllocScheduler)),
heap_location_collector_(nullptr) {}
SchedulingNode* AddNode(HInstruction* instr, bool is_scheduling_barrier = false) {
std::unique_ptr<SchedulingNode> node(
- new (arena_) SchedulingNode(instr, arena_, is_scheduling_barrier));
+ new (allocator_) SchedulingNode(instr, allocator_, is_scheduling_barrier));
SchedulingNode* result = node.get();
nodes_map_.Insert(std::make_pair(instr, std::move(node)));
contains_scheduling_barrier_ |= is_scheduling_barrier;
@@ -323,7 +323,7 @@
const HScheduler* const scheduler_;
- ScopedArenaAllocator* const arena_;
+ ScopedArenaAllocator* const allocator_;
bool contains_scheduling_barrier_;
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index f4a8a17..e4edbfd 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -105,7 +105,7 @@
}
static void AddDependentInstructionsToWorklist(HInstruction* instruction,
- ArenaVector<HPhi*>* worklist) {
+ ScopedArenaVector<HPhi*>* worklist) {
// If `instruction` is a dead phi, type conflict was just identified. All its
// live phi users, and transitively users of those users, therefore need to be
// marked dead/conflicting too, so we add them to the worklist. Otherwise we
@@ -167,7 +167,7 @@
}
// Replace inputs of `phi` to match its type. Return false if conflict is identified.
-bool SsaBuilder::TypeInputsOfPhi(HPhi* phi, ArenaVector<HPhi*>* worklist) {
+bool SsaBuilder::TypeInputsOfPhi(HPhi* phi, ScopedArenaVector<HPhi*>* worklist) {
DataType::Type common_type = phi->GetType();
if (DataType::IsIntegralType(common_type)) {
// We do not need to retype ambiguous inputs because they are always constructed
@@ -213,7 +213,7 @@
// Attempt to set the primitive type of `phi` to match its inputs. Return whether
// it was changed by the algorithm or not.
-bool SsaBuilder::UpdatePrimitiveType(HPhi* phi, ArenaVector<HPhi*>* worklist) {
+bool SsaBuilder::UpdatePrimitiveType(HPhi* phi, ScopedArenaVector<HPhi*>* worklist) {
DCHECK(phi->IsLive());
DataType::Type original_type = phi->GetType();
@@ -233,7 +233,7 @@
}
void SsaBuilder::RunPrimitiveTypePropagation() {
- ArenaVector<HPhi*> worklist(graph_->GetAllocator()->Adapter(kArenaAllocGraphBuilder));
+ ScopedArenaVector<HPhi*> worklist(local_allocator_->Adapter(kArenaAllocGraphBuilder));
for (HBasicBlock* block : graph_->GetReversePostOrder()) {
if (block->IsLoopHeader()) {
@@ -262,7 +262,7 @@
EquivalentPhisCleanup();
}
-void SsaBuilder::ProcessPrimitiveTypePropagationWorklist(ArenaVector<HPhi*>* worklist) {
+void SsaBuilder::ProcessPrimitiveTypePropagationWorklist(ScopedArenaVector<HPhi*>* worklist) {
// Process worklist
while (!worklist->empty()) {
HPhi* phi = worklist->back();
@@ -319,7 +319,7 @@
// uses (because they are untyped) and environment uses (if --debuggable).
// After resolving all ambiguous ArrayGets, we will re-run primitive type
// propagation on the Phis which need to be updated.
- ArenaVector<HPhi*> worklist(graph_->GetAllocator()->Adapter(kArenaAllocGraphBuilder));
+ ScopedArenaVector<HPhi*> worklist(local_allocator_->Adapter(kArenaAllocGraphBuilder));
{
ScopedObjectAccess soa(Thread::Current());
@@ -623,8 +623,7 @@
|| (next->GetType() != type)) {
ArenaAllocator* allocator = graph_->GetAllocator();
HInputsRef inputs = phi->GetInputs();
- HPhi* new_phi =
- new (allocator) HPhi(allocator, phi->GetRegNumber(), inputs.size(), type);
+ HPhi* new_phi = new (allocator) HPhi(allocator, phi->GetRegNumber(), inputs.size(), type);
// Copy the inputs. Note that the graph may not be correctly typed
// by doing this copy, but the type propagation phase will fix it.
ArrayRef<HUserRecord<HInstruction*>> new_input_records = new_phi->GetInputRecords();
diff --git a/compiler/optimizing/ssa_builder.h b/compiler/optimizing/ssa_builder.h
index 509cdc1..60831a9 100644
--- a/compiler/optimizing/ssa_builder.h
+++ b/compiler/optimizing/ssa_builder.h
@@ -17,7 +17,8 @@
#ifndef ART_COMPILER_OPTIMIZING_SSA_BUILDER_H_
#define ART_COMPILER_OPTIMIZING_SSA_BUILDER_H_
-#include "base/arena_containers.h"
+#include "base/scoped_arena_allocator.h"
+#include "base/scoped_arena_containers.h"
#include "nodes.h"
#include "optimization.h"
@@ -50,15 +51,17 @@
SsaBuilder(HGraph* graph,
Handle<mirror::ClassLoader> class_loader,
Handle<mirror::DexCache> dex_cache,
- VariableSizedHandleScope* handles)
+ VariableSizedHandleScope* handles,
+ ScopedArenaAllocator* local_allocator)
: graph_(graph),
class_loader_(class_loader),
dex_cache_(dex_cache),
handles_(handles),
agets_fixed_(false),
- ambiguous_agets_(graph->GetAllocator()->Adapter(kArenaAllocGraphBuilder)),
- ambiguous_asets_(graph->GetAllocator()->Adapter(kArenaAllocGraphBuilder)),
- uninitialized_strings_(graph->GetAllocator()->Adapter(kArenaAllocGraphBuilder)) {
+ local_allocator_(local_allocator),
+ ambiguous_agets_(local_allocator->Adapter(kArenaAllocGraphBuilder)),
+ ambiguous_asets_(local_allocator->Adapter(kArenaAllocGraphBuilder)),
+ uninitialized_strings_(local_allocator->Adapter(kArenaAllocGraphBuilder)) {
graph_->InitializeInexactObjectRTI(handles);
}
@@ -105,9 +108,9 @@
// input. Returns false if the type of an array is unknown.
bool FixAmbiguousArrayOps();
- bool TypeInputsOfPhi(HPhi* phi, ArenaVector<HPhi*>* worklist);
- bool UpdatePrimitiveType(HPhi* phi, ArenaVector<HPhi*>* worklist);
- void ProcessPrimitiveTypePropagationWorklist(ArenaVector<HPhi*>* worklist);
+ bool TypeInputsOfPhi(HPhi* phi, ScopedArenaVector<HPhi*>* worklist);
+ bool UpdatePrimitiveType(HPhi* phi, ScopedArenaVector<HPhi*>* worklist);
+ void ProcessPrimitiveTypePropagationWorklist(ScopedArenaVector<HPhi*>* worklist);
HFloatConstant* GetFloatEquivalent(HIntConstant* constant);
HDoubleConstant* GetDoubleEquivalent(HLongConstant* constant);
@@ -116,7 +119,7 @@
void RemoveRedundantUninitializedStrings();
- HGraph* graph_;
+ HGraph* const graph_;
Handle<mirror::ClassLoader> class_loader_;
Handle<mirror::DexCache> dex_cache_;
VariableSizedHandleScope* const handles_;
@@ -124,9 +127,10 @@
// True if types of ambiguous ArrayGets have been resolved.
bool agets_fixed_;
- ArenaVector<HArrayGet*> ambiguous_agets_;
- ArenaVector<HArraySet*> ambiguous_asets_;
- ArenaVector<HNewInstance*> uninitialized_strings_;
+ ScopedArenaAllocator* const local_allocator_;
+ ScopedArenaVector<HArrayGet*> ambiguous_agets_;
+ ScopedArenaVector<HArraySet*> ambiguous_asets_;
+ ScopedArenaVector<HNewInstance*> uninitialized_strings_;
DISALLOW_COPY_AND_ASSIGN(SsaBuilder);
};
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index 9800af7..f83bb52 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -1292,7 +1292,7 @@
// Use a local ScopedArenaAllocator for allocating memory.
// This allocator must remain alive while doing register allocation.
- ScopedArenaAllocator* allocator_;
+ ScopedArenaAllocator* const allocator_;
ScopedArenaVector<BlockInfo*> block_infos_;
diff --git a/compiler/optimizing/ssa_liveness_analysis_test.cc b/compiler/optimizing/ssa_liveness_analysis_test.cc
index 9b78e0e..b9bfbaa 100644
--- a/compiler/optimizing/ssa_liveness_analysis_test.cc
+++ b/compiler/optimizing/ssa_liveness_analysis_test.cc
@@ -95,8 +95,7 @@
graph_->GetDexFile(), dex::TypeIndex(3), 3, DataType::Type::kInt32);
HInstruction* extra_arg2 = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(4), 4, DataType::Type::kReference);
- ArenaVector<HInstruction*> args({ array, index, value, extra_arg1, extra_arg2 },
- GetAllocator()->Adapter());
+ HInstruction* const args[] = { array, index, value, extra_arg1, extra_arg2 };
for (HInstruction* insn : args) {
entry_->AddInstruction(insn);
}
@@ -109,7 +108,7 @@
/* method */ nullptr,
/* dex_pc */ 0u,
null_check);
- null_check_env->CopyFrom(args);
+ null_check_env->CopyFrom(ArrayRef<HInstruction* const>(args));
null_check->SetRawEnvironment(null_check_env);
HInstruction* length = new (GetAllocator()) HArrayLength(array, 0);
block->AddInstruction(length);
@@ -120,7 +119,7 @@
/* method */ nullptr,
/* dex_pc */ 0u,
bounds_check);
- bounds_check_env->CopyFrom(args);
+ bounds_check_env->CopyFrom(ArrayRef<HInstruction* const>(args));
bounds_check->SetRawEnvironment(bounds_check_env);
HInstruction* array_set =
new (GetAllocator()) HArraySet(array, index, value, DataType::Type::kInt32, /* dex_pc */ 0);
@@ -144,7 +143,7 @@
// Environment uses keep the reference argument alive.
"ranges: { [10,19) }, uses: { }, { 15 19 } is_fixed: 0, is_split: 0 is_low: 0 is_high: 0",
};
- ASSERT_EQ(arraysize(expected), args.size());
+ static_assert(arraysize(expected) == arraysize(args), "Array size check.");
size_t arg_index = 0u;
for (HInstruction* arg : args) {
std::ostringstream arg_dump;
@@ -165,8 +164,7 @@
graph_->GetDexFile(), dex::TypeIndex(3), 3, DataType::Type::kInt32);
HInstruction* extra_arg2 = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(4), 4, DataType::Type::kReference);
- ArenaVector<HInstruction*> args({ array, index, value, extra_arg1, extra_arg2 },
- GetAllocator()->Adapter());
+ HInstruction* const args[] = { array, index, value, extra_arg1, extra_arg2 };
for (HInstruction* insn : args) {
entry_->AddInstruction(insn);
}
@@ -179,7 +177,7 @@
/* method */ nullptr,
/* dex_pc */ 0u,
null_check);
- null_check_env->CopyFrom(args);
+ null_check_env->CopyFrom(ArrayRef<HInstruction* const>(args));
null_check->SetRawEnvironment(null_check_env);
HInstruction* length = new (GetAllocator()) HArrayLength(array, 0);
block->AddInstruction(length);
@@ -194,7 +192,7 @@
/* method */ nullptr,
/* dex_pc */ 0u,
deoptimize);
- deoptimize_env->CopyFrom(args);
+ deoptimize_env->CopyFrom(ArrayRef<HInstruction* const>(args));
deoptimize->SetRawEnvironment(deoptimize_env);
HInstruction* array_set =
new (GetAllocator()) HArraySet(array, index, value, DataType::Type::kInt32, /* dex_pc */ 0);
@@ -217,7 +215,7 @@
// Environment uses keep the reference argument alive.
"ranges: { [10,21) }, uses: { }, { 15 21 } is_fixed: 0, is_split: 0 is_low: 0 is_high: 0",
};
- ASSERT_EQ(arraysize(expected), args.size());
+ static_assert(arraysize(expected) == arraysize(args), "Array size check.");
size_t arg_index = 0u;
for (HInstruction* arg : args) {
std::ostringstream arg_dump;
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index a574566..62ed7ee 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -223,7 +223,7 @@
size_t dex_register_locations_index) const;
void CheckCodeInfo(MemoryRegion region) const;
- ArenaAllocator* allocator_;
+ ArenaAllocator* const allocator_;
const InstructionSet instruction_set_;
ArenaVector<StackMapEntry> stack_maps_;
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index a842c6e..96ac368 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -47,10 +47,10 @@
TEST(StackMapTest, Test1) {
ArenaPool pool;
- ArenaAllocator arena(&pool);
- StackMapStream stream(&arena, kRuntimeISA);
+ ArenaAllocator allocator(&pool);
+ StackMapStream stream(&allocator, kRuntimeISA);
- ArenaBitVector sp_mask(&arena, 0, false);
+ ArenaBitVector sp_mask(&allocator, 0, false);
size_t number_of_dex_registers = 2;
stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
stream.AddDexRegisterEntry(Kind::kInStack, 0); // Short location.
@@ -58,7 +58,7 @@
stream.EndStackMapEntry();
size_t size = stream.PrepareForFillIn();
- void* memory = arena.Alloc(size, kArenaAllocMisc);
+ void* memory = allocator.Alloc(size, kArenaAllocMisc);
MemoryRegion region(memory, size);
stream.FillInCodeInfo(region);
@@ -128,11 +128,11 @@
TEST(StackMapTest, Test2) {
ArenaPool pool;
- ArenaAllocator arena(&pool);
- StackMapStream stream(&arena, kRuntimeISA);
+ ArenaAllocator allocator(&pool);
+ StackMapStream stream(&allocator, kRuntimeISA);
ArtMethod art_method;
- ArenaBitVector sp_mask1(&arena, 0, true);
+ ArenaBitVector sp_mask1(&allocator, 0, true);
sp_mask1.SetBit(2);
sp_mask1.SetBit(4);
size_t number_of_dex_registers = 2;
@@ -146,7 +146,7 @@
stream.EndInlineInfoEntry();
stream.EndStackMapEntry();
- ArenaBitVector sp_mask2(&arena, 0, true);
+ ArenaBitVector sp_mask2(&allocator, 0, true);
sp_mask2.SetBit(3);
sp_mask2.SetBit(8);
stream.BeginStackMapEntry(1, 128, 0xFF, &sp_mask2, number_of_dex_registers, 0);
@@ -154,7 +154,7 @@
stream.AddDexRegisterEntry(Kind::kInFpuRegister, 3); // Short location.
stream.EndStackMapEntry();
- ArenaBitVector sp_mask3(&arena, 0, true);
+ ArenaBitVector sp_mask3(&allocator, 0, true);
sp_mask3.SetBit(1);
sp_mask3.SetBit(5);
stream.BeginStackMapEntry(2, 192, 0xAB, &sp_mask3, number_of_dex_registers, 0);
@@ -162,7 +162,7 @@
stream.AddDexRegisterEntry(Kind::kInRegisterHigh, 8); // Short location.
stream.EndStackMapEntry();
- ArenaBitVector sp_mask4(&arena, 0, true);
+ ArenaBitVector sp_mask4(&allocator, 0, true);
sp_mask4.SetBit(6);
sp_mask4.SetBit(7);
stream.BeginStackMapEntry(3, 256, 0xCD, &sp_mask4, number_of_dex_registers, 0);
@@ -171,7 +171,7 @@
stream.EndStackMapEntry();
size_t size = stream.PrepareForFillIn();
- void* memory = arena.Alloc(size, kArenaAllocMisc);
+ void* memory = allocator.Alloc(size, kArenaAllocMisc);
MemoryRegion region(memory, size);
stream.FillInCodeInfo(region);
@@ -412,11 +412,11 @@
TEST(StackMapTest, TestDeduplicateInlineInfoDexRegisterMap) {
ArenaPool pool;
- ArenaAllocator arena(&pool);
- StackMapStream stream(&arena, kRuntimeISA);
+ ArenaAllocator allocator(&pool);
+ StackMapStream stream(&allocator, kRuntimeISA);
ArtMethod art_method;
- ArenaBitVector sp_mask1(&arena, 0, true);
+ ArenaBitVector sp_mask1(&allocator, 0, true);
sp_mask1.SetBit(2);
sp_mask1.SetBit(4);
const size_t number_of_dex_registers = 2;
@@ -431,7 +431,7 @@
stream.EndStackMapEntry();
size_t size = stream.PrepareForFillIn();
- void* memory = arena.Alloc(size, kArenaAllocMisc);
+ void* memory = allocator.Alloc(size, kArenaAllocMisc);
MemoryRegion region(memory, size);
stream.FillInCodeInfo(region);
@@ -506,10 +506,10 @@
TEST(StackMapTest, TestNonLiveDexRegisters) {
ArenaPool pool;
- ArenaAllocator arena(&pool);
- StackMapStream stream(&arena, kRuntimeISA);
+ ArenaAllocator allocator(&pool);
+ StackMapStream stream(&allocator, kRuntimeISA);
- ArenaBitVector sp_mask(&arena, 0, false);
+ ArenaBitVector sp_mask(&allocator, 0, false);
uint32_t number_of_dex_registers = 2;
stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
stream.AddDexRegisterEntry(Kind::kNone, 0); // No location.
@@ -517,7 +517,7 @@
stream.EndStackMapEntry();
size_t size = stream.PrepareForFillIn();
- void* memory = arena.Alloc(size, kArenaAllocMisc);
+ void* memory = allocator.Alloc(size, kArenaAllocMisc);
MemoryRegion region(memory, size);
stream.FillInCodeInfo(region);
@@ -585,10 +585,10 @@
// not treat it as kNoDexRegisterMap.
TEST(StackMapTest, DexRegisterMapOffsetOverflow) {
ArenaPool pool;
- ArenaAllocator arena(&pool);
- StackMapStream stream(&arena, kRuntimeISA);
+ ArenaAllocator allocator(&pool);
+ StackMapStream stream(&allocator, kRuntimeISA);
- ArenaBitVector sp_mask(&arena, 0, false);
+ ArenaBitVector sp_mask(&allocator, 0, false);
uint32_t number_of_dex_registers = 1024;
// Create the first stack map (and its Dex register map).
stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
@@ -609,7 +609,7 @@
stream.EndStackMapEntry();
size_t size = stream.PrepareForFillIn();
- void* memory = arena.Alloc(size, kArenaAllocMisc);
+ void* memory = allocator.Alloc(size, kArenaAllocMisc);
MemoryRegion region(memory, size);
stream.FillInCodeInfo(region);
@@ -648,10 +648,10 @@
TEST(StackMapTest, TestShareDexRegisterMap) {
ArenaPool pool;
- ArenaAllocator arena(&pool);
- StackMapStream stream(&arena, kRuntimeISA);
+ ArenaAllocator allocator(&pool);
+ StackMapStream stream(&allocator, kRuntimeISA);
- ArenaBitVector sp_mask(&arena, 0, false);
+ ArenaBitVector sp_mask(&allocator, 0, false);
uint32_t number_of_dex_registers = 2;
// First stack map.
stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
@@ -670,7 +670,7 @@
stream.EndStackMapEntry();
size_t size = stream.PrepareForFillIn();
- void* memory = arena.Alloc(size, kArenaAllocMisc);
+ void* memory = allocator.Alloc(size, kArenaAllocMisc);
MemoryRegion region(memory, size);
stream.FillInCodeInfo(region);
@@ -706,10 +706,10 @@
TEST(StackMapTest, TestNoDexRegisterMap) {
ArenaPool pool;
- ArenaAllocator arena(&pool);
- StackMapStream stream(&arena, kRuntimeISA);
+ ArenaAllocator allocator(&pool);
+ StackMapStream stream(&allocator, kRuntimeISA);
- ArenaBitVector sp_mask(&arena, 0, false);
+ ArenaBitVector sp_mask(&allocator, 0, false);
uint32_t number_of_dex_registers = 0;
stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
stream.EndStackMapEntry();
@@ -719,7 +719,7 @@
stream.EndStackMapEntry();
size_t size = stream.PrepareForFillIn();
- void* memory = arena.Alloc(size, kArenaAllocMisc);
+ void* memory = allocator.Alloc(size, kArenaAllocMisc);
MemoryRegion region(memory, size);
stream.FillInCodeInfo(region);
@@ -755,11 +755,11 @@
TEST(StackMapTest, InlineTest) {
ArenaPool pool;
- ArenaAllocator arena(&pool);
- StackMapStream stream(&arena, kRuntimeISA);
+ ArenaAllocator allocator(&pool);
+ StackMapStream stream(&allocator, kRuntimeISA);
ArtMethod art_method;
- ArenaBitVector sp_mask1(&arena, 0, true);
+ ArenaBitVector sp_mask1(&allocator, 0, true);
sp_mask1.SetBit(2);
sp_mask1.SetBit(4);
@@ -821,7 +821,7 @@
stream.EndStackMapEntry();
size_t size = stream.PrepareForFillIn();
- void* memory = arena.Alloc(size, kArenaAllocMisc);
+ void* memory = allocator.Alloc(size, kArenaAllocMisc);
MemoryRegion region(memory, size);
stream.FillInCodeInfo(region);
@@ -936,10 +936,10 @@
TEST(StackMapTest, TestDeduplicateStackMask) {
ArenaPool pool;
- ArenaAllocator arena(&pool);
- StackMapStream stream(&arena, kRuntimeISA);
+ ArenaAllocator allocator(&pool);
+ StackMapStream stream(&allocator, kRuntimeISA);
- ArenaBitVector sp_mask(&arena, 0, true);
+ ArenaBitVector sp_mask(&allocator, 0, true);
sp_mask.SetBit(1);
sp_mask.SetBit(4);
stream.BeginStackMapEntry(0, 4, 0x3, &sp_mask, 0, 0);
@@ -948,7 +948,7 @@
stream.EndStackMapEntry();
size_t size = stream.PrepareForFillIn();
- void* memory = arena.Alloc(size, kArenaAllocMisc);
+ void* memory = allocator.Alloc(size, kArenaAllocMisc);
MemoryRegion region(memory, size);
stream.FillInCodeInfo(region);
@@ -964,10 +964,10 @@
TEST(StackMapTest, TestInvokeInfo) {
ArenaPool pool;
- ArenaAllocator arena(&pool);
- StackMapStream stream(&arena, kRuntimeISA);
+ ArenaAllocator allocator(&pool);
+ StackMapStream stream(&allocator, kRuntimeISA);
- ArenaBitVector sp_mask(&arena, 0, true);
+ ArenaBitVector sp_mask(&allocator, 0, true);
sp_mask.SetBit(1);
stream.BeginStackMapEntry(0, 4, 0x3, &sp_mask, 0, 0);
stream.AddInvoke(kSuper, 1);
@@ -980,11 +980,12 @@
stream.EndStackMapEntry();
const size_t code_info_size = stream.PrepareForFillIn();
- MemoryRegion code_info_region(arena.Alloc(code_info_size, kArenaAllocMisc), code_info_size);
+ MemoryRegion code_info_region(allocator.Alloc(code_info_size, kArenaAllocMisc), code_info_size);
stream.FillInCodeInfo(code_info_region);
const size_t method_info_size = stream.ComputeMethodInfoSize();
- MemoryRegion method_info_region(arena.Alloc(method_info_size, kArenaAllocMisc), method_info_size);
+ MemoryRegion method_info_region(allocator.Alloc(method_info_size, kArenaAllocMisc),
+ method_info_size);
stream.FillInMethodInfo(method_info_region);
CodeInfo code_info(code_info_region);
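Note: all of the StackMapTest changes above follow the same lifecycle; a condensed sketch of the renamed form, drawn directly from these hunks (test scaffolding and headers assumed):

  ArenaPool pool;
  ArenaAllocator allocator(&pool);
  StackMapStream stream(&allocator, kRuntimeISA);
  ArenaBitVector sp_mask(&allocator, 0, /* expandable */ false);
  size_t number_of_dex_registers = 2;
  stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
  stream.AddDexRegisterEntry(Kind::kInStack, 0);    // Short location.
  stream.AddDexRegisterEntry(Kind::kConstant, -2);  // Short location.
  stream.EndStackMapEntry();
  // Two-phase emission: measure, allocate from the same arena, then fill in.
  size_t size = stream.PrepareForFillIn();
  void* memory = allocator.Alloc(size, kArenaAllocMisc);
  MemoryRegion region(memory, size);
  stream.FillInCodeInfo(region);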
diff --git a/compiler/trampolines/trampoline_compiler.cc b/compiler/trampolines/trampoline_compiler.cc
index 1e9a521..9527a60 100644
--- a/compiler/trampolines/trampoline_compiler.cc
+++ b/compiler/trampolines/trampoline_compiler.cc
@@ -244,19 +244,19 @@
EntryPointCallingConvention abi,
ThreadOffset64 offset) {
ArenaPool pool;
- ArenaAllocator arena(&pool);
+ ArenaAllocator allocator(&pool);
switch (isa) {
#ifdef ART_ENABLE_CODEGEN_arm64
case kArm64:
- return arm64::CreateTrampoline(&arena, abi, offset);
+ return arm64::CreateTrampoline(&allocator, abi, offset);
#endif
#ifdef ART_ENABLE_CODEGEN_mips64
case kMips64:
- return mips64::CreateTrampoline(&arena, abi, offset);
+ return mips64::CreateTrampoline(&allocator, abi, offset);
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
case kX86_64:
- return x86_64::CreateTrampoline(&arena, offset);
+ return x86_64::CreateTrampoline(&allocator, offset);
#endif
default:
UNUSED(abi);
@@ -270,21 +270,21 @@
EntryPointCallingConvention abi,
ThreadOffset32 offset) {
ArenaPool pool;
- ArenaAllocator arena(&pool);
+ ArenaAllocator allocator(&pool);
switch (isa) {
#ifdef ART_ENABLE_CODEGEN_arm
case kArm:
case kThumb2:
- return arm::CreateTrampoline(&arena, abi, offset);
+ return arm::CreateTrampoline(&allocator, abi, offset);
#endif
#ifdef ART_ENABLE_CODEGEN_mips
case kMips:
- return mips::CreateTrampoline(&arena, abi, offset);
+ return mips::CreateTrampoline(&allocator, abi, offset);
#endif
#ifdef ART_ENABLE_CODEGEN_x86
case kX86:
UNUSED(abi);
- return x86::CreateTrampoline(&arena, offset);
+ return x86::CreateTrampoline(&allocator, offset);
#endif
default:
LOG(FATAL) << "Unexpected InstructionSet: " << isa;
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index dbd35ab..e0cef85 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -252,7 +252,7 @@
// for a single, fast space check per instruction.
static const int kMinimumGap = 32;
- ArenaAllocator* allocator_;
+ ArenaAllocator* const allocator_;
uint8_t* contents_;
uint8_t* cursor_;
uint8_t* limit_;
diff --git a/compiler/utils/assembler_test.h b/compiler/utils/assembler_test.h
index 11a9b91..ae7636b 100644
--- a/compiler/utils/assembler_test.h
+++ b/compiler/utils/assembler_test.h
@@ -719,8 +719,8 @@
explicit AssemblerTest() {}
void SetUp() OVERRIDE {
- arena_.reset(new ArenaAllocator(&pool_));
- assembler_.reset(CreateAssembler(arena_.get()));
+ allocator_.reset(new ArenaAllocator(&pool_));
+ assembler_.reset(CreateAssembler(allocator_.get()));
test_helper_.reset(
new AssemblerTestInfrastructure(GetArchitectureString(),
GetAssemblerCmdName(),
@@ -737,7 +737,7 @@
void TearDown() OVERRIDE {
test_helper_.reset(); // Clean up the helper.
assembler_.reset();
- arena_.reset();
+ allocator_.reset();
}
// Override this to set up any architecture-specific things, e.g., CPU revision.
@@ -1589,7 +1589,7 @@
static constexpr size_t kWarnManyCombinationsThreshold = 500;
ArenaPool pool_;
- std::unique_ptr<ArenaAllocator> arena_;
+ std::unique_ptr<ArenaAllocator> allocator_;
std::unique_ptr<Ass> assembler_;
std::unique_ptr<AssemblerTestInfrastructure> test_helper_;
diff --git a/compiler/utils/assembler_thumb_test.cc b/compiler/utils/assembler_thumb_test.cc
index 5622f89..5307d17 100644
--- a/compiler/utils/assembler_thumb_test.cc
+++ b/compiler/utils/assembler_thumb_test.cc
@@ -167,10 +167,10 @@
class ArmVIXLAssemblerTest : public ::testing::Test {
public:
- ArmVIXLAssemblerTest() : pool(), arena(&pool), assembler(&arena) { }
+ ArmVIXLAssemblerTest() : pool(), allocator(&pool), assembler(&allocator) { }
ArenaPool pool;
- ArenaAllocator arena;
+ ArenaAllocator allocator;
ArmVIXLJNIMacroAssembler assembler;
};
@@ -209,18 +209,16 @@
const bool is_critical_native = false;
const char* shorty = "IIFII";
- ArenaPool pool;
- ArenaAllocator arena(&pool);
-
std::unique_ptr<JniCallingConvention> jni_conv(
- JniCallingConvention::Create(&arena,
+ JniCallingConvention::Create(&allocator,
is_static,
is_synchronized,
is_critical_native,
shorty,
kThumb2));
std::unique_ptr<ManagedRuntimeCallingConvention> mr_conv(
- ManagedRuntimeCallingConvention::Create(&arena, is_static, is_synchronized, shorty, kThumb2));
+ ManagedRuntimeCallingConvention::Create(
+ &allocator, is_static, is_synchronized, shorty, kThumb2));
const int frame_size(jni_conv->FrameSize());
ArrayRef<const ManagedRegister> callee_save_regs = jni_conv->CalleeSaveRegisters();
diff --git a/compiler/utils/jni_macro_assembler_test.h b/compiler/utils/jni_macro_assembler_test.h
index ba95e21..34ab4c3 100644
--- a/compiler/utils/jni_macro_assembler_test.h
+++ b/compiler/utils/jni_macro_assembler_test.h
@@ -58,8 +58,8 @@
explicit JNIMacroAssemblerTest() {}
void SetUp() OVERRIDE {
- arena_.reset(new ArenaAllocator(&pool_));
- assembler_.reset(CreateAssembler(arena_.get()));
+ allocator_.reset(new ArenaAllocator(&pool_));
+ assembler_.reset(CreateAssembler(allocator_.get()));
test_helper_.reset(
new AssemblerTestInfrastructure(GetArchitectureString(),
GetAssemblerCmdName(),
@@ -76,7 +76,7 @@
void TearDown() OVERRIDE {
test_helper_.reset(); // Clean up the helper.
assembler_.reset();
- arena_.reset();
+ allocator_.reset();
}
// Override this to set up any architecture-specific things, e.g., CPU revision.
@@ -140,7 +140,7 @@
}
ArenaPool pool_;
- std::unique_ptr<ArenaAllocator> arena_;
+ std::unique_ptr<ArenaAllocator> allocator_;
std::unique_ptr<Ass> assembler_;
std::unique_ptr<AssemblerTestInfrastructure> test_helper_;
diff --git a/compiler/utils/x86/assembler_x86_test.cc b/compiler/utils/x86/assembler_x86_test.cc
index cccde37..e232add 100644
--- a/compiler/utils/x86/assembler_x86_test.cc
+++ b/compiler/utils/x86/assembler_x86_test.cc
@@ -24,8 +24,8 @@
TEST(AssemblerX86, CreateBuffer) {
ArenaPool pool;
- ArenaAllocator arena(&pool);
- AssemblerBuffer buffer(&arena);
+ ArenaAllocator allocator(&pool);
+ AssemblerBuffer buffer(&allocator);
AssemblerBuffer::EnsureCapacity ensured(&buffer);
buffer.Emit<uint8_t>(0x42);
ASSERT_EQ(static_cast<size_t>(1), buffer.Size());
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc
index b08ba4a..0cb3ffd 100644
--- a/compiler/utils/x86_64/assembler_x86_64_test.cc
+++ b/compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -30,8 +30,8 @@
TEST(AssemblerX86_64, CreateBuffer) {
ArenaPool pool;
- ArenaAllocator arena(&pool);
- AssemblerBuffer buffer(&arena);
+ ArenaAllocator allocator(&pool);
+ AssemblerBuffer buffer(&allocator);
AssemblerBuffer::EnsureCapacity ensured(&buffer);
buffer.Emit<uint8_t>(0x42);
ASSERT_EQ(static_cast<size_t>(1), buffer.Size());
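Note: the two CreateBuffer tests above exercise the same emission guard; a condensed sketch (assembler test headers assumed). EnsureCapacity is a scoped guard around emission: within its lifetime the buffer is guaranteed to have at least kMinimumGap bytes free, which is what allows the single fast space check per instruction mentioned in assembler.h above.

  ArenaPool pool;
  ArenaAllocator allocator(&pool);
  AssemblerBuffer buffer(&allocator);
  // Scoped capacity guard; Emit() inside it can take the fast path.
  AssemblerBuffer::EnsureCapacity ensured(&buffer);
  buffer.Emit<uint8_t>(0x42);
  ASSERT_EQ(static_cast<size_t>(1), buffer.Size());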
diff --git a/profman/profile_assistant_test.cc b/profman/profile_assistant_test.cc
index 73724b2..642d26e 100644
--- a/profman/profile_assistant_test.cc
+++ b/profman/profile_assistant_test.cc
@@ -35,7 +35,7 @@
class ProfileAssistantTest : public CommonRuntimeTest {
public:
void PostRuntimeCreate() OVERRIDE {
- arena_.reset(new ArenaAllocator(Runtime::Current()->GetArenaPool()));
+ allocator_.reset(new ArenaAllocator(Runtime::Current()->GetArenaPool()));
}
protected:
@@ -108,7 +108,7 @@
// Creates an inline cache which will be destructed at the end of the test.
ProfileCompilationInfo::InlineCacheMap* CreateInlineCacheMap() {
used_inline_caches.emplace_back(new ProfileCompilationInfo::InlineCacheMap(
- std::less<uint16_t>(), arena_->Adapter(kArenaAllocProfile)));
+ std::less<uint16_t>(), allocator_->Adapter(kArenaAllocProfile)));
return used_inline_caches.back().get();
}
@@ -122,13 +122,13 @@
// Monomorphic
for (uint16_t dex_pc = 0; dex_pc < 11; dex_pc++) {
- ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
+ ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
dex_pc_data.AddClass(0, dex::TypeIndex(0));
ic_map->Put(dex_pc, dex_pc_data);
}
// Polymorphic
for (uint16_t dex_pc = 11; dex_pc < 22; dex_pc++) {
- ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
+ ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
dex_pc_data.AddClass(0, dex::TypeIndex(0));
dex_pc_data.AddClass(1, dex::TypeIndex(1));
@@ -136,13 +136,13 @@
}
// Megamorphic
for (uint16_t dex_pc = 22; dex_pc < 33; dex_pc++) {
- ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
+ ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
dex_pc_data.SetIsMegamorphic();
ic_map->Put(dex_pc, dex_pc_data);
}
// Missing types
for (uint16_t dex_pc = 33; dex_pc < 44; dex_pc++) {
- ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
+ ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
dex_pc_data.SetIsMissingTypes();
ic_map->Put(dex_pc, dex_pc_data);
}
@@ -375,7 +375,7 @@
return ProcessProfiles(profile_fds, reference_profile_fd);
}
- std::unique_ptr<ArenaAllocator> arena_;
+ std::unique_ptr<ArenaAllocator> allocator_;
// Cache of inline caches generated during tests.
// This makes it easier to pass data between different utilities and ensure that
diff --git a/runtime/base/arena_allocator.cc b/runtime/base/arena_allocator.cc
index c48e30f..79cf087 100644
--- a/runtime/base/arena_allocator.cc
+++ b/runtime/base/arena_allocator.cc
@@ -296,7 +296,7 @@
void ArenaPool::ReclaimMemory() {
while (free_arenas_ != nullptr) {
- auto* arena = free_arenas_;
+ Arena* arena = free_arenas_;
free_arenas_ = free_arenas_->next_;
delete arena;
}
@@ -330,7 +330,7 @@
ScopedTrace trace(__PRETTY_FUNCTION__);
// Doesn't work for malloc.
MutexLock lock(Thread::Current(), lock_);
- for (auto* arena = free_arenas_; arena != nullptr; arena = arena->next_) {
+ for (Arena* arena = free_arenas_; arena != nullptr; arena = arena->next_) {
arena->Release();
}
}
diff --git a/runtime/base/arena_allocator_test.cc b/runtime/base/arena_allocator_test.cc
index 6bf56c8..68e26af 100644
--- a/runtime/base/arena_allocator_test.cc
+++ b/runtime/base/arena_allocator_test.cc
@@ -34,8 +34,8 @@
TEST_F(ArenaAllocatorTest, Test) {
ArenaPool pool;
- ArenaAllocator arena(&pool);
- ArenaBitVector bv(&arena, 10, true);
+ ArenaAllocator allocator(&pool);
+ ArenaBitVector bv(&allocator, 10, true);
bv.SetBit(5);
EXPECT_EQ(1U, bv.GetStorageSize());
bv.SetBit(35);
@@ -50,14 +50,14 @@
uint32_t* small_array;
{
// Allocate a small array from an arena and release it.
- ArenaAllocator arena(&pool);
- small_array = arena.AllocArray<uint32_t>(kSmallArraySize);
+ ArenaAllocator allocator(&pool);
+ small_array = allocator.AllocArray<uint32_t>(kSmallArraySize);
ASSERT_EQ(0u, small_array[kSmallArraySize - 1u]);
}
{
// Reuse the previous arena and allocate more than previous allocation including red zone.
- ArenaAllocator arena(&pool);
- uint32_t* large_array = arena.AllocArray<uint32_t>(kLargeArraySize);
+ ArenaAllocator allocator(&pool);
+ uint32_t* large_array = allocator.AllocArray<uint32_t>(kLargeArraySize);
ASSERT_EQ(0u, large_array[kLargeArraySize - 1u]);
// Verify that the allocation was made on the same arena.
ASSERT_EQ(small_array, large_array);
@@ -72,70 +72,72 @@
{
ArenaPool pool;
- ArenaAllocator arena(&pool);
+ ArenaAllocator allocator(&pool);
// Note: Leaving some space for memory tool red zones.
- void* alloc1 = arena.Alloc(arena_allocator::kArenaDefaultSize * 5 / 8);
- void* alloc2 = arena.Alloc(arena_allocator::kArenaDefaultSize * 2 / 8);
+ void* alloc1 = allocator.Alloc(arena_allocator::kArenaDefaultSize * 5 / 8);
+ void* alloc2 = allocator.Alloc(arena_allocator::kArenaDefaultSize * 2 / 8);
ASSERT_NE(alloc1, alloc2);
- ASSERT_EQ(1u, NumberOfArenas(&arena));
+ ASSERT_EQ(1u, NumberOfArenas(&allocator));
}
{
ArenaPool pool;
- ArenaAllocator arena(&pool);
- void* alloc1 = arena.Alloc(arena_allocator::kArenaDefaultSize * 13 / 16);
- void* alloc2 = arena.Alloc(arena_allocator::kArenaDefaultSize * 11 / 16);
+ ArenaAllocator allocator(&pool);
+ void* alloc1 = allocator.Alloc(arena_allocator::kArenaDefaultSize * 13 / 16);
+ void* alloc2 = allocator.Alloc(arena_allocator::kArenaDefaultSize * 11 / 16);
ASSERT_NE(alloc1, alloc2);
- ASSERT_EQ(2u, NumberOfArenas(&arena));
- void* alloc3 = arena.Alloc(arena_allocator::kArenaDefaultSize * 7 / 16);
+ ASSERT_EQ(2u, NumberOfArenas(&allocator));
+ void* alloc3 = allocator.Alloc(arena_allocator::kArenaDefaultSize * 7 / 16);
ASSERT_NE(alloc1, alloc3);
ASSERT_NE(alloc2, alloc3);
- ASSERT_EQ(3u, NumberOfArenas(&arena));
+ ASSERT_EQ(3u, NumberOfArenas(&allocator));
}
{
ArenaPool pool;
- ArenaAllocator arena(&pool);
- void* alloc1 = arena.Alloc(arena_allocator::kArenaDefaultSize * 13 / 16);
- void* alloc2 = arena.Alloc(arena_allocator::kArenaDefaultSize * 9 / 16);
+ ArenaAllocator allocator(&pool);
+ void* alloc1 = allocator.Alloc(arena_allocator::kArenaDefaultSize * 13 / 16);
+ void* alloc2 = allocator.Alloc(arena_allocator::kArenaDefaultSize * 9 / 16);
ASSERT_NE(alloc1, alloc2);
- ASSERT_EQ(2u, NumberOfArenas(&arena));
+ ASSERT_EQ(2u, NumberOfArenas(&allocator));
// Note: Leaving some space for memory tool red zones.
- void* alloc3 = arena.Alloc(arena_allocator::kArenaDefaultSize * 5 / 16);
+ void* alloc3 = allocator.Alloc(arena_allocator::kArenaDefaultSize * 5 / 16);
ASSERT_NE(alloc1, alloc3);
ASSERT_NE(alloc2, alloc3);
- ASSERT_EQ(2u, NumberOfArenas(&arena));
+ ASSERT_EQ(2u, NumberOfArenas(&allocator));
}
{
ArenaPool pool;
- ArenaAllocator arena(&pool);
- void* alloc1 = arena.Alloc(arena_allocator::kArenaDefaultSize * 9 / 16);
- void* alloc2 = arena.Alloc(arena_allocator::kArenaDefaultSize * 13 / 16);
+ ArenaAllocator allocator(&pool);
+ void* alloc1 = allocator.Alloc(arena_allocator::kArenaDefaultSize * 9 / 16);
+ void* alloc2 = allocator.Alloc(arena_allocator::kArenaDefaultSize * 13 / 16);
ASSERT_NE(alloc1, alloc2);
- ASSERT_EQ(2u, NumberOfArenas(&arena));
+ ASSERT_EQ(2u, NumberOfArenas(&allocator));
// Note: Leaving some space for memory tool red zones.
- void* alloc3 = arena.Alloc(arena_allocator::kArenaDefaultSize * 5 / 16);
+ void* alloc3 = allocator.Alloc(arena_allocator::kArenaDefaultSize * 5 / 16);
ASSERT_NE(alloc1, alloc3);
ASSERT_NE(alloc2, alloc3);
- ASSERT_EQ(2u, NumberOfArenas(&arena));
+ ASSERT_EQ(2u, NumberOfArenas(&allocator));
}
{
ArenaPool pool;
- ArenaAllocator arena(&pool);
+ ArenaAllocator allocator(&pool);
// Note: Leaving some space for memory tool red zones.
for (size_t i = 0; i != 15; ++i) {
- arena.Alloc(arena_allocator::kArenaDefaultSize * 1 / 16); // Allocate 15 times from the same arena.
- ASSERT_EQ(i + 1u, NumberOfArenas(&arena));
- arena.Alloc(arena_allocator::kArenaDefaultSize * 17 / 16); // Allocate a separate arena.
- ASSERT_EQ(i + 2u, NumberOfArenas(&arena));
+ // Allocate 15 times from the same arena.
+ allocator.Alloc(arena_allocator::kArenaDefaultSize * 1 / 16);
+ ASSERT_EQ(i + 1u, NumberOfArenas(&allocator));
+ // Allocate a separate arena.
+ allocator.Alloc(arena_allocator::kArenaDefaultSize * 17 / 16);
+ ASSERT_EQ(i + 2u, NumberOfArenas(&allocator));
}
}
}
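Note: the loop above depends on the growth rule that these assertions imply: small requests keep filling the current arena, while a request larger than kArenaDefaultSize is served from a dedicated arena without displacing the current one. Condensed, with names from the test:

  ArenaPool pool;
  ArenaAllocator allocator(&pool);
  // A small request opens the first arena.
  allocator.Alloc(arena_allocator::kArenaDefaultSize * 1 / 16);
  ASSERT_EQ(1u, NumberOfArenas(&allocator));
  // An oversized request gets a dedicated arena of its own.
  allocator.Alloc(arena_allocator::kArenaDefaultSize * 17 / 16);
  ASSERT_EQ(2u, NumberOfArenas(&allocator));
  // The original arena is still current, so the next small request reuses it.
  allocator.Alloc(arena_allocator::kArenaDefaultSize * 1 / 16);
  ASSERT_EQ(2u, NumberOfArenas(&allocator));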
TEST_F(ArenaAllocatorTest, AllocAlignment) {
ArenaPool pool;
- ArenaAllocator arena(&pool);
+ ArenaAllocator allocator(&pool);
for (size_t iterations = 0; iterations <= 10; ++iterations) {
for (size_t size = 1; size <= ArenaAllocator::kAlignment + 1; ++size) {
- void* allocation = arena.Alloc(size);
+ void* allocation = allocator.Alloc(size);
EXPECT_TRUE(IsAligned<ArenaAllocator::kAlignment>(allocation))
<< reinterpret_cast<uintptr_t>(allocation);
}
@@ -152,52 +154,52 @@
{
// Case 1: small aligned allocation, aligned extend inside arena.
ArenaPool pool;
- ArenaAllocator arena(&pool);
+ ArenaAllocator allocator(&pool);
const size_t original_size = ArenaAllocator::kAlignment * 2;
- void* original_allocation = arena.Alloc(original_size);
+ void* original_allocation = allocator.Alloc(original_size);
const size_t new_size = ArenaAllocator::kAlignment * 3;
- void* realloc_allocation = arena.Realloc(original_allocation, original_size, new_size);
+ void* realloc_allocation = allocator.Realloc(original_allocation, original_size, new_size);
EXPECT_EQ(original_allocation, realloc_allocation);
}
{
// Case 2: small aligned allocation, non-aligned extend inside arena.
ArenaPool pool;
- ArenaAllocator arena(&pool);
+ ArenaAllocator allocator(&pool);
const size_t original_size = ArenaAllocator::kAlignment * 2;
- void* original_allocation = arena.Alloc(original_size);
+ void* original_allocation = allocator.Alloc(original_size);
const size_t new_size = ArenaAllocator::kAlignment * 2 + (ArenaAllocator::kAlignment / 2);
- void* realloc_allocation = arena.Realloc(original_allocation, original_size, new_size);
+ void* realloc_allocation = allocator.Realloc(original_allocation, original_size, new_size);
EXPECT_EQ(original_allocation, realloc_allocation);
}
{
// Case 3: small non-aligned allocation, aligned extend inside arena.
ArenaPool pool;
- ArenaAllocator arena(&pool);
+ ArenaAllocator allocator(&pool);
const size_t original_size = ArenaAllocator::kAlignment * 2 + (ArenaAllocator::kAlignment / 2);
- void* original_allocation = arena.Alloc(original_size);
+ void* original_allocation = allocator.Alloc(original_size);
const size_t new_size = ArenaAllocator::kAlignment * 4;
- void* realloc_allocation = arena.Realloc(original_allocation, original_size, new_size);
+ void* realloc_allocation = allocator.Realloc(original_allocation, original_size, new_size);
EXPECT_EQ(original_allocation, realloc_allocation);
}
{
// Case 4: small non-aligned allocation, aligned non-extend inside arena.
ArenaPool pool;
- ArenaAllocator arena(&pool);
+ ArenaAllocator allocator(&pool);
const size_t original_size = ArenaAllocator::kAlignment * 2 + (ArenaAllocator::kAlignment / 2);
- void* original_allocation = arena.Alloc(original_size);
+ void* original_allocation = allocator.Alloc(original_size);
const size_t new_size = ArenaAllocator::kAlignment * 3;
- void* realloc_allocation = arena.Realloc(original_allocation, original_size, new_size);
+ void* realloc_allocation = allocator.Realloc(original_allocation, original_size, new_size);
EXPECT_EQ(original_allocation, realloc_allocation);
}
@@ -207,31 +209,31 @@
{
// Case 5: large allocation, aligned extend into next arena.
ArenaPool pool;
- ArenaAllocator arena(&pool);
+ ArenaAllocator allocator(&pool);
const size_t original_size = arena_allocator::kArenaDefaultSize -
ArenaAllocator::kAlignment * 5;
- void* original_allocation = arena.Alloc(original_size);
+ void* original_allocation = allocator.Alloc(original_size);
const size_t new_size = arena_allocator::kArenaDefaultSize + ArenaAllocator::kAlignment * 2;
- void* realloc_allocation = arena.Realloc(original_allocation, original_size, new_size);
+ void* realloc_allocation = allocator.Realloc(original_allocation, original_size, new_size);
EXPECT_NE(original_allocation, realloc_allocation);
}
{
// Case 6: large allocation, non-aligned extend into next arena.
ArenaPool pool;
- ArenaAllocator arena(&pool);
+ ArenaAllocator allocator(&pool);
const size_t original_size = arena_allocator::kArenaDefaultSize -
ArenaAllocator::kAlignment * 4 -
ArenaAllocator::kAlignment / 2;
- void* original_allocation = arena.Alloc(original_size);
+ void* original_allocation = allocator.Alloc(original_size);
const size_t new_size = arena_allocator::kArenaDefaultSize +
ArenaAllocator::kAlignment * 2 +
ArenaAllocator::kAlignment / 2;
- void* realloc_allocation = arena.Realloc(original_allocation, original_size, new_size);
+ void* realloc_allocation = allocator.Realloc(original_allocation, original_size, new_size);
EXPECT_NE(original_allocation, realloc_allocation);
}
}
@@ -240,68 +242,68 @@
{
// Case 1: small aligned allocation, aligned extend inside arena.
ArenaPool pool;
- ArenaAllocator arena(&pool);
+ ArenaAllocator allocator(&pool);
const size_t original_size = ArenaAllocator::kAlignment * 2;
- void* original_allocation = arena.Alloc(original_size);
+ void* original_allocation = allocator.Alloc(original_size);
ASSERT_TRUE(IsAligned<ArenaAllocator::kAlignment>(original_allocation));
const size_t new_size = ArenaAllocator::kAlignment * 3;
- void* realloc_allocation = arena.Realloc(original_allocation, original_size, new_size);
+ void* realloc_allocation = allocator.Realloc(original_allocation, original_size, new_size);
EXPECT_TRUE(IsAligned<ArenaAllocator::kAlignment>(realloc_allocation));
- void* after_alloc = arena.Alloc(1);
+ void* after_alloc = allocator.Alloc(1);
EXPECT_TRUE(IsAligned<ArenaAllocator::kAlignment>(after_alloc));
}
{
// Case 2: small aligned allocation, non-aligned extend inside arena.
ArenaPool pool;
- ArenaAllocator arena(&pool);
+ ArenaAllocator allocator(&pool);
const size_t original_size = ArenaAllocator::kAlignment * 2;
- void* original_allocation = arena.Alloc(original_size);
+ void* original_allocation = allocator.Alloc(original_size);
ASSERT_TRUE(IsAligned<ArenaAllocator::kAlignment>(original_allocation));
const size_t new_size = ArenaAllocator::kAlignment * 2 + (ArenaAllocator::kAlignment / 2);
- void* realloc_allocation = arena.Realloc(original_allocation, original_size, new_size);
+ void* realloc_allocation = allocator.Realloc(original_allocation, original_size, new_size);
EXPECT_TRUE(IsAligned<ArenaAllocator::kAlignment>(realloc_allocation));
- void* after_alloc = arena.Alloc(1);
+ void* after_alloc = allocator.Alloc(1);
EXPECT_TRUE(IsAligned<ArenaAllocator::kAlignment>(after_alloc));
}
{
// Case 3: small non-aligned allocation, aligned extend inside arena.
ArenaPool pool;
- ArenaAllocator arena(&pool);
+ ArenaAllocator allocator(&pool);
const size_t original_size = ArenaAllocator::kAlignment * 2 + (ArenaAllocator::kAlignment / 2);
- void* original_allocation = arena.Alloc(original_size);
+ void* original_allocation = allocator.Alloc(original_size);
ASSERT_TRUE(IsAligned<ArenaAllocator::kAlignment>(original_allocation));
const size_t new_size = ArenaAllocator::kAlignment * 4;
- void* realloc_allocation = arena.Realloc(original_allocation, original_size, new_size);
+ void* realloc_allocation = allocator.Realloc(original_allocation, original_size, new_size);
EXPECT_TRUE(IsAligned<ArenaAllocator::kAlignment>(realloc_allocation));
- void* after_alloc = arena.Alloc(1);
+ void* after_alloc = allocator.Alloc(1);
EXPECT_TRUE(IsAligned<ArenaAllocator::kAlignment>(after_alloc));
}
{
// Case 4: small non-aligned allocation, aligned non-extend inside arena.
ArenaPool pool;
- ArenaAllocator arena(&pool);
+ ArenaAllocator allocator(&pool);
const size_t original_size = ArenaAllocator::kAlignment * 2 + (ArenaAllocator::kAlignment / 2);
- void* original_allocation = arena.Alloc(original_size);
+ void* original_allocation = allocator.Alloc(original_size);
ASSERT_TRUE(IsAligned<ArenaAllocator::kAlignment>(original_allocation));
const size_t new_size = ArenaAllocator::kAlignment * 3;
- void* realloc_allocation = arena.Realloc(original_allocation, original_size, new_size);
+ void* realloc_allocation = allocator.Realloc(original_allocation, original_size, new_size);
EXPECT_TRUE(IsAligned<ArenaAllocator::kAlignment>(realloc_allocation));
- void* after_alloc = arena.Alloc(1);
+ void* after_alloc = allocator.Alloc(1);
EXPECT_TRUE(IsAligned<ArenaAllocator::kAlignment>(after_alloc));
}
@@ -311,39 +313,39 @@
{
// Case 5: large allocation, aligned extend into next arena.
ArenaPool pool;
- ArenaAllocator arena(&pool);
+ ArenaAllocator allocator(&pool);
const size_t original_size = arena_allocator::kArenaDefaultSize -
ArenaAllocator::kAlignment * 5;
- void* original_allocation = arena.Alloc(original_size);
+ void* original_allocation = allocator.Alloc(original_size);
ASSERT_TRUE(IsAligned<ArenaAllocator::kAlignment>(original_allocation));
const size_t new_size = arena_allocator::kArenaDefaultSize + ArenaAllocator::kAlignment * 2;
- void* realloc_allocation = arena.Realloc(original_allocation, original_size, new_size);
+ void* realloc_allocation = allocator.Realloc(original_allocation, original_size, new_size);
EXPECT_TRUE(IsAligned<ArenaAllocator::kAlignment>(realloc_allocation));
- void* after_alloc = arena.Alloc(1);
+ void* after_alloc = allocator.Alloc(1);
EXPECT_TRUE(IsAligned<ArenaAllocator::kAlignment>(after_alloc));
}
{
// Case 6: large allocation, non-aligned extend into next arena.
ArenaPool pool;
- ArenaAllocator arena(&pool);
+ ArenaAllocator allocator(&pool);
const size_t original_size = arena_allocator::kArenaDefaultSize -
ArenaAllocator::kAlignment * 4 -
ArenaAllocator::kAlignment / 2;
- void* original_allocation = arena.Alloc(original_size);
+ void* original_allocation = allocator.Alloc(original_size);
ASSERT_TRUE(IsAligned<ArenaAllocator::kAlignment>(original_allocation));
const size_t new_size = arena_allocator::kArenaDefaultSize +
ArenaAllocator::kAlignment * 2 +
ArenaAllocator::kAlignment / 2;
- void* realloc_allocation = arena.Realloc(original_allocation, original_size, new_size);
+ void* realloc_allocation = allocator.Realloc(original_allocation, original_size, new_size);
EXPECT_TRUE(IsAligned<ArenaAllocator::kAlignment>(realloc_allocation));
- void* after_alloc = arena.Alloc(1);
+ void* after_alloc = allocator.Alloc(1);
EXPECT_TRUE(IsAligned<ArenaAllocator::kAlignment>(after_alloc));
}
}
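Note: taken together, the realloc cases above pin down the Realloc contract these tests rely on: an extension that still fits the current arena hands back the original pointer, growth past the arena boundary moves the block, and in both cases the result and any subsequent allocation stay kAlignment-aligned. A condensed sketch of the in-place case (names from Case 1):

  ArenaPool pool;
  ArenaAllocator allocator(&pool);
  const size_t original_size = ArenaAllocator::kAlignment * 2;
  void* original_allocation = allocator.Alloc(original_size);
  // In-place extension within the same arena: same pointer back.
  void* grown = allocator.Realloc(original_allocation, original_size,
                                  ArenaAllocator::kAlignment * 3);
  EXPECT_EQ(original_allocation, grown);
  // Cross-arena growth (cases 5 and 6 above) would instead EXPECT_NE here.
  EXPECT_TRUE(IsAligned<ArenaAllocator::kAlignment>(allocator.Alloc(1)));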
diff --git a/runtime/base/arena_bit_vector.cc b/runtime/base/arena_bit_vector.cc
index 5f8f5d2..1542e9d 100644
--- a/runtime/base/arena_bit_vector.cc
+++ b/runtime/base/arena_bit_vector.cc
@@ -52,9 +52,9 @@
template <typename ArenaAlloc>
class ArenaBitVectorAllocator FINAL : public Allocator, private ArenaBitVectorAllocatorKind {
public:
- static ArenaBitVectorAllocator* Create(ArenaAlloc* arena, ArenaAllocKind kind) {
- void* storage = arena->template Alloc<ArenaBitVectorAllocator>(kind);
- return new (storage) ArenaBitVectorAllocator(arena, kind);
+ static ArenaBitVectorAllocator* Create(ArenaAlloc* allocator, ArenaAllocKind kind) {
+ void* storage = allocator->template Alloc<ArenaBitVectorAllocator>(kind);
+ return new (storage) ArenaBitVectorAllocator(allocator, kind);
}
~ArenaBitVectorAllocator() {
@@ -63,36 +63,36 @@
}
virtual void* Alloc(size_t size) {
- return arena_->Alloc(size, this->Kind());
+ return allocator_->Alloc(size, this->Kind());
}
virtual void Free(void*) {} // Nop.
private:
- ArenaBitVectorAllocator(ArenaAlloc* arena, ArenaAllocKind kind)
- : ArenaBitVectorAllocatorKind(kind), arena_(arena) { }
+ ArenaBitVectorAllocator(ArenaAlloc* allocator, ArenaAllocKind kind)
+ : ArenaBitVectorAllocatorKind(kind), allocator_(allocator) { }
- ArenaAlloc* const arena_;
+ ArenaAlloc* const allocator_;
DISALLOW_COPY_AND_ASSIGN(ArenaBitVectorAllocator);
};
-ArenaBitVector::ArenaBitVector(ArenaAllocator* arena,
+ArenaBitVector::ArenaBitVector(ArenaAllocator* allocator,
unsigned int start_bits,
bool expandable,
ArenaAllocKind kind)
: BitVector(start_bits,
expandable,
- ArenaBitVectorAllocator<ArenaAllocator>::Create(arena, kind)) {
+ ArenaBitVectorAllocator<ArenaAllocator>::Create(allocator, kind)) {
}
-ArenaBitVector::ArenaBitVector(ScopedArenaAllocator* arena,
+ArenaBitVector::ArenaBitVector(ScopedArenaAllocator* allocator,
unsigned int start_bits,
bool expandable,
ArenaAllocKind kind)
: BitVector(start_bits,
expandable,
- ArenaBitVectorAllocator<ScopedArenaAllocator>::Create(arena, kind)) {
+ ArenaBitVectorAllocator<ScopedArenaAllocator>::Create(allocator, kind)) {
}
} // namespace art
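Note: the Create() factory above is the standard arena placement-new idiom used throughout these files: carve typed storage out of the arena, construct in place, and make Free() a no-op because the arena reclaims everything wholesale. Condensed from the hunk, with comments added:

  template <typename ArenaAlloc>
  static ArenaBitVectorAllocator* Create(ArenaAlloc* allocator, ArenaAllocKind kind) {
    // Typed allocation from the arena; the 'template' keyword is required
    // because Alloc<T> is a dependent member template.
    void* storage = allocator->template Alloc<ArenaBitVectorAllocator>(kind);
    // Construct in place; the memory is released only when the arena is reset,
    // which is why Free() above is a no-op.
    return new (storage) ArenaBitVectorAllocator(allocator, kind);
  }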
diff --git a/runtime/base/arena_bit_vector.h b/runtime/base/arena_bit_vector.h
index d86d622..ca1d5b1 100644
--- a/runtime/base/arena_bit_vector.h
+++ b/runtime/base/arena_bit_vector.h
@@ -31,19 +31,19 @@
class ArenaBitVector : public BitVector, public ArenaObject<kArenaAllocGrowableBitMap> {
public:
template <typename Allocator>
- static ArenaBitVector* Create(Allocator* arena,
+ static ArenaBitVector* Create(Allocator* allocator,
uint32_t start_bits,
bool expandable,
ArenaAllocKind kind = kArenaAllocGrowableBitMap) {
- void* storage = arena->template Alloc<ArenaBitVector>(kind);
- return new (storage) ArenaBitVector(arena, start_bits, expandable, kind);
+ void* storage = allocator->template Alloc<ArenaBitVector>(kind);
+ return new (storage) ArenaBitVector(allocator, start_bits, expandable, kind);
}
- ArenaBitVector(ArenaAllocator* arena,
+ ArenaBitVector(ArenaAllocator* allocator,
uint32_t start_bits,
bool expandable,
ArenaAllocKind kind = kArenaAllocGrowableBitMap);
- ArenaBitVector(ScopedArenaAllocator* arena,
+ ArenaBitVector(ScopedArenaAllocator* allocator,
uint32_t start_bits,
bool expandable,
ArenaAllocKind kind = kArenaAllocGrowableBitMap);
diff --git a/runtime/jit/profile_compilation_info.cc b/runtime/jit/profile_compilation_info.cc
index f9603a7..d14c94a 100644
--- a/runtime/jit/profile_compilation_info.cc
+++ b/runtime/jit/profile_compilation_info.cc
@@ -1828,7 +1828,7 @@
ProfileCompilationInfo::DexFileData::FindOrAddMethod(uint16_t method_index) {
return &(method_map.FindOrAdd(
method_index,
- InlineCacheMap(std::less<uint16_t>(), arena_->Adapter(kArenaAllocProfile)))->second);
+ InlineCacheMap(std::less<uint16_t>(), allocator_->Adapter(kArenaAllocProfile)))->second);
}
// Mark a method as executed at least once.
@@ -1847,7 +1847,7 @@
if ((flags & MethodHotness::kFlagHot) != 0) {
method_map.FindOrAdd(
index,
- InlineCacheMap(std::less<uint16_t>(), arena_->Adapter(kArenaAllocProfile)));
+ InlineCacheMap(std::less<uint16_t>(), allocator_->Adapter(kArenaAllocProfile)));
}
return true;
}
diff --git a/runtime/jit/profile_compilation_info.h b/runtime/jit/profile_compilation_info.h
index 8889b34..8dbb43f 100644
--- a/runtime/jit/profile_compilation_info.h
+++ b/runtime/jit/profile_compilation_info.h
@@ -434,7 +434,7 @@
uint32_t location_checksum,
uint16_t index,
uint32_t num_methods)
- : arena_(allocator),
+ : allocator_(allocator),
profile_key(key),
profile_index(index),
checksum(location_checksum),
@@ -466,8 +466,8 @@
MethodHotness GetHotnessInfo(uint32_t dex_method_index) const;
- // The arena used to allocate new inline cache maps.
- ArenaAllocator* arena_;
+ // The allocator used to allocate new inline cache maps.
+ ArenaAllocator* const allocator_;
// The profile key this data belongs to.
std::string profile_key;
// The profile index of this dex file (matches ClassReference#dex_profile_index).
diff --git a/runtime/jit/profile_compilation_info_test.cc b/runtime/jit/profile_compilation_info_test.cc
index 2cb8294..f155d7e 100644
--- a/runtime/jit/profile_compilation_info_test.cc
+++ b/runtime/jit/profile_compilation_info_test.cc
@@ -39,7 +39,7 @@
class ProfileCompilationInfoTest : public CommonRuntimeTest {
public:
void PostRuntimeCreate() OVERRIDE {
- arena_.reset(new ArenaAllocator(Runtime::Current()->GetArenaPool()));
+ allocator_.reset(new ArenaAllocator(Runtime::Current()->GetArenaPool()));
}
protected:
@@ -176,7 +176,7 @@
// Creates an inline cache which will be destructed at the end of the test.
ProfileCompilationInfo::InlineCacheMap* CreateInlineCacheMap() {
used_inline_caches.emplace_back(new ProfileCompilationInfo::InlineCacheMap(
- std::less<uint16_t>(), arena_->Adapter(kArenaAllocProfile)));
+ std::less<uint16_t>(), allocator_->Adapter(kArenaAllocProfile)));
return used_inline_caches.back().get();
}
@@ -188,7 +188,7 @@
for (const auto& inline_cache : pmi.inline_caches) {
ProfileCompilationInfo::DexPcData& dex_pc_data =
ic_map->FindOrAdd(
- inline_cache.dex_pc, ProfileCompilationInfo::DexPcData(arena_.get()))->second;
+ inline_cache.dex_pc, ProfileCompilationInfo::DexPcData(allocator_.get()))->second;
if (inline_cache.is_missing_types) {
dex_pc_data.SetIsMissingTypes();
}
@@ -215,13 +215,13 @@
// Monomorphic
for (uint16_t dex_pc = 0; dex_pc < 11; dex_pc++) {
- ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
+ ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
dex_pc_data.AddClass(0, dex::TypeIndex(0));
ic_map->Put(dex_pc, dex_pc_data);
}
// Polymorphic
for (uint16_t dex_pc = 11; dex_pc < 22; dex_pc++) {
- ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
+ ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
dex_pc_data.AddClass(0, dex::TypeIndex(0));
dex_pc_data.AddClass(1, dex::TypeIndex(1));
dex_pc_data.AddClass(2, dex::TypeIndex(2));
@@ -230,13 +230,13 @@
}
// Megamorphic
for (uint16_t dex_pc = 22; dex_pc < 33; dex_pc++) {
- ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
+ ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
dex_pc_data.SetIsMegamorphic();
ic_map->Put(dex_pc, dex_pc_data);
}
// Missing types
for (uint16_t dex_pc = 33; dex_pc < 44; dex_pc++) {
- ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
+ ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
dex_pc_data.SetIsMissingTypes();
ic_map->Put(dex_pc, dex_pc_data);
}
@@ -273,7 +273,7 @@
static constexpr int kProfileMagicSize = 4;
static constexpr int kProfileVersionSize = 4;
- std::unique_ptr<ArenaAllocator> arena_;
+ std::unique_ptr<ArenaAllocator> allocator_;
// Cache of inline caches generated during tests.
// This makes it easier to pass data between different utilities and ensure that
@@ -730,7 +730,7 @@
pmi.dex_references.emplace_back("dex_location1", /* checksum */ 1, kMaxMethodIds);
pmi.dex_references.emplace_back("dex_location2", /* checksum */ 2, kMaxMethodIds);
for (uint16_t dex_pc = 1; dex_pc < 5; dex_pc++) {
- ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
+ ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
dex_pc_data.AddClass(0, dex::TypeIndex(0));
dex_pc_data.AddClass(1, dex::TypeIndex(1));
ic_map->Put(dex_pc, dex_pc_data);
@@ -741,7 +741,7 @@
pmi_reindexed.dex_references.emplace_back("dex_location2", /* checksum */ 2, kMaxMethodIds);
pmi_reindexed.dex_references.emplace_back("dex_location1", /* checksum */ 1, kMaxMethodIds);
for (uint16_t dex_pc = 1; dex_pc < 5; dex_pc++) {
- ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
+ ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
dex_pc_data.AddClass(1, dex::TypeIndex(0));
dex_pc_data.AddClass(0, dex::TypeIndex(1));
ic_map_reindexed->Put(dex_pc, dex_pc_data);
@@ -795,7 +795,7 @@
ProfileCompilationInfo::InlineCacheMap* ic_map = CreateInlineCacheMap();
ProfileCompilationInfo::OfflineProfileMethodInfo pmi(ic_map);
pmi.dex_references.emplace_back("dex_location1", /* checksum */ 1, kMaxMethodIds);
- ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
+ ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
dex_pc_data.SetIsMegamorphic();
ic_map->Put(/*dex_pc*/ 0, dex_pc_data);
@@ -825,7 +825,7 @@
ProfileCompilationInfo::InlineCacheMap* ic_map = CreateInlineCacheMap();
ProfileCompilationInfo::OfflineProfileMethodInfo pmi(ic_map);
pmi.dex_references.emplace_back("dex_location1", /* checksum */ 1, kMaxMethodIds);
- ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
+ ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
dex_pc_data.SetIsMissingTypes();
ic_map->Put(/*dex_pc*/ 0, dex_pc_data);
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 7246bae..0033167 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -71,8 +71,8 @@
// sure we only print this once.
static bool gPrintedDxMonitorText = false;
-PcToRegisterLineTable::PcToRegisterLineTable(ScopedArenaAllocator& arena)
- : register_lines_(arena.Adapter(kArenaAllocVerifier)) {}
+PcToRegisterLineTable::PcToRegisterLineTable(ScopedArenaAllocator& allocator)
+ : register_lines_(allocator.Adapter(kArenaAllocVerifier)) {}
void PcToRegisterLineTable::Init(RegisterTrackingMode mode, InstructionFlags* flags,
uint32_t insns_size, uint16_t registers_size,
@@ -552,9 +552,9 @@
bool allow_thread_suspension)
: self_(self),
arena_stack_(Runtime::Current()->GetArenaPool()),
- arena_(&arena_stack_),
- reg_types_(can_load_classes, arena_),
- reg_table_(arena_),
+ allocator_(&arena_stack_),
+ reg_types_(can_load_classes, allocator_),
+ reg_table_(allocator_),
work_insn_idx_(dex::kDexNoIndex),
dex_method_idx_(dex_method_idx),
mirror_method_(method),
@@ -868,7 +868,7 @@
}
// Allocate and initialize an array to hold instruction data.
- insn_flags_.reset(arena_.AllocArray<InstructionFlags>(code_item_->insns_size_in_code_units_));
+ insn_flags_.reset(allocator_.AllocArray<InstructionFlags>(code_item_->insns_size_in_code_units_));
DCHECK(insn_flags_ != nullptr);
std::uninitialized_fill_n(insn_flags_.get(),
code_item_->insns_size_in_code_units_,
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index 57ab56c..1f1d7c1 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -67,7 +67,7 @@
// execution of that instruction.
class PcToRegisterLineTable {
public:
- explicit PcToRegisterLineTable(ScopedArenaAllocator& arena);
+ explicit PcToRegisterLineTable(ScopedArenaAllocator& allocator);
~PcToRegisterLineTable();
// Initialize the RegisterTable. Every instruction address can have a different set of information
@@ -222,7 +222,7 @@
}
ScopedArenaAllocator& GetScopedAllocator() {
- return arena_;
+ return allocator_;
}
private:
@@ -711,7 +711,7 @@
// Arena allocator.
ArenaStack arena_stack_;
- ScopedArenaAllocator arena_;
+ ScopedArenaAllocator allocator_;
RegTypeCache reg_types_;
diff --git a/runtime/verifier/reg_type_cache.cc b/runtime/verifier/reg_type_cache.cc
index 0c00868..4ebe151 100644
--- a/runtime/verifier/reg_type_cache.cc
+++ b/runtime/verifier/reg_type_cache.cc
@@ -164,7 +164,7 @@
}
StringPiece RegTypeCache::AddString(const StringPiece& string_piece) {
- char* ptr = arena_.AllocArray<char>(string_piece.length());
+ char* ptr = allocator_.AllocArray<char>(string_piece.length());
memcpy(ptr, string_piece.data(), string_piece.length());
return StringPiece(ptr, string_piece.length());
}
@@ -197,9 +197,10 @@
if (klass->CannotBeAssignedFromOtherTypes() || precise) {
DCHECK(!(klass->IsAbstract()) || klass->IsArrayClass());
DCHECK(!klass->IsInterface());
- entry = new (&arena_) PreciseReferenceType(klass, AddString(sp_descriptor), entries_.size());
+ entry =
+ new (&allocator_) PreciseReferenceType(klass, AddString(sp_descriptor), entries_.size());
} else {
- entry = new (&arena_) ReferenceType(klass, AddString(sp_descriptor), entries_.size());
+ entry = new (&allocator_) ReferenceType(klass, AddString(sp_descriptor), entries_.size());
}
return AddEntry(entry);
} else { // Class not resolved.
@@ -213,7 +214,7 @@
}
if (IsValidDescriptor(descriptor)) {
return AddEntry(
- new (&arena_) UnresolvedReferenceType(AddString(sp_descriptor), entries_.size()));
+ new (&allocator_) UnresolvedReferenceType(AddString(sp_descriptor), entries_.size()));
} else {
// The descriptor is broken return the unknown type as there's nothing sensible that
// could be done at runtime
@@ -224,7 +225,7 @@
const RegType& RegTypeCache::MakeUnresolvedReference() {
// The descriptor is intentionally invalid so nothing else will match this type.
- return AddEntry(new (&arena_) UnresolvedReferenceType(AddString("a"), entries_.size()));
+ return AddEntry(new (&allocator_) UnresolvedReferenceType(AddString("a"), entries_.size()));
}
const RegType* RegTypeCache::FindClass(mirror::Class* klass, bool precise) const {
@@ -253,8 +254,8 @@
DCHECK(FindClass(klass, precise) == nullptr);
RegType* const reg_type = precise
? static_cast<RegType*>(
- new (&arena_) PreciseReferenceType(klass, descriptor, entries_.size()))
- : new (&arena_) ReferenceType(klass, descriptor, entries_.size());
+ new (&allocator_) PreciseReferenceType(klass, descriptor, entries_.size()))
+ : new (&allocator_) ReferenceType(klass, descriptor, entries_.size());
return &AddEntry(reg_type);
}
@@ -267,11 +268,11 @@
return *reg_type;
}
-RegTypeCache::RegTypeCache(bool can_load_classes, ScopedArenaAllocator& arena)
- : entries_(arena.Adapter(kArenaAllocVerifier)),
- klass_entries_(arena.Adapter(kArenaAllocVerifier)),
+RegTypeCache::RegTypeCache(bool can_load_classes, ScopedArenaAllocator& allocator)
+ : entries_(allocator.Adapter(kArenaAllocVerifier)),
+ klass_entries_(allocator.Adapter(kArenaAllocVerifier)),
can_load_classes_(can_load_classes),
- arena_(arena) {
+ allocator_(allocator) {
if (kIsDebugBuild) {
Thread::Current()->AssertThreadSuspensionIsAllowable(gAborting == 0);
}
@@ -349,7 +350,7 @@
const RegType& RegTypeCache::FromUnresolvedMerge(const RegType& left,
const RegType& right,
MethodVerifier* verifier) {
- ArenaBitVector types(&arena_,
+ ArenaBitVector types(&allocator_,
kDefaultArenaBitVectorBytes * kBitsPerByte, // Allocate at least 8 bytes.
true); // Is expandable.
const RegType* left_resolved;
@@ -426,10 +427,10 @@
}
}
}
- return AddEntry(new (&arena_) UnresolvedMergedType(resolved_parts_merged,
- types,
- this,
- entries_.size()));
+ return AddEntry(new (&allocator_) UnresolvedMergedType(resolved_parts_merged,
+ types,
+ this,
+ entries_.size()));
}
const RegType& RegTypeCache::FromUnresolvedSuperClass(const RegType& child) {
@@ -446,7 +447,7 @@
}
}
}
- return AddEntry(new (&arena_) UnresolvedSuperClass(child.GetId(), this, entries_.size()));
+ return AddEntry(new (&allocator_) UnresolvedSuperClass(child.GetId(), this, entries_.size()));
}
const UninitializedType& RegTypeCache::Uninitialized(const RegType& type, uint32_t allocation_pc) {
@@ -462,9 +463,9 @@
return *down_cast<const UnresolvedUninitializedRefType*>(cur_entry);
}
}
- entry = new (&arena_) UnresolvedUninitializedRefType(descriptor,
- allocation_pc,
- entries_.size());
+ entry = new (&allocator_) UnresolvedUninitializedRefType(descriptor,
+ allocation_pc,
+ entries_.size());
} else {
mirror::Class* klass = type.GetClass();
for (size_t i = primitive_count_; i < entries_.size(); i++) {
@@ -476,10 +477,10 @@
return *down_cast<const UninitializedReferenceType*>(cur_entry);
}
}
- entry = new (&arena_) UninitializedReferenceType(klass,
- descriptor,
- allocation_pc,
- entries_.size());
+ entry = new (&allocator_) UninitializedReferenceType(klass,
+ descriptor,
+ allocation_pc,
+ entries_.size());
}
return AddEntry(entry);
}
@@ -496,7 +497,7 @@
return *cur_entry;
}
}
- entry = new (&arena_) UnresolvedReferenceType(descriptor, entries_.size());
+ entry = new (&allocator_) UnresolvedReferenceType(descriptor, entries_.size());
} else {
mirror::Class* klass = uninit_type.GetClass();
if (uninit_type.IsUninitializedThisReference() && !klass->IsFinal()) {
@@ -507,7 +508,7 @@
return *cur_entry;
}
}
- entry = new (&arena_) ReferenceType(klass, "", entries_.size());
+ entry = new (&allocator_) ReferenceType(klass, "", entries_.size());
} else if (!klass->IsPrimitive()) {
// We're uninitialized because of allocation, look or create a precise type as allocations
// may only create objects of that type.
@@ -526,9 +527,9 @@
return *cur_entry;
}
}
- entry = new (&arena_) PreciseReferenceType(klass,
- uninit_type.GetDescriptor(),
- entries_.size());
+ entry = new (&allocator_) PreciseReferenceType(klass,
+ uninit_type.GetDescriptor(),
+ entries_.size());
} else {
return Conflict();
}
@@ -547,7 +548,7 @@
return *down_cast<const UninitializedType*>(cur_entry);
}
}
- entry = new (&arena_) UnresolvedUninitializedThisRefType(descriptor, entries_.size());
+ entry = new (&allocator_) UnresolvedUninitializedThisRefType(descriptor, entries_.size());
} else {
mirror::Class* klass = type.GetClass();
for (size_t i = primitive_count_; i < entries_.size(); i++) {
@@ -556,7 +557,7 @@
return *down_cast<const UninitializedType*>(cur_entry);
}
}
- entry = new (&arena_) UninitializedThisReferenceType(klass, descriptor, entries_.size());
+ entry = new (&allocator_) UninitializedThisReferenceType(klass, descriptor, entries_.size());
}
return AddEntry(entry);
}
@@ -572,9 +573,9 @@
}
ConstantType* entry;
if (precise) {
- entry = new (&arena_) PreciseConstType(value, entries_.size());
+ entry = new (&allocator_) PreciseConstType(value, entries_.size());
} else {
- entry = new (&arena_) ImpreciseConstType(value, entries_.size());
+ entry = new (&allocator_) ImpreciseConstType(value, entries_.size());
}
return AddEntry(entry);
}
@@ -589,9 +590,9 @@
}
ConstantType* entry;
if (precise) {
- entry = new (&arena_) PreciseConstLoType(value, entries_.size());
+ entry = new (&allocator_) PreciseConstLoType(value, entries_.size());
} else {
- entry = new (&arena_) ImpreciseConstLoType(value, entries_.size());
+ entry = new (&allocator_) ImpreciseConstLoType(value, entries_.size());
}
return AddEntry(entry);
}
@@ -606,9 +607,9 @@
}
ConstantType* entry;
if (precise) {
- entry = new (&arena_) PreciseConstHiType(value, entries_.size());
+ entry = new (&allocator_) PreciseConstHiType(value, entries_.size());
} else {
- entry = new (&arena_) ImpreciseConstHiType(value, entries_.size());
+ entry = new (&allocator_) ImpreciseConstHiType(value, entries_.size());
}
return AddEntry(entry);
}
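Note: every `new (&allocator_) SomeRegType(...)` expression above goes through an arena-aware operator new on the RegType hierarchy rather than global placement new. A minimal illustration of how such an overload is typically declared (an assumption for illustration only, not the exact ART declaration):

  class ArenaAllocatedType {  // Hypothetical base, for illustration.
   public:
    // Route allocation to the scoped arena; no matching operator delete is
    // needed in normal operation because the arena releases memory in bulk.
    void* operator new(size_t size, ScopedArenaAllocator* allocator) {
      return allocator->Alloc(size, kArenaAllocVerifier);
    }
  };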
diff --git a/runtime/verifier/reg_type_cache.h b/runtime/verifier/reg_type_cache.h
index 96eca05..74d9e9d 100644
--- a/runtime/verifier/reg_type_cache.h
+++ b/runtime/verifier/reg_type_cache.h
@@ -61,7 +61,7 @@
class RegTypeCache {
public:
- explicit RegTypeCache(bool can_load_classes, ScopedArenaAllocator& arena);
+ explicit RegTypeCache(bool can_load_classes, ScopedArenaAllocator& allocator);
~RegTypeCache();
static void Init() REQUIRES_SHARED(Locks::mutator_lock_) {
if (!RegTypeCache::primitive_initialized_) {
@@ -201,7 +201,7 @@
const bool can_load_classes_;
// Arena allocator.
- ScopedArenaAllocator& arena_;
+ ScopedArenaAllocator& allocator_;
DISALLOW_COPY_AND_ASSIGN(RegTypeCache);
};