Use ScopedArenaAllocator for register allocation.
Memory needed to compile the two most expensive methods for the
aosp_angler-userdebug boot image:
BatteryStats.dumpCheckinLocked(): 25.1MiB -> 21.1MiB
BatteryStats.dumpLocked(): 49.6MiB -> 42.0MiB
This is because all the memory previously used by the Scheduler
is reused by the register allocator, which has the higher peak
usage of the ArenaStack.
Also continue the "arena" -> "allocator" renaming.
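The shape of the change, as exercised in codegen_test_utils.h
below: a ScopedArenaAllocator created on the graph's ArenaStack
releases all of its memory when it goes out of scope, so the
next pass can reuse it. A minimal sketch ("graph" and "codegen"
stand in for whatever the caller has in hand):

  {
    // All allocations below live on the graph's ArenaStack and are
    // released at the closing brace, ready for reuse by later passes.
    ScopedArenaAllocator local_allocator(graph->GetArenaStack());
    SsaLivenessAnalysis liveness(graph, codegen, &local_allocator);
    PrepareForRegisterAllocation(graph).Run();
    liveness.Analyze();
    RegisterAllocator::Create(&local_allocator, codegen, liveness)
        ->AllocateRegisters();
  }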
Test: m test-art-host-gtest
Test: testrunner.py --host
Bug: 64312607
Change-Id: Idfd79a9901552b5147ec0bf591cb38120de86b01
diff --git a/compiler/jni/quick/calling_convention.cc b/compiler/jni/quick/calling_convention.cc
index 36a87a8..42a5f86 100644
--- a/compiler/jni/quick/calling_convention.cc
+++ b/compiler/jni/quick/calling_convention.cc
@@ -47,7 +47,7 @@
// Managed runtime calling convention
std::unique_ptr<ManagedRuntimeCallingConvention> ManagedRuntimeCallingConvention::Create(
- ArenaAllocator* arena,
+ ArenaAllocator* allocator,
bool is_static,
bool is_synchronized,
const char* shorty,
@@ -57,35 +57,37 @@
case kArm:
case kThumb2:
return std::unique_ptr<ManagedRuntimeCallingConvention>(
- new (arena) arm::ArmManagedRuntimeCallingConvention(is_static, is_synchronized, shorty));
+ new (allocator) arm::ArmManagedRuntimeCallingConvention(
+ is_static, is_synchronized, shorty));
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
case kArm64:
return std::unique_ptr<ManagedRuntimeCallingConvention>(
- new (arena) arm64::Arm64ManagedRuntimeCallingConvention(
+ new (allocator) arm64::Arm64ManagedRuntimeCallingConvention(
is_static, is_synchronized, shorty));
#endif
#ifdef ART_ENABLE_CODEGEN_mips
case kMips:
return std::unique_ptr<ManagedRuntimeCallingConvention>(
- new (arena) mips::MipsManagedRuntimeCallingConvention(
+ new (allocator) mips::MipsManagedRuntimeCallingConvention(
is_static, is_synchronized, shorty));
#endif
#ifdef ART_ENABLE_CODEGEN_mips64
case kMips64:
return std::unique_ptr<ManagedRuntimeCallingConvention>(
- new (arena) mips64::Mips64ManagedRuntimeCallingConvention(
+ new (allocator) mips64::Mips64ManagedRuntimeCallingConvention(
is_static, is_synchronized, shorty));
#endif
#ifdef ART_ENABLE_CODEGEN_x86
case kX86:
return std::unique_ptr<ManagedRuntimeCallingConvention>(
- new (arena) x86::X86ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty));
+ new (allocator) x86::X86ManagedRuntimeCallingConvention(
+ is_static, is_synchronized, shorty));
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
case kX86_64:
return std::unique_ptr<ManagedRuntimeCallingConvention>(
- new (arena) x86_64::X86_64ManagedRuntimeCallingConvention(
+ new (allocator) x86_64::X86_64ManagedRuntimeCallingConvention(
is_static, is_synchronized, shorty));
#endif
default:
@@ -146,7 +148,7 @@
// JNI calling convention
-std::unique_ptr<JniCallingConvention> JniCallingConvention::Create(ArenaAllocator* arena,
+std::unique_ptr<JniCallingConvention> JniCallingConvention::Create(ArenaAllocator* allocator,
bool is_static,
bool is_synchronized,
bool is_critical_native,
@@ -157,50 +159,38 @@
case kArm:
case kThumb2:
return std::unique_ptr<JniCallingConvention>(
- new (arena) arm::ArmJniCallingConvention(is_static,
- is_synchronized,
- is_critical_native,
- shorty));
+ new (allocator) arm::ArmJniCallingConvention(
+ is_static, is_synchronized, is_critical_native, shorty));
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
case kArm64:
return std::unique_ptr<JniCallingConvention>(
- new (arena) arm64::Arm64JniCallingConvention(is_static,
- is_synchronized,
- is_critical_native,
- shorty));
+ new (allocator) arm64::Arm64JniCallingConvention(
+ is_static, is_synchronized, is_critical_native, shorty));
#endif
#ifdef ART_ENABLE_CODEGEN_mips
case kMips:
return std::unique_ptr<JniCallingConvention>(
- new (arena) mips::MipsJniCallingConvention(is_static,
- is_synchronized,
- is_critical_native,
- shorty));
+ new (allocator) mips::MipsJniCallingConvention(
+ is_static, is_synchronized, is_critical_native, shorty));
#endif
#ifdef ART_ENABLE_CODEGEN_mips64
case kMips64:
return std::unique_ptr<JniCallingConvention>(
- new (arena) mips64::Mips64JniCallingConvention(is_static,
- is_synchronized,
- is_critical_native,
- shorty));
+ new (allocator) mips64::Mips64JniCallingConvention(
+ is_static, is_synchronized, is_critical_native, shorty));
#endif
#ifdef ART_ENABLE_CODEGEN_x86
case kX86:
return std::unique_ptr<JniCallingConvention>(
- new (arena) x86::X86JniCallingConvention(is_static,
- is_synchronized,
- is_critical_native,
- shorty));
+ new (allocator) x86::X86JniCallingConvention(
+ is_static, is_synchronized, is_critical_native, shorty));
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
case kX86_64:
return std::unique_ptr<JniCallingConvention>(
- new (arena) x86_64::X86_64JniCallingConvention(is_static,
- is_synchronized,
- is_critical_native,
- shorty));
+ new (allocator) x86_64::X86_64JniCallingConvention(
+ is_static, is_synchronized, is_critical_native, shorty));
#endif
default:
LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
diff --git a/compiler/jni/quick/calling_convention.h b/compiler/jni/quick/calling_convention.h
index 335a2df..be0bd72 100644
--- a/compiler/jni/quick/calling_convention.h
+++ b/compiler/jni/quick/calling_convention.h
@@ -231,7 +231,7 @@
// | { Method* } | <-- SP
class ManagedRuntimeCallingConvention : public CallingConvention {
public:
- static std::unique_ptr<ManagedRuntimeCallingConvention> Create(ArenaAllocator* arena,
+ static std::unique_ptr<ManagedRuntimeCallingConvention> Create(ArenaAllocator* allocator,
bool is_static,
bool is_synchronized,
const char* shorty,
@@ -284,7 +284,7 @@
// callee saves for frames above this one.
class JniCallingConvention : public CallingConvention {
public:
- static std::unique_ptr<JniCallingConvention> Create(ArenaAllocator* arena,
+ static std::unique_ptr<JniCallingConvention> Create(ArenaAllocator* allocator,
bool is_static,
bool is_synchronized,
bool is_critical_native,
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index e7a5935..92b5c4d 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -66,8 +66,8 @@
template <PointerSize kPointerSize>
static std::unique_ptr<JNIMacroAssembler<kPointerSize>> GetMacroAssembler(
- ArenaAllocator* arena, InstructionSet isa, const InstructionSetFeatures* features) {
- return JNIMacroAssembler<kPointerSize>::Create(arena, isa, features);
+ ArenaAllocator* allocator, InstructionSet isa, const InstructionSetFeatures* features) {
+ return JNIMacroAssembler<kPointerSize>::Create(allocator, isa, features);
}
enum class JniEntrypoint {
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index ba26cfc..dd8e3d2 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -617,61 +617,49 @@
const InstructionSetFeatures& isa_features,
const CompilerOptions& compiler_options,
OptimizingCompilerStats* stats) {
- ArenaAllocator* arena = graph->GetAllocator();
+ ArenaAllocator* allocator = graph->GetAllocator();
switch (instruction_set) {
#ifdef ART_ENABLE_CODEGEN_arm
case kArm:
case kThumb2: {
return std::unique_ptr<CodeGenerator>(
- new (arena) arm::CodeGeneratorARMVIXL(graph,
- *isa_features.AsArmInstructionSetFeatures(),
- compiler_options,
- stats));
+ new (allocator) arm::CodeGeneratorARMVIXL(
+ graph, *isa_features.AsArmInstructionSetFeatures(), compiler_options, stats));
}
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
case kArm64: {
return std::unique_ptr<CodeGenerator>(
- new (arena) arm64::CodeGeneratorARM64(graph,
- *isa_features.AsArm64InstructionSetFeatures(),
- compiler_options,
- stats));
+ new (allocator) arm64::CodeGeneratorARM64(
+ graph, *isa_features.AsArm64InstructionSetFeatures(), compiler_options, stats));
}
#endif
#ifdef ART_ENABLE_CODEGEN_mips
case kMips: {
return std::unique_ptr<CodeGenerator>(
- new (arena) mips::CodeGeneratorMIPS(graph,
- *isa_features.AsMipsInstructionSetFeatures(),
- compiler_options,
- stats));
+ new (allocator) mips::CodeGeneratorMIPS(
+ graph, *isa_features.AsMipsInstructionSetFeatures(), compiler_options, stats));
}
#endif
#ifdef ART_ENABLE_CODEGEN_mips64
case kMips64: {
return std::unique_ptr<CodeGenerator>(
- new (arena) mips64::CodeGeneratorMIPS64(graph,
- *isa_features.AsMips64InstructionSetFeatures(),
- compiler_options,
- stats));
+ new (allocator) mips64::CodeGeneratorMIPS64(
+ graph, *isa_features.AsMips64InstructionSetFeatures(), compiler_options, stats));
}
#endif
#ifdef ART_ENABLE_CODEGEN_x86
case kX86: {
return std::unique_ptr<CodeGenerator>(
- new (arena) x86::CodeGeneratorX86(graph,
- *isa_features.AsX86InstructionSetFeatures(),
- compiler_options,
- stats));
+ new (allocator) x86::CodeGeneratorX86(
+ graph, *isa_features.AsX86InstructionSetFeatures(), compiler_options, stats));
}
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
case kX86_64: {
return std::unique_ptr<CodeGenerator>(
- new (arena) x86_64::CodeGeneratorX86_64(graph,
- *isa_features.AsX86_64InstructionSetFeatures(),
- compiler_options,
- stats));
+ new (allocator) x86_64::CodeGeneratorX86_64(
+ graph, *isa_features.AsX86_64InstructionSetFeatures(), compiler_options, stats));
}
#endif
default:
@@ -910,7 +898,7 @@
}
void CodeGenerator::RecordCatchBlockInfo() {
- ArenaAllocator* arena = graph_->GetAllocator();
+ ArenaAllocator* allocator = graph_->GetAllocator();
for (HBasicBlock* block : *block_order_) {
if (!block->IsCatchBlock()) {
@@ -925,7 +913,7 @@
// The stack mask is not used, so we leave it empty.
ArenaBitVector* stack_mask =
- ArenaBitVector::Create(arena, 0, /* expandable */ true, kArenaAllocCodeGenerator);
+ ArenaBitVector::Create(allocator, 0, /* expandable */ true, kArenaAllocCodeGenerator);
stack_map_stream_.BeginStackMapEntry(dex_pc,
native_pc,
diff --git a/compiler/optimizing/code_generator_vector_arm64.cc b/compiler/optimizing/code_generator_vector_arm64.cc
index bcf35d9..10aced0 100644
--- a/compiler/optimizing/code_generator_vector_arm64.cc
+++ b/compiler/optimizing/code_generator_vector_arm64.cc
@@ -179,8 +179,8 @@
}
// Helper to set up locations for vector unary operations.
-static void CreateVecUnOpLocations(ArenaAllocator* arena, HVecUnaryOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecUnOpLocations(ArenaAllocator* allocator, HVecUnaryOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
locations->SetInAt(0, Location::RequiresFpuRegister());
@@ -372,8 +372,8 @@
}
// Helper to set up locations for vector binary operations.
-static void CreateVecBinOpLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecBinOpLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
case DataType::Type::kUint8:
@@ -772,8 +772,8 @@
}
// Helper to set up locations for vector shift operations.
-static void CreateVecShiftLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecShiftLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kUint8:
case DataType::Type::kInt8:
@@ -967,8 +967,8 @@
}
// Helper to set up locations for vector accumulations.
-static void CreateVecAccumLocations(ArenaAllocator* arena, HVecOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecAccumLocations(ArenaAllocator* allocator, HVecOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kUint8:
case DataType::Type::kInt8:
@@ -1216,10 +1216,10 @@
}
// Helper to set up locations for vector memory operations.
-static void CreateVecMemLocations(ArenaAllocator* arena,
+static void CreateVecMemLocations(ArenaAllocator* allocator,
HVecMemoryOperation* instruction,
bool is_load) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
case DataType::Type::kUint8:
diff --git a/compiler/optimizing/code_generator_vector_arm_vixl.cc b/compiler/optimizing/code_generator_vector_arm_vixl.cc
index 605c936..f84408d 100644
--- a/compiler/optimizing/code_generator_vector_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_vector_arm_vixl.cc
@@ -84,8 +84,8 @@
}
// Helper to set up locations for vector unary operations.
-static void CreateVecUnOpLocations(ArenaAllocator* arena, HVecUnaryOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecUnOpLocations(ArenaAllocator* allocator, HVecUnaryOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
locations->SetInAt(0, Location::RequiresFpuRegister());
@@ -207,8 +207,8 @@
}
// Helper to set up locations for vector binary operations.
-static void CreateVecBinOpLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecBinOpLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
case DataType::Type::kUint8:
@@ -526,8 +526,8 @@
}
// Helper to set up locations for vector shift operations.
-static void CreateVecShiftLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecShiftLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kUint8:
case DataType::Type::kInt8:
@@ -643,8 +643,8 @@
}
// Helper to set up locations for vector accumulations.
-static void CreateVecAccumLocations(ArenaAllocator* arena, HVecOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecAccumLocations(ArenaAllocator* allocator, HVecOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kUint8:
case DataType::Type::kInt8:
@@ -686,10 +686,10 @@
}
// Helper to set up locations for vector memory operations.
-static void CreateVecMemLocations(ArenaAllocator* arena,
+static void CreateVecMemLocations(ArenaAllocator* allocator,
HVecMemoryOperation* instruction,
bool is_load) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
case DataType::Type::kUint8:
diff --git a/compiler/optimizing/code_generator_vector_mips.cc b/compiler/optimizing/code_generator_vector_mips.cc
index 82d90e0..c5a39ff 100644
--- a/compiler/optimizing/code_generator_vector_mips.cc
+++ b/compiler/optimizing/code_generator_vector_mips.cc
@@ -99,8 +99,8 @@
}
// Helper to set up locations for vector unary operations.
-static void CreateVecUnOpLocations(ArenaAllocator* arena, HVecUnaryOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecUnOpLocations(ArenaAllocator* allocator, HVecUnaryOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
locations->SetInAt(0, Location::RequiresFpuRegister());
@@ -281,8 +281,8 @@
}
// Helper to set up locations for vector binary operations.
-static void CreateVecBinOpLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecBinOpLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
case DataType::Type::kUint8:
@@ -709,8 +709,8 @@
}
// Helper to set up locations for vector shift operations.
-static void CreateVecShiftLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecShiftLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kUint8:
case DataType::Type::kInt8:
@@ -839,8 +839,8 @@
}
// Helper to set up locations for vector accumulations.
-static void CreateVecAccumLocations(ArenaAllocator* arena, HVecOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecAccumLocations(ArenaAllocator* allocator, HVecOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kUint8:
case DataType::Type::kInt8:
@@ -919,10 +919,10 @@
}
// Helper to set up locations for vector memory operations.
-static void CreateVecMemLocations(ArenaAllocator* arena,
+static void CreateVecMemLocations(ArenaAllocator* allocator,
HVecMemoryOperation* instruction,
bool is_load) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
case DataType::Type::kUint8:
diff --git a/compiler/optimizing/code_generator_vector_mips64.cc b/compiler/optimizing/code_generator_vector_mips64.cc
index 6b0162a..e606df2 100644
--- a/compiler/optimizing/code_generator_vector_mips64.cc
+++ b/compiler/optimizing/code_generator_vector_mips64.cc
@@ -102,8 +102,8 @@
}
// Helper to set up locations for vector unary operations.
-static void CreateVecUnOpLocations(ArenaAllocator* arena, HVecUnaryOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecUnOpLocations(ArenaAllocator* allocator, HVecUnaryOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
locations->SetInAt(0, Location::RequiresFpuRegister());
@@ -285,8 +285,8 @@
}
// Helper to set up locations for vector binary operations.
-static void CreateVecBinOpLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecBinOpLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
case DataType::Type::kUint8:
@@ -713,8 +713,8 @@
}
// Helper to set up locations for vector shift operations.
-static void CreateVecShiftLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecShiftLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kUint8:
case DataType::Type::kInt8:
@@ -843,8 +843,8 @@
}
// Helper to set up locations for vector accumulations.
-static void CreateVecAccumLocations(ArenaAllocator* arena, HVecOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecAccumLocations(ArenaAllocator* allocator, HVecOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kUint8:
case DataType::Type::kInt8:
@@ -923,10 +923,10 @@
}
// Helper to set up locations for vector memory operations.
-static void CreateVecMemLocations(ArenaAllocator* arena,
+static void CreateVecMemLocations(ArenaAllocator* allocator,
HVecMemoryOperation* instruction,
bool is_load) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
case DataType::Type::kUint8:
diff --git a/compiler/optimizing/code_generator_vector_x86.cc b/compiler/optimizing/code_generator_vector_x86.cc
index 699c02f..ad8128a 100644
--- a/compiler/optimizing/code_generator_vector_x86.cc
+++ b/compiler/optimizing/code_generator_vector_x86.cc
@@ -180,8 +180,8 @@
}
// Helper to set up locations for vector unary operations.
-static void CreateVecUnOpLocations(ArenaAllocator* arena, HVecUnaryOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecUnOpLocations(ArenaAllocator* allocator, HVecUnaryOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
case DataType::Type::kUint8:
@@ -424,8 +424,8 @@
}
// Helper to set up locations for vector binary operations.
-static void CreateVecBinOpLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecBinOpLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
case DataType::Type::kUint8:
@@ -857,8 +857,8 @@
}
// Helper to set up locations for vector shift operations.
-static void CreateVecShiftLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecShiftLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -1045,8 +1045,8 @@
}
// Helper to set up locations for vector accumulations.
-static void CreateVecAccumLocations(ArenaAllocator* arena, HVecOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecAccumLocations(ArenaAllocator* allocator, HVecOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kUint8:
case DataType::Type::kInt8:
@@ -1084,10 +1084,10 @@
}
// Helper to set up locations for vector memory operations.
-static void CreateVecMemLocations(ArenaAllocator* arena,
+static void CreateVecMemLocations(ArenaAllocator* allocator,
HVecMemoryOperation* instruction,
bool is_load) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
case DataType::Type::kUint8:
diff --git a/compiler/optimizing/code_generator_vector_x86_64.cc b/compiler/optimizing/code_generator_vector_x86_64.cc
index d0c4320..107030e 100644
--- a/compiler/optimizing/code_generator_vector_x86_64.cc
+++ b/compiler/optimizing/code_generator_vector_x86_64.cc
@@ -163,8 +163,8 @@
}
// Helper to set up locations for vector unary operations.
-static void CreateVecUnOpLocations(ArenaAllocator* arena, HVecUnaryOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecUnOpLocations(ArenaAllocator* allocator, HVecUnaryOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
case DataType::Type::kUint8:
@@ -407,8 +407,8 @@
}
// Helper to set up locations for vector binary operations.
-static void CreateVecBinOpLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecBinOpLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
case DataType::Type::kUint8:
@@ -840,8 +840,8 @@
}
// Helper to set up locations for vector shift operations.
-static void CreateVecShiftLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecShiftLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -1018,8 +1018,8 @@
}
// Helper to set up locations for vector accumulations.
-static void CreateVecAccumLocations(ArenaAllocator* arena, HVecOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecAccumLocations(ArenaAllocator* allocator, HVecOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kUint8:
case DataType::Type::kInt8:
@@ -1057,10 +1057,10 @@
}
// Helper to set up locations for vector memory operations.
-static void CreateVecMemLocations(ArenaAllocator* arena,
+static void CreateVecMemLocations(ArenaAllocator* allocator,
HVecMemoryOperation* instruction,
bool is_load) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
case DataType::Type::kUint8:
diff --git a/compiler/optimizing/codegen_test_utils.h b/compiler/optimizing/codegen_test_utils.h
index 5f4593f..bcbcc12 100644
--- a/compiler/optimizing/codegen_test_utils.h
+++ b/compiler/optimizing/codegen_test_utils.h
@@ -295,10 +295,15 @@
const std::function<void(HGraph*)>& hook_before_codegen,
bool has_result,
Expected expected) {
- SsaLivenessAnalysis liveness(graph, codegen);
- PrepareForRegisterAllocation(graph).Run();
- liveness.Analyze();
- RegisterAllocator::Create(graph->GetAllocator(), codegen, liveness)->AllocateRegisters();
+ {
+ ScopedArenaAllocator local_allocator(graph->GetArenaStack());
+ SsaLivenessAnalysis liveness(graph, codegen, &local_allocator);
+ PrepareForRegisterAllocation(graph).Run();
+ liveness.Analyze();
+ std::unique_ptr<RegisterAllocator> register_allocator =
+ RegisterAllocator::Create(&local_allocator, codegen, liveness);
+ register_allocator->AllocateRegisters();
+ }
hook_before_codegen(graph);
InternalCodeAllocator allocator;
codegen->Compile(&allocator);
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 1cebeb5..f39acab 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -274,7 +274,7 @@
return false;
}
- ArenaAllocator* arena = mul->GetBlock()->GetGraph()->GetAllocator();
+ ArenaAllocator* allocator = mul->GetBlock()->GetGraph()->GetAllocator();
if (mul->HasOnlyOneNonEnvironmentUse()) {
HInstruction* use = mul->GetUses().front().GetUser();
@@ -307,14 +307,14 @@
use->IsVecAdd() ? HInstruction::kAdd : HInstruction::kSub;
if (accumulator != nullptr) {
HVecMultiplyAccumulate* mulacc =
- new (arena) HVecMultiplyAccumulate(arena,
- kind,
- accumulator,
- mul->GetLeft(),
- mul->GetRight(),
- binop->GetPackedType(),
- binop->GetVectorLength(),
- binop->GetDexPc());
+ new (allocator) HVecMultiplyAccumulate(allocator,
+ kind,
+ accumulator,
+ mul->GetLeft(),
+ mul->GetRight(),
+ binop->GetPackedType(),
+ binop->GetVectorLength(),
+ binop->GetDexPc());
binop->GetBlock()->ReplaceAndRemoveInstructionWith(binop, mulacc);
DCHECK(!mul->HasUses());
@@ -700,30 +700,30 @@
}
}
-static HCondition* GetOppositeConditionSwapOps(ArenaAllocator* arena, HInstruction* cond) {
+static HCondition* GetOppositeConditionSwapOps(ArenaAllocator* allocator, HInstruction* cond) {
HInstruction *lhs = cond->InputAt(0);
HInstruction *rhs = cond->InputAt(1);
switch (cond->GetKind()) {
case HInstruction::kEqual:
- return new (arena) HEqual(rhs, lhs);
+ return new (allocator) HEqual(rhs, lhs);
case HInstruction::kNotEqual:
- return new (arena) HNotEqual(rhs, lhs);
+ return new (allocator) HNotEqual(rhs, lhs);
case HInstruction::kLessThan:
- return new (arena) HGreaterThan(rhs, lhs);
+ return new (allocator) HGreaterThan(rhs, lhs);
case HInstruction::kLessThanOrEqual:
- return new (arena) HGreaterThanOrEqual(rhs, lhs);
+ return new (allocator) HGreaterThanOrEqual(rhs, lhs);
case HInstruction::kGreaterThan:
- return new (arena) HLessThan(rhs, lhs);
+ return new (allocator) HLessThan(rhs, lhs);
case HInstruction::kGreaterThanOrEqual:
- return new (arena) HLessThanOrEqual(rhs, lhs);
+ return new (allocator) HLessThanOrEqual(rhs, lhs);
case HInstruction::kBelow:
- return new (arena) HAbove(rhs, lhs);
+ return new (allocator) HAbove(rhs, lhs);
case HInstruction::kBelowOrEqual:
- return new (arena) HAboveOrEqual(rhs, lhs);
+ return new (allocator) HAboveOrEqual(rhs, lhs);
case HInstruction::kAbove:
- return new (arena) HBelow(rhs, lhs);
+ return new (allocator) HBelow(rhs, lhs);
case HInstruction::kAboveOrEqual:
- return new (arena) HBelowOrEqual(rhs, lhs);
+ return new (allocator) HBelowOrEqual(rhs, lhs);
default:
LOG(FATAL) << "Unknown ConditionType " << cond->GetKind();
}
@@ -837,7 +837,9 @@
}
// Constructs a new ABS(x) node in the HIR.
-static HInstruction* NewIntegralAbs(ArenaAllocator* arena, HInstruction* x, HInstruction* cursor) {
+static HInstruction* NewIntegralAbs(ArenaAllocator* allocator,
+ HInstruction* x,
+ HInstruction* cursor) {
DataType::Type type = x->GetType();
DCHECK(type == DataType::Type::kInt32 || type == DataType::Type::kInt64);
// Construct a fake intrinsic with as much context as is needed to allocate one.
@@ -848,8 +850,8 @@
HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
0u
};
- HInvokeStaticOrDirect* invoke = new (arena) HInvokeStaticOrDirect(
- arena,
+ HInvokeStaticOrDirect* invoke = new (allocator) HInvokeStaticOrDirect(
+ allocator,
1,
type,
x->GetDexPc(),
@@ -2180,20 +2182,20 @@
HInstruction* str = invoke->InputAt(0);
HInstruction* index = invoke->InputAt(1);
uint32_t dex_pc = invoke->GetDexPc();
- ArenaAllocator* arena = GetGraph()->GetAllocator();
+ ArenaAllocator* allocator = GetGraph()->GetAllocator();
// We treat String as an array to allow DCE and BCE to seamlessly work on strings,
// so create the HArrayLength, HBoundsCheck and HArrayGet.
- HArrayLength* length = new (arena) HArrayLength(str, dex_pc, /* is_string_length */ true);
+ HArrayLength* length = new (allocator) HArrayLength(str, dex_pc, /* is_string_length */ true);
invoke->GetBlock()->InsertInstructionBefore(length, invoke);
- HBoundsCheck* bounds_check = new (arena) HBoundsCheck(
+ HBoundsCheck* bounds_check = new (allocator) HBoundsCheck(
index, length, dex_pc, invoke->GetDexMethodIndex());
invoke->GetBlock()->InsertInstructionBefore(bounds_check, invoke);
- HArrayGet* array_get = new (arena) HArrayGet(str,
- bounds_check,
- DataType::Type::kUint16,
- SideEffects::None(), // Strings are immutable.
- dex_pc,
- /* is_string_char_at */ true);
+ HArrayGet* array_get = new (allocator) HArrayGet(str,
+ bounds_check,
+ DataType::Type::kUint16,
+ SideEffects::None(), // Strings are immutable.
+ dex_pc,
+ /* is_string_char_at */ true);
invoke->GetBlock()->ReplaceAndRemoveInstructionWith(invoke, array_get);
bounds_check->CopyEnvironmentFrom(invoke->GetEnvironment());
GetGraph()->SetHasBoundsChecks(true);
@@ -2524,13 +2526,13 @@
int64_t const3_val = ComputeAddition(type, const1_val, const2_val);
HBasicBlock* block = instruction->GetBlock();
HConstant* const3 = block->GetGraph()->GetConstant(type, const3_val);
- ArenaAllocator* arena = instruction->GetAllocator();
+ ArenaAllocator* allocator = instruction->GetAllocator();
HInstruction* z;
if (is_x_negated) {
- z = new (arena) HSub(type, const3, x, instruction->GetDexPc());
+ z = new (allocator) HSub(type, const3, x, instruction->GetDexPc());
} else {
- z = new (arena) HAdd(type, x, const3, instruction->GetDexPc());
+ z = new (allocator) HAdd(type, x, const3, instruction->GetDexPc());
}
block->ReplaceAndRemoveInstructionWith(instruction, z);
diff --git a/compiler/optimizing/instruction_simplifier_shared.cc b/compiler/optimizing/instruction_simplifier_shared.cc
index 037e98c..1c13084 100644
--- a/compiler/optimizing/instruction_simplifier_shared.cc
+++ b/compiler/optimizing/instruction_simplifier_shared.cc
@@ -75,8 +75,8 @@
return false;
}
- ArenaAllocator* arena = mul->GetBlock()->GetGraph()->GetAllocator();
- HMultiplyAccumulate* mulacc = new(arena) HMultiplyAccumulate(
+ ArenaAllocator* allocator = mul->GetBlock()->GetGraph()->GetAllocator();
+ HMultiplyAccumulate* mulacc = new (allocator) HMultiplyAccumulate(
mul->GetType(), op_kind, input_a, input_a, input_b, mul->GetDexPc());
mul->GetBlock()->ReplaceAndRemoveInstructionWith(mul, mulacc);
@@ -105,7 +105,7 @@
return false;
}
- ArenaAllocator* arena = mul->GetBlock()->GetGraph()->GetAllocator();
+ ArenaAllocator* allocator = mul->GetBlock()->GetGraph()->GetAllocator();
if (mul->HasOnlyOneNonEnvironmentUse()) {
HInstruction* use = mul->GetUses().front().GetUser();
@@ -137,11 +137,11 @@
if (accumulator != nullptr) {
HMultiplyAccumulate* mulacc =
- new (arena) HMultiplyAccumulate(type,
- binop->GetKind(),
- accumulator,
- mul->GetLeft(),
- mul->GetRight());
+ new (allocator) HMultiplyAccumulate(type,
+ binop->GetKind(),
+ accumulator,
+ mul->GetLeft(),
+ mul->GetRight());
binop->GetBlock()->ReplaceAndRemoveInstructionWith(binop, mulacc);
DCHECK(!mul->HasUses());
@@ -150,11 +150,11 @@
}
} else if (use->IsNeg() && isa != kArm) {
HMultiplyAccumulate* mulacc =
- new (arena) HMultiplyAccumulate(type,
- HInstruction::kSub,
- mul->GetBlock()->GetGraph()->GetConstant(type, 0),
- mul->GetLeft(),
- mul->GetRight());
+ new (allocator) HMultiplyAccumulate(type,
+ HInstruction::kSub,
+ mul->GetBlock()->GetGraph()->GetConstant(type, 0),
+ mul->GetLeft(),
+ mul->GetRight());
use->GetBlock()->ReplaceAndRemoveInstructionWith(use, mulacc);
DCHECK(!mul->HasUses());
@@ -255,10 +255,10 @@
// Proceed to extract the base address computation.
HGraph* graph = access->GetBlock()->GetGraph();
- ArenaAllocator* arena = graph->GetAllocator();
+ ArenaAllocator* allocator = graph->GetAllocator();
HIntConstant* offset = graph->GetIntConstant(data_offset);
- HIntermediateAddress* address = new (arena) HIntermediateAddress(array, offset, kNoDexPc);
+ HIntermediateAddress* address = new (allocator) HIntermediateAddress(array, offset, kNoDexPc);
// TODO: Is it ok to not have this on the intermediate address?
// address->SetReferenceTypeInfo(array->GetReferenceTypeInfo());
access->GetBlock()->InsertInstructionBefore(address, access);
@@ -289,7 +289,7 @@
}
HGraph* graph = access->GetBlock()->GetGraph();
- ArenaAllocator* arena = graph->GetAllocator();
+ ArenaAllocator* allocator = graph->GetAllocator();
DataType::Type packed_type = access->GetPackedType();
uint32_t data_offset = mirror::Array::DataOffset(
DataType::Size(packed_type)).Uint32Value();
@@ -328,7 +328,7 @@
HIntConstant* offset = graph->GetIntConstant(data_offset);
HIntConstant* shift = graph->GetIntConstant(component_shift);
HIntermediateAddressIndex* address =
- new (arena) HIntermediateAddressIndex(index, offset, shift, kNoDexPc);
+ new (allocator) HIntermediateAddressIndex(index, offset, shift, kNoDexPc);
access->GetBlock()->InsertInstructionBefore(address, access);
access->ReplaceInput(address, 1);
diff --git a/compiler/optimizing/linearize_test.cc b/compiler/optimizing/linearize_test.cc
index e82fab9..b2a9c0a 100644
--- a/compiler/optimizing/linearize_test.cc
+++ b/compiler/optimizing/linearize_test.cc
@@ -45,7 +45,7 @@
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
ASSERT_EQ(graph->GetLinearOrder().size(), number_of_blocks);
diff --git a/compiler/optimizing/live_interval_test.cc b/compiler/optimizing/live_interval_test.cc
index 405f261..c60386d 100644
--- a/compiler/optimizing/live_interval_test.cc
+++ b/compiler/optimizing/live_interval_test.cc
@@ -23,29 +23,29 @@
namespace art {
TEST(LiveInterval, GetStart) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
+ ArenaPoolAndAllocator pool;
+ ScopedArenaAllocator* allocator = pool.GetScopedAllocator();
{
static constexpr size_t ranges[][2] = {{0, 42}};
- LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+ LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator);
ASSERT_EQ(0u, interval->GetStart());
}
{
static constexpr size_t ranges[][2] = {{4, 12}, {14, 16}};
- LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+ LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator);
ASSERT_EQ(4u, interval->GetStart());
}
}
TEST(LiveInterval, IsDeadAt) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
+ ArenaPoolAndAllocator pool;
+ ScopedArenaAllocator* allocator = pool.GetScopedAllocator();
{
static constexpr size_t ranges[][2] = {{0, 42}};
- LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+ LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator);
ASSERT_TRUE(interval->IsDeadAt(42));
ASSERT_TRUE(interval->IsDeadAt(43));
ASSERT_FALSE(interval->IsDeadAt(41));
@@ -55,7 +55,7 @@
{
static constexpr size_t ranges[][2] = {{4, 12}, {14, 16}};
- LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+ LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator);
ASSERT_TRUE(interval->IsDeadAt(16));
ASSERT_TRUE(interval->IsDeadAt(32));
ASSERT_FALSE(interval->IsDeadAt(0));
@@ -68,12 +68,12 @@
}
TEST(LiveInterval, Covers) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
+ ArenaPoolAndAllocator pool;
+ ScopedArenaAllocator* allocator = pool.GetScopedAllocator();
{
static constexpr size_t ranges[][2] = {{0, 42}};
- LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+ LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator);
ASSERT_TRUE(interval->Covers(0));
ASSERT_TRUE(interval->Covers(4));
ASSERT_TRUE(interval->Covers(41));
@@ -83,7 +83,7 @@
{
static constexpr size_t ranges[][2] = {{4, 12}, {14, 16}};
- LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+ LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator);
ASSERT_FALSE(interval->Covers(0));
ASSERT_TRUE(interval->Covers(4));
ASSERT_TRUE(interval->Covers(11));
@@ -96,68 +96,68 @@
}
TEST(LiveInterval, FirstIntersectionWith) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
+ ArenaPoolAndAllocator pool;
+ ScopedArenaAllocator* allocator = pool.GetScopedAllocator();
{
static constexpr size_t ranges1[][2] = {{0, 4}, {8, 10}};
- LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), &allocator);
+ LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), allocator);
static constexpr size_t ranges2[][2] = {{5, 6}};
- LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), &allocator);
+ LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), allocator);
ASSERT_EQ(kNoLifetime, interval1->FirstIntersectionWith(interval2));
}
{
static constexpr size_t ranges1[][2] = {{0, 4}, {8, 10}};
- LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), &allocator);
+ LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), allocator);
static constexpr size_t ranges2[][2] = {{5, 42}};
- LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), &allocator);
+ LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), allocator);
ASSERT_EQ(8u, interval1->FirstIntersectionWith(interval2));
}
{
static constexpr size_t ranges1[][2] = {{0, 4}, {8, 10}};
- LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), &allocator);
+ LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), allocator);
static constexpr size_t ranges2[][2] = {{5, 6}, {7, 8}, {11, 12}};
- LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), &allocator);
+ LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), allocator);
ASSERT_EQ(kNoLifetime, interval1->FirstIntersectionWith(interval2));
}
{
static constexpr size_t ranges1[][2] = {{0, 4}, {8, 10}};
- LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), &allocator);
+ LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), allocator);
static constexpr size_t ranges2[][2] = {{5, 6}, {7, 8}, {9, 10}};
- LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), &allocator);
+ LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), allocator);
ASSERT_EQ(9u, interval1->FirstIntersectionWith(interval2));
}
{
static constexpr size_t ranges1[][2] = {{0, 1}, {2, 7}, {8, 10}};
- LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), &allocator);
+ LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), allocator);
static constexpr size_t ranges2[][2] = {{1, 2}, {6, 7}, {9, 10}};
- LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), &allocator);
+ LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), allocator);
ASSERT_EQ(6u, interval1->FirstIntersectionWith(interval2));
}
{
static constexpr size_t ranges1[][2] = {{0, 1}, {2, 8}, {55, 58}};
- LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), &allocator);
+ LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), allocator);
static constexpr size_t ranges2[][2] = {{1, 2}, {11, 42}, {43, 48}, {54, 56}};
- LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), &allocator);
+ LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), allocator);
ASSERT_EQ(55u, interval1->FirstIntersectionWith(interval2));
}
{
static constexpr size_t ranges1[][2] = {{0, 1}, {2, 8}, {15, 18}, {27, 32}, {41, 53}, {54, 60}};
- LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), &allocator);
+ LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), allocator);
static constexpr size_t ranges2[][2] = {{1, 2}, {11, 12}, {19, 25}, {34, 42}, {52, 60}};
- LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), &allocator);
+ LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), allocator);
ASSERT_EQ(41u, interval1->FirstIntersectionWith(interval2));
}
@@ -188,13 +188,13 @@
}
TEST(LiveInterval, SplitAt) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
+ ArenaPoolAndAllocator pool;
+ ScopedArenaAllocator* allocator = pool.GetScopedAllocator();
{
// Test within one range.
static constexpr size_t ranges[][2] = {{0, 4}};
- LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+ LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator);
LiveInterval* split = interval->SplitAt(1);
static constexpr size_t expected[][2] = {{0, 1}};
ASSERT_TRUE(RangesEquals(interval, expected, arraysize(expected)));
@@ -205,7 +205,7 @@
{
// Test just before the end of one range.
static constexpr size_t ranges[][2] = {{0, 4}};
- LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+ LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator);
LiveInterval* split = interval->SplitAt(3);
static constexpr size_t expected[][2] = {{0, 3}};
ASSERT_TRUE(RangesEquals(interval, expected, arraysize(expected)));
@@ -216,7 +216,7 @@
{
// Test within the first range.
static constexpr size_t ranges[][2] = {{0, 4}, {8, 12}};
- LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+ LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator);
LiveInterval* split = interval->SplitAt(1);
static constexpr size_t expected[][2] = {{0, 1}};
ASSERT_TRUE(RangesEquals(interval, expected, arraysize(expected)));
@@ -227,7 +227,7 @@
{
// Test in a hole.
static constexpr size_t ranges[][2] = {{0, 4}, {8, 12}};
- LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+ LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator);
LiveInterval* split = interval->SplitAt(5);
static constexpr size_t expected[][2] = {{0, 4}};
ASSERT_TRUE(RangesEquals(interval, expected, arraysize(expected)));
@@ -238,7 +238,7 @@
{
// Test within the second range.
static constexpr size_t ranges[][2] = {{0, 4}, {8, 12}};
- LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+ LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator);
LiveInterval* split = interval->SplitAt(9);
static constexpr size_t expected[][2] = {{0, 4}, {8, 9}};
ASSERT_TRUE(RangesEquals(interval, expected, arraysize(expected)));
@@ -249,7 +249,7 @@
{
// Test at the beginning of the second range.
static constexpr size_t ranges[][2] = {{0, 4}, {6, 10}};
- LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+ LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator);
LiveInterval* split = interval->SplitAt(6);
static constexpr size_t expected[][2] = {{0, 4}};
ASSERT_TRUE(RangesEquals(interval, expected, arraysize(expected)));
@@ -260,7 +260,7 @@
{
// Test at the end of the first range.
static constexpr size_t ranges[][2] = {{0, 4}, {6, 10}};
- LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+ LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator);
LiveInterval* split = interval->SplitAt(4);
static constexpr size_t expected[][2] = {{0, 4}};
ASSERT_TRUE(RangesEquals(interval, expected, arraysize(expected)));
@@ -271,7 +271,7 @@
{
// Test that we get null if we split at a position where the interval is dead.
static constexpr size_t ranges[][2] = {{0, 4}};
- LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+ LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator);
LiveInterval* split = interval->SplitAt(5);
ASSERT_TRUE(split == nullptr);
ASSERT_TRUE(RangesEquals(interval, ranges, arraysize(ranges)));
@@ -279,13 +279,13 @@
}
TEST(LiveInterval, AddLoopRange) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
+ ArenaPoolAndAllocator pool;
+ ScopedArenaAllocator* allocator = pool.GetScopedAllocator();
{
// Test when only used in a loop.
static constexpr size_t ranges[][2] = {{0, 4}};
- LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+ LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator);
interval->AddLoopRange(0, 8);
LiveRange* range = interval->GetFirstRange();
ASSERT_TRUE(range->GetNext() == nullptr);
@@ -296,7 +296,7 @@
{
// Test when only used in a loop.
static constexpr size_t ranges[][2] = {{2, 4}};
- LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+ LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator);
interval->AddLoopRange(0, 8);
LiveRange* range = interval->GetFirstRange();
ASSERT_TRUE(range->GetNext() == nullptr);
@@ -307,7 +307,7 @@
{
// Test when used just after the loop.
static constexpr size_t ranges[][2] = {{2, 4}, {8, 10}};
- LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+ LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator);
interval->AddLoopRange(0, 8);
LiveRange* range = interval->GetFirstRange();
ASSERT_TRUE(range->GetNext() == nullptr);
@@ -318,7 +318,7 @@
{
// Test when the use after the loop is after a lifetime hole.
static constexpr size_t ranges[][2] = {{2, 4}, {10, 12}};
- LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+ LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator);
interval->AddLoopRange(0, 8);
LiveRange* range = interval->GetFirstRange();
ASSERT_EQ(range->GetStart(), 0u);
diff --git a/compiler/optimizing/live_ranges_test.cc b/compiler/optimizing/live_ranges_test.cc
index 8087fe0..ddcad5a 100644
--- a/compiler/optimizing/live_ranges_test.cc
+++ b/compiler/optimizing/live_ranges_test.cc
@@ -66,7 +66,7 @@
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
LiveInterval* interval = liveness.GetInstructionFromSsaIndex(0)->GetLiveInterval();
@@ -112,7 +112,7 @@
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
LiveInterval* interval = liveness.GetInstructionFromSsaIndex(0)->GetLiveInterval();
@@ -161,7 +161,7 @@
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
// Test for the 4 constant.
@@ -238,7 +238,7 @@
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
// Test for the 0 constant.
@@ -315,7 +315,7 @@
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
// Test for the 0 constant.
@@ -391,7 +391,7 @@
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
// Test for the 0 constant.
diff --git a/compiler/optimizing/liveness_test.cc b/compiler/optimizing/liveness_test.cc
index 7793965..3eadc8f 100644
--- a/compiler/optimizing/liveness_test.cc
+++ b/compiler/optimizing/liveness_test.cc
@@ -53,7 +53,7 @@
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
std::ostringstream buffer;
diff --git a/compiler/optimizing/locations.cc b/compiler/optimizing/locations.cc
index 29dfddf..5879c6f 100644
--- a/compiler/optimizing/locations.cc
+++ b/compiler/optimizing/locations.cc
@@ -43,8 +43,7 @@
instruction->SetLocations(this);
if (NeedsSafepoint()) {
- ArenaAllocator* arena = instruction->GetBlock()->GetGraph()->GetAllocator();
- stack_mask_ = ArenaBitVector::Create(arena, 0, true, kArenaAllocLocationSummary);
+ stack_mask_ = ArenaBitVector::Create(allocator, 0, true, kArenaAllocLocationSummary);
}
}
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 3c584bd..99fde75 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1800,21 +1800,23 @@
// A HEnvironment object contains the values of virtual registers at a given location.
class HEnvironment : public ArenaObject<kArenaAllocEnvironment> {
public:
- ALWAYS_INLINE HEnvironment(ArenaAllocator* arena,
+ ALWAYS_INLINE HEnvironment(ArenaAllocator* allocator,
size_t number_of_vregs,
ArtMethod* method,
uint32_t dex_pc,
HInstruction* holder)
- : vregs_(number_of_vregs, arena->Adapter(kArenaAllocEnvironmentVRegs)),
- locations_(arena->Adapter(kArenaAllocEnvironmentLocations)),
+ : vregs_(number_of_vregs, allocator->Adapter(kArenaAllocEnvironmentVRegs)),
+ locations_(allocator->Adapter(kArenaAllocEnvironmentLocations)),
parent_(nullptr),
method_(method),
dex_pc_(dex_pc),
holder_(holder) {
}
- ALWAYS_INLINE HEnvironment(ArenaAllocator* arena, const HEnvironment& to_copy, HInstruction* holder)
- : HEnvironment(arena,
+ ALWAYS_INLINE HEnvironment(ArenaAllocator* allocator,
+ const HEnvironment& to_copy,
+ HInstruction* holder)
+ : HEnvironment(allocator,
to_copy.Size(),
to_copy.GetMethod(),
to_copy.GetDexPc(),
@@ -2475,11 +2477,11 @@
protected:
HVariableInputSizeInstruction(SideEffects side_effects,
uint32_t dex_pc,
- ArenaAllocator* arena,
+ ArenaAllocator* allocator,
size_t number_of_inputs,
ArenaAllocKind kind)
: HInstruction(side_effects, dex_pc),
- inputs_(number_of_inputs, arena->Adapter(kind)) {}
+ inputs_(number_of_inputs, allocator->Adapter(kind)) {}
ArenaVector<HUserRecord<HInstruction*>> inputs_;
@@ -2580,7 +2582,7 @@
class HPhi FINAL : public HVariableInputSizeInstruction {
public:
- HPhi(ArenaAllocator* arena,
+ HPhi(ArenaAllocator* allocator,
uint32_t reg_number,
size_t number_of_inputs,
DataType::Type type,
@@ -2588,7 +2590,7 @@
: HVariableInputSizeInstruction(
SideEffects::None(),
dex_pc,
- arena,
+ allocator,
number_of_inputs,
kArenaAllocPhiInputs),
reg_number_(reg_number) {
@@ -3027,11 +3029,14 @@
public:
// Use this constructor when the `HDeoptimize` acts as a barrier, where no code can move
// across.
- HDeoptimize(ArenaAllocator* arena, HInstruction* cond, DeoptimizationKind kind, uint32_t dex_pc)
+ HDeoptimize(ArenaAllocator* allocator,
+ HInstruction* cond,
+ DeoptimizationKind kind,
+ uint32_t dex_pc)
: HVariableInputSizeInstruction(
SideEffects::All(),
dex_pc,
- arena,
+ allocator,
/* number_of_inputs */ 1,
kArenaAllocMisc) {
SetPackedFlag<kFieldCanBeMoved>(false);
@@ -3044,7 +3049,7 @@
// instead of `guard`.
// We set CanTriggerGC to prevent any intermediate address from being live
// at the point of the `HDeoptimize`.
- HDeoptimize(ArenaAllocator* arena,
+ HDeoptimize(ArenaAllocator* allocator,
HInstruction* cond,
HInstruction* guard,
DeoptimizationKind kind,
@@ -3052,7 +3057,7 @@
: HVariableInputSizeInstruction(
SideEffects::CanTriggerGC(),
dex_pc,
- arena,
+ allocator,
/* number_of_inputs */ 2,
kArenaAllocMisc) {
SetPackedFlag<kFieldCanBeMoved>(true);
@@ -3116,8 +3121,8 @@
public:
// CHA guards are only optimized in a separate pass and have no side effects
// with regard to other passes.
- HShouldDeoptimizeFlag(ArenaAllocator* arena, uint32_t dex_pc)
- : HVariableInputSizeInstruction(SideEffects::None(), dex_pc, arena, 0, kArenaAllocCHA) {
+ HShouldDeoptimizeFlag(ArenaAllocator* allocator, uint32_t dex_pc)
+ : HVariableInputSizeInstruction(SideEffects::None(), dex_pc, allocator, 0, kArenaAllocCHA) {
}
DataType::Type GetType() const OVERRIDE { return DataType::Type::kInt32; }
@@ -4084,7 +4089,7 @@
using InvokeTypeField = BitField<InvokeType, kFieldInvokeType, kFieldInvokeTypeSize>;
using ReturnTypeField = BitField<DataType::Type, kFieldReturnType, kFieldReturnTypeSize>;
- HInvoke(ArenaAllocator* arena,
+ HInvoke(ArenaAllocator* allocator,
uint32_t number_of_arguments,
uint32_t number_of_other_inputs,
DataType::Type return_type,
@@ -4095,7 +4100,7 @@
: HVariableInputSizeInstruction(
SideEffects::AllExceptGCDependency(), // Assume write/read on all fields/arrays.
dex_pc,
- arena,
+ allocator,
number_of_arguments + number_of_other_inputs,
kArenaAllocInvokeInputs),
number_of_arguments_(number_of_arguments),
@@ -4122,13 +4127,13 @@
class HInvokeUnresolved FINAL : public HInvoke {
public:
- HInvokeUnresolved(ArenaAllocator* arena,
+ HInvokeUnresolved(ArenaAllocator* allocator,
uint32_t number_of_arguments,
DataType::Type return_type,
uint32_t dex_pc,
uint32_t dex_method_index,
InvokeType invoke_type)
- : HInvoke(arena,
+ : HInvoke(allocator,
number_of_arguments,
0u /* number_of_other_inputs */,
return_type,
@@ -4146,12 +4151,12 @@
class HInvokePolymorphic FINAL : public HInvoke {
public:
- HInvokePolymorphic(ArenaAllocator* arena,
+ HInvokePolymorphic(ArenaAllocator* allocator,
uint32_t number_of_arguments,
DataType::Type return_type,
uint32_t dex_pc,
uint32_t dex_method_index)
- : HInvoke(arena,
+ : HInvoke(allocator,
number_of_arguments,
0u /* number_of_other_inputs */,
return_type,
@@ -4223,7 +4228,7 @@
uint64_t method_load_data;
};
- HInvokeStaticOrDirect(ArenaAllocator* arena,
+ HInvokeStaticOrDirect(ArenaAllocator* allocator,
uint32_t number_of_arguments,
DataType::Type return_type,
uint32_t dex_pc,
@@ -4233,7 +4238,7 @@
InvokeType invoke_type,
MethodReference target_method,
ClinitCheckRequirement clinit_check_requirement)
- : HInvoke(arena,
+ : HInvoke(allocator,
number_of_arguments,
// There is potentially one extra argument for the HCurrentMethod node, and
// potentially one other if the clinit check is explicit, and potentially
@@ -4418,14 +4423,14 @@
class HInvokeVirtual FINAL : public HInvoke {
public:
- HInvokeVirtual(ArenaAllocator* arena,
+ HInvokeVirtual(ArenaAllocator* allocator,
uint32_t number_of_arguments,
DataType::Type return_type,
uint32_t dex_pc,
uint32_t dex_method_index,
ArtMethod* resolved_method,
uint32_t vtable_index)
- : HInvoke(arena,
+ : HInvoke(allocator,
number_of_arguments,
0u,
return_type,
@@ -4466,14 +4471,14 @@
class HInvokeInterface FINAL : public HInvoke {
public:
- HInvokeInterface(ArenaAllocator* arena,
+ HInvokeInterface(ArenaAllocator* allocator,
uint32_t number_of_arguments,
DataType::Type return_type,
uint32_t dex_pc,
uint32_t dex_method_index,
ArtMethod* resolved_method,
uint32_t imt_index)
- : HInvoke(arena,
+ : HInvoke(allocator,
number_of_arguments,
0u,
return_type,
@@ -6645,7 +6650,7 @@
// about the associated object.
HConstructorFence(HInstruction* fence_object,
uint32_t dex_pc,
- ArenaAllocator* arena)
+ ArenaAllocator* allocator)
// We strongly suspect there is not a more accurate way to describe the fine-grained reordering
// constraints described in the class header. We claim that these SideEffects constraints
// enforce a superset of the real constraints.
@@ -6669,7 +6674,7 @@
// we can refine the side effect to a smaller set of type reads (see above constraints).
: HVariableInputSizeInstruction(SideEffects::AllReads(),
dex_pc,
- arena,
+ allocator,
/* number_of_inputs */ 1,
kArenaAllocConstructorFenceInputs) {
DCHECK(fence_object != nullptr);
@@ -6886,9 +6891,9 @@
class HParallelMove FINAL : public HTemplateInstruction<0> {
public:
- explicit HParallelMove(ArenaAllocator* arena, uint32_t dex_pc = kNoDexPc)
+ explicit HParallelMove(ArenaAllocator* allocator, uint32_t dex_pc = kNoDexPc)
: HTemplateInstruction(SideEffects::None(), dex_pc),
- moves_(arena->Adapter(kArenaAllocMoveOperands)) {
+ moves_(allocator->Adapter(kArenaAllocMoveOperands)) {
moves_.reserve(kDefaultNumberOfMoves);
}
diff --git a/compiler/optimizing/nodes_vector.h b/compiler/optimizing/nodes_vector.h
index 8f3ab11..781a59f 100644
--- a/compiler/optimizing/nodes_vector.h
+++ b/compiler/optimizing/nodes_vector.h
@@ -71,7 +71,7 @@
// TODO: we could introduce SIMD types in HIR.
static constexpr DataType::Type kSIMDType = DataType::Type::kFloat64;
- HVecOperation(ArenaAllocator* arena,
+ HVecOperation(ArenaAllocator* allocator,
DataType::Type packed_type,
SideEffects side_effects,
size_t number_of_inputs,
@@ -79,7 +79,7 @@
uint32_t dex_pc)
: HVariableInputSizeInstruction(side_effects,
dex_pc,
- arena,
+ allocator,
number_of_inputs,
kArenaAllocVectorNode),
vector_length_(vector_length) {
@@ -156,12 +156,12 @@
// Abstraction of a unary vector operation.
class HVecUnaryOperation : public HVecOperation {
public:
- HVecUnaryOperation(ArenaAllocator* arena,
+ HVecUnaryOperation(ArenaAllocator* allocator,
HInstruction* input,
DataType::Type packed_type,
size_t vector_length,
uint32_t dex_pc)
- : HVecOperation(arena,
+ : HVecOperation(allocator,
packed_type,
SideEffects::None(),
/* number_of_inputs */ 1,
@@ -181,13 +181,13 @@
// Abstraction of a binary vector operation.
class HVecBinaryOperation : public HVecOperation {
public:
- HVecBinaryOperation(ArenaAllocator* arena,
+ HVecBinaryOperation(ArenaAllocator* allocator,
HInstruction* left,
HInstruction* right,
DataType::Type packed_type,
size_t vector_length,
uint32_t dex_pc)
- : HVecOperation(arena,
+ : HVecOperation(allocator,
packed_type,
SideEffects::None(),
/* number_of_inputs */ 2,
@@ -210,13 +210,18 @@
// The Android runtime guarantees elements have at least natural alignment.
class HVecMemoryOperation : public HVecOperation {
public:
- HVecMemoryOperation(ArenaAllocator* arena,
+ HVecMemoryOperation(ArenaAllocator* allocator,
DataType::Type packed_type,
SideEffects side_effects,
size_t number_of_inputs,
size_t vector_length,
uint32_t dex_pc)
- : HVecOperation(arena, packed_type, side_effects, number_of_inputs, vector_length, dex_pc),
+ : HVecOperation(allocator,
+ packed_type,
+ side_effects,
+ number_of_inputs,
+ vector_length,
+ dex_pc),
alignment_(DataType::Size(packed_type), 0) {
DCHECK_GE(number_of_inputs, 2u);
}
@@ -260,12 +265,12 @@
// viz. replicate(x) = [ x, .. , x ].
class HVecReplicateScalar FINAL : public HVecUnaryOperation {
public:
- HVecReplicateScalar(ArenaAllocator* arena,
+ HVecReplicateScalar(ArenaAllocator* allocator,
HInstruction* scalar,
DataType::Type packed_type,
size_t vector_length,
uint32_t dex_pc)
- : HVecUnaryOperation(arena, scalar, packed_type, vector_length, dex_pc) {
+ : HVecUnaryOperation(allocator, scalar, packed_type, vector_length, dex_pc) {
DCHECK(!scalar->IsVecOperation());
}
@@ -285,13 +290,13 @@
// TODO: for now only i == 1 case supported.
class HVecExtractScalar FINAL : public HVecUnaryOperation {
public:
- HVecExtractScalar(ArenaAllocator* arena,
+ HVecExtractScalar(ArenaAllocator* allocator,
HInstruction* input,
DataType::Type packed_type,
size_t vector_length,
size_t index,
uint32_t dex_pc)
- : HVecUnaryOperation(arena, input, packed_type, vector_length, dex_pc) {
+ : HVecUnaryOperation(allocator, input, packed_type, vector_length, dex_pc) {
DCHECK(HasConsistentPackedTypes(input, packed_type));
DCHECK_LT(index, vector_length);
DCHECK_EQ(index, 0u);
@@ -323,13 +328,13 @@
kMax = 3
};
- HVecReduce(ArenaAllocator* arena,
+ HVecReduce(ArenaAllocator* allocator,
HInstruction* input,
DataType::Type packed_type,
size_t vector_length,
ReductionKind kind,
uint32_t dex_pc)
- : HVecUnaryOperation(arena, input, packed_type, vector_length, dex_pc),
+ : HVecUnaryOperation(allocator, input, packed_type, vector_length, dex_pc),
kind_(kind) {
DCHECK(HasConsistentPackedTypes(input, packed_type));
}
@@ -356,12 +361,12 @@
// viz. cnv[ x1, .. , xn ] = [ cnv(x1), .. , cnv(xn) ].
class HVecCnv FINAL : public HVecUnaryOperation {
public:
- HVecCnv(ArenaAllocator* arena,
+ HVecCnv(ArenaAllocator* allocator,
HInstruction* input,
DataType::Type packed_type,
size_t vector_length,
uint32_t dex_pc)
- : HVecUnaryOperation(arena, input, packed_type, vector_length, dex_pc) {
+ : HVecUnaryOperation(allocator, input, packed_type, vector_length, dex_pc) {
DCHECK(input->IsVecOperation());
DCHECK_NE(GetInputType(), GetResultType()); // actual convert
}
@@ -381,12 +386,12 @@
// viz. neg[ x1, .. , xn ] = [ -x1, .. , -xn ].
class HVecNeg FINAL : public HVecUnaryOperation {
public:
- HVecNeg(ArenaAllocator* arena,
+ HVecNeg(ArenaAllocator* allocator,
HInstruction* input,
DataType::Type packed_type,
size_t vector_length,
uint32_t dex_pc)
- : HVecUnaryOperation(arena, input, packed_type, vector_length, dex_pc) {
+ : HVecUnaryOperation(allocator, input, packed_type, vector_length, dex_pc) {
DCHECK(HasConsistentPackedTypes(input, packed_type));
}
@@ -403,12 +408,12 @@
// for signed operand x.
class HVecAbs FINAL : public HVecUnaryOperation {
public:
- HVecAbs(ArenaAllocator* arena,
+ HVecAbs(ArenaAllocator* allocator,
HInstruction* input,
DataType::Type packed_type,
size_t vector_length,
uint32_t dex_pc)
- : HVecUnaryOperation(arena, input, packed_type, vector_length, dex_pc) {
+ : HVecUnaryOperation(allocator, input, packed_type, vector_length, dex_pc) {
DCHECK(HasConsistentPackedTypes(input, packed_type));
}
@@ -425,12 +430,12 @@
// not[ x1, .. , xn ] = [ !x1, .. , !xn ] for boolean.
class HVecNot FINAL : public HVecUnaryOperation {
public:
- HVecNot(ArenaAllocator* arena,
+ HVecNot(ArenaAllocator* allocator,
HInstruction* input,
DataType::Type packed_type,
size_t vector_length,
uint32_t dex_pc)
- : HVecUnaryOperation(arena, input, packed_type, vector_length, dex_pc) {
+ : HVecUnaryOperation(allocator, input, packed_type, vector_length, dex_pc) {
DCHECK(input->IsVecOperation());
}
@@ -450,13 +455,13 @@
// viz. [ x1, .. , xn ] + [ y1, .. , yn ] = [ x1 + y1, .. , xn + yn ].
class HVecAdd FINAL : public HVecBinaryOperation {
public:
- HVecAdd(ArenaAllocator* arena,
+ HVecAdd(ArenaAllocator* allocator,
HInstruction* left,
HInstruction* right,
DataType::Type packed_type,
size_t vector_length,
uint32_t dex_pc)
- : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
+ : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) {
DCHECK(HasConsistentPackedTypes(left, packed_type));
DCHECK(HasConsistentPackedTypes(right, packed_type));
}
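
As an aside, the same renaming threads through every vector node constructor; a hypothetical construction of a 4-lane Int32 vector add (operands `left`/`right` and `graph` are illustrative, not taken from this diff):

  ArenaAllocator* allocator = graph->GetAllocator();
  HVecAdd* vadd = new (allocator) HVecAdd(allocator,
                                          left,
                                          right,
                                          DataType::Type::kInt32,
                                          /* vector_length */ 4,
                                          kNoDexPc);
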
@@ -475,7 +480,7 @@
// for either both signed or both unsigned operands x, y.
class HVecHalvingAdd FINAL : public HVecBinaryOperation {
public:
- HVecHalvingAdd(ArenaAllocator* arena,
+ HVecHalvingAdd(ArenaAllocator* allocator,
HInstruction* left,
HInstruction* right,
DataType::Type packed_type,
@@ -483,7 +488,7 @@
bool is_rounded,
bool is_unsigned,
uint32_t dex_pc)
- : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
+ : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) {
// The `is_unsigned` flag should be used exclusively with the Int32 or Int64 data types.
// This flag is a temporary measure while we do not have the Uint32 and Uint64 data types.
DCHECK(!is_unsigned ||
@@ -524,13 +529,13 @@
// viz. [ x1, .. , xn ] - [ y1, .. , yn ] = [ x1 - y1, .. , xn - yn ].
class HVecSub FINAL : public HVecBinaryOperation {
public:
- HVecSub(ArenaAllocator* arena,
+ HVecSub(ArenaAllocator* allocator,
HInstruction* left,
HInstruction* right,
DataType::Type packed_type,
size_t vector_length,
uint32_t dex_pc)
- : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
+ : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) {
DCHECK(HasConsistentPackedTypes(left, packed_type));
DCHECK(HasConsistentPackedTypes(right, packed_type));
}
@@ -547,13 +552,13 @@
// viz. [ x1, .. , xn ] * [ y1, .. , yn ] = [ x1 * y1, .. , xn * yn ].
class HVecMul FINAL : public HVecBinaryOperation {
public:
- HVecMul(ArenaAllocator* arena,
+ HVecMul(ArenaAllocator* allocator,
HInstruction* left,
HInstruction* right,
DataType::Type packed_type,
size_t vector_length,
uint32_t dex_pc)
- : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
+ : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) {
DCHECK(HasConsistentPackedTypes(left, packed_type));
DCHECK(HasConsistentPackedTypes(right, packed_type));
}
@@ -570,13 +575,13 @@
// viz. [ x1, .. , xn ] / [ y1, .. , yn ] = [ x1 / y1, .. , xn / yn ].
class HVecDiv FINAL : public HVecBinaryOperation {
public:
- HVecDiv(ArenaAllocator* arena,
+ HVecDiv(ArenaAllocator* allocator,
HInstruction* left,
HInstruction* right,
DataType::Type packed_type,
size_t vector_length,
uint32_t dex_pc)
- : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
+ : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) {
DCHECK(HasConsistentPackedTypes(left, packed_type));
DCHECK(HasConsistentPackedTypes(right, packed_type));
}
@@ -594,14 +599,14 @@
// for either both signed or both unsigned operands x, y.
class HVecMin FINAL : public HVecBinaryOperation {
public:
- HVecMin(ArenaAllocator* arena,
+ HVecMin(ArenaAllocator* allocator,
HInstruction* left,
HInstruction* right,
DataType::Type packed_type,
size_t vector_length,
bool is_unsigned,
uint32_t dex_pc)
- : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
+ : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) {
// The `is_unsigned` flag should be used exclusively with the Int32 or Int64 data types.
// This flag is a temporary measure while we do not have the Uint32 and Uint64 data types.
DCHECK(!is_unsigned ||
@@ -638,14 +643,14 @@
// for either both signed or both unsigned operands x, y.
class HVecMax FINAL : public HVecBinaryOperation {
public:
- HVecMax(ArenaAllocator* arena,
+ HVecMax(ArenaAllocator* allocator,
HInstruction* left,
HInstruction* right,
DataType::Type packed_type,
size_t vector_length,
bool is_unsigned,
uint32_t dex_pc)
- : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
+ : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) {
// The `is_unsigned` flag should be used exclusively with the Int32 or Int64 data types.
// This flag is a temporary measure while we do not have the Uint32 and Uint64 data types.
DCHECK(!is_unsigned ||
@@ -681,13 +686,13 @@
// viz. [ x1, .. , xn ] & [ y1, .. , yn ] = [ x1 & y1, .. , xn & yn ].
class HVecAnd FINAL : public HVecBinaryOperation {
public:
- HVecAnd(ArenaAllocator* arena,
+ HVecAnd(ArenaAllocator* allocator,
HInstruction* left,
HInstruction* right,
DataType::Type packed_type,
size_t vector_length,
uint32_t dex_pc)
- : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
+ : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) {
DCHECK(left->IsVecOperation() && right->IsVecOperation());
}
@@ -703,13 +708,13 @@
// viz. [ x1, .. , xn ] and-not [ y1, .. , yn ] = [ ~x1 & y1, .. , ~xn & yn ].
class HVecAndNot FINAL : public HVecBinaryOperation {
public:
- HVecAndNot(ArenaAllocator* arena,
+ HVecAndNot(ArenaAllocator* allocator,
HInstruction* left,
HInstruction* right,
DataType::Type packed_type,
size_t vector_length,
uint32_t dex_pc)
- : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
+ : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) {
DCHECK(left->IsVecOperation() && right->IsVecOperation());
}
@@ -725,13 +730,13 @@
// viz. [ x1, .. , xn ] | [ y1, .. , yn ] = [ x1 | y1, .. , xn | yn ].
class HVecOr FINAL : public HVecBinaryOperation {
public:
- HVecOr(ArenaAllocator* arena,
+ HVecOr(ArenaAllocator* allocator,
HInstruction* left,
HInstruction* right,
DataType::Type packed_type,
size_t vector_length,
uint32_t dex_pc)
- : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
+ : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) {
DCHECK(left->IsVecOperation() && right->IsVecOperation());
}
@@ -747,13 +752,13 @@
// viz. [ x1, .. , xn ] ^ [ y1, .. , yn ] = [ x1 ^ y1, .. , xn ^ yn ].
class HVecXor FINAL : public HVecBinaryOperation {
public:
- HVecXor(ArenaAllocator* arena,
+ HVecXor(ArenaAllocator* allocator,
HInstruction* left,
HInstruction* right,
DataType::Type packed_type,
size_t vector_length,
uint32_t dex_pc)
- : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
+ : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) {
DCHECK(left->IsVecOperation() && right->IsVecOperation());
}
@@ -769,13 +774,13 @@
// viz. [ x1, .. , xn ] << d = [ x1 << d, .. , xn << d ].
class HVecShl FINAL : public HVecBinaryOperation {
public:
- HVecShl(ArenaAllocator* arena,
+ HVecShl(ArenaAllocator* allocator,
HInstruction* left,
HInstruction* right,
DataType::Type packed_type,
size_t vector_length,
uint32_t dex_pc)
- : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
+ : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) {
DCHECK(HasConsistentPackedTypes(left, packed_type));
}
@@ -791,13 +796,13 @@
// viz. [ x1, .. , xn ] >> d = [ x1 >> d, .. , xn >> d ].
class HVecShr FINAL : public HVecBinaryOperation {
public:
- HVecShr(ArenaAllocator* arena,
+ HVecShr(ArenaAllocator* allocator,
HInstruction* left,
HInstruction* right,
DataType::Type packed_type,
size_t vector_length,
uint32_t dex_pc)
- : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
+ : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) {
DCHECK(HasConsistentPackedTypes(left, packed_type));
}
@@ -813,13 +818,13 @@
// viz. [ x1, .. , xn ] >>> d = [ x1 >>> d, .. , xn >>> d ].
class HVecUShr FINAL : public HVecBinaryOperation {
public:
- HVecUShr(ArenaAllocator* arena,
+ HVecUShr(ArenaAllocator* allocator,
HInstruction* left,
HInstruction* right,
DataType::Type packed_type,
size_t vector_length,
uint32_t dex_pc)
- : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
+ : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) {
DCHECK(HasConsistentPackedTypes(left, packed_type));
}
@@ -840,13 +845,13 @@
// set( array(x1, .. , xm) ) = [ x1, .. , xm, 0, .. , 0 ] if m < n.
class HVecSetScalars FINAL : public HVecOperation {
public:
- HVecSetScalars(ArenaAllocator* arena,
+ HVecSetScalars(ArenaAllocator* allocator,
HInstruction* scalars[],
DataType::Type packed_type,
size_t vector_length,
size_t number_of_scalars,
uint32_t dex_pc)
- : HVecOperation(arena,
+ : HVecOperation(allocator,
packed_type,
SideEffects::None(),
number_of_scalars,
@@ -872,7 +877,7 @@
// viz. [ a1, .. , an ] + [ x1, .. , xn ] * [ y1, .. , yn ] = [ a1 + x1 * y1, .. , an + xn * yn ].
class HVecMultiplyAccumulate FINAL : public HVecOperation {
public:
- HVecMultiplyAccumulate(ArenaAllocator* arena,
+ HVecMultiplyAccumulate(ArenaAllocator* allocator,
InstructionKind op,
HInstruction* accumulator,
HInstruction* mul_left,
@@ -880,7 +885,7 @@
DataType::Type packed_type,
size_t vector_length,
uint32_t dex_pc)
- : HVecOperation(arena,
+ : HVecOperation(allocator,
packed_type,
SideEffects::None(),
/* number_of_inputs */ 3,
@@ -922,14 +927,14 @@
// for m <= n, non-overlapping sums, and signed operands x, y.
class HVecSADAccumulate FINAL : public HVecOperation {
public:
- HVecSADAccumulate(ArenaAllocator* arena,
+ HVecSADAccumulate(ArenaAllocator* allocator,
HInstruction* accumulator,
HInstruction* sad_left,
HInstruction* sad_right,
DataType::Type packed_type,
size_t vector_length,
uint32_t dex_pc)
- : HVecOperation(arena,
+ : HVecOperation(allocator,
packed_type,
SideEffects::None(),
/* number_of_inputs */ 3,
@@ -955,7 +960,7 @@
// yield the vector [ mem(1), .. , mem(n) ].
class HVecLoad FINAL : public HVecMemoryOperation {
public:
- HVecLoad(ArenaAllocator* arena,
+ HVecLoad(ArenaAllocator* allocator,
HInstruction* base,
HInstruction* index,
DataType::Type packed_type,
@@ -963,7 +968,7 @@
size_t vector_length,
bool is_string_char_at,
uint32_t dex_pc)
- : HVecMemoryOperation(arena,
+ : HVecMemoryOperation(allocator,
packed_type,
side_effects,
/* number_of_inputs */ 2,
@@ -999,7 +1004,7 @@
// sets mem(1) = x1, .. , mem(n) = xn.
class HVecStore FINAL : public HVecMemoryOperation {
public:
- HVecStore(ArenaAllocator* arena,
+ HVecStore(ArenaAllocator* allocator,
HInstruction* base,
HInstruction* index,
HInstruction* value,
@@ -1007,7 +1012,7 @@
SideEffects side_effects,
size_t vector_length,
uint32_t dex_pc)
- : HVecMemoryOperation(arena,
+ : HVecMemoryOperation(allocator,
packed_type,
side_effects,
/* number_of_inputs */ 3,
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 50b870b..9bfb7a5 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -109,8 +109,8 @@
*/
class CodeVectorAllocator FINAL : public CodeAllocator {
public:
- explicit CodeVectorAllocator(ArenaAllocator* arena)
- : memory_(arena->Adapter(kArenaAllocCodeBuffer)),
+ explicit CodeVectorAllocator(ArenaAllocator* allocator)
+ : memory_(allocator->Adapter(kArenaAllocCodeBuffer)),
size_(0) {}
virtual uint8_t* Allocate(size_t size) {
@@ -352,7 +352,7 @@
private:
// Create a 'CompiledMethod' for an optimized graph.
- CompiledMethod* Emit(ArenaAllocator* arena,
+ CompiledMethod* Emit(ArenaAllocator* allocator,
CodeVectorAllocator* code_allocator,
CodeGenerator* codegen,
CompilerDriver* driver,
@@ -454,7 +454,7 @@
static HOptimization* BuildOptimization(
const std::string& pass_name,
- ArenaAllocator* arena,
+ ArenaAllocator* allocator,
HGraph* graph,
OptimizingCompilerStats* stats,
CodeGenerator* codegen,
@@ -467,78 +467,79 @@
std::string opt_name = ConvertPassNameToOptimizationName(pass_name);
if (opt_name == BoundsCheckElimination::kBoundsCheckEliminationPassName) {
CHECK(most_recent_side_effects != nullptr && most_recent_induction != nullptr);
- return new (arena) BoundsCheckElimination(graph,
- *most_recent_side_effects,
- most_recent_induction);
+ return new (allocator) BoundsCheckElimination(graph,
+ *most_recent_side_effects,
+ most_recent_induction);
} else if (opt_name == GVNOptimization::kGlobalValueNumberingPassName) {
CHECK(most_recent_side_effects != nullptr);
- return new (arena) GVNOptimization(graph, *most_recent_side_effects, pass_name.c_str());
+ return new (allocator) GVNOptimization(graph, *most_recent_side_effects, pass_name.c_str());
} else if (opt_name == HConstantFolding::kConstantFoldingPassName) {
- return new (arena) HConstantFolding(graph, pass_name.c_str());
+ return new (allocator) HConstantFolding(graph, pass_name.c_str());
} else if (opt_name == HDeadCodeElimination::kDeadCodeEliminationPassName) {
- return new (arena) HDeadCodeElimination(graph, stats, pass_name.c_str());
+ return new (allocator) HDeadCodeElimination(graph, stats, pass_name.c_str());
} else if (opt_name == HInliner::kInlinerPassName) {
size_t number_of_dex_registers = dex_compilation_unit.GetCodeItem()->registers_size_;
- return new (arena) HInliner(graph, // outer_graph
- graph, // outermost_graph
- codegen,
- dex_compilation_unit, // outer_compilation_unit
- dex_compilation_unit, // outermost_compilation_unit
- driver,
- handles,
- stats,
- number_of_dex_registers,
- /* total_number_of_instructions */ 0,
- /* parent */ nullptr);
+ return new (allocator) HInliner(graph, // outer_graph
+ graph, // outermost_graph
+ codegen,
+ dex_compilation_unit, // outer_compilation_unit
+ dex_compilation_unit, // outermost_compilation_unit
+ driver,
+ handles,
+ stats,
+ number_of_dex_registers,
+ /* total_number_of_instructions */ 0,
+ /* parent */ nullptr);
} else if (opt_name == HSharpening::kSharpeningPassName) {
- return new (arena) HSharpening(graph, codegen, dex_compilation_unit, driver, handles);
+ return new (allocator) HSharpening(graph, codegen, dex_compilation_unit, driver, handles);
} else if (opt_name == HSelectGenerator::kSelectGeneratorPassName) {
- return new (arena) HSelectGenerator(graph, handles, stats);
+ return new (allocator) HSelectGenerator(graph, handles, stats);
} else if (opt_name == HInductionVarAnalysis::kInductionPassName) {
- return new (arena) HInductionVarAnalysis(graph);
+ return new (allocator) HInductionVarAnalysis(graph);
} else if (opt_name == InstructionSimplifier::kInstructionSimplifierPassName) {
- return new (arena) InstructionSimplifier(graph, codegen, driver, stats, pass_name.c_str());
+ return new (allocator) InstructionSimplifier(graph, codegen, driver, stats, pass_name.c_str());
} else if (opt_name == IntrinsicsRecognizer::kIntrinsicsRecognizerPassName) {
- return new (arena) IntrinsicsRecognizer(graph, stats);
+ return new (allocator) IntrinsicsRecognizer(graph, stats);
} else if (opt_name == LICM::kLoopInvariantCodeMotionPassName) {
CHECK(most_recent_side_effects != nullptr);
- return new (arena) LICM(graph, *most_recent_side_effects, stats);
+ return new (allocator) LICM(graph, *most_recent_side_effects, stats);
} else if (opt_name == LoadStoreAnalysis::kLoadStoreAnalysisPassName) {
- return new (arena) LoadStoreAnalysis(graph);
+ return new (allocator) LoadStoreAnalysis(graph);
} else if (opt_name == LoadStoreElimination::kLoadStoreEliminationPassName) {
CHECK(most_recent_side_effects != nullptr);
CHECK(most_recent_lsa != nullptr);
- return
- new (arena) LoadStoreElimination(graph, *most_recent_side_effects, *most_recent_lsa, stats);
+ return new (allocator) LoadStoreElimination(graph,
+ *most_recent_side_effects,
+ *most_recent_lsa,
+ stats);
} else if (opt_name == SideEffectsAnalysis::kSideEffectsAnalysisPassName) {
- return new (arena) SideEffectsAnalysis(graph);
+ return new (allocator) SideEffectsAnalysis(graph);
} else if (opt_name == HLoopOptimization::kLoopOptimizationPassName) {
- return new (arena) HLoopOptimization(graph, driver, most_recent_induction, stats);
+ return new (allocator) HLoopOptimization(graph, driver, most_recent_induction, stats);
} else if (opt_name == CHAGuardOptimization::kCHAGuardOptimizationPassName) {
- return new (arena) CHAGuardOptimization(graph);
+ return new (allocator) CHAGuardOptimization(graph);
} else if (opt_name == CodeSinking::kCodeSinkingPassName) {
- return new (arena) CodeSinking(graph, stats);
+ return new (allocator) CodeSinking(graph, stats);
} else if (opt_name == ConstructorFenceRedundancyElimination::kPassName) {
- return new (arena) ConstructorFenceRedundancyElimination(graph, stats);
+ return new (allocator) ConstructorFenceRedundancyElimination(graph, stats);
#ifdef ART_ENABLE_CODEGEN_arm
} else if (opt_name == arm::InstructionSimplifierArm::kInstructionSimplifierArmPassName) {
- return new (arena) arm::InstructionSimplifierArm(graph, stats);
+ return new (allocator) arm::InstructionSimplifierArm(graph, stats);
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
} else if (opt_name == arm64::InstructionSimplifierArm64::kInstructionSimplifierArm64PassName) {
- return new (arena) arm64::InstructionSimplifierArm64(graph, stats);
+ return new (allocator) arm64::InstructionSimplifierArm64(graph, stats);
#endif
#ifdef ART_ENABLE_CODEGEN_mips
} else if (opt_name == mips::PcRelativeFixups::kPcRelativeFixupsMipsPassName) {
- return new (arena) mips::PcRelativeFixups(graph, codegen, stats);
+ return new (allocator) mips::PcRelativeFixups(graph, codegen, stats);
} else if (opt_name == mips::InstructionSimplifierMips::kInstructionSimplifierMipsPassName) {
- return new (arena) mips::InstructionSimplifierMips(graph, codegen, stats);
+ return new (allocator) mips::InstructionSimplifierMips(graph, codegen, stats);
#endif
#ifdef ART_ENABLE_CODEGEN_x86
} else if (opt_name == x86::PcRelativeFixups::kPcRelativeFixupsX86PassName) {
- return new (arena) x86::PcRelativeFixups(graph, codegen, stats);
+ return new (allocator) x86::PcRelativeFixups(graph, codegen, stats);
} else if (opt_name == x86::X86MemoryOperandGeneration::kX86MemoryOperandGenerationPassName) {
- return new (arena) x86::X86MemoryOperandGeneration(graph, codegen, stats);
+ return new (allocator) x86::X86MemoryOperandGeneration(graph, codegen, stats);
#endif
}
return nullptr;
@@ -546,7 +547,7 @@
static ArenaVector<HOptimization*> BuildOptimizations(
const std::vector<std::string>& pass_names,
- ArenaAllocator* arena,
+ ArenaAllocator* allocator,
HGraph* graph,
OptimizingCompilerStats* stats,
CodeGenerator* codegen,
@@ -559,11 +560,11 @@
SideEffectsAnalysis* most_recent_side_effects = nullptr;
HInductionVarAnalysis* most_recent_induction = nullptr;
LoadStoreAnalysis* most_recent_lsa = nullptr;
- ArenaVector<HOptimization*> ret(arena->Adapter());
+ ArenaVector<HOptimization*> ret(allocator->Adapter());
for (const std::string& pass_name : pass_names) {
HOptimization* opt = BuildOptimization(
pass_name,
- arena,
+ allocator,
graph,
stats,
codegen,
@@ -757,15 +758,18 @@
pass_observer);
PrepareForRegisterAllocation(graph, stats).Run();
}
- SsaLivenessAnalysis liveness(graph, codegen);
+ // Use local allocator shared by SSA liveness analysis and register allocator.
+ // (Register allocator creates new objects in the liveness data.)
+ ScopedArenaAllocator local_allocator(graph->GetArenaStack());
+ SsaLivenessAnalysis liveness(graph, codegen, &local_allocator);
{
PassScope scope(SsaLivenessAnalysis::kLivenessPassName, pass_observer);
liveness.Analyze();
}
{
PassScope scope(RegisterAllocator::kRegisterAllocatorPassName, pass_observer);
- RegisterAllocator* register_allocator =
- RegisterAllocator::Create(graph->GetAllocator(), codegen, liveness, strategy);
+ std::unique_ptr<RegisterAllocator> register_allocator =
+ RegisterAllocator::Create(&local_allocator, codegen, liveness, strategy);
register_allocator->AllocateRegisters();
}
}
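
This hunk is the heart of the change: liveness analysis and register allocation share one ScopedArenaAllocator, so everything they allocate is popped off the ArenaStack when `local_allocator` goes out of scope and the memory can be reused by later passes; that is where the peak-usage reduction cited in the commit message comes from. A minimal sketch of the stack discipline, assuming only the ArenaStack/ScopedArenaAllocator behavior relied on above:

  void RunTwoPasses(ArenaStack* arena_stack) {
    {
      ScopedArenaAllocator pass1_allocator(arena_stack);  // Records the current stack top.
      void* pass1_data = pass1_allocator.Alloc(4096, kArenaAllocMisc);
      // ... use pass1_data ...
    }  // Destructor rewinds the stack; the memory becomes available again.
    ScopedArenaAllocator pass2_allocator(arena_stack);
    void* pass2_data = pass2_allocator.Alloc(4096, kArenaAllocMisc);  // Reuses the same memory.
    // ... use pass2_data ...
  }
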
@@ -777,11 +781,11 @@
PassObserver* pass_observer,
VariableSizedHandleScope* handles) const {
OptimizingCompilerStats* stats = compilation_stats_.get();
- ArenaAllocator* arena = graph->GetAllocator();
+ ArenaAllocator* allocator = graph->GetAllocator();
if (driver->GetCompilerOptions().GetPassesToRun() != nullptr) {
ArenaVector<HOptimization*> optimizations = BuildOptimizations(
*driver->GetCompilerOptions().GetPassesToRun(),
- arena,
+ allocator,
graph,
stats,
codegen,
@@ -792,43 +796,45 @@
return;
}
- HDeadCodeElimination* dce1 = new (arena) HDeadCodeElimination(
+ HDeadCodeElimination* dce1 = new (allocator) HDeadCodeElimination(
graph, stats, "dead_code_elimination$initial");
- HDeadCodeElimination* dce2 = new (arena) HDeadCodeElimination(
+ HDeadCodeElimination* dce2 = new (allocator) HDeadCodeElimination(
graph, stats, "dead_code_elimination$after_inlining");
- HDeadCodeElimination* dce3 = new (arena) HDeadCodeElimination(
+ HDeadCodeElimination* dce3 = new (allocator) HDeadCodeElimination(
graph, stats, "dead_code_elimination$final");
- HConstantFolding* fold1 = new (arena) HConstantFolding(graph, "constant_folding");
- InstructionSimplifier* simplify1 = new (arena) InstructionSimplifier(
+ HConstantFolding* fold1 = new (allocator) HConstantFolding(graph, "constant_folding");
+ InstructionSimplifier* simplify1 = new (allocator) InstructionSimplifier(
graph, codegen, driver, stats);
- HSelectGenerator* select_generator = new (arena) HSelectGenerator(graph, handles, stats);
- HConstantFolding* fold2 = new (arena) HConstantFolding(
+ HSelectGenerator* select_generator = new (allocator) HSelectGenerator(graph, handles, stats);
+ HConstantFolding* fold2 = new (allocator) HConstantFolding(
graph, "constant_folding$after_inlining");
- HConstantFolding* fold3 = new (arena) HConstantFolding(graph, "constant_folding$after_bce");
- SideEffectsAnalysis* side_effects1 = new (arena) SideEffectsAnalysis(
+ HConstantFolding* fold3 = new (allocator) HConstantFolding(graph, "constant_folding$after_bce");
+ SideEffectsAnalysis* side_effects1 = new (allocator) SideEffectsAnalysis(
graph, "side_effects$before_gvn");
- SideEffectsAnalysis* side_effects2 = new (arena) SideEffectsAnalysis(
+ SideEffectsAnalysis* side_effects2 = new (allocator) SideEffectsAnalysis(
graph, "side_effects$before_lse");
- GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects1);
- LICM* licm = new (arena) LICM(graph, *side_effects1, stats);
- HInductionVarAnalysis* induction = new (arena) HInductionVarAnalysis(graph);
- BoundsCheckElimination* bce = new (arena) BoundsCheckElimination(graph, *side_effects1, induction);
- HLoopOptimization* loop = new (arena) HLoopOptimization(graph, driver, induction, stats);
- LoadStoreAnalysis* lsa = new (arena) LoadStoreAnalysis(graph);
- LoadStoreElimination* lse = new (arena) LoadStoreElimination(graph, *side_effects2, *lsa, stats);
- HSharpening* sharpening = new (arena) HSharpening(
+ GVNOptimization* gvn = new (allocator) GVNOptimization(graph, *side_effects1);
+ LICM* licm = new (allocator) LICM(graph, *side_effects1, stats);
+ HInductionVarAnalysis* induction = new (allocator) HInductionVarAnalysis(graph);
+ BoundsCheckElimination* bce =
+ new (allocator) BoundsCheckElimination(graph, *side_effects1, induction);
+ HLoopOptimization* loop = new (allocator) HLoopOptimization(graph, driver, induction, stats);
+ LoadStoreAnalysis* lsa = new (allocator) LoadStoreAnalysis(graph);
+ LoadStoreElimination* lse =
+ new (allocator) LoadStoreElimination(graph, *side_effects2, *lsa, stats);
+ HSharpening* sharpening = new (allocator) HSharpening(
graph, codegen, dex_compilation_unit, driver, handles);
- InstructionSimplifier* simplify2 = new (arena) InstructionSimplifier(
+ InstructionSimplifier* simplify2 = new (allocator) InstructionSimplifier(
graph, codegen, driver, stats, "instruction_simplifier$after_inlining");
- InstructionSimplifier* simplify3 = new (arena) InstructionSimplifier(
+ InstructionSimplifier* simplify3 = new (allocator) InstructionSimplifier(
graph, codegen, driver, stats, "instruction_simplifier$after_bce");
- InstructionSimplifier* simplify4 = new (arena) InstructionSimplifier(
+ InstructionSimplifier* simplify4 = new (allocator) InstructionSimplifier(
graph, codegen, driver, stats, "instruction_simplifier$before_codegen");
- IntrinsicsRecognizer* intrinsics = new (arena) IntrinsicsRecognizer(graph, stats);
- CHAGuardOptimization* cha_guard = new (arena) CHAGuardOptimization(graph);
- CodeSinking* code_sinking = new (arena) CodeSinking(graph, stats);
+ IntrinsicsRecognizer* intrinsics = new (allocator) IntrinsicsRecognizer(graph, stats);
+ CHAGuardOptimization* cha_guard = new (allocator) CHAGuardOptimization(graph);
+ CodeSinking* code_sinking = new (allocator) CodeSinking(graph, stats);
ConstructorFenceRedundancyElimination* cfre =
- new (arena) ConstructorFenceRedundancyElimination(graph, stats);
+ new (allocator) ConstructorFenceRedundancyElimination(graph, stats);
HOptimization* optimizations1[] = {
intrinsics,
@@ -887,14 +893,14 @@
return linker_patches;
}
-CompiledMethod* OptimizingCompiler::Emit(ArenaAllocator* arena,
+CompiledMethod* OptimizingCompiler::Emit(ArenaAllocator* allocator,
CodeVectorAllocator* code_allocator,
CodeGenerator* codegen,
CompilerDriver* compiler_driver,
const DexFile::CodeItem* code_item) const {
ArenaVector<linker::LinkerPatch> linker_patches = EmitAndSortLinkerPatches(codegen);
- ArenaVector<uint8_t> stack_map(arena->Adapter(kArenaAllocStackMaps));
- ArenaVector<uint8_t> method_info(arena->Adapter(kArenaAllocStackMaps));
+ ArenaVector<uint8_t> stack_map(allocator->Adapter(kArenaAllocStackMaps));
+ ArenaVector<uint8_t> method_info(allocator->Adapter(kArenaAllocStackMaps));
size_t stack_map_size = 0;
size_t method_info_size = 0;
codegen->ComputeStackMapAndMethodInfoSize(&stack_map_size, &method_info_size);
diff --git a/compiler/optimizing/optimizing_unit_test.h b/compiler/optimizing/optimizing_unit_test.h
index f31ad82..5632f9a 100644
--- a/compiler/optimizing/optimizing_unit_test.h
+++ b/compiler/optimizing/optimizing_unit_test.h
@@ -48,7 +48,7 @@
LiveInterval* BuildInterval(const size_t ranges[][2],
size_t number_of_ranges,
- ArenaAllocator* allocator,
+ ScopedArenaAllocator* allocator,
int reg = -1,
HInstruction* defined_by = nullptr) {
LiveInterval* interval =
@@ -81,15 +81,18 @@
class ArenaPoolAndAllocator {
public:
- ArenaPoolAndAllocator() : pool_(), allocator_(&pool_), arena_stack_(&pool_) { }
+ ArenaPoolAndAllocator()
+ : pool_(), allocator_(&pool_), arena_stack_(&pool_), scoped_allocator_(&arena_stack_) { }
ArenaAllocator* GetAllocator() { return &allocator_; }
ArenaStack* GetArenaStack() { return &arena_stack_; }
+ ScopedArenaAllocator* GetScopedAllocator() { return &scoped_allocator_; }
private:
ArenaPool pool_;
ArenaAllocator allocator_;
ArenaStack arena_stack_;
+ ScopedArenaAllocator scoped_allocator_;
};
inline HGraph* CreateGraph(ArenaPoolAndAllocator* pool_and_allocator) {
@@ -107,6 +110,7 @@
ArenaAllocator* GetAllocator() { return pool_and_allocator_->GetAllocator(); }
ArenaStack* GetArenaStack() { return pool_and_allocator_->GetArenaStack(); }
+ ScopedArenaAllocator* GetScopedAllocator() { return pool_and_allocator_->GetScopedAllocator(); }
void ResetPoolAndAllocator() {
pool_and_allocator_.reset(new ArenaPoolAndAllocator());
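
With the new accessor in place, gtests can hand the scoped allocator to helpers such as BuildInterval() above. A hypothetical test snippet following that pattern:

  static constexpr size_t ranges[][2] = {{0, 42}};
  LiveInterval* interval =
      BuildInterval(ranges, arraysize(ranges), GetScopedAllocator(), /* reg */ 0);
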
diff --git a/compiler/optimizing/prepare_for_register_allocation.cc b/compiler/optimizing/prepare_for_register_allocation.cc
index 5ec76b4..fe98aa9 100644
--- a/compiler/optimizing/prepare_for_register_allocation.cc
+++ b/compiler/optimizing/prepare_for_register_allocation.cc
@@ -56,12 +56,12 @@
// Add a fake environment for String.charAt() inline info as we want
// the exception to appear as being thrown from there.
ArtMethod* char_at_method = jni::DecodeArtMethod(WellKnownClasses::java_lang_String_charAt);
- ArenaAllocator* arena = GetGraph()->GetAllocator();
- HEnvironment* environment = new (arena) HEnvironment(arena,
- /* number_of_vregs */ 0u,
- char_at_method,
- /* dex_pc */ dex::kDexNoIndex,
- check);
+ ArenaAllocator* allocator = GetGraph()->GetAllocator();
+ HEnvironment* environment = new (allocator) HEnvironment(allocator,
+ /* number_of_vregs */ 0u,
+ char_at_method,
+ /* dex_pc */ dex::kDexNoIndex,
+ check);
check->InsertRawEnvironment(environment);
}
}
diff --git a/compiler/optimizing/register_allocation_resolver.cc b/compiler/optimizing/register_allocation_resolver.cc
index 1786aa7..5ed9e02 100644
--- a/compiler/optimizing/register_allocation_resolver.cc
+++ b/compiler/optimizing/register_allocation_resolver.cc
@@ -22,10 +22,9 @@
namespace art {
-RegisterAllocationResolver::RegisterAllocationResolver(ArenaAllocator* allocator,
- CodeGenerator* codegen,
+RegisterAllocationResolver::RegisterAllocationResolver(CodeGenerator* codegen,
const SsaLivenessAnalysis& liveness)
- : allocator_(allocator),
+ : allocator_(codegen->GetGraph()->GetAllocator()),
codegen_(codegen),
liveness_(liveness) {}
@@ -36,7 +35,7 @@
size_t float_spill_slots,
size_t double_spill_slots,
size_t catch_phi_spill_slots,
- const ArenaVector<LiveInterval*>& temp_intervals) {
+ ArrayRef<LiveInterval* const> temp_intervals) {
size_t spill_slots = int_spill_slots
+ long_spill_slots
+ float_spill_slots
diff --git a/compiler/optimizing/register_allocation_resolver.h b/compiler/optimizing/register_allocation_resolver.h
index 4a148e0..2783717 100644
--- a/compiler/optimizing/register_allocation_resolver.h
+++ b/compiler/optimizing/register_allocation_resolver.h
@@ -17,7 +17,6 @@
#ifndef ART_COMPILER_OPTIMIZING_REGISTER_ALLOCATION_RESOLVER_H_
#define ART_COMPILER_OPTIMIZING_REGISTER_ALLOCATION_RESOLVER_H_
-#include "base/arena_containers.h"
#include "base/array_ref.h"
#include "base/value_object.h"
#include "data_type.h"
@@ -40,9 +39,7 @@
*/
class RegisterAllocationResolver : ValueObject {
public:
- RegisterAllocationResolver(ArenaAllocator* allocator,
- CodeGenerator* codegen,
- const SsaLivenessAnalysis& liveness);
+ RegisterAllocationResolver(CodeGenerator* codegen, const SsaLivenessAnalysis& liveness);
void Resolve(ArrayRef<HInstruction* const> safepoints,
size_t reserved_out_slots, // Includes slot(s) for the art method.
@@ -51,7 +48,7 @@
size_t float_spill_slots,
size_t double_spill_slots,
size_t catch_phi_spill_slots,
- const ArenaVector<LiveInterval*>& temp_intervals);
+ ArrayRef<LiveInterval* const> temp_intervals);
private:
// Update live registers of safepoint location summary.
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index c3b33e2..ece9904 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -19,6 +19,8 @@
#include <iostream>
#include <sstream>
+#include "base/scoped_arena_allocator.h"
+#include "base/scoped_arena_containers.h"
#include "base/bit_vector-inl.h"
#include "code_generator.h"
#include "register_allocator_graph_color.h"
@@ -27,22 +29,24 @@
namespace art {
-RegisterAllocator::RegisterAllocator(ArenaAllocator* allocator,
+RegisterAllocator::RegisterAllocator(ScopedArenaAllocator* allocator,
CodeGenerator* codegen,
const SsaLivenessAnalysis& liveness)
: allocator_(allocator),
codegen_(codegen),
liveness_(liveness) {}
-RegisterAllocator* RegisterAllocator::Create(ArenaAllocator* allocator,
- CodeGenerator* codegen,
- const SsaLivenessAnalysis& analysis,
- Strategy strategy) {
+std::unique_ptr<RegisterAllocator> RegisterAllocator::Create(ScopedArenaAllocator* allocator,
+ CodeGenerator* codegen,
+ const SsaLivenessAnalysis& analysis,
+ Strategy strategy) {
switch (strategy) {
case kRegisterAllocatorLinearScan:
- return new (allocator) RegisterAllocatorLinearScan(allocator, codegen, analysis);
+ return std::unique_ptr<RegisterAllocator>(
+ new (allocator) RegisterAllocatorLinearScan(allocator, codegen, analysis));
case kRegisterAllocatorGraphColor:
- return new (allocator) RegisterAllocatorGraphColor(allocator, codegen, analysis);
+ return std::unique_ptr<RegisterAllocator>(
+ new (allocator) RegisterAllocatorGraphColor(allocator, codegen, analysis));
default:
LOG(FATAL) << "Invalid register allocation strategy: " << strategy;
UNREACHABLE();
@@ -87,18 +91,18 @@
DISALLOW_COPY_AND_ASSIGN(AllRangesIterator);
};
-bool RegisterAllocator::ValidateIntervals(const ArenaVector<LiveInterval*>& intervals,
+bool RegisterAllocator::ValidateIntervals(ArrayRef<LiveInterval* const> intervals,
size_t number_of_spill_slots,
size_t number_of_out_slots,
const CodeGenerator& codegen,
- ArenaAllocator* allocator,
bool processing_core_registers,
bool log_fatal_on_failure) {
size_t number_of_registers = processing_core_registers
? codegen.GetNumberOfCoreRegisters()
: codegen.GetNumberOfFloatingPointRegisters();
- ArenaVector<ArenaBitVector*> liveness_of_values(
- allocator->Adapter(kArenaAllocRegisterAllocatorValidate));
+ ScopedArenaAllocator allocator(codegen.GetGraph()->GetArenaStack());
+ ScopedArenaVector<ArenaBitVector*> liveness_of_values(
+ allocator.Adapter(kArenaAllocRegisterAllocatorValidate));
liveness_of_values.reserve(number_of_registers + number_of_spill_slots);
size_t max_end = 0u;
@@ -112,7 +116,8 @@
// allocated will populate the associated bit vector based on its live ranges.
for (size_t i = 0; i < number_of_registers + number_of_spill_slots; ++i) {
liveness_of_values.push_back(
- ArenaBitVector::Create(allocator, max_end, false, kArenaAllocRegisterAllocatorValidate));
+ ArenaBitVector::Create(&allocator, max_end, false, kArenaAllocRegisterAllocatorValidate));
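+ // Note: unlike ArenaAllocator, ScopedArenaAllocator does not guarantee
+ // zero-initialized memory, so the new bit vector must be cleared explicitly.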
+ liveness_of_values.back()->ClearAllBits();
}
for (LiveInterval* start_interval : intervals) {
diff --git a/compiler/optimizing/register_allocator.h b/compiler/optimizing/register_allocator.h
index 4375d68..eaeec3b 100644
--- a/compiler/optimizing/register_allocator.h
+++ b/compiler/optimizing/register_allocator.h
@@ -18,7 +18,7 @@
#define ART_COMPILER_OPTIMIZING_REGISTER_ALLOCATOR_H_
#include "arch/instruction_set.h"
-#include "base/arena_containers.h"
+#include "base/array_ref.h"
#include "base/arena_object.h"
#include "base/macros.h"
@@ -36,7 +36,7 @@
/**
* Base class for any register allocator.
*/
-class RegisterAllocator : public ArenaObject<kArenaAllocRegisterAllocator> {
+class RegisterAllocator : public DeletableArenaObject<kArenaAllocRegisterAllocator> {
public:
enum Strategy {
kRegisterAllocatorLinearScan,
@@ -45,10 +45,10 @@
static constexpr Strategy kRegisterAllocatorDefault = kRegisterAllocatorLinearScan;
- static RegisterAllocator* Create(ArenaAllocator* allocator,
- CodeGenerator* codegen,
- const SsaLivenessAnalysis& analysis,
- Strategy strategy = kRegisterAllocatorDefault);
+ static std::unique_ptr<RegisterAllocator> Create(ScopedArenaAllocator* allocator,
+ CodeGenerator* codegen,
+ const SsaLivenessAnalysis& analysis,
+ Strategy strategy = kRegisterAllocatorDefault);
virtual ~RegisterAllocator() = default;
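
The switch to DeletableArenaObject is what makes the std::unique_ptr return type above safe: the class provides a no-op operator delete, so unique_ptr can run the destructor while the storage stays in the arena. A simplified sketch of the idea (not the exact base/arena_object.h definition):

  template <ArenaAllocKind kAllocKind>
  class DeletableArenaObjectSketch {
   public:
    void* operator new(size_t size, ScopedArenaAllocator* allocator) {
      return allocator->Alloc(size, kAllocKind);
    }
    void operator delete(void* ptr ATTRIBUTE_UNUSED) {
      // No-op: the destructor has already run; the arena reclaims the memory in bulk.
    }
  };
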
@@ -64,18 +64,17 @@
InstructionSet instruction_set);
// Verifies that live intervals do not conflict. Used by unit testing.
- static bool ValidateIntervals(const ArenaVector<LiveInterval*>& intervals,
+ static bool ValidateIntervals(ArrayRef<LiveInterval* const> intervals,
size_t number_of_spill_slots,
size_t number_of_out_slots,
const CodeGenerator& codegen,
- ArenaAllocator* allocator,
bool processing_core_registers,
bool log_fatal_on_failure);
static constexpr const char* kRegisterAllocatorPassName = "register";
protected:
- RegisterAllocator(ArenaAllocator* allocator,
+ RegisterAllocator(ScopedArenaAllocator* allocator,
CodeGenerator* codegen,
const SsaLivenessAnalysis& analysis);
@@ -88,7 +87,7 @@
// to find an optimal split position.
LiveInterval* SplitBetween(LiveInterval* interval, size_t from, size_t to);
- ArenaAllocator* const allocator_;
+ ScopedArenaAllocator* allocator_;
CodeGenerator* const codegen_;
const SsaLivenessAnalysis& liveness_;
};
diff --git a/compiler/optimizing/register_allocator_graph_color.cc b/compiler/optimizing/register_allocator_graph_color.cc
index 33df607..ad5248e 100644
--- a/compiler/optimizing/register_allocator_graph_color.cc
+++ b/compiler/optimizing/register_allocator_graph_color.cc
@@ -217,13 +217,12 @@
// and thus whether it is safe to prune it from the interference graph early on.
class InterferenceNode : public ArenaObject<kArenaAllocRegisterAllocator> {
public:
- InterferenceNode(ArenaAllocator* allocator,
- LiveInterval* interval,
+ InterferenceNode(LiveInterval* interval,
const SsaLivenessAnalysis& liveness)
: stage(NodeStage::kInitial),
interval_(interval),
- adjacent_nodes_(allocator->Adapter(kArenaAllocRegisterAllocator)),
- coalesce_opportunities_(allocator->Adapter(kArenaAllocRegisterAllocator)),
+ adjacent_nodes_(nullptr),
+ coalesce_opportunities_(nullptr),
out_degree_(interval->HasRegister() ? std::numeric_limits<size_t>::max() : 0),
alias_(this),
spill_weight_(ComputeSpillWeight(interval, liveness)),
@@ -232,21 +231,26 @@
DCHECK(!interval->IsHighInterval()) << "Pair nodes should be represented by the low interval";
}
- void AddInterference(InterferenceNode* other, bool guaranteed_not_interfering_yet) {
+ void AddInterference(InterferenceNode* other,
+ bool guaranteed_not_interfering_yet,
+ ScopedArenaDeque<ScopedArenaVector<InterferenceNode*>>* storage) {
DCHECK(!IsPrecolored()) << "To save memory, fixed nodes should not have outgoing interferences";
DCHECK_NE(this, other) << "Should not create self loops in the interference graph";
DCHECK_EQ(this, alias_) << "Should not add interferences to a node that aliases another";
DCHECK_NE(stage, NodeStage::kPruned);
DCHECK_NE(other->stage, NodeStage::kPruned);
+ if (adjacent_nodes_ == nullptr) {
+ ScopedArenaVector<InterferenceNode*>::allocator_type adapter(storage->get_allocator());
+ storage->emplace_back(adapter);
+ adjacent_nodes_ = &storage->back();
+ }
if (guaranteed_not_interfering_yet) {
- DCHECK(std::find(adjacent_nodes_.begin(), adjacent_nodes_.end(), other)
- == adjacent_nodes_.end());
- adjacent_nodes_.push_back(other);
+ DCHECK(!ContainsElement(GetAdjacentNodes(), other));
+ adjacent_nodes_->push_back(other);
out_degree_ += EdgeWeightWith(other);
} else {
- auto it = std::find(adjacent_nodes_.begin(), adjacent_nodes_.end(), other);
- if (it == adjacent_nodes_.end()) {
- adjacent_nodes_.push_back(other);
+ if (!ContainsElement(GetAdjacentNodes(), other)) {
+ adjacent_nodes_->push_back(other);
out_degree_ += EdgeWeightWith(other);
}
}
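
The adjacency vectors are created lazily and live in a deque owned by the ColoringIteration (adjacent_nodes_links_ below), so nodes that never interfere pay for no storage at all. The pattern, distilled into a stand-alone sketch with illustrative names (Node/AddEdge are not from this diff):

  struct Node {
    ScopedArenaVector<Node*>* edges = nullptr;  // Owned by `storage`, not by the node.
  };

  void AddEdge(Node* node,
               Node* other,
               ScopedArenaDeque<ScopedArenaVector<Node*>>* storage) {
    if (node->edges == nullptr) {
      // Deque elements never move on emplace_back, so this pointer stays valid.
      ScopedArenaVector<Node*>::allocator_type adapter(storage->get_allocator());
      storage->emplace_back(adapter);
      node->edges = &storage->back();
    }
    node->edges->push_back(other);
  }
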
@@ -255,26 +259,29 @@
void RemoveInterference(InterferenceNode* other) {
DCHECK_EQ(this, alias_) << "Should not remove interferences from a coalesced node";
DCHECK_EQ(other->stage, NodeStage::kPruned) << "Should only remove interferences when pruning";
- auto it = std::find(adjacent_nodes_.begin(), adjacent_nodes_.end(), other);
- if (it != adjacent_nodes_.end()) {
- adjacent_nodes_.erase(it);
- out_degree_ -= EdgeWeightWith(other);
+ if (adjacent_nodes_ != nullptr) {
+ auto it = std::find(adjacent_nodes_->begin(), adjacent_nodes_->end(), other);
+ if (it != adjacent_nodes_->end()) {
+ adjacent_nodes_->erase(it);
+ out_degree_ -= EdgeWeightWith(other);
+ }
}
}
bool ContainsInterference(InterferenceNode* other) const {
DCHECK(!IsPrecolored()) << "Should not query fixed nodes for interferences";
DCHECK_EQ(this, alias_) << "Should not query a coalesced node for interferences";
- auto it = std::find(adjacent_nodes_.begin(), adjacent_nodes_.end(), other);
- return it != adjacent_nodes_.end();
+ return ContainsElement(GetAdjacentNodes(), other);
}
LiveInterval* GetInterval() const {
return interval_;
}
- const ArenaVector<InterferenceNode*>& GetAdjacentNodes() const {
- return adjacent_nodes_;
+ ArrayRef<InterferenceNode*> GetAdjacentNodes() const {
+ return adjacent_nodes_ != nullptr
+ ? ArrayRef<InterferenceNode*>(*adjacent_nodes_)
+ : ArrayRef<InterferenceNode*>();
}
size_t GetOutDegree() const {
@@ -283,16 +290,22 @@
return out_degree_;
}
- void AddCoalesceOpportunity(CoalesceOpportunity* opportunity) {
- coalesce_opportunities_.push_back(opportunity);
+ void AddCoalesceOpportunity(CoalesceOpportunity* opportunity,
+ ScopedArenaDeque<ScopedArenaVector<CoalesceOpportunity*>>* storage) {
+ if (coalesce_opportunities_ == nullptr) {
+ ScopedArenaVector<CoalesceOpportunity*>::allocator_type adapter(storage->get_allocator());
+ storage->emplace_back(adapter);
+ coalesce_opportunities_ = &storage->back();
+ }
+ coalesce_opportunities_->push_back(opportunity);
}
void ClearCoalesceOpportunities() {
- coalesce_opportunities_.clear();
+ coalesce_opportunities_ = nullptr;
}
bool IsMoveRelated() const {
- for (CoalesceOpportunity* opportunity : coalesce_opportunities_) {
+ for (CoalesceOpportunity* opportunity : GetCoalesceOpportunities()) {
if (opportunity->stage == CoalesceStage::kWorklist ||
opportunity->stage == CoalesceStage::kActive) {
return true;
@@ -325,8 +338,10 @@
return alias_;
}
- const ArenaVector<CoalesceOpportunity*>& GetCoalesceOpportunities() const {
- return coalesce_opportunities_;
+ ArrayRef<CoalesceOpportunity*> GetCoalesceOpportunities() const {
+ return coalesce_opportunities_ != nullptr
+ ? ArrayRef<CoalesceOpportunity*>(*coalesce_opportunities_)
+ : ArrayRef<CoalesceOpportunity*>();
}
float GetSpillWeight() const {
@@ -361,10 +376,10 @@
// All nodes interfering with this one.
// We use an unsorted vector as a set, since a tree or hash set is too heavy for the
// set sizes that we encounter. Using a vector leads to much better performance.
- ArenaVector<InterferenceNode*> adjacent_nodes_;
+ ScopedArenaVector<InterferenceNode*>* adjacent_nodes_; // Owned by ColoringIteration.
// Interference nodes that this node should be coalesced with to reduce moves.
- ArenaVector<CoalesceOpportunity*> coalesce_opportunities_;
+ ScopedArenaVector<CoalesceOpportunity*>* coalesce_opportunities_; // Owned by ColoringIteration.
// The maximum number of colors with which this node could interfere. This could be more than
// the number of adjacent nodes if this is a pair node, or if some adjacent nodes are pair nodes.
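
EdgeWeightWith() (used by AddInterference() above) is what makes out_degree_ track register pressure rather than a plain node count; a hedged sketch consistent with the pair-node DCHECK above, though the real definition may differ in detail:

  // A pair interval occupies two registers, so an edge touching one counts double.
  size_t EdgeWeightWith(const InterferenceNode* other) const {
    return (interval_->HasHighInterval() || other->interval_->HasHighInterval()) ? 2 : 1;
  }
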
@@ -416,7 +431,7 @@
class ColoringIteration {
public:
ColoringIteration(RegisterAllocatorGraphColor* register_allocator,
- ArenaAllocator* allocator,
+ ScopedArenaAllocator* allocator,
bool processing_core_regs,
size_t num_regs)
: register_allocator_(register_allocator),
@@ -430,15 +445,17 @@
freeze_worklist_(allocator->Adapter(kArenaAllocRegisterAllocator)),
spill_worklist_(HasGreaterNodePriority, allocator->Adapter(kArenaAllocRegisterAllocator)),
coalesce_worklist_(CoalesceOpportunity::CmpPriority,
- allocator->Adapter(kArenaAllocRegisterAllocator)) {}
+ allocator->Adapter(kArenaAllocRegisterAllocator)),
+ adjacent_nodes_links_(allocator->Adapter(kArenaAllocRegisterAllocator)),
+ coalesce_opportunities_links_(allocator->Adapter(kArenaAllocRegisterAllocator)) {}
// Use the intervals collected from instructions to construct an
// interference graph mapping intervals to adjacency lists.
// Also, collect synthesized safepoint nodes, used to keep
// track of live intervals across safepoints.
// TODO: Should build safepoints elsewhere.
- void BuildInterferenceGraph(const ArenaVector<LiveInterval*>& intervals,
- const ArenaVector<InterferenceNode*>& physical_nodes);
+ void BuildInterferenceGraph(const ScopedArenaVector<LiveInterval*>& intervals,
+ const ScopedArenaVector<InterferenceNode*>& physical_nodes);
// Add coalesce opportunities to interference nodes.
void FindCoalesceOpportunities();
@@ -456,8 +473,8 @@
// Return prunable nodes.
// The register allocator will need to access prunable nodes after coloring
// in order to tell the code generator which registers have been assigned.
- const ArenaVector<InterferenceNode*>& GetPrunableNodes() const {
- return prunable_nodes_;
+ ArrayRef<InterferenceNode* const> GetPrunableNodes() const {
+ return ArrayRef<InterferenceNode* const>(prunable_nodes_);
}
private:
@@ -503,38 +520,46 @@
// needed to split intervals and assign spill slots.
RegisterAllocatorGraphColor* register_allocator_;
- // An arena allocator used for a single graph coloring attempt.
- ArenaAllocator* allocator_;
+ // A scoped arena allocator used for a single graph coloring attempt.
+ ScopedArenaAllocator* allocator_;
const bool processing_core_regs_;
const size_t num_regs_;
// A map from live intervals to interference nodes.
- ArenaHashMap<LiveInterval*, InterferenceNode*> interval_node_map_;
+ ScopedArenaHashMap<LiveInterval*, InterferenceNode*> interval_node_map_;
// Uncolored nodes that should be pruned from the interference graph.
- ArenaVector<InterferenceNode*> prunable_nodes_;
+ ScopedArenaVector<InterferenceNode*> prunable_nodes_;
// A stack of nodes pruned from the interference graph, waiting to be colored.
- ArenaStdStack<InterferenceNode*> pruned_nodes_;
+ ScopedArenaStdStack<InterferenceNode*> pruned_nodes_;
// A queue containing low degree, non-move-related nodes that can be pruned immediately.
- ArenaDeque<InterferenceNode*> simplify_worklist_;
+ ScopedArenaDeque<InterferenceNode*> simplify_worklist_;
// A queue containing low degree, move-related nodes.
- ArenaDeque<InterferenceNode*> freeze_worklist_;
+ ScopedArenaDeque<InterferenceNode*> freeze_worklist_;
// A queue containing high degree nodes.
// If we have to prune from the spill worklist, we cannot guarantee
// the pruned node a color, so we order the worklist by priority.
- ArenaPriorityQueue<InterferenceNode*, decltype(&HasGreaterNodePriority)> spill_worklist_;
+ ScopedArenaPriorityQueue<InterferenceNode*, decltype(&HasGreaterNodePriority)> spill_worklist_;
// A queue containing coalesce opportunities.
// We order the coalesce worklist by priority, since some coalesce opportunities (e.g., those
// inside of loops) are more important than others.
- ArenaPriorityQueue<CoalesceOpportunity*,
- decltype(&CoalesceOpportunity::CmpPriority)> coalesce_worklist_;
+ ScopedArenaPriorityQueue<CoalesceOpportunity*,
+ decltype(&CoalesceOpportunity::CmpPriority)> coalesce_worklist_;
+
+ // Storage for links to adjacent nodes for interference nodes.
+ // Using std::deque so that elements do not move when adding new ones.
+ ScopedArenaDeque<ScopedArenaVector<InterferenceNode*>> adjacent_nodes_links_;
+
+ // Storage for links to coalesce opportunities for interference nodes.
+ // Using std::deque so that elements do not move when adding new ones.
+ ScopedArenaDeque<ScopedArenaVector<CoalesceOpportunity*>> coalesce_opportunities_links_;
DISALLOW_COPY_AND_ASSIGN(ColoringIteration);
};
@@ -547,7 +572,7 @@
return static_cast<size_t>(InstructionSetPointerSize(codegen.GetInstructionSet())) / kVRegSize;
}
-RegisterAllocatorGraphColor::RegisterAllocatorGraphColor(ArenaAllocator* allocator,
+RegisterAllocatorGraphColor::RegisterAllocatorGraphColor(ScopedArenaAllocator* allocator,
CodeGenerator* codegen,
const SsaLivenessAnalysis& liveness,
bool iterative_move_coalescing)
@@ -574,8 +599,7 @@
physical_core_nodes_.resize(codegen_->GetNumberOfCoreRegisters(), nullptr);
for (size_t i = 0; i < codegen_->GetNumberOfCoreRegisters(); ++i) {
LiveInterval* interval = LiveInterval::MakeFixedInterval(allocator_, i, DataType::Type::kInt32);
- physical_core_nodes_[i] =
- new (allocator_) InterferenceNode(allocator_, interval, liveness);
+ physical_core_nodes_[i] = new (allocator_) InterferenceNode(interval, liveness);
physical_core_nodes_[i]->stage = NodeStage::kPrecolored;
core_intervals_.push_back(interval);
if (codegen_->IsBlockedCoreRegister(i)) {
@@ -587,8 +611,7 @@
for (size_t i = 0; i < codegen_->GetNumberOfFloatingPointRegisters(); ++i) {
LiveInterval* interval =
LiveInterval::MakeFixedInterval(allocator_, i, DataType::Type::kFloat32);
- physical_fp_nodes_[i] =
- new (allocator_) InterferenceNode(allocator_, interval, liveness);
+ physical_fp_nodes_[i] = new (allocator_) InterferenceNode(interval, liveness);
physical_fp_nodes_[i]->stage = NodeStage::kPrecolored;
fp_intervals_.push_back(interval);
if (codegen_->IsBlockedFloatingPointRegister(i)) {
@@ -597,12 +620,14 @@
}
}
+RegisterAllocatorGraphColor::~RegisterAllocatorGraphColor() {}
+
void RegisterAllocatorGraphColor::AllocateRegisters() {
// (1) Collect and prepare live intervals.
ProcessInstructions();
for (bool processing_core_regs : {true, false}) {
- ArenaVector<LiveInterval*>& intervals = processing_core_regs
+ ScopedArenaVector<LiveInterval*>& intervals = processing_core_regs
? core_intervals_
: fp_intervals_;
size_t num_registers = processing_core_regs
@@ -619,17 +644,15 @@
<< "should be prioritized over long ones, because they cannot be split further.)";
// Many data structures are cleared between graph coloring attempts, so we reduce
- // total memory usage by using a new arena allocator for each attempt.
- ArenaAllocator coloring_attempt_allocator(allocator_->GetArenaPool());
+ // total memory usage by using a new scoped arena allocator for each attempt.
+ ScopedArenaAllocator coloring_attempt_allocator(allocator_->GetArenaStack());
ColoringIteration iteration(this,
&coloring_attempt_allocator,
processing_core_regs,
num_registers);
- // (2) Build the interference graph. Also gather safepoints.
- ArenaVector<InterferenceNode*> safepoints(
- coloring_attempt_allocator.Adapter(kArenaAllocRegisterAllocator));
- ArenaVector<InterferenceNode*>& physical_nodes = processing_core_regs
+ // (2) Build the interference graph.
+ ScopedArenaVector<InterferenceNode*>& physical_nodes = processing_core_regs
? physical_core_nodes_
: physical_fp_nodes_;
iteration.BuildInterferenceGraph(intervals, physical_nodes);
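The point of switching `coloring_attempt_allocator` to a ScopedArenaAllocator is that scoped allocators sharing one ArenaStack release memory in LIFO order, so every coloring retry starts from the same high-water mark instead of growing fresh arenas. A hedged sketch of that discipline, using a hypothetical Stack/Scope pair rather than the actual ART API:

    #include <cstddef>
    #include <iostream>
    #include <vector>

    // Hypothetical stand-ins for ArenaStack / ScopedArenaAllocator: a shared
    // bump allocator whose scopes rewind it to their entry mark on destruction.
    struct Stack {
      std::vector<char> storage = std::vector<char>(1 << 20);
      size_t top = 0;
    };

    class Scope {
     public:
      explicit Scope(Stack* stack) : stack_(stack), saved_top_(stack->top) {}
      ~Scope() { stack_->top = saved_top_; }  // LIFO release of this scope's memory.
      void* Alloc(size_t bytes) {
        void* result = &stack_->storage[stack_->top];
        stack_->top += bytes;
        return result;
      }
     private:
      Stack* const stack_;
      const size_t saved_top_;
    };

    int main() {
      Stack stack;
      for (int attempt = 0; attempt < 3; ++attempt) {
        Scope attempt_scope(&stack);  // Like coloring_attempt_allocator above.
        attempt_scope.Alloc(4096);    // Worklists, nodes, adjacency links, ...
        std::cout << "attempt " << attempt << ": top = " << stack.top << '\n';
      }  // The destructor rewinds, so peak usage is one attempt, not their sum.
      std::cout << "after all attempts: top = " << stack.top << '\n';  // 0 again.
      return 0;
    }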
@@ -691,7 +714,7 @@
} // for processing_core_instructions
// (6) Resolve locations and deconstruct SSA form.
- RegisterAllocationResolver(allocator_, codegen_, liveness_)
+ RegisterAllocationResolver(codegen_, liveness_)
.Resolve(ArrayRef<HInstruction* const>(safepoints_),
reserved_art_method_slots_ + reserved_out_slots_,
num_int_spill_slots_,
@@ -699,7 +722,7 @@
num_float_spill_slots_,
num_double_spill_slots_,
catch_phi_spill_slot_counter_,
- temp_intervals_);
+ ArrayRef<LiveInterval* const>(temp_intervals_));
if (kIsDebugBuild) {
Validate(/*log_fatal_on_failure*/ true);
@@ -708,8 +731,9 @@
bool RegisterAllocatorGraphColor::Validate(bool log_fatal_on_failure) {
for (bool processing_core_regs : {true, false}) {
- ArenaVector<LiveInterval*> intervals(
- allocator_->Adapter(kArenaAllocRegisterAllocatorValidate));
+ ScopedArenaAllocator allocator(allocator_->GetArenaStack());
+ ScopedArenaVector<LiveInterval*> intervals(
+ allocator.Adapter(kArenaAllocRegisterAllocatorValidate));
for (size_t i = 0; i < liveness_.GetNumberOfSsaValues(); ++i) {
HInstruction* instruction = liveness_.GetInstructionFromSsaIndex(i);
LiveInterval* interval = instruction->GetLiveInterval();
@@ -718,7 +742,7 @@
}
}
- ArenaVector<InterferenceNode*>& physical_nodes = processing_core_regs
+ ScopedArenaVector<InterferenceNode*>& physical_nodes = processing_core_regs
? physical_core_nodes_
: physical_fp_nodes_;
for (InterferenceNode* fixed : physical_nodes) {
@@ -742,11 +766,10 @@
+ num_float_spill_slots_
+ num_double_spill_slots_
+ catch_phi_spill_slot_counter_;
- bool ok = ValidateIntervals(intervals,
+ bool ok = ValidateIntervals(ArrayRef<LiveInterval* const>(intervals),
spill_slots,
reserved_art_method_slots_ + reserved_out_slots_,
*codegen_,
- allocator_,
processing_core_regs,
log_fatal_on_failure);
if (!ok) {
@@ -825,7 +848,7 @@
CheckForFixedOutput(instruction);
AllocateSpillSlotForCatchPhi(instruction);
- ArenaVector<LiveInterval*>& intervals = IsCoreInterval(interval)
+ ScopedArenaVector<LiveInterval*>& intervals = IsCoreInterval(interval)
? core_intervals_
: fp_intervals_;
if (interval->HasSpillSlot() || instruction->IsConstant()) {
@@ -1075,11 +1098,12 @@
} else if (to->IsPrecolored()) {
// It is important that only a single node represents a given fixed register in the
// interference graph. We retrieve that node here.
- const ArenaVector<InterferenceNode*>& physical_nodes = to->GetInterval()->IsFloatingPoint()
- ? register_allocator_->physical_fp_nodes_
- : register_allocator_->physical_core_nodes_;
+ const ScopedArenaVector<InterferenceNode*>& physical_nodes =
+ to->GetInterval()->IsFloatingPoint() ? register_allocator_->physical_fp_nodes_
+ : register_allocator_->physical_core_nodes_;
InterferenceNode* physical_node = physical_nodes[to->GetInterval()->GetRegister()];
- from->AddInterference(physical_node, /*guaranteed_not_interfering_yet*/ false);
+ from->AddInterference(
+ physical_node, /*guaranteed_not_interfering_yet*/ false, &adjacent_nodes_links_);
DCHECK_EQ(to->GetInterval()->GetRegister(), physical_node->GetInterval()->GetRegister());
DCHECK_EQ(to->GetAlias(), physical_node) << "Fixed nodes should alias the canonical fixed node";
@@ -1097,11 +1121,12 @@
physical_nodes[to->GetInterval()->GetHighInterval()->GetRegister()];
DCHECK_EQ(to->GetInterval()->GetHighInterval()->GetRegister(),
high_node->GetInterval()->GetRegister());
- from->AddInterference(high_node, /*guaranteed_not_interfering_yet*/ false);
+ from->AddInterference(
+ high_node, /*guaranteed_not_interfering_yet*/ false, &adjacent_nodes_links_);
}
} else {
// Standard interference between two uncolored nodes.
- from->AddInterference(to, guaranteed_not_interfering_yet);
+ from->AddInterference(to, guaranteed_not_interfering_yet, &adjacent_nodes_links_);
}
if (both_directions) {
@@ -1156,8 +1181,8 @@
}
void ColoringIteration::BuildInterferenceGraph(
- const ArenaVector<LiveInterval*>& intervals,
- const ArenaVector<InterferenceNode*>& physical_nodes) {
+ const ScopedArenaVector<LiveInterval*>& intervals,
+ const ScopedArenaVector<InterferenceNode*>& physical_nodes) {
DCHECK(interval_node_map_.Empty() && prunable_nodes_.empty());
// Build the interference graph efficiently by ordering range endpoints
// by position and doing a linear sweep to find interferences. (That is, we
@@ -1171,7 +1196,7 @@
//
// For simplicity, we create a tuple for each endpoint, and then sort the tuples.
// Tuple contents: (position, is_range_beginning, node).
- ArenaVector<std::tuple<size_t, bool, InterferenceNode*>> range_endpoints(
+ ScopedArenaVector<std::tuple<size_t, bool, InterferenceNode*>> range_endpoints(
allocator_->Adapter(kArenaAllocRegisterAllocator));
// We reserve plenty of space to avoid excessive copying.
@@ -1181,8 +1206,8 @@
for (LiveInterval* sibling = parent; sibling != nullptr; sibling = sibling->GetNextSibling()) {
LiveRange* range = sibling->GetFirstRange();
if (range != nullptr) {
- InterferenceNode* node = new (allocator_) InterferenceNode(
- allocator_, sibling, register_allocator_->liveness_);
+ InterferenceNode* node =
+ new (allocator_) InterferenceNode(sibling, register_allocator_->liveness_);
interval_node_map_.Insert(std::make_pair(sibling, node));
if (sibling->HasRegister()) {
@@ -1217,8 +1242,7 @@
});
// Nodes live at the current position in the linear sweep.
- ArenaVector<InterferenceNode*> live(
- allocator_->Adapter(kArenaAllocRegisterAllocator));
+ ScopedArenaVector<InterferenceNode*> live(allocator_->Adapter(kArenaAllocRegisterAllocator));
// Linear sweep. When we encounter the beginning of a range, we add the corresponding node to the
// live set. When we encounter the end of a range, we remove the corresponding node
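Stripped of the ART types, the sweep described in these comments reduces to sorting (position, is_range_beginning, node) tuples and maintaining a live set. A rough standalone sketch, assuming half-open ranges [start, end) and plain ints for node ids:

    #include <algorithm>
    #include <iostream>
    #include <tuple>
    #include <vector>

    int main() {
      // Half-open live ranges [start, end) for three interference nodes.
      struct Range { size_t start, end; int node; };
      std::vector<Range> ranges = {{0, 10, 0}, {5, 15, 1}, {12, 20, 2}};

      // One (position, is_range_beginning, node) tuple per endpoint.
      std::vector<std::tuple<size_t, bool, int>> endpoints;
      for (const Range& r : ranges) {
        endpoints.emplace_back(r.start, true, r.node);
        endpoints.emplace_back(r.end, false, r.node);
      }
      // Sort by position; at equal positions an end (false) sorts before a
      // beginning (true), so adjacent half-open ranges do not interfere.
      std::sort(endpoints.begin(), endpoints.end());

      // Linear sweep: a beginning interferes with everything currently live.
      std::vector<int> live;
      for (const auto& [pos, is_begin, node] : endpoints) {
        if (is_begin) {
          for (int other : live) {
            std::cout << node << " interferes with " << other << " at " << pos << '\n';
          }
          live.push_back(node);
        } else {
          live.erase(std::find(live.begin(), live.end(), node));
        }
      }
      return 0;
    }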
@@ -1261,8 +1285,8 @@
<< "Nodes of different memory widths should never be coalesced";
CoalesceOpportunity* opportunity =
new (allocator_) CoalesceOpportunity(a, b, kind, position, register_allocator_->liveness_);
- a->AddCoalesceOpportunity(opportunity);
- b->AddCoalesceOpportunity(opportunity);
+ a->AddCoalesceOpportunity(opportunity, &coalesce_opportunities_links_);
+ b->AddCoalesceOpportunity(opportunity, &coalesce_opportunities_links_);
coalesce_worklist_.push(opportunity);
}
@@ -1332,7 +1356,7 @@
// Coalesce phi inputs with the corresponding output.
HInstruction* defined_by = interval->GetDefinedBy();
if (defined_by != nullptr && defined_by->IsPhi()) {
- const ArenaVector<HBasicBlock*>& predecessors = defined_by->GetBlock()->GetPredecessors();
+ ArrayRef<HBasicBlock* const> predecessors(defined_by->GetBlock()->GetPredecessors());
HInputsRef inputs = defined_by->GetInputs();
for (size_t i = 0, e = inputs.size(); i < e; ++i) {
@@ -1675,7 +1699,7 @@
// Add coalesce opportunities.
for (CoalesceOpportunity* opportunity : from->GetCoalesceOpportunities()) {
if (opportunity->stage != CoalesceStage::kDefunct) {
- into->AddCoalesceOpportunity(opportunity);
+ into->AddCoalesceOpportunity(opportunity, &coalesce_opportunities_links_);
}
}
EnableCoalesceOpportunities(from);
@@ -1729,7 +1753,7 @@
// Build a mask with a bit set for each register assigned to some
// interval in `intervals`.
template <typename Container>
-static std::bitset<kMaxNumRegs> BuildConflictMask(Container& intervals) {
+static std::bitset<kMaxNumRegs> BuildConflictMask(const Container& intervals) {
std::bitset<kMaxNumRegs> conflict_mask;
for (InterferenceNode* adjacent : intervals) {
LiveInterval* conflicting = adjacent->GetInterval();
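BuildConflictMask produces an O(1)-queriable summary of which registers the adjacent nodes already hold. A small usage sketch of the bitset idiom, independent of the ART types:

    #include <bitset>
    #include <iostream>

    constexpr size_t kMaxNumRegs = 32;

    int main() {
      // Bits set for registers already claimed by interfering neighbors.
      std::bitset<kMaxNumRegs> conflict_mask;
      conflict_mask.set(0);
      conflict_mask.set(3);

      // Take the lowest register not in conflict, as a coloring step would.
      for (size_t reg = 0; reg < kMaxNumRegs; ++reg) {
        if (!conflict_mask.test(reg)) {
          std::cout << "first free register: " << reg << '\n';  // Prints 1.
          break;
        }
      }
      return 0;
    }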
@@ -1765,7 +1789,7 @@
bool ColoringIteration::ColorInterferenceGraph() {
DCHECK_LE(num_regs_, kMaxNumRegs) << "kMaxNumRegs is too small";
- ArenaVector<LiveInterval*> colored_intervals(
+ ScopedArenaVector<LiveInterval*> colored_intervals(
allocator_->Adapter(kArenaAllocRegisterAllocator));
bool successful = true;
@@ -1888,16 +1912,18 @@
return successful;
}
-void RegisterAllocatorGraphColor::AllocateSpillSlots(const ArenaVector<InterferenceNode*>& nodes) {
+void RegisterAllocatorGraphColor::AllocateSpillSlots(ArrayRef<InterferenceNode* const> nodes) {
// The register allocation resolver will organize the stack based on value type,
// so we assign stack slots for each value type separately.
- ArenaVector<LiveInterval*> double_intervals(allocator_->Adapter(kArenaAllocRegisterAllocator));
- ArenaVector<LiveInterval*> long_intervals(allocator_->Adapter(kArenaAllocRegisterAllocator));
- ArenaVector<LiveInterval*> float_intervals(allocator_->Adapter(kArenaAllocRegisterAllocator));
- ArenaVector<LiveInterval*> int_intervals(allocator_->Adapter(kArenaAllocRegisterAllocator));
+ ScopedArenaAllocator allocator(allocator_->GetArenaStack());
+ ScopedArenaAllocatorAdapter<void> adapter = allocator.Adapter(kArenaAllocRegisterAllocator);
+ ScopedArenaVector<LiveInterval*> double_intervals(adapter);
+ ScopedArenaVector<LiveInterval*> long_intervals(adapter);
+ ScopedArenaVector<LiveInterval*> float_intervals(adapter);
+ ScopedArenaVector<LiveInterval*> int_intervals(adapter);
// The set of parent intervals already handled.
- ArenaSet<LiveInterval*> seen(allocator_->Adapter(kArenaAllocRegisterAllocator));
+ ScopedArenaSet<LiveInterval*> seen(adapter);
// Find nodes that need spill slots.
for (InterferenceNode* node : nodes) {
@@ -1954,23 +1980,24 @@
}
// Color spill slots for each value type.
- ColorSpillSlots(&double_intervals, &num_double_spill_slots_);
- ColorSpillSlots(&long_intervals, &num_long_spill_slots_);
- ColorSpillSlots(&float_intervals, &num_float_spill_slots_);
- ColorSpillSlots(&int_intervals, &num_int_spill_slots_);
+ ColorSpillSlots(ArrayRef<LiveInterval* const>(double_intervals), &num_double_spill_slots_);
+ ColorSpillSlots(ArrayRef<LiveInterval* const>(long_intervals), &num_long_spill_slots_);
+ ColorSpillSlots(ArrayRef<LiveInterval* const>(float_intervals), &num_float_spill_slots_);
+ ColorSpillSlots(ArrayRef<LiveInterval* const>(int_intervals), &num_int_spill_slots_);
}
-void RegisterAllocatorGraphColor::ColorSpillSlots(ArenaVector<LiveInterval*>* intervals,
- size_t* num_stack_slots_used) {
+void RegisterAllocatorGraphColor::ColorSpillSlots(ArrayRef<LiveInterval* const> intervals,
+ /* out */ size_t* num_stack_slots_used) {
// We cannot use the original interference graph here because spill slots are assigned to
// all of the siblings of an interval, whereas an interference node represents only a single
// sibling. So, we assign spill slots linear-scan-style by sorting all the interval endpoints
// by position, and assigning the lowest spill slot available when we encounter an interval
// beginning. We ignore lifetime holes for simplicity.
- ArenaVector<std::tuple<size_t, bool, LiveInterval*>> interval_endpoints(
- allocator_->Adapter(kArenaAllocRegisterAllocator));
+ ScopedArenaAllocator allocator(allocator_->GetArenaStack());
+ ScopedArenaVector<std::tuple<size_t, bool, LiveInterval*>> interval_endpoints(
+ allocator.Adapter(kArenaAllocRegisterAllocator));
- for (LiveInterval* parent_interval : *intervals) {
+ for (LiveInterval* parent_interval : intervals) {
DCHECK(parent_interval->IsParent());
DCHECK(!parent_interval->HasSpillSlot());
size_t start = parent_interval->GetStart();
@@ -1990,7 +2017,7 @@
< std::tie(std::get<0>(rhs), std::get<1>(rhs));
});
- ArenaBitVector taken(allocator_, 0, true);
+ ArenaBitVector taken(&allocator, 0, true, kArenaAllocRegisterAllocator);
for (auto it = interval_endpoints.begin(), end = interval_endpoints.end(); it != end; ++it) {
// Extract information from the current tuple.
LiveInterval* parent_interval;
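The same endpoint sweep drives spill slot assignment: on an interval beginning, claim the lowest clear bit in `taken`; on an end, clear it again. A compact sketch under the stated simplification (lifetime holes ignored), with std::vector<bool> standing in for the ArenaBitVector:

    #include <algorithm>
    #include <iostream>
    #include <tuple>
    #include <vector>

    int main() {
      struct Interval { size_t start, end; };  // Half-open [start, end).
      std::vector<Interval> intervals = {{0, 10}, {5, 15}, {12, 20}};

      std::vector<std::tuple<size_t, bool, size_t>> endpoints;  // (pos, is_begin, index)
      for (size_t i = 0; i != intervals.size(); ++i) {
        endpoints.emplace_back(intervals[i].start, true, i);
        endpoints.emplace_back(intervals[i].end, false, i);
      }
      std::sort(endpoints.begin(), endpoints.end());

      std::vector<bool> taken;                     // Grows like the bit vector above.
      std::vector<size_t> slot(intervals.size());
      for (const auto& [pos, is_begin, index] : endpoints) {
        if (is_begin) {
          // Assign the lowest free slot, extending storage if all are taken.
          size_t s = std::find(taken.begin(), taken.end(), false) - taken.begin();
          if (s == taken.size()) taken.push_back(false);
          taken[s] = true;
          slot[index] = s;
        } else {
          taken[slot[index]] = false;  // Slot is reusable past the interval's end.
        }
      }
      for (size_t i = 0; i != intervals.size(); ++i) {
        std::cout << "interval " << i << " -> slot " << slot[i] << '\n';
      }
      std::cout << "slots used: " << taken.size() << '\n';  // 2: {0,10} and {12,20} share.
      return 0;
    }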
diff --git a/compiler/optimizing/register_allocator_graph_color.h b/compiler/optimizing/register_allocator_graph_color.h
index 3f6d674..3072c92 100644
--- a/compiler/optimizing/register_allocator_graph_color.h
+++ b/compiler/optimizing/register_allocator_graph_color.h
@@ -18,9 +18,10 @@
#define ART_COMPILER_OPTIMIZING_REGISTER_ALLOCATOR_GRAPH_COLOR_H_
#include "arch/instruction_set.h"
-#include "base/arena_containers.h"
#include "base/arena_object.h"
+#include "base/array_ref.h"
#include "base/macros.h"
+#include "base/scoped_arena_containers.h"
#include "register_allocator.h"
namespace art {
@@ -85,11 +86,11 @@
*/
class RegisterAllocatorGraphColor : public RegisterAllocator {
public:
- RegisterAllocatorGraphColor(ArenaAllocator* allocator,
+ RegisterAllocatorGraphColor(ScopedArenaAllocator* allocator,
CodeGenerator* codegen,
const SsaLivenessAnalysis& analysis,
bool iterative_move_coalescing = true);
- ~RegisterAllocatorGraphColor() OVERRIDE {}
+ ~RegisterAllocatorGraphColor() OVERRIDE;
void AllocateRegisters() OVERRIDE;
@@ -141,11 +142,10 @@
// Assigns stack slots to a list of intervals, ensuring that interfering intervals are not
// assigned the same stack slot.
- void ColorSpillSlots(ArenaVector<LiveInterval*>* nodes,
- size_t* num_stack_slots_used);
+ void ColorSpillSlots(ArrayRef<LiveInterval* const> intervals,
+ /* out */ size_t* num_stack_slots_used);
// Provide stack slots to nodes that need them.
- void AllocateSpillSlots(const ArenaVector<InterferenceNode*>& nodes);
+ void AllocateSpillSlots(ArrayRef<InterferenceNode* const> nodes);
// Whether iterative move coalescing should be performed. Iterative move coalescing
// improves code quality, but increases compile time.
@@ -154,19 +154,19 @@
// Live intervals, split by kind (core and floating point).
// These should not contain high intervals, as those are represented by
// the corresponding low interval throughout register allocation.
- ArenaVector<LiveInterval*> core_intervals_;
- ArenaVector<LiveInterval*> fp_intervals_;
+ ScopedArenaVector<LiveInterval*> core_intervals_;
+ ScopedArenaVector<LiveInterval*> fp_intervals_;
// Intervals for temporaries, saved for special handling in the resolution phase.
- ArenaVector<LiveInterval*> temp_intervals_;
+ ScopedArenaVector<LiveInterval*> temp_intervals_;
// Safepoints, saved for special handling while processing instructions.
- ArenaVector<HInstruction*> safepoints_;
+ ScopedArenaVector<HInstruction*> safepoints_;
// Interference nodes representing specific registers. These are "pre-colored" nodes
// in the interference graph.
- ArenaVector<InterferenceNode*> physical_core_nodes_;
- ArenaVector<InterferenceNode*> physical_fp_nodes_;
+ ScopedArenaVector<InterferenceNode*> physical_core_nodes_;
+ ScopedArenaVector<InterferenceNode*> physical_fp_nodes_;
// Allocated stack slot counters.
size_t num_int_spill_slots_;
diff --git a/compiler/optimizing/register_allocator_linear_scan.cc b/compiler/optimizing/register_allocator_linear_scan.cc
index 9803a7b..cfe63bd 100644
--- a/compiler/optimizing/register_allocator_linear_scan.cc
+++ b/compiler/optimizing/register_allocator_linear_scan.cc
@@ -40,7 +40,7 @@
return GetHighForLowRegister(low->GetRegister()) != low->GetHighInterval()->GetRegister();
}
-RegisterAllocatorLinearScan::RegisterAllocatorLinearScan(ArenaAllocator* allocator,
+RegisterAllocatorLinearScan::RegisterAllocatorLinearScan(ScopedArenaAllocator* allocator,
CodeGenerator* codegen,
const SsaLivenessAnalysis& liveness)
: RegisterAllocator(allocator, codegen, liveness),
@@ -81,6 +81,8 @@
reserved_out_slots_ = ptr_size / kVRegSize + codegen->GetGraph()->GetMaximumNumberOfOutVRegs();
}
+RegisterAllocatorLinearScan::~RegisterAllocatorLinearScan() {}
+
static bool ShouldProcess(bool processing_core_registers, LiveInterval* interval) {
if (interval == nullptr) return false;
bool is_core_register = (interval->GetType() != DataType::Type::kFloat64)
@@ -90,7 +92,7 @@
void RegisterAllocatorLinearScan::AllocateRegisters() {
AllocateRegistersInternal();
- RegisterAllocationResolver(allocator_, codegen_, liveness_)
+ RegisterAllocationResolver(codegen_, liveness_)
.Resolve(ArrayRef<HInstruction* const>(safepoints_),
reserved_out_slots_,
int_spill_slots_.size(),
@@ -98,7 +100,7 @@
float_spill_slots_.size(),
double_spill_slots_.size(),
catch_phi_spill_slots_,
- temp_intervals_);
+ ArrayRef<LiveInterval* const>(temp_intervals_));
if (kIsDebugBuild) {
processing_core_registers_ = true;
@@ -298,7 +300,7 @@
LiveInterval* current = instruction->GetLiveInterval();
if (current == nullptr) return;
- ArenaVector<LiveInterval*>& unhandled = core_register
+ ScopedArenaVector<LiveInterval*>& unhandled = core_register
? unhandled_core_intervals_
: unhandled_fp_intervals_;
@@ -425,7 +427,9 @@
bool RegisterAllocatorLinearScan::ValidateInternal(bool log_fatal_on_failure) const {
// To simplify unit testing, we eagerly create the array of intervals, and
// call the helper method.
- ArenaVector<LiveInterval*> intervals(allocator_->Adapter(kArenaAllocRegisterAllocatorValidate));
+ ScopedArenaAllocator allocator(allocator_->GetArenaStack());
+ ScopedArenaVector<LiveInterval*> intervals(
+ allocator.Adapter(kArenaAllocRegisterAllocatorValidate));
for (size_t i = 0; i < liveness_.GetNumberOfSsaValues(); ++i) {
HInstruction* instruction = liveness_.GetInstructionFromSsaIndex(i);
if (ShouldProcess(processing_core_registers_, instruction->GetLiveInterval())) {
@@ -433,7 +437,7 @@
}
}
- const ArenaVector<LiveInterval*>* physical_register_intervals = processing_core_registers_
+ const ScopedArenaVector<LiveInterval*>* physical_register_intervals = processing_core_registers_
? &physical_core_register_intervals_
: &physical_fp_register_intervals_;
for (LiveInterval* fixed : *physical_register_intervals) {
@@ -448,8 +452,12 @@
}
}
- return ValidateIntervals(intervals, GetNumberOfSpillSlots(), reserved_out_slots_, *codegen_,
- allocator_, processing_core_registers_, log_fatal_on_failure);
+ return ValidateIntervals(ArrayRef<LiveInterval* const>(intervals),
+ GetNumberOfSpillSlots(),
+ reserved_out_slots_,
+ *codegen_,
+ processing_core_registers_,
+ log_fatal_on_failure);
}
void RegisterAllocatorLinearScan::DumpInterval(std::ostream& stream, LiveInterval* interval) const {
@@ -813,8 +821,8 @@
// Remove interval and its other half if any. Return iterator to the following element.
- static ArenaVector<LiveInterval*>::iterator RemoveIntervalAndPotentialOtherHalf(
- ArenaVector<LiveInterval*>* intervals, ArenaVector<LiveInterval*>::iterator pos) {
+ static ScopedArenaVector<LiveInterval*>::iterator RemoveIntervalAndPotentialOtherHalf(
+ ScopedArenaVector<LiveInterval*>* intervals, ScopedArenaVector<LiveInterval*>::iterator pos) {
DCHECK(intervals->begin() <= pos && pos < intervals->end());
LiveInterval* interval = *pos;
if (interval->IsLowInterval()) {
@@ -1044,7 +1052,8 @@
}
}
-void RegisterAllocatorLinearScan::AddSorted(ArenaVector<LiveInterval*>* array, LiveInterval* interval) {
+void RegisterAllocatorLinearScan::AddSorted(ScopedArenaVector<LiveInterval*>* array,
+ LiveInterval* interval) {
DCHECK(!interval->IsFixed() && !interval->HasSpillSlot());
size_t insert_at = 0;
for (size_t i = array->size(); i > 0; --i) {
@@ -1102,7 +1111,7 @@
return;
}
- ArenaVector<size_t>* spill_slots = nullptr;
+ ScopedArenaVector<size_t>* spill_slots = nullptr;
switch (interval->GetType()) {
case DataType::Type::kFloat64:
spill_slots = &double_spill_slots_;
diff --git a/compiler/optimizing/register_allocator_linear_scan.h b/compiler/optimizing/register_allocator_linear_scan.h
index 9c650a4..36788b7 100644
--- a/compiler/optimizing/register_allocator_linear_scan.h
+++ b/compiler/optimizing/register_allocator_linear_scan.h
@@ -18,7 +18,7 @@
#define ART_COMPILER_OPTIMIZING_REGISTER_ALLOCATOR_LINEAR_SCAN_H_
#include "arch/instruction_set.h"
-#include "base/arena_containers.h"
+#include "base/scoped_arena_containers.h"
#include "base/macros.h"
#include "register_allocator.h"
@@ -39,10 +39,10 @@
*/
class RegisterAllocatorLinearScan : public RegisterAllocator {
public:
- RegisterAllocatorLinearScan(ArenaAllocator* allocator,
+ RegisterAllocatorLinearScan(ScopedArenaAllocator* allocator,
CodeGenerator* codegen,
const SsaLivenessAnalysis& analysis);
- ~RegisterAllocatorLinearScan() OVERRIDE {}
+ ~RegisterAllocatorLinearScan() OVERRIDE;
void AllocateRegisters() OVERRIDE;
@@ -70,7 +70,7 @@
bool AllocateBlockedReg(LiveInterval* interval);
// Add `interval` in the given sorted list.
- static void AddSorted(ArenaVector<LiveInterval*>* array, LiveInterval* interval);
+ static void AddSorted(ScopedArenaVector<LiveInterval*>* array, LiveInterval* interval);
// Returns whether `reg` is blocked by the code generator.
bool IsBlocked(int reg) const;
@@ -107,43 +107,43 @@
// List of intervals for core registers that must be processed, ordered by start
// position. Last entry is the interval that has the lowest start position.
// This list is initially populated before doing the linear scan.
- ArenaVector<LiveInterval*> unhandled_core_intervals_;
+ ScopedArenaVector<LiveInterval*> unhandled_core_intervals_;
// List of intervals for floating-point registers. Same comments as above.
- ArenaVector<LiveInterval*> unhandled_fp_intervals_;
+ ScopedArenaVector<LiveInterval*> unhandled_fp_intervals_;
// Currently processed list of unhandled intervals. Either `unhandled_core_intervals_`
// or `unhandled_fp_intervals_`.
- ArenaVector<LiveInterval*>* unhandled_;
+ ScopedArenaVector<LiveInterval*>* unhandled_;
// List of intervals that have been processed.
- ArenaVector<LiveInterval*> handled_;
+ ScopedArenaVector<LiveInterval*> handled_;
// List of intervals that are currently active when processing a new live interval.
// That is, they have a live range that spans the start of the new interval.
- ArenaVector<LiveInterval*> active_;
+ ScopedArenaVector<LiveInterval*> active_;
// List of intervals that are currently inactive when processing a new live interval.
// That is, they have a lifetime hole that spans the start of the new interval.
- ArenaVector<LiveInterval*> inactive_;
+ ScopedArenaVector<LiveInterval*> inactive_;
// Fixed intervals for physical registers. Such intervals cover the positions
// where an instruction requires a specific register.
- ArenaVector<LiveInterval*> physical_core_register_intervals_;
- ArenaVector<LiveInterval*> physical_fp_register_intervals_;
+ ScopedArenaVector<LiveInterval*> physical_core_register_intervals_;
+ ScopedArenaVector<LiveInterval*> physical_fp_register_intervals_;
// Intervals for temporaries. Such intervals cover the positions
// where an instruction requires a temporary.
- ArenaVector<LiveInterval*> temp_intervals_;
+ ScopedArenaVector<LiveInterval*> temp_intervals_;
// The spill slots allocated for live intervals. We ensure spill slots
// are typed to avoid (1) doing moves and swaps between two different kinds
// of registers, and (2) swapping between a single stack slot and a double
// stack slot. This simplifies the parallel move resolver.
- ArenaVector<size_t> int_spill_slots_;
- ArenaVector<size_t> long_spill_slots_;
- ArenaVector<size_t> float_spill_slots_;
- ArenaVector<size_t> double_spill_slots_;
+ ScopedArenaVector<size_t> int_spill_slots_;
+ ScopedArenaVector<size_t> long_spill_slots_;
+ ScopedArenaVector<size_t> float_spill_slots_;
+ ScopedArenaVector<size_t> double_spill_slots_;
// Spill slots allocated to catch phis. This category is special-cased because
// (1) slots are allocated prior to linear scan and in reverse linear order,
@@ -151,7 +151,7 @@
size_t catch_phi_spill_slots_;
// Instructions that need a safepoint.
- ArenaVector<HInstruction*> safepoints_;
+ ScopedArenaVector<HInstruction*> safepoints_;
// True if processing core registers. False if processing floating
// point registers.
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index 9c8b1df..69ed8c7 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -58,6 +58,16 @@
HGraph* BuildTwoSubs(HInstruction** first_sub, HInstruction** second_sub);
HGraph* BuildDiv(HInstruction** div);
void ExpectedExactInRegisterAndSameOutputHint(Strategy strategy);
+
+ bool ValidateIntervals(const ScopedArenaVector<LiveInterval*>& intervals,
+ const CodeGenerator& codegen) {
+ return RegisterAllocator::ValidateIntervals(ArrayRef<LiveInterval* const>(intervals),
+ /* number_of_spill_slots */ 0u,
+ /* number_of_out_slots */ 0u,
+ codegen,
+ /* processing_core_registers */ true,
+ /* log_fatal_on_failure */ false);
+ }
};
// This macro should include all register allocation strategies that should be tested.
@@ -74,10 +84,10 @@
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
- RegisterAllocator* register_allocator =
- RegisterAllocator::Create(GetAllocator(), &codegen, liveness, strategy);
+ std::unique_ptr<RegisterAllocator> register_allocator =
+ RegisterAllocator::Create(GetScopedAllocator(), &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
return register_allocator->Validate(false);
}
@@ -91,85 +101,74 @@
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- ArenaVector<LiveInterval*> intervals(GetAllocator()->Adapter());
+ ScopedArenaVector<LiveInterval*> intervals(GetScopedAllocator()->Adapter());
// Test with two intervals of the same range.
{
static constexpr size_t ranges[][2] = {{0, 42}};
- intervals.push_back(BuildInterval(ranges, arraysize(ranges), GetAllocator(), 0));
- intervals.push_back(BuildInterval(ranges, arraysize(ranges), GetAllocator(), 1));
- ASSERT_TRUE(RegisterAllocator::ValidateIntervals(
- intervals, 0, 0, codegen, GetAllocator(), true, false));
+ intervals.push_back(BuildInterval(ranges, arraysize(ranges), GetScopedAllocator(), 0));
+ intervals.push_back(BuildInterval(ranges, arraysize(ranges), GetScopedAllocator(), 1));
+ ASSERT_TRUE(ValidateIntervals(intervals, codegen));
intervals[1]->SetRegister(0);
- ASSERT_FALSE(RegisterAllocator::ValidateIntervals(
- intervals, 0, 0, codegen, GetAllocator(), true, false));
+ ASSERT_FALSE(ValidateIntervals(intervals, codegen));
intervals.clear();
}
// Test with two non-intersecting intervals.
{
static constexpr size_t ranges1[][2] = {{0, 42}};
- intervals.push_back(BuildInterval(ranges1, arraysize(ranges1), GetAllocator(), 0));
+ intervals.push_back(BuildInterval(ranges1, arraysize(ranges1), GetScopedAllocator(), 0));
static constexpr size_t ranges2[][2] = {{42, 43}};
- intervals.push_back(BuildInterval(ranges2, arraysize(ranges2), GetAllocator(), 1));
- ASSERT_TRUE(RegisterAllocator::ValidateIntervals(
- intervals, 0, 0, codegen, GetAllocator(), true, false));
+ intervals.push_back(BuildInterval(ranges2, arraysize(ranges2), GetScopedAllocator(), 1));
+ ASSERT_TRUE(ValidateIntervals(intervals, codegen));
intervals[1]->SetRegister(0);
- ASSERT_TRUE(RegisterAllocator::ValidateIntervals(
- intervals, 0, 0, codegen, GetAllocator(), true, false));
+ ASSERT_TRUE(ValidateIntervals(intervals, codegen));
intervals.clear();
}
// Test with two non-intersecting intervals, with one with a lifetime hole.
{
static constexpr size_t ranges1[][2] = {{0, 42}, {45, 48}};
- intervals.push_back(BuildInterval(ranges1, arraysize(ranges1), GetAllocator(), 0));
+ intervals.push_back(BuildInterval(ranges1, arraysize(ranges1), GetScopedAllocator(), 0));
static constexpr size_t ranges2[][2] = {{42, 43}};
- intervals.push_back(BuildInterval(ranges2, arraysize(ranges2), GetAllocator(), 1));
- ASSERT_TRUE(RegisterAllocator::ValidateIntervals(
- intervals, 0, 0, codegen, GetAllocator(), true, false));
+ intervals.push_back(BuildInterval(ranges2, arraysize(ranges2), GetScopedAllocator(), 1));
+ ASSERT_TRUE(ValidateIntervals(intervals, codegen));
intervals[1]->SetRegister(0);
- ASSERT_TRUE(RegisterAllocator::ValidateIntervals(
- intervals, 0, 0, codegen, GetAllocator(), true, false));
+ ASSERT_TRUE(ValidateIntervals(intervals, codegen));
intervals.clear();
}
// Test with intersecting intervals.
{
static constexpr size_t ranges1[][2] = {{0, 42}, {44, 48}};
- intervals.push_back(BuildInterval(ranges1, arraysize(ranges1), GetAllocator(), 0));
+ intervals.push_back(BuildInterval(ranges1, arraysize(ranges1), GetScopedAllocator(), 0));
static constexpr size_t ranges2[][2] = {{42, 47}};
- intervals.push_back(BuildInterval(ranges2, arraysize(ranges2), GetAllocator(), 1));
- ASSERT_TRUE(RegisterAllocator::ValidateIntervals(
- intervals, 0, 0, codegen, GetAllocator(), true, false));
+ intervals.push_back(BuildInterval(ranges2, arraysize(ranges2), GetScopedAllocator(), 1));
+ ASSERT_TRUE(ValidateIntervals(intervals, codegen));
intervals[1]->SetRegister(0);
- ASSERT_FALSE(RegisterAllocator::ValidateIntervals(
- intervals, 0, 0, codegen, GetAllocator(), true, false));
+ ASSERT_FALSE(ValidateIntervals(intervals, codegen));
intervals.clear();
}
// Test with siblings.
{
static constexpr size_t ranges1[][2] = {{0, 42}, {44, 48}};
- intervals.push_back(BuildInterval(ranges1, arraysize(ranges1), GetAllocator(), 0));
+ intervals.push_back(BuildInterval(ranges1, arraysize(ranges1), GetScopedAllocator(), 0));
intervals[0]->SplitAt(43);
static constexpr size_t ranges2[][2] = {{42, 47}};
- intervals.push_back(BuildInterval(ranges2, arraysize(ranges2), GetAllocator(), 1));
- ASSERT_TRUE(RegisterAllocator::ValidateIntervals(
- intervals, 0, 0, codegen, GetAllocator(), true, false));
+ intervals.push_back(BuildInterval(ranges2, arraysize(ranges2), GetScopedAllocator(), 1));
+ ASSERT_TRUE(ValidateIntervals(intervals, codegen));
intervals[1]->SetRegister(0);
// Sibling of the first interval has no register allocated to it.
- ASSERT_TRUE(RegisterAllocator::ValidateIntervals(
- intervals, 0, 0, codegen, GetAllocator(), true, false));
+ ASSERT_TRUE(ValidateIntervals(intervals, codegen));
intervals[0]->GetNextSibling()->SetRegister(0);
- ASSERT_FALSE(RegisterAllocator::ValidateIntervals(
- intervals, 0, 0, codegen, GetAllocator(), true, false));
+ ASSERT_FALSE(ValidateIntervals(intervals, codegen));
}
}
@@ -328,10 +327,10 @@
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
- RegisterAllocator* register_allocator =
- RegisterAllocator::Create(GetAllocator(), &codegen, liveness, strategy);
+ std::unique_ptr<RegisterAllocator> register_allocator =
+ RegisterAllocator::Create(GetScopedAllocator(), &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
ASSERT_TRUE(register_allocator->Validate(false));
@@ -363,7 +362,7 @@
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
HXor* first_xor = graph->GetBlocks()[1]->GetFirstInstruction()->AsXor();
@@ -416,10 +415,10 @@
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
- RegisterAllocator* register_allocator =
- RegisterAllocator::Create(GetAllocator(), &codegen, liveness, strategy);
+ std::unique_ptr<RegisterAllocator> register_allocator =
+ RegisterAllocator::Create(GetScopedAllocator(), &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
ASSERT_TRUE(register_allocator->Validate(false));
}
@@ -442,9 +441,9 @@
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
- RegisterAllocatorLinearScan register_allocator(GetAllocator(), &codegen, liveness);
+ RegisterAllocatorLinearScan register_allocator(GetScopedAllocator(), &codegen, liveness);
// Add an artificial range to cover the temps that will be put in the unhandled list.
LiveInterval* unhandled = graph->GetEntryBlock()->GetFirstInstruction()->GetLiveInterval();
@@ -464,15 +463,15 @@
// Put the one that should be picked in the middle of the inactive list to ensure
// we do not depend on an order.
LiveInterval* interval =
- LiveInterval::MakeFixedInterval(GetAllocator(), 0, DataType::Type::kInt32);
+ LiveInterval::MakeFixedInterval(GetScopedAllocator(), 0, DataType::Type::kInt32);
interval->AddRange(40, 50);
register_allocator.inactive_.push_back(interval);
- interval = LiveInterval::MakeFixedInterval(GetAllocator(), 0, DataType::Type::kInt32);
+ interval = LiveInterval::MakeFixedInterval(GetScopedAllocator(), 0, DataType::Type::kInt32);
interval->AddRange(20, 30);
register_allocator.inactive_.push_back(interval);
- interval = LiveInterval::MakeFixedInterval(GetAllocator(), 0, DataType::Type::kInt32);
+ interval = LiveInterval::MakeFixedInterval(GetScopedAllocator(), 0, DataType::Type::kInt32);
interval->AddRange(60, 70);
register_allocator.inactive_.push_back(interval);
@@ -570,12 +569,12 @@
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
// Check that the register allocator is deterministic.
- RegisterAllocator* register_allocator =
- RegisterAllocator::Create(GetAllocator(), &codegen, liveness, strategy);
+ std::unique_ptr<RegisterAllocator> register_allocator =
+ RegisterAllocator::Create(GetScopedAllocator(), &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
ASSERT_EQ(input1->GetLiveInterval()->GetRegister(), 0);
@@ -588,14 +587,14 @@
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
// Set the phi to a specific register, and check that the inputs get allocated
// the same register.
phi->GetLocations()->UpdateOut(Location::RegisterLocation(2));
- RegisterAllocator* register_allocator =
- RegisterAllocator::Create(GetAllocator(), &codegen, liveness, strategy);
+ std::unique_ptr<RegisterAllocator> register_allocator =
+ RegisterAllocator::Create(GetScopedAllocator(), &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
ASSERT_EQ(input1->GetLiveInterval()->GetRegister(), 2);
@@ -608,14 +607,14 @@
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
// Set input1 to a specific register, and check that the phi and other input get allocated
// the same register.
input1->GetLocations()->UpdateOut(Location::RegisterLocation(2));
- RegisterAllocator* register_allocator =
- RegisterAllocator::Create(GetAllocator(), &codegen, liveness, strategy);
+ std::unique_ptr<RegisterAllocator> register_allocator =
+ RegisterAllocator::Create(GetScopedAllocator(), &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
ASSERT_EQ(input1->GetLiveInterval()->GetRegister(), 2);
@@ -628,14 +627,14 @@
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
// Set input2 to a specific register, and check that the phi and other input get allocated
// the same register.
input2->GetLocations()->UpdateOut(Location::RegisterLocation(2));
- RegisterAllocator* register_allocator =
- RegisterAllocator::Create(GetAllocator(), &codegen, liveness, strategy);
+ std::unique_ptr<RegisterAllocator> register_allocator =
+ RegisterAllocator::Create(GetScopedAllocator(), &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
ASSERT_EQ(input1->GetLiveInterval()->GetRegister(), 2);
@@ -693,11 +692,11 @@
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
- RegisterAllocator* register_allocator =
- RegisterAllocator::Create(GetAllocator(), &codegen, liveness, strategy);
+ std::unique_ptr<RegisterAllocator> register_allocator =
+ RegisterAllocator::Create(GetScopedAllocator(), &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
// Sanity check that in normal conditions, the register should be hinted to 0 (EAX).
@@ -709,15 +708,15 @@
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
// Check that the field gets put in the register expected by its use.
// Don't use SetInAt because we are overriding an already allocated location.
ret->GetLocations()->inputs_[0] = Location::RegisterLocation(2);
- RegisterAllocator* register_allocator =
- RegisterAllocator::Create(GetAllocator(), &codegen, liveness, strategy);
+ std::unique_ptr<RegisterAllocator> register_allocator =
+ RegisterAllocator::Create(GetScopedAllocator(), &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
ASSERT_EQ(field->GetLiveInterval()->GetRegister(), 2);
@@ -765,11 +764,11 @@
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
- RegisterAllocator* register_allocator =
- RegisterAllocator::Create(GetAllocator(), &codegen, liveness, strategy);
+ std::unique_ptr<RegisterAllocator> register_allocator =
+ RegisterAllocator::Create(GetScopedAllocator(), &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
// Sanity check that in normal conditions, the registers are the same.
@@ -782,7 +781,7 @@
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
// Check that both subs get the same register.
@@ -791,8 +790,8 @@
ASSERT_EQ(first_sub->GetLocations()->Out().GetPolicy(), Location::kSameAsFirstInput);
ASSERT_EQ(second_sub->GetLocations()->Out().GetPolicy(), Location::kSameAsFirstInput);
- RegisterAllocator* register_allocator =
- RegisterAllocator::Create(GetAllocator(), &codegen, liveness, strategy);
+ std::unique_ptr<RegisterAllocator> register_allocator =
+ RegisterAllocator::Create(GetScopedAllocator(), &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
ASSERT_EQ(first_sub->GetLiveInterval()->GetRegister(), 2);
@@ -838,11 +837,11 @@
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
- RegisterAllocator* register_allocator =
- RegisterAllocator::Create(GetAllocator(), &codegen, liveness, strategy);
+ std::unique_ptr<RegisterAllocator> register_allocator =
+ RegisterAllocator::Create(GetScopedAllocator(), &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
// div on x86 requires its first input in eax and the output be the same as the first input.
@@ -892,14 +891,14 @@
LocationSummary* locations = new (GetAllocator()) LocationSummary(user, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
static constexpr size_t phi_ranges[][2] = {{20, 30}};
- BuildInterval(phi_ranges, arraysize(phi_ranges), GetAllocator(), -1, user);
+ BuildInterval(phi_ranges, arraysize(phi_ranges), GetScopedAllocator(), -1, user);
// Create an interval with lifetime holes.
static constexpr size_t ranges1[][2] = {{0, 2}, {4, 6}, {8, 10}};
- LiveInterval* first = BuildInterval(ranges1, arraysize(ranges1), GetAllocator(), -1, one);
- first->uses_.push_front(*new(GetAllocator()) UsePosition(user, false, 8));
- first->uses_.push_front(*new(GetAllocator()) UsePosition(user, false, 7));
- first->uses_.push_front(*new(GetAllocator()) UsePosition(user, false, 6));
+ LiveInterval* first = BuildInterval(ranges1, arraysize(ranges1), GetScopedAllocator(), -1, one);
+ first->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, false, 8));
+ first->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, false, 7));
+ first->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, false, 6));
locations = new (GetAllocator()) LocationSummary(first->GetDefinedBy(), LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
@@ -908,7 +907,7 @@
// Create an interval that conflicts with the next interval, to force the next
// interval to call `AllocateBlockedReg`.
static constexpr size_t ranges2[][2] = {{2, 4}};
- LiveInterval* second = BuildInterval(ranges2, arraysize(ranges2), GetAllocator(), -1, two);
+ LiveInterval* second = BuildInterval(ranges2, arraysize(ranges2), GetScopedAllocator(), -1, two);
locations =
new (GetAllocator()) LocationSummary(second->GetDefinedBy(), LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
@@ -919,10 +918,10 @@
// "[0, 2(, [4, 6(" in the list of handled intervals, even though we haven't processed intervals
// before lifetime position 6 yet.
static constexpr size_t ranges3[][2] = {{2, 4}, {8, 10}};
- LiveInterval* third = BuildInterval(ranges3, arraysize(ranges3), GetAllocator(), -1, three);
- third->uses_.push_front(*new(GetAllocator()) UsePosition(user, false, 8));
- third->uses_.push_front(*new(GetAllocator()) UsePosition(user, false, 4));
- third->uses_.push_front(*new(GetAllocator()) UsePosition(user, false, 3));
+ LiveInterval* third = BuildInterval(ranges3, arraysize(ranges3), GetScopedAllocator(), -1, three);
+ third->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, false, 8));
+ third->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, false, 4));
+ third->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, false, 3));
locations = new (GetAllocator()) LocationSummary(third->GetDefinedBy(), LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
third = third->SplitAt(3);
@@ -930,7 +929,7 @@
// Because the first part of the split interval was considered handled, this interval
// was free to allocate the same register, even though it conflicts with it.
static constexpr size_t ranges4[][2] = {{4, 6}};
- LiveInterval* fourth = BuildInterval(ranges4, arraysize(ranges4), GetAllocator(), -1, four);
+ LiveInterval* fourth = BuildInterval(ranges4, arraysize(ranges4), GetScopedAllocator(), -1, four);
locations =
new (GetAllocator()) LocationSummary(fourth->GetDefinedBy(), LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
@@ -938,13 +937,13 @@
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
// Populate the instructions in the liveness object, to please the register allocator.
for (size_t i = 0; i < 32; ++i) {
liveness.instructions_from_lifetime_position_.push_back(user);
}
- RegisterAllocatorLinearScan register_allocator(GetAllocator(), &codegen, liveness);
+ RegisterAllocatorLinearScan register_allocator(GetScopedAllocator(), &codegen, liveness);
register_allocator.unhandled_core_intervals_.push_back(fourth);
register_allocator.unhandled_core_intervals_.push_back(third);
register_allocator.unhandled_core_intervals_.push_back(second);
@@ -958,13 +957,12 @@
register_allocator.LinearScan();
// Test that there is no conflicts between intervals.
- ArenaVector<LiveInterval*> intervals(GetAllocator()->Adapter());
+ ScopedArenaVector<LiveInterval*> intervals(GetScopedAllocator()->Adapter());
intervals.push_back(first);
intervals.push_back(second);
intervals.push_back(third);
intervals.push_back(fourth);
- ASSERT_TRUE(RegisterAllocator::ValidateIntervals(
- intervals, 0, 0, codegen, GetAllocator(), true, false));
+ ASSERT_TRUE(ValidateIntervals(intervals, codegen));
}
} // namespace art
diff --git a/compiler/optimizing/scheduler.cc b/compiler/optimizing/scheduler.cc
index 9acf6d2..c673d54 100644
--- a/compiler/optimizing/scheduler.cc
+++ b/compiler/optimizing/scheduler.cc
@@ -564,7 +564,7 @@
}
void HScheduler::Schedule(HBasicBlock* block) {
- ScopedArenaVector<SchedulingNode*> scheduling_nodes(arena_->Adapter(kArenaAllocScheduler));
+ ScopedArenaVector<SchedulingNode*> scheduling_nodes(allocator_->Adapter(kArenaAllocScheduler));
// Build the scheduling graph.
scheduling_graph_.Clear();
@@ -595,7 +595,7 @@
}
}
- ScopedArenaVector<SchedulingNode*> initial_candidates(arena_->Adapter(kArenaAllocScheduler));
+ ScopedArenaVector<SchedulingNode*> initial_candidates(allocator_->Adapter(kArenaAllocScheduler));
if (kDumpDotSchedulingGraphs) {
// Remember the list of initial candidates for debug output purposes.
initial_candidates.assign(candidates_.begin(), candidates_.end());
diff --git a/compiler/optimizing/scheduler.h b/compiler/optimizing/scheduler.h
index 493ec0b..3efd26a 100644
--- a/compiler/optimizing/scheduler.h
+++ b/compiler/optimizing/scheduler.h
@@ -156,14 +156,14 @@
*/
class SchedulingNode : public DeletableArenaObject<kArenaAllocScheduler> {
public:
- SchedulingNode(HInstruction* instr, ScopedArenaAllocator* arena, bool is_scheduling_barrier)
+ SchedulingNode(HInstruction* instr, ScopedArenaAllocator* allocator, bool is_scheduling_barrier)
: latency_(0),
internal_latency_(0),
critical_path_(0),
instruction_(instr),
is_scheduling_barrier_(is_scheduling_barrier),
- data_predecessors_(arena->Adapter(kArenaAllocScheduler)),
- other_predecessors_(arena->Adapter(kArenaAllocScheduler)),
+ data_predecessors_(allocator->Adapter(kArenaAllocScheduler)),
+ other_predecessors_(allocator->Adapter(kArenaAllocScheduler)),
num_unscheduled_successors_(0) {
data_predecessors_.reserve(kPreallocatedPredecessors);
}
@@ -251,9 +251,9 @@
*/
class SchedulingGraph : public ValueObject {
public:
- SchedulingGraph(const HScheduler* scheduler, ScopedArenaAllocator* arena)
+ SchedulingGraph(const HScheduler* scheduler, ScopedArenaAllocator* allocator)
: scheduler_(scheduler),
- arena_(arena),
+ arena_(allocator),
contains_scheduling_barrier_(false),
nodes_map_(arena_->Adapter(kArenaAllocScheduler)),
heap_location_collector_(nullptr) {}
@@ -434,16 +434,16 @@
class HScheduler {
public:
- HScheduler(ScopedArenaAllocator* arena,
+ HScheduler(ScopedArenaAllocator* allocator,
SchedulingLatencyVisitor* latency_visitor,
SchedulingNodeSelector* selector)
- : arena_(arena),
+ : allocator_(allocator),
latency_visitor_(latency_visitor),
selector_(selector),
only_optimize_loop_blocks_(true),
- scheduling_graph_(this, arena),
+ scheduling_graph_(this, allocator),
cursor_(nullptr),
- candidates_(arena_->Adapter(kArenaAllocScheduler)) {}
+ candidates_(allocator_->Adapter(kArenaAllocScheduler)) {}
virtual ~HScheduler() {}
void Schedule(HGraph* graph);
@@ -471,7 +471,7 @@
node->SetInternalLatency(latency_visitor_->GetLastVisitedInternalLatency());
}
- ScopedArenaAllocator* const arena_;
+ ScopedArenaAllocator* const allocator_;
SchedulingLatencyVisitor* const latency_visitor_;
SchedulingNodeSelector* const selector_;
bool only_optimize_loop_blocks_;
diff --git a/compiler/optimizing/scheduler_arm.h b/compiler/optimizing/scheduler_arm.h
index 62cd75c..0cb8684 100644
--- a/compiler/optimizing/scheduler_arm.h
+++ b/compiler/optimizing/scheduler_arm.h
@@ -137,10 +137,10 @@
class HSchedulerARM : public HScheduler {
public:
- HSchedulerARM(ScopedArenaAllocator* arena,
+ HSchedulerARM(ScopedArenaAllocator* allocator,
SchedulingNodeSelector* selector,
SchedulingLatencyVisitorARM* arm_latency_visitor)
- : HScheduler(arena, arm_latency_visitor, selector) {}
+ : HScheduler(allocator, arm_latency_visitor, selector) {}
~HSchedulerARM() OVERRIDE {}
bool IsSchedulable(const HInstruction* instruction) const OVERRIDE {
diff --git a/compiler/optimizing/scheduler_arm64.h b/compiler/optimizing/scheduler_arm64.h
index 6682c66..32f161f 100644
--- a/compiler/optimizing/scheduler_arm64.h
+++ b/compiler/optimizing/scheduler_arm64.h
@@ -131,8 +131,8 @@
class HSchedulerARM64 : public HScheduler {
public:
- HSchedulerARM64(ScopedArenaAllocator* arena, SchedulingNodeSelector* selector)
- : HScheduler(arena, &arm64_latency_visitor_, selector) {}
+ HSchedulerARM64(ScopedArenaAllocator* allocator, SchedulingNodeSelector* selector)
+ : HScheduler(allocator, &arm64_latency_visitor_, selector) {}
~HSchedulerARM64() OVERRIDE {}
bool IsSchedulable(const HInstruction* instruction) const OVERRIDE {
diff --git a/compiler/optimizing/scheduler_test.cc b/compiler/optimizing/scheduler_test.cc
index 7e1ec70..dfc1633 100644
--- a/compiler/optimizing/scheduler_test.cc
+++ b/compiler/optimizing/scheduler_test.cc
@@ -146,8 +146,7 @@
environment->SetRawEnvAt(1, mul);
mul->AddEnvUseAt(div_check->GetEnvironment(), 1);
- ScopedArenaAllocator allocator(graph_->GetArenaStack());
- SchedulingGraph scheduling_graph(scheduler, &allocator);
+ SchedulingGraph scheduling_graph(scheduler, GetScopedAllocator());
// Instructions must be inserted in reverse order into the scheduling graph.
for (HInstruction* instr : ReverseRange(block_instructions)) {
scheduling_graph.AddNode(instr);
@@ -273,8 +272,7 @@
entry->AddInstruction(instr);
}
- ScopedArenaAllocator allocator(graph_->GetArenaStack());
- SchedulingGraph scheduling_graph(scheduler, &allocator);
+ SchedulingGraph scheduling_graph(scheduler, GetScopedAllocator());
HeapLocationCollector heap_location_collector(graph_);
heap_location_collector.VisitBasicBlock(entry);
heap_location_collector.BuildAliasingMatrix();
@@ -352,15 +350,13 @@
#if defined(ART_ENABLE_CODEGEN_arm64)
TEST_F(SchedulerTest, DependencyGraphAndSchedulerARM64) {
CriticalPathSchedulingNodeSelector critical_path_selector;
- ScopedArenaAllocator allocator(GetArenaStack());
- arm64::HSchedulerARM64 scheduler(&allocator, &critical_path_selector);
+ arm64::HSchedulerARM64 scheduler(GetScopedAllocator(), &critical_path_selector);
TestBuildDependencyGraphAndSchedule(&scheduler);
}
TEST_F(SchedulerTest, ArrayAccessAliasingARM64) {
CriticalPathSchedulingNodeSelector critical_path_selector;
- ScopedArenaAllocator allocator(GetArenaStack());
- arm64::HSchedulerARM64 scheduler(&allocator, &critical_path_selector);
+ arm64::HSchedulerARM64 scheduler(GetScopedAllocator(), &critical_path_selector);
TestDependencyGraphOnAliasingArrayAccesses(&scheduler);
}
#endif
@@ -369,16 +365,14 @@
TEST_F(SchedulerTest, DependencyGraphAndSchedulerARM) {
CriticalPathSchedulingNodeSelector critical_path_selector;
arm::SchedulingLatencyVisitorARM arm_latency_visitor(/*CodeGenerator*/ nullptr);
- ScopedArenaAllocator allocator(GetArenaStack());
- arm::HSchedulerARM scheduler(&allocator, &critical_path_selector, &arm_latency_visitor);
+ arm::HSchedulerARM scheduler(GetScopedAllocator(), &critical_path_selector, &arm_latency_visitor);
TestBuildDependencyGraphAndSchedule(&scheduler);
}
TEST_F(SchedulerTest, ArrayAccessAliasingARM) {
CriticalPathSchedulingNodeSelector critical_path_selector;
arm::SchedulingLatencyVisitorARM arm_latency_visitor(/*CodeGenerator*/ nullptr);
- ScopedArenaAllocator allocator(GetArenaStack());
- arm::HSchedulerARM scheduler(&allocator, &critical_path_selector, &arm_latency_visitor);
+ arm::HSchedulerARM scheduler(GetScopedAllocator(), &critical_path_selector, &arm_latency_visitor);
TestDependencyGraphOnAliasingArrayAccesses(&scheduler);
}
#endif
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index fd56601..9ab7a89 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -56,7 +56,7 @@
instructions_from_ssa_index_.push_back(current);
current->SetSsaIndex(ssa_index++);
current->SetLiveInterval(
- LiveInterval::MakeInterval(graph_->GetAllocator(), current->GetType(), current));
+ LiveInterval::MakeInterval(allocator_, current->GetType(), current));
}
current->SetLifetimePosition(lifetime_position);
}
@@ -74,7 +74,7 @@
instructions_from_ssa_index_.push_back(current);
current->SetSsaIndex(ssa_index++);
current->SetLiveInterval(
- LiveInterval::MakeInterval(graph_->GetAllocator(), current->GetType(), current));
+ LiveInterval::MakeInterval(allocator_, current->GetType(), current));
}
instructions_from_lifetime_position_.push_back(current);
current->SetLifetimePosition(lifetime_position);
@@ -88,8 +88,8 @@
void SsaLivenessAnalysis::ComputeLiveness() {
for (HBasicBlock* block : graph_->GetLinearOrder()) {
- block_infos_[block->GetBlockId()] = new (graph_->GetAllocator()) BlockInfo(
- graph_->GetAllocator(), *block, number_of_ssa_values_);
+ block_infos_[block->GetBlockId()] =
+ new (allocator_) BlockInfo(allocator_, *block, number_of_ssa_values_);
}
// Compute the live ranges, as well as the initial live_in, live_out, and kill sets.
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index 0d81e9d..9800af7 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -20,6 +20,8 @@
#include <iostream>
#include "base/iteration_range.h"
+#include "base/scoped_arena_allocator.h"
+#include "base/scoped_arena_containers.h"
#include "nodes.h"
#include "utils/intrusive_forward_list.h"
@@ -32,7 +34,7 @@
class BlockInfo : public ArenaObject<kArenaAllocSsaLiveness> {
public:
- BlockInfo(ArenaAllocator* allocator, const HBasicBlock& block, size_t number_of_ssa_values)
+ BlockInfo(ScopedArenaAllocator* allocator, const HBasicBlock& block, size_t number_of_ssa_values)
: block_(block),
live_in_(allocator, number_of_ssa_values, false, kArenaAllocSsaLiveness),
live_out_(allocator, number_of_ssa_values, false, kArenaAllocSsaLiveness),
@@ -82,7 +84,7 @@
stream << "[" << start_ << "," << end_ << ")";
}
- LiveRange* Dup(ArenaAllocator* allocator) const {
+ LiveRange* Dup(ScopedArenaAllocator* allocator) const {
return new (allocator) LiveRange(
start_, end_, next_ == nullptr ? nullptr : next_->Dup(allocator));
}
@@ -135,7 +137,7 @@
return user_->GetBlock()->GetLoopInformation();
}
- UsePosition* Clone(ArenaAllocator* allocator) const {
+ UsePosition* Clone(ScopedArenaAllocator* allocator) const {
return new (allocator) UsePosition(user_, input_index_, position_);
}
@@ -180,7 +182,7 @@
stream << position_;
}
- EnvUsePosition* Clone(ArenaAllocator* allocator) const {
+ EnvUsePosition* Clone(ScopedArenaAllocator* allocator) const {
return new (allocator) EnvUsePosition(environment_, input_index_, position_);
}
@@ -261,17 +263,19 @@
*/
class LiveInterval : public ArenaObject<kArenaAllocSsaLiveness> {
public:
- static LiveInterval* MakeInterval(ArenaAllocator* allocator,
+ static LiveInterval* MakeInterval(ScopedArenaAllocator* allocator,
DataType::Type type,
HInstruction* instruction = nullptr) {
return new (allocator) LiveInterval(allocator, type, instruction);
}
- static LiveInterval* MakeFixedInterval(ArenaAllocator* allocator, int reg, DataType::Type type) {
+ static LiveInterval* MakeFixedInterval(ScopedArenaAllocator* allocator,
+ int reg,
+ DataType::Type type) {
return new (allocator) LiveInterval(allocator, type, nullptr, true, reg, false);
}
- static LiveInterval* MakeTempInterval(ArenaAllocator* allocator, DataType::Type type) {
+ static LiveInterval* MakeTempInterval(ScopedArenaAllocator* allocator, DataType::Type type) {
return new (allocator) LiveInterval(allocator, type, nullptr, false, kNoRegister, true);
}
@@ -969,7 +973,7 @@
}
private:
- LiveInterval(ArenaAllocator* allocator,
+ LiveInterval(ScopedArenaAllocator* allocator,
DataType::Type type,
HInstruction* defined_by = nullptr,
bool is_fixed = false,
@@ -1082,7 +1086,7 @@
}
}
- ArenaAllocator* const allocator_;
+ ScopedArenaAllocator* const allocator_;
// Ranges of this interval. We need a quick access to the last range to test
// for liveness (see `IsDeadAt`).
@@ -1158,15 +1162,15 @@
*/
class SsaLivenessAnalysis : public ValueObject {
public:
- SsaLivenessAnalysis(HGraph* graph, CodeGenerator* codegen)
+ SsaLivenessAnalysis(HGraph* graph, CodeGenerator* codegen, ScopedArenaAllocator* allocator)
: graph_(graph),
codegen_(codegen),
+ allocator_(allocator),
block_infos_(graph->GetBlocks().size(),
nullptr,
- graph->GetAllocator()->Adapter(kArenaAllocSsaLiveness)),
- instructions_from_ssa_index_(graph->GetAllocator()->Adapter(kArenaAllocSsaLiveness)),
- instructions_from_lifetime_position_(
- graph->GetAllocator()->Adapter(kArenaAllocSsaLiveness)),
+ allocator_->Adapter(kArenaAllocSsaLiveness)),
+ instructions_from_ssa_index_(allocator_->Adapter(kArenaAllocSsaLiveness)),
+ instructions_from_lifetime_position_(allocator_->Adapter(kArenaAllocSsaLiveness)),
number_of_ssa_values_(0) {
}
@@ -1285,13 +1289,18 @@
HGraph* const graph_;
CodeGenerator* const codegen_;
- ArenaVector<BlockInfo*> block_infos_;
+
+ // Allocator for the liveness data below. The caller owns it and must
+ // keep it alive while doing register allocation, which consumes that data.
+ ScopedArenaAllocator* allocator_;
+
+ ScopedArenaVector<BlockInfo*> block_infos_;
// Temporary array used when computing live_in, live_out, and kill sets.
- ArenaVector<HInstruction*> instructions_from_ssa_index_;
+ ScopedArenaVector<HInstruction*> instructions_from_ssa_index_;
// Temporary array used when inserting moves in the graph.
- ArenaVector<HInstruction*> instructions_from_lifetime_position_;
+ ScopedArenaVector<HInstruction*> instructions_from_lifetime_position_;
size_t number_of_ssa_values_;
ART_FRIEND_TEST(RegisterAllocatorTest, SpillInactive);
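Net effect of the header changes: liveness results now live in a caller-provided ScopedArenaAllocator instead of the HGraph arena, so the caller decides when that memory is released. A hedged sketch of the intended call pattern (the wrapper function and the GetArenaStack() accessor on HGraph are assumptions for illustration):
void RunLivenessAndRegAlloc(HGraph* graph, CodeGenerator* codegen) {
  // One scope shared by the analysis and the register allocator, so the
  // liveness data outlives allocation; everything is reclaimed at once
  // when `allocator` goes out of scope.
  ScopedArenaAllocator allocator(graph->GetArenaStack());
  SsaLivenessAnalysis liveness(graph, codegen, &allocator);
  liveness.Analyze();
  // ... run the register allocator against `liveness` here ...
}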
diff --git a/compiler/optimizing/ssa_liveness_analysis_test.cc b/compiler/optimizing/ssa_liveness_analysis_test.cc
index 82ee441..9b78e0e 100644
--- a/compiler/optimizing/ssa_liveness_analysis_test.cc
+++ b/compiler/optimizing/ssa_liveness_analysis_test.cc
@@ -75,7 +75,7 @@
block->AddInstruction(new (GetAllocator()) HExit());
graph_->BuildDominatorTree();
- SsaLivenessAnalysis ssa_analysis(graph_, codegen_.get());
+ SsaLivenessAnalysis ssa_analysis(graph_, codegen_.get(), GetScopedAllocator());
ssa_analysis.Analyze();
std::ostringstream arg_dump;
@@ -127,7 +127,7 @@
block->AddInstruction(array_set);
graph_->BuildDominatorTree();
- SsaLivenessAnalysis ssa_analysis(graph_, codegen_.get());
+ SsaLivenessAnalysis ssa_analysis(graph_, codegen_.get(), GetScopedAllocator());
ssa_analysis.Analyze();
EXPECT_FALSE(graph_->IsDebuggable());
@@ -201,7 +201,7 @@
block->AddInstruction(array_set);
graph_->BuildDominatorTree();
- SsaLivenessAnalysis ssa_analysis(graph_, codegen_.get());
+ SsaLivenessAnalysis ssa_analysis(graph_, codegen_.get(), GetScopedAllocator());
ssa_analysis.Analyze();
EXPECT_FALSE(graph_->IsDebuggable());
diff --git a/compiler/trampolines/trampoline_compiler.cc b/compiler/trampolines/trampoline_compiler.cc
index 70f290d..1e9a521 100644
--- a/compiler/trampolines/trampoline_compiler.cc
+++ b/compiler/trampolines/trampoline_compiler.cc
@@ -57,11 +57,11 @@
#endif
static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(
- ArenaAllocator* arena, EntryPointCallingConvention abi, ThreadOffset32 offset) {
+ ArenaAllocator* allocator, EntryPointCallingConvention abi, ThreadOffset32 offset) {
using vixl::aarch32::MemOperand;
using vixl::aarch32::pc;
using vixl::aarch32::r0;
- ArmVIXLAssembler assembler(arena);
+ ArmVIXLAssembler assembler(allocator);
switch (abi) {
case kInterpreterAbi: // Thread* is first argument (R0) in interpreter ABI.
@@ -98,8 +98,8 @@
#ifdef ART_ENABLE_CODEGEN_arm64
namespace arm64 {
static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(
- ArenaAllocator* arena, EntryPointCallingConvention abi, ThreadOffset64 offset) {
- Arm64Assembler assembler(arena);
+ ArenaAllocator* allocator, EntryPointCallingConvention abi, ThreadOffset64 offset) {
+ Arm64Assembler assembler(allocator);
switch (abi) {
case kInterpreterAbi: // Thread* is first argument (X0) in interpreter ABI.
@@ -137,8 +137,8 @@
#ifdef ART_ENABLE_CODEGEN_mips
namespace mips {
static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(
- ArenaAllocator* arena, EntryPointCallingConvention abi, ThreadOffset32 offset) {
- MipsAssembler assembler(arena);
+ ArenaAllocator* allocator, EntryPointCallingConvention abi, ThreadOffset32 offset) {
+ MipsAssembler assembler(allocator);
switch (abi) {
case kInterpreterAbi: // Thread* is first argument (A0) in interpreter ABI.
@@ -169,8 +169,8 @@
#ifdef ART_ENABLE_CODEGEN_mips64
namespace mips64 {
static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(
- ArenaAllocator* arena, EntryPointCallingConvention abi, ThreadOffset64 offset) {
- Mips64Assembler assembler(arena);
+ ArenaAllocator* allocator, EntryPointCallingConvention abi, ThreadOffset64 offset) {
+ Mips64Assembler assembler(allocator);
switch (abi) {
case kInterpreterAbi: // Thread* is first argument (A0) in interpreter ABI.
@@ -200,9 +200,9 @@
#ifdef ART_ENABLE_CODEGEN_x86
namespace x86 {
-static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(ArenaAllocator* arena,
+static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(ArenaAllocator* allocator,
ThreadOffset32 offset) {
- X86Assembler assembler(arena);
+ X86Assembler assembler(allocator);
// All x86 trampolines call via the Thread* held in fs.
__ fs()->jmp(Address::Absolute(offset));
@@ -221,9 +221,9 @@
#ifdef ART_ENABLE_CODEGEN_x86_64
namespace x86_64 {
-static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(ArenaAllocator* arena,
+static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(ArenaAllocator* allocator,
ThreadOffset64 offset) {
- x86_64::X86_64Assembler assembler(arena);
+ x86_64::X86_64Assembler assembler(allocator);
// All x86 trampolines call via the Thread* held in gs.
__ gs()->jmp(x86_64::Address::Absolute(offset, true));
diff --git a/compiler/utils/arm/assembler_arm_vixl.h b/compiler/utils/arm/assembler_arm_vixl.h
index 9c11fd3..0e73e6b 100644
--- a/compiler/utils/arm/assembler_arm_vixl.h
+++ b/compiler/utils/arm/assembler_arm_vixl.h
@@ -151,8 +151,8 @@
private:
class ArmException;
public:
- explicit ArmVIXLAssembler(ArenaAllocator* arena)
- : Assembler(arena) {
+ explicit ArmVIXLAssembler(ArenaAllocator* allocator)
+ : Assembler(allocator) {
// Use Thumb2 instruction set.
vixl_masm_.UseT32();
}
diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.h b/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
index 13b52e5..e239004 100644
--- a/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
+++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
@@ -35,9 +35,9 @@
private:
class ArmException;
public:
- explicit ArmVIXLJNIMacroAssembler(ArenaAllocator* arena)
- : JNIMacroAssemblerFwd(arena),
- exception_blocks_(arena->Adapter(kArenaAllocAssembler)) {}
+ explicit ArmVIXLJNIMacroAssembler(ArenaAllocator* allocator)
+ : JNIMacroAssemblerFwd(allocator),
+ exception_blocks_(allocator->Adapter(kArenaAllocAssembler)) {}
virtual ~ArmVIXLJNIMacroAssembler() {}
void FinalizeCode() OVERRIDE;
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index 6b28363..e5ec24a 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -61,7 +61,7 @@
class Arm64Assembler FINAL : public Assembler {
public:
- explicit Arm64Assembler(ArenaAllocator* arena) : Assembler(arena) {}
+ explicit Arm64Assembler(ArenaAllocator* allocator) : Assembler(allocator) {}
virtual ~Arm64Assembler() {}
diff --git a/compiler/utils/arm64/jni_macro_assembler_arm64.h b/compiler/utils/arm64/jni_macro_assembler_arm64.h
index c993bbf..fda87aa 100644
--- a/compiler/utils/arm64/jni_macro_assembler_arm64.h
+++ b/compiler/utils/arm64/jni_macro_assembler_arm64.h
@@ -40,9 +40,9 @@
class Arm64JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<Arm64Assembler, PointerSize::k64> {
public:
- explicit Arm64JNIMacroAssembler(ArenaAllocator* arena)
- : JNIMacroAssemblerFwd(arena),
- exception_blocks_(arena->Adapter(kArenaAllocAssembler)) {}
+ explicit Arm64JNIMacroAssembler(ArenaAllocator* allocator)
+ : JNIMacroAssemblerFwd(allocator),
+ exception_blocks_(allocator->Adapter(kArenaAllocAssembler)) {}
~Arm64JNIMacroAssembler();
diff --git a/compiler/utils/assembler_test.h b/compiler/utils/assembler_test.h
index 227954e..11a9b91 100644
--- a/compiler/utils/assembler_test.h
+++ b/compiler/utils/assembler_test.h
@@ -741,8 +741,8 @@
}
// Override this to set up any architecture-specific things, e.g., CPU revision.
- virtual Ass* CreateAssembler(ArenaAllocator* arena) {
- return new (arena) Ass(arena);
+ virtual Ass* CreateAssembler(ArenaAllocator* allocator) {
+ return new (allocator) Ass(allocator);
}
// Override this to set up any architecture-specific things, e.g., register vectors.
diff --git a/compiler/utils/jni_macro_assembler.cc b/compiler/utils/jni_macro_assembler.cc
index 3ac6c3c..0616b35 100644
--- a/compiler/utils/jni_macro_assembler.cc
+++ b/compiler/utils/jni_macro_assembler.cc
@@ -47,7 +47,7 @@
template <>
MacroAsm32UniquePtr JNIMacroAssembler<PointerSize::k32>::Create(
- ArenaAllocator* arena,
+ ArenaAllocator* allocator,
InstructionSet instruction_set,
const InstructionSetFeatures* instruction_set_features) {
#ifndef ART_ENABLE_CODEGEN_mips
@@ -58,19 +58,19 @@
#ifdef ART_ENABLE_CODEGEN_arm
case kArm:
case kThumb2:
- return MacroAsm32UniquePtr(new (arena) arm::ArmVIXLJNIMacroAssembler(arena));
+ return MacroAsm32UniquePtr(new (allocator) arm::ArmVIXLJNIMacroAssembler(allocator));
#endif
#ifdef ART_ENABLE_CODEGEN_mips
case kMips:
- return MacroAsm32UniquePtr(new (arena) mips::MipsAssembler(
- arena,
+ return MacroAsm32UniquePtr(new (allocator) mips::MipsAssembler(
+ allocator,
instruction_set_features != nullptr
? instruction_set_features->AsMipsInstructionSetFeatures()
: nullptr));
#endif
#ifdef ART_ENABLE_CODEGEN_x86
case kX86:
- return MacroAsm32UniquePtr(new (arena) x86::X86JNIMacroAssembler(arena));
+ return MacroAsm32UniquePtr(new (allocator) x86::X86JNIMacroAssembler(allocator));
#endif
default:
LOG(FATAL) << "Unknown/unsupported 4B InstructionSet: " << instruction_set;
@@ -82,7 +82,7 @@
template <>
MacroAsm64UniquePtr JNIMacroAssembler<PointerSize::k64>::Create(
- ArenaAllocator* arena,
+ ArenaAllocator* allocator,
InstructionSet instruction_set,
const InstructionSetFeatures* instruction_set_features) {
#ifndef ART_ENABLE_CODEGEN_mips64
@@ -92,22 +92,22 @@
switch (instruction_set) {
#ifdef ART_ENABLE_CODEGEN_arm64
case kArm64:
- return MacroAsm64UniquePtr(new (arena) arm64::Arm64JNIMacroAssembler(arena));
+ return MacroAsm64UniquePtr(new (allocator) arm64::Arm64JNIMacroAssembler(allocator));
#endif
#ifdef ART_ENABLE_CODEGEN_mips64
case kMips64:
- return MacroAsm64UniquePtr(new (arena) mips64::Mips64Assembler(
- arena,
+ return MacroAsm64UniquePtr(new (allocator) mips64::Mips64Assembler(
+ allocator,
instruction_set_features != nullptr
? instruction_set_features->AsMips64InstructionSetFeatures()
: nullptr));
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
case kX86_64:
- return MacroAsm64UniquePtr(new (arena) x86_64::X86_64JNIMacroAssembler(arena));
+ return MacroAsm64UniquePtr(new (allocator) x86_64::X86_64JNIMacroAssembler(allocator));
#endif
default:
- UNUSED(arena);
+ UNUSED(allocator);
LOG(FATAL) << "Unknown/unsupported 8B InstructionSet: " << instruction_set;
UNREACHABLE();
}
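Callers are unaffected by the parameter rename in these factories; for reference, a minimal sketch of driving one (pool/allocator setup is illustrative):
ArenaPool pool;
ArenaAllocator allocator(&pool);
// Pick the arm64 backend; unsupported instruction sets hit LOG(FATAL),
// so the returned pointer can be used directly.
std::unique_ptr<JNIMacroAssembler<PointerSize::k64>> jni_asm =
    JNIMacroAssembler<PointerSize::k64>::Create(&allocator, kArm64);
// ... emit code, then jni_asm->FinalizeCode() as before ...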
diff --git a/compiler/utils/jni_macro_assembler.h b/compiler/utils/jni_macro_assembler.h
index 72f1ce0..0fc1353 100644
--- a/compiler/utils/jni_macro_assembler.h
+++ b/compiler/utils/jni_macro_assembler.h
@@ -46,7 +46,7 @@
class JNIMacroAssembler : public DeletableArenaObject<kArenaAllocAssembler> {
public:
static std::unique_ptr<JNIMacroAssembler<kPointerSize>> Create(
- ArenaAllocator* arena,
+ ArenaAllocator* allocator,
InstructionSet instruction_set,
const InstructionSetFeatures* instruction_set_features = nullptr);
@@ -275,7 +275,7 @@
}
protected:
- explicit JNIMacroAssemblerFwd(ArenaAllocator* arena) : asm_(arena) {}
+ explicit JNIMacroAssemblerFwd(ArenaAllocator* allocator) : asm_(allocator) {}
T asm_;
};
diff --git a/compiler/utils/jni_macro_assembler_test.h b/compiler/utils/jni_macro_assembler_test.h
index 6129680..ba95e21 100644
--- a/compiler/utils/jni_macro_assembler_test.h
+++ b/compiler/utils/jni_macro_assembler_test.h
@@ -80,8 +80,8 @@
}
// Override this to set up any architecture-specific things, e.g., CPU revision.
- virtual Ass* CreateAssembler(ArenaAllocator* arena) {
- return new (arena) Ass(arena);
+ virtual Ass* CreateAssembler(ArenaAllocator* allocator) {
+ return new (allocator) Ass(allocator);
}
// Override this to set up any architecture-specific things, e.g., register vectors.
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index e82693a..57b3edd 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -192,16 +192,16 @@
public:
using JNIBase = JNIMacroAssembler<PointerSize::k32>;
- explicit MipsAssembler(ArenaAllocator* arena,
+ explicit MipsAssembler(ArenaAllocator* allocator,
const MipsInstructionSetFeatures* instruction_set_features = nullptr)
- : Assembler(arena),
+ : Assembler(allocator),
overwriting_(false),
overwrite_location_(0),
reordering_(true),
ds_fsm_state_(kExpectingLabel),
ds_fsm_target_pc_(0),
- literals_(arena->Adapter(kArenaAllocAssembler)),
- jump_tables_(arena->Adapter(kArenaAllocAssembler)),
+ literals_(allocator->Adapter(kArenaAllocAssembler)),
+ jump_tables_(allocator->Adapter(kArenaAllocAssembler)),
last_position_adjustment_(0),
last_old_position_(0),
last_branch_id_(0),
diff --git a/compiler/utils/mips/assembler_mips32r5_test.cc b/compiler/utils/mips/assembler_mips32r5_test.cc
index a3662db..9a69ffd 100644
--- a/compiler/utils/mips/assembler_mips32r5_test.cc
+++ b/compiler/utils/mips/assembler_mips32r5_test.cc
@@ -72,8 +72,8 @@
return " -D -bbinary -mmips:isa32r5";
}
- mips::MipsAssembler* CreateAssembler(ArenaAllocator* arena) OVERRIDE {
- return new (arena) mips::MipsAssembler(arena, instruction_set_features_.get());
+ mips::MipsAssembler* CreateAssembler(ArenaAllocator* allocator) OVERRIDE {
+ return new (allocator) mips::MipsAssembler(allocator, instruction_set_features_.get());
}
void SetUpHelpers() OVERRIDE {
diff --git a/compiler/utils/mips/assembler_mips32r6_test.cc b/compiler/utils/mips/assembler_mips32r6_test.cc
index b6cb30a..b12b6b6 100644
--- a/compiler/utils/mips/assembler_mips32r6_test.cc
+++ b/compiler/utils/mips/assembler_mips32r6_test.cc
@@ -85,8 +85,8 @@
return " -D -bbinary -mmips:isa32r6";
}
- mips::MipsAssembler* CreateAssembler(ArenaAllocator* arena) OVERRIDE {
- return new (arena) mips::MipsAssembler(arena, instruction_set_features_.get());
+ mips::MipsAssembler* CreateAssembler(ArenaAllocator* allocator) OVERRIDE {
+ return new (allocator) mips::MipsAssembler(allocator, instruction_set_features_.get());
}
void SetUpHelpers() OVERRIDE {
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
index ee78cdb..a3787ac 100644
--- a/compiler/utils/mips64/assembler_mips64.h
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -418,14 +418,14 @@
public:
using JNIBase = JNIMacroAssembler<PointerSize::k64>;
- explicit Mips64Assembler(ArenaAllocator* arena,
+ explicit Mips64Assembler(ArenaAllocator* allocator,
const Mips64InstructionSetFeatures* instruction_set_features = nullptr)
- : Assembler(arena),
+ : Assembler(allocator),
overwriting_(false),
overwrite_location_(0),
- literals_(arena->Adapter(kArenaAllocAssembler)),
- long_literals_(arena->Adapter(kArenaAllocAssembler)),
- jump_tables_(arena->Adapter(kArenaAllocAssembler)),
+ literals_(allocator->Adapter(kArenaAllocAssembler)),
+ long_literals_(allocator->Adapter(kArenaAllocAssembler)),
+ jump_tables_(allocator->Adapter(kArenaAllocAssembler)),
last_position_adjustment_(0),
last_old_position_(0),
last_branch_id_(0),
diff --git a/compiler/utils/mips64/assembler_mips64_test.cc b/compiler/utils/mips64/assembler_mips64_test.cc
index 16a36f9..bf0326d 100644
--- a/compiler/utils/mips64/assembler_mips64_test.cc
+++ b/compiler/utils/mips64/assembler_mips64_test.cc
@@ -83,8 +83,8 @@
return " -D -bbinary -mmips:isa64r6";
}
- mips64::Mips64Assembler* CreateAssembler(ArenaAllocator* arena) OVERRIDE {
- return new (arena) mips64::Mips64Assembler(arena, instruction_set_features_.get());
+ mips64::Mips64Assembler* CreateAssembler(ArenaAllocator* allocator) OVERRIDE {
+ return new (allocator) mips64::Mips64Assembler(allocator, instruction_set_features_.get());
}
void SetUpHelpers() OVERRIDE {
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index dce3ad2..f3b516c 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -266,7 +266,8 @@
*/
class ConstantArea {
public:
- explicit ConstantArea(ArenaAllocator* arena) : buffer_(arena->Adapter(kArenaAllocAssembler)) {}
+ explicit ConstantArea(ArenaAllocator* allocator)
+ : buffer_(allocator->Adapter(kArenaAllocAssembler)) {}
// Add a double to the constant area, returning the offset into
// the constant area where the literal resides.
@@ -307,7 +308,8 @@
class X86Assembler FINAL : public Assembler {
public:
- explicit X86Assembler(ArenaAllocator* arena) : Assembler(arena), constant_area_(arena) {}
+ explicit X86Assembler(ArenaAllocator* allocator)
+ : Assembler(allocator), constant_area_(allocator) {}
virtual ~X86Assembler() {}
/*
diff --git a/compiler/utils/x86/jni_macro_assembler_x86.h b/compiler/utils/x86/jni_macro_assembler_x86.h
index 75cdd1e..56eaf19 100644
--- a/compiler/utils/x86/jni_macro_assembler_x86.h
+++ b/compiler/utils/x86/jni_macro_assembler_x86.h
@@ -34,7 +34,7 @@
class X86JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<X86Assembler, PointerSize::k32> {
public:
- explicit X86JNIMacroAssembler(ArenaAllocator* arena) : JNIMacroAssemblerFwd(arena) {}
+ explicit X86JNIMacroAssembler(ArenaAllocator* allocator) : JNIMacroAssemblerFwd(allocator) {}
virtual ~X86JNIMacroAssembler() {}
//
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index 1130444..0d24a75 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -290,7 +290,8 @@
*/
class ConstantArea {
public:
- explicit ConstantArea(ArenaAllocator* arena) : buffer_(arena->Adapter(kArenaAllocAssembler)) {}
+ explicit ConstantArea(ArenaAllocator* allocator)
+ : buffer_(allocator->Adapter(kArenaAllocAssembler)) {}
// Add a double to the constant area, returning the offset into
// the constant area where the literal resides.
@@ -352,7 +353,8 @@
class X86_64Assembler FINAL : public Assembler {
public:
- explicit X86_64Assembler(ArenaAllocator* arena) : Assembler(arena), constant_area_(arena) {}
+ explicit X86_64Assembler(ArenaAllocator* allocator)
+ : Assembler(allocator), constant_area_(allocator) {}
virtual ~X86_64Assembler() {}
/*
diff --git a/compiler/utils/x86_64/jni_macro_assembler_x86_64.h b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
index 734ed96..d1a3032 100644
--- a/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
+++ b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
@@ -34,8 +34,8 @@
class X86_64JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<X86_64Assembler,
PointerSize::k64> {
public:
- explicit X86_64JNIMacroAssembler(ArenaAllocator* arena)
- : JNIMacroAssemblerFwd<X86_64Assembler, PointerSize::k64>(arena) {}
+ explicit X86_64JNIMacroAssembler(ArenaAllocator* allocator)
+ : JNIMacroAssemblerFwd<X86_64Assembler, PointerSize::k64>(allocator) {}
virtual ~X86_64JNIMacroAssembler() {}
//
diff --git a/runtime/base/arena_allocator_test.cc b/runtime/base/arena_allocator_test.cc
index e2c2e2f..6bf56c8 100644
--- a/runtime/base/arena_allocator_test.cc
+++ b/runtime/base/arena_allocator_test.cc
@@ -23,9 +23,9 @@
class ArenaAllocatorTest : public testing::Test {
protected:
- size_t NumberOfArenas(ArenaAllocator* arena) {
+ size_t NumberOfArenas(ArenaAllocator* allocator) {
size_t result = 0u;
- for (Arena* a = arena->arena_head_; a != nullptr; a = a->next_) {
+ for (Arena* a = allocator->arena_head_; a != nullptr; a = a->next_) {
++result;
}
return result;
diff --git a/runtime/base/arena_containers.h b/runtime/base/arena_containers.h
index 62b974e..2e71156 100644
--- a/runtime/base/arena_containers.h
+++ b/runtime/base/arena_containers.h
@@ -137,22 +137,22 @@
typedef ArenaAllocatorAdapter<U> other;
};
- explicit ArenaAllocatorAdapter(ArenaAllocator* arena_allocator,
+ explicit ArenaAllocatorAdapter(ArenaAllocator* allocator,
ArenaAllocKind kind = kArenaAllocSTL)
: ArenaAllocatorAdapterKind(kind),
- arena_allocator_(arena_allocator) {
+ allocator_(allocator) {
}
template <typename U>
ArenaAllocatorAdapter(const ArenaAllocatorAdapter<U>& other) // NOLINT, implicit
: ArenaAllocatorAdapterKind(other),
- arena_allocator_(other.arena_allocator_) {
+ allocator_(other.allocator_) {
}
ArenaAllocatorAdapter(const ArenaAllocatorAdapter&) = default;
ArenaAllocatorAdapter& operator=(const ArenaAllocatorAdapter&) = default;
~ArenaAllocatorAdapter() = default;
private:
- ArenaAllocator* arena_allocator_;
+ ArenaAllocator* allocator_;
template <typename U>
friend class ArenaAllocatorAdapter;
@@ -174,14 +174,14 @@
typedef ArenaAllocatorAdapter<U> other;
};
- ArenaAllocatorAdapter(ArenaAllocator* arena_allocator, ArenaAllocKind kind)
+ ArenaAllocatorAdapter(ArenaAllocator* allocator, ArenaAllocKind kind)
: ArenaAllocatorAdapterKind(kind),
- arena_allocator_(arena_allocator) {
+ allocator_(allocator) {
}
template <typename U>
ArenaAllocatorAdapter(const ArenaAllocatorAdapter<U>& other) // NOLINT, implicit
: ArenaAllocatorAdapterKind(other),
- arena_allocator_(other.arena_allocator_) {
+ allocator_(other.allocator_) {
}
ArenaAllocatorAdapter(const ArenaAllocatorAdapter&) = default;
ArenaAllocatorAdapter& operator=(const ArenaAllocatorAdapter&) = default;
@@ -197,10 +197,10 @@
pointer allocate(size_type n,
ArenaAllocatorAdapter<void>::pointer hint ATTRIBUTE_UNUSED = nullptr) {
DCHECK_LE(n, max_size());
- return arena_allocator_->AllocArray<T>(n, ArenaAllocatorAdapterKind::Kind());
+ return allocator_->AllocArray<T>(n, ArenaAllocatorAdapterKind::Kind());
}
void deallocate(pointer p, size_type n) {
- arena_allocator_->MakeInaccessible(p, sizeof(T) * n);
+ allocator_->MakeInaccessible(p, sizeof(T) * n);
}
template <typename U, typename... Args>
@@ -213,7 +213,7 @@
}
private:
- ArenaAllocator* arena_allocator_;
+ ArenaAllocator* allocator_;
template <typename U>
friend class ArenaAllocatorAdapter;
@@ -226,7 +226,7 @@
template <typename T>
inline bool operator==(const ArenaAllocatorAdapter<T>& lhs,
const ArenaAllocatorAdapter<T>& rhs) {
- return lhs.arena_allocator_ == rhs.arena_allocator_;
+ return lhs.allocator_ == rhs.allocator_;
}
template <typename T>
diff --git a/runtime/base/arena_object.h b/runtime/base/arena_object.h
index 2d8e7d8..ed00bab 100644
--- a/runtime/base/arena_object.h
+++ b/runtime/base/arena_object.h
@@ -32,8 +32,8 @@
return allocator->Alloc(size, kAllocKind);
}
- static void* operator new(size_t size, ScopedArenaAllocator* arena) {
- return arena->Alloc(size, kAllocKind);
+ static void* operator new(size_t size, ScopedArenaAllocator* allocator) {
+ return allocator->Alloc(size, kAllocKind);
}
void operator delete(void*, size_t) {
@@ -56,8 +56,8 @@
return allocator->Alloc(size, kAllocKind);
}
- static void* operator new(size_t size, ScopedArenaAllocator* arena) {
- return arena->Alloc(size, kAllocKind);
+ static void* operator new(size_t size, ScopedArenaAllocator* allocator) {
+ return allocator->Alloc(size, kAllocKind);
}
void operator delete(void*, size_t) {
diff --git a/runtime/base/scoped_arena_allocator.h b/runtime/base/scoped_arena_allocator.h
index 1a0eb5e..f156f52 100644
--- a/runtime/base/scoped_arena_allocator.h
+++ b/runtime/base/scoped_arena_allocator.h
@@ -145,6 +145,10 @@
explicit ScopedArenaAllocator(ArenaStack* arena_stack);
~ScopedArenaAllocator();
+ ArenaStack* GetArenaStack() const {
+ return arena_stack_;
+ }
+
void Reset();
void* Alloc(size_t bytes, ArenaAllocKind kind = kArenaAllocMisc) ALWAYS_INLINE {
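The new GetArenaStack() accessor exists so code that only holds a ScopedArenaAllocator* can open a nested scope on the same stack; the register allocator uses this for short-lived, peak-heavy temporaries. A minimal sketch (function name illustrative):
void ComputeScratchData(ScopedArenaAllocator* allocator) {
  // Nested LIFO scope on the caller's ArenaStack: everything allocated
  // through `temp` is reclaimed when `temp` is destroyed, while the
  // caller's earlier allocations survive.
  ScopedArenaAllocator temp(allocator->GetArenaStack());
  void* scratch = temp.Alloc(4096, kArenaAllocMisc);
  // ... use scratch, then let `temp` go out of scope ...
}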
diff --git a/runtime/base/scoped_arena_containers.h b/runtime/base/scoped_arena_containers.h
index 632ba43..fccaaea 100644
--- a/runtime/base/scoped_arena_containers.h
+++ b/runtime/base/scoped_arena_containers.h
@@ -52,6 +52,12 @@
using ScopedArenaVector = dchecked_vector<T, ScopedArenaAllocatorAdapter<T>>;
template <typename T, typename Comparator = std::less<T>>
+using ScopedArenaPriorityQueue = std::priority_queue<T, ScopedArenaVector<T>, Comparator>;
+
+template <typename T>
+using ScopedArenaStdStack = std::stack<T, ScopedArenaDeque<T>>;
+
+template <typename T, typename Comparator = std::less<T>>
using ScopedArenaSet = std::set<T, Comparator, ScopedArenaAllocatorAdapter<T>>;
template <typename K, typename V, typename Comparator = std::less<K>>
@@ -96,12 +102,12 @@
typedef ScopedArenaAllocatorAdapter<U> other;
};
- explicit ScopedArenaAllocatorAdapter(ScopedArenaAllocator* arena_allocator,
+ explicit ScopedArenaAllocatorAdapter(ScopedArenaAllocator* allocator,
ArenaAllocKind kind = kArenaAllocSTL)
- : DebugStackReference(arena_allocator),
- DebugStackIndirectTopRef(arena_allocator),
+ : DebugStackReference(allocator),
+ DebugStackIndirectTopRef(allocator),
ArenaAllocatorAdapterKind(kind),
- arena_stack_(arena_allocator->arena_stack_) {
+ arena_stack_(allocator->arena_stack_) {
}
template <typename U>
ScopedArenaAllocatorAdapter(const ScopedArenaAllocatorAdapter<U>& other) // NOLINT, implicit
@@ -139,12 +145,12 @@
typedef ScopedArenaAllocatorAdapter<U> other;
};
- explicit ScopedArenaAllocatorAdapter(ScopedArenaAllocator* arena_allocator,
+ explicit ScopedArenaAllocatorAdapter(ScopedArenaAllocator* allocator,
ArenaAllocKind kind = kArenaAllocSTL)
- : DebugStackReference(arena_allocator),
- DebugStackIndirectTopRef(arena_allocator),
+ : DebugStackReference(allocator),
+ DebugStackIndirectTopRef(allocator),
ArenaAllocatorAdapterKind(kind),
- arena_stack_(arena_allocator->arena_stack_) {
+ arena_stack_(allocator->arena_stack_) {
}
template <typename U>
ScopedArenaAllocatorAdapter(const ScopedArenaAllocatorAdapter<U>& other) // NOLINT, implicit
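A note on the two new container aliases above: std::priority_queue and std::stack are container adaptors that take a container type rather than an allocator, so instances are built from a pre-constructed arena-backed container. A usage sketch (allocator setup and alloc-kind are illustrative):
ScopedArenaAllocator allocator(&arena_stack);
ScopedArenaPriorityQueue<uint32_t> worklist(
    std::less<uint32_t>(),
    ScopedArenaVector<uint32_t>(allocator.Adapter(kArenaAllocMisc)));
ScopedArenaStdStack<uint32_t> dfs_stack(
    ScopedArenaDeque<uint32_t>(allocator.Adapter(kArenaAllocMisc)));
worklist.push(42u);
dfs_stack.push(worklist.top());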
diff --git a/runtime/jit/profile_compilation_info.h b/runtime/jit/profile_compilation_info.h
index 6870b94..8889b34 100644
--- a/runtime/jit/profile_compilation_info.h
+++ b/runtime/jit/profile_compilation_info.h
@@ -133,10 +133,10 @@
// megamorphic and its possible types).
// If the receiver is megamorphic or is missing types the set of classes will be empty.
struct DexPcData : public ArenaObject<kArenaAllocProfile> {
- explicit DexPcData(ArenaAllocator* arena)
+ explicit DexPcData(ArenaAllocator* allocator)
: is_missing_types(false),
is_megamorphic(false),
- classes(std::less<ClassReference>(), arena->Adapter(kArenaAllocProfile)) {}
+ classes(std::less<ClassReference>(), allocator->Adapter(kArenaAllocProfile)) {}
void AddClass(uint16_t dex_profile_idx, const dex::TypeIndex& type_idx);
void SetIsMegamorphic() {
if (is_missing_types) return;
@@ -429,19 +429,19 @@
// profile_key_map_ and info_. However, it makes the profiles logic much
// simpler if we have references here as well.
struct DexFileData : public DeletableArenaObject<kArenaAllocProfile> {
- DexFileData(ArenaAllocator* arena,
+ DexFileData(ArenaAllocator* allocator,
const std::string& key,
uint32_t location_checksum,
uint16_t index,
uint32_t num_methods)
- : arena_(arena),
+ : arena_(allocator),
profile_key(key),
profile_index(index),
checksum(location_checksum),
- method_map(std::less<uint16_t>(), arena->Adapter(kArenaAllocProfile)),
- class_set(std::less<dex::TypeIndex>(), arena->Adapter(kArenaAllocProfile)),
+ method_map(std::less<uint16_t>(), allocator->Adapter(kArenaAllocProfile)),
+ class_set(std::less<dex::TypeIndex>(), allocator->Adapter(kArenaAllocProfile)),
num_method_ids(num_methods),
- bitmap_storage(arena->Adapter(kArenaAllocProfile)) {
+ bitmap_storage(allocator->Adapter(kArenaAllocProfile)) {
const size_t num_bits = num_method_ids * kBitmapIndexCount;
bitmap_storage.resize(RoundUp(num_bits, kBitsPerByte) / kBitsPerByte);
if (!bitmap_storage.empty()) {
diff --git a/runtime/verifier/reg_type-inl.h b/runtime/verifier/reg_type-inl.h
index 704d2a8..631c6bd 100644
--- a/runtime/verifier/reg_type-inl.h
+++ b/runtime/verifier/reg_type-inl.h
@@ -199,8 +199,8 @@
return instance_;
}
-inline void* RegType::operator new(size_t size, ScopedArenaAllocator* arena) {
- return arena->Alloc(size, kArenaAllocMisc);
+inline void* RegType::operator new(size_t size, ScopedArenaAllocator* allocator) {
+ return allocator->Alloc(size, kArenaAllocMisc);
}
} // namespace verifier
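Combined with the reg_type.h hunk below, where the ArenaAllocator overload is deleted outright, this restricts arena placement-new for verifier types to ScopedArenaAllocator. A self-contained analog of the pattern (not the real RegType):
class ScopedArenaOnly {
 public:
  // Reject the general arena; these objects live on scoped arenas only.
  static void* operator new(size_t size, ArenaAllocator* allocator) = delete;
  static void* operator new(size_t size, ScopedArenaAllocator* allocator) {
    return allocator->Alloc(size, kArenaAllocMisc);
  }
};
// `new (&scoped_allocator) ScopedArenaOnly()` compiles;
// `new (&arena_allocator) ScopedArenaOnly()` fails at compile time.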
diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h
index c5d8ff5..a2085a3 100644
--- a/runtime/verifier/reg_type.h
+++ b/runtime/verifier/reg_type.h
@@ -264,8 +264,8 @@
return ::operator new(size);
}
- static void* operator new(size_t size, ArenaAllocator* arena) = delete;
- static void* operator new(size_t size, ScopedArenaAllocator* arena);
+ static void* operator new(size_t size, ArenaAllocator* allocator) = delete;
+ static void* operator new(size_t size, ScopedArenaAllocator* allocator);
enum class AssignmentType {
kBoolean,