Diffstat (limited to 'compiler/optimizing')
-rw-r--r--  compiler/optimizing/code_generator.cc                              2
-rw-r--r--  compiler/optimizing/code_generator_arm64.h                         8
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.h                      8
-rw-r--r--  compiler/optimizing/constructor_fence_redundancy_elimination.cc   31
-rw-r--r--  compiler/optimizing/dead_code_elimination.cc                      15
-rw-r--r--  compiler/optimizing/instruction_simplifier.cc                      6
-rw-r--r--  compiler/optimizing/intrinsics.cc                                  2
-rw-r--r--  compiler/optimizing/intrinsics_arm64.cc                            8
-rw-r--r--  compiler/optimizing/intrinsics_arm_vixl.cc                        12
-rw-r--r--  compiler/optimizing/intrinsics_riscv64.cc                          2
-rw-r--r--  compiler/optimizing/intrinsics_x86.cc                             12
-rw-r--r--  compiler/optimizing/locations.cc                                  11
-rw-r--r--  compiler/optimizing/locations.h                                   19
-rw-r--r--  compiler/optimizing/nodes.cc                                      19
-rw-r--r--  compiler/optimizing/nodes.h                                        1
-rw-r--r--  compiler/optimizing/reference_type_propagation.cc                  2
-rw-r--r--  compiler/optimizing/ssa_phi_elimination.cc                         6
17 files changed, 88 insertions, 76 deletions
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 5c2e4dbc51..d63b0abcc7 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -645,7 +645,7 @@ void CodeGenerator::CreateUnresolvedFieldLocationSummary(
bool is_get = field_access->IsUnresolvedInstanceFieldGet()
|| field_access->IsUnresolvedStaticFieldGet();
- ArenaAllocator* allocator = field_access->GetBlock()->GetGraph()->GetAllocator();
+ ArenaAllocator* allocator = GetGraph()->GetAllocator();
LocationSummary* locations =
new (allocator) LocationSummary(field_access, LocationSummary::kCallOnMainOnly);
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 4566cdf0ca..de13814eaf 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -186,11 +186,12 @@ class JumpTableARM64 : public DeletableArenaObject<kArenaAllocSwitchTable> {
public:
using VIXLInt32Literal = vixl::aarch64::Literal<int32_t>;
- explicit JumpTableARM64(HPackedSwitch* switch_instr)
+ JumpTableARM64(HPackedSwitch* switch_instr, ArenaAllocator* allocator)
: switch_instr_(switch_instr),
table_start_(),
- jump_targets_(switch_instr->GetAllocator()->Adapter(kArenaAllocCodeGenerator)) {
+ jump_targets_(allocator->Adapter(kArenaAllocCodeGenerator)) {
uint32_t num_entries = switch_instr_->GetNumEntries();
+ jump_targets_.reserve(num_entries);
for (uint32_t i = 0; i < num_entries; i++) {
VIXLInt32Literal* lit = new VIXLInt32Literal(0);
jump_targets_.emplace_back(lit);
@@ -765,7 +766,8 @@ class CodeGeneratorARM64 : public CodeGenerator {
uint32_t GetPreferredSlotsAlignment() const override { return vixl::aarch64::kXRegSizeInBytes; }
JumpTableARM64* CreateJumpTable(HPackedSwitch* switch_instr) {
- jump_tables_.emplace_back(new (GetGraph()->GetAllocator()) JumpTableARM64(switch_instr));
+ ArenaAllocator* allocator = GetGraph()->GetAllocator();
+ jump_tables_.emplace_back(new (allocator) JumpTableARM64(switch_instr, allocator));
return jump_tables_.back().get();
}
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index 2e20591c98..bbc519fcf9 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -204,11 +204,12 @@ ALWAYS_INLINE inline StoreOperandType GetStoreOperandType(DataType::Type type) {
class JumpTableARMVIXL : public DeletableArenaObject<kArenaAllocSwitchTable> {
public:
- explicit JumpTableARMVIXL(HPackedSwitch* switch_instr)
+ JumpTableARMVIXL(HPackedSwitch* switch_instr, ArenaAllocator* allocator)
: switch_instr_(switch_instr),
table_start_(),
- bb_addresses_(switch_instr->GetAllocator()->Adapter(kArenaAllocCodeGenerator)) {
+ bb_addresses_(allocator->Adapter(kArenaAllocCodeGenerator)) {
uint32_t num_entries = switch_instr_->GetNumEntries();
+ bb_addresses_.reserve(num_entries);
for (uint32_t i = 0; i < num_entries; i++) {
VIXLInt32Literal *lit = new VIXLInt32Literal(0, vixl32::RawLiteral::kManuallyPlaced);
bb_addresses_.emplace_back(lit);
@@ -883,7 +884,8 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
void GenerateExplicitNullCheck(HNullCheck* instruction) override;
JumpTableARMVIXL* CreateJumpTable(HPackedSwitch* switch_instr) {
- jump_tables_.emplace_back(new (GetGraph()->GetAllocator()) JumpTableARMVIXL(switch_instr));
+ ArenaAllocator* allocator = GetGraph()->GetAllocator();
+ jump_tables_.emplace_back(new (allocator) JumpTableARMVIXL(switch_instr, allocator));
return jump_tables_.back().get();
}
void EmitJumpTables();
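
Note: both jump-table classes follow the same pattern. With HInstruction::GetAllocator() removed (see the nodes.h hunk further down), the arena allocator is threaded into the constructor explicitly, and the backing container is reserved up front since the entry count is known. A rough standalone sketch of that idiom, using toy Arena/PackedSwitch/JumpTable types rather than the real ART classes:

#include <cstddef>
#include <cstdint>
#include <new>
#include <vector>

// Toy bump-pointer arena standing in for ArenaAllocator.
struct Arena {
  alignas(std::max_align_t) std::byte buffer[1 << 16];
  size_t used = 0;
  void* Alloc(size_t bytes) {           // no per-object free; freed wholesale
    void* p = buffer + used;
    used += (bytes + 7) & ~size_t{7};   // keep 8-byte alignment for the next object
    return p;
  }
};

struct PackedSwitch { uint32_t num_entries; };

class JumpTable {
 public:
  // The allocator is passed in by the caller instead of being fetched from the
  // instruction, mirroring the new constructor signature. The toy uses
  // std::vector, so the parameter is only kept to mirror the shape.
  JumpTable(const PackedSwitch* switch_instr, Arena* /*allocator*/)
      : switch_instr_(switch_instr) {
    targets_.reserve(switch_instr_->num_entries);  // size known up front
    for (uint32_t i = 0; i < switch_instr_->num_entries; ++i) {
      targets_.push_back(0);
    }
  }
 private:
  const PackedSwitch* switch_instr_;
  std::vector<int32_t> targets_;
};

int main() {
  Arena arena;
  PackedSwitch sw{4};
  // Placement-new into the arena, as CreateJumpTable does with
  // `new (allocator) JumpTableARM64(switch_instr, allocator)`.
  // Arena objects are never individually destroyed here.
  JumpTable* table = new (arena.Alloc(sizeof(JumpTable))) JumpTable(&sw, &arena);
  (void)table;
  return 0;
}
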
diff --git a/compiler/optimizing/constructor_fence_redundancy_elimination.cc b/compiler/optimizing/constructor_fence_redundancy_elimination.cc
index 71fc39a956..c89ec171d9 100644
--- a/compiler/optimizing/constructor_fence_redundancy_elimination.cc
+++ b/compiler/optimizing/constructor_fence_redundancy_elimination.cc
@@ -33,7 +33,7 @@ class CFREVisitor final : public HGraphVisitor {
: HGraphVisitor(graph),
scoped_allocator_(graph->GetArenaStack()),
candidate_fences_(scoped_allocator_.Adapter(kArenaAllocCFRE)),
- candidate_fence_targets_(std::nullopt),
+ candidate_fence_targets_(),
stats_(stats) {}
void VisitBasicBlock(HBasicBlock* block) override {
@@ -48,14 +48,17 @@ class CFREVisitor final : public HGraphVisitor {
void VisitConstructorFence(HConstructorFence* constructor_fence) override {
candidate_fences_.push_back(constructor_fence);
- if (!candidate_fence_targets_.has_value()) {
+ if (candidate_fence_targets_.SizeInBits() == 0u) {
size_t number_of_instructions = GetGraph()->GetCurrentInstructionId();
- candidate_fence_targets_.emplace(
- &scoped_allocator_, number_of_instructions, /*expandable=*/ false, kArenaAllocCFRE);
+ candidate_fence_targets_ = ArenaBitVector::CreateFixedSize(
+ &scoped_allocator_, number_of_instructions, kArenaAllocCFRE);
+ } else {
+ DCHECK_EQ(candidate_fence_targets_.SizeInBits(),
+ static_cast<size_t>(GetGraph()->GetCurrentInstructionId()));
}
for (HInstruction* input : constructor_fence->GetInputs()) {
- candidate_fence_targets_->SetBit(input->GetId());
+ candidate_fence_targets_.SetBit(input->GetId());
}
}
@@ -162,8 +165,7 @@ class CFREVisitor final : public HGraphVisitor {
void VisitSetLocation([[maybe_unused]] HInstruction* inst, HInstruction* store_input) {
if (candidate_fences_.empty()) {
// There is no need to look at inputs if there are no candidate fence targets.
- DCHECK_IMPLIES(candidate_fence_targets_.has_value(),
- !candidate_fence_targets_->IsAnyBitSet());
+ DCHECK(!candidate_fence_targets_.IsAnyBitSet());
return;
}
// An object is considered "published" if it's stored onto the heap.
@@ -179,8 +181,7 @@ class CFREVisitor final : public HGraphVisitor {
bool HasInterestingPublishTargetAsInput(HInstruction* inst) {
if (candidate_fences_.empty()) {
// There is no need to look at inputs if there are no candidate fence targets.
- DCHECK_IMPLIES(candidate_fence_targets_.has_value(),
- !candidate_fence_targets_->IsAnyBitSet());
+ DCHECK(!candidate_fence_targets_.IsAnyBitSet());
return false;
}
for (HInstruction* input : inst->GetInputs()) {
@@ -221,15 +222,17 @@ class CFREVisitor final : public HGraphVisitor {
// there is no benefit to this extra complexity unless we also reordered
// the stores to come later.
candidate_fences_.clear();
- DCHECK(candidate_fence_targets_.has_value());
- candidate_fence_targets_->ClearAllBits();
+ DCHECK_EQ(candidate_fence_targets_.SizeInBits(),
+ static_cast<size_t>(GetGraph()->GetCurrentInstructionId()));
+ candidate_fence_targets_.ClearAllBits();
}
// A publishing 'store' is only interesting if the value being stored
// is one of the fence `targets` in `candidate_fences`.
bool IsInterestingPublishTarget(HInstruction* store_input) const {
- DCHECK(candidate_fence_targets_.has_value());
- return candidate_fence_targets_->IsBitSet(store_input->GetId());
+ DCHECK_EQ(candidate_fence_targets_.SizeInBits(),
+ static_cast<size_t>(GetGraph()->GetCurrentInstructionId()));
+ return candidate_fence_targets_.IsBitSet(store_input->GetId());
}
// Phase-local heap memory allocator for CFRE optimizer.
@@ -245,7 +248,7 @@ class CFREVisitor final : public HGraphVisitor {
// Stores a set of the fence targets, to allow faster lookup of whether
// a detected publish is a target of one of the candidate fences.
- std::optional<ArenaBitVector> candidate_fence_targets_;
+ BitVectorView<size_t> candidate_fence_targets_;
// Used to record stats about the optimization.
OptimizingCompilerStats* const stats_;
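
Note: the optional-wrapped ArenaBitVector becomes a plain BitVectorView whose empty state (SizeInBits() == 0) stands in for "not created yet", with ArenaBitVector::CreateFixedSize producing the view on first use. A rough self-contained sketch of that idea, with a toy view type and a std::vector playing the role of the scoped arena rather than ART's BitVectorView/ArenaBitVector:

#include <cstddef>
#include <cstdint>
#include <vector>

// Toy non-owning bit-vector view; default-constructed means "not created yet".
class BitView {
 public:
  BitView() = default;
  BitView(uint64_t* words, size_t num_bits) : words_(words), num_bits_(num_bits) {}
  size_t SizeInBits() const { return num_bits_; }
  void SetBit(size_t i) { words_[i / 64] |= uint64_t{1} << (i % 64); }
  bool IsBitSet(size_t i) const { return (words_[i / 64] >> (i % 64)) & 1u; }
  void ClearAllBits() {
    for (size_t w = 0; w < (num_bits_ + 63) / 64; ++w) words_[w] = 0;
  }
 private:
  uint64_t* words_ = nullptr;
  size_t num_bits_ = 0;
};

// Stand-in for CreateFixedSize: allocate zeroed storage, return a view over it.
BitView CreateFixedSize(std::vector<uint64_t>& storage, size_t num_bits) {
  storage.assign((num_bits + 63) / 64, 0);
  return BitView(storage.data(), num_bits);
}

int main() {
  std::vector<uint64_t> storage;   // plays the role of the scoped arena allocator
  BitView targets;                 // empty view replaces the old std::nullopt state
  if (targets.SizeInBits() == 0u) {
    targets = CreateFixedSize(storage, /*num_bits=*/128);
  }
  targets.SetBit(42);
  return targets.IsBitSet(42) ? 0 : 1;
}
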
diff --git a/compiler/optimizing/dead_code_elimination.cc b/compiler/optimizing/dead_code_elimination.cc
index 9955982309..c367a20a06 100644
--- a/compiler/optimizing/dead_code_elimination.cc
+++ b/compiler/optimizing/dead_code_elimination.cc
@@ -201,7 +201,7 @@ static bool RemoveNonNullControlDependences(HBasicBlock* block, HBasicBlock* thr
user_block != throws &&
block->Dominates(user_block)) {
if (bound == nullptr) {
- bound = new (obj->GetBlock()->GetGraph()->GetAllocator()) HBoundType(obj);
+ bound = new (block->GetGraph()->GetAllocator()) HBoundType(obj);
bound->SetUpperBound(ti, /*can_be_null*/ false);
bound->SetReferenceTypeInfo(ti);
bound->SetCanBeNull(false);
@@ -591,14 +591,15 @@ void HDeadCodeElimination::ConnectSuccessiveBlocks() {
struct HDeadCodeElimination::TryBelongingInformation {
TryBelongingInformation(HGraph* graph, ScopedArenaAllocator* allocator)
- : blocks_in_try(allocator, graph->GetBlocks().size(), /*expandable=*/false, kArenaAllocDCE),
- coalesced_try_entries(
- allocator, graph->GetBlocks().size(), /*expandable=*/false, kArenaAllocDCE) {}
+ : blocks_in_try(ArenaBitVector::CreateFixedSize(
+ allocator, graph->GetBlocks().size(), kArenaAllocDCE)),
+ coalesced_try_entries(ArenaBitVector::CreateFixedSize(
+ allocator, graph->GetBlocks().size(), kArenaAllocDCE)) {}
// Which blocks belong in the try.
- ArenaBitVector blocks_in_try;
+ BitVectorView<size_t> blocks_in_try;
// Which other try entries are referencing this same try.
- ArenaBitVector coalesced_try_entries;
+ BitVectorView<size_t> coalesced_try_entries;
};
bool HDeadCodeElimination::CanPerformTryRemoval(const TryBelongingInformation& try_belonging_info) {
@@ -725,7 +726,7 @@ bool HDeadCodeElimination::RemoveUnneededTries() {
if (try_boundary->HasSameExceptionHandlersAs(*other_try_boundary)) {
// Merge the entries as they are really the same one.
// Block merging.
- it->second.blocks_in_try.Union(&other_it->second.blocks_in_try);
+ it->second.blocks_in_try.Union(other_it->second.blocks_in_try);
// Add the coalesced try entry to update it too.
it->second.coalesced_try_entries.SetBit(other_block->GetBlockId());
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 26efefa2d8..4ef0fc907a 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -301,7 +301,7 @@ bool InstructionSimplifierVisitor::TryCombineVecMultiplyAccumulate(HVecMul* mul)
return false;
}
- ArenaAllocator* allocator = mul->GetBlock()->GetGraph()->GetAllocator();
+ ArenaAllocator* allocator = GetGraph()->GetAllocator();
if (!mul->HasOnlyOneNonEnvironmentUse()) {
return false;
}
@@ -3637,8 +3637,8 @@ bool InstructionSimplifierVisitor::TrySubtractionChainSimplification(
bool is_x_negated = is_y_negated ^ ((x == right) && y->IsSub());
int64_t const3_val = ComputeAddition(type, const1_val, const2_val);
HBasicBlock* block = instruction->GetBlock();
- HConstant* const3 = block->GetGraph()->GetConstant(type, const3_val);
- ArenaAllocator* allocator = instruction->GetAllocator();
+ HConstant* const3 = GetGraph()->GetConstant(type, const3_val);
+ ArenaAllocator* allocator = GetGraph()->GetAllocator();
HInstruction* z;
if (is_x_negated) {
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index 5323ae2445..edd454c93e 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -198,7 +198,7 @@ void IntrinsicVisitor::CreateReferenceRefersToLocations(HInvoke* invoke, CodeGen
return;
}
- ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
+ ArenaAllocator* allocator = codegen->GetGraph()->GetAllocator();
LocationSummary* locations =
new (allocator) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 98aa5600b4..3eaaa6cb94 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -2961,9 +2961,8 @@ void IntrinsicLocationsBuilderARM64::VisitSystemArrayCopyChar(HInvoke* invoke) {
}
}
- ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
LocationSummary* locations =
- new (allocator) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
+ new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
// arraycopy(char[] src, int src_pos, char[] dst, int dst_pos, int length).
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, LocationForSystemArrayCopyInput(invoke->InputAt(1)));
@@ -4925,7 +4924,7 @@ static LocationSummary* CreateVarHandleCommonLocations(HInvoke* invoke,
size_t expected_coordinates_count = GetExpectedVarHandleCoordinatesCount(invoke);
DataType::Type return_type = invoke->GetType();
- ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
+ ArenaAllocator* allocator = codegen->GetGraph()->GetAllocator();
LocationSummary* locations =
new (allocator) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
@@ -5976,8 +5975,7 @@ void VarHandleSlowPathARM64::EmitByteArrayViewCode(CodeGenerator* codegen_in) {
}
void IntrinsicLocationsBuilderARM64::VisitMethodHandleInvokeExact(HInvoke* invoke) {
- ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
- LocationSummary* locations = new (allocator)
+ LocationSummary* locations = new (allocator_)
LocationSummary(invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
InvokeDexCallingConventionVisitorARM64 calling_convention;
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index a6f6eb0ba0..9e60090a03 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -2690,7 +2690,7 @@ static void CreateUnsafeGetLocations(HInvoke* invoke,
DataType::Type type,
bool atomic) {
bool can_call = codegen->EmitReadBarrier() && IsUnsafeGetReference(invoke);
- ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
+ ArenaAllocator* allocator = codegen->GetGraph()->GetAllocator();
LocationSummary* locations =
new (allocator) LocationSummary(invoke,
can_call
@@ -3101,7 +3101,7 @@ static void CreateUnsafePutLocations(HInvoke* invoke,
CodeGeneratorARMVIXL* codegen,
DataType::Type type,
bool atomic) {
- ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
+ ArenaAllocator* allocator = codegen->GetGraph()->GetAllocator();
LocationSummary* locations =
new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
@@ -3115,7 +3115,7 @@ static void CreateUnsafePutAbsoluteLocations(HInvoke* invoke,
CodeGeneratorARMVIXL* codegen,
DataType::Type type,
bool atomic) {
- ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
+ ArenaAllocator* allocator = codegen->GetGraph()->GetAllocator();
LocationSummary* locations =
new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
@@ -3752,7 +3752,7 @@ class ReadBarrierCasSlowPathARMVIXL : public SlowPathCodeARMVIXL {
static void CreateUnsafeCASLocations(HInvoke* invoke, CodeGeneratorARMVIXL* codegen) {
const bool can_call = codegen->EmitReadBarrier() && IsUnsafeCASReference(invoke);
- ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
+ ArenaAllocator* allocator = codegen->GetGraph()->GetAllocator();
LocationSummary* locations =
new (allocator) LocationSummary(invoke,
can_call
@@ -4046,7 +4046,7 @@ static void CreateUnsafeGetAndUpdateLocations(HInvoke* invoke,
DataType::Type type,
GetAndUpdateOp get_and_update_op) {
const bool can_call = codegen->EmitReadBarrier() && IsUnsafeGetAndSetReference(invoke);
- ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
+ ArenaAllocator* allocator = codegen->GetGraph()->GetAllocator();
LocationSummary* locations =
new (allocator) LocationSummary(invoke,
can_call
@@ -4653,7 +4653,7 @@ static LocationSummary* CreateVarHandleCommonLocations(HInvoke* invoke,
size_t expected_coordinates_count = GetExpectedVarHandleCoordinatesCount(invoke);
DataType::Type return_type = invoke->GetType();
- ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
+ ArenaAllocator* allocator = codegen->GetGraph()->GetAllocator();
LocationSummary* locations =
new (allocator) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
diff --git a/compiler/optimizing/intrinsics_riscv64.cc b/compiler/optimizing/intrinsics_riscv64.cc
index cc0f114c56..4c56800920 100644
--- a/compiler/optimizing/intrinsics_riscv64.cc
+++ b/compiler/optimizing/intrinsics_riscv64.cc
@@ -3858,7 +3858,7 @@ static LocationSummary* CreateVarHandleCommonLocations(HInvoke* invoke,
size_t expected_coordinates_count = GetExpectedVarHandleCoordinatesCount(invoke);
DataType::Type return_type = invoke->GetType();
- ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
+ ArenaAllocator* allocator = codegen->GetGraph()->GetAllocator();
LocationSummary* locations =
new (allocator) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 5a6b8832c4..5710ce42bb 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -4117,7 +4117,7 @@ static void CreateVarHandleGetLocations(HInvoke* invoke, CodeGeneratorX86* codeg
return;
}
- ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
+ ArenaAllocator* allocator = codegen->GetGraph()->GetAllocator();
LocationSummary* locations = new (allocator) LocationSummary(
invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
@@ -4253,7 +4253,7 @@ static void CreateVarHandleSetLocations(HInvoke* invoke, CodeGeneratorX86* codeg
return;
}
- ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
+ ArenaAllocator* allocator = codegen->GetGraph()->GetAllocator();
LocationSummary* locations = new (allocator) LocationSummary(
invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
@@ -4430,7 +4430,7 @@ static void CreateVarHandleGetAndSetLocations(HInvoke* invoke, CodeGeneratorX86*
return;
}
- ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
+ ArenaAllocator* allocator = codegen->GetGraph()->GetAllocator();
LocationSummary* locations = new (allocator) LocationSummary(
invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
locations->AddRegisterTemps(2);
@@ -4630,7 +4630,7 @@ static void CreateVarHandleCompareAndSetOrExchangeLocations(HInvoke* invoke,
return;
}
- ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
+ ArenaAllocator* allocator = codegen->GetGraph()->GetAllocator();
LocationSummary* locations = new (allocator) LocationSummary(
invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
locations->AddRegisterTemps(2);
@@ -4810,7 +4810,7 @@ static void CreateVarHandleGetAndAddLocations(HInvoke* invoke, CodeGeneratorX86*
return;
}
- ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
+ ArenaAllocator* allocator = codegen->GetGraph()->GetAllocator();
LocationSummary* locations = new (allocator) LocationSummary(
invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
locations->AddRegisterTemps(2);
@@ -4985,7 +4985,7 @@ static void CreateVarHandleGetAndBitwiseOpLocations(HInvoke* invoke, CodeGenerat
return;
}
- ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
+ ArenaAllocator* allocator = codegen->GetGraph()->GetAllocator();
LocationSummary* locations = new (allocator) LocationSummary(
invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
// We need a byte register temp to store the result of the bitwise operation
diff --git a/compiler/optimizing/locations.cc b/compiler/optimizing/locations.cc
index 4189bc4053..f419263f62 100644
--- a/compiler/optimizing/locations.cc
+++ b/compiler/optimizing/locations.cc
@@ -26,17 +26,24 @@ namespace art HIDDEN {
// Verify that Location is trivially copyable.
static_assert(std::is_trivially_copyable<Location>::value, "Location should be trivially copyable");
+static inline ArrayRef<Location> AllocateInputLocations(HInstruction* instruction,
+ ArenaAllocator* allocator) {
+ size_t input_count = instruction->InputCount();
+ Location* array = allocator->AllocArray<Location>(input_count, kArenaAllocLocationSummary);
+ return {array, input_count};
+}
+
LocationSummary::LocationSummary(HInstruction* instruction,
CallKind call_kind,
bool intrinsified,
ArenaAllocator* allocator)
- : inputs_(instruction->InputCount(), allocator->Adapter(kArenaAllocLocationSummary)),
+ : inputs_(AllocateInputLocations(instruction, allocator)),
temps_(allocator->Adapter(kArenaAllocLocationSummary)),
+ stack_mask_(nullptr),
call_kind_(call_kind),
intrinsified_(intrinsified),
has_custom_slow_path_calling_convention_(false),
output_overlaps_(Location::kOutputOverlap),
- stack_mask_(nullptr),
register_mask_(0),
live_registers_(RegisterSet::Empty()),
custom_slow_path_caller_saves_(RegisterSet::Empty()) {
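
Note: since the number of inputs is fixed once the LocationSummary is constructed, the ArenaVector is replaced by a raw array carved out of the arena plus a non-owning ArrayRef over it. A minimal sketch of that allocation pattern, with a toy span type and a std::vector standing in for ArrayRef and the arena (names are illustrative):

#include <cstddef>
#include <vector>

// Toy span standing in for art::ArrayRef<Location>.
template <typename T>
struct Span {
  T* data = nullptr;
  size_t size = 0;
  T& operator[](size_t i) { return data[i]; }
};

struct Location { int kind = 0; };

// Stand-in for AllocateInputLocations(): carve a fixed-size array out of the
// backing storage and hand back a view; the count never changes afterwards.
Span<Location> AllocateInputLocations(std::vector<Location>& arena, size_t input_count) {
  size_t offset = arena.size();
  arena.resize(offset + input_count);   // plays the role of AllocArray<Location>()
  return {arena.data() + offset, input_count};
}

int main() {
  std::vector<Location> arena;          // plays the role of the ArenaAllocator
  arena.reserve(16);                    // a real arena never relocates; avoid invalidating the view
  Span<Location> inputs = AllocateInputLocations(arena, /*input_count=*/3);
  inputs[1].kind = 7;                   // SetInAt-style access by index
  return inputs.size == 3 ? 0 : 1;
}
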
diff --git a/compiler/optimizing/locations.h b/compiler/optimizing/locations.h
index 2209f05c0b..b8fe29c621 100644
--- a/compiler/optimizing/locations.h
+++ b/compiler/optimizing/locations.h
@@ -19,6 +19,7 @@
#include "base/arena_containers.h"
#include "base/arena_object.h"
+#include "base/array_ref.h"
#include "base/bit_field.h"
#include "base/bit_utils.h"
#include "base/bit_vector.h"
@@ -39,7 +40,7 @@ std::ostream& operator<<(std::ostream& os, const Location& location);
*/
class Location : public ValueObject {
public:
- enum OutputOverlap {
+ enum OutputOverlap : uint8_t {
// The liveness of the output overlaps the liveness of one or
// several input(s); the register allocator cannot reuse an
// input's location for the output's location.
@@ -534,7 +535,7 @@ static constexpr bool kIntrinsified = true;
*/
class LocationSummary : public ArenaObject<kArenaAllocLocationSummary> {
public:
- enum CallKind {
+ enum CallKind : uint8_t {
kNoCall,
kCallOnMainAndSlowPath,
kCallOnSlowPath,
@@ -713,8 +714,13 @@ class LocationSummary : public ArenaObject<kArenaAllocLocationSummary> {
bool intrinsified,
ArenaAllocator* allocator);
- ArenaVector<Location> inputs_;
+ ArrayRef<Location> inputs_;
ArenaVector<Location> temps_;
+ Location output_;
+
+ // Mask of objects that live in the stack.
+ BitVector* stack_mask_;
+
const CallKind call_kind_;
// Whether these are locations for an intrinsified call.
const bool intrinsified_;
@@ -723,10 +729,6 @@ class LocationSummary : public ArenaObject<kArenaAllocLocationSummary> {
// Whether the output overlaps with any of the inputs. If it overlaps, then it cannot
// share the same register as the inputs.
Location::OutputOverlap output_overlaps_;
- Location output_;
-
- // Mask of objects that live in the stack.
- BitVector* stack_mask_;
// Mask of objects that live in register.
uint32_t register_mask_;
@@ -734,7 +736,8 @@ class LocationSummary : public ArenaObject<kArenaAllocLocationSummary> {
// Registers that are in use at this position.
RegisterSet live_registers_;
- // Custom slow path caller saves. Valid only if indicated by slow_path_calling_convention_.
+ // Custom slow path caller saves. Valid only if indicated by
+ // `has_custom_slow_path_calling_convention_`.
RegisterSet custom_slow_path_caller_saves_;
ART_FRIEND_TEST(RegisterAllocatorTest, ExpectedInRegisterHint);
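
Note: the header changes read as a packing cleanup: the enums are narrowed to uint8_t and the byte-sized flags are grouped together instead of being interleaved with pointer-sized members, so they share padding. A hypothetical before/after illustration of the effect on a typical 64-bit ABI (field names are illustrative, not the real LocationSummary layout):

#include <cstdint>
#include <cstdio>

enum class CallKind : uint8_t { kNoCall, kCallOnMainAndSlowPath, kCallOnSlowPath, kCallOnMainOnly };

// Small members scattered between pointer-sized ones: each byte-sized field
// is padded up to the next 8-byte boundary.
struct Scattered {
  void* inputs;
  CallKind call_kind;
  void* stack_mask;
  bool intrinsified;
  void* output;
  uint32_t register_mask;
};

// Same members with the pointers first and the small fields grouped,
// mirroring the kind of reordering done in locations.h.
struct Grouped {
  void* inputs;
  void* stack_mask;
  void* output;
  CallKind call_kind;
  bool intrinsified;
  uint32_t register_mask;
};

int main() {
  // Typically prints scattered=48 grouped=32 on LP64 targets.
  std::printf("scattered=%zu grouped=%zu\n", sizeof(Scattered), sizeof(Grouped));
  return 0;
}
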
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 752e8b10d1..6b74e7246e 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -1457,18 +1457,17 @@ void HInstruction::ReplaceUsesDominatedBy(HInstruction* dominator,
HInstruction* replacement,
bool strictly_dominated) {
HBasicBlock* dominator_block = dominator->GetBlock();
- std::optional<ArenaBitVector> visited_blocks;
+ BitVectorView<size_t> visited_blocks;
// Lazily compute the dominated blocks to faster calculation of domination afterwards.
auto maybe_generate_visited_blocks = [&visited_blocks, this, dominator_block]() {
- if (visited_blocks.has_value()) {
+ if (visited_blocks.SizeInBits() != 0u) {
+ DCHECK_EQ(visited_blocks.SizeInBits(), GetBlock()->GetGraph()->GetBlocks().size());
return;
}
HGraph* graph = GetBlock()->GetGraph();
- visited_blocks.emplace(graph->GetAllocator(),
- graph->GetBlocks().size(),
- /* expandable= */ false,
- kArenaAllocMisc);
+ visited_blocks = ArenaBitVector::CreateFixedSize(
+ graph->GetAllocator(), graph->GetBlocks().size(), kArenaAllocMisc);
ScopedArenaAllocator allocator(graph->GetArenaStack());
ScopedArenaQueue<const HBasicBlock*> worklist(allocator.Adapter(kArenaAllocMisc));
worklist.push(dominator_block);
@@ -1476,9 +1475,9 @@ void HInstruction::ReplaceUsesDominatedBy(HInstruction* dominator,
while (!worklist.empty()) {
const HBasicBlock* current = worklist.front();
worklist.pop();
- visited_blocks->SetBit(current->GetBlockId());
+ visited_blocks.SetBit(current->GetBlockId());
for (HBasicBlock* dominated : current->GetDominatedBlocks()) {
- if (visited_blocks->IsBitSet(dominated->GetBlockId())) {
+ if (visited_blocks.IsBitSet(dominated->GetBlockId())) {
continue;
}
worklist.push(dominated);
@@ -1501,7 +1500,7 @@ void HInstruction::ReplaceUsesDominatedBy(HInstruction* dominator,
} else {
// Block domination.
maybe_generate_visited_blocks();
- dominated = visited_blocks->IsBitSet(block->GetBlockId());
+ dominated = visited_blocks.IsBitSet(block->GetBlockId());
}
if (dominated) {
@@ -1512,7 +1511,7 @@ void HInstruction::ReplaceUsesDominatedBy(HInstruction* dominator,
// for their inputs.
HBasicBlock* predecessor = block->GetPredecessors()[index];
maybe_generate_visited_blocks();
- if (visited_blocks->IsBitSet(predecessor->GetBlockId())) {
+ if (visited_blocks.IsBitSet(predecessor->GetBlockId())) {
user->ReplaceInput(replacement, index);
}
}
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 772828d9ef..bcf27ae9fa 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -2113,7 +2113,6 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> {
HInstruction* GetPreviousDisregardingMoves() const;
HBasicBlock* GetBlock() const { return block_; }
- ArenaAllocator* GetAllocator() const { return block_->GetGraph()->GetAllocator(); }
void SetBlock(HBasicBlock* block) { block_ = block; }
bool IsInBlock() const { return block_ != nullptr; }
bool IsInLoop() const { return block_->IsInLoop(); }
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index 9867e11f35..0417f04c12 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -218,7 +218,7 @@ static void BoundTypeIn(HInstruction* receiver,
: start_block->GetFirstInstruction();
if (ShouldCreateBoundType(
insert_point, receiver, class_rti, start_instruction, start_block)) {
- bound_type = new (receiver->GetBlock()->GetGraph()->GetAllocator()) HBoundType(receiver);
+ bound_type = new (start_block->GetGraph()->GetAllocator()) HBoundType(receiver);
bound_type->SetUpperBound(class_rti, /* can_be_null= */ false);
start_block->InsertInstructionBefore(bound_type, insert_point);
// To comply with the RTP algorithm, don't type the bound type just yet, it will
diff --git a/compiler/optimizing/ssa_phi_elimination.cc b/compiler/optimizing/ssa_phi_elimination.cc
index 0796acc687..b2a3846dc4 100644
--- a/compiler/optimizing/ssa_phi_elimination.cc
+++ b/compiler/optimizing/ssa_phi_elimination.cc
@@ -139,10 +139,8 @@ bool SsaRedundantPhiElimination::Run() {
}
}
- ArenaBitVector visited_phis_in_cycle(&allocator,
- graph_->GetCurrentInstructionId(),
- /* expandable= */ false,
- kArenaAllocSsaPhiElimination);
+ BitVectorView<size_t> visited_phis_in_cycle = ArenaBitVector::CreateFixedSize(
+ &allocator, graph_->GetCurrentInstructionId(), kArenaAllocSsaPhiElimination);
ScopedArenaVector<HPhi*> cycle_worklist(allocator.Adapter(kArenaAllocSsaPhiElimination));
while (!worklist.empty()) {