Diffstat (limited to 'compiler/optimizing')
-rw-r--r--  compiler/optimizing/code_sinking.cc           23
-rw-r--r--  compiler/optimizing/dead_code_elimination.cc  23
-rw-r--r--  compiler/optimizing/nodes.cc                   4
3 files changed, 26 insertions(+), 24 deletions(-)
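Every hunk below applies the same pattern: `const ArenaBitVector&` parameters and `ArenaBitVector` locals become a `BitVectorView<size_t>` passed by value, obtained from `ArenaBitVector::CreateFixedSize`. Such a view is a non-owning handle over arena-allocated word storage, so copying it costs no more than copying a pointer and a length. A minimal sketch of such a view, purely illustrative and not the real `art::BitVectorView`:

```cpp
#include <cstddef>

// Hypothetical sketch of a fixed-size, non-owning bit-vector view; the
// real art::BitVectorView may differ. It only illustrates why the view
// can be passed by value: it is just a pointer plus a size.
template <typename WordType = size_t>
class BitVectorViewSketch {
 public:
  static constexpr size_t kBitsPerWord = sizeof(WordType) * 8u;

  BitVectorViewSketch(WordType* storage, size_t size_in_bits)
      : storage_(storage), size_in_bits_(size_in_bits) {}

  void SetBit(size_t index) {
    storage_[index / kBitsPerWord] |=
        static_cast<WordType>(1) << (index % kBitsPerWord);
  }

  bool IsBitSet(size_t index) const {
    return ((storage_[index / kBitsPerWord] >> (index % kBitsPerWord)) & 1u) != 0u;
  }

 private:
  WordType* storage_;    // Not owned; the arena manages the lifetime.
  size_t size_in_bits_;  // Fixed at creation; no growth, unlike an expandable ArenaBitVector.
};
```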
diff --git a/compiler/optimizing/code_sinking.cc b/compiler/optimizing/code_sinking.cc
index 0abcaea719..b1d14132c4 100644
--- a/compiler/optimizing/code_sinking.cc
+++ b/compiler/optimizing/code_sinking.cc
@@ -150,8 +150,8 @@ static bool IsInterestingInstruction(HInstruction* instruction) {
}
static void AddInstruction(HInstruction* instruction,
- const ArenaBitVector& processed_instructions,
- const ArenaBitVector& discard_blocks,
+ BitVectorView<size_t> processed_instructions,
+ BitVectorView<size_t> discard_blocks,
ScopedArenaVector<HInstruction*>* worklist) {
// Add to the work list if the instruction is not in the list of blocks
// to discard, hasn't already been processed, and is of interest.
@@ -163,8 +163,8 @@ static void AddInstruction(HInstruction* instruction,
}
static void AddInputs(HInstruction* instruction,
- const ArenaBitVector& processed_instructions,
- const ArenaBitVector& discard_blocks,
+ BitVectorView<size_t> processed_instructions,
+ BitVectorView<size_t> discard_blocks,
ScopedArenaVector<HInstruction*>* worklist) {
for (HInstruction* input : instruction->GetInputs()) {
AddInstruction(input, processed_instructions, discard_blocks, worklist);
@@ -172,8 +172,8 @@ static void AddInputs(HInstruction* instruction,
}
static void AddInputs(HBasicBlock* block,
- const ArenaBitVector& processed_instructions,
- const ArenaBitVector& discard_blocks,
+ BitVectorView<size_t> processed_instructions,
+ BitVectorView<size_t> discard_blocks,
ScopedArenaVector<HInstruction*>* worklist) {
for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
AddInputs(it.Current(), processed_instructions, discard_blocks, worklist);
@@ -185,7 +185,7 @@ static void AddInputs(HBasicBlock* block,
static bool ShouldFilterUse(HInstruction* instruction,
HInstruction* user,
- const ArenaBitVector& post_dominated) {
+ BitVectorView<size_t> post_dominated) {
if (instruction->IsNewInstance()) {
return (user->IsInstanceFieldSet() || user->IsConstructorFence()) &&
(user->InputAt(0) == instruction) &&
@@ -204,7 +204,7 @@ static bool ShouldFilterUse(HInstruction* instruction,
// This method is tailored to the sinking algorithm, unlike
// the generic HInstruction::MoveBeforeFirstUserAndOutOfLoops.
static HInstruction* FindIdealPosition(HInstruction* instruction,
- const ArenaBitVector& post_dominated,
+ BitVectorView<size_t> post_dominated,
bool filter = false) {
DCHECK(!instruction->IsPhi()); // Makes no sense for Phi.
@@ -333,9 +333,10 @@ void CodeSinking::SinkCodeToUncommonBranch(HBasicBlock* end_block) {
size_t number_of_instructions = graph_->GetCurrentInstructionId();
ScopedArenaVector<HInstruction*> worklist(allocator.Adapter(kArenaAllocMisc));
- ArenaBitVector processed_instructions(
- &allocator, number_of_instructions, /* expandable= */ false);
- ArenaBitVector post_dominated(&allocator, graph_->GetBlocks().size(), /* expandable= */ false);
+ BitVectorView<size_t> processed_instructions =
+ ArenaBitVector::CreateFixedSize(&allocator, number_of_instructions);
+ BitVectorView<size_t> post_dominated =
+ ArenaBitVector::CreateFixedSize(&allocator, graph_->GetBlocks().size());
// Step (1): Visit post order to get a subset of blocks post dominated by `end_block`.
// TODO(ngeoffray): Getting the full set of post-dominated should be done by
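The allocation sites above pair the new views with `ArenaBitVector::CreateFixedSize`. A hedged sketch of what such a factory amounts to, reusing `BitVectorViewSketch` from above; the `WordArenaSketch` allocator is hypothetical and only mirrors the shape of handing out a zeroed, fixed-size view:

```cpp
#include <cstddef>
#include <vector>

// Hypothetical arena stand-in: owns the word storage and hands out
// non-owning views. ArenaBitVector::CreateFixedSize presumably carves
// its words out of the scoped arena instead; only the shape matters.
class WordArenaSketch {
 public:
  BitVectorViewSketch<size_t> CreateFixedSize(size_t size_in_bits) {
    constexpr size_t kBitsPerWord = BitVectorViewSketch<size_t>::kBitsPerWord;
    size_t num_words = (size_in_bits + kBitsPerWord - 1) / kBitsPerWord;
    storage_.emplace_back(num_words);  // Value-initialized: all bits zero.
    return BitVectorViewSketch<size_t>(storage_.back().data(), size_in_bits);
  }

 private:
  std::vector<std::vector<size_t>> storage_;  // Keeps every allocation alive.
};
```

Note that nothing here can grow: every site in this change sizes its vector up front, from `GetCurrentInstructionId()` or `GetBlocks().size()`, which is why the old `/* expandable= */ false` arguments disappear.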
diff --git a/compiler/optimizing/dead_code_elimination.cc b/compiler/optimizing/dead_code_elimination.cc
index b8cd39e77f..9955982309 100644
--- a/compiler/optimizing/dead_code_elimination.cc
+++ b/compiler/optimizing/dead_code_elimination.cc
@@ -29,21 +29,21 @@
namespace art HIDDEN {
-static void MarkReachableBlocks(HGraph* graph, ArenaBitVector* visited) {
+static void MarkReachableBlocks(HGraph* graph, BitVectorView<size_t> visited) {
// Use local allocator for allocating memory.
ScopedArenaAllocator allocator(graph->GetArenaStack());
ScopedArenaVector<HBasicBlock*> worklist(allocator.Adapter(kArenaAllocDCE));
constexpr size_t kDefaultWorklistSize = 8;
worklist.reserve(kDefaultWorklistSize);
- visited->SetBit(graph->GetEntryBlock()->GetBlockId());
+ visited.SetBit(graph->GetEntryBlock()->GetBlockId());
worklist.push_back(graph->GetEntryBlock());
while (!worklist.empty()) {
HBasicBlock* block = worklist.back();
worklist.pop_back();
int block_id = block->GetBlockId();
- DCHECK(visited->IsBitSet(block_id));
+ DCHECK(visited.IsBitSet(block_id));
ArrayRef<HBasicBlock* const> live_successors(block->GetSuccessors());
HInstruction* last_instruction = block->GetLastInstruction();
@@ -83,8 +83,8 @@ static void MarkReachableBlocks(HGraph* graph, ArenaBitVector* visited) {
for (HBasicBlock* successor : live_successors) {
// Add only those successors that have not been visited yet.
- if (!visited->IsBitSet(successor->GetBlockId())) {
- visited->SetBit(successor->GetBlockId());
+ if (!visited.IsBitSet(successor->GetBlockId())) {
+ visited.SetBit(successor->GetBlockId());
worklist.push_back(successor);
}
}
@@ -799,8 +799,8 @@ bool HDeadCodeElimination::RemoveEmptyIfs() {
// 5
// where 2, 3, and 4 are single HGoto blocks, and block 5 has Phis.
ScopedArenaAllocator allocator(graph_->GetArenaStack());
- ArenaBitVector visited_blocks(
- &allocator, graph_->GetBlocks().size(), /*expandable=*/ false, kArenaAllocDCE);
+ BitVectorView<size_t> visited_blocks =
+ ArenaBitVector::CreateFixedSize(&allocator, graph_->GetBlocks().size(), kArenaAllocDCE);
HBasicBlock* merge_true = true_block;
visited_blocks.SetBit(merge_true->GetBlockId());
while (merge_true->IsSingleGoto()) {
@@ -822,8 +822,8 @@ bool HDeadCodeElimination::RemoveEmptyIfs() {
// Data structures to help remove now-dead instructions.
ScopedArenaQueue<HInstruction*> maybe_remove(allocator.Adapter(kArenaAllocDCE));
- ArenaBitVector visited(
- &allocator, graph_->GetCurrentInstructionId(), /*expandable=*/ false, kArenaAllocDCE);
+ BitVectorView<size_t> visited = ArenaBitVector::CreateFixedSize(
+ &allocator, graph_->GetCurrentInstructionId(), kArenaAllocDCE);
maybe_remove.push(if_instr->InputAt(0));
visited.SetBit(if_instr->GetId());
@@ -874,9 +874,10 @@ bool HDeadCodeElimination::RemoveDeadBlocks(bool force_recomputation,
ScopedArenaAllocator allocator(graph_->GetArenaStack());
// Classify blocks as reachable/unreachable.
- ArenaBitVector live_blocks(&allocator, graph_->GetBlocks().size(), false, kArenaAllocDCE);
+ BitVectorView<size_t> live_blocks =
+ ArenaBitVector::CreateFixedSize(&allocator, graph_->GetBlocks().size(), kArenaAllocDCE);
- MarkReachableBlocks(graph_, &live_blocks);
+ MarkReachableBlocks(graph_, live_blocks);
bool removed_one_or_more_blocks = false;
bool rerun_dominance_and_loop_analysis = false;
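MarkReachableBlocks above is a classic worklist traversal over block successors, with the bit vector preventing re-enqueueing; the real pass additionally narrows `live_successors` for always-taken branches, omitted here. The same shape in standalone form, with a hypothetical simplified `Block` type:

```cpp
#include <vector>

// Simplified stand-in for HBasicBlock, for illustration only.
struct Block {
  int id;
  std::vector<Block*> successors;
};

// Same shape as MarkReachableBlocks in the diff: seed the worklist with
// the entry block, then mark and enqueue every not-yet-visited successor.
// Marking before enqueueing guarantees each block enters the list once.
void MarkReachable(Block* entry, std::vector<bool>& visited) {
  std::vector<Block*> worklist;
  worklist.reserve(8);  // Mirrors kDefaultWorklistSize above.
  visited[entry->id] = true;
  worklist.push_back(entry);
  while (!worklist.empty()) {
    Block* block = worklist.back();
    worklist.pop_back();
    for (Block* successor : block->successors) {
      if (!visited[successor->id]) {
        visited[successor->id] = true;
        worklist.push_back(successor);
      }
    }
  }
}
```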
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index fcac6cdf5e..9b5cc50e93 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -68,8 +68,8 @@ void HGraph::FindBackEdges(ArenaBitVector* visited) {
// Allocate memory from local ScopedArenaAllocator.
ScopedArenaAllocator allocator(GetArenaStack());
// Nodes that we're currently visiting, indexed by block id.
- ArenaBitVector visiting(
- &allocator, blocks_.size(), /* expandable= */ false, kArenaAllocGraphBuilder);
+ BitVectorView<size_t> visiting =
+ ArenaBitVector::CreateFixedSize(&allocator, blocks_.size(), kArenaAllocGraphBuilder);
// Number of successors visited from a given node, indexed by block id.
ScopedArenaVector<size_t> successors_visited(blocks_.size(),
0u,
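For context, `HGraph::FindBackEdges` drives an iterative DFS in which `visiting` tracks the blocks currently on the DFS stack and `successors_visited` records how far each block's successor list has been explored. A standalone sketch of that traversal shape, with a hypothetical `Node` type; an edge into a node still marked `visiting` is a back edge:

```cpp
#include <cstddef>
#include <utility>
#include <vector>

struct Node {
  int id;
  std::vector<Node*> successors;
};

// Iterative DFS in the style of HGraph::FindBackEdges: `visiting` marks
// nodes on the current DFS stack, and an edge into such a node is a
// back edge. `successors_visited` resumes each node where it left off.
void FindBackEdgesSketch(Node* entry,
                         size_t num_nodes,
                         std::vector<std::pair<Node*, Node*>>* back_edges) {
  std::vector<bool> visited(num_nodes, false);
  std::vector<bool> visiting(num_nodes, false);
  std::vector<size_t> successors_visited(num_nodes, 0u);
  std::vector<Node*> stack;

  visited[entry->id] = true;
  visiting[entry->id] = true;
  stack.push_back(entry);
  while (!stack.empty()) {
    Node* node = stack.back();
    size_t& index = successors_visited[node->id];
    if (index == node->successors.size()) {
      visiting[node->id] = false;  // Fully explored; pops off the DFS stack.
      stack.pop_back();
    } else {
      Node* successor = node->successors[index++];
      if (visiting[successor->id]) {
        back_edges->emplace_back(node, successor);  // Ancestor on the stack.
      } else if (!visited[successor->id]) {
        visited[successor->id] = true;
        visiting[successor->id] = true;
        stack.push_back(successor);
      }
    }
  }
}
```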