author Vladimir Marko <vmarko@google.com> 2025-02-19 09:55:20 +0000
committer Treehugger Robot <android-test-infra-autosubmit@system.gserviceaccount.com> 2025-02-20 04:26:43 -0800
commit 16a42183aff1275ad238bf5fb2e6416ecebc16cd (patch)
tree 3527e3cbb618a34ae511a9463f987b21755e2bb2 /compiler/optimizing/code_sinking.cc
parent c6aa6f7f1c7ea91b512d98caf493b8ad93e983b2 (diff)
Introduce `BitVectorView<>`.
Initially implement only simple bit getters and setters, and use the new class
to avoid the overheads of `ArenaBitVector` in a few places.

Test: m test-art-host-gtest
Test: testrunner.py --host --optimizing
Bug: 331194861
Change-Id: Ie29dfcd02286770e07131e43b65e6e9fb044a924
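For context, here is a minimal sketch of the kind of non-owning view this change introduces, limited to simple bit getters and setters. It is an illustrative approximation only: the class name `BitVectorViewSketch`, its member functions, and the stand-in storage are assumptions, not the actual `BitVectorView<>` API.

// Illustrative sketch: a non-owning view over pre-allocated bit storage.
// Unlike ArenaBitVector, it carries no allocator and never expands.
#include <cassert>
#include <cstddef>
#include <vector>

template <typename WordType = size_t>
class BitVectorViewSketch {
 public:
  BitVectorViewSketch(WordType* storage, size_t size_in_bits)
      : storage_(storage), size_in_bits_(size_in_bits) {}

  // Simple bit getter.
  bool IsBitSet(size_t index) const {
    assert(index < size_in_bits_);
    return ((storage_[index / kBitsPerWord] >> (index % kBitsPerWord)) & 1u) != 0u;
  }

  // Simple bit setter.
  void SetBit(size_t index) {
    assert(index < size_in_bits_);
    storage_[index / kBitsPerWord] |= WordType{1} << (index % kBitsPerWord);
  }

 private:
  static constexpr size_t kBitsPerWord = sizeof(WordType) * 8u;
  WordType* storage_;      // Not owned; the caller (e.g. an arena) allocates it.
  size_t size_in_bits_;
};

int main() {
  // In the compiler, the storage would come from a ScopedArenaAllocator via
  // something like ArenaBitVector::CreateFixedSize(); a plain vector stands
  // in for it in this self-contained example.
  constexpr size_t kBits = 100;
  std::vector<size_t> storage((kBits + sizeof(size_t) * 8 - 1) / (sizeof(size_t) * 8), 0u);
  BitVectorViewSketch<size_t> bits(storage.data(), kBits);
  bits.SetBit(42);
  assert(bits.IsBitSet(42) && !bits.IsBitSet(41));
  return 0;
}

Because the view is a couple of words in size and owns nothing, it can be passed by value, which is what the diff below does in place of `const ArenaBitVector&` parameters.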
Diffstat (limited to 'compiler/optimizing/code_sinking.cc')
-rw-r--r--  compiler/optimizing/code_sinking.cc | 23
1 file changed, 12 insertions(+), 11 deletions(-)
diff --git a/compiler/optimizing/code_sinking.cc b/compiler/optimizing/code_sinking.cc
index 0abcaea719..b1d14132c4 100644
--- a/compiler/optimizing/code_sinking.cc
+++ b/compiler/optimizing/code_sinking.cc
@@ -150,8 +150,8 @@ static bool IsInterestingInstruction(HInstruction* instruction) {
}
static void AddInstruction(HInstruction* instruction,
- const ArenaBitVector& processed_instructions,
- const ArenaBitVector& discard_blocks,
+ BitVectorView<size_t> processed_instructions,
+ BitVectorView<size_t> discard_blocks,
ScopedArenaVector<HInstruction*>* worklist) {
// Add to the work list if the instruction is not in the list of blocks
// to discard, hasn't been already processed and is of interest.
@@ -163,8 +163,8 @@ static void AddInstruction(HInstruction* instruction,
}
static void AddInputs(HInstruction* instruction,
- const ArenaBitVector& processed_instructions,
- const ArenaBitVector& discard_blocks,
+ BitVectorView<size_t> processed_instructions,
+ BitVectorView<size_t> discard_blocks,
ScopedArenaVector<HInstruction*>* worklist) {
for (HInstruction* input : instruction->GetInputs()) {
AddInstruction(input, processed_instructions, discard_blocks, worklist);
@@ -172,8 +172,8 @@ static void AddInputs(HInstruction* instruction,
}
static void AddInputs(HBasicBlock* block,
- const ArenaBitVector& processed_instructions,
- const ArenaBitVector& discard_blocks,
+ BitVectorView<size_t> processed_instructions,
+ BitVectorView<size_t> discard_blocks,
ScopedArenaVector<HInstruction*>* worklist) {
for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
AddInputs(it.Current(), processed_instructions, discard_blocks, worklist);
@@ -185,7 +185,7 @@ static void AddInputs(HBasicBlock* block,
static bool ShouldFilterUse(HInstruction* instruction,
HInstruction* user,
- const ArenaBitVector& post_dominated) {
+ BitVectorView<size_t> post_dominated) {
if (instruction->IsNewInstance()) {
return (user->IsInstanceFieldSet() || user->IsConstructorFence()) &&
(user->InputAt(0) == instruction) &&
@@ -204,7 +204,7 @@ static bool ShouldFilterUse(HInstruction* instruction,
// This method is tailored to the sinking algorithm, unlike
// the generic HInstruction::MoveBeforeFirstUserAndOutOfLoops.
static HInstruction* FindIdealPosition(HInstruction* instruction,
- const ArenaBitVector& post_dominated,
+ BitVectorView<size_t> post_dominated,
bool filter = false) {
DCHECK(!instruction->IsPhi()); // Makes no sense for Phi.
@@ -333,9 +333,10 @@ void CodeSinking::SinkCodeToUncommonBranch(HBasicBlock* end_block) {
size_t number_of_instructions = graph_->GetCurrentInstructionId();
ScopedArenaVector<HInstruction*> worklist(allocator.Adapter(kArenaAllocMisc));
- ArenaBitVector processed_instructions(
- &allocator, number_of_instructions, /* expandable= */ false);
- ArenaBitVector post_dominated(&allocator, graph_->GetBlocks().size(), /* expandable= */ false);
+ BitVectorView<size_t> processed_instructions =
+ ArenaBitVector::CreateFixedSize(&allocator, number_of_instructions);
+ BitVectorView<size_t> post_dominated =
+ ArenaBitVector::CreateFixedSize(&allocator, graph_->GetBlocks().size());
// Step (1): Visit post order to get a subset of blocks post dominated by `end_block`.
// TODO(ngeoffray): Getting the full set of post-dominated should be done by