-rw-r--r--  compiler/optimizing/code_sinking.cc           23
-rw-r--r--  compiler/optimizing/dead_code_elimination.cc  23
-rw-r--r--  compiler/optimizing/nodes.cc                   4
-rw-r--r--  libartbase/base/arena_bit_vector.h            20
-rw-r--r--  libartbase/base/bit_vector.h                  69
-rw-r--r--  libartbase/base/bit_vector_test.cc            88
6 files changed, 198 insertions, 29 deletions
diff --git a/compiler/optimizing/code_sinking.cc b/compiler/optimizing/code_sinking.cc
index 0abcaea719..b1d14132c4 100644
--- a/compiler/optimizing/code_sinking.cc
+++ b/compiler/optimizing/code_sinking.cc
@@ -150,8 +150,8 @@ static bool IsInterestingInstruction(HInstruction* instruction) {
 }
 
 static void AddInstruction(HInstruction* instruction,
-                           const ArenaBitVector& processed_instructions,
-                           const ArenaBitVector& discard_blocks,
+                           BitVectorView<size_t> processed_instructions,
+                           BitVectorView<size_t> discard_blocks,
                            ScopedArenaVector<HInstruction*>* worklist) {
   // Add to the work list if the instruction is not in the list of blocks
   // to discard, hasn't been already processed and is of interest.
@@ -163,8 +163,8 @@ static void AddInstruction(HInstruction* instruction,
 }
 
 static void AddInputs(HInstruction* instruction,
-                      const ArenaBitVector& processed_instructions,
-                      const ArenaBitVector& discard_blocks,
+                      BitVectorView<size_t> processed_instructions,
+                      BitVectorView<size_t> discard_blocks,
                       ScopedArenaVector<HInstruction*>* worklist) {
   for (HInstruction* input : instruction->GetInputs()) {
     AddInstruction(input, processed_instructions, discard_blocks, worklist);
@@ -172,8 +172,8 @@ static void AddInputs(HInstruction* instruction,
 }
 
 static void AddInputs(HBasicBlock* block,
-                      const ArenaBitVector& processed_instructions,
-                      const ArenaBitVector& discard_blocks,
+                      BitVectorView<size_t> processed_instructions,
+                      BitVectorView<size_t> discard_blocks,
                       ScopedArenaVector<HInstruction*>* worklist) {
   for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
     AddInputs(it.Current(), processed_instructions, discard_blocks, worklist);
@@ -185,7 +185,7 @@ static void AddInputs(HBasicBlock* block,
 
 static bool ShouldFilterUse(HInstruction* instruction,
                             HInstruction* user,
-                            const ArenaBitVector& post_dominated) {
+                            BitVectorView<size_t> post_dominated) {
   if (instruction->IsNewInstance()) {
     return (user->IsInstanceFieldSet() || user->IsConstructorFence()) &&
            (user->InputAt(0) == instruction) &&
@@ -204,7 +204,7 @@ static bool ShouldFilterUse(HInstruction* instruction,
 // This method is tailored to the sinking algorithm, unlike
 // the generic HInstruction::MoveBeforeFirstUserAndOutOfLoops.
 static HInstruction* FindIdealPosition(HInstruction* instruction,
-                                       const ArenaBitVector& post_dominated,
+                                       BitVectorView<size_t> post_dominated,
                                        bool filter = false) {
   DCHECK(!instruction->IsPhi());  // Makes no sense for Phi.
 
@@ -333,9 +333,10 @@ void CodeSinking::SinkCodeToUncommonBranch(HBasicBlock* end_block) {
   size_t number_of_instructions = graph_->GetCurrentInstructionId();
   ScopedArenaVector<HInstruction*> worklist(allocator.Adapter(kArenaAllocMisc));
-  ArenaBitVector processed_instructions(
-      &allocator, number_of_instructions, /* expandable= */ false);
-  ArenaBitVector post_dominated(&allocator, graph_->GetBlocks().size(), /* expandable= */ false);
+  BitVectorView<size_t> processed_instructions =
+      ArenaBitVector::CreateFixedSize(&allocator, number_of_instructions);
+  BitVectorView<size_t> post_dominated =
+      ArenaBitVector::CreateFixedSize(&allocator, graph_->GetBlocks().size());
 
   // Step (1): Visit post order to get a subset of blocks post dominated by `end_block`.
   // TODO(ngeoffray): Getting the full set of post-dominated should be done by
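
Note: the signature change above from `const ArenaBitVector&` to a by-value `BitVectorView<size_t>` is sound because the view is a non-owning {storage pointer, bit count} pair, so a copy aliases the caller's bits, much like `std::string_view` aliases a string. A minimal standalone sketch of that semantics (the `BitSpan` and `MarkBlock` names are illustrative, not part of this change):

    #include <cstddef>

    // Non-owning view: copying it copies only the pointer and the size.
    struct BitSpan {
      size_t* storage;
      size_t size_in_bits;
      void SetBit(size_t i) {
        storage[i / (8 * sizeof(size_t))] |= size_t{1} << (i % (8 * sizeof(size_t)));
      }
    };

    // Receives the view by value, yet the caller observes the update,
    // because both copies point at the same underlying words.
    void MarkBlock(BitSpan post_dominated, size_t block_id) {
      post_dominated.SetBit(block_id);
    }
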
diff --git a/compiler/optimizing/dead_code_elimination.cc b/compiler/optimizing/dead_code_elimination.cc
index b8cd39e77f..9955982309 100644
--- a/compiler/optimizing/dead_code_elimination.cc
+++ b/compiler/optimizing/dead_code_elimination.cc
@@ -29,21 +29,21 @@
 
 namespace art HIDDEN {
 
-static void MarkReachableBlocks(HGraph* graph, ArenaBitVector* visited) {
+static void MarkReachableBlocks(HGraph* graph, BitVectorView<size_t> visited) {
   // Use local allocator for allocating memory.
   ScopedArenaAllocator allocator(graph->GetArenaStack());
 
   ScopedArenaVector<HBasicBlock*> worklist(allocator.Adapter(kArenaAllocDCE));
   constexpr size_t kDefaultWorlistSize = 8;
   worklist.reserve(kDefaultWorlistSize);
-  visited->SetBit(graph->GetEntryBlock()->GetBlockId());
+  visited.SetBit(graph->GetEntryBlock()->GetBlockId());
   worklist.push_back(graph->GetEntryBlock());
 
   while (!worklist.empty()) {
     HBasicBlock* block = worklist.back();
     worklist.pop_back();
     int block_id = block->GetBlockId();
-    DCHECK(visited->IsBitSet(block_id));
+    DCHECK(visited.IsBitSet(block_id));
 
     ArrayRef<HBasicBlock* const> live_successors(block->GetSuccessors());
     HInstruction* last_instruction = block->GetLastInstruction();
@@ -83,8 +83,8 @@ static void MarkReachableBlocks(HGraph* graph, ArenaBitVector* visited) {
 
     for (HBasicBlock* successor : live_successors) {
       // Add only those successors that have not been visited yet.
-      if (!visited->IsBitSet(successor->GetBlockId())) {
-        visited->SetBit(successor->GetBlockId());
+      if (!visited.IsBitSet(successor->GetBlockId())) {
+        visited.SetBit(successor->GetBlockId());
         worklist.push_back(successor);
       }
     }
@@ -799,8 +799,8 @@ bool HDeadCodeElimination::RemoveEmptyIfs() {
   //      5
   // where 2, 3, and 4 are single HGoto blocks, and block 5 has Phis.
   ScopedArenaAllocator allocator(graph_->GetArenaStack());
-  ArenaBitVector visited_blocks(
-      &allocator, graph_->GetBlocks().size(), /*expandable=*/ false, kArenaAllocDCE);
+  BitVectorView<size_t> visited_blocks =
+      ArenaBitVector::CreateFixedSize(&allocator, graph_->GetBlocks().size(), kArenaAllocDCE);
   HBasicBlock* merge_true = true_block;
   visited_blocks.SetBit(merge_true->GetBlockId());
   while (merge_true->IsSingleGoto()) {
@@ -822,8 +822,8 @@ bool HDeadCodeElimination::RemoveEmptyIfs() {
 
   // Data structures to help remove now-dead instructions.
   ScopedArenaQueue<HInstruction*> maybe_remove(allocator.Adapter(kArenaAllocDCE));
-  ArenaBitVector visited(
-      &allocator, graph_->GetCurrentInstructionId(), /*expandable=*/ false, kArenaAllocDCE);
+  BitVectorView<size_t> visited = ArenaBitVector::CreateFixedSize(
+      &allocator, graph_->GetCurrentInstructionId(), kArenaAllocDCE);
   maybe_remove.push(if_instr->InputAt(0));
   visited.SetBit(if_instr->GetId());
 
@@ -874,9 +874,10 @@ bool HDeadCodeElimination::RemoveDeadBlocks(bool force_recomputation,
   ScopedArenaAllocator allocator(graph_->GetArenaStack());
 
   // Classify blocks as reachable/unreachable.
-  ArenaBitVector live_blocks(&allocator, graph_->GetBlocks().size(), false, kArenaAllocDCE);
+  BitVectorView<size_t> live_blocks =
+      ArenaBitVector::CreateFixedSize(&allocator, graph_->GetBlocks().size(), kArenaAllocDCE);
 
-  MarkReachableBlocks(graph_, &live_blocks);
+  MarkReachableBlocks(graph_, live_blocks);
 
   bool removed_one_or_more_blocks = false;
   bool rerun_dominance_and_loop_analysis = false;
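
Note: the `MarkReachableBlocks()` change above only swaps the parameter-passing convention; the traversal itself is a standard worklist-driven reachability marking. A compact standalone rendition of the same algorithm (using `std::vector<bool>` in place of the bit vector, purely for illustration):

    #include <cstddef>
    #include <vector>

    // Sketch (not ART code): mark every block reachable from `entry`,
    // the same shape as MarkReachableBlocks() in the diff above.
    void MarkReachable(const std::vector<std::vector<size_t>>& successors,
                       size_t entry,
                       std::vector<bool>& visited) {
      std::vector<size_t> worklist;
      visited[entry] = true;
      worklist.push_back(entry);
      while (!worklist.empty()) {
        size_t block = worklist.back();
        worklist.pop_back();
        for (size_t s : successors[block]) {
          if (!visited[s]) {  // Add only successors not yet visited.
            visited[s] = true;
            worklist.push_back(s);
          }
        }
      }
    }
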
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index fcac6cdf5e..9b5cc50e93 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -68,8 +68,8 @@ void HGraph::FindBackEdges(ArenaBitVector* visited) {
   // Allocate memory from local ScopedArenaAllocator.
   ScopedArenaAllocator allocator(GetArenaStack());
   // Nodes that we're currently visiting, indexed by block id.
-  ArenaBitVector visiting(
-      &allocator, blocks_.size(), /* expandable= */ false, kArenaAllocGraphBuilder);
+  BitVectorView visiting =
+      ArenaBitVector::CreateFixedSize(&allocator, blocks_.size(), kArenaAllocGraphBuilder);
   // Number of successors visited from a given node, indexed by block id.
   ScopedArenaVector<size_t> successors_visited(blocks_.size(),
                                                0u,
diff --git a/libartbase/base/arena_bit_vector.h b/libartbase/base/arena_bit_vector.h
index 52ba24cecf..41d4ff7458 100644
--- a/libartbase/base/arena_bit_vector.h
+++ b/libartbase/base/arena_bit_vector.h
@@ -17,13 +17,15 @@
 #ifndef ART_LIBARTBASE_BASE_ARENA_BIT_VECTOR_H_
 #define ART_LIBARTBASE_BASE_ARENA_BIT_VECTOR_H_
 
+#include <algorithm>
+#include <cstring>
+
 #include "arena_object.h"
 #include "base/arena_allocator.h"
 #include "bit_vector.h"
 
 namespace art {
 
-class ArenaAllocator;
 class ScopedArenaAllocator;
 
 /*
@@ -53,6 +55,22 @@ class ArenaBitVector : public BitVector, public ArenaObject<kArenaAllocGrowableB
   ArenaBitVector(ArenaBitVector&&) = default;
   ArenaBitVector(const ArenaBitVector&) = delete;
+
+  template <typename StorageType = size_t, typename Allocator>
+  static BitVectorView<StorageType> CreateFixedSize(
+      Allocator* allocator, size_t bits, ArenaAllocKind kind = kArenaAllocGrowableBitMap) {
+    static_assert(std::is_same_v<Allocator, ArenaAllocator> ||
+                  std::is_same_v<Allocator, ScopedArenaAllocator>);
+    size_t num_elements = BitVectorView<StorageType>::BitsToWords(bits);
+    StorageType* storage = allocator->template AllocArray<StorageType>(num_elements, kind);
+    if (std::is_same_v<Allocator, ScopedArenaAllocator>) {
+      memset(storage, 0, num_elements * sizeof(StorageType));
+    } else {
+      DCHECK_EQ(std::count(storage, storage + num_elements, static_cast<StorageType>(0)),
+                num_elements);
+    }
+    return {storage, bits};
+  }
 };
 
 }  // namespace art
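
Note: `CreateFixedSize()` above `memset()`s the words only for `ScopedArenaAllocator`; for `ArenaAllocator` it relies on that allocator handing out pre-zeroed memory and merely `DCHECK`s the invariant. A hypothetical call site under those assumptions, using only names that appear in this change (`graph` stands in for whatever compilation context provides the arena stack and block count):

    // Usage sketch: allocate a fixed-size, all-clear bit vector view.
    ScopedArenaAllocator allocator(graph->GetArenaStack());
    BitVectorView<size_t> live_blocks =
        ArenaBitVector::CreateFixedSize(&allocator, graph->GetBlocks().size(), kArenaAllocDCE);
    live_blocks.SetBit(0);  // Fixed capacity: indices must stay below GetBlocks().size().
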
diff --git a/libartbase/base/bit_vector.h b/libartbase/base/bit_vector.h
index ec94efb09f..35a5e7c95b 100644
--- a/libartbase/base/bit_vector.h
+++ b/libartbase/base/bit_vector.h
@@ -20,6 +20,7 @@
 #include <stdint.h>
 
 #include <iterator>
+#include <limits>
 
 #include "bit_utils.h"
 #include "globals.h"
@@ -27,7 +28,59 @@
 namespace art {
 
 class Allocator;
-class ArenaBitVector;
+
+// A bit vector view encapsulating externally-provided fixed-size storage for bits.
+template <typename StorageType = size_t>
+class BitVectorView {
+ public:
+  using WordType = StorageType;
+  static_assert(std::numeric_limits<WordType>::is_integer);
+  static_assert(!std::numeric_limits<WordType>::is_signed);
+  static constexpr size_t kWordBits = BitSizeOf<WordType>();
+  static_assert(IsPowerOfTwo(kWordBits));
+
+  static constexpr size_t BitsToWords(size_t bits) {
+    return (bits + /* round up */ (kWordBits - 1)) / kWordBits;
+  }
+
+  constexpr BitVectorView(WordType* storage, size_t size_in_bits)
+      : storage_(storage), size_in_bits_(size_in_bits) {}
+
+  // The `BitVectorView<>` can be copied and passed to functions by value.
+  // The new copy shall reference the same underlying data, similarly to `std::string_view`.
+  BitVectorView(const BitVectorView& src) = default;
+
+  constexpr size_t SizeInBits() const {
+    return size_in_bits_;
+  }
+
+  void SetBit(size_t index) {
+    DCHECK_LT(index, size_in_bits_);
+    storage_[WordIndex(index)] |= BitMask(index);
+  }
+
+  void ClearBit(size_t index) {
+    DCHECK_LT(index, size_in_bits_);
+    storage_[WordIndex(index)] &= ~BitMask(index);
+  }
+
+  constexpr bool IsBitSet(size_t index) const {
+    DCHECK_LT(index, size_in_bits_);
+    return (storage_[WordIndex(index)] & BitMask(index)) != 0u;
+  }
+
+ private:
+  static constexpr size_t WordIndex(size_t index) {
+    return index >> WhichPowerOf2(kWordBits);
+  }
+
+  static constexpr WordType BitMask(size_t index) {
+    return static_cast<WordType>(1) << (index % kWordBits);
+  }
+
+  WordType* storage_;
+  size_t size_in_bits_;
+};
 
 /*
  * Expanding bitmap. Bits are numbered starting from zero. All operations on a BitVector are
@@ -154,7 +207,7 @@ class BitVector {
     if (idx >= storage_size_ * kWordBits) {
       EnsureSize(idx);
     }
-    storage_[WordIndex(idx)] |= BitMask(idx);
+    AsView().SetBit(idx);
   }
 
   // Mark the specified bit as "unset".
@@ -162,7 +215,7 @@
     // If the index is over the size, we don't have to do anything, it is cleared.
     if (idx < storage_size_ * kWordBits) {
      // Otherwise, go ahead and clear it.
-      storage_[WordIndex(idx)] &= ~BitMask(idx);
+      AsView().ClearBit(idx);
     }
   }
 
@@ -170,7 +223,7 @@
   bool IsBitSet(uint32_t idx) const {
     // If the index is over the size, whether it is expandable or not, this bit does not exist:
     // thus it is not set.
-    return (idx < (storage_size_ * kWordBits)) && IsBitSet(storage_, idx);
+    return (idx < (storage_size_ * kWordBits)) && AsView().IsBitSet(idx);
   }
 
   // Mark all bits bit as "clear".
@@ -291,6 +344,14 @@
    */
   void DumpHelper(const char* prefix, std::ostringstream& buffer) const;
 
+  BitVectorView<uint32_t> AsView() {
+    return {storage_, storage_size_ * kWordBits};
+  }
+
+  BitVectorView<const uint32_t> AsView() const {
+    return {storage_, storage_size_ * kWordBits};
+  }
+
   // Ensure there is space for a bit at idx.
   void EnsureSize(uint32_t idx);
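
Note: since `kWordBits` is a power of two, `WordIndex()` can shift right by `WhichPowerOf2(kWordBits)` instead of dividing, and `BitMask()` reduces the index modulo the word size. Worked example for `StorageType = uint64_t` (so `kWordBits == 64`): bit 75 lands in word `75 >> 6 == 1` under mask `1 << (75 % 64) == 1 << 11`. The same arithmetic as standalone checks:

    #include <cstdint>

    // Bit 75 in a vector of 64-bit words: word index 1, bit 11 within that word.
    static_assert((75u >> 6) == 1u);
    static_assert((UINT64_C(1) << (75u % 64u)) == (UINT64_C(1) << 11));
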
diff --git a/libartbase/base/bit_vector_test.cc b/libartbase/base/bit_vector_test.cc
index 244cff1cb4..15803edd17 100644
--- a/libartbase/base/bit_vector_test.cc
+++ b/libartbase/base/bit_vector_test.cc
@@ -25,6 +25,94 @@
 
 namespace art {
 
+template <typename StorageType, StorageType kWord0, StorageType kWord1>
+void TestBitVectorViewSetBitAndClearBit() {
+  static constexpr StorageType kStorage[2] = { kWord0, kWord1 };
+  static constexpr size_t kSizeInBits = 2 * BitSizeOf<StorageType>();
+  static constexpr BitVectorView<const StorageType> kRbv(kStorage, kSizeInBits);
+  auto get_bit_from_params = [](size_t index) constexpr {
+    StorageType word = (index < BitSizeOf<StorageType>()) ? kWord0 : kWord1;
+    size_t shift = index % BitSizeOf<StorageType>();
+    return (word & (static_cast<StorageType>(1u) << shift)) != 0u;
+  };
+  auto verify_is_bit_set = [get_bit_from_params]() constexpr {
+    for (size_t index = 0; index != kSizeInBits; ++index) {
+      // If the `CHECK_EQ()` fails, the `static_assert` evaluation fails at compile time.
+      CHECK_EQ(get_bit_from_params(index), kRbv.IsBitSet(index)) << index;
+    }
+    return true;
+  };
+  static_assert(verify_is_bit_set());
+
+  auto verify_size_in_bits = []() constexpr {
+    for (size_t size = 0; size != kSizeInBits; ++size) {
+      // If the `CHECK_EQ()` fails, the `static_assert` evaluation fails at compile time.
+      CHECK_EQ(size, BitVectorView(kStorage, size).SizeInBits());
+    }
+    return true;
+  };
+  static_assert(verify_size_in_bits());
+
+  StorageType storage[2] = {0u, 0u};
+  size_t size_in_bits = 2 * BitSizeOf<StorageType>();
+  BitVectorView<StorageType> rbv(storage, size_in_bits);
+  for (size_t index = 0; index != size_in_bits; ++index) {
+    ASSERT_FALSE(rbv.IsBitSet(index));
+  }
+  // Set one bit at a time, then clear it.
+  for (size_t bit_to_set = 0; bit_to_set != size_in_bits; ++bit_to_set) {
+    rbv.SetBit(bit_to_set);
+    for (size_t index = 0; index != size_in_bits; ++index) {
+      ASSERT_EQ(index == bit_to_set, rbv.IsBitSet(index));
+    }
+    rbv.ClearBit(bit_to_set);
+    for (size_t index = 0; index != size_in_bits; ++index) {
+      ASSERT_FALSE(rbv.IsBitSet(index));
+    }
+  }
+  // Set bits for `kWord0` and `kWord1`.
+  for (size_t index = 0; index != size_in_bits; ++index) {
+    if (get_bit_from_params(index)) {
+      rbv.SetBit(index);
+    }
+  }
+  ASSERT_EQ(kWord0, storage[0]);
+  ASSERT_EQ(kWord1, storage[1]);
+  // Clear all bits that are already clear.
+  for (size_t index = 0; index != size_in_bits; ++index) {
+    if (!get_bit_from_params(index)) {
+      rbv.ClearBit(index);
+    }
+  }
+  ASSERT_EQ(kWord0, storage[0]);
+  ASSERT_EQ(kWord1, storage[1]);
+  // Clear all bits that are set.
+  for (size_t index = 0; index != size_in_bits; ++index) {
+    if (get_bit_from_params(index)) {
+      rbv.ClearBit(index);
+    }
+  }
+  ASSERT_EQ(0u, storage[0]);
+  ASSERT_EQ(0u, storage[1]);
+}
+
+TEST(BitVectorView, Uint32T) {
+  TestBitVectorViewSetBitAndClearBit<uint32_t, 0x12345678u, 0x87654321u>();
+}
+
+TEST(BitVectorView, Uint64T) {
+  TestBitVectorViewSetBitAndClearBit<uint64_t,
+                                     UINT64_C(0x1234567890abcdef),
+                                     UINT64_C(0xfedcba0987654321)>();
+}
+
+TEST(BitVectorView, SizeT) {
+  // Note: The constants below are truncated on 32-bit architectures.
+  TestBitVectorViewSetBitAndClearBit<size_t,
+                                     static_cast<size_t>(UINT64_C(0xfedcba0987654321)),
+                                     static_cast<size_t>(UINT64_C(0x1234567890abcdef))>();
+}
+
 TEST(BitVector, Test) {
   const size_t kBits = 32;
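
Note: the tests above exercise `IsBitSet()` and `SizeInBits()` during constant evaluation: a failing `CHECK_EQ()` inside the `constexpr` lambda is not a constant expression, so the enclosing `static_assert` turns any mismatch into a compile-time error. A minimal standalone version of that trick, with `assert()` standing in for ART's `CHECK_EQ()`:

    #include <cassert>

    constexpr bool VerifyLowBits() {
      for (unsigned i = 0; i != 8; ++i) {
        // In constant evaluation a failed assert() makes the call
        // non-constant, so the static_assert below fails to compile.
        assert(((0x0fu >> i) & 1u) == (i < 4 ? 1u : 0u));
      }
      return true;
    }
    static_assert(VerifyLowBits());
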