Use ScopedArenaAllocator for code generation.
Reuse the memory previously allocated on the ArenaStack by
optimization passes.
This CL handles only the architecture-independent codegen
and slow paths; architecture-dependent codegen allocations
will be moved to the ScopedArenaAllocator in a follow-up.
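
For context, the intended reuse pattern looks roughly like the
hypothetical sketch below (not code from this CL). It assumes the
ArenaStack/ScopedArenaAllocator pairing from
runtime/base/scoped_arena_allocator.h, where each ScopedArenaAllocator
takes a frame on the stack and releases it when it goes out of scope;
the function and variable names here are illustrative only:

  void CompileMethod(ArenaStack* arena_stack) {
    {
      ScopedArenaAllocator pass_allocator(arena_stack);
      // Optimization passes allocate their temporaries from this frame.
    }  // Frame popped; the underlying chunks stay on the ArenaStack.
    {
      ScopedArenaAllocator codegen_allocator(arena_stack);
      // Codegen and slow-path data structures can now reuse the same
      // chunks instead of growing a separate ArenaAllocator.
    }
  }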
Memory needed to compile the two most expensive methods for
aosp_angler-userdebug boot image:
BatteryStats.dumpCheckinLocked(): 19.6MiB -> 18.5MiB (-1189KiB)
BatteryStats.dumpLocked():        39.3MiB -> 37.0MiB (-2379KiB)
Also move the definitions of functions that use bit_vector-inl.h
from bit_vector.h to bit_vector-inl.h.
Test: m test-art-host-gtest
Test: testrunner.py --host --optimizing
Bug: 64312607
Change-Id: I84688c3a5a95bf90f56bd3a150bc31fedc95f29c
diff --git a/runtime/base/bit_vector.h b/runtime/base/bit_vector.h
index 5609067..564092a 100644
--- a/runtime/base/bit_vector.h
+++ b/runtime/base/bit_vector.h
@@ -70,15 +70,8 @@
struct begin_tag { };
struct end_tag { };
- IndexIterator(const BitVector* bit_vector, begin_tag)
- : bit_storage_(bit_vector->GetRawStorage()),
- storage_size_(bit_vector->storage_size_),
- bit_index_(FindIndex(0u)) { }
-
- IndexIterator(const BitVector* bit_vector, end_tag)
- : bit_storage_(bit_vector->GetRawStorage()),
- storage_size_(bit_vector->storage_size_),
- bit_index_(BitSize()) { }
+ IndexIterator(const BitVector* bit_vector, begin_tag);
+ IndexIterator(const BitVector* bit_vector, end_tag);
uint32_t BitSize() const {
return storage_size_ * kWordBits;
@@ -99,13 +92,8 @@
public:
explicit IndexContainer(const BitVector* bit_vector) : bit_vector_(bit_vector) { }
- IndexIterator begin() const {
- return IndexIterator(bit_vector_, IndexIterator::begin_tag());
- }
-
- IndexIterator end() const {
- return IndexIterator(bit_vector_, IndexIterator::end_tag());
- }
+ IndexIterator begin() const;
+ IndexIterator end() const;
private:
const BitVector* const bit_vector_;
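
The definitions removed above presumably land in bit_vector-inl.h as
out-of-line inline functions. A sketch reconstructed from the removed
lines (the exact form in the CL may differ):

  inline BitVector::IndexIterator::IndexIterator(const BitVector* bit_vector, begin_tag)
      : bit_storage_(bit_vector->GetRawStorage()),
        storage_size_(bit_vector->storage_size_),
        bit_index_(FindIndex(0u)) { }

  inline BitVector::IndexIterator::IndexIterator(const BitVector* bit_vector, end_tag)
      : bit_storage_(bit_vector->GetRawStorage()),
        storage_size_(bit_vector->storage_size_),
        bit_index_(BitSize()) { }

  inline BitVector::IndexIterator BitVector::IndexContainer::begin() const {
    return IndexIterator(bit_vector_, IndexIterator::begin_tag());
  }

  inline BitVector::IndexIterator BitVector::IndexContainer::end() const {
    return IndexIterator(bit_vector_, IndexIterator::end_tag());
  }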