summaryrefslogtreecommitdiff
path: root/compiler/utils/swap_space.cc
diff options
context:
space:
mode:
author Vladimir Marko <vmarko@google.com> 2017-02-22 10:57:03 +0000
committer Vladimir Marko <vmarko@google.com> 2017-02-22 17:33:17 +0000
commit 3e1070239a920cc94b020a621acf4daeccebb140 (patch)
tree 10149549582fa984d81b737ac236c6bc931fe6db /compiler/utils/swap_space.cc
parent 30e015c442c8033390c30d2f293604723c29bc75 (diff)
Avoid excessive allocation of std::set<> nodes in SwapSpace.
This does not affect the overall memory usage but avoids a lot of deallocations followed by allocations. Measured compilation of a big app using heaptrack: bytes allocated in total (ignoring deallocations): 4.14GB -> 4.04GB; calls to allocation functions: 21662554 -> 19545823. Test: m test-art-host-gtest Test: Manually check that oat file for the big app remains identical. Bug: 34053922 Change-Id: I00568422ba5510550986e29f30bace9ae6245269
Diffstat (limited to 'compiler/utils/swap_space.cc')
-rw-r--r-- compiler/utils/swap_space.cc | 65
1 file changed, 45 insertions, 20 deletions
diff --git a/compiler/utils/swap_space.cc b/compiler/utils/swap_space.cc
index 1a8f567aa1..a1eb08e041 100644
--- a/compiler/utils/swap_space.cc
+++ b/compiler/utils/swap_space.cc
@@ -36,17 +36,17 @@ template <typename FreeBySizeSet>
static void DumpFreeMap(const FreeBySizeSet& free_by_size) {
size_t last_size = static_cast<size_t>(-1);
for (const auto& entry : free_by_size) {
- if (last_size != entry.first) {
- last_size = entry.first;
+ if (last_size != entry.size) {
+ last_size = entry.size;
LOG(INFO) << "Size " << last_size;
}
- LOG(INFO) << " 0x" << std::hex << entry.second->Start()
- << " size=" << std::dec << entry.second->size;
+ LOG(INFO) << " 0x" << std::hex << entry.free_by_start_entry->Start()
+ << " size=" << std::dec << entry.free_by_start_entry->size;
}
}
void SwapSpace::RemoveChunk(FreeBySizeSet::const_iterator free_by_size_pos) {
- auto free_by_start_pos = free_by_size_pos->second;
+ auto free_by_start_pos = free_by_size_pos->free_by_start_entry;
free_by_size_.erase(free_by_size_pos);
free_by_start_.erase(free_by_start_pos);
}
@@ -89,7 +89,7 @@ static size_t CollectFree(const FreeByStartSet& free_by_start, const FreeBySizeS
// Calculate over free_by_size.
size_t sum1 = 0;
for (const auto& entry : free_by_size) {
- sum1 += entry.second->size;
+ sum1 += entry.free_by_start_entry->size;
}
// Calculate over free_by_start.
@@ -110,27 +110,52 @@ void* SwapSpace::Alloc(size_t size) {
// Check the free list for something that fits.
// TODO: Smarter implementation. Global biggest chunk, ...
- SpaceChunk old_chunk;
auto it = free_by_start_.empty()
? free_by_size_.end()
: free_by_size_.lower_bound(FreeBySizeEntry { size, free_by_start_.begin() });
if (it != free_by_size_.end()) {
- old_chunk = *it->second;
- RemoveChunk(it);
+ auto entry = it->free_by_start_entry;
+ SpaceChunk old_chunk = *entry;
+ if (old_chunk.size == size) {
+ RemoveChunk(it);
+ } else {
+ // Try to avoid deallocating and allocating the std::set<> nodes.
+ // This would be much simpler if we could use replace() from Boost.Bimap.
+
+ // The free_by_start_ map contains disjoint intervals ordered by the `ptr`.
+ // Shrinking the interval does not affect the ordering.
+ it->free_by_start_entry->ptr += size;
+ it->free_by_start_entry->size -= size;
+
+ // The free_by_size_ map is ordered by the `size` and then `free_by_start_entry->ptr`.
+ // Adjusting the `ptr` above does not change that ordering but decreasing `size` can
+ // push the node before the previous node(s).
+ if (it == free_by_size_.begin()) {
+ it->size -= size;
+ } else {
+ auto prev = it;
+ --prev;
+ FreeBySizeEntry new_value(old_chunk.size - size, entry);
+ if (free_by_size_.key_comp()(*prev, new_value)) {
+ it->size -= size;
+ } else {
+ // Changing in place would break the std::set<> ordering, we need to remove and insert.
+ free_by_size_.erase(it);
+ free_by_size_.insert(new_value);
+ }
+ }
+ }
+ return old_chunk.ptr;
} else {
// Not a big enough free chunk, need to increase file size.
- old_chunk = NewFileChunk(size);
- }
-
- void* ret = old_chunk.ptr;
-
- if (old_chunk.size != size) {
- // Insert the remainder.
- SpaceChunk new_chunk = { old_chunk.ptr + size, old_chunk.size - size };
- InsertChunk(new_chunk);
+ SpaceChunk new_chunk = NewFileChunk(size);
+ if (new_chunk.size != size) {
+ // Insert the remainder.
+ SpaceChunk remainder = { new_chunk.ptr + size, new_chunk.size - size };
+ InsertChunk(remainder);
+ }
+ return new_chunk.ptr;
}
-
- return ret;
}
SwapSpace::SpaceChunk SwapSpace::NewFileChunk(size_t min_size) {