summary refs log tree commit diff
diff options
context:
space:
mode:
author Vladimir Marko <vmarko@google.com> 2019-02-05 16:35:01 +0000
committer Vladimir Marko <vmarko@google.com> 2019-02-05 16:35:01 +0000
commit 22e22236cc12c977c01a8d10d4cc0906fc5ead2e (patch)
tree 1df2e62e339e51b58e682df9a668ff5f3099dfe5
parent 7909e1e4cc741b38b25328e2f9077beb7ecd018b (diff)
ART: Use C++17 extract/modify/insert pattern.
Test: m test-art-host-gtest
Test: testrunner.py --host --optimizing
Bug: 123750182
Change-Id: I2721948a566cc11d351324d173b4e1c8e5e54f53
-rw-r--r-- compiler/utils/swap_space.cc 26
-rw-r--r-- libartbase/base/mem_map.cc 12
2 files changed, 11 insertions, 27 deletions
diff --git a/compiler/utils/swap_space.cc b/compiler/utils/swap_space.cc
index dee83d1c71..841ff1c58d 100644
--- a/compiler/utils/swap_space.cc
+++ b/compiler/utils/swap_space.cc
@@ -115,12 +115,11 @@ void* SwapSpace::Alloc(size_t size) {
? free_by_size_.end()
: free_by_size_.lower_bound(FreeBySizeEntry { size, free_by_start_.begin() });
if (it != free_by_size_.end()) {
- auto entry = it->free_by_start_entry;
- SpaceChunk old_chunk = *entry;
+ SpaceChunk old_chunk = *it->free_by_start_entry;
if (old_chunk.size == size) {
RemoveChunk(it);
} else {
- // Try to avoid deallocating and allocating the std::set<> nodes.
+ // Avoid deallocating and allocating the std::set<> nodes.
// This would be much simpler if we could use replace() from Boost.Bimap.
// The free_by_start_ map contains disjoint intervals ordered by the `ptr`.
@@ -128,24 +127,9 @@ void* SwapSpace::Alloc(size_t size) {
it->free_by_start_entry->ptr += size;
it->free_by_start_entry->size -= size;
- // The free_by_size_ map is ordered by the `size` and then `free_by_start_entry->ptr`.
- // Adjusting the `ptr` above does not change that ordering but decreasing `size` can
- // push the node before the previous node(s).
- if (it == free_by_size_.begin()) {
- it->size -= size;
- } else {
- auto prev = it;
- --prev;
- FreeBySizeEntry new_value(old_chunk.size - size, entry);
- if (free_by_size_.key_comp()(*prev, new_value)) {
- it->size -= size;
- } else {
- // Changing in place would break the std::set<> ordering, we need to remove and insert.
- // TODO: When C++17 becomes available, use std::map<>::extract(), modify, insert.
- free_by_size_.erase(it);
- free_by_size_.insert(new_value);
- }
- }
+ auto node = free_by_size_.extract(it);
+ node.value().size -= size;
+ free_by_size_.insert(std::move(node));
}
return old_chunk.ptr;
} else {
diff --git a/libartbase/base/mem_map.cc b/libartbase/base/mem_map.cc
index 28337507d8..ba2a7c6b93 100644
--- a/libartbase/base/mem_map.cc
+++ b/libartbase/base/mem_map.cc
@@ -796,13 +796,13 @@ void MemMap::ReleaseReservedMemory(size_t byte_count) {
// Shrink the reservation MemMap and update its `gMaps` entry.
std::lock_guard<std::mutex> mu(*mem_maps_lock_);
auto it = GetGMapsEntry(*this);
- // TODO: When C++17 becomes available, use std::map<>::extract(), modify, insert.
- gMaps->erase(it);
+ auto node = gMaps->extract(it);
begin_ += byte_count;
size_ -= byte_count;
base_begin_ = begin_;
base_size_ = size_;
- gMaps->emplace(base_begin_, this);
+ node.key() = base_begin_;
+ gMaps->insert(std::move(node));
}
}
@@ -1266,9 +1266,9 @@ void MemMap::AlignBy(size_t size) {
std::lock_guard<std::mutex> mu(*mem_maps_lock_);
if (base_begin < aligned_base_begin) {
auto it = GetGMapsEntry(*this);
- // TODO: When C++17 becomes available, use std::map<>::extract(), modify, insert.
- gMaps->erase(it);
- gMaps->insert(std::make_pair(aligned_base_begin, this));
+ auto node = gMaps->extract(it);
+ node.key() = aligned_base_begin;
+ gMaps->insert(std::move(node));
}
base_begin_ = aligned_base_begin;
base_size_ = aligned_base_size;