Merge "Clean up SwapSpace."
diff --git a/compiler/utils/swap_space.cc b/compiler/utils/swap_space.cc
index 42ed881..244a5fe 100644
--- a/compiler/utils/swap_space.cc
+++ b/compiler/utils/swap_space.cc
@@ -18,6 +18,7 @@
#include <algorithm>
#include <numeric>
+#include <sys/mman.h>
#include "base/logging.h"
#include "base/macros.h"
@@ -44,23 +45,17 @@
}
}
-template <typename FreeByStartSet, typename FreeBySizeSet>
-static void RemoveChunk(FreeByStartSet* free_by_start,
- FreeBySizeSet* free_by_size,
- typename FreeBySizeSet::const_iterator free_by_size_pos) {
+void SwapSpace::RemoveChunk(FreeBySizeSet::const_iterator free_by_size_pos) {
auto free_by_start_pos = free_by_size_pos->second;
- free_by_size->erase(free_by_size_pos);
- free_by_start->erase(free_by_start_pos);
+ free_by_size_.erase(free_by_size_pos);
+ free_by_start_.erase(free_by_start_pos);
}
-template <typename FreeByStartSet, typename FreeBySizeSet>
-static void InsertChunk(FreeByStartSet* free_by_start,
- FreeBySizeSet* free_by_size,
- const SpaceChunk& chunk) {
+inline void SwapSpace::InsertChunk(const SpaceChunk& chunk) {
DCHECK_NE(chunk.size, 0u);
- auto insert_result = free_by_start->insert(chunk);
+ auto insert_result = free_by_start_.insert(chunk);
DCHECK(insert_result.second);
- free_by_size->emplace(chunk.size, insert_result.first);
+ free_by_size_.emplace(chunk.size, insert_result.first);
}
SwapSpace::SwapSpace(int fd, size_t initial_size)
@@ -69,10 +64,18 @@
lock_("SwapSpace lock", static_cast<LockLevel>(LockLevel::kDefaultMutexLevel - 1)) {
// Assume that the file is unlinked.
- InsertChunk(&free_by_start_, &free_by_size_, NewFileChunk(initial_size));
+ InsertChunk(NewFileChunk(initial_size));
}
SwapSpace::~SwapSpace() {
+ // Unmap all mmapped chunks. Nothing should be allocated anymore at
+ // this point, so free_by_start_ should contain only full-size chunks.
+ for (const SpaceChunk& chunk : free_by_start_) {
+ if (munmap(chunk.ptr, chunk.size) != 0) {
+ PLOG(ERROR) << "Failed to unmap swap space chunk at "
+ << static_cast<const void*>(chunk.ptr) << " size=" << chunk.size;
+ }
+ }
// All arenas are backed by the same file. Just close the descriptor.
close(fd_);
}
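
Note: the unmap loop above relies on every allocation having been freed and
coalesced back into free_by_start_ before destruction. A minimal usage sketch
of that lifetime ordering, assuming the SwapAllocator(SwapSpace*) constructor
present elsewhere in this header; the temp-file setup is illustrative, not
part of the patch:

    #include <fcntl.h>
    #include <unistd.h>
    #include "swap_space.h"

    void UseSwapSpace() {
      int fd = open("/data/local/tmp/.swap", O_RDWR | O_CREAT, 0600);
      unlink("/data/local/tmp/.swap");  // Unlinked: file lives only through fd.
      {
        art::SwapSpace swap(fd, 1u * 1024 * 1024);
        art::SwapAllocator<uint8_t> allocator(&swap);
        {
          art::SwapVector<uint8_t> buffer(allocator);
          buffer.resize(4096);  // Storage comes from a file-backed mmapped chunk.
        }  // buffer's chunk is freed and coalesced back into free_by_start_.
      }  // ~SwapSpace() munmaps the remaining, all-free chunks and closes fd.
    }
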
@@ -113,7 +116,7 @@
: free_by_size_.lower_bound(FreeBySizeEntry { size, free_by_start_.begin() });
if (it != free_by_size_.end()) {
old_chunk = *it->second;
- RemoveChunk(&free_by_start_, &free_by_size_, it);
+ RemoveChunk(it);
} else {
// Not a big enough free chunk, need to increase file size.
old_chunk = NewFileChunk(size);
@@ -124,13 +127,13 @@
if (old_chunk.size != size) {
// Insert the remainder.
SpaceChunk new_chunk = { old_chunk.ptr + size, old_chunk.size - size };
- InsertChunk(&free_by_start_, &free_by_size_, new_chunk);
+ InsertChunk(new_chunk);
}
return ret;
}
-SpaceChunk SwapSpace::NewFileChunk(size_t min_size) {
+SwapSpace::SpaceChunk SwapSpace::NewFileChunk(size_t min_size) {
#if !defined(__APPLE__)
size_t next_part = std::max(RoundUp(min_size, kPageSize), RoundUp(kMininumMapSize, kPageSize));
int result = TEMP_FAILURE_RETRY(ftruncate64(fd_, size_ + next_part));
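
The best-fit lookup in Alloc() above works because free_by_size_ is ordered by
(size, start address): lower_bound with the requested size and
free_by_start_.begin() lands on the smallest free chunk that still fits. A
self-contained sketch of the same technique; all names here are illustrative,
not part of the patch:

    #include <cstddef>
    #include <cstdint>
    #include <set>
    #include <utility>

    struct Chunk { uint8_t* ptr; size_t size; };
    struct ByPtr {
      bool operator()(const Chunk& a, const Chunk& b) const { return a.ptr < b.ptr; }
    };
    using FreeByStart = std::set<Chunk, ByPtr>;
    using SizeEntry = std::pair<size_t, FreeByStart::const_iterator>;
    struct BySize {
      bool operator()(const SizeEntry& a, const SizeEntry& b) const {
        return a.first != b.first ? a.first < b.first : a.second->ptr < b.second->ptr;
      }
    };
    using FreeBySize = std::set<SizeEntry, BySize>;

    uint8_t* BestFitAlloc(FreeByStart& by_start, FreeBySize& by_size, size_t size) {
      auto it = by_size.lower_bound(SizeEntry{size, by_start.begin()});
      if (it == by_size.end()) {
        return nullptr;  // No free chunk fits; SwapSpace grows the file instead.
      }
      Chunk old_chunk = *it->second;
      auto start_pos = it->second;
      by_size.erase(it);          // Mirrors RemoveChunk(): size index first,
      by_start.erase(start_pos);  // then the by-start set.
      if (old_chunk.size != size) {
        // Give the unused tail back, mirroring the "insert the remainder" step.
        Chunk rest{old_chunk.ptr + size, old_chunk.size - size};
        by_size.emplace(rest.size, by_start.insert(rest).first);
      }
      return old_chunk.ptr;
    }
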
@@ -159,7 +162,7 @@
}
// TODO: Full coalescing.
-void SwapSpace::Free(void* ptrV, size_t size) {
+void SwapSpace::Free(void* ptr, size_t size) {
MutexLock lock(Thread::Current(), lock_);
size = RoundUp(size, 8U);
@@ -168,7 +171,7 @@
free_before = CollectFree(free_by_start_, free_by_size_);
}
- SpaceChunk chunk = { reinterpret_cast<uint8_t*>(ptrV), size };
+ SpaceChunk chunk = { reinterpret_cast<uint8_t*>(ptr), size };
auto it = free_by_start_.lower_bound(chunk);
if (it != free_by_start_.begin()) {
auto prev = it;
@@ -180,7 +183,7 @@
chunk.ptr -= prev->size;
auto erase_pos = free_by_size_.find(FreeBySizeEntry { prev->size, prev });
DCHECK(erase_pos != free_by_size_.end());
- RemoveChunk(&free_by_start_, &free_by_size_, erase_pos);
+ RemoveChunk(erase_pos);
// "prev" is invalidated but "it" remains valid.
}
}
@@ -191,11 +194,11 @@
chunk.size += it->size;
auto erase_pos = free_by_size_.find(FreeBySizeEntry { it->size, it });
DCHECK(erase_pos != free_by_size_.end());
- RemoveChunk(&free_by_start_, &free_by_size_, erase_pos);
+ RemoveChunk(erase_pos);
// "it" is invalidated but we don't need it anymore.
}
}
- InsertChunk(&free_by_start_, &free_by_size_, chunk);
+ InsertChunk(chunk);
if (kCheckFreeMaps) {
size_t free_after = CollectFree(free_by_start_, free_by_size_);
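
Continuing the illustrative types from the allocation sketch above, the
neighbor coalescing in Free() amounts to: find the first free chunk at or
after the freed pointer, merge with the predecessor if it ends exactly at the
freed block, merge with the successor if the freed block ends exactly at its
start, then insert the merged chunk. A sketch, not the actual SwapSpace code:

    void CoalescingFree(FreeByStart& by_start, FreeBySize& by_size,
                        uint8_t* ptr, size_t size) {
      Chunk chunk{ptr, size};
      auto it = by_start.lower_bound(chunk);  // First free chunk at or after ptr.
      if (it != by_start.begin()) {
        auto prev = it;
        --prev;
        if (prev->ptr + prev->size == chunk.ptr) {  // Predecessor is adjacent.
          chunk.ptr = prev->ptr;
          chunk.size += prev->size;
          by_size.erase(SizeEntry{prev->size, prev});
          by_start.erase(prev);  // "prev" is invalidated but "it" remains valid.
        }
      }
      if (it != by_start.end() && chunk.ptr + chunk.size == it->ptr) {
        chunk.size += it->size;  // Successor is adjacent too.
        by_size.erase(SizeEntry{it->size, it});
        by_start.erase(it);
      }
      by_size.emplace(chunk.size, by_start.insert(chunk).first);
    }
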
diff --git a/compiler/utils/swap_space.h b/compiler/utils/swap_space.h
index 9127b6b..b659f1d 100644
--- a/compiler/utils/swap_space.h
+++ b/compiler/utils/swap_space.h
@@ -19,42 +19,17 @@
#include <cstdlib>
#include <list>
#include <set>
+#include <vector>
#include <stdint.h>
#include <stddef.h>
-#include "base/debug_stack.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex.h"
-#include "mem_map.h"
namespace art {
-// Chunk of space.
-struct SpaceChunk {
- uint8_t* ptr;
- size_t size;
-
- uintptr_t Start() const {
- return reinterpret_cast<uintptr_t>(ptr);
- }
- uintptr_t End() const {
- return reinterpret_cast<uintptr_t>(ptr) + size;
- }
-};
-
-inline bool operator==(const SpaceChunk& lhs, const SpaceChunk& rhs) {
- return (lhs.size == rhs.size) && (lhs.ptr == rhs.ptr);
-}
-
-class SortChunkByPtr {
- public:
- bool operator()(const SpaceChunk& a, const SpaceChunk& b) const {
- return reinterpret_cast<uintptr_t>(a.ptr) < reinterpret_cast<uintptr_t>(b.ptr);
- }
-};
-
// An arena pool that creates arenas backed by an mmapped file.
class SwapSpace {
public:
@@ -68,17 +43,27 @@
}
private:
- SpaceChunk NewFileChunk(size_t min_size) REQUIRES(lock_);
+ // Chunk of space.
+ struct SpaceChunk {
+ uint8_t* ptr;
+ size_t size;
- int fd_;
- size_t size_;
- std::list<SpaceChunk> maps_;
+ uintptr_t Start() const {
+ return reinterpret_cast<uintptr_t>(ptr);
+ }
+ uintptr_t End() const {
+ return reinterpret_cast<uintptr_t>(ptr) + size;
+ }
+ };
- // NOTE: Boost.Bimap would be useful for the two following members.
+ class SortChunkByPtr {
+ public:
+ bool operator()(const SpaceChunk& a, const SpaceChunk& b) const {
+ return reinterpret_cast<uintptr_t>(a.ptr) < reinterpret_cast<uintptr_t>(b.ptr);
+ }
+ };
- // Map start of a free chunk to its size.
typedef std::set<SpaceChunk, SortChunkByPtr> FreeByStartSet;
- FreeByStartSet free_by_start_ GUARDED_BY(lock_);
// Map size to an iterator to free_by_start_'s entry.
typedef std::pair<size_t, FreeByStartSet::const_iterator> FreeBySizeEntry;
@@ -92,6 +77,21 @@
}
};
typedef std::set<FreeBySizeEntry, FreeBySizeComparator> FreeBySizeSet;
+
+ SpaceChunk NewFileChunk(size_t min_size) REQUIRES(lock_);
+
+ void RemoveChunk(FreeBySizeSet::const_iterator free_by_size_pos) REQUIRES(lock_);
+ void InsertChunk(const SpaceChunk& chunk) REQUIRES(lock_);
+
+ int fd_;
+ size_t size_;
+ std::list<SpaceChunk> maps_;
+
+ // NOTE: Boost.Bimap would be useful for the two following members.
+
+ // Free chunks ordered by start address.
+ FreeByStartSet free_by_start_ GUARDED_BY(lock_);
+ // Free chunks ordered by size.
FreeBySizeSet free_by_size_ GUARDED_BY(lock_);
mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
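
Nesting SpaceChunk in the private section here is why the definition in
swap_space.cc above now spells its return type SwapSpace::SpaceChunk: in an
out-of-class member definition, a leading return type is not looked up in
class scope. A standalone illustration with hypothetical names:

    class Outer {
      struct Nested { int value; };
      Nested Make();  // Inside the class, "Nested" is found by ordinary lookup.
    };

    // Outside the class the return type must be qualified...
    Outer::Nested Outer::Make() { return Nested{1}; }

    // ...or a trailing return type can be used instead, since everything after
    // the function name is looked up in class scope:
    //   auto Outer::Make() -> Nested { return Nested{1}; }
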
@@ -126,6 +126,9 @@
template <typename U>
friend class SwapAllocator;
+
+ template <typename U>
+ friend bool operator==(const SwapAllocator<U>& lhs, const SwapAllocator<U>& rhs);
};
template <typename T>
@@ -201,9 +204,22 @@
template <typename U>
friend class SwapAllocator;
+
+ template <typename U>
+ friend bool operator==(const SwapAllocator<U>& lhs, const SwapAllocator<U>& rhs);
};
template <typename T>
+inline bool operator==(const SwapAllocator<T>& lhs, const SwapAllocator<T>& rhs) {
+ return lhs.swap_space_ == rhs.swap_space_;
+}
+
+template <typename T>
+inline bool operator!=(const SwapAllocator<T>& lhs, const SwapAllocator<T>& rhs) {
+ return !(lhs == rhs);
+}
+
+template <typename T>
using SwapVector = std::vector<T, SwapAllocator<T>>;
template <typename T, typename Comparator>
using SwapSet = std::set<T, Comparator, SwapAllocator<T>>;
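
The new equality operators follow the standard allocator requirement that
a == b mean memory allocated by one can be deallocated by the other; here that
holds exactly when both allocators share the same SwapSpace. A hedged usage
sketch, assuming the SwapAllocator(SwapSpace*) constructor from this header
and ART's CHECK macro from base/logging.h:

    #include <utility>
    #include "swap_space.h"

    void MoveBetweenSwapVectors(art::SwapSpace* swap) {
      art::SwapAllocator<int> a(swap);
      art::SwapAllocator<int> b(swap);
      CHECK(a == b);  // Same backing SwapSpace, so storage is interchangeable.

      art::SwapVector<int> src(a);
      art::SwapVector<int> dst(b);
      src.push_back(42);
      // With equal (non-propagating) allocators, move assignment can steal the
      // buffer instead of moving the elements one by one.
      dst = std::move(src);
    }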