Fix an array copy benchmark regression.

Add different page release modes to rosalloc. Empty page runs used to be
madvise(MADV_DONTNEED)'d back to the kernel unconditionally, so reusing
those pages cost zero-fill page faults; the new modes make the release
decision conditional on a run's size and/or its position at the end of
the space, with a 4 MB default size threshold.
Bug: 12064551
Change-Id: Ib837bbd1a2757741a4e2743e0a1272bf46a30252
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index 469b098..8ae61a3 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -37,12 +37,16 @@
size_t RosAlloc::threadLocalFreeBitMapOffsets[kNumOfSizeBrackets];
bool RosAlloc::initialized_ = false;
-RosAlloc::RosAlloc(void* base, size_t capacity)
+RosAlloc::RosAlloc(void* base, size_t capacity,
+ PageReleaseMode page_release_mode, size_t page_release_size_threshold)
: base_(reinterpret_cast<byte*>(base)), footprint_(capacity),
capacity_(capacity),
lock_("rosalloc global lock", kRosAllocGlobalLock),
- bulk_free_lock_("rosalloc bulk free lock", kRosAllocBulkFreeLock) {
+ bulk_free_lock_("rosalloc bulk free lock", kRosAllocBulkFreeLock),
+ page_release_mode_(page_release_mode),
+ page_release_size_threshold_(page_release_size_threshold) {
DCHECK(RoundUp(capacity, kPageSize) == capacity);
+ CHECK(IsAligned<kPageSize>(page_release_size_threshold_));
if (!initialized_) {
Initialize();
}
@@ -65,7 +69,9 @@
}
free_pages->SetByteSize(this, capacity_);
DCHECK_EQ(capacity_ % kPageSize, static_cast<size_t>(0));
+ DCHECK(free_pages->IsFree());
free_pages->ReleasePages(this);
+ DCHECK(free_pages->IsFree());
free_page_runs_.insert(free_pages);
if (kTraceRosAlloc) {
LOG(INFO) << "RosAlloc::RosAlloc() : Inserted run 0x" << std::hex
@@ -387,7 +393,9 @@
// Insert it.
DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
DCHECK(free_page_runs_.find(fpr) == free_page_runs_.end());
+ DCHECK(fpr->IsFree());
fpr->ReleasePages(this);
+ DCHECK(fpr->IsFree());
free_page_runs_.insert(fpr);
DCHECK(free_page_runs_.find(fpr) != free_page_runs_.end());
if (kTraceRosAlloc) {
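The IsFree() assertions bracketing ReleasePages() protect the debug-only magic
number: ReleasePages() madvise()s the run's payload, and if it ever touched the
run's first page it would wipe the marker that IsFree() validates. A minimal
sketch of that invariant, with the field and constant names assumed for
illustration (not taken from this patch):

    #include <cstdint>

    static constexpr bool kIsDebugBuild = true;   // Illustrative.
    static constexpr uint8_t kMagicNumFree = 43;  // Hypothetical marker value.

    struct FreePageRun {
      uint8_t magic_num_;  // Lives in the run's first page (debug builds only).

      // A free page run is self-consistent iff its marker survived, which is
      // exactly why ReleasePages() must skip the page holding magic_num_.
      bool IsFree() const {
        return !kIsDebugBuild || magic_num_ == kMagicNumFree;
      }
    };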
@@ -404,20 +412,26 @@
MutexLock mu(self, lock_);
r = AllocPages(self, num_pages, kPageMapLargeObject);
}
+ if (UNLIKELY(r == nullptr)) {
+ if (kTraceRosAlloc) {
+ LOG(INFO) << "RosAlloc::AllocLargeObject() : NULL";
+ }
+ return nullptr;
+ }
if (bytes_allocated != NULL) {
*bytes_allocated = num_pages * kPageSize;
}
if (kTraceRosAlloc) {
- if (r != NULL) {
- LOG(INFO) << "RosAlloc::AllocLargeObject() : 0x" << std::hex << reinterpret_cast<intptr_t>(r)
- << "-0x" << (reinterpret_cast<intptr_t>(r) + num_pages * kPageSize)
- << "(" << std::dec << (num_pages * kPageSize) << ")";
- } else {
- LOG(INFO) << "RosAlloc::AllocLargeObject() : NULL";
- }
+ LOG(INFO) << "RosAlloc::AllocLargeObject() : 0x" << std::hex << reinterpret_cast<intptr_t>(r)
+ << "-0x" << (reinterpret_cast<intptr_t>(r) + num_pages * kPageSize)
+ << "(" << std::dec << (num_pages * kPageSize) << ")";
+ }
+ if (!DoesReleaseAllPages()) {
+ // Unless all pages are released (and thus zero-filled by the kernel on reuse), recycled pages may hold stale data.
+ memset(r, 0, size);
}
// Check if the returned memory is really all zero.
- if (kCheckZeroMemory && r != NULL) {
+ if (kCheckZeroMemory) {
byte* bytes = reinterpret_cast<byte*>(r);
for (size_t i = 0; i < size; ++i) {
DCHECK_EQ(bytes[i], 0);
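The new memset compensates for the weaker release policy: before this change
every empty page run was madvise(MADV_DONTNEED)'d, and dropped anonymous pages
are guaranteed to read back zero-filled, so large-object memory was implicitly
zero. Pages that stay resident keep their old contents. A standalone Linux
sketch (not part of the patch) showing the difference:

    #include <sys/mman.h>
    #include <cassert>
    #include <cstddef>
    #include <cstring>

    int main() {
      const size_t kPageSize = 4096;
      void* raw = mmap(nullptr, 2 * kPageSize, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      assert(raw != MAP_FAILED);
      unsigned char* p = static_cast<unsigned char*>(raw);
      memset(p, 0xAB, 2 * kPageSize);
      madvise(p, kPageSize, MADV_DONTNEED);  // Release only the first page.
      assert(p[0] == 0);             // Released page: zero-filled on next touch.
      assert(p[kPageSize] == 0xAB);  // Resident page: stale contents survive.
      return 0;
    }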
@@ -1366,7 +1380,12 @@
size_t fpr_size = fpr->ByteSize(this);
DCHECK(IsAligned<kPageSize>(fpr_size));
void* start = fpr;
- void* end = reinterpret_cast<byte*>(start) + fpr_size;
+ if (kIsDebugBuild) {
+ // In the debug build, the first page of a free page run
+ // contains a magic number for debugging. Exclude it.
+ start = reinterpret_cast<byte*>(fpr) + kPageSize;
+ }
+ void* end = reinterpret_cast<byte*>(fpr) + fpr_size;
handler(start, end, 0, arg);
size_t num_pages = fpr_size / kPageSize;
if (kIsDebugBuild) {
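Note that end is now computed from fpr rather than from start: once the debug
build advances start past the magic-number page, deriving end from start would
overshoot the run by one page. A small sketch of the intended debug-build span,
assuming ART's 4 KB page size (helper name is illustrative):

    #include <cstddef>
    #include <cstdint>

    constexpr size_t kPageSize = 4096;

    // Debug-build view of a free page run of fpr_size bytes at fpr:
    // page 0 holds the magic number, pages 1..n-1 are the walkable span.
    inline void DebugFreePageRunSpan(uint8_t* fpr, size_t fpr_size,
                                     uint8_t** start, uint8_t** end) {
      *start = fpr + kPageSize;  // Skip the magic-number page.
      *end = fpr + fpr_size;     // Anchored to fpr, not to *start.
    }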
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index d5b6de1..4eb13315 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -91,18 +91,50 @@
byte* end = fpr_base + ByteSize(rosalloc);
return end;
}
+ bool IsLargerThanPageReleaseThreshold(RosAlloc* rosalloc)
+ EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
+ return ByteSize(rosalloc) >= rosalloc->page_release_size_threshold_;
+ }
+ bool IsAtEndOfSpace(RosAlloc* rosalloc)
+ EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
+ return reinterpret_cast<byte*>(this) + ByteSize(rosalloc) == rosalloc->base_ + rosalloc->footprint_;
+ }
+ bool ShouldReleasePages(RosAlloc* rosalloc) EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
+ switch (rosalloc->page_release_mode_) {
+ case kPageReleaseModeNone:
+ return false;
+ case kPageReleaseModeEnd:
+ return IsAtEndOfSpace(rosalloc);
+ case kPageReleaseModeSize:
+ return IsLargerThanPageReleaseThreshold(rosalloc);
+ case kPageReleaseModeSizeAndEnd:
+ return IsLargerThanPageReleaseThreshold(rosalloc) && IsAtEndOfSpace(rosalloc);
+ case kPageReleaseModeAll:
+ return true;
+ default:
+ LOG(FATAL) << "Unexpected page release mode: " << rosalloc->page_release_mode_;
+ return false;
+ }
+ }
void ReleasePages(RosAlloc* rosalloc) EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
+ byte* start = reinterpret_cast<byte*>(this);
size_t byte_size = ByteSize(rosalloc);
DCHECK_EQ(byte_size % kPageSize, static_cast<size_t>(0));
+ bool release_pages = ShouldReleasePages(rosalloc);
if (kIsDebugBuild) {
// Exclude the first page that stores the magic number.
DCHECK_GE(byte_size, static_cast<size_t>(kPageSize));
+ start += kPageSize;
byte_size -= kPageSize;
if (byte_size > 0) {
- madvise(reinterpret_cast<byte*>(this) + kPageSize, byte_size, MADV_DONTNEED);
+ if (release_pages) {
+ madvise(start, byte_size, MADV_DONTNEED);
+ }
}
} else {
- madvise(this, byte_size, MADV_DONTNEED);
+ if (release_pages) {
+ madvise(start, byte_size, MADV_DONTNEED);
+ }
}
}
};
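ShouldReleasePages() is a decision table over two predicates: "is the run at
least page_release_size_threshold_ bytes" and "does the run end at
base_ + footprint_". A standalone mirror of that table (a sketch, not the
class itself; names shortened for illustration):

    enum PageReleaseMode { kNone, kEnd, kSize, kSizeAndEnd, kAll };

    // Mirrors FreePageRun::ShouldReleasePages() above.
    bool ShouldRelease(PageReleaseMode mode, bool large_enough, bool at_end) {
      switch (mode) {
        case kNone:       return false;
        case kEnd:        return at_end;
        case kSize:       return large_enough;
        case kSizeAndEnd: return large_enough && at_end;  // AND, not OR.
        case kAll:        return true;
      }
      return false;
    }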
@@ -363,6 +395,21 @@
}
};
+ public:
+ // Different page release modes.
+ enum PageReleaseMode {
+ kPageReleaseModeNone, // Release no empty pages.
+ kPageReleaseModeEnd, // Release empty pages at the end of the space.
+ kPageReleaseModeSize, // Release empty page runs at least as large as the threshold.
+ kPageReleaseModeSizeAndEnd, // Release empty page runs at least as large as the threshold
+ // and at the end of the space.
+ kPageReleaseModeAll, // Release all empty pages.
+ };
+
+ // The default value for page_release_size_threshold_.
+ static constexpr size_t kDefaultPageReleaseSizeThreshold = 4 * MB;
+
+ private:
// The base address of the memory region that's managed by this allocator.
byte* base_;
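A worked example of the size-and-end mode with the default 4 MB threshold (the
lambda is an illustrative stand-in for the real predicate, not code from the
patch):

    #include <cassert>
    #include <cstddef>

    int main() {
      constexpr size_t kThreshold = 4 * 1024 * 1024;  // kDefaultPageReleaseSizeThreshold.
      auto should_release = [&](size_t run_bytes, bool at_end) {
        return run_bytes >= kThreshold && at_end;  // kPageReleaseModeSizeAndEnd.
      };
      assert(!should_release(2 * 1024 * 1024, false));  // Too small, mid-space.
      assert(!should_release(6 * 1024 * 1024, false));  // Large, but mid-space.
      assert(should_release(6 * 1024 * 1024, true));    // Large and at the end.
      return 0;
    }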
@@ -412,6 +459,12 @@
// allowing multiple individual frees at the same time.
ReaderWriterMutex bulk_free_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ // The page release mode.
+ const PageReleaseMode page_release_mode_;
+ // Under kPageReleaseModeSize(AndEnd), if the free page run size is
+ // greater than or equal to this value, release pages.
+ const size_t page_release_size_threshold_;
+
// The base address of the memory region that's managed by this allocator.
byte* Begin() { return base_; }
// The end address of the memory region that's managed by this allocator.
@@ -439,7 +492,9 @@
void* AllocLargeObject(Thread* self, size_t size, size_t* bytes_allocated) LOCKS_EXCLUDED(lock_);
public:
- RosAlloc(void* base, size_t capacity);
+ RosAlloc(void* base, size_t capacity,
+ PageReleaseMode page_release_mode,
+ size_t page_release_size_threshold = kDefaultPageReleaseSizeThreshold);
void* Alloc(Thread* self, size_t size, size_t* bytes_allocated)
LOCKS_EXCLUDED(lock_);
void Free(Thread* self, void* ptr)
@@ -480,6 +535,10 @@
// allocated and objects allocated, respectively.
static void BytesAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg);
static void ObjectsAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg);
+
+ bool DoesReleaseAllPages() const {
+ return page_release_mode_ == kPageReleaseModeAll;
+ }
};
} // namespace allocator