diff options
author | 2023-08-02 21:40:08 +0000 | |
---|---|---|
committer | 2023-08-04 19:06:01 +0000 | |
commit | a83a6eeb22255f76b56a641ef7d019233cb8669f (patch) | |
tree | e32f31e6436387c7d030d926c6030fb40a043729 | |
parent | 4b8b1c0f5924c6d58274db0f23b69c75521b6927 (diff) |
Unregister unused moving space before starting concurrent compaction
For the next GC cycle's mremap to succeed, it is important to register
the entire moving space with userfaultfd. The downside is that we get
userfaults for the new TLABs.
We can eliminate these by mapping a zero-page in the unused portion of
the moving space and then unregistering the unused portion. This takes
care of the kernel's constraint that otherwise makes mremap fail the
next time.
Test: manual
Bug: 160737021
Change-Id: Ibae6174e9af82537ff6b6b13eb6b6dc53d5b8cb9
-rw-r--r-- | runtime/gc/collector/mark_compact.cc | 31 |
1 files changed, 24 insertions, 7 deletions
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc index 85e6a5fffa..c9566c0c3f 100644 --- a/runtime/gc/collector/mark_compact.cc +++ b/runtime/gc/collector/mark_compact.cc @@ -3519,9 +3519,30 @@ void MarkCompact::CompactionPhase() { RecordFree(ObjectBytePair(freed_objects_, freed_bytes)); } + size_t moving_space_size = bump_pointer_space_->Capacity(); + size_t used_size = (moving_first_objs_count_ + black_page_count_) * kPageSize; if (CanCompactMovingSpaceWithMinorFault()) { CompactMovingSpace<kMinorFaultMode>(/*page=*/nullptr); } else { + if (used_size < moving_space_size) { + // mremap clears 'anon_vma' field of anonymous mappings. If we + // uffd-register only the used portion of the space, then the vma gets + // split (between used and unused portions) and as soon as pages are + // mapped to the vmas, they get different `anon_vma` assigned, which + // ensures that the two vmas cannot be merged after we uffd-unregister the + // used portion. OTOH, registering the entire space avoids the split, but + // unnecessarily causes userfaults on allocations. + // By mapping a zero-page (below) we let the kernel assign an 'anon_vma' + // *before* the vma-split caused by uffd-unregister of the unused portion. + // This ensures that when we unregister the used portion after compaction, + // the two split vmas merge. This is necessary for the mremap of the + // next GC cycle to not fail due to having more than one vma in the source + // range. + uint8_t* unused_first_page = bump_pointer_space_->Begin() + used_size; + // It's ok if somebody else already mapped the page. 
+ ZeropageIoctl(unused_first_page, /*tolerate_eexist*/ true, /*tolerate_enoent*/ false); + UnregisterUffd(unused_first_page, moving_space_size - used_size); + } CompactMovingSpace<kCopyMode>(compaction_buffers_map_.Begin()); } @@ -3535,13 +3556,9 @@ void MarkCompact::CompactionPhase() { for (uint32_t i = 0; compaction_in_progress_count_.load(std::memory_order_acquire) > 0; i++) { BackOff(i); } - - size_t moving_space_size = bump_pointer_space_->Capacity(); - UnregisterUffd(bump_pointer_space_->Begin(), - minor_fault_initialized_ ? - (moving_first_objs_count_ + black_page_count_) * kPageSize : - moving_space_size); - + if (used_size > 0) { + UnregisterUffd(bump_pointer_space_->Begin(), used_size); + } // Release all of the memory taken by moving-space's from-map if (minor_fault_initialized_) { if (IsValidFd(moving_from_space_fd_)) { |