author     2023-07-14 17:35:06 +0100
committer  2023-11-28 19:12:26 +0000
commit     ca01707ad2b9ef68ae45e22837437b4f69f25ce9 (patch)
tree       bb238c72e1b4f244e22fe3a58cd1a687188bbc64
parent     4cf0e586df97103b6daad9f0533bd8f56adb9eac (diff)
Use IsAlignedParam for variable page size
This patch is part of a chain of changes preparing to make kPageSize
non-constexpr in a future patch.

Since kPageSize will no longer be a constexpr value, it can no longer be
used as a template argument. Consequently, IsAligned<kPageSize>(...)
expressions have to be replaced with IsAlignedParam(..., kPageSize).
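
For illustration, a minimal sketch of the difference (hypothetical helper
definitions, not ART's actual helpers, which differ in detail): the template
form needs its alignment at compile time, while the parameter form accepts a
value first known at runtime, e.g. from sysconf(_SC_PAGESIZE).

  #include <cstdint>
  #include <unistd.h>

  // Template form: kAlign is a template argument, so it must be a
  // compile-time constant.
  template <std::uintptr_t kAlign, typename T>
  bool IsAligned(T* ptr) {
    static_assert((kAlign & (kAlign - 1)) == 0, "kAlign must be a power of two");
    return (reinterpret_cast<std::uintptr_t>(ptr) & (kAlign - 1)) == 0;
  }

  // Parameter form: the alignment is an ordinary function argument, so a
  // non-constexpr page size works.
  template <typename T>
  bool IsAlignedParam(T* ptr, std::uintptr_t align) {
    return (reinterpret_cast<std::uintptr_t>(ptr) & (align - 1)) == 0;
  }

  int main() {
    alignas(4096) static char page[4096];
    bool a = IsAligned<4096>(page);  // OK: 4096 is a constant expression
    // Page size queried at runtime; not a constant expression.
    std::uintptr_t page_size = static_cast<std::uintptr_t>(sysconf(_SC_PAGESIZE));
    bool b = IsAlignedParam(page, page_size);  // OK: ordinary argument
    // IsAligned<page_size>(page);  // error: page_size is not constexpr
    return (a && b) ? 0 : 1;
  }

The patch mechanically applies the parameter form at each affected call
site, as the diff below shows.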
Test: Same as for I5430741a8494b340ed7fd2d8692c41a59ad9c530.
The patch chain was tested as a whole.
Change-Id: Ic79f26c3f9b87d7153e02071cc59ec0a0f1f8e16
 libartbase/base/mem_map.cc                 |  8 ++++----
 runtime/base/gc_visited_arena_pool.cc      |  2 +-
 runtime/gc/collector/concurrent_copying.cc |  6 +++---
 runtime/gc/collector/mark_compact.cc       | 14 +++++++-------
 runtime/oat_file.cc                        |  2 +-
 runtime/thread_android.cc                  |  4 ++--
 6 files changed, 18 insertions(+), 18 deletions(-)
diff --git a/libartbase/base/mem_map.cc b/libartbase/base/mem_map.cc
index 5f5dd13473..bc4dbb3626 100644
--- a/libartbase/base/mem_map.cc
+++ b/libartbase/base/mem_map.cc
@@ -1247,8 +1247,8 @@ void MemMap::TryReadable() {
   CHECK_NE(prot_ & PROT_READ, 0);
   volatile uint8_t* begin = reinterpret_cast<volatile uint8_t*>(base_begin_);
   volatile uint8_t* end = begin + base_size_;
-  DCHECK(IsAligned<kPageSize>(begin));
-  DCHECK(IsAligned<kPageSize>(end));
+  DCHECK(IsAlignedParam(begin, kPageSize));
+  DCHECK(IsAlignedParam(end, kPageSize));
   // Read the first byte of each page. Use volatile to prevent the compiler from optimizing away the
   // reads.
   for (volatile uint8_t* ptr = begin; ptr < end; ptr += kPageSize) {
@@ -1264,8 +1264,8 @@ static void inline RawClearMemory(uint8_t* begin, uint8_t* end) {
 
 #if defined(__linux__)
 static inline void ClearMemory(uint8_t* page_begin, size_t size, bool resident) {
-  DCHECK(IsAligned<kPageSize>(page_begin));
-  DCHECK(IsAligned<kPageSize>(page_begin + size));
+  DCHECK(IsAlignedParam(page_begin, kPageSize));
+  DCHECK(IsAlignedParam(page_begin + size, kPageSize));
   if (resident) {
     RawClearMemory(page_begin, page_begin + size);
     // Note we check madvise return value against -1, as it seems old kernels
diff --git a/runtime/base/gc_visited_arena_pool.cc b/runtime/base/gc_visited_arena_pool.cc
index 38ef0ffd2b..d587a71c4d 100644
--- a/runtime/base/gc_visited_arena_pool.cc
+++ b/runtime/base/gc_visited_arena_pool.cc
@@ -89,7 +89,7 @@ void TrackedArena::SetFirstObject(uint8_t* obj_begin, uint8_t* obj_end) {
   // userfaultfd registration.
   ReaderMutexLock rmu(Thread::Current(), arena_pool->GetLock());
   // If the addr is at the beginning of a page, then we set it for that page too.
-  if (IsAligned<kPageSize>(obj_begin)) {
+  if (IsAlignedParam(obj_begin, kPageSize)) {
     first_obj_array_[idx] = obj_begin;
   }
   while (idx < last_byte_idx) {
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index b9ea85fa51..4e006b5ed9 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -2662,14 +2662,14 @@ void ConcurrentCopying::CaptureRssAtPeak() {
   };
 
   // region space
-  DCHECK(IsAligned<kPageSize>(region_space_->Limit()));
+  DCHECK(IsAlignedParam(region_space_->Limit(), kPageSize));
   gc_ranges.emplace_back(range_t(region_space_->Begin(), region_space_->Limit()));
   // mark bitmap
   add_gc_range(region_space_bitmap_->Begin(), region_space_bitmap_->Size());
 
   // non-moving space
   {
-    DCHECK(IsAligned<kPageSize>(heap_->non_moving_space_->Limit()));
+    DCHECK(IsAlignedParam(heap_->non_moving_space_->Limit(), kPageSize));
     gc_ranges.emplace_back(range_t(heap_->non_moving_space_->Begin(),
                                    heap_->non_moving_space_->Limit()));
     // mark bitmap
@@ -2689,7 +2689,7 @@ void ConcurrentCopying::CaptureRssAtPeak() {
   // large-object space
   if (heap_->GetLargeObjectsSpace()) {
     heap_->GetLargeObjectsSpace()->ForEachMemMap([&add_gc_range](const MemMap& map) {
-      DCHECK(IsAligned<kPageSize>(map.BaseSize()));
+      DCHECK(IsAlignedParam(map.BaseSize(), kPageSize));
       add_gc_range(map.BaseBegin(), map.BaseSize());
     });
     // mark bitmap
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index c321f7c2c5..5886497956 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -1764,7 +1764,7 @@ void MarkCompact::SlideBlackPage(mirror::Object* first_obj,
                                  uint8_t* const pre_compact_page,
                                  uint8_t* dest,
                                  bool needs_memset_zero) {
-  DCHECK(IsAligned<kPageSize>(pre_compact_page));
+  DCHECK(IsAlignedParam(pre_compact_page, kPageSize));
   size_t bytes_copied;
   uint8_t* src_addr = reinterpret_cast<uint8_t*>(GetFromSpaceAddr(first_obj));
   uint8_t* pre_compact_addr = reinterpret_cast<uint8_t*>(first_obj);
@@ -1792,7 +1792,7 @@ void MarkCompact::SlideBlackPage(mirror::Object* first_obj,
     size_t offset = pre_compact_page - pre_compact_addr;
     pre_compact_addr = pre_compact_page;
     src_addr += offset;
-    DCHECK(IsAligned<kPageSize>(src_addr));
+    DCHECK(IsAlignedParam(src_addr, kPageSize));
   }
   // Copy the first chunk of live words
   std::memcpy(dest, src_addr, first_chunk_size);
@@ -2043,7 +2043,7 @@ void MarkCompact::MapProcessedPages(uint8_t* to_space_start,
 
 void MarkCompact::ZeropageIoctl(void* addr, bool tolerate_eexist, bool tolerate_enoent) {
   struct uffdio_zeropage uffd_zeropage;
-  DCHECK(IsAligned<kPageSize>(addr));
+  DCHECK(IsAlignedParam(addr, kPageSize));
   uffd_zeropage.range.start = reinterpret_cast<uintptr_t>(addr);
   uffd_zeropage.range.len = kPageSize;
   uffd_zeropage.mode = 0;
@@ -2277,7 +2277,7 @@ void MarkCompact::CompactMovingSpace(uint8_t* page) {
   }
   uint8_t* pre_compact_page = black_allocations_begin_ + (black_page_count_ * kPageSize);
-  DCHECK(IsAligned<kPageSize>(pre_compact_page));
+  DCHECK(IsAlignedParam(pre_compact_page, kPageSize));
   UpdateClassAfterObjMap();
   // These variables are maintained by FreeFromSpacePages().
@@ -3326,7 +3326,7 @@ void MarkCompact::ConcurrentlyProcessMovingPage(uint8_t* fault_page,
   uint8_t* unused_space_begin =
       bump_pointer_space_->Begin() + nr_moving_space_used_pages * kPageSize;
-  DCHECK(IsAligned<kPageSize>(unused_space_begin));
+  DCHECK(IsAlignedParam(unused_space_begin, kPageSize));
   DCHECK(kMode == kCopyMode || fault_page < unused_space_begin);
   if (kMode == kCopyMode && fault_page >= unused_space_begin) {
     // There is a race which allows more than one thread to install a
@@ -3395,7 +3395,7 @@ void MarkCompact::ConcurrentlyProcessMovingPage(uint8_t* fault_page,
       if (page_idx + 1 < moving_first_objs_count_ + black_page_count_) {
         next_page_first_obj = first_objs_moving_space_[page_idx + 1].AsMirrorPtr();
       }
-      DCHECK(IsAligned<kPageSize>(pre_compact_page));
+      DCHECK(IsAlignedParam(pre_compact_page, kPageSize));
       SlideBlackPage(first_obj,
                      next_page_first_obj,
                      first_chunk_size,
@@ -3842,7 +3842,7 @@ void MarkCompact::CompactionPhase() {
       count &= ~kSigbusCounterCompactionDoneMask;
     }
   } else {
-    DCHECK(IsAligned<kPageSize>(conc_compaction_termination_page_));
+    DCHECK(IsAlignedParam(conc_compaction_termination_page_, kPageSize));
     // We will only iterate once if gKernelHasFaultRetry is true.
     do {
       // madvise the page so that we can get userfaults on it.
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 189d3b853f..c773f65b4b 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -626,7 +626,7 @@ bool OatFileBase::Setup(int zip_fd,
   // however not aligned to kElfSegmentAlignment. While technically this isn't
   // correct as per requirement in the ELF header, it has to be supported for
   // now. See also the comment at ImageHeader::RelocateImageReferences.
-  if (!IsAligned<kPageSize>(bss_begin_) ||
+  if (!IsAlignedParam(bss_begin_, kPageSize) ||
       !IsAlignedParam(bss_methods_, static_cast<size_t>(pointer_size)) ||
       !IsAlignedParam(bss_roots_, static_cast<size_t>(pointer_size)) ||
       !IsAligned<alignof(GcRoot<mirror::Object>)>(bss_end_)) {
diff --git a/runtime/thread_android.cc b/runtime/thread_android.cc
index 0f41e2f57f..df4511b0d4 100644
--- a/runtime/thread_android.cc
+++ b/runtime/thread_android.cc
@@ -39,8 +39,8 @@ void Thread::MadviseAwayAlternateSignalStack() {
   // create different arbitrary alternate signal stacks and we do not want to erroneously
   // `madvise()` away pages that may hold data other than the alternate signal stack.
   if ((old_ss.ss_flags & SS_DISABLE) == 0 &&
-      IsAligned<kPageSize>(old_ss.ss_sp) &&
-      IsAligned<kPageSize>(old_ss.ss_size)) {
+      IsAlignedParam(old_ss.ss_sp, kPageSize) &&
+      IsAlignedParam(old_ss.ss_size, kPageSize)) {
     CHECK_EQ(old_ss.ss_flags & SS_ONSTACK, 0);
     // Note: We're testing and benchmarking ART on devices with old kernels
     // which may not support `MADV_FREE`, so we do not check the result.