author | 2023-07-14 17:35:06 +0100
committer | 2023-11-28 19:12:26 +0000
commit | f1e9e38c0652fbe0cec16b520599f2fef53a38c1 (patch)
tree | 228a0ec3f3cee41505eb940c4550ef26c388861b
parent | ca01707ad2b9ef68ae45e22837437b4f69f25ce9 (diff)
Use CHECK_ALIGNED_PARAM for variable page size
This patch is part of the chain of changes preparing to make kPageSize
non-constexpr in a future patch.
Since kPageSize is going to become a non-constexpr value, it can no longer
be used as a template argument. Consequently, CHECK_ALIGNED(..., kPageSize)
expressions, which are implemented via the template function IsAligned<>,
have to be replaced with CHECK_ALIGNED_PARAM(..., kPageSize).
The same is done for DCHECK_ALIGNED.
Test: Same as for I5430741a8494b340ed7fd2d8692c41a59ad9c530.
The whole patch chain was tested together.
Change-Id: I0e634dab62f2b44d078d7199d5b5feab945077b9
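For context, the change boils down to whether the alignment is a compile-time template argument or an ordinary runtime parameter. The following standalone C++ sketch is a simplified illustration, not ART's actual definitions from bit_utils.h; the MY_CHECK_ALIGNED / MY_CHECK_ALIGNED_PARAM macros and the IsAligned / IsAlignedParam helpers below are stand-ins written for this example only.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <unistd.h>

// Template form: the alignment is a non-type template argument and therefore
// must be a compile-time constant (e.g. a constexpr kPageSize).
template <size_t kAlignment, typename T>
bool IsAligned(T value) {
  // Assumes kAlignment is a power of two.
  return (static_cast<uintptr_t>(value) & (kAlignment - 1)) == 0;
}

// Runtime form: the alignment is an ordinary function parameter, so it may be
// computed at runtime.
template <typename T>
bool IsAlignedParam(T value, size_t alignment) {
  return (static_cast<uintptr_t>(value) & (alignment - 1)) == 0;
}

// Simplified stand-ins for the CHECK_ALIGNED / CHECK_ALIGNED_PARAM macros.
#define MY_CHECK_ALIGNED(value, alignment) assert(IsAligned<alignment>(value))
#define MY_CHECK_ALIGNED_PARAM(value, alignment) assert(IsAlignedParam(value, alignment))

int main() {
  // Once the page size stops being constexpr (e.g. it is queried at runtime),
  // it can no longer appear as a template argument, so only the _PARAM form
  // still compiles.
  const size_t kPageSize = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  uintptr_t addr = 3 * 4096;
  // MY_CHECK_ALIGNED(addr, kPageSize);     // would not compile: non-constant template argument
  MY_CHECK_ALIGNED_PARAM(addr, kPageSize);  // fine: alignment evaluated at runtime
  return 0;
}

This is why the patch mechanically rewrites each CHECK_ALIGNED(..., kPageSize) and DCHECK_ALIGNED(..., kPageSize) to the _PARAM variant while leaving checks against genuinely constant alignments (such as sizeof(uintptr_t) or kCardSize) untouched.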
-rw-r--r-- | imgdiag/imgdiag.cc | 4
-rw-r--r-- | libartbase/base/mem_map.cc | 30
-rw-r--r-- | runtime/base/gc_visited_arena_pool.cc | 6
-rw-r--r-- | runtime/base/gc_visited_arena_pool.h | 8
-rw-r--r-- | runtime/gc/allocator/rosalloc.cc | 18
-rw-r--r-- | runtime/gc/allocator/rosalloc.h | 2
-rw-r--r-- | runtime/gc/collector/mark_compact.cc | 16
-rw-r--r-- | runtime/gc/collector/semi_space.cc | 2
-rw-r--r-- | runtime/gc/space/malloc_space.cc | 4
-rw-r--r-- | runtime/gc/space/region_space.cc | 4
-rw-r--r-- | runtime/image.cc | 2
-rw-r--r-- | runtime/indirect_reference_table.cc | 4
-rw-r--r-- | runtime/jni/local_reference_table.cc | 4
-rw-r--r-- | runtime/thread_pool.cc | 2
14 files changed, 53 insertions, 53 deletions
diff --git a/imgdiag/imgdiag.cc b/imgdiag/imgdiag.cc
index 1845ed3274..1fba35493c 100644
--- a/imgdiag/imgdiag.cc
+++ b/imgdiag/imgdiag.cc
@@ -1603,8 +1603,8 @@ class ImgDiagDumper {
     auto read_contents = [&](File* mem_file, /*out*/ MemMap* map, /*out*/ ArrayRef<uint8_t>* contents) {
-      DCHECK_ALIGNED(boot_map.start, kPageSize);
-      DCHECK_ALIGNED(boot_map_size, kPageSize);
+      DCHECK_ALIGNED_PARAM(boot_map.start, kPageSize);
+      DCHECK_ALIGNED_PARAM(boot_map_size, kPageSize);
       std::string name = "Contents of " + mem_file->GetPath();
       std::string local_error_msg;
       // We need to use low 4 GiB memory so that we can walk the objects using standard
diff --git a/libartbase/base/mem_map.cc b/libartbase/base/mem_map.cc
index bc4dbb3626..aba20c6df9 100644
--- a/libartbase/base/mem_map.cc
+++ b/libartbase/base/mem_map.cc
@@ -236,7 +236,7 @@ bool MemMap::CheckReservation(uint8_t* expected_ptr,
     *error_msg = StringPrintf("Invalid reservation for %s", name);
     return false;
   }
-  DCHECK_ALIGNED(reservation.Begin(), kPageSize);
+  DCHECK_ALIGNED_PARAM(reservation.Begin(), kPageSize);
   if (reservation.Begin() != expected_ptr) {
     *error_msg = StringPrintf("Bad image reservation start for %s: %p instead of %p",
                               name,
@@ -765,10 +765,10 @@ MemMap MemMap::RemapAtEnd(uint8_t* new_end,
   DCHECK_GE(new_end, Begin());
   DCHECK_LE(new_end, End());
   DCHECK_LE(begin_ + size_, reinterpret_cast<uint8_t*>(base_begin_) + base_size_);
-  DCHECK_ALIGNED(begin_, kPageSize);
-  DCHECK_ALIGNED(base_begin_, kPageSize);
-  DCHECK_ALIGNED(reinterpret_cast<uint8_t*>(base_begin_) + base_size_, kPageSize);
-  DCHECK_ALIGNED(new_end, kPageSize);
+  DCHECK_ALIGNED_PARAM(begin_, kPageSize);
+  DCHECK_ALIGNED_PARAM(base_begin_, kPageSize);
+  DCHECK_ALIGNED_PARAM(reinterpret_cast<uint8_t*>(base_begin_) + base_size_, kPageSize);
+  DCHECK_ALIGNED_PARAM(new_end, kPageSize);
   uint8_t* old_end = begin_ + size_;
   uint8_t* old_base_end = reinterpret_cast<uint8_t*>(base_begin_) + base_size_;
   uint8_t* new_base_end = new_end;
@@ -783,7 +783,7 @@ MemMap MemMap::RemapAtEnd(uint8_t* new_end,
   uint8_t* tail_base_begin = new_base_end;
   size_t tail_base_size = old_base_end - new_base_end;
   DCHECK_EQ(tail_base_begin + tail_base_size, old_base_end);
-  DCHECK_ALIGNED(tail_base_size, kPageSize);
+  DCHECK_ALIGNED_PARAM(tail_base_size, kPageSize);
   MEMORY_TOOL_MAKE_UNDEFINED(tail_base_begin, tail_base_size);
   // Note: Do not explicitly unmap the tail region, mmap() with MAP_FIXED automatically
@@ -834,8 +834,8 @@ void MemMap::ReleaseReservedMemory(size_t byte_count) {
   DCHECK_EQ(redzone_size_, 0u);
   DCHECK_EQ(begin_, base_begin_);
   DCHECK_EQ(size_, base_size_);
-  DCHECK_ALIGNED(begin_, kPageSize);
-  DCHECK_ALIGNED(size_, kPageSize);
+  DCHECK_ALIGNED_PARAM(begin_, kPageSize);
+  DCHECK_ALIGNED_PARAM(size_, kPageSize);
   // Check and round up the `byte_count`.
   DCHECK_NE(byte_count, 0u);
@@ -955,7 +955,7 @@ void MemMap::DumpMapsLocked(std::ostream& os, bool terse) {
       size_t num_gaps = 0;
       size_t num = 1u;
       size_t size = map->BaseSize();
-      CHECK_ALIGNED(size, kPageSize);
+      CHECK_ALIGNED_PARAM(size, kPageSize);
       void* end = map->BaseEnd();
       while (it != maps_end && it->second->GetProtect() == map->GetProtect() &&
@@ -969,12 +969,12 @@ void MemMap::DumpMapsLocked(std::ostream& os, bool terse) {
         }
         size_t gap = reinterpret_cast<uintptr_t>(it->second->BaseBegin()) - reinterpret_cast<uintptr_t>(end);
-        CHECK_ALIGNED(gap, kPageSize);
+        CHECK_ALIGNED_PARAM(gap, kPageSize);
         os << "~0x" << std::hex << (gap / kPageSize) << "P";
         num = 0u;
         size = 0u;
       }
-      CHECK_ALIGNED(it->second->BaseSize(), kPageSize);
+      CHECK_ALIGNED_PARAM(it->second->BaseSize(), kPageSize);
       ++num;
       size += it->second->BaseSize();
       end = it->second->BaseEnd();
@@ -1092,7 +1092,7 @@ void* MemMap::MapInternalArtLow4GBAllocator(size_t length,
      --before_it;
      // Start at the end of the map before the upper bound.
      ptr = std::max(ptr, reinterpret_cast<uintptr_t>(before_it->second->BaseEnd()));
-     CHECK_ALIGNED(ptr, kPageSize);
+     CHECK_ALIGNED_PARAM(ptr, kPageSize);
    }
    while (it != gMaps->end()) {
      // How much space do we have until the next map?
@@ -1103,7 +1103,7 @@ void* MemMap::MapInternalArtLow4GBAllocator(size_t length,
      }
      // Otherwise, skip to the end of the map.
      ptr = reinterpret_cast<uintptr_t>(it->second->BaseEnd());
-     CHECK_ALIGNED(ptr, kPageSize);
+     CHECK_ALIGNED_PARAM(ptr, kPageSize);
      ++it;
    }
@@ -1188,7 +1188,7 @@ void* MemMap::MapInternal(void* addr,
 #else
   UNUSED(low_4gb);
 #endif
-  DCHECK_ALIGNED(length, kPageSize);
+  DCHECK_ALIGNED_PARAM(length, kPageSize);
   // TODO:
   // A page allocator would be a useful abstraction here, as
   // 1) It is doubtful that MAP_32BIT on x86_64 is doing the right job for us
@@ -1347,7 +1347,7 @@ void MemMap::AlignBy(size_t alignment, bool align_both_ends) {
   CHECK_EQ(begin_, base_begin_) << "Unsupported";
   CHECK_EQ(size_, base_size_) << "Unsupported";
   CHECK_GT(alignment, static_cast<size_t>(kPageSize));
-  CHECK_ALIGNED(alignment, kPageSize);
+  CHECK_ALIGNED_PARAM(alignment, kPageSize);
   CHECK(!reuse_);
   if (IsAlignedParam(reinterpret_cast<uintptr_t>(base_begin_), alignment) &&
       (!align_both_ends || IsAlignedParam(base_size_, alignment))) {
diff --git a/runtime/base/gc_visited_arena_pool.cc b/runtime/base/gc_visited_arena_pool.cc
index d587a71c4d..88180dcc02 100644
--- a/runtime/base/gc_visited_arena_pool.cc
+++ b/runtime/base/gc_visited_arena_pool.cc
@@ -41,8 +41,8 @@ TrackedArena::TrackedArena(uint8_t* start, size_t size, bool pre_zygote_fork, bo
     // entire arena.
     bytes_allocated_ = size;
   } else {
-    DCHECK_ALIGNED(size, kPageSize);
-    DCHECK_ALIGNED(start, kPageSize);
+    DCHECK_ALIGNED_PARAM(size, kPageSize);
+    DCHECK_ALIGNED_PARAM(start, kPageSize);
     size_t arr_size = size / kPageSize;
     first_obj_array_.reset(new uint8_t*[arr_size]);
     std::fill_n(first_obj_array_.get(), arr_size, nullptr);
@@ -50,7 +50,7 @@ TrackedArena::TrackedArena(uint8_t* start, size_t size, bool pre_zygote_fork, bo
 }
 void TrackedArena::ReleasePages(uint8_t* begin, size_t size, bool pre_zygote_fork) {
-  DCHECK_ALIGNED(begin, kPageSize);
+  DCHECK_ALIGNED_PARAM(begin, kPageSize);
   // Userfaultfd GC uses MAP_SHARED mappings for linear-alloc and therefore
   // MADV_DONTNEED will not free the pages from page cache. Therefore use
   // MADV_REMOVE instead, which is meant for this purpose.
diff --git a/runtime/base/gc_visited_arena_pool.h b/runtime/base/gc_visited_arena_pool.h
index 5a40fb8a6a..d4fe4fb8f0 100644
--- a/runtime/base/gc_visited_arena_pool.h
+++ b/runtime/base/gc_visited_arena_pool.h
@@ -44,8 +44,8 @@ class TrackedArena final : public Arena {
   void VisitRoots(PageVisitor& visitor) const REQUIRES_SHARED(Locks::mutator_lock_) {
     uint8_t* page_begin = Begin();
     if (first_obj_array_.get() != nullptr) {
-      DCHECK_ALIGNED(Size(), kPageSize);
-      DCHECK_ALIGNED(Begin(), kPageSize);
+      DCHECK_ALIGNED_PARAM(Size(), kPageSize);
+      DCHECK_ALIGNED_PARAM(Begin(), kPageSize);
       for (int i = 0, nr_pages = Size() / kPageSize; i < nr_pages; i++, page_begin += kPageSize) {
         uint8_t* first = first_obj_array_[i];
         if (first != nullptr) {
@@ -71,8 +71,8 @@ class TrackedArena final : public Arena {
     // by arena-allocator. This helps in reducing loop iterations below.
     uint8_t* last_byte = AlignUp(Begin() + GetBytesAllocated(), kPageSize);
     if (first_obj_array_.get() != nullptr) {
-      DCHECK_ALIGNED(Begin(), kPageSize);
-      DCHECK_ALIGNED(End(), kPageSize);
+      DCHECK_ALIGNED_PARAM(Begin(), kPageSize);
+      DCHECK_ALIGNED_PARAM(End(), kPageSize);
       DCHECK_LE(last_byte, End());
     } else {
       DCHECK_EQ(last_byte, End());
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index d0eaa88bc8..93b1498a18 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -63,11 +63,11 @@ RosAlloc::RosAlloc(void* base, size_t capacity, size_t max_capacity,
       page_release_mode_(page_release_mode),
       page_release_size_threshold_(page_release_size_threshold),
       is_running_on_memory_tool_(running_on_memory_tool) {
-  DCHECK_ALIGNED(base, kPageSize);
+  DCHECK_ALIGNED_PARAM(base, kPageSize);
   DCHECK_EQ(RoundUp(capacity, kPageSize), capacity);
   DCHECK_EQ(RoundUp(max_capacity, kPageSize), max_capacity);
   CHECK_LE(capacity, max_capacity);
-  CHECK_ALIGNED(page_release_size_threshold_, kPageSize);
+  CHECK_ALIGNED_PARAM(page_release_size_threshold_, kPageSize);
   // Zero the memory explicitly (don't rely on that the mem map is zero-initialized).
   if (!kMadviseZeroes) {
     memset(base_, 0, max_capacity);
@@ -361,7 +361,7 @@ size_t RosAlloc::FreePages(Thread* self, void* ptr, bool already_zero) {
     fpr->magic_num_ = kMagicNumFree;
   }
   fpr->SetByteSize(this, byte_size);
-  DCHECK_ALIGNED(fpr->ByteSize(this), kPageSize);
+  DCHECK_ALIGNED_PARAM(fpr->ByteSize(this), kPageSize);
   DCHECK(free_page_runs_.find(fpr) == free_page_runs_.end());
   if (!free_page_runs_.empty()) {
@@ -1368,7 +1368,7 @@ bool RosAlloc::Trim() {
   DCHECK_LE(madvise_begin, page_map_mem_map_.End());
   size_t madvise_size = page_map_mem_map_.End() - madvise_begin;
   if (madvise_size > 0) {
-    DCHECK_ALIGNED(madvise_begin, kPageSize);
+    DCHECK_ALIGNED_PARAM(madvise_begin, kPageSize);
     DCHECK_EQ(RoundUp(madvise_size, kPageSize), madvise_size);
     if (!kMadviseZeroes) {
       memset(madvise_begin, 0, madvise_size);
@@ -1413,7 +1413,7 @@ void RosAlloc::InspectAll(void (*handler)(void* start, void* end, size_t used_by
     FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
     DCHECK(free_page_runs_.find(fpr) != free_page_runs_.end());
     size_t fpr_size = fpr->ByteSize(this);
-    DCHECK_ALIGNED(fpr_size, kPageSize);
+    DCHECK_ALIGNED_PARAM(fpr_size, kPageSize);
     void* start = fpr;
     if (kIsDebugBuild) {
       // In the debug build, the first page of a free page run
@@ -1768,7 +1768,7 @@ void RosAlloc::Verify() {
       CHECK(free_page_runs_.find(fpr) != free_page_runs_.end())
           << "An empty page must belong to the free page run set";
       size_t fpr_size = fpr->ByteSize(this);
-      CHECK_ALIGNED(fpr_size, kPageSize)
+      CHECK_ALIGNED_PARAM(fpr_size, kPageSize)
          << "A free page run size isn't page-aligned : " << fpr_size;
      size_t num_pages = fpr_size / kPageSize;
      CHECK_GT(num_pages, static_cast<uintptr_t>(0))
@@ -2013,7 +2013,7 @@ size_t RosAlloc::ReleasePages() {
     // to the next page.
     if (free_page_runs_.find(fpr) != free_page_runs_.end()) {
       size_t fpr_size = fpr->ByteSize(this);
-      DCHECK_ALIGNED(fpr_size, kPageSize);
+      DCHECK_ALIGNED_PARAM(fpr_size, kPageSize);
       uint8_t* start = reinterpret_cast<uint8_t*>(fpr);
       reclaimed_bytes += ReleasePageRange(start, start + fpr_size);
       size_t pages = fpr_size / kPageSize;
@@ -2040,8 +2040,8 @@ size_t RosAlloc::ReleasePages() {
 }
 size_t RosAlloc::ReleasePageRange(uint8_t* start, uint8_t* end) {
-  DCHECK_ALIGNED(start, kPageSize);
-  DCHECK_ALIGNED(end, kPageSize);
+  DCHECK_ALIGNED_PARAM(start, kPageSize);
+  DCHECK_ALIGNED_PARAM(end, kPageSize);
   DCHECK_LT(start, end);
   if (kIsDebugBuild) {
     // In the debug build, the first page of a free page run
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index bb2f426aef..a9007f226f 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -56,7 +56,7 @@ class RosAlloc {
     size_t pm_idx = rosalloc->ToPageMapIndex(fpr_base);
     size_t byte_size = rosalloc->free_page_run_size_map_[pm_idx];
     DCHECK_GE(byte_size, static_cast<size_t>(0));
-    DCHECK_ALIGNED(byte_size, kPageSize);
+    DCHECK_ALIGNED_PARAM(byte_size, kPageSize);
     return byte_size;
   }
   void SetByteSize(RosAlloc* rosalloc, size_t byte_size)
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index 5886497956..22089f9351 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -554,8 +554,8 @@ MarkCompact::MarkCompact(Heap* heap)
 }
 void MarkCompact::AddLinearAllocSpaceData(uint8_t* begin, size_t len) {
-  DCHECK_ALIGNED(begin, kPageSize);
-  DCHECK_ALIGNED(len, kPageSize);
+  DCHECK_ALIGNED_PARAM(begin, kPageSize);
+  DCHECK_ALIGNED_PARAM(len, kPageSize);
   DCHECK_GE(len, kPMDSize);
   size_t alignment = BestPageTableAlignment(len);
   bool is_shared = false;
@@ -1950,7 +1950,7 @@ void MarkCompact::MapProcessedPages(uint8_t* to_space_start,
                                     size_t arr_len) {
   DCHECK(minor_fault_initialized_);
   DCHECK_LT(arr_idx, arr_len);
-  DCHECK_ALIGNED(to_space_start, kPageSize);
+  DCHECK_ALIGNED_PARAM(to_space_start, kPageSize);
   // Claim all the contiguous pages, which are ready to be mapped, and then do
   // so in a single ioctl. This helps avoid the overhead of invoking syscall
   // several times and also maps the already-processed pages, avoiding
@@ -1997,7 +1997,7 @@ void MarkCompact::MapProcessedPages(uint8_t* to_space_start,
       // Bail out by setting the remaining pages' state back to kProcessed and
       // then waking up any waiting threads.
       DCHECK_GE(uffd_continue.mapped, 0);
-      DCHECK_ALIGNED(uffd_continue.mapped, kPageSize);
+      DCHECK_ALIGNED_PARAM(uffd_continue.mapped, kPageSize);
      DCHECK_LT(uffd_continue.mapped, static_cast<ssize_t>(length));
      if (kFirstPageMapping) {
        // In this case the first page must be mapped.
@@ -2193,8 +2193,8 @@ void MarkCompact::FreeFromSpacePages(size_t cur_page_idx, int mode) {
   }
   DCHECK_NE(reclaim_begin, nullptr);
-  DCHECK_ALIGNED(reclaim_begin, kPageSize);
-  DCHECK_ALIGNED(last_reclaimed_page_, kPageSize);
+  DCHECK_ALIGNED_PARAM(reclaim_begin, kPageSize);
+  DCHECK_ALIGNED_PARAM(last_reclaimed_page_, kPageSize);
   // Check if the 'class_after_obj_map_' map allows pages to be freed.
   for (; class_after_obj_iter_ != class_after_obj_ordered_map_.rend(); class_after_obj_iter_++) {
     mirror::Object* klass = class_after_obj_iter_->first.AsMirrorPtr();
@@ -2663,7 +2663,7 @@ class MarkCompact::LinearAllocPageUpdater {
   void MultiObjectArena(uint8_t* page_begin, uint8_t* first_obj)
       REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(first_obj != nullptr);
-    DCHECK_ALIGNED(page_begin, kPageSize);
+    DCHECK_ALIGNED_PARAM(page_begin, kPageSize);
     uint8_t* page_end = page_begin + kPageSize;
     uint32_t obj_size;
     for (uint8_t* byte = first_obj; byte < page_end;) {
@@ -3634,7 +3634,7 @@ void MarkCompact::ProcessLinearAlloc() {
       continue;
     }
     uint8_t* last_byte = pair.second;
-    DCHECK_ALIGNED(last_byte, kPageSize);
+    DCHECK_ALIGNED_PARAM(last_byte, kPageSize);
     others_processing = false;
     arena_begin = arena->Begin();
     arena_size = arena->Size();
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index aa94421a08..419b8421d1 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -398,7 +398,7 @@ static inline size_t CopyAvoidingDirtyingPages(void* dest, const void* src, size
     memcpy(dest, src, page_remain);
     byte_src += page_remain;
     byte_dest += page_remain;
-    DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), kPageSize);
+    DCHECK_ALIGNED_PARAM(reinterpret_cast<uintptr_t>(byte_dest), kPageSize);
     DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), sizeof(uintptr_t));
     DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_src), sizeof(uintptr_t));
     while (byte_src + kPageSize < limit) {
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index a9402d21c3..2a32b9b5cd 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -186,8 +186,8 @@ ZygoteSpace* MallocSpace::CreateZygoteSpace(const char* alloc_space_name, bool l
   SetEnd(reinterpret_cast<uint8_t*>(RoundUp(reinterpret_cast<uintptr_t>(End()), kPageSize)));
   DCHECK_ALIGNED(begin_, accounting::CardTable::kCardSize);
   DCHECK_ALIGNED(End(), accounting::CardTable::kCardSize);
-  DCHECK_ALIGNED(begin_, kPageSize);
-  DCHECK_ALIGNED(End(), kPageSize);
+  DCHECK_ALIGNED_PARAM(begin_, kPageSize);
+  DCHECK_ALIGNED_PARAM(End(), kPageSize);
   size_t size = RoundUp(Size(), kPageSize);
   // Trimming the heap should be done by the caller since we may have invalidated the accounting
   // stored in between objects.
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index 9a66f8a800..c25770cb70 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -405,8 +405,8 @@ void RegionSpace::ReleaseFreeRegions() {
   for (size_t i = 0u; i < num_regions_; ++i) {
     if (regions_[i].IsFree()) {
       uint8_t* begin = regions_[i].Begin();
-      DCHECK_ALIGNED(begin, kPageSize);
-      DCHECK_ALIGNED(regions_[i].End(), kPageSize);
+      DCHECK_ALIGNED_PARAM(begin, kPageSize);
+      DCHECK_ALIGNED_PARAM(regions_[i].End(), kPageSize);
       bool res = madvise(begin, regions_[i].End() - begin, MADV_DONTNEED);
       CHECK_NE(res, -1) << "madvise failed";
     }
diff --git a/runtime/image.cc b/runtime/image.cc
index 10c053ffda..170a5760a4 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -105,7 +105,7 @@ void ImageHeader::RelocateImageReferences(int64_t delta) {
   // to be done in alignment with the dynamic linker's ELF loader as
   // otherwise inconsistency would still be possible e.g. when using
   // `dlopen`-like calls to load OAT files.
-  CHECK_ALIGNED(delta, kPageSize) << "relocation delta must be page aligned";
+  CHECK_ALIGNED_PARAM(delta, kPageSize) << "relocation delta must be page aligned";
   oat_file_begin_ += delta;
   oat_data_begin_ += delta;
   oat_data_end_ += delta;
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index 67010f336e..96e51ce87c 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -317,8 +317,8 @@ void IndirectReferenceTable::Trim() {
   uint8_t* release_start = AlignUp(reinterpret_cast<uint8_t*>(&table_[top_index]), kPageSize);
   uint8_t* release_end = static_cast<uint8_t*>(table_mem_map_.BaseEnd());
   DCHECK_GE(reinterpret_cast<uintptr_t>(release_end), reinterpret_cast<uintptr_t>(release_start));
-  DCHECK_ALIGNED(release_end, kPageSize);
-  DCHECK_ALIGNED(release_end - release_start, kPageSize);
+  DCHECK_ALIGNED_PARAM(release_end, kPageSize);
+  DCHECK_ALIGNED_PARAM(release_end - release_start, kPageSize);
   if (release_start != release_end) {
     madvise(release_start, release_end - release_start, MADV_DONTNEED);
   }
diff --git a/runtime/jni/local_reference_table.cc b/runtime/jni/local_reference_table.cc
index 15aaf5baa3..68359f22ce 100644
--- a/runtime/jni/local_reference_table.cc
+++ b/runtime/jni/local_reference_table.cc
@@ -628,8 +628,8 @@ void LocalReferenceTable::Trim() {
   uint8_t* release_end = reinterpret_cast<uint8_t*>(&table[table_size]);
   DCHECK_GE(reinterpret_cast<uintptr_t>(release_end), reinterpret_cast<uintptr_t>(release_start));
-  DCHECK_ALIGNED(release_end, kPageSize);
-  DCHECK_ALIGNED(release_end - release_start, kPageSize);
+  DCHECK_ALIGNED_PARAM(release_end, kPageSize);
+  DCHECK_ALIGNED_PARAM(release_end - release_start, kPageSize);
   if (release_start != release_end) {
     madvise(release_start, release_end - release_start, MADV_DONTNEED);
   }
diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc
index 92ac8452ba..98af0cd87a 100644
--- a/runtime/thread_pool.cc
+++ b/runtime/thread_pool.cc
@@ -62,7 +62,7 @@ ThreadPoolWorker::ThreadPoolWorker(ThreadPool* thread_pool, const std::string& n
                                     /*low_4gb=*/ false,
                                     &error_msg);
   CHECK(stack_.IsValid()) << error_msg;
-  CHECK_ALIGNED(stack_.Begin(), kPageSize);
+  CHECK_ALIGNED_PARAM(stack_.Begin(), kPageSize);
   CheckedCall(mprotect, "mprotect bottom page of thread pool worker stack", stack_.Begin(),