Use IsAlignedParam for variable page size

This patch is part of a chain of changes preparing to make kPageSize
non-constexpr in a future patch.

Since kPageSize is going to become a non-constexpr value, it can no
longer be used as a template argument. Consequently,
IsAligned<kPageSize>(...) expressions have to be replaced with
IsAlignedParam(..., kPageSize).
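
For illustration, a minimal sketch of the distinction (not the exact
definitions from libartbase/base/bit_utils.h; RuntimePageSize() below
is a hypothetical stand-in for however the runtime value is obtained):

  #include <cstdint>

  // Sketch only: the template form requires a compile-time constant
  // alignment, while the parameterized form accepts a runtime value.
  template <uint64_t kAlignment>
  constexpr bool IsAligned(uint64_t x) {
    static_assert((kAlignment & (kAlignment - 1)) == 0u, "power of two");
    return (x & (kAlignment - 1)) == 0u;
  }

  inline bool IsAlignedParam(uint64_t x, uint64_t alignment) {
    return (x & (alignment - 1)) == 0u;
  }

  // constexpr uint64_t kPageSize = 4096;
  // IsAligned<kPageSize>(addr);       // OK while kPageSize is constexpr
  //
  // uint64_t kPageSize = RuntimePageSize();  // hypothetical runtime value
  // IsAlignedParam(addr, kPageSize);  // OK
  // IsAligned<kPageSize>(addr);       // error: non-constant template argument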

Test: Same as for I5430741a8494b340ed7fd2d8692c41a59ad9c530.
      The whole patch chain was tested together.
Change-Id: Ic79f26c3f9b87d7153e02071cc59ec0a0f1f8e16
diff --git a/libartbase/base/mem_map.cc b/libartbase/base/mem_map.cc
index 5f5dd13..bc4dbb3 100644
--- a/libartbase/base/mem_map.cc
+++ b/libartbase/base/mem_map.cc
@@ -1247,8 +1247,8 @@
   CHECK_NE(prot_ & PROT_READ, 0);
   volatile uint8_t* begin = reinterpret_cast<volatile uint8_t*>(base_begin_);
   volatile uint8_t* end = begin + base_size_;
-  DCHECK(IsAligned<kPageSize>(begin));
-  DCHECK(IsAligned<kPageSize>(end));
+  DCHECK(IsAlignedParam(begin, kPageSize));
+  DCHECK(IsAlignedParam(end, kPageSize));
   // Read the first byte of each page. Use volatile to prevent the compiler from optimizing away the
   // reads.
   for (volatile uint8_t* ptr = begin; ptr < end; ptr += kPageSize) {
@@ -1264,8 +1264,8 @@
 
 #if defined(__linux__)
 static inline void ClearMemory(uint8_t* page_begin, size_t size, bool resident) {
-  DCHECK(IsAligned<kPageSize>(page_begin));
-  DCHECK(IsAligned<kPageSize>(page_begin + size));
+  DCHECK(IsAlignedParam(page_begin, kPageSize));
+  DCHECK(IsAlignedParam(page_begin + size, kPageSize));
   if (resident) {
     RawClearMemory(page_begin, page_begin + size);
     // Note we check madvise return value against -1, as it seems old kernels
diff --git a/runtime/base/gc_visited_arena_pool.cc b/runtime/base/gc_visited_arena_pool.cc
index 38ef0ff..d587a71 100644
--- a/runtime/base/gc_visited_arena_pool.cc
+++ b/runtime/base/gc_visited_arena_pool.cc
@@ -89,7 +89,7 @@
   // userfaultfd registration.
   ReaderMutexLock rmu(Thread::Current(), arena_pool->GetLock());
   // If the addr is at the beginning of a page, then we set it for that page too.
-  if (IsAligned<kPageSize>(obj_begin)) {
+  if (IsAlignedParam(obj_begin, kPageSize)) {
     first_obj_array_[idx] = obj_begin;
   }
   while (idx < last_byte_idx) {
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index b9ea85f..4e006b5 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -2662,14 +2662,14 @@
     };
 
     // region space
-    DCHECK(IsAligned<kPageSize>(region_space_->Limit()));
+    DCHECK(IsAlignedParam(region_space_->Limit(), kPageSize));
     gc_ranges.emplace_back(range_t(region_space_->Begin(), region_space_->Limit()));
     // mark bitmap
     add_gc_range(region_space_bitmap_->Begin(), region_space_bitmap_->Size());
 
     // non-moving space
     {
-      DCHECK(IsAligned<kPageSize>(heap_->non_moving_space_->Limit()));
+      DCHECK(IsAlignedParam(heap_->non_moving_space_->Limit(), kPageSize));
       gc_ranges.emplace_back(range_t(heap_->non_moving_space_->Begin(),
                                      heap_->non_moving_space_->Limit()));
       // mark bitmap
@@ -2689,7 +2689,7 @@
     // large-object space
     if (heap_->GetLargeObjectsSpace()) {
       heap_->GetLargeObjectsSpace()->ForEachMemMap([&add_gc_range](const MemMap& map) {
-        DCHECK(IsAligned<kPageSize>(map.BaseSize()));
+        DCHECK(IsAlignedParam(map.BaseSize(), kPageSize));
         add_gc_range(map.BaseBegin(), map.BaseSize());
       });
       // mark bitmap
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index c321f7c..5886497 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -1764,7 +1764,7 @@
                                  uint8_t* const pre_compact_page,
                                  uint8_t* dest,
                                  bool needs_memset_zero) {
-  DCHECK(IsAligned<kPageSize>(pre_compact_page));
+  DCHECK(IsAlignedParam(pre_compact_page, kPageSize));
   size_t bytes_copied;
   uint8_t* src_addr = reinterpret_cast<uint8_t*>(GetFromSpaceAddr(first_obj));
   uint8_t* pre_compact_addr = reinterpret_cast<uint8_t*>(first_obj);
@@ -1792,7 +1792,7 @@
     size_t offset = pre_compact_page - pre_compact_addr;
     pre_compact_addr = pre_compact_page;
     src_addr += offset;
-    DCHECK(IsAligned<kPageSize>(src_addr));
+    DCHECK(IsAlignedParam(src_addr, kPageSize));
   }
   // Copy the first chunk of live words
   std::memcpy(dest, src_addr, first_chunk_size);
@@ -2043,7 +2043,7 @@
 
 void MarkCompact::ZeropageIoctl(void* addr, bool tolerate_eexist, bool tolerate_enoent) {
   struct uffdio_zeropage uffd_zeropage;
-  DCHECK(IsAligned<kPageSize>(addr));
+  DCHECK(IsAlignedParam(addr, kPageSize));
   uffd_zeropage.range.start = reinterpret_cast<uintptr_t>(addr);
   uffd_zeropage.range.len = kPageSize;
   uffd_zeropage.mode = 0;
@@ -2277,7 +2277,7 @@
   }
   uint8_t* pre_compact_page = black_allocations_begin_ + (black_page_count_ * kPageSize);
 
-  DCHECK(IsAligned<kPageSize>(pre_compact_page));
+  DCHECK(IsAlignedParam(pre_compact_page, kPageSize));
 
   UpdateClassAfterObjMap();
   // These variables are maintained by FreeFromSpacePages().
@@ -3326,7 +3326,7 @@
 
   uint8_t* unused_space_begin =
       bump_pointer_space_->Begin() + nr_moving_space_used_pages * kPageSize;
-  DCHECK(IsAligned<kPageSize>(unused_space_begin));
+  DCHECK(IsAlignedParam(unused_space_begin, kPageSize));
   DCHECK(kMode == kCopyMode || fault_page < unused_space_begin);
   if (kMode == kCopyMode && fault_page >= unused_space_begin) {
     // There is a race which allows more than one thread to install a
@@ -3395,7 +3395,7 @@
             if (page_idx + 1 < moving_first_objs_count_ + black_page_count_) {
               next_page_first_obj = first_objs_moving_space_[page_idx + 1].AsMirrorPtr();
             }
-            DCHECK(IsAligned<kPageSize>(pre_compact_page));
+            DCHECK(IsAlignedParam(pre_compact_page, kPageSize));
             SlideBlackPage(first_obj,
                            next_page_first_obj,
                            first_chunk_size,
@@ -3842,7 +3842,7 @@
       count &= ~kSigbusCounterCompactionDoneMask;
     }
   } else {
-    DCHECK(IsAligned<kPageSize>(conc_compaction_termination_page_));
+    DCHECK(IsAlignedParam(conc_compaction_termination_page_, kPageSize));
     // We will only iterate once if gKernelHasFaultRetry is true.
     do {
       // madvise the page so that we can get userfaults on it.
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 189d3b8..c773f65 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -626,7 +626,7 @@
   // however not aligned to kElfSegmentAlignment. While technically this isn't
   // correct as per requirement in the ELF header, it has to be supported for
   // now. See also the comment at ImageHeader::RelocateImageReferences.
-  if (!IsAligned<kPageSize>(bss_begin_) ||
+  if (!IsAlignedParam(bss_begin_, kPageSize) ||
       !IsAlignedParam(bss_methods_, static_cast<size_t>(pointer_size)) ||
       !IsAlignedParam(bss_roots_, static_cast<size_t>(pointer_size)) ||
       !IsAligned<alignof(GcRoot<mirror::Object>)>(bss_end_)) {
diff --git a/runtime/thread_android.cc b/runtime/thread_android.cc
index 0f41e2f..df4511b 100644
--- a/runtime/thread_android.cc
+++ b/runtime/thread_android.cc
@@ -39,8 +39,8 @@
   // create different arbitrary alternate signal stacks and we do not want to erroneously
   // `madvise()` away pages that may hold data other than the alternate signal stack.
   if ((old_ss.ss_flags & SS_DISABLE) == 0 &&
-      IsAligned<kPageSize>(old_ss.ss_sp) &&
-      IsAligned<kPageSize>(old_ss.ss_size)) {
+      IsAlignedParam(old_ss.ss_sp, kPageSize) &&
+      IsAlignedParam(old_ss.ss_size, kPageSize)) {
     CHECK_EQ(old_ss.ss_flags & SS_ONSTACK, 0);
     // Note: We're testing and benchmarking ART on devices with old kernels
     // which may not support `MADV_FREE`, so we do not check the result.