Use CHECK_ALIGNED_PARAM for variable page size

This patch is part of the chain of changes preparing to make
kPageSize non-constexpr in a future patch.

Since kPageSize is going to become a non-constexpr value, it can no
longer be used as a template argument. Consequently, the
CHECK_ALIGNED(..., kPageSize) expressions, which are implemented via
the template function IsAligned<>, have to be replaced with
CHECK_ALIGNED_PARAM(..., kPageSize), which takes the alignment as a
runtime parameter.

The same is done for DCHECK_ALIGNED.
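
For illustration, below is a minimal, self-contained sketch of the
difference (the names mirror the IsAligned<>/IsAlignedParam() helpers,
but this is a simplified assumption, not the actual ART definitions):
the template form takes the alignment as a non-type template
parameter, so it requires a compile-time constant, while the
parameterized form accepts it as an ordinary runtime argument.

  #include <cassert>
  #include <cstdint>

  // Template form: the alignment is a non-type template parameter and
  // must be a constant expression (as the old constexpr kPageSize was).
  template <uint64_t kAlign, typename T>
  constexpr bool IsAligned(T x) {
    static_assert((kAlign & (kAlign - 1)) == 0, "alignment must be a power of two");
    return (static_cast<uint64_t>(x) & (kAlign - 1)) == 0;
  }

  // Parameterized form: the alignment is a plain runtime argument, so
  // it keeps working once the page size is only known at runtime.
  template <typename T>
  inline bool IsAlignedParam(T x, uint64_t align) {
    return (static_cast<uint64_t>(x) & (align - 1)) == 0;
  }

  // Hypothetical stand-in for a runtime page size query,
  // e.g. sysconf(_SC_PAGESIZE).
  uint64_t RuntimePageSize() { return 4096; }

  int main() {
    const uint64_t page_size = RuntimePageSize();       // not a constant expression
    assert(IsAligned<4096>(uint64_t{8192}));            // OK: literal constant alignment
    // IsAligned<page_size>(uint64_t{8192});            // would not compile
    assert(IsAlignedParam(uint64_t{8192}, page_size));  // OK: runtime alignment
    return 0;
  }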

Test: Same as for I5430741a8494b340ed7fd2d8692c41a59ad9c530.
      The whole patch chain was tested together.
Change-Id: I0e634dab62f2b44d078d7199d5b5feab945077b9
diff --git a/imgdiag/imgdiag.cc b/imgdiag/imgdiag.cc
index 1845ed3..1fba354 100644
--- a/imgdiag/imgdiag.cc
+++ b/imgdiag/imgdiag.cc
@@ -1603,8 +1603,8 @@
     auto read_contents = [&](File* mem_file,
                              /*out*/ MemMap* map,
                              /*out*/ ArrayRef<uint8_t>* contents) {
-      DCHECK_ALIGNED(boot_map.start, kPageSize);
-      DCHECK_ALIGNED(boot_map_size, kPageSize);
+      DCHECK_ALIGNED_PARAM(boot_map.start, kPageSize);
+      DCHECK_ALIGNED_PARAM(boot_map_size, kPageSize);
       std::string name = "Contents of " + mem_file->GetPath();
       std::string local_error_msg;
       // We need to use low 4 GiB memory so that we can walk the objects using standard
diff --git a/libartbase/base/mem_map.cc b/libartbase/base/mem_map.cc
index bc4dbb3..aba20c6 100644
--- a/libartbase/base/mem_map.cc
+++ b/libartbase/base/mem_map.cc
@@ -236,7 +236,7 @@
     *error_msg = StringPrintf("Invalid reservation for %s", name);
     return false;
   }
-  DCHECK_ALIGNED(reservation.Begin(), kPageSize);
+  DCHECK_ALIGNED_PARAM(reservation.Begin(), kPageSize);
   if (reservation.Begin() != expected_ptr) {
     *error_msg = StringPrintf("Bad image reservation start for %s: %p instead of %p",
                               name,
@@ -765,10 +765,10 @@
   DCHECK_GE(new_end, Begin());
   DCHECK_LE(new_end, End());
   DCHECK_LE(begin_ + size_, reinterpret_cast<uint8_t*>(base_begin_) + base_size_);
-  DCHECK_ALIGNED(begin_, kPageSize);
-  DCHECK_ALIGNED(base_begin_, kPageSize);
-  DCHECK_ALIGNED(reinterpret_cast<uint8_t*>(base_begin_) + base_size_, kPageSize);
-  DCHECK_ALIGNED(new_end, kPageSize);
+  DCHECK_ALIGNED_PARAM(begin_, kPageSize);
+  DCHECK_ALIGNED_PARAM(base_begin_, kPageSize);
+  DCHECK_ALIGNED_PARAM(reinterpret_cast<uint8_t*>(base_begin_) + base_size_, kPageSize);
+  DCHECK_ALIGNED_PARAM(new_end, kPageSize);
   uint8_t* old_end = begin_ + size_;
   uint8_t* old_base_end = reinterpret_cast<uint8_t*>(base_begin_) + base_size_;
   uint8_t* new_base_end = new_end;
@@ -783,7 +783,7 @@
   uint8_t* tail_base_begin = new_base_end;
   size_t tail_base_size = old_base_end - new_base_end;
   DCHECK_EQ(tail_base_begin + tail_base_size, old_base_end);
-  DCHECK_ALIGNED(tail_base_size, kPageSize);
+  DCHECK_ALIGNED_PARAM(tail_base_size, kPageSize);
 
   MEMORY_TOOL_MAKE_UNDEFINED(tail_base_begin, tail_base_size);
   // Note: Do not explicitly unmap the tail region, mmap() with MAP_FIXED automatically
@@ -834,8 +834,8 @@
   DCHECK_EQ(redzone_size_, 0u);
   DCHECK_EQ(begin_, base_begin_);
   DCHECK_EQ(size_, base_size_);
-  DCHECK_ALIGNED(begin_, kPageSize);
-  DCHECK_ALIGNED(size_, kPageSize);
+  DCHECK_ALIGNED_PARAM(begin_, kPageSize);
+  DCHECK_ALIGNED_PARAM(size_, kPageSize);
 
   // Check and round up the `byte_count`.
   DCHECK_NE(byte_count, 0u);
@@ -955,7 +955,7 @@
     size_t num_gaps = 0;
     size_t num = 1u;
     size_t size = map->BaseSize();
-    CHECK_ALIGNED(size, kPageSize);
+    CHECK_ALIGNED_PARAM(size, kPageSize);
     void* end = map->BaseEnd();
     while (it != maps_end &&
         it->second->GetProtect() == map->GetProtect() &&
@@ -969,12 +969,12 @@
         }
         size_t gap =
             reinterpret_cast<uintptr_t>(it->second->BaseBegin()) - reinterpret_cast<uintptr_t>(end);
-        CHECK_ALIGNED(gap, kPageSize);
+        CHECK_ALIGNED_PARAM(gap, kPageSize);
         os << "~0x" << std::hex << (gap / kPageSize) << "P";
         num = 0u;
         size = 0u;
       }
-      CHECK_ALIGNED(it->second->BaseSize(), kPageSize);
+      CHECK_ALIGNED_PARAM(it->second->BaseSize(), kPageSize);
       ++num;
       size += it->second->BaseSize();
       end = it->second->BaseEnd();
@@ -1092,7 +1092,7 @@
       --before_it;
       // Start at the end of the map before the upper bound.
       ptr = std::max(ptr, reinterpret_cast<uintptr_t>(before_it->second->BaseEnd()));
-      CHECK_ALIGNED(ptr, kPageSize);
+      CHECK_ALIGNED_PARAM(ptr, kPageSize);
     }
     while (it != gMaps->end()) {
       // How much space do we have until the next map?
@@ -1103,7 +1103,7 @@
       }
       // Otherwise, skip to the end of the map.
       ptr = reinterpret_cast<uintptr_t>(it->second->BaseEnd());
-      CHECK_ALIGNED(ptr, kPageSize);
+      CHECK_ALIGNED_PARAM(ptr, kPageSize);
       ++it;
     }
 
@@ -1188,7 +1188,7 @@
 #else
   UNUSED(low_4gb);
 #endif
-  DCHECK_ALIGNED(length, kPageSize);
+  DCHECK_ALIGNED_PARAM(length, kPageSize);
   // TODO:
   // A page allocator would be a useful abstraction here, as
   // 1) It is doubtful that MAP_32BIT on x86_64 is doing the right job for us
@@ -1347,7 +1347,7 @@
   CHECK_EQ(begin_, base_begin_) << "Unsupported";
   CHECK_EQ(size_, base_size_) << "Unsupported";
   CHECK_GT(alignment, static_cast<size_t>(kPageSize));
-  CHECK_ALIGNED(alignment, kPageSize);
+  CHECK_ALIGNED_PARAM(alignment, kPageSize);
   CHECK(!reuse_);
   if (IsAlignedParam(reinterpret_cast<uintptr_t>(base_begin_), alignment) &&
       (!align_both_ends || IsAlignedParam(base_size_, alignment))) {
diff --git a/runtime/base/gc_visited_arena_pool.cc b/runtime/base/gc_visited_arena_pool.cc
index d587a71..88180dc 100644
--- a/runtime/base/gc_visited_arena_pool.cc
+++ b/runtime/base/gc_visited_arena_pool.cc
@@ -41,8 +41,8 @@
     // entire arena.
     bytes_allocated_ = size;
   } else {
-    DCHECK_ALIGNED(size, kPageSize);
-    DCHECK_ALIGNED(start, kPageSize);
+    DCHECK_ALIGNED_PARAM(size, kPageSize);
+    DCHECK_ALIGNED_PARAM(start, kPageSize);
     size_t arr_size = size / kPageSize;
     first_obj_array_.reset(new uint8_t*[arr_size]);
     std::fill_n(first_obj_array_.get(), arr_size, nullptr);
@@ -50,7 +50,7 @@
 }
 
 void TrackedArena::ReleasePages(uint8_t* begin, size_t size, bool pre_zygote_fork) {
-  DCHECK_ALIGNED(begin, kPageSize);
+  DCHECK_ALIGNED_PARAM(begin, kPageSize);
   // Userfaultfd GC uses MAP_SHARED mappings for linear-alloc and therefore
   // MADV_DONTNEED will not free the pages from page cache. Therefore use
   // MADV_REMOVE instead, which is meant for this purpose.
diff --git a/runtime/base/gc_visited_arena_pool.h b/runtime/base/gc_visited_arena_pool.h
index 5a40fb8..d4fe4fb 100644
--- a/runtime/base/gc_visited_arena_pool.h
+++ b/runtime/base/gc_visited_arena_pool.h
@@ -44,8 +44,8 @@
   void VisitRoots(PageVisitor& visitor) const REQUIRES_SHARED(Locks::mutator_lock_) {
     uint8_t* page_begin = Begin();
     if (first_obj_array_.get() != nullptr) {
-      DCHECK_ALIGNED(Size(), kPageSize);
-      DCHECK_ALIGNED(Begin(), kPageSize);
+      DCHECK_ALIGNED_PARAM(Size(), kPageSize);
+      DCHECK_ALIGNED_PARAM(Begin(), kPageSize);
       for (int i = 0, nr_pages = Size() / kPageSize; i < nr_pages; i++, page_begin += kPageSize) {
         uint8_t* first = first_obj_array_[i];
         if (first != nullptr) {
@@ -71,8 +71,8 @@
     // by arena-allocator. This helps in reducing loop iterations below.
     uint8_t* last_byte = AlignUp(Begin() + GetBytesAllocated(), kPageSize);
     if (first_obj_array_.get() != nullptr) {
-      DCHECK_ALIGNED(Begin(), kPageSize);
-      DCHECK_ALIGNED(End(), kPageSize);
+      DCHECK_ALIGNED_PARAM(Begin(), kPageSize);
+      DCHECK_ALIGNED_PARAM(End(), kPageSize);
       DCHECK_LE(last_byte, End());
     } else {
       DCHECK_EQ(last_byte, End());
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index d0eaa88..93b1498 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -63,11 +63,11 @@
       page_release_mode_(page_release_mode),
       page_release_size_threshold_(page_release_size_threshold),
       is_running_on_memory_tool_(running_on_memory_tool) {
-  DCHECK_ALIGNED(base, kPageSize);
+  DCHECK_ALIGNED_PARAM(base, kPageSize);
   DCHECK_EQ(RoundUp(capacity, kPageSize), capacity);
   DCHECK_EQ(RoundUp(max_capacity, kPageSize), max_capacity);
   CHECK_LE(capacity, max_capacity);
-  CHECK_ALIGNED(page_release_size_threshold_, kPageSize);
+  CHECK_ALIGNED_PARAM(page_release_size_threshold_, kPageSize);
   // Zero the memory explicitly (don't rely on that the mem map is zero-initialized).
   if (!kMadviseZeroes) {
     memset(base_, 0, max_capacity);
@@ -361,7 +361,7 @@
     fpr->magic_num_ = kMagicNumFree;
   }
   fpr->SetByteSize(this, byte_size);
-  DCHECK_ALIGNED(fpr->ByteSize(this), kPageSize);
+  DCHECK_ALIGNED_PARAM(fpr->ByteSize(this), kPageSize);
 
   DCHECK(free_page_runs_.find(fpr) == free_page_runs_.end());
   if (!free_page_runs_.empty()) {
@@ -1368,7 +1368,7 @@
     DCHECK_LE(madvise_begin, page_map_mem_map_.End());
     size_t madvise_size = page_map_mem_map_.End() - madvise_begin;
     if (madvise_size > 0) {
-      DCHECK_ALIGNED(madvise_begin, kPageSize);
+      DCHECK_ALIGNED_PARAM(madvise_begin, kPageSize);
       DCHECK_EQ(RoundUp(madvise_size, kPageSize), madvise_size);
       if (!kMadviseZeroes) {
         memset(madvise_begin, 0, madvise_size);
@@ -1413,7 +1413,7 @@
         FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
         DCHECK(free_page_runs_.find(fpr) != free_page_runs_.end());
         size_t fpr_size = fpr->ByteSize(this);
-        DCHECK_ALIGNED(fpr_size, kPageSize);
+        DCHECK_ALIGNED_PARAM(fpr_size, kPageSize);
         void* start = fpr;
         if (kIsDebugBuild) {
           // In the debug build, the first page of a free page run
@@ -1768,7 +1768,7 @@
           CHECK(free_page_runs_.find(fpr) != free_page_runs_.end())
               << "An empty page must belong to the free page run set";
           size_t fpr_size = fpr->ByteSize(this);
-          CHECK_ALIGNED(fpr_size, kPageSize)
+          CHECK_ALIGNED_PARAM(fpr_size, kPageSize)
               << "A free page run size isn't page-aligned : " << fpr_size;
           size_t num_pages = fpr_size / kPageSize;
           CHECK_GT(num_pages, static_cast<uintptr_t>(0))
@@ -2013,7 +2013,7 @@
           // to the next page.
           if (free_page_runs_.find(fpr) != free_page_runs_.end()) {
             size_t fpr_size = fpr->ByteSize(this);
-            DCHECK_ALIGNED(fpr_size, kPageSize);
+            DCHECK_ALIGNED_PARAM(fpr_size, kPageSize);
             uint8_t* start = reinterpret_cast<uint8_t*>(fpr);
             reclaimed_bytes += ReleasePageRange(start, start + fpr_size);
             size_t pages = fpr_size / kPageSize;
@@ -2040,8 +2040,8 @@
 }
 
 size_t RosAlloc::ReleasePageRange(uint8_t* start, uint8_t* end) {
-  DCHECK_ALIGNED(start, kPageSize);
-  DCHECK_ALIGNED(end, kPageSize);
+  DCHECK_ALIGNED_PARAM(start, kPageSize);
+  DCHECK_ALIGNED_PARAM(end, kPageSize);
   DCHECK_LT(start, end);
   if (kIsDebugBuild) {
     // In the debug build, the first page of a free page run
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index bb2f426..a9007f2 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -56,7 +56,7 @@
       size_t pm_idx = rosalloc->ToPageMapIndex(fpr_base);
       size_t byte_size = rosalloc->free_page_run_size_map_[pm_idx];
       DCHECK_GE(byte_size, static_cast<size_t>(0));
-      DCHECK_ALIGNED(byte_size, kPageSize);
+      DCHECK_ALIGNED_PARAM(byte_size, kPageSize);
       return byte_size;
     }
     void SetByteSize(RosAlloc* rosalloc, size_t byte_size)
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index 5886497..22089f9 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -554,8 +554,8 @@
 }
 
 void MarkCompact::AddLinearAllocSpaceData(uint8_t* begin, size_t len) {
-  DCHECK_ALIGNED(begin, kPageSize);
-  DCHECK_ALIGNED(len, kPageSize);
+  DCHECK_ALIGNED_PARAM(begin, kPageSize);
+  DCHECK_ALIGNED_PARAM(len, kPageSize);
   DCHECK_GE(len, kPMDSize);
   size_t alignment = BestPageTableAlignment(len);
   bool is_shared = false;
@@ -1950,7 +1950,7 @@
                                     size_t arr_len) {
   DCHECK(minor_fault_initialized_);
   DCHECK_LT(arr_idx, arr_len);
-  DCHECK_ALIGNED(to_space_start, kPageSize);
+  DCHECK_ALIGNED_PARAM(to_space_start, kPageSize);
   // Claim all the contiguous pages, which are ready to be mapped, and then do
   // so in a single ioctl. This helps avoid the overhead of invoking syscall
   // several times and also maps the already-processed pages, avoiding
@@ -1997,7 +1997,7 @@
       // Bail out by setting the remaining pages' state back to kProcessed and
       // then waking up any waiting threads.
       DCHECK_GE(uffd_continue.mapped, 0);
-      DCHECK_ALIGNED(uffd_continue.mapped, kPageSize);
+      DCHECK_ALIGNED_PARAM(uffd_continue.mapped, kPageSize);
       DCHECK_LT(uffd_continue.mapped, static_cast<ssize_t>(length));
       if (kFirstPageMapping) {
         // In this case the first page must be mapped.
@@ -2193,8 +2193,8 @@
   }
 
   DCHECK_NE(reclaim_begin, nullptr);
-  DCHECK_ALIGNED(reclaim_begin, kPageSize);
-  DCHECK_ALIGNED(last_reclaimed_page_, kPageSize);
+  DCHECK_ALIGNED_PARAM(reclaim_begin, kPageSize);
+  DCHECK_ALIGNED_PARAM(last_reclaimed_page_, kPageSize);
   // Check if the 'class_after_obj_map_' map allows pages to be freed.
   for (; class_after_obj_iter_ != class_after_obj_ordered_map_.rend(); class_after_obj_iter_++) {
     mirror::Object* klass = class_after_obj_iter_->first.AsMirrorPtr();
@@ -2663,7 +2663,7 @@
   void MultiObjectArena(uint8_t* page_begin, uint8_t* first_obj)
       REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(first_obj != nullptr);
-    DCHECK_ALIGNED(page_begin, kPageSize);
+    DCHECK_ALIGNED_PARAM(page_begin, kPageSize);
     uint8_t* page_end = page_begin + kPageSize;
     uint32_t obj_size;
     for (uint8_t* byte = first_obj; byte < page_end;) {
@@ -3634,7 +3634,7 @@
         continue;
       }
       uint8_t* last_byte = pair.second;
-      DCHECK_ALIGNED(last_byte, kPageSize);
+      DCHECK_ALIGNED_PARAM(last_byte, kPageSize);
       others_processing = false;
       arena_begin = arena->Begin();
       arena_size = arena->Size();
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index aa94421..419b842 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -398,7 +398,7 @@
   memcpy(dest, src, page_remain);
   byte_src += page_remain;
   byte_dest += page_remain;
-  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), kPageSize);
+  DCHECK_ALIGNED_PARAM(reinterpret_cast<uintptr_t>(byte_dest), kPageSize);
   DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), sizeof(uintptr_t));
   DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_src), sizeof(uintptr_t));
   while (byte_src + kPageSize < limit) {
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index a9402d2..2a32b9b 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -186,8 +186,8 @@
   SetEnd(reinterpret_cast<uint8_t*>(RoundUp(reinterpret_cast<uintptr_t>(End()), kPageSize)));
   DCHECK_ALIGNED(begin_, accounting::CardTable::kCardSize);
   DCHECK_ALIGNED(End(), accounting::CardTable::kCardSize);
-  DCHECK_ALIGNED(begin_, kPageSize);
-  DCHECK_ALIGNED(End(), kPageSize);
+  DCHECK_ALIGNED_PARAM(begin_, kPageSize);
+  DCHECK_ALIGNED_PARAM(End(), kPageSize);
   size_t size = RoundUp(Size(), kPageSize);
   // Trimming the heap should be done by the caller since we may have invalidated the accounting
   // stored in between objects.
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index 9a66f8a..c25770c 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -405,8 +405,8 @@
   for (size_t i = 0u; i < num_regions_; ++i) {
     if (regions_[i].IsFree()) {
       uint8_t* begin = regions_[i].Begin();
-      DCHECK_ALIGNED(begin, kPageSize);
-      DCHECK_ALIGNED(regions_[i].End(), kPageSize);
+      DCHECK_ALIGNED_PARAM(begin, kPageSize);
+      DCHECK_ALIGNED_PARAM(regions_[i].End(), kPageSize);
       bool res = madvise(begin, regions_[i].End() - begin, MADV_DONTNEED);
       CHECK_NE(res, -1) << "madvise failed";
     }
diff --git a/runtime/image.cc b/runtime/image.cc
index 10c053f..170a576 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -105,7 +105,7 @@
   //       to be done in alignment with the dynamic linker's ELF loader as
   //       otherwise inconsistency would still be possible e.g. when using
   //       `dlopen`-like calls to load OAT files.
-  CHECK_ALIGNED(delta, kPageSize) << "relocation delta must be page aligned";
+  CHECK_ALIGNED_PARAM(delta, kPageSize) << "relocation delta must be page aligned";
   oat_file_begin_ += delta;
   oat_data_begin_ += delta;
   oat_data_end_ += delta;
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index 67010f3..96e51ce 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -317,8 +317,8 @@
   uint8_t* release_start = AlignUp(reinterpret_cast<uint8_t*>(&table_[top_index]), kPageSize);
   uint8_t* release_end = static_cast<uint8_t*>(table_mem_map_.BaseEnd());
   DCHECK_GE(reinterpret_cast<uintptr_t>(release_end), reinterpret_cast<uintptr_t>(release_start));
-  DCHECK_ALIGNED(release_end, kPageSize);
-  DCHECK_ALIGNED(release_end - release_start, kPageSize);
+  DCHECK_ALIGNED_PARAM(release_end, kPageSize);
+  DCHECK_ALIGNED_PARAM(release_end - release_start, kPageSize);
   if (release_start != release_end) {
     madvise(release_start, release_end - release_start, MADV_DONTNEED);
   }
diff --git a/runtime/jni/local_reference_table.cc b/runtime/jni/local_reference_table.cc
index 15aaf5b..68359f2 100644
--- a/runtime/jni/local_reference_table.cc
+++ b/runtime/jni/local_reference_table.cc
@@ -628,8 +628,8 @@
       uint8_t* release_end = reinterpret_cast<uint8_t*>(&table[table_size]);
       DCHECK_GE(reinterpret_cast<uintptr_t>(release_end),
                 reinterpret_cast<uintptr_t>(release_start));
-      DCHECK_ALIGNED(release_end, kPageSize);
-      DCHECK_ALIGNED(release_end - release_start, kPageSize);
+      DCHECK_ALIGNED_PARAM(release_end, kPageSize);
+      DCHECK_ALIGNED_PARAM(release_end - release_start, kPageSize);
       if (release_start != release_end) {
         madvise(release_start, release_end - release_start, MADV_DONTNEED);
       }
diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc
index 92ac845..98af0cd 100644
--- a/runtime/thread_pool.cc
+++ b/runtime/thread_pool.cc
@@ -62,7 +62,7 @@
                                   /*low_4gb=*/ false,
                                   &error_msg);
     CHECK(stack_.IsValid()) << error_msg;
-    CHECK_ALIGNED(stack_.Begin(), kPageSize);
+    CHECK_ALIGNED_PARAM(stack_.Begin(), kPageSize);
     CheckedCall(mprotect,
                 "mprotect bottom page of thread pool worker stack",
                 stack_.Begin(),