Use (D)CHECK_ALIGNED more.

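Replace the open-coded alignment checks of the form
  DCHECK(IsAligned<kAlignment>(value));
with the equivalent
  DCHECK_ALIGNED(value, kAlignment);
macro (and likewise CHECK_ALIGNED for CHECK) throughout the GC code. The
macro definitions themselves are not part of this diff; as a rough
sketch, assuming they simply wrap the existing IsAligned<> helper, they
are expected to look something like:

  // Sketch only -- the actual definitions live in the runtime's base headers.
  #define CHECK_ALIGNED(value, alignment) \
    CHECK(::art::IsAligned<alignment>(value)) \
        << reinterpret_cast<const void*>(value)
  #define DCHECK_ALIGNED(value, alignment) \
    DCHECK(::art::IsAligned<alignment>(value)) \
        << reinterpret_cast<const void*>(value)

so every call site keeps its semantics while reading more uniformly. If
the macros also stream the value as sketched above, failures report the
unaligned value automatically, which is presumably why the explicit
"<< obj" in the heap.cc message below becomes unnecessary.
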
Change-Id: I9d740f6a88d01e028d4ddc3e4e62b0a73ea050af
diff --git a/runtime/gc/accounting/read_barrier_table.h b/runtime/gc/accounting/read_barrier_table.h
index 436df92..86266e2 100644
--- a/runtime/gc/accounting/read_barrier_table.h
+++ b/runtime/gc/accounting/read_barrier_table.h
@@ -51,8 +51,8 @@
   void Clear(uint8_t* start_addr, uint8_t* end_addr) {
     DCHECK(IsValidHeapAddr(start_addr)) << start_addr;
     DCHECK(IsValidHeapAddr(end_addr)) << end_addr;
-    DCHECK(IsAligned<kRegionSize>(start_addr));
-    DCHECK(IsAligned<kRegionSize>(end_addr));
+    DCHECK_ALIGNED(start_addr, kRegionSize);
+    DCHECK_ALIGNED(end_addr, kRegionSize);
     uint8_t* entry_start = EntryFromAddr(start_addr);
     uint8_t* entry_end = EntryFromAddr(end_addr);
     memset(reinterpret_cast<void*>(entry_start), 0, entry_end - entry_start);
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index 6546eb4..cdeaa50 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -79,7 +79,7 @@
 
 template<size_t kAlignment>
 void SpaceBitmap<kAlignment>::SetHeapLimit(uintptr_t new_end) {
-  DCHECK(IsAligned<kBitsPerIntPtrT * kAlignment>(new_end));
+  DCHECK_ALIGNED(new_end, kBitsPerIntPtrT * kAlignment);
   size_t new_size = OffsetToIndex(new_end - heap_begin_) * sizeof(intptr_t);
   if (new_size < bitmap_size_) {
     bitmap_size_ = new_size;
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index bd10f7b..abaa97f 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -63,7 +63,7 @@
   DCHECK_EQ(RoundUp(capacity, kPageSize), capacity);
   DCHECK_EQ(RoundUp(max_capacity, kPageSize), max_capacity);
   CHECK_LE(capacity, max_capacity);
-  CHECK(IsAligned<kPageSize>(page_release_size_threshold_));
+  CHECK_ALIGNED(page_release_size_threshold_, kPageSize);
   if (!initialized_) {
     Initialize();
   }
@@ -349,7 +349,7 @@
     fpr->magic_num_ = kMagicNumFree;
   }
   fpr->SetByteSize(this, byte_size);
-  DCHECK(IsAligned<kPageSize>(fpr->ByteSize(this)));
+  DCHECK_ALIGNED(fpr->ByteSize(this), kPageSize);
 
   DCHECK(free_page_runs_.find(fpr) == free_page_runs_.end());
   if (!free_page_runs_.empty()) {
@@ -1567,7 +1567,7 @@
         FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
         DCHECK(free_page_runs_.find(fpr) != free_page_runs_.end());
         size_t fpr_size = fpr->ByteSize(this);
-        DCHECK(IsAligned<kPageSize>(fpr_size));
+        DCHECK_ALIGNED(fpr_size, kPageSize);
         void* start = fpr;
         if (kIsDebugBuild) {
           // In the debug build, the first page of a free page run
@@ -1916,7 +1916,7 @@
           CHECK(free_page_runs_.find(fpr) != free_page_runs_.end())
               << "An empty page must belong to the free page run set";
           size_t fpr_size = fpr->ByteSize(this);
-          CHECK(IsAligned<kPageSize>(fpr_size))
+          CHECK_ALIGNED(fpr_size, kPageSize)
               << "A free page run size isn't page-aligned : " << fpr_size;
           size_t num_pages = fpr_size / kPageSize;
           CHECK_GT(num_pages, static_cast<uintptr_t>(0))
@@ -2163,7 +2163,7 @@
           // to the next page.
           if (free_page_runs_.find(fpr) != free_page_runs_.end()) {
             size_t fpr_size = fpr->ByteSize(this);
-            DCHECK(IsAligned<kPageSize>(fpr_size));
+            DCHECK_ALIGNED(fpr_size, kPageSize);
             uint8_t* start = reinterpret_cast<uint8_t*>(fpr);
             reclaimed_bytes += ReleasePageRange(start, start + fpr_size);
             size_t pages = fpr_size / kPageSize;
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index b5d5c34..8bbace9 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -1585,7 +1585,7 @@
 // Fill the given memory block with a dummy object. Used to fill in a
 // copy of an object that was lost in a race.
 void ConcurrentCopying::FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size) {
-  CHECK(IsAligned<kObjectAlignment>(byte_size));
+  CHECK_ALIGNED(byte_size, kObjectAlignment);
   memset(dummy_obj, 0, byte_size);
   mirror::Class* int_array_class = mirror::IntArray::GetArrayClass();
   CHECK(int_array_class != nullptr);
@@ -1618,7 +1618,7 @@
 // Reuse the memory blocks that were copies of objects that were lost in races.
 mirror::Object* ConcurrentCopying::AllocateInSkippedBlock(size_t alloc_size) {
   // Try to reuse the blocks that were unused due to CAS failures.
-  CHECK(IsAligned<space::RegionSpace::kAlignment>(alloc_size));
+  CHECK_ALIGNED(alloc_size, space::RegionSpace::kAlignment);
   Thread* self = Thread::Current();
   size_t min_object_size = RoundUp(sizeof(mirror::Object), space::RegionSpace::kAlignment);
   MutexLock mu(self, skipped_blocks_lock_);
@@ -1637,7 +1637,7 @@
         // Not found.
         return nullptr;
       }
-      CHECK(IsAligned<space::RegionSpace::kAlignment>(it->first - alloc_size));
+      CHECK_ALIGNED(it->first - alloc_size, space::RegionSpace::kAlignment);
       CHECK_GE(it->first - alloc_size, min_object_size)
           << "byte_size=" << byte_size << " it->first=" << it->first << " alloc_size=" << alloc_size;
     }
@@ -1648,7 +1648,7 @@
   uint8_t* addr = it->second;
   CHECK_GE(byte_size, alloc_size);
   CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr)));
-  CHECK(IsAligned<space::RegionSpace::kAlignment>(byte_size));
+  CHECK_ALIGNED(byte_size, space::RegionSpace::kAlignment);
   if (kVerboseMode) {
     LOG(INFO) << "Reusing skipped bytes : " << reinterpret_cast<void*>(addr) << ", " << byte_size;
   }
@@ -1656,7 +1656,7 @@
   memset(addr, 0, byte_size);
   if (byte_size > alloc_size) {
     // Return the remainder to the map.
-    CHECK(IsAligned<space::RegionSpace::kAlignment>(byte_size - alloc_size));
+    CHECK_ALIGNED(byte_size - alloc_size, space::RegionSpace::kAlignment);
     CHECK_GE(byte_size - alloc_size, min_object_size);
     FillWithDummyObject(reinterpret_cast<mirror::Object*>(addr + alloc_size),
                         byte_size - alloc_size);
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index e0d6d6b..4eb15e2 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -831,8 +831,8 @@
       // Align up the end address. For example, the image space's end
       // may not be card-size-aligned.
       card_end = AlignUp(card_end, accounting::CardTable::kCardSize);
-      DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_begin));
-      DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_end));
+      DCHECK_ALIGNED(card_begin, accounting::CardTable::kCardSize);
+      DCHECK_ALIGNED(card_end, accounting::CardTable::kCardSize);
       // Calculate how many bytes of heap we will scan.
       const size_t address_range = card_end - card_begin;
       // Calculate how much address range each task gets.
diff --git a/runtime/gc/collector/semi_space-inl.h b/runtime/gc/collector/semi_space-inl.h
index 7b19dc9..a7de44f 100644
--- a/runtime/gc/collector/semi_space-inl.h
+++ b/runtime/gc/collector/semi_space-inl.h
@@ -34,7 +34,7 @@
   void operator()(const mirror::Object* obj) const {
     CHECK(!semi_space_->to_space_->HasAddress(obj)) << "Marking " << obj << " in to_space_";
     // Marking a large object; make sure it's aligned as a sanity check.
-    CHECK(IsAligned<kPageSize>(obj));
+    CHECK_ALIGNED(obj, kPageSize);
   }
 
  private:
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 795d2a2..2b94cf1 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1431,10 +1431,10 @@
   if (UNLIKELY(static_cast<size_t>(num_bytes_allocated_.LoadRelaxed()) < 10 * KB)) {
     return;
   }
-  CHECK(IsAligned<kObjectAlignment>(obj)) << "Object isn't aligned: " << obj;
+  CHECK_ALIGNED(obj, kObjectAlignment) << "Object isn't aligned";
   mirror::Class* c = obj->GetFieldObject<mirror::Class, kVerifyNone>(mirror::Object::ClassOffset());
   CHECK(c != nullptr) << "Null class in object " << obj;
-  CHECK(IsAligned<kObjectAlignment>(c)) << "Class " << c << " not aligned in object " << obj;
+  CHECK_ALIGNED(c, kObjectAlignment) << "Class " << c << " not aligned in object " << obj;
   CHECK(VerifyClassClass(c));
 
   if (verify_object_mode_ > kVerifyObjectModeFast) {
diff --git a/runtime/gc/space/bump_pointer_space-inl.h b/runtime/gc/space/bump_pointer_space-inl.h
index d9ad9a3..338a41e 100644
--- a/runtime/gc/space/bump_pointer_space-inl.h
+++ b/runtime/gc/space/bump_pointer_space-inl.h
@@ -63,7 +63,7 @@
 }
 
 inline mirror::Object* BumpPointerSpace::AllocNonvirtualWithoutAccounting(size_t num_bytes) {
-  DCHECK(IsAligned<kAlignment>(num_bytes));
+  DCHECK_ALIGNED(num_bytes, kAlignment);
   uint8_t* old_end;
   uint8_t* new_end;
   do {
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index a913e59..2798b21 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -440,7 +440,7 @@
       AllocationInfo* next_next_info = next_info->GetNextInfo();
       // Next next info can't be free since we always coalesce.
       DCHECK(!next_next_info->IsFree());
-      DCHECK(IsAligned<kAlignment>(next_next_info->ByteSize()));
+      DCHECK_ALIGNED(next_next_info->ByteSize(), kAlignment);
       new_free_info = next_next_info;
       new_free_size += next_next_info->GetPrevFreeBytes();
       RemoveFreePrev(next_next_info);
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index b014217..3a0d814 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -46,8 +46,8 @@
   if (create_bitmaps) {
     size_t bitmap_index = bitmap_index_++;
     static const uintptr_t kGcCardSize = static_cast<uintptr_t>(accounting::CardTable::kCardSize);
-    CHECK(IsAligned<kGcCardSize>(reinterpret_cast<uintptr_t>(mem_map->Begin())));
-    CHECK(IsAligned<kGcCardSize>(reinterpret_cast<uintptr_t>(mem_map->End())));
+    CHECK_ALIGNED(reinterpret_cast<uintptr_t>(mem_map->Begin()), kGcCardSize);
+    CHECK_ALIGNED(reinterpret_cast<uintptr_t>(mem_map->End()), kGcCardSize);
     live_bitmap_.reset(accounting::ContinuousSpaceBitmap::Create(
         StringPrintf("allocspace %s live-bitmap %d", name.c_str(), static_cast<int>(bitmap_index)),
         Begin(), NonGrowthLimitCapacity()));
@@ -164,10 +164,10 @@
   // alloc spaces.
   RevokeAllThreadLocalBuffers();
   SetEnd(reinterpret_cast<uint8_t*>(RoundUp(reinterpret_cast<uintptr_t>(End()), kPageSize)));
-  DCHECK(IsAligned<accounting::CardTable::kCardSize>(begin_));
-  DCHECK(IsAligned<accounting::CardTable::kCardSize>(End()));
-  DCHECK(IsAligned<kPageSize>(begin_));
-  DCHECK(IsAligned<kPageSize>(End()));
+  DCHECK_ALIGNED(begin_, accounting::CardTable::kCardSize);
+  DCHECK_ALIGNED(End(), accounting::CardTable::kCardSize);
+  DCHECK_ALIGNED(begin_, kPageSize);
+  DCHECK_ALIGNED(End(), kPageSize);
   size_t size = RoundUp(Size(), kPageSize);
   // Trimming the heap should be done by the caller since we may have invalidated the accounting
   // stored in between objects.
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
index 1cdf69d..db005f7 100644
--- a/runtime/gc/space/region_space-inl.h
+++ b/runtime/gc/space/region_space-inl.h
@@ -43,7 +43,7 @@
 inline mirror::Object* RegionSpace::AllocNonvirtual(size_t num_bytes, size_t* bytes_allocated,
                                                     size_t* usable_size,
                                                     size_t* bytes_tl_bulk_allocated) {
-  DCHECK(IsAligned<kAlignment>(num_bytes));
+  DCHECK_ALIGNED(num_bytes, kAlignment);
   mirror::Object* obj;
   if (LIKELY(num_bytes <= kRegionSize)) {
     // Non-large object.
@@ -115,7 +115,7 @@
                                                   size_t* usable_size,
                                                   size_t* bytes_tl_bulk_allocated) {
   DCHECK(IsAllocated() && IsInToSpace());
-  DCHECK(IsAligned<kAlignment>(num_bytes));
+  DCHECK_ALIGNED(num_bytes, kAlignment);
   Atomic<uint8_t*>* atomic_top = reinterpret_cast<Atomic<uint8_t*>*>(&top_);
   uint8_t* old_top;
   uint8_t* new_top;
@@ -266,7 +266,7 @@
 mirror::Object* RegionSpace::AllocLarge(size_t num_bytes, size_t* bytes_allocated,
                                         size_t* usable_size,
                                         size_t* bytes_tl_bulk_allocated) {
-  DCHECK(IsAligned<kAlignment>(num_bytes));
+  DCHECK_ALIGNED(num_bytes, kAlignment);
   DCHECK_GT(num_bytes, kRegionSize);
   size_t num_regs = RoundUp(num_bytes, kRegionSize) / kRegionSize;
   DCHECK_GT(num_regs, 0U);
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index 814ab6c..9a2d0c6 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -287,7 +287,7 @@
 
 void RegionSpace::FreeLarge(mirror::Object* large_obj, size_t bytes_allocated) {
   DCHECK(Contains(large_obj));
-  DCHECK(IsAligned<kRegionSize>(large_obj));
+  DCHECK_ALIGNED(large_obj, kRegionSize);
   MutexLock mu(Thread::Current(), region_lock_);
   uint8_t* begin_addr = reinterpret_cast<uint8_t*>(large_obj);
   uint8_t* end_addr = AlignUp(reinterpret_cast<uint8_t*>(large_obj) + bytes_allocated, kRegionSize);
@@ -366,7 +366,7 @@
   uint8_t* tlab_start = thread->GetTlabStart();
   DCHECK_EQ(thread->HasTlab(), tlab_start != nullptr);
   if (tlab_start != nullptr) {
-    DCHECK(IsAligned<kRegionSize>(tlab_start));
+    DCHECK_ALIGNED(tlab_start, kRegionSize);
     Region* r = RefToRegionLocked(reinterpret_cast<mirror::Object*>(tlab_start));
     DCHECK(r->IsAllocated());
     DCHECK_EQ(thread->GetThreadLocalBytesAllocated(), kRegionSize);