Use (D)CHECK_ALIGNED more.
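
Replace open-coded alignment checks of the form CHECK(IsAligned<N>(x)), and
ad-hoc mask checks such as CHECK_EQ(x & 1u, 0u) and
DCHECK_EQ(frame_size & (kStackAlignment - 1), 0U), with CHECK_ALIGNED(x, N)
and DCHECK_ALIGNED(x, N) throughout the runtime. The macros state the intent
directly, still accept streamed context messages (see the rosalloc.cc,
fault_handler.cc and heap.cc hunks), and report the offending value
themselves, which is presumably why the heap.cc hunk can drop its explicit
"<< obj".

For reference, here is a minimal standalone sketch of the underlying
pattern. It is an illustration only, not the ART definitions: the IsAligned
helpers and the CHECK_ALIGNED body below are hypothetical stand-ins that
simply abort with a message, whereas the real macros build on
CHECK()/DCHECK(), which is why the "<<" message chaining in the hunks above
still works.

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>

    // A value is aligned to kAlignment (a power of two) iff its low bits
    // are all zero.
    template <size_t kAlignment, typename T>
    constexpr bool IsAligned(T x) {
      static_assert((kAlignment & (kAlignment - 1)) == 0,
                    "alignment must be a power of two");
      return (x & (kAlignment - 1)) == 0;
    }

    // Pointer overload: test the address value.
    template <size_t kAlignment, typename T>
    bool IsAligned(T* p) {
      return IsAligned<kAlignment>(reinterpret_cast<uintptr_t>(p));
    }

    // Hypothetical stand-in for CHECK_ALIGNED: name the failing expression
    // and the required alignment, then abort.
    #define CHECK_ALIGNED(value, alignment) \
      do { \
        if (!IsAligned<(alignment)>(value)) { \
          std::fprintf(stderr, "%s:%d: %s is not %zu-byte aligned\n", \
                       __FILE__, __LINE__, #value, \
                       static_cast<size_t>(alignment)); \
          std::abort(); \
        } \
      } while (false)

    int main() {
      size_t frame_size = 64;
      CHECK_ALIGNED(frame_size, 16);  // passes: 64 is a multiple of 16
      return 0;
    }
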
Change-Id: I9d740f6a88d01e028d4ddc3e4e62b0a73ea050af
diff --git a/runtime/base/histogram-inl.h b/runtime/base/histogram-inl.h
index aba3762..03980e3 100644
--- a/runtime/base/histogram-inl.h
+++ b/runtime/base/histogram-inl.h
@@ -66,7 +66,7 @@
while (max_ < new_max) {
// If we have reached the maximum number of buckets, merge buckets together.
if (frequency_.size() >= max_buckets_) {
- CHECK(IsAligned<2>(frequency_.size()));
+ CHECK_ALIGNED(frequency_.size(), 2);
// We double the width of each bucket to reduce the number of buckets by a factor of 2.
bucket_width_ *= 2;
const size_t limit = frequency_.size() / 2;
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 1ec02aa..122c35f 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -249,13 +249,13 @@
if (!gaps->empty() && gaps->top().size >= n) {
FieldGap gap = gaps->top();
gaps->pop();
- DCHECK(IsAligned<n>(gap.start_offset));
+ DCHECK_ALIGNED(gap.start_offset, n);
field->SetOffset(MemberOffset(gap.start_offset));
if (gap.size > n) {
AddFieldGap(gap.start_offset + n, gap.start_offset + gap.size, gaps);
}
} else {
- DCHECK(IsAligned<n>(field_offset->Uint32Value()));
+ DCHECK_ALIGNED(field_offset->Uint32Value(), n);
field->SetOffset(*field_offset);
*field_offset = MemberOffset(field_offset->Uint32Value() + n);
}
@@ -5174,7 +5174,7 @@
field_offset = MemberOffset(RoundUp(field_offset.Uint32Value(), 4));
AddFieldGap(old_offset.Uint32Value(), field_offset.Uint32Value(), &gaps);
}
- DCHECK(IsAligned<sizeof(mirror::HeapReference<mirror::Object>)>(field_offset.Uint32Value()));
+ DCHECK_ALIGNED(field_offset.Uint32Value(), sizeof(mirror::HeapReference<mirror::Object>));
grouped_and_sorted_fields.pop_front();
num_reference_fields++;
field->SetOffset(field_offset);
diff --git a/runtime/exception_test.cc b/runtime/exception_test.cc
index bc3ba21..de4b3f4 100644
--- a/runtime/exception_test.cc
+++ b/runtime/exception_test.cc
@@ -93,7 +93,7 @@
// NOTE: Don't align the code (it will not be executed) but check that the Thumb2
// adjustment will be a NOP, see ArtMethod::EntryPointToCodePointer().
- CHECK_EQ(mapping_table_offset & 1u, 0u);
+ CHECK_ALIGNED(mapping_table_offset, 2);
const uint8_t* code_ptr = &fake_header_code_and_maps_[gc_map_offset];
method_f_ = my_klass_->FindVirtualMethod("f", "()I", sizeof(void*));
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index 5f91566..47f9b1b 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -331,7 +331,7 @@
// If we don't have a potential method, we're outta here.
VLOG(signals) << "potential method: " << method_obj;
// TODO: Check linear alloc and image.
- DCHECK(IsAligned<sizeof(void*)>(ArtMethod::ObjectSize(sizeof(void*))))
+ DCHECK_ALIGNED(ArtMethod::ObjectSize(sizeof(void*)), sizeof(void*))
<< "ArtMethod is not pointer aligned";
if (method_obj == nullptr || !IsAligned<sizeof(void*)>(method_obj)) {
VLOG(signals) << "no method";
diff --git a/runtime/gc/accounting/read_barrier_table.h b/runtime/gc/accounting/read_barrier_table.h
index 436df92..86266e2 100644
--- a/runtime/gc/accounting/read_barrier_table.h
+++ b/runtime/gc/accounting/read_barrier_table.h
@@ -51,8 +51,8 @@
void Clear(uint8_t* start_addr, uint8_t* end_addr) {
DCHECK(IsValidHeapAddr(start_addr)) << start_addr;
DCHECK(IsValidHeapAddr(end_addr)) << end_addr;
- DCHECK(IsAligned<kRegionSize>(start_addr));
- DCHECK(IsAligned<kRegionSize>(end_addr));
+ DCHECK_ALIGNED(start_addr, kRegionSize);
+ DCHECK_ALIGNED(end_addr, kRegionSize);
uint8_t* entry_start = EntryFromAddr(start_addr);
uint8_t* entry_end = EntryFromAddr(end_addr);
memset(reinterpret_cast<void*>(entry_start), 0, entry_end - entry_start);
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index 6546eb4..cdeaa50 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -79,7 +79,7 @@
template<size_t kAlignment>
void SpaceBitmap<kAlignment>::SetHeapLimit(uintptr_t new_end) {
- DCHECK(IsAligned<kBitsPerIntPtrT * kAlignment>(new_end));
+ DCHECK_ALIGNED(new_end, kBitsPerIntPtrT * kAlignment);
size_t new_size = OffsetToIndex(new_end - heap_begin_) * sizeof(intptr_t);
if (new_size < bitmap_size_) {
bitmap_size_ = new_size;
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index bd10f7b..abaa97f 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -63,7 +63,7 @@
DCHECK_EQ(RoundUp(capacity, kPageSize), capacity);
DCHECK_EQ(RoundUp(max_capacity, kPageSize), max_capacity);
CHECK_LE(capacity, max_capacity);
- CHECK(IsAligned<kPageSize>(page_release_size_threshold_));
+ CHECK_ALIGNED(page_release_size_threshold_, kPageSize);
if (!initialized_) {
Initialize();
}
@@ -349,7 +349,7 @@
fpr->magic_num_ = kMagicNumFree;
}
fpr->SetByteSize(this, byte_size);
- DCHECK(IsAligned<kPageSize>(fpr->ByteSize(this)));
+ DCHECK_ALIGNED(fpr->ByteSize(this), kPageSize);
DCHECK(free_page_runs_.find(fpr) == free_page_runs_.end());
if (!free_page_runs_.empty()) {
@@ -1567,7 +1567,7 @@
FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
DCHECK(free_page_runs_.find(fpr) != free_page_runs_.end());
size_t fpr_size = fpr->ByteSize(this);
- DCHECK(IsAligned<kPageSize>(fpr_size));
+ DCHECK_ALIGNED(fpr_size, kPageSize);
void* start = fpr;
if (kIsDebugBuild) {
// In the debug build, the first page of a free page run
@@ -1916,7 +1916,7 @@
CHECK(free_page_runs_.find(fpr) != free_page_runs_.end())
<< "An empty page must belong to the free page run set";
size_t fpr_size = fpr->ByteSize(this);
- CHECK(IsAligned<kPageSize>(fpr_size))
+ CHECK_ALIGNED(fpr_size, kPageSize)
<< "A free page run size isn't page-aligned : " << fpr_size;
size_t num_pages = fpr_size / kPageSize;
CHECK_GT(num_pages, static_cast<uintptr_t>(0))
@@ -2163,7 +2163,7 @@
// to the next page.
if (free_page_runs_.find(fpr) != free_page_runs_.end()) {
size_t fpr_size = fpr->ByteSize(this);
- DCHECK(IsAligned<kPageSize>(fpr_size));
+ DCHECK_ALIGNED(fpr_size, kPageSize);
uint8_t* start = reinterpret_cast<uint8_t*>(fpr);
reclaimed_bytes += ReleasePageRange(start, start + fpr_size);
size_t pages = fpr_size / kPageSize;
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index b5d5c34..8bbace9 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -1585,7 +1585,7 @@
// Fill the given memory block with a dummy object. Used to fill in a
// copy of objects that was lost in race.
void ConcurrentCopying::FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size) {
- CHECK(IsAligned<kObjectAlignment>(byte_size));
+ CHECK_ALIGNED(byte_size, kObjectAlignment);
memset(dummy_obj, 0, byte_size);
mirror::Class* int_array_class = mirror::IntArray::GetArrayClass();
CHECK(int_array_class != nullptr);
@@ -1618,7 +1618,7 @@
// Reuse the memory blocks that were copy of objects that were lost in race.
mirror::Object* ConcurrentCopying::AllocateInSkippedBlock(size_t alloc_size) {
// Try to reuse the blocks that were unused due to CAS failures.
- CHECK(IsAligned<space::RegionSpace::kAlignment>(alloc_size));
+ CHECK_ALIGNED(alloc_size, space::RegionSpace::kAlignment);
Thread* self = Thread::Current();
size_t min_object_size = RoundUp(sizeof(mirror::Object), space::RegionSpace::kAlignment);
MutexLock mu(self, skipped_blocks_lock_);
@@ -1637,7 +1637,7 @@
// Not found.
return nullptr;
}
- CHECK(IsAligned<space::RegionSpace::kAlignment>(it->first - alloc_size));
+ CHECK_ALIGNED(it->first - alloc_size, space::RegionSpace::kAlignment);
CHECK_GE(it->first - alloc_size, min_object_size)
<< "byte_size=" << byte_size << " it->first=" << it->first << " alloc_size=" << alloc_size;
}
@@ -1648,7 +1648,7 @@
uint8_t* addr = it->second;
CHECK_GE(byte_size, alloc_size);
CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr)));
- CHECK(IsAligned<space::RegionSpace::kAlignment>(byte_size));
+ CHECK_ALIGNED(byte_size, space::RegionSpace::kAlignment);
if (kVerboseMode) {
LOG(INFO) << "Reusing skipped bytes : " << reinterpret_cast<void*>(addr) << ", " << byte_size;
}
@@ -1656,7 +1656,7 @@
memset(addr, 0, byte_size);
if (byte_size > alloc_size) {
// Return the remainder to the map.
- CHECK(IsAligned<space::RegionSpace::kAlignment>(byte_size - alloc_size));
+ CHECK_ALIGNED(byte_size - alloc_size, space::RegionSpace::kAlignment);
CHECK_GE(byte_size - alloc_size, min_object_size);
FillWithDummyObject(reinterpret_cast<mirror::Object*>(addr + alloc_size),
byte_size - alloc_size);
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index e0d6d6b..4eb15e2 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -831,8 +831,8 @@
// Align up the end address. For example, the image space's end
// may not be card-size-aligned.
card_end = AlignUp(card_end, accounting::CardTable::kCardSize);
- DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_begin));
- DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_end));
+ DCHECK_ALIGNED(card_begin, accounting::CardTable::kCardSize);
+ DCHECK_ALIGNED(card_end, accounting::CardTable::kCardSize);
// Calculate how many bytes of heap we will scan,
const size_t address_range = card_end - card_begin;
// Calculate how much address range each task gets.
diff --git a/runtime/gc/collector/semi_space-inl.h b/runtime/gc/collector/semi_space-inl.h
index 7b19dc9..a7de44f 100644
--- a/runtime/gc/collector/semi_space-inl.h
+++ b/runtime/gc/collector/semi_space-inl.h
@@ -34,7 +34,7 @@
void operator()(const mirror::Object* obj) const {
CHECK(!semi_space_->to_space_->HasAddress(obj)) << "Marking " << obj << " in to_space_";
// Marking a large object, make sure its aligned as a sanity check.
- CHECK(IsAligned<kPageSize>(obj));
+ CHECK_ALIGNED(obj, kPageSize);
}
private:
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 795d2a2..2b94cf1 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1431,10 +1431,10 @@
if (UNLIKELY(static_cast<size_t>(num_bytes_allocated_.LoadRelaxed()) < 10 * KB)) {
return;
}
- CHECK(IsAligned<kObjectAlignment>(obj)) << "Object isn't aligned: " << obj;
+ CHECK_ALIGNED(obj, kObjectAlignment) << "Object isn't aligned";
mirror::Class* c = obj->GetFieldObject<mirror::Class, kVerifyNone>(mirror::Object::ClassOffset());
CHECK(c != nullptr) << "Null class in object " << obj;
- CHECK(IsAligned<kObjectAlignment>(c)) << "Class " << c << " not aligned in object " << obj;
+ CHECK_ALIGNED(c, kObjectAlignment) << "Class " << c << " not aligned in object " << obj;
CHECK(VerifyClassClass(c));
if (verify_object_mode_ > kVerifyObjectModeFast) {
diff --git a/runtime/gc/space/bump_pointer_space-inl.h b/runtime/gc/space/bump_pointer_space-inl.h
index d9ad9a3..338a41e 100644
--- a/runtime/gc/space/bump_pointer_space-inl.h
+++ b/runtime/gc/space/bump_pointer_space-inl.h
@@ -63,7 +63,7 @@
}
inline mirror::Object* BumpPointerSpace::AllocNonvirtualWithoutAccounting(size_t num_bytes) {
- DCHECK(IsAligned<kAlignment>(num_bytes));
+ DCHECK_ALIGNED(num_bytes, kAlignment);
uint8_t* old_end;
uint8_t* new_end;
do {
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index a913e59..2798b21 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -440,7 +440,7 @@
AllocationInfo* next_next_info = next_info->GetNextInfo();
// Next next info can't be free since we always coalesce.
DCHECK(!next_next_info->IsFree());
- DCHECK(IsAligned<kAlignment>(next_next_info->ByteSize()));
+ DCHECK_ALIGNED(next_next_info->ByteSize(), kAlignment);
new_free_info = next_next_info;
new_free_size += next_next_info->GetPrevFreeBytes();
RemoveFreePrev(next_next_info);
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index b014217..3a0d814 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -46,8 +46,8 @@
if (create_bitmaps) {
size_t bitmap_index = bitmap_index_++;
static const uintptr_t kGcCardSize = static_cast<uintptr_t>(accounting::CardTable::kCardSize);
- CHECK(IsAligned<kGcCardSize>(reinterpret_cast<uintptr_t>(mem_map->Begin())));
- CHECK(IsAligned<kGcCardSize>(reinterpret_cast<uintptr_t>(mem_map->End())));
+ CHECK_ALIGNED(reinterpret_cast<uintptr_t>(mem_map->Begin()), kGcCardSize);
+ CHECK_ALIGNED(reinterpret_cast<uintptr_t>(mem_map->End()), kGcCardSize);
live_bitmap_.reset(accounting::ContinuousSpaceBitmap::Create(
StringPrintf("allocspace %s live-bitmap %d", name.c_str(), static_cast<int>(bitmap_index)),
Begin(), NonGrowthLimitCapacity()));
@@ -164,10 +164,10 @@
// alloc spaces.
RevokeAllThreadLocalBuffers();
SetEnd(reinterpret_cast<uint8_t*>(RoundUp(reinterpret_cast<uintptr_t>(End()), kPageSize)));
- DCHECK(IsAligned<accounting::CardTable::kCardSize>(begin_));
- DCHECK(IsAligned<accounting::CardTable::kCardSize>(End()));
- DCHECK(IsAligned<kPageSize>(begin_));
- DCHECK(IsAligned<kPageSize>(End()));
+ DCHECK_ALIGNED(begin_, accounting::CardTable::kCardSize);
+ DCHECK_ALIGNED(End(), accounting::CardTable::kCardSize);
+ DCHECK_ALIGNED(begin_, kPageSize);
+ DCHECK_ALIGNED(End(), kPageSize);
size_t size = RoundUp(Size(), kPageSize);
// Trimming the heap should be done by the caller since we may have invalidated the accounting
// stored in between objects.
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
index 1cdf69d..db005f7 100644
--- a/runtime/gc/space/region_space-inl.h
+++ b/runtime/gc/space/region_space-inl.h
@@ -43,7 +43,7 @@
inline mirror::Object* RegionSpace::AllocNonvirtual(size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size,
size_t* bytes_tl_bulk_allocated) {
- DCHECK(IsAligned<kAlignment>(num_bytes));
+ DCHECK_ALIGNED(num_bytes, kAlignment);
mirror::Object* obj;
if (LIKELY(num_bytes <= kRegionSize)) {
// Non-large object.
@@ -115,7 +115,7 @@
size_t* usable_size,
size_t* bytes_tl_bulk_allocated) {
DCHECK(IsAllocated() && IsInToSpace());
- DCHECK(IsAligned<kAlignment>(num_bytes));
+ DCHECK_ALIGNED(num_bytes, kAlignment);
Atomic<uint8_t*>* atomic_top = reinterpret_cast<Atomic<uint8_t*>*>(&top_);
uint8_t* old_top;
uint8_t* new_top;
@@ -266,7 +266,7 @@
mirror::Object* RegionSpace::AllocLarge(size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size,
size_t* bytes_tl_bulk_allocated) {
- DCHECK(IsAligned<kAlignment>(num_bytes));
+ DCHECK_ALIGNED(num_bytes, kAlignment);
DCHECK_GT(num_bytes, kRegionSize);
size_t num_regs = RoundUp(num_bytes, kRegionSize) / kRegionSize;
DCHECK_GT(num_regs, 0U);
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index 814ab6c..9a2d0c6 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -287,7 +287,7 @@
void RegionSpace::FreeLarge(mirror::Object* large_obj, size_t bytes_allocated) {
DCHECK(Contains(large_obj));
- DCHECK(IsAligned<kRegionSize>(large_obj));
+ DCHECK_ALIGNED(large_obj, kRegionSize);
MutexLock mu(Thread::Current(), region_lock_);
uint8_t* begin_addr = reinterpret_cast<uint8_t*>(large_obj);
uint8_t* end_addr = AlignUp(reinterpret_cast<uint8_t*>(large_obj) + bytes_allocated, kRegionSize);
@@ -366,7 +366,7 @@
uint8_t* tlab_start = thread->GetTlabStart();
DCHECK_EQ(thread->HasTlab(), tlab_start != nullptr);
if (tlab_start != nullptr) {
- DCHECK(IsAligned<kRegionSize>(tlab_start));
+ DCHECK_ALIGNED(tlab_start, kRegionSize);
Region* r = RefToRegionLocked(reinterpret_cast<mirror::Object*>(tlab_start));
DCHECK(r->IsAllocated());
DCHECK_EQ(thread->GetThreadLocalBytesAllocated(), kRegionSize);
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index a12a58d..776b6a3 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -446,10 +446,10 @@
return 3;
}
const int32_t* keys = reinterpret_cast<const int32_t*>(&switch_data[2]);
- DCHECK(IsAligned<4>(keys));
+ DCHECK_ALIGNED(keys, 4);
int32_t first_key = keys[0];
const int32_t* targets = reinterpret_cast<const int32_t*>(&switch_data[4]);
- DCHECK(IsAligned<4>(targets));
+ DCHECK_ALIGNED(targets, 4);
int32_t index = test_val - first_key;
if (index >= 0 && index < size) {
return targets[index];
@@ -474,9 +474,9 @@
return 3;
}
const int32_t* keys = reinterpret_cast<const int32_t*>(&switch_data[2]);
- DCHECK(IsAligned<4>(keys));
+ DCHECK_ALIGNED(keys, 4);
const int32_t* entries = keys + size;
- DCHECK(IsAligned<4>(entries));
+ DCHECK_ALIGNED(entries, 4);
int lo = 0;
int hi = size - 1;
while (lo <= hi) {
diff --git a/runtime/lock_word.h b/runtime/lock_word.h
index a290575..245f8b8 100644
--- a/runtime/lock_word.h
+++ b/runtime/lock_word.h
@@ -118,7 +118,7 @@
}
static LockWord FromForwardingAddress(size_t target) {
- DCHECK(IsAligned < 1 << kStateSize>(target));
+ DCHECK_ALIGNED(target, (1 << kStateSize));
return LockWord((target >> kStateSize) | (kStateForwardingAddress << kStateShift));
}
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index dbae7f8..8df8f96 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -585,10 +585,10 @@
DCHECK_GE(new_end, Begin());
DCHECK_LE(new_end, End());
DCHECK_LE(begin_ + size_, reinterpret_cast<uint8_t*>(base_begin_) + base_size_);
- DCHECK(IsAligned<kPageSize>(begin_));
- DCHECK(IsAligned<kPageSize>(base_begin_));
- DCHECK(IsAligned<kPageSize>(reinterpret_cast<uint8_t*>(base_begin_) + base_size_));
- DCHECK(IsAligned<kPageSize>(new_end));
+ DCHECK_ALIGNED(begin_, kPageSize);
+ DCHECK_ALIGNED(base_begin_, kPageSize);
+ DCHECK_ALIGNED(reinterpret_cast<uint8_t*>(base_begin_) + base_size_, kPageSize);
+ DCHECK_ALIGNED(new_end, kPageSize);
uint8_t* old_end = begin_ + size_;
uint8_t* old_base_end = reinterpret_cast<uint8_t*>(base_begin_) + base_size_;
uint8_t* new_base_end = new_end;
@@ -603,7 +603,7 @@
uint8_t* tail_base_begin = new_base_end;
size_t tail_base_size = old_base_end - new_base_end;
DCHECK_EQ(tail_base_begin + tail_base_size, old_base_end);
- DCHECK(IsAligned<kPageSize>(tail_base_size));
+ DCHECK_ALIGNED(tail_base_size, kPageSize);
#ifdef USE_ASHMEM
// android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are
@@ -726,7 +726,7 @@
size_t num_gaps = 0;
size_t num = 1u;
size_t size = map->BaseSize();
- CHECK(IsAligned<kPageSize>(size));
+ CHECK_ALIGNED(size, kPageSize);
void* end = map->BaseEnd();
while (it != maps_end &&
it->second->GetProtect() == map->GetProtect() &&
@@ -740,12 +740,12 @@
}
size_t gap =
reinterpret_cast<uintptr_t>(it->second->BaseBegin()) - reinterpret_cast<uintptr_t>(end);
- CHECK(IsAligned<kPageSize>(gap));
+ CHECK_ALIGNED(gap, kPageSize);
os << "~0x" << std::hex << (gap / kPageSize) << "P";
num = 0u;
size = 0u;
}
- CHECK(IsAligned<kPageSize>(it->second->BaseSize()));
+ CHECK_ALIGNED(it->second->BaseSize(), kPageSize);
++num;
size += it->second->BaseSize();
end = it->second->BaseEnd();
diff --git a/runtime/oat.cc b/runtime/oat.cc
index 1dd2aad..5725b6f 100644
--- a/runtime/oat.cc
+++ b/runtime/oat.cc
@@ -97,7 +97,7 @@
image_file_location_oat_checksum_ = image_file_location_oat_checksum;
UpdateChecksum(&image_file_location_oat_checksum_, sizeof(image_file_location_oat_checksum_));
- CHECK(IsAligned<kPageSize>(image_file_location_oat_data_begin));
+ CHECK_ALIGNED(image_file_location_oat_data_begin, kPageSize);
image_file_location_oat_data_begin_ = image_file_location_oat_data_begin;
UpdateChecksum(&image_file_location_oat_data_begin_, sizeof(image_file_location_oat_data_begin_));
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 6f3b0a3..fede91c 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -904,7 +904,7 @@
CHECK_EQ(runtime->GetClassLinker()->GetImagePointerSize(), pointer_size);
}
}
- DCHECK_EQ(frame_size & (kStackAlignment - 1), 0U);
+ DCHECK_ALIGNED(frame_size, kStackAlignment);
DCHECK_NE(reg, -1);
int spill_size = POPCOUNT(core_spills) * GetBytesPerGprSpillLocation(isa)
+ POPCOUNT(fp_spills) * GetBytesPerFprSpillLocation(isa)