Use (D)CHECK_ALIGNED more.
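
Replace hand-rolled alignment checks such as DCHECK_EQ(x & 0x3, 0) and
CHECK(IsAligned<n>(x)) with the dedicated (D)CHECK_ALIGNED macros. The
macros state the intent directly and, unlike the masked comparison,
report the offending value when the check fails. As a rough sketch
(the actual definitions live in base/macros.h), they expand to
something like:

  #define CHECK_ALIGNED(value, alignment) \
    CHECK(::art::IsAligned<alignment>(value)) << reinterpret_cast<const void*>(value)
  #define DCHECK_ALIGNED(value, alignment) \
    DCHECK(::art::IsAligned<alignment>(value)) << reinterpret_cast<const void*>(value)
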
Change-Id: I9d740f6a88d01e028d4ddc3e4e62b0a73ea050af
diff --git a/compiler/dex/quick/arm/assemble_arm.cc b/compiler/dex/quick/arm/assemble_arm.cc
index df4a9f2..5f911db 100644
--- a/compiler/dex/quick/arm/assemble_arm.cc
+++ b/compiler/dex/quick/arm/assemble_arm.cc
@@ -1298,7 +1298,7 @@
*/
delta &= ~0x3;
}
- DCHECK_EQ((delta & 0x3), 0);
+ DCHECK_ALIGNED(delta, 4);
// First, a sanity check for cases we shouldn't see now
if (kIsDebugBuild && (((lir->opcode == kThumbAddPcRel) && (delta > 1020)) ||
((lir->opcode == kThumbLdrPcRel) && (delta > 1020)))) {
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index 2ef92f8..062f7af 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -880,7 +880,7 @@
LIR* ArmMir2Lir::LoadStoreUsingInsnWithOffsetImm8Shl2(ArmOpcode opcode, RegStorage r_base,
int displacement, RegStorage r_src_dest,
RegStorage r_work) {
- DCHECK_EQ(displacement & 3, 0);
+ DCHECK_ALIGNED(displacement, 4);
constexpr int kOffsetMask = 0xff << 2;
int encoded_disp = (displacement & kOffsetMask) >> 2; // Within range of the instruction.
RegStorage r_ptr = r_base;
@@ -942,7 +942,7 @@
already_generated = true;
break;
}
- DCHECK_EQ((displacement & 0x3), 0);
+ DCHECK_ALIGNED(displacement, 4);
scale = 2;
if (r_dest.Low8() && (r_base == rs_rARM_PC) && (displacement <= 1020) &&
(displacement >= 0)) {
@@ -959,14 +959,14 @@
}
break;
case kUnsignedHalf:
- DCHECK_EQ((displacement & 0x1), 0);
+ DCHECK_ALIGNED(displacement, 2);
scale = 1;
short_form = all_low && (displacement >> (5 + scale)) == 0;
opcode16 = kThumbLdrhRRI5;
opcode32 = kThumb2LdrhRRI12;
break;
case kSignedHalf:
- DCHECK_EQ((displacement & 0x1), 0);
+ DCHECK_ALIGNED(displacement, 2);
scale = 1;
DCHECK_EQ(opcode16, kThumbBkpt); // Not available.
opcode32 = kThumb2LdrshRRI12;
@@ -1096,7 +1096,7 @@
already_generated = true;
break;
}
- DCHECK_EQ((displacement & 0x3), 0);
+ DCHECK_ALIGNED(displacement, 4);
scale = 2;
if (r_src.Low8() && (r_base == rs_r13sp) && (displacement <= 1020) && (displacement >= 0)) {
short_form = true;
@@ -1109,7 +1109,7 @@
break;
case kUnsignedHalf:
case kSignedHalf:
- DCHECK_EQ((displacement & 0x1), 0);
+ DCHECK_ALIGNED(displacement, 2);
scale = 1;
short_form = all_low && (displacement >> (5 + scale)) == 0;
opcode16 = kThumbStrhRRI5;
diff --git a/compiler/dex/quick/arm64/assemble_arm64.cc b/compiler/dex/quick/arm64/assemble_arm64.cc
index b78fb80..25c69d1 100644
--- a/compiler/dex/quick/arm64/assemble_arm64.cc
+++ b/compiler/dex/quick/arm64/assemble_arm64.cc
@@ -909,7 +909,7 @@
CodeOffset target = target_lir->offset +
((target_lir->flags.generation == lir->flags.generation) ? 0 : offset_adjustment);
int32_t delta = target - pc;
- DCHECK_EQ(delta & 0x3, 0);
+ DCHECK_ALIGNED(delta, 4);
if (!IS_SIGNED_IMM26(delta >> 2)) {
LOG(FATAL) << "Invalid jump range in kFixupT1Branch";
}
@@ -933,7 +933,7 @@
CodeOffset target = target_lir->offset +
((target_lir->flags.generation == lir->flags.generation) ? 0 : offset_adjustment);
int32_t delta = target - pc;
- DCHECK_EQ(delta & 0x3, 0);
+ DCHECK_ALIGNED(delta, 4);
if (!IS_SIGNED_IMM19(delta >> 2)) {
LOG(FATAL) << "Invalid jump range in kFixupLoad";
}
@@ -965,7 +965,7 @@
CodeOffset target = target_lir->offset +
((target_lir->flags.generation == lir->flags.generation) ? 0 : offset_adjustment);
int32_t delta = target - pc;
- DCHECK_EQ(delta & 0x3, 0);
+ DCHECK_ALIGNED(delta, 4);
// Check if branch offset can be encoded in tbz/tbnz.
if (!IS_SIGNED_IMM14(delta >> 2)) {
DexOffset dalvik_offset = lir->dalvik_offset;
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index 37e5804..ec2475a 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -714,7 +714,7 @@
} else {
opcode = kMipsFldc1;
}
- DCHECK_EQ((displacement & 0x3), 0);
+ DCHECK_ALIGNED(displacement, 4);
break;
}
is64bit = true;
@@ -736,15 +736,15 @@
DCHECK(r_dest.IsDouble());
}
}
- DCHECK_EQ((displacement & 0x3), 0);
+ DCHECK_ALIGNED(displacement, 4);
break;
case kUnsignedHalf:
opcode = kMipsLhu;
- DCHECK_EQ((displacement & 0x1), 0);
+ DCHECK_ALIGNED(displacement, 2);
break;
case kSignedHalf:
opcode = kMipsLh;
- DCHECK_EQ((displacement & 0x1), 0);
+ DCHECK_ALIGNED(displacement, 2);
break;
case kUnsignedByte:
opcode = kMipsLbu;
@@ -891,7 +891,7 @@
} else {
opcode = kMipsFsdc1;
}
- DCHECK_EQ((displacement & 0x3), 0);
+ DCHECK_ALIGNED(displacement, 4);
break;
}
is64bit = true;
@@ -913,12 +913,12 @@
DCHECK(r_src.IsDouble());
}
}
- DCHECK_EQ((displacement & 0x3), 0);
+ DCHECK_ALIGNED(displacement, 4);
break;
case kUnsignedHalf:
case kSignedHalf:
opcode = kMipsSh;
- DCHECK_EQ((displacement & 0x1), 0);
+ DCHECK_ALIGNED(displacement, 2);
break;
case kUnsignedByte:
case kSignedByte:
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index 61a1bec..b16ae98 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -659,7 +659,7 @@
opcode = is_array ? kX86Mov32RA : kX86Mov32RM;
}
// TODO: double store is to unaligned address
- DCHECK_EQ((displacement & 0x3), 0);
+ DCHECK_ALIGNED(displacement, 4);
break;
case kWord:
if (cu_->target64) {
@@ -677,15 +677,15 @@
opcode = is_array ? kX86MovssRA : kX86MovssRM;
DCHECK(r_dest.IsFloat());
}
- DCHECK_EQ((displacement & 0x3), 0);
+ DCHECK_ALIGNED(displacement, 4);
break;
case kUnsignedHalf:
opcode = is_array ? kX86Movzx16RA : kX86Movzx16RM;
- DCHECK_EQ((displacement & 0x1), 0);
+ DCHECK_ALIGNED(displacement, 2);
break;
case kSignedHalf:
opcode = is_array ? kX86Movsx16RA : kX86Movsx16RM;
- DCHECK_EQ((displacement & 0x1), 0);
+ DCHECK_ALIGNED(displacement, 2);
break;
case kUnsignedByte:
opcode = is_array ? kX86Movzx8RA : kX86Movzx8RM;
@@ -812,7 +812,7 @@
opcode = is_array ? kX86Mov32AR : kX86Mov32MR;
}
// TODO: double store is to unaligned address
- DCHECK_EQ((displacement & 0x3), 0);
+ DCHECK_ALIGNED(displacement, 4);
break;
case kWord:
if (cu_->target64) {
@@ -831,13 +831,13 @@
opcode = is_array ? kX86MovssAR : kX86MovssMR;
DCHECK(r_src.IsSingle());
}
- DCHECK_EQ((displacement & 0x3), 0);
+ DCHECK_ALIGNED(displacement, 4);
consider_non_temporal = true;
break;
case kUnsignedHalf:
case kSignedHalf:
opcode = is_array ? kX86Mov16AR : kX86Mov16MR;
- DCHECK_EQ((displacement & 0x1), 0);
+ DCHECK_ALIGNED(displacement, 2);
break;
case kUnsignedByte:
case kSignedByte:
diff --git a/compiler/linker/arm/relative_patcher_thumb2_test.cc b/compiler/linker/arm/relative_patcher_thumb2_test.cc
index b4aa286..13f67e6 100644
--- a/compiler/linker/arm/relative_patcher_thumb2_test.cc
+++ b/compiler/linker/arm/relative_patcher_thumb2_test.cc
@@ -50,7 +50,7 @@
// We want to put the method3 at a very precise offset.
const uint32_t method3_offset = method1_offset + distance_without_thunks;
- CHECK(IsAligned<kArmAlignment>(method3_offset - sizeof(OatQuickMethodHeader)));
+ CHECK_ALIGNED(method3_offset - sizeof(OatQuickMethodHeader), kArmAlignment);
// Calculate size of method2 so that we put method3 at the correct place.
const uint32_t method2_offset =
diff --git a/compiler/linker/arm64/relative_patcher_arm64.cc b/compiler/linker/arm64/relative_patcher_arm64.cc
index 29355d6..6b9c530 100644
--- a/compiler/linker/arm64/relative_patcher_arm64.cc
+++ b/compiler/linker/arm64/relative_patcher_arm64.cc
@@ -108,7 +108,7 @@
if (!current_method_thunks_.empty()) {
uint32_t aligned_offset = CompiledMethod::AlignCode(offset, kArm64);
if (kIsDebugBuild) {
- CHECK(IsAligned<kAdrpThunkSize>(current_method_thunks_.size()));
+ CHECK_ALIGNED(current_method_thunks_.size(), kAdrpThunkSize);
size_t num_thunks = current_method_thunks_.size() / kAdrpThunkSize;
CHECK_LE(num_thunks, processed_adrp_thunks_);
for (size_t i = 0u; i != num_thunks; ++i) {
@@ -203,7 +203,7 @@
if ((adrp & 0x9f000000u) != 0x90000000u) {
CHECK(fix_cortex_a53_843419_);
CHECK_EQ(adrp & 0xfc000000u, 0x14000000u); // B <thunk>
- CHECK(IsAligned<kAdrpThunkSize>(current_method_thunks_.size()));
+ CHECK_ALIGNED(current_method_thunks_.size(), kAdrpThunkSize);
size_t num_thunks = current_method_thunks_.size() / kAdrpThunkSize;
CHECK_LE(num_thunks, processed_adrp_thunks_);
uint32_t b_offset = patch_offset - literal_offset + pc_insn_offset;
diff --git a/compiler/linker/arm64/relative_patcher_arm64_test.cc b/compiler/linker/arm64/relative_patcher_arm64_test.cc
index 1bad8a9..b3af4c6 100644
--- a/compiler/linker/arm64/relative_patcher_arm64_test.cc
+++ b/compiler/linker/arm64/relative_patcher_arm64_test.cc
@@ -66,7 +66,7 @@
// We want to put the method3 at a very precise offset.
const uint32_t last_method_offset = method1_offset + distance_without_thunks;
const uint32_t gap_end = last_method_offset - sizeof(OatQuickMethodHeader);
- CHECK(IsAligned<kArm64Alignment>(gap_end));
+ CHECK_ALIGNED(gap_end, kArm64Alignment);
// Fill the gap with intermediate methods in chunks of 2MiB and the last in [2MiB, 4MiB).
// (This allows deduplicating the small chunks to avoid using 256MiB of memory for +-128MiB
diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc
index 09d2270..0e3e08c 100644
--- a/compiler/utils/arm/assembler_arm.cc
+++ b/compiler/utils/arm/assembler_arm.cc
@@ -252,11 +252,11 @@
if (offset_ < 0) {
int32_t off = -offset_;
CHECK_LT(off, 1024);
- CHECK_EQ((off & 3 /* 0b11 */), 0); // Must be multiple of 4.
+ CHECK_ALIGNED(off, 4);
encoding = (am ^ (1 << kUShift)) | off >> 2; // Flip U to adjust sign.
} else {
CHECK_LT(offset_, 1024);
- CHECK_EQ((offset_ & 3 /* 0b11 */), 0); // Must be multiple of 4.
+ CHECK_ALIGNED(offset_, 4);
encoding = am | offset_ >> 2;
}
encoding |= static_cast<uint32_t>(rn_) << 16;
diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc
index 88b2f2c..5843886 100644
--- a/compiler/utils/arm/assembler_thumb2.cc
+++ b/compiler/utils/arm/assembler_thumb2.cc
@@ -101,7 +101,7 @@
}
// Adjust literal pool labels for padding.
- DCHECK_EQ(current_code_size & 1u, 0u);
+ DCHECK_ALIGNED(current_code_size, 2);
uint32_t literals_adjustment = current_code_size + (current_code_size & 2) - buffer_.Size();
if (literals_adjustment != 0u) {
for (Literal& literal : literals_) {
@@ -152,7 +152,7 @@
// Load literal instructions (LDR, LDRD, VLDR) require 4-byte alignment.
// We don't support byte and half-word literals.
uint32_t code_size = buffer_.Size();
- DCHECK_EQ(code_size & 1u, 0u);
+ DCHECK_ALIGNED(code_size, 2);
if ((code_size & 2u) != 0u) {
Emit16(0);
}
@@ -168,7 +168,7 @@
}
inline int16_t Thumb2Assembler::BEncoding16(int32_t offset, Condition cond) {
- DCHECK_EQ(offset & 1, 0);
+ DCHECK_ALIGNED(offset, 2);
int16_t encoding = B15 | B14;
if (cond != AL) {
DCHECK(IsInt<9>(offset));
@@ -181,7 +181,7 @@
}
inline int32_t Thumb2Assembler::BEncoding32(int32_t offset, Condition cond) {
- DCHECK_EQ(offset & 1, 0);
+ DCHECK_ALIGNED(offset, 2);
int32_t s = (offset >> 31) & 1; // Sign bit.
int32_t encoding = B31 | B30 | B29 | B28 | B15 |
(s << 26) | // Sign bit goes to bit 26.
@@ -205,7 +205,7 @@
inline int16_t Thumb2Assembler::CbxzEncoding16(Register rn, int32_t offset, Condition cond) {
DCHECK(!IsHighRegister(rn));
- DCHECK_EQ(offset & 1, 0);
+ DCHECK_ALIGNED(offset, 2);
DCHECK(IsUint<7>(offset));
DCHECK(cond == EQ || cond == NE);
return B15 | B13 | B12 | B8 | (cond == NE ? B11 : 0) | static_cast<int32_t>(rn) |
@@ -250,7 +250,7 @@
inline int16_t Thumb2Assembler::LdrLitEncoding16(Register rt, int32_t offset) {
DCHECK(!IsHighRegister(rt));
- DCHECK_EQ(offset & 3, 0);
+ DCHECK_ALIGNED(offset, 4);
DCHECK(IsUint<10>(offset));
return B14 | B11 | (static_cast<int32_t>(rt) << 8) | (offset >> 2);
}
@@ -261,7 +261,7 @@
}
inline int32_t Thumb2Assembler::LdrdEncoding32(Register rt, Register rt2, Register rn, int32_t offset) {
- DCHECK_EQ(offset & 3, 0);
+ DCHECK_ALIGNED(offset, 4);
CHECK(IsUint<10>(offset));
return B31 | B30 | B29 | B27 |
B24 /* P = 1 */ | B23 /* U = 1 */ | B22 | 0 /* W = 0 */ | B20 |
@@ -270,7 +270,7 @@
}
inline int32_t Thumb2Assembler::VldrsEncoding32(SRegister sd, Register rn, int32_t offset) {
- DCHECK_EQ(offset & 3, 0);
+ DCHECK_ALIGNED(offset, 4);
CHECK(IsUint<10>(offset));
return B31 | B30 | B29 | B27 | B26 | B24 |
B23 /* U = 1 */ | B20 | B11 | B9 |
@@ -281,7 +281,7 @@
}
inline int32_t Thumb2Assembler::VldrdEncoding32(DRegister dd, Register rn, int32_t offset) {
- DCHECK_EQ(offset & 3, 0);
+ DCHECK_ALIGNED(offset, 4);
CHECK(IsUint<10>(offset));
return B31 | B30 | B29 | B27 | B26 | B24 |
B23 /* U = 1 */ | B20 | B11 | B9 | B8 |
@@ -294,7 +294,7 @@
inline int16_t Thumb2Assembler::LdrRtRnImm5Encoding16(Register rt, Register rn, int32_t offset) {
DCHECK(!IsHighRegister(rt));
DCHECK(!IsHighRegister(rn));
- DCHECK_EQ(offset & 3, 0);
+ DCHECK_ALIGNED(offset, 4);
DCHECK(IsUint<7>(offset));
return B14 | B13 | B11 |
(static_cast<int32_t>(rn) << 3) | static_cast<int32_t>(rt) |
@@ -1423,7 +1423,7 @@
thumb_opcode = 3U /* 0b11 */;
opcode_shift = 12;
CHECK_LT(immediate, (1u << 9));
- CHECK_EQ((immediate & 3u /* 0b11 */), 0u);
+ CHECK_ALIGNED(immediate, 4);
// Remove rd and rn from instruction by orring it with immed and clearing bits.
rn = R0;
@@ -1437,7 +1437,7 @@
thumb_opcode = 5U /* 0b101 */;
opcode_shift = 11;
CHECK_LT(immediate, (1u << 10));
- CHECK_EQ((immediate & 3u /* 0b11 */), 0u);
+ CHECK_ALIGNED(immediate, 4);
// Remove rn from instruction.
rn = R0;
@@ -1474,7 +1474,7 @@
thumb_opcode = 0x61 /* 0b1100001 */;
opcode_shift = 7;
CHECK_LT(immediate, (1u << 9));
- CHECK_EQ((immediate & 3u /* 0b11 */), 0u);
+ CHECK_ALIGNED(immediate, 4);
// Remove rd and rn from instruction by orring it with immed and clearing bits.
rn = R0;
@@ -1652,7 +1652,7 @@
inline size_t Thumb2Assembler::Fixup::LiteralPoolPaddingSize(uint32_t current_code_size) {
// The code size must be a multiple of 2.
- DCHECK_EQ(current_code_size & 1u, 0u);
+ DCHECK_ALIGNED(current_code_size, 2);
// If it isn't a multiple of 4, we need to add a 2-byte padding before the literal pool.
return current_code_size & 2;
}
@@ -1697,7 +1697,7 @@
// Load literal instructions round down the PC+4 to a multiple of 4, so if the PC
// isn't a multiple of 2, we need to adjust. Since we already adjusted for the target
// being aligned, current PC alignment can be inferred from diff.
- DCHECK_EQ(diff & 1, 0);
+ DCHECK_ALIGNED(diff, 2);
diff = diff + (diff & 2);
DCHECK_GE(diff, 0);
break;
@@ -2045,7 +2045,7 @@
if (sp_relative) {
// SP relative, 10 bit offset.
CHECK_LT(offset, (1 << 10));
- CHECK_EQ((offset & 3 /* 0b11 */), 0);
+ CHECK_ALIGNED(offset, 4);
encoding |= rd << 8 | offset >> 2;
} else {
// No SP relative. The offset is shifted right depending on
@@ -2058,12 +2058,12 @@
} else if (half) {
// 6 bit offset, shifted by 1.
CHECK_LT(offset, (1 << 6));
- CHECK_EQ((offset & 1 /* 0b1 */), 0);
+ CHECK_ALIGNED(offset, 2);
offset >>= 1;
} else {
// 7 bit offset, shifted by 2.
CHECK_LT(offset, (1 << 7));
- CHECK_EQ((offset & 3 /* 0b11 */), 0);
+ CHECK_ALIGNED(offset, 4);
offset >>= 2;
}
encoding |= rn << 3 | offset << 6;
diff --git a/runtime/base/histogram-inl.h b/runtime/base/histogram-inl.h
index aba3762..03980e3 100644
--- a/runtime/base/histogram-inl.h
+++ b/runtime/base/histogram-inl.h
@@ -66,7 +66,7 @@
while (max_ < new_max) {
// If we have reached the maximum number of buckets, merge buckets together.
if (frequency_.size() >= max_buckets_) {
- CHECK(IsAligned<2>(frequency_.size()));
+ CHECK_ALIGNED(frequency_.size(), 2);
// We double the width of each bucket to reduce the number of buckets by a factor of 2.
bucket_width_ *= 2;
const size_t limit = frequency_.size() / 2;
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 1ec02aa..122c35f 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -249,13 +249,13 @@
if (!gaps->empty() && gaps->top().size >= n) {
FieldGap gap = gaps->top();
gaps->pop();
- DCHECK(IsAligned<n>(gap.start_offset));
+ DCHECK_ALIGNED(gap.start_offset, n);
field->SetOffset(MemberOffset(gap.start_offset));
if (gap.size > n) {
AddFieldGap(gap.start_offset + n, gap.start_offset + gap.size, gaps);
}
} else {
- DCHECK(IsAligned<n>(field_offset->Uint32Value()));
+ DCHECK_ALIGNED(field_offset->Uint32Value(), n);
field->SetOffset(*field_offset);
*field_offset = MemberOffset(field_offset->Uint32Value() + n);
}
@@ -5174,7 +5174,7 @@
field_offset = MemberOffset(RoundUp(field_offset.Uint32Value(), 4));
AddFieldGap(old_offset.Uint32Value(), field_offset.Uint32Value(), &gaps);
}
- DCHECK(IsAligned<sizeof(mirror::HeapReference<mirror::Object>)>(field_offset.Uint32Value()));
+ DCHECK_ALIGNED(field_offset.Uint32Value(), sizeof(mirror::HeapReference<mirror::Object>));
grouped_and_sorted_fields.pop_front();
num_reference_fields++;
field->SetOffset(field_offset);
diff --git a/runtime/exception_test.cc b/runtime/exception_test.cc
index bc3ba21..de4b3f4 100644
--- a/runtime/exception_test.cc
+++ b/runtime/exception_test.cc
@@ -93,7 +93,7 @@
// NOTE: Don't align the code (it will not be executed) but check that the Thumb2
// adjustment will be a NOP, see ArtMethod::EntryPointToCodePointer().
- CHECK_EQ(mapping_table_offset & 1u, 0u);
+ CHECK_ALIGNED(mapping_table_offset, 2);
const uint8_t* code_ptr = &fake_header_code_and_maps_[gc_map_offset];
method_f_ = my_klass_->FindVirtualMethod("f", "()I", sizeof(void*));
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index 5f91566..47f9b1b 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -331,7 +331,7 @@
// If we don't have a potential method, we're outta here.
VLOG(signals) << "potential method: " << method_obj;
// TODO: Check linear alloc and image.
- DCHECK(IsAligned<sizeof(void*)>(ArtMethod::ObjectSize(sizeof(void*))))
+ DCHECK_ALIGNED(ArtMethod::ObjectSize(sizeof(void*)), sizeof(void*))
<< "ArtMethod is not pointer aligned";
if (method_obj == nullptr || !IsAligned<sizeof(void*)>(method_obj)) {
VLOG(signals) << "no method";
diff --git a/runtime/gc/accounting/read_barrier_table.h b/runtime/gc/accounting/read_barrier_table.h
index 436df92..86266e2 100644
--- a/runtime/gc/accounting/read_barrier_table.h
+++ b/runtime/gc/accounting/read_barrier_table.h
@@ -51,8 +51,8 @@
void Clear(uint8_t* start_addr, uint8_t* end_addr) {
DCHECK(IsValidHeapAddr(start_addr)) << start_addr;
DCHECK(IsValidHeapAddr(end_addr)) << end_addr;
- DCHECK(IsAligned<kRegionSize>(start_addr));
- DCHECK(IsAligned<kRegionSize>(end_addr));
+ DCHECK_ALIGNED(start_addr, kRegionSize);
+ DCHECK_ALIGNED(end_addr, kRegionSize);
uint8_t* entry_start = EntryFromAddr(start_addr);
uint8_t* entry_end = EntryFromAddr(end_addr);
memset(reinterpret_cast<void*>(entry_start), 0, entry_end - entry_start);
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index 6546eb4..cdeaa50 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -79,7 +79,7 @@
template<size_t kAlignment>
void SpaceBitmap<kAlignment>::SetHeapLimit(uintptr_t new_end) {
- DCHECK(IsAligned<kBitsPerIntPtrT * kAlignment>(new_end));
+ DCHECK_ALIGNED(new_end, kBitsPerIntPtrT * kAlignment);
size_t new_size = OffsetToIndex(new_end - heap_begin_) * sizeof(intptr_t);
if (new_size < bitmap_size_) {
bitmap_size_ = new_size;
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index bd10f7b..abaa97f 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -63,7 +63,7 @@
DCHECK_EQ(RoundUp(capacity, kPageSize), capacity);
DCHECK_EQ(RoundUp(max_capacity, kPageSize), max_capacity);
CHECK_LE(capacity, max_capacity);
- CHECK(IsAligned<kPageSize>(page_release_size_threshold_));
+ CHECK_ALIGNED(page_release_size_threshold_, kPageSize);
if (!initialized_) {
Initialize();
}
@@ -349,7 +349,7 @@
fpr->magic_num_ = kMagicNumFree;
}
fpr->SetByteSize(this, byte_size);
- DCHECK(IsAligned<kPageSize>(fpr->ByteSize(this)));
+ DCHECK_ALIGNED(fpr->ByteSize(this), kPageSize);
DCHECK(free_page_runs_.find(fpr) == free_page_runs_.end());
if (!free_page_runs_.empty()) {
@@ -1567,7 +1567,7 @@
FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
DCHECK(free_page_runs_.find(fpr) != free_page_runs_.end());
size_t fpr_size = fpr->ByteSize(this);
- DCHECK(IsAligned<kPageSize>(fpr_size));
+ DCHECK_ALIGNED(fpr_size, kPageSize);
void* start = fpr;
if (kIsDebugBuild) {
// In the debug build, the first page of a free page run
@@ -1916,7 +1916,7 @@
CHECK(free_page_runs_.find(fpr) != free_page_runs_.end())
<< "An empty page must belong to the free page run set";
size_t fpr_size = fpr->ByteSize(this);
- CHECK(IsAligned<kPageSize>(fpr_size))
+ CHECK_ALIGNED(fpr_size, kPageSize)
<< "A free page run size isn't page-aligned : " << fpr_size;
size_t num_pages = fpr_size / kPageSize;
CHECK_GT(num_pages, static_cast<uintptr_t>(0))
@@ -2163,7 +2163,7 @@
// to the next page.
if (free_page_runs_.find(fpr) != free_page_runs_.end()) {
size_t fpr_size = fpr->ByteSize(this);
- DCHECK(IsAligned<kPageSize>(fpr_size));
+ DCHECK_ALIGNED(fpr_size, kPageSize);
uint8_t* start = reinterpret_cast<uint8_t*>(fpr);
reclaimed_bytes += ReleasePageRange(start, start + fpr_size);
size_t pages = fpr_size / kPageSize;
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index b5d5c34..8bbace9 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -1585,7 +1585,7 @@
// Fill the given memory block with a dummy object. Used to fill in a
// copy of objects that was lost in race.
void ConcurrentCopying::FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size) {
- CHECK(IsAligned<kObjectAlignment>(byte_size));
+ CHECK_ALIGNED(byte_size, kObjectAlignment);
memset(dummy_obj, 0, byte_size);
mirror::Class* int_array_class = mirror::IntArray::GetArrayClass();
CHECK(int_array_class != nullptr);
@@ -1618,7 +1618,7 @@
// Reuse the memory blocks that were copy of objects that were lost in race.
mirror::Object* ConcurrentCopying::AllocateInSkippedBlock(size_t alloc_size) {
// Try to reuse the blocks that were unused due to CAS failures.
- CHECK(IsAligned<space::RegionSpace::kAlignment>(alloc_size));
+ CHECK_ALIGNED(alloc_size, space::RegionSpace::kAlignment);
Thread* self = Thread::Current();
size_t min_object_size = RoundUp(sizeof(mirror::Object), space::RegionSpace::kAlignment);
MutexLock mu(self, skipped_blocks_lock_);
@@ -1637,7 +1637,7 @@
// Not found.
return nullptr;
}
- CHECK(IsAligned<space::RegionSpace::kAlignment>(it->first - alloc_size));
+ CHECK_ALIGNED(it->first - alloc_size, space::RegionSpace::kAlignment);
CHECK_GE(it->first - alloc_size, min_object_size)
<< "byte_size=" << byte_size << " it->first=" << it->first << " alloc_size=" << alloc_size;
}
@@ -1648,7 +1648,7 @@
uint8_t* addr = it->second;
CHECK_GE(byte_size, alloc_size);
CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr)));
- CHECK(IsAligned<space::RegionSpace::kAlignment>(byte_size));
+ CHECK_ALIGNED(byte_size, space::RegionSpace::kAlignment);
if (kVerboseMode) {
LOG(INFO) << "Reusing skipped bytes : " << reinterpret_cast<void*>(addr) << ", " << byte_size;
}
@@ -1656,7 +1656,7 @@
memset(addr, 0, byte_size);
if (byte_size > alloc_size) {
// Return the remainder to the map.
- CHECK(IsAligned<space::RegionSpace::kAlignment>(byte_size - alloc_size));
+ CHECK_ALIGNED(byte_size - alloc_size, space::RegionSpace::kAlignment);
CHECK_GE(byte_size - alloc_size, min_object_size);
FillWithDummyObject(reinterpret_cast<mirror::Object*>(addr + alloc_size),
byte_size - alloc_size);
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index e0d6d6b..4eb15e2 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -831,8 +831,8 @@
// Align up the end address. For example, the image space's end
// may not be card-size-aligned.
card_end = AlignUp(card_end, accounting::CardTable::kCardSize);
- DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_begin));
- DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_end));
+ DCHECK_ALIGNED(card_begin, accounting::CardTable::kCardSize);
+ DCHECK_ALIGNED(card_end, accounting::CardTable::kCardSize);
// Calculate how many bytes of heap we will scan,
const size_t address_range = card_end - card_begin;
// Calculate how much address range each task gets.
diff --git a/runtime/gc/collector/semi_space-inl.h b/runtime/gc/collector/semi_space-inl.h
index 7b19dc9..a7de44f 100644
--- a/runtime/gc/collector/semi_space-inl.h
+++ b/runtime/gc/collector/semi_space-inl.h
@@ -34,7 +34,7 @@
void operator()(const mirror::Object* obj) const {
CHECK(!semi_space_->to_space_->HasAddress(obj)) << "Marking " << obj << " in to_space_";
// Marking a large object, make sure its aligned as a sanity check.
- CHECK(IsAligned<kPageSize>(obj));
+ CHECK_ALIGNED(obj, kPageSize);
}
private:
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 795d2a2..2b94cf1 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1431,10 +1431,10 @@
if (UNLIKELY(static_cast<size_t>(num_bytes_allocated_.LoadRelaxed()) < 10 * KB)) {
return;
}
- CHECK(IsAligned<kObjectAlignment>(obj)) << "Object isn't aligned: " << obj;
+ CHECK_ALIGNED(obj, kObjectAlignment) << "Object isn't aligned";
mirror::Class* c = obj->GetFieldObject<mirror::Class, kVerifyNone>(mirror::Object::ClassOffset());
CHECK(c != nullptr) << "Null class in object " << obj;
- CHECK(IsAligned<kObjectAlignment>(c)) << "Class " << c << " not aligned in object " << obj;
+ CHECK_ALIGNED(c, kObjectAlignment) << "Class " << c << " not aligned in object " << obj;
CHECK(VerifyClassClass(c));
if (verify_object_mode_ > kVerifyObjectModeFast) {
diff --git a/runtime/gc/space/bump_pointer_space-inl.h b/runtime/gc/space/bump_pointer_space-inl.h
index d9ad9a3..338a41e 100644
--- a/runtime/gc/space/bump_pointer_space-inl.h
+++ b/runtime/gc/space/bump_pointer_space-inl.h
@@ -63,7 +63,7 @@
}
inline mirror::Object* BumpPointerSpace::AllocNonvirtualWithoutAccounting(size_t num_bytes) {
- DCHECK(IsAligned<kAlignment>(num_bytes));
+ DCHECK_ALIGNED(num_bytes, kAlignment);
uint8_t* old_end;
uint8_t* new_end;
do {
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index a913e59..2798b21 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -440,7 +440,7 @@
AllocationInfo* next_next_info = next_info->GetNextInfo();
// Next next info can't be free since we always coalesce.
DCHECK(!next_next_info->IsFree());
- DCHECK(IsAligned<kAlignment>(next_next_info->ByteSize()));
+ DCHECK_ALIGNED(next_next_info->ByteSize(), kAlignment);
new_free_info = next_next_info;
new_free_size += next_next_info->GetPrevFreeBytes();
RemoveFreePrev(next_next_info);
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index b014217..3a0d814 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -46,8 +46,8 @@
if (create_bitmaps) {
size_t bitmap_index = bitmap_index_++;
static const uintptr_t kGcCardSize = static_cast<uintptr_t>(accounting::CardTable::kCardSize);
- CHECK(IsAligned<kGcCardSize>(reinterpret_cast<uintptr_t>(mem_map->Begin())));
- CHECK(IsAligned<kGcCardSize>(reinterpret_cast<uintptr_t>(mem_map->End())));
+ CHECK_ALIGNED(reinterpret_cast<uintptr_t>(mem_map->Begin()), kGcCardSize);
+ CHECK_ALIGNED(reinterpret_cast<uintptr_t>(mem_map->End()), kGcCardSize);
live_bitmap_.reset(accounting::ContinuousSpaceBitmap::Create(
StringPrintf("allocspace %s live-bitmap %d", name.c_str(), static_cast<int>(bitmap_index)),
Begin(), NonGrowthLimitCapacity()));
@@ -164,10 +164,10 @@
// alloc spaces.
RevokeAllThreadLocalBuffers();
SetEnd(reinterpret_cast<uint8_t*>(RoundUp(reinterpret_cast<uintptr_t>(End()), kPageSize)));
- DCHECK(IsAligned<accounting::CardTable::kCardSize>(begin_));
- DCHECK(IsAligned<accounting::CardTable::kCardSize>(End()));
- DCHECK(IsAligned<kPageSize>(begin_));
- DCHECK(IsAligned<kPageSize>(End()));
+ DCHECK_ALIGNED(begin_, accounting::CardTable::kCardSize);
+ DCHECK_ALIGNED(End(), accounting::CardTable::kCardSize);
+ DCHECK_ALIGNED(begin_, kPageSize);
+ DCHECK_ALIGNED(End(), kPageSize);
size_t size = RoundUp(Size(), kPageSize);
// Trimming the heap should be done by the caller since we may have invalidated the accounting
// stored in between objects.
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
index 1cdf69d..db005f7 100644
--- a/runtime/gc/space/region_space-inl.h
+++ b/runtime/gc/space/region_space-inl.h
@@ -43,7 +43,7 @@
inline mirror::Object* RegionSpace::AllocNonvirtual(size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size,
size_t* bytes_tl_bulk_allocated) {
- DCHECK(IsAligned<kAlignment>(num_bytes));
+ DCHECK_ALIGNED(num_bytes, kAlignment);
mirror::Object* obj;
if (LIKELY(num_bytes <= kRegionSize)) {
// Non-large object.
@@ -115,7 +115,7 @@
size_t* usable_size,
size_t* bytes_tl_bulk_allocated) {
DCHECK(IsAllocated() && IsInToSpace());
- DCHECK(IsAligned<kAlignment>(num_bytes));
+ DCHECK_ALIGNED(num_bytes, kAlignment);
Atomic<uint8_t*>* atomic_top = reinterpret_cast<Atomic<uint8_t*>*>(&top_);
uint8_t* old_top;
uint8_t* new_top;
@@ -266,7 +266,7 @@
mirror::Object* RegionSpace::AllocLarge(size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size,
size_t* bytes_tl_bulk_allocated) {
- DCHECK(IsAligned<kAlignment>(num_bytes));
+ DCHECK_ALIGNED(num_bytes, kAlignment);
DCHECK_GT(num_bytes, kRegionSize);
size_t num_regs = RoundUp(num_bytes, kRegionSize) / kRegionSize;
DCHECK_GT(num_regs, 0U);
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index 814ab6c..9a2d0c6 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -287,7 +287,7 @@
void RegionSpace::FreeLarge(mirror::Object* large_obj, size_t bytes_allocated) {
DCHECK(Contains(large_obj));
- DCHECK(IsAligned<kRegionSize>(large_obj));
+ DCHECK_ALIGNED(large_obj, kRegionSize);
MutexLock mu(Thread::Current(), region_lock_);
uint8_t* begin_addr = reinterpret_cast<uint8_t*>(large_obj);
uint8_t* end_addr = AlignUp(reinterpret_cast<uint8_t*>(large_obj) + bytes_allocated, kRegionSize);
@@ -366,7 +366,7 @@
uint8_t* tlab_start = thread->GetTlabStart();
DCHECK_EQ(thread->HasTlab(), tlab_start != nullptr);
if (tlab_start != nullptr) {
- DCHECK(IsAligned<kRegionSize>(tlab_start));
+ DCHECK_ALIGNED(tlab_start, kRegionSize);
Region* r = RefToRegionLocked(reinterpret_cast<mirror::Object*>(tlab_start));
DCHECK(r->IsAllocated());
DCHECK_EQ(thread->GetThreadLocalBytesAllocated(), kRegionSize);
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index a12a58d..776b6a3 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -446,10 +446,10 @@
return 3;
}
const int32_t* keys = reinterpret_cast<const int32_t*>(&switch_data[2]);
- DCHECK(IsAligned<4>(keys));
+ DCHECK_ALIGNED(keys, 4);
int32_t first_key = keys[0];
const int32_t* targets = reinterpret_cast<const int32_t*>(&switch_data[4]);
- DCHECK(IsAligned<4>(targets));
+ DCHECK_ALIGNED(targets, 4);
int32_t index = test_val - first_key;
if (index >= 0 && index < size) {
return targets[index];
@@ -474,9 +474,9 @@
return 3;
}
const int32_t* keys = reinterpret_cast<const int32_t*>(&switch_data[2]);
- DCHECK(IsAligned<4>(keys));
+ DCHECK_ALIGNED(keys, 4);
const int32_t* entries = keys + size;
- DCHECK(IsAligned<4>(entries));
+ DCHECK_ALIGNED(entries, 4);
int lo = 0;
int hi = size - 1;
while (lo <= hi) {
diff --git a/runtime/lock_word.h b/runtime/lock_word.h
index a290575..245f8b8 100644
--- a/runtime/lock_word.h
+++ b/runtime/lock_word.h
@@ -118,7 +118,7 @@
}
static LockWord FromForwardingAddress(size_t target) {
- DCHECK(IsAligned < 1 << kStateSize>(target));
+ DCHECK_ALIGNED(target, (1 << kStateSize));
return LockWord((target >> kStateSize) | (kStateForwardingAddress << kStateShift));
}
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index dbae7f8..8df8f96 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -585,10 +585,10 @@
DCHECK_GE(new_end, Begin());
DCHECK_LE(new_end, End());
DCHECK_LE(begin_ + size_, reinterpret_cast<uint8_t*>(base_begin_) + base_size_);
- DCHECK(IsAligned<kPageSize>(begin_));
- DCHECK(IsAligned<kPageSize>(base_begin_));
- DCHECK(IsAligned<kPageSize>(reinterpret_cast<uint8_t*>(base_begin_) + base_size_));
- DCHECK(IsAligned<kPageSize>(new_end));
+ DCHECK_ALIGNED(begin_, kPageSize);
+ DCHECK_ALIGNED(base_begin_, kPageSize);
+ DCHECK_ALIGNED(reinterpret_cast<uint8_t*>(base_begin_) + base_size_, kPageSize);
+ DCHECK_ALIGNED(new_end, kPageSize);
uint8_t* old_end = begin_ + size_;
uint8_t* old_base_end = reinterpret_cast<uint8_t*>(base_begin_) + base_size_;
uint8_t* new_base_end = new_end;
@@ -603,7 +603,7 @@
uint8_t* tail_base_begin = new_base_end;
size_t tail_base_size = old_base_end - new_base_end;
DCHECK_EQ(tail_base_begin + tail_base_size, old_base_end);
- DCHECK(IsAligned<kPageSize>(tail_base_size));
+ DCHECK_ALIGNED(tail_base_size, kPageSize);
#ifdef USE_ASHMEM
// android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are
@@ -726,7 +726,7 @@
size_t num_gaps = 0;
size_t num = 1u;
size_t size = map->BaseSize();
- CHECK(IsAligned<kPageSize>(size));
+ CHECK_ALIGNED(size, kPageSize);
void* end = map->BaseEnd();
while (it != maps_end &&
it->second->GetProtect() == map->GetProtect() &&
@@ -740,12 +740,12 @@
}
size_t gap =
reinterpret_cast<uintptr_t>(it->second->BaseBegin()) - reinterpret_cast<uintptr_t>(end);
- CHECK(IsAligned<kPageSize>(gap));
+ CHECK_ALIGNED(gap, kPageSize);
os << "~0x" << std::hex << (gap / kPageSize) << "P";
num = 0u;
size = 0u;
}
- CHECK(IsAligned<kPageSize>(it->second->BaseSize()));
+ CHECK_ALIGNED(it->second->BaseSize(), kPageSize);
++num;
size += it->second->BaseSize();
end = it->second->BaseEnd();
diff --git a/runtime/oat.cc b/runtime/oat.cc
index 1dd2aad..5725b6f 100644
--- a/runtime/oat.cc
+++ b/runtime/oat.cc
@@ -97,7 +97,7 @@
image_file_location_oat_checksum_ = image_file_location_oat_checksum;
UpdateChecksum(&image_file_location_oat_checksum_, sizeof(image_file_location_oat_checksum_));
- CHECK(IsAligned<kPageSize>(image_file_location_oat_data_begin));
+ CHECK_ALIGNED(image_file_location_oat_data_begin, kPageSize);
image_file_location_oat_data_begin_ = image_file_location_oat_data_begin;
UpdateChecksum(&image_file_location_oat_data_begin_, sizeof(image_file_location_oat_data_begin_));
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 6f3b0a3..fede91c 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -904,7 +904,7 @@
CHECK_EQ(runtime->GetClassLinker()->GetImagePointerSize(), pointer_size);
}
}
- DCHECK_EQ(frame_size & (kStackAlignment - 1), 0U);
+ DCHECK_ALIGNED(frame_size, kStackAlignment);
DCHECK_NE(reg, -1);
int spill_size = POPCOUNT(core_spills) * GetBytesPerGprSpillLocation(isa)
+ POPCOUNT(fp_spills) * GetBytesPerFprSpillLocation(isa)