Update compiler/ implications to use (D)CHECK_IMPLIES

Follow-up to aosp/1988868, which added the (D)CHECK_IMPLIES macros.
This CL applies them to the occurrences in compiler/ found by a
regex search.
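
For illustration, each rewrite turns a disjunction with a negated
left-hand side into an explicit implication; the sketch below uses
placeholder variable names, not code from this CL:

  // Before: holds when is_static is false or has_receiver is false.
  DCHECK(!is_static || !has_receiver);
  // After: the same condition, stated as "static implies no receiver".
  DCHECK_IMPLIES(is_static, !has_receiver);
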
Test: art/test/testrunner/testrunner.py --host --64 --optimizing -b
Change-Id: If63aed969bfb8b31d6fbbcb3bca2b04314c894b7
diff --git a/compiler/dex/inline_method_analyser.cc b/compiler/dex/inline_method_analyser.cc
index b0f025d..3201965 100644
--- a/compiler/dex/inline_method_analyser.cc
+++ b/compiler/dex/inline_method_analyser.cc
@@ -375,10 +375,10 @@
return false;
}
static_assert(kMaxConstructorIPuts == 3, "Unexpected limit"); // Code below depends on this.
- DCHECK(iputs[0].field_index != DexFile::kDexNoIndex16 ||
- iputs[1].field_index == DexFile::kDexNoIndex16);
- DCHECK(iputs[1].field_index != DexFile::kDexNoIndex16 ||
- iputs[2].field_index == DexFile::kDexNoIndex16);
+ DCHECK_IMPLIES(iputs[0].field_index == DexFile::kDexNoIndex16,
+ iputs[1].field_index == DexFile::kDexNoIndex16);
+ DCHECK_IMPLIES(iputs[1].field_index == DexFile::kDexNoIndex16,
+ iputs[2].field_index == DexFile::kDexNoIndex16);
#define STORE_IPUT(n) \
do { \
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index be519c1..2a33858 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -396,7 +396,7 @@
// 4.5. Fix differences in result widths.
if (main_jni_conv->RequiresSmallResultTypeExtension()) {
DCHECK(main_jni_conv->HasSmallReturnType());
- CHECK(!is_critical_native || !main_jni_conv->UseTailCall());
+ CHECK_IMPLIES(is_critical_native, !main_jni_conv->UseTailCall());
if (main_jni_conv->GetReturnType() == Primitive::kPrimByte ||
main_jni_conv->GetReturnType() == Primitive::kPrimShort) {
__ SignExtend(main_jni_conv->ReturnRegister(),
@@ -418,7 +418,7 @@
// If they differ, only then do we have to do anything about it.
// Otherwise the return value is already in the right place when we return.
if (!jni_return_reg.Equals(mr_return_reg)) {
- CHECK(!is_critical_native || !main_jni_conv->UseTailCall());
+ CHECK_IMPLIES(is_critical_native, !main_jni_conv->UseTailCall());
// This is typically only necessary on ARM32 due to native being softfloat
// while managed is hardfloat.
// -- For example VMOV {r0, r1} -> D0; VMOV r0 -> S0.
diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc
index a8149f9..2d02e9f 100644
--- a/compiler/optimizing/bounds_check_elimination.cc
+++ b/compiler/optimizing/bounds_check_elimination.cc
@@ -595,7 +595,7 @@
// Helper method to assign a new range to an instruction in given basic block.
void AssignRange(HBasicBlock* basic_block, HInstruction* instruction, ValueRange* range) {
- DCHECK(!range->IsMonotonicValueRange() || instruction->IsLoopHeaderPhi());
+ DCHECK_IMPLIES(range->IsMonotonicValueRange(), instruction->IsLoopHeaderPhi());
GetValueRangeMap(basic_block)->Overwrite(instruction->GetId(), range);
}
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index cf59be8..d81a7b5 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -498,7 +498,7 @@
static bool StoreNeedsWriteBarrier(DataType::Type type, HInstruction* value) {
// Check that null value is not represented as an integer constant.
- DCHECK(type != DataType::Type::kReference || !value->IsIntConstant());
+ DCHECK_IMPLIES(type == DataType::Type::kReference, !value->IsIntConstant());
return type == DataType::Type::kReference && !value->IsNullConstant();
}
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 548b2d4..fc1c07d 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -2418,9 +2418,9 @@
// operand. Note that VIXL would still manage if it was passed by generating
// the extension as a separate instruction.
// `HNeg` also does not support extension. See comments in `ShifterOperandSupportsExtension()`.
- DCHECK(!right_operand.IsExtendedRegister() ||
- (kind != HInstruction::kAnd && kind != HInstruction::kOr && kind != HInstruction::kXor &&
- kind != HInstruction::kNeg));
+ DCHECK_IMPLIES(right_operand.IsExtendedRegister(),
+ kind != HInstruction::kAnd && kind != HInstruction::kOr &&
+ kind != HInstruction::kXor && kind != HInstruction::kNeg);
switch (kind) {
case HInstruction::kAdd:
__ Add(out, left, right_operand);
@@ -7169,7 +7169,7 @@
// For JIT, the slow path is considered part of the compiled method,
// so JIT should pass null as `debug_name`.
- DCHECK(!GetCompilerOptions().IsJitCompiler() || debug_name == nullptr);
+ DCHECK_IMPLIES(GetCompilerOptions().IsJitCompiler(), debug_name == nullptr);
if (debug_name != nullptr && GetCompilerOptions().GenerateAnyDebugInfo()) {
std::ostringstream oss;
oss << "BakerReadBarrierThunk";
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index bafa89f..f65890b 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -1886,7 +1886,7 @@
vixl32::Label* CodeGeneratorARMVIXL::GetFinalLabel(HInstruction* instruction,
vixl32::Label* final_label) {
DCHECK(!instruction->IsControlFlow() && !instruction->IsSuspendCheck());
- DCHECK(!instruction->IsInvoke() || !instruction->GetLocations()->CanCall());
+ DCHECK_IMPLIES(instruction->IsInvoke(), !instruction->GetLocations()->CanCall());
const HBasicBlock* const block = instruction->GetBlock();
const HLoopInformation* const info = block->GetLoopInformation();
@@ -2949,7 +2949,7 @@
!out.Equals(second) &&
(condition->GetLocations()->InAt(0).Equals(out) ||
condition->GetLocations()->InAt(1).Equals(out));
- DCHECK(!output_overlaps_with_condition_inputs || condition->IsCondition());
+ DCHECK_IMPLIES(output_overlaps_with_condition_inputs, condition->IsCondition());
Location src;
if (condition->IsIntConstant()) {
@@ -10124,7 +10124,7 @@
// For JIT, the slow path is considered part of the compiled method,
// so JIT should pass null as `debug_name`.
- DCHECK(!GetCompilerOptions().IsJitCompiler() || debug_name == nullptr);
+ DCHECK_IMPLIES(GetCompilerOptions().IsJitCompiler(), debug_name == nullptr);
if (debug_name != nullptr && GetCompilerOptions().GenerateAnyDebugInfo()) {
std::ostringstream oss;
oss << "BakerReadBarrierThunk";
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index aa40755..f385b34 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -856,7 +856,7 @@
bool narrow) {
CheckValidReg(base_reg);
CheckValidReg(holder_reg);
- DCHECK(!narrow || base_reg < 8u) << base_reg;
+ DCHECK_IMPLIES(narrow, base_reg < 8u) << base_reg;
BakerReadBarrierWidth width =
narrow ? BakerReadBarrierWidth::kNarrow : BakerReadBarrierWidth::kWide;
return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kField) |
@@ -875,7 +875,7 @@
static uint32_t EncodeBakerReadBarrierGcRootData(uint32_t root_reg, bool narrow) {
CheckValidReg(root_reg);
- DCHECK(!narrow || root_reg < 8u) << root_reg;
+ DCHECK_IMPLIES(narrow, root_reg < 8u) << root_reg;
BakerReadBarrierWidth width =
narrow ? BakerReadBarrierWidth::kNarrow : BakerReadBarrierWidth::kWide;
return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kGcRoot) |
diff --git a/compiler/optimizing/code_generator_vector_arm_vixl.cc b/compiler/optimizing/code_generator_vector_arm_vixl.cc
index 33f2491..c46f9b7 100644
--- a/compiler/optimizing/code_generator_vector_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_vector_arm_vixl.cc
@@ -958,7 +958,8 @@
UseScratchRegisterScope temps(GetVIXLAssembler());
vixl32::Register scratch;
- DCHECK(instruction->GetPackedType() != DataType::Type::kUint16 || !instruction->IsStringCharAt());
+ DCHECK_IMPLIES(instruction->GetPackedType() == DataType::Type::kUint16,
+ !instruction->IsStringCharAt());
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
diff --git a/compiler/optimizing/code_sinking.cc b/compiler/optimizing/code_sinking.cc
index 49acab6..2b56c88 100644
--- a/compiler/optimizing/code_sinking.cc
+++ b/compiler/optimizing/code_sinking.cc
@@ -198,7 +198,8 @@
}
for (const HUseListNode<HEnvironment*>& use : instruction->GetEnvUses()) {
DCHECK(!use.GetUser()->GetHolder()->IsPhi());
- DCHECK(!filter || !ShouldFilterUse(instruction, use.GetUser()->GetHolder(), post_dominated));
+ DCHECK_IMPLIES(filter,
+ !ShouldFilterUse(instruction, use.GetUser()->GetHolder(), post_dominated));
finder.Update(use.GetUser()->GetHolder()->GetBlock());
}
HBasicBlock* target_block = finder.Get();
diff --git a/compiler/optimizing/execution_subgraph.h b/compiler/optimizing/execution_subgraph.h
index 05855c3..7d2a660 100644
--- a/compiler/optimizing/execution_subgraph.h
+++ b/compiler/optimizing/execution_subgraph.h
@@ -237,7 +237,7 @@
// Finalization is needed to call this function.
// See RemoveConcavity and Prune for more information.
bool ContainsBlock(const HBasicBlock* blk) const {
- DCHECK(!finalized_ || !needs_prune_) << "finalized: " << finalized_;
+ DCHECK_IMPLIES(finalized_, !needs_prune_);
if (!valid_) {
return false;
}
@@ -267,7 +267,7 @@
}
ArrayRef<const ExcludedCohort> GetExcludedCohorts() const {
- DCHECK(!valid_ || !needs_prune_);
+ DCHECK_IMPLIES(valid_, !needs_prune_);
if (!valid_ || !unreachable_blocks_.IsAnyBitSet()) {
return ArrayRef<const ExcludedCohort>();
} else {
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 8867792..1df313d 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -961,7 +961,7 @@
// In monomorphic cases when UseOnlyPolymorphicInliningWithNoDeopt() is true, we call
// `TryInlinePolymorphicCall` even though we are monomorphic.
const bool actually_monomorphic = number_of_types == 1;
- DCHECK(!actually_monomorphic || UseOnlyPolymorphicInliningWithNoDeopt());
+ DCHECK_IMPLIES(actually_monomorphic, UseOnlyPolymorphicInliningWithNoDeopt());
// We only want to limit recursive polymorphic cases, not monomorphic ones.
const bool too_many_polymorphic_recursive_calls =
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index aabc433..2454125 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -1829,7 +1829,8 @@
const InstructionOperands& operands,
const char* shorty,
bool is_unresolved) {
- DCHECK(!invoke->IsInvokeStaticOrDirect() || !invoke->AsInvokeStaticOrDirect()->IsStringInit());
+ DCHECK_IMPLIES(invoke->IsInvokeStaticOrDirect(),
+ !invoke->AsInvokeStaticOrDirect()->IsStringInit());
ReceiverArg receiver_arg = (invoke->GetInvokeType() == InvokeType::kStatic)
? ReceiverArg::kNone
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 69e4fcf..1130f3a 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -1228,14 +1228,14 @@
Register expected2 = Register()) {
// The `expected2` is valid only for reference slow path and represents the unmarked old value
// from the main path attempt to emit CAS when the marked old value matched `expected`.
- DCHECK(type == DataType::Type::kReference || !expected2.IsValid());
+ DCHECK_IMPLIES(expected2.IsValid(), type == DataType::Type::kReference);
DCHECK(ptr.IsX());
DCHECK_EQ(new_value.IsX(), type == DataType::Type::kInt64);
DCHECK_EQ(old_value.IsX(), type == DataType::Type::kInt64);
DCHECK(store_result.IsW());
DCHECK_EQ(expected.IsX(), type == DataType::Type::kInt64);
- DCHECK(!expected2.IsValid() || expected2.IsW());
+ DCHECK_IMPLIES(expected2.IsValid(), expected2.IsW());
Arm64Assembler* assembler = codegen->GetAssembler();
MacroAssembler* masm = assembler->GetVIXLAssembler();
@@ -1361,7 +1361,7 @@
// representing the to-space and from-space references for the same object.
UseScratchRegisterScope temps(masm);
- DCHECK(!store_result_.IsValid() || !temps.IsAvailable(store_result_));
+ DCHECK_IMPLIES(store_result_.IsValid(), !temps.IsAvailable(store_result_));
Register tmp_ptr = temps.AcquireX();
Register store_result = store_result_.IsValid() ? store_result_ : temps.AcquireW();
@@ -1578,7 +1578,7 @@
}
void IntrinsicCodeGeneratorARM64::VisitJdkUnsafeCompareAndSetObject(HInvoke* invoke) {
// The only supported read barrier implementation is the Baker-style read barriers.
- DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
+ DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
GenUnsafeCas(invoke, DataType::Type::kReference, codegen_);
}
@@ -2884,7 +2884,7 @@
void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopy(HInvoke* invoke) {
// The only read barrier implementation supporting the
// SystemArrayCopy intrinsic is the Baker-style read barriers.
- DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
+ DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
MacroAssembler* masm = GetVIXLAssembler();
LocationSummary* locations = invoke->GetLocations();
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index 1f2ba46..aa4c139 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -1339,7 +1339,7 @@
void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) {
// The only read barrier implementation supporting the
// SystemArrayCopy intrinsic is the Baker-style read barriers.
- DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
+ DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
ArmVIXLAssembler* assembler = GetAssembler();
LocationSummary* locations = invoke->GetLocations();
@@ -3797,7 +3797,7 @@
}
void IntrinsicCodeGeneratorARMVIXL::VisitJdkUnsafeCompareAndSetObject(HInvoke* invoke) {
// The only supported read barrier implementation is the Baker-style read barriers (b/173104084).
- DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
+ DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
GenUnsafeCas(invoke, DataType::Type::kReference, codegen_);
}
@@ -4623,7 +4623,8 @@
size_t temp_start = 0u;
if (Use64BitExclusiveLoadStore(atomic, codegen)) {
// Clear `maybe_temp3` which was initialized above for Float64.
- DCHECK(value_type != DataType::Type::kFloat64 || maybe_temp3.Equals(locations->GetTemp(2)));
+ DCHECK_IMPLIES(value_type == DataType::Type::kFloat64,
+ maybe_temp3.Equals(locations->GetTemp(2)));
maybe_temp3 = Location::NoLocation();
temp_start = 2u;
}
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 6306720..7d90aae 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -2391,7 +2391,7 @@
if (type == DataType::Type::kReference) {
// The only read barrier implementation supporting the
// UnsafeCASObject intrinsic is the Baker-style read barriers.
- DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
+ DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
Register temp = locations->GetTemp(0).AsRegister<Register>();
Register temp2 = locations->GetTemp(1).AsRegister<Register>();
@@ -2413,7 +2413,7 @@
void IntrinsicCodeGeneratorX86::VisitUnsafeCASObject(HInvoke* invoke) {
// The only read barrier implementation supporting the
// UnsafeCASObject intrinsic is the Baker-style read barriers.
- DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
+ DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
GenCAS(DataType::Type::kReference, invoke, codegen_);
}
@@ -2443,7 +2443,7 @@
void IntrinsicCodeGeneratorX86::VisitJdkUnsafeCompareAndSetObject(HInvoke* invoke) {
// The only supported read barrier implementation is the Baker-style read barriers.
- DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
+ DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
GenCAS(DataType::Type::kReference, invoke, codegen_);
}
@@ -2875,7 +2875,7 @@
void IntrinsicCodeGeneratorX86::VisitSystemArrayCopy(HInvoke* invoke) {
// The only read barrier implementation supporting the
// SystemArrayCopy intrinsic is the Baker-style read barriers.
- DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
+ DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
X86Assembler* assembler = GetAssembler();
LocationSummary* locations = invoke->GetLocations();
@@ -3836,7 +3836,7 @@
static void GenerateVarHandleGet(HInvoke* invoke, CodeGeneratorX86* codegen) {
// The only read barrier implementation supporting the
// VarHandleGet intrinsic is the Baker-style read barriers.
- DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
+ DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
X86Assembler* assembler = codegen->GetAssembler();
LocationSummary* locations = invoke->GetLocations();
@@ -3990,7 +3990,7 @@
static void GenerateVarHandleSet(HInvoke* invoke, CodeGeneratorX86* codegen) {
// The only read barrier implementation supporting the
// VarHandleGet intrinsic is the Baker-style read barriers.
- DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
+ DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
X86Assembler* assembler = codegen->GetAssembler();
LocationSummary* locations = invoke->GetLocations();
@@ -4135,7 +4135,7 @@
static void GenerateVarHandleGetAndSet(HInvoke* invoke, CodeGeneratorX86* codegen) {
// The only read barrier implementation supporting the
// VarHandleGet intrinsic is the Baker-style read barriers.
- DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
+ DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
X86Assembler* assembler = codegen->GetAssembler();
LocationSummary* locations = invoke->GetLocations();
@@ -4322,7 +4322,7 @@
static void GenerateVarHandleCompareAndSetOrExchange(HInvoke* invoke, CodeGeneratorX86* codegen) {
// The only read barrier implementation supporting the
// VarHandleGet intrinsic is the Baker-style read barriers.
- DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
+ DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
X86Assembler* assembler = codegen->GetAssembler();
LocationSummary* locations = invoke->GetLocations();
@@ -4490,7 +4490,7 @@
static void GenerateVarHandleGetAndAdd(HInvoke* invoke, CodeGeneratorX86* codegen) {
// The only read barrier implementation supporting the
// VarHandleGet intrinsic is the Baker-style read barriers.
- DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
+ DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
X86Assembler* assembler = codegen->GetAssembler();
LocationSummary* locations = invoke->GetLocations();
@@ -4659,7 +4659,7 @@
static void GenerateVarHandleGetAndBitwiseOp(HInvoke* invoke, CodeGeneratorX86* codegen) {
// The only read barrier implementation supporting the
// VarHandleGet intrinsic is the Baker-style read barriers.
- DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
+ DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
X86Assembler* assembler = codegen->GetAssembler();
LocationSummary* locations = invoke->GetLocations();
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 15df11d..c65e467 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -887,7 +887,7 @@
void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
// The only read barrier implementation supporting the
// SystemArrayCopy intrinsic is the Baker-style read barriers.
- DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
+ DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
X86_64Assembler* assembler = GetAssembler();
LocationSummary* locations = invoke->GetLocations();
@@ -2438,7 +2438,7 @@
CpuRegister temp3,
bool is_cmpxchg) {
// The only supported read barrier implementation is the Baker-style read barriers.
- DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
+ DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
X86_64Assembler* assembler = down_cast<X86_64Assembler*>(codegen->GetAssembler());
@@ -2624,7 +2624,7 @@
void IntrinsicCodeGeneratorX86_64::VisitJdkUnsafeCompareAndSetObject(HInvoke* invoke) {
// The only supported read barrier implementation is the Baker-style read barriers.
- DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
+ DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
GenCAS(DataType::Type::kReference, invoke, codegen_);
}
@@ -4050,7 +4050,7 @@
CodeGeneratorX86_64* codegen,
bool is_cmpxchg,
bool byte_swap = false) {
- DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
+ DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
X86_64Assembler* assembler = codegen->GetAssembler();
LocationSummary* locations = invoke->GetLocations();
@@ -4351,7 +4351,7 @@
codegen->GetInstructionCodegen()->Bswap(temp_loc, type);
}
- DCHECK(!value.IsConstant() || !is64Bit);
+ DCHECK_IMPLIES(value.IsConstant(), !is64Bit);
int32_t const_value = value.IsConstant()
? CodeGenerator::GetInt32ValueOf(value.GetConstant())
: 0;
@@ -4609,7 +4609,7 @@
bool need_any_store_barrier,
bool need_any_any_barrier,
bool byte_swap = false) {
- DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
+ DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
X86_64Assembler* assembler = codegen->GetAssembler();
LocationSummary* locations = invoke->GetLocations();
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index b767032..4bca2bd 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -683,15 +683,15 @@
return Value::ForInstruction(substitute);
}
}
- DCHECK(!value.IsInstruction() ||
- FindSubstitute(value.GetInstruction()) == value.GetInstruction());
+ DCHECK_IMPLIES(value.IsInstruction(),
+ FindSubstitute(value.GetInstruction()) == value.GetInstruction());
return value;
}
if (value.NeedsPhi() && phi_placeholder_replacements_[PhiPlaceholderIndex(value)].IsValid()) {
return Replacement(value);
} else {
- DCHECK(!value.IsInstruction() ||
- FindSubstitute(value.GetInstruction()) == value.GetInstruction());
+ DCHECK_IMPLIES(value.IsInstruction(),
+ FindSubstitute(value.GetInstruction()) == value.GetInstruction());
return value;
}
}
@@ -748,7 +748,7 @@
size_t id = static_cast<size_t>(instruction->GetId());
if (id >= substitute_instructions_for_loads_.size()) {
// New Phi (may not be in the graph yet), default value or PredicatedInstanceFieldGet.
- DCHECK(!IsLoad(instruction) || instruction->IsPredicatedInstanceFieldGet());
+ DCHECK_IMPLIES(IsLoad(instruction), instruction->IsPredicatedInstanceFieldGet());
return instruction;
}
HInstruction* substitute = substitute_instructions_for_loads_[id];
@@ -1597,7 +1597,7 @@
merged_value = ReplacementOrValue(Value::ForPhiPlaceholder(phi_placeholder, needs_loop_phi));
}
}
- DCHECK(!merged_value.IsPureUnknown() || block->GetPredecessors().size() <= 1)
+ DCHECK_IMPLIES(merged_value.IsPureUnknown(), block->GetPredecessors().size() <= 1)
<< merged_value << " in " << GetGraph()->PrettyMethod();
return merged_value;
}
@@ -1800,8 +1800,8 @@
HBasicBlock* block = instruction->GetBlock();
ScopedArenaVector<ValueRecord>& heap_values = heap_values_for_[block->GetBlockId()];
ValueRecord& record = heap_values[idx];
- DCHECK(!record.value.IsInstruction() ||
- FindSubstitute(record.value.GetInstruction()) == record.value.GetInstruction());
+ DCHECK_IMPLIES(record.value.IsInstruction(),
+ FindSubstitute(record.value.GetInstruction()) == record.value.GetInstruction());
if (record.value.Equals(value)) {
// Store into the heap location with the same value.
diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc
index 02ee4ec..23c86ce 100644
--- a/compiler/optimizing/loop_optimization.cc
+++ b/compiler/optimizing/loop_optimization.cc
@@ -1175,7 +1175,7 @@
// for ( ; i < stc; i += 1)
// <loop-body>
if (needs_cleanup) {
- DCHECK(!IsInPredicatedVectorizationMode() || vector_runtime_test_a_ != nullptr);
+ DCHECK_IMPLIES(IsInPredicatedVectorizationMode(), vector_runtime_test_a_ != nullptr);
vector_mode_ = kSequential;
GenerateNewLoop(node,
block,
diff --git a/compiler/optimizing/loop_optimization.h b/compiler/optimizing/loop_optimization.h
index d3583ed..3acd5b1 100644
--- a/compiler/optimizing/loop_optimization.h
+++ b/compiler/optimizing/loop_optimization.h
@@ -192,7 +192,7 @@
bool TrySetVectorLength(DataType::Type type, uint32_t length) {
bool res = TrySetVectorLengthImpl(length);
// Currently the vectorizer supports only the mode when full SIMD registers are used.
- DCHECK(!res || (DataType::Size(type) * length == GetVectorSizeInBytes()));
+ DCHECK_IMPLIES(res, DataType::Size(type) * length == GetVectorSizeInBytes());
return res;
}
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 00bfc92..0018a5b 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -719,7 +719,8 @@
// the first predecessor can never be a back edge and therefore it must have
// been visited already and had its try membership set.
HBasicBlock* first_predecessor = block->GetPredecessors()[0];
- DCHECK(!block->IsLoopHeader() || !block->GetLoopInformation()->IsBackEdge(*first_predecessor));
+ DCHECK_IMPLIES(block->IsLoopHeader(),
+ !block->GetLoopInformation()->IsBackEdge(*first_predecessor));
const HTryBoundary* try_entry = first_predecessor->ComputeTryEntryOfSuccessors();
if (try_entry != nullptr &&
(block->GetTryCatchInformation() == nullptr ||
@@ -2468,7 +2469,7 @@
// control-flow instructions.
for (HBasicBlock* predecessor : predecessors_) {
// We should not see any back edges as they would have been removed by step (3).
- DCHECK(!IsInLoop() || !GetLoopInformation()->IsBackEdge(*predecessor));
+ DCHECK_IMPLIES(IsInLoop(), !GetLoopInformation()->IsBackEdge(*predecessor));
HInstruction* last_instruction = predecessor->GetLastInstruction();
if (last_instruction->IsTryBoundary() && !IsCatchBlock()) {
@@ -3069,7 +3070,7 @@
DCHECK(upper_bound_rti.IsSupertypeOf(rti))
<< " upper_bound_rti: " << upper_bound_rti
<< " rti: " << rti;
- DCHECK(!upper_bound_rti.GetTypeHandle()->CannotBeAssignedFromOtherTypes() || rti.IsExact())
+ DCHECK_IMPLIES(upper_bound_rti.GetTypeHandle()->CannotBeAssignedFromOtherTypes(), rti.IsExact())
<< " upper_bound_rti: " << upper_bound_rti
<< " rti: " << rti;
}
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index fd3a275..42f03a0 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -6828,7 +6828,7 @@
klass_(klass) {
// Referrers class should not need access check. We never inline unverified
// methods so we can't possibly end up in this situation.
- DCHECK(!is_referrers_class || !needs_access_check);
+ DCHECK_IMPLIES(is_referrers_class, !needs_access_check);
SetPackedField<LoadKindField>(
is_referrers_class ? LoadKind::kReferrersClass : LoadKind::kRuntimeCall);
@@ -8261,7 +8261,7 @@
}
bool IsEliminated() const {
- DCHECK(!source_.IsInvalid() || destination_.IsInvalid());
+ DCHECK_IMPLIES(source_.IsInvalid(), destination_.IsInvalid());
return source_.IsInvalid();
}
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index cb40a51..1bf1586 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -817,7 +817,7 @@
jit::Jit* jit = Runtime::Current()->GetJit();
if (jit != nullptr) {
ProfilingInfo* info = jit->GetCodeCache()->GetProfilingInfo(method, Thread::Current());
- DCHECK(compilation_kind != CompilationKind::kBaseline || info != nullptr)
+ DCHECK_IMPLIES(compilation_kind == CompilationKind::kBaseline, info != nullptr)
<< "Compiling a method baseline should always have a ProfilingInfo";
graph->SetProfilingInfo(info);
}
@@ -1107,7 +1107,7 @@
// regressing.
std::string method_name = dex_file.PrettyMethod(method_idx);
bool shouldCompile = method_name.find("$opt$") != std::string::npos;
- DCHECK((compiled_method != nullptr) || !shouldCompile) << "Didn't compile " << method_name;
+ DCHECK_IMPLIES(compiled_method == nullptr, !shouldCompile) << "Didn't compile " << method_name;
}
return compiled_method;
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index 1b2f71f..e6024b0 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -147,7 +147,7 @@
} else if (instr->IsLoadClass()) {
HLoadClass* cls = instr->AsLoadClass();
DCHECK(cls->GetReferenceTypeInfo().IsExact());
- DCHECK(!cls->GetLoadedClassRTI().IsValid() || cls->GetLoadedClassRTI().IsExact());
+ DCHECK_IMPLIES(cls->GetLoadedClassRTI().IsValid(), cls->GetLoadedClassRTI().IsExact());
} else if (instr->IsNullCheck()) {
DCHECK(instr->GetReferenceTypeInfo().IsEqual(instr->InputAt(0)->GetReferenceTypeInfo()))
<< "NullCheck " << instr->GetReferenceTypeInfo()
@@ -155,10 +155,11 @@
}
} else if (instr->IsInstanceOf()) {
HInstanceOf* iof = instr->AsInstanceOf();
- DCHECK(!iof->GetTargetClassRTI().IsValid() || iof->GetTargetClassRTI().IsExact());
+ DCHECK_IMPLIES(iof->GetTargetClassRTI().IsValid(), iof->GetTargetClassRTI().IsExact());
} else if (instr->IsCheckCast()) {
HCheckCast* check = instr->AsCheckCast();
- DCHECK(!check->GetTargetClassRTI().IsValid() || check->GetTargetClassRTI().IsExact());
+ DCHECK_IMPLIES(check->GetTargetClassRTI().IsValid(),
+ check->GetTargetClassRTI().IsExact());
}
}
}
diff --git a/compiler/optimizing/register_allocation_resolver.cc b/compiler/optimizing/register_allocation_resolver.cc
index 670db42..875c633 100644
--- a/compiler/optimizing/register_allocation_resolver.cc
+++ b/compiler/optimizing/register_allocation_resolver.cc
@@ -75,7 +75,7 @@
}
} else if (instruction->IsCurrentMethod()) {
// The current method is always at offset 0.
- DCHECK(!current->HasSpillSlot() || (current->GetSpillSlot() == 0));
+ DCHECK_IMPLIES(current->HasSpillSlot(), (current->GetSpillSlot() == 0));
} else if (instruction->IsPhi() && instruction->AsPhi()->IsCatchPhi()) {
DCHECK(current->HasSpillSlot());
size_t slot = current->GetSpillSlot()
@@ -306,9 +306,9 @@
size_t num_of_slots = interval->NumberOfSpillSlotsNeeded();
loc = Location::StackSlotByNumOfSlots(num_of_slots, interval->GetParent()->GetSpillSlot());
- CHECK(!loc.IsSIMDStackSlot() ||
- (codegen_->GetSIMDRegisterWidth() / kVRegSize == num_of_slots)) <<
- "Unexpected number of spill slots";
+ CHECK_IMPLIES(loc.IsSIMDStackSlot(),
+ (codegen_->GetSIMDRegisterWidth() / kVRegSize == num_of_slots))
+ << "Unexpected number of spill slots";
InsertMoveAfter(interval->GetDefinedBy(), interval->ToLocation(), loc);
}
UsePositionList::const_iterator use_it = current->GetUses().begin();
@@ -468,9 +468,9 @@
DCHECK(defined_by->IsCurrentMethod());
size_t num_of_slots = parent->NumberOfSpillSlotsNeeded();
location_source = Location::StackSlotByNumOfSlots(num_of_slots, parent->GetSpillSlot());
- CHECK(!location_source.IsSIMDStackSlot() ||
- (codegen_->GetSIMDRegisterWidth() == num_of_slots * kVRegSize)) <<
- "Unexpected number of spill slots";
+ CHECK_IMPLIES(location_source.IsSIMDStackSlot(),
+ (codegen_->GetSIMDRegisterWidth() == num_of_slots * kVRegSize))
+ << "Unexpected number of spill slots";
}
} else {
DCHECK(source != nullptr);
diff --git a/compiler/optimizing/register_allocator_graph_color.cc b/compiler/optimizing/register_allocator_graph_color.cc
index b9b8f8b..684aaf5 100644
--- a/compiler/optimizing/register_allocator_graph_color.cc
+++ b/compiler/optimizing/register_allocator_graph_color.cc
@@ -286,7 +286,7 @@
size_t GetOutDegree() const {
// Pre-colored nodes have infinite degree.
- DCHECK(!IsPrecolored() || out_degree_ == std::numeric_limits<size_t>::max());
+ DCHECK_IMPLIES(IsPrecolored(), out_degree_ == std::numeric_limits<size_t>::max());
return out_degree_;
}
@@ -704,7 +704,8 @@
codegen_->AddAllocatedRegister(high_reg);
}
} else {
- DCHECK(!interval->HasHighInterval() || !interval->GetHighInterval()->HasRegister());
+ DCHECK_IMPLIES(interval->HasHighInterval(),
+ !interval->GetHighInterval()->HasRegister());
}
}
diff --git a/compiler/optimizing/register_allocator_linear_scan.cc b/compiler/optimizing/register_allocator_linear_scan.cc
index b481f64..833c24d 100644
--- a/compiler/optimizing/register_allocator_linear_scan.cc
+++ b/compiler/optimizing/register_allocator_linear_scan.cc
@@ -543,7 +543,7 @@
// Make sure we are going in the right order.
DCHECK(unhandled_->empty() || unhandled_->back()->GetStart() >= current->GetStart());
// Make sure a low interval is always with a high.
- DCHECK(!current->IsLowInterval() || unhandled_->back()->IsHighInterval());
+ DCHECK_IMPLIES(current->IsLowInterval(), unhandled_->back()->IsHighInterval());
// Make sure a high interval is always with a low.
DCHECK(current->IsLowInterval() ||
unhandled_->empty() ||
@@ -914,7 +914,7 @@
// We must still proceed in order to split currently active and inactive
// uses of the high interval's register, and put the high interval in the
// active set.
- DCHECK(first_register_use != kNoLifetime || (current->GetNextSibling() != nullptr));
+ DCHECK_IMPLIES(first_register_use == kNoLifetime, current->GetNextSibling() != nullptr);
} else if (first_register_use == kNoLifetime) {
AllocateSpillSlotFor(current);
return false;
@@ -1128,7 +1128,7 @@
}
HInstruction* defined_by = parent->GetDefinedBy();
- DCHECK(!defined_by->IsPhi() || !defined_by->AsPhi()->IsCatchPhi());
+ DCHECK_IMPLIES(defined_by->IsPhi(), !defined_by->AsPhi()->IsCatchPhi());
if (defined_by->IsParameterValue()) {
// Parameters have their own stack slot.
diff --git a/compiler/optimizing/scheduler.cc b/compiler/optimizing/scheduler.cc
index cb340f5..8f18ccf 100644
--- a/compiler/optimizing/scheduler.cc
+++ b/compiler/optimizing/scheduler.cc
@@ -316,7 +316,7 @@
}
// Scheduling barrier dependencies.
- DCHECK(!is_scheduling_barrier || contains_scheduling_barrier_);
+ DCHECK_IMPLIES(is_scheduling_barrier, contains_scheduling_barrier_);
if (contains_scheduling_barrier_) {
// A barrier depends on instructions after it. And instructions before the
// barrier depend on it.
diff --git a/compiler/optimizing/scheduler_arm.cc b/compiler/optimizing/scheduler_arm.cc
index f9004d8..965e1bd 100644
--- a/compiler/optimizing/scheduler_arm.cc
+++ b/compiler/optimizing/scheduler_arm.cc
@@ -1050,8 +1050,8 @@
void SchedulingLatencyVisitorARM::VisitSuspendCheck(HSuspendCheck* instruction) {
HBasicBlock* block = instruction->GetBlock();
- DCHECK((block->GetLoopInformation() != nullptr) ||
- (block->IsEntryBlock() && instruction->GetNext()->IsGoto()));
+ DCHECK_IMPLIES(block->GetLoopInformation() == nullptr,
+ block->IsEntryBlock() && instruction->GetNext()->IsGoto());
// Users do not use any data results.
last_visited_latency_ = 0;
}
diff --git a/compiler/optimizing/scheduler_arm64.cc b/compiler/optimizing/scheduler_arm64.cc
index 7bcf4e7..4f504c2 100644
--- a/compiler/optimizing/scheduler_arm64.cc
+++ b/compiler/optimizing/scheduler_arm64.cc
@@ -187,8 +187,8 @@
void SchedulingLatencyVisitorARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
HBasicBlock* block = instruction->GetBlock();
- DCHECK((block->GetLoopInformation() != nullptr) ||
- (block->IsEntryBlock() && instruction->GetNext()->IsGoto()));
+ DCHECK_IMPLIES(block->GetLoopInformation() == nullptr,
+ block->IsEntryBlock() && instruction->GetNext()->IsGoto());
// Users do not use any data results.
last_visited_latency_ = 0;
}
diff --git a/compiler/optimizing/ssa_phi_elimination.cc b/compiler/optimizing/ssa_phi_elimination.cc
index 3fcb72e..8fd6962 100644
--- a/compiler/optimizing/ssa_phi_elimination.cc
+++ b/compiler/optimizing/ssa_phi_elimination.cc
@@ -189,8 +189,8 @@
// We iterate over the array as long as it grows.
for (size_t i = 0; i < cycle_worklist.size(); ++i) {
HPhi* current = cycle_worklist[i];
- DCHECK(!current->IsLoopHeaderPhi() ||
- current->GetBlock()->IsLoopPreHeaderFirstPredecessor());
+ DCHECK_IMPLIES(current->IsLoopHeaderPhi(),
+ current->GetBlock()->IsLoopPreHeaderFirstPredecessor());
for (HInstruction* input : current->GetInputs()) {
if (input == current) {
diff --git a/compiler/optimizing/superblock_cloner.cc b/compiler/optimizing/superblock_cloner.cc
index b968491..b46d193 100644
--- a/compiler/optimizing/superblock_cloner.cc
+++ b/compiler/optimizing/superblock_cloner.cc
@@ -193,7 +193,7 @@
// orig_block will be put at the end of the copy_succ's predecessors list; that corresponds
// to the previously added phi inputs position.
orig_block->ReplaceSuccessor(orig_succ, copy_succ);
- DCHECK(!first_phi_met || copy_succ->GetPredecessors().size() == phi_input_count);
+ DCHECK_IMPLIES(first_phi_met, copy_succ->GetPredecessors().size() == phi_input_count);
}
void SuperblockCloner::AddCopyInternalEdge(HBasicBlock* orig_block,
diff --git a/compiler/utils/swap_space.h b/compiler/utils/swap_space.h
index a472943..827e9a6 100644
--- a/compiler/utils/swap_space.h
+++ b/compiler/utils/swap_space.h
@@ -26,6 +26,7 @@
#include <android-base/logging.h>
+#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex.h"
@@ -179,7 +180,7 @@
DCHECK_LE(n, max_size());
if (swap_space_ == nullptr) {
T* result = reinterpret_cast<T*>(malloc(n * sizeof(T)));
- CHECK(result != nullptr || n == 0u); // Abort if malloc() fails.
+ CHECK_IMPLIES(result == nullptr, n == 0u); // Abort if malloc() fails.
return result;
} else {
return reinterpret_cast<T*>(swap_space_->Alloc(n * sizeof(T)));