Diffstat (limited to 'compiler/optimizing')
29 files changed, 88 insertions, 81 deletions
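Every change in this diff rewrites an assertion of the form DCHECK(!a || b) into the equivalent DCHECK_IMPLIES(a, b), which states the intended implication directly instead of through a negated disjunction. As a minimal sketch (illustration only, assuming a DCHECK macro is already in scope; ART's actual DCHECK_IMPLIES definition lives in its base macros and may differ in detail):

// Hypothetical illustration: an implication-style debug check layered on DCHECK.
// "if a holds, then b must hold" is the same condition as "!a || b".
#define DCHECK_IMPLIES(a, b) DCHECK(!(a) || (b))

// The conversion pattern applied throughout this diff:
// before: DCHECK(type != DataType::Type::kReference || !value->IsIntConstant());
// after:  DCHECK_IMPLIES(type == DataType::Type::kReference, !value->IsIntConstant());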
diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc
index a8149f9163..2d02e9f427 100644
--- a/compiler/optimizing/bounds_check_elimination.cc
+++ b/compiler/optimizing/bounds_check_elimination.cc
@@ -595,7 +595,7 @@ class BCEVisitor : public HGraphVisitor {
// Helper method to assign a new range to an instruction in given basic block.
void AssignRange(HBasicBlock* basic_block, HInstruction* instruction, ValueRange* range) {
- DCHECK(!range->IsMonotonicValueRange() || instruction->IsLoopHeaderPhi());
+ DCHECK_IMPLIES(range->IsMonotonicValueRange(), instruction->IsLoopHeaderPhi());
GetValueRangeMap(basic_block)->Overwrite(instruction->GetId(), range);
}
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index cf59be86bf..d81a7b5382 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -498,7 +498,7 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
static bool StoreNeedsWriteBarrier(DataType::Type type, HInstruction* value) {
// Check that null value is not represented as an integer constant.
- DCHECK(type != DataType::Type::kReference || !value->IsIntConstant());
+ DCHECK_IMPLIES(type == DataType::Type::kReference, !value->IsIntConstant());
return type == DataType::Type::kReference && !value->IsNullConstant();
}
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 548b2d42bd..fc1c07dcbb 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -2418,9 +2418,9 @@ void InstructionCodeGeneratorARM64::VisitDataProcWithShifterOp(
// operand. Note that VIXL would still manage if it was passed by generating
// the extension as a separate instruction.
// `HNeg` also does not support extension. See comments in `ShifterOperandSupportsExtension()`.
- DCHECK(!right_operand.IsExtendedRegister() ||
- (kind != HInstruction::kAnd && kind != HInstruction::kOr && kind != HInstruction::kXor &&
- kind != HInstruction::kNeg));
+ DCHECK_IMPLIES(right_operand.IsExtendedRegister(),
+ kind != HInstruction::kAnd && kind != HInstruction::kOr &&
+ kind != HInstruction::kXor && kind != HInstruction::kNeg);
switch (kind) {
case HInstruction::kAdd:
__ Add(out, left, right_operand);
@@ -7169,7 +7169,7 @@ void CodeGeneratorARM64::CompileBakerReadBarrierThunk(Arm64Assembler& assembler,
// For JIT, the slow path is considered part of the compiled method,
// so JIT should pass null as `debug_name`.
- DCHECK(!GetCompilerOptions().IsJitCompiler() || debug_name == nullptr);
+ DCHECK_IMPLIES(GetCompilerOptions().IsJitCompiler(), debug_name == nullptr);
if (debug_name != nullptr && GetCompilerOptions().GenerateAnyDebugInfo()) {
std::ostringstream oss;
oss << "BakerReadBarrierThunk";
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index bafa89fd32..f65890bcb8 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -1886,7 +1886,7 @@ static bool CanGenerateConditionalMove(const Location& out, const Location& src)
vixl32::Label* CodeGeneratorARMVIXL::GetFinalLabel(HInstruction* instruction, vixl32::Label* final_label) {
DCHECK(!instruction->IsControlFlow() && !instruction->IsSuspendCheck());
- DCHECK(!instruction->IsInvoke() || !instruction->GetLocations()->CanCall());
+ DCHECK_IMPLIES(instruction->IsInvoke(), !instruction->GetLocations()->CanCall());
const HBasicBlock* const block = instruction->GetBlock();
const HLoopInformation* const info = block->GetLoopInformation();
@@ -2949,7 +2949,7 @@ void InstructionCodeGeneratorARMVIXL::VisitSelect(HSelect* select) {
!out.Equals(second) &&
(condition->GetLocations()->InAt(0).Equals(out) || condition->GetLocations()->InAt(1).Equals(out));
- DCHECK(!output_overlaps_with_condition_inputs || condition->IsCondition());
+ DCHECK_IMPLIES(output_overlaps_with_condition_inputs, condition->IsCondition());
Location src;
if (condition->IsIntConstant()) {
@@ -10124,7 +10124,7 @@ void CodeGeneratorARMVIXL::CompileBakerReadBarrierThunk(ArmVIXLAssembler& assemb
// For JIT, the slow path is considered part of the compiled method,
// so JIT should pass null as `debug_name`.
- DCHECK(!GetCompilerOptions().IsJitCompiler() || debug_name == nullptr);
+ DCHECK_IMPLIES(GetCompilerOptions().IsJitCompiler(), debug_name == nullptr);
if (debug_name != nullptr && GetCompilerOptions().GenerateAnyDebugInfo()) {
std::ostringstream oss;
oss << "BakerReadBarrierThunk";
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index aa40755b29..f385b3473c 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -856,7 +856,7 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
bool narrow) {
CheckValidReg(base_reg);
CheckValidReg(holder_reg);
- DCHECK(!narrow || base_reg < 8u) << base_reg;
+ DCHECK_IMPLIES(narrow, base_reg < 8u) << base_reg;
BakerReadBarrierWidth width = narrow ? BakerReadBarrierWidth::kNarrow : BakerReadBarrierWidth::kWide;
return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kField) |
@@ -875,7 +875,7 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
static uint32_t EncodeBakerReadBarrierGcRootData(uint32_t root_reg, bool narrow) {
CheckValidReg(root_reg);
- DCHECK(!narrow || root_reg < 8u) << root_reg;
+ DCHECK_IMPLIES(narrow, root_reg < 8u) << root_reg;
BakerReadBarrierWidth width = narrow ?
BakerReadBarrierWidth::kNarrow : BakerReadBarrierWidth::kWide;
return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kGcRoot) |
diff --git a/compiler/optimizing/code_generator_vector_arm_vixl.cc b/compiler/optimizing/code_generator_vector_arm_vixl.cc
index 33f2491418..c46f9b7986 100644
--- a/compiler/optimizing/code_generator_vector_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_vector_arm_vixl.cc
@@ -958,7 +958,8 @@ void InstructionCodeGeneratorARMVIXL::VisitVecLoad(HVecLoad* instruction) {
UseScratchRegisterScope temps(GetVIXLAssembler());
vixl32::Register scratch;
- DCHECK(instruction->GetPackedType() != DataType::Type::kUint16 || !instruction->IsStringCharAt());
+ DCHECK_IMPLIES(instruction->GetPackedType() == DataType::Type::kUint16,
+ !instruction->IsStringCharAt());
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
diff --git a/compiler/optimizing/code_sinking.cc b/compiler/optimizing/code_sinking.cc
index 49acab6b0a..2b56c88c25 100644
--- a/compiler/optimizing/code_sinking.cc
+++ b/compiler/optimizing/code_sinking.cc
@@ -198,7 +198,8 @@ static HInstruction* FindIdealPosition(HInstruction* instruction,
}
for (const HUseListNode<HEnvironment*>& use : instruction->GetEnvUses()) {
DCHECK(!use.GetUser()->GetHolder()->IsPhi());
- DCHECK(!filter || !ShouldFilterUse(instruction, use.GetUser()->GetHolder(), post_dominated));
+ DCHECK_IMPLIES(filter,
+ !ShouldFilterUse(instruction, use.GetUser()->GetHolder(), post_dominated));
finder.Update(use.GetUser()->GetHolder()->GetBlock());
}
HBasicBlock* target_block = finder.Get();
diff --git a/compiler/optimizing/execution_subgraph.h b/compiler/optimizing/execution_subgraph.h
index 05855c30d4..7d2a66077d 100644
--- a/compiler/optimizing/execution_subgraph.h
+++ b/compiler/optimizing/execution_subgraph.h
@@ -237,7 +237,7 @@ class ExecutionSubgraph : public DeletableArenaObject<kArenaAllocLSA> {
// Finalization is needed to call this function.
// See RemoveConcavity and Prune for more information.
bool ContainsBlock(const HBasicBlock* blk) const {
- DCHECK(!finalized_ || !needs_prune_) << "finalized: " << finalized_;
+ DCHECK_IMPLIES(finalized_, !needs_prune_);
if (!valid_) {
return false;
}
@@ -267,7 +267,7 @@ class ExecutionSubgraph : public DeletableArenaObject<kArenaAllocLSA> {
}
ArrayRef<const ExcludedCohort> GetExcludedCohorts() const {
- DCHECK(!valid_ || !needs_prune_);
+ DCHECK_IMPLIES(valid_, !needs_prune_);
if (!valid_ || !unreachable_blocks_.IsAnyBitSet()) {
return ArrayRef<const ExcludedCohort>();
} else {
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 88677923b5..1df313d7fc 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -961,7 +961,7 @@ bool HInliner::TryInlinePolymorphicCall(
// In monomorphic cases when UseOnlyPolymorphicInliningWithNoDeopt() is true, we call
// `TryInlinePolymorphicCall` even though we are monomorphic.
const bool actually_monomorphic = number_of_types == 1;
- DCHECK(!actually_monomorphic || UseOnlyPolymorphicInliningWithNoDeopt());
+ DCHECK_IMPLIES(actually_monomorphic, UseOnlyPolymorphicInliningWithNoDeopt());
// We only want to limit recursive polymorphic cases, not monomorphic ones.
const bool too_many_polymorphic_recursive_calls =
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index aabc433448..2454125589 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -1829,7 +1829,8 @@ bool HInstructionBuilder::HandleInvoke(HInvoke* invoke,
const InstructionOperands& operands,
const char* shorty,
bool is_unresolved) {
- DCHECK(!invoke->IsInvokeStaticOrDirect() || !invoke->AsInvokeStaticOrDirect()->IsStringInit());
+ DCHECK_IMPLIES(invoke->IsInvokeStaticOrDirect(),
+ !invoke->AsInvokeStaticOrDirect()->IsStringInit());
ReceiverArg receiver_arg = (invoke->GetInvokeType() == InvokeType::kStatic)
? ReceiverArg::kNone
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 69e4fcf64b..1130f3a702 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -1228,14 +1228,14 @@ static void GenerateCompareAndSet(CodeGeneratorARM64* codegen,
Register expected2 = Register()) {
// The `expected2` is valid only for reference slow path and represents the unmarked old value
// from the main path attempt to emit CAS when the marked old value matched `expected`.
- DCHECK(type == DataType::Type::kReference || !expected2.IsValid());
+ DCHECK_IMPLIES(expected2.IsValid(), type == DataType::Type::kReference);
DCHECK(ptr.IsX());
DCHECK_EQ(new_value.IsX(), type == DataType::Type::kInt64);
DCHECK_EQ(old_value.IsX(), type == DataType::Type::kInt64);
DCHECK(store_result.IsW());
DCHECK_EQ(expected.IsX(), type == DataType::Type::kInt64);
- DCHECK(!expected2.IsValid() || expected2.IsW());
+ DCHECK_IMPLIES(expected2.IsValid(), expected2.IsW());
Arm64Assembler* assembler = codegen->GetAssembler();
MacroAssembler* masm = assembler->GetVIXLAssembler();
@@ -1361,7 +1361,7 @@ class ReadBarrierCasSlowPathARM64 : public SlowPathCodeARM64 {
// representing the to-space and from-space references for the same object.
UseScratchRegisterScope temps(masm);
- DCHECK(!store_result_.IsValid() || !temps.IsAvailable(store_result_));
+ DCHECK_IMPLIES(store_result_.IsValid(), !temps.IsAvailable(store_result_));
Register tmp_ptr = temps.AcquireX();
Register store_result = store_result_.IsValid() ? store_result_ : temps.AcquireW();
@@ -1578,7 +1578,7 @@ void IntrinsicCodeGeneratorARM64::VisitJdkUnsafeCompareAndSetLong(HInvoke* invok
}
void IntrinsicCodeGeneratorARM64::VisitJdkUnsafeCompareAndSetObject(HInvoke* invoke) {
// The only supported read barrier implementation is the Baker-style read barriers.
- DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
+ DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
GenUnsafeCas(invoke, DataType::Type::kReference, codegen_);
}
@@ -2884,7 +2884,7 @@ void IntrinsicLocationsBuilderARM64::VisitSystemArrayCopy(HInvoke* invoke) {
void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopy(HInvoke* invoke) {
// The only read barrier implementation supporting the
// SystemArrayCopy intrinsic is the Baker-style read barriers.
- DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
+ DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
MacroAssembler* masm = GetVIXLAssembler();
LocationSummary* locations = invoke->GetLocations();
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index 1f2ba466be..aa4c139d55 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -1339,7 +1339,7 @@ static void CheckPosition(ArmVIXLAssembler* assembler,
void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) {
// The only read barrier implementation supporting the
// SystemArrayCopy intrinsic is the Baker-style read barriers.
- DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
+ DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
ArmVIXLAssembler* assembler = GetAssembler();
LocationSummary* locations = invoke->GetLocations();
@@ -3797,7 +3797,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitJdkUnsafeCompareAndSetInt(HInvoke* invo
}
void IntrinsicCodeGeneratorARMVIXL::VisitJdkUnsafeCompareAndSetObject(HInvoke* invoke) {
// The only supported read barrier implementation is the Baker-style read barriers (b/173104084).
- DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
+ DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
GenUnsafeCas(invoke, DataType::Type::kReference, codegen_);
}
@@ -4623,7 +4623,8 @@ static void GenerateVarHandleSet(HInvoke* invoke,
size_t temp_start = 0u;
if (Use64BitExclusiveLoadStore(atomic, codegen)) {
// Clear `maybe_temp3` which was initialized above for Float64.
- DCHECK(value_type != DataType::Type::kFloat64 || maybe_temp3.Equals(locations->GetTemp(2)));
+ DCHECK_IMPLIES(value_type == DataType::Type::kFloat64,
+ maybe_temp3.Equals(locations->GetTemp(2)));
maybe_temp3 = Location::NoLocation();
temp_start = 2u;
}
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 6306720512..7d90aae984 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -2391,7 +2391,7 @@ static void GenCAS(DataType::Type type, HInvoke* invoke, CodeGeneratorX86* codeg
if (type == DataType::Type::kReference) {
// The only read barrier implementation supporting the
// UnsafeCASObject intrinsic is the Baker-style read barriers.
- DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
+ DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
Register temp = locations->GetTemp(0).AsRegister<Register>();
Register temp2 = locations->GetTemp(1).AsRegister<Register>();
@@ -2413,7 +2413,7 @@ void IntrinsicCodeGeneratorX86::VisitUnsafeCASLong(HInvoke* invoke) {
void IntrinsicCodeGeneratorX86::VisitUnsafeCASObject(HInvoke* invoke) {
// The only read barrier implementation supporting the
// UnsafeCASObject intrinsic is the Baker-style read barriers.
- DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
+ DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
GenCAS(DataType::Type::kReference, invoke, codegen_);
}
@@ -2443,7 +2443,7 @@ void IntrinsicCodeGeneratorX86::VisitJdkUnsafeCompareAndSetLong(HInvoke* invoke)
void IntrinsicCodeGeneratorX86::VisitJdkUnsafeCompareAndSetObject(HInvoke* invoke) {
// The only supported read barrier implementation is the Baker-style read barriers.
- DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
+ DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
GenCAS(DataType::Type::kReference, invoke, codegen_);
}
@@ -2875,7 +2875,7 @@ void IntrinsicLocationsBuilderX86::VisitSystemArrayCopy(HInvoke* invoke) {
void IntrinsicCodeGeneratorX86::VisitSystemArrayCopy(HInvoke* invoke) {
// The only read barrier implementation supporting the
// SystemArrayCopy intrinsic is the Baker-style read barriers.
- DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
+ DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
X86Assembler* assembler = GetAssembler();
LocationSummary* locations = invoke->GetLocations();
@@ -3836,7 +3836,7 @@ static void CreateVarHandleGetLocations(HInvoke* invoke) {
static void GenerateVarHandleGet(HInvoke* invoke, CodeGeneratorX86* codegen) {
// The only read barrier implementation supporting the
// VarHandleGet intrinsic is the Baker-style read barriers.
- DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
+ DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
X86Assembler* assembler = codegen->GetAssembler();
LocationSummary* locations = invoke->GetLocations();
@@ -3990,7 +3990,7 @@ static void CreateVarHandleSetLocations(HInvoke* invoke) {
static void GenerateVarHandleSet(HInvoke* invoke, CodeGeneratorX86* codegen) {
// The only read barrier implementation supporting the
// VarHandleGet intrinsic is the Baker-style read barriers.
- DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
+ DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
X86Assembler* assembler = codegen->GetAssembler();
LocationSummary* locations = invoke->GetLocations();
@@ -4135,7 +4135,7 @@ static void CreateVarHandleGetAndSetLocations(HInvoke* invoke) {
static void GenerateVarHandleGetAndSet(HInvoke* invoke, CodeGeneratorX86* codegen) {
// The only read barrier implementation supporting the
// VarHandleGet intrinsic is the Baker-style read barriers.
- DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
+ DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
X86Assembler* assembler = codegen->GetAssembler();
LocationSummary* locations = invoke->GetLocations();
@@ -4322,7 +4322,7 @@ static void CreateVarHandleCompareAndSetOrExchangeLocations(HInvoke* invoke) {
static void GenerateVarHandleCompareAndSetOrExchange(HInvoke* invoke, CodeGeneratorX86* codegen) {
// The only read barrier implementation supporting the
// VarHandleGet intrinsic is the Baker-style read barriers.
- DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
+ DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
X86Assembler* assembler = codegen->GetAssembler();
LocationSummary* locations = invoke->GetLocations();
@@ -4490,7 +4490,7 @@ static void CreateVarHandleGetAndAddLocations(HInvoke* invoke) {
static void GenerateVarHandleGetAndAdd(HInvoke* invoke, CodeGeneratorX86* codegen) {
// The only read barrier implementation supporting the
// VarHandleGet intrinsic is the Baker-style read barriers.
- DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
+ DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
X86Assembler* assembler = codegen->GetAssembler();
LocationSummary* locations = invoke->GetLocations();
@@ -4659,7 +4659,7 @@ static void GenerateBitwiseOp(HInvoke* invoke,
static void GenerateVarHandleGetAndBitwiseOp(HInvoke* invoke, CodeGeneratorX86* codegen) {
// The only read barrier implementation supporting the
// VarHandleGet intrinsic is the Baker-style read barriers.
- DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
+ DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
X86Assembler* assembler = codegen->GetAssembler();
LocationSummary* locations = invoke->GetLocations();
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 15df11d952..c65e467411 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -887,7 +887,7 @@ static void GenSystemArrayCopyAddresses(X86_64Assembler* assembler,
void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
// The only read barrier implementation supporting the
// SystemArrayCopy intrinsic is the Baker-style read barriers.
- DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
+ DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
X86_64Assembler* assembler = GetAssembler();
LocationSummary* locations = invoke->GetLocations();
@@ -2438,7 +2438,7 @@ static void GenCompareAndSetOrExchangeRef(CodeGeneratorX86_64* codegen,
CpuRegister temp3,
bool is_cmpxchg) {
// The only supported read barrier implementation is the Baker-style read barriers.
- DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
+ DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
X86_64Assembler* assembler = down_cast<X86_64Assembler*>(codegen->GetAssembler());
@@ -2624,7 +2624,7 @@ void IntrinsicCodeGeneratorX86_64::VisitJdkUnsafeCompareAndSetLong(HInvoke* invo
void IntrinsicCodeGeneratorX86_64::VisitJdkUnsafeCompareAndSetObject(HInvoke* invoke) {
// The only supported read barrier implementation is the Baker-style read barriers.
- DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
+ DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
GenCAS(DataType::Type::kReference, invoke, codegen_);
}
@@ -4050,7 +4050,7 @@ static void GenerateVarHandleCompareAndSetOrExchange(HInvoke* invoke,
CodeGeneratorX86_64* codegen,
bool is_cmpxchg,
bool byte_swap = false) {
- DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
+ DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
X86_64Assembler* assembler = codegen->GetAssembler();
LocationSummary* locations = invoke->GetLocations();
@@ -4351,7 +4351,7 @@ static void GenerateVarHandleGetAndOp(HInvoke* invoke,
codegen->GetInstructionCodegen()->Bswap(temp_loc, type);
}
- DCHECK(!value.IsConstant() || !is64Bit);
+ DCHECK_IMPLIES(value.IsConstant(), !is64Bit);
int32_t const_value = value.IsConstant() ?
CodeGenerator::GetInt32ValueOf(value.GetConstant()) : 0;
@@ -4609,7 +4609,7 @@ static void GenerateVarHandleGetAndUpdate(HInvoke* invoke,
bool need_any_store_barrier,
bool need_any_any_barrier,
bool byte_swap = false) {
- DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
+ DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
X86_64Assembler* assembler = codegen->GetAssembler();
LocationSummary* locations = invoke->GetLocations();
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index b7670329e6..4bca2bd323 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -683,15 +683,15 @@ class LSEVisitor final : private HGraphDelegateVisitor {
return Value::ForInstruction(substitute);
}
}
- DCHECK(!value.IsInstruction() ||
- FindSubstitute(value.GetInstruction()) == value.GetInstruction());
+ DCHECK_IMPLIES(value.IsInstruction(),
+ FindSubstitute(value.GetInstruction()) == value.GetInstruction());
return value;
}
if (value.NeedsPhi() && phi_placeholder_replacements_[PhiPlaceholderIndex(value)].IsValid()) {
return Replacement(value);
} else {
- DCHECK(!value.IsInstruction() ||
- FindSubstitute(value.GetInstruction()) == value.GetInstruction());
+ DCHECK_IMPLIES(value.IsInstruction(),
+ FindSubstitute(value.GetInstruction()) == value.GetInstruction());
return value;
}
}
@@ -748,7 +748,7 @@ class LSEVisitor final : private HGraphDelegateVisitor {
size_t id = static_cast<size_t>(instruction->GetId());
if (id >= substitute_instructions_for_loads_.size()) {
// New Phi (may not be in the graph yet), default value or PredicatedInstanceFieldGet.
- DCHECK(!IsLoad(instruction) || instruction->IsPredicatedInstanceFieldGet());
+ DCHECK_IMPLIES(IsLoad(instruction), instruction->IsPredicatedInstanceFieldGet());
return instruction;
}
HInstruction* substitute = substitute_instructions_for_loads_[id];
@@ -1597,7 +1597,7 @@ LSEVisitor::Value LSEVisitor::MergePredecessorValues(HBasicBlock* block, size_t
merged_value = ReplacementOrValue(Value::ForPhiPlaceholder(phi_placeholder, needs_loop_phi));
}
}
- DCHECK(!merged_value.IsPureUnknown() || block->GetPredecessors().size() <= 1)
+ DCHECK_IMPLIES(merged_value.IsPureUnknown(), block->GetPredecessors().size() <= 1)
<< merged_value << " in " << GetGraph()->PrettyMethod();
return merged_value;
}
@@ -1800,8 +1800,8 @@ void LSEVisitor::VisitSetLocation(HInstruction* instruction, size_t idx, HInstru
HBasicBlock* block = instruction->GetBlock();
ScopedArenaVector<ValueRecord>& heap_values = heap_values_for_[block->GetBlockId()];
ValueRecord& record = heap_values[idx];
- DCHECK(!record.value.IsInstruction() ||
- FindSubstitute(record.value.GetInstruction()) == record.value.GetInstruction());
+ DCHECK_IMPLIES(record.value.IsInstruction(),
+ FindSubstitute(record.value.GetInstruction()) == record.value.GetInstruction());
if (record.value.Equals(value)) {
// Store into the heap location with the same value.
diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc
index 02ee4ec057..23c86ce3f9 100644
--- a/compiler/optimizing/loop_optimization.cc
+++ b/compiler/optimizing/loop_optimization.cc
@@ -1175,7 +1175,7 @@ void HLoopOptimization::Vectorize(LoopNode* node,
// for ( ; i < stc; i += 1)
// <loop-body>
if (needs_cleanup) {
- DCHECK(!IsInPredicatedVectorizationMode() || vector_runtime_test_a_ != nullptr);
+ DCHECK_IMPLIES(IsInPredicatedVectorizationMode(), vector_runtime_test_a_ != nullptr);
vector_mode_ = kSequential;
GenerateNewLoop(node,
block,
diff --git a/compiler/optimizing/loop_optimization.h b/compiler/optimizing/loop_optimization.h
index d3583ed8a6..3acd5b191b 100644
--- a/compiler/optimizing/loop_optimization.h
+++ b/compiler/optimizing/loop_optimization.h
@@ -192,7 +192,7 @@ class HLoopOptimization : public HOptimization {
bool TrySetVectorLength(DataType::Type type, uint32_t length) {
bool res = TrySetVectorLengthImpl(length);
// Currently the vectorizer supports only the mode when full SIMD registers are used.
- DCHECK(!res || (DataType::Size(type) * length == GetVectorSizeInBytes()));
+ DCHECK_IMPLIES(res, DataType::Size(type) * length == GetVectorSizeInBytes());
return res;
}
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 00bfc92c2f..0018a5b970 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -719,7 +719,8 @@ void HGraph::ComputeTryBlockInformation() {
// the first predecessor can never be a back edge and therefore it must have
// been visited already and had its try membership set.
HBasicBlock* first_predecessor = block->GetPredecessors()[0];
- DCHECK(!block->IsLoopHeader() || !block->GetLoopInformation()->IsBackEdge(*first_predecessor));
+ DCHECK_IMPLIES(block->IsLoopHeader(),
+ !block->GetLoopInformation()->IsBackEdge(*first_predecessor));
const HTryBoundary* try_entry = first_predecessor->ComputeTryEntryOfSuccessors();
if (try_entry != nullptr &&
(block->GetTryCatchInformation() == nullptr ||
@@ -2468,7 +2469,7 @@ void HBasicBlock::DisconnectAndDelete() {
// control-flow instructions.
for (HBasicBlock* predecessor : predecessors_) {
// We should not see any back edges as they would have been removed by step (3).
- DCHECK(!IsInLoop() || !GetLoopInformation()->IsBackEdge(*predecessor));
+ DCHECK_IMPLIES(IsInLoop(), !GetLoopInformation()->IsBackEdge(*predecessor));
HInstruction* last_instruction = predecessor->GetLastInstruction();
if (last_instruction->IsTryBoundary() && !IsCatchBlock()) {
@@ -3069,7 +3070,7 @@ static void CheckAgainstUpperBound(ReferenceTypeInfo rti, ReferenceTypeInfo uppe
DCHECK(upper_bound_rti.IsSupertypeOf(rti))
<< " upper_bound_rti: " << upper_bound_rti
<< " rti: " << rti;
- DCHECK(!upper_bound_rti.GetTypeHandle()->CannotBeAssignedFromOtherTypes() || rti.IsExact())
+ DCHECK_IMPLIES(upper_bound_rti.GetTypeHandle()->CannotBeAssignedFromOtherTypes(), rti.IsExact())
<< " upper_bound_rti: " << upper_bound_rti
<< " rti: " << rti;
}
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index fd3a2757ab..42f03a0bc7 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -6828,7 +6828,7 @@ class HLoadClass final : public HInstruction {
klass_(klass) {
// Referrers class should not need access check. We never inline unverified
// methods so we can't possibly end up in this situation.
- DCHECK(!is_referrers_class || !needs_access_check);
+ DCHECK_IMPLIES(is_referrers_class, !needs_access_check);
SetPackedField<LoadKindField>(
is_referrers_class ? LoadKind::kReferrersClass : LoadKind::kRuntimeCall);
@@ -8261,7 +8261,7 @@ class MoveOperands : public ArenaObject<kArenaAllocMoveOperands> {
}
bool IsEliminated() const {
- DCHECK(!source_.IsInvalid() || destination_.IsInvalid());
+ DCHECK_IMPLIES(source_.IsInvalid(), destination_.IsInvalid());
return source_.IsInvalid();
}
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index cb40a51aee..1bf1586d37 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -817,7 +817,7 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
jit::Jit* jit = Runtime::Current()->GetJit();
if (jit != nullptr) {
ProfilingInfo* info = jit->GetCodeCache()->GetProfilingInfo(method, Thread::Current());
- DCHECK(compilation_kind != CompilationKind::kBaseline || info != nullptr)
+ DCHECK_IMPLIES(compilation_kind == CompilationKind::kBaseline, info != nullptr)
<< "Compiling a method baseline should always have a ProfilingInfo";
graph->SetProfilingInfo(info);
}
@@ -1107,7 +1107,7 @@ CompiledMethod* OptimizingCompiler::Compile(const dex::CodeItem* code_item,
// regressing.
std::string method_name = dex_file.PrettyMethod(method_idx);
bool shouldCompile = method_name.find("$opt$") != std::string::npos;
- DCHECK((compiled_method != nullptr) || !shouldCompile) << "Didn't compile " << method_name;
+ DCHECK_IMPLIES(compiled_method == nullptr, !shouldCompile) << "Didn't compile " << method_name;
}
return compiled_method;
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index 1b2f71f7a7..e6024b08cb 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -147,7 +147,7 @@ void ReferenceTypePropagation::ValidateTypes() {
} else if (instr->IsLoadClass()) {
HLoadClass* cls = instr->AsLoadClass();
DCHECK(cls->GetReferenceTypeInfo().IsExact());
- DCHECK(!cls->GetLoadedClassRTI().IsValid() || cls->GetLoadedClassRTI().IsExact());
+ DCHECK_IMPLIES(cls->GetLoadedClassRTI().IsValid(), cls->GetLoadedClassRTI().IsExact());
} else if (instr->IsNullCheck()) {
DCHECK(instr->GetReferenceTypeInfo().IsEqual(instr->InputAt(0)->GetReferenceTypeInfo()))
<< "NullCheck " << instr->GetReferenceTypeInfo()
@@ -155,10 +155,11 @@ void ReferenceTypePropagation::ValidateTypes() {
}
} else if (instr->IsInstanceOf()) {
HInstanceOf* iof = instr->AsInstanceOf();
- DCHECK(!iof->GetTargetClassRTI().IsValid() || iof->GetTargetClassRTI().IsExact());
+ DCHECK_IMPLIES(iof->GetTargetClassRTI().IsValid(), iof->GetTargetClassRTI().IsExact());
} else if (instr->IsCheckCast()) {
HCheckCast* check = instr->AsCheckCast();
- DCHECK(!check->GetTargetClassRTI().IsValid() || check->GetTargetClassRTI().IsExact());
+ DCHECK_IMPLIES(check->GetTargetClassRTI().IsValid(),
+ check->GetTargetClassRTI().IsExact());
}
}
}
diff --git a/compiler/optimizing/register_allocation_resolver.cc b/compiler/optimizing/register_allocation_resolver.cc
index 670db42f6f..875c633889 100644
--- a/compiler/optimizing/register_allocation_resolver.cc
+++ b/compiler/optimizing/register_allocation_resolver.cc
@@ -75,7 +75,7 @@ void RegisterAllocationResolver::Resolve(ArrayRef<HInstruction* const> safepoint
}
} else if (instruction->IsCurrentMethod()) {
// The current method is always at offset 0.
- DCHECK(!current->HasSpillSlot() || (current->GetSpillSlot() == 0));
+ DCHECK_IMPLIES(current->HasSpillSlot(), (current->GetSpillSlot() == 0));
} else if (instruction->IsPhi() && instruction->AsPhi()->IsCatchPhi()) {
DCHECK(current->HasSpillSlot());
size_t slot = current->GetSpillSlot()
@@ -306,9 +306,9 @@ void RegisterAllocationResolver::ConnectSiblings(LiveInterval* interval) {
size_t num_of_slots = interval->NumberOfSpillSlotsNeeded();
loc = Location::StackSlotByNumOfSlots(num_of_slots, interval->GetParent()->GetSpillSlot());
- CHECK(!loc.IsSIMDStackSlot() ||
- (codegen_->GetSIMDRegisterWidth() / kVRegSize == num_of_slots)) <<
- "Unexpected number of spill slots";
+ CHECK_IMPLIES(loc.IsSIMDStackSlot(),
+ (codegen_->GetSIMDRegisterWidth() / kVRegSize == num_of_slots))
+ << "Unexpected number of spill slots";
InsertMoveAfter(interval->GetDefinedBy(), interval->ToLocation(), loc);
}
UsePositionList::const_iterator use_it = current->GetUses().begin();
@@ -468,9 +468,9 @@ void RegisterAllocationResolver::ConnectSplitSiblings(LiveInterval* interval,
DCHECK(defined_by->IsCurrentMethod());
size_t num_of_slots = parent->NumberOfSpillSlotsNeeded();
location_source = Location::StackSlotByNumOfSlots(num_of_slots, parent->GetSpillSlot());
- CHECK(!location_source.IsSIMDStackSlot() ||
- (codegen_->GetSIMDRegisterWidth() == num_of_slots * kVRegSize)) <<
- "Unexpected number of spill slots";
+ CHECK_IMPLIES(location_source.IsSIMDStackSlot(),
+ (codegen_->GetSIMDRegisterWidth() == num_of_slots * kVRegSize))
+ << "Unexpected number of spill slots";
}
} else {
DCHECK(source != nullptr);
diff --git a/compiler/optimizing/register_allocator_graph_color.cc b/compiler/optimizing/register_allocator_graph_color.cc
index b9b8f8b2f9..684aaf5750 100644
--- a/compiler/optimizing/register_allocator_graph_color.cc
+++ b/compiler/optimizing/register_allocator_graph_color.cc
@@ -286,7 +286,7 @@ class InterferenceNode : public ArenaObject<kArenaAllocRegisterAllocator> {
size_t GetOutDegree() const {
// Pre-colored nodes have infinite degree.
- DCHECK(!IsPrecolored() || out_degree_ == std::numeric_limits<size_t>::max());
+ DCHECK_IMPLIES(IsPrecolored(), out_degree_ == std::numeric_limits<size_t>::max());
return out_degree_;
}
@@ -704,7 +704,8 @@ void RegisterAllocatorGraphColor::AllocateRegisters() {
codegen_->AddAllocatedRegister(high_reg);
}
} else {
- DCHECK(!interval->HasHighInterval() || !interval->GetHighInterval()->HasRegister());
+ DCHECK_IMPLIES(interval->HasHighInterval(),
+ !interval->GetHighInterval()->HasRegister());
}
}
diff --git a/compiler/optimizing/register_allocator_linear_scan.cc b/compiler/optimizing/register_allocator_linear_scan.cc
index b481f64489..833c24d5bb 100644
--- a/compiler/optimizing/register_allocator_linear_scan.cc
+++ b/compiler/optimizing/register_allocator_linear_scan.cc
@@ -543,7 +543,7 @@ void RegisterAllocatorLinearScan::LinearScan() {
// Make sure we are going in the right order.
DCHECK(unhandled_->empty() || unhandled_->back()->GetStart() >= current->GetStart());
// Make sure a low interval is always with a high.
- DCHECK(!current->IsLowInterval() || unhandled_->back()->IsHighInterval());
+ DCHECK_IMPLIES(current->IsLowInterval(), unhandled_->back()->IsHighInterval());
// Make sure a high interval is always with a low.
DCHECK(current->IsLowInterval() || unhandled_->empty() ||
@@ -914,7 +914,7 @@ bool RegisterAllocatorLinearScan::AllocateBlockedReg(LiveInterval* current) {
// We must still proceed in order to split currently active and inactive
// uses of the high interval's register, and put the high interval in the
// active set.
- DCHECK(first_register_use != kNoLifetime || (current->GetNextSibling() != nullptr));
+ DCHECK_IMPLIES(first_register_use == kNoLifetime, current->GetNextSibling() != nullptr);
} else if (first_register_use == kNoLifetime) {
AllocateSpillSlotFor(current);
return false;
@@ -1128,7 +1128,7 @@ void RegisterAllocatorLinearScan::AllocateSpillSlotFor(LiveInterval* interval) {
}
HInstruction* defined_by = parent->GetDefinedBy();
- DCHECK(!defined_by->IsPhi() || !defined_by->AsPhi()->IsCatchPhi());
+ DCHECK_IMPLIES(defined_by->IsPhi(), !defined_by->AsPhi()->IsCatchPhi());
if (defined_by->IsParameterValue()) {
// Parameters have their own stack slot.
diff --git a/compiler/optimizing/scheduler.cc b/compiler/optimizing/scheduler.cc
index cb340f530b..8f18ccff5f 100644
--- a/compiler/optimizing/scheduler.cc
+++ b/compiler/optimizing/scheduler.cc
@@ -316,7 +316,7 @@ void SchedulingGraph::AddDependencies(SchedulingNode* instruction_node,
}
// Scheduling barrier dependencies.
- DCHECK(!is_scheduling_barrier || contains_scheduling_barrier_);
+ DCHECK_IMPLIES(is_scheduling_barrier, contains_scheduling_barrier_);
if (contains_scheduling_barrier_) {
// A barrier depends on instructions after it. And instructions before the
// barrier depend on it.
diff --git a/compiler/optimizing/scheduler_arm.cc b/compiler/optimizing/scheduler_arm.cc
index f9004d867b..965e1bd9f4 100644
--- a/compiler/optimizing/scheduler_arm.cc
+++ b/compiler/optimizing/scheduler_arm.cc
@@ -1050,8 +1050,8 @@ void SchedulingLatencyVisitorARM::VisitStaticFieldSet(HStaticFieldSet* instructi
void SchedulingLatencyVisitorARM::VisitSuspendCheck(HSuspendCheck* instruction) {
HBasicBlock* block = instruction->GetBlock();
- DCHECK((block->GetLoopInformation() != nullptr) ||
- (block->IsEntryBlock() && instruction->GetNext()->IsGoto()));
+ DCHECK_IMPLIES(block->GetLoopInformation() == nullptr,
+ block->IsEntryBlock() && instruction->GetNext()->IsGoto());
// Users do not use any data results.
last_visited_latency_ = 0;
}
diff --git a/compiler/optimizing/scheduler_arm64.cc b/compiler/optimizing/scheduler_arm64.cc
index 7bcf4e75a9..4f504c2100 100644
--- a/compiler/optimizing/scheduler_arm64.cc
+++ b/compiler/optimizing/scheduler_arm64.cc
@@ -187,8 +187,8 @@ void SchedulingLatencyVisitorARM64::VisitStaticFieldGet(HStaticFieldGet* ATTRIBU
void SchedulingLatencyVisitorARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
HBasicBlock* block = instruction->GetBlock();
- DCHECK((block->GetLoopInformation() != nullptr) ||
- (block->IsEntryBlock() && instruction->GetNext()->IsGoto()));
+ DCHECK_IMPLIES(block->GetLoopInformation() == nullptr,
+ block->IsEntryBlock() && instruction->GetNext()->IsGoto());
// Users do not use any data results.
last_visited_latency_ = 0;
}
diff --git a/compiler/optimizing/ssa_phi_elimination.cc b/compiler/optimizing/ssa_phi_elimination.cc
index 3fcb72e4fb..8fd6962500 100644
--- a/compiler/optimizing/ssa_phi_elimination.cc
+++ b/compiler/optimizing/ssa_phi_elimination.cc
@@ -189,8 +189,8 @@ bool SsaRedundantPhiElimination::Run() {
// We iterate over the array as long as it grows.
for (size_t i = 0; i < cycle_worklist.size(); ++i) {
HPhi* current = cycle_worklist[i];
- DCHECK(!current->IsLoopHeaderPhi() ||
- current->GetBlock()->IsLoopPreHeaderFirstPredecessor());
+ DCHECK_IMPLIES(current->IsLoopHeaderPhi(),
+ current->GetBlock()->IsLoopPreHeaderFirstPredecessor());
for (HInstruction* input : current->GetInputs()) {
if (input == current) {
diff --git a/compiler/optimizing/superblock_cloner.cc b/compiler/optimizing/superblock_cloner.cc
index b9684917f1..b46d193541 100644
--- a/compiler/optimizing/superblock_cloner.cc
+++ b/compiler/optimizing/superblock_cloner.cc
@@ -193,7 +193,7 @@ void SuperblockCloner::RemapOrigInternalOrIncomingEdge(HBasicBlock* orig_block,
// orig_block will be put at the end of the copy_succ's predecessors list; that corresponds
// to the previously added phi inputs position.
orig_block->ReplaceSuccessor(orig_succ, copy_succ);
- DCHECK(!first_phi_met || copy_succ->GetPredecessors().size() == phi_input_count);
+ DCHECK_IMPLIES(first_phi_met, copy_succ->GetPredecessors().size() == phi_input_count);
}
void SuperblockCloner::AddCopyInternalEdge(HBasicBlock* orig_block,