author Santiago Aboy Solanes <solanes@google.com> 2023-11-16 15:35:26 +0000
committer Santiago Aboy Solanes <solanes@google.com> 2023-11-30 14:49:43 +0000
commit 060ad735fdeb2d4b8cf5a115762e994def2eba67 (patch)
tree 6c457e5508e51b5d24d0ca0949d303fc9859a477 /compiler
parent 3cd227c95bc213183899dca912d2a96fba7307a8 (diff)
Constant fold DivideUnsigned intrinsic
Bug: 309886589
Test: art/test/testrunner/testrunner.py --host --64 -b --optimizing
Change-Id: If60ccbea30b3fe16b86c1630409b540b05b14c56
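For context, Integer.divideUnsigned and Long.divideUnsigned reinterpret both operands as unsigned before dividing, which is the arithmetic the fold below has to reproduce. A minimal standalone C++ sketch of that computation (illustrative only, not ART code):

    #include <cstdint>

    // Mirrors Integer.divideUnsigned(int, int): reinterpret both 32-bit
    // operands as unsigned, divide, then reinterpret the quotient as signed.
    int32_t DivideUnsigned32(int32_t dividend, int32_t divisor) {
      uint32_t quotient =
          static_cast<uint32_t>(dividend) / static_cast<uint32_t>(divisor);
      return static_cast<int32_t>(quotient);
    }

    // Example: DivideUnsigned32(-2, 2) == 2147483647 (0x7FFFFFFF), whereas
    // the signed division -2 / 2 yields -1.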
Diffstat (limited to 'compiler')
-rw-r--r--  compiler/optimizing/constant_folding.cc | 51
-rw-r--r--  compiler/optimizing/inliner.cc          | 15
2 files changed, 61 insertions(+), 5 deletions(-)
diff --git a/compiler/optimizing/constant_folding.cc b/compiler/optimizing/constant_folding.cc
index 29f5d5caee..66bbf548bb 100644
--- a/compiler/optimizing/constant_folding.cc
+++ b/compiler/optimizing/constant_folding.cc
@@ -60,6 +60,7 @@ class HConstantFoldingVisitor final : public HGraphDelegateVisitor {
void FoldReverseIntrinsic(HInvoke* invoke);
void FoldReverseBytesIntrinsic(HInvoke* invoke);
void FoldBitCountIntrinsic(HInvoke* invoke);
+ void FoldDivideUnsignedIntrinsic(HInvoke* invoke);
void FoldHighestOneBitIntrinsic(HInvoke* invoke);
void FoldLowestOneBitIntrinsic(HInvoke* invoke);
void FoldNumberOfLeadingZerosIntrinsic(HInvoke* invoke);
@@ -379,6 +380,10 @@ void HConstantFoldingVisitor::VisitInvoke(HInvoke* inst) {
case Intrinsics::kLongBitCount:
FoldBitCountIntrinsic(inst);
break;
+ case Intrinsics::kIntegerDivideUnsigned:
+ case Intrinsics::kLongDivideUnsigned:
+ FoldDivideUnsignedIntrinsic(inst);
+ break;
case Intrinsics::kIntegerHighestOneBit:
case Intrinsics::kLongHighestOneBit:
FoldHighestOneBitIntrinsic(inst);
@@ -467,6 +472,52 @@ void HConstantFoldingVisitor::FoldBitCountIntrinsic(HInvoke* inst) {
inst->GetBlock()->RemoveInstruction(inst);
}
+void HConstantFoldingVisitor::FoldDivideUnsignedIntrinsic(HInvoke* inst) {
+ DCHECK(inst->GetIntrinsic() == Intrinsics::kIntegerDivideUnsigned ||
+ inst->GetIntrinsic() == Intrinsics::kLongDivideUnsigned);
+
+ HInstruction* divisor = inst->InputAt(1);
+ if (!divisor->IsConstant()) {
+ return;
+ }
+ DCHECK_IMPLIES(inst->GetIntrinsic() == Intrinsics::kIntegerDivideUnsigned,
+ divisor->IsIntConstant());
+ DCHECK_IMPLIES(inst->GetIntrinsic() == Intrinsics::kLongDivideUnsigned,
+ divisor->IsLongConstant());
+ const bool is_int_intrinsic = inst->GetIntrinsic() == Intrinsics::kIntegerDivideUnsigned;
+ if ((is_int_intrinsic && divisor->AsIntConstant()->IsArithmeticZero()) ||
+ (!is_int_intrinsic && divisor->AsLongConstant()->IsArithmeticZero())) {
+ // We will be throwing; don't constant fold.
+ inst->SetAlwaysThrows(true);
+ GetGraph()->SetHasAlwaysThrowingInvokes(true);
+ return;
+ }
+
+ HInstruction* dividend = inst->InputAt(0);
+ if (!dividend->IsConstant()) {
+ return;
+ }
+ DCHECK_IMPLIES(inst->GetIntrinsic() == Intrinsics::kIntegerDivideUnsigned,
+ dividend->IsIntConstant());
+ DCHECK_IMPLIES(inst->GetIntrinsic() == Intrinsics::kLongDivideUnsigned,
+ dividend->IsLongConstant());
+
+ if (is_int_intrinsic) {
+ uint32_t dividend_val =
+ dchecked_integral_cast<uint32_t>(dividend->AsIntConstant()->GetValueAsUint64());
+ uint32_t divisor_val =
+ dchecked_integral_cast<uint32_t>(divisor->AsIntConstant()->GetValueAsUint64());
+ inst->ReplaceWith(GetGraph()->GetIntConstant(static_cast<int32_t>(dividend_val / divisor_val)));
+ } else {
+ uint64_t dividend_val = dividend->AsLongConstant()->GetValueAsUint64();
+ uint64_t divisor_val = divisor->AsLongConstant()->GetValueAsUint64();
+ inst->ReplaceWith(
+ GetGraph()->GetLongConstant(static_cast<int64_t>(dividend_val / divisor_val)));
+ }
+
+ inst->GetBlock()->RemoveInstruction(inst);
+}
+
void HConstantFoldingVisitor::FoldHighestOneBitIntrinsic(HInvoke* inst) {
DCHECK(inst->GetIntrinsic() == Intrinsics::kIntegerHighestOneBit ||
inst->GetIntrinsic() == Intrinsics::kLongHighestOneBit);
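Note the ordering in the new fold above: the divisor is inspected first, so a constant zero divisor marks the invoke as always throwing (divideUnsigned throws ArithmeticException on a zero divisor) even when the dividend is unknown, and folding only happens once both inputs are constants. A condensed sketch of that decision logic, using std::optional as a hypothetical stand-in for ART's constant/non-constant HInstruction queries (not the real API):

    #include <cstdint>
    #include <optional>

    // nullopt models a non-constant input; a populated optional models an
    // HIntConstant. Returns the folded value, or nullopt if no fold applies.
    std::optional<int32_t> TryFoldDivideUnsigned32(
        std::optional<int32_t> dividend,
        std::optional<int32_t> divisor,
        bool* always_throws) {
      if (!divisor.has_value()) {
        return std::nullopt;  // Divisor unknown: nothing to do.
      }
      if (*divisor == 0) {
        *always_throws = true;  // Known division by zero: keep the invoke.
        return std::nullopt;
      }
      if (!dividend.has_value()) {
        return std::nullopt;  // Divisor is usable but the dividend is unknown.
      }
      return static_cast<int32_t>(static_cast<uint32_t>(*dividend) /
                                  static_cast<uint32_t>(*divisor));
    }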
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 9d80db0de0..f65941090d 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -144,11 +144,6 @@ bool HInliner::Run() {
}
bool did_inline = false;
- // The inliner is the only phase that sets invokes as `always throwing`, and since we only run the
- // inliner once per graph this value should always be false at the beginning of the inlining
- // phase. This is important since we use `HasAlwaysThrowingInvokes` to know whether the inliner
- // phase performed a relevant change in the graph.
- DCHECK(!graph_->HasAlwaysThrowingInvokes());
// Initialize the number of instructions for the method being compiled. Recursive calls
// to HInliner::Run have already updated the instruction count.
@@ -210,6 +205,16 @@ bool HInliner::Run() {
// We return true if we either inlined at least one method, or we marked one of our methods as
// always throwing.
+ // To check if we added an always throwing method we can either:
+ // 1) Pass a boolean throughout the pipeline and get an accurate result, or
+ // 2) Just check whether the `HasAlwaysThrowingInvokes()` flag is true now. This is not 100%
+ // accurate, but the only other place where we set `HasAlwaysThrowingInvokes` is when constant
+ // folding the DivideUnsigned intrinsics with a divisor known to be 0. That case is rare
+ // enough that changing the pipeline for it is not worth it. In the false positive case
+ // (i.e. A) we didn't inline at all, B) the graph already had an always throwing invoke, and
+ // C) we didn't set any new always throwing invokes), we will run constant folding,
+ // instruction simplifier, and dead code elimination one more time even though they shouldn't
+ // change anything. There's no false negative case.
return did_inline || graph_->HasAlwaysThrowingInvokes();
}
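The comment above is a deliberate trade-off: a false positive only causes idempotent cleanup passes to run once more. A toy sketch of the driver-side consequence, with illustrative names rather than ART's actual pass manager:

    struct Graph {
      bool has_always_throwing_invokes = false;
    };

    // If the flag was already set before inlining (e.g. by constant folding a
    // DivideUnsigned with a zero divisor), the inliner reports a change it did
    // not make, and constant folding, instruction simplifier, and dead code
    // elimination run one extra, harmless iteration.
    bool InlinerChangedGraph(const Graph& graph, bool did_inline) {
      return did_inline || graph.has_always_throwing_invokes;
    }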