Fix conditional jump over jmp (X86/X86-64/ARM32)
Optimize code generation for 'if' statements: when the next block to be
generated is the 'true' block, reverse the condition and branch directly
to the 'false' block instead of emitting a conditional jump that only
skips over an unconditional jmp.
Add an X86-64 test for this case.
Note that ARM64 and MIPS64 have not been updated.
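For illustration, a schematic sketch of the intended effect on the emitted
branch sequence when the 'true' block is the fall-through. The condition
kLess, its opposite kGreaterEqual, and the target labels are placeholders,
not taken from a real compilation:

  Before: conditional jump over an unconditional jmp.
    __ j(kLess, true_target);     // branches to the block we fall into anyway
    __ jmp(false_target);         // jump emitted for the 'false' successor
    // 'true' block starts here

  After: a single reversed branch straight to the 'false' block.
    __ j(kGreaterEqual, false_target);
    // 'true' block starts here (fall-through)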
Change-Id: Iebb1352feb9d3bd0142d8b0621a2e3069a708ea7
Signed-off-by: Mark Mendell <mark.p.mendell@intel.com>
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 3dc3b7f..6d05293 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -1300,20 +1300,29 @@
DCHECK_EQ(cond_value, 0);
}
} else {
- if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
- // Condition has been materialized, compare the output to 0
+ // Can we optimize the jump if we know that the next block is the true case?
+ HCondition* condition = cond->AsCondition();
+ bool can_jump_to_false = CanReverseCondition(always_true_target, false_target, condition);
+ if (condition == nullptr || condition->NeedsMaterialization()) {
+ // Condition has been materialized, compare the output to 0.
DCHECK(instruction->GetLocations()->InAt(0).IsRegister());
+ if (can_jump_to_false) {
+ __ CompareAndBranchIfZero(instruction->GetLocations()->InAt(0).AsRegister<Register>(),
+ false_target);
+ return;
+ }
__ CompareAndBranchIfNonZero(instruction->GetLocations()->InAt(0).AsRegister<Register>(),
true_target);
} else {
// Condition has not been materialized, use its inputs as the
// comparison and its condition as the branch condition.
- Primitive::Type type =
- cond->IsCondition() ? cond->InputAt(0)->GetType() : Primitive::kPrimInt;
+ Primitive::Type type = (condition != nullptr)
+ ? cond->InputAt(0)->GetType()
+ : Primitive::kPrimInt;
// Is this a long or FP comparison that has been folded into the HCondition?
if (type == Primitive::kPrimLong || Primitive::IsFloatingPointType(type)) {
// Generate the comparison directly.
- GenerateCompareTestAndBranch(instruction->AsIf(), cond->AsCondition(),
+ GenerateCompareTestAndBranch(instruction->AsIf(), condition,
true_target, false_target, always_true_target);
return;
}
@@ -1328,7 +1337,12 @@
DCHECK(right.IsConstant());
GenerateCompareWithImmediate(left, CodeGenerator::GetInt32ValueOf(right.GetConstant()));
}
- __ b(true_target, ARMCondition(cond->AsCondition()->GetCondition()));
+ if (can_jump_to_false) {
+ __ b(false_target, ARMCondition(condition->GetOppositeCondition()));
+ return;
+ }
+
+ __ b(true_target, ARMCondition(condition->GetCondition()));
}
}
if (false_target != nullptr) {
diff --git a/compiler/optimizing/code_generator_utils.cc b/compiler/optimizing/code_generator_utils.cc
index 921c1d8..bf354e7 100644
--- a/compiler/optimizing/code_generator_utils.cc
+++ b/compiler/optimizing/code_generator_utils.cc
@@ -15,6 +15,7 @@
*/
#include "code_generator_utils.h"
+#include "nodes.h"
#include "base/logging.h"
@@ -94,4 +95,19 @@
*shift = is_long ? p - 64 : p - 32;
}
+// Is it valid to reverse the condition? Uses the values supplied to
+// GenerateTestAndBranch() in instruction generators.
+bool CanReverseCondition(Label* always_true_target,
+ Label* false_target,
+ HCondition* condition) {
+ // 'always_true_target' is null when the 'true' path is to the next
+ // block to be generated. Check the type of the condition to ensure that
+ // FP conditions are not swapped. This is for future fusing of HCompare and
+ // HCondition.
+ // Note: If the condition is nullptr, then it is always okay to reverse.
+ return always_true_target == nullptr && false_target != nullptr &&
+ (condition == nullptr ||
+ !Primitive::IsFloatingPointType(condition->InputAt(0)->GetType()));
+}
+
} // namespace art
diff --git a/compiler/optimizing/code_generator_utils.h b/compiler/optimizing/code_generator_utils.h
index 59b495c..628eee8 100644
--- a/compiler/optimizing/code_generator_utils.h
+++ b/compiler/optimizing/code_generator_utils.h
@@ -21,10 +21,19 @@
namespace art {
+class Label;
+class HCondition;
+
// Computes the magic number and the shift needed in the div/rem by constant algorithm, as out
// arguments `magic` and `shift`
void CalculateMagicAndShiftForDivRem(int64_t divisor, bool is_long, int64_t* magic, int* shift);
+// Is it valid to reverse the condition? Uses the values supplied to
+// GenerateTestAndBranch() in instruction generators.
+bool CanReverseCondition(Label* always_true_target,
+ Label* false_target,
+ HCondition* condition);
+
} // namespace art
#endif // ART_COMPILER_OPTIMIZING_CODE_GENERATOR_UTILS_H_
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 0df7e3b..0db5837 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1216,16 +1216,21 @@
DCHECK_EQ(cond_value, 0);
}
} else {
+ HCondition* condition = cond->AsCondition();
bool is_materialized =
- !cond->IsCondition() || cond->AsCondition()->NeedsMaterialization();
+ condition == nullptr || condition->NeedsMaterialization();
// Moves do not affect the eflags register, so if the condition is
// evaluated just before the if, we don't need to evaluate it
// again. We can't use the eflags on long/FP conditions if they are
// materialized due to the complex branching.
- Primitive::Type type = cond->IsCondition() ? cond->InputAt(0)->GetType() : Primitive::kPrimInt;
- bool eflags_set = cond->IsCondition()
- && cond->AsCondition()->IsBeforeWhenDisregardMoves(instruction)
+ Primitive::Type type = (condition != nullptr)
+ ? cond->InputAt(0)->GetType()
+ : Primitive::kPrimInt;
+ bool eflags_set = condition != nullptr
+ && condition->IsBeforeWhenDisregardMoves(instruction)
&& (type != Primitive::kPrimLong && !Primitive::IsFloatingPointType(type));
+ // Can we optimize the jump if we know that the next block is the true case?
+ bool can_jump_to_false = CanReverseCondition(always_true_target, false_target, condition);
if (is_materialized) {
if (!eflags_set) {
// Materialized condition, compare against 0.
@@ -1235,9 +1240,17 @@
} else {
__ cmpl(Address(ESP, lhs.GetStackIndex()), Immediate(0));
}
+ if (can_jump_to_false) {
+ __ j(kEqual, false_target);
+ return;
+ }
__ j(kNotEqual, true_target);
} else {
- __ j(X86Condition(cond->AsCondition()->GetCondition()), true_target);
+ if (can_jump_to_false) {
+ __ j(X86Condition(condition->GetOppositeCondition()), false_target);
+ return;
+ }
+ __ j(X86Condition(condition->GetCondition()), true_target);
}
} else {
// Condition has not been materialized, use its inputs as the
@@ -1247,7 +1260,7 @@
if (type == Primitive::kPrimLong || Primitive::IsFloatingPointType(type)) {
// Generate the comparison directly.
GenerateCompareTestAndBranch(instruction->AsIf(),
- cond->AsCondition(),
+ condition,
true_target,
false_target,
always_true_target);
@@ -1270,7 +1283,13 @@
} else {
__ cmpl(lhs.AsRegister<Register>(), Address(ESP, rhs.GetStackIndex()));
}
- __ j(X86Condition(cond->AsCondition()->GetCondition()), true_target);
+
+ if (can_jump_to_false) {
+ __ j(X86Condition(condition->GetOppositeCondition()), false_target);
+ return;
+ }
+
+ __ j(X86Condition(condition->GetCondition()), true_target);
}
}
if (false_target != nullptr) {
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 5218d70..b5abec2 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1183,16 +1183,20 @@
DCHECK_EQ(cond_value, 0);
}
} else {
- bool is_materialized =
- !cond->IsCondition() || cond->AsCondition()->NeedsMaterialization();
+ HCondition* condition = cond->AsCondition();
+ bool is_materialized = condition == nullptr || condition->NeedsMaterialization();
// Moves do not affect the eflags register, so if the condition is
// evaluated just before the if, we don't need to evaluate it
// again. We can't use the eflags on FP conditions if they are
// materialized due to the complex branching.
- Primitive::Type type = cond->IsCondition() ? cond->InputAt(0)->GetType() : Primitive::kPrimInt;
- bool eflags_set = cond->IsCondition()
- && cond->AsCondition()->IsBeforeWhenDisregardMoves(instruction)
+ Primitive::Type type = (condition != nullptr)
+ ? cond->InputAt(0)->GetType()
+ : Primitive::kPrimInt;
+ bool eflags_set = condition != nullptr
+ && condition->IsBeforeWhenDisregardMoves(instruction)
&& !Primitive::IsFloatingPointType(type);
+ // Can we optimize the jump if we know that the next block is the true case?
+ bool can_jump_to_false = CanReverseCondition(always_true_target, false_target, condition);
if (is_materialized) {
if (!eflags_set) {
@@ -1204,9 +1208,17 @@
__ cmpl(Address(CpuRegister(RSP), lhs.GetStackIndex()),
Immediate(0));
}
+ if (can_jump_to_false) {
+ __ j(kEqual, false_target);
+ return;
+ }
__ j(kNotEqual, true_target);
} else {
- __ j(X86_64IntegerCondition(cond->AsCondition()->GetCondition()), true_target);
+ if (can_jump_to_false) {
+ __ j(X86_64IntegerCondition(condition->GetOppositeCondition()), false_target);
+ return;
+ }
+ __ j(X86_64IntegerCondition(condition->GetCondition()), true_target);
}
} else {
// Condition has not been materialized, use its inputs as the
@@ -1215,7 +1227,7 @@
// Is this a long or FP comparison that has been folded into the HCondition?
if (type == Primitive::kPrimLong || Primitive::IsFloatingPointType(type)) {
// Generate the comparison directly.
- GenerateCompareTestAndBranch(instruction->AsIf(), cond->AsCondition(),
+ GenerateCompareTestAndBranch(instruction->AsIf(), condition,
true_target, false_target, always_true_target);
return;
}
@@ -1235,7 +1247,13 @@
__ cmpl(lhs.AsRegister<CpuRegister>(),
Address(CpuRegister(RSP), rhs.GetStackIndex()));
}
- __ j(X86_64IntegerCondition(cond->AsCondition()->GetCondition()), true_target);
+
+ if (can_jump_to_false) {
+ __ j(X86_64IntegerCondition(condition->GetOppositeCondition()), false_target);
+ return;
+ }
+
+ __ j(X86_64IntegerCondition(condition->GetCondition()), true_target);
}
}
if (false_target != nullptr) {