Quick: Redefine the notion of back-edges.
Redefine a back-edge to mean an edge to a loop head instead
of comparing instruction offsets. Also generate suspend
checks on fall-through edges to a loop head; since such an
edge has no branch instruction to carry the check, insert an
extra GOTO for it.
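For illustration, a minimal standalone sketch of the new
back-edge test (it mirrors the IsBackEdge change in
mir_graph.h below; the free function and plain vector are
illustrative stand-ins, not the MIRGraph API):

    #include <cstdint>
    #include <vector>

    using BasicBlockId = uint16_t;

    // With blocks numbered in a loop-aware topological order (a loop head
    // precedes all blocks of its loop), an edge is a back edge exactly when
    // it does not advance in that order, i.e. when it targets a loop head
    // from inside that loop.
    bool IsBackEdgeSketch(const std::vector<uint16_t>& topo_index,
                          BasicBlockId branch_bb, BasicBlockId target_bb) {
      return topo_index[target_bb] <= topo_index[branch_bb];
    }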
Add suspend checks to fused cmp instructions.
Rewrite suspend check elimination to track whether there is
an invoke on each path from the loop head to a given back
edge, instead of using domination info to look for a basic
block with an invoke that must lie on each path. Ignore
invokes of intrinsics and move the optimization to its own
pass.
The new loops in 109-suspend-check should guard against
regressions related to intrinsics and fused cmp instructions.
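For reference, a rough sketch of the path-tracking idea
behind the rewritten pass (the actual implementation works
on MIRGraph basic blocks and lives outside this header; the
standalone types and names below are hypothetical):

    #include <cstddef>
    #include <vector>

    struct LoopBlock {
      std::vector<size_t> preds;  // In-loop predecessors, as indexes into the vector.
      bool has_invoke = false;    // Contains a non-intrinsic invoke.
    };

    // covered[b] is true iff every path from the loop head (blocks[0]) to
    // block b passes through a non-intrinsic invoke; a back edge leaving a
    // covered block needs no suspend check. Assumes blocks are listed in
    // topological order within the loop body, head first.
    std::vector<bool> ComputeCovered(const std::vector<LoopBlock>& blocks) {
      std::vector<bool> covered(blocks.size(), false);
      covered[0] = blocks[0].has_invoke;  // The head itself may contain one.
      for (size_t b = 1; b < blocks.size(); ++b) {
        bool all_preds_covered = !blocks[b].preds.empty();
        for (size_t pred : blocks[b].preds) {
          if (!covered[pred]) {
            all_preds_covered = false;
            break;
          }
        }
        covered[b] = blocks[b].has_invoke || all_preds_covered;
      }
      return covered;
    }

The per-loop result is then recorded in the new
suspend_checks_in_loops_ field (see below), with the loop
head's nesting depth selecting the bit.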
Bug: 18522004
Change-Id: I96ac818f76ccf9419a6e70e9ec00555f9d487a9e
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index 1a18841..208cbdf 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -34,6 +34,7 @@
namespace art {
+class DexFileMethodInliner;
class GlobalValueNumbering;
enum DataFlowAttributePos {
@@ -131,11 +132,9 @@
enum OatMethodAttributes {
kIsLeaf, // Method is leaf.
- kHasLoop, // Method contains simple loop.
};
#define METHOD_IS_LEAF (1 << kIsLeaf)
-#define METHOD_HAS_LOOP (1 << kHasLoop)
// Minimum field size to contain Dalvik v_reg number.
#define VREG_NUM_WIDTH 16
@@ -731,6 +730,10 @@
return max_nested_loops_;
}
+ bool IsLoopHead(BasicBlockId bb_id) {
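+    // topological_order_loop_ends_ holds, for a loop head's topological
+    // index, the index one past its loop; it is 0u for blocks that do not
+    // head a loop.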
+ return topological_order_loop_ends_[topological_order_indexes_[bb_id]] != 0u;
+ }
+
bool IsConst(int32_t s_reg) const {
return is_constant_v_->IsBitSet(s_reg);
}
@@ -969,13 +972,23 @@
return reg_location_[method_sreg_];
}
- bool IsBackedge(BasicBlock* branch_bb, BasicBlockId target_bb_id) {
- return ((target_bb_id != NullBasicBlockId) &&
- (GetBasicBlock(target_bb_id)->start_offset <= branch_bb->start_offset));
+ bool IsBackEdge(BasicBlock* branch_bb, BasicBlockId target_bb_id) {
+ DCHECK_NE(target_bb_id, NullBasicBlockId);
+ DCHECK_LT(target_bb_id, topological_order_indexes_.size());
+ DCHECK_LT(branch_bb->id, topological_order_indexes_.size());
+ return topological_order_indexes_[target_bb_id] <= topological_order_indexes_[branch_bb->id];
}
- bool IsBackwardsBranch(BasicBlock* branch_bb) {
- return IsBackedge(branch_bb, branch_bb->taken) || IsBackedge(branch_bb, branch_bb->fall_through);
+ bool IsSuspendCheckEdge(BasicBlock* branch_bb, BasicBlockId target_bb_id) {
+ if (!IsBackEdge(branch_bb, target_bb_id)) {
+ return false;
+ }
+ if (suspend_checks_in_loops_ == nullptr) {
+ // We didn't run suspend check elimination.
+ return true;
+ }
+ uint16_t target_depth = GetBasicBlock(target_bb_id)->nesting_depth;
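+    // Bit (target_depth - 1) set means every path from this loop head to the
+    // branch contains an invoke, so the back-edge suspend check can be elided.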
+ return (suspend_checks_in_loops_[branch_bb->id] & (1u << (target_depth - 1u))) == 0;
}
void CountBranch(DexOffset target_offset) {
@@ -1055,6 +1068,9 @@
bool ApplyGlobalValueNumberingGate();
bool ApplyGlobalValueNumbering(BasicBlock* bb);
void ApplyGlobalValueNumberingEnd();
+ bool EliminateSuspendChecksGate();
+ bool EliminateSuspendChecks(BasicBlock* bb);
+ void EliminateSuspendChecksEnd();
uint16_t GetGvnIFieldId(MIR* mir) const {
DCHECK(IsInstructionIGetOrIPut(mir->dalvikInsn.opcode));
@@ -1165,7 +1181,7 @@
* @brief Count the uses in the BasicBlock
* @param bb the BasicBlock
*/
- void CountUses(class BasicBlock* bb);
+ void CountUses(BasicBlock* bb);
static uint64_t GetDataFlowAttributes(Instruction::Code opcode);
static uint64_t GetDataFlowAttributes(MIR* mir);
@@ -1209,20 +1225,6 @@
void HandleSSADef(int* defs, int dalvik_reg, int reg_index);
bool InferTypeAndSize(BasicBlock* bb, MIR* mir, bool changed);
- // Used for removing redudant suspend tests
- void AppendGenSuspendTestList(BasicBlock* bb) {
- if (gen_suspend_test_list_.size() == 0 ||
- gen_suspend_test_list_.back() != bb) {
- gen_suspend_test_list_.push_back(bb);
- }
- }
-
- /* This is used to check if there is already a method call dominating the
- * source basic block of a backedge and being dominated by the target basic
- * block of the backedge.
- */
- bool HasSuspendTestBetween(BasicBlock* source, BasicBlockId target_id);
-
protected:
int FindCommonParent(int block1, int block2);
void ComputeSuccLineIn(ArenaBitVector* dest, const ArenaBitVector* src1,
@@ -1338,6 +1340,10 @@
uint16_t* ifield_ids_; // Part of GVN/LVN but cached here for LVN to avoid recalculation.
uint16_t* sfield_ids_; // Ditto.
} gvn;
+ // Suspend check elimination.
+ struct {
+ DexFileMethodInliner* inliner;
+ } sce;
} temp_;
static const int kInvalidEntry = -1;
ArenaVector<BasicBlock*> block_list_;
@@ -1373,11 +1379,19 @@
ArenaVector<MirIFieldLoweringInfo> ifield_lowering_infos_;
ArenaVector<MirSFieldLoweringInfo> sfield_lowering_infos_;
ArenaVector<MirMethodLoweringInfo> method_lowering_infos_;
+
+  // In the suspend check elimination pass we determine, for each basic block and each
+  // enclosing loop, whether there is guaranteed to be a suspend check on every path
+  // from the loop head to this block. If so, we can eliminate the back-edge suspend
+  // check. The bb->id is the index into suspend_checks_in_loops_ and the loop head's
+  // nesting depth determines the bit index (depth - 1) in suspend_checks_in_loops_[bb->id].
+ uint32_t* suspend_checks_in_loops_;
+
static const uint64_t oat_data_flow_attributes_[kMirOpLast];
- ArenaVector<BasicBlock*> gen_suspend_test_list_; // List of blocks containing suspend tests
friend class MirOptimizationTest;
friend class ClassInitCheckEliminationTest;
+ friend class SuspendCheckEliminationTest;
friend class NullCheckEliminationTest;
friend class GlobalValueNumberingTest;
friend class LocalValueNumberingTest;