 src/compiler/Compiler.h           |  1 +
 src/compiler/CompilerIR.h         | 15 +++++++++++++++
 src/compiler/Dataflow.h           |  1 +
 src/compiler/Frontend.cc          | 22 ++++++++++++++++++++++
 src/compiler/SSATransformation.cc | 36 +++++++++++++++++++---------------
 5 files changed, 60 insertions(+), 15 deletions(-)
diff --git a/src/compiler/Compiler.h b/src/compiler/Compiler.h
index 24bc8af169..05e05b676b 100644
--- a/src/compiler/Compiler.h
+++ b/src/compiler/Compiler.h
@@ -38,6 +38,7 @@ enum optControlVector {
kNullCheckElimination,
kPromoteRegs,
kTrackLiveTemps,
+ kSkipLargeMethodOptimization,
};
extern uint32_t compilerOptimizerDisableFlags;
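
kSkipLargeMethodOptimization is a new bit position in the
compilerOptimizerDisableFlags mask. A minimal sketch of how the heuristic
could be switched off from compiler setup code; it mirrors the commented-out
line added to Frontend.cc below, and the call site here is illustrative, not
part of this change:

    // Force full dataflow analysis even for very large methods by
    // disabling the skip-large-method heuristic.
    compilerOptimizerDisableFlags |= (1 << kSkipLargeMethodOptimization);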
diff --git a/src/compiler/CompilerIR.h b/src/compiler/CompilerIR.h
index 317924b31e..76649d5980 100644
--- a/src/compiler/CompilerIR.h
+++ b/src/compiler/CompilerIR.h
@@ -60,6 +60,19 @@ typedef struct RegLocation {
#define INVALID_REG (0xFF)
#define INVALID_OFFSET (-1)
+/*
+ * Some code patterns, in particular initialization sequences, cause the
+ * generation of excessively large methods. There is little benefit in
+ * optimizing these methods, and the cost of doing so can be very high.
+ * We attempt to identify these cases and skip most dataflow analysis for
+ * them. Two thresholds are used: one for known initializers and one for
+ * everything else. Note: dataflow analysis is required for floating point
+ * type inference, so if a method contains any non-move fp operation,
+ * dataflow is performed regardless of block count.
+ */
+#define MANY_BLOCKS_INITIALIZER 200 /* Threshold for known initializers */
+#define MANY_BLOCKS 3000 /* Threshold for all other methods */
+
typedef enum BBType {
kEntryBlock,
kDalvikByteCode,
@@ -315,6 +328,8 @@ typedef struct CompilationUnit {
GrowableList fillArrayData;
const u2* insns;
u4 insnsSize;
+ bool usesFP; // Method contains at least 1 non-move FP operation
+ bool disableDataflow; // Skip dataflow analysis if possible
std::map<unsigned int, BasicBlock*> blockMap; // findBlock lookup cache
} CompilationUnit;
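
Taken together, the two thresholds and the two new CompilationUnit fields
amount to a simple predicate. A hedged sketch of that predicate follows; the
helper name shouldSkipDataflow is hypothetical, and the real check is inlined
in Frontend.cc below:

    // Hypothetical helper restating the heuristic described above.
    static bool shouldSkipDataflow(const CompilationUnit* cUnit,
                                   bool isInitializer)
    {
        if (cUnit->usesFP)
            return false;  // fp type inference needs dataflow analysis
        if (cUnit->disableOpt & (1 << kSkipLargeMethodOptimization))
            return false;  // heuristic explicitly disabled
        int limit = isInitializer ? MANY_BLOCKS_INITIALIZER : MANY_BLOCKS;
        return cUnit->numBlocks > limit;
    }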
diff --git a/src/compiler/Dataflow.h b/src/compiler/Dataflow.h
index e810e528c1..cd2b1cb5cc 100644
--- a/src/compiler/Dataflow.h
+++ b/src/compiler/Dataflow.h
@@ -106,6 +106,7 @@ typedef enum DataFlowAttributePos {
#define DF_B_IS_REG (DF_UB | DF_UB_WIDE)
#define DF_C_IS_REG (DF_UC | DF_UC_WIDE)
#define DF_IS_GETTER_OR_SETTER (DF_IS_GETTER | DF_IS_SETTER)
+#define DF_USES_FP (DF_FP_A | DF_FP_B | DF_FP_C)
extern int oatDataFlowAttributes[kMirOpLast];
diff --git a/src/compiler/Frontend.cc b/src/compiler/Frontend.cc
index 1b1215a6bd..c5692bc10d 100644
--- a/src/compiler/Frontend.cc
+++ b/src/compiler/Frontend.cc
@@ -32,6 +32,7 @@ uint32_t compilerOptimizerDisableFlags = 0 | // Disable specific optimizations
//(1 << kNullCheckElimination) |
//(1 << kPromoteRegs) |
//(1 << kTrackLiveTemps) |
+ //(1 << kSkipLargeMethodOptimization) |
0;
uint32_t compilerDebugFlags = 0 | // Enable debug/testing modes
@@ -840,6 +841,9 @@ CompiledMethod* oatCompileMethod(const Compiler& compiler, const DexFile::CodeIt
codePtr += width;
int flags = dexGetFlagsFromOpcode(insn->dalvikInsn.opcode);
+ cUnit->usesFP |= (oatDataFlowAttributes[insn->dalvikInsn.opcode] &
+ DF_USES_FP);
+
if (flags & kInstrCanBranch) {
curBlock = processCanBranch(cUnit.get(), curBlock, insn, curOffset,
width, flags, codePtr, codeEnd);
@@ -899,6 +903,24 @@ CompiledMethod* oatCompileMethod(const Compiler& compiler, const DexFile::CodeIt
}
}
+ if (!cUnit->usesFP &&
+ !(cUnit->disableOpt & (1 << kSkipLargeMethodOptimization))) {
+ if ((cUnit->numBlocks > MANY_BLOCKS) ||
+ ((cUnit->numBlocks > MANY_BLOCKS_INITIALIZER) &&
+ PrettyMethod(method_idx, dex_file).find("init>") !=
+ std::string::npos)) {
+ cUnit->disableDataflow = true;
+ // Disable optimizations that require dataflow/SSA
+ cUnit->disableOpt |=
+ (1 << kNullCheckElimination) |
+ (1 << kPromoteRegs);
+ if (cUnit->printMe) {
+ LOG(INFO) << "Compiler: " << PrettyMethod(method_idx, dex_file)
+ << " too big: " << cUnit->numBlocks;
+ }
+ }
+ }
+
if (cUnit->printMe) {
oatDumpCompilationUnit(cUnit.get());
}
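
One detail worth noting in the hunk above: the substring search for "init>"
matches both constructor and class-initializer names as printed by
PrettyMethod, so a single find() covers both cases. Illustrative names (made
up for the example, not taken from a real run):

    // "void Foo.<init>()"     -> contains "init>"  (constructor)
    // "void Foo.<clinit>()"   -> contains "init>"  (static initializer)
    // "void Foo.initialize()" -> no match; "init" is not followed by '>'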
diff --git a/src/compiler/SSATransformation.cc b/src/compiler/SSATransformation.cc
index a25d225694..e9c3d70495 100644
--- a/src/compiler/SSATransformation.cc
+++ b/src/compiler/SSATransformation.cc
@@ -759,17 +759,21 @@ void oatMethodSSATransformation(CompilationUnit* cUnit)
/* Compute the DFS order */
computeDFSOrders(cUnit);
- /* Compute the dominator info */
- computeDominators(cUnit);
+ if (!cUnit->disableDataflow) {
+ /* Compute the dominator info */
+ computeDominators(cUnit);
+ }
/* Allocate data structures in preparation for SSA conversion */
oatInitializeSSAConversion(cUnit);
- /* Find out the "Dalvik reg def x block" relation */
- computeDefBlockMatrix(cUnit);
+ if (!cUnit->disableDataflow) {
+ /* Find out the "Dalvik reg def x block" relation */
+ computeDefBlockMatrix(cUnit);
- /* Insert phi nodes to dominance frontiers for all variables */
- insertPhiNodes(cUnit);
+ /* Insert phi nodes to dominance frontiers for all variables */
+ insertPhiNodes(cUnit);
+ }
/* Rename register names by local defs and phi nodes */
oatDataFlowAnalysisDispatcher(cUnit, oatClearVisitedFlag,
@@ -777,17 +781,19 @@ void oatMethodSSATransformation(CompilationUnit* cUnit)
false /* isIterative */);
doDFSPreOrderSSARename(cUnit, cUnit->entryBlock);
- /*
- * Shared temp bit vector used by each block to count the number of defs
- * from all the predecessor blocks.
- */
- cUnit->tempSSARegisterV = oatAllocBitVector(cUnit->numSSARegs,
+ if (!cUnit->disableDataflow) {
+ /*
+ * Shared temp bit vector used by each block to count the number of defs
+ * from all the predecessor blocks.
+ */
+ cUnit->tempSSARegisterV = oatAllocBitVector(cUnit->numSSARegs,
false);
- /* Insert phi-operands with latest SSA names from predecessor blocks */
- oatDataFlowAnalysisDispatcher(cUnit, insertPhiNodeOperands,
- kReachableNodes,
- false /* isIterative */);
+ /* Insert phi-operands with latest SSA names from predecessor blocks */
+ oatDataFlowAnalysisDispatcher(cUnit, insertPhiNodeOperands,
+ kReachableNodes,
+ false /* isIterative */);
+ }
}
} // namespace art
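
For readability, the resulting shape of oatMethodSSATransformation after this
change, condensed into a single sketch. This is a paraphrase of the hunks
above, not verbatim source; the clear-visited-flags dispatcher call, whose
arguments are only partially visible in the diff, is summarized in a comment:

    void oatMethodSSATransformation(CompilationUnit* cUnit)
    {
        computeDFSOrders(cUnit);              // always runs
        if (!cUnit->disableDataflow) {
            computeDominators(cUnit);         // skipped for large methods
        }
        oatInitializeSSAConversion(cUnit);    // always runs
        if (!cUnit->disableDataflow) {
            computeDefBlockMatrix(cUnit);     // "Dalvik reg def x block"
            insertPhiNodes(cUnit);            // phis at dominance frontiers
        }
        // Register renaming always runs: clear visited flags via the
        // dataflow dispatcher, then rename in DFS pre-order from the entry.
        doDFSPreOrderSSARename(cUnit, cUnit->entryBlock);
        if (!cUnit->disableDataflow) {
            // Shared temp bit vector counting defs from predecessor blocks.
            cUnit->tempSSARegisterV = oatAllocBitVector(cUnit->numSSARegs,
                                                        false);
            oatDataFlowAnalysisDispatcher(cUnit, insertPhiNodeOperands,
                                          kReachableNodes,
                                          false /* isIterative */);
        }
    }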