Compile-time tuning: assembly phase
Not as much compile-time gain from reworking the assembly phase as I'd
hoped, but still worthwhile: expect roughly a 2% compile-time
improvement from the assembly rework itself. On the other hand, some
applications should see huge gains thanks to better detection of large
machine-generated init methods; Thinkfree shows a 25% improvement.
The major assembly change was to thread the LIR nodes that require
fixup into a fixup chain, so that only those nodes are processed during
the final assembly pass(es). This doesn't help methods that need only a
single pass to assemble, but it does speed up larger methods that
require multiple assembly passes.
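As a rough sketch of the chain idea (the field and helper names here,
next_fixup, fixup_kind, ApplyFixup, are illustrative, not the actual
ART declarations):

    // Each LIR node carries a link used only by nodes needing
    // pc-relative fixup, so later passes can skip everything else.
    struct LIR {
      LIR* next = nullptr;        // full instruction list for the method
      LIR* next_fixup = nullptr;  // fixup chain (subset of the list)
      int fixup_kind = 0;         // 0 = no fixup needed (assumed encoding)
    };

    // Placeholder: a real backend would re-encode one instruction and
    // report whether its size (and thus downstream offsets) changed.
    static bool ApplyFixup(LIR* /*lir*/) { return false; }

    // First pass: assemble everything once, threading the nodes that
    // need fixup into a chain as we go.
    static LIR* BuildFixupChain(LIR* head) {
      LIR* fixup_head = nullptr;
      LIR** tail = &fixup_head;
      for (LIR* p = head; p != nullptr; p = p->next) {
        if (p->fixup_kind != 0) {
          *tail = p;
          tail = &p->next_fixup;
        }
      }
      *tail = nullptr;
      return fixup_head;
    }

    // Follow-on passes: walk only the (typically short) fixup chain
    // until no instruction changes size. A method whose first pass
    // yields an empty chain gets the single-pass fast path for free.
    static void FixupPasses(LIR* fixup_head) {
      bool changed;
      do {
        changed = false;
        for (LIR* p = fixup_head; p != nullptr; p = p->next_fixup) {
          if (ApplyFixup(p)) {
            changed = true;
          }
        }
      } while (changed);
    }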
Also replaced the block_map_ basic block lookup table (which reserved
space for a BasicBlock* per dex instruction unit) with a map of 16-bit
block ids, cutting its space requirements in half in a 32-bit pointer
environment.
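A sketch of that replacement under assumed names (BlockMap, kNoBlock,
and the std::vector storage are illustrative, not the actual ART code):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct BasicBlock { /* ... */ };

    // Sentinel meaning "no block starts at this dex offset" (assumed).
    constexpr uint16_t kNoBlock = 0xFFFFu;

    class BlockMap {
     public:
      explicit BlockMap(std::size_t num_dex_units)
          : id_map_(num_dex_units, kNoBlock) {}

      // Record a block beginning at dex_offset: 2 bytes per table
      // entry instead of 4 for a BasicBlock* on a 32-bit target.
      void SetBlock(std::size_t dex_offset, BasicBlock* bb) {
        id_map_[dex_offset] = static_cast<uint16_t>(blocks_.size());
        blocks_.push_back(bb);
      }

      // Constant-time lookup, replacing the old sequential search:
      // id table first, then the id -> pointer vector.
      BasicBlock* FindBlock(std::size_t dex_offset) const {
        uint16_t id = id_map_[dex_offset];
        return (id == kNoBlock) ? nullptr : blocks_[id];
      }

     private:
      std::vector<uint16_t> id_map_;     // one entry per dex unit
      std::vector<BasicBlock*> blocks_;  // block id -> block
    };

The tradeoff of 16-bit fields is that very large methods can overflow
them, hence the explicit overflow check in the change list below.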
Changes:
o Reduce size of LIR struct by 12.5% (one of the big memory users)
o Repurpose the use/def portion of the LIR once optimization is
  complete (see the sketch after this list).
o Encode instruction bits to LIR
o Thread LIR nodes requiring pc fixup
o Change follow-on assembly passes to only consider fixup LIRs
o Switch on pc-rel fixup kind
o Fast-path for small methods - single pass assembly
o Avoid using cb[n]z for null checks (the target almost always exceeds
  the allowed displacement).
o Improve detection of large initialization methods.
o Rework def/use flag setup.
o Remove a sequential search from FindBlock by using a lookup table of
16-bit block ids rather than full block pointers.
o Eliminate pcRelFixup and use fixup kind instead.
o Add check for 16-bit overflow on dex offset.
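The diff below reads the resource masks through u.m and alias_info
through flags, which implies roughly the layout sketched here; beyond
those three field names, the overlay contents are an assumption about
what the repurposed storage might hold:

    #include <cstdint>

    struct LIR {
      struct {
        uint32_t alias_info;  // Dalvik register alias info (width assumed)
        // ... other packed flag bits
      } flags;
      union {
        struct {              // valid while optimization passes run
          uint64_t use_mask;  // resources read by this instruction
          uint64_t def_mask;  // resources written by this instruction
        } m;
        struct {              // hypothetical overlay after optimization
          uint32_t encoded_bits;  // assembled instruction encoding
          uint32_t fixup_data;    // e.g. pc-relative fixup state
        } a;
      } u;
    };

Since assembly never needs the masks, overlaying the two phases' data
in one union shrinks the LIR struct without losing any information.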
Change-Id: I4c6615f83fed46f84629ad6cfe4237205a9562b4
diff --git a/compiler/dex/quick/local_optimizations.cc b/compiler/dex/quick/local_optimizations.cc
index cb7694d..f915779 100644
--- a/compiler/dex/quick/local_optimizations.cc
+++ b/compiler/dex/quick/local_optimizations.cc
@@ -21,8 +21,8 @@
#define DEBUG_OPT(X)
/* Check RAW, WAR, and RAW dependency on the register operands */
-#define CHECK_REG_DEP(use, def, check) ((def & check->use_mask) || \
- ((use | def) & check->def_mask))
+#define CHECK_REG_DEP(use, def, check) ((def & check->u.m.use_mask) || \
+ ((use | def) & check->u.m.def_mask))
/* Scheduler heuristics */
#define MAX_HOIST_DISTANCE 20
@@ -30,10 +30,10 @@
#define LD_LATENCY 2
static bool IsDalvikRegisterClobbered(LIR* lir1, LIR* lir2) {
- int reg1Lo = DECODE_ALIAS_INFO_REG(lir1->alias_info);
- int reg1Hi = reg1Lo + DECODE_ALIAS_INFO_WIDE(lir1->alias_info);
- int reg2Lo = DECODE_ALIAS_INFO_REG(lir2->alias_info);
- int reg2Hi = reg2Lo + DECODE_ALIAS_INFO_WIDE(lir2->alias_info);
+ int reg1Lo = DECODE_ALIAS_INFO_REG(lir1->flags.alias_info);
+ int reg1Hi = reg1Lo + DECODE_ALIAS_INFO_WIDE(lir1->flags.alias_info);
+ int reg2Lo = DECODE_ALIAS_INFO_REG(lir2->flags.alias_info);
+ int reg2Hi = reg2Lo + DECODE_ALIAS_INFO_WIDE(lir2->flags.alias_info);
return (reg1Lo == reg2Lo) || (reg1Lo == reg2Hi) || (reg1Hi == reg2Lo);
}
@@ -106,7 +106,7 @@
bool is_this_lir_load = target_flags & IS_LOAD;
LIR* check_lir;
/* Use the mem mask to determine the rough memory location */
- uint64_t this_mem_mask = (this_lir->use_mask | this_lir->def_mask) & ENCODE_MEM;
+ uint64_t this_mem_mask = (this_lir->u.m.use_mask | this_lir->u.m.def_mask) & ENCODE_MEM;
/*
* Currently only eliminate redundant ld/st for constant and Dalvik
@@ -116,10 +116,10 @@
continue;
}
- uint64_t stop_def_reg_mask = this_lir->def_mask & ~ENCODE_MEM;
+ uint64_t stop_def_reg_mask = this_lir->u.m.def_mask & ~ENCODE_MEM;
uint64_t stop_use_reg_mask;
if (cu_->instruction_set == kX86) {
- stop_use_reg_mask = (IS_BRANCH | this_lir->use_mask) & ~ENCODE_MEM;
+ stop_use_reg_mask = (IS_BRANCH | this_lir->u.m.use_mask) & ~ENCODE_MEM;
} else {
/*
* Add pc to the resource mask to prevent this instruction
@@ -127,7 +127,7 @@
* region bits since stop_mask is used to check data/control
* dependencies.
*/
- stop_use_reg_mask = (GetPCUseDefEncoding() | this_lir->use_mask) & ~ENCODE_MEM;
+ stop_use_reg_mask = (GetPCUseDefEncoding() | this_lir->u.m.use_mask) & ~ENCODE_MEM;
}
for (check_lir = NEXT_LIR(this_lir); check_lir != tail_lir; check_lir = NEXT_LIR(check_lir)) {
@@ -139,7 +139,7 @@
continue;
}
- uint64_t check_mem_mask = (check_lir->use_mask | check_lir->def_mask) & ENCODE_MEM;
+ uint64_t check_mem_mask = (check_lir->u.m.use_mask | check_lir->u.m.def_mask) & ENCODE_MEM;
uint64_t alias_condition = this_mem_mask & check_mem_mask;
bool stop_here = false;
@@ -159,7 +159,7 @@
*/
DCHECK(!(check_flags & IS_STORE));
/* Same value && same register type */
- if (check_lir->alias_info == this_lir->alias_info &&
+ if (check_lir->flags.alias_info == this_lir->flags.alias_info &&
SameRegType(check_lir->operands[0], native_reg_id)) {
/*
* Different destination register - insert
@@ -172,7 +172,7 @@
}
} else if (alias_condition == ENCODE_DALVIK_REG) {
/* Must alias */
- if (check_lir->alias_info == this_lir->alias_info) {
+ if (check_lir->flags.alias_info == this_lir->flags.alias_info) {
/* Only optimize compatible registers */
bool reg_compatible = SameRegType(check_lir->operands[0], native_reg_id);
if ((is_this_lir_load && is_check_lir_load) ||
@@ -297,7 +297,7 @@
continue;
}
- uint64_t stop_use_all_mask = this_lir->use_mask;
+ uint64_t stop_use_all_mask = this_lir->u.m.use_mask;
if (cu_->instruction_set != kX86) {
/*
@@ -313,7 +313,7 @@
/* Similar as above, but just check for pure register dependency */
uint64_t stop_use_reg_mask = stop_use_all_mask & ~ENCODE_MEM;
- uint64_t stop_def_reg_mask = this_lir->def_mask & ~ENCODE_MEM;
+ uint64_t stop_def_reg_mask = this_lir->u.m.def_mask & ~ENCODE_MEM;
int next_slot = 0;
bool stop_here = false;
@@ -328,7 +328,7 @@
continue;
}
- uint64_t check_mem_mask = check_lir->def_mask & ENCODE_MEM;
+ uint64_t check_mem_mask = check_lir->u.m.def_mask & ENCODE_MEM;
uint64_t alias_condition = stop_use_all_mask & check_mem_mask;
stop_here = false;
@@ -337,7 +337,7 @@
/* We can fully disambiguate Dalvik references */
if (alias_condition == ENCODE_DALVIK_REG) {
/* Must alias or partually overlap */
- if ((check_lir->alias_info == this_lir->alias_info) ||
+ if ((check_lir->flags.alias_info == this_lir->flags.alias_info) ||
IsDalvikRegisterClobbered(this_lir, check_lir)) {
stop_here = true;
}
@@ -406,7 +406,7 @@
LIR* prev_lir = prev_inst_list[slot+1];
/* Check the highest instruction */
- if (prev_lir->def_mask == ENCODE_ALL) {
+ if (prev_lir->u.m.def_mask == ENCODE_ALL) {
/*
* If the first instruction is a load, don't hoist anything
* above it since it is unlikely to be beneficial.
@@ -436,7 +436,7 @@
*/
bool prev_is_load = is_pseudo_opcode(prev_lir->opcode) ? false :
(GetTargetInstFlags(prev_lir->opcode) & IS_LOAD);
- if (((cur_lir->use_mask & prev_lir->def_mask) && prev_is_load) || (slot < LD_LATENCY)) {
+ if (((cur_lir->u.m.use_mask & prev_lir->u.m.def_mask) && prev_is_load) || (slot < LD_LATENCY)) {
break;
}
}