From fa57c47f1b72916371a9c2d5c1389219bce655b4 Mon Sep 17 00:00:00 2001
From: buzbee
Date: Wed, 21 Nov 2012 12:06:18 -0800
Subject: Quick Compiler: Shoot the Camel

Another step towards moving the Quick Compiler from the old Dalvik coding
style to Art's coding style.  In this CL, Camel-case locals, struct
variables and arguments are converted to lower-case with underscore names.

Most of the name changes were formulaic, but I also took this opportunity
to change the old "printMe" into the more traditional "verbose", and
shorten cUnit to cu.

No logic changes.

Change-Id: I64b69b28a8357d5cc0abc1dc975954c91abd9b45
---
 src/compiler/codegen/local_optimizations.cc | 360 ++++++++++++++--------------
 1 file changed, 180 insertions(+), 180 deletions(-)

(limited to 'src/compiler/codegen/local_optimizations.cc')

diff --git a/src/compiler/codegen/local_optimizations.cc b/src/compiler/codegen/local_optimizations.cc
index 1e6e0d8fc5..d1a74441ae 100644
--- a/src/compiler/codegen/local_optimizations.cc
+++ b/src/compiler/codegen/local_optimizations.cc
@@ -21,8 +21,8 @@ namespace art {
 #define DEBUG_OPT(X)
 
 /* Check RAW, WAR, and WAR dependency on the register operands */
-#define CHECK_REG_DEP(use, def, check) ((def & check->useMask) || \
-                                        ((use | def) & check->defMask))
+#define CHECK_REG_DEP(use, def, check) ((def & check->use_mask) || \
+                                        ((use | def) & check->def_mask))
 
 /* Scheduler heuristics */
 #define MAX_HOIST_DISTANCE 20
@@ -31,27 +31,27 @@ namespace art {
 
 static bool IsDalvikRegisterClobbered(LIR* lir1, LIR* lir2)
 {
-  int reg1Lo = DECODE_ALIAS_INFO_REG(lir1->aliasInfo);
-  int reg1Hi = reg1Lo + DECODE_ALIAS_INFO_WIDE(lir1->aliasInfo);
-  int reg2Lo = DECODE_ALIAS_INFO_REG(lir2->aliasInfo);
-  int reg2Hi = reg2Lo + DECODE_ALIAS_INFO_WIDE(lir2->aliasInfo);
+  int reg1Lo = DECODE_ALIAS_INFO_REG(lir1->alias_info);
+  int reg1Hi = reg1Lo + DECODE_ALIAS_INFO_WIDE(lir1->alias_info);
+  int reg2Lo = DECODE_ALIAS_INFO_REG(lir2->alias_info);
+  int reg2Hi = reg2Lo + DECODE_ALIAS_INFO_WIDE(lir2->alias_info);
 
   return (reg1Lo == reg2Lo) || (reg1Lo == reg2Hi) || (reg1Hi == reg2Lo);
 }
 
 /* Convert a more expensive instruction (ie load) into a move */
-static void ConvertMemOpIntoMove(CompilationUnit* cUnit, LIR* origLIR, int dest, int src)
+static void ConvertMemOpIntoMove(CompilationUnit* cu, LIR* orig_lir, int dest, int src)
 {
   /* Insert a move to replace the load */
-  LIR* moveLIR;
-  moveLIR = OpRegCopyNoInsert( cUnit, dest, src);
+  LIR* move_lir;
+  move_lir = OpRegCopyNoInsert( cu, dest, src);
   /*
    * Insert the converted instruction after the original since the
    * optimization is scannng in the top-down order and the new instruction
    * will need to be re-checked (eg the new dest clobbers the src used in
-   * thisLIR).
+   * this_lir).
    */
-  InsertLIRAfter(origLIR, moveLIR);
+  InsertLIRAfter(orig_lir, move_lir);
 }
 
 /*
@@ -72,147 +72,147 @@ static void ConvertMemOpIntoMove(CompilationUnit* cUnit, LIR* origLIR, int dest,
  * 1) They are must-aliases
  * 2) The memory location is not written to in between
  */
-static void ApplyLoadStoreElimination(CompilationUnit* cUnit, LIR* headLIR, LIR* tailLIR)
+static void ApplyLoadStoreElimination(CompilationUnit* cu, LIR* head_lir, LIR* tail_lir)
 {
-  LIR* thisLIR;
+  LIR* this_lir;
 
-  if (headLIR == tailLIR) return;
+  if (head_lir == tail_lir) return;
 
-  for (thisLIR = PREV_LIR(tailLIR);
-       thisLIR != headLIR;
-       thisLIR = PREV_LIR(thisLIR)) {
-    int sinkDistance = 0;
+  for (this_lir = PREV_LIR(tail_lir);
+       this_lir != head_lir;
+       this_lir = PREV_LIR(this_lir)) {
+    int sink_distance = 0;
 
     /* Skip non-interesting instructions */
-    if ((thisLIR->flags.isNop == true) ||
-        isPseudoOpcode(thisLIR->opcode) ||
-        (GetTargetInstFlags(thisLIR->opcode) & IS_BRANCH) ||
-        !(GetTargetInstFlags(thisLIR->opcode) & (IS_LOAD | IS_STORE))) {
+    if ((this_lir->flags.is_nop == true) ||
+        is_pseudo_opcode(this_lir->opcode) ||
+        (GetTargetInstFlags(this_lir->opcode) & IS_BRANCH) ||
+        !(GetTargetInstFlags(this_lir->opcode) & (IS_LOAD | IS_STORE))) {
       continue;
     }
 
-    int nativeRegId;
-    if (cUnit->instructionSet == kX86) {
+    int native_reg_id;
+    if (cu->instruction_set == kX86) {
       // If x86, location differs depending on whether memory/reg operation.
-      nativeRegId = (GetTargetInstFlags(thisLIR->opcode) & IS_STORE) ? thisLIR->operands[2]
-                  : thisLIR->operands[0];
+      native_reg_id = (GetTargetInstFlags(this_lir->opcode) & IS_STORE) ? this_lir->operands[2]
+                    : this_lir->operands[0];
     } else {
-      nativeRegId = thisLIR->operands[0];
+      native_reg_id = this_lir->operands[0];
     }
-    bool isThisLIRLoad = GetTargetInstFlags(thisLIR->opcode) & IS_LOAD;
-    LIR* checkLIR;
+    bool is_this_lir_load = GetTargetInstFlags(this_lir->opcode) & IS_LOAD;
+    LIR* check_lir;
     /* Use the mem mask to determine the rough memory location */
-    uint64_t thisMemMask = (thisLIR->useMask | thisLIR->defMask) & ENCODE_MEM;
+    uint64_t this_mem_mask = (this_lir->use_mask | this_lir->def_mask) & ENCODE_MEM;
 
    /*
     * Currently only eliminate redundant ld/st for constant and Dalvik
     * register accesses.
     */
-    if (!(thisMemMask & (ENCODE_LITERAL | ENCODE_DALVIK_REG))) continue;
+    if (!(this_mem_mask & (ENCODE_LITERAL | ENCODE_DALVIK_REG))) continue;
 
-    uint64_t stopDefRegMask = thisLIR->defMask & ~ENCODE_MEM;
-    uint64_t stopUseRegMask;
-    if (cUnit->instructionSet == kX86) {
-      stopUseRegMask = (IS_BRANCH | thisLIR->useMask) & ~ENCODE_MEM;
+    uint64_t stop_def_reg_mask = this_lir->def_mask & ~ENCODE_MEM;
+    uint64_t stop_use_reg_mask;
+    if (cu->instruction_set == kX86) {
+      stop_use_reg_mask = (IS_BRANCH | this_lir->use_mask) & ~ENCODE_MEM;
     } else {
      /*
       * Add pc to the resource mask to prevent this instruction
       * from sinking past branch instructions. Also take out the memory
-      * region bits since stopMask is used to check data/control
+      * region bits since stop_mask is used to check data/control
       * dependencies.
       */
-      stopUseRegMask = (GetPCUseDefEncoding() | thisLIR->useMask) & ~ENCODE_MEM;
+      stop_use_reg_mask = (GetPCUseDefEncoding() | this_lir->use_mask) & ~ENCODE_MEM;
     }
 
-    for (checkLIR = NEXT_LIR(thisLIR);
-         checkLIR != tailLIR;
-         checkLIR = NEXT_LIR(checkLIR)) {
+    for (check_lir = NEXT_LIR(this_lir);
+         check_lir != tail_lir;
+         check_lir = NEXT_LIR(check_lir)) {
 
      /*
       * Skip already dead instructions (whose dataflow information is
      * outdated and misleading).
      */
-      if (checkLIR->flags.isNop) continue;
+      if (check_lir->flags.is_nop) continue;
 
-      uint64_t checkMemMask = (checkLIR->useMask | checkLIR->defMask) & ENCODE_MEM;
-      uint64_t aliasCondition = thisMemMask & checkMemMask;
-      bool stopHere = false;
+      uint64_t check_mem_mask = (check_lir->use_mask | check_lir->def_mask) & ENCODE_MEM;
+      uint64_t alias_condition = this_mem_mask & check_mem_mask;
+      bool stop_here = false;
 
      /*
       * Potential aliases seen - check the alias relations
       */
-      if (checkMemMask != ENCODE_MEM && aliasCondition != 0) {
-        bool isCheckLIRLoad = GetTargetInstFlags(checkLIR->opcode) & IS_LOAD;
-        if (aliasCondition == ENCODE_LITERAL) {
+      if (check_mem_mask != ENCODE_MEM && alias_condition != 0) {
+        bool is_check_lir_load = GetTargetInstFlags(check_lir->opcode) & IS_LOAD;
+        if (alias_condition == ENCODE_LITERAL) {
          /*
           * Should only see literal loads in the instruction
           * stream.
           */
-          DCHECK(!(GetTargetInstFlags(checkLIR->opcode) & IS_STORE));
+          DCHECK(!(GetTargetInstFlags(check_lir->opcode) & IS_STORE));
           /* Same value && same register type */
-          if (checkLIR->aliasInfo == thisLIR->aliasInfo &&
-              SameRegType(checkLIR->operands[0], nativeRegId)) {
+          if (check_lir->alias_info == this_lir->alias_info &&
+              SameRegType(check_lir->operands[0], native_reg_id)) {
            /*
             * Different destination register - insert
             * a move
             */
-            if (checkLIR->operands[0] != nativeRegId) {
-              ConvertMemOpIntoMove(cUnit, checkLIR, checkLIR->operands[0],
-                                   nativeRegId);
+            if (check_lir->operands[0] != native_reg_id) {
+              ConvertMemOpIntoMove(cu, check_lir, check_lir->operands[0],
+                                   native_reg_id);
             }
-            checkLIR->flags.isNop = true;
+            check_lir->flags.is_nop = true;
           }
-        } else if (aliasCondition == ENCODE_DALVIK_REG) {
+        } else if (alias_condition == ENCODE_DALVIK_REG) {
           /* Must alias */
-          if (checkLIR->aliasInfo == thisLIR->aliasInfo) {
+          if (check_lir->alias_info == this_lir->alias_info) {
             /* Only optimize compatible registers */
-            bool regCompatible = SameRegType(checkLIR->operands[0], nativeRegId);
-            if ((isThisLIRLoad && isCheckLIRLoad) ||
-                (!isThisLIRLoad && isCheckLIRLoad)) {
+            bool reg_compatible = SameRegType(check_lir->operands[0], native_reg_id);
+            if ((is_this_lir_load && is_check_lir_load) ||
+                (!is_this_lir_load && is_check_lir_load)) {
              /* RAR or RAW */
-              if (regCompatible) {
+              if (reg_compatible) {
                /*
                 * Different destination register -
                 * insert a move
                 */
-                if (checkLIR->operands[0] !=
-                    nativeRegId) {
-                  ConvertMemOpIntoMove(cUnit, checkLIR, checkLIR->operands[0],
-                                       nativeRegId);
+                if (check_lir->operands[0] !=
+                    native_reg_id) {
+                  ConvertMemOpIntoMove(cu, check_lir, check_lir->operands[0],
+                                       native_reg_id);
                 }
-                checkLIR->flags.isNop = true;
+                check_lir->flags.is_nop = true;
               } else {
                /*
                 * Destinaions are of different types -
                 * something complicated going on so
                 * stop looking now.
                 */
-                stopHere = true;
+                stop_here = true;
               }
-            } else if (isThisLIRLoad && !isCheckLIRLoad) {
+            } else if (is_this_lir_load && !is_check_lir_load) {
               /* WAR - register value is killed */
-              stopHere = true;
-            } else if (!isThisLIRLoad && !isCheckLIRLoad) {
+              stop_here = true;
+            } else if (!is_this_lir_load && !is_check_lir_load) {
               /* WAW - nuke the earlier store */
-              thisLIR->flags.isNop = true;
-              stopHere = true;
+              this_lir->flags.is_nop = true;
+              stop_here = true;
             }
           /* Partial overlap */
-          } else if (IsDalvikRegisterClobbered(thisLIR, checkLIR)) {
+          } else if (IsDalvikRegisterClobbered(this_lir, check_lir)) {
            /*
-            * It is actually ok to continue if checkLIR
+            * It is actually ok to continue if check_lir
             * is a read. But it is hard to make a test
             * case for this so we just stop here to be
             * conservative.
             */
-            stopHere = true;
+            stop_here = true;
          }
        }
 
        /* Memory content may be updated. Stop looking now. */
-        if (stopHere) {
+        if (stop_here) {
          break;
-        /* The checkLIR has been transformed - check the next one */
-        } else if (checkLIR->flags.isNop) {
+        /* The check_lir has been transformed - check the next one */
+        } else if (check_lir->flags.is_nop) {
          continue;
        }
      }
@@ -223,36 +223,36 @@ static void ApplyLoadStoreElimination(CompilationUnit* cUnit, LIR* headLIR, LIR*
       * their register operands have any RAW, WAR, and WAW
       * dependencies. If so, stop looking.
       */
-      if (stopHere == false) {
-        stopHere = CHECK_REG_DEP(stopUseRegMask, stopDefRegMask, checkLIR);
+      if (stop_here == false) {
+        stop_here = CHECK_REG_DEP(stop_use_reg_mask, stop_def_reg_mask, check_lir);
      }
 
-      if (stopHere == true) {
-        if (cUnit->instructionSet == kX86) {
+      if (stop_here == true) {
+        if (cu->instruction_set == kX86) {
          // Prevent stores from being sunk between ops that generate ccodes and
          // ops that use them.
-          uint64_t flags = GetTargetInstFlags(checkLIR->opcode);
-          if (sinkDistance > 0 && (flags & IS_BRANCH) && (flags & USES_CCODES)) {
-            checkLIR = PREV_LIR(checkLIR);
-            sinkDistance--;
+          uint64_t flags = GetTargetInstFlags(check_lir->opcode);
+          if (sink_distance > 0 && (flags & IS_BRANCH) && (flags & USES_CCODES)) {
+            check_lir = PREV_LIR(check_lir);
+            sink_distance--;
          }
        }
-        DEBUG_OPT(dumpDependentInsnPair(thisLIR, checkLIR, "REG CLOBBERED"));
+        DEBUG_OPT(dump_dependent_insn_pair(this_lir, check_lir, "REG CLOBBERED"));
         /* Only sink store instructions */
-        if (sinkDistance && !isThisLIRLoad) {
-          LIR* newStoreLIR = static_cast<LIR*>(NewMem(cUnit, sizeof(LIR), true, kAllocLIR));
-          *newStoreLIR = *thisLIR;
+        if (sink_distance && !is_this_lir_load) {
+          LIR* new_store_lir = static_cast<LIR*>(NewMem(cu, sizeof(LIR), true, kAllocLIR));
+          *new_store_lir = *this_lir;
          /*
-           * Stop point found - insert *before* the checkLIR
+           * Stop point found - insert *before* the check_lir
           * since the instruction list is scanned in the
           * top-down order.
           */
-          InsertLIRBefore(checkLIR, newStoreLIR);
-          thisLIR->flags.isNop = true;
+          InsertLIRBefore(check_lir, new_store_lir);
+          this_lir->flags.is_nop = true;
        }
         break;
-      } else if (!checkLIR->flags.isNop) {
-        sinkDistance++;
+      } else if (!check_lir->flags.is_nop) {
+        sink_distance++;
      }
    }
  }
@@ -262,144 +262,144 @@ static void ApplyLoadStoreElimination(CompilationUnit* cUnit, LIR* headLIR, LIR*
  * Perform a pass of bottom-up walk, from the second instruction in the
  * superblock, to try to hoist loads to earlier slots.
  */
-void ApplyLoadHoisting(CompilationUnit* cUnit, LIR* headLIR, LIR* tailLIR)
+void ApplyLoadHoisting(CompilationUnit* cu, LIR* head_lir, LIR* tail_lir)
 {
-  LIR* thisLIR, *checkLIR;
+  LIR* this_lir, *check_lir;
  /*
   * Store the list of independent instructions that can be hoisted past.
   * Will decide the best place to insert later.
   */
-  LIR* prevInstList[MAX_HOIST_DISTANCE];
+  LIR* prev_inst_list[MAX_HOIST_DISTANCE];
 
   /* Empty block */
-  if (headLIR == tailLIR) return;
+  if (head_lir == tail_lir) return;
 
   /* Start from the second instruction */
-  for (thisLIR = NEXT_LIR(headLIR);
-       thisLIR != tailLIR;
-       thisLIR = NEXT_LIR(thisLIR)) {
+  for (this_lir = NEXT_LIR(head_lir);
+       this_lir != tail_lir;
+       this_lir = NEXT_LIR(this_lir)) {
 
     /* Skip non-interesting instructions */
-    if ((thisLIR->flags.isNop == true) ||
-        isPseudoOpcode(thisLIR->opcode) ||
-        !(GetTargetInstFlags(thisLIR->opcode) & IS_LOAD)) {
+    if ((this_lir->flags.is_nop == true) ||
+        is_pseudo_opcode(this_lir->opcode) ||
+        !(GetTargetInstFlags(this_lir->opcode) & IS_LOAD)) {
       continue;
     }
 
-    uint64_t stopUseAllMask = thisLIR->useMask;
+    uint64_t stop_use_all_mask = this_lir->use_mask;
 
-    if (cUnit->instructionSet != kX86) {
+    if (cu->instruction_set != kX86) {
      /*
       * Branches for null/range checks are marked with the true resource
       * bits, and loads to Dalvik registers, constant pools, and non-alias
       * locations are safe to be hoisted. So only mark the heap references
       * conservatively here.
       */
-      if (stopUseAllMask & ENCODE_HEAP_REF) {
-        stopUseAllMask |= GetPCUseDefEncoding();
+      if (stop_use_all_mask & ENCODE_HEAP_REF) {
+        stop_use_all_mask |= GetPCUseDefEncoding();
      }
    }
 
     /* Similar as above, but just check for pure register dependency */
-    uint64_t stopUseRegMask = stopUseAllMask & ~ENCODE_MEM;
-    uint64_t stopDefRegMask = thisLIR->defMask & ~ENCODE_MEM;
+    uint64_t stop_use_reg_mask = stop_use_all_mask & ~ENCODE_MEM;
+    uint64_t stop_def_reg_mask = this_lir->def_mask & ~ENCODE_MEM;
 
-    int nextSlot = 0;
-    bool stopHere = false;
+    int next_slot = 0;
+    bool stop_here = false;
 
     /* Try to hoist the load to a good spot */
-    for (checkLIR = PREV_LIR(thisLIR);
-         checkLIR != headLIR;
-         checkLIR = PREV_LIR(checkLIR)) {
+    for (check_lir = PREV_LIR(this_lir);
+         check_lir != head_lir;
+         check_lir = PREV_LIR(check_lir)) {
 
      /*
       * Skip already dead instructions (whose dataflow information is
       * outdated and misleading).
       */
-      if (checkLIR->flags.isNop) continue;
+      if (check_lir->flags.is_nop) continue;
 
-      uint64_t checkMemMask = checkLIR->defMask & ENCODE_MEM;
-      uint64_t aliasCondition = stopUseAllMask & checkMemMask;
-      stopHere = false;
+      uint64_t check_mem_mask = check_lir->def_mask & ENCODE_MEM;
+      uint64_t alias_condition = stop_use_all_mask & check_mem_mask;
+      stop_here = false;
 
       /* Potential WAR alias seen - check the exact relation */
-      if (checkMemMask != ENCODE_MEM && aliasCondition != 0) {
+      if (check_mem_mask != ENCODE_MEM && alias_condition != 0) {
         /* We can fully disambiguate Dalvik references */
-        if (aliasCondition == ENCODE_DALVIK_REG) {
+        if (alias_condition == ENCODE_DALVIK_REG) {
           /* Must alias or partually overlap */
-          if ((checkLIR->aliasInfo == thisLIR->aliasInfo) ||
-              IsDalvikRegisterClobbered(thisLIR, checkLIR)) {
-            stopHere = true;
+          if ((check_lir->alias_info == this_lir->alias_info) ||
+              IsDalvikRegisterClobbered(this_lir, check_lir)) {
+            stop_here = true;
          }
         /* Conservatively treat all heap refs as may-alias */
        } else {
-          DCHECK_EQ(aliasCondition, ENCODE_HEAP_REF);
-          stopHere = true;
+          DCHECK_EQ(alias_condition, ENCODE_HEAP_REF);
+          stop_here = true;
        }
        /* Memory content may be updated. Stop looking now.
        */
-        if (stopHere) {
-          prevInstList[nextSlot++] = checkLIR;
+        if (stop_here) {
+          prev_inst_list[next_slot++] = check_lir;
           break;
        }
      }
 
-      if (stopHere == false) {
-        stopHere = CHECK_REG_DEP(stopUseRegMask, stopDefRegMask,
-                                 checkLIR);
+      if (stop_here == false) {
+        stop_here = CHECK_REG_DEP(stop_use_reg_mask, stop_def_reg_mask,
+                                  check_lir);
      }
 
      /*
       * Store the dependent or non-pseudo/indepedent instruction to the
       * list.
       */
-      if (stopHere || !isPseudoOpcode(checkLIR->opcode)) {
-        prevInstList[nextSlot++] = checkLIR;
-        if (nextSlot == MAX_HOIST_DISTANCE) break;
+      if (stop_here || !is_pseudo_opcode(check_lir->opcode)) {
+        prev_inst_list[next_slot++] = check_lir;
+        if (next_slot == MAX_HOIST_DISTANCE) break;
      }
 
       /* Found a new place to put the load - move it here */
-      if (stopHere == true) {
-        DEBUG_OPT(dumpDependentInsnPair(checkLIR, thisLIR "HOIST STOP"));
+      if (stop_here == true) {
+        DEBUG_OPT(dump_dependent_insn_pair(check_lir, this_lir "HOIST STOP"));
         break;
      }
    }
 
    /*
-     * Reached the top - use headLIR as the dependent marker as all labels
+     * Reached the top - use head_lir as the dependent marker as all labels
     * are barriers.
     */
-    if (stopHere == false && nextSlot < MAX_HOIST_DISTANCE) {
-      prevInstList[nextSlot++] = headLIR;
+    if (stop_here == false && next_slot < MAX_HOIST_DISTANCE) {
+      prev_inst_list[next_slot++] = head_lir;
    }
 
    /*
     * At least one independent instruction is found. Scan in the reversed
     * direction to find a beneficial slot.
     */
-    if (nextSlot >= 2) {
-      int firstSlot = nextSlot - 2;
+    if (next_slot >= 2) {
+      int first_slot = next_slot - 2;
       int slot;
-      LIR* depLIR = prevInstList[nextSlot-1];
+      LIR* dep_lir = prev_inst_list[next_slot-1];
       /* If there is ld-ld dependency, wait LDLD_DISTANCE cycles */
-      if (!isPseudoOpcode(depLIR->opcode) &&
-          (GetTargetInstFlags(depLIR->opcode) & IS_LOAD)) {
-        firstSlot -= LDLD_DISTANCE;
+      if (!is_pseudo_opcode(dep_lir->opcode) &&
+          (GetTargetInstFlags(dep_lir->opcode) & IS_LOAD)) {
+        first_slot -= LDLD_DISTANCE;
      }
      /*
-       * Make sure we check slot >= 0 since firstSlot may be negative
+       * Make sure we check slot >= 0 since first_slot may be negative
       * when the loop is first entered.
       */
-      for (slot = firstSlot; slot >= 0; slot--) {
-        LIR* curLIR = prevInstList[slot];
-        LIR* prevLIR = prevInstList[slot+1];
+      for (slot = first_slot; slot >= 0; slot--) {
+        LIR* cur_lir = prev_inst_list[slot];
+        LIR* prev_lir = prev_inst_list[slot+1];
 
         /* Check the highest instruction */
-        if (prevLIR->defMask == ENCODE_ALL) {
+        if (prev_lir->def_mask == ENCODE_ALL) {
          /*
           * If the first instruction is a load, don't hoist anything
           * above it since it is unlikely to be beneficial.
           */
-          if (GetTargetInstFlags(curLIR->opcode) & IS_LOAD) continue;
+          if (GetTargetInstFlags(cur_lir->opcode) & IS_LOAD) continue;
          /*
           * If the remaining number of slots is less than LD_LATENCY,
           * insert the hoisted load here.
@@ -408,9 +408,9 @@ void ApplyLoadHoisting(CompilationUnit* cUnit, LIR* headLIR, LIR* tailLIR)
        }
 
         // Don't look across a barrier label
-        if ((prevLIR->opcode == kPseudoTargetLabel) ||
-            (prevLIR->opcode == kPseudoSafepointPC) ||
-            (prevLIR->opcode == kPseudoBarrier)) {
+        if ((prev_lir->opcode == kPseudoTargetLabel) ||
+            (prev_lir->opcode == kPseudoSafepointPC) ||
+            (prev_lir->opcode == kPseudoBarrier)) {
           break;
        }
 
@@ -418,37 +418,37 @@ void ApplyLoadHoisting(CompilationUnit* cUnit, LIR* headLIR, LIR* tailLIR)
        /*
         * Try to find two instructions with load/use dependency until
         * the remaining instructions are less than LD_LATENCY.
         */
-        bool prevIsLoad = isPseudoOpcode(prevLIR->opcode) ? false :
-                          (GetTargetInstFlags(prevLIR->opcode) & IS_LOAD);
-        if (((curLIR->useMask & prevLIR->defMask) && prevIsLoad) || (slot < LD_LATENCY)) {
+        bool prev_is_load = is_pseudo_opcode(prev_lir->opcode) ? false :
+                            (GetTargetInstFlags(prev_lir->opcode) & IS_LOAD);
+        if (((cur_lir->use_mask & prev_lir->def_mask) && prev_is_load) || (slot < LD_LATENCY)) {
           break;
        }
      }
 
       /* Found a slot to hoist to */
       if (slot >= 0) {
-        LIR* curLIR = prevInstList[slot];
-        LIR* newLoadLIR = static_cast<LIR*>(NewMem(cUnit, sizeof(LIR), true, kAllocLIR));
-        *newLoadLIR = *thisLIR;
+        LIR* cur_lir = prev_inst_list[slot];
+        LIR* new_load_lir = static_cast<LIR*>(NewMem(cu, sizeof(LIR), true, kAllocLIR));
+        *new_load_lir = *this_lir;
        /*
-         * Insertion is guaranteed to succeed since checkLIR
+         * Insertion is guaranteed to succeed since check_lir
         * is never the first LIR on the list
         */
-        InsertLIRBefore(curLIR, newLoadLIR);
-        thisLIR->flags.isNop = true;
+        InsertLIRBefore(cur_lir, new_load_lir);
+        this_lir->flags.is_nop = true;
      }
    }
  }
 }
 
-void ApplyLocalOptimizations(CompilationUnit* cUnit, LIR* headLIR,
-                             LIR* tailLIR)
+void ApplyLocalOptimizations(CompilationUnit* cu, LIR* head_lir,
+                             LIR* tail_lir)
 {
-  if (!(cUnit->disableOpt & (1 << kLoadStoreElimination))) {
-    ApplyLoadStoreElimination(cUnit, headLIR, tailLIR);
+  if (!(cu->disable_opt & (1 << kLoadStoreElimination))) {
+    ApplyLoadStoreElimination(cu, head_lir, tail_lir);
  }
-  if (!(cUnit->disableOpt & (1 << kLoadHoisting))) {
-    ApplyLoadHoisting(cUnit, headLIR, tailLIR);
+  if (!(cu->disable_opt & (1 << kLoadHoisting))) {
+    ApplyLoadHoisting(cu, head_lir, tail_lir);
  }
 }
 
@@ -457,34 +457,34 @@ void ApplyLocalOptimizations(CompilationUnit* cUnit, LIR* headLIR,
  * Note: new redundant branches may be inserted later, and we'll
  * use a check in final instruction assembly to nop those out.
  */
-void RemoveRedundantBranches(CompilationUnit* cUnit)
+void RemoveRedundantBranches(CompilationUnit* cu)
 {
-  LIR* thisLIR;
+  LIR* this_lir;
 
-  for (thisLIR = cUnit->firstLIRInsn; thisLIR != cUnit->lastLIRInsn; thisLIR = NEXT_LIR(thisLIR)) {
+  for (this_lir = cu->first_lir_insn; this_lir != cu->last_lir_insn; this_lir = NEXT_LIR(this_lir)) {
 
     /* Branch to the next instruction */
-    if (BranchUnconditional(thisLIR)) {
-      LIR* nextLIR = thisLIR;
+    if (BranchUnconditional(this_lir)) {
+      LIR* next_lir = this_lir;
 
       while (true) {
-        nextLIR = NEXT_LIR(nextLIR);
+        next_lir = NEXT_LIR(next_lir);
 
        /*
         * Is the branch target the next instruction?
         */
-        if (nextLIR == thisLIR->target) {
-          thisLIR->flags.isNop = true;
+        if (next_lir == this_lir->target) {
+          this_lir->flags.is_nop = true;
           break;
        }
 
        /*
         * Found real useful stuff between the branch and the target.
-        * Need to explicitly check the lastLIRInsn here because it
+        * Need to explicitly check the last_lir_insn here because it
         * might be the last real instruction.
         */
-        if (!isPseudoOpcode(nextLIR->opcode) ||
-            (nextLIR == cUnit->lastLIRInsn))
+        if (!is_pseudo_opcode(next_lir->opcode) ||
+            (next_lir == cu->last_lir_insn))
           break;
      }
    }
-- 
cgit v1.2.3-59-g8ed1b
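
Aside: a minimal, self-contained sketch of the use/def bitmask dependency test
that the CHECK_REG_DEP macro in this patch implements. The struct, function
name, and register numbers below are hypothetical stand-ins for ART's LIR
bookkeeping and are not part of the commit; only the mask logic mirrors the
macro.

    #include <cstdint>
    #include <cstdio>

    // Each instruction summarizes the resources it reads (use_mask) and
    // writes (def_mask) as bit sets, one bit per register or resource class.
    struct InsnMasks {
      uint64_t use_mask;
      uint64_t def_mask;
    };

    // Mirrors CHECK_REG_DEP(use, def, check): given the use/def masks of a
    // candidate instruction and a later instruction 'check', report any
    // RAW, WAR, or WAW hazard between them.
    static bool HasRegDependency(uint64_t use, uint64_t def, const InsnMasks* check) {
      return ((def & check->use_mask) ||         // RAW: we write what check reads
              ((use | def) & check->def_mask));  // WAR/WAW: check writes what we touch
    }

    int main() {
      // Candidate load: reads r5 (bit 5), defines r0 (bit 0).
      uint64_t use = UINT64_C(1) << 5;
      uint64_t def = UINT64_C(1) << 0;
      // A later instruction that writes r5 creates a WAR hazard, so the
      // candidate cannot be sunk or hoisted past it.
      InsnMasks later = {/*use_mask=*/0, /*def_mask=*/UINT64_C(1) << 5};
      printf("%d\n", HasRegDependency(use, def, &later));  // prints 1
      return 0;
    }

Dependence checking in ApplyLoadStoreElimination and ApplyLoadHoisting is built
on intersecting these masks; the finer-grained alias_info comparisons seen
above refine only the memory (ENCODE_MEM) cases.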