Quick Compiler: Shoot the Camel
Another step towards moving the Quick Compiler from the old
Dalvik coding style to ART's coding style. In this CL, camel-case
locals, struct fields, and arguments are converted to
lower-case-with-underscore names. Most of the renames were
formulaic, but I also took the opportunity to change the old
"printMe" to the more conventional "verbose", and to shorten
"cUnit" to "cu".
No logic changes.
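For reference, the formulaic renames follow the usual camel-case to
lower-case-with-underscores rule: break before an upper-case letter
that follows a lower-case one, then fold everything to lower case.
The helper below is only an illustrative sketch of that rule; it is
not part of this CL, and a few names were renamed by hand instead
(e.g. "printMe" -> "verbose", "cUnit" -> "cu"):

  #include <cctype>
  #include <iostream>
  #include <string>

  // Sketch of the mechanical rename rule applied in this CL.
  std::string CamelToSnake(const std::string& name) {
    std::string out;
    for (size_t i = 0; i < name.size(); ++i) {
      unsigned char c = static_cast<unsigned char>(name[i]);
      // Start a new word only at a lower-to-upper transition,
      // so acronym runs like "PC" stay a single word.
      if (std::isupper(c) && i > 0 &&
          std::islower(static_cast<unsigned char>(name[i - 1]))) {
        out += '_';
      }
      out += static_cast<char>(std::tolower(c));
    }
    return out;
  }

  int main() {
    // Examples drawn from the renames below.
    std::cout << CamelToSnake("fieldLoc") << "\n";   // field_loc
    std::cout << CamelToSnake("startAddr") << "\n";  // start_addr
    std::cout << CamelToSnake("curPC") << "\n";      // cur_pc
    return 0;
  }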
Change-Id: I64b69b28a8357d5cc0abc1dc975954c91abd9b45
diff --git a/src/compiler/codegen/arm/arm_lir.h b/src/compiler/codegen/arm/arm_lir.h
index bc3277f..7955b1b 100644
--- a/src/compiler/codegen/arm/arm_lir.h
+++ b/src/compiler/codegen/arm/arm_lir.h
@@ -89,7 +89,7 @@
* | OUT[outs-2] |
* | . |
* | OUT[0] |
- * | curMethod* | <<== sp w/ 16-byte alignment
+ * | cur_method* | <<== sp w/ 16-byte alignment
* +========================+
*/
@@ -568,7 +568,7 @@
};
/* Bit flags describing the behavior of each native opcode */
-/* Instruction assembly fieldLoc kind */
+/* Instruction assembly field_loc kind */
enum ArmEncodingKind {
kFmtUnused,
kFmtBitBlt, /* Bit string using end/start */
@@ -594,7 +594,7 @@
ArmEncodingKind kind;
int end; /* end for kFmtBitBlt, 1-bit slice end for FP regs */
int start; /* start for kFmtBitBlt, 4-bit slice end for FP regs */
- } fieldLoc[4];
+ } field_loc[4];
ArmOpcode opcode;
uint64_t flags;
const char* name;
diff --git a/src/compiler/codegen/arm/assemble_arm.cc b/src/compiler/codegen/arm/assemble_arm.cc
index 8e7a07b..f89915b 100644
--- a/src/compiler/codegen/arm/assemble_arm.cc
+++ b/src/compiler/codegen/arm/assemble_arm.cc
@@ -987,24 +987,24 @@
* discover that pc-relative displacements may not fit the selected
* instruction.
*/
-AssemblerStatus AssembleInstructions(CompilationUnit* cUnit,
- uintptr_t startAddr)
+AssemblerStatus AssembleInstructions(CompilationUnit* cu,
+ uintptr_t start_addr)
{
LIR* lir;
AssemblerStatus res = kSuccess; // Assume success
- for (lir = cUnit->firstLIRInsn; lir; lir = NEXT_LIR(lir)) {
+ for (lir = cu->first_lir_insn; lir; lir = NEXT_LIR(lir)) {
if (lir->opcode < 0) {
/* 1 means padding is needed */
if ((lir->opcode == kPseudoPseudoAlign4) && (lir->operands[0] == 1)) {
- cUnit->codeBuffer.push_back(PADDING_MOV_R5_R5 & 0xFF);
- cUnit->codeBuffer.push_back((PADDING_MOV_R5_R5 >> 8) & 0xFF);
+ cu->code_buffer.push_back(PADDING_MOV_R5_R5 & 0xFF);
+ cu->code_buffer.push_back((PADDING_MOV_R5_R5 >> 8) & 0xFF);
}
continue;
}
- if (lir->flags.isNop) {
+ if (lir->flags.is_nop) {
continue;
}
@@ -1031,9 +1031,9 @@
* However, if the load displacement exceeds the limit,
* we revert to a 2-instruction materialization sequence.
*/
- LIR *lirTarget = lir->target;
+ LIR *lir_target = lir->target;
uintptr_t pc = (lir->offset + 4) & ~3;
- uintptr_t target = lirTarget->offset;
+ uintptr_t target = lir_target->offset;
int delta = target - pc;
if (delta & 0x3) {
LOG(FATAL) << "PC-rel offset not multiple of 4: " << delta;
@@ -1053,22 +1053,22 @@
* vldrs/vldrd we include REG_DEF_LR in the resource
* masks for these instructions.
*/
- int baseReg = (lir->opcode == kThumb2LdrPcRel12) ?
+ int base_reg = (lir->opcode == kThumb2LdrPcRel12) ?
lir->operands[0] : rARM_LR;
// Add new Adr to generate the address
- LIR* newAdr = RawLIR(cUnit, lir->dalvikOffset, kThumb2Adr,
- baseReg, 0, 0, 0, 0, lir->target);
- InsertLIRBefore(lir, newAdr);
+ LIR* new_adr = RawLIR(cu, lir->dalvik_offset, kThumb2Adr,
+ base_reg, 0, 0, 0, 0, lir->target);
+ InsertLIRBefore(lir, new_adr);
// Convert to normal load
if (lir->opcode == kThumb2LdrPcRel12) {
lir->opcode = kThumb2LdrRRI12;
}
// Change the load to be relative to the new Adr base
- lir->operands[1] = baseReg;
+ lir->operands[1] = base_reg;
lir->operands[2] = 0;
- SetupResourceMasks(cUnit, lir);
+ SetupResourceMasks(cu, lir);
res = kRetryAll;
} else {
if ((lir->opcode == kThumb2Vldrs) ||
@@ -1080,26 +1080,26 @@
}
}
} else if (lir->opcode == kThumb2Cbnz || lir->opcode == kThumb2Cbz) {
- LIR *targetLIR = lir->target;
+ LIR *target_lir = lir->target;
uintptr_t pc = lir->offset + 4;
- uintptr_t target = targetLIR->offset;
+ uintptr_t target = target_lir->offset;
int delta = target - pc;
if (delta > 126 || delta < 0) {
/*
* Convert to cmp rx,#0 / b[eq/ne] tgt pair
* Make new branch instruction and insert after
*/
- LIR* newInst =
- RawLIR(cUnit, lir->dalvikOffset, kThumbBCond, 0,
+ LIR* new_inst =
+ RawLIR(cu, lir->dalvik_offset, kThumbBCond, 0,
(lir->opcode == kThumb2Cbz) ? kArmCondEq : kArmCondNe,
0, 0, 0, lir->target);
- InsertLIRAfter(lir, newInst);
+ InsertLIRAfter(lir, new_inst);
/* Convert the cb[n]z to a cmp rx, #0 */
lir->opcode = kThumbCmpRI8;
/* operand[0] is src1 in both cb[n]z & CmpRI8 */
lir->operands[1] = 0;
lir->target = 0;
- SetupResourceMasks(cUnit, lir);
+ SetupResourceMasks(cu, lir);
res = kRetryAll;
} else {
lir->operands[1] = delta >> 1;
@@ -1124,121 +1124,121 @@
}
}
lir->operands[0] = reg;
- SetupResourceMasks(cUnit, lir);
+ SetupResourceMasks(cu, lir);
res = kRetryAll;
}
} else if (lir->opcode == kThumbBCond || lir->opcode == kThumb2BCond) {
- LIR *targetLIR = lir->target;
+ LIR *target_lir = lir->target;
int delta = 0;
- DCHECK(targetLIR);
+ DCHECK(target_lir);
uintptr_t pc = lir->offset + 4;
- uintptr_t target = targetLIR->offset;
+ uintptr_t target = target_lir->offset;
delta = target - pc;
if ((lir->opcode == kThumbBCond) && (delta > 254 || delta < -256)) {
lir->opcode = kThumb2BCond;
- SetupResourceMasks(cUnit, lir);
+ SetupResourceMasks(cu, lir);
res = kRetryAll;
}
lir->operands[0] = delta >> 1;
} else if (lir->opcode == kThumb2BUncond) {
- LIR *targetLIR = lir->target;
+ LIR *target_lir = lir->target;
uintptr_t pc = lir->offset + 4;
- uintptr_t target = targetLIR->offset;
+ uintptr_t target = target_lir->offset;
int delta = target - pc;
lir->operands[0] = delta >> 1;
- if (!(cUnit->disableOpt & (1 << kSafeOptimizations)) &&
+ if (!(cu->disable_opt & (1 << kSafeOptimizations)) &&
lir->operands[0] == 0) { // Useless branch
- lir->flags.isNop = true;
+ lir->flags.is_nop = true;
res = kRetryAll;
}
} else if (lir->opcode == kThumbBUncond) {
- LIR *targetLIR = lir->target;
+ LIR *target_lir = lir->target;
uintptr_t pc = lir->offset + 4;
- uintptr_t target = targetLIR->offset;
+ uintptr_t target = target_lir->offset;
int delta = target - pc;
if (delta > 2046 || delta < -2048) {
// Convert to Thumb2BCond w/ kArmCondAl
lir->opcode = kThumb2BUncond;
lir->operands[0] = 0;
- SetupResourceMasks(cUnit, lir);
+ SetupResourceMasks(cu, lir);
res = kRetryAll;
} else {
lir->operands[0] = delta >> 1;
- if (!(cUnit->disableOpt & (1 << kSafeOptimizations)) &&
+ if (!(cu->disable_opt & (1 << kSafeOptimizations)) &&
lir->operands[0] == -1) { // Useless branch
- lir->flags.isNop = true;
+ lir->flags.is_nop = true;
res = kRetryAll;
}
}
} else if (lir->opcode == kThumbBlx1) {
DCHECK(NEXT_LIR(lir)->opcode == kThumbBlx2);
- /* curPC is Thumb */
- uintptr_t curPC = (startAddr + lir->offset + 4) & ~3;
+ /* cur_pc is Thumb */
+ uintptr_t cur_pc = (start_addr + lir->offset + 4) & ~3;
uintptr_t target = lir->operands[1];
/* Match bit[1] in target with base */
- if (curPC & 0x2) {
+ if (cur_pc & 0x2) {
target |= 0x2;
}
- int delta = target - curPC;
+ int delta = target - cur_pc;
DCHECK((delta >= -(1<<22)) && (delta <= ((1<<22)-2)));
lir->operands[0] = (delta >> 12) & 0x7ff;
NEXT_LIR(lir)->operands[0] = (delta>> 1) & 0x7ff;
} else if (lir->opcode == kThumbBl1) {
DCHECK(NEXT_LIR(lir)->opcode == kThumbBl2);
- /* Both curPC and target are Thumb */
- uintptr_t curPC = startAddr + lir->offset + 4;
+ /* Both cur_pc and target are Thumb */
+ uintptr_t cur_pc = start_addr + lir->offset + 4;
uintptr_t target = lir->operands[1];
- int delta = target - curPC;
+ int delta = target - cur_pc;
DCHECK((delta >= -(1<<22)) && (delta <= ((1<<22)-2)));
lir->operands[0] = (delta >> 12) & 0x7ff;
NEXT_LIR(lir)->operands[0] = (delta>> 1) & 0x7ff;
} else if (lir->opcode == kThumb2Adr) {
- SwitchTable *tabRec = reinterpret_cast<SwitchTable*>(lir->operands[2]);
+ SwitchTable *tab_rec = reinterpret_cast<SwitchTable*>(lir->operands[2]);
LIR* target = lir->target;
- int targetDisp = tabRec ? tabRec->offset
+ int target_disp = tab_rec ? tab_rec->offset
: target->offset;
- int disp = targetDisp - ((lir->offset + 4) & ~3);
+ int disp = target_disp - ((lir->offset + 4) & ~3);
if (disp < 4096) {
lir->operands[1] = disp;
} else {
// convert to ldimm16l, ldimm16h, add tgt, pc, operands[0]
- LIR *newMov16L =
- RawLIR(cUnit, lir->dalvikOffset, kThumb2MovImm16LST,
+ LIR *new_mov16L =
+ RawLIR(cu, lir->dalvik_offset, kThumb2MovImm16LST,
lir->operands[0], 0, reinterpret_cast<uintptr_t>(lir),
- reinterpret_cast<uintptr_t>(tabRec), 0, lir->target);
- InsertLIRBefore(lir, newMov16L);
- LIR *newMov16H =
- RawLIR(cUnit, lir->dalvikOffset, kThumb2MovImm16HST,
+ reinterpret_cast<uintptr_t>(tab_rec), 0, lir->target);
+ InsertLIRBefore(lir, new_mov16L);
+ LIR *new_mov16H =
+ RawLIR(cu, lir->dalvik_offset, kThumb2MovImm16HST,
lir->operands[0], 0, reinterpret_cast<uintptr_t>(lir),
- reinterpret_cast<uintptr_t>(tabRec), 0, lir->target);
- InsertLIRBefore(lir, newMov16H);
+ reinterpret_cast<uintptr_t>(tab_rec), 0, lir->target);
+ InsertLIRBefore(lir, new_mov16H);
lir->opcode = kThumb2AddRRR;
lir->operands[1] = rARM_PC;
lir->operands[2] = lir->operands[0];
- SetupResourceMasks(cUnit, lir);
+ SetupResourceMasks(cu, lir);
res = kRetryAll;
}
} else if (lir->opcode == kThumb2MovImm16LST) {
- // operands[1] should hold disp, [2] has add, [3] has tabRec
+ // operands[1] should hold disp, [2] has add, [3] has tab_rec
LIR *addPCInst = reinterpret_cast<LIR*>(lir->operands[2]);
- SwitchTable *tabRec = reinterpret_cast<SwitchTable*>(lir->operands[3]);
- // If tabRec is null, this is a literal load. Use target
+ SwitchTable *tab_rec = reinterpret_cast<SwitchTable*>(lir->operands[3]);
+ // If tab_rec is null, this is a literal load. Use target
LIR* target = lir->target;
- int targetDisp = tabRec ? tabRec->offset : target->offset;
- lir->operands[1] = (targetDisp - (addPCInst->offset + 4)) & 0xffff;
+ int target_disp = tab_rec ? tab_rec->offset : target->offset;
+ lir->operands[1] = (target_disp - (addPCInst->offset + 4)) & 0xffff;
} else if (lir->opcode == kThumb2MovImm16HST) {
- // operands[1] should hold disp, [2] has add, [3] has tabRec
+ // operands[1] should hold disp, [2] has add, [3] has tab_rec
LIR *addPCInst = reinterpret_cast<LIR*>(lir->operands[2]);
- SwitchTable *tabRec = reinterpret_cast<SwitchTable*>(lir->operands[3]);
- // If tabRec is null, this is a literal load. Use target
+ SwitchTable *tab_rec = reinterpret_cast<SwitchTable*>(lir->operands[3]);
+ // If tab_rec is null, this is a literal load. Use target
LIR* target = lir->target;
- int targetDisp = tabRec ? tabRec->offset : target->offset;
+ int target_disp = tab_rec ? tab_rec->offset : target->offset;
lir->operands[1] =
- ((targetDisp - (addPCInst->offset + 4)) >> 16) & 0xffff;
+ ((target_disp - (addPCInst->offset + 4)) >> 16) & 0xffff;
}
}
/*
@@ -1256,12 +1256,12 @@
uint32_t operand;
uint32_t value;
operand = lir->operands[i];
- switch (encoder->fieldLoc[i].kind) {
+ switch (encoder->field_loc[i].kind) {
case kFmtUnused:
break;
case kFmtFPImm:
- value = ((operand & 0xF0) >> 4) << encoder->fieldLoc[i].end;
- value |= (operand & 0x0F) << encoder->fieldLoc[i].start;
+ value = ((operand & 0xF0) >> 4) << encoder->field_loc[i].end;
+ value |= (operand & 0x0F) << encoder->field_loc[i].start;
bits |= value;
break;
case kFmtBrOffset:
@@ -1297,27 +1297,27 @@
bits |= value;
break;
case kFmtBitBlt:
- value = (operand << encoder->fieldLoc[i].start) &
- ((1 << (encoder->fieldLoc[i].end + 1)) - 1);
+ value = (operand << encoder->field_loc[i].start) &
+ ((1 << (encoder->field_loc[i].end + 1)) - 1);
bits |= value;
break;
case kFmtDfp: {
DCHECK(ARM_DOUBLEREG(operand));
DCHECK_EQ((operand & 0x1), 0U);
- int regName = (operand & ARM_FP_REG_MASK) >> 1;
+ int reg_name = (operand & ARM_FP_REG_MASK) >> 1;
/* Snag the 1-bit slice and position it */
- value = ((regName & 0x10) >> 4) << encoder->fieldLoc[i].end;
+ value = ((reg_name & 0x10) >> 4) << encoder->field_loc[i].end;
/* Extract and position the 4-bit slice */
- value |= (regName & 0x0f) << encoder->fieldLoc[i].start;
+ value |= (reg_name & 0x0f) << encoder->field_loc[i].start;
bits |= value;
break;
}
case kFmtSfp:
DCHECK(ARM_SINGLEREG(operand));
/* Snag the 1-bit slice and position it */
- value = (operand & 0x1) << encoder->fieldLoc[i].end;
+ value = (operand & 0x1) << encoder->field_loc[i].end;
/* Extract and position the 4-bit slice */
- value |= ((operand & 0x1e) >> 1) << encoder->fieldLoc[i].start;
+ value |= ((operand & 0x1e) >> 1) << encoder->field_loc[i].start;
bits |= value;
break;
case kFmtImm12:
@@ -1348,15 +1348,15 @@
}
break;
default:
- LOG(FATAL) << "Bad fmt:" << encoder->fieldLoc[i].kind;
+ LOG(FATAL) << "Bad fmt:" << encoder->field_loc[i].kind;
}
}
if (encoder->size == 4) {
- cUnit->codeBuffer.push_back((bits >> 16) & 0xff);
- cUnit->codeBuffer.push_back((bits >> 24) & 0xff);
+ cu->code_buffer.push_back((bits >> 16) & 0xff);
+ cu->code_buffer.push_back((bits >> 24) & 0xff);
}
- cUnit->codeBuffer.push_back(bits & 0xff);
- cUnit->codeBuffer.push_back((bits >> 8) & 0xff);
+ cu->code_buffer.push_back(bits & 0xff);
+ cu->code_buffer.push_back((bits >> 8) & 0xff);
}
return res;
}
@@ -1369,23 +1369,23 @@
/*
* Target-dependent offset assignment.
*/
-int AssignInsnOffsets(CompilationUnit* cUnit)
+int AssignInsnOffsets(CompilationUnit* cu)
{
- LIR* armLIR;
+ LIR* arm_lir;
int offset = 0;
- for (armLIR = cUnit->firstLIRInsn; armLIR; armLIR = NEXT_LIR(armLIR)) {
- armLIR->offset = offset;
- if (armLIR->opcode >= 0) {
- if (!armLIR->flags.isNop) {
- offset += armLIR->flags.size;
+ for (arm_lir = cu->first_lir_insn; arm_lir; arm_lir = NEXT_LIR(arm_lir)) {
+ arm_lir->offset = offset;
+ if (arm_lir->opcode >= 0) {
+ if (!arm_lir->flags.is_nop) {
+ offset += arm_lir->flags.size;
}
- } else if (armLIR->opcode == kPseudoPseudoAlign4) {
+ } else if (arm_lir->opcode == kPseudoPseudoAlign4) {
if (offset & 0x2) {
offset += 2;
- armLIR->operands[0] = 1;
+ arm_lir->operands[0] = 1;
} else {
- armLIR->operands[0] = 0;
+ arm_lir->operands[0] = 0;
}
}
/* Pseudo opcodes don't consume space */
diff --git a/src/compiler/codegen/arm/call_arm.cc b/src/compiler/codegen/arm/call_arm.cc
index 98137ad..775b25d 100644
--- a/src/compiler/codegen/arm/call_arm.cc
+++ b/src/compiler/codegen/arm/call_arm.cc
@@ -26,10 +26,10 @@
/* Return the position of an ssa name within the argument list */
-static int InPosition(CompilationUnit* cUnit, int sReg)
+static int InPosition(CompilationUnit* cu, int s_reg)
{
- int vReg = SRegToVReg(cUnit, sReg);
- return vReg - cUnit->numRegs;
+ int v_reg = SRegToVReg(cu, s_reg);
+ return v_reg - cu->num_regs;
}
/*
@@ -37,23 +37,23 @@
* there. NOTE: all live arg registers must be locked prior to this call
* to avoid having them allocated as a temp by downstream utilities.
*/
-RegLocation ArgLoc(CompilationUnit* cUnit, RegLocation loc)
+RegLocation ArgLoc(CompilationUnit* cu, RegLocation loc)
{
- int argNum = InPosition(cUnit, loc.sRegLow);
+ int arg_num = InPosition(cu, loc.s_reg_low);
if (loc.wide) {
- if (argNum == 2) {
+ if (arg_num == 2) {
// Bad case - half in register, half in frame. Just punt
loc.location = kLocInvalid;
- } else if (argNum < 2) {
- loc.lowReg = rARM_ARG1 + argNum;
- loc.highReg = loc.lowReg + 1;
+ } else if (arg_num < 2) {
+ loc.low_reg = rARM_ARG1 + arg_num;
+ loc.high_reg = loc.low_reg + 1;
loc.location = kLocPhysReg;
} else {
loc.location = kLocDalvikFrame;
}
} else {
- if (argNum < 3) {
- loc.lowReg = rARM_ARG1 + argNum;
+ if (arg_num < 3) {
+ loc.low_reg = rARM_ARG1 + arg_num;
loc.location = kLocPhysReg;
} else {
loc.location = kLocDalvikFrame;
@@ -67,15 +67,15 @@
* the frame, we can't use the normal LoadValue() because it assumed
* a proper frame - and we're frameless.
*/
-RegLocation LoadArg(CompilationUnit* cUnit, RegLocation loc)
+RegLocation LoadArg(CompilationUnit* cu, RegLocation loc)
{
if (loc.location == kLocDalvikFrame) {
- int start = (InPosition(cUnit, loc.sRegLow) + 1) * sizeof(uint32_t);
- loc.lowReg = AllocTemp(cUnit);
- LoadWordDisp(cUnit, rARM_SP, start, loc.lowReg);
+ int start = (InPosition(cu, loc.s_reg_low) + 1) * sizeof(uint32_t);
+ loc.low_reg = AllocTemp(cu);
+ LoadWordDisp(cu, rARM_SP, start, loc.low_reg);
if (loc.wide) {
- loc.highReg = AllocTemp(cUnit);
- LoadWordDisp(cUnit, rARM_SP, start + sizeof(uint32_t), loc.highReg);
+ loc.high_reg = AllocTemp(cu);
+ LoadWordDisp(cu, rARM_SP, start + sizeof(uint32_t), loc.high_reg);
}
loc.location = kLocPhysReg;
}
@@ -83,24 +83,24 @@
}
/* Lock any referenced arguments that arrive in registers */
-static void LockLiveArgs(CompilationUnit* cUnit, MIR* mir)
+static void LockLiveArgs(CompilationUnit* cu, MIR* mir)
{
- int firstIn = cUnit->numRegs;
- const int numArgRegs = 3; // TODO: generalize & move to RegUtil.cc
- for (int i = 0; i < mir->ssaRep->numUses; i++) {
- int vReg = SRegToVReg(cUnit, mir->ssaRep->uses[i]);
- int InPosition = vReg - firstIn;
- if (InPosition < numArgRegs) {
- LockTemp(cUnit, rARM_ARG1 + InPosition);
+ int first_in = cu->num_regs;
+ const int num_arg_regs = 3; // TODO: generalize & move to RegUtil.cc
+ for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
+ int v_reg = SRegToVReg(cu, mir->ssa_rep->uses[i]);
+ int InPosition = v_reg - first_in;
+ if (InPosition < num_arg_regs) {
+ LockTemp(cu, rARM_ARG1 + InPosition);
}
}
}
/* Find the next MIR, which may be in a following basic block */
-static MIR* GetNextMir(CompilationUnit* cUnit, BasicBlock** pBb, MIR* mir)
+static MIR* GetNextMir(CompilationUnit* cu, BasicBlock** p_bb, MIR* mir)
{
- BasicBlock* bb = *pBb;
- MIR* origMir = mir;
+ BasicBlock* bb = *p_bb;
+ MIR* orig_mir = mir;
while (bb != NULL) {
if (mir != NULL) {
mir = mir->next;
@@ -108,121 +108,121 @@
if (mir != NULL) {
return mir;
} else {
- bb = bb->fallThrough;
- *pBb = bb;
+ bb = bb->fall_through;
+ *p_bb = bb;
if (bb) {
- mir = bb->firstMIRInsn;
+ mir = bb->first_mir_insn;
if (mir != NULL) {
return mir;
}
}
}
}
- return origMir;
+ return orig_mir;
}
-/* Used for the "printMe" listing */
-void GenPrintLabel(CompilationUnit *cUnit, MIR* mir)
+/* Used for the "verbose" listing */
+void GenPrintLabel(CompilationUnit *cu, MIR* mir)
{
/* Mark the beginning of a Dalvik instruction for line tracking */
- char* instStr = cUnit->printMe ?
- GetDalvikDisassembly(cUnit, mir->dalvikInsn, "") : NULL;
- MarkBoundary(cUnit, mir->offset, instStr);
+ char* inst_str = cu->verbose ?
+ GetDalvikDisassembly(cu, mir->dalvikInsn, "") : NULL;
+ MarkBoundary(cu, mir->offset, inst_str);
/* Don't generate the SSA annotation unless verbose mode is on */
- if (cUnit->printMe && mir->ssaRep) {
- char* ssaString = GetSSAString(cUnit, mir->ssaRep);
- NewLIR1(cUnit, kPseudoSSARep, reinterpret_cast<uintptr_t>(ssaString));
+ if (cu->verbose && mir->ssa_rep) {
+ char* ssa_string = GetSSAString(cu, mir->ssa_rep);
+ NewLIR1(cu, kPseudoSSARep, reinterpret_cast<uintptr_t>(ssa_string));
}
}
-static MIR* SpecialIGet(CompilationUnit* cUnit, BasicBlock** bb, MIR* mir,
- OpSize size, bool longOrDouble, bool isObject)
+static MIR* SpecialIGet(CompilationUnit* cu, BasicBlock** bb, MIR* mir,
+ OpSize size, bool long_or_double, bool is_object)
{
- int fieldOffset;
- bool isVolatile;
- uint32_t fieldIdx = mir->dalvikInsn.vC;
- bool fastPath = FastInstance(cUnit, fieldIdx, fieldOffset, isVolatile, false);
- if (!fastPath || !(mir->optimizationFlags & MIR_IGNORE_NULL_CHECK)) {
+ int field_offset;
+ bool is_volatile;
+ uint32_t field_idx = mir->dalvikInsn.vC;
+ bool fast_path = FastInstance(cu, field_idx, field_offset, is_volatile, false);
+ if (!fast_path || !(mir->optimization_flags & MIR_IGNORE_NULL_CHECK)) {
return NULL;
}
- RegLocation rlObj = GetSrc(cUnit, mir, 0);
- LockLiveArgs(cUnit, mir);
- rlObj = ArgLoc(cUnit, rlObj);
- RegLocation rlDest;
- if (longOrDouble) {
- rlDest = GetReturnWide(cUnit, false);
+ RegLocation rl_obj = GetSrc(cu, mir, 0);
+ LockLiveArgs(cu, mir);
+ rl_obj = ArgLoc(cu, rl_obj);
+ RegLocation rl_dest;
+ if (long_or_double) {
+ rl_dest = GetReturnWide(cu, false);
} else {
- rlDest = GetReturn(cUnit, false);
+ rl_dest = GetReturn(cu, false);
}
// Point of no return - no aborts after this
- GenPrintLabel(cUnit, mir);
- rlObj = LoadArg(cUnit, rlObj);
- GenIGet(cUnit, fieldIdx, mir->optimizationFlags, size, rlDest, rlObj,
- longOrDouble, isObject);
- return GetNextMir(cUnit, bb, mir);
+ GenPrintLabel(cu, mir);
+ rl_obj = LoadArg(cu, rl_obj);
+ GenIGet(cu, field_idx, mir->optimization_flags, size, rl_dest, rl_obj,
+ long_or_double, is_object);
+ return GetNextMir(cu, bb, mir);
}
-static MIR* SpecialIPut(CompilationUnit* cUnit, BasicBlock** bb, MIR* mir,
- OpSize size, bool longOrDouble, bool isObject)
+static MIR* SpecialIPut(CompilationUnit* cu, BasicBlock** bb, MIR* mir,
+ OpSize size, bool long_or_double, bool is_object)
{
- int fieldOffset;
- bool isVolatile;
- uint32_t fieldIdx = mir->dalvikInsn.vC;
- bool fastPath = FastInstance(cUnit, fieldIdx, fieldOffset, isVolatile, false);
- if (!fastPath || !(mir->optimizationFlags & MIR_IGNORE_NULL_CHECK)) {
+ int field_offset;
+ bool is_volatile;
+ uint32_t field_idx = mir->dalvikInsn.vC;
+ bool fast_path = FastInstance(cu, field_idx, field_offset, is_volatile, false);
+ if (!fast_path || !(mir->optimization_flags & MIR_IGNORE_NULL_CHECK)) {
return NULL;
}
- RegLocation rlSrc;
- RegLocation rlObj;
- LockLiveArgs(cUnit, mir);
- if (longOrDouble) {
- rlSrc = GetSrcWide(cUnit, mir, 0);
- rlObj = GetSrc(cUnit, mir, 2);
+ RegLocation rl_src;
+ RegLocation rl_obj;
+ LockLiveArgs(cu, mir);
+ if (long_or_double) {
+ rl_src = GetSrcWide(cu, mir, 0);
+ rl_obj = GetSrc(cu, mir, 2);
} else {
- rlSrc = GetSrc(cUnit, mir, 0);
- rlObj = GetSrc(cUnit, mir, 1);
+ rl_src = GetSrc(cu, mir, 0);
+ rl_obj = GetSrc(cu, mir, 1);
}
- rlSrc = ArgLoc(cUnit, rlSrc);
- rlObj = ArgLoc(cUnit, rlObj);
+ rl_src = ArgLoc(cu, rl_src);
+ rl_obj = ArgLoc(cu, rl_obj);
// Reject if source is split across registers & frame
- if (rlObj.location == kLocInvalid) {
- ResetRegPool(cUnit);
+ if (rl_obj.location == kLocInvalid) {
+ ResetRegPool(cu);
return NULL;
}
// Point of no return - no aborts after this
- GenPrintLabel(cUnit, mir);
- rlObj = LoadArg(cUnit, rlObj);
- rlSrc = LoadArg(cUnit, rlSrc);
- GenIPut(cUnit, fieldIdx, mir->optimizationFlags, size, rlSrc, rlObj,
- longOrDouble, isObject);
- return GetNextMir(cUnit, bb, mir);
+ GenPrintLabel(cu, mir);
+ rl_obj = LoadArg(cu, rl_obj);
+ rl_src = LoadArg(cu, rl_src);
+ GenIPut(cu, field_idx, mir->optimization_flags, size, rl_src, rl_obj,
+ long_or_double, is_object);
+ return GetNextMir(cu, bb, mir);
}
-static MIR* SpecialIdentity(CompilationUnit* cUnit, MIR* mir)
+static MIR* SpecialIdentity(CompilationUnit* cu, MIR* mir)
{
- RegLocation rlSrc;
- RegLocation rlDest;
- bool wide = (mir->ssaRep->numUses == 2);
+ RegLocation rl_src;
+ RegLocation rl_dest;
+ bool wide = (mir->ssa_rep->num_uses == 2);
if (wide) {
- rlSrc = GetSrcWide(cUnit, mir, 0);
- rlDest = GetReturnWide(cUnit, false);
+ rl_src = GetSrcWide(cu, mir, 0);
+ rl_dest = GetReturnWide(cu, false);
} else {
- rlSrc = GetSrc(cUnit, mir, 0);
- rlDest = GetReturn(cUnit, false);
+ rl_src = GetSrc(cu, mir, 0);
+ rl_dest = GetReturn(cu, false);
}
- LockLiveArgs(cUnit, mir);
- rlSrc = ArgLoc(cUnit, rlSrc);
- if (rlSrc.location == kLocInvalid) {
- ResetRegPool(cUnit);
+ LockLiveArgs(cu, mir);
+ rl_src = ArgLoc(cu, rl_src);
+ if (rl_src.location == kLocInvalid) {
+ ResetRegPool(cu);
return NULL;
}
// Point of no return - no aborts after this
- GenPrintLabel(cUnit, mir);
- rlSrc = LoadArg(cUnit, rlSrc);
+ GenPrintLabel(cu, mir);
+ rl_src = LoadArg(cu, rl_src);
if (wide) {
- StoreValueWide(cUnit, rlDest, rlSrc);
+ StoreValueWide(cu, rl_dest, rl_src);
} else {
- StoreValue(cUnit, rlDest, rlSrc);
+ StoreValue(cu, rl_dest, rl_src);
}
return mir;
}
@@ -230,78 +230,78 @@
/*
* Special-case code generation for simple non-throwing leaf methods.
*/
-void GenSpecialCase(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir,
- SpecialCaseHandler specialCase)
+void GenSpecialCase(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
+ SpecialCaseHandler special_case)
{
- cUnit->currentDalvikOffset = mir->offset;
- MIR* nextMir = NULL;
- switch (specialCase) {
+ cu->current_dalvik_offset = mir->offset;
+ MIR* next_mir = NULL;
+ switch (special_case) {
case kNullMethod:
DCHECK(mir->dalvikInsn.opcode == Instruction::RETURN_VOID);
- nextMir = mir;
+ next_mir = mir;
break;
case kConstFunction:
- GenPrintLabel(cUnit, mir);
- LoadConstant(cUnit, rARM_RET0, mir->dalvikInsn.vB);
- nextMir = GetNextMir(cUnit, &bb, mir);
+ GenPrintLabel(cu, mir);
+ LoadConstant(cu, rARM_RET0, mir->dalvikInsn.vB);
+ next_mir = GetNextMir(cu, &bb, mir);
break;
case kIGet:
- nextMir = SpecialIGet(cUnit, &bb, mir, kWord, false, false);
+ next_mir = SpecialIGet(cu, &bb, mir, kWord, false, false);
break;
case kIGetBoolean:
case kIGetByte:
- nextMir = SpecialIGet(cUnit, &bb, mir, kUnsignedByte, false, false);
+ next_mir = SpecialIGet(cu, &bb, mir, kUnsignedByte, false, false);
break;
case kIGetObject:
- nextMir = SpecialIGet(cUnit, &bb, mir, kWord, false, true);
+ next_mir = SpecialIGet(cu, &bb, mir, kWord, false, true);
break;
case kIGetChar:
- nextMir = SpecialIGet(cUnit, &bb, mir, kUnsignedHalf, false, false);
+ next_mir = SpecialIGet(cu, &bb, mir, kUnsignedHalf, false, false);
break;
case kIGetShort:
- nextMir = SpecialIGet(cUnit, &bb, mir, kSignedHalf, false, false);
+ next_mir = SpecialIGet(cu, &bb, mir, kSignedHalf, false, false);
break;
case kIGetWide:
- nextMir = SpecialIGet(cUnit, &bb, mir, kLong, true, false);
+ next_mir = SpecialIGet(cu, &bb, mir, kLong, true, false);
break;
case kIPut:
- nextMir = SpecialIPut(cUnit, &bb, mir, kWord, false, false);
+ next_mir = SpecialIPut(cu, &bb, mir, kWord, false, false);
break;
case kIPutBoolean:
case kIPutByte:
- nextMir = SpecialIPut(cUnit, &bb, mir, kUnsignedByte, false, false);
+ next_mir = SpecialIPut(cu, &bb, mir, kUnsignedByte, false, false);
break;
case kIPutObject:
- nextMir = SpecialIPut(cUnit, &bb, mir, kWord, false, true);
+ next_mir = SpecialIPut(cu, &bb, mir, kWord, false, true);
break;
case kIPutChar:
- nextMir = SpecialIPut(cUnit, &bb, mir, kUnsignedHalf, false, false);
+ next_mir = SpecialIPut(cu, &bb, mir, kUnsignedHalf, false, false);
break;
case kIPutShort:
- nextMir = SpecialIPut(cUnit, &bb, mir, kSignedHalf, false, false);
+ next_mir = SpecialIPut(cu, &bb, mir, kSignedHalf, false, false);
break;
case kIPutWide:
- nextMir = SpecialIPut(cUnit, &bb, mir, kLong, true, false);
+ next_mir = SpecialIPut(cu, &bb, mir, kLong, true, false);
break;
case kIdentity:
- nextMir = SpecialIdentity(cUnit, mir);
+ next_mir = SpecialIdentity(cu, mir);
break;
default:
return;
}
- if (nextMir != NULL) {
- cUnit->currentDalvikOffset = nextMir->offset;
- if (specialCase != kIdentity) {
- GenPrintLabel(cUnit, nextMir);
+ if (next_mir != NULL) {
+ cu->current_dalvik_offset = next_mir->offset;
+ if (special_case != kIdentity) {
+ GenPrintLabel(cu, next_mir);
}
- NewLIR1(cUnit, kThumbBx, rARM_LR);
- cUnit->coreSpillMask = 0;
- cUnit->numCoreSpills = 0;
- cUnit->fpSpillMask = 0;
- cUnit->numFPSpills = 0;
- cUnit->frameSize = 0;
- cUnit->coreVmapTable.clear();
- cUnit->fpVmapTable.clear();
+ NewLIR1(cu, kThumbBx, rARM_LR);
+ cu->core_spill_mask = 0;
+ cu->num_core_spills = 0;
+ cu->fp_spill_mask = 0;
+ cu->num_fp_spills = 0;
+ cu->frame_size = 0;
+ cu->core_vmap_table.clear();
+ cu->fp_vmap_table.clear();
}
}
@@ -314,109 +314,109 @@
* The test loop will look something like:
*
* adr rBase, <table>
- * ldr rVal, [rARM_SP, vRegOff]
- * mov rIdx, #tableSize
+ * ldr r_val, [rARM_SP, v_reg_off]
+ * mov r_idx, #table_size
* lp:
- * ldmia rBase!, {rKey, rDisp}
- * sub rIdx, #1
- * cmp rVal, rKey
+ * ldmia rBase!, {r_key, r_disp}
+ * sub r_idx, #1
+ * cmp r_val, r_key
* ifeq
- * add rARM_PC, rDisp ; This is the branch from which we compute displacement
- * cbnz rIdx, lp
+ * add rARM_PC, r_disp ; This is the branch from which we compute displacement
+ * cbnz r_idx, lp
*/
-void GenSparseSwitch(CompilationUnit* cUnit, uint32_t tableOffset,
- RegLocation rlSrc)
+void GenSparseSwitch(CompilationUnit* cu, uint32_t table_offset,
+ RegLocation rl_src)
{
- const uint16_t* table = cUnit->insns + cUnit->currentDalvikOffset + tableOffset;
- if (cUnit->printMe) {
+ const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
+ if (cu->verbose) {
DumpSparseSwitchTable(table);
}
// Add the table to the list - we'll process it later
- SwitchTable *tabRec =
- static_cast<SwitchTable*>(NewMem(cUnit, sizeof(SwitchTable), true, kAllocData));
- tabRec->table = table;
- tabRec->vaddr = cUnit->currentDalvikOffset;
+ SwitchTable *tab_rec =
+ static_cast<SwitchTable*>(NewMem(cu, sizeof(SwitchTable), true, kAllocData));
+ tab_rec->table = table;
+ tab_rec->vaddr = cu->current_dalvik_offset;
int size = table[1];
- tabRec->targets = static_cast<LIR**>(NewMem(cUnit, size * sizeof(LIR*), true, kAllocLIR));
- InsertGrowableList(cUnit, &cUnit->switchTables, reinterpret_cast<uintptr_t>(tabRec));
+ tab_rec->targets = static_cast<LIR**>(NewMem(cu, size * sizeof(LIR*), true, kAllocLIR));
+ InsertGrowableList(cu, &cu->switch_tables, reinterpret_cast<uintptr_t>(tab_rec));
// Get the switch value
- rlSrc = LoadValue(cUnit, rlSrc, kCoreReg);
- int rBase = AllocTemp(cUnit);
+ rl_src = LoadValue(cu, rl_src, kCoreReg);
+ int rBase = AllocTemp(cu);
/* Allocate key and disp temps */
- int rKey = AllocTemp(cUnit);
- int rDisp = AllocTemp(cUnit);
- // Make sure rKey's register number is less than rDisp's number for ldmia
- if (rKey > rDisp) {
- int tmp = rDisp;
- rDisp = rKey;
- rKey = tmp;
+ int r_key = AllocTemp(cu);
+ int r_disp = AllocTemp(cu);
+ // Make sure r_key's register number is less than r_disp's number for ldmia
+ if (r_key > r_disp) {
+ int tmp = r_disp;
+ r_disp = r_key;
+ r_key = tmp;
}
// Materialize a pointer to the switch table
- NewLIR3(cUnit, kThumb2Adr, rBase, 0, reinterpret_cast<uintptr_t>(tabRec));
- // Set up rIdx
- int rIdx = AllocTemp(cUnit);
- LoadConstant(cUnit, rIdx, size);
+ NewLIR3(cu, kThumb2Adr, rBase, 0, reinterpret_cast<uintptr_t>(tab_rec));
+ // Set up r_idx
+ int r_idx = AllocTemp(cu);
+ LoadConstant(cu, r_idx, size);
// Establish loop branch target
- LIR* target = NewLIR0(cUnit, kPseudoTargetLabel);
+ LIR* target = NewLIR0(cu, kPseudoTargetLabel);
// Load next key/disp
- NewLIR2(cUnit, kThumb2LdmiaWB, rBase, (1 << rKey) | (1 << rDisp));
- OpRegReg(cUnit, kOpCmp, rKey, rlSrc.lowReg);
+ NewLIR2(cu, kThumb2LdmiaWB, rBase, (1 << r_key) | (1 << r_disp));
+ OpRegReg(cu, kOpCmp, r_key, rl_src.low_reg);
// Go if match. NOTE: No instruction set switch here - must stay Thumb2
- OpIT(cUnit, kArmCondEq, "");
- LIR* switchBranch = NewLIR1(cUnit, kThumb2AddPCR, rDisp);
- tabRec->anchor = switchBranch;
+ OpIT(cu, kArmCondEq, "");
+ LIR* switch_branch = NewLIR1(cu, kThumb2AddPCR, r_disp);
+ tab_rec->anchor = switch_branch;
// Needs to use setflags encoding here
- NewLIR3(cUnit, kThumb2SubsRRI12, rIdx, rIdx, 1);
- OpCondBranch(cUnit, kCondNe, target);
+ NewLIR3(cu, kThumb2SubsRRI12, r_idx, r_idx, 1);
+ OpCondBranch(cu, kCondNe, target);
}
-void GenPackedSwitch(CompilationUnit* cUnit, uint32_t tableOffset,
- RegLocation rlSrc)
+void GenPackedSwitch(CompilationUnit* cu, uint32_t table_offset,
+ RegLocation rl_src)
{
- const uint16_t* table = cUnit->insns + cUnit->currentDalvikOffset + tableOffset;
- if (cUnit->printMe) {
+ const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
+ if (cu->verbose) {
DumpPackedSwitchTable(table);
}
// Add the table to the list - we'll process it later
- SwitchTable *tabRec =
- static_cast<SwitchTable*>(NewMem(cUnit, sizeof(SwitchTable), true, kAllocData));
- tabRec->table = table;
- tabRec->vaddr = cUnit->currentDalvikOffset;
+ SwitchTable *tab_rec =
+ static_cast<SwitchTable*>(NewMem(cu, sizeof(SwitchTable), true, kAllocData));
+ tab_rec->table = table;
+ tab_rec->vaddr = cu->current_dalvik_offset;
int size = table[1];
- tabRec->targets = static_cast<LIR**>(NewMem(cUnit, size * sizeof(LIR*), true, kAllocLIR));
- InsertGrowableList(cUnit, &cUnit->switchTables, reinterpret_cast<uintptr_t>(tabRec));
+ tab_rec->targets = static_cast<LIR**>(NewMem(cu, size * sizeof(LIR*), true, kAllocLIR));
+ InsertGrowableList(cu, &cu->switch_tables, reinterpret_cast<uintptr_t>(tab_rec));
// Get the switch value
- rlSrc = LoadValue(cUnit, rlSrc, kCoreReg);
- int tableBase = AllocTemp(cUnit);
+ rl_src = LoadValue(cu, rl_src, kCoreReg);
+ int table_base = AllocTemp(cu);
// Materialize a pointer to the switch table
- NewLIR3(cUnit, kThumb2Adr, tableBase, 0, reinterpret_cast<uintptr_t>(tabRec));
- int lowKey = s4FromSwitchData(&table[2]);
+ NewLIR3(cu, kThumb2Adr, table_base, 0, reinterpret_cast<uintptr_t>(tab_rec));
+ int low_key = s4FromSwitchData(&table[2]);
int keyReg;
// Remove the bias, if necessary
- if (lowKey == 0) {
- keyReg = rlSrc.lowReg;
+ if (low_key == 0) {
+ keyReg = rl_src.low_reg;
} else {
- keyReg = AllocTemp(cUnit);
- OpRegRegImm(cUnit, kOpSub, keyReg, rlSrc.lowReg, lowKey);
+ keyReg = AllocTemp(cu);
+ OpRegRegImm(cu, kOpSub, keyReg, rl_src.low_reg, low_key);
}
// Bounds check - if < 0 or >= size continue following switch
- OpRegImm(cUnit, kOpCmp, keyReg, size-1);
- LIR* branchOver = OpCondBranch(cUnit, kCondHi, NULL);
+ OpRegImm(cu, kOpCmp, keyReg, size-1);
+ LIR* branch_over = OpCondBranch(cu, kCondHi, NULL);
// Load the displacement from the switch table
- int dispReg = AllocTemp(cUnit);
- LoadBaseIndexed(cUnit, tableBase, keyReg, dispReg, 2, kWord);
+ int disp_reg = AllocTemp(cu);
+ LoadBaseIndexed(cu, table_base, keyReg, disp_reg, 2, kWord);
// ..and go! NOTE: No instruction set switch here - must stay Thumb2
- LIR* switchBranch = NewLIR1(cUnit, kThumb2AddPCR, dispReg);
- tabRec->anchor = switchBranch;
+ LIR* switch_branch = NewLIR1(cu, kThumb2AddPCR, disp_reg);
+ tab_rec->anchor = switch_branch;
- /* branchOver target here */
- LIR* target = NewLIR0(cUnit, kPseudoTargetLabel);
- branchOver->target = target;
+ /* branch_over target here */
+ LIR* target = NewLIR0(cu, kPseudoTargetLabel);
+ branch_over->target = target;
}
/*
@@ -429,30 +429,30 @@
*
* Total size is 4+(width * size + 1)/2 16-bit code units.
*/
-void GenFillArrayData(CompilationUnit* cUnit, uint32_t tableOffset, RegLocation rlSrc)
+void GenFillArrayData(CompilationUnit* cu, uint32_t table_offset, RegLocation rl_src)
{
- const uint16_t* table = cUnit->insns + cUnit->currentDalvikOffset + tableOffset;
+ const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
// Add the table to the list - we'll process it later
- FillArrayData *tabRec =
- static_cast<FillArrayData*>(NewMem(cUnit, sizeof(FillArrayData), true, kAllocData));
- tabRec->table = table;
- tabRec->vaddr = cUnit->currentDalvikOffset;
- uint16_t width = tabRec->table[1];
- uint32_t size = tabRec->table[2] | ((static_cast<uint32_t>(tabRec->table[3])) << 16);
- tabRec->size = (size * width) + 8;
+ FillArrayData *tab_rec =
+ static_cast<FillArrayData*>(NewMem(cu, sizeof(FillArrayData), true, kAllocData));
+ tab_rec->table = table;
+ tab_rec->vaddr = cu->current_dalvik_offset;
+ uint16_t width = tab_rec->table[1];
+ uint32_t size = tab_rec->table[2] | ((static_cast<uint32_t>(tab_rec->table[3])) << 16);
+ tab_rec->size = (size * width) + 8;
- InsertGrowableList(cUnit, &cUnit->fillArrayData, reinterpret_cast<uintptr_t>(tabRec));
+ InsertGrowableList(cu, &cu->fill_array_data, reinterpret_cast<uintptr_t>(tab_rec));
// Making a call - use explicit registers
- FlushAllRegs(cUnit); /* Everything to home location */
- LoadValueDirectFixed(cUnit, rlSrc, r0);
- LoadWordDisp(cUnit, rARM_SELF, ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode),
+ FlushAllRegs(cu); /* Everything to home location */
+ LoadValueDirectFixed(cu, rl_src, r0);
+ LoadWordDisp(cu, rARM_SELF, ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode),
rARM_LR);
// Materialize a pointer to the fill data image
- NewLIR3(cUnit, kThumb2Adr, r1, 0, reinterpret_cast<uintptr_t>(tabRec));
- ClobberCalleeSave(cUnit);
- LIR* callInst = OpReg(cUnit, kOpBlx, rARM_LR);
- MarkSafepointPC(cUnit, callInst);
+ NewLIR3(cu, kThumb2Adr, r1, 0, reinterpret_cast<uintptr_t>(tab_rec));
+ ClobberCalleeSave(cu);
+ LIR* call_inst = OpReg(cu, kOpBlx, rARM_LR);
+ MarkSafepointPC(cu, call_inst);
}
/*
@@ -464,7 +464,7 @@
* r0 -> self pointer [arg0 for oat[Lock/Unlock]Object
* r1 -> object [arg1 for oat[Lock/Unlock]Object
* r2 -> initial contents of object->lock, later result of strex
- * r3 -> self->threadId
+ * r3 -> self->thread_id
* r12 -> allow to be used by utilities as general temp
*
* The result of the strex is 0 if we acquire the lock.
@@ -481,33 +481,33 @@
* preserved.
*
*/
-void GenMonitorEnter(CompilationUnit* cUnit, int optFlags, RegLocation rlSrc)
+void GenMonitorEnter(CompilationUnit* cu, int opt_flags, RegLocation rl_src)
{
- FlushAllRegs(cUnit);
+ FlushAllRegs(cu);
DCHECK_EQ(LW_SHAPE_THIN, 0);
- LoadValueDirectFixed(cUnit, rlSrc, r0); // Get obj
- LockCallTemps(cUnit); // Prepare for explicit register usage
- GenNullCheck(cUnit, rlSrc.sRegLow, r0, optFlags);
- LoadWordDisp(cUnit, rARM_SELF, Thread::ThinLockIdOffset().Int32Value(), r2);
- NewLIR3(cUnit, kThumb2Ldrex, r1, r0,
+ LoadValueDirectFixed(cu, rl_src, r0); // Get obj
+ LockCallTemps(cu); // Prepare for explicit register usage
+ GenNullCheck(cu, rl_src.s_reg_low, r0, opt_flags);
+ LoadWordDisp(cu, rARM_SELF, Thread::ThinLockIdOffset().Int32Value(), r2);
+ NewLIR3(cu, kThumb2Ldrex, r1, r0,
Object::MonitorOffset().Int32Value() >> 2); // Get object->lock
// Align owner
- OpRegImm(cUnit, kOpLsl, r2, LW_LOCK_OWNER_SHIFT);
- // Is lock unheld on lock or held by us (==threadId) on unlock?
- NewLIR4(cUnit, kThumb2Bfi, r2, r1, 0, LW_LOCK_OWNER_SHIFT - 1);
- NewLIR3(cUnit, kThumb2Bfc, r1, LW_HASH_STATE_SHIFT, LW_LOCK_OWNER_SHIFT - 1);
- OpRegImm(cUnit, kOpCmp, r1, 0);
- OpIT(cUnit, kArmCondEq, "");
- NewLIR4(cUnit, kThumb2Strex, r1, r2, r0,
+ OpRegImm(cu, kOpLsl, r2, LW_LOCK_OWNER_SHIFT);
+ // Is lock unheld on lock or held by us (==thread_id) on unlock?
+ NewLIR4(cu, kThumb2Bfi, r2, r1, 0, LW_LOCK_OWNER_SHIFT - 1);
+ NewLIR3(cu, kThumb2Bfc, r1, LW_HASH_STATE_SHIFT, LW_LOCK_OWNER_SHIFT - 1);
+ OpRegImm(cu, kOpCmp, r1, 0);
+ OpIT(cu, kArmCondEq, "");
+ NewLIR4(cu, kThumb2Strex, r1, r2, r0,
Object::MonitorOffset().Int32Value() >> 2);
- OpRegImm(cUnit, kOpCmp, r1, 0);
- OpIT(cUnit, kArmCondNe, "T");
+ OpRegImm(cu, kOpCmp, r1, 0);
+ OpIT(cu, kArmCondNe, "T");
// Go expensive route - artLockObjectFromCode(self, obj);
- LoadWordDisp(cUnit, rARM_SELF, ENTRYPOINT_OFFSET(pLockObjectFromCode), rARM_LR);
- ClobberCalleeSave(cUnit);
- LIR* callInst = OpReg(cUnit, kOpBlx, rARM_LR);
- MarkSafepointPC(cUnit, callInst);
- GenMemBarrier(cUnit, kLoadLoad);
+ LoadWordDisp(cu, rARM_SELF, ENTRYPOINT_OFFSET(pLockObjectFromCode), rARM_LR);
+ ClobberCalleeSave(cu);
+ LIR* call_inst = OpReg(cu, kOpBlx, rARM_LR);
+ MarkSafepointPC(cu, call_inst);
+ GenMemBarrier(cu, kLoadLoad);
}
/*
@@ -516,129 +516,129 @@
* a zero recursion count, it's safe to punch it back to the
* initial, unlock thin state with a store word.
*/
-void GenMonitorExit(CompilationUnit* cUnit, int optFlags, RegLocation rlSrc)
+void GenMonitorExit(CompilationUnit* cu, int opt_flags, RegLocation rl_src)
{
DCHECK_EQ(LW_SHAPE_THIN, 0);
- FlushAllRegs(cUnit);
- LoadValueDirectFixed(cUnit, rlSrc, r0); // Get obj
- LockCallTemps(cUnit); // Prepare for explicit register usage
- GenNullCheck(cUnit, rlSrc.sRegLow, r0, optFlags);
- LoadWordDisp(cUnit, r0, Object::MonitorOffset().Int32Value(), r1); // Get lock
- LoadWordDisp(cUnit, rARM_SELF, Thread::ThinLockIdOffset().Int32Value(), r2);
- // Is lock unheld on lock or held by us (==threadId) on unlock?
- OpRegRegImm(cUnit, kOpAnd, r3, r1,
+ FlushAllRegs(cu);
+ LoadValueDirectFixed(cu, rl_src, r0); // Get obj
+ LockCallTemps(cu); // Prepare for explicit register usage
+ GenNullCheck(cu, rl_src.s_reg_low, r0, opt_flags);
+ LoadWordDisp(cu, r0, Object::MonitorOffset().Int32Value(), r1); // Get lock
+ LoadWordDisp(cu, rARM_SELF, Thread::ThinLockIdOffset().Int32Value(), r2);
+ // Is lock unheld on lock or held by us (==thread_id) on unlock?
+ OpRegRegImm(cu, kOpAnd, r3, r1,
(LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT));
// Align owner
- OpRegImm(cUnit, kOpLsl, r2, LW_LOCK_OWNER_SHIFT);
- NewLIR3(cUnit, kThumb2Bfc, r1, LW_HASH_STATE_SHIFT, LW_LOCK_OWNER_SHIFT - 1);
- OpRegReg(cUnit, kOpSub, r1, r2);
- OpIT(cUnit, kArmCondEq, "EE");
- StoreWordDisp(cUnit, r0, Object::MonitorOffset().Int32Value(), r3);
+ OpRegImm(cu, kOpLsl, r2, LW_LOCK_OWNER_SHIFT);
+ NewLIR3(cu, kThumb2Bfc, r1, LW_HASH_STATE_SHIFT, LW_LOCK_OWNER_SHIFT - 1);
+ OpRegReg(cu, kOpSub, r1, r2);
+ OpIT(cu, kArmCondEq, "EE");
+ StoreWordDisp(cu, r0, Object::MonitorOffset().Int32Value(), r3);
// Go expensive route - UnlockObjectFromCode(obj);
- LoadWordDisp(cUnit, rARM_SELF, ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rARM_LR);
- ClobberCalleeSave(cUnit);
- LIR* callInst = OpReg(cUnit, kOpBlx, rARM_LR);
- MarkSafepointPC(cUnit, callInst);
- GenMemBarrier(cUnit, kStoreLoad);
+ LoadWordDisp(cu, rARM_SELF, ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rARM_LR);
+ ClobberCalleeSave(cu);
+ LIR* call_inst = OpReg(cu, kOpBlx, rARM_LR);
+ MarkSafepointPC(cu, call_inst);
+ GenMemBarrier(cu, kStoreLoad);
}
/*
* Mark garbage collection card. Skip if the value we're storing is null.
*/
-void MarkGCCard(CompilationUnit* cUnit, int valReg, int tgtAddrReg)
+void MarkGCCard(CompilationUnit* cu, int val_reg, int tgt_addr_reg)
{
- int regCardBase = AllocTemp(cUnit);
- int regCardNo = AllocTemp(cUnit);
- LIR* branchOver = OpCmpImmBranch(cUnit, kCondEq, valReg, 0, NULL);
- LoadWordDisp(cUnit, rARM_SELF, Thread::CardTableOffset().Int32Value(), regCardBase);
- OpRegRegImm(cUnit, kOpLsr, regCardNo, tgtAddrReg, CardTable::kCardShift);
- StoreBaseIndexed(cUnit, regCardBase, regCardNo, regCardBase, 0,
+ int reg_card_base = AllocTemp(cu);
+ int reg_card_no = AllocTemp(cu);
+ LIR* branch_over = OpCmpImmBranch(cu, kCondEq, val_reg, 0, NULL);
+ LoadWordDisp(cu, rARM_SELF, Thread::CardTableOffset().Int32Value(), reg_card_base);
+ OpRegRegImm(cu, kOpLsr, reg_card_no, tgt_addr_reg, CardTable::kCardShift);
+ StoreBaseIndexed(cu, reg_card_base, reg_card_no, reg_card_base, 0,
kUnsignedByte);
- LIR* target = NewLIR0(cUnit, kPseudoTargetLabel);
- branchOver->target = target;
- FreeTemp(cUnit, regCardBase);
- FreeTemp(cUnit, regCardNo);
+ LIR* target = NewLIR0(cu, kPseudoTargetLabel);
+ branch_over->target = target;
+ FreeTemp(cu, reg_card_base);
+ FreeTemp(cu, reg_card_no);
}
-void GenEntrySequence(CompilationUnit* cUnit, RegLocation* ArgLocs,
- RegLocation rlMethod)
+void GenEntrySequence(CompilationUnit* cu, RegLocation* ArgLocs,
+ RegLocation rl_method)
{
- int spillCount = cUnit->numCoreSpills + cUnit->numFPSpills;
+ int spill_count = cu->num_core_spills + cu->num_fp_spills;
/*
* On entry, r0, r1, r2 & r3 are live. Let the register allocation
* mechanism know so it doesn't try to use any of them when
* expanding the frame or flushing. This leaves the utility
* code with a single temp: r12. This should be enough.
*/
- LockTemp(cUnit, r0);
- LockTemp(cUnit, r1);
- LockTemp(cUnit, r2);
- LockTemp(cUnit, r3);
+ LockTemp(cu, r0);
+ LockTemp(cu, r1);
+ LockTemp(cu, r2);
+ LockTemp(cu, r3);
/*
* We can safely skip the stack overflow check if we're
* a leaf *and* our frame size < fudge factor.
*/
- bool skipOverflowCheck = ((cUnit->attrs & METHOD_IS_LEAF) &&
- (static_cast<size_t>(cUnit->frameSize) <
+ bool skip_overflow_check = ((cu->attrs & METHOD_IS_LEAF) &&
+ (static_cast<size_t>(cu->frame_size) <
Thread::kStackOverflowReservedBytes));
- NewLIR0(cUnit, kPseudoMethodEntry);
- if (!skipOverflowCheck) {
+ NewLIR0(cu, kPseudoMethodEntry);
+ if (!skip_overflow_check) {
/* Load stack limit */
- LoadWordDisp(cUnit, rARM_SELF, Thread::StackEndOffset().Int32Value(), r12);
+ LoadWordDisp(cu, rARM_SELF, Thread::StackEndOffset().Int32Value(), r12);
}
/* Spill core callee saves */
- NewLIR1(cUnit, kThumb2Push, cUnit->coreSpillMask);
+ NewLIR1(cu, kThumb2Push, cu->core_spill_mask);
/* Need to spill any FP regs? */
- if (cUnit->numFPSpills) {
+ if (cu->num_fp_spills) {
/*
* NOTE: fp spills are a little different from core spills in that
* they are pushed as a contiguous block. When promoting from
* the fp set, we must allocate all singles from s16..highest-promoted
*/
- NewLIR1(cUnit, kThumb2VPushCS, cUnit->numFPSpills);
+ NewLIR1(cu, kThumb2VPushCS, cu->num_fp_spills);
}
- if (!skipOverflowCheck) {
- OpRegRegImm(cUnit, kOpSub, rARM_LR, rARM_SP, cUnit->frameSize - (spillCount * 4));
- GenRegRegCheck(cUnit, kCondCc, rARM_LR, r12, kThrowStackOverflow);
- OpRegCopy(cUnit, rARM_SP, rARM_LR); // Establish stack
+ if (!skip_overflow_check) {
+ OpRegRegImm(cu, kOpSub, rARM_LR, rARM_SP, cu->frame_size - (spill_count * 4));
+ GenRegRegCheck(cu, kCondCc, rARM_LR, r12, kThrowStackOverflow);
+ OpRegCopy(cu, rARM_SP, rARM_LR); // Establish stack
} else {
- OpRegImm(cUnit, kOpSub, rARM_SP, cUnit->frameSize - (spillCount * 4));
+ OpRegImm(cu, kOpSub, rARM_SP, cu->frame_size - (spill_count * 4));
}
- FlushIns(cUnit, ArgLocs, rlMethod);
+ FlushIns(cu, ArgLocs, rl_method);
- FreeTemp(cUnit, r0);
- FreeTemp(cUnit, r1);
- FreeTemp(cUnit, r2);
- FreeTemp(cUnit, r3);
+ FreeTemp(cu, r0);
+ FreeTemp(cu, r1);
+ FreeTemp(cu, r2);
+ FreeTemp(cu, r3);
}
-void GenExitSequence(CompilationUnit* cUnit)
+void GenExitSequence(CompilationUnit* cu)
{
- int spillCount = cUnit->numCoreSpills + cUnit->numFPSpills;
+ int spill_count = cu->num_core_spills + cu->num_fp_spills;
/*
* In the exit path, r0/r1 are live - make sure they aren't
* allocated by the register utilities as temps.
*/
- LockTemp(cUnit, r0);
- LockTemp(cUnit, r1);
+ LockTemp(cu, r0);
+ LockTemp(cu, r1);
- NewLIR0(cUnit, kPseudoMethodExit);
- OpRegImm(cUnit, kOpAdd, rARM_SP, cUnit->frameSize - (spillCount * 4));
+ NewLIR0(cu, kPseudoMethodExit);
+ OpRegImm(cu, kOpAdd, rARM_SP, cu->frame_size - (spill_count * 4));
/* Need to restore any FP callee saves? */
- if (cUnit->numFPSpills) {
- NewLIR1(cUnit, kThumb2VPopCS, cUnit->numFPSpills);
+ if (cu->num_fp_spills) {
+ NewLIR1(cu, kThumb2VPopCS, cu->num_fp_spills);
}
- if (cUnit->coreSpillMask & (1 << rARM_LR)) {
+ if (cu->core_spill_mask & (1 << rARM_LR)) {
/* Unspill rARM_LR to rARM_PC */
- cUnit->coreSpillMask &= ~(1 << rARM_LR);
- cUnit->coreSpillMask |= (1 << rARM_PC);
+ cu->core_spill_mask &= ~(1 << rARM_LR);
+ cu->core_spill_mask |= (1 << rARM_PC);
}
- NewLIR1(cUnit, kThumb2Pop, cUnit->coreSpillMask);
- if (!(cUnit->coreSpillMask & (1 << rARM_PC))) {
+ NewLIR1(cu, kThumb2Pop, cu->core_spill_mask);
+ if (!(cu->core_spill_mask & (1 << rARM_PC))) {
/* We didn't pop to rARM_PC, so must do a bv rARM_LR */
- NewLIR1(cUnit, kThumbBx, rARM_LR);
+ NewLIR1(cu, kThumbBx, rARM_LR);
}
}
diff --git a/src/compiler/codegen/arm/fp_arm.cc b/src/compiler/codegen/arm/fp_arm.cc
index 3584971..46695b9 100644
--- a/src/compiler/codegen/arm/fp_arm.cc
+++ b/src/compiler/codegen/arm/fp_arm.cc
@@ -20,11 +20,11 @@
namespace art {
-bool GenArithOpFloat(CompilationUnit* cUnit, Instruction::Code opcode, RegLocation rlDest,
- RegLocation rlSrc1, RegLocation rlSrc2)
+bool GenArithOpFloat(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2)
{
int op = kThumbBkpt;
- RegLocation rlResult;
+ RegLocation rl_result;
/*
* Don't attempt to optimize register usage since these opcodes call out to
@@ -50,24 +50,24 @@
case Instruction::REM_FLOAT_2ADDR:
case Instruction::REM_FLOAT:
case Instruction::NEG_FLOAT: {
- return GenArithOpFloatPortable(cUnit, opcode, rlDest, rlSrc1, rlSrc2);
+ return GenArithOpFloatPortable(cu, opcode, rl_dest, rl_src1, rl_src2);
}
default:
return true;
}
- rlSrc1 = LoadValue(cUnit, rlSrc1, kFPReg);
- rlSrc2 = LoadValue(cUnit, rlSrc2, kFPReg);
- rlResult = EvalLoc(cUnit, rlDest, kFPReg, true);
- NewLIR3(cUnit, op, rlResult.lowReg, rlSrc1.lowReg, rlSrc2.lowReg);
- StoreValue(cUnit, rlDest, rlResult);
+ rl_src1 = LoadValue(cu, rl_src1, kFPReg);
+ rl_src2 = LoadValue(cu, rl_src2, kFPReg);
+ rl_result = EvalLoc(cu, rl_dest, kFPReg, true);
+ NewLIR3(cu, op, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg);
+ StoreValue(cu, rl_dest, rl_result);
return false;
}
-bool GenArithOpDouble(CompilationUnit* cUnit, Instruction::Code opcode,
- RegLocation rlDest, RegLocation rlSrc1, RegLocation rlSrc2)
+bool GenArithOpDouble(CompilationUnit* cu, Instruction::Code opcode,
+ RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2)
{
int op = kThumbBkpt;
- RegLocation rlResult;
+ RegLocation rl_result;
switch (opcode) {
case Instruction::ADD_DOUBLE_2ADDR:
@@ -89,31 +89,31 @@
case Instruction::REM_DOUBLE_2ADDR:
case Instruction::REM_DOUBLE:
case Instruction::NEG_DOUBLE: {
- return GenArithOpDoublePortable(cUnit, opcode, rlDest, rlSrc1, rlSrc2);
+ return GenArithOpDoublePortable(cu, opcode, rl_dest, rl_src1, rl_src2);
}
default:
return true;
}
- rlSrc1 = LoadValueWide(cUnit, rlSrc1, kFPReg);
- DCHECK(rlSrc1.wide);
- rlSrc2 = LoadValueWide(cUnit, rlSrc2, kFPReg);
- DCHECK(rlSrc2.wide);
- rlResult = EvalLoc(cUnit, rlDest, kFPReg, true);
- DCHECK(rlDest.wide);
- DCHECK(rlResult.wide);
- NewLIR3(cUnit, op, S2d(rlResult.lowReg, rlResult.highReg), S2d(rlSrc1.lowReg, rlSrc1.highReg),
- S2d(rlSrc2.lowReg, rlSrc2.highReg));
- StoreValueWide(cUnit, rlDest, rlResult);
+ rl_src1 = LoadValueWide(cu, rl_src1, kFPReg);
+ DCHECK(rl_src1.wide);
+ rl_src2 = LoadValueWide(cu, rl_src2, kFPReg);
+ DCHECK(rl_src2.wide);
+ rl_result = EvalLoc(cu, rl_dest, kFPReg, true);
+ DCHECK(rl_dest.wide);
+ DCHECK(rl_result.wide);
+ NewLIR3(cu, op, S2d(rl_result.low_reg, rl_result.high_reg), S2d(rl_src1.low_reg, rl_src1.high_reg),
+ S2d(rl_src2.low_reg, rl_src2.high_reg));
+ StoreValueWide(cu, rl_dest, rl_result);
return false;
}
-bool GenConversion(CompilationUnit* cUnit, Instruction::Code opcode,
- RegLocation rlDest, RegLocation rlSrc)
+bool GenConversion(CompilationUnit* cu, Instruction::Code opcode,
+ RegLocation rl_dest, RegLocation rl_src)
{
int op = kThumbBkpt;
- int srcReg;
- RegLocation rlResult;
+ int src_reg;
+ RegLocation rl_result;
switch (opcode) {
case Instruction::INT_TO_FLOAT:
@@ -138,182 +138,182 @@
case Instruction::FLOAT_TO_LONG:
case Instruction::LONG_TO_FLOAT:
case Instruction::DOUBLE_TO_LONG:
- return GenConversionPortable(cUnit, opcode, rlDest, rlSrc);
+ return GenConversionPortable(cu, opcode, rl_dest, rl_src);
default:
return true;
}
- if (rlSrc.wide) {
- rlSrc = LoadValueWide(cUnit, rlSrc, kFPReg);
- srcReg = S2d(rlSrc.lowReg, rlSrc.highReg);
+ if (rl_src.wide) {
+ rl_src = LoadValueWide(cu, rl_src, kFPReg);
+ src_reg = S2d(rl_src.low_reg, rl_src.high_reg);
} else {
- rlSrc = LoadValue(cUnit, rlSrc, kFPReg);
- srcReg = rlSrc.lowReg;
+ rl_src = LoadValue(cu, rl_src, kFPReg);
+ src_reg = rl_src.low_reg;
}
- if (rlDest.wide) {
- rlResult = EvalLoc(cUnit, rlDest, kFPReg, true);
- NewLIR2(cUnit, op, S2d(rlResult.lowReg, rlResult.highReg), srcReg);
- StoreValueWide(cUnit, rlDest, rlResult);
+ if (rl_dest.wide) {
+ rl_result = EvalLoc(cu, rl_dest, kFPReg, true);
+ NewLIR2(cu, op, S2d(rl_result.low_reg, rl_result.high_reg), src_reg);
+ StoreValueWide(cu, rl_dest, rl_result);
} else {
- rlResult = EvalLoc(cUnit, rlDest, kFPReg, true);
- NewLIR2(cUnit, op, rlResult.lowReg, srcReg);
- StoreValue(cUnit, rlDest, rlResult);
+ rl_result = EvalLoc(cu, rl_dest, kFPReg, true);
+ NewLIR2(cu, op, rl_result.low_reg, src_reg);
+ StoreValue(cu, rl_dest, rl_result);
}
return false;
}
-void GenFusedFPCmpBranch(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir,
- bool gtBias, bool isDouble)
+void GenFusedFPCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
+ bool gt_bias, bool is_double)
{
- LIR* labelList = cUnit->blockLabelList;
- LIR* target = &labelList[bb->taken->id];
- RegLocation rlSrc1;
- RegLocation rlSrc2;
- if (isDouble) {
- rlSrc1 = GetSrcWide(cUnit, mir, 0);
- rlSrc2 = GetSrcWide(cUnit, mir, 2);
- rlSrc1 = LoadValueWide(cUnit, rlSrc1, kFPReg);
- rlSrc2 = LoadValueWide(cUnit, rlSrc2, kFPReg);
- NewLIR2(cUnit, kThumb2Vcmpd, S2d(rlSrc1.lowReg, rlSrc2.highReg),
- S2d(rlSrc2.lowReg, rlSrc2.highReg));
+ LIR* label_list = cu->block_label_list;
+ LIR* target = &label_list[bb->taken->id];
+ RegLocation rl_src1;
+ RegLocation rl_src2;
+ if (is_double) {
+ rl_src1 = GetSrcWide(cu, mir, 0);
+ rl_src2 = GetSrcWide(cu, mir, 2);
+ rl_src1 = LoadValueWide(cu, rl_src1, kFPReg);
+ rl_src2 = LoadValueWide(cu, rl_src2, kFPReg);
+ NewLIR2(cu, kThumb2Vcmpd, S2d(rl_src1.low_reg, rl_src2.high_reg),
+ S2d(rl_src2.low_reg, rl_src2.high_reg));
} else {
- rlSrc1 = GetSrc(cUnit, mir, 0);
- rlSrc2 = GetSrc(cUnit, mir, 1);
- rlSrc1 = LoadValue(cUnit, rlSrc1, kFPReg);
- rlSrc2 = LoadValue(cUnit, rlSrc2, kFPReg);
- NewLIR2(cUnit, kThumb2Vcmps, rlSrc1.lowReg, rlSrc2.lowReg);
+ rl_src1 = GetSrc(cu, mir, 0);
+ rl_src2 = GetSrc(cu, mir, 1);
+ rl_src1 = LoadValue(cu, rl_src1, kFPReg);
+ rl_src2 = LoadValue(cu, rl_src2, kFPReg);
+ NewLIR2(cu, kThumb2Vcmps, rl_src1.low_reg, rl_src2.low_reg);
}
- NewLIR0(cUnit, kThumb2Fmstat);
+ NewLIR0(cu, kThumb2Fmstat);
ConditionCode ccode = static_cast<ConditionCode>(mir->dalvikInsn.arg[0]);
switch(ccode) {
case kCondEq:
case kCondNe:
break;
case kCondLt:
- if (gtBias) {
+ if (gt_bias) {
ccode = kCondMi;
}
break;
case kCondLe:
- if (gtBias) {
+ if (gt_bias) {
ccode = kCondLs;
}
break;
case kCondGt:
- if (gtBias) {
+ if (gt_bias) {
ccode = kCondHi;
}
break;
case kCondGe:
- if (gtBias) {
+ if (gt_bias) {
ccode = kCondCs;
}
break;
default:
LOG(FATAL) << "Unexpected ccode: " << ccode;
}
- OpCondBranch(cUnit, ccode, target);
+ OpCondBranch(cu, ccode, target);
}
-bool GenCmpFP(CompilationUnit* cUnit, Instruction::Code opcode, RegLocation rlDest,
- RegLocation rlSrc1, RegLocation rlSrc2)
+bool GenCmpFP(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2)
{
- bool isDouble;
- int defaultResult;
- RegLocation rlResult;
+ bool is_double;
+ int default_result;
+ RegLocation rl_result;
switch (opcode) {
case Instruction::CMPL_FLOAT:
- isDouble = false;
- defaultResult = -1;
+ is_double = false;
+ default_result = -1;
break;
case Instruction::CMPG_FLOAT:
- isDouble = false;
- defaultResult = 1;
+ is_double = false;
+ default_result = 1;
break;
case Instruction::CMPL_DOUBLE:
- isDouble = true;
- defaultResult = -1;
+ is_double = true;
+ default_result = -1;
break;
case Instruction::CMPG_DOUBLE:
- isDouble = true;
- defaultResult = 1;
+ is_double = true;
+ default_result = 1;
break;
default:
return true;
}
- if (isDouble) {
- rlSrc1 = LoadValueWide(cUnit, rlSrc1, kFPReg);
- rlSrc2 = LoadValueWide(cUnit, rlSrc2, kFPReg);
- ClobberSReg(cUnit, rlDest.sRegLow);
- rlResult = EvalLoc(cUnit, rlDest, kCoreReg, true);
- LoadConstant(cUnit, rlResult.lowReg, defaultResult);
- NewLIR2(cUnit, kThumb2Vcmpd, S2d(rlSrc1.lowReg, rlSrc2.highReg),
- S2d(rlSrc2.lowReg, rlSrc2.highReg));
+ if (is_double) {
+ rl_src1 = LoadValueWide(cu, rl_src1, kFPReg);
+ rl_src2 = LoadValueWide(cu, rl_src2, kFPReg);
+ ClobberSReg(cu, rl_dest.s_reg_low);
+ rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ LoadConstant(cu, rl_result.low_reg, default_result);
+ NewLIR2(cu, kThumb2Vcmpd, S2d(rl_src1.low_reg, rl_src2.high_reg),
+ S2d(rl_src2.low_reg, rl_src2.high_reg));
} else {
- rlSrc1 = LoadValue(cUnit, rlSrc1, kFPReg);
- rlSrc2 = LoadValue(cUnit, rlSrc2, kFPReg);
- ClobberSReg(cUnit, rlDest.sRegLow);
- rlResult = EvalLoc(cUnit, rlDest, kCoreReg, true);
- LoadConstant(cUnit, rlResult.lowReg, defaultResult);
- NewLIR2(cUnit, kThumb2Vcmps, rlSrc1.lowReg, rlSrc2.lowReg);
+ rl_src1 = LoadValue(cu, rl_src1, kFPReg);
+ rl_src2 = LoadValue(cu, rl_src2, kFPReg);
+ ClobberSReg(cu, rl_dest.s_reg_low);
+ rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ LoadConstant(cu, rl_result.low_reg, default_result);
+ NewLIR2(cu, kThumb2Vcmps, rl_src1.low_reg, rl_src2.low_reg);
}
- DCHECK(!ARM_FPREG(rlResult.lowReg));
- NewLIR0(cUnit, kThumb2Fmstat);
+ DCHECK(!ARM_FPREG(rl_result.low_reg));
+ NewLIR0(cu, kThumb2Fmstat);
- OpIT(cUnit, (defaultResult == -1) ? kArmCondGt : kArmCondMi, "");
- NewLIR2(cUnit, kThumb2MovImmShift, rlResult.lowReg,
- ModifiedImmediate(-defaultResult)); // Must not alter ccodes
- GenBarrier(cUnit);
+ OpIT(cu, (default_result == -1) ? kArmCondGt : kArmCondMi, "");
+ NewLIR2(cu, kThumb2MovImmShift, rl_result.low_reg,
+ ModifiedImmediate(-default_result)); // Must not alter ccodes
+ GenBarrier(cu);
- OpIT(cUnit, kArmCondEq, "");
- LoadConstant(cUnit, rlResult.lowReg, 0);
- GenBarrier(cUnit);
+ OpIT(cu, kArmCondEq, "");
+ LoadConstant(cu, rl_result.low_reg, 0);
+ GenBarrier(cu);
- StoreValue(cUnit, rlDest, rlResult);
+ StoreValue(cu, rl_dest, rl_result);
return false;
}
-void GenNegFloat(CompilationUnit* cUnit, RegLocation rlDest, RegLocation rlSrc)
+void GenNegFloat(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
{
- RegLocation rlResult;
- rlSrc = LoadValue(cUnit, rlSrc, kFPReg);
- rlResult = EvalLoc(cUnit, rlDest, kFPReg, true);
- NewLIR2(cUnit, kThumb2Vnegs, rlResult.lowReg, rlSrc.lowReg);
- StoreValue(cUnit, rlDest, rlResult);
+ RegLocation rl_result;
+ rl_src = LoadValue(cu, rl_src, kFPReg);
+ rl_result = EvalLoc(cu, rl_dest, kFPReg, true);
+ NewLIR2(cu, kThumb2Vnegs, rl_result.low_reg, rl_src.low_reg);
+ StoreValue(cu, rl_dest, rl_result);
}
-void GenNegDouble(CompilationUnit* cUnit, RegLocation rlDest, RegLocation rlSrc)
+void GenNegDouble(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
{
- RegLocation rlResult;
- rlSrc = LoadValueWide(cUnit, rlSrc, kFPReg);
- rlResult = EvalLoc(cUnit, rlDest, kFPReg, true);
- NewLIR2(cUnit, kThumb2Vnegd, S2d(rlResult.lowReg, rlResult.highReg),
- S2d(rlSrc.lowReg, rlSrc.highReg));
- StoreValueWide(cUnit, rlDest, rlResult);
+ RegLocation rl_result;
+ rl_src = LoadValueWide(cu, rl_src, kFPReg);
+ rl_result = EvalLoc(cu, rl_dest, kFPReg, true);
+ NewLIR2(cu, kThumb2Vnegd, S2d(rl_result.low_reg, rl_result.high_reg),
+ S2d(rl_src.low_reg, rl_src.high_reg));
+ StoreValueWide(cu, rl_dest, rl_result);
}
-bool GenInlinedSqrt(CompilationUnit* cUnit, CallInfo* info) {
- DCHECK_EQ(cUnit->instructionSet, kThumb2);
+bool GenInlinedSqrt(CompilationUnit* cu, CallInfo* info) {
+ DCHECK_EQ(cu->instruction_set, kThumb2);
LIR *branch;
- RegLocation rlSrc = info->args[0];
- RegLocation rlDest = InlineTargetWide(cUnit, info); // double place for result
- rlSrc = LoadValueWide(cUnit, rlSrc, kFPReg);
- RegLocation rlResult = EvalLoc(cUnit, rlDest, kFPReg, true);
- NewLIR2(cUnit, kThumb2Vsqrtd, S2d(rlResult.lowReg, rlResult.highReg),
- S2d(rlSrc.lowReg, rlSrc.highReg));
- NewLIR2(cUnit, kThumb2Vcmpd, S2d(rlResult.lowReg, rlResult.highReg),
- S2d(rlResult.lowReg, rlResult.highReg));
- NewLIR0(cUnit, kThumb2Fmstat);
- branch = NewLIR2(cUnit, kThumbBCond, 0, kArmCondEq);
- ClobberCalleeSave(cUnit);
- LockCallTemps(cUnit); // Using fixed registers
- int rTgt = LoadHelper(cUnit, ENTRYPOINT_OFFSET(pSqrt));
- NewLIR3(cUnit, kThumb2Fmrrd, r0, r1, S2d(rlSrc.lowReg, rlSrc.highReg));
- NewLIR1(cUnit, kThumbBlxR, rTgt);
- NewLIR3(cUnit, kThumb2Fmdrr, S2d(rlResult.lowReg, rlResult.highReg), r0, r1);
- branch->target = NewLIR0(cUnit, kPseudoTargetLabel);
- StoreValueWide(cUnit, rlDest, rlResult);
+ RegLocation rl_src = info->args[0];
+ RegLocation rl_dest = InlineTargetWide(cu, info); // double place for result
+ rl_src = LoadValueWide(cu, rl_src, kFPReg);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kFPReg, true);
+ NewLIR2(cu, kThumb2Vsqrtd, S2d(rl_result.low_reg, rl_result.high_reg),
+ S2d(rl_src.low_reg, rl_src.high_reg));
+ NewLIR2(cu, kThumb2Vcmpd, S2d(rl_result.low_reg, rl_result.high_reg),
+ S2d(rl_result.low_reg, rl_result.high_reg));
+ NewLIR0(cu, kThumb2Fmstat);
+ branch = NewLIR2(cu, kThumbBCond, 0, kArmCondEq);
+ ClobberCalleeSave(cu);
+ LockCallTemps(cu); // Using fixed registers
+ int r_tgt = LoadHelper(cu, ENTRYPOINT_OFFSET(pSqrt));
+ NewLIR3(cu, kThumb2Fmrrd, r0, r1, S2d(rl_src.low_reg, rl_src.high_reg));
+ NewLIR1(cu, kThumbBlxR, r_tgt);
+ NewLIR3(cu, kThumb2Fmdrr, S2d(rl_result.low_reg, rl_result.high_reg), r0, r1);
+ branch->target = NewLIR0(cu, kPseudoTargetLabel);
+ StoreValueWide(cu, rl_dest, rl_result);
return true;
}
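A sketch of the inlined sqrt's fallback shape, assuming the pSqrt helper takes its operand in r0/r1 and returns in r0/r1, as the fmrrd/fmdrr pair above suggests:

  #include <cmath>
  // vsqrtd computes the fast result; comparing the result against itself
  // is the NaN test (NaN is unordered even with itself), and only then is
  // the out-of-line helper called.
  static double InlinedSqrtSketch(double x) {
    double result = std::sqrt(x);   // kThumb2Vsqrtd
    if (result != result)           // kThumb2Vcmpd result, result + fmstat
      result = std::sqrt(x);        // blx pSqrt; result rebuilt from r0/r1
    return result;
  }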
diff --git a/src/compiler/codegen/arm/int_arm.cc b/src/compiler/codegen/arm/int_arm.cc
index bdb3bea..45fe807 100644
--- a/src/compiler/codegen/arm/int_arm.cc
+++ b/src/compiler/codegen/arm/int_arm.cc
@@ -24,11 +24,11 @@
namespace art {
-LIR* OpCmpBranch(CompilationUnit* cUnit, ConditionCode cond, int src1,
+LIR* OpCmpBranch(CompilationUnit* cu, ConditionCode cond, int src1,
int src2, LIR* target)
{
- OpRegReg(cUnit, kOpCmp, src1, src2);
- return OpCondBranch(cUnit, cond, target);
+ OpRegReg(cu, kOpCmp, src1, src2);
+ return OpCondBranch(cu, cond, target);
}
/*
@@ -41,11 +41,11 @@
* met, and an "E" means the instruction is executed if the condition
* is not met.
*/
-LIR* OpIT(CompilationUnit* cUnit, ArmConditionCode code, const char* guide)
+LIR* OpIT(CompilationUnit* cu, ArmConditionCode code, const char* guide)
{
int mask;
- int condBit = code & 1;
- int altBit = condBit ^ 1;
+ int cond_bit = code & 1;
+ int alt_bit = cond_bit ^ 1;
int mask3 = 0;
int mask2 = 0;
int mask1 = 0;
@@ -53,11 +53,11 @@
//Note: case fallthroughs intentional
switch (strlen(guide)) {
case 3:
- mask1 = (guide[2] == 'T') ? condBit : altBit;
+ mask1 = (guide[2] == 'T') ? cond_bit : alt_bit;
case 2:
- mask2 = (guide[1] == 'T') ? condBit : altBit;
+ mask2 = (guide[1] == 'T') ? cond_bit : alt_bit;
case 1:
- mask3 = (guide[0] == 'T') ? condBit : altBit;
+ mask3 = (guide[0] == 'T') ? cond_bit : alt_bit;
break;
case 0:
break;
@@ -66,7 +66,7 @@
}
mask = (mask3 << 3) | (mask2 << 2) | (mask1 << 1) |
(1 << (3 - strlen(guide)));
- return NewLIR2(cUnit, kThumb2It, code, mask);
+ return NewLIR2(cu, kThumb2It, code, mask);
}
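A worked example of the mask computation, assuming kArmCondEq encodes as 0b0000 (so cond_bit == 0, alt_bit == 1):

  // OpIT(cu, kArmCondEq, "TE"):
  //   mask3 = cond_bit = 0   // guide[0] == 'T'
  //   mask2 = alt_bit  = 1   // guide[1] == 'E'
  //   mask  = (0 << 3) | (1 << 2) | (0 << 1) | (1 << (3 - 2)) = 0b0110
  // i.e. an ITTE eq block: the next three instructions are predicated
  // eq, eq, ne, which is exactly the shape GenInlinedCas32 below relies on.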
/*
@@ -84,168 +84,168 @@
* neg rX
* done:
*/
-void GenCmpLong(CompilationUnit* cUnit, RegLocation rlDest,
- RegLocation rlSrc1, RegLocation rlSrc2)
+void GenCmpLong(CompilationUnit* cu, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2)
{
LIR* target1;
LIR* target2;
- rlSrc1 = LoadValueWide(cUnit, rlSrc1, kCoreReg);
- rlSrc2 = LoadValueWide(cUnit, rlSrc2, kCoreReg);
- int tReg = AllocTemp(cUnit);
- LoadConstant(cUnit, tReg, -1);
- OpRegReg(cUnit, kOpCmp, rlSrc1.highReg, rlSrc2.highReg);
- LIR* branch1 = OpCondBranch(cUnit, kCondLt, NULL);
- LIR* branch2 = OpCondBranch(cUnit, kCondGt, NULL);
- OpRegRegReg(cUnit, kOpSub, tReg, rlSrc1.lowReg, rlSrc2.lowReg);
- LIR* branch3 = OpCondBranch(cUnit, kCondEq, NULL);
+ rl_src1 = LoadValueWide(cu, rl_src1, kCoreReg);
+ rl_src2 = LoadValueWide(cu, rl_src2, kCoreReg);
+ int t_reg = AllocTemp(cu);
+ LoadConstant(cu, t_reg, -1);
+ OpRegReg(cu, kOpCmp, rl_src1.high_reg, rl_src2.high_reg);
+ LIR* branch1 = OpCondBranch(cu, kCondLt, NULL);
+ LIR* branch2 = OpCondBranch(cu, kCondGt, NULL);
+ OpRegRegReg(cu, kOpSub, t_reg, rl_src1.low_reg, rl_src2.low_reg);
+ LIR* branch3 = OpCondBranch(cu, kCondEq, NULL);
- OpIT(cUnit, kArmCondHi, "E");
- NewLIR2(cUnit, kThumb2MovImmShift, tReg, ModifiedImmediate(-1));
- LoadConstant(cUnit, tReg, 1);
- GenBarrier(cUnit);
+ OpIT(cu, kArmCondHi, "E");
+ NewLIR2(cu, kThumb2MovImmShift, t_reg, ModifiedImmediate(-1));
+ LoadConstant(cu, t_reg, 1);
+ GenBarrier(cu);
- target2 = NewLIR0(cUnit, kPseudoTargetLabel);
- OpRegReg(cUnit, kOpNeg, tReg, tReg);
+ target2 = NewLIR0(cu, kPseudoTargetLabel);
+ OpRegReg(cu, kOpNeg, t_reg, t_reg);
- target1 = NewLIR0(cUnit, kPseudoTargetLabel);
+ target1 = NewLIR0(cu, kPseudoTargetLabel);
- RegLocation rlTemp = LocCReturn(); // Just using as template, will change
- rlTemp.lowReg = tReg;
- StoreValue(cUnit, rlDest, rlTemp);
- FreeTemp(cUnit, tReg);
+ RegLocation rl_temp = LocCReturn(); // Just using as template, will change
+ rl_temp.low_reg = t_reg;
+ StoreValue(cu, rl_dest, rl_temp);
+ FreeTemp(cu, t_reg);
branch1->target = target1;
branch2->target = target2;
branch3->target = branch1->target;
}
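The net effect, as a sketch (illustrative, not ART code): the high words are compared signed, equal high words fall through to an unsigned low-word subtract whose flags drive the IT block, and target2 negates on the way out.

  static int CmpLongSketch(long long a, long long b) {
    if (a == b) return 0;
    return (a < b) ? -1 : 1;
  }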
-void GenFusedLongCmpBranch(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir)
+void GenFusedLongCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir)
{
- LIR* labelList = cUnit->blockLabelList;
- LIR* taken = &labelList[bb->taken->id];
- LIR* notTaken = &labelList[bb->fallThrough->id];
- RegLocation rlSrc1 = GetSrcWide(cUnit, mir, 0);
- RegLocation rlSrc2 = GetSrcWide(cUnit, mir, 2);
- rlSrc1 = LoadValueWide(cUnit, rlSrc1, kCoreReg);
- rlSrc2 = LoadValueWide(cUnit, rlSrc2, kCoreReg);
+ LIR* label_list = cu->block_label_list;
+ LIR* taken = &label_list[bb->taken->id];
+ LIR* not_taken = &label_list[bb->fall_through->id];
+ RegLocation rl_src1 = GetSrcWide(cu, mir, 0);
+ RegLocation rl_src2 = GetSrcWide(cu, mir, 2);
+ rl_src1 = LoadValueWide(cu, rl_src1, kCoreReg);
+ rl_src2 = LoadValueWide(cu, rl_src2, kCoreReg);
ConditionCode ccode = static_cast<ConditionCode>(mir->dalvikInsn.arg[0]);
- OpRegReg(cUnit, kOpCmp, rlSrc1.highReg, rlSrc2.highReg);
+ OpRegReg(cu, kOpCmp, rl_src1.high_reg, rl_src2.high_reg);
switch(ccode) {
case kCondEq:
- OpCondBranch(cUnit, kCondNe, notTaken);
+ OpCondBranch(cu, kCondNe, not_taken);
break;
case kCondNe:
- OpCondBranch(cUnit, kCondNe, taken);
+ OpCondBranch(cu, kCondNe, taken);
break;
case kCondLt:
- OpCondBranch(cUnit, kCondLt, taken);
- OpCondBranch(cUnit, kCondGt, notTaken);
+ OpCondBranch(cu, kCondLt, taken);
+ OpCondBranch(cu, kCondGt, not_taken);
ccode = kCondCc;
break;
case kCondLe:
- OpCondBranch(cUnit, kCondLt, taken);
- OpCondBranch(cUnit, kCondGt, notTaken);
+ OpCondBranch(cu, kCondLt, taken);
+ OpCondBranch(cu, kCondGt, not_taken);
ccode = kCondLs;
break;
case kCondGt:
- OpCondBranch(cUnit, kCondGt, taken);
- OpCondBranch(cUnit, kCondLt, notTaken);
+ OpCondBranch(cu, kCondGt, taken);
+ OpCondBranch(cu, kCondLt, not_taken);
ccode = kCondHi;
break;
case kCondGe:
- OpCondBranch(cUnit, kCondGt, taken);
- OpCondBranch(cUnit, kCondLt, notTaken);
+ OpCondBranch(cu, kCondGt, taken);
+ OpCondBranch(cu, kCondLt, not_taken);
ccode = kCondCs;
break;
default:
LOG(FATAL) << "Unexpected ccode: " << ccode;
}
- OpRegReg(cUnit, kOpCmp, rlSrc1.lowReg, rlSrc2.lowReg);
- OpCondBranch(cUnit, ccode, taken);
+ OpRegReg(cu, kOpCmp, rl_src1.low_reg, rl_src2.low_reg);
+ OpCondBranch(cu, ccode, taken);
}
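For one case, the emitted branch shape, sketched with illustrative register names:

  // kCondLt:
  //   cmp  src1_hi, src2_hi
  //   blt  taken         // signed: high words decide
  //   bgt  not_taken
  //   cmp  src1_lo, src2_lo
  //   bcc  taken         // high words equal: low words compare unsigned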
/*
* Generate a register comparison to an immediate and branch. Caller
* is responsible for setting branch target field.
*/
-LIR* OpCmpImmBranch(CompilationUnit* cUnit, ConditionCode cond, int reg,
- int checkValue, LIR* target)
+LIR* OpCmpImmBranch(CompilationUnit* cu, ConditionCode cond, int reg,
+ int check_value, LIR* target)
{
LIR* branch;
- int modImm;
- ArmConditionCode armCond = ArmConditionEncoding(cond);
- if ((ARM_LOWREG(reg)) && (checkValue == 0) &&
- ((armCond == kArmCondEq) || (armCond == kArmCondNe))) {
- branch = NewLIR2(cUnit, (armCond == kArmCondEq) ? kThumb2Cbz : kThumb2Cbnz,
+ int mod_imm;
+ ArmConditionCode arm_cond = ArmConditionEncoding(cond);
+ if ((ARM_LOWREG(reg)) && (check_value == 0) &&
+ ((arm_cond == kArmCondEq) || (arm_cond == kArmCondNe))) {
+ branch = NewLIR2(cu, (arm_cond == kArmCondEq) ? kThumb2Cbz : kThumb2Cbnz,
reg, 0);
} else {
- modImm = ModifiedImmediate(checkValue);
- if (ARM_LOWREG(reg) && ((checkValue & 0xff) == checkValue)) {
- NewLIR2(cUnit, kThumbCmpRI8, reg, checkValue);
- } else if (modImm >= 0) {
- NewLIR2(cUnit, kThumb2CmpRI8, reg, modImm);
+ mod_imm = ModifiedImmediate(check_value);
+ if (ARM_LOWREG(reg) && ((check_value & 0xff) == check_value)) {
+ NewLIR2(cu, kThumbCmpRI8, reg, check_value);
+ } else if (mod_imm >= 0) {
+ NewLIR2(cu, kThumb2CmpRI8, reg, mod_imm);
} else {
- int tReg = AllocTemp(cUnit);
- LoadConstant(cUnit, tReg, checkValue);
- OpRegReg(cUnit, kOpCmp, reg, tReg);
+ int t_reg = AllocTemp(cu);
+ LoadConstant(cu, t_reg, check_value);
+ OpRegReg(cu, kOpCmp, reg, t_reg);
}
- branch = NewLIR2(cUnit, kThumbBCond, 0, armCond);
+ branch = NewLIR2(cu, kThumbBCond, 0, arm_cond);
}
branch->target = target;
return branch;
}
-LIR* OpRegCopyNoInsert(CompilationUnit* cUnit, int rDest, int rSrc)
+LIR* OpRegCopyNoInsert(CompilationUnit* cu, int r_dest, int r_src)
{
LIR* res;
int opcode;
- if (ARM_FPREG(rDest) || ARM_FPREG(rSrc))
- return FpRegCopy(cUnit, rDest, rSrc);
- if (ARM_LOWREG(rDest) && ARM_LOWREG(rSrc))
+ if (ARM_FPREG(r_dest) || ARM_FPREG(r_src))
+ return FpRegCopy(cu, r_dest, r_src);
+ if (ARM_LOWREG(r_dest) && ARM_LOWREG(r_src))
opcode = kThumbMovRR;
- else if (!ARM_LOWREG(rDest) && !ARM_LOWREG(rSrc))
+ else if (!ARM_LOWREG(r_dest) && !ARM_LOWREG(r_src))
opcode = kThumbMovRR_H2H;
- else if (ARM_LOWREG(rDest))
+ else if (ARM_LOWREG(r_dest))
opcode = kThumbMovRR_H2L;
else
opcode = kThumbMovRR_L2H;
- res = RawLIR(cUnit, cUnit->currentDalvikOffset, opcode, rDest, rSrc);
- if (!(cUnit->disableOpt & (1 << kSafeOptimizations)) && rDest == rSrc) {
- res->flags.isNop = true;
+ res = RawLIR(cu, cu->current_dalvik_offset, opcode, r_dest, r_src);
+ if (!(cu->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
+ res->flags.is_nop = true;
}
return res;
}
-LIR* OpRegCopy(CompilationUnit* cUnit, int rDest, int rSrc)
+LIR* OpRegCopy(CompilationUnit* cu, int r_dest, int r_src)
{
- LIR* res = OpRegCopyNoInsert(cUnit, rDest, rSrc);
- AppendLIR(cUnit, res);
+ LIR* res = OpRegCopyNoInsert(cu, r_dest, r_src);
+ AppendLIR(cu, res);
return res;
}
-void OpRegCopyWide(CompilationUnit* cUnit, int destLo, int destHi,
- int srcLo, int srcHi)
+void OpRegCopyWide(CompilationUnit* cu, int dest_lo, int dest_hi,
+ int src_lo, int src_hi)
{
- bool destFP = ARM_FPREG(destLo) && ARM_FPREG(destHi);
- bool srcFP = ARM_FPREG(srcLo) && ARM_FPREG(srcHi);
- DCHECK_EQ(ARM_FPREG(srcLo), ARM_FPREG(srcHi));
- DCHECK_EQ(ARM_FPREG(destLo), ARM_FPREG(destHi));
- if (destFP) {
- if (srcFP) {
- OpRegCopy(cUnit, S2d(destLo, destHi), S2d(srcLo, srcHi));
+ bool dest_fp = ARM_FPREG(dest_lo) && ARM_FPREG(dest_hi);
+ bool src_fp = ARM_FPREG(src_lo) && ARM_FPREG(src_hi);
+ DCHECK_EQ(ARM_FPREG(src_lo), ARM_FPREG(src_hi));
+ DCHECK_EQ(ARM_FPREG(dest_lo), ARM_FPREG(dest_hi));
+ if (dest_fp) {
+ if (src_fp) {
+ OpRegCopy(cu, S2d(dest_lo, dest_hi), S2d(src_lo, src_hi));
} else {
- NewLIR3(cUnit, kThumb2Fmdrr, S2d(destLo, destHi), srcLo, srcHi);
+ NewLIR3(cu, kThumb2Fmdrr, S2d(dest_lo, dest_hi), src_lo, src_hi);
}
} else {
- if (srcFP) {
- NewLIR3(cUnit, kThumb2Fmrrd, destLo, destHi, S2d(srcLo, srcHi));
+ if (src_fp) {
+ NewLIR3(cu, kThumb2Fmrrd, dest_lo, dest_hi, S2d(src_lo, src_hi));
} else {
// Handle overlap
- if (srcHi == destLo) {
- OpRegCopy(cUnit, destHi, srcHi);
- OpRegCopy(cUnit, destLo, srcLo);
+ if (src_hi == dest_lo) {
+ OpRegCopy(cu, dest_hi, src_hi);
+ OpRegCopy(cu, dest_lo, src_lo);
} else {
- OpRegCopy(cUnit, destLo, srcLo);
- OpRegCopy(cUnit, destHi, srcHi);
+ OpRegCopy(cu, dest_lo, src_lo);
+ OpRegCopy(cu, dest_hi, src_hi);
}
}
}
@@ -258,7 +258,7 @@
DividePattern pattern;
};
-static const MagicTable magicTable[] = {
+static const MagicTable magic_table[] = {
{0, 0, DivideNone}, // 0
{0, 0, DivideNone}, // 1
{0, 0, DivideNone}, // 2
@@ -278,277 +278,277 @@
};
// Integer division by constant via reciprocal multiply (Hacker's Delight, 10-4)
-bool SmallLiteralDivide(CompilationUnit* cUnit, Instruction::Code dalvikOpcode,
- RegLocation rlSrc, RegLocation rlDest, int lit)
+bool SmallLiteralDivide(CompilationUnit* cu, Instruction::Code dalvik_opcode,
+ RegLocation rl_src, RegLocation rl_dest, int lit)
{
- if ((lit < 0) || (lit >= static_cast<int>(sizeof(magicTable)/sizeof(magicTable[0])))) {
+ if ((lit < 0) || (lit >= static_cast<int>(sizeof(magic_table)/sizeof(magic_table[0])))) {
return false;
}
- DividePattern pattern = magicTable[lit].pattern;
+ DividePattern pattern = magic_table[lit].pattern;
if (pattern == DivideNone) {
return false;
}
// Tuning: add rem patterns
- if (dalvikOpcode != Instruction::DIV_INT_LIT8) {
+ if (dalvik_opcode != Instruction::DIV_INT_LIT8) {
return false;
}
- int rMagic = AllocTemp(cUnit);
- LoadConstant(cUnit, rMagic, magicTable[lit].magic);
- rlSrc = LoadValue(cUnit, rlSrc, kCoreReg);
- RegLocation rlResult = EvalLoc(cUnit, rlDest, kCoreReg, true);
- int rHi = AllocTemp(cUnit);
- int rLo = AllocTemp(cUnit);
- NewLIR4(cUnit, kThumb2Smull, rLo, rHi, rMagic, rlSrc.lowReg);
+ int r_magic = AllocTemp(cu);
+ LoadConstant(cu, r_magic, magic_table[lit].magic);
+ rl_src = LoadValue(cu, rl_src, kCoreReg);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ int r_hi = AllocTemp(cu);
+ int r_lo = AllocTemp(cu);
+ NewLIR4(cu, kThumb2Smull, r_lo, r_hi, r_magic, rl_src.low_reg);
switch(pattern) {
case Divide3:
- OpRegRegRegShift(cUnit, kOpSub, rlResult.lowReg, rHi,
- rlSrc.lowReg, EncodeShift(kArmAsr, 31));
+ OpRegRegRegShift(cu, kOpSub, rl_result.low_reg, r_hi,
+ rl_src.low_reg, EncodeShift(kArmAsr, 31));
break;
case Divide5:
- OpRegRegImm(cUnit, kOpAsr, rLo, rlSrc.lowReg, 31);
- OpRegRegRegShift(cUnit, kOpRsub, rlResult.lowReg, rLo, rHi,
- EncodeShift(kArmAsr, magicTable[lit].shift));
+ OpRegRegImm(cu, kOpAsr, r_lo, rl_src.low_reg, 31);
+ OpRegRegRegShift(cu, kOpRsub, rl_result.low_reg, r_lo, r_hi,
+ EncodeShift(kArmAsr, magic_table[lit].shift));
break;
case Divide7:
- OpRegReg(cUnit, kOpAdd, rHi, rlSrc.lowReg);
- OpRegRegImm(cUnit, kOpAsr, rLo, rlSrc.lowReg, 31);
- OpRegRegRegShift(cUnit, kOpRsub, rlResult.lowReg, rLo, rHi,
- EncodeShift(kArmAsr, magicTable[lit].shift));
+ OpRegReg(cu, kOpAdd, r_hi, rl_src.low_reg);
+ OpRegRegImm(cu, kOpAsr, r_lo, rl_src.low_reg, 31);
+ OpRegRegRegShift(cu, kOpRsub, rl_result.low_reg, r_lo, r_hi,
+ EncodeShift(kArmAsr, magic_table[lit].shift));
break;
default:
LOG(FATAL) << "Unexpected pattern: " << pattern;
}
- StoreValue(cUnit, rlDest, rlResult);
+ StoreValue(cu, rl_dest, rl_result);
return true;
}
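A worked example for x / 3 (Divide3), assuming the elided magic_table entry for 3 holds the usual Hacker's Delight constant 0x55555556:

  //   smull r_lo, r_hi, r_magic, x    // r_hi = (x * 0x55555556) >> 32
  //   sub   result, r_hi, x, asr #31  // subtract sign to round toward zero
  // x == 9:  9 * 0x55555556 == 0x300000006, so r_hi == 3, result == 3
  // x == -9: r_hi == -4, result == -4 - (-1) == -3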
-LIR* GenRegMemCheck(CompilationUnit* cUnit, ConditionCode cCode,
+LIR* GenRegMemCheck(CompilationUnit* cu, ConditionCode c_code,
int reg1, int base, int offset, ThrowKind kind)
{
LOG(FATAL) << "Unexpected use of GenRegMemCheck for Arm";
return NULL;
}
-RegLocation GenDivRemLit(CompilationUnit* cUnit, RegLocation rlDest, int reg1, int lit, bool isDiv)
+RegLocation GenDivRemLit(CompilationUnit* cu, RegLocation rl_dest, int reg1, int lit, bool is_div)
{
LOG(FATAL) << "Unexpected use of GenDivRemLit for Arm";
- return rlDest;
+ return rl_dest;
}
-RegLocation GenDivRem(CompilationUnit* cUnit, RegLocation rlDest, int reg1, int reg2, bool isDiv)
+RegLocation GenDivRem(CompilationUnit* cu, RegLocation rl_dest, int reg1, int reg2, bool is_div)
{
LOG(FATAL) << "Unexpected use of GenDivRem for Arm";
- return rlDest;
+ return rl_dest;
}
-bool GenInlinedMinMaxInt(CompilationUnit *cUnit, CallInfo* info, bool isMin)
+bool GenInlinedMinMaxInt(CompilationUnit *cu, CallInfo* info, bool is_min)
{
- DCHECK_EQ(cUnit->instructionSet, kThumb2);
- RegLocation rlSrc1 = info->args[0];
- RegLocation rlSrc2 = info->args[1];
- rlSrc1 = LoadValue(cUnit, rlSrc1, kCoreReg);
- rlSrc2 = LoadValue(cUnit, rlSrc2, kCoreReg);
- RegLocation rlDest = InlineTarget(cUnit, info);
- RegLocation rlResult = EvalLoc(cUnit, rlDest, kCoreReg, true);
- OpRegReg(cUnit, kOpCmp, rlSrc1.lowReg, rlSrc2.lowReg);
- OpIT(cUnit, (isMin) ? kArmCondGt : kArmCondLt, "E");
- OpRegReg(cUnit, kOpMov, rlResult.lowReg, rlSrc2.lowReg);
- OpRegReg(cUnit, kOpMov, rlResult.lowReg, rlSrc1.lowReg);
- GenBarrier(cUnit);
- StoreValue(cUnit, rlDest, rlResult);
+ DCHECK_EQ(cu->instruction_set, kThumb2);
+ RegLocation rl_src1 = info->args[0];
+ RegLocation rl_src2 = info->args[1];
+ rl_src1 = LoadValue(cu, rl_src1, kCoreReg);
+ rl_src2 = LoadValue(cu, rl_src2, kCoreReg);
+ RegLocation rl_dest = InlineTarget(cu, info);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ OpRegReg(cu, kOpCmp, rl_src1.low_reg, rl_src2.low_reg);
+ OpIT(cu, (is_min) ? kArmCondGt : kArmCondLt, "E");
+ OpRegReg(cu, kOpMov, rl_result.low_reg, rl_src2.low_reg);
+ OpRegReg(cu, kOpMov, rl_result.low_reg, rl_src1.low_reg);
+ GenBarrier(cu);
+ StoreValue(cu, rl_dest, rl_result);
return true;
}
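The IT block above expands to this shape for is_min (sketch):

  //   cmp   a, b
  //   ite   gt            // OpIT(kArmCondGt, "E")
  //   movgt result, b     // a > b: take the second operand
  //   movle result, a     // otherwise: take the first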
-void OpLea(CompilationUnit* cUnit, int rBase, int reg1, int reg2, int scale, int offset)
+void OpLea(CompilationUnit* cu, int r_base, int reg1, int reg2, int scale, int offset)
{
LOG(FATAL) << "Unexpected use of OpLea for Arm";
}
-void OpTlsCmp(CompilationUnit* cUnit, int offset, int val)
+void OpTlsCmp(CompilationUnit* cu, int offset, int val)
{
LOG(FATAL) << "Unexpected use of OpTlsCmp for Arm";
}
-bool GenInlinedCas32(CompilationUnit* cUnit, CallInfo* info, bool need_write_barrier) {
- DCHECK_EQ(cUnit->instructionSet, kThumb2);
- // Unused - RegLocation rlSrcUnsafe = info->args[0];
- RegLocation rlSrcObj= info->args[1]; // Object - known non-null
- RegLocation rlSrcOffset= info->args[2]; // long low
- rlSrcOffset.wide = 0; // ignore high half in info->args[3]
- RegLocation rlSrcExpected= info->args[4]; // int or Object
- RegLocation rlSrcNewValue= info->args[5]; // int or Object
- RegLocation rlDest = InlineTarget(cUnit, info); // boolean place for result
+bool GenInlinedCas32(CompilationUnit* cu, CallInfo* info, bool need_write_barrier) {
+ DCHECK_EQ(cu->instruction_set, kThumb2);
+ // Unused - RegLocation rl_src_unsafe = info->args[0];
+ RegLocation rl_src_obj = info->args[1]; // Object - known non-null
+ RegLocation rl_src_offset = info->args[2]; // long low
+ rl_src_offset.wide = 0; // ignore high half in info->args[3]
+ RegLocation rl_src_expected = info->args[4]; // int or Object
+ RegLocation rl_src_new_value = info->args[5]; // int or Object
+ RegLocation rl_dest = InlineTarget(cu, info); // boolean place for result
// Release store semantics, get the barrier out of the way. TODO: revisit
- GenMemBarrier(cUnit, kStoreLoad);
+ GenMemBarrier(cu, kStoreLoad);
- RegLocation rlObject = LoadValue(cUnit, rlSrcObj, kCoreReg);
- RegLocation rlNewValue = LoadValue(cUnit, rlSrcNewValue, kCoreReg);
+ RegLocation rl_object = LoadValue(cu, rl_src_obj, kCoreReg);
+ RegLocation rl_new_value = LoadValue(cu, rl_src_new_value, kCoreReg);
if (need_write_barrier) {
// Mark card for object assuming new value is stored.
- MarkGCCard(cUnit, rlNewValue.lowReg, rlObject.lowReg);
+ MarkGCCard(cu, rl_new_value.low_reg, rl_object.low_reg);
}
- RegLocation rlOffset = LoadValue(cUnit, rlSrcOffset, kCoreReg);
+ RegLocation rl_offset = LoadValue(cu, rl_src_offset, kCoreReg);
- int rPtr = AllocTemp(cUnit);
- OpRegRegReg(cUnit, kOpAdd, rPtr, rlObject.lowReg, rlOffset.lowReg);
+ int r_ptr = AllocTemp(cu);
+ OpRegRegReg(cu, kOpAdd, r_ptr, rl_object.low_reg, rl_offset.low_reg);
- // Free now unneeded rlObject and rlOffset to give more temps.
- ClobberSReg(cUnit, rlObject.sRegLow);
- FreeTemp(cUnit, rlObject.lowReg);
- ClobberSReg(cUnit, rlOffset.sRegLow);
- FreeTemp(cUnit, rlOffset.lowReg);
+ // Free now unneeded rl_object and rl_offset to give more temps.
+ ClobberSReg(cu, rl_object.s_reg_low);
+ FreeTemp(cu, rl_object.low_reg);
+ ClobberSReg(cu, rl_offset.s_reg_low);
+ FreeTemp(cu, rl_offset.low_reg);
- int rOldValue = AllocTemp(cUnit);
- NewLIR3(cUnit, kThumb2Ldrex, rOldValue, rPtr, 0); // rOldValue := [rPtr]
+ int r_old_value = AllocTemp(cu);
+ NewLIR3(cu, kThumb2Ldrex, r_old_value, r_ptr, 0); // r_old_value := [r_ptr]
- RegLocation rlExpected = LoadValue(cUnit, rlSrcExpected, kCoreReg);
+ RegLocation rl_expected = LoadValue(cu, rl_src_expected, kCoreReg);
- // if (rOldValue == rExpected) {
- // [rPtr] <- rNewValue && rResult := success ? 0 : 1
- // rResult ^= 1
+ // if (r_old_value == r_expected) {
+ // [r_ptr] <- r_new_value && r_result := success ? 0 : 1
+ // r_result ^= 1
// } else {
- // rResult := 0
+ // r_result := 0
// }
- OpRegReg(cUnit, kOpCmp, rOldValue, rlExpected.lowReg);
- FreeTemp(cUnit, rOldValue); // Now unneeded.
- RegLocation rlResult = EvalLoc(cUnit, rlDest, kCoreReg, true);
- OpIT(cUnit, kArmCondEq, "TE");
- NewLIR4(cUnit, kThumb2Strex, rlResult.lowReg, rlNewValue.lowReg, rPtr, 0);
- FreeTemp(cUnit, rPtr); // Now unneeded.
- OpRegImm(cUnit, kOpXor, rlResult.lowReg, 1);
- OpRegReg(cUnit, kOpXor, rlResult.lowReg, rlResult.lowReg);
+ OpRegReg(cu, kOpCmp, r_old_value, rl_expected.low_reg);
+ FreeTemp(cu, r_old_value); // Now unneeded.
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ OpIT(cu, kArmCondEq, "TE");
+ NewLIR4(cu, kThumb2Strex, rl_result.low_reg, rl_new_value.low_reg, r_ptr, 0);
+ FreeTemp(cu, r_ptr); // Now unneeded.
+ OpRegImm(cu, kOpXor, rl_result.low_reg, 1);
+ OpRegReg(cu, kOpXor, rl_result.low_reg, rl_result.low_reg);
- StoreValue(cUnit, rlDest, rlResult);
+ StoreValue(cu, rl_dest, rl_result);
return true;
}
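Single-attempt semantics of the ldrex/strex sequence above, as a sketch; there is no retry loop, so a spurious strex failure simply reports false. Illustrative, not ART code:

  static bool Cas32Sketch(volatile int* ptr, int expected, int new_value) {
    int old_value = *ptr;          // ldrex r_old, [r_ptr]
    if (old_value == expected) {   // cmp + ITTE eq
      *ptr = new_value;            // strexeq: 0 on success, 1 if lost
      return true;                 // eoreq result, result, #1
    }
    return false;                  // eorne result, result, result
  }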
-LIR* OpPcRelLoad(CompilationUnit* cUnit, int reg, LIR* target)
+LIR* OpPcRelLoad(CompilationUnit* cu, int reg, LIR* target)
{
- return RawLIR(cUnit, cUnit->currentDalvikOffset, kThumb2LdrPcRel12, reg, 0, 0, 0, 0, target);
+ return RawLIR(cu, cu->current_dalvik_offset, kThumb2LdrPcRel12, reg, 0, 0, 0, 0, target);
}
-LIR* OpVldm(CompilationUnit* cUnit, int rBase, int count)
+LIR* OpVldm(CompilationUnit* cu, int r_base, int count)
{
- return NewLIR3(cUnit, kThumb2Vldms, rBase, fr0, count);
+ return NewLIR3(cu, kThumb2Vldms, r_base, fr0, count);
}
-LIR* OpVstm(CompilationUnit* cUnit, int rBase, int count)
+LIR* OpVstm(CompilationUnit* cu, int r_base, int count)
{
- return NewLIR3(cUnit, kThumb2Vstms, rBase, fr0, count);
+ return NewLIR3(cu, kThumb2Vstms, r_base, fr0, count);
}
-void GenMultiplyByTwoBitMultiplier(CompilationUnit* cUnit, RegLocation rlSrc,
- RegLocation rlResult, int lit,
- int firstBit, int secondBit)
+void GenMultiplyByTwoBitMultiplier(CompilationUnit* cu, RegLocation rl_src,
+ RegLocation rl_result, int lit,
+ int first_bit, int second_bit)
{
- OpRegRegRegShift(cUnit, kOpAdd, rlResult.lowReg, rlSrc.lowReg, rlSrc.lowReg,
- EncodeShift(kArmLsl, secondBit - firstBit));
- if (firstBit != 0) {
- OpRegRegImm(cUnit, kOpLsl, rlResult.lowReg, rlResult.lowReg, firstBit);
+ OpRegRegRegShift(cu, kOpAdd, rl_result.low_reg, rl_src.low_reg, rl_src.low_reg,
+ EncodeShift(kArmLsl, second_bit - first_bit));
+ if (first_bit != 0) {
+ OpRegRegImm(cu, kOpLsl, rl_result.low_reg, rl_result.low_reg, first_bit);
}
}
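A worked example for lit == 10 (binary 1010, so first_bit == 1 and second_bit == 3):

  //   add result, src, src, lsl #2   // result = src * 5
  //   lsl result, result, #1         // result = src * 10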
-void GenDivZeroCheck(CompilationUnit* cUnit, int regLo, int regHi)
+void GenDivZeroCheck(CompilationUnit* cu, int reg_lo, int reg_hi)
{
- int tReg = AllocTemp(cUnit);
- NewLIR4(cUnit, kThumb2OrrRRRs, tReg, regLo, regHi, 0);
- FreeTemp(cUnit, tReg);
- GenCheck(cUnit, kCondEq, kThrowDivZero);
+ int t_reg = AllocTemp(cu);
+ NewLIR4(cu, kThumb2OrrRRRs, t_reg, reg_lo, reg_hi, 0);
+ FreeTemp(cu, t_reg);
+ GenCheck(cu, kCondEq, kThrowDivZero);
}
// Test suspend flag, return target of taken suspend branch
-LIR* OpTestSuspend(CompilationUnit* cUnit, LIR* target)
+LIR* OpTestSuspend(CompilationUnit* cu, LIR* target)
{
- NewLIR2(cUnit, kThumbSubRI8, rARM_SUSPEND, 1);
- return OpCondBranch(cUnit, (target == NULL) ? kCondEq : kCondNe, target);
+ NewLIR2(cu, kThumbSubRI8, rARM_SUSPEND, 1);
+ return OpCondBranch(cu, (target == NULL) ? kCondEq : kCondNe, target);
}
// Decrement register and branch on condition
-LIR* OpDecAndBranch(CompilationUnit* cUnit, ConditionCode cCode, int reg, LIR* target)
+LIR* OpDecAndBranch(CompilationUnit* cu, ConditionCode c_code, int reg, LIR* target)
{
// Combine sub & test using sub setflags encoding here
- NewLIR3(cUnit, kThumb2SubsRRI12, reg, reg, 1);
- return OpCondBranch(cUnit, cCode, target);
+ NewLIR3(cu, kThumb2SubsRRI12, reg, reg, 1);
+ return OpCondBranch(cu, c_code, target);
}
-void GenMemBarrier(CompilationUnit* cUnit, MemBarrierKind barrierKind)
+void GenMemBarrier(CompilationUnit* cu, MemBarrierKind barrier_kind)
{
#if ANDROID_SMP != 0
- int dmbFlavor;
+ int dmb_flavor;
// TODO: revisit Arm barrier kinds
- switch (barrierKind) {
- case kLoadStore: dmbFlavor = kSY; break;
- case kLoadLoad: dmbFlavor = kSY; break;
- case kStoreStore: dmbFlavor = kST; break;
- case kStoreLoad: dmbFlavor = kSY; break;
+ switch (barrier_kind) {
+ case kLoadStore: dmb_flavor = kSY; break;
+ case kLoadLoad: dmb_flavor = kSY; break;
+ case kStoreStore: dmb_flavor = kST; break;
+ case kStoreLoad: dmb_flavor = kSY; break;
default:
- LOG(FATAL) << "Unexpected MemBarrierKind: " << barrierKind;
- dmbFlavor = kSY; // quiet gcc.
+ LOG(FATAL) << "Unexpected MemBarrierKind: " << barrier_kind;
+ dmb_flavor = kSY; // quiet gcc.
break;
}
- LIR* dmb = NewLIR1(cUnit, kThumb2Dmb, dmbFlavor);
- dmb->defMask = ENCODE_ALL;
+ LIR* dmb = NewLIR1(cu, kThumb2Dmb, dmb_flavor);
+ dmb->def_mask = ENCODE_ALL;
#endif
}
-bool GenNegLong(CompilationUnit* cUnit, RegLocation rlDest,
- RegLocation rlSrc)
+bool GenNegLong(CompilationUnit* cu, RegLocation rl_dest,
+ RegLocation rl_src)
{
- rlSrc = LoadValueWide(cUnit, rlSrc, kCoreReg);
- RegLocation rlResult = EvalLoc(cUnit, rlDest, kCoreReg, true);
- int zReg = AllocTemp(cUnit);
- LoadConstantNoClobber(cUnit, zReg, 0);
+ rl_src = LoadValueWide(cu, rl_src, kCoreReg);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ int z_reg = AllocTemp(cu);
+ LoadConstantNoClobber(cu, z_reg, 0);
// Check for destructive overlap
- if (rlResult.lowReg == rlSrc.highReg) {
- int tReg = AllocTemp(cUnit);
- OpRegCopy(cUnit, tReg, rlSrc.highReg);
- OpRegRegReg(cUnit, kOpSub, rlResult.lowReg, zReg, rlSrc.lowReg);
- OpRegRegReg(cUnit, kOpSbc, rlResult.highReg, zReg, tReg);
- FreeTemp(cUnit, tReg);
+ if (rl_result.low_reg == rl_src.high_reg) {
+ int t_reg = AllocTemp(cu);
+ OpRegCopy(cu, t_reg, rl_src.high_reg);
+ OpRegRegReg(cu, kOpSub, rl_result.low_reg, z_reg, rl_src.low_reg);
+ OpRegRegReg(cu, kOpSbc, rl_result.high_reg, z_reg, t_reg);
+ FreeTemp(cu, t_reg);
} else {
- OpRegRegReg(cUnit, kOpSub, rlResult.lowReg, zReg, rlSrc.lowReg);
- OpRegRegReg(cUnit, kOpSbc, rlResult.highReg, zReg, rlSrc.highReg);
+ OpRegRegReg(cu, kOpSub, rl_result.low_reg, z_reg, rl_src.low_reg);
+ OpRegRegReg(cu, kOpSbc, rl_result.high_reg, z_reg, rl_src.high_reg);
}
- FreeTemp(cUnit, zReg);
- StoreValueWide(cUnit, rlDest, rlResult);
+ FreeTemp(cu, z_reg);
+ StoreValueWide(cu, rl_dest, rl_result);
return false;
}
-bool GenAddLong(CompilationUnit* cUnit, RegLocation rlDest,
- RegLocation rlSrc1, RegLocation rlSrc2)
+bool GenAddLong(CompilationUnit* cu, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2)
{
LOG(FATAL) << "Unexpected use of GenAddLong for Arm";
return false;
}
-bool GenSubLong(CompilationUnit* cUnit, RegLocation rlDest,
- RegLocation rlSrc1, RegLocation rlSrc2)
+bool GenSubLong(CompilationUnit* cu, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2)
{
LOG(FATAL) << "Unexpected use of GenSubLong for Arm";
return false;
}
-bool GenAndLong(CompilationUnit* cUnit, RegLocation rlDest,
- RegLocation rlSrc1, RegLocation rlSrc2)
+bool GenAndLong(CompilationUnit* cu, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2)
{
LOG(FATAL) << "Unexpected use of GenAndLong for Arm";
return false;
}
-bool GenOrLong(CompilationUnit* cUnit, RegLocation rlDest,
- RegLocation rlSrc1, RegLocation rlSrc2)
+bool GenOrLong(CompilationUnit* cu, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2)
{
LOG(FATAL) << "Unexpected use of GenOrLong for Arm";
return false;
}
-bool GenXorLong(CompilationUnit* cUnit, RegLocation rlDest,
- RegLocation rlSrc1, RegLocation rlSrc2)
+bool GenXorLong(CompilationUnit* cu, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2)
{
LOG(FATAL) << "Unexpected use of genXoLong for Arm";
return false;
diff --git a/src/compiler/codegen/arm/target_arm.cc b/src/compiler/codegen/arm/target_arm.cc
index f5d13d3..9c12237 100644
--- a/src/compiler/codegen/arm/target_arm.cc
+++ b/src/compiler/codegen/arm/target_arm.cc
@@ -23,15 +23,15 @@
namespace art {
-static int coreRegs[] = {r0, r1, r2, r3, rARM_SUSPEND, r5, r6, r7, r8, rARM_SELF, r10,
+static int core_regs[] = {r0, r1, r2, r3, rARM_SUSPEND, r5, r6, r7, r8, rARM_SELF, r10,
r11, r12, rARM_SP, rARM_LR, rARM_PC};
static int ReservedRegs[] = {rARM_SUSPEND, rARM_SELF, rARM_SP, rARM_LR, rARM_PC};
static int FpRegs[] = {fr0, fr1, fr2, fr3, fr4, fr5, fr6, fr7,
fr8, fr9, fr10, fr11, fr12, fr13, fr14, fr15,
fr16, fr17, fr18, fr19, fr20, fr21, fr22, fr23,
fr24, fr25, fr26, fr27, fr28, fr29, fr30, fr31};
-static int coreTemps[] = {r0, r1, r2, r3, r12};
-static int fpTemps[] = {fr0, fr1, fr2, fr3, fr4, fr5, fr6, fr7,
+static int core_temps[] = {r0, r1, r2, r3, r12};
+static int fp_temps[] = {fr0, fr1, fr2, fr3, fr4, fr5, fr6, fr7,
fr8, fr9, fr10, fr11, fr12, fr13, fr14, fr15};
RegLocation LocCReturn()
@@ -85,9 +85,9 @@
// Create a double from a pair of singles.
-int S2d(int lowReg, int highReg)
+int S2d(int low_reg, int high_reg)
{
- return ARM_S2D(lowReg, highReg);
+ return ARM_S2D(low_reg, high_reg);
}
// Is reg a single or double?
@@ -123,20 +123,20 @@
/*
* Decode the register id.
*/
-uint64_t GetRegMaskCommon(CompilationUnit* cUnit, int reg)
+uint64_t GetRegMaskCommon(CompilationUnit* cu, int reg)
{
uint64_t seed;
int shift;
- int regId;
+ int reg_id;
- regId = reg & 0x1f;
+ reg_id = reg & 0x1f;
/* Each double register is equal to a pair of single-precision FP registers */
seed = ARM_DOUBLEREG(reg) ? 3 : 1;
/* FP register starts at bit position 16 */
shift = ARM_FPREG(reg) ? kArmFPReg0 : 0;
/* Expand the double register id into single offset */
- shift += regId;
+ shift += reg_id;
return (seed << shift);
}
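Example masks, assuming kArmFPReg0 is the bit offset of the FP bank and a double's low five bits index its first single:

  //   r5               -> 1 << 5
  //   fr3 (single)     -> 1 << (kArmFPReg0 + 3)
  //   fr2/fr3 (double) -> 3 << (kArmFPReg0 + 2)  // seed 3 spans both singles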
@@ -145,79 +145,79 @@
return ENCODE_ARM_REG_PC;
}
-void SetupTargetResourceMasks(CompilationUnit* cUnit, LIR* lir)
+void SetupTargetResourceMasks(CompilationUnit* cu, LIR* lir)
{
- DCHECK_EQ(cUnit->instructionSet, kThumb2);
+ DCHECK_EQ(cu->instruction_set, kThumb2);
// Thumb2 specific setup
uint64_t flags = EncodingMap[lir->opcode].flags;
int opcode = lir->opcode;
if (flags & REG_DEF_SP) {
- lir->defMask |= ENCODE_ARM_REG_SP;
+ lir->def_mask |= ENCODE_ARM_REG_SP;
}
if (flags & REG_USE_SP) {
- lir->useMask |= ENCODE_ARM_REG_SP;
+ lir->use_mask |= ENCODE_ARM_REG_SP;
}
if (flags & REG_DEF_LIST0) {
- lir->defMask |= ENCODE_ARM_REG_LIST(lir->operands[0]);
+ lir->def_mask |= ENCODE_ARM_REG_LIST(lir->operands[0]);
}
if (flags & REG_DEF_LIST1) {
- lir->defMask |= ENCODE_ARM_REG_LIST(lir->operands[1]);
+ lir->def_mask |= ENCODE_ARM_REG_LIST(lir->operands[1]);
}
if (flags & REG_DEF_FPCS_LIST0) {
- lir->defMask |= ENCODE_ARM_REG_FPCS_LIST(lir->operands[0]);
+ lir->def_mask |= ENCODE_ARM_REG_FPCS_LIST(lir->operands[0]);
}
if (flags & REG_DEF_FPCS_LIST2) {
for (int i = 0; i < lir->operands[2]; i++) {
- SetupRegMask(cUnit, &lir->defMask, lir->operands[1] + i);
+ SetupRegMask(cu, &lir->def_mask, lir->operands[1] + i);
}
}
if (flags & REG_USE_PC) {
- lir->useMask |= ENCODE_ARM_REG_PC;
+ lir->use_mask |= ENCODE_ARM_REG_PC;
}
/* Conservatively treat the IT block */
if (flags & IS_IT) {
- lir->defMask = ENCODE_ALL;
+ lir->def_mask = ENCODE_ALL;
}
if (flags & REG_USE_LIST0) {
- lir->useMask |= ENCODE_ARM_REG_LIST(lir->operands[0]);
+ lir->use_mask |= ENCODE_ARM_REG_LIST(lir->operands[0]);
}
if (flags & REG_USE_LIST1) {
- lir->useMask |= ENCODE_ARM_REG_LIST(lir->operands[1]);
+ lir->use_mask |= ENCODE_ARM_REG_LIST(lir->operands[1]);
}
if (flags & REG_USE_FPCS_LIST0) {
- lir->useMask |= ENCODE_ARM_REG_FPCS_LIST(lir->operands[0]);
+ lir->use_mask |= ENCODE_ARM_REG_FPCS_LIST(lir->operands[0]);
}
if (flags & REG_USE_FPCS_LIST2) {
for (int i = 0; i < lir->operands[2]; i++) {
- SetupRegMask(cUnit, &lir->useMask, lir->operands[1] + i);
+ SetupRegMask(cu, &lir->use_mask, lir->operands[1] + i);
}
}
/* Fixup for kThumbPush/lr and kThumbPop/pc */
if (opcode == kThumbPush || opcode == kThumbPop) {
- uint64_t r8Mask = GetRegMaskCommon(cUnit, r8);
- if ((opcode == kThumbPush) && (lir->useMask & r8Mask)) {
- lir->useMask &= ~r8Mask;
- lir->useMask |= ENCODE_ARM_REG_LR;
- } else if ((opcode == kThumbPop) && (lir->defMask & r8Mask)) {
- lir->defMask &= ~r8Mask;
- lir->defMask |= ENCODE_ARM_REG_PC;
+ uint64_t r8_mask = GetRegMaskCommon(cu, r8);
+ if ((opcode == kThumbPush) && (lir->use_mask & r8_mask)) {
+ lir->use_mask &= ~r8_mask;
+ lir->use_mask |= ENCODE_ARM_REG_LR;
+ } else if ((opcode == kThumbPop) && (lir->def_mask & r8_mask)) {
+ lir->def_mask &= ~r8_mask;
+ lir->def_mask |= ENCODE_ARM_REG_PC;
}
}
if (flags & REG_DEF_LR) {
- lir->defMask |= ENCODE_ARM_REG_LR;
+ lir->def_mask |= ENCODE_ARM_REG_LR;
}
}
@@ -248,7 +248,7 @@
return res;
}
-static const char* coreRegNames[16] = {
+static const char* core_reg_names[16] = {
"r0",
"r1",
"r2",
@@ -268,7 +268,7 @@
};
-static const char* shiftNames[4] = {
+static const char* shift_names[4] = {
"lsl",
"lsr",
"asr",
@@ -282,17 +282,17 @@
buf[0] = 0;
for (i = 0; i < 16; i++, vector >>= 1) {
if (vector & 0x1) {
- int regId = i;
+ int reg_id = i;
if (opcode == kThumbPush && i == 8) {
- regId = r14lr;
+ reg_id = r14lr;
} else if (opcode == kThumbPop && i == 8) {
- regId = r15pc;
+ reg_id = r15pc;
}
if (printed) {
- sprintf(buf + strlen(buf), ", r%d", regId);
+ sprintf(buf + strlen(buf), ", r%d", reg_id);
} else {
printed = true;
- sprintf(buf, "r%d", regId);
+ sprintf(buf, "r%d", reg_id);
}
}
}
@@ -328,36 +328,36 @@
return bits >> (((value & 0xf80) >> 7) - 8);
}
-const char* ccNames[] = {"eq","ne","cs","cc","mi","pl","vs","vc",
+const char* cc_names[] = {"eq","ne","cs","cc","mi","pl","vs","vc",
"hi","ls","ge","lt","gt","le","al","nv"};
/*
* Interpret a format string and build a string no longer than size
* See format key in Assemble.c.
*/
-std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* baseAddr)
+std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr)
{
std::string buf;
int i;
- const char* fmtEnd = &fmt[strlen(fmt)];
+ const char* fmt_end = &fmt[strlen(fmt)];
char tbuf[256];
const char* name;
char nc;
- while (fmt < fmtEnd) {
+ while (fmt < fmt_end) {
int operand;
if (*fmt == '!') {
fmt++;
- DCHECK_LT(fmt, fmtEnd);
+ DCHECK_LT(fmt, fmt_end);
nc = *fmt++;
if (nc=='!') {
strcpy(tbuf, "!");
} else {
- DCHECK_LT(fmt, fmtEnd);
+ DCHECK_LT(fmt, fmt_end);
DCHECK_LT(static_cast<unsigned>(nc-'0'), 4U);
operand = lir->operands[nc-'0'];
switch (*fmt++) {
case 'H':
if (operand != 0) {
- sprintf(tbuf, ", %s %d",shiftNames[operand & 0x3], operand >> 2);
+ sprintf(tbuf, ", %s %d",shift_names[operand & 0x3], operand >> 2);
} else {
strcpy(tbuf,"");
}
@@ -418,8 +418,8 @@
break;
case 'C':
DCHECK_LT(operand, static_cast<int>(
- sizeof(coreRegNames)/sizeof(coreRegNames[0])));
- sprintf(tbuf,"%s",coreRegNames[operand]);
+ sizeof(core_reg_names)/sizeof(core_reg_names[0])));
+ sprintf(tbuf,"%s",core_reg_names[operand]);
break;
case 'E':
sprintf(tbuf,"%d", operand*4);
@@ -428,11 +428,11 @@
sprintf(tbuf,"%d", operand*2);
break;
case 'c':
- strcpy(tbuf, ccNames[operand]);
+ strcpy(tbuf, cc_names[operand]);
break;
case 't':
sprintf(tbuf,"0x%08x (L%p)",
- reinterpret_cast<uintptr_t>(baseAddr) + lir->offset + 4 +
+ reinterpret_cast<uintptr_t>(base_addr) + lir->offset + 4 +
(operand << 1),
lir->target);
break;
@@ -440,7 +440,7 @@
int offset_1 = lir->operands[0];
int offset_2 = NEXT_LIR(lir)->operands[0];
uintptr_t target =
- (((reinterpret_cast<uintptr_t>(baseAddr) + lir->offset + 4) &
+ (((reinterpret_cast<uintptr_t>(base_addr) + lir->offset + 4) &
~3) + (offset_1 << 21 >> 9) + (offset_2 << 1)) &
0xfffffffc;
sprintf(tbuf, "%p", reinterpret_cast<void *>(target));
@@ -473,7 +473,7 @@
return buf;
}
-void DumpResourceMask(LIR* armLIR, uint64_t mask, const char* prefix)
+void DumpResourceMask(LIR* arm_lir, uint64_t mask, const char* prefix)
{
char buf[256];
buf[0] = 0;
@@ -499,9 +499,9 @@
}
/* Memory bits */
- if (armLIR && (mask & ENCODE_DALVIK_REG)) {
- sprintf(buf + strlen(buf), "dr%d%s", armLIR->aliasInfo & 0xffff,
- (armLIR->aliasInfo & 0x80000000) ? "(+1)" : "");
+ if (arm_lir && (mask & ENCODE_DALVIK_REG)) {
+ sprintf(buf + strlen(buf), "dr%d%s", arm_lir->alias_info & 0xffff,
+ (arm_lir->alias_info & 0x80000000) ? "(+1)" : "");
}
if (mask & ENCODE_LITERAL) {
strcat(buf, "lit ");
@@ -550,105 +550,105 @@
* Alloc a pair of core registers, or a double. Low reg in low byte,
* high reg in next byte.
*/
-int AllocTypedTempPair(CompilationUnit* cUnit, bool fpHint, int regClass)
+int AllocTypedTempPair(CompilationUnit* cu, bool fp_hint, int reg_class)
{
- int highReg;
- int lowReg;
+ int high_reg;
+ int low_reg;
int res = 0;
- if (((regClass == kAnyReg) && fpHint) || (regClass == kFPReg)) {
- lowReg = AllocTempDouble(cUnit);
- highReg = lowReg + 1;
+ if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
+ low_reg = AllocTempDouble(cu);
+ high_reg = low_reg + 1;
} else {
- lowReg = AllocTemp(cUnit);
- highReg = AllocTemp(cUnit);
+ low_reg = AllocTemp(cu);
+ high_reg = AllocTemp(cu);
}
- res = (lowReg & 0xff) | ((highReg & 0xff) << 8);
+ res = (low_reg & 0xff) | ((high_reg & 0xff) << 8);
return res;
}
-int AllocTypedTemp(CompilationUnit* cUnit, bool fpHint, int regClass)
+int AllocTypedTemp(CompilationUnit* cu, bool fp_hint, int reg_class)
{
- if (((regClass == kAnyReg) && fpHint) || (regClass == kFPReg))
- return AllocTempFloat(cUnit);
- return AllocTemp(cUnit);
+ if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg))
+ return AllocTempFloat(cu);
+ return AllocTemp(cu);
}
-void CompilerInitializeRegAlloc(CompilationUnit* cUnit)
+void CompilerInitializeRegAlloc(CompilationUnit* cu)
{
- int numRegs = sizeof(coreRegs)/sizeof(*coreRegs);
- int numReserved = sizeof(ReservedRegs)/sizeof(*ReservedRegs);
- int numTemps = sizeof(coreTemps)/sizeof(*coreTemps);
- int numFPRegs = sizeof(FpRegs)/sizeof(*FpRegs);
- int numFPTemps = sizeof(fpTemps)/sizeof(*fpTemps);
+ int num_regs = sizeof(core_regs)/sizeof(*core_regs);
+ int num_reserved = sizeof(ReservedRegs)/sizeof(*ReservedRegs);
+ int num_temps = sizeof(core_temps)/sizeof(*core_temps);
+ int num_fp_regs = sizeof(FpRegs)/sizeof(*FpRegs);
+ int num_fp_temps = sizeof(fp_temps)/sizeof(*fp_temps);
RegisterPool *pool =
- static_cast<RegisterPool*>(NewMem(cUnit, sizeof(*pool), true, kAllocRegAlloc));
- cUnit->regPool = pool;
- pool->numCoreRegs = numRegs;
- pool->coreRegs = reinterpret_cast<RegisterInfo*>
- (NewMem(cUnit, numRegs * sizeof(*cUnit->regPool->coreRegs), true, kAllocRegAlloc));
- pool->numFPRegs = numFPRegs;
+ static_cast<RegisterPool*>(NewMem(cu, sizeof(*pool), true, kAllocRegAlloc));
+ cu->reg_pool = pool;
+ pool->num_core_regs = num_regs;
+ pool->core_regs = reinterpret_cast<RegisterInfo*>
+ (NewMem(cu, num_regs * sizeof(*cu->reg_pool->core_regs), true, kAllocRegAlloc));
+ pool->num_fp_regs = num_fp_regs;
pool->FPRegs = static_cast<RegisterInfo*>
- (NewMem(cUnit, numFPRegs * sizeof(*cUnit->regPool->FPRegs), true, kAllocRegAlloc));
- CompilerInitPool(pool->coreRegs, coreRegs, pool->numCoreRegs);
- CompilerInitPool(pool->FPRegs, FpRegs, pool->numFPRegs);
+ (NewMem(cu, num_fp_regs * sizeof(*cu->reg_pool->FPRegs), true, kAllocRegAlloc));
+ CompilerInitPool(pool->core_regs, core_regs, pool->num_core_regs);
+ CompilerInitPool(pool->FPRegs, FpRegs, pool->num_fp_regs);
// Keep special registers from being allocated
- for (int i = 0; i < numReserved; i++) {
+ for (int i = 0; i < num_reserved; i++) {
if (NO_SUSPEND && (ReservedRegs[i] == rARM_SUSPEND)) {
//To measure cost of suspend check
continue;
}
- MarkInUse(cUnit, ReservedRegs[i]);
+ MarkInUse(cu, ReservedRegs[i]);
}
// Mark temp regs - all others not in use can be used for promotion
- for (int i = 0; i < numTemps; i++) {
- MarkTemp(cUnit, coreTemps[i]);
+ for (int i = 0; i < num_temps; i++) {
+ MarkTemp(cu, core_temps[i]);
}
- for (int i = 0; i < numFPTemps; i++) {
- MarkTemp(cUnit, fpTemps[i]);
+ for (int i = 0; i < num_fp_temps; i++) {
+ MarkTemp(cu, fp_temps[i]);
}
// Start allocation at r2 in an attempt to avoid clobbering return values
- pool->nextCoreReg = r2;
+ pool->next_core_reg = r2;
// Construct the alias map.
- cUnit->phiAliasMap = static_cast<int*>
- (NewMem(cUnit, cUnit->numSSARegs * sizeof(cUnit->phiAliasMap[0]), false, kAllocDFInfo));
- for (int i = 0; i < cUnit->numSSARegs; i++) {
- cUnit->phiAliasMap[i] = i;
+ cu->phi_alias_map = static_cast<int*>
+ (NewMem(cu, cu->num_ssa_regs * sizeof(cu->phi_alias_map[0]), false, kAllocDFInfo));
+ for (int i = 0; i < cu->num_ssa_regs; i++) {
+ cu->phi_alias_map[i] = i;
}
- for (MIR* phi = cUnit->phiList; phi; phi = phi->meta.phiNext) {
- int defReg = phi->ssaRep->defs[0];
- for (int i = 0; i < phi->ssaRep->numUses; i++) {
- for (int j = 0; j < cUnit->numSSARegs; j++) {
- if (cUnit->phiAliasMap[j] == phi->ssaRep->uses[i]) {
- cUnit->phiAliasMap[j] = defReg;
+ for (MIR* phi = cu->phi_list; phi; phi = phi->meta.phi_next) {
+ int def_reg = phi->ssa_rep->defs[0];
+ for (int i = 0; i < phi->ssa_rep->num_uses; i++) {
+ for (int j = 0; j < cu->num_ssa_regs; j++) {
+ if (cu->phi_alias_map[j] == phi->ssa_rep->uses[i]) {
+ cu->phi_alias_map[j] = def_reg;
}
}
}
}
}
-void FreeRegLocTemps(CompilationUnit* cUnit, RegLocation rlKeep,
- RegLocation rlFree)
+void FreeRegLocTemps(CompilationUnit* cu, RegLocation rl_keep,
+ RegLocation rl_free)
{
- if ((rlFree.lowReg != rlKeep.lowReg) && (rlFree.lowReg != rlKeep.highReg) &&
- (rlFree.highReg != rlKeep.lowReg) && (rlFree.highReg != rlKeep.highReg)) {
+ if ((rl_free.low_reg != rl_keep.low_reg) && (rl_free.low_reg != rl_keep.high_reg) &&
+ (rl_free.high_reg != rl_keep.low_reg) && (rl_free.high_reg != rl_keep.high_reg)) {
// No overlap, free both
- FreeTemp(cUnit, rlFree.lowReg);
- FreeTemp(cUnit, rlFree.highReg);
+ FreeTemp(cu, rl_free.low_reg);
+ FreeTemp(cu, rl_free.high_reg);
}
}
/*
- * TUNING: is leaf? Can't just use "hasInvoke" to determine as some
+ * TUNING: is leaf? Can't just use "has_invoke" to determine as some
* instructions might call out to C/assembly helper functions. Until
* machinery is in place, always spill lr.
*/
-void AdjustSpillMask(CompilationUnit* cUnit)
+void AdjustSpillMask(CompilationUnit* cu)
{
- cUnit->coreSpillMask |= (1 << rARM_LR);
- cUnit->numCoreSpills++;
+ cu->core_spill_mask |= (1 << rARM_LR);
+ cu->num_core_spills++;
}
/*
@@ -657,52 +657,52 @@
* include any holes in the mask. Associate holes with
* Dalvik register INVALID_VREG (0xFFFFU).
*/
-void MarkPreservedSingle(CompilationUnit* cUnit, int vReg, int reg)
+void MarkPreservedSingle(CompilationUnit* cu, int v_reg, int reg)
{
DCHECK_GE(reg, ARM_FP_REG_MASK + ARM_FP_CALLEE_SAVE_BASE);
reg = (reg & ARM_FP_REG_MASK) - ARM_FP_CALLEE_SAVE_BASE;
- // Ensure fpVmapTable is large enough
- int tableSize = cUnit->fpVmapTable.size();
- for (int i = tableSize; i < (reg + 1); i++) {
- cUnit->fpVmapTable.push_back(INVALID_VREG);
+ // Ensure fp_vmap_table is large enough
+ int table_size = cu->fp_vmap_table.size();
+ for (int i = table_size; i < (reg + 1); i++) {
+ cu->fp_vmap_table.push_back(INVALID_VREG);
}
// Add the current mapping
- cUnit->fpVmapTable[reg] = vReg;
- // Size of fpVmapTable is high-water mark, use to set mask
- cUnit->numFPSpills = cUnit->fpVmapTable.size();
- cUnit->fpSpillMask = ((1 << cUnit->numFPSpills) - 1) << ARM_FP_CALLEE_SAVE_BASE;
+ cu->fp_vmap_table[reg] = v_reg;
+ // Size of fp_vmap_table is high-water mark, use to set mask
+ cu->num_fp_spills = cu->fp_vmap_table.size();
+ cu->fp_spill_mask = ((1 << cu->num_fp_spills) - 1) << ARM_FP_CALLEE_SAVE_BASE;
}
-void FlushRegWide(CompilationUnit* cUnit, int reg1, int reg2)
+void FlushRegWide(CompilationUnit* cu, int reg1, int reg2)
{
- RegisterInfo* info1 = GetRegInfo(cUnit, reg1);
- RegisterInfo* info2 = GetRegInfo(cUnit, reg2);
+ RegisterInfo* info1 = GetRegInfo(cu, reg1);
+ RegisterInfo* info2 = GetRegInfo(cu, reg2);
DCHECK(info1 && info2 && info1->pair && info2->pair &&
(info1->partner == info2->reg) &&
(info2->partner == info1->reg));
if ((info1->live && info1->dirty) || (info2->live && info2->dirty)) {
- if (!(info1->isTemp && info2->isTemp)) {
- /* Should not happen. If it does, there's a problem in evalLoc */
+ if (!(info1->is_temp && info2->is_temp)) {
+ /* Should not happen. If it does, there's a problem in EvalLoc */
LOG(FATAL) << "Long half-temp, half-promoted";
}
info1->dirty = false;
info2->dirty = false;
- if (SRegToVReg(cUnit, info2->sReg) <
- SRegToVReg(cUnit, info1->sReg))
+ if (SRegToVReg(cu, info2->s_reg) <
+ SRegToVReg(cu, info1->s_reg))
info1 = info2;
- int vReg = SRegToVReg(cUnit, info1->sReg);
- StoreBaseDispWide(cUnit, rARM_SP, VRegOffset(cUnit, vReg), info1->reg, info1->partner);
+ int v_reg = SRegToVReg(cu, info1->s_reg);
+ StoreBaseDispWide(cu, rARM_SP, VRegOffset(cu, v_reg), info1->reg, info1->partner);
}
}
-void FlushReg(CompilationUnit* cUnit, int reg)
+void FlushReg(CompilationUnit* cu, int reg)
{
- RegisterInfo* info = GetRegInfo(cUnit, reg);
+ RegisterInfo* info = GetRegInfo(cu, reg);
if (info->live && info->dirty) {
info->dirty = false;
- int vReg = SRegToVReg(cUnit, info->sReg);
- StoreBaseDisp(cUnit, rARM_SP, VRegOffset(cUnit, vReg), reg, kWord);
+ int v_reg = SRegToVReg(cu, info->s_reg);
+ StoreBaseDisp(cu, rARM_SP, VRegOffset(cu, v_reg), reg, kWord);
}
}
@@ -712,81 +712,81 @@
}
/* Clobber all regs that might be used by an external C call */
-void ClobberCalleeSave(CompilationUnit *cUnit)
+void ClobberCalleeSave(CompilationUnit *cu)
{
- Clobber(cUnit, r0);
- Clobber(cUnit, r1);
- Clobber(cUnit, r2);
- Clobber(cUnit, r3);
- Clobber(cUnit, r12);
- Clobber(cUnit, r14lr);
- Clobber(cUnit, fr0);
- Clobber(cUnit, fr1);
- Clobber(cUnit, fr2);
- Clobber(cUnit, fr3);
- Clobber(cUnit, fr4);
- Clobber(cUnit, fr5);
- Clobber(cUnit, fr6);
- Clobber(cUnit, fr7);
- Clobber(cUnit, fr8);
- Clobber(cUnit, fr9);
- Clobber(cUnit, fr10);
- Clobber(cUnit, fr11);
- Clobber(cUnit, fr12);
- Clobber(cUnit, fr13);
- Clobber(cUnit, fr14);
- Clobber(cUnit, fr15);
+ Clobber(cu, r0);
+ Clobber(cu, r1);
+ Clobber(cu, r2);
+ Clobber(cu, r3);
+ Clobber(cu, r12);
+ Clobber(cu, r14lr);
+ Clobber(cu, fr0);
+ Clobber(cu, fr1);
+ Clobber(cu, fr2);
+ Clobber(cu, fr3);
+ Clobber(cu, fr4);
+ Clobber(cu, fr5);
+ Clobber(cu, fr6);
+ Clobber(cu, fr7);
+ Clobber(cu, fr8);
+ Clobber(cu, fr9);
+ Clobber(cu, fr10);
+ Clobber(cu, fr11);
+ Clobber(cu, fr12);
+ Clobber(cu, fr13);
+ Clobber(cu, fr14);
+ Clobber(cu, fr15);
}
-RegLocation GetReturnWideAlt(CompilationUnit* cUnit)
+RegLocation GetReturnWideAlt(CompilationUnit* cu)
{
RegLocation res = LocCReturnWide();
- res.lowReg = r2;
- res.highReg = r3;
- Clobber(cUnit, r2);
- Clobber(cUnit, r3);
- MarkInUse(cUnit, r2);
- MarkInUse(cUnit, r3);
- MarkPair(cUnit, res.lowReg, res.highReg);
+ res.low_reg = r2;
+ res.high_reg = r3;
+ Clobber(cu, r2);
+ Clobber(cu, r3);
+ MarkInUse(cu, r2);
+ MarkInUse(cu, r3);
+ MarkPair(cu, res.low_reg, res.high_reg);
return res;
}
-RegLocation GetReturnAlt(CompilationUnit* cUnit)
+RegLocation GetReturnAlt(CompilationUnit* cu)
{
RegLocation res = LocCReturn();
- res.lowReg = r1;
- Clobber(cUnit, r1);
- MarkInUse(cUnit, r1);
+ res.low_reg = r1;
+ Clobber(cu, r1);
+ MarkInUse(cu, r1);
return res;
}
-RegisterInfo* GetRegInfo(CompilationUnit* cUnit, int reg)
+RegisterInfo* GetRegInfo(CompilationUnit* cu, int reg)
{
- return ARM_FPREG(reg) ? &cUnit->regPool->FPRegs[reg & ARM_FP_REG_MASK]
- : &cUnit->regPool->coreRegs[reg];
+ return ARM_FPREG(reg) ? &cu->reg_pool->FPRegs[reg & ARM_FP_REG_MASK]
+ : &cu->reg_pool->core_regs[reg];
}
/* To be used when explicitly managing register use */
-void LockCallTemps(CompilationUnit* cUnit)
+void LockCallTemps(CompilationUnit* cu)
{
- LockTemp(cUnit, r0);
- LockTemp(cUnit, r1);
- LockTemp(cUnit, r2);
- LockTemp(cUnit, r3);
+ LockTemp(cu, r0);
+ LockTemp(cu, r1);
+ LockTemp(cu, r2);
+ LockTemp(cu, r3);
}
/* To be used when explicitly managing register use */
-void FreeCallTemps(CompilationUnit* cUnit)
+void FreeCallTemps(CompilationUnit* cu)
{
- FreeTemp(cUnit, r0);
- FreeTemp(cUnit, r1);
- FreeTemp(cUnit, r2);
- FreeTemp(cUnit, r3);
+ FreeTemp(cu, r0);
+ FreeTemp(cu, r1);
+ FreeTemp(cu, r2);
+ FreeTemp(cu, r3);
}
-int LoadHelper(CompilationUnit* cUnit, int offset)
+int LoadHelper(CompilationUnit* cu, int offset)
{
- LoadWordDisp(cUnit, rARM_SELF, offset, rARM_LR);
+ LoadWordDisp(cu, rARM_SELF, offset, rARM_LR);
return rARM_LR;
}
diff --git a/src/compiler/codegen/arm/utility_arm.cc b/src/compiler/codegen/arm/utility_arm.cc
index bfb05d5..b064135 100644
--- a/src/compiler/codegen/arm/utility_arm.cc
+++ b/src/compiler/codegen/arm/utility_arm.cc
@@ -25,42 +25,42 @@
static int EncodeImmSingle(int value)
{
int res;
- int bitA = (value & 0x80000000) >> 31;
- int notBitB = (value & 0x40000000) >> 30;
- int bitB = (value & 0x20000000) >> 29;
- int bSmear = (value & 0x3e000000) >> 25;
+ int bit_a = (value & 0x80000000) >> 31;
+ int not_bit_b = (value & 0x40000000) >> 30;
+ int bit_b = (value & 0x20000000) >> 29;
+ int b_smear = (value & 0x3e000000) >> 25;
int slice = (value & 0x01f80000) >> 19;
int zeroes = (value & 0x0007ffff);
if (zeroes != 0)
return -1;
- if (bitB) {
- if ((notBitB != 0) || (bSmear != 0x1f))
+ if (bit_b) {
+ if ((not_bit_b != 0) || (b_smear != 0x1f))
return -1;
} else {
- if ((notBitB != 1) || (bSmear != 0x0))
+ if ((not_bit_b != 1) || (b_smear != 0x0))
return -1;
}
- res = (bitA << 7) | (bitB << 6) | slice;
+ res = (bit_a << 7) | (bit_b << 6) | slice;
return res;
}
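A worked example: 1.0f is 0x3f800000, which survives every check:

  //   bit_a = 0, not_bit_b = 0, bit_b = 1, b_smear = 0x1f,
  //   slice = 0x30, zeroes = 0
  //   res = (0 << 7) | (1 << 6) | 0x30 = 0x70   // vmov.f32 imm8 for 1.0
  // By contrast 2.1f (0x40066666) fails the zeroes test and must come
  // from the literal pool.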
-static LIR* LoadFPConstantValue(CompilationUnit* cUnit, int rDest, int value)
+static LIR* LoadFPConstantValue(CompilationUnit* cu, int r_dest, int value)
{
- int encodedImm = EncodeImmSingle(value);
- DCHECK(ARM_SINGLEREG(rDest));
- if (encodedImm >= 0) {
- return NewLIR2(cUnit, kThumb2Vmovs_IMM8, rDest, encodedImm);
+ int encoded_imm = EncodeImmSingle(value);
+ DCHECK(ARM_SINGLEREG(r_dest));
+ if (encoded_imm >= 0) {
+ return NewLIR2(cu, kThumb2Vmovs_IMM8, r_dest, encoded_imm);
}
- LIR* dataTarget = ScanLiteralPool(cUnit->literalList, value, 0);
- if (dataTarget == NULL) {
- dataTarget = AddWordData(cUnit, &cUnit->literalList, value);
+ LIR* data_target = ScanLiteralPool(cu->literal_list, value, 0);
+ if (data_target == NULL) {
+ data_target = AddWordData(cu, &cu->literal_list, value);
}
- LIR* loadPcRel = RawLIR(cUnit, cUnit->currentDalvikOffset, kThumb2Vldrs,
- rDest, r15pc, 0, 0, 0, dataTarget);
- SetMemRefType(loadPcRel, true, kLiteral);
- loadPcRel->aliasInfo = reinterpret_cast<uintptr_t>(dataTarget);
- AppendLIR(cUnit, loadPcRel);
- return loadPcRel;
+ LIR* load_pc_rel = RawLIR(cu, cu->current_dalvik_offset, kThumb2Vldrs,
+ r_dest, r15pc, 0, 0, 0, data_target);
+ SetMemRefType(load_pc_rel, true, kLiteral);
+ load_pc_rel->alias_info = reinterpret_cast<uintptr_t>(data_target);
+ AppendLIR(cu, load_pc_rel);
+ return load_pc_rel;
}
static int LeadingZeros(uint32_t val)
@@ -88,8 +88,8 @@
*/
int ModifiedImmediate(uint32_t value)
{
- int zLeading;
- int zTrailing;
+ int z_leading;
+ int z_trailing;
uint32_t b0 = value & 0xff;
/* Note: case of value==0 must use 0:000:0:0000000 encoding */
@@ -103,17 +103,17 @@
if (value == ((b0 << 24) | (b0 << 8)))
return (0x2 << 8) | b0; /* 0:010:a:bcdefgh */
/* Can we do it with rotation? */
- zLeading = LeadingZeros(value);
- zTrailing = 32 - LeadingZeros(~value & (value - 1));
+ z_leading = LeadingZeros(value);
+ z_trailing = 32 - LeadingZeros(~value & (value - 1));
/* A run of eight or fewer active bits? */
- if ((zLeading + zTrailing) < 24)
+ if ((z_leading + z_trailing) < 24)
return -1; /* No - bail */
/* left-justify the constant, discarding msb (known to be 1) */
- value <<= zLeading + 1;
+ value <<= z_leading + 1;
/* Create bcdefgh */
value >>= 25;
/* Put it all together */
- return value | ((0x8 + zLeading) << 7); /* [01000..11111]:bcdefgh */
+ return value | ((0x8 + z_leading) << 7); /* [01000..11111]:bcdefgh */
}
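A worked example of the rotation case, value == 0x1fe (a run of eight set bits):

  //   z_leading == 23, z_trailing == 1: 23 + 1 == 24, so no bail-out
  //   value << 24 == 0xfe000000; >> 25 leaves bcdefgh == 0x7f
  //   result == 0x7f | ((8 + 23) << 7) == 0xfff
  // The assembler decodes 0xfff as 0xff rotated right by 31 == 0x1fe.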
/*
@@ -121,75 +121,75 @@
* grab from the per-translation literal pool.
*
* No additional register clobbering operation performed. Use this version when
- * 1) rDest is freshly returned from AllocTemp or
+ * 1) r_dest is freshly returned from AllocTemp or
* 2) The codegen is under fixed register usage
*/
-LIR* LoadConstantNoClobber(CompilationUnit* cUnit, int rDest, int value)
+LIR* LoadConstantNoClobber(CompilationUnit* cu, int r_dest, int value)
{
LIR* res;
- int modImm;
+ int mod_imm;
- if (ARM_FPREG(rDest)) {
- return LoadFPConstantValue(cUnit, rDest, value);
+ if (ARM_FPREG(r_dest)) {
+ return LoadFPConstantValue(cu, r_dest, value);
}
/* See if the value can be constructed cheaply */
- if (ARM_LOWREG(rDest) && (value >= 0) && (value <= 255)) {
- return NewLIR2(cUnit, kThumbMovImm, rDest, value);
+ if (ARM_LOWREG(r_dest) && (value >= 0) && (value <= 255)) {
+ return NewLIR2(cu, kThumbMovImm, r_dest, value);
}
/* Check Modified immediate special cases */
- modImm = ModifiedImmediate(value);
- if (modImm >= 0) {
- res = NewLIR2(cUnit, kThumb2MovImmShift, rDest, modImm);
+ mod_imm = ModifiedImmediate(value);
+ if (mod_imm >= 0) {
+ res = NewLIR2(cu, kThumb2MovImmShift, r_dest, mod_imm);
return res;
}
- modImm = ModifiedImmediate(~value);
- if (modImm >= 0) {
- res = NewLIR2(cUnit, kThumb2MvnImm12, rDest, modImm);
+ mod_imm = ModifiedImmediate(~value);
+ if (mod_imm >= 0) {
+ res = NewLIR2(cu, kThumb2MvnImm12, r_dest, mod_imm);
return res;
}
/* 16-bit immediate? */
if ((value & 0xffff) == value) {
- res = NewLIR2(cUnit, kThumb2MovImm16, rDest, value);
+ res = NewLIR2(cu, kThumb2MovImm16, r_dest, value);
return res;
}
/* No shortcut - go ahead and use literal pool */
- LIR* dataTarget = ScanLiteralPool(cUnit->literalList, value, 0);
- if (dataTarget == NULL) {
- dataTarget = AddWordData(cUnit, &cUnit->literalList, value);
+ LIR* data_target = ScanLiteralPool(cu->literal_list, value, 0);
+ if (data_target == NULL) {
+ data_target = AddWordData(cu, &cu->literal_list, value);
}
- LIR* loadPcRel = RawLIR(cUnit, cUnit->currentDalvikOffset,
- kThumb2LdrPcRel12, rDest, 0, 0, 0, 0, dataTarget);
- SetMemRefType(loadPcRel, true, kLiteral);
- loadPcRel->aliasInfo = reinterpret_cast<uintptr_t>(dataTarget);
- res = loadPcRel;
- AppendLIR(cUnit, loadPcRel);
+ LIR* load_pc_rel = RawLIR(cu, cu->current_dalvik_offset,
+ kThumb2LdrPcRel12, r_dest, 0, 0, 0, 0, data_target);
+ SetMemRefType(load_pc_rel, true, kLiteral);
+ load_pc_rel->alias_info = reinterpret_cast<uintptr_t>(data_target);
+ res = load_pc_rel;
+ AppendLIR(cu, load_pc_rel);
/*
* To save space in the constant pool, we use the ADD_RRI8 instruction to
* add up to 255 to an existing constant value.
*/
- if (dataTarget->operands[0] != value) {
- OpRegImm(cUnit, kOpAdd, rDest, value - dataTarget->operands[0]);
+ if (data_target->operands[0] != value) {
+ OpRegImm(cu, kOpAdd, r_dest, value - data_target->operands[0]);
}
return res;
}
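For orientation, this is how the cascade above classifies a few sample constants, assuming r_dest is a low core register; the opcode names are taken from the branches shown, and the ModifiedImmediate results are worked examples rather than quotes from the source:

//   value        chosen path
//   0x000000ff   kThumbMovImm        (fits 0..255)
//   0x0003fc00   kThumb2MovImmShift  (ModifiedImmediate(v) == 0xb7f)
//   0xfffc03ff   kThumb2MvnImm12     (~v == 0x0003fc00 is encodable)
//   0x00001234   kThumb2MovImm16     ((v & 0xffff) == v)
//   0x12345678   kThumb2LdrPcRel12   (falls through to the literal pool)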
-LIR* OpBranchUnconditional(CompilationUnit* cUnit, OpKind op)
+LIR* OpBranchUnconditional(CompilationUnit* cu, OpKind op)
{
DCHECK_EQ(op, kOpUncondBr);
- return NewLIR1(cUnit, kThumbBUncond, 0 /* offset to be patched */);
+ return NewLIR1(cu, kThumbBUncond, 0 /* offset to be patched */);
}
-LIR* OpCondBranch(CompilationUnit* cUnit, ConditionCode cc, LIR* target)
+LIR* OpCondBranch(CompilationUnit* cu, ConditionCode cc, LIR* target)
{
- LIR* branch = NewLIR2(cUnit, kThumb2BCond, 0 /* offset to be patched */,
+ LIR* branch = NewLIR2(cu, kThumb2BCond, 0 /* offset to be patched */,
ArmConditionEncoding(cc));
branch->target = target;
return branch;
}
-LIR* OpReg(CompilationUnit* cUnit, OpKind op, int rDestSrc)
+LIR* OpReg(CompilationUnit* cu, OpKind op, int r_dest_src)
{
ArmOpcode opcode = kThumbBkpt;
switch (op) {
@@ -199,34 +199,34 @@
default:
LOG(FATAL) << "Bad opcode " << op;
}
- return NewLIR1(cUnit, opcode, rDestSrc);
+ return NewLIR1(cu, opcode, r_dest_src);
}
-LIR* OpRegRegShift(CompilationUnit* cUnit, OpKind op, int rDestSrc1,
- int rSrc2, int shift)
+LIR* OpRegRegShift(CompilationUnit* cu, OpKind op, int r_dest_src1,
+ int r_src2, int shift)
{
- bool thumbForm = ((shift == 0) && ARM_LOWREG(rDestSrc1) && ARM_LOWREG(rSrc2));
+ bool thumb_form = ((shift == 0) && ARM_LOWREG(r_dest_src1) && ARM_LOWREG(r_src2));
ArmOpcode opcode = kThumbBkpt;
switch (op) {
case kOpAdc:
- opcode = (thumbForm) ? kThumbAdcRR : kThumb2AdcRRR;
+ opcode = (thumb_form) ? kThumbAdcRR : kThumb2AdcRRR;
break;
case kOpAnd:
- opcode = (thumbForm) ? kThumbAndRR : kThumb2AndRRR;
+ opcode = (thumb_form) ? kThumbAndRR : kThumb2AndRRR;
break;
case kOpBic:
- opcode = (thumbForm) ? kThumbBicRR : kThumb2BicRRR;
+ opcode = (thumb_form) ? kThumbBicRR : kThumb2BicRRR;
break;
case kOpCmn:
DCHECK_EQ(shift, 0);
- opcode = (thumbForm) ? kThumbCmnRR : kThumb2CmnRR;
+ opcode = (thumb_form) ? kThumbCmnRR : kThumb2CmnRR;
break;
case kOpCmp:
- if (thumbForm)
+ if (thumb_form)
opcode = kThumbCmpRR;
- else if ((shift == 0) && !ARM_LOWREG(rDestSrc1) && !ARM_LOWREG(rSrc2))
+ else if ((shift == 0) && !ARM_LOWREG(r_dest_src1) && !ARM_LOWREG(r_src2))
opcode = kThumbCmpHH;
- else if ((shift == 0) && ARM_LOWREG(rDestSrc1))
+ else if ((shift == 0) && ARM_LOWREG(r_dest_src1))
opcode = kThumbCmpLH;
else if (shift == 0)
opcode = kThumbCmpHL;
@@ -234,107 +234,107 @@
opcode = kThumb2CmpRR;
break;
case kOpXor:
- opcode = (thumbForm) ? kThumbEorRR : kThumb2EorRRR;
+ opcode = (thumb_form) ? kThumbEorRR : kThumb2EorRRR;
break;
case kOpMov:
DCHECK_EQ(shift, 0);
- if (ARM_LOWREG(rDestSrc1) && ARM_LOWREG(rSrc2))
+ if (ARM_LOWREG(r_dest_src1) && ARM_LOWREG(r_src2))
opcode = kThumbMovRR;
- else if (!ARM_LOWREG(rDestSrc1) && !ARM_LOWREG(rSrc2))
+ else if (!ARM_LOWREG(r_dest_src1) && !ARM_LOWREG(r_src2))
opcode = kThumbMovRR_H2H;
- else if (ARM_LOWREG(rDestSrc1))
+ else if (ARM_LOWREG(r_dest_src1))
opcode = kThumbMovRR_H2L;
else
opcode = kThumbMovRR_L2H;
break;
case kOpMul:
DCHECK_EQ(shift, 0);
- opcode = (thumbForm) ? kThumbMul : kThumb2MulRRR;
+ opcode = (thumb_form) ? kThumbMul : kThumb2MulRRR;
break;
case kOpMvn:
- opcode = (thumbForm) ? kThumbMvn : kThumb2MnvRR;
+ opcode = (thumb_form) ? kThumbMvn : kThumb2MnvRR;
break;
case kOpNeg:
DCHECK_EQ(shift, 0);
- opcode = (thumbForm) ? kThumbNeg : kThumb2NegRR;
+ opcode = (thumb_form) ? kThumbNeg : kThumb2NegRR;
break;
case kOpOr:
- opcode = (thumbForm) ? kThumbOrr : kThumb2OrrRRR;
+ opcode = (thumb_form) ? kThumbOrr : kThumb2OrrRRR;
break;
case kOpSbc:
- opcode = (thumbForm) ? kThumbSbc : kThumb2SbcRRR;
+ opcode = (thumb_form) ? kThumbSbc : kThumb2SbcRRR;
break;
case kOpTst:
- opcode = (thumbForm) ? kThumbTst : kThumb2TstRR;
+ opcode = (thumb_form) ? kThumbTst : kThumb2TstRR;
break;
case kOpLsl:
DCHECK_EQ(shift, 0);
- opcode = (thumbForm) ? kThumbLslRR : kThumb2LslRRR;
+ opcode = (thumb_form) ? kThumbLslRR : kThumb2LslRRR;
break;
case kOpLsr:
DCHECK_EQ(shift, 0);
- opcode = (thumbForm) ? kThumbLsrRR : kThumb2LsrRRR;
+ opcode = (thumb_form) ? kThumbLsrRR : kThumb2LsrRRR;
break;
case kOpAsr:
DCHECK_EQ(shift, 0);
- opcode = (thumbForm) ? kThumbAsrRR : kThumb2AsrRRR;
+ opcode = (thumb_form) ? kThumbAsrRR : kThumb2AsrRRR;
break;
case kOpRor:
DCHECK_EQ(shift, 0);
- opcode = (thumbForm) ? kThumbRorRR : kThumb2RorRRR;
+ opcode = (thumb_form) ? kThumbRorRR : kThumb2RorRRR;
break;
case kOpAdd:
- opcode = (thumbForm) ? kThumbAddRRR : kThumb2AddRRR;
+ opcode = (thumb_form) ? kThumbAddRRR : kThumb2AddRRR;
break;
case kOpSub:
- opcode = (thumbForm) ? kThumbSubRRR : kThumb2SubRRR;
+ opcode = (thumb_form) ? kThumbSubRRR : kThumb2SubRRR;
break;
case kOp2Byte:
DCHECK_EQ(shift, 0);
- return NewLIR4(cUnit, kThumb2Sbfx, rDestSrc1, rSrc2, 0, 8);
+ return NewLIR4(cu, kThumb2Sbfx, r_dest_src1, r_src2, 0, 8);
case kOp2Short:
DCHECK_EQ(shift, 0);
- return NewLIR4(cUnit, kThumb2Sbfx, rDestSrc1, rSrc2, 0, 16);
+ return NewLIR4(cu, kThumb2Sbfx, r_dest_src1, r_src2, 0, 16);
case kOp2Char:
DCHECK_EQ(shift, 0);
- return NewLIR4(cUnit, kThumb2Ubfx, rDestSrc1, rSrc2, 0, 16);
+ return NewLIR4(cu, kThumb2Ubfx, r_dest_src1, r_src2, 0, 16);
default:
LOG(FATAL) << "Bad opcode: " << op;
break;
}
DCHECK_GE(static_cast<int>(opcode), 0);
if (EncodingMap[opcode].flags & IS_BINARY_OP)
- return NewLIR2(cUnit, opcode, rDestSrc1, rSrc2);
+ return NewLIR2(cu, opcode, r_dest_src1, r_src2);
else if (EncodingMap[opcode].flags & IS_TERTIARY_OP) {
- if (EncodingMap[opcode].fieldLoc[2].kind == kFmtShift)
- return NewLIR3(cUnit, opcode, rDestSrc1, rSrc2, shift);
+ if (EncodingMap[opcode].field_loc[2].kind == kFmtShift)
+ return NewLIR3(cu, opcode, r_dest_src1, r_src2, shift);
else
- return NewLIR3(cUnit, opcode, rDestSrc1, rDestSrc1, rSrc2);
+ return NewLIR3(cu, opcode, r_dest_src1, r_dest_src1, r_src2);
} else if (EncodingMap[opcode].flags & IS_QUAD_OP)
- return NewLIR4(cUnit, opcode, rDestSrc1, rDestSrc1, rSrc2, shift);
+ return NewLIR4(cu, opcode, r_dest_src1, r_dest_src1, r_src2, shift);
else {
LOG(FATAL) << "Unexpected encoding operand count";
return NULL;
}
}
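To make the dispatch at the end of OpRegRegShift concrete, here are the LIR shapes a few representative opcodes would produce; the flag and field_loc assignments are assumed from the instruction shapes, not quoted from the EncodingMap:

//   kThumbAndRR     IS_BINARY_OP               -> NewLIR2(cu, op, rd, rm)
//   kThumb2MnvRR    IS_TERTIARY_OP + kFmtShift -> NewLIR3(cu, op, rd, rm, shift)
//   kThumb2MulRRR   IS_TERTIARY_OP             -> NewLIR3(cu, op, rd, rd, rm)
//   kThumb2AndRRR   IS_QUAD_OP                 -> NewLIR4(cu, op, rd, rd, rm, shift)
// The two-address forms repeat r_dest_src1 because it serves as both the
// destination and the first source.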
-LIR* OpRegReg(CompilationUnit* cUnit, OpKind op, int rDestSrc1, int rSrc2)
+LIR* OpRegReg(CompilationUnit* cu, OpKind op, int r_dest_src1, int r_src2)
{
- return OpRegRegShift(cUnit, op, rDestSrc1, rSrc2, 0);
+ return OpRegRegShift(cu, op, r_dest_src1, r_src2, 0);
}
-LIR* OpRegRegRegShift(CompilationUnit* cUnit, OpKind op, int rDest, int rSrc1,
- int rSrc2, int shift)
+LIR* OpRegRegRegShift(CompilationUnit* cu, OpKind op, int r_dest, int r_src1,
+ int r_src2, int shift)
{
ArmOpcode opcode = kThumbBkpt;
- bool thumbForm = (shift == 0) && ARM_LOWREG(rDest) && ARM_LOWREG(rSrc1) &&
- ARM_LOWREG(rSrc2);
+ bool thumb_form = (shift == 0) && ARM_LOWREG(r_dest) && ARM_LOWREG(r_src1) &&
+ ARM_LOWREG(r_src2);
switch (op) {
case kOpAdd:
- opcode = (thumbForm) ? kThumbAddRRR : kThumb2AddRRR;
+ opcode = (thumb_form) ? kThumbAddRRR : kThumb2AddRRR;
break;
case kOpSub:
- opcode = (thumbForm) ? kThumbSubRRR : kThumb2SubRRR;
+ opcode = (thumb_form) ? kThumbSubRRR : kThumb2SubRRR;
break;
case kOpRsub:
opcode = kThumb2RsubRRR;
@@ -383,119 +383,119 @@
}
DCHECK_GE(static_cast<int>(opcode), 0);
if (EncodingMap[opcode].flags & IS_QUAD_OP)
- return NewLIR4(cUnit, opcode, rDest, rSrc1, rSrc2, shift);
+ return NewLIR4(cu, opcode, r_dest, r_src1, r_src2, shift);
else {
DCHECK(EncodingMap[opcode].flags & IS_TERTIARY_OP);
- return NewLIR3(cUnit, opcode, rDest, rSrc1, rSrc2);
+ return NewLIR3(cu, opcode, r_dest, r_src1, r_src2);
}
}
-LIR* OpRegRegReg(CompilationUnit* cUnit, OpKind op, int rDest, int rSrc1,
- int rSrc2)
+LIR* OpRegRegReg(CompilationUnit* cu, OpKind op, int r_dest, int r_src1,
+ int r_src2)
{
- return OpRegRegRegShift(cUnit, op, rDest, rSrc1, rSrc2, 0);
+ return OpRegRegRegShift(cu, op, r_dest, r_src1, r_src2, 0);
}
-LIR* OpRegRegImm(CompilationUnit* cUnit, OpKind op, int rDest, int rSrc1,
+LIR* OpRegRegImm(CompilationUnit* cu, OpKind op, int r_dest, int r_src1,
int value)
{
LIR* res;
bool neg = (value < 0);
- int absValue = (neg) ? -value : value;
+ int abs_value = (neg) ? -value : value;
ArmOpcode opcode = kThumbBkpt;
- ArmOpcode altOpcode = kThumbBkpt;
- bool allLowRegs = (ARM_LOWREG(rDest) && ARM_LOWREG(rSrc1));
- int modImm = ModifiedImmediate(value);
- int modImmNeg = ModifiedImmediate(-value);
+ ArmOpcode alt_opcode = kThumbBkpt;
+ bool all_low_regs = (ARM_LOWREG(r_dest) && ARM_LOWREG(r_src1));
+ int mod_imm = ModifiedImmediate(value);
+ int mod_imm_neg = ModifiedImmediate(-value);
switch (op) {
case kOpLsl:
- if (allLowRegs)
- return NewLIR3(cUnit, kThumbLslRRI5, rDest, rSrc1, value);
+ if (all_low_regs)
+ return NewLIR3(cu, kThumbLslRRI5, r_dest, r_src1, value);
else
- return NewLIR3(cUnit, kThumb2LslRRI5, rDest, rSrc1, value);
+ return NewLIR3(cu, kThumb2LslRRI5, r_dest, r_src1, value);
case kOpLsr:
- if (allLowRegs)
- return NewLIR3(cUnit, kThumbLsrRRI5, rDest, rSrc1, value);
+ if (all_low_regs)
+ return NewLIR3(cu, kThumbLsrRRI5, r_dest, r_src1, value);
else
- return NewLIR3(cUnit, kThumb2LsrRRI5, rDest, rSrc1, value);
+ return NewLIR3(cu, kThumb2LsrRRI5, r_dest, r_src1, value);
case kOpAsr:
- if (allLowRegs)
- return NewLIR3(cUnit, kThumbAsrRRI5, rDest, rSrc1, value);
+ if (all_low_regs)
+ return NewLIR3(cu, kThumbAsrRRI5, r_dest, r_src1, value);
else
- return NewLIR3(cUnit, kThumb2AsrRRI5, rDest, rSrc1, value);
+ return NewLIR3(cu, kThumb2AsrRRI5, r_dest, r_src1, value);
case kOpRor:
- return NewLIR3(cUnit, kThumb2RorRRI5, rDest, rSrc1, value);
+ return NewLIR3(cu, kThumb2RorRRI5, r_dest, r_src1, value);
case kOpAdd:
- if (ARM_LOWREG(rDest) && (rSrc1 == r13sp) &&
+ if (ARM_LOWREG(r_dest) && (r_src1 == r13sp) &&
(value <= 1020) && ((value & 0x3)==0)) {
- return NewLIR3(cUnit, kThumbAddSpRel, rDest, rSrc1, value >> 2);
- } else if (ARM_LOWREG(rDest) && (rSrc1 == r15pc) &&
+ return NewLIR3(cu, kThumbAddSpRel, r_dest, r_src1, value >> 2);
+ } else if (ARM_LOWREG(r_dest) && (r_src1 == r15pc) &&
(value <= 1020) && ((value & 0x3)==0)) {
- return NewLIR3(cUnit, kThumbAddPcRel, rDest, rSrc1, value >> 2);
+ return NewLIR3(cu, kThumbAddPcRel, r_dest, r_src1, value >> 2);
}
// Note: intentional fallthrough
case kOpSub:
- if (allLowRegs && ((absValue & 0x7) == absValue)) {
+ if (all_low_regs && ((abs_value & 0x7) == abs_value)) {
if (op == kOpAdd)
opcode = (neg) ? kThumbSubRRI3 : kThumbAddRRI3;
else
opcode = (neg) ? kThumbAddRRI3 : kThumbSubRRI3;
- return NewLIR3(cUnit, opcode, rDest, rSrc1, absValue);
- } else if ((absValue & 0xff) == absValue) {
+ return NewLIR3(cu, opcode, r_dest, r_src1, abs_value);
+ } else if ((abs_value & 0xff) == abs_value) {
if (op == kOpAdd)
opcode = (neg) ? kThumb2SubRRI12 : kThumb2AddRRI12;
else
opcode = (neg) ? kThumb2AddRRI12 : kThumb2SubRRI12;
- return NewLIR3(cUnit, opcode, rDest, rSrc1, absValue);
+ return NewLIR3(cu, opcode, r_dest, r_src1, abs_value);
}
- if (modImmNeg >= 0) {
+ if (mod_imm_neg >= 0) {
op = (op == kOpAdd) ? kOpSub : kOpAdd;
- modImm = modImmNeg;
+ mod_imm = mod_imm_neg;
}
if (op == kOpSub) {
opcode = kThumb2SubRRI8;
- altOpcode = kThumb2SubRRR;
+ alt_opcode = kThumb2SubRRR;
} else {
opcode = kThumb2AddRRI8;
- altOpcode = kThumb2AddRRR;
+ alt_opcode = kThumb2AddRRR;
}
break;
case kOpAdc:
opcode = kThumb2AdcRRI8;
- altOpcode = kThumb2AdcRRR;
+ alt_opcode = kThumb2AdcRRR;
break;
case kOpSbc:
opcode = kThumb2SbcRRI8;
- altOpcode = kThumb2SbcRRR;
+ alt_opcode = kThumb2SbcRRR;
break;
case kOpOr:
opcode = kThumb2OrrRRI8;
- altOpcode = kThumb2OrrRRR;
+ alt_opcode = kThumb2OrrRRR;
break;
case kOpAnd:
opcode = kThumb2AndRRI8;
- altOpcode = kThumb2AndRRR;
+ alt_opcode = kThumb2AndRRR;
break;
case kOpXor:
opcode = kThumb2EorRRI8;
- altOpcode = kThumb2EorRRR;
+ alt_opcode = kThumb2EorRRR;
break;
case kOpMul:
// TUNING: power of 2, shift & add
- modImm = -1;
- altOpcode = kThumb2MulRRR;
+ mod_imm = -1;
+ alt_opcode = kThumb2MulRRR;
break;
case kOpCmp: {
- int modImm = ModifiedImmediate(value);
+ int mod_imm = ModifiedImmediate(value);
LIR* res;
- if (modImm >= 0) {
- res = NewLIR2(cUnit, kThumb2CmpRI8, rSrc1, modImm);
+ if (mod_imm >= 0) {
+ res = NewLIR2(cu, kThumb2CmpRI8, r_src1, mod_imm);
} else {
- int rTmp = AllocTemp(cUnit);
- res = LoadConstant(cUnit, rTmp, value);
- OpRegReg(cUnit, kOpCmp, rSrc1, rTmp);
- FreeTemp(cUnit, rTmp);
+ int r_tmp = AllocTemp(cu);
+ res = LoadConstant(cu, r_tmp, value);
+ OpRegReg(cu, kOpCmp, r_src1, r_tmp);
+ FreeTemp(cu, r_tmp);
}
return res;
}
@@ -503,63 +503,63 @@
LOG(FATAL) << "Bad opcode: " << op;
}
- if (modImm >= 0) {
- return NewLIR3(cUnit, opcode, rDest, rSrc1, modImm);
+ if (mod_imm >= 0) {
+ return NewLIR3(cu, opcode, r_dest, r_src1, mod_imm);
} else {
- int rScratch = AllocTemp(cUnit);
- LoadConstant(cUnit, rScratch, value);
- if (EncodingMap[altOpcode].flags & IS_QUAD_OP)
- res = NewLIR4(cUnit, altOpcode, rDest, rSrc1, rScratch, 0);
+ int r_scratch = AllocTemp(cu);
+ LoadConstant(cu, r_scratch, value);
+ if (EncodingMap[alt_opcode].flags & IS_QUAD_OP)
+ res = NewLIR4(cu, alt_opcode, r_dest, r_src1, r_scratch, 0);
else
- res = NewLIR3(cUnit, altOpcode, rDest, rSrc1, rScratch);
- FreeTemp(cUnit, rScratch);
+ res = NewLIR3(cu, alt_opcode, r_dest, r_src1, r_scratch);
+ FreeTemp(cu, r_scratch);
return res;
}
}
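The add/sub flip above deserves a worked example: when -value has a modified-immediate encoding but value does not, the operation is inverted instead of spilling the constant to a temp. A hypothetical call, not taken from the source:

// OpRegRegImm(cu, kOpAdd, rd, rn, -0x3fc00):
//   abs_value == 0x3fc00 is too large for the RRI3 and RRI12 short forms;
//   ModifiedImmediate(-0x3fc00) < 0, but
//   ModifiedImmediate(0x3fc00) == 0xb7f, so op flips to kOpSub and a
//   single kThumb2SubRRI8 (sub rd, rn, #0x3fc00) is emitted.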
/* Handle Thumb-only variants here - otherwise punt to OpRegRegImm */
-LIR* OpRegImm(CompilationUnit* cUnit, OpKind op, int rDestSrc1, int value)
+LIR* OpRegImm(CompilationUnit* cu, OpKind op, int r_dest_src1, int value)
{
bool neg = (value < 0);
- int absValue = (neg) ? -value : value;
- bool shortForm = (((absValue & 0xff) == absValue) && ARM_LOWREG(rDestSrc1));
+ int abs_value = (neg) ? -value : value;
+ bool short_form = (((abs_value & 0xff) == abs_value) && ARM_LOWREG(r_dest_src1));
ArmOpcode opcode = kThumbBkpt;
switch (op) {
case kOpAdd:
- if ( !neg && (rDestSrc1 == r13sp) && (value <= 508)) { /* sp */
+ if (!neg && (r_dest_src1 == r13sp) && (value <= 508)) { /* sp */
DCHECK_EQ((value & 0x3), 0);
- return NewLIR1(cUnit, kThumbAddSpI7, value >> 2);
- } else if (shortForm) {
+ return NewLIR1(cu, kThumbAddSpI7, value >> 2);
+ } else if (short_form) {
opcode = (neg) ? kThumbSubRI8 : kThumbAddRI8;
}
break;
case kOpSub:
- if (!neg && (rDestSrc1 == r13sp) && (value <= 508)) { /* sp */
+ if (!neg && (r_dest_src1 == r13sp) && (value <= 508)) { /* sp */
DCHECK_EQ((value & 0x3), 0);
- return NewLIR1(cUnit, kThumbSubSpI7, value >> 2);
- } else if (shortForm) {
+ return NewLIR1(cu, kThumbSubSpI7, value >> 2);
+ } else if (short_form) {
opcode = (neg) ? kThumbAddRI8 : kThumbSubRI8;
}
break;
case kOpCmp:
- if (ARM_LOWREG(rDestSrc1) && shortForm)
- opcode = (shortForm) ? kThumbCmpRI8 : kThumbCmpRR;
- else if (ARM_LOWREG(rDestSrc1))
+ if (ARM_LOWREG(r_dest_src1) && short_form)
+ opcode = (short_form) ? kThumbCmpRI8 : kThumbCmpRR;
+ else if (ARM_LOWREG(r_dest_src1))
opcode = kThumbCmpRR;
else {
- shortForm = false;
+ short_form = false;
opcode = kThumbCmpHL;
}
break;
default:
/* Punt to OpRegRegImm - if bad case catch it there */
- shortForm = false;
+ short_form = false;
break;
}
- if (shortForm)
- return NewLIR2(cUnit, opcode, rDestSrc1, absValue);
+ if (short_form)
+ return NewLIR2(cu, opcode, r_dest_src1, abs_value);
else {
- return OpRegRegImm(cUnit, op, rDestSrc1, rDestSrc1, value);
+ return OpRegRegImm(cu, op, r_dest_src1, r_dest_src1, value);
}
}
@@ -570,58 +570,58 @@
static int EncodeImmDoubleHigh(int value)
{
int res;
- int bitA = (value & 0x80000000) >> 31;
- int notBitB = (value & 0x40000000) >> 30;
- int bitB = (value & 0x20000000) >> 29;
- int bSmear = (value & 0x3fc00000) >> 22;
+ int bit_a = (value & 0x80000000) >> 31;
+ int not_bit_b = (value & 0x40000000) >> 30;
+ int bit_b = (value & 0x20000000) >> 29;
+ int b_smear = (value & 0x3fc00000) >> 22;
int slice = (value & 0x003f0000) >> 16;
int zeroes = (value & 0x0000ffff);
if (zeroes != 0)
return -1;
- if (bitB) {
- if ((notBitB != 0) || (bSmear != 0xff))
+ if (bit_b) {
+ if ((not_bit_b != 0) || (b_smear != 0xff))
return -1;
} else {
- if ((notBitB != 1) || (bSmear != 0x0))
+ if ((not_bit_b != 1) || (b_smear != 0x0))
return -1;
}
- res = (bitA << 7) | (bitB << 6) | slice;
+ res = (bit_a << 7) | (bit_b << 6) | slice;
return res;
}
-static int EncodeImmDouble(int valLo, int valHi)
+static int EncodeImmDouble(int val_lo, int val_hi)
{
int res = -1;
- if (valLo == 0)
- res = EncodeImmDoubleHigh(valHi);
+ if (val_lo == 0)
+ res = EncodeImmDoubleHigh(val_hi);
return res;
}
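Together the two helpers implement the vmov.f64 immediate test: the low word must be zero, the exponent's top bits must look like Not(b):b:...:b, and only the bcdefgh slice of the significand may be set. Below is a standalone mirror for checking values by hand; the bit slicing is copied from EncodeImmDoubleHigh, while the sample doubles and expected imm8 values are worked examples:

#include <cstdint>
#include <cstdio>
#include <cstring>

static int EncodeImmDoubleHigh(int value) {
  int bit_a = (value & 0x80000000) >> 31;
  int not_bit_b = (value & 0x40000000) >> 30;
  int bit_b = (value & 0x20000000) >> 29;
  int b_smear = (value & 0x3fc00000) >> 22;
  int slice = (value & 0x003f0000) >> 16;
  if ((value & 0x0000ffff) != 0)
    return -1;
  if (bit_b ? ((not_bit_b != 0) || (b_smear != 0xff))
            : ((not_bit_b != 1) || (b_smear != 0x0)))
    return -1;
  return (bit_a << 7) | (bit_b << 6) | slice;
}

int main() {
  const double samples[] = {1.0, 2.0, 31.0, 0.1};
  for (double d : samples) {
    uint64_t bits;
    memcpy(&bits, &d, sizeof(bits));
    int imm = (static_cast<uint32_t>(bits) == 0)
        ? EncodeImmDoubleHigh(static_cast<int>(bits >> 32))
        : -1;
    printf("%4g -> %d\n", d, imm);  // 1.0 -> 112 (0x70), 2.0 -> 0 (0x00),
  }                                 // 31.0 -> 63 (0x3f), 0.1 -> -1
  return 0;
}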
-LIR* LoadConstantValueWide(CompilationUnit* cUnit, int rDestLo, int rDestHi,
- int valLo, int valHi)
+LIR* LoadConstantValueWide(CompilationUnit* cu, int r_dest_lo, int r_dest_hi,
+ int val_lo, int val_hi)
{
- int encodedImm = EncodeImmDouble(valLo, valHi);
+ int encoded_imm = EncodeImmDouble(val_lo, val_hi);
LIR* res;
- if (ARM_FPREG(rDestLo)) {
- if (encodedImm >= 0) {
- res = NewLIR2(cUnit, kThumb2Vmovd_IMM8, S2d(rDestLo, rDestHi),
- encodedImm);
+ if (ARM_FPREG(r_dest_lo)) {
+ if (encoded_imm >= 0) {
+ res = NewLIR2(cu, kThumb2Vmovd_IMM8, S2d(r_dest_lo, r_dest_hi),
+ encoded_imm);
} else {
- LIR* dataTarget = ScanLiteralPoolWide(cUnit->literalList, valLo, valHi);
- if (dataTarget == NULL) {
- dataTarget = AddWideData(cUnit, &cUnit->literalList, valLo, valHi);
+ LIR* data_target = ScanLiteralPoolWide(cu->literal_list, val_lo, val_hi);
+ if (data_target == NULL) {
+ data_target = AddWideData(cu, &cu->literal_list, val_lo, val_hi);
}
- LIR* loadPcRel =
- RawLIR(cUnit, cUnit->currentDalvikOffset, kThumb2Vldrd,
- S2d(rDestLo, rDestHi), r15pc, 0, 0, 0, dataTarget);
- SetMemRefType(loadPcRel, true, kLiteral);
- loadPcRel->aliasInfo = reinterpret_cast<uintptr_t>(dataTarget);
- AppendLIR(cUnit, loadPcRel);
- res = loadPcRel;
+ LIR* load_pc_rel =
+ RawLIR(cu, cu->current_dalvik_offset, kThumb2Vldrd,
+ S2d(r_dest_lo, r_dest_hi), r15pc, 0, 0, 0, data_target);
+ SetMemRefType(load_pc_rel, true, kLiteral);
+ load_pc_rel->alias_info = reinterpret_cast<uintptr_t>(data_target);
+ AppendLIR(cu, load_pc_rel);
+ res = load_pc_rel;
}
} else {
- res = LoadConstantNoClobber(cUnit, rDestLo, valLo);
- LoadConstantNoClobber(cUnit, rDestHi, valHi);
+ res = LoadConstantNoClobber(cu, r_dest_lo, val_lo);
+ LoadConstantNoClobber(cu, r_dest_hi, val_hi);
}
return res;
}
@@ -630,24 +630,24 @@
return ((amount & 0x1f) << 2) | code;
}
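EncodeShift, whose signature sits just above this hunk, packs a five-bit shift amount and a two-bit shift-type code into the single extra operand that the IS_QUAD_OP forms expect. A minimal illustration, assuming kArmLsl encodes as 0:

// EncodeShift(kArmLsl, 2) == ((2 & 0x1f) << 2) | 0 == 8, so a call such as
//   NewLIR4(cu, kThumb2AddRRR, reg_ptr, rBase, r_index,
//           EncodeShift(kArmLsl, scale));
// (see LoadBaseIndexed below) emits add.w reg_ptr, rBase, r_index, lsl #scale.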
-LIR* LoadBaseIndexed(CompilationUnit* cUnit, int rBase, int rIndex, int rDest,
+LIR* LoadBaseIndexed(CompilationUnit* cu, int rBase, int r_index, int r_dest,
int scale, OpSize size)
{
- bool allLowRegs = ARM_LOWREG(rBase) && ARM_LOWREG(rIndex) && ARM_LOWREG(rDest);
+ bool all_low_regs = ARM_LOWREG(rBase) && ARM_LOWREG(r_index) && ARM_LOWREG(r_dest);
LIR* load;
ArmOpcode opcode = kThumbBkpt;
- bool thumbForm = (allLowRegs && (scale == 0));
- int regPtr;
+ bool thumb_form = (all_low_regs && (scale == 0));
+ int reg_ptr;
- if (ARM_FPREG(rDest)) {
- if (ARM_SINGLEREG(rDest)) {
+ if (ARM_FPREG(r_dest)) {
+ if (ARM_SINGLEREG(r_dest)) {
DCHECK((size == kWord) || (size == kSingle));
opcode = kThumb2Vldrs;
size = kSingle;
} else {
- DCHECK(ARM_DOUBLEREG(rDest));
+ DCHECK(ARM_DOUBLEREG(r_dest));
DCHECK((size == kLong) || (size == kDouble));
- DCHECK_EQ((rDest & 0x1), 0);
+ DCHECK_EQ((r_dest & 0x1), 0);
opcode = kThumb2Vldrd;
size = kDouble;
}
@@ -659,60 +659,60 @@
switch (size) {
case kDouble: // fall-through
case kSingle:
- regPtr = AllocTemp(cUnit);
+ reg_ptr = AllocTemp(cu);
if (scale) {
- NewLIR4(cUnit, kThumb2AddRRR, regPtr, rBase, rIndex,
+ NewLIR4(cu, kThumb2AddRRR, reg_ptr, rBase, r_index,
EncodeShift(kArmLsl, scale));
} else {
- OpRegRegReg(cUnit, kOpAdd, regPtr, rBase, rIndex);
+ OpRegRegReg(cu, kOpAdd, reg_ptr, rBase, r_index);
}
- load = NewLIR3(cUnit, opcode, rDest, regPtr, 0);
- FreeTemp(cUnit, regPtr);
+ load = NewLIR3(cu, opcode, r_dest, reg_ptr, 0);
+ FreeTemp(cu, reg_ptr);
return load;
case kWord:
- opcode = (thumbForm) ? kThumbLdrRRR : kThumb2LdrRRR;
+ opcode = (thumb_form) ? kThumbLdrRRR : kThumb2LdrRRR;
break;
case kUnsignedHalf:
- opcode = (thumbForm) ? kThumbLdrhRRR : kThumb2LdrhRRR;
+ opcode = (thumb_form) ? kThumbLdrhRRR : kThumb2LdrhRRR;
break;
case kSignedHalf:
- opcode = (thumbForm) ? kThumbLdrshRRR : kThumb2LdrshRRR;
+ opcode = (thumb_form) ? kThumbLdrshRRR : kThumb2LdrshRRR;
break;
case kUnsignedByte:
- opcode = (thumbForm) ? kThumbLdrbRRR : kThumb2LdrbRRR;
+ opcode = (thumb_form) ? kThumbLdrbRRR : kThumb2LdrbRRR;
break;
case kSignedByte:
- opcode = (thumbForm) ? kThumbLdrsbRRR : kThumb2LdrsbRRR;
+ opcode = (thumb_form) ? kThumbLdrsbRRR : kThumb2LdrsbRRR;
break;
default:
LOG(FATAL) << "Bad size: " << size;
}
- if (thumbForm)
- load = NewLIR3(cUnit, opcode, rDest, rBase, rIndex);
+ if (thumb_form)
+ load = NewLIR3(cu, opcode, r_dest, rBase, r_index);
else
- load = NewLIR4(cUnit, opcode, rDest, rBase, rIndex, scale);
+ load = NewLIR4(cu, opcode, r_dest, rBase, r_index, scale);
return load;
}
-LIR* StoreBaseIndexed(CompilationUnit* cUnit, int rBase, int rIndex, int rSrc,
+LIR* StoreBaseIndexed(CompilationUnit* cu, int rBase, int r_index, int r_src,
int scale, OpSize size)
{
- bool allLowRegs = ARM_LOWREG(rBase) && ARM_LOWREG(rIndex) && ARM_LOWREG(rSrc);
+ bool all_low_regs = ARM_LOWREG(rBase) && ARM_LOWREG(r_index) && ARM_LOWREG(r_src);
LIR* store;
ArmOpcode opcode = kThumbBkpt;
- bool thumbForm = (allLowRegs && (scale == 0));
- int regPtr;
+ bool thumb_form = (all_low_regs && (scale == 0));
+ int reg_ptr;
- if (ARM_FPREG(rSrc)) {
- if (ARM_SINGLEREG(rSrc)) {
+ if (ARM_FPREG(r_src)) {
+ if (ARM_SINGLEREG(r_src)) {
DCHECK((size == kWord) || (size == kSingle));
opcode = kThumb2Vstrs;
size = kSingle;
} else {
- DCHECK(ARM_DOUBLEREG(rSrc));
+ DCHECK(ARM_DOUBLEREG(r_src));
DCHECK((size == kLong) || (size == kDouble));
- DCHECK_EQ((rSrc & 0x1), 0);
+ DCHECK_EQ((r_src & 0x1), 0);
opcode = kThumb2Vstrd;
size = kDouble;
}
@@ -724,136 +724,136 @@
switch (size) {
case kDouble: // fall-through
case kSingle:
- regPtr = AllocTemp(cUnit);
+ reg_ptr = AllocTemp(cu);
if (scale) {
- NewLIR4(cUnit, kThumb2AddRRR, regPtr, rBase, rIndex,
+ NewLIR4(cu, kThumb2AddRRR, reg_ptr, rBase, r_index,
EncodeShift(kArmLsl, scale));
} else {
- OpRegRegReg(cUnit, kOpAdd, regPtr, rBase, rIndex);
+ OpRegRegReg(cu, kOpAdd, reg_ptr, rBase, r_index);
}
- store = NewLIR3(cUnit, opcode, rSrc, regPtr, 0);
- FreeTemp(cUnit, regPtr);
+ store = NewLIR3(cu, opcode, r_src, reg_ptr, 0);
+ FreeTemp(cu, reg_ptr);
return store;
case kWord:
- opcode = (thumbForm) ? kThumbStrRRR : kThumb2StrRRR;
+ opcode = (thumb_form) ? kThumbStrRRR : kThumb2StrRRR;
break;
case kUnsignedHalf:
case kSignedHalf:
- opcode = (thumbForm) ? kThumbStrhRRR : kThumb2StrhRRR;
+ opcode = (thumb_form) ? kThumbStrhRRR : kThumb2StrhRRR;
break;
case kUnsignedByte:
case kSignedByte:
- opcode = (thumbForm) ? kThumbStrbRRR : kThumb2StrbRRR;
+ opcode = (thumb_form) ? kThumbStrbRRR : kThumb2StrbRRR;
break;
default:
LOG(FATAL) << "Bad size: " << size;
}
- if (thumbForm)
- store = NewLIR3(cUnit, opcode, rSrc, rBase, rIndex);
+ if (thumb_form)
+ store = NewLIR3(cu, opcode, r_src, rBase, r_index);
else
- store = NewLIR4(cUnit, opcode, rSrc, rBase, rIndex, scale);
+ store = NewLIR4(cu, opcode, r_src, rBase, r_index, scale);
return store;
}
/*
* Load value from base + displacement. Optionally perform null check
- * on base (which must have an associated sReg and MIR). If not
+ * on base (which must have an associated s_reg and MIR). If not
* performing null check, incoming MIR can be null.
*/
-LIR* LoadBaseDispBody(CompilationUnit* cUnit, int rBase,
- int displacement, int rDest, int rDestHi, OpSize size,
- int sReg)
+LIR* LoadBaseDispBody(CompilationUnit* cu, int rBase,
+ int displacement, int r_dest, int r_dest_hi, OpSize size,
+ int s_reg)
{
LIR* res;
LIR* load;
ArmOpcode opcode = kThumbBkpt;
- bool shortForm = false;
+ bool short_form = false;
bool thumb2Form = (displacement < 4092 && displacement >= 0);
- bool allLowRegs = (ARM_LOWREG(rBase) && ARM_LOWREG(rDest));
- int encodedDisp = displacement;
+ bool all_low_regs = (ARM_LOWREG(rBase) && ARM_LOWREG(r_dest));
+ int encoded_disp = displacement;
bool is64bit = false;
switch (size) {
case kDouble:
case kLong:
is64bit = true;
- if (ARM_FPREG(rDest)) {
- if (ARM_SINGLEREG(rDest)) {
- DCHECK(ARM_FPREG(rDestHi));
- rDest = S2d(rDest, rDestHi);
+ if (ARM_FPREG(r_dest)) {
+ if (ARM_SINGLEREG(r_dest)) {
+ DCHECK(ARM_FPREG(r_dest_hi));
+ r_dest = S2d(r_dest, r_dest_hi);
}
opcode = kThumb2Vldrd;
if (displacement <= 1020) {
- shortForm = true;
- encodedDisp >>= 2;
+ short_form = true;
+ encoded_disp >>= 2;
}
break;
} else {
- res = LoadBaseDispBody(cUnit, rBase, displacement, rDest,
- -1, kWord, sReg);
- LoadBaseDispBody(cUnit, rBase, displacement + 4, rDestHi,
+ res = LoadBaseDispBody(cu, rBase, displacement, r_dest,
+ -1, kWord, s_reg);
+ LoadBaseDispBody(cu, rBase, displacement + 4, r_dest_hi,
-1, kWord, INVALID_SREG);
return res;
}
case kSingle:
case kWord:
- if (ARM_FPREG(rDest)) {
+ if (ARM_FPREG(r_dest)) {
opcode = kThumb2Vldrs;
if (displacement <= 1020) {
- shortForm = true;
- encodedDisp >>= 2;
+ short_form = true;
+ encoded_disp >>= 2;
}
break;
}
- if (ARM_LOWREG(rDest) && (rBase == r15pc) &&
+ if (ARM_LOWREG(r_dest) && (rBase == r15pc) &&
(displacement <= 1020) && (displacement >= 0)) {
- shortForm = true;
- encodedDisp >>= 2;
+ short_form = true;
+ encoded_disp >>= 2;
opcode = kThumbLdrPcRel;
- } else if (ARM_LOWREG(rDest) && (rBase == r13sp) &&
+ } else if (ARM_LOWREG(r_dest) && (rBase == r13sp) &&
(displacement <= 1020) && (displacement >= 0)) {
- shortForm = true;
- encodedDisp >>= 2;
+ short_form = true;
+ encoded_disp >>= 2;
opcode = kThumbLdrSpRel;
- } else if (allLowRegs && displacement < 128 && displacement >= 0) {
+ } else if (all_low_regs && displacement < 128 && displacement >= 0) {
DCHECK_EQ((displacement & 0x3), 0);
- shortForm = true;
- encodedDisp >>= 2;
+ short_form = true;
+ encoded_disp >>= 2;
opcode = kThumbLdrRRI5;
} else if (thumb2Form) {
- shortForm = true;
+ short_form = true;
opcode = kThumb2LdrRRI12;
}
break;
case kUnsignedHalf:
- if (allLowRegs && displacement < 64 && displacement >= 0) {
+ if (all_low_regs && displacement < 64 && displacement >= 0) {
DCHECK_EQ((displacement & 0x1), 0);
- shortForm = true;
- encodedDisp >>= 1;
+ short_form = true;
+ encoded_disp >>= 1;
opcode = kThumbLdrhRRI5;
} else if (displacement < 4092 && displacement >= 0) {
- shortForm = true;
+ short_form = true;
opcode = kThumb2LdrhRRI12;
}
break;
case kSignedHalf:
if (thumb2Form) {
- shortForm = true;
+ short_form = true;
opcode = kThumb2LdrshRRI12;
}
break;
case kUnsignedByte:
- if (allLowRegs && displacement < 32 && displacement >= 0) {
- shortForm = true;
+ if (all_low_regs && displacement < 32 && displacement >= 0) {
+ short_form = true;
opcode = kThumbLdrbRRI5;
} else if (thumb2Form) {
- shortForm = true;
+ short_form = true;
opcode = kThumb2LdrbRRI12;
}
break;
case kSignedByte:
if (thumb2Form) {
- shortForm = true;
+ short_form = true;
opcode = kThumb2LdrsbRRI12;
}
break;
@@ -861,199 +861,199 @@
LOG(FATAL) << "Bad size: " << size;
}
- if (shortForm) {
- load = res = NewLIR3(cUnit, opcode, rDest, rBase, encodedDisp);
+ if (short_form) {
+ load = res = NewLIR3(cu, opcode, r_dest, rBase, encoded_disp);
} else {
- int regOffset = AllocTemp(cUnit);
- res = LoadConstant(cUnit, regOffset, encodedDisp);
- load = LoadBaseIndexed(cUnit, rBase, regOffset, rDest, 0, size);
- FreeTemp(cUnit, regOffset);
+ int reg_offset = AllocTemp(cu);
+ res = LoadConstant(cu, reg_offset, encoded_disp);
+ load = LoadBaseIndexed(cu, rBase, reg_offset, r_dest, 0, size);
+ FreeTemp(cu, reg_offset);
}
// TODO: in future may need to differentiate Dalvik accesses w/ spills
if (rBase == rARM_SP) {
- AnnotateDalvikRegAccess(load, displacement >> 2, true /* isLoad */, is64bit);
+ AnnotateDalvikRegAccess(load, displacement >> 2, true /* is_load */, is64bit);
}
return load;
}
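Summarizing the switch above: these are the displacement windows in which a single load instruction absorbs the offset directly (short_form); anything else materializes the offset into a temp and goes through LoadBaseIndexed. Scaled offsets are pre-shifted as shown in the code:

//   kWord/kSingle into an FP reg    : 0..1020, word-aligned     (vldrs/vldrd)
//   kWord, low dest, base == pc/sp  : 0..1020, word-aligned
//   kWord, all low regs             : 0..124,  word-aligned     (kThumbLdrRRI5)
//   kUnsignedHalf, all low regs     : 0..62,   halfword-aligned (kThumbLdrhRRI5)
//   kUnsignedByte, all low regs     : 0..31                     (kThumbLdrbRRI5)
//   otherwise, displacement 0..4091 : the 32-bit Thumb2 RRI12 forms
//   everything else                 : LoadConstant + LoadBaseIndexed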
-LIR* LoadBaseDisp(CompilationUnit* cUnit, int rBase,
- int displacement, int rDest, OpSize size, int sReg)
+LIR* LoadBaseDisp(CompilationUnit* cu, int rBase,
+ int displacement, int r_dest, OpSize size, int s_reg)
{
- return LoadBaseDispBody(cUnit, rBase, displacement, rDest, -1, size,
- sReg);
+ return LoadBaseDispBody(cu, rBase, displacement, r_dest, -1, size,
+ s_reg);
}
- LIR* LoadBaseDispWide(CompilationUnit* cUnit, int rBase,
- int displacement, int rDestLo, int rDestHi, int sReg)
+LIR* LoadBaseDispWide(CompilationUnit* cu, int rBase,
+ int displacement, int r_dest_lo, int r_dest_hi, int s_reg)
{
- return LoadBaseDispBody(cUnit, rBase, displacement, rDestLo, rDestHi,
- kLong, sReg);
+ return LoadBaseDispBody(cu, rBase, displacement, r_dest_lo, r_dest_hi,
+ kLong, s_reg);
}
-LIR* StoreBaseDispBody(CompilationUnit* cUnit, int rBase, int displacement,
- int rSrc, int rSrcHi, OpSize size)
+LIR* StoreBaseDispBody(CompilationUnit* cu, int rBase, int displacement,
+ int r_src, int r_src_hi, OpSize size)
{
LIR* res, *store;
ArmOpcode opcode = kThumbBkpt;
- bool shortForm = false;
+ bool short_form = false;
bool thumb2Form = (displacement < 4092 && displacement >= 0);
- bool allLowRegs = (ARM_LOWREG(rBase) && ARM_LOWREG(rSrc));
- int encodedDisp = displacement;
+ bool all_low_regs = (ARM_LOWREG(rBase) && ARM_LOWREG(r_src));
+ int encoded_disp = displacement;
bool is64bit = false;
switch (size) {
case kLong:
case kDouble:
is64bit = true;
- if (!ARM_FPREG(rSrc)) {
- res = StoreBaseDispBody(cUnit, rBase, displacement, rSrc, -1, kWord);
- StoreBaseDispBody(cUnit, rBase, displacement + 4, rSrcHi, -1, kWord);
+ if (!ARM_FPREG(r_src)) {
+ res = StoreBaseDispBody(cu, rBase, displacement, r_src, -1, kWord);
+ StoreBaseDispBody(cu, rBase, displacement + 4, r_src_hi, -1, kWord);
return res;
}
- if (ARM_SINGLEREG(rSrc)) {
- DCHECK(ARM_FPREG(rSrcHi));
- rSrc = S2d(rSrc, rSrcHi);
+ if (ARM_SINGLEREG(r_src)) {
+ DCHECK(ARM_FPREG(r_src_hi));
+ r_src = S2d(r_src, r_src_hi);
}
opcode = kThumb2Vstrd;
if (displacement <= 1020) {
- shortForm = true;
- encodedDisp >>= 2;
+ short_form = true;
+ encoded_disp >>= 2;
}
break;
case kSingle:
case kWord:
- if (ARM_FPREG(rSrc)) {
- DCHECK(ARM_SINGLEREG(rSrc));
+ if (ARM_FPREG(r_src)) {
+ DCHECK(ARM_SINGLEREG(r_src));
opcode = kThumb2Vstrs;
if (displacement <= 1020) {
- shortForm = true;
- encodedDisp >>= 2;
+ short_form = true;
+ encoded_disp >>= 2;
}
break;
}
- if (allLowRegs && displacement < 128 && displacement >= 0) {
+ if (all_low_regs && displacement < 128 && displacement >= 0) {
DCHECK_EQ((displacement & 0x3), 0);
- shortForm = true;
- encodedDisp >>= 2;
+ short_form = true;
+ encoded_disp >>= 2;
opcode = kThumbStrRRI5;
} else if (thumb2Form) {
- shortForm = true;
+ short_form = true;
opcode = kThumb2StrRRI12;
}
break;
case kUnsignedHalf:
case kSignedHalf:
- if (allLowRegs && displacement < 64 && displacement >= 0) {
+ if (all_low_regs && displacement < 64 && displacement >= 0) {
DCHECK_EQ((displacement & 0x1), 0);
- shortForm = true;
- encodedDisp >>= 1;
+ short_form = true;
+ encoded_disp >>= 1;
opcode = kThumbStrhRRI5;
} else if (thumb2Form) {
- shortForm = true;
+ short_form = true;
opcode = kThumb2StrhRRI12;
}
break;
case kUnsignedByte:
case kSignedByte:
- if (allLowRegs && displacement < 32 && displacement >= 0) {
- shortForm = true;
+ if (all_low_regs && displacement < 32 && displacement >= 0) {
+ short_form = true;
opcode = kThumbStrbRRI5;
} else if (thumb2Form) {
- shortForm = true;
+ short_form = true;
opcode = kThumb2StrbRRI12;
}
break;
default:
LOG(FATAL) << "Bad size: " << size;
}
- if (shortForm) {
- store = res = NewLIR3(cUnit, opcode, rSrc, rBase, encodedDisp);
+ if (short_form) {
+ store = res = NewLIR3(cu, opcode, r_src, rBase, encoded_disp);
} else {
- int rScratch = AllocTemp(cUnit);
- res = LoadConstant(cUnit, rScratch, encodedDisp);
- store = StoreBaseIndexed(cUnit, rBase, rScratch, rSrc, 0, size);
- FreeTemp(cUnit, rScratch);
+ int r_scratch = AllocTemp(cu);
+ res = LoadConstant(cu, r_scratch, encoded_disp);
+ store = StoreBaseIndexed(cu, rBase, r_scratch, r_src, 0, size);
+ FreeTemp(cu, r_scratch);
}
// TODO: In future, may need to differentiate Dalvik & spill accesses
if (rBase == rARM_SP) {
- AnnotateDalvikRegAccess(store, displacement >> 2, false /* isLoad */,
+ AnnotateDalvikRegAccess(store, displacement >> 2, false /* is_load */,
is64bit);
}
return res;
}
-LIR* StoreBaseDisp(CompilationUnit* cUnit, int rBase, int displacement,
- int rSrc, OpSize size)
+LIR* StoreBaseDisp(CompilationUnit* cu, int rBase, int displacement,
+ int r_src, OpSize size)
{
- return StoreBaseDispBody(cUnit, rBase, displacement, rSrc, -1, size);
+ return StoreBaseDispBody(cu, rBase, displacement, r_src, -1, size);
}
-LIR* StoreBaseDispWide(CompilationUnit* cUnit, int rBase, int displacement,
- int rSrcLo, int rSrcHi)
+LIR* StoreBaseDispWide(CompilationUnit* cu, int rBase, int displacement,
+ int r_src_lo, int r_src_hi)
{
- return StoreBaseDispBody(cUnit, rBase, displacement, rSrcLo, rSrcHi, kLong);
+ return StoreBaseDispBody(cu, rBase, displacement, r_src_lo, r_src_hi, kLong);
}
-void LoadPair(CompilationUnit* cUnit, int base, int lowReg, int highReg)
+void LoadPair(CompilationUnit* cu, int base, int low_reg, int high_reg)
{
- LoadBaseDispWide(cUnit, base, 0, lowReg, highReg, INVALID_SREG);
+ LoadBaseDispWide(cu, base, 0, low_reg, high_reg, INVALID_SREG);
}
-LIR* FpRegCopy(CompilationUnit* cUnit, int rDest, int rSrc)
+LIR* FpRegCopy(CompilationUnit* cu, int r_dest, int r_src)
{
int opcode;
- DCHECK_EQ(ARM_DOUBLEREG(rDest), ARM_DOUBLEREG(rSrc));
- if (ARM_DOUBLEREG(rDest)) {
+ DCHECK_EQ(ARM_DOUBLEREG(r_dest), ARM_DOUBLEREG(r_src));
+ if (ARM_DOUBLEREG(r_dest)) {
opcode = kThumb2Vmovd;
} else {
- if (ARM_SINGLEREG(rDest)) {
- opcode = ARM_SINGLEREG(rSrc) ? kThumb2Vmovs : kThumb2Fmsr;
+ if (ARM_SINGLEREG(r_dest)) {
+ opcode = ARM_SINGLEREG(r_src) ? kThumb2Vmovs : kThumb2Fmsr;
} else {
- DCHECK(ARM_SINGLEREG(rSrc));
+ DCHECK(ARM_SINGLEREG(r_src));
opcode = kThumb2Fmrs;
}
}
- LIR* res = RawLIR(cUnit, cUnit->currentDalvikOffset, opcode, rDest, rSrc);
- if (!(cUnit->disableOpt & (1 << kSafeOptimizations)) && rDest == rSrc) {
- res->flags.isNop = true;
+ LIR* res = RawLIR(cu, cu->current_dalvik_offset, opcode, r_dest, r_src);
+ if (!(cu->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
+ res->flags.is_nop = true;
}
return res;
}
-LIR* OpThreadMem(CompilationUnit* cUnit, OpKind op, int threadOffset)
+LIR* OpThreadMem(CompilationUnit* cu, OpKind op, int thread_offset)
{
LOG(FATAL) << "Unexpected use of OpThreadMem for Arm";
return NULL;
}
-LIR* OpMem(CompilationUnit* cUnit, OpKind op, int rBase, int disp)
+LIR* OpMem(CompilationUnit* cu, OpKind op, int rBase, int disp)
{
LOG(FATAL) << "Unexpected use of OpMem for Arm";
return NULL;
}
-LIR* StoreBaseIndexedDisp(CompilationUnit *cUnit,
- int rBase, int rIndex, int scale, int displacement,
- int rSrc, int rSrcHi,
- OpSize size, int sReg)
+LIR* StoreBaseIndexedDisp(CompilationUnit* cu,
+ int rBase, int r_index, int scale, int displacement,
+ int r_src, int r_src_hi,
+ OpSize size, int s_reg)
{
LOG(FATAL) << "Unexpected use of StoreBaseIndexedDisp for Arm";
return NULL;
}
-LIR* OpRegMem(CompilationUnit *cUnit, OpKind op, int rDest, int rBase,
+LIR* OpRegMem(CompilationUnit* cu, OpKind op, int r_dest, int rBase,
int offset)
{
LOG(FATAL) << "Unexpected use of OpRegMem for Arm";
return NULL;
}
-LIR* LoadBaseIndexedDisp(CompilationUnit *cUnit,
- int rBase, int rIndex, int scale, int displacement,
- int rDest, int rDestHi,
- OpSize size, int sReg)
+LIR* LoadBaseIndexedDisp(CompilationUnit* cu,
+ int rBase, int r_index, int scale, int displacement,
+ int r_dest, int r_dest_hi,
+ OpSize size, int s_reg)
{
LOG(FATAL) << "Unexpected use of LoadBaseIndexedDisp for Arm";
return NULL;
diff --git a/src/compiler/codegen/codegen_util.cc b/src/compiler/codegen/codegen_util.cc
index 5082185..9373291 100644
--- a/src/compiler/codegen/codegen_util.cc
+++ b/src/compiler/codegen/codegen_util.cc
@@ -26,74 +26,74 @@
/* Convert an instruction to a NOP */
void NopLIR(LIR* lir)
{
- lir->flags.isNop = true;
+ lir->flags.is_nop = true;
}
-void SetMemRefType(LIR* lir, bool isLoad, int memType)
+void SetMemRefType(LIR* lir, bool is_load, int mem_type)
{
- uint64_t *maskPtr;
+ uint64_t* mask_ptr;
uint64_t mask = ENCODE_MEM;
DCHECK(GetTargetInstFlags(lir->opcode) & (IS_LOAD | IS_STORE));
- if (isLoad) {
- maskPtr = &lir->useMask;
+ if (is_load) {
+ mask_ptr = &lir->use_mask;
} else {
- maskPtr = &lir->defMask;
+ mask_ptr = &lir->def_mask;
}
/* Clear out the memref flags */
- *maskPtr &= ~mask;
+ *mask_ptr &= ~mask;
/* ..and then add back the one we need */
- switch (memType) {
+ switch (mem_type) {
case kLiteral:
- DCHECK(isLoad);
- *maskPtr |= ENCODE_LITERAL;
+ DCHECK(is_load);
+ *mask_ptr |= ENCODE_LITERAL;
break;
case kDalvikReg:
- *maskPtr |= ENCODE_DALVIK_REG;
+ *mask_ptr |= ENCODE_DALVIK_REG;
break;
case kHeapRef:
- *maskPtr |= ENCODE_HEAP_REF;
+ *mask_ptr |= ENCODE_HEAP_REF;
break;
case kMustNotAlias:
/* Currently only loads can be marked as kMustNotAlias */
DCHECK(!(GetTargetInstFlags(lir->opcode) & IS_STORE));
- *maskPtr |= ENCODE_MUST_NOT_ALIAS;
+ *mask_ptr |= ENCODE_MUST_NOT_ALIAS;
break;
default:
- LOG(FATAL) << "Oat: invalid memref kind - " << memType;
+ LOG(FATAL) << "Oat: invalid memref kind - " << mem_type;
}
}
/*
* Mark load/store instructions that access Dalvik registers through the stack.
*/
-void AnnotateDalvikRegAccess(LIR* lir, int regId, bool isLoad, bool is64bit)
+void AnnotateDalvikRegAccess(LIR* lir, int reg_id, bool is_load, bool is64bit)
{
- SetMemRefType(lir, isLoad, kDalvikReg);
+ SetMemRefType(lir, is_load, kDalvikReg);
/*
- * Store the Dalvik register id in aliasInfo. Mark the MSB if it is a 64-bit
+ * Store the Dalvik register id in alias_info. Mark the MSB if it is a 64-bit
* access.
*/
- lir->aliasInfo = ENCODE_ALIAS_INFO(regId, is64bit);
+ lir->alias_info = ENCODE_ALIAS_INFO(reg_id, is64bit);
}
/*
* Mark the corresponding bit(s).
*/
-void SetupRegMask(CompilationUnit* cUnit, uint64_t* mask, int reg)
+void SetupRegMask(CompilationUnit* cu, uint64_t* mask, int reg)
{
- *mask |= GetRegMaskCommon(cUnit, reg);
+ *mask |= GetRegMaskCommon(cu, reg);
}
/*
* Set up the proper fields in the resource mask
*/
-void SetupResourceMasks(CompilationUnit* cUnit, LIR* lir)
+void SetupResourceMasks(CompilationUnit* cu, LIR* lir)
{
int opcode = lir->opcode;
if (opcode <= 0) {
- lir->useMask = lir->defMask = 0;
+ lir->use_mask = lir->def_mask = 0;
return;
}
@@ -117,21 +117,21 @@
* turn will trash everything.
*/
if (flags & IS_BRANCH) {
- lir->defMask = lir->useMask = ENCODE_ALL;
+ lir->def_mask = lir->use_mask = ENCODE_ALL;
return;
}
if (flags & REG_DEF0) {
- SetupRegMask(cUnit, &lir->defMask, lir->operands[0]);
+ SetupRegMask(cu, &lir->def_mask, lir->operands[0]);
}
if (flags & REG_DEF1) {
- SetupRegMask(cUnit, &lir->defMask, lir->operands[1]);
+ SetupRegMask(cu, &lir->def_mask, lir->operands[1]);
}
if (flags & SETS_CCODES) {
- lir->defMask |= ENCODE_CCODE;
+ lir->def_mask |= ENCODE_CCODE;
}
if (flags & (REG_USE0 | REG_USE1 | REG_USE2 | REG_USE3)) {
@@ -139,17 +139,17 @@
for (i = 0; i < 4; i++) {
if (flags & (1 << (kRegUse0 + i))) {
- SetupRegMask(cUnit, &lir->useMask, lir->operands[i]);
+ SetupRegMask(cu, &lir->use_mask, lir->operands[i]);
}
}
}
if (flags & USES_CCODES) {
- lir->useMask |= ENCODE_CCODE;
+ lir->use_mask |= ENCODE_CCODE;
}
// Handle target-specific actions
- SetupTargetResourceMasks(cUnit, lir);
+ SetupTargetResourceMasks(cu, lir);
}
/*
@@ -159,17 +159,17 @@
#define DUMP_SSA_REP(X)
/* Pretty-print a LIR instruction */
-void DumpLIRInsn(CompilationUnit* cUnit, LIR* lir, unsigned char* baseAddr)
+void DumpLIRInsn(CompilationUnit* cu, LIR* lir, unsigned char* base_addr)
{
int offset = lir->offset;
int dest = lir->operands[0];
- const bool dumpNop = (cUnit->enableDebug & (1 << kDebugShowNops));
+ const bool dump_nop = (cu->enable_debug & (1 << kDebugShowNops));
/* Handle pseudo-ops individually, and all regular insns as a group */
switch (lir->opcode) {
case kPseudoMethodEntry:
LOG(INFO) << "-------- method entry "
- << PrettyMethod(cUnit->method_idx, *cUnit->dex_file);
+ << PrettyMethod(cu->method_idx, *cu->dex_file);
break;
case kPseudoMethodExit:
LOG(INFO) << "-------- Method_Exit";
@@ -188,13 +188,13 @@
break;
case kPseudoDalvikByteCodeBoundary:
LOG(INFO) << "-------- dalvik offset: 0x" << std::hex
- << lir->dalvikOffset << " @ " << reinterpret_cast<char*>(lir->operands[0]);
+ << lir->dalvik_offset << " @ " << reinterpret_cast<char*>(lir->operands[0]);
break;
case kPseudoExitBlock:
LOG(INFO) << "-------- exit offset: 0x" << std::hex << dest;
break;
case kPseudoPseudoAlign4:
- LOG(INFO) << reinterpret_cast<uintptr_t>(baseAddr) + offset << " (0x" << std::hex
+ LOG(INFO) << reinterpret_cast<uintptr_t>(base_addr) + offset << " (0x" << std::hex
<< offset << "): .align4";
break;
case kPseudoEHBlockLabel:
@@ -214,10 +214,10 @@
LOG(INFO) << "LS" << reinterpret_cast<void*>(lir) << ":";
break;
case kPseudoSafepointPC:
- LOG(INFO) << "LsafepointPC_0x" << std::hex << lir->offset << "_" << lir->dalvikOffset << ":";
+ LOG(INFO) << "LsafepointPC_0x" << std::hex << lir->offset << "_" << lir->dalvik_offset << ":";
break;
case kPseudoExportedPC:
- LOG(INFO) << "LexportedPC_0x" << std::hex << lir->offset << "_" << lir->dalvikOffset << ":";
+ LOG(INFO) << "LexportedPC_0x" << std::hex << lir->offset << "_" << lir->dalvik_offset << ":";
break;
case kPseudoCaseLabel:
LOG(INFO) << "LC" << reinterpret_cast<void*>(lir) << ": Case target 0x"
@@ -225,52 +225,52 @@
lir->operands[0];
break;
default:
- if (lir->flags.isNop && !dumpNop) {
+ if (lir->flags.is_nop && !dump_nop) {
break;
} else {
std::string op_name(BuildInsnString(GetTargetInstName(lir->opcode),
- lir, baseAddr));
+ lir, base_addr));
std::string op_operands(BuildInsnString(GetTargetInstFmt(lir->opcode),
- lir, baseAddr));
+ lir, base_addr));
LOG(INFO) << StringPrintf("%05x: %-9s%s%s",
- reinterpret_cast<unsigned int>(baseAddr + offset),
+ reinterpret_cast<unsigned int>(base_addr + offset),
op_name.c_str(), op_operands.c_str(),
- lir->flags.isNop ? "(nop)" : "");
+ lir->flags.is_nop ? "(nop)" : "");
}
break;
}
- if (lir->useMask && (!lir->flags.isNop || dumpNop)) {
- DUMP_RESOURCE_MASK(DumpResourceMask((LIR* ) lir, lir->useMask, "use"));
+ if (lir->use_mask && (!lir->flags.is_nop || dump_nop)) {
+ DUMP_RESOURCE_MASK(DumpResourceMask((LIR*) lir, lir->use_mask, "use"));
}
- if (lir->defMask && (!lir->flags.isNop || dumpNop)) {
- DUMP_RESOURCE_MASK(DumpResourceMask((LIR* ) lir, lir->defMask, "def"));
+ if (lir->def_mask && (!lir->flags.is_nop || dump_nop)) {
+ DUMP_RESOURCE_MASK(DumpResourceMask((LIR*) lir, lir->def_mask, "def"));
}
}
-void DumpPromotionMap(CompilationUnit *cUnit)
+void DumpPromotionMap(CompilationUnit *cu)
{
- int numRegs = cUnit->numDalvikRegisters + cUnit->numCompilerTemps + 1;
- for (int i = 0; i < numRegs; i++) {
- PromotionMap vRegMap = cUnit->promotionMap[i];
+ int num_regs = cu->num_dalvik_registers + cu->num_compiler_temps + 1;
+ for (int i = 0; i < num_regs; i++) {
+ PromotionMap v_reg_map = cu->promotion_map[i];
std::string buf;
- if (vRegMap.fpLocation == kLocPhysReg) {
- StringAppendF(&buf, " : s%d", vRegMap.FpReg & FpRegMask());
+ if (v_reg_map.fp_location == kLocPhysReg) {
+ StringAppendF(&buf, " : s%d", v_reg_map.FpReg & FpRegMask());
}
std::string buf3;
- if (i < cUnit->numDalvikRegisters) {
+ if (i < cu->num_dalvik_registers) {
StringAppendF(&buf3, "%02d", i);
- } else if (i == cUnit->methodSReg) {
+ } else if (i == cu->method_sreg) {
buf3 = "Method*";
} else {
- StringAppendF(&buf3, "ct%d", i - cUnit->numDalvikRegisters);
+ StringAppendF(&buf3, "ct%d", i - cu->num_dalvik_registers);
}
LOG(INFO) << StringPrintf("V[%s] -> %s%d%s", buf3.c_str(),
- vRegMap.coreLocation == kLocPhysReg ?
- "r" : "SP+", vRegMap.coreLocation == kLocPhysReg ?
- vRegMap.coreReg : SRegOffset(cUnit, i),
+ v_reg_map.core_location == kLocPhysReg ?
+ "r" : "SP+", v_reg_map.core_location == kLocPhysReg ?
+ v_reg_map.core_reg : SRegOffset(cu, i),
buf.c_str());
}
}
@@ -293,50 +293,50 @@
}
/* Dump instructions and constant pool contents */
-void CodegenDump(CompilationUnit* cUnit)
+void CodegenDump(CompilationUnit* cu)
{
LOG(INFO) << "Dumping LIR insns for "
- << PrettyMethod(cUnit->method_idx, *cUnit->dex_file);
- LIR* lirInsn;
- int insnsSize = cUnit->insnsSize;
+ << PrettyMethod(cu->method_idx, *cu->dex_file);
+ LIR* lir_insn;
+ int insns_size = cu->insns_size;
- LOG(INFO) << "Regs (excluding ins) : " << cUnit->numRegs;
- LOG(INFO) << "Ins : " << cUnit->numIns;
- LOG(INFO) << "Outs : " << cUnit->numOuts;
- LOG(INFO) << "CoreSpills : " << cUnit->numCoreSpills;
- LOG(INFO) << "FPSpills : " << cUnit->numFPSpills;
- LOG(INFO) << "CompilerTemps : " << cUnit->numCompilerTemps;
- LOG(INFO) << "Frame size : " << cUnit->frameSize;
- LOG(INFO) << "code size is " << cUnit->totalSize <<
- " bytes, Dalvik size is " << insnsSize * 2;
+ LOG(INFO) << "Regs (excluding ins) : " << cu->num_regs;
+ LOG(INFO) << "Ins : " << cu->num_ins;
+ LOG(INFO) << "Outs : " << cu->num_outs;
+ LOG(INFO) << "CoreSpills : " << cu->num_core_spills;
+ LOG(INFO) << "FPSpills : " << cu->num_fp_spills;
+ LOG(INFO) << "CompilerTemps : " << cu->num_compiler_temps;
+ LOG(INFO) << "Frame size : " << cu->frame_size;
+ LOG(INFO) << "code size is " << cu->total_size <<
+ " bytes, Dalvik size is " << insns_size * 2;
LOG(INFO) << "expansion factor: "
- << static_cast<float>(cUnit->totalSize) / static_cast<float>(insnsSize * 2);
- DumpPromotionMap(cUnit);
- for (lirInsn = cUnit->firstLIRInsn; lirInsn; lirInsn = lirInsn->next) {
- DumpLIRInsn(cUnit, lirInsn, 0);
+ << static_cast<float>(cu->total_size) / static_cast<float>(insns_size * 2);
+ DumpPromotionMap(cu);
+ for (lir_insn = cu->first_lir_insn; lir_insn; lir_insn = lir_insn->next) {
+ DumpLIRInsn(cu, lir_insn, 0);
}
- for (lirInsn = cUnit->literalList; lirInsn; lirInsn = lirInsn->next) {
- LOG(INFO) << StringPrintf("%x (%04x): .word (%#x)", lirInsn->offset, lirInsn->offset,
- lirInsn->operands[0]);
+ for (lir_insn = cu->literal_list; lir_insn; lir_insn = lir_insn->next) {
+ LOG(INFO) << StringPrintf("%x (%04x): .word (%#x)", lir_insn->offset, lir_insn->offset,
+ lir_insn->operands[0]);
}
const DexFile::MethodId& method_id =
- cUnit->dex_file->GetMethodId(cUnit->method_idx);
- std::string signature(cUnit->dex_file->GetMethodSignature(method_id));
- std::string name(cUnit->dex_file->GetMethodName(method_id));
- std::string descriptor(cUnit->dex_file->GetMethodDeclaringClassDescriptor(method_id));
+ cu->dex_file->GetMethodId(cu->method_idx);
+ std::string signature(cu->dex_file->GetMethodSignature(method_id));
+ std::string name(cu->dex_file->GetMethodName(method_id));
+ std::string descriptor(cu->dex_file->GetMethodDeclaringClassDescriptor(method_id));
// Dump mapping tables
- DumpMappingTable("PC2Dex_MappingTable", descriptor, name, signature, cUnit->pc2dexMappingTable);
- DumpMappingTable("Dex2PC_MappingTable", descriptor, name, signature, cUnit->dex2pcMappingTable);
+ DumpMappingTable("PC2Dex_MappingTable", descriptor, name, signature, cu->pc2dexMappingTable);
+ DumpMappingTable("Dex2PC_MappingTable", descriptor, name, signature, cu->dex2pcMappingTable);
}
-LIR* RawLIR(CompilationUnit* cUnit, int dalvikOffset, int opcode, int op0,
+LIR* RawLIR(CompilationUnit* cu, int dalvik_offset, int opcode, int op0,
int op1, int op2, int op3, int op4, LIR* target)
{
- LIR* insn = static_cast<LIR*>(NewMem(cUnit, sizeof(LIR), true, kAllocLIR));
- insn->dalvikOffset = dalvikOffset;
+ LIR* insn = static_cast<LIR*>(NewMem(cu, sizeof(LIR), true, kAllocLIR));
+ insn->dalvik_offset = dalvik_offset;
insn->opcode = opcode;
insn->operands[0] = op0;
insn->operands[1] = op1;
@@ -344,11 +344,11 @@
insn->operands[3] = op3;
insn->operands[4] = op4;
insn->target = target;
- SetupResourceMasks(cUnit, insn);
+ SetupResourceMasks(cu, insn);
if ((opcode == kPseudoTargetLabel) || (opcode == kPseudoSafepointPC) ||
(opcode == kPseudoExportedPC)) {
// Always make labels scheduling barriers
- insn->useMask = insn->defMask = ENCODE_ALL;
+ insn->use_mask = insn->def_mask = ENCODE_ALL;
}
return insn;
}
@@ -357,74 +357,74 @@
* The following are building blocks to construct low-level IRs with 0 - 4
* operands.
*/
-LIR* NewLIR0(CompilationUnit* cUnit, int opcode)
+LIR* NewLIR0(CompilationUnit* cu, int opcode)
{
- DCHECK(isPseudoOpcode(opcode) || (GetTargetInstFlags(opcode) & NO_OPERAND))
+ DCHECK(is_pseudo_opcode(opcode) || (GetTargetInstFlags(opcode) & NO_OPERAND))
<< GetTargetInstName(opcode) << " " << opcode << " "
- << PrettyMethod(cUnit->method_idx, *cUnit->dex_file) << " "
- << cUnit->currentDalvikOffset;
- LIR* insn = RawLIR(cUnit, cUnit->currentDalvikOffset, opcode);
- AppendLIR(cUnit, insn);
+ << PrettyMethod(cu->method_idx, *cu->dex_file) << " "
+ << cu->current_dalvik_offset;
+ LIR* insn = RawLIR(cu, cu->current_dalvik_offset, opcode);
+ AppendLIR(cu, insn);
return insn;
}
-LIR* NewLIR1(CompilationUnit* cUnit, int opcode,
+LIR* NewLIR1(CompilationUnit* cu, int opcode,
int dest)
{
- DCHECK(isPseudoOpcode(opcode) || (GetTargetInstFlags(opcode) & IS_UNARY_OP))
+ DCHECK(is_pseudo_opcode(opcode) || (GetTargetInstFlags(opcode) & IS_UNARY_OP))
<< GetTargetInstName(opcode) << " " << opcode << " "
- << PrettyMethod(cUnit->method_idx, *cUnit->dex_file) << " "
- << cUnit->currentDalvikOffset;
- LIR* insn = RawLIR(cUnit, cUnit->currentDalvikOffset, opcode, dest);
- AppendLIR(cUnit, insn);
+ << PrettyMethod(cu->method_idx, *cu->dex_file) << " "
+ << cu->current_dalvik_offset;
+ LIR* insn = RawLIR(cu, cu->current_dalvik_offset, opcode, dest);
+ AppendLIR(cu, insn);
return insn;
}
-LIR* NewLIR2(CompilationUnit* cUnit, int opcode,
+LIR* NewLIR2(CompilationUnit* cu, int opcode,
int dest, int src1)
{
- DCHECK(isPseudoOpcode(opcode) || (GetTargetInstFlags(opcode) & IS_BINARY_OP))
+ DCHECK(is_pseudo_opcode(opcode) || (GetTargetInstFlags(opcode) & IS_BINARY_OP))
<< GetTargetInstName(opcode) << " " << opcode << " "
- << PrettyMethod(cUnit->method_idx, *cUnit->dex_file) << " "
- << cUnit->currentDalvikOffset;
- LIR* insn = RawLIR(cUnit, cUnit->currentDalvikOffset, opcode, dest, src1);
- AppendLIR(cUnit, insn);
+ << PrettyMethod(cu->method_idx, *cu->dex_file) << " "
+ << cu->current_dalvik_offset;
+ LIR* insn = RawLIR(cu, cu->current_dalvik_offset, opcode, dest, src1);
+ AppendLIR(cu, insn);
return insn;
}
-LIR* NewLIR3(CompilationUnit* cUnit, int opcode,
+LIR* NewLIR3(CompilationUnit* cu, int opcode,
int dest, int src1, int src2)
{
- DCHECK(isPseudoOpcode(opcode) || (GetTargetInstFlags(opcode) & IS_TERTIARY_OP))
+ DCHECK(is_pseudo_opcode(opcode) || (GetTargetInstFlags(opcode) & IS_TERTIARY_OP))
<< GetTargetInstName(opcode) << " " << opcode << " "
- << PrettyMethod(cUnit->method_idx, *cUnit->dex_file) << " "
- << cUnit->currentDalvikOffset;
- LIR* insn = RawLIR(cUnit, cUnit->currentDalvikOffset, opcode, dest, src1, src2);
- AppendLIR(cUnit, insn);
+ << PrettyMethod(cu->method_idx, *cu->dex_file) << " "
+ << cu->current_dalvik_offset;
+ LIR* insn = RawLIR(cu, cu->current_dalvik_offset, opcode, dest, src1, src2);
+ AppendLIR(cu, insn);
return insn;
}
-LIR* NewLIR4(CompilationUnit* cUnit, int opcode,
+LIR* NewLIR4(CompilationUnit* cu, int opcode,
int dest, int src1, int src2, int info)
{
- DCHECK(isPseudoOpcode(opcode) || (GetTargetInstFlags(opcode) & IS_QUAD_OP))
+ DCHECK(is_pseudo_opcode(opcode) || (GetTargetInstFlags(opcode) & IS_QUAD_OP))
<< GetTargetInstName(opcode) << " " << opcode << " "
- << PrettyMethod(cUnit->method_idx, *cUnit->dex_file) << " "
- << cUnit->currentDalvikOffset;
- LIR* insn = RawLIR(cUnit, cUnit->currentDalvikOffset, opcode, dest, src1, src2, info);
- AppendLIR(cUnit, insn);
+ << PrettyMethod(cu->method_idx, *cu->dex_file) << " "
+ << cu->current_dalvik_offset;
+ LIR* insn = RawLIR(cu, cu->current_dalvik_offset, opcode, dest, src1, src2, info);
+ AppendLIR(cu, insn);
return insn;
}
-LIR* NewLIR5(CompilationUnit* cUnit, int opcode,
+LIR* NewLIR5(CompilationUnit* cu, int opcode,
int dest, int src1, int src2, int info1, int info2)
{
- DCHECK(isPseudoOpcode(opcode) || (GetTargetInstFlags(opcode) & IS_QUIN_OP))
+ DCHECK(is_pseudo_opcode(opcode) || (GetTargetInstFlags(opcode) & IS_QUIN_OP))
<< GetTargetInstName(opcode) << " " << opcode << " "
- << PrettyMethod(cUnit->method_idx, *cUnit->dex_file) << " "
- << cUnit->currentDalvikOffset;
- LIR* insn = RawLIR(cUnit, cUnit->currentDalvikOffset, opcode, dest, src1, src2, info1, info2);
- AppendLIR(cUnit, insn);
+ << PrettyMethod(cu->method_idx, *cu->dex_file) << " "
+ << cu->current_dalvik_offset;
+ LIR* insn = RawLIR(cu, cu->current_dalvik_offset, opcode, dest, src1, src2, info1, info2);
+ AppendLIR(cu, insn);
return insn;
}
@@ -432,31 +432,31 @@
* Search the existing constants in the literal pool for an exact or close match
* within the specified delta (greater than or equal to 0).
*/
-LIR* ScanLiteralPool(LIR* dataTarget, int value, unsigned int delta)
+LIR* ScanLiteralPool(LIR* data_target, int value, unsigned int delta)
{
- while (dataTarget) {
- if ((static_cast<unsigned>(value - dataTarget->operands[0])) <= delta)
- return dataTarget;
- dataTarget = dataTarget->next;
+ while (data_target) {
+ if ((static_cast<unsigned>(value - data_target->operands[0])) <= delta)
+ return data_target;
+ data_target = data_target->next;
}
return NULL;
}
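The unsigned comparison above doubles as a range check: casting value - entry to unsigned accepts pool entries in [value - delta, value] and rejects anything larger via wraparound, which is what the ADD_RRI8 fixup seen in LoadConstantNoClobber is prepared to exploit when a nonzero delta is passed. A self-contained demonstration:

#include <cstdio>

// Mirrors the match test inside ScanLiteralPool's loop.
static bool Matches(int value, int entry, unsigned int delta) {
  return static_cast<unsigned int>(value - entry) <= delta;
}

int main() {
  printf("%d\n", Matches(100, 100, 0));  // 1: exact match
  printf("%d\n", Matches(100, 97, 3));   // 1: within delta below value
  printf("%d\n", Matches(100, 101, 3));  // 0: entries above value wrap
  return 0;                              //    to a huge unsigned difference
}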
/* Search the existing constants in the literal pool for an exact wide match */
-LIR* ScanLiteralPoolWide(LIR* dataTarget, int valLo, int valHi)
+LIR* ScanLiteralPoolWide(LIR* data_target, int val_lo, int val_hi)
{
- bool loMatch = false;
- LIR* loTarget = NULL;
- while (dataTarget) {
- if (loMatch && (dataTarget->operands[0] == valHi)) {
- return loTarget;
+ bool lo_match = false;
+ LIR* lo_target = NULL;
+ while (data_target) {
+ if (lo_match && (data_target->operands[0] == val_hi)) {
+ return lo_target;
}
- loMatch = false;
- if (dataTarget->operands[0] == valLo) {
- loMatch = true;
- loTarget = dataTarget;
+ lo_match = false;
+ if (data_target->operands[0] == val_lo) {
+ lo_match = true;
+ lo_target = data_target;
}
- dataTarget = dataTarget->next;
+ data_target = data_target->next;
}
return NULL;
}
@@ -467,25 +467,25 @@
*/
/* Add a 32-bit constant to the constant pool */
-LIR* AddWordData(CompilationUnit* cUnit, LIR* *constantListP, int value)
+LIR* AddWordData(CompilationUnit* cu, LIR** constant_list_p, int value)
{
/* Add the constant to the literal pool */
- if (constantListP) {
- LIR* newValue = static_cast<LIR*>(NewMem(cUnit, sizeof(LIR), true, kAllocData));
- newValue->operands[0] = value;
- newValue->next = *constantListP;
- *constantListP = newValue;
- return newValue;
+ if (constant_list_p) {
+ LIR* new_value = static_cast<LIR*>(NewMem(cu, sizeof(LIR), true, kAllocData));
+ new_value->operands[0] = value;
+ new_value->next = *constant_list_p;
+ *constant_list_p = new_value;
+ return new_value;
}
return NULL;
}
/* Add a 64-bit constant to the constant pool or mixed with code */
-LIR* AddWideData(CompilationUnit* cUnit, LIR* *constantListP,
- int valLo, int valHi)
+LIR* AddWideData(CompilationUnit* cu, LIR** constant_list_p,
+ int val_lo, int val_hi)
{
- AddWordData(cUnit, constantListP, valHi);
- return AddWordData(cUnit, constantListP, valLo);
+ AddWordData(cu, constant_list_p, val_hi);
+ return AddWordData(cu, constant_list_p, val_lo);
}
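Note the ordering here: AddWordData prepends to the list head, so pushing val_hi first and then val_lo leaves the pair adjacent as (lo, hi). That is exactly the adjacency ScanLiteralPoolWide's lo_match walk tests for, and the order InstallLiteralPools later writes the words in. With hypothetical values:

// After AddWideData(cu, &cu->literal_list, 0x89abcdef, 0x01234567):
//
//   literal_list -> [0x89abcdef] -> [0x01234567] -> ...older entries...
//                      (val_lo)        (val_hi)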
static void PushWord(std::vector<uint8_t>& buf, int data) {
@@ -502,119 +502,119 @@
}
/* Write the literal pool to the output stream */
-static void InstallLiteralPools(CompilationUnit* cUnit)
+static void InstallLiteralPools(CompilationUnit* cu)
{
- AlignBuffer(cUnit->codeBuffer, cUnit->dataOffset);
- LIR* dataLIR = cUnit->literalList;
- while (dataLIR != NULL) {
- PushWord(cUnit->codeBuffer, dataLIR->operands[0]);
- dataLIR = NEXT_LIR(dataLIR);
+ AlignBuffer(cu->code_buffer, cu->data_offset);
+ LIR* data_lir = cu->literal_list;
+ while (data_lir != NULL) {
+ PushWord(cu->code_buffer, data_lir->operands[0]);
+ data_lir = NEXT_LIR(data_lir);
}
// Push code and method literals, record offsets for the compiler to patch.
- dataLIR = cUnit->codeLiteralList;
- while (dataLIR != NULL) {
- uint32_t target = dataLIR->operands[0];
- cUnit->compiler->AddCodePatch(cUnit->dex_file,
- cUnit->method_idx,
- cUnit->invoke_type,
+ data_lir = cu->code_literal_list;
+ while (data_lir != NULL) {
+ uint32_t target = data_lir->operands[0];
+ cu->compiler->AddCodePatch(cu->dex_file,
+ cu->method_idx,
+ cu->invoke_type,
target,
- static_cast<InvokeType>(dataLIR->operands[1]),
- cUnit->codeBuffer.size());
- const DexFile::MethodId& id = cUnit->dex_file->GetMethodId(target);
+ static_cast<InvokeType>(data_lir->operands[1]),
+ cu->code_buffer.size());
+ const DexFile::MethodId& id = cu->dex_file->GetMethodId(target);
// unique based on target to ensure code deduplication works
uint32_t unique_patch_value = reinterpret_cast<uint32_t>(&id);
- PushWord(cUnit->codeBuffer, unique_patch_value);
- dataLIR = NEXT_LIR(dataLIR);
+ PushWord(cu->code_buffer, unique_patch_value);
+ data_lir = NEXT_LIR(data_lir);
}
- dataLIR = cUnit->methodLiteralList;
- while (dataLIR != NULL) {
- uint32_t target = dataLIR->operands[0];
- cUnit->compiler->AddMethodPatch(cUnit->dex_file,
- cUnit->method_idx,
- cUnit->invoke_type,
+ data_lir = cu->method_literal_list;
+ while (data_lir != NULL) {
+ uint32_t target = data_lir->operands[0];
+ cu->compiler->AddMethodPatch(cu->dex_file,
+ cu->method_idx,
+ cu->invoke_type,
target,
- static_cast<InvokeType>(dataLIR->operands[1]),
- cUnit->codeBuffer.size());
- const DexFile::MethodId& id = cUnit->dex_file->GetMethodId(target);
+ static_cast<InvokeType>(data_lir->operands[1]),
+ cu->code_buffer.size());
+ const DexFile::MethodId& id = cu->dex_file->GetMethodId(target);
// unique based on target to ensure code deduplication works
uint32_t unique_patch_value = reinterpret_cast<uint32_t>(&id);
- PushWord(cUnit->codeBuffer, unique_patch_value);
- dataLIR = NEXT_LIR(dataLIR);
+ PushWord(cu->code_buffer, unique_patch_value);
+ data_lir = NEXT_LIR(data_lir);
}
}
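
The body of PushWord is elided above; judging from the byte-at-a-time push_back pattern used for the padding and fill-array halfwords elsewhere in this file, it presumably appends the 32-bit word least-significant byte first. A plausible reconstruction (an assumption, not quoted from the source):

static void PushWord(std::vector<uint8_t>& buf, int data) {
  buf.push_back(data & 0xFF);          // little-endian: LSB first
  buf.push_back((data >> 8) & 0xFF);
  buf.push_back((data >> 16) & 0xFF);
  buf.push_back((data >> 24) & 0xFF);
}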
/* Write the switch tables to the output stream */
-static void InstallSwitchTables(CompilationUnit* cUnit)
+static void InstallSwitchTables(CompilationUnit* cu)
{
GrowableListIterator iterator;
- GrowableListIteratorInit(&cUnit->switchTables, &iterator);
+ GrowableListIteratorInit(&cu->switch_tables, &iterator);
while (true) {
- SwitchTable* tabRec = reinterpret_cast<SwitchTable*>(GrowableListIteratorNext( &iterator));
- if (tabRec == NULL) break;
- AlignBuffer(cUnit->codeBuffer, tabRec->offset);
+ SwitchTable* tab_rec = reinterpret_cast<SwitchTable*>(GrowableListIteratorNext(&iterator));
+ if (tab_rec == NULL) break;
+ AlignBuffer(cu->code_buffer, tab_rec->offset);
/*
* For Arm, our reference point is the address of the bx
* instruction that does the launch, so we have to subtract
* the auto pc-advance. For other targets the reference point
* is a label, so we can use the offset as-is.
*/
- int bxOffset = INVALID_OFFSET;
- switch (cUnit->instructionSet) {
+ int bx_offset = INVALID_OFFSET;
+ switch (cu->instruction_set) {
case kThumb2:
- bxOffset = tabRec->anchor->offset + 4;
+ bx_offset = tab_rec->anchor->offset + 4;
break;
case kX86:
- bxOffset = 0;
+ bx_offset = 0;
break;
case kMips:
- bxOffset = tabRec->anchor->offset;
+ bx_offset = tab_rec->anchor->offset;
break;
- default: LOG(FATAL) << "Unexpected instruction set: " << cUnit->instructionSet;
+ default: LOG(FATAL) << "Unexpected instruction set: " << cu->instruction_set;
}
- if (cUnit->printMe) {
- LOG(INFO) << "Switch table for offset 0x" << std::hex << bxOffset;
+ if (cu->verbose) {
+ LOG(INFO) << "Switch table for offset 0x" << std::hex << bx_offset;
}
- if (tabRec->table[0] == Instruction::kSparseSwitchSignature) {
- const int* keys = reinterpret_cast<const int*>(&(tabRec->table[2]));
- for (int elems = 0; elems < tabRec->table[1]; elems++) {
- int disp = tabRec->targets[elems]->offset - bxOffset;
- if (cUnit->printMe) {
+ if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
+ const int* keys = reinterpret_cast<const int*>(&(tab_rec->table[2]));
+ for (int elems = 0; elems < tab_rec->table[1]; elems++) {
+ int disp = tab_rec->targets[elems]->offset - bx_offset;
+ if (cu->verbose) {
LOG(INFO) << " Case[" << elems << "] key: 0x"
<< std::hex << keys[elems] << ", disp: 0x"
<< std::hex << disp;
}
- PushWord(cUnit->codeBuffer, keys[elems]);
- PushWord(cUnit->codeBuffer,
- tabRec->targets[elems]->offset - bxOffset);
+ PushWord(cu->code_buffer, keys[elems]);
+ PushWord(cu->code_buffer,
+ tab_rec->targets[elems]->offset - bx_offset);
}
} else {
- DCHECK_EQ(static_cast<int>(tabRec->table[0]),
+ DCHECK_EQ(static_cast<int>(tab_rec->table[0]),
static_cast<int>(Instruction::kPackedSwitchSignature));
- for (int elems = 0; elems < tabRec->table[1]; elems++) {
- int disp = tabRec->targets[elems]->offset - bxOffset;
- if (cUnit->printMe) {
+ for (int elems = 0; elems < tab_rec->table[1]; elems++) {
+ int disp = tab_rec->targets[elems]->offset - bx_offset;
+ if (cu->verbose) {
LOG(INFO) << " Case[" << elems << "] disp: 0x"
<< std::hex << disp;
}
- PushWord(cUnit->codeBuffer, tabRec->targets[elems]->offset - bxOffset);
+ PushWord(cu->code_buffer, tab_rec->targets[elems]->offset - bx_offset);
}
}
}
}
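
In terms of words appended to code_buffer (one per PushWord call, each displacement relative to bx_offset), the two loops above produce:

// sparse: key[0], disp[0], key[1], disp[1], ..., key[n-1], disp[n-1]
// packed: disp[0], disp[1], ..., disp[n-1]
//
// which is why AssignSwitchTablesOffset (below) reserves entries * 8
// bytes per sparse table and entries * 4 per packed one. Hypothetical
// helper expressing that size, not part of the CL:
static size_t SwitchTableBytes(bool sparse, int entries) {
  return static_cast<size_t>(entries) * (sparse ? 2 : 1) * sizeof(int32_t);
}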
/* Write the fill array data to the output stream */
-static void InstallFillArrayData(CompilationUnit* cUnit)
+static void InstallFillArrayData(CompilationUnit* cu)
{
GrowableListIterator iterator;
- GrowableListIteratorInit(&cUnit->fillArrayData, &iterator);
+ GrowableListIteratorInit(&cu->fill_array_data, &iterator);
while (true) {
- FillArrayData *tabRec =
+ FillArrayData *tab_rec =
reinterpret_cast<FillArrayData*>(GrowableListIteratorNext(&iterator));
- if (tabRec == NULL) break;
- AlignBuffer(cUnit->codeBuffer, tabRec->offset);
- for (int i = 0; i < (tabRec->size + 1) / 2; i++) {
- cUnit->codeBuffer.push_back( tabRec->table[i] & 0xFF);
- cUnit->codeBuffer.push_back( (tabRec->table[i] >> 8) & 0xFF);
+ if (tab_rec == NULL) break;
+ AlignBuffer(cu->code_buffer, tab_rec->offset);
+ for (int i = 0; i < (tab_rec->size + 1) / 2; i++) {
+ cu->code_buffer.push_back(tab_rec->table[i] & 0xFF);
+ cu->code_buffer.push_back((tab_rec->table[i] >> 8) & 0xFF);
}
}
}
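
tab_rec->size appears to be in bytes while table[] holds uint16_t halfwords, hence the (size + 1) / 2 round-up and the low-byte/high-byte pair per iteration. A standalone equivalent of the loop (hypothetical helper name, sketch only):

#include <cstdint>
#include <vector>

static void EmitFillArrayData(std::vector<uint8_t>& buf,
                              const uint16_t* table, int size_in_bytes) {
  for (int i = 0; i < (size_in_bytes + 1) / 2; i++) {  // round up to halfwords
    buf.push_back(table[i] & 0xFF);                    // little-endian
    buf.push_back((table[i] >> 8) & 0xFF);
  }
}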
@@ -629,61 +629,61 @@
}
// Make sure we have a code address for every declared catch entry
-static bool VerifyCatchEntries(CompilationUnit* cUnit)
+static bool VerifyCatchEntries(CompilationUnit* cu)
{
bool success = true;
- for (std::set<uint32_t>::const_iterator it = cUnit->catches.begin(); it != cUnit->catches.end(); ++it) {
- uint32_t dexPc = *it;
+ for (std::set<uint32_t>::const_iterator it = cu->catches.begin(); it != cu->catches.end(); ++it) {
+ uint32_t dex_pc = *it;
bool found = false;
- for (size_t i = 0; i < cUnit->dex2pcMappingTable.size(); i += 2) {
- if (dexPc == cUnit->dex2pcMappingTable[i+1]) {
+ for (size_t i = 0; i < cu->dex2pcMappingTable.size(); i += 2) {
+ if (dex_pc == cu->dex2pcMappingTable[i+1]) {
found = true;
break;
}
}
if (!found) {
- LOG(INFO) << "Missing native PC for catch entry @ 0x" << std::hex << dexPc;
+ LOG(INFO) << "Missing native PC for catch entry @ 0x" << std::hex << dex_pc;
success = false;
}
}
// Now, try in the other direction
- for (size_t i = 0; i < cUnit->dex2pcMappingTable.size(); i += 2) {
- uint32_t dexPc = cUnit->dex2pcMappingTable[i+1];
- if (cUnit->catches.find(dexPc) == cUnit->catches.end()) {
- LOG(INFO) << "Unexpected catch entry @ dex pc 0x" << std::hex << dexPc;
+ for (size_t i = 0; i < cu->dex2pcMappingTable.size(); i += 2) {
+ uint32_t dex_pc = cu->dex2pcMappingTable[i+1];
+ if (cu->catches.find(dex_pc) == cu->catches.end()) {
+ LOG(INFO) << "Unexpected catch entry @ dex pc 0x" << std::hex << dex_pc;
success = false;
}
}
if (!success) {
- LOG(INFO) << "Bad dex2pcMapping table in " << PrettyMethod(cUnit->method_idx, *cUnit->dex_file);
- LOG(INFO) << "Entries @ decode: " << cUnit->catches.size() << ", Entries in table: "
- << cUnit->dex2pcMappingTable.size()/2;
+ LOG(INFO) << "Bad dex2pcMapping table in " << PrettyMethod(cu->method_idx, *cu->dex_file);
+ LOG(INFO) << "Entries @ decode: " << cu->catches.size() << ", Entries in table: "
+ << cu->dex2pcMappingTable.size()/2;
}
return success;
}
-static void CreateMappingTables(CompilationUnit* cUnit)
+static void CreateMappingTables(CompilationUnit* cu)
{
- for (LIR* tgtLIR = cUnit->firstLIRInsn; tgtLIR != NULL; tgtLIR = NEXT_LIR(tgtLIR)) {
- if (!tgtLIR->flags.isNop && (tgtLIR->opcode == kPseudoSafepointPC)) {
- cUnit->pc2dexMappingTable.push_back(tgtLIR->offset);
- cUnit->pc2dexMappingTable.push_back(tgtLIR->dalvikOffset);
+ for (LIR* tgt_lir = cu->first_lir_insn; tgt_lir != NULL; tgt_lir = NEXT_LIR(tgt_lir)) {
+ if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoSafepointPC)) {
+ cu->pc2dexMappingTable.push_back(tgt_lir->offset);
+ cu->pc2dexMappingTable.push_back(tgt_lir->dalvik_offset);
}
- if (!tgtLIR->flags.isNop && (tgtLIR->opcode == kPseudoExportedPC)) {
- cUnit->dex2pcMappingTable.push_back(tgtLIR->offset);
- cUnit->dex2pcMappingTable.push_back(tgtLIR->dalvikOffset);
+ if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoExportedPC)) {
+ cu->dex2pcMappingTable.push_back(tgt_lir->offset);
+ cu->dex2pcMappingTable.push_back(tgt_lir->dalvik_offset);
}
}
- DCHECK(VerifyCatchEntries(cUnit));
- cUnit->combinedMappingTable.push_back(cUnit->pc2dexMappingTable.size() +
- cUnit->dex2pcMappingTable.size());
- cUnit->combinedMappingTable.push_back(cUnit->pc2dexMappingTable.size());
- cUnit->combinedMappingTable.insert(cUnit->combinedMappingTable.end(),
- cUnit->pc2dexMappingTable.begin(),
- cUnit->pc2dexMappingTable.end());
- cUnit->combinedMappingTable.insert(cUnit->combinedMappingTable.end(),
- cUnit->dex2pcMappingTable.begin(),
- cUnit->dex2pcMappingTable.end());
+ DCHECK(VerifyCatchEntries(cu));
+ cu->combined_mapping_table.push_back(cu->pc2dexMappingTable.size() +
+ cu->dex2pcMappingTable.size());
+ cu->combined_mapping_table.push_back(cu->pc2dexMappingTable.size());
+ cu->combined_mapping_table.insert(cu->combined_mapping_table.end(),
+ cu->pc2dexMappingTable.begin(),
+ cu->pc2dexMappingTable.end());
+ cu->combined_mapping_table.insert(cu->combined_mapping_table.end(),
+ cu->dex2pcMappingTable.begin(),
+ cu->dex2pcMappingTable.end());
}
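
The combined table built above is laid out as: word 0 holds the total entry count (pc2dex plus dex2pc), word 1 the pc2dex entry count, then the pc2dex (native offset, dalvik offset) pairs, then the dex2pc pairs. A hypothetical reader that splits it back apart, assuming exactly that layout:

#include <cstdint>
#include <vector>

static void SplitMappingTable(const std::vector<uint32_t>& t,
                              std::vector<uint32_t>* pc2dex,
                              std::vector<uint32_t>* dex2pc) {
  uint32_t total = t[0];        // pc2dex entries + dex2pc entries
  uint32_t pc2dex_size = t[1];  // entries in the pc2dex portion
  pc2dex->assign(t.begin() + 2, t.begin() + 2 + pc2dex_size);
  dex2pc->assign(t.begin() + 2 + pc2dex_size, t.begin() + 2 + total);
}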
class NativePcToReferenceMapBuilder {
@@ -764,8 +764,8 @@
std::vector<uint8_t>* const table_;
};
-static void CreateNativeGcMap(CompilationUnit* cUnit) {
- const std::vector<uint32_t>& mapping_table = cUnit->pc2dexMappingTable;
+static void CreateNativeGcMap(CompilationUnit* cu) {
+ const std::vector<uint32_t>& mapping_table = cu->pc2dexMappingTable;
uint32_t max_native_offset = 0;
for (size_t i = 0; i < mapping_table.size(); i += 2) {
uint32_t native_offset = mapping_table[i + 0];
@@ -773,11 +773,11 @@
max_native_offset = native_offset;
}
}
- Compiler::MethodReference method_ref(cUnit->dex_file, cUnit->method_idx);
+ Compiler::MethodReference method_ref(cu->dex_file, cu->method_idx);
const std::vector<uint8_t>* gc_map_raw = verifier::MethodVerifier::GetDexGcMap(method_ref);
verifier::DexPcToReferenceMap dex_gc_map(&(*gc_map_raw)[4], gc_map_raw->size() - 4);
// Compute native offset to references size.
- NativePcToReferenceMapBuilder native_gc_map_builder(&cUnit->nativeGcMap,
+ NativePcToReferenceMapBuilder native_gc_map_builder(&cu->native_gc_map,
mapping_table.size() / 2, max_native_offset,
dex_gc_map.RegWidth());
@@ -791,43 +791,43 @@
}
/* Determine the offset of each literal field */
-static int AssignLiteralOffset(CompilationUnit* cUnit, int offset)
+static int AssignLiteralOffset(CompilationUnit* cu, int offset)
{
- offset = AssignLiteralOffsetCommon(cUnit->literalList, offset);
- offset = AssignLiteralOffsetCommon(cUnit->codeLiteralList, offset);
- offset = AssignLiteralOffsetCommon(cUnit->methodLiteralList, offset);
+ offset = AssignLiteralOffsetCommon(cu->literal_list, offset);
+ offset = AssignLiteralOffsetCommon(cu->code_literal_list, offset);
+ offset = AssignLiteralOffsetCommon(cu->method_literal_list, offset);
return offset;
}
-static int AssignSwitchTablesOffset(CompilationUnit* cUnit, int offset)
+static int AssignSwitchTablesOffset(CompilationUnit* cu, int offset)
{
GrowableListIterator iterator;
- GrowableListIteratorInit(&cUnit->switchTables, &iterator);
+ GrowableListIteratorInit(&cu->switch_tables, &iterator);
while (true) {
- SwitchTable *tabRec = reinterpret_cast<SwitchTable*>(GrowableListIteratorNext(&iterator));
- if (tabRec == NULL) break;
- tabRec->offset = offset;
- if (tabRec->table[0] == Instruction::kSparseSwitchSignature) {
- offset += tabRec->table[1] * (sizeof(int) * 2);
+ SwitchTable* tab_rec = reinterpret_cast<SwitchTable*>(GrowableListIteratorNext(&iterator));
+ if (tab_rec == NULL) break;
+ tab_rec->offset = offset;
+ if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
+ offset += tab_rec->table[1] * (sizeof(int) * 2);
} else {
- DCHECK_EQ(static_cast<int>(tabRec->table[0]),
+ DCHECK_EQ(static_cast<int>(tab_rec->table[0]),
static_cast<int>(Instruction::kPackedSwitchSignature));
- offset += tabRec->table[1] * sizeof(int);
+ offset += tab_rec->table[1] * sizeof(int);
}
}
return offset;
}
-static int AssignFillArrayDataOffset(CompilationUnit* cUnit, int offset)
+static int AssignFillArrayDataOffset(CompilationUnit* cu, int offset)
{
GrowableListIterator iterator;
- GrowableListIteratorInit(&cUnit->fillArrayData, &iterator);
+ GrowableListIteratorInit(&cu->fill_array_data, &iterator);
while (true) {
- FillArrayData *tabRec =
+ FillArrayData* tab_rec =
reinterpret_cast<FillArrayData*>(GrowableListIteratorNext(&iterator));
- if (tabRec == NULL) break;
- tabRec->offset = offset;
- offset += tabRec->size;
+ if (tab_rec == NULL) break;
+ tab_rec->offset = offset;
+ offset += tab_rec->size;
// word align
offset = (offset + 3) & ~3;
}
@@ -838,23 +838,23 @@
* Walk the compilation unit and assign offsets to instructions
* and literals and compute the total size of the compiled unit.
*/
-static void AssignOffsets(CompilationUnit* cUnit)
+static void AssignOffsets(CompilationUnit* cu)
{
- int offset = AssignInsnOffsets(cUnit);
+ int offset = AssignInsnOffsets(cu);
/* Const values have to be word aligned */
offset = (offset + 3) & ~3;
/* Set up offsets for literals */
- cUnit->dataOffset = offset;
+ cu->data_offset = offset;
- offset = AssignLiteralOffset(cUnit, offset);
+ offset = AssignLiteralOffset(cu, offset);
- offset = AssignSwitchTablesOffset(cUnit, offset);
+ offset = AssignSwitchTablesOffset(cu, offset);
- offset = AssignFillArrayDataOffset(cUnit, offset);
+ offset = AssignFillArrayDataOffset(cu, offset);
- cUnit->totalSize = offset;
+ cu->total_size = offset;
}
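
The (offset + 3) & ~3 idiom used here and in AssignFillArrayDataOffset rounds up to the next multiple of four by adding 3 and clearing the two low bits:

static int WordAlign(int offset) {
  return (offset + 3) & ~3;  // 0 -> 0, 1 -> 4, 4 -> 4, 5 -> 8
}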
/*
@@ -862,43 +862,43 @@
* before sending them off to the assembler. If out-of-range branch distance is
* seen, rearrange the instructions a bit to correct it.
*/
-void AssembleLIR(CompilationUnit* cUnit)
+void AssembleLIR(CompilationUnit* cu)
{
- AssignOffsets(cUnit);
+ AssignOffsets(cu);
/*
* Assemble here. Note that we generate code with optimistic assumptions
* and if found not to work, we'll have to redo the sequence and retry.
*/
while (true) {
- AssemblerStatus res = AssembleInstructions(cUnit, 0);
+ AssemblerStatus res = AssembleInstructions(cu, 0);
if (res == kSuccess) {
break;
} else {
- cUnit->assemblerRetries++;
- if (cUnit->assemblerRetries > MAX_ASSEMBLER_RETRIES) {
- CodegenDump(cUnit);
+ cu->assembler_retries++;
+ if (cu->assembler_retries > MAX_ASSEMBLER_RETRIES) {
+ CodegenDump(cu);
LOG(FATAL) << "Assembler error - too many retries";
}
// Redo offsets and try again
- AssignOffsets(cUnit);
- cUnit->codeBuffer.clear();
+ AssignOffsets(cu);
+ cu->code_buffer.clear();
}
}
// Install literals
- InstallLiteralPools(cUnit);
+ InstallLiteralPools(cu);
// Install switch tables
- InstallSwitchTables(cUnit);
+ InstallSwitchTables(cu);
// Install fill array data
- InstallFillArrayData(cUnit);
+ InstallFillArrayData(cu);
// Create the mapping table and native offset to reference map.
- CreateMappingTables(cUnit);
+ CreateMappingTables(cu);
- CreateNativeGcMap(cUnit);
+ CreateNativeGcMap(cu);
}
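
Reassembly can change instruction sizes (e.g. a short-range branch widened to a long form), which invalidates every previously assigned offset; that is why each retry recomputes offsets from scratch and discards the partially filled buffer. The control flow above reduces to this sketch:

// do {
//   status = AssembleInstructions(cu, 0);
//   if (status != kSuccess) {
//     AssignOffsets(cu);        // instruction sizes changed: redo all offsets
//     cu->code_buffer.clear();  // discard partially emitted bytes
//   }
// } while (status != kSuccess);  // bounded by MAX_ASSEMBLER_RETRIES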
/*
@@ -908,57 +908,57 @@
* all resource flags on this to prevent code motion across
* target boundaries. KeyVal is just there for debugging.
*/
-static LIR* InsertCaseLabel(CompilationUnit* cUnit, int vaddr, int keyVal)
+static LIR* InsertCaseLabel(CompilationUnit* cu, int vaddr, int key_val)
{
SafeMap<unsigned int, LIR*>::iterator it;
- it = cUnit->boundaryMap.find(vaddr);
- if (it == cUnit->boundaryMap.end()) {
+ it = cu->boundary_map.find(vaddr);
+ if (it == cu->boundary_map.end()) {
LOG(FATAL) << "Error: didn't find vaddr 0x" << std::hex << vaddr;
}
- LIR* newLabel = static_cast<LIR*>(NewMem(cUnit, sizeof(LIR), true, kAllocLIR));
- newLabel->dalvikOffset = vaddr;
- newLabel->opcode = kPseudoCaseLabel;
- newLabel->operands[0] = keyVal;
- InsertLIRAfter(it->second, newLabel);
- return newLabel;
+ LIR* new_label = static_cast<LIR*>(NewMem(cu, sizeof(LIR), true, kAllocLIR));
+ new_label->dalvik_offset = vaddr;
+ new_label->opcode = kPseudoCaseLabel;
+ new_label->operands[0] = key_val;
+ InsertLIRAfter(it->second, new_label);
+ return new_label;
}
-static void MarkPackedCaseLabels(CompilationUnit* cUnit, SwitchTable *tabRec)
+static void MarkPackedCaseLabels(CompilationUnit* cu, SwitchTable* tab_rec)
{
- const uint16_t* table = tabRec->table;
- int baseVaddr = tabRec->vaddr;
+ const uint16_t* table = tab_rec->table;
+ int base_vaddr = tab_rec->vaddr;
const int *targets = reinterpret_cast<const int*>(&table[4]);
int entries = table[1];
- int lowKey = s4FromSwitchData(&table[2]);
+ int low_key = s4FromSwitchData(&table[2]);
for (int i = 0; i < entries; i++) {
- tabRec->targets[i] = InsertCaseLabel(cUnit, baseVaddr + targets[i], i + lowKey);
+ tab_rec->targets[i] = InsertCaseLabel(cu, base_vaddr + targets[i], i + low_key);
}
}
-static void MarkSparseCaseLabels(CompilationUnit* cUnit, SwitchTable *tabRec)
+static void MarkSparseCaseLabels(CompilationUnit* cu, SwitchTable* tab_rec)
{
- const uint16_t* table = tabRec->table;
- int baseVaddr = tabRec->vaddr;
+ const uint16_t* table = tab_rec->table;
+ int base_vaddr = tab_rec->vaddr;
int entries = table[1];
const int* keys = reinterpret_cast<const int*>(&table[2]);
const int* targets = &keys[entries];
for (int i = 0; i < entries; i++) {
- tabRec->targets[i] = InsertCaseLabel(cUnit, baseVaddr + targets[i], keys[i]);
+ tab_rec->targets[i] = InsertCaseLabel(cu, base_vaddr + targets[i], keys[i]);
}
}
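
Both markers decode the Dalvik switch payload, a stream of uint16_t halfwords in which each 32-bit value spans two halfwords (hence s4FromSwitchData and the reinterpret_casts). Per the Dalvik bytecode format, the layouts are:

// packed: ushort ident; ushort size; int first_key; int targets[size];
// sparse: ushort ident; ushort size; int keys[size]; int targets[size];
//
// Each target is a branch offset relative to the switch opcode's address,
// which is why base_vaddr (tab_rec->vaddr) is added back in above.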
-void ProcessSwitchTables(CompilationUnit* cUnit)
+void ProcessSwitchTables(CompilationUnit* cu)
{
GrowableListIterator iterator;
- GrowableListIteratorInit(&cUnit->switchTables, &iterator);
+ GrowableListIteratorInit(&cu->switch_tables, &iterator);
while (true) {
- SwitchTable *tabRec =
+ SwitchTable* tab_rec =
reinterpret_cast<SwitchTable*>(GrowableListIteratorNext(&iterator));
- if (tabRec == NULL) break;
- if (tabRec->table[0] == Instruction::kPackedSwitchSignature) {
- MarkPackedCaseLabels(cUnit, tabRec);
- } else if (tabRec->table[0] == Instruction::kSparseSwitchSignature) {
- MarkSparseCaseLabels(cUnit, tabRec);
+ if (tab_rec == NULL) break;
+ if (tab_rec->table[0] == Instruction::kPackedSwitchSignature) {
+ MarkPackedCaseLabels(cu, tab_rec);
+ } else if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
+ MarkSparseCaseLabels(cu, tab_rec);
} else {
LOG(FATAL) << "Invalid switch table";
}
@@ -1001,26 +1001,26 @@
uint16_t ident = table[0];
const int* targets = reinterpret_cast<const int*>(&table[4]);
int entries = table[1];
- int lowKey = s4FromSwitchData(&table[2]);
+ int low_key = s4FromSwitchData(&table[2]);
LOG(INFO) << "Packed switch table - ident:0x" << std::hex << ident
- << ", entries: " << std::dec << entries << ", lowKey: " << lowKey;
+ << ", entries: " << std::dec << entries << ", low_key: " << low_key;
for (int i = 0; i < entries; i++) {
- LOG(INFO) << " Key[" << (i + lowKey) << "] -> 0x" << std::hex
+ LOG(INFO) << " Key[" << (i + low_key) << "] -> 0x" << std::hex
<< targets[i];
}
}
/*
* Set up special LIR to mark a Dalvik byte-code instruction start and
- * record it in the boundaryMap. NOTE: in cases such as kMirOpCheck in
+ * record it in the boundary_map. NOTE: in cases such as kMirOpCheck in
* which we split a single Dalvik instruction, only the first MIR op
* associated with a Dalvik PC should be entered into the map.
*/
-LIR* MarkBoundary(CompilationUnit* cUnit, int offset, const char* instStr)
+LIR* MarkBoundary(CompilationUnit* cu, int offset, const char* inst_str)
{
- LIR* res = NewLIR1(cUnit, kPseudoDalvikByteCodeBoundary, reinterpret_cast<uintptr_t>(instStr));
- if (cUnit->boundaryMap.find(offset) == cUnit->boundaryMap.end()) {
- cUnit->boundaryMap.Put(offset, res);
+ LIR* res = NewLIR1(cu, kPseudoDalvikByteCodeBoundary, reinterpret_cast<uintptr_t>(inst_str));
+ if (cu->boundary_map.find(offset) == cu->boundary_map.end()) {
+ cu->boundary_map.Put(offset, res);
}
return res;
}
diff --git a/src/compiler/codegen/codegen_util.h b/src/compiler/codegen/codegen_util.h
index a4574f9..380203a 100644
--- a/src/compiler/codegen/codegen_util.h
+++ b/src/compiler/codegen/codegen_util.h
@@ -19,33 +19,33 @@
namespace art {
-inline int32_t s4FromSwitchData(const void* switchData) { return *reinterpret_cast<const int32_t*>(switchData); }
-inline RegisterClass oatRegClassBySize(OpSize size) { return (size == kUnsignedHalf || size == kSignedHalf || size == kUnsignedByte || size == kSignedByte ) ? kCoreReg : kAnyReg; }
-void AssembleLIR(CompilationUnit* cUnit);
-void SetMemRefType(LIR* lir, bool isLoad, int memType);
-void AnnotateDalvikRegAccess(LIR* lir, int regId, bool isLoad, bool is64bit);
-uint64_t GetRegMaskCommon(CompilationUnit* cUnit, int reg);
-void SetupRegMask(CompilationUnit* cUnit, uint64_t* mask, int reg);
-void SetupResourceMasks(CompilationUnit* cUnit, LIR* lir);
-void DumpLIRInsn(CompilationUnit* cUnit, LIR* arg, unsigned char* baseAddr);
-void DumpPromotionMap(CompilationUnit *cUnit);
-void CodegenDump(CompilationUnit* cUnit);
+inline int32_t s4FromSwitchData(const void* switch_data) { return *reinterpret_cast<const int32_t*>(switch_data); }
+inline RegisterClass oat_reg_class_by_size(OpSize size) { return (size == kUnsignedHalf || size == kSignedHalf || size == kUnsignedByte || size == kSignedByte) ? kCoreReg : kAnyReg; }
+void AssembleLIR(CompilationUnit* cu);
+void SetMemRefType(LIR* lir, bool is_load, int mem_type);
+void AnnotateDalvikRegAccess(LIR* lir, int reg_id, bool is_load, bool is64bit);
+uint64_t GetRegMaskCommon(CompilationUnit* cu, int reg);
+void SetupRegMask(CompilationUnit* cu, uint64_t* mask, int reg);
+void SetupResourceMasks(CompilationUnit* cu, LIR* lir);
+void DumpLIRInsn(CompilationUnit* cu, LIR* arg, unsigned char* base_addr);
+void DumpPromotionMap(CompilationUnit* cu);
+void CodegenDump(CompilationUnit* cu);
// TODO: remove default parameters
-LIR* RawLIR(CompilationUnit* cUnit, int dalvikOffset, int opcode, int op0 = 0, int op1 = 0, int op2 = 0, int op3 = 0, int op4 = 0, LIR* target = NULL);
-LIR* NewLIR0(CompilationUnit* cUnit, int opcode);
-LIR* NewLIR1(CompilationUnit* cUnit, int opcode, int dest);
-LIR* NewLIR2(CompilationUnit* cUnit, int opcode, int dest, int src1);
-LIR* NewLIR3(CompilationUnit* cUnit, int opcode, int dest, int src1, int src2);
-LIR* NewLIR4(CompilationUnit* cUnit, int opcode, int dest, int src1, int src2, int info);
-LIR* NewLIR5(CompilationUnit* cUnit, int opcode, int dest, int src1, int src2, int info1, int info2);
-LIR* ScanLiteralPool(LIR* dataTarget, int value, unsigned int delta);
-LIR* ScanLiteralPoolWide(LIR* dataTarget, int valLo, int valHi);
-LIR* AddWordData(CompilationUnit* cUnit, LIR* *constantListP, int value);
-LIR* AddWideData(CompilationUnit* cUnit, LIR* *constantListP, int valLo, int valHi);
-void ProcessSwitchTables(CompilationUnit* cUnit);
+LIR* RawLIR(CompilationUnit* cu, int dalvik_offset, int opcode, int op0 = 0, int op1 = 0, int op2 = 0, int op3 = 0, int op4 = 0, LIR* target = NULL);
+LIR* NewLIR0(CompilationUnit* cu, int opcode);
+LIR* NewLIR1(CompilationUnit* cu, int opcode, int dest);
+LIR* NewLIR2(CompilationUnit* cu, int opcode, int dest, int src1);
+LIR* NewLIR3(CompilationUnit* cu, int opcode, int dest, int src1, int src2);
+LIR* NewLIR4(CompilationUnit* cu, int opcode, int dest, int src1, int src2, int info);
+LIR* NewLIR5(CompilationUnit* cu, int opcode, int dest, int src1, int src2, int info1, int info2);
+LIR* ScanLiteralPool(LIR* data_target, int value, unsigned int delta);
+LIR* ScanLiteralPoolWide(LIR* data_target, int val_lo, int val_hi);
+LIR* AddWordData(CompilationUnit* cu, LIR** constant_list_p, int value);
+LIR* AddWideData(CompilationUnit* cu, LIR** constant_list_p, int val_lo, int val_hi);
+void ProcessSwitchTables(CompilationUnit* cu);
void DumpSparseSwitchTable(const uint16_t* table);
void DumpPackedSwitchTable(const uint16_t* table);
-LIR* MarkBoundary(CompilationUnit* cUnit, int offset, const char* instStr);
+LIR* MarkBoundary(CompilationUnit* cu, int offset, const char* inst_str);
} // namespace art
diff --git a/src/compiler/codegen/gen_common.cc b/src/compiler/codegen/gen_common.cc
index 7108825..22b919a 100644
--- a/src/compiler/codegen/gen_common.cc
+++ b/src/compiler/codegen/gen_common.cc
@@ -22,7 +22,7 @@
namespace art {
// TODO: remove decl.
-void GenInvoke(CompilationUnit* cUnit, CallInfo* info);
+void GenInvoke(CompilationUnit* cu, CallInfo* info);
/*
* This source file contains "gen" codegen routines that should
@@ -30,11 +30,11 @@
* and "op" calls may be used here.
*/
-void MarkSafepointPC(CompilationUnit* cUnit, LIR* inst)
+void MarkSafepointPC(CompilationUnit* cu, LIR* inst)
{
- inst->defMask = ENCODE_ALL;
- LIR* safepointPC = NewLIR0(cUnit, kPseudoSafepointPC);
- DCHECK_EQ(safepointPC->defMask, ENCODE_ALL);
+ inst->def_mask = ENCODE_ALL;
+ LIR* safepoint_pc = NewLIR0(cu, kPseudoSafepointPC);
+ DCHECK_EQ(safepoint_pc->def_mask, ENCODE_ALL);
}
/*
@@ -43,275 +43,275 @@
* has a memory call operation, part 1 is a NOP for x86. For other targets,
* load arguments between the two parts.
*/
-int CallHelperSetup(CompilationUnit* cUnit, int helperOffset)
+int CallHelperSetup(CompilationUnit* cu, int helper_offset)
{
- return (cUnit->instructionSet == kX86) ? 0 : LoadHelper(cUnit, helperOffset);
+ return (cu->instruction_set == kX86) ? 0 : LoadHelper(cu, helper_offset);
}
-/* NOTE: if rTgt is a temp, it will be freed following use */
-LIR* CallHelper(CompilationUnit* cUnit, int rTgt, int helperOffset, bool safepointPC)
+/* NOTE: if r_tgt is a temp, it will be freed following use */
+LIR* CallHelper(CompilationUnit* cu, int r_tgt, int helper_offset, bool safepoint_pc)
{
- LIR* callInst;
- if (cUnit->instructionSet == kX86) {
- callInst = OpThreadMem(cUnit, kOpBlx, helperOffset);
+ LIR* call_inst;
+ if (cu->instruction_set == kX86) {
+ call_inst = OpThreadMem(cu, kOpBlx, helper_offset);
} else {
- callInst = OpReg(cUnit, kOpBlx, rTgt);
- FreeTemp(cUnit, rTgt);
+ call_inst = OpReg(cu, kOpBlx, r_tgt);
+ FreeTemp(cu, r_tgt);
}
- if (safepointPC) {
- MarkSafepointPC(cUnit, callInst);
+ if (safepoint_pc) {
+ MarkSafepointPC(cu, call_inst);
}
- return callInst;
+ return call_inst;
}
-void CallRuntimeHelperImm(CompilationUnit* cUnit, int helperOffset, int arg0, bool safepointPC) {
- int rTgt = CallHelperSetup(cUnit, helperOffset);
- LoadConstant(cUnit, TargetReg(kArg0), arg0);
- ClobberCalleeSave(cUnit);
- CallHelper(cUnit, rTgt, helperOffset, safepointPC);
+void CallRuntimeHelperImm(CompilationUnit* cu, int helper_offset, int arg0, bool safepoint_pc) {
+ int r_tgt = CallHelperSetup(cu, helper_offset);
+ LoadConstant(cu, TargetReg(kArg0), arg0);
+ ClobberCalleeSave(cu);
+ CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
}
-void CallRuntimeHelperReg(CompilationUnit* cUnit, int helperOffset, int arg0, bool safepointPC) {
- int rTgt = CallHelperSetup(cUnit, helperOffset);
- OpRegCopy(cUnit, TargetReg(kArg0), arg0);
- ClobberCalleeSave(cUnit);
- CallHelper(cUnit, rTgt, helperOffset, safepointPC);
+void CallRuntimeHelperReg(CompilationUnit* cu, int helper_offset, int arg0, bool safepoint_pc) {
+ int r_tgt = CallHelperSetup(cu, helper_offset);
+ OpRegCopy(cu, TargetReg(kArg0), arg0);
+ ClobberCalleeSave(cu);
+ CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
}
-void CallRuntimeHelperRegLocation(CompilationUnit* cUnit, int helperOffset, RegLocation arg0,
- bool safepointPC) {
- int rTgt = CallHelperSetup(cUnit, helperOffset);
+void CallRuntimeHelperRegLocation(CompilationUnit* cu, int helper_offset, RegLocation arg0,
+ bool safepoint_pc) {
+ int r_tgt = CallHelperSetup(cu, helper_offset);
if (arg0.wide == 0) {
- LoadValueDirectFixed(cUnit, arg0, TargetReg(kArg0));
+ LoadValueDirectFixed(cu, arg0, TargetReg(kArg0));
} else {
- LoadValueDirectWideFixed(cUnit, arg0, TargetReg(kArg0), TargetReg(kArg1));
+ LoadValueDirectWideFixed(cu, arg0, TargetReg(kArg0), TargetReg(kArg1));
}
- ClobberCalleeSave(cUnit);
- CallHelper(cUnit, rTgt, helperOffset, safepointPC);
+ ClobberCalleeSave(cu);
+ CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
}
-void CallRuntimeHelperImmImm(CompilationUnit* cUnit, int helperOffset, int arg0, int arg1,
- bool safepointPC) {
- int rTgt = CallHelperSetup(cUnit, helperOffset);
- LoadConstant(cUnit, TargetReg(kArg0), arg0);
- LoadConstant(cUnit, TargetReg(kArg1), arg1);
- ClobberCalleeSave(cUnit);
- CallHelper(cUnit, rTgt, helperOffset, safepointPC);
+void CallRuntimeHelperImmImm(CompilationUnit* cu, int helper_offset, int arg0, int arg1,
+ bool safepoint_pc) {
+ int r_tgt = CallHelperSetup(cu, helper_offset);
+ LoadConstant(cu, TargetReg(kArg0), arg0);
+ LoadConstant(cu, TargetReg(kArg1), arg1);
+ ClobberCalleeSave(cu);
+ CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
}
-void CallRuntimeHelperImmRegLocation(CompilationUnit* cUnit, int helperOffset, int arg0,
- RegLocation arg1, bool safepointPC) {
- int rTgt = CallHelperSetup(cUnit, helperOffset);
+void CallRuntimeHelperImmRegLocation(CompilationUnit* cu, int helper_offset, int arg0,
+ RegLocation arg1, bool safepoint_pc) {
+ int r_tgt = CallHelperSetup(cu, helper_offset);
if (arg1.wide == 0) {
- LoadValueDirectFixed(cUnit, arg1, TargetReg(kArg1));
+ LoadValueDirectFixed(cu, arg1, TargetReg(kArg1));
} else {
- LoadValueDirectWideFixed(cUnit, arg1, TargetReg(kArg1), TargetReg(kArg2));
+ LoadValueDirectWideFixed(cu, arg1, TargetReg(kArg1), TargetReg(kArg2));
}
- LoadConstant(cUnit, TargetReg(kArg0), arg0);
- ClobberCalleeSave(cUnit);
- CallHelper(cUnit, rTgt, helperOffset, safepointPC);
+ LoadConstant(cu, TargetReg(kArg0), arg0);
+ ClobberCalleeSave(cu);
+ CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
}
-void CallRuntimeHelperRegLocationImm(CompilationUnit* cUnit, int helperOffset, RegLocation arg0,
- int arg1, bool safepointPC) {
- int rTgt = CallHelperSetup(cUnit, helperOffset);
- LoadValueDirectFixed(cUnit, arg0, TargetReg(kArg0));
- LoadConstant(cUnit, TargetReg(kArg1), arg1);
- ClobberCalleeSave(cUnit);
- CallHelper(cUnit, rTgt, helperOffset, safepointPC);
+void CallRuntimeHelperRegLocationImm(CompilationUnit* cu, int helper_offset, RegLocation arg0,
+ int arg1, bool safepoint_pc) {
+ int r_tgt = CallHelperSetup(cu, helper_offset);
+ LoadValueDirectFixed(cu, arg0, TargetReg(kArg0));
+ LoadConstant(cu, TargetReg(kArg1), arg1);
+ ClobberCalleeSave(cu);
+ CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
}
-void CallRuntimeHelperImmReg(CompilationUnit* cUnit, int helperOffset, int arg0, int arg1,
- bool safepointPC) {
- int rTgt = CallHelperSetup(cUnit, helperOffset);
- OpRegCopy(cUnit, TargetReg(kArg1), arg1);
- LoadConstant(cUnit, TargetReg(kArg0), arg0);
- ClobberCalleeSave(cUnit);
- CallHelper(cUnit, rTgt, helperOffset, safepointPC);
+void CallRuntimeHelperImmReg(CompilationUnit* cu, int helper_offset, int arg0, int arg1,
+ bool safepoint_pc) {
+ int r_tgt = CallHelperSetup(cu, helper_offset);
+ OpRegCopy(cu, TargetReg(kArg1), arg1);
+ LoadConstant(cu, TargetReg(kArg0), arg0);
+ ClobberCalleeSave(cu);
+ CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
}
-void CallRuntimeHelperRegImm(CompilationUnit* cUnit, int helperOffset, int arg0, int arg1,
- bool safepointPC) {
- int rTgt = CallHelperSetup(cUnit, helperOffset);
- OpRegCopy(cUnit, TargetReg(kArg0), arg0);
- LoadConstant(cUnit, TargetReg(kArg1), arg1);
- ClobberCalleeSave(cUnit);
- CallHelper(cUnit, rTgt, helperOffset, safepointPC);
+void CallRuntimeHelperRegImm(CompilationUnit* cu, int helper_offset, int arg0, int arg1,
+ bool safepoint_pc) {
+ int r_tgt = CallHelperSetup(cu, helper_offset);
+ OpRegCopy(cu, TargetReg(kArg0), arg0);
+ LoadConstant(cu, TargetReg(kArg1), arg1);
+ ClobberCalleeSave(cu);
+ CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
}
-void CallRuntimeHelperImmMethod(CompilationUnit* cUnit, int helperOffset, int arg0, bool safepointPC) {
- int rTgt = CallHelperSetup(cUnit, helperOffset);
- LoadCurrMethodDirect(cUnit, TargetReg(kArg1));
- LoadConstant(cUnit, TargetReg(kArg0), arg0);
- ClobberCalleeSave(cUnit);
- CallHelper(cUnit, rTgt, helperOffset, safepointPC);
+void CallRuntimeHelperImmMethod(CompilationUnit* cu, int helper_offset, int arg0, bool safepoint_pc) {
+ int r_tgt = CallHelperSetup(cu, helper_offset);
+ LoadCurrMethodDirect(cu, TargetReg(kArg1));
+ LoadConstant(cu, TargetReg(kArg0), arg0);
+ ClobberCalleeSave(cu);
+ CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
}
-void CallRuntimeHelperRegLocationRegLocation(CompilationUnit* cUnit, int helperOffset,
- RegLocation arg0, RegLocation arg1, bool safepointPC) {
- int rTgt = CallHelperSetup(cUnit, helperOffset);
+void CallRuntimeHelperRegLocationRegLocation(CompilationUnit* cu, int helper_offset,
+ RegLocation arg0, RegLocation arg1, bool safepoint_pc) {
+ int r_tgt = CallHelperSetup(cu, helper_offset);
if (arg0.wide == 0) {
- LoadValueDirectFixed(cUnit, arg0, arg0.fp ? TargetReg(kFArg0) : TargetReg(kArg0));
+ LoadValueDirectFixed(cu, arg0, arg0.fp ? TargetReg(kFArg0) : TargetReg(kArg0));
if (arg1.wide == 0) {
- if (cUnit->instructionSet == kMips) {
- LoadValueDirectFixed(cUnit, arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg1));
+ if (cu->instruction_set == kMips) {
+ LoadValueDirectFixed(cu, arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg1));
} else {
- LoadValueDirectFixed(cUnit, arg1, TargetReg(kArg1));
+ LoadValueDirectFixed(cu, arg1, TargetReg(kArg1));
}
} else {
- if (cUnit->instructionSet == kMips) {
- LoadValueDirectWideFixed(cUnit, arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg1), arg1.fp ? TargetReg(kFArg3) : TargetReg(kArg2));
+ if (cu->instruction_set == kMips) {
+ LoadValueDirectWideFixed(cu, arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg1), arg1.fp ? TargetReg(kFArg3) : TargetReg(kArg2));
} else {
- LoadValueDirectWideFixed(cUnit, arg1, TargetReg(kArg1), TargetReg(kArg2));
+ LoadValueDirectWideFixed(cu, arg1, TargetReg(kArg1), TargetReg(kArg2));
}
}
} else {
- LoadValueDirectWideFixed(cUnit, arg0, arg0.fp ? TargetReg(kFArg0) : TargetReg(kArg0), arg0.fp ? TargetReg(kFArg1) : TargetReg(kArg1));
+ LoadValueDirectWideFixed(cu, arg0, arg0.fp ? TargetReg(kFArg0) : TargetReg(kArg0), arg0.fp ? TargetReg(kFArg1) : TargetReg(kArg1));
if (arg1.wide == 0) {
- LoadValueDirectFixed(cUnit, arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg2));
+ LoadValueDirectFixed(cu, arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg2));
} else {
- LoadValueDirectWideFixed(cUnit, arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg2), arg1.fp ? TargetReg(kFArg3) : TargetReg(kArg3));
+ LoadValueDirectWideFixed(cu, arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg2), arg1.fp ? TargetReg(kFArg3) : TargetReg(kArg3));
}
}
- ClobberCalleeSave(cUnit);
- CallHelper(cUnit, rTgt, helperOffset, safepointPC);
+ ClobberCalleeSave(cu);
+ CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
}
-void CallRuntimeHelperRegReg(CompilationUnit* cUnit, int helperOffset, int arg0, int arg1,
- bool safepointPC) {
- int rTgt = CallHelperSetup(cUnit, helperOffset);
+void CallRuntimeHelperRegReg(CompilationUnit* cu, int helper_offset, int arg0, int arg1,
+ bool safepoint_pc) {
+ int r_tgt = CallHelperSetup(cu, helper_offset);
DCHECK_NE(TargetReg(kArg0), arg1); // check copy into arg0 won't clobber arg1
- OpRegCopy(cUnit, TargetReg(kArg0), arg0);
- OpRegCopy(cUnit, TargetReg(kArg1), arg1);
- ClobberCalleeSave(cUnit);
- CallHelper(cUnit, rTgt, helperOffset, safepointPC);
+ OpRegCopy(cu, TargetReg(kArg0), arg0);
+ OpRegCopy(cu, TargetReg(kArg1), arg1);
+ ClobberCalleeSave(cu);
+ CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
}
-void CallRuntimeHelperRegRegImm(CompilationUnit* cUnit, int helperOffset, int arg0, int arg1,
- int arg2, bool safepointPC) {
- int rTgt = CallHelperSetup(cUnit, helperOffset);
+void CallRuntimeHelperRegRegImm(CompilationUnit* cu, int helper_offset, int arg0, int arg1,
+ int arg2, bool safepoint_pc) {
+ int r_tgt = CallHelperSetup(cu, helper_offset);
DCHECK_NE(TargetReg(kArg0), arg1); // check copy into arg0 won't clobber arg1
- OpRegCopy(cUnit, TargetReg(kArg0), arg0);
- OpRegCopy(cUnit, TargetReg(kArg1), arg1);
- LoadConstant(cUnit, TargetReg(kArg2), arg2);
- ClobberCalleeSave(cUnit);
- CallHelper(cUnit, rTgt, helperOffset, safepointPC);
+ OpRegCopy(cu, TargetReg(kArg0), arg0);
+ OpRegCopy(cu, TargetReg(kArg1), arg1);
+ LoadConstant(cu, TargetReg(kArg2), arg2);
+ ClobberCalleeSave(cu);
+ CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
}
-void CallRuntimeHelperImmMethodRegLocation(CompilationUnit* cUnit, int helperOffset, int arg0,
- RegLocation arg2, bool safepointPC) {
- int rTgt = CallHelperSetup(cUnit, helperOffset);
- LoadValueDirectFixed(cUnit, arg2, TargetReg(kArg2));
- LoadCurrMethodDirect(cUnit, TargetReg(kArg1));
- LoadConstant(cUnit, TargetReg(kArg0), arg0);
- ClobberCalleeSave(cUnit);
- CallHelper(cUnit, rTgt, helperOffset, safepointPC);
+void CallRuntimeHelperImmMethodRegLocation(CompilationUnit* cu, int helper_offset, int arg0,
+ RegLocation arg2, bool safepoint_pc) {
+ int r_tgt = CallHelperSetup(cu, helper_offset);
+ LoadValueDirectFixed(cu, arg2, TargetReg(kArg2));
+ LoadCurrMethodDirect(cu, TargetReg(kArg1));
+ LoadConstant(cu, TargetReg(kArg0), arg0);
+ ClobberCalleeSave(cu);
+ CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
}
-void CallRuntimeHelperImmMethodImm(CompilationUnit* cUnit, int helperOffset, int arg0, int arg2,
- bool safepointPC) {
- int rTgt = CallHelperSetup(cUnit, helperOffset);
- LoadCurrMethodDirect(cUnit, TargetReg(kArg1));
- LoadConstant(cUnit, TargetReg(kArg2), arg2);
- LoadConstant(cUnit, TargetReg(kArg0), arg0);
- ClobberCalleeSave(cUnit);
- CallHelper(cUnit, rTgt, helperOffset, safepointPC);
+void CallRuntimeHelperImmMethodImm(CompilationUnit* cu, int helper_offset, int arg0, int arg2,
+ bool safepoint_pc) {
+ int r_tgt = CallHelperSetup(cu, helper_offset);
+ LoadCurrMethodDirect(cu, TargetReg(kArg1));
+ LoadConstant(cu, TargetReg(kArg2), arg2);
+ LoadConstant(cu, TargetReg(kArg0), arg0);
+ ClobberCalleeSave(cu);
+ CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
}
-void CallRuntimeHelperImmRegLocationRegLocation(CompilationUnit* cUnit, int helperOffset,
+void CallRuntimeHelperImmRegLocationRegLocation(CompilationUnit* cu, int helper_offset,
int arg0, RegLocation arg1, RegLocation arg2,
- bool safepointPC) {
- int rTgt = CallHelperSetup(cUnit, helperOffset);
- LoadValueDirectFixed(cUnit, arg1, TargetReg(kArg1));
+ bool safepoint_pc) {
+ int r_tgt = CallHelperSetup(cu, helper_offset);
+ LoadValueDirectFixed(cu, arg1, TargetReg(kArg1));
if (arg2.wide == 0) {
- LoadValueDirectFixed(cUnit, arg2, TargetReg(kArg2));
+ LoadValueDirectFixed(cu, arg2, TargetReg(kArg2));
} else {
- LoadValueDirectWideFixed(cUnit, arg2, TargetReg(kArg2), TargetReg(kArg3));
+ LoadValueDirectWideFixed(cu, arg2, TargetReg(kArg2), TargetReg(kArg3));
}
- LoadConstant(cUnit, TargetReg(kArg0), arg0);
- ClobberCalleeSave(cUnit);
- CallHelper(cUnit, rTgt, helperOffset, safepointPC);
+ LoadConstant(cu, TargetReg(kArg0), arg0);
+ ClobberCalleeSave(cu);
+ CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
}
/*
* Generate a kPseudoBarrier marker to indicate the boundary of special
* blocks.
*/
-void GenBarrier(CompilationUnit* cUnit)
+void GenBarrier(CompilationUnit* cu)
{
- LIR* barrier = NewLIR0(cUnit, kPseudoBarrier);
+ LIR* barrier = NewLIR0(cu, kPseudoBarrier);
/* Mark all resources as being clobbered */
- barrier->defMask = -1;
+ barrier->def_mask = -1;
}
/* Generate unconditional branch instructions */
-LIR* OpUnconditionalBranch(CompilationUnit* cUnit, LIR* target)
+LIR* OpUnconditionalBranch(CompilationUnit* cu, LIR* target)
{
- LIR* branch = OpBranchUnconditional(cUnit, kOpUncondBr);
+ LIR* branch = OpBranchUnconditional(cu, kOpUncondBr);
branch->target = target;
return branch;
}
// FIXME: need to do some work to split out targets with
// condition codes and those without
-LIR* GenCheck(CompilationUnit* cUnit, ConditionCode cCode,
+LIR* GenCheck(CompilationUnit* cu, ConditionCode c_code,
ThrowKind kind)
{
- DCHECK_NE(cUnit->instructionSet, kMips);
- LIR* tgt = RawLIR(cUnit, 0, kPseudoThrowTarget, kind,
- cUnit->currentDalvikOffset);
- LIR* branch = OpCondBranch(cUnit, cCode, tgt);
+ DCHECK_NE(cu->instruction_set, kMips);
+ LIR* tgt = RawLIR(cu, 0, kPseudoThrowTarget, kind,
+ cu->current_dalvik_offset);
+ LIR* branch = OpCondBranch(cu, c_code, tgt);
// Remember branch target - will process later
- InsertGrowableList(cUnit, &cUnit->throwLaunchpads, reinterpret_cast<uintptr_t>(tgt));
+ InsertGrowableList(cu, &cu->throw_launchpads, reinterpret_cast<uintptr_t>(tgt));
return branch;
}
-LIR* GenImmedCheck(CompilationUnit* cUnit, ConditionCode cCode,
- int reg, int immVal, ThrowKind kind)
+LIR* GenImmedCheck(CompilationUnit* cu, ConditionCode c_code,
+ int reg, int imm_val, ThrowKind kind)
{
- LIR* tgt = RawLIR(cUnit, 0, kPseudoThrowTarget, kind,
- cUnit->currentDalvikOffset);
+ LIR* tgt = RawLIR(cu, 0, kPseudoThrowTarget, kind,
+ cu->current_dalvik_offset);
LIR* branch;
- if (cCode == kCondAl) {
- branch = OpUnconditionalBranch(cUnit, tgt);
+ if (c_code == kCondAl) {
+ branch = OpUnconditionalBranch(cu, tgt);
} else {
- branch = OpCmpImmBranch(cUnit, cCode, reg, immVal, tgt);
+ branch = OpCmpImmBranch(cu, c_code, reg, imm_val, tgt);
}
// Remember branch target - will process later
- InsertGrowableList(cUnit, &cUnit->throwLaunchpads, reinterpret_cast<uintptr_t>(tgt));
+ InsertGrowableList(cu, &cu->throw_launchpads, reinterpret_cast<uintptr_t>(tgt));
return branch;
}
/* Perform null-check on a register. */
-LIR* GenNullCheck(CompilationUnit* cUnit, int sReg, int mReg, int optFlags)
+LIR* GenNullCheck(CompilationUnit* cu, int s_reg, int m_reg, int opt_flags)
{
- if (!(cUnit->disableOpt & (1 << kNullCheckElimination)) &&
- optFlags & MIR_IGNORE_NULL_CHECK) {
+ if (!(cu->disable_opt & (1 << kNullCheckElimination)) &&
+ opt_flags & MIR_IGNORE_NULL_CHECK) {
return NULL;
}
- return GenImmedCheck(cUnit, kCondEq, mReg, 0, kThrowNullPointer);
+ return GenImmedCheck(cu, kCondEq, m_reg, 0, kThrowNullPointer);
}
/* Perform check on two registers */
-LIR* GenRegRegCheck(CompilationUnit* cUnit, ConditionCode cCode,
+LIR* GenRegRegCheck(CompilationUnit* cu, ConditionCode c_code,
int reg1, int reg2, ThrowKind kind)
{
- LIR* tgt = RawLIR(cUnit, 0, kPseudoThrowTarget, kind,
- cUnit->currentDalvikOffset, reg1, reg2);
- LIR* branch = OpCmpBranch(cUnit, cCode, reg1, reg2, tgt);
+ LIR* tgt = RawLIR(cu, 0, kPseudoThrowTarget, kind,
+ cu->current_dalvik_offset, reg1, reg2);
+ LIR* branch = OpCmpBranch(cu, c_code, reg1, reg2, tgt);
// Remember branch target - will process later
- InsertGrowableList(cUnit, &cUnit->throwLaunchpads, reinterpret_cast<uintptr_t>(tgt));
+ InsertGrowableList(cu, &cu->throw_launchpads, reinterpret_cast<uintptr_t>(tgt));
return branch;
}
-void GenCompareAndBranch(CompilationUnit* cUnit, Instruction::Code opcode,
- RegLocation rlSrc1, RegLocation rlSrc2, LIR* taken,
- LIR* fallThrough)
+void GenCompareAndBranch(CompilationUnit* cu, Instruction::Code opcode,
+ RegLocation rl_src1, RegLocation rl_src2, LIR* taken,
+ LIR* fall_through)
{
ConditionCode cond;
- rlSrc1 = LoadValue(cUnit, rlSrc1, kCoreReg);
- rlSrc2 = LoadValue(cUnit, rlSrc2, kCoreReg);
+ rl_src1 = LoadValue(cu, rl_src1, kCoreReg);
+ rl_src2 = LoadValue(cu, rl_src2, kCoreReg);
switch (opcode) {
case Instruction::IF_EQ:
cond = kCondEq;
@@ -335,15 +335,15 @@
cond = static_cast<ConditionCode>(0);
LOG(FATAL) << "Unexpected opcode " << opcode;
}
- OpCmpBranch(cUnit, cond, rlSrc1.lowReg, rlSrc2.lowReg, taken);
- OpUnconditionalBranch(cUnit, fallThrough);
+ OpCmpBranch(cu, cond, rl_src1.low_reg, rl_src2.low_reg, taken);
+ OpUnconditionalBranch(cu, fall_through);
}
-void GenCompareZeroAndBranch(CompilationUnit* cUnit, Instruction::Code opcode,
- RegLocation rlSrc, LIR* taken, LIR* fallThrough)
+void GenCompareZeroAndBranch(CompilationUnit* cu, Instruction::Code opcode,
+ RegLocation rl_src, LIR* taken, LIR* fall_through)
{
ConditionCode cond;
- rlSrc = LoadValue(cUnit, rlSrc, kCoreReg);
+ rl_src = LoadValue(cu, rl_src, kCoreReg);
switch (opcode) {
case Instruction::IF_EQZ:
cond = kCondEq;
@@ -367,33 +367,33 @@
cond = static_cast<ConditionCode>(0);
LOG(FATAL) << "Unexpected opcode " << opcode;
}
- if (cUnit->instructionSet == kThumb2) {
- OpRegImm(cUnit, kOpCmp, rlSrc.lowReg, 0);
- OpCondBranch(cUnit, cond, taken);
+ if (cu->instruction_set == kThumb2) {
+ OpRegImm(cu, kOpCmp, rl_src.low_reg, 0);
+ OpCondBranch(cu, cond, taken);
} else {
- OpCmpImmBranch(cUnit, cond, rlSrc.lowReg, 0, taken);
+ OpCmpImmBranch(cu, cond, rl_src.low_reg, 0, taken);
}
- OpUnconditionalBranch(cUnit, fallThrough);
+ OpUnconditionalBranch(cu, fall_through);
}
-void GenIntToLong(CompilationUnit* cUnit, RegLocation rlDest,
- RegLocation rlSrc)
+void GenIntToLong(CompilationUnit* cu, RegLocation rl_dest,
+ RegLocation rl_src)
{
- RegLocation rlResult = EvalLoc(cUnit, rlDest, kCoreReg, true);
- if (rlSrc.location == kLocPhysReg) {
- OpRegCopy(cUnit, rlResult.lowReg, rlSrc.lowReg);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ if (rl_src.location == kLocPhysReg) {
+ OpRegCopy(cu, rl_result.low_reg, rl_src.low_reg);
} else {
- LoadValueDirect(cUnit, rlSrc, rlResult.lowReg);
+ LoadValueDirect(cu, rl_src, rl_result.low_reg);
}
- OpRegRegImm(cUnit, kOpAsr, rlResult.highReg, rlResult.lowReg, 31);
- StoreValueWide(cUnit, rlDest, rlResult);
+ OpRegRegImm(cu, kOpAsr, rl_result.high_reg, rl_result.low_reg, 31);
+ StoreValueWide(cu, rl_dest, rl_result);
}
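
An arithmetic right shift by 31 replicates the sign bit across the whole high word, so the pair (low, low >> 31) is exactly the 64-bit sign extension. In plain C++ (>> on a negative signed value is arithmetic on all mainstream compilers):

#include <cstdint>

static void IntToLong(int32_t lo, int32_t* out_lo, int32_t* out_hi) {
  *out_lo = lo;
  *out_hi = lo >> 31;  // kOpAsr by 31: 0 for non-negative, -1 for negative
}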
-void GenIntNarrowing(CompilationUnit* cUnit, Instruction::Code opcode,
- RegLocation rlDest, RegLocation rlSrc)
+void GenIntNarrowing(CompilationUnit* cu, Instruction::Code opcode,
+ RegLocation rl_dest, RegLocation rl_src)
{
- rlSrc = LoadValue(cUnit, rlSrc, kCoreReg);
- RegLocation rlResult = EvalLoc(cUnit, rlDest, kCoreReg, true);
+ rl_src = LoadValue(cu, rl_src, kCoreReg);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
OpKind op = kOpInvalid;
switch (opcode) {
case Instruction::INT_TO_BYTE:
@@ -408,8 +408,8 @@
default:
LOG(ERROR) << "Bad int conversion type";
}
- OpRegReg(cUnit, op, rlResult.lowReg, rlSrc.lowReg);
- StoreValue(cUnit, rlDest, rlResult);
+ OpRegReg(cu, op, rl_result.low_reg, rl_src.low_reg);
+ StoreValue(cu, rl_dest, rl_result);
}
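
Each narrowing is a single register-to-register op on the loaded value (the middle of the switch is elided above). Semantically they are the usual truncate-then-extend conversions:

#include <cstdint>

static int32_t IntToByte(int32_t x)  { return static_cast<int8_t>(x); }   // sign-extend low 8
static int32_t IntToChar(int32_t x)  { return static_cast<uint16_t>(x); } // zero-extend low 16
static int32_t IntToShort(int32_t x) { return static_cast<int16_t>(x); }  // sign-extend low 16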
/*
@@ -417,21 +417,21 @@
* Array::AllocFromCode(type_idx, method, count);
* Note: AllocFromCode will handle checks for errNegativeArraySize.
*/
-void GenNewArray(CompilationUnit* cUnit, uint32_t type_idx, RegLocation rlDest,
- RegLocation rlSrc)
+void GenNewArray(CompilationUnit* cu, uint32_t type_idx, RegLocation rl_dest,
+ RegLocation rl_src)
{
- FlushAllRegs(cUnit); /* Everything to home location */
- int funcOffset;
- if (cUnit->compiler->CanAccessTypeWithoutChecks(cUnit->method_idx,
- *cUnit->dex_file,
+ FlushAllRegs(cu); /* Everything to home location */
+ int func_offset;
+ if (cu->compiler->CanAccessTypeWithoutChecks(cu->method_idx,
+ *cu->dex_file,
type_idx)) {
- funcOffset = ENTRYPOINT_OFFSET(pAllocArrayFromCode);
+ func_offset = ENTRYPOINT_OFFSET(pAllocArrayFromCode);
} else {
- funcOffset= ENTRYPOINT_OFFSET(pAllocArrayFromCodeWithAccessCheck);
+ func_offset = ENTRYPOINT_OFFSET(pAllocArrayFromCodeWithAccessCheck);
}
- CallRuntimeHelperImmMethodRegLocation(cUnit, funcOffset, type_idx, rlSrc, true);
- RegLocation rlResult = GetReturn(cUnit, false);
- StoreValue(cUnit, rlDest, rlResult);
+ CallRuntimeHelperImmMethodRegLocation(cu, func_offset, type_idx, rl_src, true);
+ RegLocation rl_result = GetReturn(cu, false);
+ StoreValue(cu, rl_dest, rl_result);
}
/*
@@ -440,22 +440,22 @@
* code throws runtime exception "bad Filled array req" for 'D' and 'J'.
* Current code also throws internal unimp if not 'L', '[' or 'I'.
*/
-void GenFilledNewArray(CompilationUnit* cUnit, CallInfo* info)
+void GenFilledNewArray(CompilationUnit* cu, CallInfo* info)
{
- int elems = info->numArgWords;
- int typeIdx = info->index;
- FlushAllRegs(cUnit); /* Everything to home location */
- int funcOffset;
- if (cUnit->compiler->CanAccessTypeWithoutChecks(cUnit->method_idx,
- *cUnit->dex_file,
- typeIdx)) {
- funcOffset = ENTRYPOINT_OFFSET(pCheckAndAllocArrayFromCode);
+ int elems = info->num_arg_words;
+ int type_idx = info->index;
+ FlushAllRegs(cu); /* Everything to home location */
+ int func_offset;
+ if (cu->compiler->CanAccessTypeWithoutChecks(cu->method_idx,
+ *cu->dex_file,
+ type_idx)) {
+ func_offset = ENTRYPOINT_OFFSET(pCheckAndAllocArrayFromCode);
} else {
- funcOffset = ENTRYPOINT_OFFSET(pCheckAndAllocArrayFromCodeWithAccessCheck);
+ func_offset = ENTRYPOINT_OFFSET(pCheckAndAllocArrayFromCodeWithAccessCheck);
}
- CallRuntimeHelperImmMethodImm(cUnit, funcOffset, typeIdx, elems, true);
- FreeTemp(cUnit, TargetReg(kArg2));
- FreeTemp(cUnit, TargetReg(kArg1));
+ CallRuntimeHelperImmMethodImm(cu, func_offset, type_idx, elems, true);
+ FreeTemp(cu, TargetReg(kArg2));
+ FreeTemp(cu, TargetReg(kArg1));
/*
* NOTE: the implicit target for Instruction::FILLED_NEW_ARRAY is the
* return region. Because AllocFromCode placed the new array
@@ -463,14 +463,14 @@
* added, it may be necessary to additionally copy all return
* values to a home location in thread-local storage
*/
- LockTemp(cUnit, TargetReg(kRet0));
+ LockTemp(cu, TargetReg(kRet0));
// TODO: use the correct component size, currently all supported types
// share array alignment with ints (see comment at head of function)
size_t component_size = sizeof(int32_t);
// Having a range of 0 is legal
- if (info->isRange && (elems > 0)) {
+ if (info->is_range && (elems > 0)) {
/*
* Bit of ugliness here. We're going to generate a mem copy loop
* on the register range, but it is possible that some regs
@@ -480,10 +480,10 @@
* home location.
*/
for (int i = 0; i < elems; i++) {
- RegLocation loc = UpdateLoc(cUnit, info->args[i]);
+ RegLocation loc = UpdateLoc(cu, info->args[i]);
if (loc.location == kLocPhysReg) {
- StoreBaseDisp(cUnit, TargetReg(kSp), SRegOffset(cUnit, loc.sRegLow),
- loc.lowReg, kWord);
+ StoreBaseDisp(cu, TargetReg(kSp), SRegOffset(cu, loc.s_reg_low),
+ loc.low_reg, kWord);
}
}
/*
@@ -491,602 +491,602 @@
* this is an uncommon operation and isn't especially performance
* critical.
*/
- int rSrc = AllocTemp(cUnit);
- int rDst = AllocTemp(cUnit);
- int rIdx = AllocTemp(cUnit);
- int rVal = INVALID_REG;
- switch(cUnit->instructionSet) {
+ int r_src = AllocTemp(cu);
+ int r_dst = AllocTemp(cu);
+ int r_idx = AllocTemp(cu);
+ int r_val = INVALID_REG;
+ switch (cu->instruction_set) {
case kThumb2:
- rVal = TargetReg(kLr);
+ r_val = TargetReg(kLr);
break;
case kX86:
- FreeTemp(cUnit, TargetReg(kRet0));
- rVal = AllocTemp(cUnit);
+ FreeTemp(cu, TargetReg(kRet0));
+ r_val = AllocTemp(cu);
break;
case kMips:
- rVal = AllocTemp(cUnit);
+ r_val = AllocTemp(cu);
break;
- default: LOG(FATAL) << "Unexpected instruction set: " << cUnit->instructionSet;
+ default: LOG(FATAL) << "Unexpected instruction set: " << cu->instruction_set;
}
// Set up source pointer
- RegLocation rlFirst = info->args[0];
- OpRegRegImm(cUnit, kOpAdd, rSrc, TargetReg(kSp),
- SRegOffset(cUnit, rlFirst.sRegLow));
+ RegLocation rl_first = info->args[0];
+ OpRegRegImm(cu, kOpAdd, r_src, TargetReg(kSp),
+ SRegOffset(cu, rl_first.s_reg_low));
// Set up the target pointer
- OpRegRegImm(cUnit, kOpAdd, rDst, TargetReg(kRet0),
+ OpRegRegImm(cu, kOpAdd, r_dst, TargetReg(kRet0),
Array::DataOffset(component_size).Int32Value());
// Set up the loop counter (known to be > 0)
- LoadConstant(cUnit, rIdx, elems - 1);
+ LoadConstant(cu, r_idx, elems - 1);
// Generate the copy loop. Going backwards for convenience
- LIR* target = NewLIR0(cUnit, kPseudoTargetLabel);
+ LIR* target = NewLIR0(cu, kPseudoTargetLabel);
// Copy next element
- LoadBaseIndexed(cUnit, rSrc, rIdx, rVal, 2, kWord);
- StoreBaseIndexed(cUnit, rDst, rIdx, rVal, 2, kWord);
- FreeTemp(cUnit, rVal);
- OpDecAndBranch(cUnit, kCondGe, rIdx, target);
- if (cUnit->instructionSet == kX86) {
+ LoadBaseIndexed(cu, r_src, r_idx, r_val, 2, kWord);
+ StoreBaseIndexed(cu, r_dst, r_idx, r_val, 2, kWord);
+ FreeTemp(cu, r_val);
+ OpDecAndBranch(cu, kCondGe, r_idx, target);
+ if (cu->instruction_set == kX86) {
// Restore the target pointer
- OpRegRegImm(cUnit, kOpAdd, TargetReg(kRet0), rDst, -Array::DataOffset(component_size).Int32Value());
+ OpRegRegImm(cu, kOpAdd, TargetReg(kRet0), r_dst, -Array::DataOffset(component_size).Int32Value());
}
- } else if (!info->isRange) {
+ } else if (!info->is_range) {
// TUNING: interleave
for (int i = 0; i < elems; i++) {
- RegLocation rlArg = LoadValue(cUnit, info->args[i], kCoreReg);
- StoreBaseDisp(cUnit, TargetReg(kRet0),
+ RegLocation rl_arg = LoadValue(cu, info->args[i], kCoreReg);
+ StoreBaseDisp(cu, TargetReg(kRet0),
Array::DataOffset(component_size).Int32Value() +
- i * 4, rlArg.lowReg, kWord);
+ i * 4, rl_arg.low_reg, kWord);
// If the LoadValue caused a temp to be allocated, free it
- if (IsTemp(cUnit, rlArg.lowReg)) {
- FreeTemp(cUnit, rlArg.lowReg);
+ if (IsTemp(cu, rl_arg.low_reg)) {
+ FreeTemp(cu, rl_arg.low_reg);
}
}
}
if (info->result.location != kLocInvalid) {
- StoreValue(cUnit, info->result, GetReturn(cUnit, false /* not fp */));
+ StoreValue(cu, info->result, GetReturn(cu, false /* not fp */));
}
}
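
The generated range-copy loop counts down so that one decrement-and-branch (OpDecAndBranch with kCondGe) both steps the index and closes the loop; in C it is simply:

static void CopyRange(const int32_t* src, int32_t* dst, int elems) {
  for (int idx = elems - 1; idx >= 0; idx--) {  // caller guarantees elems > 0
    dst[idx] = src[idx];
  }
}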
-void GenSput(CompilationUnit* cUnit, uint32_t fieldIdx, RegLocation rlSrc,
- bool isLongOrDouble, bool isObject)
+void GenSput(CompilationUnit* cu, uint32_t field_idx, RegLocation rl_src,
+ bool is_long_or_double, bool is_object)
{
- int fieldOffset;
- int ssbIndex;
- bool isVolatile;
- bool isReferrersClass;
+ int field_offset;
+ int ssb_index;
+ bool is_volatile;
+ bool is_referrers_class;
- OatCompilationUnit mUnit(cUnit->class_loader, cUnit->class_linker, *cUnit->dex_file,
- cUnit->code_item, cUnit->method_idx, cUnit->access_flags);
+ OatCompilationUnit m_unit(cu->class_loader, cu->class_linker, *cu->dex_file,
+ cu->code_item, cu->method_idx, cu->access_flags);
- bool fastPath =
- cUnit->compiler->ComputeStaticFieldInfo(fieldIdx, &mUnit,
- fieldOffset, ssbIndex,
- isReferrersClass, isVolatile,
+ bool fast_path =
+ cu->compiler->ComputeStaticFieldInfo(field_idx, &m_unit,
+ field_offset, ssb_index,
+ is_referrers_class, is_volatile,
true);
- if (fastPath && !SLOW_FIELD_PATH) {
- DCHECK_GE(fieldOffset, 0);
+ if (fast_path && !SLOW_FIELD_PATH) {
+ DCHECK_GE(field_offset, 0);
int rBase;
- if (isReferrersClass) {
+ if (is_referrers_class) {
// Fast path, static storage base is this method's class
- RegLocation rlMethod = LoadCurrMethod(cUnit);
- rBase = AllocTemp(cUnit);
- LoadWordDisp(cUnit, rlMethod.lowReg,
+ RegLocation rl_method = LoadCurrMethod(cu);
+ rBase = AllocTemp(cu);
+ LoadWordDisp(cu, rl_method.low_reg,
AbstractMethod::DeclaringClassOffset().Int32Value(), rBase);
- if (IsTemp(cUnit, rlMethod.lowReg)) {
- FreeTemp(cUnit, rlMethod.lowReg);
+ if (IsTemp(cu, rl_method.low_reg)) {
+ FreeTemp(cu, rl_method.low_reg);
}
} else {
// Medium path, static storage base in a different class which
// requires checks that the other class is initialized.
- DCHECK_GE(ssbIndex, 0);
+ DCHECK_GE(ssb_index, 0);
// May do runtime call so everything to home locations.
- FlushAllRegs(cUnit);
+ FlushAllRegs(cu);
// Using fixed register to sync with possible call to runtime
// support.
- int rMethod = TargetReg(kArg1);
- LockTemp(cUnit, rMethod);
- LoadCurrMethodDirect(cUnit, rMethod);
+ int r_method = TargetReg(kArg1);
+ LockTemp(cu, r_method);
+ LoadCurrMethodDirect(cu, r_method);
rBase = TargetReg(kArg0);
- LockTemp(cUnit, rBase);
- LoadWordDisp(cUnit, rMethod,
+ LockTemp(cu, rBase);
+ LoadWordDisp(cu, r_method,
AbstractMethod::DexCacheInitializedStaticStorageOffset().Int32Value(),
rBase);
- LoadWordDisp(cUnit, rBase,
+ LoadWordDisp(cu, rBase,
Array::DataOffset(sizeof(Object*)).Int32Value() +
- sizeof(int32_t*) * ssbIndex, rBase);
+ sizeof(int32_t*) * ssb_index, rBase);
// rBase now points at appropriate static storage base (Class*)
// or NULL if not initialized. Check for NULL and call helper if NULL.
// TUNING: fast path should fall through
- LIR* branchOver = OpCmpImmBranch(cUnit, kCondNe, rBase, 0, NULL);
- LoadConstant(cUnit, TargetReg(kArg0), ssbIndex);
- CallRuntimeHelperImm(cUnit, ENTRYPOINT_OFFSET(pInitializeStaticStorage), ssbIndex, true);
- if (cUnit->instructionSet == kMips) {
+ LIR* branch_over = OpCmpImmBranch(cu, kCondNe, rBase, 0, NULL);
+ LoadConstant(cu, TargetReg(kArg0), ssb_index);
+ CallRuntimeHelperImm(cu, ENTRYPOINT_OFFSET(pInitializeStaticStorage), ssb_index, true);
+ if (cu->instruction_set == kMips) {
// For Arm, kRet0 = kArg0 = rBase, for Mips, we need to copy
- OpRegCopy(cUnit, rBase, TargetReg(kRet0));
+ OpRegCopy(cu, rBase, TargetReg(kRet0));
}
- LIR* skipTarget = NewLIR0(cUnit, kPseudoTargetLabel);
- branchOver->target = skipTarget;
- FreeTemp(cUnit, rMethod);
+ LIR* skip_target = NewLIR0(cu, kPseudoTargetLabel);
+ branch_over->target = skip_target;
+ FreeTemp(cu, r_method);
}
// rBase now holds static storage base
- if (isLongOrDouble) {
- rlSrc = LoadValueWide(cUnit, rlSrc, kAnyReg);
+ if (is_long_or_double) {
+ rl_src = LoadValueWide(cu, rl_src, kAnyReg);
} else {
- rlSrc = LoadValue(cUnit, rlSrc, kAnyReg);
+ rl_src = LoadValue(cu, rl_src, kAnyReg);
}
- if (isVolatile) {
- GenMemBarrier(cUnit, kStoreStore);
+ if (is_volatile) {
+ GenMemBarrier(cu, kStoreStore);
}
- if (isLongOrDouble) {
- StoreBaseDispWide(cUnit, rBase, fieldOffset, rlSrc.lowReg,
- rlSrc.highReg);
+ if (is_long_or_double) {
+ StoreBaseDispWide(cu, rBase, field_offset, rl_src.low_reg,
+ rl_src.high_reg);
} else {
- StoreWordDisp(cUnit, rBase, fieldOffset, rlSrc.lowReg);
+ StoreWordDisp(cu, rBase, field_offset, rl_src.low_reg);
}
- if (isVolatile) {
- GenMemBarrier(cUnit, kStoreLoad);
+ if (is_volatile) {
+ GenMemBarrier(cu, kStoreLoad);
}
- if (isObject) {
- MarkGCCard(cUnit, rlSrc.lowReg, rBase);
+ if (is_object) {
+ MarkGCCard(cu, rl_src.low_reg, rBase);
}
- FreeTemp(cUnit, rBase);
+ FreeTemp(cu, rBase);
} else {
- FlushAllRegs(cUnit); // Everything to home locations
- int setterOffset = isLongOrDouble ? ENTRYPOINT_OFFSET(pSet64Static) :
- (isObject ? ENTRYPOINT_OFFSET(pSetObjStatic)
+ FlushAllRegs(cu); // Everything to home locations
+ int setter_offset = is_long_or_double ? ENTRYPOINT_OFFSET(pSet64Static) :
+ (is_object ? ENTRYPOINT_OFFSET(pSetObjStatic)
: ENTRYPOINT_OFFSET(pSet32Static));
- CallRuntimeHelperImmRegLocation(cUnit, setterOffset, fieldIdx, rlSrc, true);
+ CallRuntimeHelperImmRegLocation(cu, setter_offset, field_idx, rl_src, true);
}
}
-void GenSget(CompilationUnit* cUnit, uint32_t fieldIdx, RegLocation rlDest,
- bool isLongOrDouble, bool isObject)
+void GenSget(CompilationUnit* cu, uint32_t field_idx, RegLocation rl_dest,
+ bool is_long_or_double, bool is_object)
{
- int fieldOffset;
- int ssbIndex;
- bool isVolatile;
- bool isReferrersClass;
+ int field_offset;
+ int ssb_index;
+ bool is_volatile;
+ bool is_referrers_class;
- OatCompilationUnit mUnit(cUnit->class_loader, cUnit->class_linker,
- *cUnit->dex_file,
- cUnit->code_item, cUnit->method_idx,
- cUnit->access_flags);
+ OatCompilationUnit m_unit(cu->class_loader, cu->class_linker,
+ *cu->dex_file,
+ cu->code_item, cu->method_idx,
+ cu->access_flags);
- bool fastPath =
- cUnit->compiler->ComputeStaticFieldInfo(fieldIdx, &mUnit,
- fieldOffset, ssbIndex,
- isReferrersClass, isVolatile,
+ bool fast_path =
+ cu->compiler->ComputeStaticFieldInfo(field_idx, &m_unit,
+ field_offset, ssb_index,
+ is_referrers_class, is_volatile,
false);
- if (fastPath && !SLOW_FIELD_PATH) {
- DCHECK_GE(fieldOffset, 0);
+ if (fast_path && !SLOW_FIELD_PATH) {
+ DCHECK_GE(field_offset, 0);
int rBase;
- if (isReferrersClass) {
+ if (is_referrers_class) {
// Fast path, static storage base is this method's class
- RegLocation rlMethod = LoadCurrMethod(cUnit);
- rBase = AllocTemp(cUnit);
- LoadWordDisp(cUnit, rlMethod.lowReg,
+ RegLocation rl_method = LoadCurrMethod(cu);
+ rBase = AllocTemp(cu);
+ LoadWordDisp(cu, rl_method.low_reg,
AbstractMethod::DeclaringClassOffset().Int32Value(), rBase);
} else {
// Medium path, static storage base in a different class which
// requires checks that the other class is initialized
- DCHECK_GE(ssbIndex, 0);
+ DCHECK_GE(ssb_index, 0);
// May do a runtime call, so move everything to home locations.
- FlushAllRegs(cUnit);
+ FlushAllRegs(cu);
// Using fixed register to sync with possible call to runtime
// support
- int rMethod = TargetReg(kArg1);
- LockTemp(cUnit, rMethod);
- LoadCurrMethodDirect(cUnit, rMethod);
+ int r_method = TargetReg(kArg1);
+ LockTemp(cu, r_method);
+ LoadCurrMethodDirect(cu, r_method);
rBase = TargetReg(kArg0);
- LockTemp(cUnit, rBase);
- LoadWordDisp(cUnit, rMethod,
+ LockTemp(cu, rBase);
+ LoadWordDisp(cu, r_method,
AbstractMethod::DexCacheInitializedStaticStorageOffset().Int32Value(),
rBase);
- LoadWordDisp(cUnit, rBase,
+ LoadWordDisp(cu, rBase,
Array::DataOffset(sizeof(Object*)).Int32Value() +
- sizeof(int32_t*) * ssbIndex, rBase);
+ sizeof(int32_t*) * ssb_index, rBase);
// rBase now points at appropriate static storage base (Class*)
// or NULL if not initialized. Check for NULL and call helper if NULL.
// TUNING: fast path should fall through
- LIR* branchOver = OpCmpImmBranch(cUnit, kCondNe, rBase, 0, NULL);
- CallRuntimeHelperImm(cUnit, ENTRYPOINT_OFFSET(pInitializeStaticStorage), ssbIndex, true);
- if (cUnit->instructionSet == kMips) {
+ LIR* branch_over = OpCmpImmBranch(cu, kCondNe, rBase, 0, NULL);
+ CallRuntimeHelperImm(cu, ENTRYPOINT_OFFSET(pInitializeStaticStorage), ssb_index, true);
+ if (cu->instruction_set == kMips) {
// For Arm, kRet0 = kArg0 = rBase; for Mips we need to copy
- OpRegCopy(cUnit, rBase, TargetReg(kRet0));
+ OpRegCopy(cu, rBase, TargetReg(kRet0));
}
- LIR* skipTarget = NewLIR0(cUnit, kPseudoTargetLabel);
- branchOver->target = skipTarget;
- FreeTemp(cUnit, rMethod);
+ LIR* skip_target = NewLIR0(cu, kPseudoTargetLabel);
+ branch_over->target = skip_target;
+ FreeTemp(cu, r_method);
}
// rBase now holds static storage base
- RegLocation rlResult = EvalLoc(cUnit, rlDest, kAnyReg, true);
- if (isVolatile) {
- GenMemBarrier(cUnit, kLoadLoad);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kAnyReg, true);
+ if (is_volatile) {
+ GenMemBarrier(cu, kLoadLoad);
}
- if (isLongOrDouble) {
- LoadBaseDispWide(cUnit, rBase, fieldOffset, rlResult.lowReg,
- rlResult.highReg, INVALID_SREG);
+ if (is_long_or_double) {
+ LoadBaseDispWide(cu, rBase, field_offset, rl_result.low_reg,
+ rl_result.high_reg, INVALID_SREG);
} else {
- LoadWordDisp(cUnit, rBase, fieldOffset, rlResult.lowReg);
+ LoadWordDisp(cu, rBase, field_offset, rl_result.low_reg);
}
- FreeTemp(cUnit, rBase);
- if (isLongOrDouble) {
- StoreValueWide(cUnit, rlDest, rlResult);
+ FreeTemp(cu, rBase);
+ if (is_long_or_double) {
+ StoreValueWide(cu, rl_dest, rl_result);
} else {
- StoreValue(cUnit, rlDest, rlResult);
+ StoreValue(cu, rl_dest, rl_result);
}
} else {
- FlushAllRegs(cUnit); // Everything to home locations
- int getterOffset = isLongOrDouble ? ENTRYPOINT_OFFSET(pGet64Static) :
- (isObject ? ENTRYPOINT_OFFSET(pGetObjStatic)
+ FlushAllRegs(cu); // Everything to home locations
+ int getter_offset = is_long_or_double ? ENTRYPOINT_OFFSET(pGet64Static) :
+ (is_object ? ENTRYPOINT_OFFSET(pGetObjStatic)
: ENTRYPOINT_OFFSET(pGet32Static));
- CallRuntimeHelperImm(cUnit, getterOffset, fieldIdx, true);
- if (isLongOrDouble) {
- RegLocation rlResult = GetReturnWide(cUnit, rlDest.fp);
- StoreValueWide(cUnit, rlDest, rlResult);
+ CallRuntimeHelperImm(cu, getter_offset, field_idx, true);
+ if (is_long_or_double) {
+ RegLocation rl_result = GetReturnWide(cu, rl_dest.fp);
+ StoreValueWide(cu, rl_dest, rl_result);
} else {
- RegLocation rlResult = GetReturn(cUnit, rlDest.fp);
- StoreValue(cUnit, rlDest, rlResult);
+ RegLocation rl_result = GetReturn(cu, rl_dest.fp);
+ StoreValue(cu, rl_dest, rl_result);
}
}
}
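
For reference, the barrier pairing GenSput/GenSget emit for volatile statics — kStoreStore before and kStoreLoad after a store, kLoadLoad after a load — corresponds to the following stand-alone C++ sketch, using std::atomic fences as conservative stand-ins for the generated barriers (StaticStorage is a hypothetical type, not an ART structure):

    #include <atomic>
    #include <cstdint>

    // Hypothetical stand-in for a class's static storage; the real base is
    // the Class* held in rBase after the resolution code above.
    struct StaticStorage {
      std::atomic<int32_t> field{0};
    };

    void VolatileSput(StaticStorage* base, int32_t value) {
      std::atomic_thread_fence(std::memory_order_release);  // kStoreStore (conservative)
      base->field.store(value, std::memory_order_relaxed);
      std::atomic_thread_fence(std::memory_order_seq_cst);  // kStoreLoad
    }

    int32_t VolatileSget(StaticStorage* base) {
      int32_t value = base->field.load(std::memory_order_relaxed);
      std::atomic_thread_fence(std::memory_order_acquire);  // kLoadLoad (conservative)
      return value;
    }
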
// Debugging routine - if null target, branch to DebugMe
-void GenShowTarget(CompilationUnit* cUnit)
+void GenShowTarget(CompilationUnit* cu)
{
- DCHECK_NE(cUnit->instructionSet, kX86) << "unimplemented GenShowTarget";
- LIR* branchOver = OpCmpImmBranch(cUnit, kCondNe, TargetReg(kInvokeTgt), 0, NULL);
- LoadWordDisp(cUnit, TargetReg(kSelf), ENTRYPOINT_OFFSET(pDebugMe), TargetReg(kInvokeTgt));
- LIR* target = NewLIR0(cUnit, kPseudoTargetLabel);
- branchOver->target = target;
+ DCHECK_NE(cu->instruction_set, kX86) << "unimplemented GenShowTarget";
+ LIR* branch_over = OpCmpImmBranch(cu, kCondNe, TargetReg(kInvokeTgt), 0, NULL);
+ LoadWordDisp(cu, TargetReg(kSelf), ENTRYPOINT_OFFSET(pDebugMe), TargetReg(kInvokeTgt));
+ LIR* target = NewLIR0(cu, kPseudoTargetLabel);
+ branch_over->target = target;
}
-void HandleSuspendLaunchPads(CompilationUnit *cUnit)
+void HandleSuspendLaunchPads(CompilationUnit *cu)
{
- LIR** suspendLabel = reinterpret_cast<LIR**>(cUnit->suspendLaunchpads.elemList);
- int numElems = cUnit->suspendLaunchpads.numUsed;
- int helperOffset = ENTRYPOINT_OFFSET(pTestSuspendFromCode);
- for (int i = 0; i < numElems; i++) {
- ResetRegPool(cUnit);
- ResetDefTracking(cUnit);
- LIR* lab = suspendLabel[i];
- LIR* resumeLab = reinterpret_cast<LIR*>(lab->operands[0]);
- cUnit->currentDalvikOffset = lab->operands[1];
- AppendLIR(cUnit, lab);
- int rTgt = CallHelperSetup(cUnit, helperOffset);
- CallHelper(cUnit, rTgt, helperOffset, true /* MarkSafepointPC */);
- OpUnconditionalBranch(cUnit, resumeLab);
+ LIR** suspend_label = reinterpret_cast<LIR**>(cu->suspend_launchpads.elem_list);
+ int num_elems = cu->suspend_launchpads.num_used;
+ int helper_offset = ENTRYPOINT_OFFSET(pTestSuspendFromCode);
+ for (int i = 0; i < num_elems; i++) {
+ ResetRegPool(cu);
+ ResetDefTracking(cu);
+ LIR* lab = suspend_label[i];
+ LIR* resume_lab = reinterpret_cast<LIR*>(lab->operands[0]);
+ cu->current_dalvik_offset = lab->operands[1];
+ AppendLIR(cu, lab);
+ int r_tgt = CallHelperSetup(cu, helper_offset);
+ CallHelper(cu, r_tgt, helper_offset, true /* MarkSafepointPC */);
+ OpUnconditionalBranch(cu, resume_lab);
}
}
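
A suspend launch pad is an out-of-line stub reached from a poll, typically at a loop back-edge; it calls pTestSuspendFromCode and branches back to resume_lab. A hedged sketch of that control shape, with a hypothetical flag standing in for the real per-thread state the compiled poll tests:

    #include <atomic>

    // Hypothetical suspend-request flag, not the actual ART thread state.
    std::atomic<bool> suspend_requested{false};

    // Stands in for pTestSuspendFromCode: cooperate with GC/debugger, then return.
    void TestSuspend() {
      suspend_requested.store(false, std::memory_order_relaxed);
    }

    void LoopWithSafepoint(int iterations) {
      for (int i = 0; i < iterations; ++i) {
        // Fast path: one relaxed load and a rarely-taken branch.
        if (suspend_requested.load(std::memory_order_relaxed)) {
          TestSuspend();  // the "launch pad"; falls back into the loop (resume_lab)
        }
        // ... loop body ...
      }
    }
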
-void HandleIntrinsicLaunchPads(CompilationUnit *cUnit)
+void HandleIntrinsicLaunchPads(CompilationUnit *cu)
{
- LIR** intrinsicLabel = reinterpret_cast<LIR**>(cUnit->intrinsicLaunchpads.elemList);
- int numElems = cUnit->intrinsicLaunchpads.numUsed;
- for (int i = 0; i < numElems; i++) {
- ResetRegPool(cUnit);
- ResetDefTracking(cUnit);
- LIR* lab = intrinsicLabel[i];
+ LIR** intrinsic_label = reinterpret_cast<LIR**>(cu->intrinsic_launchpads.elem_list);
+ int num_elems = cu->intrinsic_launchpads.num_used;
+ for (int i = 0; i < num_elems; i++) {
+ ResetRegPool(cu);
+ ResetDefTracking(cu);
+ LIR* lab = intrinsic_label[i];
CallInfo* info = reinterpret_cast<CallInfo*>(lab->operands[0]);
- cUnit->currentDalvikOffset = info->offset;
- AppendLIR(cUnit, lab);
+ cu->current_dalvik_offset = info->offset;
+ AppendLIR(cu, lab);
// NOTE: GenInvoke handles MarkSafepointPC
- GenInvoke(cUnit, info);
- LIR* resumeLab = reinterpret_cast<LIR*>(lab->operands[2]);
- if (resumeLab != NULL) {
- OpUnconditionalBranch(cUnit, resumeLab);
+ GenInvoke(cu, info);
+ LIR* resume_lab = reinterpret_cast<LIR*>(lab->operands[2]);
+ if (resume_lab != NULL) {
+ OpUnconditionalBranch(cu, resume_lab);
}
}
}
-void HandleThrowLaunchPads(CompilationUnit *cUnit)
+void HandleThrowLaunchPads(CompilationUnit *cu)
{
- LIR** throwLabel = reinterpret_cast<LIR**>(cUnit->throwLaunchpads.elemList);
- int numElems = cUnit->throwLaunchpads.numUsed;
- for (int i = 0; i < numElems; i++) {
- ResetRegPool(cUnit);
- ResetDefTracking(cUnit);
- LIR* lab = throwLabel[i];
- cUnit->currentDalvikOffset = lab->operands[1];
- AppendLIR(cUnit, lab);
- int funcOffset = 0;
+ LIR** throw_label = reinterpret_cast<LIR**>(cu->throw_launchpads.elem_list);
+ int num_elems = cu->throw_launchpads.num_used;
+ for (int i = 0; i < num_elems; i++) {
+ ResetRegPool(cu);
+ ResetDefTracking(cu);
+ LIR* lab = throw_label[i];
+ cu->current_dalvik_offset = lab->operands[1];
+ AppendLIR(cu, lab);
+ int func_offset = 0;
int v1 = lab->operands[2];
int v2 = lab->operands[3];
- bool targetX86 = (cUnit->instructionSet == kX86);
+ bool target_x86 = (cu->instruction_set == kX86);
switch (lab->operands[0]) {
case kThrowNullPointer:
- funcOffset = ENTRYPOINT_OFFSET(pThrowNullPointerFromCode);
+ func_offset = ENTRYPOINT_OFFSET(pThrowNullPointerFromCode);
break;
case kThrowArrayBounds:
// Move v1 (array index) to kArg0 and v2 (array length) to kArg1
if (v2 != TargetReg(kArg0)) {
- OpRegCopy(cUnit, TargetReg(kArg0), v1);
- if (targetX86) {
+ OpRegCopy(cu, TargetReg(kArg0), v1);
+ if (target_x86) {
// x86 leaves the array pointer in v2, so load the array length that the handler expects
- OpRegMem(cUnit, kOpMov, TargetReg(kArg1), v2, Array::LengthOffset().Int32Value());
+ OpRegMem(cu, kOpMov, TargetReg(kArg1), v2, Array::LengthOffset().Int32Value());
} else {
- OpRegCopy(cUnit, TargetReg(kArg1), v2);
+ OpRegCopy(cu, TargetReg(kArg1), v2);
}
} else {
if (v1 == TargetReg(kArg1)) {
// Swap v1 and v2, using kArg2 as a temp
- OpRegCopy(cUnit, TargetReg(kArg2), v1);
- if (targetX86) {
+ OpRegCopy(cu, TargetReg(kArg2), v1);
+ if (target_x86) {
// x86 leaves the array pointer in v2; load the array length that the handler expects
- OpRegMem(cUnit, kOpMov, TargetReg(kArg1), v2, Array::LengthOffset().Int32Value());
+ OpRegMem(cu, kOpMov, TargetReg(kArg1), v2, Array::LengthOffset().Int32Value());
} else {
- OpRegCopy(cUnit, TargetReg(kArg1), v2);
+ OpRegCopy(cu, TargetReg(kArg1), v2);
}
- OpRegCopy(cUnit, TargetReg(kArg0), TargetReg(kArg2));
+ OpRegCopy(cu, TargetReg(kArg0), TargetReg(kArg2));
} else {
- if (targetX86) {
+ if (target_x86) {
// x86 leaves the array pointer in v2; load the array length that the handler expects
- OpRegMem(cUnit, kOpMov, TargetReg(kArg1), v2, Array::LengthOffset().Int32Value());
+ OpRegMem(cu, kOpMov, TargetReg(kArg1), v2, Array::LengthOffset().Int32Value());
} else {
- OpRegCopy(cUnit, TargetReg(kArg1), v2);
+ OpRegCopy(cu, TargetReg(kArg1), v2);
}
- OpRegCopy(cUnit, TargetReg(kArg0), v1);
+ OpRegCopy(cu, TargetReg(kArg0), v1);
}
}
- funcOffset = ENTRYPOINT_OFFSET(pThrowArrayBoundsFromCode);
+ func_offset = ENTRYPOINT_OFFSET(pThrowArrayBoundsFromCode);
break;
case kThrowDivZero:
- funcOffset = ENTRYPOINT_OFFSET(pThrowDivZeroFromCode);
+ func_offset = ENTRYPOINT_OFFSET(pThrowDivZeroFromCode);
break;
case kThrowNoSuchMethod:
- OpRegCopy(cUnit, TargetReg(kArg0), v1);
- funcOffset =
+ OpRegCopy(cu, TargetReg(kArg0), v1);
+ func_offset =
ENTRYPOINT_OFFSET(pThrowNoSuchMethodFromCode);
break;
case kThrowStackOverflow:
- funcOffset = ENTRYPOINT_OFFSET(pThrowStackOverflowFromCode);
+ func_offset = ENTRYPOINT_OFFSET(pThrowStackOverflowFromCode);
// Restore stack alignment
- if (targetX86) {
- OpRegImm(cUnit, kOpAdd, TargetReg(kSp), cUnit->frameSize);
+ if (target_x86) {
+ OpRegImm(cu, kOpAdd, TargetReg(kSp), cu->frame_size);
} else {
- OpRegImm(cUnit, kOpAdd, TargetReg(kSp), (cUnit->numCoreSpills + cUnit->numFPSpills) * 4);
+ OpRegImm(cu, kOpAdd, TargetReg(kSp), (cu->num_core_spills + cu->num_fp_spills) * 4);
}
break;
default:
LOG(FATAL) << "Unexpected throw kind: " << lab->operands[0];
}
- ClobberCalleeSave(cUnit);
- int rTgt = CallHelperSetup(cUnit, funcOffset);
- CallHelper(cUnit, rTgt, funcOffset, true /* MarkSafepointPC */);
+ ClobberCalleeSave(cu);
+ int r_tgt = CallHelperSetup(cu, func_offset);
+ CallHelper(cu, r_tgt, func_offset, true /* MarkSafepointPC */);
}
}
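
The throw launch pads keep each fast path down to a single compare-and-branch; the pad marshals operands (for kThrowArrayBounds, index into kArg0 and length into kArg1) and calls the matching helper. A minimal sketch of the bounds case, with a hypothetical aborting helper in place of pThrowArrayBoundsFromCode, which really unwinds to a catch handler:

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>

    // Hypothetical stand-in for pThrowArrayBoundsFromCode.
    [[noreturn]] void ThrowArrayBounds(int32_t index, int32_t length) {
      std::fprintf(stderr, "index %d out of bounds for length %d\n", index, length);
      std::abort();
    }

    int32_t LoadElement(const int32_t* data, int32_t length, int32_t index) {
      // One unsigned compare covers both index >= length and index < 0.
      if (static_cast<uint32_t>(index) >= static_cast<uint32_t>(length)) {
        ThrowArrayBounds(index, length);  // launch pad: args in kArg0/kArg1
      }
      return data[index];
    }
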
-bool FastInstance(CompilationUnit* cUnit, uint32_t fieldIdx,
- int& fieldOffset, bool& isVolatile, bool isPut)
+bool FastInstance(CompilationUnit* cu, uint32_t field_idx,
+ int& field_offset, bool& is_volatile, bool is_put)
{
- OatCompilationUnit mUnit(cUnit->class_loader, cUnit->class_linker,
- *cUnit->dex_file,
- cUnit->code_item, cUnit->method_idx,
- cUnit->access_flags);
- return cUnit->compiler->ComputeInstanceFieldInfo(fieldIdx, &mUnit,
- fieldOffset, isVolatile, isPut);
+ OatCompilationUnit m_unit(cu->class_loader, cu->class_linker,
+ *cu->dex_file,
+ cu->code_item, cu->method_idx,
+ cu->access_flags);
+ return cu->compiler->ComputeInstanceFieldInfo(field_idx, &m_unit,
+ field_offset, is_volatile, is_put);
}
-void GenIGet(CompilationUnit* cUnit, uint32_t fieldIdx, int optFlags, OpSize size,
- RegLocation rlDest, RegLocation rlObj,
- bool isLongOrDouble, bool isObject)
+void GenIGet(CompilationUnit* cu, uint32_t field_idx, int opt_flags, OpSize size,
+ RegLocation rl_dest, RegLocation rl_obj,
+ bool is_long_or_double, bool is_object)
{
- int fieldOffset;
- bool isVolatile;
+ int field_offset;
+ bool is_volatile;
- bool fastPath = FastInstance(cUnit, fieldIdx, fieldOffset, isVolatile, false);
+ bool fast_path = FastInstance(cu, field_idx, field_offset, is_volatile, false);
- if (fastPath && !SLOW_FIELD_PATH) {
- RegLocation rlResult;
- RegisterClass regClass = oatRegClassBySize(size);
- DCHECK_GE(fieldOffset, 0);
- rlObj = LoadValue(cUnit, rlObj, kCoreReg);
- if (isLongOrDouble) {
- DCHECK(rlDest.wide);
- GenNullCheck(cUnit, rlObj.sRegLow, rlObj.lowReg, optFlags);
- if (cUnit->instructionSet == kX86) {
- rlResult = EvalLoc(cUnit, rlDest, regClass, true);
- GenNullCheck(cUnit, rlObj.sRegLow, rlObj.lowReg, optFlags);
- LoadBaseDispWide(cUnit, rlObj.lowReg, fieldOffset, rlResult.lowReg,
- rlResult.highReg, rlObj.sRegLow);
- if (isVolatile) {
- GenMemBarrier(cUnit, kLoadLoad);
+ if (fast_path && !SLOW_FIELD_PATH) {
+ RegLocation rl_result;
+ RegisterClass reg_class = oat_reg_class_by_size(size);
+ DCHECK_GE(field_offset, 0);
+ rl_obj = LoadValue(cu, rl_obj, kCoreReg);
+ if (is_long_or_double) {
+ DCHECK(rl_dest.wide);
+ GenNullCheck(cu, rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
+ if (cu->instruction_set == kX86) {
+ rl_result = EvalLoc(cu, rl_dest, reg_class, true);
+ GenNullCheck(cu, rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
+ LoadBaseDispWide(cu, rl_obj.low_reg, field_offset, rl_result.low_reg,
+ rl_result.high_reg, rl_obj.s_reg_low);
+ if (is_volatile) {
+ GenMemBarrier(cu, kLoadLoad);
}
} else {
- int regPtr = AllocTemp(cUnit);
- OpRegRegImm(cUnit, kOpAdd, regPtr, rlObj.lowReg, fieldOffset);
- rlResult = EvalLoc(cUnit, rlDest, regClass, true);
- LoadPair(cUnit, regPtr, rlResult.lowReg, rlResult.highReg);
- if (isVolatile) {
- GenMemBarrier(cUnit, kLoadLoad);
+ int reg_ptr = AllocTemp(cu);
+ OpRegRegImm(cu, kOpAdd, reg_ptr, rl_obj.low_reg, field_offset);
+ rl_result = EvalLoc(cu, rl_dest, reg_class, true);
+ LoadPair(cu, reg_ptr, rl_result.low_reg, rl_result.high_reg);
+ if (is_volatile) {
+ GenMemBarrier(cu, kLoadLoad);
}
- FreeTemp(cUnit, regPtr);
+ FreeTemp(cu, reg_ptr);
}
- StoreValueWide(cUnit, rlDest, rlResult);
+ StoreValueWide(cu, rl_dest, rl_result);
} else {
- rlResult = EvalLoc(cUnit, rlDest, regClass, true);
- GenNullCheck(cUnit, rlObj.sRegLow, rlObj.lowReg, optFlags);
- LoadBaseDisp(cUnit, rlObj.lowReg, fieldOffset, rlResult.lowReg,
- kWord, rlObj.sRegLow);
- if (isVolatile) {
- GenMemBarrier(cUnit, kLoadLoad);
+ rl_result = EvalLoc(cu, rl_dest, reg_class, true);
+ GenNullCheck(cu, rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
+ LoadBaseDisp(cu, rl_obj.low_reg, field_offset, rl_result.low_reg,
+ kWord, rl_obj.s_reg_low);
+ if (is_volatile) {
+ GenMemBarrier(cu, kLoadLoad);
}
- StoreValue(cUnit, rlDest, rlResult);
+ StoreValue(cu, rl_dest, rl_result);
}
} else {
- int getterOffset = isLongOrDouble ? ENTRYPOINT_OFFSET(pGet64Instance) :
- (isObject ? ENTRYPOINT_OFFSET(pGetObjInstance)
+ int getter_offset = is_long_or_double ? ENTRYPOINT_OFFSET(pGet64Instance) :
+ (is_object ? ENTRYPOINT_OFFSET(pGetObjInstance)
: ENTRYPOINT_OFFSET(pGet32Instance));
- CallRuntimeHelperImmRegLocation(cUnit, getterOffset, fieldIdx, rlObj, true);
- if (isLongOrDouble) {
- RegLocation rlResult = GetReturnWide(cUnit, rlDest.fp);
- StoreValueWide(cUnit, rlDest, rlResult);
+ CallRuntimeHelperImmRegLocation(cu, getter_offset, field_idx, rl_obj, true);
+ if (is_long_or_double) {
+ RegLocation rl_result = GetReturnWide(cu, rl_dest.fp);
+ StoreValueWide(cu, rl_dest, rl_result);
} else {
- RegLocation rlResult = GetReturn(cUnit, rlDest.fp);
- StoreValue(cUnit, rlDest, rlResult);
+ RegLocation rl_result = GetReturn(cu, rl_dest.fp);
+ StoreValue(cu, rl_dest, rl_result);
}
}
}
-void GenIPut(CompilationUnit* cUnit, uint32_t fieldIdx, int optFlags, OpSize size,
- RegLocation rlSrc, RegLocation rlObj, bool isLongOrDouble, bool isObject)
+void GenIPut(CompilationUnit* cu, uint32_t field_idx, int opt_flags, OpSize size,
+ RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double, bool is_object)
{
- int fieldOffset;
- bool isVolatile;
+ int field_offset;
+ bool is_volatile;
- bool fastPath = FastInstance(cUnit, fieldIdx, fieldOffset, isVolatile,
+ bool fast_path = FastInstance(cu, field_idx, field_offset, is_volatile,
true);
- if (fastPath && !SLOW_FIELD_PATH) {
- RegisterClass regClass = oatRegClassBySize(size);
- DCHECK_GE(fieldOffset, 0);
- rlObj = LoadValue(cUnit, rlObj, kCoreReg);
- if (isLongOrDouble) {
- int regPtr;
- rlSrc = LoadValueWide(cUnit, rlSrc, kAnyReg);
- GenNullCheck(cUnit, rlObj.sRegLow, rlObj.lowReg, optFlags);
- regPtr = AllocTemp(cUnit);
- OpRegRegImm(cUnit, kOpAdd, regPtr, rlObj.lowReg, fieldOffset);
- if (isVolatile) {
- GenMemBarrier(cUnit, kStoreStore);
+ if (fast_path && !SLOW_FIELD_PATH) {
+ RegisterClass reg_class = oat_reg_class_by_size(size);
+ DCHECK_GE(field_offset, 0);
+ rl_obj = LoadValue(cu, rl_obj, kCoreReg);
+ if (is_long_or_double) {
+ int reg_ptr;
+ rl_src = LoadValueWide(cu, rl_src, kAnyReg);
+ GenNullCheck(cu, rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
+ reg_ptr = AllocTemp(cu);
+ OpRegRegImm(cu, kOpAdd, reg_ptr, rl_obj.low_reg, field_offset);
+ if (is_volatile) {
+ GenMemBarrier(cu, kStoreStore);
}
- StoreBaseDispWide(cUnit, regPtr, 0, rlSrc.lowReg, rlSrc.highReg);
- if (isVolatile) {
- GenMemBarrier(cUnit, kLoadLoad);
+ StoreBaseDispWide(cu, reg_ptr, 0, rl_src.low_reg, rl_src.high_reg);
+ if (is_volatile) {
+ GenMemBarrier(cu, kLoadLoad);
}
- FreeTemp(cUnit, regPtr);
+ FreeTemp(cu, reg_ptr);
} else {
- rlSrc = LoadValue(cUnit, rlSrc, regClass);
- GenNullCheck(cUnit, rlObj.sRegLow, rlObj.lowReg, optFlags);
- if (isVolatile) {
- GenMemBarrier(cUnit, kStoreStore);
+ rl_src = LoadValue(cu, rl_src, reg_class);
+ GenNullCheck(cu, rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
+ if (is_volatile) {
+ GenMemBarrier(cu, kStoreStore);
}
- StoreBaseDisp(cUnit, rlObj.lowReg, fieldOffset, rlSrc.lowReg, kWord);
- if (isVolatile) {
- GenMemBarrier(cUnit, kLoadLoad);
+ StoreBaseDisp(cu, rl_obj.low_reg, field_offset, rl_src.low_reg, kWord);
+ if (is_volatile) {
+ GenMemBarrier(cu, kLoadLoad);
}
- if (isObject) {
- MarkGCCard(cUnit, rlSrc.lowReg, rlObj.lowReg);
+ if (is_object) {
+ MarkGCCard(cu, rl_src.low_reg, rl_obj.low_reg);
}
}
} else {
- int setterOffset = isLongOrDouble ? ENTRYPOINT_OFFSET(pSet64Instance) :
- (isObject ? ENTRYPOINT_OFFSET(pSetObjInstance)
+ int setter_offset = is_long_or_double ? ENTRYPOINT_OFFSET(pSet64Instance) :
+ (is_object ? ENTRYPOINT_OFFSET(pSetObjInstance)
: ENTRYPOINT_OFFSET(pSet32Instance));
- CallRuntimeHelperImmRegLocationRegLocation(cUnit, setterOffset, fieldIdx, rlObj, rlSrc, true);
+ CallRuntimeHelperImmRegLocationRegLocation(cu, setter_offset, field_idx, rl_obj, rl_src, true);
}
}
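
FastInstance decides at compile time whether the access can be lowered to a fixed byte offset; if so, GenIGet/GenIPut reduce to a null check plus one load or store at [object + field_offset], with the same volatile barriers as the static case. A hedged sketch with a hypothetical layout — the real offset comes from ComputeInstanceFieldInfo:

    #include <cstdint>
    #include <cstdlib>

    [[noreturn]] void ThrowNullPointer() { std::abort(); }  // stands in for the NPE launch pad

    // Hypothetical object layout; ART derives the field offset from dex metadata.
    struct Obj {
      uint32_t klass;
      int32_t some_field;
    };

    int32_t FastIGet(const Obj* obj) {
      if (obj == nullptr) ThrowNullPointer();  // GenNullCheck
      return obj->some_field;                  // LoadBaseDisp at [obj + field_offset]
    }

    void FastIPut(Obj* obj, int32_t value) {
      if (obj == nullptr) ThrowNullPointer();
      obj->some_field = value;                 // StoreBaseDisp
    }
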
-void GenConstClass(CompilationUnit* cUnit, uint32_t type_idx,
- RegLocation rlDest)
+void GenConstClass(CompilationUnit* cu, uint32_t type_idx,
+ RegLocation rl_dest)
{
- RegLocation rlMethod = LoadCurrMethod(cUnit);
- int resReg = AllocTemp(cUnit);
- RegLocation rlResult = EvalLoc(cUnit, rlDest, kCoreReg, true);
- if (!cUnit->compiler->CanAccessTypeWithoutChecks(cUnit->method_idx,
- *cUnit->dex_file,
+ RegLocation rl_method = LoadCurrMethod(cu);
+ int res_reg = AllocTemp(cu);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ if (!cu->compiler->CanAccessTypeWithoutChecks(cu->method_idx,
+ *cu->dex_file,
type_idx)) {
// Call out to helper which resolves type and verifies access.
// Resolved type returned in kRet0.
- CallRuntimeHelperImmReg(cUnit, ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode),
- type_idx, rlMethod.lowReg, true);
- RegLocation rlResult = GetReturn(cUnit, false);
- StoreValue(cUnit, rlDest, rlResult);
+ CallRuntimeHelperImmReg(cu, ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode),
+ type_idx, rl_method.low_reg, true);
+ RegLocation rl_result = GetReturn(cu, false);
+ StoreValue(cu, rl_dest, rl_result);
} else {
// We don't need access checks; load type from dex cache
int32_t dex_cache_offset =
AbstractMethod::DexCacheResolvedTypesOffset().Int32Value();
- LoadWordDisp(cUnit, rlMethod.lowReg, dex_cache_offset, resReg);
+ LoadWordDisp(cu, rl_method.low_reg, dex_cache_offset, res_reg);
int32_t offset_of_type =
Array::DataOffset(sizeof(Class*)).Int32Value() + (sizeof(Class*)
* type_idx);
- LoadWordDisp(cUnit, resReg, offset_of_type, rlResult.lowReg);
- if (!cUnit->compiler->CanAssumeTypeIsPresentInDexCache(*cUnit->dex_file,
+ LoadWordDisp(cu, res_reg, offset_of_type, rl_result.low_reg);
+ if (!cu->compiler->CanAssumeTypeIsPresentInDexCache(*cu->dex_file,
type_idx) || SLOW_TYPE_PATH) {
// Slow path, at runtime test if type is null and if so initialize
- FlushAllRegs(cUnit);
- LIR* branch1 = OpCmpImmBranch(cUnit, kCondEq, rlResult.lowReg, 0, NULL);
+ FlushAllRegs(cu);
+ LIR* branch1 = OpCmpImmBranch(cu, kCondEq, rl_result.low_reg, 0, NULL);
// Resolved, store and hop over following code
- StoreValue(cUnit, rlDest, rlResult);
+ StoreValue(cu, rl_dest, rl_result);
/*
* Because we have stores of the target value on two paths,
* clobber temp tracking for the destination using the ssa name
*/
- ClobberSReg(cUnit, rlDest.sRegLow);
- LIR* branch2 = OpUnconditionalBranch(cUnit,0);
+ ClobberSReg(cu, rl_dest.s_reg_low);
+ LIR* branch2 = OpUnconditionalBranch(cu, 0);
// TUNING: move slow path to end & remove unconditional branch
- LIR* target1 = NewLIR0(cUnit, kPseudoTargetLabel);
+ LIR* target1 = NewLIR0(cu, kPseudoTargetLabel);
// Call out to helper, which will return resolved type in kArg0
- CallRuntimeHelperImmReg(cUnit, ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx,
- rlMethod.lowReg, true);
- RegLocation rlResult = GetReturn(cUnit, false);
- StoreValue(cUnit, rlDest, rlResult);
+ CallRuntimeHelperImmReg(cu, ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx,
+ rl_method.low_reg, true);
+ RegLocation rl_result = GetReturn(cu, false);
+ StoreValue(cu, rl_dest, rl_result);
/*
* Because we have stores of the target value on two paths,
* clobber temp tracking for the destination using the ssa name
*/
- ClobberSReg(cUnit, rlDest.sRegLow);
+ ClobberSReg(cu, rl_dest.s_reg_low);
// Rejoin code paths
- LIR* target2 = NewLIR0(cUnit, kPseudoTargetLabel);
+ LIR* target2 = NewLIR0(cu, kPseudoTargetLabel);
branch1->target = target1;
branch2->target = target2;
} else {
// Fast path, we're done - just store result
- StoreValue(cUnit, rlDest, rlResult);
+ StoreValue(cu, rl_dest, rl_result);
}
}
}
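
Stripped of register management, the shape GenConstClass emits is: index the dex cache's resolved-types array by type_idx, treat a null slot as unresolved, and have both paths store to the same destination (hence the ClobberSReg calls). A sketch with hypothetical types:

    struct Class {};  // opaque stand-in

    // Stands in for pInitializeTypeFromCode; the real helper resolves the
    // type, populates the cache, and can throw.
    Class* ResolveType(Class** resolved_types, unsigned type_idx) {
      static Class resolved;
      resolved_types[type_idx] = &resolved;
      return &resolved;
    }

    Class* ConstClass(Class** resolved_types, unsigned type_idx) {
      Class* klass = resolved_types[type_idx];          // fast path: one indexed load
      if (klass == nullptr) {
        klass = ResolveType(resolved_types, type_idx);  // slow path (branch1)
      }
      return klass;                                     // paths rejoin (target2)
    }
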
-void GenConstString(CompilationUnit* cUnit, uint32_t string_idx,
- RegLocation rlDest)
+void GenConstString(CompilationUnit* cu, uint32_t string_idx,
+ RegLocation rl_dest)
{
/* NOTE: Most strings should be available at compile time */
int32_t offset_of_string = Array::DataOffset(sizeof(String*)).Int32Value() +
(sizeof(String*) * string_idx);
- if (!cUnit->compiler->CanAssumeStringIsPresentInDexCache(
- *cUnit->dex_file, string_idx) || SLOW_STRING_PATH) {
+ if (!cu->compiler->CanAssumeStringIsPresentInDexCache(
+ *cu->dex_file, string_idx) || SLOW_STRING_PATH) {
// slow path, resolve string if not in dex cache
- FlushAllRegs(cUnit);
- LockCallTemps(cUnit); // Using explicit registers
- LoadCurrMethodDirect(cUnit, TargetReg(kArg2));
- LoadWordDisp(cUnit, TargetReg(kArg2),
+ FlushAllRegs(cu);
+ LockCallTemps(cu); // Using explicit registers
+ LoadCurrMethodDirect(cu, TargetReg(kArg2));
+ LoadWordDisp(cu, TargetReg(kArg2),
AbstractMethod::DexCacheStringsOffset().Int32Value(), TargetReg(kArg0));
// Might call out to helper, which will return resolved string in kRet0
- int rTgt = CallHelperSetup(cUnit, ENTRYPOINT_OFFSET(pResolveStringFromCode));
- LoadWordDisp(cUnit, TargetReg(kArg0), offset_of_string, TargetReg(kRet0));
- LoadConstant(cUnit, TargetReg(kArg1), string_idx);
- if (cUnit->instructionSet == kThumb2) {
- OpRegImm(cUnit, kOpCmp, TargetReg(kRet0), 0); // Is resolved?
- GenBarrier(cUnit);
+ int r_tgt = CallHelperSetup(cu, ENTRYPOINT_OFFSET(pResolveStringFromCode));
+ LoadWordDisp(cu, TargetReg(kArg0), offset_of_string, TargetReg(kRet0));
+ LoadConstant(cu, TargetReg(kArg1), string_idx);
+ if (cu->instruction_set == kThumb2) {
+ OpRegImm(cu, kOpCmp, TargetReg(kRet0), 0); // Is resolved?
+ GenBarrier(cu);
// For testing, always force through helper
if (!EXERCISE_SLOWEST_STRING_PATH) {
- OpIT(cUnit, kArmCondEq, "T");
+ OpIT(cu, kArmCondEq, "T");
}
- OpRegCopy(cUnit, TargetReg(kArg0), TargetReg(kArg2)); // .eq
- LIR* callInst = OpReg(cUnit, kOpBlx, rTgt); // .eq, helper(Method*, string_idx)
- MarkSafepointPC(cUnit, callInst);
- FreeTemp(cUnit, rTgt);
- } else if (cUnit->instructionSet == kMips) {
- LIR* branch = OpCmpImmBranch(cUnit, kCondNe, TargetReg(kRet0), 0, NULL);
- OpRegCopy(cUnit, TargetReg(kArg0), TargetReg(kArg2)); // .eq
- LIR* callInst = OpReg(cUnit, kOpBlx, rTgt);
- MarkSafepointPC(cUnit, callInst);
- FreeTemp(cUnit, rTgt);
- LIR* target = NewLIR0(cUnit, kPseudoTargetLabel);
+ OpRegCopy(cu, TargetReg(kArg0), TargetReg(kArg2)); // .eq
+ LIR* call_inst = OpReg(cu, kOpBlx, r_tgt); // .eq, helper(Method*, string_idx)
+ MarkSafepointPC(cu, call_inst);
+ FreeTemp(cu, r_tgt);
+ } else if (cu->instruction_set == kMips) {
+ LIR* branch = OpCmpImmBranch(cu, kCondNe, TargetReg(kRet0), 0, NULL);
+ OpRegCopy(cu, TargetReg(kArg0), TargetReg(kArg2)); // .eq
+ LIR* call_inst = OpReg(cu, kOpBlx, r_tgt);
+ MarkSafepointPC(cu, call_inst);
+ FreeTemp(cu, r_tgt);
+ LIR* target = NewLIR0(cu, kPseudoTargetLabel);
branch->target = target;
} else {
- DCHECK_EQ(cUnit->instructionSet, kX86);
- CallRuntimeHelperRegReg(cUnit, ENTRYPOINT_OFFSET(pResolveStringFromCode), TargetReg(kArg2), TargetReg(kArg1), true);
+ DCHECK_EQ(cu->instruction_set, kX86);
+ CallRuntimeHelperRegReg(cu, ENTRYPOINT_OFFSET(pResolveStringFromCode), TargetReg(kArg2), TargetReg(kArg1), true);
}
- GenBarrier(cUnit);
- StoreValue(cUnit, rlDest, GetReturn(cUnit, false));
+ GenBarrier(cu);
+ StoreValue(cu, rl_dest, GetReturn(cu, false));
} else {
- RegLocation rlMethod = LoadCurrMethod(cUnit);
- int resReg = AllocTemp(cUnit);
- RegLocation rlResult = EvalLoc(cUnit, rlDest, kCoreReg, true);
- LoadWordDisp(cUnit, rlMethod.lowReg,
- AbstractMethod::DexCacheStringsOffset().Int32Value(), resReg);
- LoadWordDisp(cUnit, resReg, offset_of_string, rlResult.lowReg);
- StoreValue(cUnit, rlDest, rlResult);
+ RegLocation rl_method = LoadCurrMethod(cu);
+ int res_reg = AllocTemp(cu);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ LoadWordDisp(cu, rl_method.low_reg,
+ AbstractMethod::DexCacheStringsOffset().Int32Value(), res_reg);
+ LoadWordDisp(cu, res_reg, offset_of_string, rl_result.low_reg);
+ StoreValue(cu, rl_dest, rl_result);
}
}
@@ -1094,193 +1094,193 @@
* Let helper function take care of everything. Will
* call Class::NewInstanceFromCode(type_idx, method);
*/
-void GenNewInstance(CompilationUnit* cUnit, uint32_t type_idx, RegLocation rlDest)
+void GenNewInstance(CompilationUnit* cu, uint32_t type_idx, RegLocation rl_dest)
{
- FlushAllRegs(cUnit); /* Everything to home location */
+ FlushAllRegs(cu); /* Everything to home location */
// Alloc will always check for resolution; do we also need to verify
// access because the verifier was unable to?
- int funcOffset;
- if (cUnit->compiler->CanAccessInstantiableTypeWithoutChecks(
- cUnit->method_idx, *cUnit->dex_file, type_idx)) {
- funcOffset = ENTRYPOINT_OFFSET(pAllocObjectFromCode);
+ int func_offset;
+ if (cu->compiler->CanAccessInstantiableTypeWithoutChecks(
+ cu->method_idx, *cu->dex_file, type_idx)) {
+ func_offset = ENTRYPOINT_OFFSET(pAllocObjectFromCode);
} else {
- funcOffset = ENTRYPOINT_OFFSET(pAllocObjectFromCodeWithAccessCheck);
+ func_offset = ENTRYPOINT_OFFSET(pAllocObjectFromCodeWithAccessCheck);
}
- CallRuntimeHelperImmMethod(cUnit, funcOffset, type_idx, true);
- RegLocation rlResult = GetReturn(cUnit, false);
- StoreValue(cUnit, rlDest, rlResult);
+ CallRuntimeHelperImmMethod(cu, func_offset, type_idx, true);
+ RegLocation rl_result = GetReturn(cu, false);
+ StoreValue(cu, rl_dest, rl_result);
}
-void GenMoveException(CompilationUnit* cUnit, RegLocation rlDest)
+void GenMoveException(CompilationUnit* cu, RegLocation rl_dest)
{
- FlushAllRegs(cUnit); /* Everything to home location */
- int funcOffset = ENTRYPOINT_OFFSET(pGetAndClearException);
- if (cUnit->instructionSet == kX86) {
+ FlushAllRegs(cu); /* Everything to home location */
+ int func_offset = ENTRYPOINT_OFFSET(pGetAndClearException);
+ if (cu->instruction_set == kX86) {
// Runtime helper will load argument for x86.
- CallRuntimeHelperReg(cUnit, funcOffset, TargetReg(kArg0), false);
+ CallRuntimeHelperReg(cu, func_offset, TargetReg(kArg0), false);
} else {
- CallRuntimeHelperReg(cUnit, funcOffset, TargetReg(kSelf), false);
+ CallRuntimeHelperReg(cu, func_offset, TargetReg(kSelf), false);
}
- RegLocation rlResult = GetReturn(cUnit, false);
- StoreValue(cUnit, rlDest, rlResult);
+ RegLocation rl_result = GetReturn(cu, false);
+ StoreValue(cu, rl_dest, rl_result);
}
-void GenThrow(CompilationUnit* cUnit, RegLocation rlSrc)
+void GenThrow(CompilationUnit* cu, RegLocation rl_src)
{
- FlushAllRegs(cUnit);
- CallRuntimeHelperRegLocation(cUnit, ENTRYPOINT_OFFSET(pDeliverException), rlSrc, true);
+ FlushAllRegs(cu);
+ CallRuntimeHelperRegLocation(cu, ENTRYPOINT_OFFSET(pDeliverException), rl_src, true);
}
-void GenInstanceof(CompilationUnit* cUnit, uint32_t type_idx, RegLocation rlDest,
- RegLocation rlSrc)
+void GenInstanceof(CompilationUnit* cu, uint32_t type_idx, RegLocation rl_dest,
+ RegLocation rl_src)
{
- FlushAllRegs(cUnit);
+ FlushAllRegs(cu);
// May generate a call - use explicit registers
- LockCallTemps(cUnit);
- LoadCurrMethodDirect(cUnit, TargetReg(kArg1)); // kArg1 <= current Method*
- int classReg = TargetReg(kArg2); // kArg2 will hold the Class*
- if (!cUnit->compiler->CanAccessTypeWithoutChecks(cUnit->method_idx,
- *cUnit->dex_file,
+ LockCallTemps(cu);
+ LoadCurrMethodDirect(cu, TargetReg(kArg1)); // kArg1 <= current Method*
+ int class_reg = TargetReg(kArg2); // kArg2 will hold the Class*
+ if (!cu->compiler->CanAccessTypeWithoutChecks(cu->method_idx,
+ *cu->dex_file,
type_idx)) {
// Check we have access to type_idx and if not throw IllegalAccessError,
// returns Class* in kArg0
- CallRuntimeHelperImm(cUnit, ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode),
+ CallRuntimeHelperImm(cu, ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode),
type_idx, true);
- OpRegCopy(cUnit, classReg, TargetReg(kRet0)); // Align usage with fast path
- LoadValueDirectFixed(cUnit, rlSrc, TargetReg(kArg0)); // kArg0 <= ref
+ OpRegCopy(cu, class_reg, TargetReg(kRet0)); // Align usage with fast path
+ LoadValueDirectFixed(cu, rl_src, TargetReg(kArg0)); // kArg0 <= ref
} else {
- // Load dex cache entry into classReg (kArg2)
- LoadValueDirectFixed(cUnit, rlSrc, TargetReg(kArg0)); // kArg0 <= ref
- LoadWordDisp(cUnit, TargetReg(kArg1),
- AbstractMethod::DexCacheResolvedTypesOffset().Int32Value(), classReg);
+ // Load dex cache entry into class_reg (kArg2)
+ LoadValueDirectFixed(cu, rl_src, TargetReg(kArg0)); // kArg0 <= ref
+ LoadWordDisp(cu, TargetReg(kArg1),
+ AbstractMethod::DexCacheResolvedTypesOffset().Int32Value(), class_reg);
int32_t offset_of_type =
Array::DataOffset(sizeof(Class*)).Int32Value() + (sizeof(Class*)
* type_idx);
- LoadWordDisp(cUnit, classReg, offset_of_type, classReg);
- if (!cUnit->compiler->CanAssumeTypeIsPresentInDexCache(
- *cUnit->dex_file, type_idx)) {
+ LoadWordDisp(cu, class_reg, offset_of_type, class_reg);
+ if (!cu->compiler->CanAssumeTypeIsPresentInDexCache(
+ *cu->dex_file, type_idx)) {
// Need to test presence of type in dex cache at runtime
- LIR* hopBranch = OpCmpImmBranch(cUnit, kCondNe, classReg, 0, NULL);
+ LIR* hop_branch = OpCmpImmBranch(cu, kCondNe, class_reg, 0, NULL);
// Not resolved
// Call out to helper, which will return resolved type in kRet0
- CallRuntimeHelperImm(cUnit, ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx, true);
- OpRegCopy(cUnit, TargetReg(kArg2), TargetReg(kRet0)); // Align usage with fast path
- LoadValueDirectFixed(cUnit, rlSrc, TargetReg(kArg0)); /* reload Ref */
+ CallRuntimeHelperImm(cu, ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx, true);
+ OpRegCopy(cu, TargetReg(kArg2), TargetReg(kRet0)); // Align usage with fast path
+ LoadValueDirectFixed(cu, rl_src, TargetReg(kArg0)); /* reload Ref */
// Rejoin code paths
- LIR* hopTarget = NewLIR0(cUnit, kPseudoTargetLabel);
- hopBranch->target = hopTarget;
+ LIR* hop_target = NewLIR0(cu, kPseudoTargetLabel);
+ hop_branch->target = hop_target;
}
}
/* kArg0 is ref, kArg2 is class. If ref==null, use directly as bool result */
- RegLocation rlResult = GetReturn(cUnit, false);
- if (cUnit->instructionSet == kMips) {
- LoadConstant(cUnit, rlResult.lowReg, 0); // store false result for if branch is taken
+ RegLocation rl_result = GetReturn(cu, false);
+ if (cu->instruction_set == kMips) {
+ LoadConstant(cu, rl_result.low_reg, 0); // store false result for if branch is taken
}
- LIR* branch1 = OpCmpImmBranch(cUnit, kCondEq, TargetReg(kArg0), 0, NULL);
+ LIR* branch1 = OpCmpImmBranch(cu, kCondEq, TargetReg(kArg0), 0, NULL);
/* load object->klass_ */
DCHECK_EQ(Object::ClassOffset().Int32Value(), 0);
- LoadWordDisp(cUnit, TargetReg(kArg0), Object::ClassOffset().Int32Value(), TargetReg(kArg1));
+ LoadWordDisp(cu, TargetReg(kArg0), Object::ClassOffset().Int32Value(), TargetReg(kArg1));
/* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class */
- LIR* callInst;
+ LIR* call_inst;
LIR* branchover = NULL;
- if (cUnit->instructionSet == kThumb2) {
+ if (cu->instruction_set == kThumb2) {
/* Uses conditional nullification */
- int rTgt = LoadHelper(cUnit, ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
- OpRegReg(cUnit, kOpCmp, TargetReg(kArg1), TargetReg(kArg2)); // Same?
- OpIT(cUnit, kArmCondEq, "EE"); // if-convert the test
- LoadConstant(cUnit, TargetReg(kArg0), 1); // .eq case - load true
- OpRegCopy(cUnit, TargetReg(kArg0), TargetReg(kArg2)); // .ne case - arg0 <= class
- callInst = OpReg(cUnit, kOpBlx, rTgt); // .ne case: helper(class, ref->class)
- FreeTemp(cUnit, rTgt);
+ int r_tgt = LoadHelper(cu, ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
+ OpRegReg(cu, kOpCmp, TargetReg(kArg1), TargetReg(kArg2)); // Same?
+ OpIT(cu, kArmCondEq, "EE"); // if-convert the test
+ LoadConstant(cu, TargetReg(kArg0), 1); // .eq case - load true
+ OpRegCopy(cu, TargetReg(kArg0), TargetReg(kArg2)); // .ne case - arg0 <= class
+ call_inst = OpReg(cu, kOpBlx, r_tgt); // .ne case: helper(class, ref->class)
+ FreeTemp(cu, r_tgt);
} else {
/* Uses branchovers */
- LoadConstant(cUnit, rlResult.lowReg, 1); // assume true
- branchover = OpCmpBranch(cUnit, kCondEq, TargetReg(kArg1), TargetReg(kArg2), NULL);
- if (cUnit->instructionSet != kX86) {
- int rTgt = LoadHelper(cUnit, ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
- OpRegCopy(cUnit, TargetReg(kArg0), TargetReg(kArg2)); // .ne case - arg0 <= class
- callInst = OpReg(cUnit, kOpBlx, rTgt); // .ne case: helper(class, ref->class)
- FreeTemp(cUnit, rTgt);
+ LoadConstant(cu, rl_result.low_reg, 1); // assume true
+ branchover = OpCmpBranch(cu, kCondEq, TargetReg(kArg1), TargetReg(kArg2), NULL);
+ if (cu->instruction_set != kX86) {
+ int r_tgt = LoadHelper(cu, ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
+ OpRegCopy(cu, TargetReg(kArg0), TargetReg(kArg2)); // .ne case - arg0 <= class
+ call_inst = OpReg(cu, kOpBlx, r_tgt); // .ne case: helper(class, ref->class)
+ FreeTemp(cu, r_tgt);
} else {
- OpRegCopy(cUnit, TargetReg(kArg0), TargetReg(kArg2));
- callInst = OpThreadMem(cUnit, kOpBlx, ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
+ OpRegCopy(cu, TargetReg(kArg0), TargetReg(kArg2));
+ call_inst = OpThreadMem(cu, kOpBlx, ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
}
}
- MarkSafepointPC(cUnit, callInst);
- ClobberCalleeSave(cUnit);
+ MarkSafepointPC(cu, call_inst);
+ ClobberCalleeSave(cu);
/* branch targets here */
- LIR* target = NewLIR0(cUnit, kPseudoTargetLabel);
- StoreValue(cUnit, rlDest, rlResult);
+ LIR* target = NewLIR0(cu, kPseudoTargetLabel);
+ StoreValue(cu, rl_dest, rl_result);
branch1->target = target;
- if (cUnit->instructionSet != kThumb2) {
+ if (cu->instruction_set != kThumb2) {
branchover->target = target;
}
}
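
The semantics the instance-of lowering implements: null is not an instance of anything, an exact class match is trivially true, and everything else defers to pInstanceofNonTrivialFromCode. A simplified sketch — the real helper also handles interfaces and arrays:

    struct Class { const Class* super; };
    struct Obj { const Class* klass; };

    // Simplified stand-in for pInstanceofNonTrivialFromCode: walk supers.
    bool IsAssignableSlow(const Class* klass, const Class* target) {
      for (; klass != nullptr; klass = klass->super) {
        if (klass == target) return true;
      }
      return false;
    }

    bool InstanceOf(const Obj* ref, const Class* target) {
      if (ref == nullptr) return false;             // branch1: null => false
      if (ref->klass == target) return true;        // fast path: exact match
      return IsAssignableSlow(ref->klass, target);  // helper call
    }
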
-void GenCheckCast(CompilationUnit* cUnit, uint32_t type_idx, RegLocation rlSrc)
+void GenCheckCast(CompilationUnit* cu, uint32_t type_idx, RegLocation rl_src)
{
- FlushAllRegs(cUnit);
+ FlushAllRegs(cu);
// May generate a call - use explicit registers
- LockCallTemps(cUnit);
- LoadCurrMethodDirect(cUnit, TargetReg(kArg1)); // kArg1 <= current Method*
- int classReg = TargetReg(kArg2); // kArg2 will hold the Class*
- if (!cUnit->compiler->CanAccessTypeWithoutChecks(cUnit->method_idx,
- *cUnit->dex_file,
+ LockCallTemps(cu);
+ LoadCurrMethodDirect(cu, TargetReg(kArg1)); // kArg1 <= current Method*
+ int class_reg = TargetReg(kArg2); // kArg2 will hold the Class*
+ if (!cu->compiler->CanAccessTypeWithoutChecks(cu->method_idx,
+ *cu->dex_file,
type_idx)) {
// Check we have access to type_idx and if not throw IllegalAccessError,
// returns Class* in kRet0
// InitializeTypeAndVerifyAccess(idx, method)
- CallRuntimeHelperImmReg(cUnit, ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode),
+ CallRuntimeHelperImmReg(cu, ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode),
type_idx, TargetReg(kArg1), true);
- OpRegCopy(cUnit, classReg, TargetReg(kRet0)); // Align usage with fast path
+ OpRegCopy(cu, class_reg, TargetReg(kRet0)); // Align usage with fast path
} else {
- // Load dex cache entry into classReg (kArg2)
- LoadWordDisp(cUnit, TargetReg(kArg1),
- AbstractMethod::DexCacheResolvedTypesOffset().Int32Value(), classReg);
+ // Load dex cache entry into class_reg (kArg2)
+ LoadWordDisp(cu, TargetReg(kArg1),
+ AbstractMethod::DexCacheResolvedTypesOffset().Int32Value(), class_reg);
int32_t offset_of_type =
Array::DataOffset(sizeof(Class*)).Int32Value() +
(sizeof(Class*) * type_idx);
- LoadWordDisp(cUnit, classReg, offset_of_type, classReg);
- if (!cUnit->compiler->CanAssumeTypeIsPresentInDexCache(
- *cUnit->dex_file, type_idx)) {
+ LoadWordDisp(cu, class_reg, offset_of_type, class_reg);
+ if (!cu->compiler->CanAssumeTypeIsPresentInDexCache(
+ *cu->dex_file, type_idx)) {
// Need to test presence of type in dex cache at runtime
- LIR* hopBranch = OpCmpImmBranch(cUnit, kCondNe, classReg, 0, NULL);
+ LIR* hop_branch = OpCmpImmBranch(cu, kCondNe, class_reg, 0, NULL);
// Not resolved
// Call out to helper, which will return resolved type in kArg0
// InitializeTypeFromCode(idx, method)
- CallRuntimeHelperImmReg(cUnit, ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx, TargetReg(kArg1),
+ CallRuntimeHelperImmReg(cu, ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx, TargetReg(kArg1),
true);
- OpRegCopy(cUnit, classReg, TargetReg(kRet0)); // Align usage with fast path
+ OpRegCopy(cu, class_reg, TargetReg(kRet0)); // Align usage with fast path
// Rejoin code paths
- LIR* hopTarget = NewLIR0(cUnit, kPseudoTargetLabel);
- hopBranch->target = hopTarget;
+ LIR* hop_target = NewLIR0(cu, kPseudoTargetLabel);
+ hop_branch->target = hop_target;
}
}
- // At this point, classReg (kArg2) has class
- LoadValueDirectFixed(cUnit, rlSrc, TargetReg(kArg0)); // kArg0 <= ref
+ // At this point, class_reg (kArg2) has class
+ LoadValueDirectFixed(cu, rl_src, TargetReg(kArg0)); // kArg0 <= ref
/* Null is OK - continue */
- LIR* branch1 = OpCmpImmBranch(cUnit, kCondEq, TargetReg(kArg0), 0, NULL);
+ LIR* branch1 = OpCmpImmBranch(cu, kCondEq, TargetReg(kArg0), 0, NULL);
/* load object->klass_ */
DCHECK_EQ(Object::ClassOffset().Int32Value(), 0);
- LoadWordDisp(cUnit, TargetReg(kArg0), Object::ClassOffset().Int32Value(), TargetReg(kArg1));
+ LoadWordDisp(cu, TargetReg(kArg0), Object::ClassOffset().Int32Value(), TargetReg(kArg1));
/* kArg1 now contains object->klass_ */
LIR* branch2;
- if (cUnit->instructionSet == kThumb2) {
- int rTgt = LoadHelper(cUnit, ENTRYPOINT_OFFSET(pCheckCastFromCode));
- OpRegReg(cUnit, kOpCmp, TargetReg(kArg1), classReg);
- branch2 = OpCondBranch(cUnit, kCondEq, NULL); /* If eq, trivial yes */
- OpRegCopy(cUnit, TargetReg(kArg0), TargetReg(kArg1));
- OpRegCopy(cUnit, TargetReg(kArg1), TargetReg(kArg2));
- ClobberCalleeSave(cUnit);
- LIR* callInst = OpReg(cUnit, kOpBlx, rTgt);
- MarkSafepointPC(cUnit, callInst);
- FreeTemp(cUnit, rTgt);
+ if (cu->instruction_set == kThumb2) {
+ int r_tgt = LoadHelper(cu, ENTRYPOINT_OFFSET(pCheckCastFromCode));
+ OpRegReg(cu, kOpCmp, TargetReg(kArg1), class_reg);
+ branch2 = OpCondBranch(cu, kCondEq, NULL); /* If eq, trivial yes */
+ OpRegCopy(cu, TargetReg(kArg0), TargetReg(kArg1));
+ OpRegCopy(cu, TargetReg(kArg1), TargetReg(kArg2));
+ ClobberCalleeSave(cu);
+ LIR* call_inst = OpReg(cu, kOpBlx, r_tgt);
+ MarkSafepointPC(cu, call_inst);
+ FreeTemp(cu, r_tgt);
} else {
- branch2 = OpCmpBranch(cUnit, kCondEq, TargetReg(kArg1), classReg, NULL);
- CallRuntimeHelperRegReg(cUnit, ENTRYPOINT_OFFSET(pCheckCastFromCode), TargetReg(kArg1), TargetReg(kArg2), true);
+ branch2 = OpCmpBranch(cu, kCondEq, TargetReg(kArg1), class_reg, NULL);
+ CallRuntimeHelperRegReg(cu, ENTRYPOINT_OFFSET(pCheckCastFromCode), TargetReg(kArg1), TargetReg(kArg2), true);
}
/* branch target here */
- LIR* target = NewLIR0(cUnit, kPseudoTargetLabel);
+ LIR* target = NewLIR0(cu, kPseudoTargetLabel);
branch1->target = target;
branch2->target = target;
}
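
Check-cast differs from instance-of in exactly the two places visible above: a null reference passes (branch1 goes straight to the join), and failure throws instead of yielding false. A matching sketch, with abort standing in for the exception unwind done by pCheckCastFromCode:

    #include <cstdlib>

    struct Class { const Class* super; };
    struct Obj { const Class* klass; };

    bool IsAssignable(const Class* klass, const Class* target) {
      for (; klass != nullptr; klass = klass->super) {
        if (klass == target) return true;
      }
      return false;
    }

    void CheckCast(const Obj* ref, const Class* target) {
      if (ref == nullptr) return;          // branch1: null always passes
      if (ref->klass == target) return;    // branch2: trivial yes
      if (!IsAssignable(ref->klass, target)) {
        std::abort();                      // stands in for the ClassCastException throw
      }
    }
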
@@ -1289,166 +1289,166 @@
* Generate array store
*
*/
-void GenArrayObjPut(CompilationUnit* cUnit, int optFlags, RegLocation rlArray,
- RegLocation rlIndex, RegLocation rlSrc, int scale)
+void GenArrayObjPut(CompilationUnit* cu, int opt_flags, RegLocation rl_array,
+ RegLocation rl_index, RegLocation rl_src, int scale)
{
- int lenOffset = Array::LengthOffset().Int32Value();
- int dataOffset = Array::DataOffset(sizeof(Object*)).Int32Value();
+ int len_offset = Array::LengthOffset().Int32Value();
+ int data_offset = Array::DataOffset(sizeof(Object*)).Int32Value();
- FlushAllRegs(cUnit); // Use explicit registers
- LockCallTemps(cUnit);
+ FlushAllRegs(cu); // Use explicit registers
+ LockCallTemps(cu);
- int rValue = TargetReg(kArg0); // Register holding value
- int rArrayClass = TargetReg(kArg1); // Register holding array's Class
- int rArray = TargetReg(kArg2); // Register holding array
- int rIndex = TargetReg(kArg3); // Register holding index into array
+ int r_value = TargetReg(kArg0); // Register holding value
+ int r_array_class = TargetReg(kArg1); // Register holding array's Class
+ int r_array = TargetReg(kArg2); // Register holding array
+ int r_index = TargetReg(kArg3); // Register holding index into array
- LoadValueDirectFixed(cUnit, rlArray, rArray); // Grab array
- LoadValueDirectFixed(cUnit, rlSrc, rValue); // Grab value
- LoadValueDirectFixed(cUnit, rlIndex, rIndex); // Grab index
+ LoadValueDirectFixed(cu, rl_array, r_array); // Grab array
+ LoadValueDirectFixed(cu, rl_src, r_value); // Grab value
+ LoadValueDirectFixed(cu, rl_index, r_index); // Grab index
- GenNullCheck(cUnit, rlArray.sRegLow, rArray, optFlags); // NPE?
+ GenNullCheck(cu, rl_array.s_reg_low, r_array, opt_flags); // NPE?
// Store of null?
- LIR* null_value_check = OpCmpImmBranch(cUnit, kCondEq, rValue, 0, NULL);
+ LIR* null_value_check = OpCmpImmBranch(cu, kCondEq, r_value, 0, NULL);
// Get the array's class.
- LoadWordDisp(cUnit, rArray, Object::ClassOffset().Int32Value(), rArrayClass);
- CallRuntimeHelperRegReg(cUnit, ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), rValue,
- rArrayClass, true);
+ LoadWordDisp(cu, r_array, Object::ClassOffset().Int32Value(), r_array_class);
+ CallRuntimeHelperRegReg(cu, ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value,
+ r_array_class, true);
// Redo LoadValues in case they didn't survive the call.
- LoadValueDirectFixed(cUnit, rlArray, rArray); // Reload array
- LoadValueDirectFixed(cUnit, rlIndex, rIndex); // Reload index
- LoadValueDirectFixed(cUnit, rlSrc, rValue); // Reload value
- rArrayClass = INVALID_REG;
+ LoadValueDirectFixed(cu, rl_array, r_array); // Reload array
+ LoadValueDirectFixed(cu, rl_index, r_index); // Reload index
+ LoadValueDirectFixed(cu, rl_src, r_value); // Reload value
+ r_array_class = INVALID_REG;
// Branch here if value to be stored == null
- LIR* target = NewLIR0(cUnit, kPseudoTargetLabel);
+ LIR* target = NewLIR0(cu, kPseudoTargetLabel);
null_value_check->target = target;
- if (cUnit->instructionSet == kX86) {
+ if (cu->instruction_set == kX86) {
// make an extra temp available for card mark below
- FreeTemp(cUnit, TargetReg(kArg1));
- if (!(optFlags & MIR_IGNORE_RANGE_CHECK)) {
- /* if (rlIndex >= [rlArray + lenOffset]) goto kThrowArrayBounds */
- GenRegMemCheck(cUnit, kCondUge, rIndex, rArray, lenOffset, kThrowArrayBounds);
+ FreeTemp(cu, TargetReg(kArg1));
+ if (!(opt_flags & MIR_IGNORE_RANGE_CHECK)) {
+ /* if (rl_index >= [rl_array + len_offset]) goto kThrowArrayBounds */
+ GenRegMemCheck(cu, kCondUge, r_index, r_array, len_offset, kThrowArrayBounds);
}
- StoreBaseIndexedDisp(cUnit, rArray, rIndex, scale,
- dataOffset, rValue, INVALID_REG, kWord, INVALID_SREG);
+ StoreBaseIndexedDisp(cu, r_array, r_index, scale,
+ data_offset, r_value, INVALID_REG, kWord, INVALID_SREG);
} else {
- bool needsRangeCheck = (!(optFlags & MIR_IGNORE_RANGE_CHECK));
- int regLen = INVALID_REG;
- if (needsRangeCheck) {
- regLen = TargetReg(kArg1);
- LoadWordDisp(cUnit, rArray, lenOffset, regLen); // Get len
+ bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
+ int reg_len = INVALID_REG;
+ if (needs_range_check) {
+ reg_len = TargetReg(kArg1);
+ LoadWordDisp(cu, r_array, len_offset, reg_len); // Get len
}
- /* rPtr -> array data */
- int rPtr = AllocTemp(cUnit);
- OpRegRegImm(cUnit, kOpAdd, rPtr, rArray, dataOffset);
- if (needsRangeCheck) {
- GenRegRegCheck(cUnit, kCondCs, rIndex, regLen, kThrowArrayBounds);
+ /* r_ptr -> array data */
+ int r_ptr = AllocTemp(cu);
+ OpRegRegImm(cu, kOpAdd, r_ptr, r_array, data_offset);
+ if (needs_range_check) {
+ GenRegRegCheck(cu, kCondCs, r_index, reg_len, kThrowArrayBounds);
}
- StoreBaseIndexed(cUnit, rPtr, rIndex, rValue, scale, kWord);
- FreeTemp(cUnit, rPtr);
+ StoreBaseIndexed(cu, r_ptr, r_index, r_value, scale, kWord);
+ FreeTemp(cu, r_ptr);
}
- FreeTemp(cUnit, rIndex);
- MarkGCCard(cUnit, rValue, rArray);
+ FreeTemp(cu, r_index);
+ MarkGCCard(cu, r_value, r_array);
}
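
Beyond the null, assignability, and bounds checks, the distinctive step in an object-array store is the trailing MarkGCCard: dirty the card covering the array so a generational or concurrent collector rescans it for the new reference. A hedged sketch of a byte-per-card table; the shift and dirty value are illustrative, not ART's actual constants:

    #include <cstdint>

    constexpr unsigned kCardShift = 10;   // one card per 1 KiB (illustrative)
    constexpr uint8_t kCardDirty = 0x70;  // illustrative dirty marker

    // Demo-sized table; the runtime's table covers the whole heap.
    uint8_t card_table[1 << 16] = {};

    void MarkCard(const void* stored_into) {
      uintptr_t addr = reinterpret_cast<uintptr_t>(stored_into);
      // Modulo keeps this demo in bounds; the real table is biased by heap base.
      card_table[(addr >> kCardShift) % sizeof(card_table)] = kCardDirty;
    }
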
/*
* Generate array load
*/
-void GenArrayGet(CompilationUnit* cUnit, int optFlags, OpSize size,
- RegLocation rlArray, RegLocation rlIndex,
- RegLocation rlDest, int scale)
+void GenArrayGet(CompilationUnit* cu, int opt_flags, OpSize size,
+ RegLocation rl_array, RegLocation rl_index,
+ RegLocation rl_dest, int scale)
{
- RegisterClass regClass = oatRegClassBySize(size);
- int lenOffset = Array::LengthOffset().Int32Value();
- int dataOffset;
- RegLocation rlResult;
- rlArray = LoadValue(cUnit, rlArray, kCoreReg);
- rlIndex = LoadValue(cUnit, rlIndex, kCoreReg);
+ RegisterClass reg_class = oat_reg_class_by_size(size);
+ int len_offset = Array::LengthOffset().Int32Value();
+ int data_offset;
+ RegLocation rl_result;
+ rl_array = LoadValue(cu, rl_array, kCoreReg);
+ rl_index = LoadValue(cu, rl_index, kCoreReg);
if (size == kLong || size == kDouble) {
- dataOffset = Array::DataOffset(sizeof(int64_t)).Int32Value();
+ data_offset = Array::DataOffset(sizeof(int64_t)).Int32Value();
} else {
- dataOffset = Array::DataOffset(sizeof(int32_t)).Int32Value();
+ data_offset = Array::DataOffset(sizeof(int32_t)).Int32Value();
}
/* null object? */
- GenNullCheck(cUnit, rlArray.sRegLow, rlArray.lowReg, optFlags);
+ GenNullCheck(cu, rl_array.s_reg_low, rl_array.low_reg, opt_flags);
- if (cUnit->instructionSet == kX86) {
- if (!(optFlags & MIR_IGNORE_RANGE_CHECK)) {
- /* if (rlIndex >= [rlArray + lenOffset]) goto kThrowArrayBounds */
- GenRegMemCheck(cUnit, kCondUge, rlIndex.lowReg, rlArray.lowReg,
- lenOffset, kThrowArrayBounds);
+ if (cu->instruction_set == kX86) {
+ if (!(opt_flags & MIR_IGNORE_RANGE_CHECK)) {
+ /* if (rl_index >= [rl_array + len_offset]) goto kThrowArrayBounds */
+ GenRegMemCheck(cu, kCondUge, rl_index.low_reg, rl_array.low_reg,
+ len_offset, kThrowArrayBounds);
}
if ((size == kLong) || (size == kDouble)) {
- int regAddr = AllocTemp(cUnit);
- OpLea(cUnit, regAddr, rlArray.lowReg, rlIndex.lowReg, scale, dataOffset);
- FreeTemp(cUnit, rlArray.lowReg);
- FreeTemp(cUnit, rlIndex.lowReg);
- rlResult = EvalLoc(cUnit, rlDest, regClass, true);
- LoadBaseIndexedDisp(cUnit, regAddr, INVALID_REG, 0, 0, rlResult.lowReg,
- rlResult.highReg, size, INVALID_SREG);
- StoreValueWide(cUnit, rlDest, rlResult);
+ int reg_addr = AllocTemp(cu);
+ OpLea(cu, reg_addr, rl_array.low_reg, rl_index.low_reg, scale, data_offset);
+ FreeTemp(cu, rl_array.low_reg);
+ FreeTemp(cu, rl_index.low_reg);
+ rl_result = EvalLoc(cu, rl_dest, reg_class, true);
+ LoadBaseIndexedDisp(cu, reg_addr, INVALID_REG, 0, 0, rl_result.low_reg,
+ rl_result.high_reg, size, INVALID_SREG);
+ StoreValueWide(cu, rl_dest, rl_result);
} else {
- rlResult = EvalLoc(cUnit, rlDest, regClass, true);
+ rl_result = EvalLoc(cu, rl_dest, reg_class, true);
- LoadBaseIndexedDisp(cUnit, rlArray.lowReg, rlIndex.lowReg, scale,
- dataOffset, rlResult.lowReg, INVALID_REG, size,
+ LoadBaseIndexedDisp(cu, rl_array.low_reg, rl_index.low_reg, scale,
+ data_offset, rl_result.low_reg, INVALID_REG, size,
INVALID_SREG);
- StoreValue(cUnit, rlDest, rlResult);
+ StoreValue(cu, rl_dest, rl_result);
}
} else {
- int regPtr = AllocTemp(cUnit);
- bool needsRangeCheck = (!(optFlags & MIR_IGNORE_RANGE_CHECK));
- int regLen = INVALID_REG;
- if (needsRangeCheck) {
- regLen = AllocTemp(cUnit);
+ int reg_ptr = AllocTemp(cu);
+ bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
+ int reg_len = INVALID_REG;
+ if (needs_range_check) {
+ reg_len = AllocTemp(cu);
/* Get len */
- LoadWordDisp(cUnit, rlArray.lowReg, lenOffset, regLen);
+ LoadWordDisp(cu, rl_array.low_reg, len_offset, reg_len);
}
- /* regPtr -> array data */
- OpRegRegImm(cUnit, kOpAdd, regPtr, rlArray.lowReg, dataOffset);
- FreeTemp(cUnit, rlArray.lowReg);
+ /* reg_ptr -> array data */
+ OpRegRegImm(cu, kOpAdd, reg_ptr, rl_array.low_reg, data_offset);
+ FreeTemp(cu, rl_array.low_reg);
if ((size == kLong) || (size == kDouble)) {
if (scale) {
- int rNewIndex = AllocTemp(cUnit);
- OpRegRegImm(cUnit, kOpLsl, rNewIndex, rlIndex.lowReg, scale);
- OpRegReg(cUnit, kOpAdd, regPtr, rNewIndex);
- FreeTemp(cUnit, rNewIndex);
+ int r_new_index = AllocTemp(cu);
+ OpRegRegImm(cu, kOpLsl, r_new_index, rl_index.low_reg, scale);
+ OpRegReg(cu, kOpAdd, reg_ptr, r_new_index);
+ FreeTemp(cu, r_new_index);
} else {
- OpRegReg(cUnit, kOpAdd, regPtr, rlIndex.lowReg);
+ OpRegReg(cu, kOpAdd, reg_ptr, rl_index.low_reg);
}
- FreeTemp(cUnit, rlIndex.lowReg);
- rlResult = EvalLoc(cUnit, rlDest, regClass, true);
+ FreeTemp(cu, rl_index.low_reg);
+ rl_result = EvalLoc(cu, rl_dest, reg_class, true);
- if (needsRangeCheck) {
+ if (needs_range_check) {
// TODO: change kCondCS to a more meaningful name; is the sense of
// carry-set/clear flipped?
- GenRegRegCheck(cUnit, kCondCs, rlIndex.lowReg, regLen, kThrowArrayBounds);
- FreeTemp(cUnit, regLen);
+ GenRegRegCheck(cu, kCondCs, rl_index.low_reg, reg_len, kThrowArrayBounds);
+ FreeTemp(cu, reg_len);
}
- LoadPair(cUnit, regPtr, rlResult.lowReg, rlResult.highReg);
+ LoadPair(cu, reg_ptr, rl_result.low_reg, rl_result.high_reg);
- FreeTemp(cUnit, regPtr);
- StoreValueWide(cUnit, rlDest, rlResult);
+ FreeTemp(cu, reg_ptr);
+ StoreValueWide(cu, rl_dest, rl_result);
} else {
- rlResult = EvalLoc(cUnit, rlDest, regClass, true);
+ rl_result = EvalLoc(cu, rl_dest, reg_class, true);
- if (needsRangeCheck) {
+ if (needs_range_check) {
// TODO: change kCondCS to a more meaningful name; is the sense of
// carry-set/clear flipped?
- GenRegRegCheck(cUnit, kCondCs, rlIndex.lowReg, regLen, kThrowArrayBounds);
- FreeTemp(cUnit, regLen);
+ GenRegRegCheck(cu, kCondCs, rl_index.low_reg, reg_len, kThrowArrayBounds);
+ FreeTemp(cu, reg_len);
}
- LoadBaseIndexed(cUnit, regPtr, rlIndex.lowReg, rlResult.lowReg, scale, size);
+ LoadBaseIndexed(cu, reg_ptr, rl_index.low_reg, rl_result.low_reg, scale, size);
- FreeTemp(cUnit, regPtr);
- StoreValue(cUnit, rlDest, rlResult);
+ FreeTemp(cu, reg_ptr);
+ StoreValue(cu, rl_dest, rl_result);
}
}
}
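
The address arithmetic behind LoadBaseIndexedDisp/OpLea on x86, and behind the explicit shift-add-load on Arm/Mips, is the same: element_addr = array + data_offset + (index << scale). A one-function sketch (index is assumed already bounds-checked and non-negative):

    #include <cstdint>

    // scale is log2(element size): 2 for 32-bit elements, 3 for 64-bit.
    uintptr_t ElementAddress(uintptr_t array_base, int32_t index,
                             unsigned scale, int32_t data_offset) {
      return array_base + static_cast<uintptr_t>(data_offset) +
             (static_cast<uintptr_t>(index) << scale);
    }
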
@@ -1457,106 +1457,106 @@
* Generate array store
*
*/
-void GenArrayPut(CompilationUnit* cUnit, int optFlags, OpSize size,
- RegLocation rlArray, RegLocation rlIndex,
- RegLocation rlSrc, int scale)
+void GenArrayPut(CompilationUnit* cu, int opt_flags, OpSize size,
+ RegLocation rl_array, RegLocation rl_index,
+ RegLocation rl_src, int scale)
{
- RegisterClass regClass = oatRegClassBySize(size);
- int lenOffset = Array::LengthOffset().Int32Value();
- int dataOffset;
+ RegisterClass reg_class = oat_reg_class_by_size(size);
+ int len_offset = Array::LengthOffset().Int32Value();
+ int data_offset;
if (size == kLong || size == kDouble) {
- dataOffset = Array::DataOffset(sizeof(int64_t)).Int32Value();
+ data_offset = Array::DataOffset(sizeof(int64_t)).Int32Value();
} else {
- dataOffset = Array::DataOffset(sizeof(int32_t)).Int32Value();
+ data_offset = Array::DataOffset(sizeof(int32_t)).Int32Value();
}
- rlArray = LoadValue(cUnit, rlArray, kCoreReg);
- rlIndex = LoadValue(cUnit, rlIndex, kCoreReg);
- int regPtr = INVALID_REG;
- if (cUnit->instructionSet != kX86) {
- if (IsTemp(cUnit, rlArray.lowReg)) {
- Clobber(cUnit, rlArray.lowReg);
- regPtr = rlArray.lowReg;
+ rl_array = LoadValue(cu, rl_array, kCoreReg);
+ rl_index = LoadValue(cu, rl_index, kCoreReg);
+ int reg_ptr = INVALID_REG;
+ if (cu->instruction_set != kX86) {
+ if (IsTemp(cu, rl_array.low_reg)) {
+ Clobber(cu, rl_array.low_reg);
+ reg_ptr = rl_array.low_reg;
} else {
- regPtr = AllocTemp(cUnit);
- OpRegCopy(cUnit, regPtr, rlArray.lowReg);
+ reg_ptr = AllocTemp(cu);
+ OpRegCopy(cu, reg_ptr, rl_array.low_reg);
}
}
/* null object? */
- GenNullCheck(cUnit, rlArray.sRegLow, rlArray.lowReg, optFlags);
+ GenNullCheck(cu, rl_array.s_reg_low, rl_array.low_reg, opt_flags);
- if (cUnit->instructionSet == kX86) {
- if (!(optFlags & MIR_IGNORE_RANGE_CHECK)) {
- /* if (rlIndex >= [rlArray + lenOffset]) goto kThrowArrayBounds */
- GenRegMemCheck(cUnit, kCondUge, rlIndex.lowReg, rlArray.lowReg, lenOffset, kThrowArrayBounds);
+ if (cu->instruction_set == kX86) {
+ if (!(opt_flags & MIR_IGNORE_RANGE_CHECK)) {
+ /* if (rl_index >= [rl_array + len_offset]) goto kThrowArrayBounds */
+ GenRegMemCheck(cu, kCondUge, rl_index.low_reg, rl_array.low_reg, len_offset, kThrowArrayBounds);
}
if ((size == kLong) || (size == kDouble)) {
- rlSrc = LoadValueWide(cUnit, rlSrc, regClass);
+ rl_src = LoadValueWide(cu, rl_src, reg_class);
} else {
- rlSrc = LoadValue(cUnit, rlSrc, regClass);
+ rl_src = LoadValue(cu, rl_src, reg_class);
}
// If the src reg can't be byte accessed, move it to a temp first.
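    // (On 32-bit x86 only the low four GPRs have byte-addressable forms,
    // which is what the low_reg >= 4 test below catches.)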
- if ((size == kSignedByte || size == kUnsignedByte) && rlSrc.lowReg >= 4) {
- int temp = AllocTemp(cUnit);
- OpRegCopy(cUnit, temp, rlSrc.lowReg);
- StoreBaseIndexedDisp(cUnit, rlArray.lowReg, rlIndex.lowReg, scale, dataOffset, temp,
+ if ((size == kSignedByte || size == kUnsignedByte) && rl_src.low_reg >= 4) {
+ int temp = AllocTemp(cu);
+ OpRegCopy(cu, temp, rl_src.low_reg);
+ StoreBaseIndexedDisp(cu, rl_array.low_reg, rl_index.low_reg, scale, data_offset, temp,
INVALID_REG, size, INVALID_SREG);
} else {
- StoreBaseIndexedDisp(cUnit, rlArray.lowReg, rlIndex.lowReg, scale, dataOffset, rlSrc.lowReg,
- rlSrc.highReg, size, INVALID_SREG);
+ StoreBaseIndexedDisp(cu, rl_array.low_reg, rl_index.low_reg, scale, data_offset, rl_src.low_reg,
+ rl_src.high_reg, size, INVALID_SREG);
}
} else {
- bool needsRangeCheck = (!(optFlags & MIR_IGNORE_RANGE_CHECK));
- int regLen = INVALID_REG;
- if (needsRangeCheck) {
- regLen = AllocTemp(cUnit);
+ bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
+ int reg_len = INVALID_REG;
+ if (needs_range_check) {
+ reg_len = AllocTemp(cu);
      //NOTE: at most 4 live temps at this point.
/* Get len */
- LoadWordDisp(cUnit, rlArray.lowReg, lenOffset, regLen);
+ LoadWordDisp(cu, rl_array.low_reg, len_offset, reg_len);
}
- /* regPtr -> array data */
- OpRegImm(cUnit, kOpAdd, regPtr, dataOffset);
- /* at this point, regPtr points to array, 2 live temps */
+ /* reg_ptr -> array data */
+ OpRegImm(cu, kOpAdd, reg_ptr, data_offset);
+    /* at this point, reg_ptr points to the array data; 2 live temps */
if ((size == kLong) || (size == kDouble)) {
//TUNING: specific wide routine that can handle fp regs
if (scale) {
- int rNewIndex = AllocTemp(cUnit);
- OpRegRegImm(cUnit, kOpLsl, rNewIndex, rlIndex.lowReg, scale);
- OpRegReg(cUnit, kOpAdd, regPtr, rNewIndex);
- FreeTemp(cUnit, rNewIndex);
+ int r_new_index = AllocTemp(cu);
+ OpRegRegImm(cu, kOpLsl, r_new_index, rl_index.low_reg, scale);
+ OpRegReg(cu, kOpAdd, reg_ptr, r_new_index);
+ FreeTemp(cu, r_new_index);
} else {
- OpRegReg(cUnit, kOpAdd, regPtr, rlIndex.lowReg);
+ OpRegReg(cu, kOpAdd, reg_ptr, rl_index.low_reg);
}
- rlSrc = LoadValueWide(cUnit, rlSrc, regClass);
+ rl_src = LoadValueWide(cu, rl_src, reg_class);
- if (needsRangeCheck) {
- GenRegRegCheck(cUnit, kCondCs, rlIndex.lowReg, regLen, kThrowArrayBounds);
- FreeTemp(cUnit, regLen);
+ if (needs_range_check) {
+ GenRegRegCheck(cu, kCondCs, rl_index.low_reg, reg_len, kThrowArrayBounds);
+ FreeTemp(cu, reg_len);
}
- StoreBaseDispWide(cUnit, regPtr, 0, rlSrc.lowReg, rlSrc.highReg);
+ StoreBaseDispWide(cu, reg_ptr, 0, rl_src.low_reg, rl_src.high_reg);
- FreeTemp(cUnit, regPtr);
+ FreeTemp(cu, reg_ptr);
} else {
- rlSrc = LoadValue(cUnit, rlSrc, regClass);
- if (needsRangeCheck) {
- GenRegRegCheck(cUnit, kCondCs, rlIndex.lowReg, regLen, kThrowArrayBounds);
- FreeTemp(cUnit, regLen);
+ rl_src = LoadValue(cu, rl_src, reg_class);
+ if (needs_range_check) {
+ GenRegRegCheck(cu, kCondCs, rl_index.low_reg, reg_len, kThrowArrayBounds);
+ FreeTemp(cu, reg_len);
}
- StoreBaseIndexed(cUnit, regPtr, rlIndex.lowReg, rlSrc.lowReg,
+ StoreBaseIndexed(cu, reg_ptr, rl_index.low_reg, rl_src.low_reg,
scale, size);
}
}
}
-void GenLong3Addr(CompilationUnit* cUnit, OpKind firstOp,
- OpKind secondOp, RegLocation rlDest,
- RegLocation rlSrc1, RegLocation rlSrc2)
+void GenLong3Addr(CompilationUnit* cu, OpKind first_op,
+ OpKind second_op, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2)
{
- RegLocation rlResult;
- if (cUnit->instructionSet == kThumb2) {
+ RegLocation rl_result;
+ if (cu->instruction_set == kThumb2) {
/*
* NOTE: This is the one place in the code in which we might have
* as many as six live temporary registers. There are 5 in the normal
@@ -1565,80 +1565,80 @@
* lr is used explicitly elsewhere in the code generator and cannot
* normally be used as a general temp register.
*/
- MarkTemp(cUnit, TargetReg(kLr)); // Add lr to the temp pool
- FreeTemp(cUnit, TargetReg(kLr)); // and make it available
+ MarkTemp(cu, TargetReg(kLr)); // Add lr to the temp pool
+ FreeTemp(cu, TargetReg(kLr)); // and make it available
}
- rlSrc1 = LoadValueWide(cUnit, rlSrc1, kCoreReg);
- rlSrc2 = LoadValueWide(cUnit, rlSrc2, kCoreReg);
- rlResult = EvalLoc(cUnit, rlDest, kCoreReg, true);
+ rl_src1 = LoadValueWide(cu, rl_src1, kCoreReg);
+ rl_src2 = LoadValueWide(cu, rl_src2, kCoreReg);
+ rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
  // The longs may overlap - use an intermediate temp if so
- if ((rlResult.lowReg == rlSrc1.highReg) || (rlResult.lowReg == rlSrc2.highReg)){
- int tReg = AllocTemp(cUnit);
- OpRegRegReg(cUnit, firstOp, tReg, rlSrc1.lowReg, rlSrc2.lowReg);
- OpRegRegReg(cUnit, secondOp, rlResult.highReg, rlSrc1.highReg, rlSrc2.highReg);
- OpRegCopy(cUnit, rlResult.lowReg, tReg);
- FreeTemp(cUnit, tReg);
+ if ((rl_result.low_reg == rl_src1.high_reg) || (rl_result.low_reg == rl_src2.high_reg)){
+ int t_reg = AllocTemp(cu);
+ OpRegRegReg(cu, first_op, t_reg, rl_src1.low_reg, rl_src2.low_reg);
+ OpRegRegReg(cu, second_op, rl_result.high_reg, rl_src1.high_reg, rl_src2.high_reg);
+ OpRegCopy(cu, rl_result.low_reg, t_reg);
+ FreeTemp(cu, t_reg);
} else {
- OpRegRegReg(cUnit, firstOp, rlResult.lowReg, rlSrc1.lowReg, rlSrc2.lowReg);
- OpRegRegReg(cUnit, secondOp, rlResult.highReg, rlSrc1.highReg,
- rlSrc2.highReg);
+ OpRegRegReg(cu, first_op, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg);
+ OpRegRegReg(cu, second_op, rl_result.high_reg, rl_src1.high_reg,
+ rl_src2.high_reg);
}
/*
- * NOTE: If rlDest refers to a frame variable in a large frame, the
+ * NOTE: If rl_dest refers to a frame variable in a large frame, the
* following StoreValueWide might need to allocate a temp register.
* To further work around the lack of a spill capability, explicitly
- * free any temps from rlSrc1 & rlSrc2 that aren't still live in rlResult.
+ * free any temps from rl_src1 & rl_src2 that aren't still live in rl_result.
* Remove when spill is functional.
*/
- FreeRegLocTemps(cUnit, rlResult, rlSrc1);
- FreeRegLocTemps(cUnit, rlResult, rlSrc2);
- StoreValueWide(cUnit, rlDest, rlResult);
- if (cUnit->instructionSet == kThumb2) {
- Clobber(cUnit, TargetReg(kLr));
- UnmarkTemp(cUnit, TargetReg(kLr)); // Remove lr from the temp pool
+ FreeRegLocTemps(cu, rl_result, rl_src1);
+ FreeRegLocTemps(cu, rl_result, rl_src2);
+ StoreValueWide(cu, rl_dest, rl_result);
+ if (cu->instruction_set == kThumb2) {
+ Clobber(cu, TargetReg(kLr));
+ UnmarkTemp(cu, TargetReg(kLr)); // Remove lr from the temp pool
}
}
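On Thumb2 the first_op/second_op pair is the usual carry-chained wide
sequence. A rough C++ model of the ADD_LONG case, assuming 32-bit halves
(the helper name is illustrative):

    // first_op = kOpAdd produces the low word and the carry;
    // second_op = kOpAdc folds that carry into the high word.
    void WideAdd(uint32_t s1_lo, uint32_t s1_hi, uint32_t s2_lo, uint32_t s2_hi,
                 uint32_t* res_lo, uint32_t* res_hi) {
      *res_lo = s1_lo + s2_lo;
      uint32_t carry = (*res_lo < s1_lo) ? 1 : 0;  // unsigned overflow of low add
      *res_hi = s1_hi + s2_hi + carry;
    }

SUB_LONG has the same shape with kOpSub/kOpSbc, propagating a borrow instead.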
-bool GenShiftOpLong(CompilationUnit* cUnit, Instruction::Code opcode, RegLocation rlDest,
- RegLocation rlSrc1, RegLocation rlShift)
+bool GenShiftOpLong(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_shift)
{
- int funcOffset;
+ int func_offset;
switch (opcode) {
case Instruction::SHL_LONG:
case Instruction::SHL_LONG_2ADDR:
- funcOffset = ENTRYPOINT_OFFSET(pShlLong);
+ func_offset = ENTRYPOINT_OFFSET(pShlLong);
break;
case Instruction::SHR_LONG:
case Instruction::SHR_LONG_2ADDR:
- funcOffset = ENTRYPOINT_OFFSET(pShrLong);
+ func_offset = ENTRYPOINT_OFFSET(pShrLong);
break;
case Instruction::USHR_LONG:
case Instruction::USHR_LONG_2ADDR:
- funcOffset = ENTRYPOINT_OFFSET(pUshrLong);
+ func_offset = ENTRYPOINT_OFFSET(pUshrLong);
break;
default:
LOG(FATAL) << "Unexpected case";
return true;
}
- FlushAllRegs(cUnit); /* Send everything to home location */
- CallRuntimeHelperRegLocationRegLocation(cUnit, funcOffset, rlSrc1, rlShift, false);
- RegLocation rlResult = GetReturnWide(cUnit, false);
- StoreValueWide(cUnit, rlDest, rlResult);
+ FlushAllRegs(cu); /* Send everything to home location */
+ CallRuntimeHelperRegLocationRegLocation(cu, func_offset, rl_src1, rl_shift, false);
+ RegLocation rl_result = GetReturnWide(cu, false);
+ StoreValueWide(cu, rl_dest, rl_result);
return false;
}
-bool GenArithOpInt(CompilationUnit* cUnit, Instruction::Code opcode, RegLocation rlDest,
- RegLocation rlSrc1, RegLocation rlSrc2)
+bool GenArithOpInt(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2)
{
OpKind op = kOpBkpt;
- bool isDivRem = false;
- bool checkZero = false;
+ bool is_div_rem = false;
+ bool check_zero = false;
bool unary = false;
- RegLocation rlResult;
- bool shiftOp = false;
+ RegLocation rl_result;
+ bool shift_op = false;
switch (opcode) {
case Instruction::NEG_INT:
op = kOpNeg;
@@ -1662,16 +1662,16 @@
break;
case Instruction::DIV_INT:
case Instruction::DIV_INT_2ADDR:
- checkZero = true;
+ check_zero = true;
op = kOpDiv;
- isDivRem = true;
+ is_div_rem = true;
break;
/* NOTE: returns in kArg1 */
case Instruction::REM_INT:
case Instruction::REM_INT_2ADDR:
- checkZero = true;
+ check_zero = true;
op = kOpRem;
- isDivRem = true;
+ is_div_rem = true;
break;
case Instruction::AND_INT:
case Instruction::AND_INT_2ADDR:
@@ -1687,76 +1687,76 @@
break;
case Instruction::SHL_INT:
case Instruction::SHL_INT_2ADDR:
- shiftOp = true;
+ shift_op = true;
op = kOpLsl;
break;
case Instruction::SHR_INT:
case Instruction::SHR_INT_2ADDR:
- shiftOp = true;
+ shift_op = true;
op = kOpAsr;
break;
case Instruction::USHR_INT:
case Instruction::USHR_INT_2ADDR:
- shiftOp = true;
+ shift_op = true;
op = kOpLsr;
break;
default:
LOG(FATAL) << "Invalid word arith op: " << opcode;
}
- if (!isDivRem) {
+ if (!is_div_rem) {
if (unary) {
- rlSrc1 = LoadValue(cUnit, rlSrc1, kCoreReg);
- rlResult = EvalLoc(cUnit, rlDest, kCoreReg, true);
- OpRegReg(cUnit, op, rlResult.lowReg, rlSrc1.lowReg);
+ rl_src1 = LoadValue(cu, rl_src1, kCoreReg);
+ rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ OpRegReg(cu, op, rl_result.low_reg, rl_src1.low_reg);
} else {
- if (shiftOp) {
- int tReg = INVALID_REG;
- if (cUnit->instructionSet == kX86) {
+ if (shift_op) {
+ int t_reg = INVALID_REG;
+ if (cu->instruction_set == kX86) {
// X86 doesn't require masking and must use ECX
- tReg = TargetReg(kCount); // rCX
- LoadValueDirectFixed(cUnit, rlSrc2, tReg);
+ t_reg = TargetReg(kCount); // rCX
+ LoadValueDirectFixed(cu, rl_src2, t_reg);
} else {
- rlSrc2 = LoadValue(cUnit, rlSrc2, kCoreReg);
- tReg = AllocTemp(cUnit);
- OpRegRegImm(cUnit, kOpAnd, tReg, rlSrc2.lowReg, 31);
+ rl_src2 = LoadValue(cu, rl_src2, kCoreReg);
+ t_reg = AllocTemp(cu);
+ OpRegRegImm(cu, kOpAnd, t_reg, rl_src2.low_reg, 31);
}
- rlSrc1 = LoadValue(cUnit, rlSrc1, kCoreReg);
- rlResult = EvalLoc(cUnit, rlDest, kCoreReg, true);
- OpRegRegReg(cUnit, op, rlResult.lowReg, rlSrc1.lowReg, tReg);
- FreeTemp(cUnit, tReg);
+ rl_src1 = LoadValue(cu, rl_src1, kCoreReg);
+ rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ OpRegRegReg(cu, op, rl_result.low_reg, rl_src1.low_reg, t_reg);
+ FreeTemp(cu, t_reg);
} else {
- rlSrc1 = LoadValue(cUnit, rlSrc1, kCoreReg);
- rlSrc2 = LoadValue(cUnit, rlSrc2, kCoreReg);
- rlResult = EvalLoc(cUnit, rlDest, kCoreReg, true);
- OpRegRegReg(cUnit, op, rlResult.lowReg, rlSrc1.lowReg, rlSrc2.lowReg);
+ rl_src1 = LoadValue(cu, rl_src1, kCoreReg);
+ rl_src2 = LoadValue(cu, rl_src2, kCoreReg);
+ rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ OpRegRegReg(cu, op, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg);
}
}
- StoreValue(cUnit, rlDest, rlResult);
+ StoreValue(cu, rl_dest, rl_result);
} else {
- if (cUnit->instructionSet == kMips) {
- rlSrc1 = LoadValue(cUnit, rlSrc1, kCoreReg);
- rlSrc2 = LoadValue(cUnit, rlSrc2, kCoreReg);
- if (checkZero) {
- GenImmedCheck(cUnit, kCondEq, rlSrc2.lowReg, 0, kThrowDivZero);
+ if (cu->instruction_set == kMips) {
+ rl_src1 = LoadValue(cu, rl_src1, kCoreReg);
+ rl_src2 = LoadValue(cu, rl_src2, kCoreReg);
+ if (check_zero) {
+ GenImmedCheck(cu, kCondEq, rl_src2.low_reg, 0, kThrowDivZero);
}
- rlResult = GenDivRem(cUnit, rlDest, rlSrc1.lowReg, rlSrc2.lowReg, op == kOpDiv);
+ rl_result = GenDivRem(cu, rl_dest, rl_src1.low_reg, rl_src2.low_reg, op == kOpDiv);
} else {
- int funcOffset = ENTRYPOINT_OFFSET(pIdivmod);
- FlushAllRegs(cUnit); /* Send everything to home location */
- LoadValueDirectFixed(cUnit, rlSrc2, TargetReg(kArg1));
- int rTgt = CallHelperSetup(cUnit, funcOffset);
- LoadValueDirectFixed(cUnit, rlSrc1, TargetReg(kArg0));
- if (checkZero) {
- GenImmedCheck(cUnit, kCondEq, TargetReg(kArg1), 0, kThrowDivZero);
+ int func_offset = ENTRYPOINT_OFFSET(pIdivmod);
+ FlushAllRegs(cu); /* Send everything to home location */
+ LoadValueDirectFixed(cu, rl_src2, TargetReg(kArg1));
+ int r_tgt = CallHelperSetup(cu, func_offset);
+ LoadValueDirectFixed(cu, rl_src1, TargetReg(kArg0));
+ if (check_zero) {
+ GenImmedCheck(cu, kCondEq, TargetReg(kArg1), 0, kThrowDivZero);
}
// NOTE: callout here is not a safepoint
- CallHelper(cUnit, rTgt, funcOffset, false /* not a safepoint */ );
+ CallHelper(cu, r_tgt, func_offset, false /* not a safepoint */ );
if (op == kOpDiv)
- rlResult = GetReturn(cUnit, false);
+ rl_result = GetReturn(cu, false);
else
- rlResult = GetReturnAlt(cUnit);
+ rl_result = GetReturnAlt(cu);
}
- StoreValue(cUnit, rlDest, rlResult);
+ StoreValue(cu, rl_dest, rl_result);
}
return false;
}
@@ -1793,124 +1793,124 @@
return bit_posn;
}
-// Returns true if it added instructions to 'cUnit' to divide 'rlSrc' by 'lit'
-// and store the result in 'rlDest'.
-static bool HandleEasyDivide(CompilationUnit* cUnit, Instruction::Code dalvikOpcode,
- RegLocation rlSrc, RegLocation rlDest, int lit)
+// Returns true if it added instructions to 'cu' to divide 'rl_src' by 'lit'
+// and store the result in 'rl_dest'.
+static bool HandleEasyDivide(CompilationUnit* cu, Instruction::Code dalvik_opcode,
+ RegLocation rl_src, RegLocation rl_dest, int lit)
{
- if ((lit < 2) || ((cUnit->instructionSet != kThumb2) && !IsPowerOfTwo(lit))) {
+ if ((lit < 2) || ((cu->instruction_set != kThumb2) && !IsPowerOfTwo(lit))) {
return false;
}
// No divide instruction for Arm, so check for more special cases
- if ((cUnit->instructionSet == kThumb2) && !IsPowerOfTwo(lit)) {
- return SmallLiteralDivide(cUnit, dalvikOpcode, rlSrc, rlDest, lit);
+ if ((cu->instruction_set == kThumb2) && !IsPowerOfTwo(lit)) {
+ return SmallLiteralDivide(cu, dalvik_opcode, rl_src, rl_dest, lit);
}
int k = LowestSetBit(lit);
if (k >= 30) {
// Avoid special cases.
return false;
}
- bool div = (dalvikOpcode == Instruction::DIV_INT_LIT8 ||
- dalvikOpcode == Instruction::DIV_INT_LIT16);
- rlSrc = LoadValue(cUnit, rlSrc, kCoreReg);
- RegLocation rlResult = EvalLoc(cUnit, rlDest, kCoreReg, true);
+ bool div = (dalvik_opcode == Instruction::DIV_INT_LIT8 ||
+ dalvik_opcode == Instruction::DIV_INT_LIT16);
+ rl_src = LoadValue(cu, rl_src, kCoreReg);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
if (div) {
- int tReg = AllocTemp(cUnit);
+ int t_reg = AllocTemp(cu);
if (lit == 2) {
// Division by 2 is by far the most common division by constant.
- OpRegRegImm(cUnit, kOpLsr, tReg, rlSrc.lowReg, 32 - k);
- OpRegRegReg(cUnit, kOpAdd, tReg, tReg, rlSrc.lowReg);
- OpRegRegImm(cUnit, kOpAsr, rlResult.lowReg, tReg, k);
+ OpRegRegImm(cu, kOpLsr, t_reg, rl_src.low_reg, 32 - k);
+ OpRegRegReg(cu, kOpAdd, t_reg, t_reg, rl_src.low_reg);
+ OpRegRegImm(cu, kOpAsr, rl_result.low_reg, t_reg, k);
} else {
- OpRegRegImm(cUnit, kOpAsr, tReg, rlSrc.lowReg, 31);
- OpRegRegImm(cUnit, kOpLsr, tReg, tReg, 32 - k);
- OpRegRegReg(cUnit, kOpAdd, tReg, tReg, rlSrc.lowReg);
- OpRegRegImm(cUnit, kOpAsr, rlResult.lowReg, tReg, k);
+ OpRegRegImm(cu, kOpAsr, t_reg, rl_src.low_reg, 31);
+ OpRegRegImm(cu, kOpLsr, t_reg, t_reg, 32 - k);
+ OpRegRegReg(cu, kOpAdd, t_reg, t_reg, rl_src.low_reg);
+ OpRegRegImm(cu, kOpAsr, rl_result.low_reg, t_reg, k);
}
} else {
- int tReg1 = AllocTemp(cUnit);
- int tReg2 = AllocTemp(cUnit);
+ int t_reg1 = AllocTemp(cu);
+ int t_reg2 = AllocTemp(cu);
if (lit == 2) {
- OpRegRegImm(cUnit, kOpLsr, tReg1, rlSrc.lowReg, 32 - k);
- OpRegRegReg(cUnit, kOpAdd, tReg2, tReg1, rlSrc.lowReg);
- OpRegRegImm(cUnit, kOpAnd, tReg2, tReg2, lit -1);
- OpRegRegReg(cUnit, kOpSub, rlResult.lowReg, tReg2, tReg1);
+ OpRegRegImm(cu, kOpLsr, t_reg1, rl_src.low_reg, 32 - k);
+ OpRegRegReg(cu, kOpAdd, t_reg2, t_reg1, rl_src.low_reg);
+      OpRegRegImm(cu, kOpAnd, t_reg2, t_reg2, lit - 1);
+ OpRegRegReg(cu, kOpSub, rl_result.low_reg, t_reg2, t_reg1);
} else {
- OpRegRegImm(cUnit, kOpAsr, tReg1, rlSrc.lowReg, 31);
- OpRegRegImm(cUnit, kOpLsr, tReg1, tReg1, 32 - k);
- OpRegRegReg(cUnit, kOpAdd, tReg2, tReg1, rlSrc.lowReg);
- OpRegRegImm(cUnit, kOpAnd, tReg2, tReg2, lit - 1);
- OpRegRegReg(cUnit, kOpSub, rlResult.lowReg, tReg2, tReg1);
+ OpRegRegImm(cu, kOpAsr, t_reg1, rl_src.low_reg, 31);
+ OpRegRegImm(cu, kOpLsr, t_reg1, t_reg1, 32 - k);
+ OpRegRegReg(cu, kOpAdd, t_reg2, t_reg1, rl_src.low_reg);
+ OpRegRegImm(cu, kOpAnd, t_reg2, t_reg2, lit - 1);
+ OpRegRegReg(cu, kOpSub, rl_result.low_reg, t_reg2, t_reg1);
}
}
- StoreValue(cUnit, rlDest, rlResult);
+ StoreValue(cu, rl_dest, rl_result);
return true;
}
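A plain C++ model of the power-of-two paths above, assuming lit == (1 << k)
with 1 <= k < 30 and an arithmetic right shift of negative int32_t, as the
generated code relies on (the helper name is illustrative):

    int32_t EasyDivRemModel(int32_t src, int k, bool is_div) {
      // Bias negative inputs by (2^k - 1) so the arithmetic shift truncates
      // toward zero, matching Java division semantics.
      int32_t bias = static_cast<int32_t>(
          static_cast<uint32_t>(src >> 31) >> (32 - k));
      if (is_div) {
        return (src + bias) >> k;                       // quotient
      }
      return ((src + bias) & ((1 << k) - 1)) - bias;    // remainder
    }

The lit == 2 fast path takes the sign bit with a single logical shift instead
of the asr/lsr pair.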
-// Returns true if it added instructions to 'cUnit' to multiply 'rlSrc' by 'lit'
-// and store the result in 'rlDest'.
-static bool HandleEasyMultiply(CompilationUnit* cUnit, RegLocation rlSrc,
- RegLocation rlDest, int lit)
+// Returns true if it added instructions to 'cu' to multiply 'rl_src' by 'lit'
+// and store the result in 'rl_dest'.
+static bool HandleEasyMultiply(CompilationUnit* cu, RegLocation rl_src,
+ RegLocation rl_dest, int lit)
{
// Can we simplify this multiplication?
- bool powerOfTwo = false;
- bool popCountLE2 = false;
- bool powerOfTwoMinusOne = false;
+ bool power_of_two = false;
+ bool pop_count_le2 = false;
+ bool power_of_two_minus_one = false;
if (lit < 2) {
// Avoid special cases.
return false;
} else if (IsPowerOfTwo(lit)) {
- powerOfTwo = true;
+ power_of_two = true;
} else if (IsPopCountLE2(lit)) {
- popCountLE2 = true;
+ pop_count_le2 = true;
} else if (IsPowerOfTwo(lit + 1)) {
- powerOfTwoMinusOne = true;
+ power_of_two_minus_one = true;
} else {
return false;
}
- rlSrc = LoadValue(cUnit, rlSrc, kCoreReg);
- RegLocation rlResult = EvalLoc(cUnit, rlDest, kCoreReg, true);
- if (powerOfTwo) {
+ rl_src = LoadValue(cu, rl_src, kCoreReg);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ if (power_of_two) {
// Shift.
- OpRegRegImm(cUnit, kOpLsl, rlResult.lowReg, rlSrc.lowReg,
+ OpRegRegImm(cu, kOpLsl, rl_result.low_reg, rl_src.low_reg,
LowestSetBit(lit));
- } else if (popCountLE2) {
+ } else if (pop_count_le2) {
// Shift and add and shift.
- int firstBit = LowestSetBit(lit);
- int secondBit = LowestSetBit(lit ^ (1 << firstBit));
- GenMultiplyByTwoBitMultiplier(cUnit, rlSrc, rlResult, lit,
- firstBit, secondBit);
+ int first_bit = LowestSetBit(lit);
+ int second_bit = LowestSetBit(lit ^ (1 << first_bit));
+ GenMultiplyByTwoBitMultiplier(cu, rl_src, rl_result, lit,
+ first_bit, second_bit);
} else {
    // Reverse subtract: (src << n) - src, where lit == (1 << n) - 1.
- DCHECK(powerOfTwoMinusOne);
+ DCHECK(power_of_two_minus_one);
// TUNING: rsb dst, src, src lsl#LowestSetBit(lit + 1)
- int tReg = AllocTemp(cUnit);
- OpRegRegImm(cUnit, kOpLsl, tReg, rlSrc.lowReg, LowestSetBit(lit + 1));
- OpRegRegReg(cUnit, kOpSub, rlResult.lowReg, tReg, rlSrc.lowReg);
+ int t_reg = AllocTemp(cu);
+ OpRegRegImm(cu, kOpLsl, t_reg, rl_src.low_reg, LowestSetBit(lit + 1));
+ OpRegRegReg(cu, kOpSub, rl_result.low_reg, t_reg, rl_src.low_reg);
}
- StoreValue(cUnit, rlDest, rlResult);
+ StoreValue(cu, rl_dest, rl_result);
return true;
}
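Condensed model of the three strength-reduction cases, reusing the helpers
the code already calls (the function name is illustrative):

    int32_t EasyMulModel(int32_t src, int lit) {
      if (IsPowerOfTwo(lit)) {                 // lit == 1 << k: single shift
        return src << LowestSetBit(lit);
      }
      if (IsPopCountLE2(lit)) {                // lit == (1 << b1) | (1 << b2)
        int b1 = LowestSetBit(lit);
        int b2 = LowestSetBit(lit ^ (1 << b1));
        return (src << b1) + (src << b2);
      }
      // IsPowerOfTwo(lit + 1): lit == (1 << n) - 1, so lit * src == (src << n) - src
      return (src << LowestSetBit(lit + 1)) - src;
    }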
-bool GenArithOpIntLit(CompilationUnit* cUnit, Instruction::Code opcode,
- RegLocation rlDest, RegLocation rlSrc, int lit)
+bool GenArithOpIntLit(CompilationUnit* cu, Instruction::Code opcode,
+ RegLocation rl_dest, RegLocation rl_src, int lit)
{
- RegLocation rlResult;
+ RegLocation rl_result;
OpKind op = static_cast<OpKind>(0); /* Make gcc happy */
- int shiftOp = false;
- bool isDiv = false;
+ int shift_op = false;
+ bool is_div = false;
switch (opcode) {
case Instruction::RSUB_INT_LIT8:
case Instruction::RSUB_INT: {
- int tReg;
+ int t_reg;
//TUNING: add support for use of Arm rsub op
- rlSrc = LoadValue(cUnit, rlSrc, kCoreReg);
- tReg = AllocTemp(cUnit);
- LoadConstant(cUnit, tReg, lit);
- rlResult = EvalLoc(cUnit, rlDest, kCoreReg, true);
- OpRegRegReg(cUnit, kOpSub, rlResult.lowReg, tReg, rlSrc.lowReg);
- StoreValue(cUnit, rlDest, rlResult);
+ rl_src = LoadValue(cu, rl_src, kCoreReg);
+ t_reg = AllocTemp(cu);
+ LoadConstant(cu, t_reg, lit);
+ rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ OpRegRegReg(cu, kOpSub, rl_result.low_reg, t_reg, rl_src.low_reg);
+ StoreValue(cu, rl_dest, rl_result);
return false;
break;
}
@@ -1921,7 +1921,7 @@
break;
case Instruction::MUL_INT_LIT8:
case Instruction::MUL_INT_LIT16: {
- if (HandleEasyMultiply(cUnit, rlSrc, rlDest, lit)) {
+ if (HandleEasyMultiply(cu, rl_src, rl_dest, lit)) {
return false;
}
op = kOpMul;
@@ -1942,19 +1942,19 @@
case Instruction::SHL_INT_LIT8:
case Instruction::SHL_INT:
lit &= 31;
- shiftOp = true;
+ shift_op = true;
op = kOpLsl;
break;
case Instruction::SHR_INT_LIT8:
case Instruction::SHR_INT:
lit &= 31;
- shiftOp = true;
+ shift_op = true;
op = kOpAsr;
break;
case Instruction::USHR_INT_LIT8:
case Instruction::USHR_INT:
lit &= 31;
- shiftOp = true;
+ shift_op = true;
op = kOpLsr;
break;
@@ -1963,318 +1963,318 @@
case Instruction::REM_INT_LIT8:
case Instruction::REM_INT_LIT16: {
if (lit == 0) {
- GenImmedCheck(cUnit, kCondAl, 0, 0, kThrowDivZero);
+ GenImmedCheck(cu, kCondAl, 0, 0, kThrowDivZero);
return false;
}
- if (HandleEasyDivide(cUnit, opcode, rlSrc, rlDest, lit)) {
+ if (HandleEasyDivide(cu, opcode, rl_src, rl_dest, lit)) {
return false;
}
if ((opcode == Instruction::DIV_INT_LIT8) ||
(opcode == Instruction::DIV_INT_LIT16)) {
- isDiv = true;
+ is_div = true;
} else {
- isDiv = false;
+ is_div = false;
}
- if (cUnit->instructionSet == kMips) {
- rlSrc = LoadValue(cUnit, rlSrc, kCoreReg);
- rlResult = GenDivRemLit(cUnit, rlDest, rlSrc.lowReg, lit, isDiv);
+ if (cu->instruction_set == kMips) {
+ rl_src = LoadValue(cu, rl_src, kCoreReg);
+ rl_result = GenDivRemLit(cu, rl_dest, rl_src.low_reg, lit, is_div);
} else {
- FlushAllRegs(cUnit); /* Everything to home location */
- LoadValueDirectFixed(cUnit, rlSrc, TargetReg(kArg0));
- Clobber(cUnit, TargetReg(kArg0));
- int funcOffset = ENTRYPOINT_OFFSET(pIdivmod);
- CallRuntimeHelperRegImm(cUnit, funcOffset, TargetReg(kArg0), lit, false);
- if (isDiv)
- rlResult = GetReturn(cUnit, false);
+ FlushAllRegs(cu); /* Everything to home location */
+ LoadValueDirectFixed(cu, rl_src, TargetReg(kArg0));
+ Clobber(cu, TargetReg(kArg0));
+ int func_offset = ENTRYPOINT_OFFSET(pIdivmod);
+ CallRuntimeHelperRegImm(cu, func_offset, TargetReg(kArg0), lit, false);
+ if (is_div)
+ rl_result = GetReturn(cu, false);
else
- rlResult = GetReturnAlt(cUnit);
+ rl_result = GetReturnAlt(cu);
}
- StoreValue(cUnit, rlDest, rlResult);
+ StoreValue(cu, rl_dest, rl_result);
return false;
break;
}
default:
return true;
}
- rlSrc = LoadValue(cUnit, rlSrc, kCoreReg);
- rlResult = EvalLoc(cUnit, rlDest, kCoreReg, true);
+ rl_src = LoadValue(cu, rl_src, kCoreReg);
+ rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
  // Avoid shifts by literal 0 (no Thumb support); change to a register copy.
- if (shiftOp && (lit == 0)) {
- OpRegCopy(cUnit, rlResult.lowReg, rlSrc.lowReg);
+ if (shift_op && (lit == 0)) {
+ OpRegCopy(cu, rl_result.low_reg, rl_src.low_reg);
} else {
- OpRegRegImm(cUnit, op, rlResult.lowReg, rlSrc.lowReg, lit);
+ OpRegRegImm(cu, op, rl_result.low_reg, rl_src.low_reg, lit);
}
- StoreValue(cUnit, rlDest, rlResult);
+ StoreValue(cu, rl_dest, rl_result);
return false;
}
-bool GenArithOpLong(CompilationUnit* cUnit, Instruction::Code opcode, RegLocation rlDest,
- RegLocation rlSrc1, RegLocation rlSrc2)
+bool GenArithOpLong(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2)
{
- RegLocation rlResult;
- OpKind firstOp = kOpBkpt;
- OpKind secondOp = kOpBkpt;
- bool callOut = false;
- bool checkZero = false;
- int funcOffset;
- int retReg = TargetReg(kRet0);
+ RegLocation rl_result;
+ OpKind first_op = kOpBkpt;
+ OpKind second_op = kOpBkpt;
+ bool call_out = false;
+ bool check_zero = false;
+ int func_offset;
+ int ret_reg = TargetReg(kRet0);
switch (opcode) {
case Instruction::NOT_LONG:
- rlSrc2 = LoadValueWide(cUnit, rlSrc2, kCoreReg);
- rlResult = EvalLoc(cUnit, rlDest, kCoreReg, true);
+ rl_src2 = LoadValueWide(cu, rl_src2, kCoreReg);
+ rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
// Check for destructive overlap
- if (rlResult.lowReg == rlSrc2.highReg) {
- int tReg = AllocTemp(cUnit);
- OpRegCopy(cUnit, tReg, rlSrc2.highReg);
- OpRegReg(cUnit, kOpMvn, rlResult.lowReg, rlSrc2.lowReg);
- OpRegReg(cUnit, kOpMvn, rlResult.highReg, tReg);
- FreeTemp(cUnit, tReg);
+ if (rl_result.low_reg == rl_src2.high_reg) {
+ int t_reg = AllocTemp(cu);
+ OpRegCopy(cu, t_reg, rl_src2.high_reg);
+ OpRegReg(cu, kOpMvn, rl_result.low_reg, rl_src2.low_reg);
+ OpRegReg(cu, kOpMvn, rl_result.high_reg, t_reg);
+ FreeTemp(cu, t_reg);
} else {
- OpRegReg(cUnit, kOpMvn, rlResult.lowReg, rlSrc2.lowReg);
- OpRegReg(cUnit, kOpMvn, rlResult.highReg, rlSrc2.highReg);
+ OpRegReg(cu, kOpMvn, rl_result.low_reg, rl_src2.low_reg);
+ OpRegReg(cu, kOpMvn, rl_result.high_reg, rl_src2.high_reg);
}
- StoreValueWide(cUnit, rlDest, rlResult);
+ StoreValueWide(cu, rl_dest, rl_result);
return false;
break;
case Instruction::ADD_LONG:
case Instruction::ADD_LONG_2ADDR:
- if (cUnit->instructionSet != kThumb2) {
- return GenAddLong(cUnit, rlDest, rlSrc1, rlSrc2);
+ if (cu->instruction_set != kThumb2) {
+ return GenAddLong(cu, rl_dest, rl_src1, rl_src2);
}
- firstOp = kOpAdd;
- secondOp = kOpAdc;
+ first_op = kOpAdd;
+ second_op = kOpAdc;
break;
case Instruction::SUB_LONG:
case Instruction::SUB_LONG_2ADDR:
- if (cUnit->instructionSet != kThumb2) {
- return GenSubLong(cUnit, rlDest, rlSrc1, rlSrc2);
+ if (cu->instruction_set != kThumb2) {
+ return GenSubLong(cu, rl_dest, rl_src1, rl_src2);
}
- firstOp = kOpSub;
- secondOp = kOpSbc;
+ first_op = kOpSub;
+ second_op = kOpSbc;
break;
case Instruction::MUL_LONG:
case Instruction::MUL_LONG_2ADDR:
- callOut = true;
- retReg = TargetReg(kRet0);
- funcOffset = ENTRYPOINT_OFFSET(pLmul);
+ call_out = true;
+ ret_reg = TargetReg(kRet0);
+ func_offset = ENTRYPOINT_OFFSET(pLmul);
break;
case Instruction::DIV_LONG:
case Instruction::DIV_LONG_2ADDR:
- callOut = true;
- checkZero = true;
- retReg = TargetReg(kRet0);
- funcOffset = ENTRYPOINT_OFFSET(pLdiv);
+ call_out = true;
+ check_zero = true;
+ ret_reg = TargetReg(kRet0);
+ func_offset = ENTRYPOINT_OFFSET(pLdiv);
break;
case Instruction::REM_LONG:
case Instruction::REM_LONG_2ADDR:
- callOut = true;
- checkZero = true;
- funcOffset = ENTRYPOINT_OFFSET(pLdivmod);
+ call_out = true;
+ check_zero = true;
+ func_offset = ENTRYPOINT_OFFSET(pLdivmod);
/* NOTE - for Arm, result is in kArg2/kArg3 instead of kRet0/kRet1 */
- retReg = (cUnit->instructionSet == kThumb2) ? TargetReg(kArg2) : TargetReg(kRet0);
+ ret_reg = (cu->instruction_set == kThumb2) ? TargetReg(kArg2) : TargetReg(kRet0);
break;
case Instruction::AND_LONG_2ADDR:
case Instruction::AND_LONG:
- if (cUnit->instructionSet == kX86) {
- return GenAndLong(cUnit, rlDest, rlSrc1, rlSrc2);
+ if (cu->instruction_set == kX86) {
+ return GenAndLong(cu, rl_dest, rl_src1, rl_src2);
}
- firstOp = kOpAnd;
- secondOp = kOpAnd;
+ first_op = kOpAnd;
+ second_op = kOpAnd;
break;
case Instruction::OR_LONG:
case Instruction::OR_LONG_2ADDR:
- if (cUnit->instructionSet == kX86) {
- return GenOrLong(cUnit, rlDest, rlSrc1, rlSrc2);
+ if (cu->instruction_set == kX86) {
+ return GenOrLong(cu, rl_dest, rl_src1, rl_src2);
}
- firstOp = kOpOr;
- secondOp = kOpOr;
+ first_op = kOpOr;
+ second_op = kOpOr;
break;
case Instruction::XOR_LONG:
case Instruction::XOR_LONG_2ADDR:
- if (cUnit->instructionSet == kX86) {
- return GenXorLong(cUnit, rlDest, rlSrc1, rlSrc2);
+ if (cu->instruction_set == kX86) {
+ return GenXorLong(cu, rl_dest, rl_src1, rl_src2);
}
- firstOp = kOpXor;
- secondOp = kOpXor;
+ first_op = kOpXor;
+ second_op = kOpXor;
break;
case Instruction::NEG_LONG: {
- return GenNegLong(cUnit, rlDest, rlSrc2);
+ return GenNegLong(cu, rl_dest, rl_src2);
}
default:
LOG(FATAL) << "Invalid long arith op";
}
- if (!callOut) {
- GenLong3Addr(cUnit, firstOp, secondOp, rlDest, rlSrc1, rlSrc2);
+ if (!call_out) {
+ GenLong3Addr(cu, first_op, second_op, rl_dest, rl_src1, rl_src2);
} else {
- FlushAllRegs(cUnit); /* Send everything to home location */
- if (checkZero) {
- LoadValueDirectWideFixed(cUnit, rlSrc2, TargetReg(kArg2), TargetReg(kArg3));
- int rTgt = CallHelperSetup(cUnit, funcOffset);
- GenDivZeroCheck(cUnit, TargetReg(kArg2), TargetReg(kArg3));
- LoadValueDirectWideFixed(cUnit, rlSrc1, TargetReg(kArg0), TargetReg(kArg1));
+ FlushAllRegs(cu); /* Send everything to home location */
+ if (check_zero) {
+ LoadValueDirectWideFixed(cu, rl_src2, TargetReg(kArg2), TargetReg(kArg3));
+ int r_tgt = CallHelperSetup(cu, func_offset);
+ GenDivZeroCheck(cu, TargetReg(kArg2), TargetReg(kArg3));
+ LoadValueDirectWideFixed(cu, rl_src1, TargetReg(kArg0), TargetReg(kArg1));
// NOTE: callout here is not a safepoint
- CallHelper(cUnit, rTgt, funcOffset, false /* not safepoint */);
+ CallHelper(cu, r_tgt, func_offset, false /* not safepoint */);
} else {
- CallRuntimeHelperRegLocationRegLocation(cUnit, funcOffset,
- rlSrc1, rlSrc2, false);
+ CallRuntimeHelperRegLocationRegLocation(cu, func_offset,
+ rl_src1, rl_src2, false);
}
    // Adjust return regs to handle the case of rem returning kArg2/kArg3
- if (retReg == TargetReg(kRet0))
- rlResult = GetReturnWide(cUnit, false);
+ if (ret_reg == TargetReg(kRet0))
+ rl_result = GetReturnWide(cu, false);
else
- rlResult = GetReturnWideAlt(cUnit);
- StoreValueWide(cUnit, rlDest, rlResult);
+ rl_result = GetReturnWideAlt(cu);
+ StoreValueWide(cu, rl_dest, rl_result);
}
return false;
}
-bool GenConversionCall(CompilationUnit* cUnit, int funcOffset,
- RegLocation rlDest, RegLocation rlSrc)
+bool GenConversionCall(CompilationUnit* cu, int func_offset,
+ RegLocation rl_dest, RegLocation rl_src)
{
/*
* Don't optimize the register usage since it calls out to support
* functions
*/
- FlushAllRegs(cUnit); /* Send everything to home location */
- if (rlSrc.wide) {
- LoadValueDirectWideFixed(cUnit, rlSrc, rlSrc.fp ? TargetReg(kFArg0) : TargetReg(kArg0),
- rlSrc.fp ? TargetReg(kFArg1) : TargetReg(kArg1));
+ FlushAllRegs(cu); /* Send everything to home location */
+ if (rl_src.wide) {
+ LoadValueDirectWideFixed(cu, rl_src, rl_src.fp ? TargetReg(kFArg0) : TargetReg(kArg0),
+ rl_src.fp ? TargetReg(kFArg1) : TargetReg(kArg1));
} else {
- LoadValueDirectFixed(cUnit, rlSrc, rlSrc.fp ? TargetReg(kFArg0) : TargetReg(kArg0));
+ LoadValueDirectFixed(cu, rl_src, rl_src.fp ? TargetReg(kFArg0) : TargetReg(kArg0));
}
- CallRuntimeHelperRegLocation(cUnit, funcOffset, rlSrc, false);
- if (rlDest.wide) {
- RegLocation rlResult;
- rlResult = GetReturnWide(cUnit, rlDest.fp);
- StoreValueWide(cUnit, rlDest, rlResult);
+ CallRuntimeHelperRegLocation(cu, func_offset, rl_src, false);
+ if (rl_dest.wide) {
+ RegLocation rl_result;
+ rl_result = GetReturnWide(cu, rl_dest.fp);
+ StoreValueWide(cu, rl_dest, rl_result);
} else {
- RegLocation rlResult;
- rlResult = GetReturn(cUnit, rlDest.fp);
- StoreValue(cUnit, rlDest, rlResult);
+ RegLocation rl_result;
+ rl_result = GetReturn(cu, rl_dest.fp);
+ StoreValue(cu, rl_dest, rl_result);
}
return false;
}
-bool GenArithOpFloatPortable(CompilationUnit* cUnit, Instruction::Code opcode,
- RegLocation rlDest, RegLocation rlSrc1,
- RegLocation rlSrc2)
+bool GenArithOpFloatPortable(CompilationUnit* cu, Instruction::Code opcode,
+ RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2)
{
- RegLocation rlResult;
- int funcOffset;
+ RegLocation rl_result;
+ int func_offset;
switch (opcode) {
case Instruction::ADD_FLOAT_2ADDR:
case Instruction::ADD_FLOAT:
- funcOffset = ENTRYPOINT_OFFSET(pFadd);
+ func_offset = ENTRYPOINT_OFFSET(pFadd);
break;
case Instruction::SUB_FLOAT_2ADDR:
case Instruction::SUB_FLOAT:
- funcOffset = ENTRYPOINT_OFFSET(pFsub);
+ func_offset = ENTRYPOINT_OFFSET(pFsub);
break;
case Instruction::DIV_FLOAT_2ADDR:
case Instruction::DIV_FLOAT:
- funcOffset = ENTRYPOINT_OFFSET(pFdiv);
+ func_offset = ENTRYPOINT_OFFSET(pFdiv);
break;
case Instruction::MUL_FLOAT_2ADDR:
case Instruction::MUL_FLOAT:
- funcOffset = ENTRYPOINT_OFFSET(pFmul);
+ func_offset = ENTRYPOINT_OFFSET(pFmul);
break;
case Instruction::REM_FLOAT_2ADDR:
case Instruction::REM_FLOAT:
- funcOffset = ENTRYPOINT_OFFSET(pFmodf);
+ func_offset = ENTRYPOINT_OFFSET(pFmodf);
break;
case Instruction::NEG_FLOAT: {
- GenNegFloat(cUnit, rlDest, rlSrc1);
+ GenNegFloat(cu, rl_dest, rl_src1);
return false;
}
default:
return true;
}
- FlushAllRegs(cUnit); /* Send everything to home location */
- CallRuntimeHelperRegLocationRegLocation(cUnit, funcOffset, rlSrc1, rlSrc2, false);
- rlResult = GetReturn(cUnit, true);
- StoreValue(cUnit, rlDest, rlResult);
+ FlushAllRegs(cu); /* Send everything to home location */
+ CallRuntimeHelperRegLocationRegLocation(cu, func_offset, rl_src1, rl_src2, false);
+ rl_result = GetReturn(cu, true);
+ StoreValue(cu, rl_dest, rl_result);
return false;
}
-bool GenArithOpDoublePortable(CompilationUnit* cUnit, Instruction::Code opcode,
- RegLocation rlDest, RegLocation rlSrc1,
- RegLocation rlSrc2)
+bool GenArithOpDoublePortable(CompilationUnit* cu, Instruction::Code opcode,
+ RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2)
{
- RegLocation rlResult;
- int funcOffset;
+ RegLocation rl_result;
+ int func_offset;
switch (opcode) {
case Instruction::ADD_DOUBLE_2ADDR:
case Instruction::ADD_DOUBLE:
- funcOffset = ENTRYPOINT_OFFSET(pDadd);
+ func_offset = ENTRYPOINT_OFFSET(pDadd);
break;
case Instruction::SUB_DOUBLE_2ADDR:
case Instruction::SUB_DOUBLE:
- funcOffset = ENTRYPOINT_OFFSET(pDsub);
+ func_offset = ENTRYPOINT_OFFSET(pDsub);
break;
case Instruction::DIV_DOUBLE_2ADDR:
case Instruction::DIV_DOUBLE:
- funcOffset = ENTRYPOINT_OFFSET(pDdiv);
+ func_offset = ENTRYPOINT_OFFSET(pDdiv);
break;
case Instruction::MUL_DOUBLE_2ADDR:
case Instruction::MUL_DOUBLE:
- funcOffset = ENTRYPOINT_OFFSET(pDmul);
+ func_offset = ENTRYPOINT_OFFSET(pDmul);
break;
case Instruction::REM_DOUBLE_2ADDR:
case Instruction::REM_DOUBLE:
- funcOffset = ENTRYPOINT_OFFSET(pFmod);
+ func_offset = ENTRYPOINT_OFFSET(pFmod);
break;
case Instruction::NEG_DOUBLE: {
- GenNegDouble(cUnit, rlDest, rlSrc1);
+ GenNegDouble(cu, rl_dest, rl_src1);
return false;
}
default:
return true;
}
- FlushAllRegs(cUnit); /* Send everything to home location */
- CallRuntimeHelperRegLocationRegLocation(cUnit, funcOffset, rlSrc1, rlSrc2, false);
- rlResult = GetReturnWide(cUnit, true);
- StoreValueWide(cUnit, rlDest, rlResult);
+ FlushAllRegs(cu); /* Send everything to home location */
+ CallRuntimeHelperRegLocationRegLocation(cu, func_offset, rl_src1, rl_src2, false);
+ rl_result = GetReturnWide(cu, true);
+ StoreValueWide(cu, rl_dest, rl_result);
return false;
}
-bool GenConversionPortable(CompilationUnit* cUnit, Instruction::Code opcode,
- RegLocation rlDest, RegLocation rlSrc)
+bool GenConversionPortable(CompilationUnit* cu, Instruction::Code opcode,
+ RegLocation rl_dest, RegLocation rl_src)
{
switch (opcode) {
case Instruction::INT_TO_FLOAT:
- return GenConversionCall(cUnit, ENTRYPOINT_OFFSET(pI2f),
- rlDest, rlSrc);
+ return GenConversionCall(cu, ENTRYPOINT_OFFSET(pI2f),
+ rl_dest, rl_src);
case Instruction::FLOAT_TO_INT:
- return GenConversionCall(cUnit, ENTRYPOINT_OFFSET(pF2iz),
- rlDest, rlSrc);
+ return GenConversionCall(cu, ENTRYPOINT_OFFSET(pF2iz),
+ rl_dest, rl_src);
case Instruction::DOUBLE_TO_FLOAT:
- return GenConversionCall(cUnit, ENTRYPOINT_OFFSET(pD2f),
- rlDest, rlSrc);
+ return GenConversionCall(cu, ENTRYPOINT_OFFSET(pD2f),
+ rl_dest, rl_src);
case Instruction::FLOAT_TO_DOUBLE:
- return GenConversionCall(cUnit, ENTRYPOINT_OFFSET(pF2d),
- rlDest, rlSrc);
+ return GenConversionCall(cu, ENTRYPOINT_OFFSET(pF2d),
+ rl_dest, rl_src);
case Instruction::INT_TO_DOUBLE:
- return GenConversionCall(cUnit, ENTRYPOINT_OFFSET(pI2d),
- rlDest, rlSrc);
+ return GenConversionCall(cu, ENTRYPOINT_OFFSET(pI2d),
+ rl_dest, rl_src);
case Instruction::DOUBLE_TO_INT:
- return GenConversionCall(cUnit, ENTRYPOINT_OFFSET(pD2iz),
- rlDest, rlSrc);
+ return GenConversionCall(cu, ENTRYPOINT_OFFSET(pD2iz),
+ rl_dest, rl_src);
case Instruction::FLOAT_TO_LONG:
- return GenConversionCall(cUnit, ENTRYPOINT_OFFSET(pF2l),
- rlDest, rlSrc);
+ return GenConversionCall(cu, ENTRYPOINT_OFFSET(pF2l),
+ rl_dest, rl_src);
case Instruction::LONG_TO_FLOAT:
- return GenConversionCall(cUnit, ENTRYPOINT_OFFSET(pL2f),
- rlDest, rlSrc);
+ return GenConversionCall(cu, ENTRYPOINT_OFFSET(pL2f),
+ rl_dest, rl_src);
case Instruction::DOUBLE_TO_LONG:
- return GenConversionCall(cUnit, ENTRYPOINT_OFFSET(pD2l),
- rlDest, rlSrc);
+ return GenConversionCall(cu, ENTRYPOINT_OFFSET(pD2l),
+ rl_dest, rl_src);
case Instruction::LONG_TO_DOUBLE:
- return GenConversionCall(cUnit, ENTRYPOINT_OFFSET(pL2d),
- rlDest, rlSrc);
+ return GenConversionCall(cu, ENTRYPOINT_OFFSET(pL2d),
+ rl_dest, rl_src);
default:
return true;
}
@@ -2282,34 +2282,34 @@
}
/* Check if we need to check for pending suspend request */
-void GenSuspendTest(CompilationUnit* cUnit, int optFlags)
+void GenSuspendTest(CompilationUnit* cu, int opt_flags)
{
- if (NO_SUSPEND || (optFlags & MIR_IGNORE_SUSPEND_CHECK)) {
+ if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
return;
}
- FlushAllRegs(cUnit);
- LIR* branch = OpTestSuspend(cUnit, NULL);
- LIR* retLab = NewLIR0(cUnit, kPseudoTargetLabel);
- LIR* target = RawLIR(cUnit, cUnit->currentDalvikOffset, kPseudoSuspendTarget,
- reinterpret_cast<uintptr_t>(retLab), cUnit->currentDalvikOffset);
+ FlushAllRegs(cu);
+ LIR* branch = OpTestSuspend(cu, NULL);
+ LIR* ret_lab = NewLIR0(cu, kPseudoTargetLabel);
+ LIR* target = RawLIR(cu, cu->current_dalvik_offset, kPseudoSuspendTarget,
+ reinterpret_cast<uintptr_t>(ret_lab), cu->current_dalvik_offset);
branch->target = target;
- InsertGrowableList(cUnit, &cUnit->suspendLaunchpads, reinterpret_cast<uintptr_t>(target));
+ InsertGrowableList(cu, &cu->suspend_launchpads, reinterpret_cast<uintptr_t>(target));
}
/* Check if we need to check for pending suspend request */
-void GenSuspendTestAndBranch(CompilationUnit* cUnit, int optFlags, LIR* target)
+void GenSuspendTestAndBranch(CompilationUnit* cu, int opt_flags, LIR* target)
{
- if (NO_SUSPEND || (optFlags & MIR_IGNORE_SUSPEND_CHECK)) {
- OpUnconditionalBranch(cUnit, target);
+ if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
+ OpUnconditionalBranch(cu, target);
return;
}
- OpTestSuspend(cUnit, target);
- LIR* launchPad =
- RawLIR(cUnit, cUnit->currentDalvikOffset, kPseudoSuspendTarget,
- reinterpret_cast<uintptr_t>(target), cUnit->currentDalvikOffset);
- FlushAllRegs(cUnit);
- OpUnconditionalBranch(cUnit, launchPad);
- InsertGrowableList(cUnit, &cUnit->suspendLaunchpads, reinterpret_cast<uintptr_t>(launchPad));
+ OpTestSuspend(cu, target);
+ LIR* launch_pad =
+ RawLIR(cu, cu->current_dalvik_offset, kPseudoSuspendTarget,
+ reinterpret_cast<uintptr_t>(target), cu->current_dalvik_offset);
+ FlushAllRegs(cu);
+ OpUnconditionalBranch(cu, launch_pad);
+ InsertGrowableList(cu, &cu->suspend_launchpads, reinterpret_cast<uintptr_t>(launch_pad));
}
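Both suspend helpers share the launch-pad pattern: the fast path falls
through, while the slow path branches to a kPseudoSuspendTarget that
HandleSuspendLaunchPads later expands into the actual runtime call. Roughly:

    //   OpTestSuspend --(suspend flag set)--> launch pad --> runtime check
    //        |                                                    |
    //        +--(flag clear)--> ret_lab / target <----------------+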
} // namespace art
diff --git a/src/compiler/codegen/gen_common.h b/src/compiler/codegen/gen_common.h
index 3254756..413d828 100644
--- a/src/compiler/codegen/gen_common.h
+++ b/src/compiler/codegen/gen_common.h
@@ -17,64 +17,64 @@
#ifndef ART_SRC_COMPILER_CODEGEN_GENCOMMON_H_
#define ART_SRC_COMPILER_CODEGEN_GENCOMMON_H_
-void MarkSafepointPC(CompilationUnit* cUnit, LIR* inst);
-void CallRuntimeHelperImm(CompilationUnit* cUnit, int helperOffset, int arg0, bool safepointPC);
-void CallRuntimeHelperReg(CompilationUnit* cUnit, int helperOffset, int arg0, bool safepointPC);
-void CallRuntimeHelperRegLocation(CompilationUnit* cUnit, int helperOffset, RegLocation arg0, bool safepointPC);
-void CallRuntimeHelperImmImm(CompilationUnit* cUnit, int helperOffset, int arg0, int arg1, bool safepointPC);
-void CallRuntimeHelperImmRegLocation(CompilationUnit* cUnit, int helperOffset, int arg0, RegLocation arg1, bool safepointPC);
-void CallRuntimeHelperRegLocationImm(CompilationUnit* cUnit, int helperOffset, RegLocation arg0, int arg1, bool safepointPC);
-void CallRuntimeHelperImmReg(CompilationUnit* cUnit, int helperOffset, int arg0, int arg1, bool safepointPC);
-void CallRuntimeHelperRegImm(CompilationUnit* cUnit, int helperOffset, int arg0, int arg1, bool safepointPC);
-void CallRuntimeHelperImmMethod(CompilationUnit* cUnit, int helperOffset, int arg0, bool safepointPC);
-void CallRuntimeHelperRegLocationRegLocation(CompilationUnit* cUnit, int helperOffset, RegLocation arg0, RegLocation arg1, bool safepointPC);
-void CallRuntimeHelperRegReg(CompilationUnit* cUnit, int helperOffset, int arg0, int arg1, bool safepointPC);
-void CallRuntimeHelperRegRegImm(CompilationUnit* cUnit, int helperOffset, int arg0, int arg1, int arg2, bool safepointPC);
-void CallRuntimeHelperImmMethodRegLocation(CompilationUnit* cUnit, int helperOffset, int arg0, RegLocation arg2, bool safepointPC);
-void CallRuntimeHelperImmMethodImm(CompilationUnit* cUnit, int helperOffset, int arg0, int arg2, bool safepointPC);
-void CallRuntimeHelperImmRegLocationRegLocation(CompilationUnit* cUnit, int helperOffset, int arg0, RegLocation arg1, RegLocation arg2, bool safepointPC);
-void GenBarrier(CompilationUnit* cUnit);
-LIR* OpUnconditionalBranch(CompilationUnit* cUnit, LIR* target);
-LIR* GenCheck(CompilationUnit* cUnit, ConditionCode cCode, ThrowKind kind);
-LIR* GenImmedCheck(CompilationUnit* cUnit, ConditionCode cCode, int reg, int immVal, ThrowKind kind);
-LIR* GenNullCheck(CompilationUnit* cUnit, int sReg, int mReg, int optFlags);
-LIR* GenRegRegCheck(CompilationUnit* cUnit, ConditionCode cCode, int reg1, int reg2, ThrowKind kind);
-void GenCompareAndBranch(CompilationUnit* cUnit, Instruction::Code opcode, RegLocation rlSrc1, RegLocation rlSrc2, LIR* taken, LIR* fallThrough);
-void GenCompareZeroAndBranch(CompilationUnit* cUnit, Instruction::Code opcode, RegLocation rlSrc, LIR* taken, LIR* fallThrough);
-void GenIntToLong(CompilationUnit* cUnit, RegLocation rlDest, RegLocation rlSrc);
-void GenIntNarrowing(CompilationUnit* cUnit, Instruction::Code opcode, RegLocation rlDest, RegLocation rlSrc);
-void GenNewArray(CompilationUnit* cUnit, uint32_t type_idx, RegLocation rlDest, RegLocation rlSrc);
-void GenFilledNewArray(CompilationUnit* cUnit, CallInfo* info);
-void GenSput(CompilationUnit* cUnit, uint32_t fieldIdx, RegLocation rlSrc, bool isLongOrDouble, bool isObject);
-void GenSget(CompilationUnit* cUnit, uint32_t fieldIdx, RegLocation rlDest, bool isLongOrDouble, bool isObject);
-void GenShowTarget(CompilationUnit* cUnit);
-void HandleSuspendLaunchPads(CompilationUnit *cUnit);
-void HandleIntrinsicLaunchPads(CompilationUnit *cUnit);
-void HandleThrowLaunchPads(CompilationUnit *cUnit);
-void SetupResourceMasks(CompilationUnit* cUnit, LIR* lir);
-bool FastInstance(CompilationUnit* cUnit, uint32_t fieldIdx, int& fieldOffset, bool& isVolatile, bool isPut);
-void GenIGet(CompilationUnit* cUnit, uint32_t fieldIdx, int optFlags, OpSize size, RegLocation rlDest, RegLocation rlObj, bool isLongOrDouble, bool isObject);
-void GenIPut(CompilationUnit* cUnit, uint32_t fieldIdx, int optFlags, OpSize size, RegLocation rlSrc, RegLocation rlObj, bool isLongOrDouble, bool isObject);
-void GenConstClass(CompilationUnit* cUnit, uint32_t type_idx, RegLocation rlDest);
-void GenConstString(CompilationUnit* cUnit, uint32_t string_idx, RegLocation rlDest);
-void GenNewInstance(CompilationUnit* cUnit, uint32_t type_idx, RegLocation rlDest);
-void GenMoveException(CompilationUnit* cUnit, RegLocation rlDest);
-void GenThrow(CompilationUnit* cUnit, RegLocation rlSrc);
-void GenInstanceof(CompilationUnit* cUnit, uint32_t type_idx, RegLocation rlDest, RegLocation rlSrc);
-void GenCheckCast(CompilationUnit* cUnit, uint32_t type_idx, RegLocation rlSrc);
-void GenArrayObjPut(CompilationUnit* cUnit, int optFlags, RegLocation rlArray, RegLocation rlIndex, RegLocation rlSrc, int scale);
-void GenArrayGet(CompilationUnit* cUnit, int optFlags, OpSize size, RegLocation rlArray, RegLocation rlIndex, RegLocation rlDest, int scale);
-void GenArrayPut(CompilationUnit* cUnit, int optFlags, OpSize size, RegLocation rlArray, RegLocation rlIndex, RegLocation rlSrc, int scale);
-void GenLong3Addr(CompilationUnit* cUnit, OpKind firstOp, OpKind secondOp, RegLocation rlDest, RegLocation rlSrc1, RegLocation rlSrc2);
-bool GenShiftOpLong(CompilationUnit* cUnit, Instruction::Code opcode, RegLocation rlDest, RegLocation rlSrc1, RegLocation rlShift);
-bool GenArithOpInt(CompilationUnit* cUnit, Instruction::Code opcode, RegLocation rlDest, RegLocation rlSrc1, RegLocation rlSrc2);
-bool GenArithOpIntLit(CompilationUnit* cUnit, Instruction::Code opcode, RegLocation rlDest, RegLocation rlSrc, int lit);
-bool GenArithOpLong(CompilationUnit* cUnit, Instruction::Code opcode, RegLocation rlDest, RegLocation rlSrc1, RegLocation rlSrc2);
-bool GenConversionCall(CompilationUnit* cUnit, int funcOffset, RegLocation rlDest, RegLocation rlSrc);
-bool GenArithOpFloatPortable(CompilationUnit* cUnit, Instruction::Code opcode, RegLocation rlDest, RegLocation rlSrc1, RegLocation rlSrc2);
-bool GenArithOpDoublePortable(CompilationUnit* cUnit, Instruction::Code opcode, RegLocation rlDest, RegLocation rlSrc1, RegLocation rlSrc2);
-bool GenConversionPortable(CompilationUnit* cUnit, Instruction::Code opcode, RegLocation rlDest, RegLocation rlSrc);
-void GenSuspendTest(CompilationUnit* cUnit, int optFlags);
-void GenSuspendTestAndBranch(CompilationUnit* cUnit, int optFlags, LIR* target);
+void MarkSafepointPC(CompilationUnit* cu, LIR* inst);
+void CallRuntimeHelperImm(CompilationUnit* cu, int helper_offset, int arg0, bool safepoint_pc);
+void CallRuntimeHelperReg(CompilationUnit* cu, int helper_offset, int arg0, bool safepoint_pc);
+void CallRuntimeHelperRegLocation(CompilationUnit* cu, int helper_offset, RegLocation arg0, bool safepoint_pc);
+void CallRuntimeHelperImmImm(CompilationUnit* cu, int helper_offset, int arg0, int arg1, bool safepoint_pc);
+void CallRuntimeHelperImmRegLocation(CompilationUnit* cu, int helper_offset, int arg0, RegLocation arg1, bool safepoint_pc);
+void CallRuntimeHelperRegLocationImm(CompilationUnit* cu, int helper_offset, RegLocation arg0, int arg1, bool safepoint_pc);
+void CallRuntimeHelperImmReg(CompilationUnit* cu, int helper_offset, int arg0, int arg1, bool safepoint_pc);
+void CallRuntimeHelperRegImm(CompilationUnit* cu, int helper_offset, int arg0, int arg1, bool safepoint_pc);
+void CallRuntimeHelperImmMethod(CompilationUnit* cu, int helper_offset, int arg0, bool safepoint_pc);
+void CallRuntimeHelperRegLocationRegLocation(CompilationUnit* cu, int helper_offset, RegLocation arg0, RegLocation arg1, bool safepoint_pc);
+void CallRuntimeHelperRegReg(CompilationUnit* cu, int helper_offset, int arg0, int arg1, bool safepoint_pc);
+void CallRuntimeHelperRegRegImm(CompilationUnit* cu, int helper_offset, int arg0, int arg1, int arg2, bool safepoint_pc);
+void CallRuntimeHelperImmMethodRegLocation(CompilationUnit* cu, int helper_offset, int arg0, RegLocation arg2, bool safepoint_pc);
+void CallRuntimeHelperImmMethodImm(CompilationUnit* cu, int helper_offset, int arg0, int arg2, bool safepoint_pc);
+void CallRuntimeHelperImmRegLocationRegLocation(CompilationUnit* cu, int helper_offset, int arg0, RegLocation arg1, RegLocation arg2, bool safepoint_pc);
+void GenBarrier(CompilationUnit* cu);
+LIR* OpUnconditionalBranch(CompilationUnit* cu, LIR* target);
+LIR* GenCheck(CompilationUnit* cu, ConditionCode c_code, ThrowKind kind);
+LIR* GenImmedCheck(CompilationUnit* cu, ConditionCode c_code, int reg, int imm_val, ThrowKind kind);
+LIR* GenNullCheck(CompilationUnit* cu, int s_reg, int m_reg, int opt_flags);
+LIR* GenRegRegCheck(CompilationUnit* cu, ConditionCode c_code, int reg1, int reg2, ThrowKind kind);
+void GenCompareAndBranch(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_src1, RegLocation rl_src2, LIR* taken, LIR* fall_through);
+void GenCompareZeroAndBranch(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_src, LIR* taken, LIR* fall_through);
+void GenIntToLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
+void GenIntNarrowing(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src);
+void GenNewArray(CompilationUnit* cu, uint32_t type_idx, RegLocation rl_dest, RegLocation rl_src);
+void GenFilledNewArray(CompilationUnit* cu, CallInfo* info);
+void GenSput(CompilationUnit* cu, uint32_t field_idx, RegLocation rl_src, bool is_long_or_double, bool is_object);
+void GenSget(CompilationUnit* cu, uint32_t field_idx, RegLocation rl_dest, bool is_long_or_double, bool is_object);
+void GenShowTarget(CompilationUnit* cu);
+void HandleSuspendLaunchPads(CompilationUnit *cu);
+void HandleIntrinsicLaunchPads(CompilationUnit *cu);
+void HandleThrowLaunchPads(CompilationUnit *cu);
+void SetupResourceMasks(CompilationUnit* cu, LIR* lir);
+bool FastInstance(CompilationUnit* cu, uint32_t field_idx, int& field_offset, bool& is_volatile, bool is_put);
+void GenIGet(CompilationUnit* cu, uint32_t field_idx, int opt_flags, OpSize size, RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double, bool is_object);
+void GenIPut(CompilationUnit* cu, uint32_t field_idx, int opt_flags, OpSize size, RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double, bool is_object);
+void GenConstClass(CompilationUnit* cu, uint32_t type_idx, RegLocation rl_dest);
+void GenConstString(CompilationUnit* cu, uint32_t string_idx, RegLocation rl_dest);
+void GenNewInstance(CompilationUnit* cu, uint32_t type_idx, RegLocation rl_dest);
+void GenMoveException(CompilationUnit* cu, RegLocation rl_dest);
+void GenThrow(CompilationUnit* cu, RegLocation rl_src);
+void GenInstanceof(CompilationUnit* cu, uint32_t type_idx, RegLocation rl_dest, RegLocation rl_src);
+void GenCheckCast(CompilationUnit* cu, uint32_t type_idx, RegLocation rl_src);
+void GenArrayObjPut(CompilationUnit* cu, int opt_flags, RegLocation rl_array, RegLocation rl_index, RegLocation rl_src, int scale);
+void GenArrayGet(CompilationUnit* cu, int opt_flags, OpSize size, RegLocation rl_array, RegLocation rl_index, RegLocation rl_dest, int scale);
+void GenArrayPut(CompilationUnit* cu, int opt_flags, OpSize size, RegLocation rl_array, RegLocation rl_index, RegLocation rl_src, int scale);
+void GenLong3Addr(CompilationUnit* cu, OpKind first_op, OpKind second_op, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+bool GenShiftOpLong(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_shift);
+bool GenArithOpInt(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+bool GenArithOpIntLit(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src, int lit);
+bool GenArithOpLong(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+bool GenConversionCall(CompilationUnit* cu, int func_offset, RegLocation rl_dest, RegLocation rl_src);
+bool GenArithOpFloatPortable(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+bool GenArithOpDoublePortable(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+bool GenConversionPortable(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src);
+void GenSuspendTest(CompilationUnit* cu, int opt_flags);
+void GenSuspendTestAndBranch(CompilationUnit* cu, int opt_flags, LIR* target);
#endif // ART_SRC_COMPILER_CODEGEN_GENCOMMON_H_
diff --git a/src/compiler/codegen/gen_invoke.cc b/src/compiler/codegen/gen_invoke.cc
index bc55800..91d3db1 100644
--- a/src/compiler/codegen/gen_invoke.cc
+++ b/src/compiler/codegen/gen_invoke.cc
@@ -35,29 +35,29 @@
* ArgLocs is an array of location records describing the incoming arguments
* with one location record per word of argument.
*/
-void FlushIns(CompilationUnit* cUnit, RegLocation* ArgLocs, RegLocation rlMethod)
+void FlushIns(CompilationUnit* cu, RegLocation* ArgLocs, RegLocation rl_method)
{
/*
* Dummy up a RegLocation for the incoming Method*
* It will attempt to keep kArg0 live (or copy it to home location
* if promoted).
*/
- RegLocation rlSrc = rlMethod;
- rlSrc.location = kLocPhysReg;
- rlSrc.lowReg = TargetReg(kArg0);
- rlSrc.home = false;
- MarkLive(cUnit, rlSrc.lowReg, rlSrc.sRegLow);
- StoreValue(cUnit, rlMethod, rlSrc);
+ RegLocation rl_src = rl_method;
+ rl_src.location = kLocPhysReg;
+ rl_src.low_reg = TargetReg(kArg0);
+ rl_src.home = false;
+ MarkLive(cu, rl_src.low_reg, rl_src.s_reg_low);
+ StoreValue(cu, rl_method, rl_src);
// If Method* has been promoted, explicitly flush
- if (rlMethod.location == kLocPhysReg) {
- StoreWordDisp(cUnit, TargetReg(kSp), 0, TargetReg(kArg0));
+ if (rl_method.location == kLocPhysReg) {
+ StoreWordDisp(cu, TargetReg(kSp), 0, TargetReg(kArg0));
}
- if (cUnit->numIns == 0)
+ if (cu->num_ins == 0)
return;
- const int numArgRegs = 3;
- static SpecialTargetRegister argRegs[] = {kArg1, kArg2, kArg3};
- int startVReg = cUnit->numDalvikRegisters - cUnit->numIns;
+ const int num_arg_regs = 3;
+ static SpecialTargetRegister arg_regs[] = {kArg1, kArg2, kArg3};
+ int start_vreg = cu->num_dalvik_registers - cu->num_ins;
/*
* Copy incoming arguments to their proper home locations.
* NOTE: an older version of dx had an issue in which
@@ -70,41 +70,41 @@
* end up half-promoted. In those cases, we must flush the promoted
* half to memory as well.
*/
- for (int i = 0; i < cUnit->numIns; i++) {
- PromotionMap* vMap = &cUnit->promotionMap[startVReg + i];
- if (i < numArgRegs) {
+ for (int i = 0; i < cu->num_ins; i++) {
+ PromotionMap* v_map = &cu->promotion_map[start_vreg + i];
+ if (i < num_arg_regs) {
// If arriving in register
- bool needFlush = true;
- RegLocation* tLoc = &ArgLocs[i];
- if ((vMap->coreLocation == kLocPhysReg) && !tLoc->fp) {
- OpRegCopy(cUnit, vMap->coreReg, TargetReg(argRegs[i]));
- needFlush = false;
- } else if ((vMap->fpLocation == kLocPhysReg) && tLoc->fp) {
- OpRegCopy(cUnit, vMap->FpReg, TargetReg(argRegs[i]));
- needFlush = false;
+ bool need_flush = true;
+ RegLocation* t_loc = &ArgLocs[i];
+ if ((v_map->core_location == kLocPhysReg) && !t_loc->fp) {
+ OpRegCopy(cu, v_map->core_reg, TargetReg(arg_regs[i]));
+ need_flush = false;
+ } else if ((v_map->fp_location == kLocPhysReg) && t_loc->fp) {
+ OpRegCopy(cu, v_map->FpReg, TargetReg(arg_regs[i]));
+ need_flush = false;
} else {
- needFlush = true;
+ need_flush = true;
}
// For wide args, force flush if only half is promoted
- if (tLoc->wide) {
- PromotionMap* pMap = vMap + (tLoc->highWord ? -1 : +1);
- needFlush |= (pMap->coreLocation != vMap->coreLocation) ||
- (pMap->fpLocation != vMap->fpLocation);
+ if (t_loc->wide) {
+ PromotionMap* p_map = v_map + (t_loc->high_word ? -1 : +1);
+ need_flush |= (p_map->core_location != v_map->core_location) ||
+ (p_map->fp_location != v_map->fp_location);
}
- if (needFlush) {
- StoreBaseDisp(cUnit, TargetReg(kSp), SRegOffset(cUnit, startVReg + i),
- TargetReg(argRegs[i]), kWord);
+ if (need_flush) {
+ StoreBaseDisp(cu, TargetReg(kSp), SRegOffset(cu, start_vreg + i),
+ TargetReg(arg_regs[i]), kWord);
}
} else {
// If arriving in frame & promoted
- if (vMap->coreLocation == kLocPhysReg) {
- LoadWordDisp(cUnit, TargetReg(kSp), SRegOffset(cUnit, startVReg + i),
- vMap->coreReg);
+ if (v_map->core_location == kLocPhysReg) {
+ LoadWordDisp(cu, TargetReg(kSp), SRegOffset(cu, start_vreg + i),
+ v_map->core_reg);
}
- if (vMap->fpLocation == kLocPhysReg) {
- LoadWordDisp(cUnit, TargetReg(kSp), SRegOffset(cUnit, startVReg + i),
- vMap->FpReg);
+ if (v_map->fp_location == kLocPhysReg) {
+ LoadWordDisp(cu, TargetReg(kSp), SRegOffset(cu, start_vreg + i),
+ v_map->FpReg);
}
}
}
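The per-argument decision in the loop above condenses to the following
illustrative helper over the same structs:

    static bool NeedFlush(const PromotionMap* v_map, const RegLocation* t_loc) {
      // A register-arriving arg can stay in a register only if it was promoted
      // to a physical reg of the matching class (core vs. fp).
      bool need_flush = !((v_map->core_location == kLocPhysReg && !t_loc->fp) ||
                          (v_map->fp_location == kLocPhysReg && t_loc->fp));
      if (t_loc->wide) {
        // Wide pairs that are only half promoted must be flushed to memory.
        const PromotionMap* p_map = v_map + (t_loc->high_word ? -1 : +1);
        need_flush |= (p_map->core_location != v_map->core_location) ||
                      (p_map->fp_location != v_map->fp_location);
      }
      return need_flush;
    }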
@@ -114,42 +114,42 @@
* Bit of a hack here - in the absence of a real scheduling pass,
* emit the next instruction in static & direct invoke sequences.
*/
-static int NextSDCallInsn(CompilationUnit* cUnit, CallInfo* info,
- int state, uint32_t dexIdx, uint32_t unused,
- uintptr_t directCode, uintptr_t directMethod,
+static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
+ int state, uint32_t dex_idx, uint32_t unused,
+ uintptr_t direct_code, uintptr_t direct_method,
InvokeType type)
{
- if (cUnit->instructionSet != kThumb2) {
+ if (cu->instruction_set != kThumb2) {
// Disable sharpening
- directCode = 0;
- directMethod = 0;
+ direct_code = 0;
+ direct_method = 0;
}
- if (directCode != 0 && directMethod != 0) {
+ if (direct_code != 0 && direct_method != 0) {
switch (state) {
case 0: // Get the current Method* [sets kArg0]
- if (directCode != static_cast<unsigned int>(-1)) {
- LoadConstant(cUnit, TargetReg(kInvokeTgt), directCode);
+ if (direct_code != static_cast<unsigned int>(-1)) {
+ LoadConstant(cu, TargetReg(kInvokeTgt), direct_code);
} else {
- LIR* dataTarget = ScanLiteralPool(cUnit->codeLiteralList, dexIdx, 0);
- if (dataTarget == NULL) {
- dataTarget = AddWordData(cUnit, &cUnit->codeLiteralList, dexIdx);
- dataTarget->operands[1] = type;
+ LIR* data_target = ScanLiteralPool(cu->code_literal_list, dex_idx, 0);
+ if (data_target == NULL) {
+ data_target = AddWordData(cu, &cu->code_literal_list, dex_idx);
+ data_target->operands[1] = type;
}
- LIR* loadPcRel = OpPcRelLoad(cUnit, TargetReg(kInvokeTgt), dataTarget);
- AppendLIR(cUnit, loadPcRel);
- DCHECK_EQ(cUnit->instructionSet, kThumb2) << reinterpret_cast<void*>(dataTarget);
+ LIR* load_pc_rel = OpPcRelLoad(cu, TargetReg(kInvokeTgt), data_target);
+ AppendLIR(cu, load_pc_rel);
+ DCHECK_EQ(cu->instruction_set, kThumb2) << reinterpret_cast<void*>(data_target);
}
- if (directMethod != static_cast<unsigned int>(-1)) {
- LoadConstant(cUnit, TargetReg(kArg0), directMethod);
+ if (direct_method != static_cast<unsigned int>(-1)) {
+ LoadConstant(cu, TargetReg(kArg0), direct_method);
} else {
- LIR* dataTarget = ScanLiteralPool(cUnit->methodLiteralList, dexIdx, 0);
- if (dataTarget == NULL) {
- dataTarget = AddWordData(cUnit, &cUnit->methodLiteralList, dexIdx);
- dataTarget->operands[1] = type;
+ LIR* data_target = ScanLiteralPool(cu->method_literal_list, dex_idx, 0);
+ if (data_target == NULL) {
+ data_target = AddWordData(cu, &cu->method_literal_list, dex_idx);
+ data_target->operands[1] = type;
}
- LIR* loadPcRel = OpPcRelLoad(cUnit, TargetReg(kArg0), dataTarget);
- AppendLIR(cUnit, loadPcRel);
- DCHECK_EQ(cUnit->instructionSet, kThumb2) << reinterpret_cast<void*>(dataTarget);
+ LIR* load_pc_rel = OpPcRelLoad(cu, TargetReg(kArg0), data_target);
+ AppendLIR(cu, load_pc_rel);
+ DCHECK_EQ(cu->instruction_set, kThumb2) << reinterpret_cast<void*>(data_target);
}
break;
default:
@@ -159,35 +159,35 @@
switch (state) {
case 0: // Get the current Method* [sets kArg0]
// TUNING: we can save a reg copy if Method* has been promoted.
- LoadCurrMethodDirect(cUnit, TargetReg(kArg0));
+ LoadCurrMethodDirect(cu, TargetReg(kArg0));
break;
case 1: // Get method->dex_cache_resolved_methods_
- LoadWordDisp(cUnit, TargetReg(kArg0),
+ LoadWordDisp(cu, TargetReg(kArg0),
AbstractMethod::DexCacheResolvedMethodsOffset().Int32Value(), TargetReg(kArg0));
// Set up direct code if known.
- if (directCode != 0) {
- if (directCode != static_cast<unsigned int>(-1)) {
- LoadConstant(cUnit, TargetReg(kInvokeTgt), directCode);
+ if (direct_code != 0) {
+ if (direct_code != static_cast<unsigned int>(-1)) {
+ LoadConstant(cu, TargetReg(kInvokeTgt), direct_code);
} else {
- LIR* dataTarget = ScanLiteralPool(cUnit->codeLiteralList, dexIdx, 0);
- if (dataTarget == NULL) {
- dataTarget = AddWordData(cUnit, &cUnit->codeLiteralList, dexIdx);
- dataTarget->operands[1] = type;
+ LIR* data_target = ScanLiteralPool(cu->code_literal_list, dex_idx, 0);
+ if (data_target == NULL) {
+ data_target = AddWordData(cu, &cu->code_literal_list, dex_idx);
+ data_target->operands[1] = type;
}
- LIR* loadPcRel = OpPcRelLoad(cUnit, TargetReg(kInvokeTgt), dataTarget);
- AppendLIR(cUnit, loadPcRel);
- DCHECK_EQ(cUnit->instructionSet, kThumb2) << reinterpret_cast<void*>(dataTarget);
+ LIR* load_pc_rel = OpPcRelLoad(cu, TargetReg(kInvokeTgt), data_target);
+ AppendLIR(cu, load_pc_rel);
+ DCHECK_EQ(cu->instruction_set, kThumb2) << reinterpret_cast<void*>(data_target);
}
}
break;
case 2: // Grab target method*
- LoadWordDisp(cUnit, TargetReg(kArg0),
- Array::DataOffset(sizeof(Object*)).Int32Value() + dexIdx * 4, TargetReg(kArg0));
+ LoadWordDisp(cu, TargetReg(kArg0),
+ Array::DataOffset(sizeof(Object*)).Int32Value() + dex_idx * 4, TargetReg(kArg0));
break;
case 3: // Grab the code from the method*
- if (cUnit->instructionSet != kX86) {
- if (directCode == 0) {
- LoadWordDisp(cUnit, TargetReg(kArg0), AbstractMethod::GetCodeOffset().Int32Value(),
+ if (cu->instruction_set != kX86) {
+ if (direct_code == 0) {
+ LoadWordDisp(cu, TargetReg(kArg0), AbstractMethod::GetCodeOffset().Int32Value(),
TargetReg(kInvokeTgt));
}
break;
@@ -207,8 +207,8 @@
* Note also that we'll load the first argument ("this") into
* kArg1 here rather than the standard LoadArgRegs.
*/
-static int NextVCallInsn(CompilationUnit* cUnit, CallInfo* info,
- int state, uint32_t dexIdx, uint32_t methodIdx,
+static int NextVCallInsn(CompilationUnit* cu, CallInfo* info,
+ int state, uint32_t dex_idx, uint32_t method_idx,
uintptr_t unused, uintptr_t unused2, InvokeType unused3)
{
/*
@@ -217,27 +217,27 @@
*/
switch (state) {
case 0: { // Get "this" [set kArg1]
- RegLocation rlArg = info->args[0];
- LoadValueDirectFixed(cUnit, rlArg, TargetReg(kArg1));
+ RegLocation rl_arg = info->args[0];
+ LoadValueDirectFixed(cu, rl_arg, TargetReg(kArg1));
break;
}
case 1: // Is "this" null? [use kArg1]
- GenNullCheck(cUnit, info->args[0].sRegLow, TargetReg(kArg1), info->optFlags);
+ GenNullCheck(cu, info->args[0].s_reg_low, TargetReg(kArg1), info->opt_flags);
// get this->klass_ [use kArg1, set kInvokeTgt]
- LoadWordDisp(cUnit, TargetReg(kArg1), Object::ClassOffset().Int32Value(),
+ LoadWordDisp(cu, TargetReg(kArg1), Object::ClassOffset().Int32Value(),
TargetReg(kInvokeTgt));
break;
case 2: // Get this->klass_->vtable [use kInvokeTgt, set kInvokeTgt]
- LoadWordDisp(cUnit, TargetReg(kInvokeTgt), Class::VTableOffset().Int32Value(),
+ LoadWordDisp(cu, TargetReg(kInvokeTgt), Class::VTableOffset().Int32Value(),
TargetReg(kInvokeTgt));
break;
case 3: // Get target method [use kInvokeTgt, set kArg0]
- LoadWordDisp(cUnit, TargetReg(kInvokeTgt), (methodIdx * 4) +
+ LoadWordDisp(cu, TargetReg(kInvokeTgt), (method_idx * 4) +
Array::DataOffset(sizeof(Object*)).Int32Value(), TargetReg(kArg0));
break;
case 4: // Get the compiled code address [uses kArg0, sets kInvokeTgt]
- if (cUnit->instructionSet != kX86) {
- LoadWordDisp(cUnit, TargetReg(kArg0), AbstractMethod::GetCodeOffset().Int32Value(),
+ if (cu->instruction_set != kX86) {
+ LoadWordDisp(cu, TargetReg(kArg0), AbstractMethod::GetCodeOffset().Int32Value(),
TargetReg(kInvokeTgt));
break;
}
@@ -252,35 +252,35 @@
* All invoke-interface calls bounce off of art_invoke_interface_trampoline,
* which will locate the target and continue on via a tail call.
*/
-static int NextInterfaceCallInsn(CompilationUnit* cUnit, CallInfo* info, int state,
- uint32_t dexIdx, uint32_t unused, uintptr_t unused2,
- uintptr_t directMethod, InvokeType unused4)
+static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state,
+ uint32_t dex_idx, uint32_t unused, uintptr_t unused2,
+ uintptr_t direct_method, InvokeType unused4)
{
- if (cUnit->instructionSet != kThumb2) {
+ if (cu->instruction_set != kThumb2) {
// Disable sharpening
- directMethod = 0;
+ direct_method = 0;
}
- int trampoline = (cUnit->instructionSet == kX86) ? 0
+ int trampoline = (cu->instruction_set == kX86) ? 0
: ENTRYPOINT_OFFSET(pInvokeInterfaceTrampoline);
- if (directMethod != 0) {
+ if (direct_method != 0) {
switch (state) {
case 0: // Load the trampoline target [sets kInvokeTgt].
- if (cUnit->instructionSet != kX86) {
- LoadWordDisp(cUnit, TargetReg(kSelf), trampoline, TargetReg(kInvokeTgt));
+ if (cu->instruction_set != kX86) {
+ LoadWordDisp(cu, TargetReg(kSelf), trampoline, TargetReg(kInvokeTgt));
}
// Get the interface Method* [sets kArg0]
- if (directMethod != static_cast<unsigned int>(-1)) {
- LoadConstant(cUnit, TargetReg(kArg0), directMethod);
+ if (direct_method != static_cast<unsigned int>(-1)) {
+ LoadConstant(cu, TargetReg(kArg0), direct_method);
} else {
- LIR* dataTarget = ScanLiteralPool(cUnit->methodLiteralList, dexIdx, 0);
- if (dataTarget == NULL) {
- dataTarget = AddWordData(cUnit, &cUnit->methodLiteralList, dexIdx);
- dataTarget->operands[1] = kInterface;
+ LIR* data_target = ScanLiteralPool(cu->method_literal_list, dex_idx, 0);
+ if (data_target == NULL) {
+ data_target = AddWordData(cu, &cu->method_literal_list, dex_idx);
+ data_target->operands[1] = kInterface;
}
- LIR* loadPcRel = OpPcRelLoad(cUnit, TargetReg(kArg0), dataTarget);
- AppendLIR(cUnit, loadPcRel);
- DCHECK_EQ(cUnit->instructionSet, kThumb2) << reinterpret_cast<void*>(dataTarget);
+ LIR* load_pc_rel = OpPcRelLoad(cu, TargetReg(kArg0), data_target);
+ AppendLIR(cu, load_pc_rel);
+ DCHECK_EQ(cu->instruction_set, kThumb2) << reinterpret_cast<void*>(data_target);
}
break;
default:
@@ -290,20 +290,20 @@
switch (state) {
case 0:
// Get the current Method* [sets kArg0] - TUNING: remove copy of method if it is promoted.
- LoadCurrMethodDirect(cUnit, TargetReg(kArg0));
+ LoadCurrMethodDirect(cu, TargetReg(kArg0));
// Load the trampoline target [sets kInvokeTgt].
- if (cUnit->instructionSet != kX86) {
- LoadWordDisp(cUnit, TargetReg(kSelf), trampoline, TargetReg(kInvokeTgt));
+ if (cu->instruction_set != kX86) {
+ LoadWordDisp(cu, TargetReg(kSelf), trampoline, TargetReg(kInvokeTgt));
}
break;
case 1: // Get method->dex_cache_resolved_methods_ [set/use kArg0]
- LoadWordDisp(cUnit, TargetReg(kArg0),
+ LoadWordDisp(cu, TargetReg(kArg0),
AbstractMethod::DexCacheResolvedMethodsOffset().Int32Value(),
TargetReg(kArg0));
break;
case 2: // Grab target method* [set/use kArg0]
- LoadWordDisp(cUnit, TargetReg(kArg0),
- Array::DataOffset(sizeof(Object*)).Int32Value() + dexIdx * 4,
+ LoadWordDisp(cu, TargetReg(kArg0),
+ Array::DataOffset(sizeof(Object*)).Int32Value() + dex_idx * 4,
TargetReg(kArg0));
break;
default:
@@ -313,95 +313,95 @@
return state + 1;
}
-static int NextInvokeInsnSP(CompilationUnit* cUnit, CallInfo* info, int trampoline,
- int state, uint32_t dexIdx, uint32_t methodIdx)
+static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info, int trampoline,
+ int state, uint32_t dex_idx, uint32_t method_idx)
{
/*
* This handles the case in which the base method is not fully
* resolved at compile time; we bail to a runtime helper.
*/
if (state == 0) {
- if (cUnit->instructionSet != kX86) {
+ if (cu->instruction_set != kX86) {
// Load trampoline target
- LoadWordDisp(cUnit, TargetReg(kSelf), trampoline, TargetReg(kInvokeTgt));
+ LoadWordDisp(cu, TargetReg(kSelf), trampoline, TargetReg(kInvokeTgt));
}
// Load kArg0 with method index
- LoadConstant(cUnit, TargetReg(kArg0), dexIdx);
+ LoadConstant(cu, TargetReg(kArg0), dex_idx);
return 1;
}
return -1;
}
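All of the Next*CallInsn helpers share one protocol: each call emits the work for the current state and returns the next state, with -1 meaning the sequence is finished (GenInvoke later drives them in a loop, interleaved with argument loading). A toy sketch of that protocol, with illustrative states and no real ART types:

    // Toy sketch of the NextCallInsn protocol: one instruction's worth of
    // work per call, return the next state, -1 when done. The states and
    // "emit" printfs are illustrative, not the real ART sequence.
    #include <cstdio>

    typedef int (*NextInsnFn)(int state);

    static int ToyNextCallInsn(int state) {
      switch (state) {
        case 0: std::printf("emit: load Method*\n");          return 1;
        case 1: std::printf("emit: load resolved methods\n"); return 2;
        case 2: std::printf("emit: load target method*\n");   return 3;
        case 3: std::printf("emit: load code entrypoint\n");  return 4;
        default: return -1;  // sequence complete
      }
    }

    int main() {
      NextInsnFn next_call_insn = ToyNextCallInsn;
      int call_state = 0;
      // Same shape as GenInvoke's wrap-up loop; in the compiler these
      // steps are interleaved with the argument-loading code.
      while (call_state >= 0) {
        call_state = next_call_insn(call_state);
      }
      return 0;
    }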
-static int NextStaticCallInsnSP(CompilationUnit* cUnit, CallInfo* info,
- int state, uint32_t dexIdx, uint32_t methodIdx,
+static int NextStaticCallInsnSP(CompilationUnit* cu, CallInfo* info,
+ int state, uint32_t dex_idx, uint32_t method_idx,
uintptr_t unused, uintptr_t unused2,
InvokeType unused3)
{
int trampoline = ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck);
- return NextInvokeInsnSP(cUnit, info, trampoline, state, dexIdx, 0);
+ return NextInvokeInsnSP(cu, info, trampoline, state, dex_idx, 0);
}
-static int NextDirectCallInsnSP(CompilationUnit* cUnit, CallInfo* info, int state,
- uint32_t dexIdx, uint32_t methodIdx, uintptr_t unused,
+static int NextDirectCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
+ uint32_t dex_idx, uint32_t method_idx, uintptr_t unused,
uintptr_t unused2, InvokeType unused3)
{
int trampoline = ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck);
- return NextInvokeInsnSP(cUnit, info, trampoline, state, dexIdx, 0);
+ return NextInvokeInsnSP(cu, info, trampoline, state, dex_idx, 0);
}
-static int NextSuperCallInsnSP(CompilationUnit* cUnit, CallInfo* info, int state,
- uint32_t dexIdx, uint32_t methodIdx, uintptr_t unused,
+static int NextSuperCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
+ uint32_t dex_idx, uint32_t method_idx, uintptr_t unused,
uintptr_t unused2, InvokeType unused3)
{
int trampoline = ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck);
- return NextInvokeInsnSP(cUnit, info, trampoline, state, dexIdx, 0);
+ return NextInvokeInsnSP(cu, info, trampoline, state, dex_idx, 0);
}
-static int NextVCallInsnSP(CompilationUnit* cUnit, CallInfo* info, int state,
- uint32_t dexIdx, uint32_t methodIdx, uintptr_t unused,
+static int NextVCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
+ uint32_t dex_idx, uint32_t method_idx, uintptr_t unused,
uintptr_t unused2, InvokeType unused3)
{
int trampoline = ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck);
- return NextInvokeInsnSP(cUnit, info, trampoline, state, dexIdx, 0);
+ return NextInvokeInsnSP(cu, info, trampoline, state, dex_idx, 0);
}
-static int NextInterfaceCallInsnWithAccessCheck(CompilationUnit* cUnit,
+static int NextInterfaceCallInsnWithAccessCheck(CompilationUnit* cu,
CallInfo* info, int state,
- uint32_t dexIdx, uint32_t unused,
+ uint32_t dex_idx, uint32_t unused,
uintptr_t unused2, uintptr_t unused3,
InvokeType unused4)
{
int trampoline = ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck);
- return NextInvokeInsnSP(cUnit, info, trampoline, state, dexIdx, 0);
+ return NextInvokeInsnSP(cu, info, trampoline, state, dex_idx, 0);
}
-static int LoadArgRegs(CompilationUnit* cUnit, CallInfo* info, int callState,
- NextCallInsn nextCallInsn, uint32_t dexIdx,
- uint32_t methodIdx, uintptr_t directCode,
- uintptr_t directMethod, InvokeType type, bool skipThis)
+static int LoadArgRegs(CompilationUnit* cu, CallInfo* info, int call_state,
+ NextCallInsn next_call_insn, uint32_t dex_idx,
+ uint32_t method_idx, uintptr_t direct_code,
+ uintptr_t direct_method, InvokeType type, bool skip_this)
{
- int lastArgReg = TargetReg(kArg3);
- int nextReg = TargetReg(kArg1);
- int nextArg = 0;
- if (skipThis) {
- nextReg++;
- nextArg++;
+ int last_arg_reg = TargetReg(kArg3);
+ int next_reg = TargetReg(kArg1);
+ int next_arg = 0;
+ if (skip_this) {
+ next_reg++;
+ next_arg++;
}
- for (; (nextReg <= lastArgReg) && (nextArg < info->numArgWords); nextReg++) {
- RegLocation rlArg = info->args[nextArg++];
- rlArg = UpdateRawLoc(cUnit, rlArg);
- if (rlArg.wide && (nextReg <= TargetReg(kArg2))) {
- LoadValueDirectWideFixed(cUnit, rlArg, nextReg, nextReg + 1);
- nextReg++;
- nextArg++;
+ for (; (next_reg <= last_arg_reg) && (next_arg < info->num_arg_words); next_reg++) {
+ RegLocation rl_arg = info->args[next_arg++];
+ rl_arg = UpdateRawLoc(cu, rl_arg);
+ if (rl_arg.wide && (next_reg <= TargetReg(kArg2))) {
+ LoadValueDirectWideFixed(cu, rl_arg, next_reg, next_reg + 1);
+ next_reg++;
+ next_arg++;
} else {
- rlArg.wide = false;
- LoadValueDirectFixed(cUnit, rlArg, nextReg);
+ rl_arg.wide = false;
+ LoadValueDirectFixed(cu, rl_arg, next_reg);
}
- callState = nextCallInsn(cUnit, info, callState, dexIdx, methodIdx,
- directCode, directMethod, type);
+ call_state = next_call_insn(cu, info, call_state, dex_idx, method_idx,
+ direct_code, direct_method, type);
}
- return callState;
+ return call_state;
}
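To make the loop above concrete: argument words fill kArg1 through kArg3 in order, and a wide value claims a consecutive register pair, which is why both next_reg and next_arg advance twice for it. A small sketch with toy register numbers (the real values come from TargetReg):

    // Sketch of LoadArgRegs' assignment: argument words fill kArg1..kArg3
    // in order; a wide value takes a consecutive pair and consumes two
    // words. Register numbers and ArgWord are toy stand-ins.
    #include <cstdio>

    struct ArgWord { bool wide; };  // true if this word starts a wide pair

    int main() {
      const int kArg1 = 1, kArg2 = 2, kArg3 = 3;
      ArgWord args[] = {{false}, {true}, {false}};  // an int, then a long (2 words)
      int num_arg_words = 3;
      int next_arg = 0;
      for (int next_reg = kArg1; next_reg <= kArg3 && next_arg < num_arg_words;
           next_reg++) {
        ArgWord arg = args[next_arg++];
        if (arg.wide && next_reg <= kArg2) {
          std::printf("arg word %d -> r%d/r%d (wide)\n", next_arg - 1,
                      next_reg, next_reg + 1);
          next_reg++;  // the pair consumed two registers...
          next_arg++;  // ...and two argument words
        } else {
          std::printf("arg word %d -> r%d\n", next_arg - 1, next_reg);
        }
      }
      return 0;
    }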
/*
@@ -411,90 +411,90 @@
* the target method pointer. Note, this may also be called
* for "range" variants if the number of arguments is 5 or fewer.
*/
-int GenDalvikArgsNoRange(CompilationUnit* cUnit, CallInfo* info,
- int callState,
- LIR** pcrLabel, NextCallInsn nextCallInsn,
- uint32_t dexIdx, uint32_t methodIdx,
- uintptr_t directCode, uintptr_t directMethod,
- InvokeType type, bool skipThis)
+int GenDalvikArgsNoRange(CompilationUnit* cu, CallInfo* info,
+ int call_state,
+ LIR** pcrLabel, NextCallInsn next_call_insn,
+ uint32_t dex_idx, uint32_t method_idx,
+ uintptr_t direct_code, uintptr_t direct_method,
+ InvokeType type, bool skip_this)
{
- RegLocation rlArg;
+ RegLocation rl_arg;
/* If no arguments, just return */
- if (info->numArgWords == 0)
- return callState;
+ if (info->num_arg_words == 0)
+ return call_state;
- callState = nextCallInsn(cUnit, info, callState, dexIdx, methodIdx,
- directCode, directMethod, type);
+ call_state = next_call_insn(cu, info, call_state, dex_idx, method_idx,
+ direct_code, direct_method, type);
- DCHECK_LE(info->numArgWords, 5);
- if (info->numArgWords > 3) {
- int32_t nextUse = 3;
+ DCHECK_LE(info->num_arg_words, 5);
+ if (info->num_arg_words > 3) {
+ int32_t next_use = 3;
// Detect special case of wide arg spanning arg3/arg4
- RegLocation rlUse0 = info->args[0];
- RegLocation rlUse1 = info->args[1];
- RegLocation rlUse2 = info->args[2];
- if (((!rlUse0.wide && !rlUse1.wide) || rlUse0.wide) &&
- rlUse2.wide) {
+ RegLocation rl_use0 = info->args[0];
+ RegLocation rl_use1 = info->args[1];
+ RegLocation rl_use2 = info->args[2];
+ if (((!rl_use0.wide && !rl_use1.wide) || rl_use0.wide) &&
+ rl_use2.wide) {
int reg = -1;
// The wide arg spans arg3/arg4; we need the 2nd half of uses[2].
- rlArg = UpdateLocWide(cUnit, rlUse2);
- if (rlArg.location == kLocPhysReg) {
- reg = rlArg.highReg;
+ rl_arg = UpdateLocWide(cu, rl_use2);
+ if (rl_arg.location == kLocPhysReg) {
+ reg = rl_arg.high_reg;
} else {
// kArg2 & kArg3 can safely be used here
reg = TargetReg(kArg3);
- LoadWordDisp(cUnit, TargetReg(kSp), SRegOffset(cUnit, rlArg.sRegLow) + 4, reg);
- callState = nextCallInsn(cUnit, info, callState, dexIdx,
- methodIdx, directCode, directMethod, type);
+ LoadWordDisp(cu, TargetReg(kSp), SRegOffset(cu, rl_arg.s_reg_low) + 4, reg);
+ call_state = next_call_insn(cu, info, call_state, dex_idx,
+ method_idx, direct_code, direct_method, type);
}
- StoreBaseDisp(cUnit, TargetReg(kSp), 16 /* (3+1)*4 */, reg, kWord);
- callState = nextCallInsn(cUnit, info, callState, dexIdx, methodIdx,
- directCode, directMethod, type);
- nextUse++;
+ StoreBaseDisp(cu, TargetReg(kSp), 16 /* (3+1)*4 */, reg, kWord);
+ call_state = next_call_insn(cu, info, call_state, dex_idx, method_idx,
+ direct_code, direct_method, type);
+ next_use++;
}
// Loop through the rest
- while (nextUse < info->numArgWords) {
- int lowReg;
- int highReg = -1;
- rlArg = info->args[nextUse];
- rlArg = UpdateRawLoc(cUnit, rlArg);
- if (rlArg.location == kLocPhysReg) {
- lowReg = rlArg.lowReg;
- highReg = rlArg.highReg;
+ while (next_use < info->num_arg_words) {
+ int low_reg;
+ int high_reg = -1;
+ rl_arg = info->args[next_use];
+ rl_arg = UpdateRawLoc(cu, rl_arg);
+ if (rl_arg.location == kLocPhysReg) {
+ low_reg = rl_arg.low_reg;
+ high_reg = rl_arg.high_reg;
} else {
- lowReg = TargetReg(kArg2);
- if (rlArg.wide) {
- highReg = TargetReg(kArg3);
- LoadValueDirectWideFixed(cUnit, rlArg, lowReg, highReg);
+ low_reg = TargetReg(kArg2);
+ if (rl_arg.wide) {
+ high_reg = TargetReg(kArg3);
+ LoadValueDirectWideFixed(cu, rl_arg, low_reg, high_reg);
} else {
- LoadValueDirectFixed(cUnit, rlArg, lowReg);
+ LoadValueDirectFixed(cu, rl_arg, low_reg);
}
- callState = nextCallInsn(cUnit, info, callState, dexIdx,
- methodIdx, directCode, directMethod, type);
+ call_state = next_call_insn(cu, info, call_state, dex_idx,
+ method_idx, direct_code, direct_method, type);
}
- int outsOffset = (nextUse + 1) * 4;
- if (rlArg.wide) {
- StoreBaseDispWide(cUnit, TargetReg(kSp), outsOffset, lowReg, highReg);
- nextUse += 2;
+ int outs_offset = (next_use + 1) * 4;
+ if (rl_arg.wide) {
+ StoreBaseDispWide(cu, TargetReg(kSp), outs_offset, low_reg, high_reg);
+ next_use += 2;
} else {
- StoreWordDisp(cUnit, TargetReg(kSp), outsOffset, lowReg);
- nextUse++;
+ StoreWordDisp(cu, TargetReg(kSp), outs_offset, low_reg);
+ next_use++;
}
- callState = nextCallInsn(cUnit, info, callState, dexIdx, methodIdx,
- directCode, directMethod, type);
+ call_state = next_call_insn(cu, info, call_state, dex_idx, method_idx,
+ direct_code, direct_method, type);
}
}
- callState = LoadArgRegs(cUnit, info, callState, nextCallInsn,
- dexIdx, methodIdx, directCode, directMethod,
- type, skipThis);
+ call_state = LoadArgRegs(cu, info, call_state, next_call_insn,
+ dex_idx, method_idx, direct_code, direct_method,
+ type, skip_this);
if (pcrLabel) {
- *pcrLabel = GenNullCheck(cUnit, info->args[0].sRegLow, TargetReg(kArg1), info->optFlags);
+ *pcrLabel = GenNullCheck(cu, info->args[0].s_reg_low, TargetReg(kArg1), info->opt_flags);
}
- return callState;
+ return call_state;
}
/*
@@ -512,401 +512,401 @@
* Pass arg0, arg1 & arg2 in kArg1-kArg3
*
*/
-int GenDalvikArgsRange(CompilationUnit* cUnit, CallInfo* info, int callState,
- LIR** pcrLabel, NextCallInsn nextCallInsn,
- uint32_t dexIdx, uint32_t methodIdx,
- uintptr_t directCode, uintptr_t directMethod,
- InvokeType type, bool skipThis)
+int GenDalvikArgsRange(CompilationUnit* cu, CallInfo* info, int call_state,
+ LIR** pcrLabel, NextCallInsn next_call_insn,
+ uint32_t dex_idx, uint32_t method_idx,
+ uintptr_t direct_code, uintptr_t direct_method,
+ InvokeType type, bool skip_this)
{
// If we can treat it as non-range (Jumbo ops will use range form)
- if (info->numArgWords <= 5)
- return GenDalvikArgsNoRange(cUnit, info, callState, pcrLabel,
- nextCallInsn, dexIdx, methodIdx,
- directCode, directMethod, type, skipThis);
+ if (info->num_arg_words <= 5)
+ return GenDalvikArgsNoRange(cu, info, call_state, pcrLabel,
+ next_call_insn, dex_idx, method_idx,
+ direct_code, direct_method, type, skip_this);
/*
* First load the non-register arguments. Both forms expect all
* of the source arguments to be in their home frame location, so
- * scan the sReg names and flush any that have been promoted to
+ * scan the s_reg names and flush any promoted values back to
* frame backing storage.
*/
- // Scan the rest of the args - if in physReg flush to memory
- for (int nextArg = 0; nextArg < info->numArgWords;) {
- RegLocation loc = info->args[nextArg];
+ // Scan the rest of the args - if in phys_reg flush to memory
+ for (int next_arg = 0; next_arg < info->num_arg_words;) {
+ RegLocation loc = info->args[next_arg];
if (loc.wide) {
- loc = UpdateLocWide(cUnit, loc);
- if ((nextArg >= 2) && (loc.location == kLocPhysReg)) {
- StoreBaseDispWide(cUnit, TargetReg(kSp), SRegOffset(cUnit, loc.sRegLow),
- loc.lowReg, loc.highReg);
+ loc = UpdateLocWide(cu, loc);
+ if ((next_arg >= 2) && (loc.location == kLocPhysReg)) {
+ StoreBaseDispWide(cu, TargetReg(kSp), SRegOffset(cu, loc.s_reg_low),
+ loc.low_reg, loc.high_reg);
}
- nextArg += 2;
+ next_arg += 2;
} else {
- loc = UpdateLoc(cUnit, loc);
- if ((nextArg >= 3) && (loc.location == kLocPhysReg)) {
- StoreBaseDisp(cUnit, TargetReg(kSp), SRegOffset(cUnit, loc.sRegLow),
- loc.lowReg, kWord);
+ loc = UpdateLoc(cu, loc);
+ if ((next_arg >= 3) && (loc.location == kLocPhysReg)) {
+ StoreBaseDisp(cu, TargetReg(kSp), SRegOffset(cu, loc.s_reg_low),
+ loc.low_reg, kWord);
}
- nextArg++;
+ next_arg++;
}
}
- int startOffset = SRegOffset(cUnit, info->args[3].sRegLow);
- int outsOffset = 4 /* Method* */ + (3 * 4);
- if (cUnit->instructionSet != kThumb2) {
+ int start_offset = SRegOffset(cu, info->args[3].s_reg_low);
+ int outs_offset = 4 /* Method* */ + (3 * 4);
+ if (cu->instruction_set != kThumb2) {
// Generate memcpy
- OpRegRegImm(cUnit, kOpAdd, TargetReg(kArg0), TargetReg(kSp), outsOffset);
- OpRegRegImm(cUnit, kOpAdd, TargetReg(kArg1), TargetReg(kSp), startOffset);
- CallRuntimeHelperRegRegImm(cUnit, ENTRYPOINT_OFFSET(pMemcpy), TargetReg(kArg0),
- TargetReg(kArg1), (info->numArgWords - 3) * 4, false);
+ OpRegRegImm(cu, kOpAdd, TargetReg(kArg0), TargetReg(kSp), outs_offset);
+ OpRegRegImm(cu, kOpAdd, TargetReg(kArg1), TargetReg(kSp), start_offset);
+ CallRuntimeHelperRegRegImm(cu, ENTRYPOINT_OFFSET(pMemcpy), TargetReg(kArg0),
+ TargetReg(kArg1), (info->num_arg_words - 3) * 4, false);
} else {
- if (info->numArgWords >= 20) {
+ if (info->num_arg_words >= 20) {
// Generate memcpy
- OpRegRegImm(cUnit, kOpAdd, TargetReg(kArg0), TargetReg(kSp), outsOffset);
- OpRegRegImm(cUnit, kOpAdd, TargetReg(kArg1), TargetReg(kSp), startOffset);
- CallRuntimeHelperRegRegImm(cUnit, ENTRYPOINT_OFFSET(pMemcpy), TargetReg(kArg0),
- TargetReg(kArg1), (info->numArgWords - 3) * 4, false);
+ OpRegRegImm(cu, kOpAdd, TargetReg(kArg0), TargetReg(kSp), outs_offset);
+ OpRegRegImm(cu, kOpAdd, TargetReg(kArg1), TargetReg(kSp), start_offset);
+ CallRuntimeHelperRegRegImm(cu, ENTRYPOINT_OFFSET(pMemcpy), TargetReg(kArg0),
+ TargetReg(kArg1), (info->num_arg_words - 3) * 4, false);
} else {
// Use vldm/vstm pair using kArg3 as a temp
- int regsLeft = std::min(info->numArgWords - 3, 16);
- callState = nextCallInsn(cUnit, info, callState, dexIdx, methodIdx,
- directCode, directMethod, type);
- OpRegRegImm(cUnit, kOpAdd, TargetReg(kArg3), TargetReg(kSp), startOffset);
- LIR* ld = OpVldm(cUnit, TargetReg(kArg3), regsLeft);
+ int regs_left = std::min(info->num_arg_words - 3, 16);
+ call_state = next_call_insn(cu, info, call_state, dex_idx, method_idx,
+ direct_code, direct_method, type);
+ OpRegRegImm(cu, kOpAdd, TargetReg(kArg3), TargetReg(kSp), start_offset);
+ LIR* ld = OpVldm(cu, TargetReg(kArg3), regs_left);
//TUNING: loosen barrier
- ld->defMask = ENCODE_ALL;
- SetMemRefType(ld, true /* isLoad */, kDalvikReg);
- callState = nextCallInsn(cUnit, info, callState, dexIdx, methodIdx,
- directCode, directMethod, type);
- OpRegRegImm(cUnit, kOpAdd, TargetReg(kArg3), TargetReg(kSp), 4 /* Method* */ + (3 * 4));
- callState = nextCallInsn(cUnit, info, callState, dexIdx, methodIdx,
- directCode, directMethod, type);
- LIR* st = OpVstm(cUnit, TargetReg(kArg3), regsLeft);
- SetMemRefType(st, false /* isLoad */, kDalvikReg);
- st->defMask = ENCODE_ALL;
- callState = nextCallInsn(cUnit, info, callState, dexIdx, methodIdx,
- directCode, directMethod, type);
+ ld->def_mask = ENCODE_ALL;
+ SetMemRefType(ld, true /* is_load */, kDalvikReg);
+ call_state = next_call_insn(cu, info, call_state, dex_idx, method_idx,
+ direct_code, direct_method, type);
+ OpRegRegImm(cu, kOpAdd, TargetReg(kArg3), TargetReg(kSp), 4 /* Method* */ + (3 * 4));
+ call_state = next_call_insn(cu, info, call_state, dex_idx, method_idx,
+ direct_code, direct_method, type);
+ LIR* st = OpVstm(cu, TargetReg(kArg3), regs_left);
+ SetMemRefType(st, false /* is_load */, kDalvikReg);
+ st->def_mask = ENCODE_ALL;
+ call_state = next_call_insn(cu, info, call_state, dex_idx, method_idx,
+ direct_code, direct_method, type);
}
}
- callState = LoadArgRegs(cUnit, info, callState, nextCallInsn,
- dexIdx, methodIdx, directCode, directMethod,
- type, skipThis);
+ call_state = LoadArgRegs(cu, info, call_state, next_call_insn,
+ dex_idx, method_idx, direct_code, direct_method,
+ type, skip_this);
- callState = nextCallInsn(cUnit, info, callState, dexIdx, methodIdx,
- directCode, directMethod, type);
+ call_state = next_call_insn(cu, info, call_state, dex_idx, method_idx,
+ direct_code, direct_method, type);
if (pcrLabel) {
- *pcrLabel = GenNullCheck(cUnit, info->args[0].sRegLow, TargetReg(kArg1),
- info->optFlags);
+ *pcrLabel = GenNullCheck(cu, info->args[0].s_reg_low, TargetReg(kArg1),
+ info->opt_flags);
}
- return callState;
+ return call_state;
}
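The offsets in the range path are worth spelling out: sp+0 holds the callee's Method*, the first three argument words travel in kArg1-kArg3, and word n (for n >= 3) lands at sp + 4 + 4*n, which is where the outs_offset of 4 /* Method* */ + (3 * 4) == 16 comes from. A quick arithmetic check (sketch only):

    // Sketch of the range-call out-argument layout: sp+0 is the callee's
    // Method* slot, words 0..2 ride in registers, and word n (n >= 3)
    // lives at sp + 4 + 4*n. Pure arithmetic, no ART types.
    #include <cassert>

    int OutWordOffset(int n) {
      return 4 /* Method* slot */ + n * 4;
    }

    int main() {
      // The first memory-resident word starts just past the register words:
      // 4 + 3*4 == 16, matching outs_offset above.
      assert(OutWordOffset(3) == 16);
      assert(OutWordOffset(4) == 20);
      return 0;
    }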
-RegLocation InlineTarget(CompilationUnit* cUnit, CallInfo* info)
+RegLocation InlineTarget(CompilationUnit* cu, CallInfo* info)
{
RegLocation res;
if (info->result.location == kLocInvalid) {
- res = GetReturn(cUnit, false);
+ res = GetReturn(cu, false);
} else {
res = info->result;
}
return res;
}
-RegLocation InlineTargetWide(CompilationUnit* cUnit, CallInfo* info)
+RegLocation InlineTargetWide(CompilationUnit* cu, CallInfo* info)
{
RegLocation res;
if (info->result.location == kLocInvalid) {
- res = GetReturnWide(cUnit, false);
+ res = GetReturnWide(cu, false);
} else {
res = info->result;
}
return res;
}
-bool GenInlinedCharAt(CompilationUnit* cUnit, CallInfo* info)
+bool GenInlinedCharAt(CompilationUnit* cu, CallInfo* info)
{
- if (cUnit->instructionSet == kMips) {
+ if (cu->instruction_set == kMips) {
// TODO - add Mips implementation
return false;
}
// Location of reference to data array
- int valueOffset = String::ValueOffset().Int32Value();
+ int value_offset = String::ValueOffset().Int32Value();
// Location of count
- int countOffset = String::CountOffset().Int32Value();
+ int count_offset = String::CountOffset().Int32Value();
// Starting offset within data array
- int offsetOffset = String::OffsetOffset().Int32Value();
+ int offset_offset = String::OffsetOffset().Int32Value();
// Start of char data within array_
- int dataOffset = Array::DataOffset(sizeof(uint16_t)).Int32Value();
+ int data_offset = Array::DataOffset(sizeof(uint16_t)).Int32Value();
- RegLocation rlObj = info->args[0];
- RegLocation rlIdx = info->args[1];
- rlObj = LoadValue(cUnit, rlObj, kCoreReg);
- rlIdx = LoadValue(cUnit, rlIdx, kCoreReg);
- int regMax;
- GenNullCheck(cUnit, rlObj.sRegLow, rlObj.lowReg, info->optFlags);
- bool rangeCheck = (!(info->optFlags & MIR_IGNORE_RANGE_CHECK));
- LIR* launchPad = NULL;
- int regOff = INVALID_REG;
- int regPtr = INVALID_REG;
- if (cUnit->instructionSet != kX86) {
- regOff = AllocTemp(cUnit);
- regPtr = AllocTemp(cUnit);
- if (rangeCheck) {
- regMax = AllocTemp(cUnit);
- LoadWordDisp(cUnit, rlObj.lowReg, countOffset, regMax);
+ RegLocation rl_obj = info->args[0];
+ RegLocation rl_idx = info->args[1];
+ rl_obj = LoadValue(cu, rl_obj, kCoreReg);
+ rl_idx = LoadValue(cu, rl_idx, kCoreReg);
+ int reg_max;
+ GenNullCheck(cu, rl_obj.s_reg_low, rl_obj.low_reg, info->opt_flags);
+ bool range_check = (!(info->opt_flags & MIR_IGNORE_RANGE_CHECK));
+ LIR* launch_pad = NULL;
+ int reg_off = INVALID_REG;
+ int reg_ptr = INVALID_REG;
+ if (cu->instruction_set != kX86) {
+ reg_off = AllocTemp(cu);
+ reg_ptr = AllocTemp(cu);
+ if (range_check) {
+ reg_max = AllocTemp(cu);
+ LoadWordDisp(cu, rl_obj.low_reg, count_offset, reg_max);
}
- LoadWordDisp(cUnit, rlObj.lowReg, offsetOffset, regOff);
- LoadWordDisp(cUnit, rlObj.lowReg, valueOffset, regPtr);
- if (rangeCheck) {
+ LoadWordDisp(cu, rl_obj.low_reg, offset_offset, reg_off);
+ LoadWordDisp(cu, rl_obj.low_reg, value_offset, reg_ptr);
+ if (range_check) {
// Set up a launch pad to allow retry in case of bounds violation
- launchPad = RawLIR(cUnit, 0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
- InsertGrowableList(cUnit, &cUnit->intrinsicLaunchpads,
- reinterpret_cast<uintptr_t>(launchPad));
- OpRegReg(cUnit, kOpCmp, rlIdx.lowReg, regMax);
- FreeTemp(cUnit, regMax);
- OpCondBranch(cUnit, kCondCs, launchPad);
+ launch_pad = RawLIR(cu, 0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
+ InsertGrowableList(cu, &cu->intrinsic_launchpads,
+ reinterpret_cast<uintptr_t>(launch_pad));
+ OpRegReg(cu, kOpCmp, rl_idx.low_reg, reg_max);
+ FreeTemp(cu, reg_max);
+ OpCondBranch(cu, kCondCs, launch_pad);
}
} else {
- if (rangeCheck) {
- regMax = AllocTemp(cUnit);
- LoadWordDisp(cUnit, rlObj.lowReg, countOffset, regMax);
+ if (range_check) {
+ reg_max = AllocTemp(cu);
+ LoadWordDisp(cu, rl_obj.low_reg, count_offset, reg_max);
// Set up a launch pad to allow retry in case of bounds violation
- launchPad = RawLIR(cUnit, 0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
- InsertGrowableList(cUnit, &cUnit->intrinsicLaunchpads,
- reinterpret_cast<uintptr_t>(launchPad));
- OpRegReg(cUnit, kOpCmp, rlIdx.lowReg, regMax);
- FreeTemp(cUnit, regMax);
- OpCondBranch(cUnit, kCondCc, launchPad);
+ launch_pad = RawLIR(cu, 0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
+ InsertGrowableList(cu, &cu->intrinsic_launchpads,
+ reinterpret_cast<uintptr_t>(launch_pad));
+ OpRegReg(cu, kOpCmp, rl_idx.low_reg, reg_max);
+ FreeTemp(cu, reg_max);
+ OpCondBranch(cu, kCondCc, launch_pad);
}
- regOff = AllocTemp(cUnit);
- regPtr = AllocTemp(cUnit);
- LoadWordDisp(cUnit, rlObj.lowReg, offsetOffset, regOff);
- LoadWordDisp(cUnit, rlObj.lowReg, valueOffset, regPtr);
+ reg_off = AllocTemp(cu);
+ reg_ptr = AllocTemp(cu);
+ LoadWordDisp(cu, rl_obj.low_reg, offset_offset, reg_off);
+ LoadWordDisp(cu, rl_obj.low_reg, value_offset, reg_ptr);
}
- OpRegImm(cUnit, kOpAdd, regPtr, dataOffset);
- OpRegReg(cUnit, kOpAdd, regOff, rlIdx.lowReg);
- FreeTemp(cUnit, rlObj.lowReg);
- FreeTemp(cUnit, rlIdx.lowReg);
- RegLocation rlDest = InlineTarget(cUnit, info);
- RegLocation rlResult = EvalLoc(cUnit, rlDest, kCoreReg, true);
- LoadBaseIndexed(cUnit, regPtr, regOff, rlResult.lowReg, 1, kUnsignedHalf);
- FreeTemp(cUnit, regOff);
- FreeTemp(cUnit, regPtr);
- StoreValue(cUnit, rlDest, rlResult);
- if (rangeCheck) {
- launchPad->operands[2] = 0; // no resumption
+ OpRegImm(cu, kOpAdd, reg_ptr, data_offset);
+ OpRegReg(cu, kOpAdd, reg_off, rl_idx.low_reg);
+ FreeTemp(cu, rl_obj.low_reg);
+ FreeTemp(cu, rl_idx.low_reg);
+ RegLocation rl_dest = InlineTarget(cu, info);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ LoadBaseIndexed(cu, reg_ptr, reg_off, rl_result.low_reg, 1, kUnsignedHalf);
+ FreeTemp(cu, reg_off);
+ FreeTemp(cu, reg_ptr);
+ StoreValue(cu, rl_dest, rl_result);
+ if (range_check) {
+ launch_pad->operands[2] = 0; // no resumption
}
// Record that we've already inlined & null checked
- info->optFlags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
+ info->opt_flags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
return true;
}
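The address the inline charAt computes decodes as: array pointer from value_offset, logical start index from offset_offset, char payload beginning data_offset bytes into the array object, and a scale of 1 (shift left by one) because chars are 16-bit. The same arithmetic in plain C++, with a toy object layout standing in for the managed String and char array:

    // Sketch of the effective-address math in GenInlinedCharAt:
    //   char = *(uint16_t*)(value + data_offset + (offset + idx) * 2)
    // ToyString/ToyCharArray stand in for the managed objects; the real
    // offsets come from String::ValueOffset() and friends.
    #include <cstdint>
    #include <cstdio>

    struct ToyCharArray {
      int32_t length;
      uint16_t data[8];  // begins data_offset bytes into the real object
    };

    struct ToyString {
      ToyCharArray* value;  // loaded via value_offset
      int32_t offset;       // loaded via offset_offset
      int32_t count;        // loaded via count_offset (the bounds check)
    };

    uint16_t CharAt(const ToyString& s, int32_t idx) {
      // reg_off = offset + idx; LoadBaseIndexed scales it by 2 for
      // kUnsignedHalf (the shift-by-1 in the generated code).
      return s.value->data[s.offset + idx];
    }

    int main() {
      ToyCharArray a = {5, {'h', 'e', 'l', 'l', 'o'}};
      ToyString s = {&a, 0, 5};
      std::printf("%c\n", CharAt(s, 1));  // prints 'e'
      return 0;
    }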
// Generates an inlined String.isEmpty or String.length.
-bool GenInlinedStringIsEmptyOrLength(CompilationUnit* cUnit, CallInfo* info,
- bool isEmpty)
+bool GenInlinedStringIsEmptyOrLength(CompilationUnit* cu, CallInfo* info,
+ bool is_empty)
{
- if (cUnit->instructionSet == kMips) {
+ if (cu->instruction_set == kMips) {
// TODO - add Mips implementation
return false;
}
// dst = src.length();
- RegLocation rlObj = info->args[0];
- rlObj = LoadValue(cUnit, rlObj, kCoreReg);
- RegLocation rlDest = InlineTarget(cUnit, info);
- RegLocation rlResult = EvalLoc(cUnit, rlDest, kCoreReg, true);
- GenNullCheck(cUnit, rlObj.sRegLow, rlObj.lowReg, info->optFlags);
- LoadWordDisp(cUnit, rlObj.lowReg, String::CountOffset().Int32Value(),
- rlResult.lowReg);
- if (isEmpty) {
+ RegLocation rl_obj = info->args[0];
+ rl_obj = LoadValue(cu, rl_obj, kCoreReg);
+ RegLocation rl_dest = InlineTarget(cu, info);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ GenNullCheck(cu, rl_obj.s_reg_low, rl_obj.low_reg, info->opt_flags);
+ LoadWordDisp(cu, rl_obj.low_reg, String::CountOffset().Int32Value(),
+ rl_result.low_reg);
+ if (is_empty) {
// dst = (dst == 0);
- if (cUnit->instructionSet == kThumb2) {
- int tReg = AllocTemp(cUnit);
- OpRegReg(cUnit, kOpNeg, tReg, rlResult.lowReg);
- OpRegRegReg(cUnit, kOpAdc, rlResult.lowReg, rlResult.lowReg, tReg);
+ if (cu->instruction_set == kThumb2) {
+ int t_reg = AllocTemp(cu);
+ OpRegReg(cu, kOpNeg, t_reg, rl_result.low_reg);
+ OpRegRegReg(cu, kOpAdc, rl_result.low_reg, rl_result.low_reg, t_reg);
} else {
- DCHECK_EQ(cUnit->instructionSet, kX86);
- OpRegImm(cUnit, kOpSub, rlResult.lowReg, 1);
- OpRegImm(cUnit, kOpLsr, rlResult.lowReg, 31);
+ DCHECK_EQ(cu->instruction_set, kX86);
+ OpRegImm(cu, kOpSub, rl_result.low_reg, 1);
+ OpRegImm(cu, kOpLsr, rl_result.low_reg, 31);
}
}
- StoreValue(cUnit, rlDest, rlResult);
+ StoreValue(cu, rl_dest, rl_result);
return true;
}
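Both arms of the is_empty case compute (length == 0) without a branch: Thumb2 uses a negate followed by add-with-carry, while x86 subtracts 1 and does an unsigned shift right by 31, which yields 1 exactly when a non-negative length is 0. A sketch of the x86 variant:

    // Sketch of the branchless is-empty computation emitted for x86: for a
    // non-negative length, (len - 1) viewed as unsigned has its top bit set
    // only when len == 0, so a logical shift right by 31 yields 0 or 1.
    #include <cassert>
    #include <cstdint>

    int IsEmptyBranchless(int32_t len) {
      // Mirrors: OpRegImm(kOpSub, result, 1); OpRegImm(kOpLsr, result, 31);
      return static_cast<uint32_t>(len - 1) >> 31;
    }

    int main() {
      assert(IsEmptyBranchless(0) == 1);
      assert(IsEmptyBranchless(1) == 0);
      assert(IsEmptyBranchless(42) == 0);
      return 0;
    }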
-bool GenInlinedAbsInt(CompilationUnit *cUnit, CallInfo* info)
+bool GenInlinedAbsInt(CompilationUnit *cu, CallInfo* info)
{
- if (cUnit->instructionSet == kMips) {
+ if (cu->instruction_set == kMips) {
// TODO - add Mips implementation
return false;
}
- RegLocation rlSrc = info->args[0];
- rlSrc = LoadValue(cUnit, rlSrc, kCoreReg);
- RegLocation rlDest = InlineTarget(cUnit, info);
- RegLocation rlResult = EvalLoc(cUnit, rlDest, kCoreReg, true);
- int signReg = AllocTemp(cUnit);
+ RegLocation rl_src = info->args[0];
+ rl_src = LoadValue(cu, rl_src, kCoreReg);
+ RegLocation rl_dest = InlineTarget(cu, info);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ int sign_reg = AllocTemp(cu);
// abs(x) = (x + y) ^ y, where y = x >> 31 (arithmetic shift).
- OpRegRegImm(cUnit, kOpAsr, signReg, rlSrc.lowReg, 31);
- OpRegRegReg(cUnit, kOpAdd, rlResult.lowReg, rlSrc.lowReg, signReg);
- OpRegReg(cUnit, kOpXor, rlResult.lowReg, signReg);
- StoreValue(cUnit, rlDest, rlResult);
+ OpRegRegImm(cu, kOpAsr, sign_reg, rl_src.low_reg, 31);
+ OpRegRegReg(cu, kOpAdd, rl_result.low_reg, rl_src.low_reg, sign_reg);
+ OpRegReg(cu, kOpXor, rl_result.low_reg, sign_reg);
+ StoreValue(cu, rl_dest, rl_result);
return true;
}
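The abs comment reads better spelled out: with y = x >> 31 (arithmetic shift, so y is 0 for non-negative x and all-ones otherwise), (x + y) ^ y is two's-complement negation exactly when y is all-ones. A sketch, using unsigned arithmetic for the add to match the CPU's wrapping behavior:

    // Sketch of the branchless abs above: y = x >> 31 is 0 for x >= 0 and
    // all-ones for x < 0, and (x + y) ^ y negates x exactly when y is
    // all-ones. As with the generated code, INT_MIN maps to itself.
    #include <cassert>
    #include <cstdint>

    int32_t AbsBranchless(int32_t x) {
      int32_t y = x >> 31;  // OpRegRegImm(kOpAsr, sign_reg, src, 31)
      uint32_t ux = static_cast<uint32_t>(x);
      uint32_t uy = static_cast<uint32_t>(y);
      return static_cast<int32_t>((ux + uy) ^ uy);  // kOpAdd, then kOpXor
    }

    int main() {
      assert(AbsBranchless(5) == 5);
      assert(AbsBranchless(-5) == 5);
      assert(AbsBranchless(0) == 0);
      return 0;
    }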
-bool GenInlinedAbsLong(CompilationUnit *cUnit, CallInfo* info)
+bool GenInlinedAbsLong(CompilationUnit *cu, CallInfo* info)
{
- if (cUnit->instructionSet == kMips) {
+ if (cu->instruction_set == kMips) {
// TODO - add Mips implementation
return false;
}
- if (cUnit->instructionSet == kThumb2) {
- RegLocation rlSrc = info->args[0];
- rlSrc = LoadValueWide(cUnit, rlSrc, kCoreReg);
- RegLocation rlDest = InlineTargetWide(cUnit, info);
- RegLocation rlResult = EvalLoc(cUnit, rlDest, kCoreReg, true);
- int signReg = AllocTemp(cUnit);
+ if (cu->instruction_set == kThumb2) {
+ RegLocation rl_src = info->args[0];
+ rl_src = LoadValueWide(cu, rl_src, kCoreReg);
+ RegLocation rl_dest = InlineTargetWide(cu, info);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ int sign_reg = AllocTemp(cu);
// abs(x) = (x + y) ^ y, where y = x >> 31 (arithmetic shift).
- OpRegRegImm(cUnit, kOpAsr, signReg, rlSrc.highReg, 31);
- OpRegRegReg(cUnit, kOpAdd, rlResult.lowReg, rlSrc.lowReg, signReg);
- OpRegRegReg(cUnit, kOpAdc, rlResult.highReg, rlSrc.highReg, signReg);
- OpRegReg(cUnit, kOpXor, rlResult.lowReg, signReg);
- OpRegReg(cUnit, kOpXor, rlResult.highReg, signReg);
- StoreValueWide(cUnit, rlDest, rlResult);
+ OpRegRegImm(cu, kOpAsr, sign_reg, rl_src.high_reg, 31);
+ OpRegRegReg(cu, kOpAdd, rl_result.low_reg, rl_src.low_reg, sign_reg);
+ OpRegRegReg(cu, kOpAdc, rl_result.high_reg, rl_src.high_reg, sign_reg);
+ OpRegReg(cu, kOpXor, rl_result.low_reg, sign_reg);
+ OpRegReg(cu, kOpXor, rl_result.high_reg, sign_reg);
+ StoreValueWide(cu, rl_dest, rl_result);
return true;
} else {
- DCHECK_EQ(cUnit->instructionSet, kX86);
+ DCHECK_EQ(cu->instruction_set, kX86);
// Reuse source registers to avoid running out of temps
- RegLocation rlSrc = info->args[0];
- rlSrc = LoadValueWide(cUnit, rlSrc, kCoreReg);
- RegLocation rlDest = InlineTargetWide(cUnit, info);
- RegLocation rlResult = EvalLoc(cUnit, rlDest, kCoreReg, true);
- OpRegCopyWide(cUnit, rlResult.lowReg, rlResult.highReg, rlSrc.lowReg, rlSrc.highReg);
- FreeTemp(cUnit, rlSrc.lowReg);
- FreeTemp(cUnit, rlSrc.highReg);
- int signReg = AllocTemp(cUnit);
+ RegLocation rl_src = info->args[0];
+ rl_src = LoadValueWide(cu, rl_src, kCoreReg);
+ RegLocation rl_dest = InlineTargetWide(cu, info);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ OpRegCopyWide(cu, rl_result.low_reg, rl_result.high_reg, rl_src.low_reg, rl_src.high_reg);
+ FreeTemp(cu, rl_src.low_reg);
+ FreeTemp(cu, rl_src.high_reg);
+ int sign_reg = AllocTemp(cu);
// abs(x) = (x + y) ^ y, where y = x >> 31 (arithmetic shift).
- OpRegRegImm(cUnit, kOpAsr, signReg, rlResult.highReg, 31);
- OpRegReg(cUnit, kOpAdd, rlResult.lowReg, signReg);
- OpRegReg(cUnit, kOpAdc, rlResult.highReg, signReg);
- OpRegReg(cUnit, kOpXor, rlResult.lowReg, signReg);
- OpRegReg(cUnit, kOpXor, rlResult.highReg, signReg);
- StoreValueWide(cUnit, rlDest, rlResult);
+ OpRegRegImm(cu, kOpAsr, sign_reg, rl_result.high_reg, 31);
+ OpRegReg(cu, kOpAdd, rl_result.low_reg, sign_reg);
+ OpRegReg(cu, kOpAdc, rl_result.high_reg, sign_reg);
+ OpRegReg(cu, kOpXor, rl_result.low_reg, sign_reg);
+ OpRegReg(cu, kOpXor, rl_result.high_reg, sign_reg);
+ StoreValueWide(cu, rl_dest, rl_result);
return true;
}
}
-bool GenInlinedFloatCvt(CompilationUnit *cUnit, CallInfo* info)
+bool GenInlinedFloatCvt(CompilationUnit *cu, CallInfo* info)
{
- if (cUnit->instructionSet == kMips) {
+ if (cu->instruction_set == kMips) {
// TODO - add Mips implementation
return false;
}
- RegLocation rlSrc = info->args[0];
- RegLocation rlDest = InlineTarget(cUnit, info);
- StoreValue(cUnit, rlDest, rlSrc);
+ RegLocation rl_src = info->args[0];
+ RegLocation rl_dest = InlineTarget(cu, info);
+ StoreValue(cu, rl_dest, rl_src);
return true;
}
-bool GenInlinedDoubleCvt(CompilationUnit *cUnit, CallInfo* info)
+bool GenInlinedDoubleCvt(CompilationUnit *cu, CallInfo* info)
{
- if (cUnit->instructionSet == kMips) {
+ if (cu->instruction_set == kMips) {
// TODO - add Mips implementation
return false;
}
- RegLocation rlSrc = info->args[0];
- RegLocation rlDest = InlineTargetWide(cUnit, info);
- StoreValueWide(cUnit, rlDest, rlSrc);
+ RegLocation rl_src = info->args[0];
+ RegLocation rl_dest = InlineTargetWide(cu, info);
+ StoreValueWide(cu, rl_dest, rl_src);
return true;
}
/*
* Fast string.indexOf(I) & (II). Tests for simple case of char <= 0xffff,
* otherwise bails to standard library code.
*/
-bool GenInlinedIndexOf(CompilationUnit* cUnit, CallInfo* info,
- bool zeroBased)
+bool GenInlinedIndexOf(CompilationUnit* cu, CallInfo* info,
+ bool zero_based)
{
- if (cUnit->instructionSet == kMips) {
+ if (cu->instruction_set == kMips) {
// TODO - add Mips implementation
return false;
}
- ClobberCalleeSave(cUnit);
- LockCallTemps(cUnit); // Using fixed registers
- int regPtr = TargetReg(kArg0);
- int regChar = TargetReg(kArg1);
- int regStart = TargetReg(kArg2);
+ ClobberCalleeSave(cu);
+ LockCallTemps(cu); // Using fixed registers
+ int reg_ptr = TargetReg(kArg0);
+ int reg_char = TargetReg(kArg1);
+ int reg_start = TargetReg(kArg2);
- RegLocation rlObj = info->args[0];
- RegLocation rlChar = info->args[1];
- RegLocation rlStart = info->args[2];
- LoadValueDirectFixed(cUnit, rlObj, regPtr);
- LoadValueDirectFixed(cUnit, rlChar, regChar);
- if (zeroBased) {
- LoadConstant(cUnit, regStart, 0);
+ RegLocation rl_obj = info->args[0];
+ RegLocation rl_char = info->args[1];
+ RegLocation rl_start = info->args[2];
+ LoadValueDirectFixed(cu, rl_obj, reg_ptr);
+ LoadValueDirectFixed(cu, rl_char, reg_char);
+ if (zero_based) {
+ LoadConstant(cu, reg_start, 0);
} else {
- LoadValueDirectFixed(cUnit, rlStart, regStart);
+ LoadValueDirectFixed(cu, rl_start, reg_start);
}
- int rTgt = (cUnit->instructionSet != kX86) ? LoadHelper(cUnit, ENTRYPOINT_OFFSET(pIndexOf)) : 0;
- GenNullCheck(cUnit, rlObj.sRegLow, regPtr, info->optFlags);
- LIR* launchPad = RawLIR(cUnit, 0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
- InsertGrowableList(cUnit, &cUnit->intrinsicLaunchpads, reinterpret_cast<uintptr_t>(launchPad));
- OpCmpImmBranch(cUnit, kCondGt, regChar, 0xFFFF, launchPad);
+ int r_tgt = (cu->instruction_set != kX86) ? LoadHelper(cu, ENTRYPOINT_OFFSET(pIndexOf)) : 0;
+ GenNullCheck(cu, rl_obj.s_reg_low, reg_ptr, info->opt_flags);
+ LIR* launch_pad = RawLIR(cu, 0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
+ InsertGrowableList(cu, &cu->intrinsic_launchpads, reinterpret_cast<uintptr_t>(launch_pad));
+ OpCmpImmBranch(cu, kCondGt, reg_char, 0xFFFF, launch_pad);
// NOTE: not a safepoint
- if (cUnit->instructionSet != kX86) {
- OpReg(cUnit, kOpBlx, rTgt);
+ if (cu->instruction_set != kX86) {
+ OpReg(cu, kOpBlx, r_tgt);
} else {
- OpThreadMem(cUnit, kOpBlx, ENTRYPOINT_OFFSET(pIndexOf));
+ OpThreadMem(cu, kOpBlx, ENTRYPOINT_OFFSET(pIndexOf));
}
- LIR* resumeTgt = NewLIR0(cUnit, kPseudoTargetLabel);
- launchPad->operands[2] = reinterpret_cast<uintptr_t>(resumeTgt);
+ LIR* resume_tgt = NewLIR0(cu, kPseudoTargetLabel);
+ launch_pad->operands[2] = reinterpret_cast<uintptr_t>(resume_tgt);
// Record that we've already inlined & null checked
- info->optFlags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
- RegLocation rlReturn = GetReturn(cUnit, false);
- RegLocation rlDest = InlineTarget(cUnit, info);
- StoreValue(cUnit, rlDest, rlReturn);
+ info->opt_flags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
+ RegLocation rl_return = GetReturn(cu, false);
+ RegLocation rl_dest = InlineTarget(cu, info);
+ StoreValue(cu, rl_dest, rl_return);
return true;
}
/* Fast string.compareTo(Ljava/lang/String;)I. */
-bool GenInlinedStringCompareTo(CompilationUnit* cUnit, CallInfo* info)
+bool GenInlinedStringCompareTo(CompilationUnit* cu, CallInfo* info)
{
- if (cUnit->instructionSet == kMips) {
+ if (cu->instruction_set == kMips) {
// TODO - add Mips implementation
return false;
}
- ClobberCalleeSave(cUnit);
- LockCallTemps(cUnit); // Using fixed registers
- int regThis = TargetReg(kArg0);
- int regCmp = TargetReg(kArg1);
+ ClobberCalleeSave(cu);
+ LockCallTemps(cu); // Using fixed registers
+ int reg_this = TargetReg(kArg0);
+ int reg_cmp = TargetReg(kArg1);
- RegLocation rlThis = info->args[0];
- RegLocation rlCmp = info->args[1];
- LoadValueDirectFixed(cUnit, rlThis, regThis);
- LoadValueDirectFixed(cUnit, rlCmp, regCmp);
- int rTgt = (cUnit->instructionSet != kX86) ?
- LoadHelper(cUnit, ENTRYPOINT_OFFSET(pStringCompareTo)) : 0;
- GenNullCheck(cUnit, rlThis.sRegLow, regThis, info->optFlags);
- //TUNING: check if rlCmp.sRegLow is already null checked
- LIR* launchPad = RawLIR(cUnit, 0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
- InsertGrowableList(cUnit, &cUnit->intrinsicLaunchpads, reinterpret_cast<uintptr_t>(launchPad));
- OpCmpImmBranch(cUnit, kCondEq, regCmp, 0, launchPad);
+ RegLocation rl_this = info->args[0];
+ RegLocation rl_cmp = info->args[1];
+ LoadValueDirectFixed(cu, rl_this, reg_this);
+ LoadValueDirectFixed(cu, rl_cmp, reg_cmp);
+ int r_tgt = (cu->instruction_set != kX86) ?
+ LoadHelper(cu, ENTRYPOINT_OFFSET(pStringCompareTo)) : 0;
+ GenNullCheck(cu, rl_this.s_reg_low, reg_this, info->opt_flags);
+ //TUNING: check if rl_cmp.s_reg_low is already null checked
+ LIR* launch_pad = RawLIR(cu, 0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
+ InsertGrowableList(cu, &cu->intrinsic_launchpads, reinterpret_cast<uintptr_t>(launch_pad));
+ OpCmpImmBranch(cu, kCondEq, reg_cmp, 0, launch_pad);
// NOTE: not a safepoint
- if (cUnit->instructionSet != kX86) {
- OpReg(cUnit, kOpBlx, rTgt);
+ if (cu->instruction_set != kX86) {
+ OpReg(cu, kOpBlx, r_tgt);
} else {
- OpThreadMem(cUnit, kOpBlx, ENTRYPOINT_OFFSET(pStringCompareTo));
+ OpThreadMem(cu, kOpBlx, ENTRYPOINT_OFFSET(pStringCompareTo));
}
- launchPad->operands[2] = 0; // No return possible
+ launch_pad->operands[2] = 0; // No return possible
// Record that we've already inlined & null checked
- info->optFlags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
- RegLocation rlReturn = GetReturn(cUnit, false);
- RegLocation rlDest = InlineTarget(cUnit, info);
- StoreValue(cUnit, rlDest, rlReturn);
+ info->opt_flags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
+ RegLocation rl_return = GetReturn(cu, false);
+ RegLocation rl_dest = InlineTarget(cu, info);
+ StoreValue(cu, rl_dest, rl_return);
return true;
}
-bool GenIntrinsic(CompilationUnit* cUnit, CallInfo* info)
+bool GenIntrinsic(CompilationUnit* cu, CallInfo* info)
{
- if (info->optFlags & MIR_INLINED) {
+ if (info->opt_flags & MIR_INLINED) {
return false;
}
/*
@@ -919,155 +919,155 @@
* method. By doing this during basic block construction, we can also
* take advantage of, and generate, new useful dataflow info.
*/
- std::string tgtMethod(PrettyMethod(info->index, *cUnit->dex_file));
- if (tgtMethod.find(" java.lang") != std::string::npos) {
- if (tgtMethod == "long java.lang.Double.doubleToRawLongBits(double)") {
- return GenInlinedDoubleCvt(cUnit, info);
+ std::string tgt_method(PrettyMethod(info->index, *cu->dex_file));
+ if (tgt_method.find(" java.lang") != std::string::npos) {
+ if (tgt_method == "long java.lang.Double.doubleToRawLongBits(double)") {
+ return GenInlinedDoubleCvt(cu, info);
}
- if (tgtMethod == "double java.lang.Double.longBitsToDouble(long)") {
- return GenInlinedDoubleCvt(cUnit, info);
+ if (tgt_method == "double java.lang.Double.longBitsToDouble(long)") {
+ return GenInlinedDoubleCvt(cu, info);
}
- if (tgtMethod == "int java.lang.Float.floatToRawIntBits(float)") {
- return GenInlinedFloatCvt(cUnit, info);
+ if (tgt_method == "int java.lang.Float.float_to_raw_int_bits(float)") {
+ return GenInlinedFloatCvt(cu, info);
}
- if (tgtMethod == "float java.lang.Float.intBitsToFloat(int)") {
- return GenInlinedFloatCvt(cUnit, info);
+ if (tgt_method == "float java.lang.Float.intBitsToFloat(int)") {
+ return GenInlinedFloatCvt(cu, info);
}
- if (tgtMethod == "int java.lang.Math.abs(int)" ||
- tgtMethod == "int java.lang.StrictMath.abs(int)") {
- return GenInlinedAbsInt(cUnit, info);
+ if (tgt_method == "int java.lang.Math.abs(int)" ||
+ tgt_method == "int java.lang.StrictMath.abs(int)") {
+ return GenInlinedAbsInt(cu, info);
}
- if (tgtMethod == "long java.lang.Math.abs(long)" ||
- tgtMethod == "long java.lang.StrictMath.abs(long)") {
- return GenInlinedAbsLong(cUnit, info);
+ if (tgt_method == "long java.lang.Math.abs(long)" ||
+ tgt_method == "long java.lang.StrictMath.abs(long)") {
+ return GenInlinedAbsLong(cu, info);
}
- if (tgtMethod == "int java.lang.Math.max(int, int)" ||
- tgtMethod == "int java.lang.StrictMath.max(int, int)") {
- return GenInlinedMinMaxInt(cUnit, info, false /* isMin */);
+ if (tgt_method == "int java.lang.Math.max(int, int)" ||
+ tgt_method == "int java.lang.StrictMath.max(int, int)") {
+ return GenInlinedMinMaxInt(cu, info, false /* is_min */);
}
- if (tgtMethod == "int java.lang.Math.min(int, int)" ||
- tgtMethod == "int java.lang.StrictMath.min(int, int)") {
- return GenInlinedMinMaxInt(cUnit, info, true /* isMin */);
+ if (tgt_method == "int java.lang.Math.min(int, int)" ||
+ tgt_method == "int java.lang.StrictMath.min(int, int)") {
+ return GenInlinedMinMaxInt(cu, info, true /* is_min */);
}
- if (tgtMethod == "double java.lang.Math.sqrt(double)" ||
- tgtMethod == "double java.lang.StrictMath.sqrt(double)") {
- return GenInlinedSqrt(cUnit, info);
+ if (tgt_method == "double java.lang.Math.sqrt(double)" ||
+ tgt_method == "double java.lang.StrictMath.sqrt(double)") {
+ return GenInlinedSqrt(cu, info);
}
- if (tgtMethod == "char java.lang.String.charAt(int)") {
- return GenInlinedCharAt(cUnit, info);
+ if (tgt_method == "char java.lang.String.charAt(int)") {
+ return GenInlinedCharAt(cu, info);
}
- if (tgtMethod == "int java.lang.String.compareTo(java.lang.String)") {
- return GenInlinedStringCompareTo(cUnit, info);
+ if (tgt_method == "int java.lang.String.compareTo(java.lang.String)") {
+ return GenInlinedStringCompareTo(cu, info);
}
- if (tgtMethod == "boolean java.lang.String.isEmpty()") {
- return GenInlinedStringIsEmptyOrLength(cUnit, info, true /* isEmpty */);
+ if (tgt_method == "boolean java.lang.String.is_empty()") {
+ return GenInlinedStringIsEmptyOrLength(cu, info, true /* is_empty */);
}
- if (tgtMethod == "int java.lang.String.indexOf(int, int)") {
- return GenInlinedIndexOf(cUnit, info, false /* base 0 */);
+ if (tgt_method == "int java.lang.String.index_of(int, int)") {
+ return GenInlinedIndexOf(cu, info, false /* zero_based */);
}
- if (tgtMethod == "int java.lang.String.indexOf(int)") {
- return GenInlinedIndexOf(cUnit, info, true /* base 0 */);
+ if (tgt_method == "int java.lang.String.index_of(int)") {
+ return GenInlinedIndexOf(cu, info, true /* zero_based */);
}
- if (tgtMethod == "int java.lang.String.length()") {
- return GenInlinedStringIsEmptyOrLength(cUnit, info, false /* isEmpty */);
+ if (tgt_method == "int java.lang.String.length()") {
+ return GenInlinedStringIsEmptyOrLength(cu, info, false /* is_empty */);
}
- } else if (tgtMethod.find("boolean sun.misc.Unsafe.compareAndSwap") != std::string::npos) {
- if (tgtMethod == "boolean sun.misc.Unsafe.compareAndSwapInt(java.lang.Object, long, int, int)") {
- return GenInlinedCas32(cUnit, info, false);
+ } else if (tgt_method.find("boolean sun.misc.Unsafe.compareAndSwap") != std::string::npos) {
+ if (tgt_method == "boolean sun.misc.Unsafe.compareAndSwapInt(java.lang.Object, long, int, int)") {
+ return GenInlinedCas32(cu, info, false);
}
- if (tgtMethod == "boolean sun.misc.Unsafe.compareAndSwapObject(java.lang.Object, long, java.lang.Object, java.lang.Object)") {
- return GenInlinedCas32(cUnit, info, true);
+ if (tgt_method == "boolean sun.misc.Unsafe.compareAndSwapObject(java.lang.Object, long, java.lang.Object, java.lang.Object)") {
+ return GenInlinedCas32(cu, info, true);
}
}
return false;
}
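The matching above depends on the exact text PrettyMethod produces; from the literals it appears to be "<return type> <declaring class>.<name>(<param types>)". A sketch of the lookup style, with a stand-in for PrettyMethod (the format is inferred from the strings above, not from the PrettyMethod source):

    // Sketch of GenIntrinsic's lookup style: compare the pretty-printed
    // signature against known strings. The format is inferred from the
    // literals above; ToyPrettyMethod is a stand-in for the real helper.
    #include <cstdio>
    #include <string>

    static std::string ToyPrettyMethod() {
      // "<return type> <declaring class>.<name>(<param types>)"
      return "int java.lang.String.indexOf(int)";
    }

    int main() {
      std::string tgt_method(ToyPrettyMethod());
      // Cheap substring screen first (the " java.lang" find() above),
      // then exact matches pick the inlined implementation.
      if (tgt_method.find(" java.lang") != std::string::npos &&
          tgt_method == "int java.lang.String.indexOf(int)") {
        std::printf("inline via GenInlinedIndexOf(zero_based = true)\n");
      }
      return 0;
    }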
-void GenInvoke(CompilationUnit* cUnit, CallInfo* info)
+void GenInvoke(CompilationUnit* cu, CallInfo* info)
{
- if (GenIntrinsic(cUnit, info)) {
+ if (GenIntrinsic(cu, info)) {
return;
}
- InvokeType originalType = info->type; // avoiding mutation by ComputeInvokeInfo
- int callState = 0;
- LIR* nullCk;
- LIR** pNullCk = NULL;
- NextCallInsn nextCallInsn;
- FlushAllRegs(cUnit); /* Everything to home location */
+ InvokeType original_type = info->type; // ComputeInvokeInfo can mutate info->type
+ int call_state = 0;
+ LIR* null_ck;
+ LIR** p_null_ck = NULL;
+ NextCallInsn next_call_insn;
+ FlushAllRegs(cu); /* Everything to home location */
// Explicit register usage
- LockCallTemps(cUnit);
+ LockCallTemps(cu);
- OatCompilationUnit mUnit(cUnit->class_loader, cUnit->class_linker,
- *cUnit->dex_file,
- cUnit->code_item, cUnit->method_idx,
- cUnit->access_flags);
+ OatCompilationUnit m_unit(cu->class_loader, cu->class_linker,
+ *cu->dex_file,
+ cu->code_item, cu->method_idx,
+ cu->access_flags);
- uint32_t dexMethodIdx = info->index;
- int vtableIdx;
- uintptr_t directCode;
- uintptr_t directMethod;
- bool skipThis;
- bool fastPath =
- cUnit->compiler->ComputeInvokeInfo(dexMethodIdx, &mUnit, info->type,
- vtableIdx, directCode,
- directMethod)
+ uint32_t dex_method_idx = info->index;
+ int vtable_idx;
+ uintptr_t direct_code;
+ uintptr_t direct_method;
+ bool skip_this;
+ bool fast_path =
+ cu->compiler->ComputeInvokeInfo(dex_method_idx, &m_unit, info->type,
+ vtable_idx, direct_code,
+ direct_method)
&& !SLOW_INVOKE_PATH;
if (info->type == kInterface) {
- if (fastPath) {
- pNullCk = &nullCk;
+ if (fast_path) {
+ p_null_ck = &null_ck;
}
- nextCallInsn = fastPath ? NextInterfaceCallInsn
+ next_call_insn = fast_path ? NextInterfaceCallInsn
: NextInterfaceCallInsnWithAccessCheck;
- skipThis = false;
+ skip_this = false;
} else if (info->type == kDirect) {
- if (fastPath) {
- pNullCk = &nullCk;
+ if (fast_path) {
+ p_null_ck = &null_ck;
}
- nextCallInsn = fastPath ? NextSDCallInsn : NextDirectCallInsnSP;
- skipThis = false;
+ next_call_insn = fast_path ? NextSDCallInsn : NextDirectCallInsnSP;
+ skip_this = false;
} else if (info->type == kStatic) {
- nextCallInsn = fastPath ? NextSDCallInsn : NextStaticCallInsnSP;
- skipThis = false;
+ next_call_insn = fast_path ? NextSDCallInsn : NextStaticCallInsnSP;
+ skip_this = false;
} else if (info->type == kSuper) {
- DCHECK(!fastPath); // Fast path is a direct call.
- nextCallInsn = NextSuperCallInsnSP;
- skipThis = false;
+ DCHECK(!fast_path); // Fast path is a direct call.
+ next_call_insn = NextSuperCallInsnSP;
+ skip_this = false;
} else {
DCHECK_EQ(info->type, kVirtual);
- nextCallInsn = fastPath ? NextVCallInsn : NextVCallInsnSP;
- skipThis = fastPath;
+ next_call_insn = fast_path ? NextVCallInsn : NextVCallInsnSP;
+ skip_this = fast_path;
}
- if (!info->isRange) {
- callState = GenDalvikArgsNoRange(cUnit, info, callState, pNullCk,
- nextCallInsn, dexMethodIdx,
- vtableIdx, directCode, directMethod,
- originalType, skipThis);
+ if (!info->is_range) {
+ call_state = GenDalvikArgsNoRange(cu, info, call_state, p_null_ck,
+ next_call_insn, dex_method_idx,
+ vtable_idx, direct_code, direct_method,
+ original_type, skip_this);
} else {
- callState = GenDalvikArgsRange(cUnit, info, callState, pNullCk,
- nextCallInsn, dexMethodIdx, vtableIdx,
- directCode, directMethod, originalType,
- skipThis);
+ call_state = GenDalvikArgsRange(cu, info, call_state, p_null_ck,
+ next_call_insn, dex_method_idx, vtable_idx,
+ direct_code, direct_method, original_type,
+ skip_this);
}
// Finish up any of the call sequence not interleaved in arg loading
- while (callState >= 0) {
- callState = nextCallInsn(cUnit, info, callState, dexMethodIdx,
- vtableIdx, directCode, directMethod,
- originalType);
+ while (call_state >= 0) {
+ call_state = next_call_insn(cu, info, call_state, dex_method_idx,
+ vtable_idx, direct_code, direct_method,
+ original_type);
}
- if (cUnit->enableDebug & (1 << kDebugDisplayMissingTargets)) {
- GenShowTarget(cUnit);
+ if (cu->enable_debug & (1 << kDebugDisplayMissingTargets)) {
+ GenShowTarget(cu);
}
- LIR* callInst;
- if (cUnit->instructionSet != kX86) {
- callInst = OpReg(cUnit, kOpBlx, TargetReg(kInvokeTgt));
+ LIR* call_inst;
+ if (cu->instruction_set != kX86) {
+ call_inst = OpReg(cu, kOpBlx, TargetReg(kInvokeTgt));
} else {
- if (fastPath && info->type != kInterface) {
- callInst = OpMem(cUnit, kOpBlx, TargetReg(kArg0),
+ if (fast_path && info->type != kInterface) {
+ call_inst = OpMem(cu, kOpBlx, TargetReg(kArg0),
AbstractMethod::GetCodeOffset().Int32Value());
} else {
int trampoline = 0;
switch (info->type) {
case kInterface:
- trampoline = fastPath ? ENTRYPOINT_OFFSET(pInvokeInterfaceTrampoline)
+ trampoline = fast_path ? ENTRYPOINT_OFFSET(pInvokeInterfaceTrampoline)
: ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck);
break;
case kDirect:
@@ -1085,20 +1085,20 @@
default:
LOG(FATAL) << "Unexpected invoke type";
}
- callInst = OpThreadMem(cUnit, kOpBlx, trampoline);
+ call_inst = OpThreadMem(cu, kOpBlx, trampoline);
}
}
- MarkSafepointPC(cUnit, callInst);
+ MarkSafepointPC(cu, call_inst);
- ClobberCalleeSave(cUnit);
+ ClobberCalleeSave(cu);
if (info->result.location != kLocInvalid) {
// We have a following MOVE_RESULT - do it now.
if (info->result.wide) {
- RegLocation retLoc = GetReturnWide(cUnit, info->result.fp);
- StoreValueWide(cUnit, info->result, retLoc);
+ RegLocation ret_loc = GetReturnWide(cu, info->result.fp);
+ StoreValueWide(cu, info->result, ret_loc);
} else {
- RegLocation retLoc = GetReturn(cUnit, info->result.fp);
- StoreValue(cUnit, info->result, retLoc);
+ RegLocation ret_loc = GetReturn(cu, info->result.fp);
+ StoreValue(cu, info->result, ret_loc);
}
}
}
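The tail of the sequence above is driven as a small state machine: each NextCallInsn implementation emits one step of the call sequence and returns the next state, going negative when finished, which is what the while loop drains. A minimal sketch of the pattern - the emit steps here are hypothetical, not the real NextSDCallInsn logic:

// Sketch of a NextCallInsn-compatible state machine (hypothetical steps).
static int NextFakeCallInsn(CompilationUnit* cu, CallInfo* info, int state,
                            uint32_t dex_idx, uint32_t method_idx,
                            uintptr_t direct_code, uintptr_t direct_method,
                            InvokeType type) {
  switch (state) {
    case 0:   // e.g. load the target Method* into kArg0
      return 1;
    case 1:   // e.g. load the entry point into kInvokeTgt
      return 2;
    default:
      return -1;  // Sequence complete; the driver's while loop exits.
  }
}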
@@ -1109,26 +1109,26 @@
* high-word loc for wide arguments. Also pull up any following
* MOVE_RESULT and incorporate it into the invoke.
*/
-CallInfo* NewMemCallInfo(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir,
- InvokeType type, bool isRange)
+CallInfo* NewMemCallInfo(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
+ InvokeType type, bool is_range)
{
- CallInfo* info = static_cast<CallInfo*>(NewMem(cUnit, sizeof(CallInfo), true, kAllocMisc));
- MIR* moveResultMIR = FindMoveResult(cUnit, bb, mir);
- if (moveResultMIR == NULL) {
+ CallInfo* info = static_cast<CallInfo*>(NewMem(cu, sizeof(CallInfo), true, kAllocMisc));
+ MIR* move_result_mir = FindMoveResult(cu, bb, mir);
+ if (move_result_mir == NULL) {
info->result.location = kLocInvalid;
} else {
- info->result = GetRawDest(cUnit, moveResultMIR);
- moveResultMIR->dalvikInsn.opcode = Instruction::NOP;
+ info->result = GetRawDest(cu, move_result_mir);
+ move_result_mir->dalvikInsn.opcode = Instruction::NOP;
}
- info->numArgWords = mir->ssaRep->numUses;
- info->args = (info->numArgWords == 0) ? NULL : static_cast<RegLocation*>
- (NewMem(cUnit, sizeof(RegLocation) * info->numArgWords, false, kAllocMisc));
- for (int i = 0; i < info->numArgWords; i++) {
- info->args[i] = GetRawSrc(cUnit, mir, i);
+ info->num_arg_words = mir->ssa_rep->num_uses;
+ info->args = (info->num_arg_words == 0) ? NULL : static_cast<RegLocation*>
+ (NewMem(cu, sizeof(RegLocation) * info->num_arg_words, false, kAllocMisc));
+ for (int i = 0; i < info->num_arg_words; i++) {
+ info->args[i] = GetRawSrc(cu, mir, i);
}
- info->optFlags = mir->optimizationFlags;
+ info->opt_flags = mir->optimization_flags;
info->type = type;
- info->isRange = isRange;
+ info->is_range = is_range;
info->index = mir->dalvikInsn.vB;
info->offset = mir->offset;
return info;
diff --git a/src/compiler/codegen/gen_invoke.h b/src/compiler/codegen/gen_invoke.h
index a20746d..c2d32fe 100644
--- a/src/compiler/codegen/gen_invoke.h
+++ b/src/compiler/codegen/gen_invoke.h
@@ -17,25 +17,25 @@
#ifndef ART_SRC_COMPILER_CODEGEN_GENINVOKE_H_
#define ART_SRC_COMPILER_CODEGEN_GENINVOKE_H_
-typedef int (*NextCallInsn)(CompilationUnit*, CallInfo*, int, uint32_t dexIdx,
- uint32_t methodIdx, uintptr_t directCode,
- uintptr_t directMethod, InvokeType type);
+typedef int (*NextCallInsn)(CompilationUnit*, CallInfo*, int, uint32_t dex_idx,
+ uint32_t method_idx, uintptr_t direct_code,
+ uintptr_t direct_method, InvokeType type);
-void FlushIns(CompilationUnit* cUnit, RegLocation* ArgLocs, RegLocation rlMethod);
-int GenDalvikArgsNoRange(CompilationUnit* cUnit, CallInfo* info, int callState, LIR** pcrLabel, NextCallInsn nextCallInsn, uint32_t dexIdx, uint32_t methodIdx, uintptr_t directCode, uintptr_t directMethod, InvokeType type, bool skipThis);
-int GenDalvikArgsRange(CompilationUnit* cUnit, CallInfo* info, int callState, LIR** pcrLabel, NextCallInsn nextCallInsn, uint32_t dexIdx, uint32_t methodIdx, uintptr_t directCode, uintptr_t directMethod, InvokeType type, bool skipThis);
-RegLocation InlineTarget(CompilationUnit* cUnit, CallInfo* info);
-RegLocation InlineTargetWide(CompilationUnit* cUnit, CallInfo* info);
-bool GenInlinedCharAt(CompilationUnit* cUnit, CallInfo* info);
-bool GenInlinedStringIsEmptyOrLength(CompilationUnit* cUnit, CallInfo* info, bool isEmpty);
-bool GenInlinedAbsInt(CompilationUnit *cUnit, CallInfo* info);
-bool GenInlinedAbsLong(CompilationUnit *cUnit, CallInfo* info);
-bool GenInlinedFloatCvt(CompilationUnit *cUnit, CallInfo* info);
-bool GenInlinedDoubleCvt(CompilationUnit *cUnit, CallInfo* info);
-bool GenInlinedIndexOf(CompilationUnit* cUnit, CallInfo* info, bool zeroBased);
-bool GenInlinedStringCompareTo(CompilationUnit* cUnit, CallInfo* info);
-bool GenIntrinsic(CompilationUnit* cUnit, CallInfo* info);
-void GenInvoke(CompilationUnit* cUnit, CallInfo* info);
-CallInfo* NewMemCallInfo(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir, InvokeType type, bool isRange);
+void FlushIns(CompilationUnit* cu, RegLocation* ArgLocs, RegLocation rl_method);
+int GenDalvikArgsNoRange(CompilationUnit* cu, CallInfo* info, int call_state, LIR** pcrLabel, NextCallInsn next_call_insn, uint32_t dex_idx, uint32_t method_idx, uintptr_t direct_code, uintptr_t direct_method, InvokeType type, bool skip_this);
+int GenDalvikArgsRange(CompilationUnit* cu, CallInfo* info, int call_state, LIR** pcrLabel, NextCallInsn next_call_insn, uint32_t dex_idx, uint32_t method_idx, uintptr_t direct_code, uintptr_t direct_method, InvokeType type, bool skip_this);
+RegLocation InlineTarget(CompilationUnit* cu, CallInfo* info);
+RegLocation InlineTargetWide(CompilationUnit* cu, CallInfo* info);
+bool GenInlinedCharAt(CompilationUnit* cu, CallInfo* info);
+bool GenInlinedStringIsEmptyOrLength(CompilationUnit* cu, CallInfo* info, bool is_empty);
+bool GenInlinedAbsInt(CompilationUnit *cu, CallInfo* info);
+bool GenInlinedAbsLong(CompilationUnit *cu, CallInfo* info);
+bool GenInlinedFloatCvt(CompilationUnit *cu, CallInfo* info);
+bool GenInlinedDoubleCvt(CompilationUnit *cu, CallInfo* info);
+bool GenInlinedIndexOf(CompilationUnit* cu, CallInfo* info, bool zero_based);
+bool GenInlinedStringCompareTo(CompilationUnit* cu, CallInfo* info);
+bool GenIntrinsic(CompilationUnit* cu, CallInfo* info);
+void GenInvoke(CompilationUnit* cu, CallInfo* info);
+CallInfo* NewMemCallInfo(CompilationUnit* cu, BasicBlock* bb, MIR* mir, InvokeType type, bool is_range);
#endif // ART_SRC_COMPILER_CODEGEN_GENINVOKE_H_
diff --git a/src/compiler/codegen/gen_loadstore.cc b/src/compiler/codegen/gen_loadstore.cc
index 7262c13..438a16b 100644
--- a/src/compiler/codegen/gen_loadstore.cc
+++ b/src/compiler/codegen/gen_loadstore.cc
@@ -24,29 +24,29 @@
/*
* Load an immediate value into a fixed or temp register. Target
- * register is clobbered, and marked inUse.
+ * register is clobbered, and marked in_use.
*/
-LIR* LoadConstant(CompilationUnit* cUnit, int rDest, int value)
+LIR* LoadConstant(CompilationUnit* cu, int r_dest, int value)
{
- if (IsTemp(cUnit, rDest)) {
- Clobber(cUnit, rDest);
- MarkInUse(cUnit, rDest);
+ if (IsTemp(cu, r_dest)) {
+ Clobber(cu, r_dest);
+ MarkInUse(cu, r_dest);
}
- return LoadConstantNoClobber(cUnit, rDest, value);
+ return LoadConstantNoClobber(cu, r_dest, value);
}
/* Load a word at base + displacement. Displacement must be word multiple */
-LIR* LoadWordDisp(CompilationUnit* cUnit, int rBase, int displacement,
- int rDest)
+LIR* LoadWordDisp(CompilationUnit* cu, int rBase, int displacement,
+ int r_dest)
{
- return LoadBaseDisp(cUnit, rBase, displacement, rDest, kWord,
+ return LoadBaseDisp(cu, rBase, displacement, r_dest, kWord,
INVALID_SREG);
}
-LIR* StoreWordDisp(CompilationUnit* cUnit, int rBase, int displacement,
- int rSrc)
+LIR* StoreWordDisp(CompilationUnit* cu, int rBase, int displacement,
+ int r_src)
{
- return StoreBaseDisp(cUnit, rBase, displacement, rSrc, kWord);
+ return StoreBaseDisp(cu, rBase, displacement, r_src, kWord);
}
/*
@@ -54,15 +54,15 @@
* using this routine, as it doesn't perform any bookkeeping regarding
* register liveness. That is the responsibility of the caller.
*/
-void LoadValueDirect(CompilationUnit* cUnit, RegLocation rlSrc, int rDest)
+void LoadValueDirect(CompilationUnit* cu, RegLocation rl_src, int r_dest)
{
- rlSrc = UpdateLoc(cUnit, rlSrc);
- if (rlSrc.location == kLocPhysReg) {
- OpRegCopy(cUnit, rDest, rlSrc.lowReg);
+ rl_src = UpdateLoc(cu, rl_src);
+ if (rl_src.location == kLocPhysReg) {
+ OpRegCopy(cu, r_dest, rl_src.low_reg);
} else {
- DCHECK((rlSrc.location == kLocDalvikFrame) ||
- (rlSrc.location == kLocCompilerTemp));
- LoadWordDisp(cUnit, TargetReg(kSp), SRegOffset(cUnit, rlSrc.sRegLow), rDest);
+ DCHECK((rl_src.location == kLocDalvikFrame) ||
+ (rl_src.location == kLocCompilerTemp));
+ LoadWordDisp(cu, TargetReg(kSp), SRegOffset(cu, rl_src.s_reg_low), r_dest);
}
}
@@ -71,11 +71,11 @@
* register. Should be used when loading to a fixed register (for example,
* loading arguments to an out-of-line call).
*/
-void LoadValueDirectFixed(CompilationUnit* cUnit, RegLocation rlSrc, int rDest)
+void LoadValueDirectFixed(CompilationUnit* cu, RegLocation rl_src, int r_dest)
{
- Clobber(cUnit, rDest);
- MarkInUse(cUnit, rDest);
- LoadValueDirect(cUnit, rlSrc, rDest);
+ Clobber(cu, r_dest);
+ MarkInUse(cu, r_dest);
+ LoadValueDirect(cu, rl_src, r_dest);
}
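The Fixed variants exist for ABI-mandated destinations: they clobber and reserve the target register before loading, so the allocator cannot hand it out mid-sequence. A hedged sketch of a typical call site (the argument registers are illustrative):

// Marshal two narrow values into fixed argument registers ahead of an
// out-of-line call; each destination is clobbered and marked in-use first.
LoadValueDirectFixed(cu, rl_arg0, TargetReg(kArg1));
LoadValueDirectFixed(cu, rl_arg1, TargetReg(kArg2));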
/*
@@ -83,17 +83,17 @@
* using this routine, as it doesn't perform any bookkeeping regarding
* register liveness. That is the responsibility of the caller.
*/
-void LoadValueDirectWide(CompilationUnit* cUnit, RegLocation rlSrc, int regLo,
- int regHi)
+void LoadValueDirectWide(CompilationUnit* cu, RegLocation rl_src, int reg_lo,
+ int reg_hi)
{
- rlSrc = UpdateLocWide(cUnit, rlSrc);
- if (rlSrc.location == kLocPhysReg) {
- OpRegCopyWide(cUnit, regLo, regHi, rlSrc.lowReg, rlSrc.highReg);
+ rl_src = UpdateLocWide(cu, rl_src);
+ if (rl_src.location == kLocPhysReg) {
+ OpRegCopyWide(cu, reg_lo, reg_hi, rl_src.low_reg, rl_src.high_reg);
} else {
- DCHECK((rlSrc.location == kLocDalvikFrame) ||
- (rlSrc.location == kLocCompilerTemp));
- LoadBaseDispWide(cUnit, TargetReg(kSp), SRegOffset(cUnit, rlSrc.sRegLow),
- regLo, regHi, INVALID_SREG);
+ DCHECK((rl_src.location == kLocDalvikFrame) ||
+ (rl_src.location == kLocCompilerTemp));
+ LoadBaseDispWide(cu, TargetReg(kSp), SRegOffset(cu, rl_src.s_reg_low),
+ reg_lo, reg_hi, INVALID_SREG);
}
}
@@ -102,31 +102,31 @@
* registers. Should be used when loading to fixed registers (for example,
* loading arguments to an out-of-line call).
*/
-void LoadValueDirectWideFixed(CompilationUnit* cUnit, RegLocation rlSrc,
- int regLo, int regHi)
+void LoadValueDirectWideFixed(CompilationUnit* cu, RegLocation rl_src,
+ int reg_lo, int reg_hi)
{
- Clobber(cUnit, regLo);
- Clobber(cUnit, regHi);
- MarkInUse(cUnit, regLo);
- MarkInUse(cUnit, regHi);
- LoadValueDirectWide(cUnit, rlSrc, regLo, regHi);
+ Clobber(cu, reg_lo);
+ Clobber(cu, reg_hi);
+ MarkInUse(cu, reg_lo);
+ MarkInUse(cu, reg_hi);
+ LoadValueDirectWide(cu, rl_src, reg_lo, reg_hi);
}
-RegLocation LoadValue(CompilationUnit* cUnit, RegLocation rlSrc,
- RegisterClass opKind)
+RegLocation LoadValue(CompilationUnit* cu, RegLocation rl_src,
+ RegisterClass op_kind)
{
- rlSrc = EvalLoc(cUnit, rlSrc, opKind, false);
- if (rlSrc.location != kLocPhysReg) {
- DCHECK((rlSrc.location == kLocDalvikFrame) ||
- (rlSrc.location == kLocCompilerTemp));
- LoadValueDirect(cUnit, rlSrc, rlSrc.lowReg);
- rlSrc.location = kLocPhysReg;
- MarkLive(cUnit, rlSrc.lowReg, rlSrc.sRegLow);
+ rl_src = EvalLoc(cu, rl_src, op_kind, false);
+ if (rl_src.location != kLocPhysReg) {
+ DCHECK((rl_src.location == kLocDalvikFrame) ||
+ (rl_src.location == kLocCompilerTemp));
+ LoadValueDirect(cu, rl_src, rl_src.low_reg);
+ rl_src.location = kLocPhysReg;
+ MarkLive(cu, rl_src.low_reg, rl_src.s_reg_low);
}
- return rlSrc;
+ return rl_src;
}
-void StoreValue(CompilationUnit* cUnit, RegLocation rlDest, RegLocation rlSrc)
+void StoreValue(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
{
#ifndef NDEBUG
/*
@@ -134,70 +134,70 @@
* ssa name during the compilation of a single instruction
* without an intervening ClobberSReg().
*/
- DCHECK((cUnit->liveSReg == INVALID_SREG) ||
- (rlDest.sRegLow != cUnit->liveSReg));
- cUnit->liveSReg = rlDest.sRegLow;
+ DCHECK((cu->live_sreg == INVALID_SREG) ||
+ (rl_dest.s_reg_low != cu->live_sreg));
+ cu->live_sreg = rl_dest.s_reg_low;
#endif
- LIR* defStart;
- LIR* defEnd;
- DCHECK(!rlDest.wide);
- DCHECK(!rlSrc.wide);
- rlSrc = UpdateLoc(cUnit, rlSrc);
- rlDest = UpdateLoc(cUnit, rlDest);
- if (rlSrc.location == kLocPhysReg) {
- if (IsLive(cUnit, rlSrc.lowReg) ||
- IsPromoted(cUnit, rlSrc.lowReg) ||
- (rlDest.location == kLocPhysReg)) {
+ LIR* def_start;
+ LIR* def_end;
+ DCHECK(!rl_dest.wide);
+ DCHECK(!rl_src.wide);
+ rl_src = UpdateLoc(cu, rl_src);
+ rl_dest = UpdateLoc(cu, rl_dest);
+ if (rl_src.location == kLocPhysReg) {
+ if (IsLive(cu, rl_src.low_reg) ||
+ IsPromoted(cu, rl_src.low_reg) ||
+ (rl_dest.location == kLocPhysReg)) {
// Src is live/promoted or Dest has assigned reg.
- rlDest = EvalLoc(cUnit, rlDest, kAnyReg, false);
- OpRegCopy(cUnit, rlDest.lowReg, rlSrc.lowReg);
+ rl_dest = EvalLoc(cu, rl_dest, kAnyReg, false);
+ OpRegCopy(cu, rl_dest.low_reg, rl_src.low_reg);
} else {
// Just re-assign the registers. Dest gets Src's regs
- rlDest.lowReg = rlSrc.lowReg;
- Clobber(cUnit, rlSrc.lowReg);
+ rl_dest.low_reg = rl_src.low_reg;
+ Clobber(cu, rl_src.low_reg);
}
} else {
// Load Src either into promoted Dest or temps allocated for Dest
- rlDest = EvalLoc(cUnit, rlDest, kAnyReg, false);
- LoadValueDirect(cUnit, rlSrc, rlDest.lowReg);
+ rl_dest = EvalLoc(cu, rl_dest, kAnyReg, false);
+ LoadValueDirect(cu, rl_src, rl_dest.low_reg);
}
// Dest is now live and dirty (until/if we flush it to home location)
- MarkLive(cUnit, rlDest.lowReg, rlDest.sRegLow);
- MarkDirty(cUnit, rlDest);
+ MarkLive(cu, rl_dest.low_reg, rl_dest.s_reg_low);
+ MarkDirty(cu, rl_dest);
- ResetDefLoc(cUnit, rlDest);
- if (IsDirty(cUnit, rlDest.lowReg) &&
- oatLiveOut(cUnit, rlDest.sRegLow)) {
- defStart = cUnit->lastLIRInsn;
- StoreBaseDisp(cUnit, TargetReg(kSp), SRegOffset(cUnit, rlDest.sRegLow),
- rlDest.lowReg, kWord);
- MarkClean(cUnit, rlDest);
- defEnd = cUnit->lastLIRInsn;
- MarkDef(cUnit, rlDest, defStart, defEnd);
+ ResetDefLoc(cu, rl_dest);
+ if (IsDirty(cu, rl_dest.low_reg) &&
+ oat_live_out(cu, rl_dest.s_reg_low)) {
+ def_start = cu->last_lir_insn;
+ StoreBaseDisp(cu, TargetReg(kSp), SRegOffset(cu, rl_dest.s_reg_low),
+ rl_dest.low_reg, kWord);
+ MarkClean(cu, rl_dest);
+ def_end = cu->last_lir_insn;
+ MarkDef(cu, rl_dest, def_start, def_end);
}
}
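StoreValue is how a codegen routine publishes a narrow result: compute into whatever registers EvalLoc assigned, then let StoreValue do the liveness marking and optional write-back shown above. A minimal sketch of the usual pattern, assuming the quick compiler's three-operand OpRegRegReg helper:

// Compute rl_dest = rl_src1 + rl_src2, then publish the result.
RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
OpRegRegReg(cu, kOpAdd, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg);
StoreValue(cu, rl_dest, rl_result);  // MarkLive/MarkDirty + flush if live-out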
-RegLocation LoadValueWide(CompilationUnit* cUnit, RegLocation rlSrc,
- RegisterClass opKind)
+RegLocation LoadValueWide(CompilationUnit* cu, RegLocation rl_src,
+ RegisterClass op_kind)
{
- DCHECK(rlSrc.wide);
- rlSrc = EvalLoc(cUnit, rlSrc, opKind, false);
- if (rlSrc.location != kLocPhysReg) {
- DCHECK((rlSrc.location == kLocDalvikFrame) ||
- (rlSrc.location == kLocCompilerTemp));
- LoadValueDirectWide(cUnit, rlSrc, rlSrc.lowReg, rlSrc.highReg);
- rlSrc.location = kLocPhysReg;
- MarkLive(cUnit, rlSrc.lowReg, rlSrc.sRegLow);
- MarkLive(cUnit, rlSrc.highReg,
- oatSRegHi(rlSrc.sRegLow));
+ DCHECK(rl_src.wide);
+ rl_src = EvalLoc(cu, rl_src, op_kind, false);
+ if (rl_src.location != kLocPhysReg) {
+ DCHECK((rl_src.location == kLocDalvikFrame) ||
+ (rl_src.location == kLocCompilerTemp));
+ LoadValueDirectWide(cu, rl_src, rl_src.low_reg, rl_src.high_reg);
+ rl_src.location = kLocPhysReg;
+ MarkLive(cu, rl_src.low_reg, rl_src.s_reg_low);
+ MarkLive(cu, rl_src.high_reg,
+ GetSRegHi(rl_src.s_reg_low));
}
- return rlSrc;
+ return rl_src;
}
-void StoreValueWide(CompilationUnit* cUnit, RegLocation rlDest,
- RegLocation rlSrc)
+void StoreValueWide(CompilationUnit* cu, RegLocation rl_dest,
+ RegLocation rl_src)
{
#ifndef NDEBUG
/*
@@ -205,70 +205,70 @@
* ssa name during the compilation of a single instruction
* without an intervening ClobberSReg().
*/
- DCHECK((cUnit->liveSReg == INVALID_SREG) ||
- (rlDest.sRegLow != cUnit->liveSReg));
- cUnit->liveSReg = rlDest.sRegLow;
+ DCHECK((cu->live_sreg == INVALID_SREG) ||
+ (rl_dest.s_reg_low != cu->live_sreg));
+ cu->live_sreg = rl_dest.s_reg_low;
#endif
- LIR* defStart;
- LIR* defEnd;
- DCHECK_EQ(FpReg(rlSrc.lowReg), FpReg(rlSrc.highReg));
- DCHECK(rlDest.wide);
- DCHECK(rlSrc.wide);
- if (rlSrc.location == kLocPhysReg) {
- if (IsLive(cUnit, rlSrc.lowReg) ||
- IsLive(cUnit, rlSrc.highReg) ||
- IsPromoted(cUnit, rlSrc.lowReg) ||
- IsPromoted(cUnit, rlSrc.highReg) ||
- (rlDest.location == kLocPhysReg)) {
+ LIR* def_start;
+ LIR* def_end;
+ DCHECK_EQ(FpReg(rl_src.low_reg), FpReg(rl_src.high_reg));
+ DCHECK(rl_dest.wide);
+ DCHECK(rl_src.wide);
+ if (rl_src.location == kLocPhysReg) {
+ if (IsLive(cu, rl_src.low_reg) ||
+ IsLive(cu, rl_src.high_reg) ||
+ IsPromoted(cu, rl_src.low_reg) ||
+ IsPromoted(cu, rl_src.high_reg) ||
+ (rl_dest.location == kLocPhysReg)) {
// Src is live or promoted or Dest has assigned reg.
- rlDest = EvalLoc(cUnit, rlDest, kAnyReg, false);
- OpRegCopyWide(cUnit, rlDest.lowReg, rlDest.highReg,
- rlSrc.lowReg, rlSrc.highReg);
+ rl_dest = EvalLoc(cu, rl_dest, kAnyReg, false);
+ OpRegCopyWide(cu, rl_dest.low_reg, rl_dest.high_reg,
+ rl_src.low_reg, rl_src.high_reg);
} else {
// Just re-assign the registers. Dest gets Src's regs
- rlDest.lowReg = rlSrc.lowReg;
- rlDest.highReg = rlSrc.highReg;
- Clobber(cUnit, rlSrc.lowReg);
- Clobber(cUnit, rlSrc.highReg);
+ rl_dest.low_reg = rl_src.low_reg;
+ rl_dest.high_reg = rl_src.high_reg;
+ Clobber(cu, rl_src.low_reg);
+ Clobber(cu, rl_src.high_reg);
}
} else {
// Load Src either into promoted Dest or temps allocated for Dest
- rlDest = EvalLoc(cUnit, rlDest, kAnyReg, false);
- LoadValueDirectWide(cUnit, rlSrc, rlDest.lowReg, rlDest.highReg);
+ rl_dest = EvalLoc(cu, rl_dest, kAnyReg, false);
+ LoadValueDirectWide(cu, rl_src, rl_dest.low_reg, rl_dest.high_reg);
}
// Dest is now live and dirty (until/if we flush it to home location)
- MarkLive(cUnit, rlDest.lowReg, rlDest.sRegLow);
- MarkLive(cUnit, rlDest.highReg, oatSRegHi(rlDest.sRegLow));
- MarkDirty(cUnit, rlDest);
- MarkPair(cUnit, rlDest.lowReg, rlDest.highReg);
+ MarkLive(cu, rl_dest.low_reg, rl_dest.s_reg_low);
+ MarkLive(cu, rl_dest.high_reg, GetSRegHi(rl_dest.s_reg_low));
+ MarkDirty(cu, rl_dest);
+ MarkPair(cu, rl_dest.low_reg, rl_dest.high_reg);
- ResetDefLocWide(cUnit, rlDest);
- if ((IsDirty(cUnit, rlDest.lowReg) ||
- IsDirty(cUnit, rlDest.highReg)) &&
- (oatLiveOut(cUnit, rlDest.sRegLow) ||
- oatLiveOut(cUnit, oatSRegHi(rlDest.sRegLow)))) {
- defStart = cUnit->lastLIRInsn;
- DCHECK_EQ((SRegToVReg(cUnit, rlDest.sRegLow)+1),
- SRegToVReg(cUnit, oatSRegHi(rlDest.sRegLow)));
- StoreBaseDispWide(cUnit, TargetReg(kSp), SRegOffset(cUnit, rlDest.sRegLow),
- rlDest.lowReg, rlDest.highReg);
- MarkClean(cUnit, rlDest);
- defEnd = cUnit->lastLIRInsn;
- MarkDefWide(cUnit, rlDest, defStart, defEnd);
+ ResetDefLocWide(cu, rl_dest);
+ if ((IsDirty(cu, rl_dest.low_reg) ||
+ IsDirty(cu, rl_dest.high_reg)) &&
+ (oat_live_out(cu, rl_dest.s_reg_low) ||
+ oat_live_out(cu, GetSRegHi(rl_dest.s_reg_low)))) {
+ def_start = cu->last_lir_insn;
+ DCHECK_EQ((SRegToVReg(cu, rl_dest.s_reg_low)+1),
+ SRegToVReg(cu, GetSRegHi(rl_dest.s_reg_low)));
+ StoreBaseDispWide(cu, TargetReg(kSp), SRegOffset(cu, rl_dest.s_reg_low),
+ rl_dest.low_reg, rl_dest.high_reg);
+ MarkClean(cu, rl_dest);
+ def_end = cu->last_lir_insn;
+ MarkDefWide(cu, rl_dest, def_start, def_end);
}
}
/* Utilities to load the current Method* */
-void LoadCurrMethodDirect(CompilationUnit *cUnit, int rTgt)
+void LoadCurrMethodDirect(CompilationUnit *cu, int r_tgt)
{
- LoadValueDirectFixed(cUnit, cUnit->methodLoc, rTgt);
+ LoadValueDirectFixed(cu, cu->method_loc, r_tgt);
}
-RegLocation LoadCurrMethod(CompilationUnit *cUnit)
+RegLocation LoadCurrMethod(CompilationUnit *cu)
{
- return LoadValue(cUnit, cUnit->methodLoc, kCoreReg);
+ return LoadValue(cu, cu->method_loc, kCoreReg);
}
} // namespace art
diff --git a/src/compiler/codegen/gen_loadstore.h b/src/compiler/codegen/gen_loadstore.h
index 12c8011..19c43ba 100644
--- a/src/compiler/codegen/gen_loadstore.h
+++ b/src/compiler/codegen/gen_loadstore.h
@@ -17,19 +17,19 @@
#ifndef ART_SRC_COMPILER_CODEGEN_GENLOADSTORE_H_
#define ART_SRC_COMPILER_CODEGEN_GENLOADSTORE_H_
-LIR* LoadConstant(CompilationUnit* cUnit, int rDest, int value);
-LIR* LoadWordDisp(CompilationUnit* cUnit, int rBase, int displacement, int rDest);
-LIR* StoreWordDisp(CompilationUnit* cUnit, int rBase, int displacement, int rSrc);
-void LoadValueDirect(CompilationUnit* cUnit, RegLocation rlSrc, int rDest);
-void LoadValueDirectFixed(CompilationUnit* cUnit, RegLocation rlSrc, int rDest);
-void LoadValueDirectWide(CompilationUnit* cUnit, RegLocation rlSrc, int regLo, int regHi);
-void LoadValueDirectWideFixed(CompilationUnit* cUnit, RegLocation rlSrc, int regLo, int regHi);
-RegLocation LoadValue(CompilationUnit* cUnit, RegLocation rlSrc, RegisterClass opKind);
-void StoreValue(CompilationUnit* cUnit, RegLocation rlDest, RegLocation rlSrc);
-RegLocation LoadValueWide(CompilationUnit* cUnit, RegLocation rlSrc, RegisterClass opKind);
-void StoreValueWide(CompilationUnit* cUnit, RegLocation rlDest, RegLocation rlSrc);
-void LoadCurrMethodDirect(CompilationUnit *cUnit, int rTgt);
-RegLocation LoadCurrMethod(CompilationUnit *cUnit);
-bool MethodStarInReg(CompilationUnit* cUnit);
+LIR* LoadConstant(CompilationUnit* cu, int r_dest, int value);
+LIR* LoadWordDisp(CompilationUnit* cu, int rBase, int displacement, int r_dest);
+LIR* StoreWordDisp(CompilationUnit* cu, int rBase, int displacement, int r_src);
+void LoadValueDirect(CompilationUnit* cu, RegLocation rl_src, int r_dest);
+void LoadValueDirectFixed(CompilationUnit* cu, RegLocation rl_src, int r_dest);
+void LoadValueDirectWide(CompilationUnit* cu, RegLocation rl_src, int reg_lo, int reg_hi);
+void LoadValueDirectWideFixed(CompilationUnit* cu, RegLocation rl_src, int reg_lo, int reg_hi);
+RegLocation LoadValue(CompilationUnit* cu, RegLocation rl_src, RegisterClass op_kind);
+void StoreValue(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
+RegLocation LoadValueWide(CompilationUnit* cu, RegLocation rl_src, RegisterClass op_kind);
+void StoreValueWide(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
+void LoadCurrMethodDirect(CompilationUnit *cu, int r_tgt);
+RegLocation LoadCurrMethod(CompilationUnit *cu);
+bool MethodStarInReg(CompilationUnit* cu);
#endif // ART_SRC_COMPILER_CODEGEN_GENLOADSTORE_H_
diff --git a/src/compiler/codegen/local_optimizations.cc b/src/compiler/codegen/local_optimizations.cc
index 1e6e0d8..d1a7444 100644
--- a/src/compiler/codegen/local_optimizations.cc
+++ b/src/compiler/codegen/local_optimizations.cc
@@ -21,8 +21,8 @@
#define DEBUG_OPT(X)
/* Check RAW, WAR, and WAW dependencies on the register operands */
-#define CHECK_REG_DEP(use, def, check) ((def & check->useMask) || \
- ((use | def) & check->defMask))
+#define CHECK_REG_DEP(use, def, check) ((def & check->use_mask) || \
+ ((use | def) & check->def_mask))
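The macro folds all three hazard classes into two mask tests: def & check->use_mask catches read-after-write, and (use | def) & check->def_mask catches write-after-read and write-after-write. A standalone illustration with hypothetical one-bit-per-register masks (FakeLIR is an illustrative stand-in for LIR):

#include <cstdint>
struct FakeLIR { uint64_t use_mask; uint64_t def_mask; };
FakeLIR check = { (1ULL << 0) | (1ULL << 2),   // check_lir reads r0, r2
                  (1ULL << 3) };               // check_lir writes r3
uint64_t this_use = 1ULL << 1;                 // this_lir reads r1
uint64_t this_def = 1ULL << 0;                 // this_lir writes r0
// The first term fires: r0 is defined here and read by check, a RAW
// hazard, so this_lir must not be moved across check.
bool hazard = (this_def & check.use_mask) || ((this_use | this_def) & check.def_mask);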
/* Scheduler heuristics */
#define MAX_HOIST_DISTANCE 20
@@ -31,27 +31,27 @@
static bool IsDalvikRegisterClobbered(LIR* lir1, LIR* lir2)
{
- int reg1Lo = DECODE_ALIAS_INFO_REG(lir1->aliasInfo);
- int reg1Hi = reg1Lo + DECODE_ALIAS_INFO_WIDE(lir1->aliasInfo);
- int reg2Lo = DECODE_ALIAS_INFO_REG(lir2->aliasInfo);
- int reg2Hi = reg2Lo + DECODE_ALIAS_INFO_WIDE(lir2->aliasInfo);
+ int reg1Lo = DECODE_ALIAS_INFO_REG(lir1->alias_info);
+ int reg1Hi = reg1Lo + DECODE_ALIAS_INFO_WIDE(lir1->alias_info);
+ int reg2Lo = DECODE_ALIAS_INFO_REG(lir2->alias_info);
+ int reg2Hi = reg2Lo + DECODE_ALIAS_INFO_WIDE(lir2->alias_info);
return (reg1Lo == reg2Lo) || (reg1Lo == reg2Hi) || (reg1Hi == reg2Lo);
}
/* Convert a more expensive instruction (i.e. a load) into a move */
-static void ConvertMemOpIntoMove(CompilationUnit* cUnit, LIR* origLIR, int dest, int src)
+static void ConvertMemOpIntoMove(CompilationUnit* cu, LIR* orig_lir, int dest, int src)
{
/* Insert a move to replace the load */
- LIR* moveLIR;
- moveLIR = OpRegCopyNoInsert( cUnit, dest, src);
+ LIR* move_lir;
+ move_lir = OpRegCopyNoInsert( cu, dest, src);
/*
* Insert the converted instruction after the original since the
* optimization is scanning in the top-down order and the new instruction
* will need to be re-checked (e.g. the new dest clobbers the src used in
- * thisLIR).
+ * this_lir).
*/
- InsertLIRAfter(origLIR, moveLIR);
+ InsertLIRAfter(orig_lir, move_lir);
}
/*
@@ -72,147 +72,147 @@
* 1) They are must-aliases
* 2) The memory location is not written to in between
*/
-static void ApplyLoadStoreElimination(CompilationUnit* cUnit, LIR* headLIR, LIR* tailLIR)
+static void ApplyLoadStoreElimination(CompilationUnit* cu, LIR* head_lir, LIR* tail_lir)
{
- LIR* thisLIR;
+ LIR* this_lir;
- if (headLIR == tailLIR) return;
+ if (head_lir == tail_lir) return;
- for (thisLIR = PREV_LIR(tailLIR);
- thisLIR != headLIR;
- thisLIR = PREV_LIR(thisLIR)) {
- int sinkDistance = 0;
+ for (this_lir = PREV_LIR(tail_lir);
+ this_lir != head_lir;
+ this_lir = PREV_LIR(this_lir)) {
+ int sink_distance = 0;
/* Skip non-interesting instructions */
- if ((thisLIR->flags.isNop == true) ||
- isPseudoOpcode(thisLIR->opcode) ||
- (GetTargetInstFlags(thisLIR->opcode) & IS_BRANCH) ||
- !(GetTargetInstFlags(thisLIR->opcode) & (IS_LOAD | IS_STORE))) {
+ if ((this_lir->flags.is_nop == true) ||
+ is_pseudo_opcode(this_lir->opcode) ||
+ (GetTargetInstFlags(this_lir->opcode) & IS_BRANCH) ||
+ !(GetTargetInstFlags(this_lir->opcode) & (IS_LOAD | IS_STORE))) {
continue;
}
- int nativeRegId;
- if (cUnit->instructionSet == kX86) {
+ int native_reg_id;
+ if (cu->instruction_set == kX86) {
// On x86, the operand location differs for memory vs. register operations.
- nativeRegId = (GetTargetInstFlags(thisLIR->opcode) & IS_STORE) ? thisLIR->operands[2]
- : thisLIR->operands[0];
+ native_reg_id = (GetTargetInstFlags(this_lir->opcode) & IS_STORE) ? this_lir->operands[2]
+ : this_lir->operands[0];
} else {
- nativeRegId = thisLIR->operands[0];
+ native_reg_id = this_lir->operands[0];
}
- bool isThisLIRLoad = GetTargetInstFlags(thisLIR->opcode) & IS_LOAD;
- LIR* checkLIR;
+ bool is_this_lir_load = GetTargetInstFlags(this_lir->opcode) & IS_LOAD;
+ LIR* check_lir;
/* Use the mem mask to determine the rough memory location */
- uint64_t thisMemMask = (thisLIR->useMask | thisLIR->defMask) & ENCODE_MEM;
+ uint64_t this_mem_mask = (this_lir->use_mask | this_lir->def_mask) & ENCODE_MEM;
/*
* Currently only eliminate redundant ld/st for constant and Dalvik
* register accesses.
*/
- if (!(thisMemMask & (ENCODE_LITERAL | ENCODE_DALVIK_REG))) continue;
+ if (!(this_mem_mask & (ENCODE_LITERAL | ENCODE_DALVIK_REG))) continue;
- uint64_t stopDefRegMask = thisLIR->defMask & ~ENCODE_MEM;
- uint64_t stopUseRegMask;
- if (cUnit->instructionSet == kX86) {
- stopUseRegMask = (IS_BRANCH | thisLIR->useMask) & ~ENCODE_MEM;
+ uint64_t stop_def_reg_mask = this_lir->def_mask & ~ENCODE_MEM;
+ uint64_t stop_use_reg_mask;
+ if (cu->instruction_set == kX86) {
+ stop_use_reg_mask = (IS_BRANCH | this_lir->use_mask) & ~ENCODE_MEM;
} else {
/*
* Add pc to the resource mask to prevent this instruction
* from sinking past branch instructions. Also take out the memory
- * region bits since stopMask is used to check data/control
+ * region bits since stop_mask is used to check data/control
* dependencies.
*/
- stopUseRegMask = (GetPCUseDefEncoding() | thisLIR->useMask) & ~ENCODE_MEM;
+ stop_use_reg_mask = (GetPCUseDefEncoding() | this_lir->use_mask) & ~ENCODE_MEM;
}
- for (checkLIR = NEXT_LIR(thisLIR);
- checkLIR != tailLIR;
- checkLIR = NEXT_LIR(checkLIR)) {
+ for (check_lir = NEXT_LIR(this_lir);
+ check_lir != tail_lir;
+ check_lir = NEXT_LIR(check_lir)) {
/*
* Skip already dead instructions (whose dataflow information is
* outdated and misleading).
*/
- if (checkLIR->flags.isNop) continue;
+ if (check_lir->flags.is_nop) continue;
- uint64_t checkMemMask = (checkLIR->useMask | checkLIR->defMask) & ENCODE_MEM;
- uint64_t aliasCondition = thisMemMask & checkMemMask;
- bool stopHere = false;
+ uint64_t check_mem_mask = (check_lir->use_mask | check_lir->def_mask) & ENCODE_MEM;
+ uint64_t alias_condition = this_mem_mask & check_mem_mask;
+ bool stop_here = false;
/*
* Potential aliases seen - check the alias relations
*/
- if (checkMemMask != ENCODE_MEM && aliasCondition != 0) {
- bool isCheckLIRLoad = GetTargetInstFlags(checkLIR->opcode) & IS_LOAD;
- if (aliasCondition == ENCODE_LITERAL) {
+ if (check_mem_mask != ENCODE_MEM && alias_condition != 0) {
+ bool is_check_lir_load = GetTargetInstFlags(check_lir->opcode) & IS_LOAD;
+ if (alias_condition == ENCODE_LITERAL) {
/*
* Should only see literal loads in the instruction
* stream.
*/
- DCHECK(!(GetTargetInstFlags(checkLIR->opcode) & IS_STORE));
+ DCHECK(!(GetTargetInstFlags(check_lir->opcode) & IS_STORE));
/* Same value && same register type */
- if (checkLIR->aliasInfo == thisLIR->aliasInfo &&
- SameRegType(checkLIR->operands[0], nativeRegId)) {
+ if (check_lir->alias_info == this_lir->alias_info &&
+ SameRegType(check_lir->operands[0], native_reg_id)) {
/*
* Different destination register - insert
* a move
*/
- if (checkLIR->operands[0] != nativeRegId) {
- ConvertMemOpIntoMove(cUnit, checkLIR, checkLIR->operands[0],
- nativeRegId);
+ if (check_lir->operands[0] != native_reg_id) {
+ ConvertMemOpIntoMove(cu, check_lir, check_lir->operands[0],
+ native_reg_id);
}
- checkLIR->flags.isNop = true;
+ check_lir->flags.is_nop = true;
}
- } else if (aliasCondition == ENCODE_DALVIK_REG) {
+ } else if (alias_condition == ENCODE_DALVIK_REG) {
/* Must alias */
- if (checkLIR->aliasInfo == thisLIR->aliasInfo) {
+ if (check_lir->alias_info == this_lir->alias_info) {
/* Only optimize compatible registers */
- bool regCompatible = SameRegType(checkLIR->operands[0], nativeRegId);
- if ((isThisLIRLoad && isCheckLIRLoad) ||
- (!isThisLIRLoad && isCheckLIRLoad)) {
+ bool reg_compatible = SameRegType(check_lir->operands[0], native_reg_id);
+ if ((is_this_lir_load && is_check_lir_load) ||
+ (!is_this_lir_load && is_check_lir_load)) {
/* RAR or RAW */
- if (regCompatible) {
+ if (reg_compatible) {
/*
* Different destination register -
* insert a move
*/
- if (checkLIR->operands[0] !=
- nativeRegId) {
- ConvertMemOpIntoMove(cUnit, checkLIR, checkLIR->operands[0],
- nativeRegId);
+ if (check_lir->operands[0] !=
+ native_reg_id) {
+ ConvertMemOpIntoMove(cu, check_lir, check_lir->operands[0],
+ native_reg_id);
}
- checkLIR->flags.isNop = true;
+ check_lir->flags.is_nop = true;
} else {
/*
* Destinations are of different types -
* something complicated is going on, so
* stop looking now.
*/
- stopHere = true;
+ stop_here = true;
}
- } else if (isThisLIRLoad && !isCheckLIRLoad) {
+ } else if (is_this_lir_load && !is_check_lir_load) {
/* WAR - register value is killed */
- stopHere = true;
- } else if (!isThisLIRLoad && !isCheckLIRLoad) {
+ stop_here = true;
+ } else if (!is_this_lir_load && !is_check_lir_load) {
/* WAW - nuke the earlier store */
- thisLIR->flags.isNop = true;
- stopHere = true;
+ this_lir->flags.is_nop = true;
+ stop_here = true;
}
/* Partial overlap */
- } else if (IsDalvikRegisterClobbered(thisLIR, checkLIR)) {
+ } else if (IsDalvikRegisterClobbered(this_lir, check_lir)) {
/*
- * It is actually ok to continue if checkLIR
+ * It is actually ok to continue if check_lir
* is a read. But it is hard to make a test
* case for this so we just stop here to be
* conservative.
*/
- stopHere = true;
+ stop_here = true;
}
}
/* Memory content may be updated. Stop looking now. */
- if (stopHere) {
+ if (stop_here) {
break;
- /* The checkLIR has been transformed - check the next one */
- } else if (checkLIR->flags.isNop) {
+ /* The check_lir has been transformed - check the next one */
+ } else if (check_lir->flags.is_nop) {
continue;
}
}
@@ -223,36 +223,36 @@
* their register operands have any RAW, WAR, and WAW
* dependencies. If so, stop looking.
*/
- if (stopHere == false) {
- stopHere = CHECK_REG_DEP(stopUseRegMask, stopDefRegMask, checkLIR);
+ if (stop_here == false) {
+ stop_here = CHECK_REG_DEP(stop_use_reg_mask, stop_def_reg_mask, check_lir);
}
- if (stopHere == true) {
- if (cUnit->instructionSet == kX86) {
+ if (stop_here == true) {
+ if (cu->instruction_set == kX86) {
// Prevent stores from being sunk between ops that generate ccodes and
// ops that use them.
- uint64_t flags = GetTargetInstFlags(checkLIR->opcode);
- if (sinkDistance > 0 && (flags & IS_BRANCH) && (flags & USES_CCODES)) {
- checkLIR = PREV_LIR(checkLIR);
- sinkDistance--;
+ uint64_t flags = GetTargetInstFlags(check_lir->opcode);
+ if (sink_distance > 0 && (flags & IS_BRANCH) && (flags & USES_CCODES)) {
+ check_lir = PREV_LIR(check_lir);
+ sink_distance--;
}
}
- DEBUG_OPT(dumpDependentInsnPair(thisLIR, checkLIR, "REG CLOBBERED"));
+ DEBUG_OPT(dump_dependent_insn_pair(this_lir, check_lir, "REG CLOBBERED"));
/* Only sink store instructions */
- if (sinkDistance && !isThisLIRLoad) {
- LIR* newStoreLIR = static_cast<LIR*>(NewMem(cUnit, sizeof(LIR), true, kAllocLIR));
- *newStoreLIR = *thisLIR;
+ if (sink_distance && !is_this_lir_load) {
+ LIR* new_store_lir = static_cast<LIR*>(NewMem(cu, sizeof(LIR), true, kAllocLIR));
+ *new_store_lir = *this_lir;
/*
- * Stop point found - insert *before* the checkLIR
+ * Stop point found - insert *before* the check_lir
* since the instruction list is scanned in the
* top-down order.
*/
- InsertLIRBefore(checkLIR, newStoreLIR);
- thisLIR->flags.isNop = true;
+ InsertLIRBefore(check_lir, new_store_lir);
+ this_lir->flags.is_nop = true;
}
break;
- } else if (!checkLIR->flags.isNop) {
- sinkDistance++;
+ } else if (!check_lir->flags.is_nop) {
+ sink_distance++;
}
}
}
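Concretely, the pass rewrites patterns like the following, shown in hypothetical pseudo-assembly over a Dalvik spill slot v5:

//   str r0, [sp, #v5]    ; this_lir stores v5
//   ...                  ; nothing redefines r0/r2 or writes v5
//   ldr r2, [sp, #v5]    ; check_lir reloads the same must-alias slot
// becomes
//   str r0, [sp, #v5]
//   mov r2, r0           ; ConvertMemOpIntoMove; the load is NOP'd
//
// and for a WAW pair the earlier store is simply killed:
//   str r0, [sp, #v5]    ; this_lir->flags.is_nop = true
//   str r1, [sp, #v5]    ; survives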
@@ -262,144 +262,144 @@
* Perform a pass of bottom-up walk, from the second instruction in the
* superblock, to try to hoist loads to earlier slots.
*/
-void ApplyLoadHoisting(CompilationUnit* cUnit, LIR* headLIR, LIR* tailLIR)
+void ApplyLoadHoisting(CompilationUnit* cu, LIR* head_lir, LIR* tail_lir)
{
- LIR* thisLIR, *checkLIR;
+ LIR* this_lir, *check_lir;
/*
* Store the list of independent instructions that can be hoisted past.
* Will decide the best place to insert later.
*/
- LIR* prevInstList[MAX_HOIST_DISTANCE];
+ LIR* prev_inst_list[MAX_HOIST_DISTANCE];
/* Empty block */
- if (headLIR == tailLIR) return;
+ if (head_lir == tail_lir) return;
/* Start from the second instruction */
- for (thisLIR = NEXT_LIR(headLIR);
- thisLIR != tailLIR;
- thisLIR = NEXT_LIR(thisLIR)) {
+ for (this_lir = NEXT_LIR(head_lir);
+ this_lir != tail_lir;
+ this_lir = NEXT_LIR(this_lir)) {
/* Skip non-interesting instructions */
- if ((thisLIR->flags.isNop == true) ||
- isPseudoOpcode(thisLIR->opcode) ||
- !(GetTargetInstFlags(thisLIR->opcode) & IS_LOAD)) {
+ if ((this_lir->flags.is_nop == true) ||
+ is_pseudo_opcode(this_lir->opcode) ||
+ !(GetTargetInstFlags(this_lir->opcode) & IS_LOAD)) {
continue;
}
- uint64_t stopUseAllMask = thisLIR->useMask;
+ uint64_t stop_use_all_mask = this_lir->use_mask;
- if (cUnit->instructionSet != kX86) {
+ if (cu->instruction_set != kX86) {
/*
* Branches for null/range checks are marked with the true resource
* bits, and loads to Dalvik registers, constant pools, and non-alias
* locations are safe to hoist. So only mark the heap references
* conservatively here.
*/
- if (stopUseAllMask & ENCODE_HEAP_REF) {
- stopUseAllMask |= GetPCUseDefEncoding();
+ if (stop_use_all_mask & ENCODE_HEAP_REF) {
+ stop_use_all_mask |= GetPCUseDefEncoding();
}
}
/* Similar as above, but just check for pure register dependency */
- uint64_t stopUseRegMask = stopUseAllMask & ~ENCODE_MEM;
- uint64_t stopDefRegMask = thisLIR->defMask & ~ENCODE_MEM;
+ uint64_t stop_use_reg_mask = stop_use_all_mask & ~ENCODE_MEM;
+ uint64_t stop_def_reg_mask = this_lir->def_mask & ~ENCODE_MEM;
- int nextSlot = 0;
- bool stopHere = false;
+ int next_slot = 0;
+ bool stop_here = false;
/* Try to hoist the load to a good spot */
- for (checkLIR = PREV_LIR(thisLIR);
- checkLIR != headLIR;
- checkLIR = PREV_LIR(checkLIR)) {
+ for (check_lir = PREV_LIR(this_lir);
+ check_lir != head_lir;
+ check_lir = PREV_LIR(check_lir)) {
/*
* Skip already dead instructions (whose dataflow information is
* outdated and misleading).
*/
- if (checkLIR->flags.isNop) continue;
+ if (check_lir->flags.is_nop) continue;
- uint64_t checkMemMask = checkLIR->defMask & ENCODE_MEM;
- uint64_t aliasCondition = stopUseAllMask & checkMemMask;
- stopHere = false;
+ uint64_t check_mem_mask = check_lir->def_mask & ENCODE_MEM;
+ uint64_t alias_condition = stop_use_all_mask & check_mem_mask;
+ stop_here = false;
/* Potential WAR alias seen - check the exact relation */
- if (checkMemMask != ENCODE_MEM && aliasCondition != 0) {
+ if (check_mem_mask != ENCODE_MEM && alias_condition != 0) {
/* We can fully disambiguate Dalvik references */
- if (aliasCondition == ENCODE_DALVIK_REG) {
+ if (alias_condition == ENCODE_DALVIK_REG) {
/* Must alias or partially overlap */
- if ((checkLIR->aliasInfo == thisLIR->aliasInfo) ||
- IsDalvikRegisterClobbered(thisLIR, checkLIR)) {
- stopHere = true;
+ if ((check_lir->alias_info == this_lir->alias_info) ||
+ IsDalvikRegisterClobbered(this_lir, check_lir)) {
+ stop_here = true;
}
/* Conservatively treat all heap refs as may-alias */
} else {
- DCHECK_EQ(aliasCondition, ENCODE_HEAP_REF);
- stopHere = true;
+ DCHECK_EQ(alias_condition, ENCODE_HEAP_REF);
+ stop_here = true;
}
/* Memory content may be updated. Stop looking now. */
- if (stopHere) {
- prevInstList[nextSlot++] = checkLIR;
+ if (stop_here) {
+ prev_inst_list[next_slot++] = check_lir;
break;
}
}
- if (stopHere == false) {
- stopHere = CHECK_REG_DEP(stopUseRegMask, stopDefRegMask,
- checkLIR);
+ if (stop_here == false) {
+ stop_here = CHECK_REG_DEP(stop_use_reg_mask, stop_def_reg_mask,
+ check_lir);
}
/*
* Store the dependent or non-pseudo/independent instruction to the
* list.
*/
- if (stopHere || !isPseudoOpcode(checkLIR->opcode)) {
- prevInstList[nextSlot++] = checkLIR;
- if (nextSlot == MAX_HOIST_DISTANCE) break;
+ if (stop_here || !is_pseudo_opcode(check_lir->opcode)) {
+ prev_inst_list[next_slot++] = check_lir;
+ if (next_slot == MAX_HOIST_DISTANCE) break;
}
/* Found a new place to put the load - move it here */
- if (stopHere == true) {
- DEBUG_OPT(dumpDependentInsnPair(checkLIR, thisLIR "HOIST STOP"));
+ if (stop_here == true) {
+ DEBUG_OPT(dump_dependent_insn_pair(check_lir, this_lir, "HOIST STOP"));
break;
}
}
/*
- * Reached the top - use headLIR as the dependent marker as all labels
+ * Reached the top - use head_lir as the dependent marker as all labels
* are barriers.
*/
- if (stopHere == false && nextSlot < MAX_HOIST_DISTANCE) {
- prevInstList[nextSlot++] = headLIR;
+ if (stop_here == false && next_slot < MAX_HOIST_DISTANCE) {
+ prev_inst_list[next_slot++] = head_lir;
}
/*
* At least one independent instruction was found. Scan in the reverse
* direction to find a beneficial slot.
*/
- if (nextSlot >= 2) {
- int firstSlot = nextSlot - 2;
+ if (next_slot >= 2) {
+ int first_slot = next_slot - 2;
int slot;
- LIR* depLIR = prevInstList[nextSlot-1];
+ LIR* dep_lir = prev_inst_list[next_slot-1];
/* If there is an ld-ld dependency, wait LDLD_DISTANCE cycles */
- if (!isPseudoOpcode(depLIR->opcode) &&
- (GetTargetInstFlags(depLIR->opcode) & IS_LOAD)) {
- firstSlot -= LDLD_DISTANCE;
+ if (!is_pseudo_opcode(dep_lir->opcode) &&
+ (GetTargetInstFlags(dep_lir->opcode) & IS_LOAD)) {
+ first_slot -= LDLD_DISTANCE;
}
/*
- * Make sure we check slot >= 0 since firstSlot may be negative
+ * Make sure we check slot >= 0 since first_slot may be negative
* when the loop is first entered.
*/
- for (slot = firstSlot; slot >= 0; slot--) {
- LIR* curLIR = prevInstList[slot];
- LIR* prevLIR = prevInstList[slot+1];
+ for (slot = first_slot; slot >= 0; slot--) {
+ LIR* cur_lir = prev_inst_list[slot];
+ LIR* prev_lir = prev_inst_list[slot+1];
/* Check the highest instruction */
- if (prevLIR->defMask == ENCODE_ALL) {
+ if (prev_lir->def_mask == ENCODE_ALL) {
/*
* If the first instruction is a load, don't hoist anything
* above it since it is unlikely to be beneficial.
*/
- if (GetTargetInstFlags(curLIR->opcode) & IS_LOAD) continue;
+ if (GetTargetInstFlags(cur_lir->opcode) & IS_LOAD) continue;
/*
* If the remaining number of slots is less than LD_LATENCY,
* insert the hoisted load here.
@@ -408,9 +408,9 @@
}
// Don't look across a barrier label
- if ((prevLIR->opcode == kPseudoTargetLabel) ||
- (prevLIR->opcode == kPseudoSafepointPC) ||
- (prevLIR->opcode == kPseudoBarrier)) {
+ if ((prev_lir->opcode == kPseudoTargetLabel) ||
+ (prev_lir->opcode == kPseudoSafepointPC) ||
+ (prev_lir->opcode == kPseudoBarrier)) {
break;
}
@@ -418,37 +418,37 @@
* Try to find two instructions with load/use dependency until
* the remaining instructions are less than LD_LATENCY.
*/
- bool prevIsLoad = isPseudoOpcode(prevLIR->opcode) ? false :
- (GetTargetInstFlags(prevLIR->opcode) & IS_LOAD);
- if (((curLIR->useMask & prevLIR->defMask) && prevIsLoad) || (slot < LD_LATENCY)) {
+ bool prev_is_load = is_pseudo_opcode(prev_lir->opcode) ? false :
+ (GetTargetInstFlags(prev_lir->opcode) & IS_LOAD);
+ if (((cur_lir->use_mask & prev_lir->def_mask) && prev_is_load) || (slot < LD_LATENCY)) {
break;
}
}
/* Found a slot to hoist to */
if (slot >= 0) {
- LIR* curLIR = prevInstList[slot];
- LIR* newLoadLIR = static_cast<LIR*>(NewMem(cUnit, sizeof(LIR), true, kAllocLIR));
- *newLoadLIR = *thisLIR;
+ LIR* cur_lir = prev_inst_list[slot];
+ LIR* new_load_lir = static_cast<LIR*>(NewMem(cu, sizeof(LIR), true, kAllocLIR));
+ *new_load_lir = *this_lir;
/*
- * Insertion is guaranteed to succeed since checkLIR
+ * Insertion is guaranteed to succeed since check_lir
* is never the first LIR on the list
*/
- InsertLIRBefore(curLIR, newLoadLIR);
- thisLIR->flags.isNop = true;
+ InsertLIRBefore(cur_lir, new_load_lir);
+ this_lir->flags.is_nop = true;
}
}
}
}
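The payoff of a successful hoist, again in hypothetical pseudo-assembly: the load is re-issued early so its latency overlaps independent work, and the original is NOP'd.

// Before:                       After:
//   add r3, r4, r5                ldr r0, [sp, #v2]   ; hoisted copy
//   add r6, r3, r7                add r3, r4, r5
//   ldr r0, [sp, #v2]             add r6, r3, r7
//                                 ; original load: flags.is_nop = true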
-void ApplyLocalOptimizations(CompilationUnit* cUnit, LIR* headLIR,
- LIR* tailLIR)
+void ApplyLocalOptimizations(CompilationUnit* cu, LIR* head_lir,
+ LIR* tail_lir)
{
- if (!(cUnit->disableOpt & (1 << kLoadStoreElimination))) {
- ApplyLoadStoreElimination(cUnit, headLIR, tailLIR);
+ if (!(cu->disable_opt & (1 << kLoadStoreElimination))) {
+ ApplyLoadStoreElimination(cu, head_lir, tail_lir);
}
- if (!(cUnit->disableOpt & (1 << kLoadHoisting))) {
- ApplyLoadHoisting(cUnit, headLIR, tailLIR);
+ if (!(cu->disable_opt & (1 << kLoadHoisting))) {
+ ApplyLoadHoisting(cu, head_lir, tail_lir);
}
}
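Both passes are gated on per-compilation bits in disable_opt, so callers can opt out selectively. A minimal sketch using the flags referenced above:

// Keep store elimination but skip load hoisting for this compilation:
cu->disable_opt |= (1 << kLoadHoisting);
ApplyLocalOptimizations(cu, cu->first_lir_insn, cu->last_lir_insn);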
@@ -457,34 +457,34 @@
* Note: new redundant branches may be inserted later, and we'll
* use a check in final instruction assembly to nop those out.
*/
-void RemoveRedundantBranches(CompilationUnit* cUnit)
+void RemoveRedundantBranches(CompilationUnit* cu)
{
- LIR* thisLIR;
+ LIR* this_lir;
- for (thisLIR = cUnit->firstLIRInsn; thisLIR != cUnit->lastLIRInsn; thisLIR = NEXT_LIR(thisLIR)) {
+ for (this_lir = cu->first_lir_insn; this_lir != cu->last_lir_insn; this_lir = NEXT_LIR(this_lir)) {
/* Branch to the next instruction */
- if (BranchUnconditional(thisLIR)) {
- LIR* nextLIR = thisLIR;
+ if (BranchUnconditional(this_lir)) {
+ LIR* next_lir = this_lir;
while (true) {
- nextLIR = NEXT_LIR(nextLIR);
+ next_lir = NEXT_LIR(next_lir);
/*
* Is the branch target the next instruction?
*/
- if (nextLIR == thisLIR->target) {
- thisLIR->flags.isNop = true;
+ if (next_lir == this_lir->target) {
+ this_lir->flags.is_nop = true;
break;
}
/*
* Found real useful stuff between the branch and the target.
- * Need to explicitly check the lastLIRInsn here because it
+ * Need to explicitly check the last_lir_insn here because it
* might be the last real instruction.
*/
- if (!isPseudoOpcode(nextLIR->opcode) ||
- (nextLIR == cUnit->lastLIRInsn))
+ if (!is_pseudo_opcode(next_lir->opcode) ||
+ (next_lir == cu->last_lir_insn))
break;
}
}
diff --git a/src/compiler/codegen/local_optimizations.h b/src/compiler/codegen/local_optimizations.h
index e740b13..74bae15 100644
--- a/src/compiler/codegen/local_optimizations.h
+++ b/src/compiler/codegen/local_optimizations.h
@@ -19,8 +19,8 @@
namespace art {
-void ApplyLocalOptimizations(CompilationUnit* cUnit, LIR* headLIR, LIR* tailLIR);
-void RemoveRedundantBranches(CompilationUnit* cUnit);
+void ApplyLocalOptimizations(CompilationUnit* cu, LIR* head_lir, LIR* tail_lir);
+void RemoveRedundantBranches(CompilationUnit* cu);
} // namespace art
diff --git a/src/compiler/codegen/method_bitcode.cc b/src/compiler/codegen/method_bitcode.cc
index 1c576bc..cedf3b7 100644
--- a/src/compiler/codegen/method_bitcode.cc
+++ b/src/compiler/codegen/method_bitcode.cc
@@ -39,26 +39,26 @@
static const char kCatchBlock = 'C';
namespace art {
-// TODO: unify badLoc
-const RegLocation badLoc = {kLocDalvikFrame, 0, 0, 0, 0, 0, 0, 0, 0,
+// TODO: unify bad_loc
+const RegLocation bad_loc = {kLocDalvikFrame, 0, 0, 0, 0, 0, 0, 0, 0,
INVALID_REG, INVALID_REG, INVALID_SREG,
INVALID_SREG};
-static RegLocation GetLoc(CompilationUnit* cUnit, llvm::Value* val);
+static RegLocation GetLoc(CompilationUnit* cu, llvm::Value* val);
-static llvm::BasicBlock* GetLLVMBlock(CompilationUnit* cUnit, int id)
+static llvm::BasicBlock* GetLLVMBlock(CompilationUnit* cu, int id)
{
- return cUnit->idToBlockMap.Get(id);
+ return cu->id_to_block_map.Get(id);
}
-static llvm::Value* GetLLVMValue(CompilationUnit* cUnit, int sReg)
+static llvm::Value* GetLLVMValue(CompilationUnit* cu, int s_reg)
{
- return reinterpret_cast<llvm::Value*>(GrowableListGetElement(&cUnit->llvmValues, sReg));
+ return reinterpret_cast<llvm::Value*>(GrowableListGetElement(&cu->llvm_values, s_reg));
}
// Replace the placeholder value with the real definition
-static void DefineValue(CompilationUnit* cUnit, llvm::Value* val, int sReg)
+static void DefineValue(CompilationUnit* cu, llvm::Value* val, int s_reg)
{
- llvm::Value* placeholder = GetLLVMValue(cUnit, sReg);
+ llvm::Value* placeholder = GetLLVMValue(cu, s_reg);
if (placeholder == NULL) {
// This can happen on instruction rewrite on verification failure
LOG(WARNING) << "Null placeholder";
@@ -66,240 +66,240 @@
}
placeholder->replaceAllUsesWith(val);
val->takeName(placeholder);
- cUnit->llvmValues.elemList[sReg] = reinterpret_cast<uintptr_t>(val);
+ cu->llvm_values.elem_list[s_reg] = reinterpret_cast<uintptr_t>(val);
llvm::Instruction* inst = llvm::dyn_cast<llvm::Instruction>(placeholder);
DCHECK(inst != NULL);
inst->eraseFromParent();
// Set vreg for debugging
- if (!cUnit->compiler->IsDebuggingSupported()) {
+ if (!cu->compiler->IsDebuggingSupported()) {
greenland::IntrinsicHelper::IntrinsicId id =
greenland::IntrinsicHelper::SetVReg;
- llvm::Function* func = cUnit->intrinsic_helper->GetIntrinsicFunction(id);
- int vReg = SRegToVReg(cUnit, sReg);
- llvm::Value* tableSlot = cUnit->irb->getInt32(vReg);
- llvm::Value* args[] = { tableSlot, val };
- cUnit->irb->CreateCall(func, args);
+ llvm::Function* func = cu->intrinsic_helper->GetIntrinsicFunction(id);
+ int v_reg = SRegToVReg(cu, s_reg);
+ llvm::Value* table_slot = cu->irb->getInt32(v_reg);
+ llvm::Value* args[] = { table_slot, val };
+ cu->irb->CreateCall(func, args);
}
}
-static llvm::Type* LlvmTypeFromLocRec(CompilationUnit* cUnit, RegLocation loc)
+static llvm::Type* LlvmTypeFromLocRec(CompilationUnit* cu, RegLocation loc)
{
llvm::Type* res = NULL;
if (loc.wide) {
if (loc.fp)
- res = cUnit->irb->getDoubleTy();
+ res = cu->irb->getDoubleTy();
else
- res = cUnit->irb->getInt64Ty();
+ res = cu->irb->getInt64Ty();
} else {
if (loc.fp) {
- res = cUnit->irb->getFloatTy();
+ res = cu->irb->getFloatTy();
} else {
if (loc.ref)
- res = cUnit->irb->GetJObjectTy();
+ res = cu->irb->GetJObjectTy();
else
- res = cUnit->irb->getInt32Ty();
+ res = cu->irb->getInt32Ty();
}
}
return res;
}
/* Create an in-memory RegLocation from an llvm Value. */
-static void CreateLocFromValue(CompilationUnit* cUnit, llvm::Value* val)
+static void CreateLocFromValue(CompilationUnit* cu, llvm::Value* val)
{
// NOTE: llvm takes shortcuts with c_str() - get to std::string first
std::string s(val->getName().str());
- const char* valName = s.c_str();
- SafeMap<llvm::Value*, RegLocation>::iterator it = cUnit->locMap.find(val);
- DCHECK(it == cUnit->locMap.end()) << " - already defined: " << valName;
- int baseSReg = INVALID_SREG;
+ const char* val_name = s.c_str();
+ SafeMap<llvm::Value*, RegLocation>::iterator it = cu->loc_map.find(val);
+ DCHECK(it == cu->loc_map.end()) << " - already defined: " << val_name;
+ int base_sreg = INVALID_SREG;
int subscript = -1;
- sscanf(valName, "v%d_%d", &baseSReg, &subscript);
- if ((baseSReg == INVALID_SREG) && (!strcmp(valName, "method"))) {
- baseSReg = SSA_METHOD_BASEREG;
+ sscanf(val_name, "v%d_%d", &base_sreg, &subscript);
+ if ((base_sreg == INVALID_SREG) && (!strcmp(val_name, "method"))) {
+ base_sreg = SSA_METHOD_BASEREG;
subscript = 0;
}
- DCHECK_NE(baseSReg, INVALID_SREG);
+ DCHECK_NE(base_sreg, INVALID_SREG);
DCHECK_NE(subscript, -1);
// TODO: redo during C++'ification
RegLocation loc = {kLocDalvikFrame, 0, 0, 0, 0, 0, 0, 0, 0, INVALID_REG,
INVALID_REG, INVALID_SREG, INVALID_SREG};
llvm::Type* ty = val->getType();
- loc.wide = ((ty == cUnit->irb->getInt64Ty()) ||
- (ty == cUnit->irb->getDoubleTy()));
+ loc.wide = ((ty == cu->irb->getInt64Ty()) ||
+ (ty == cu->irb->getDoubleTy()));
loc.defined = true;
loc.home = false; // May change during promotion
- loc.sRegLow = baseSReg;
- loc.origSReg = cUnit->locMap.size();
- PromotionMap pMap = cUnit->promotionMap[baseSReg];
- if (ty == cUnit->irb->getFloatTy()) {
+ loc.s_reg_low = base_sreg;
+ loc.orig_sreg = cu->loc_map.size();
+ PromotionMap p_map = cu->promotion_map[base_sreg];
+ if (ty == cu->irb->getFloatTy()) {
loc.fp = true;
- if (pMap.fpLocation == kLocPhysReg) {
- loc.lowReg = pMap.FpReg;
+ if (p_map.fp_location == kLocPhysReg) {
+ loc.low_reg = p_map.FpReg;
loc.location = kLocPhysReg;
loc.home = true;
}
- } else if (ty == cUnit->irb->getDoubleTy()) {
+ } else if (ty == cu->irb->getDoubleTy()) {
loc.fp = true;
- PromotionMap pMapHigh = cUnit->promotionMap[baseSReg + 1];
- if ((pMap.fpLocation == kLocPhysReg) &&
- (pMapHigh.fpLocation == kLocPhysReg) &&
- ((pMap.FpReg & 0x1) == 0) &&
- (pMap.FpReg + 1 == pMapHigh.FpReg)) {
- loc.lowReg = pMap.FpReg;
- loc.highReg = pMapHigh.FpReg;
+ PromotionMap p_map_high = cu->promotion_map[base_sreg + 1];
+ if ((p_map.fp_location == kLocPhysReg) &&
+ (p_map_high.fp_location == kLocPhysReg) &&
+ ((p_map.FpReg & 0x1) == 0) &&
+ (p_map.FpReg + 1 == p_map_high.FpReg)) {
+ loc.low_reg = p_map.FpReg;
+ loc.high_reg = p_map_high.FpReg;
loc.location = kLocPhysReg;
loc.home = true;
}
- } else if (ty == cUnit->irb->GetJObjectTy()) {
+ } else if (ty == cu->irb->GetJObjectTy()) {
loc.ref = true;
- if (pMap.coreLocation == kLocPhysReg) {
- loc.lowReg = pMap.coreReg;
+ if (p_map.core_location == kLocPhysReg) {
+ loc.low_reg = p_map.core_reg;
loc.location = kLocPhysReg;
loc.home = true;
}
- } else if (ty == cUnit->irb->getInt64Ty()) {
+ } else if (ty == cu->irb->getInt64Ty()) {
loc.core = true;
- PromotionMap pMapHigh = cUnit->promotionMap[baseSReg + 1];
- if ((pMap.coreLocation == kLocPhysReg) &&
- (pMapHigh.coreLocation == kLocPhysReg)) {
- loc.lowReg = pMap.coreReg;
- loc.highReg = pMapHigh.coreReg;
+ PromotionMap p_map_high = cu->promotion_map[base_sreg + 1];
+ if ((p_map.core_location == kLocPhysReg) &&
+ (p_map_high.core_location == kLocPhysReg)) {
+ loc.low_reg = p_map.core_reg;
+ loc.high_reg = p_map_high.core_reg;
loc.location = kLocPhysReg;
loc.home = true;
}
} else {
loc.core = true;
- if (pMap.coreLocation == kLocPhysReg) {
- loc.lowReg = pMap.coreReg;
+ if (p_map.core_location == kLocPhysReg) {
+ loc.low_reg = p_map.core_reg;
loc.location = kLocPhysReg;
loc.home = true;
}
}
- if (cUnit->printMe && loc.home) {
+ if (cu->verbose && loc.home) {
if (loc.wide) {
- LOG(INFO) << "Promoted wide " << s << " to regs " << loc.lowReg << "/" << loc.highReg;
+ LOG(INFO) << "Promoted wide " << s << " to regs " << loc.low_reg << "/" << loc.high_reg;
} else {
- LOG(INFO) << "Promoted " << s << " to reg " << loc.lowReg;
+ LOG(INFO) << "Promoted " << s << " to reg " << loc.low_reg;
}
}
- cUnit->locMap.Put(val, loc);
+ cu->loc_map.Put(val, loc);
}
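The v%d_%d parse above is how the base SSA register and subscript are recovered from an LLVM value's name. A standalone illustration:

#include <cstdio>
int base_sreg = -1;   // stand-in for INVALID_SREG
int subscript = -1;
sscanf("v5_2", "v%d_%d", &base_sreg, &subscript);
// base_sreg == 5, subscript == 2: SSA register v5, definition #2.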
-static void InitIR(CompilationUnit* cUnit)
+static void InitIR(CompilationUnit* cu)
{
- LLVMInfo* llvmInfo = cUnit->llvm_info;
- if (llvmInfo == NULL) {
- CompilerTls* tls = cUnit->compiler->GetTls();
+ LLVMInfo* llvm_info = cu->llvm_info;
+ if (llvm_info == NULL) {
+ CompilerTls* tls = cu->compiler->GetTls();
CHECK(tls != NULL);
- llvmInfo = static_cast<LLVMInfo*>(tls->GetLLVMInfo());
- if (llvmInfo == NULL) {
- llvmInfo = new LLVMInfo();
- tls->SetLLVMInfo(llvmInfo);
+ llvm_info = static_cast<LLVMInfo*>(tls->GetLLVMInfo());
+ if (llvm_info == NULL) {
+ llvm_info = new LLVMInfo();
+ tls->SetLLVMInfo(llvm_info);
}
}
- cUnit->context = llvmInfo->GetLLVMContext();
- cUnit->module = llvmInfo->GetLLVMModule();
- cUnit->intrinsic_helper = llvmInfo->GetIntrinsicHelper();
- cUnit->irb = llvmInfo->GetIRBuilder();
+ cu->context = llvm_info->GetLLVMContext();
+ cu->module = llvm_info->GetLLVMModule();
+ cu->intrinsic_helper = llvm_info->GetIntrinsicHelper();
+ cu->irb = llvm_info->GetIRBuilder();
}
-static const char* LlvmSSAName(CompilationUnit* cUnit, int ssaReg) {
- return GET_ELEM_N(cUnit->ssaStrings, char*, ssaReg);
+static const char* LlvmSSAName(CompilationUnit* cu, int ssa_reg) {
+ return GET_ELEM_N(cu->ssa_strings, char*, ssa_reg);
}
-llvm::BasicBlock* FindCaseTarget(CompilationUnit* cUnit, uint32_t vaddr)
+llvm::BasicBlock* FindCaseTarget(CompilationUnit* cu, uint32_t vaddr)
{
- BasicBlock* bb = FindBlock(cUnit, vaddr);
+ BasicBlock* bb = FindBlock(cu, vaddr);
DCHECK(bb != NULL);
- return GetLLVMBlock(cUnit, bb->id);
+ return GetLLVMBlock(cu, bb->id);
}
-static void ConvertPackedSwitch(CompilationUnit* cUnit, BasicBlock* bb,
- int32_t tableOffset, RegLocation rlSrc)
+static void ConvertPackedSwitch(CompilationUnit* cu, BasicBlock* bb,
+ int32_t table_offset, RegLocation rl_src)
{
const Instruction::PackedSwitchPayload* payload =
reinterpret_cast<const Instruction::PackedSwitchPayload*>(
- cUnit->insns + cUnit->currentDalvikOffset + tableOffset);
+ cu->insns + cu->current_dalvik_offset + table_offset);
- llvm::Value* value = GetLLVMValue(cUnit, rlSrc.origSReg);
+ llvm::Value* value = GetLLVMValue(cu, rl_src.orig_sreg);
llvm::SwitchInst* sw =
- cUnit->irb->CreateSwitch(value, GetLLVMBlock(cUnit, bb->fallThrough->id),
+ cu->irb->CreateSwitch(value, GetLLVMBlock(cu, bb->fall_through->id),
payload->case_count);
for (uint16_t i = 0; i < payload->case_count; ++i) {
- llvm::BasicBlock* llvmBB =
- FindCaseTarget(cUnit, cUnit->currentDalvikOffset + payload->targets[i]);
- sw->addCase(cUnit->irb->getInt32(payload->first_key + i), llvmBB);
+ llvm::BasicBlock* llvm_bb =
+ FindCaseTarget(cu, cu->current_dalvik_offset + payload->targets[i]);
+ sw->addCase(cu->irb->getInt32(payload->first_key + i), llvm_bb);
}
- llvm::MDNode* switchNode =
- llvm::MDNode::get(*cUnit->context, cUnit->irb->getInt32(tableOffset));
- sw->setMetadata("SwitchTable", switchNode);
+ llvm::MDNode* switch_node =
+ llvm::MDNode::get(*cu->context, cu->irb->getInt32(table_offset));
+ sw->setMetadata("SwitchTable", switch_node);
bb->taken = NULL;
- bb->fallThrough = NULL;
+ bb->fall_through = NULL;
}
-static void ConvertSparseSwitch(CompilationUnit* cUnit, BasicBlock* bb,
- int32_t tableOffset, RegLocation rlSrc)
+static void ConvertSparseSwitch(CompilationUnit* cu, BasicBlock* bb,
+ int32_t table_offset, RegLocation rl_src)
{
const Instruction::SparseSwitchPayload* payload =
reinterpret_cast<const Instruction::SparseSwitchPayload*>(
- cUnit->insns + cUnit->currentDalvikOffset + tableOffset);
+ cu->insns + cu->current_dalvik_offset + table_offset);
const int32_t* keys = payload->GetKeys();
const int32_t* targets = payload->GetTargets();
- llvm::Value* value = GetLLVMValue(cUnit, rlSrc.origSReg);
+ llvm::Value* value = GetLLVMValue(cu, rl_src.orig_sreg);
llvm::SwitchInst* sw =
- cUnit->irb->CreateSwitch(value, GetLLVMBlock(cUnit, bb->fallThrough->id),
+ cu->irb->CreateSwitch(value, GetLLVMBlock(cu, bb->fall_through->id),
payload->case_count);
for (size_t i = 0; i < payload->case_count; ++i) {
- llvm::BasicBlock* llvmBB =
- FindCaseTarget(cUnit, cUnit->currentDalvikOffset + targets[i]);
- sw->addCase(cUnit->irb->getInt32(keys[i]), llvmBB);
+ llvm::BasicBlock* llvm_bb =
+ FindCaseTarget(cu, cu->current_dalvik_offset + targets[i]);
+ sw->addCase(cu->irb->getInt32(keys[i]), llvm_bb);
}
- llvm::MDNode* switchNode =
- llvm::MDNode::get(*cUnit->context, cUnit->irb->getInt32(tableOffset));
- sw->setMetadata("SwitchTable", switchNode);
+ llvm::MDNode* switch_node =
+ llvm::MDNode::get(*cu->context, cu->irb->getInt32(table_offset));
+ sw->setMetadata("SwitchTable", switch_node);
bb->taken = NULL;
- bb->fallThrough = NULL;
+ bb->fall_through = NULL;
}
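The sparse variant differs only in carrying an explicit keys array instead of a dense range. In both converters the "SwitchTable" metadata records table_offset so a later pass can map the emitted llvm::SwitchInst back to its dex payload; a hedged sketch of that read-back (the metadata accessor calls are an assumption about the LLVM version in use):

  // Sketch: recover the table offset a converter attached to a SwitchInst.
  int32_t GetSwitchTableOffset(llvm::SwitchInst* sw) {
    llvm::MDNode* node = sw->getMetadata("SwitchTable");
    DCHECK(node != NULL);
    return llvm::cast<llvm::ConstantInt>(node->getOperand(0))->getSExtValue();
  }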
-static void ConvertSget(CompilationUnit* cUnit, int32_t fieldIndex,
- greenland::IntrinsicHelper::IntrinsicId id, RegLocation rlDest)
+static void ConvertSget(CompilationUnit* cu, int32_t field_index,
+ greenland::IntrinsicHelper::IntrinsicId id, RegLocation rl_dest)
{
- llvm::Constant* fieldIdx = cUnit->irb->getInt32(fieldIndex);
- llvm::Function* intr = cUnit->intrinsic_helper->GetIntrinsicFunction(id);
- llvm::Value* res = cUnit->irb->CreateCall(intr, fieldIdx);
- DefineValue(cUnit, res, rlDest.origSReg);
+ llvm::Constant* field_idx = cu->irb->getInt32(field_index);
+ llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
+ llvm::Value* res = cu->irb->CreateCall(intr, field_idx);
+ DefineValue(cu, res, rl_dest.orig_sreg);
}
-static void ConvertSput(CompilationUnit* cUnit, int32_t fieldIndex,
- greenland::IntrinsicHelper::IntrinsicId id, RegLocation rlSrc)
+static void ConvertSput(CompilationUnit* cu, int32_t field_index,
+ greenland::IntrinsicHelper::IntrinsicId id, RegLocation rl_src)
{
llvm::SmallVector<llvm::Value*, 2> args;
- args.push_back(cUnit->irb->getInt32(fieldIndex));
- args.push_back(GetLLVMValue(cUnit, rlSrc.origSReg));
- llvm::Function* intr = cUnit->intrinsic_helper->GetIntrinsicFunction(id);
- cUnit->irb->CreateCall(intr, args);
+ args.push_back(cu->irb->getInt32(field_index));
+ args.push_back(GetLLVMValue(cu, rl_src.orig_sreg));
+ llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
+ cu->irb->CreateCall(intr, args);
}
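Both helpers follow the high-level (HL) intrinsic pattern: the dex field index stays symbolic in the bitcode, presumably to be resolved by a later lowering pass rather than turned into a memory access here. As a usage sketch, with field index 12 as an arbitrary example:

  // What ConvertSget(cu, 12, HLSget, rl_dest) boils down to:
  llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(
      greenland::IntrinsicHelper::HLSget);
  llvm::Value* res = cu->irb->CreateCall(intr, cu->irb->getInt32(12));
  DefineValue(cu, res, rl_dest.orig_sreg);  // res is the new def of the destination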
-static void ConvertFillArrayData(CompilationUnit* cUnit, int32_t offset, RegLocation rlArray)
+static void ConvertFillArrayData(CompilationUnit* cu, int32_t offset, RegLocation rl_array)
{
greenland::IntrinsicHelper::IntrinsicId id;
id = greenland::IntrinsicHelper::HLFillArrayData;
llvm::SmallVector<llvm::Value*, 2> args;
- args.push_back(cUnit->irb->getInt32(offset));
- args.push_back(GetLLVMValue(cUnit, rlArray.origSReg));
- llvm::Function* intr = cUnit->intrinsic_helper->GetIntrinsicFunction(id);
- cUnit->irb->CreateCall(intr, args);
+ args.push_back(cu->irb->getInt32(offset));
+ args.push_back(GetLLVMValue(cu, rl_array.orig_sreg));
+ llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
+ cu->irb->CreateCall(intr, args);
}
-static llvm::Value* EmitConst(CompilationUnit* cUnit, llvm::ArrayRef<llvm::Value*> src,
+static llvm::Value* EmitConst(CompilationUnit* cu, llvm::ArrayRef<llvm::Value*> src,
RegLocation loc)
{
greenland::IntrinsicHelper::IntrinsicId id;
@@ -318,18 +318,18 @@
id = greenland::IntrinsicHelper::ConstInt;
}
}
- llvm::Function* intr = cUnit->intrinsic_helper->GetIntrinsicFunction(id);
- return cUnit->irb->CreateCall(intr, src);
+ llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
+ return cu->irb->CreateCall(intr, src);
}
-static void EmitPopShadowFrame(CompilationUnit* cUnit)
+static void EmitPopShadowFrame(CompilationUnit* cu)
{
- llvm::Function* intr = cUnit->intrinsic_helper->GetIntrinsicFunction(
+ llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(
greenland::IntrinsicHelper::PopShadowFrame);
- cUnit->irb->CreateCall(intr);
+ cu->irb->CreateCall(intr);
}
-static llvm::Value* EmitCopy(CompilationUnit* cUnit, llvm::ArrayRef<llvm::Value*> src,
+static llvm::Value* EmitCopy(CompilationUnit* cu, llvm::ArrayRef<llvm::Value*> src,
RegLocation loc)
{
greenland::IntrinsicHelper::IntrinsicId id;
@@ -348,214 +348,214 @@
id = greenland::IntrinsicHelper::CopyInt;
}
}
- llvm::Function* intr = cUnit->intrinsic_helper->GetIntrinsicFunction(id);
- return cUnit->irb->CreateCall(intr, src);
+ llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
+ return cu->irb->CreateCall(intr, src);
}
-static void ConvertMoveException(CompilationUnit* cUnit, RegLocation rlDest)
+static void ConvertMoveException(CompilationUnit* cu, RegLocation rl_dest)
{
- llvm::Function* func = cUnit->intrinsic_helper->GetIntrinsicFunction(
+ llvm::Function* func = cu->intrinsic_helper->GetIntrinsicFunction(
greenland::IntrinsicHelper::GetException);
- llvm::Value* res = cUnit->irb->CreateCall(func);
- DefineValue(cUnit, res, rlDest.origSReg);
+ llvm::Value* res = cu->irb->CreateCall(func);
+ DefineValue(cu, res, rl_dest.orig_sreg);
}
-static void ConvertThrow(CompilationUnit* cUnit, RegLocation rlSrc)
+static void ConvertThrow(CompilationUnit* cu, RegLocation rl_src)
{
- llvm::Value* src = GetLLVMValue(cUnit, rlSrc.origSReg);
- llvm::Function* func = cUnit->intrinsic_helper->GetIntrinsicFunction(
+ llvm::Value* src = GetLLVMValue(cu, rl_src.orig_sreg);
+ llvm::Function* func = cu->intrinsic_helper->GetIntrinsicFunction(
greenland::IntrinsicHelper::HLThrowException);
- cUnit->irb->CreateCall(func, src);
+ cu->irb->CreateCall(func, src);
}
-static void ConvertMonitorEnterExit(CompilationUnit* cUnit, int optFlags,
+static void ConvertMonitorEnterExit(CompilationUnit* cu, int opt_flags,
greenland::IntrinsicHelper::IntrinsicId id,
- RegLocation rlSrc)
+ RegLocation rl_src)
{
llvm::SmallVector<llvm::Value*, 2> args;
- args.push_back(cUnit->irb->getInt32(optFlags));
- args.push_back(GetLLVMValue(cUnit, rlSrc.origSReg));
- llvm::Function* func = cUnit->intrinsic_helper->GetIntrinsicFunction(id);
- cUnit->irb->CreateCall(func, args);
+ args.push_back(cu->irb->getInt32(opt_flags));
+ args.push_back(GetLLVMValue(cu, rl_src.orig_sreg));
+ llvm::Function* func = cu->intrinsic_helper->GetIntrinsicFunction(id);
+ cu->irb->CreateCall(func, args);
}
-static void ConvertArrayLength(CompilationUnit* cUnit, int optFlags,
- RegLocation rlDest, RegLocation rlSrc)
+static void ConvertArrayLength(CompilationUnit* cu, int opt_flags,
+ RegLocation rl_dest, RegLocation rl_src)
{
llvm::SmallVector<llvm::Value*, 2> args;
- args.push_back(cUnit->irb->getInt32(optFlags));
- args.push_back(GetLLVMValue(cUnit, rlSrc.origSReg));
- llvm::Function* func = cUnit->intrinsic_helper->GetIntrinsicFunction(
+ args.push_back(cu->irb->getInt32(opt_flags));
+ args.push_back(GetLLVMValue(cu, rl_src.orig_sreg));
+ llvm::Function* func = cu->intrinsic_helper->GetIntrinsicFunction(
greenland::IntrinsicHelper::OptArrayLength);
- llvm::Value* res = cUnit->irb->CreateCall(func, args);
- DefineValue(cUnit, res, rlDest.origSReg);
+ llvm::Value* res = cu->irb->CreateCall(func, args);
+ DefineValue(cu, res, rl_dest.orig_sreg);
}
-static void EmitSuspendCheck(CompilationUnit* cUnit)
+static void EmitSuspendCheck(CompilationUnit* cu)
{
greenland::IntrinsicHelper::IntrinsicId id =
greenland::IntrinsicHelper::CheckSuspend;
- llvm::Function* intr = cUnit->intrinsic_helper->GetIntrinsicFunction(id);
- cUnit->irb->CreateCall(intr);
+ llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
+ cu->irb->CreateCall(intr);
}
-static llvm::Value* ConvertCompare(CompilationUnit* cUnit, ConditionCode cc,
+static llvm::Value* ConvertCompare(CompilationUnit* cu, ConditionCode cc,
llvm::Value* src1, llvm::Value* src2)
{
llvm::Value* res = NULL;
DCHECK_EQ(src1->getType(), src2->getType());
switch(cc) {
- case kCondEq: res = cUnit->irb->CreateICmpEQ(src1, src2); break;
- case kCondNe: res = cUnit->irb->CreateICmpNE(src1, src2); break;
- case kCondLt: res = cUnit->irb->CreateICmpSLT(src1, src2); break;
- case kCondGe: res = cUnit->irb->CreateICmpSGE(src1, src2); break;
- case kCondGt: res = cUnit->irb->CreateICmpSGT(src1, src2); break;
- case kCondLe: res = cUnit->irb->CreateICmpSLE(src1, src2); break;
+ case kCondEq: res = cu->irb->CreateICmpEQ(src1, src2); break;
+ case kCondNe: res = cu->irb->CreateICmpNE(src1, src2); break;
+ case kCondLt: res = cu->irb->CreateICmpSLT(src1, src2); break;
+ case kCondGe: res = cu->irb->CreateICmpSGE(src1, src2); break;
+ case kCondGt: res = cu->irb->CreateICmpSGT(src1, src2); break;
+ case kCondLe: res = cu->irb->CreateICmpSLE(src1, src2); break;
default: LOG(FATAL) << "Unexpected cc value " << cc;
}
return res;
}
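Every predicate here maps to a signed LLVM comparison (SLT, SGE, and so on), matching Dalvik if-* semantics; no unsigned compares arise. For example, lowering an if-lt reduces to:

  llvm::Value* cond = ConvertCompare(cu, kCondLt, src1, src2);  // i1-typed, signed less-than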
-static void ConvertCompareAndBranch(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir,
- ConditionCode cc, RegLocation rlSrc1, RegLocation rlSrc2)
+static void ConvertCompareAndBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
+ ConditionCode cc, RegLocation rl_src1, RegLocation rl_src2)
{
- if (bb->taken->startOffset <= mir->offset) {
- EmitSuspendCheck(cUnit);
+ if (bb->taken->start_offset <= mir->offset) {
+ EmitSuspendCheck(cu);
}
- llvm::Value* src1 = GetLLVMValue(cUnit, rlSrc1.origSReg);
- llvm::Value* src2 = GetLLVMValue(cUnit, rlSrc2.origSReg);
- llvm::Value* condValue = ConvertCompare(cUnit, cc, src1, src2);
- condValue->setName(StringPrintf("t%d", cUnit->tempName++));
- cUnit->irb->CreateCondBr(condValue, GetLLVMBlock(cUnit, bb->taken->id),
- GetLLVMBlock(cUnit, bb->fallThrough->id));
+ llvm::Value* src1 = GetLLVMValue(cu, rl_src1.orig_sreg);
+ llvm::Value* src2 = GetLLVMValue(cu, rl_src2.orig_sreg);
+ llvm::Value* cond_value = ConvertCompare(cu, cc, src1, src2);
+ cond_value->setName(StringPrintf("t%d", cu->temp_name++));
+ cu->irb->CreateCondBr(cond_value, GetLLVMBlock(cu, bb->taken->id),
+ GetLLVMBlock(cu, bb->fall_through->id));
// Don't redo the fallthrough branch in the BB driver
- bb->fallThrough = NULL;
+ bb->fall_through = NULL;
}
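The start_offset test above is a back-edge check: a branch whose target does not lie beyond the current instruction can form a loop, and loops must poll for thread suspension. Restated as a predicate:

  // True when the taken edge may loop back; a suspend check (safepoint)
  // is then emitted before the conditional branch.
  static bool IsBackwardBranch(const BasicBlock* bb, const MIR* mir) {
    return bb->taken->start_offset <= mir->offset;
  }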
-static void ConvertCompareZeroAndBranch(CompilationUnit* cUnit, BasicBlock* bb,
- MIR* mir, ConditionCode cc, RegLocation rlSrc1)
+static void ConvertCompareZeroAndBranch(CompilationUnit* cu, BasicBlock* bb,
+ MIR* mir, ConditionCode cc, RegLocation rl_src1)
{
- if (bb->taken->startOffset <= mir->offset) {
- EmitSuspendCheck(cUnit);
+ if (bb->taken->start_offset <= mir->offset) {
+ EmitSuspendCheck(cu);
}
- llvm::Value* src1 = GetLLVMValue(cUnit, rlSrc1.origSReg);
+ llvm::Value* src1 = GetLLVMValue(cu, rl_src1.orig_sreg);
llvm::Value* src2;
- if (rlSrc1.ref) {
- src2 = cUnit->irb->GetJNull();
+ if (rl_src1.ref) {
+ src2 = cu->irb->GetJNull();
} else {
- src2 = cUnit->irb->getInt32(0);
+ src2 = cu->irb->getInt32(0);
}
- llvm::Value* condValue = ConvertCompare(cUnit, cc, src1, src2);
- cUnit->irb->CreateCondBr(condValue, GetLLVMBlock(cUnit, bb->taken->id),
- GetLLVMBlock(cUnit, bb->fallThrough->id));
+ llvm::Value* cond_value = ConvertCompare(cu, cc, src1, src2);
+ cu->irb->CreateCondBr(cond_value, GetLLVMBlock(cu, bb->taken->id),
+ GetLLVMBlock(cu, bb->fall_through->id));
// Don't redo the fallthrough branch in the BB driver
- bb->fallThrough = NULL;
+ bb->fall_through = NULL;
}
-static llvm::Value* GenDivModOp(CompilationUnit* cUnit, bool isDiv, bool isLong,
+static llvm::Value* GenDivModOp(CompilationUnit* cu, bool is_div, bool is_long,
llvm::Value* src1, llvm::Value* src2)
{
greenland::IntrinsicHelper::IntrinsicId id;
- if (isLong) {
- if (isDiv) {
+ if (is_long) {
+ if (is_div) {
id = greenland::IntrinsicHelper::DivLong;
} else {
id = greenland::IntrinsicHelper::RemLong;
}
} else {
- if (isDiv) {
+ if (is_div) {
id = greenland::IntrinsicHelper::DivInt;
} else {
id = greenland::IntrinsicHelper::RemInt;
}
}
- llvm::Function* intr = cUnit->intrinsic_helper->GetIntrinsicFunction(id);
+ llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
llvm::SmallVector<llvm::Value*, 2>args;
args.push_back(src1);
args.push_back(src2);
- return cUnit->irb->CreateCall(intr, args);
+ return cu->irb->CreateCall(intr, args);
}
-static llvm::Value* GenArithOp(CompilationUnit* cUnit, OpKind op, bool isLong,
+static llvm::Value* GenArithOp(CompilationUnit* cu, OpKind op, bool is_long,
llvm::Value* src1, llvm::Value* src2)
{
llvm::Value* res = NULL;
switch(op) {
- case kOpAdd: res = cUnit->irb->CreateAdd(src1, src2); break;
- case kOpSub: res = cUnit->irb->CreateSub(src1, src2); break;
- case kOpRsub: res = cUnit->irb->CreateSub(src2, src1); break;
- case kOpMul: res = cUnit->irb->CreateMul(src1, src2); break;
- case kOpOr: res = cUnit->irb->CreateOr(src1, src2); break;
- case kOpAnd: res = cUnit->irb->CreateAnd(src1, src2); break;
- case kOpXor: res = cUnit->irb->CreateXor(src1, src2); break;
- case kOpDiv: res = GenDivModOp(cUnit, true, isLong, src1, src2); break;
- case kOpRem: res = GenDivModOp(cUnit, false, isLong, src1, src2); break;
- case kOpLsl: res = cUnit->irb->CreateShl(src1, src2); break;
- case kOpLsr: res = cUnit->irb->CreateLShr(src1, src2); break;
- case kOpAsr: res = cUnit->irb->CreateAShr(src1, src2); break;
+ case kOpAdd: res = cu->irb->CreateAdd(src1, src2); break;
+ case kOpSub: res = cu->irb->CreateSub(src1, src2); break;
+ case kOpRsub: res = cu->irb->CreateSub(src2, src1); break;
+ case kOpMul: res = cu->irb->CreateMul(src1, src2); break;
+ case kOpOr: res = cu->irb->CreateOr(src1, src2); break;
+ case kOpAnd: res = cu->irb->CreateAnd(src1, src2); break;
+ case kOpXor: res = cu->irb->CreateXor(src1, src2); break;
+ case kOpDiv: res = GenDivModOp(cu, true, is_long, src1, src2); break;
+ case kOpRem: res = GenDivModOp(cu, false, is_long, src1, src2); break;
+ case kOpLsl: res = cu->irb->CreateShl(src1, src2); break;
+ case kOpLsr: res = cu->irb->CreateLShr(src1, src2); break;
+ case kOpAsr: res = cu->irb->CreateAShr(src1, src2); break;
default:
LOG(FATAL) << "Invalid op " << op;
}
return res;
}
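Div and rem are the one exception to the direct IRBuilder mapping: they route through runtime intrinsics, presumably because Dalvik must throw ArithmeticException on a zero divisor while LLVM's sdiv/srem would be undefined there. The 32-bit divide case expands to:

  // GenDivModOp(cu, /*is_div=*/ true, /*is_long=*/ false, src1, src2) emits:
  llvm::Function* div_intr = cu->intrinsic_helper->GetIntrinsicFunction(
      greenland::IntrinsicHelper::DivInt);
  llvm::SmallVector<llvm::Value*, 2> args;
  args.push_back(src1);
  args.push_back(src2);
  llvm::Value* quotient = cu->irb->CreateCall(div_intr, args);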
-static void ConvertFPArithOp(CompilationUnit* cUnit, OpKind op, RegLocation rlDest,
- RegLocation rlSrc1, RegLocation rlSrc2)
+static void ConvertFPArithOp(CompilationUnit* cu, OpKind op, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2)
{
- llvm::Value* src1 = GetLLVMValue(cUnit, rlSrc1.origSReg);
- llvm::Value* src2 = GetLLVMValue(cUnit, rlSrc2.origSReg);
+ llvm::Value* src1 = GetLLVMValue(cu, rl_src1.orig_sreg);
+ llvm::Value* src2 = GetLLVMValue(cu, rl_src2.orig_sreg);
llvm::Value* res = NULL;
switch(op) {
- case kOpAdd: res = cUnit->irb->CreateFAdd(src1, src2); break;
- case kOpSub: res = cUnit->irb->CreateFSub(src1, src2); break;
- case kOpMul: res = cUnit->irb->CreateFMul(src1, src2); break;
- case kOpDiv: res = cUnit->irb->CreateFDiv(src1, src2); break;
- case kOpRem: res = cUnit->irb->CreateFRem(src1, src2); break;
+ case kOpAdd: res = cu->irb->CreateFAdd(src1, src2); break;
+ case kOpSub: res = cu->irb->CreateFSub(src1, src2); break;
+ case kOpMul: res = cu->irb->CreateFMul(src1, src2); break;
+ case kOpDiv: res = cu->irb->CreateFDiv(src1, src2); break;
+ case kOpRem: res = cu->irb->CreateFRem(src1, src2); break;
default:
LOG(FATAL) << "Invalid op " << op;
}
- DefineValue(cUnit, res, rlDest.origSReg);
+ DefineValue(cu, res, rl_dest.orig_sreg);
}
-static void ConvertShift(CompilationUnit* cUnit, greenland::IntrinsicHelper::IntrinsicId id,
- RegLocation rlDest, RegLocation rlSrc1, RegLocation rlSrc2)
+static void ConvertShift(CompilationUnit* cu, greenland::IntrinsicHelper::IntrinsicId id,
+ RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2)
{
- llvm::Function* intr = cUnit->intrinsic_helper->GetIntrinsicFunction(id);
+ llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
llvm::SmallVector<llvm::Value*, 2>args;
- args.push_back(GetLLVMValue(cUnit, rlSrc1.origSReg));
- args.push_back(GetLLVMValue(cUnit, rlSrc2.origSReg));
- llvm::Value* res = cUnit->irb->CreateCall(intr, args);
- DefineValue(cUnit, res, rlDest.origSReg);
+ args.push_back(GetLLVMValue(cu, rl_src1.orig_sreg));
+ args.push_back(GetLLVMValue(cu, rl_src2.orig_sreg));
+ llvm::Value* res = cu->irb->CreateCall(intr, args);
+ DefineValue(cu, res, rl_dest.orig_sreg);
}
-static void ConvertShiftLit(CompilationUnit* cUnit, greenland::IntrinsicHelper::IntrinsicId id,
- RegLocation rlDest, RegLocation rlSrc, int shiftAmount)
+static void ConvertShiftLit(CompilationUnit* cu, greenland::IntrinsicHelper::IntrinsicId id,
+ RegLocation rl_dest, RegLocation rl_src, int shift_amount)
{
- llvm::Function* intr = cUnit->intrinsic_helper->GetIntrinsicFunction(id);
+ llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
llvm::SmallVector<llvm::Value*, 2>args;
- args.push_back(GetLLVMValue(cUnit, rlSrc.origSReg));
- args.push_back(cUnit->irb->getInt32(shiftAmount));
- llvm::Value* res = cUnit->irb->CreateCall(intr, args);
- DefineValue(cUnit, res, rlDest.origSReg);
+ args.push_back(GetLLVMValue(cu, rl_src.orig_sreg));
+ args.push_back(cu->irb->getInt32(shift_amount));
+ llvm::Value* res = cu->irb->CreateCall(intr, args);
+ DefineValue(cu, res, rl_dest.orig_sreg);
}
-static void ConvertArithOp(CompilationUnit* cUnit, OpKind op, RegLocation rlDest,
- RegLocation rlSrc1, RegLocation rlSrc2)
+static void ConvertArithOp(CompilationUnit* cu, OpKind op, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2)
{
- llvm::Value* src1 = GetLLVMValue(cUnit, rlSrc1.origSReg);
- llvm::Value* src2 = GetLLVMValue(cUnit, rlSrc2.origSReg);
+ llvm::Value* src1 = GetLLVMValue(cu, rl_src1.orig_sreg);
+ llvm::Value* src2 = GetLLVMValue(cu, rl_src2.orig_sreg);
DCHECK_EQ(src1->getType(), src2->getType());
- llvm::Value* res = GenArithOp(cUnit, op, rlDest.wide, src1, src2);
- DefineValue(cUnit, res, rlDest.origSReg);
+ llvm::Value* res = GenArithOp(cu, op, rl_dest.wide, src1, src2);
+ DefineValue(cu, res, rl_dest.orig_sreg);
}
-static void SetShadowFrameEntry(CompilationUnit* cUnit, llvm::Value* newVal)
+static void SetShadowFrameEntry(CompilationUnit* cu, llvm::Value* new_val)
{
int index = -1;
- DCHECK(newVal != NULL);
- int vReg = SRegToVReg(cUnit, GetLoc(cUnit, newVal).origSReg);
- for (int i = 0; i < cUnit->numShadowFrameEntries; i++) {
- if (cUnit->shadowMap[i] == vReg) {
+ DCHECK(new_val != NULL);
+ int v_reg = SRegToVReg(cu, GetLoc(cu, new_val).orig_sreg);
+ for (int i = 0; i < cu->num_shadow_frame_entries; i++) {
+ if (cu->shadow_map[i] == v_reg) {
index = i;
break;
}
@@ -563,27 +563,27 @@
if (index == -1) {
return;
}
- llvm::Type* ty = newVal->getType();
+ llvm::Type* ty = new_val->getType();
greenland::IntrinsicHelper::IntrinsicId id =
greenland::IntrinsicHelper::SetShadowFrameEntry;
- llvm::Function* func = cUnit->intrinsic_helper->GetIntrinsicFunction(id);
- llvm::Value* tableSlot = cUnit->irb->getInt32(index);
- // If newVal is a null pointer, we'll see it here as a const int; replace it with a typed null.
+ llvm::Function* func = cu->intrinsic_helper->GetIntrinsicFunction(id);
+ llvm::Value* table_slot = cu->irb->getInt32(index);
+ // If new_val is a null pointer, we'll see it here as a const int; replace it with a typed null.
if (!ty->isPointerTy()) {
- // TODO: assert newVal created w/ dex_lang_const_int(0) or dex_lang_const_float(0)
- newVal = cUnit->irb->GetJNull();
+ // TODO: assert new_val created w/ dex_lang_const_int(0) or dex_lang_const_float(0)
+ new_val = cu->irb->GetJNull();
}
- llvm::Value* args[] = { newVal, tableSlot };
- cUnit->irb->CreateCall(func, args);
+ llvm::Value* args[] = { new_val, table_slot };
+ cu->irb->CreateCall(func, args);
}
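SetShadowFrameEntry keeps reference values visible to the runtime: shadow_map pairs each shadow-frame slot with the Dalvik vreg it tracks, and values whose vreg is untracked need no bookkeeping. The lookup it performs, restated:

  // Returns the shadow-frame slot tracking v_reg, or -1 if untracked.
  static int FindShadowSlot(const CompilationUnit* cu, int v_reg) {
    for (int i = 0; i < cu->num_shadow_frame_entries; i++) {
      if (cu->shadow_map[i] == v_reg) {
        return i;
      }
    }
    return -1;
  }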
-static void ConvertArithOpLit(CompilationUnit* cUnit, OpKind op, RegLocation rlDest,
- RegLocation rlSrc1, int32_t imm)
+static void ConvertArithOpLit(CompilationUnit* cu, OpKind op, RegLocation rl_dest,
+ RegLocation rl_src1, int32_t imm)
{
- llvm::Value* src1 = GetLLVMValue(cUnit, rlSrc1.origSReg);
- llvm::Value* src2 = cUnit->irb->getInt32(imm);
- llvm::Value* res = GenArithOp(cUnit, op, rlDest.wide, src1, src2);
- DefineValue(cUnit, res, rlDest.origSReg);
+ llvm::Value* src1 = GetLLVMValue(cu, rl_src1.orig_sreg);
+ llvm::Value* src2 = cu->irb->getInt32(imm);
+ llvm::Value* res = GenArithOp(cu, op, rl_dest.wide, src1, src2);
+ DefineValue(cu, res, rl_dest.orig_sreg);
}
/*
@@ -591,20 +591,20 @@
* collect and process arguments for NEW_FILLED_ARRAY and NEW_FILLED_ARRAY_RANGE.
* The requirements are similar.
*/
-static void ConvertInvoke(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir,
- InvokeType invokeType, bool isRange, bool isFilledNewArray)
+static void ConvertInvoke(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
+ InvokeType invoke_type, bool is_range, bool is_filled_new_array)
{
- CallInfo* info = NewMemCallInfo(cUnit, bb, mir, invokeType, isRange);
+ CallInfo* info = NewMemCallInfo(cu, bb, mir, invoke_type, is_range);
llvm::SmallVector<llvm::Value*, 10> args;
- // Insert the invokeType
- args.push_back(cUnit->irb->getInt32(static_cast<int>(invokeType)));
+ // Insert the invoke_type
+ args.push_back(cu->irb->getInt32(static_cast<int>(invoke_type)));
// Insert the method_idx
- args.push_back(cUnit->irb->getInt32(info->index));
+ args.push_back(cu->irb->getInt32(info->index));
// Insert the optimization flags
- args.push_back(cUnit->irb->getInt32(info->optFlags));
+ args.push_back(cu->irb->getInt32(info->opt_flags));
// Now, insert the actual arguments
- for (int i = 0; i < info->numArgWords;) {
- llvm::Value* val = GetLLVMValue(cUnit, info->args[i].origSReg);
+ for (int i = 0; i < info->num_arg_words;) {
+ llvm::Value* val = GetLLVMValue(cu, info->args[i].orig_sreg);
args.push_back(val);
i += info->args[i].wide ? 2 : 1;
}
@@ -614,7 +614,7 @@
* is not used, we'll treat this as a void invoke.
*/
greenland::IntrinsicHelper::IntrinsicId id;
- if (isFilledNewArray) {
+ if (is_filled_new_array) {
id = greenland::IntrinsicHelper::HLFilledNewArray;
} else if (info->result.location == kLocInvalid) {
id = greenland::IntrinsicHelper::HLInvokeVoid;
@@ -633,213 +633,213 @@
id = greenland::IntrinsicHelper::HLInvokeInt;
}
}
- llvm::Function* intr = cUnit->intrinsic_helper->GetIntrinsicFunction(id);
- llvm::Value* res = cUnit->irb->CreateCall(intr, args);
+ llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
+ llvm::Value* res = cu->irb->CreateCall(intr, args);
if (info->result.location != kLocInvalid) {
- DefineValue(cUnit, res, info->result.origSReg);
+ DefineValue(cu, res, info->result.orig_sreg);
if (info->result.ref) {
- SetShadowFrameEntry(cUnit, reinterpret_cast<llvm::Value*>
- (cUnit->llvmValues.elemList[info->result.origSReg]));
+ SetShadowFrameEntry(cu, reinterpret_cast<llvm::Value*>
+ (cu->llvm_values.elem_list[info->result.orig_sreg]));
}
}
}
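All invoke flavors share one argument layout in the HL call: invoke type, method index, and optimization flags first, then the Dalvik arguments, with wide values taking two slots in info->args but one LLVM value. A hedged sketch for a hypothetical int-returning invoke-virtual with method_idx 5, no flags, and a single receiver value (receiver is an assumed name):

  llvm::SmallVector<llvm::Value*, 10> args;
  args.push_back(cu->irb->getInt32(static_cast<int>(kVirtual)));  // invoke type
  args.push_back(cu->irb->getInt32(5));                           // method_idx (example)
  args.push_back(cu->irb->getInt32(0));                           // optimization flags
  args.push_back(receiver);  // hypothetical SSA value for the sole argument
  llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(
      greenland::IntrinsicHelper::HLInvokeInt);
  llvm::Value* res = cu->irb->CreateCall(intr, args);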
-static void ConvertConstObject(CompilationUnit* cUnit, uint32_t idx,
- greenland::IntrinsicHelper::IntrinsicId id, RegLocation rlDest)
+static void ConvertConstObject(CompilationUnit* cu, uint32_t idx,
+ greenland::IntrinsicHelper::IntrinsicId id, RegLocation rl_dest)
{
- llvm::Function* intr = cUnit->intrinsic_helper->GetIntrinsicFunction(id);
- llvm::Value* index = cUnit->irb->getInt32(idx);
- llvm::Value* res = cUnit->irb->CreateCall(intr, index);
- DefineValue(cUnit, res, rlDest.origSReg);
+ llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
+ llvm::Value* index = cu->irb->getInt32(idx);
+ llvm::Value* res = cu->irb->CreateCall(intr, index);
+ DefineValue(cu, res, rl_dest.orig_sreg);
}
-static void ConvertCheckCast(CompilationUnit* cUnit, uint32_t type_idx, RegLocation rlSrc)
+static void ConvertCheckCast(CompilationUnit* cu, uint32_t type_idx, RegLocation rl_src)
{
greenland::IntrinsicHelper::IntrinsicId id;
id = greenland::IntrinsicHelper::HLCheckCast;
- llvm::Function* intr = cUnit->intrinsic_helper->GetIntrinsicFunction(id);
+ llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
llvm::SmallVector<llvm::Value*, 2> args;
- args.push_back(cUnit->irb->getInt32(type_idx));
- args.push_back(GetLLVMValue(cUnit, rlSrc.origSReg));
- cUnit->irb->CreateCall(intr, args);
+ args.push_back(cu->irb->getInt32(type_idx));
+ args.push_back(GetLLVMValue(cu, rl_src.orig_sreg));
+ cu->irb->CreateCall(intr, args);
}
-static void ConvertNewInstance(CompilationUnit* cUnit, uint32_t type_idx, RegLocation rlDest)
+static void ConvertNewInstance(CompilationUnit* cu, uint32_t type_idx, RegLocation rl_dest)
{
greenland::IntrinsicHelper::IntrinsicId id;
id = greenland::IntrinsicHelper::NewInstance;
- llvm::Function* intr = cUnit->intrinsic_helper->GetIntrinsicFunction(id);
- llvm::Value* index = cUnit->irb->getInt32(type_idx);
- llvm::Value* res = cUnit->irb->CreateCall(intr, index);
- DefineValue(cUnit, res, rlDest.origSReg);
+ llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
+ llvm::Value* index = cu->irb->getInt32(type_idx);
+ llvm::Value* res = cu->irb->CreateCall(intr, index);
+ DefineValue(cu, res, rl_dest.orig_sreg);
}
-static void ConvertNewArray(CompilationUnit* cUnit, uint32_t type_idx,
- RegLocation rlDest, RegLocation rlSrc)
+static void ConvertNewArray(CompilationUnit* cu, uint32_t type_idx,
+ RegLocation rl_dest, RegLocation rl_src)
{
greenland::IntrinsicHelper::IntrinsicId id;
id = greenland::IntrinsicHelper::NewArray;
- llvm::Function* intr = cUnit->intrinsic_helper->GetIntrinsicFunction(id);
+ llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
llvm::SmallVector<llvm::Value*, 2> args;
- args.push_back(cUnit->irb->getInt32(type_idx));
- args.push_back(GetLLVMValue(cUnit, rlSrc.origSReg));
- llvm::Value* res = cUnit->irb->CreateCall(intr, args);
- DefineValue(cUnit, res, rlDest.origSReg);
+ args.push_back(cu->irb->getInt32(type_idx));
+ args.push_back(GetLLVMValue(cu, rl_src.orig_sreg));
+ llvm::Value* res = cu->irb->CreateCall(intr, args);
+ DefineValue(cu, res, rl_dest.orig_sreg);
}
-static void ConvertAget(CompilationUnit* cUnit, int optFlags,
+static void ConvertAget(CompilationUnit* cu, int opt_flags,
greenland::IntrinsicHelper::IntrinsicId id,
- RegLocation rlDest, RegLocation rlArray, RegLocation rlIndex)
+ RegLocation rl_dest, RegLocation rl_array, RegLocation rl_index)
{
llvm::SmallVector<llvm::Value*, 3> args;
- args.push_back(cUnit->irb->getInt32(optFlags));
- args.push_back(GetLLVMValue(cUnit, rlArray.origSReg));
- args.push_back(GetLLVMValue(cUnit, rlIndex.origSReg));
- llvm::Function* intr = cUnit->intrinsic_helper->GetIntrinsicFunction(id);
- llvm::Value* res = cUnit->irb->CreateCall(intr, args);
- DefineValue(cUnit, res, rlDest.origSReg);
+ args.push_back(cu->irb->getInt32(opt_flags));
+ args.push_back(GetLLVMValue(cu, rl_array.orig_sreg));
+ args.push_back(GetLLVMValue(cu, rl_index.orig_sreg));
+ llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
+ llvm::Value* res = cu->irb->CreateCall(intr, args);
+ DefineValue(cu, res, rl_dest.orig_sreg);
}
-static void ConvertAput(CompilationUnit* cUnit, int optFlags,
+static void ConvertAput(CompilationUnit* cu, int opt_flags,
greenland::IntrinsicHelper::IntrinsicId id,
- RegLocation rlSrc, RegLocation rlArray, RegLocation rlIndex)
+ RegLocation rl_src, RegLocation rl_array, RegLocation rl_index)
{
llvm::SmallVector<llvm::Value*, 4> args;
- args.push_back(cUnit->irb->getInt32(optFlags));
- args.push_back(GetLLVMValue(cUnit, rlSrc.origSReg));
- args.push_back(GetLLVMValue(cUnit, rlArray.origSReg));
- args.push_back(GetLLVMValue(cUnit, rlIndex.origSReg));
- llvm::Function* intr = cUnit->intrinsic_helper->GetIntrinsicFunction(id);
- cUnit->irb->CreateCall(intr, args);
+ args.push_back(cu->irb->getInt32(opt_flags));
+ args.push_back(GetLLVMValue(cu, rl_src.orig_sreg));
+ args.push_back(GetLLVMValue(cu, rl_array.orig_sreg));
+ args.push_back(GetLLVMValue(cu, rl_index.orig_sreg));
+ llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
+ cu->irb->CreateCall(intr, args);
}
-static void ConvertIget(CompilationUnit* cUnit, int optFlags,
+static void ConvertIget(CompilationUnit* cu, int opt_flags,
greenland::IntrinsicHelper::IntrinsicId id,
- RegLocation rlDest, RegLocation rlObj, int fieldIndex)
+ RegLocation rl_dest, RegLocation rl_obj, int field_index)
{
llvm::SmallVector<llvm::Value*, 3> args;
- args.push_back(cUnit->irb->getInt32(optFlags));
- args.push_back(GetLLVMValue(cUnit, rlObj.origSReg));
- args.push_back(cUnit->irb->getInt32(fieldIndex));
- llvm::Function* intr = cUnit->intrinsic_helper->GetIntrinsicFunction(id);
- llvm::Value* res = cUnit->irb->CreateCall(intr, args);
- DefineValue(cUnit, res, rlDest.origSReg);
+ args.push_back(cu->irb->getInt32(opt_flags));
+ args.push_back(GetLLVMValue(cu, rl_obj.orig_sreg));
+ args.push_back(cu->irb->getInt32(field_index));
+ llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
+ llvm::Value* res = cu->irb->CreateCall(intr, args);
+ DefineValue(cu, res, rl_dest.orig_sreg);
}
-static void ConvertIput(CompilationUnit* cUnit, int optFlags,
+static void ConvertIput(CompilationUnit* cu, int opt_flags,
greenland::IntrinsicHelper::IntrinsicId id,
- RegLocation rlSrc, RegLocation rlObj, int fieldIndex)
+ RegLocation rl_src, RegLocation rl_obj, int field_index)
{
llvm::SmallVector<llvm::Value*, 4> args;
- args.push_back(cUnit->irb->getInt32(optFlags));
- args.push_back(GetLLVMValue(cUnit, rlSrc.origSReg));
- args.push_back(GetLLVMValue(cUnit, rlObj.origSReg));
- args.push_back(cUnit->irb->getInt32(fieldIndex));
- llvm::Function* intr = cUnit->intrinsic_helper->GetIntrinsicFunction(id);
- cUnit->irb->CreateCall(intr, args);
+ args.push_back(cu->irb->getInt32(opt_flags));
+ args.push_back(GetLLVMValue(cu, rl_src.orig_sreg));
+ args.push_back(GetLLVMValue(cu, rl_obj.orig_sreg));
+ args.push_back(cu->irb->getInt32(field_index));
+ llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
+ cu->irb->CreateCall(intr, args);
}
-static void ConvertInstanceOf(CompilationUnit* cUnit, uint32_t type_idx,
- RegLocation rlDest, RegLocation rlSrc)
+static void ConvertInstanceOf(CompilationUnit* cu, uint32_t type_idx,
+ RegLocation rl_dest, RegLocation rl_src)
{
greenland::IntrinsicHelper::IntrinsicId id;
id = greenland::IntrinsicHelper::InstanceOf;
- llvm::Function* intr = cUnit->intrinsic_helper->GetIntrinsicFunction(id);
+ llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
llvm::SmallVector<llvm::Value*, 2> args;
- args.push_back(cUnit->irb->getInt32(type_idx));
- args.push_back(GetLLVMValue(cUnit, rlSrc.origSReg));
- llvm::Value* res = cUnit->irb->CreateCall(intr, args);
- DefineValue(cUnit, res, rlDest.origSReg);
+ args.push_back(cu->irb->getInt32(type_idx));
+ args.push_back(GetLLVMValue(cu, rl_src.orig_sreg));
+ llvm::Value* res = cu->irb->CreateCall(intr, args);
+ DefineValue(cu, res, rl_dest.orig_sreg);
}
-static void ConvertIntToLong(CompilationUnit* cUnit, RegLocation rlDest, RegLocation rlSrc)
+static void ConvertIntToLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
{
- llvm::Value* res = cUnit->irb->CreateSExt(GetLLVMValue(cUnit, rlSrc.origSReg),
- cUnit->irb->getInt64Ty());
- DefineValue(cUnit, res, rlDest.origSReg);
+ llvm::Value* res = cu->irb->CreateSExt(GetLLVMValue(cu, rl_src.orig_sreg),
+ cu->irb->getInt64Ty());
+ DefineValue(cu, res, rl_dest.orig_sreg);
}
-static void ConvertLongToInt(CompilationUnit* cUnit, RegLocation rlDest, RegLocation rlSrc)
+static void ConvertLongToInt(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
{
- llvm::Value* src = GetLLVMValue(cUnit, rlSrc.origSReg);
- llvm::Value* res = cUnit->irb->CreateTrunc(src, cUnit->irb->getInt32Ty());
- DefineValue(cUnit, res, rlDest.origSReg);
+ llvm::Value* src = GetLLVMValue(cu, rl_src.orig_sreg);
+ llvm::Value* res = cu->irb->CreateTrunc(src, cu->irb->getInt32Ty());
+ DefineValue(cu, res, rl_dest.orig_sreg);
}
-static void ConvertFloatToDouble(CompilationUnit* cUnit, RegLocation rlDest, RegLocation rlSrc)
+static void ConvertFloatToDouble(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
{
- llvm::Value* src = GetLLVMValue(cUnit, rlSrc.origSReg);
- llvm::Value* res = cUnit->irb->CreateFPExt(src, cUnit->irb->getDoubleTy());
- DefineValue(cUnit, res, rlDest.origSReg);
+ llvm::Value* src = GetLLVMValue(cu, rl_src.orig_sreg);
+ llvm::Value* res = cu->irb->CreateFPExt(src, cu->irb->getDoubleTy());
+ DefineValue(cu, res, rl_dest.orig_sreg);
}
-static void ConvertDoubleToFloat(CompilationUnit* cUnit, RegLocation rlDest, RegLocation rlSrc)
+static void ConvertDoubleToFloat(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
{
- llvm::Value* src = GetLLVMValue(cUnit, rlSrc.origSReg);
- llvm::Value* res = cUnit->irb->CreateFPTrunc(src, cUnit->irb->getFloatTy());
- DefineValue(cUnit, res, rlDest.origSReg);
+ llvm::Value* src = GetLLVMValue(cu, rl_src.orig_sreg);
+ llvm::Value* res = cu->irb->CreateFPTrunc(src, cu->irb->getFloatTy());
+ DefineValue(cu, res, rl_dest.orig_sreg);
}
-static void ConvertWideComparison(CompilationUnit* cUnit,
+static void ConvertWideComparison(CompilationUnit* cu,
greenland::IntrinsicHelper::IntrinsicId id,
- RegLocation rlDest, RegLocation rlSrc1,
- RegLocation rlSrc2)
+ RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2)
{
- DCHECK_EQ(rlSrc1.fp, rlSrc2.fp);
- DCHECK_EQ(rlSrc1.wide, rlSrc2.wide);
- llvm::Function* intr = cUnit->intrinsic_helper->GetIntrinsicFunction(id);
+ DCHECK_EQ(rl_src1.fp, rl_src2.fp);
+ DCHECK_EQ(rl_src1.wide, rl_src2.wide);
+ llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
llvm::SmallVector<llvm::Value*, 2> args;
- args.push_back(GetLLVMValue(cUnit, rlSrc1.origSReg));
- args.push_back(GetLLVMValue(cUnit, rlSrc2.origSReg));
- llvm::Value* res = cUnit->irb->CreateCall(intr, args);
- DefineValue(cUnit, res, rlDest.origSReg);
+ args.push_back(GetLLVMValue(cu, rl_src1.orig_sreg));
+ args.push_back(GetLLVMValue(cu, rl_src2.orig_sreg));
+ llvm::Value* res = cu->irb->CreateCall(intr, args);
+ DefineValue(cu, res, rl_dest.orig_sreg);
}
-static void ConvertIntNarrowing(CompilationUnit* cUnit, RegLocation rlDest, RegLocation rlSrc,
+static void ConvertIntNarrowing(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src,
greenland::IntrinsicHelper::IntrinsicId id)
{
- llvm::Function* intr = cUnit->intrinsic_helper->GetIntrinsicFunction(id);
+ llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
llvm::Value* res =
- cUnit->irb->CreateCall(intr, GetLLVMValue(cUnit, rlSrc.origSReg));
- DefineValue(cUnit, res, rlDest.origSReg);
+ cu->irb->CreateCall(intr, GetLLVMValue(cu, rl_src.orig_sreg));
+ DefineValue(cu, res, rl_dest.orig_sreg);
}
-static void ConvertNeg(CompilationUnit* cUnit, RegLocation rlDest, RegLocation rlSrc)
+static void ConvertNeg(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
{
- llvm::Value* res = cUnit->irb->CreateNeg(GetLLVMValue(cUnit, rlSrc.origSReg));
- DefineValue(cUnit, res, rlDest.origSReg);
+ llvm::Value* res = cu->irb->CreateNeg(GetLLVMValue(cu, rl_src.orig_sreg));
+ DefineValue(cu, res, rl_dest.orig_sreg);
}
-static void ConvertIntToFP(CompilationUnit* cUnit, llvm::Type* ty, RegLocation rlDest,
- RegLocation rlSrc)
+static void ConvertIntToFP(CompilationUnit* cu, llvm::Type* ty, RegLocation rl_dest,
+ RegLocation rl_src)
{
llvm::Value* res =
- cUnit->irb->CreateSIToFP(GetLLVMValue(cUnit, rlSrc.origSReg), ty);
- DefineValue(cUnit, res, rlDest.origSReg);
+ cu->irb->CreateSIToFP(GetLLVMValue(cu, rl_src.orig_sreg), ty);
+ DefineValue(cu, res, rl_dest.orig_sreg);
}
-static void ConvertFPToInt(CompilationUnit* cUnit, greenland::IntrinsicHelper::IntrinsicId id,
- RegLocation rlDest,
- RegLocation rlSrc)
+static void ConvertFPToInt(CompilationUnit* cu, greenland::IntrinsicHelper::IntrinsicId id,
+ RegLocation rl_dest,
+ RegLocation rl_src)
{
- llvm::Function* intr = cUnit->intrinsic_helper->GetIntrinsicFunction(id);
- llvm::Value* res = cUnit->irb->CreateCall(intr, GetLLVMValue(cUnit, rlSrc.origSReg));
- DefineValue(cUnit, res, rlDest.origSReg);
+ llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
+ llvm::Value* res = cu->irb->CreateCall(intr, GetLLVMValue(cu, rl_src.orig_sreg));
+ DefineValue(cu, res, rl_dest.orig_sreg);
}
-static void ConvertNegFP(CompilationUnit* cUnit, RegLocation rlDest, RegLocation rlSrc)
+static void ConvertNegFP(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
{
llvm::Value* res =
- cUnit->irb->CreateFNeg(GetLLVMValue(cUnit, rlSrc.origSReg));
- DefineValue(cUnit, res, rlDest.origSReg);
+ cu->irb->CreateFNeg(GetLLVMValue(cu, rl_src.orig_sreg));
+ DefineValue(cu, res, rl_dest.orig_sreg);
}
-static void ConvertNot(CompilationUnit* cUnit, RegLocation rlDest, RegLocation rlSrc)
+static void ConvertNot(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
{
- llvm::Value* src = GetLLVMValue(cUnit, rlSrc.origSReg);
- llvm::Value* res = cUnit->irb->CreateXor(src, static_cast<uint64_t>(-1));
- DefineValue(cUnit, res, rlDest.origSReg);
+ llvm::Value* src = GetLLVMValue(cu, rl_src.orig_sreg);
+ llvm::Value* res = cu->irb->CreateXor(src, static_cast<uint64_t>(-1));
+ DefineValue(cu, res, rl_dest.orig_sreg);
}
/*
@@ -847,65 +847,65 @@
* load/store utilities here, or target-dependent genXX() handlers
* when necessary.
*/
-static bool ConvertMIRNode(CompilationUnit* cUnit, MIR* mir, BasicBlock* bb,
- llvm::BasicBlock* llvmBB, LIR* labelList)
+static bool ConvertMIRNode(CompilationUnit* cu, MIR* mir, BasicBlock* bb,
+ llvm::BasicBlock* llvm_bb, LIR* label_list)
{
bool res = false; // Assume success
- RegLocation rlSrc[3];
- RegLocation rlDest = badLoc;
+ RegLocation rl_src[3];
+ RegLocation rl_dest = bad_loc;
Instruction::Code opcode = mir->dalvikInsn.opcode;
- int opVal = opcode;
+ int op_val = opcode;
uint32_t vB = mir->dalvikInsn.vB;
uint32_t vC = mir->dalvikInsn.vC;
- int optFlags = mir->optimizationFlags;
+ int opt_flags = mir->optimization_flags;
- bool objectDefinition = false;
+ bool object_definition = false;
- if (cUnit->printMe) {
- if (opVal < kMirOpFirst) {
- LOG(INFO) << ".. " << Instruction::Name(opcode) << " 0x" << std::hex << opVal;
+ if (cu->verbose) {
+ if (op_val < kMirOpFirst) {
+ LOG(INFO) << ".. " << Instruction::Name(opcode) << " 0x" << std::hex << op_val;
} else {
- LOG(INFO) << extendedMIROpNames[opVal - kMirOpFirst] << " 0x" << std::hex << opVal;
+ LOG(INFO) << extended_mir_op_names[op_val - kMirOpFirst] << " 0x" << std::hex << op_val;
}
}
/* Prep Src and Dest locations */
- int nextSreg = 0;
- int nextLoc = 0;
- int attrs = oatDataFlowAttributes[opcode];
- rlSrc[0] = rlSrc[1] = rlSrc[2] = badLoc;
+ int next_sreg = 0;
+ int next_loc = 0;
+ int attrs = oat_data_flow_attributes[opcode];
+ rl_src[0] = rl_src[1] = rl_src[2] = bad_loc;
if (attrs & DF_UA) {
if (attrs & DF_A_WIDE) {
- rlSrc[nextLoc++] = GetSrcWide(cUnit, mir, nextSreg);
- nextSreg+= 2;
+ rl_src[next_loc++] = GetSrcWide(cu, mir, next_sreg);
+ next_sreg+= 2;
} else {
- rlSrc[nextLoc++] = GetSrc(cUnit, mir, nextSreg);
- nextSreg++;
+ rl_src[next_loc++] = GetSrc(cu, mir, next_sreg);
+ next_sreg++;
}
}
if (attrs & DF_UB) {
if (attrs & DF_B_WIDE) {
- rlSrc[nextLoc++] = GetSrcWide(cUnit, mir, nextSreg);
- nextSreg+= 2;
+ rl_src[next_loc++] = GetSrcWide(cu, mir, next_sreg);
+ next_sreg+= 2;
} else {
- rlSrc[nextLoc++] = GetSrc(cUnit, mir, nextSreg);
- nextSreg++;
+ rl_src[next_loc++] = GetSrc(cu, mir, next_sreg);
+ next_sreg++;
}
}
if (attrs & DF_UC) {
if (attrs & DF_C_WIDE) {
- rlSrc[nextLoc++] = GetSrcWide(cUnit, mir, nextSreg);
+ rl_src[next_loc++] = GetSrcWide(cu, mir, next_sreg);
} else {
- rlSrc[nextLoc++] = GetSrc(cUnit, mir, nextSreg);
+ rl_src[next_loc++] = GetSrc(cu, mir, next_sreg);
}
}
if (attrs & DF_DA) {
if (attrs & DF_A_WIDE) {
- rlDest = GetDestWide(cUnit, mir);
+ rl_dest = GetDestWide(cu, mir);
} else {
- rlDest = GetDest(cUnit, mir);
- if (rlDest.ref) {
- objectDefinition = true;
+ rl_dest = GetDest(cu, mir);
+ if (rl_dest.ref) {
+ object_definition = true;
}
}
}
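The attribute-driven prep above deserves a walk-through: oat_data_flow_attributes encodes, per opcode, which operands are used or defined and which are wide, and each wide operand consumes two s-registers. For a wide binary op such as add-long (the exact attribute mask is an assumption):

  // attrs ~ DF_UB|DF_B_WIDE|DF_UC|DF_C_WIDE|DF_DA|DF_A_WIDE gives:
  rl_src[0] = GetSrcWide(cu, mir, 0);  // first source, s-regs 0-1
  rl_src[1] = GetSrcWide(cu, mir, 2);  // second source, s-regs 2-3
  rl_dest   = GetDestWide(cu, mir);    // wide destination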
@@ -930,9 +930,9 @@
* Insert a dummy intrinsic copy call, which will be recognized
* by the quick path and removed by the portable path.
*/
- llvm::Value* src = GetLLVMValue(cUnit, rlSrc[0].origSReg);
- llvm::Value* res = EmitCopy(cUnit, src, rlDest);
- DefineValue(cUnit, res, rlDest.origSReg);
+ llvm::Value* src = GetLLVMValue(cu, rl_src[0].orig_sreg);
+ llvm::Value* res = EmitCopy(cu, src, rl_dest);
+ DefineValue(cu, res, rl_dest.orig_sreg);
}
break;
@@ -940,11 +940,11 @@
case Instruction::CONST_4:
case Instruction::CONST_16: {
if (vB == 0) {
- objectDefinition = true;
+ object_definition = true;
}
- llvm::Constant* immValue = cUnit->irb->GetJInt(vB);
- llvm::Value* res = EmitConst(cUnit, immValue, rlDest);
- DefineValue(cUnit, res, rlDest.origSReg);
+ llvm::Constant* imm_value = cu->irb->GetJInt(vB);
+ llvm::Value* res = EmitConst(cu, imm_value, rl_dest);
+ DefineValue(cu, res, rl_dest.orig_sreg);
}
break;
@@ -952,166 +952,166 @@
case Instruction::CONST_WIDE_32: {
// Sign extend to 64 bits
int64_t imm = static_cast<int32_t>(vB);
- llvm::Constant* immValue = cUnit->irb->GetJLong(imm);
- llvm::Value* res = EmitConst(cUnit, immValue, rlDest);
- DefineValue(cUnit, res, rlDest.origSReg);
+ llvm::Constant* imm_value = cu->irb->GetJLong(imm);
+ llvm::Value* res = EmitConst(cu, imm_value, rl_dest);
+ DefineValue(cu, res, rl_dest.orig_sreg);
}
break;
case Instruction::CONST_HIGH16: {
- llvm::Constant* immValue = cUnit->irb->GetJInt(vB << 16);
- llvm::Value* res = EmitConst(cUnit, immValue, rlDest);
- DefineValue(cUnit, res, rlDest.origSReg);
+ llvm::Constant* imm_value = cu->irb->GetJInt(vB << 16);
+ llvm::Value* res = EmitConst(cu, imm_value, rl_dest);
+ DefineValue(cu, res, rl_dest.orig_sreg);
}
break;
case Instruction::CONST_WIDE: {
- llvm::Constant* immValue =
- cUnit->irb->GetJLong(mir->dalvikInsn.vB_wide);
- llvm::Value* res = EmitConst(cUnit, immValue, rlDest);
- DefineValue(cUnit, res, rlDest.origSReg);
+ llvm::Constant* imm_value =
+ cu->irb->GetJLong(mir->dalvikInsn.vB_wide);
+ llvm::Value* res = EmitConst(cu, imm_value, rl_dest);
+ DefineValue(cu, res, rl_dest.orig_sreg);
}
break;
case Instruction::CONST_WIDE_HIGH16: {
int64_t imm = static_cast<int64_t>(vB) << 48;
- llvm::Constant* immValue = cUnit->irb->GetJLong(imm);
- llvm::Value* res = EmitConst(cUnit, immValue, rlDest);
- DefineValue(cUnit, res, rlDest.origSReg);
+ llvm::Constant* imm_value = cu->irb->GetJLong(imm);
+ llvm::Value* res = EmitConst(cu, imm_value, rl_dest);
+ DefineValue(cu, res, rl_dest.orig_sreg);
}
break;
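Two worked examples for the wide-constant cases above (values chosen purely for illustration): CONST_WIDE_32 sign-extends its 32-bit payload, while CONST_WIDE_HIGH16 parks a 16-bit payload in the top bits, which is how common double constants are encoded compactly.

  static_assert(static_cast<int64_t>(static_cast<int32_t>(0xFFFFFFFF)) == -1,
                "CONST_WIDE_32: payload is sign-extended to 64 bits");
  static_assert((static_cast<int64_t>(0x3FF0) << 48) == 0x3FF0000000000000LL,
                "CONST_WIDE_HIGH16: 0x3FF0 << 48 is the bit pattern of 1.0");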
case Instruction::SPUT_OBJECT:
- ConvertSput(cUnit, vB, greenland::IntrinsicHelper::HLSputObject,
- rlSrc[0]);
+ ConvertSput(cu, vB, greenland::IntrinsicHelper::HLSputObject,
+ rl_src[0]);
break;
case Instruction::SPUT:
- if (rlSrc[0].fp) {
- ConvertSput(cUnit, vB, greenland::IntrinsicHelper::HLSputFloat,
- rlSrc[0]);
+ if (rl_src[0].fp) {
+ ConvertSput(cu, vB, greenland::IntrinsicHelper::HLSputFloat,
+ rl_src[0]);
} else {
- ConvertSput(cUnit, vB, greenland::IntrinsicHelper::HLSput, rlSrc[0]);
+ ConvertSput(cu, vB, greenland::IntrinsicHelper::HLSput, rl_src[0]);
}
break;
case Instruction::SPUT_BOOLEAN:
- ConvertSput(cUnit, vB, greenland::IntrinsicHelper::HLSputBoolean,
- rlSrc[0]);
+ ConvertSput(cu, vB, greenland::IntrinsicHelper::HLSputBoolean,
+ rl_src[0]);
break;
case Instruction::SPUT_BYTE:
- ConvertSput(cUnit, vB, greenland::IntrinsicHelper::HLSputByte, rlSrc[0]);
+ ConvertSput(cu, vB, greenland::IntrinsicHelper::HLSputByte, rl_src[0]);
break;
case Instruction::SPUT_CHAR:
- ConvertSput(cUnit, vB, greenland::IntrinsicHelper::HLSputChar, rlSrc[0]);
+ ConvertSput(cu, vB, greenland::IntrinsicHelper::HLSputChar, rl_src[0]);
break;
case Instruction::SPUT_SHORT:
- ConvertSput(cUnit, vB, greenland::IntrinsicHelper::HLSputShort, rlSrc[0]);
+ ConvertSput(cu, vB, greenland::IntrinsicHelper::HLSputShort, rl_src[0]);
break;
case Instruction::SPUT_WIDE:
- if (rlSrc[0].fp) {
- ConvertSput(cUnit, vB, greenland::IntrinsicHelper::HLSputDouble,
- rlSrc[0]);
+ if (rl_src[0].fp) {
+ ConvertSput(cu, vB, greenland::IntrinsicHelper::HLSputDouble,
+ rl_src[0]);
} else {
- ConvertSput(cUnit, vB, greenland::IntrinsicHelper::HLSputWide,
- rlSrc[0]);
+ ConvertSput(cu, vB, greenland::IntrinsicHelper::HLSputWide,
+ rl_src[0]);
}
break;
case Instruction::SGET_OBJECT:
- ConvertSget(cUnit, vB, greenland::IntrinsicHelper::HLSgetObject, rlDest);
+ ConvertSget(cu, vB, greenland::IntrinsicHelper::HLSgetObject, rl_dest);
break;
case Instruction::SGET:
- if (rlDest.fp) {
- ConvertSget(cUnit, vB, greenland::IntrinsicHelper::HLSgetFloat, rlDest);
+ if (rl_dest.fp) {
+ ConvertSget(cu, vB, greenland::IntrinsicHelper::HLSgetFloat, rl_dest);
} else {
- ConvertSget(cUnit, vB, greenland::IntrinsicHelper::HLSget, rlDest);
+ ConvertSget(cu, vB, greenland::IntrinsicHelper::HLSget, rl_dest);
}
break;
case Instruction::SGET_BOOLEAN:
- ConvertSget(cUnit, vB, greenland::IntrinsicHelper::HLSgetBoolean, rlDest);
+ ConvertSget(cu, vB, greenland::IntrinsicHelper::HLSgetBoolean, rl_dest);
break;
case Instruction::SGET_BYTE:
- ConvertSget(cUnit, vB, greenland::IntrinsicHelper::HLSgetByte, rlDest);
+ ConvertSget(cu, vB, greenland::IntrinsicHelper::HLSgetByte, rl_dest);
break;
case Instruction::SGET_CHAR:
- ConvertSget(cUnit, vB, greenland::IntrinsicHelper::HLSgetChar, rlDest);
+ ConvertSget(cu, vB, greenland::IntrinsicHelper::HLSgetChar, rl_dest);
break;
case Instruction::SGET_SHORT:
- ConvertSget(cUnit, vB, greenland::IntrinsicHelper::HLSgetShort, rlDest);
+ ConvertSget(cu, vB, greenland::IntrinsicHelper::HLSgetShort, rl_dest);
break;
case Instruction::SGET_WIDE:
- if (rlDest.fp) {
- ConvertSget(cUnit, vB, greenland::IntrinsicHelper::HLSgetDouble,
- rlDest);
+ if (rl_dest.fp) {
+ ConvertSget(cu, vB, greenland::IntrinsicHelper::HLSgetDouble,
+ rl_dest);
} else {
- ConvertSget(cUnit, vB, greenland::IntrinsicHelper::HLSgetWide, rlDest);
+ ConvertSget(cu, vB, greenland::IntrinsicHelper::HLSgetWide, rl_dest);
}
break;
case Instruction::RETURN_WIDE:
case Instruction::RETURN:
case Instruction::RETURN_OBJECT: {
- if (!(cUnit->attrs & METHOD_IS_LEAF)) {
- EmitSuspendCheck(cUnit);
+ if (!(cu->attrs & METHOD_IS_LEAF)) {
+ EmitSuspendCheck(cu);
}
- EmitPopShadowFrame(cUnit);
- cUnit->irb->CreateRet(GetLLVMValue(cUnit, rlSrc[0].origSReg));
- bb->hasReturn = true;
+ EmitPopShadowFrame(cu);
+ cu->irb->CreateRet(GetLLVMValue(cu, rl_src[0].orig_sreg));
+ bb->has_return = true;
}
break;
case Instruction::RETURN_VOID: {
- if (!(cUnit->attrs & METHOD_IS_LEAF)) {
- EmitSuspendCheck(cUnit);
+ if (!(cu->attrs & METHOD_IS_LEAF)) {
+ EmitSuspendCheck(cu);
}
- EmitPopShadowFrame(cUnit);
- cUnit->irb->CreateRetVoid();
- bb->hasReturn = true;
+ EmitPopShadowFrame(cu);
+ cu->irb->CreateRetVoid();
+ bb->has_return = true;
}
break;
case Instruction::IF_EQ:
- ConvertCompareAndBranch(cUnit, bb, mir, kCondEq, rlSrc[0], rlSrc[1]);
+ ConvertCompareAndBranch(cu, bb, mir, kCondEq, rl_src[0], rl_src[1]);
break;
case Instruction::IF_NE:
- ConvertCompareAndBranch(cUnit, bb, mir, kCondNe, rlSrc[0], rlSrc[1]);
+ ConvertCompareAndBranch(cu, bb, mir, kCondNe, rl_src[0], rl_src[1]);
break;
case Instruction::IF_LT:
- ConvertCompareAndBranch(cUnit, bb, mir, kCondLt, rlSrc[0], rlSrc[1]);
+ ConvertCompareAndBranch(cu, bb, mir, kCondLt, rl_src[0], rl_src[1]);
break;
case Instruction::IF_GE:
- ConvertCompareAndBranch(cUnit, bb, mir, kCondGe, rlSrc[0], rlSrc[1]);
+ ConvertCompareAndBranch(cu, bb, mir, kCondGe, rl_src[0], rl_src[1]);
break;
case Instruction::IF_GT:
- ConvertCompareAndBranch(cUnit, bb, mir, kCondGt, rlSrc[0], rlSrc[1]);
+ ConvertCompareAndBranch(cu, bb, mir, kCondGt, rl_src[0], rl_src[1]);
break;
case Instruction::IF_LE:
- ConvertCompareAndBranch(cUnit, bb, mir, kCondLe, rlSrc[0], rlSrc[1]);
+ ConvertCompareAndBranch(cu, bb, mir, kCondLe, rl_src[0], rl_src[1]);
break;
case Instruction::IF_EQZ:
- ConvertCompareZeroAndBranch(cUnit, bb, mir, kCondEq, rlSrc[0]);
+ ConvertCompareZeroAndBranch(cu, bb, mir, kCondEq, rl_src[0]);
break;
case Instruction::IF_NEZ:
- ConvertCompareZeroAndBranch(cUnit, bb, mir, kCondNe, rlSrc[0]);
+ ConvertCompareZeroAndBranch(cu, bb, mir, kCondNe, rl_src[0]);
break;
case Instruction::IF_LTZ:
- ConvertCompareZeroAndBranch(cUnit, bb, mir, kCondLt, rlSrc[0]);
+ ConvertCompareZeroAndBranch(cu, bb, mir, kCondLt, rl_src[0]);
break;
case Instruction::IF_GEZ:
- ConvertCompareZeroAndBranch(cUnit, bb, mir, kCondGe, rlSrc[0]);
+ ConvertCompareZeroAndBranch(cu, bb, mir, kCondGe, rl_src[0]);
break;
case Instruction::IF_GTZ:
- ConvertCompareZeroAndBranch(cUnit, bb, mir, kCondGt, rlSrc[0]);
+ ConvertCompareZeroAndBranch(cu, bb, mir, kCondGt, rl_src[0]);
break;
case Instruction::IF_LEZ:
- ConvertCompareZeroAndBranch(cUnit, bb, mir, kCondLe, rlSrc[0]);
+ ConvertCompareZeroAndBranch(cu, bb, mir, kCondLe, rl_src[0]);
break;
case Instruction::GOTO:
case Instruction::GOTO_16:
case Instruction::GOTO_32: {
- if (bb->taken->startOffset <= bb->startOffset) {
- EmitSuspendCheck(cUnit);
+ if (bb->taken->start_offset <= bb->start_offset) {
+ EmitSuspendCheck(cu);
}
- cUnit->irb->CreateBr(GetLLVMBlock(cUnit, bb->taken->id));
+ cu->irb->CreateBr(GetLLVMBlock(cu, bb->taken->id));
}
break;
@@ -1119,249 +1119,249 @@
case Instruction::ADD_LONG_2ADDR:
case Instruction::ADD_INT:
case Instruction::ADD_INT_2ADDR:
- ConvertArithOp(cUnit, kOpAdd, rlDest, rlSrc[0], rlSrc[1]);
+ ConvertArithOp(cu, kOpAdd, rl_dest, rl_src[0], rl_src[1]);
break;
case Instruction::SUB_LONG:
case Instruction::SUB_LONG_2ADDR:
case Instruction::SUB_INT:
case Instruction::SUB_INT_2ADDR:
- ConvertArithOp(cUnit, kOpSub, rlDest, rlSrc[0], rlSrc[1]);
+ ConvertArithOp(cu, kOpSub, rl_dest, rl_src[0], rl_src[1]);
break;
case Instruction::MUL_LONG:
case Instruction::MUL_LONG_2ADDR:
case Instruction::MUL_INT:
case Instruction::MUL_INT_2ADDR:
- ConvertArithOp(cUnit, kOpMul, rlDest, rlSrc[0], rlSrc[1]);
+ ConvertArithOp(cu, kOpMul, rl_dest, rl_src[0], rl_src[1]);
break;
case Instruction::DIV_LONG:
case Instruction::DIV_LONG_2ADDR:
case Instruction::DIV_INT:
case Instruction::DIV_INT_2ADDR:
- ConvertArithOp(cUnit, kOpDiv, rlDest, rlSrc[0], rlSrc[1]);
+ ConvertArithOp(cu, kOpDiv, rl_dest, rl_src[0], rl_src[1]);
break;
case Instruction::REM_LONG:
case Instruction::REM_LONG_2ADDR:
case Instruction::REM_INT:
case Instruction::REM_INT_2ADDR:
- ConvertArithOp(cUnit, kOpRem, rlDest, rlSrc[0], rlSrc[1]);
+ ConvertArithOp(cu, kOpRem, rl_dest, rl_src[0], rl_src[1]);
break;
case Instruction::AND_LONG:
case Instruction::AND_LONG_2ADDR:
case Instruction::AND_INT:
case Instruction::AND_INT_2ADDR:
- ConvertArithOp(cUnit, kOpAnd, rlDest, rlSrc[0], rlSrc[1]);
+ ConvertArithOp(cu, kOpAnd, rl_dest, rl_src[0], rl_src[1]);
break;
case Instruction::OR_LONG:
case Instruction::OR_LONG_2ADDR:
case Instruction::OR_INT:
case Instruction::OR_INT_2ADDR:
- ConvertArithOp(cUnit, kOpOr, rlDest, rlSrc[0], rlSrc[1]);
+ ConvertArithOp(cu, kOpOr, rl_dest, rl_src[0], rl_src[1]);
break;
case Instruction::XOR_LONG:
case Instruction::XOR_LONG_2ADDR:
case Instruction::XOR_INT:
case Instruction::XOR_INT_2ADDR:
- ConvertArithOp(cUnit, kOpXor, rlDest, rlSrc[0], rlSrc[1]);
+ ConvertArithOp(cu, kOpXor, rl_dest, rl_src[0], rl_src[1]);
break;
case Instruction::SHL_LONG:
case Instruction::SHL_LONG_2ADDR:
- ConvertShift(cUnit, greenland::IntrinsicHelper::SHLLong,
- rlDest, rlSrc[0], rlSrc[1]);
+ ConvertShift(cu, greenland::IntrinsicHelper::SHLLong,
+ rl_dest, rl_src[0], rl_src[1]);
break;
case Instruction::SHL_INT:
case Instruction::SHL_INT_2ADDR:
- ConvertShift(cUnit, greenland::IntrinsicHelper::SHLInt,
- rlDest, rlSrc[0], rlSrc[1]);
+ ConvertShift(cu, greenland::IntrinsicHelper::SHLInt,
+ rl_dest, rl_src[0], rl_src[1]);
break;
case Instruction::SHR_LONG:
case Instruction::SHR_LONG_2ADDR:
- ConvertShift(cUnit, greenland::IntrinsicHelper::SHRLong,
- rlDest, rlSrc[0], rlSrc[1]);
+ ConvertShift(cu, greenland::IntrinsicHelper::SHRLong,
+ rl_dest, rl_src[0], rl_src[1]);
break;
case Instruction::SHR_INT:
case Instruction::SHR_INT_2ADDR:
- ConvertShift(cUnit, greenland::IntrinsicHelper::SHRInt,
- rlDest, rlSrc[0], rlSrc[1]);
+ ConvertShift(cu, greenland::IntrinsicHelper::SHRInt,
+ rl_dest, rl_src[0], rl_src[1]);
break;
case Instruction::USHR_LONG:
case Instruction::USHR_LONG_2ADDR:
- ConvertShift(cUnit, greenland::IntrinsicHelper::USHRLong,
- rlDest, rlSrc[0], rlSrc[1]);
+ ConvertShift(cu, greenland::IntrinsicHelper::USHRLong,
+ rl_dest, rl_src[0], rl_src[1]);
break;
case Instruction::USHR_INT:
case Instruction::USHR_INT_2ADDR:
- ConvertShift(cUnit, greenland::IntrinsicHelper::USHRInt,
- rlDest, rlSrc[0], rlSrc[1]);
+ ConvertShift(cu, greenland::IntrinsicHelper::USHRInt,
+ rl_dest, rl_src[0], rl_src[1]);
break;
case Instruction::ADD_INT_LIT16:
case Instruction::ADD_INT_LIT8:
- ConvertArithOpLit(cUnit, kOpAdd, rlDest, rlSrc[0], vC);
+ ConvertArithOpLit(cu, kOpAdd, rl_dest, rl_src[0], vC);
break;
case Instruction::RSUB_INT:
case Instruction::RSUB_INT_LIT8:
- ConvertArithOpLit(cUnit, kOpRsub, rlDest, rlSrc[0], vC);
+ ConvertArithOpLit(cu, kOpRsub, rl_dest, rl_src[0], vC);
break;
case Instruction::MUL_INT_LIT16:
case Instruction::MUL_INT_LIT8:
- ConvertArithOpLit(cUnit, kOpMul, rlDest, rlSrc[0], vC);
+ ConvertArithOpLit(cu, kOpMul, rl_dest, rl_src[0], vC);
break;
case Instruction::DIV_INT_LIT16:
case Instruction::DIV_INT_LIT8:
- ConvertArithOpLit(cUnit, kOpDiv, rlDest, rlSrc[0], vC);
+ ConvertArithOpLit(cu, kOpDiv, rl_dest, rl_src[0], vC);
break;
case Instruction::REM_INT_LIT16:
case Instruction::REM_INT_LIT8:
- ConvertArithOpLit(cUnit, kOpRem, rlDest, rlSrc[0], vC);
+ ConvertArithOpLit(cu, kOpRem, rl_dest, rl_src[0], vC);
break;
case Instruction::AND_INT_LIT16:
case Instruction::AND_INT_LIT8:
- ConvertArithOpLit(cUnit, kOpAnd, rlDest, rlSrc[0], vC);
+ ConvertArithOpLit(cu, kOpAnd, rl_dest, rl_src[0], vC);
break;
case Instruction::OR_INT_LIT16:
case Instruction::OR_INT_LIT8:
- ConvertArithOpLit(cUnit, kOpOr, rlDest, rlSrc[0], vC);
+ ConvertArithOpLit(cu, kOpOr, rl_dest, rl_src[0], vC);
break;
case Instruction::XOR_INT_LIT16:
case Instruction::XOR_INT_LIT8:
- ConvertArithOpLit(cUnit, kOpXor, rlDest, rlSrc[0], vC);
+ ConvertArithOpLit(cu, kOpXor, rl_dest, rl_src[0], vC);
break;
case Instruction::SHL_INT_LIT8:
- ConvertShiftLit(cUnit, greenland::IntrinsicHelper::SHLInt,
- rlDest, rlSrc[0], vC & 0x1f);
+ ConvertShiftLit(cu, greenland::IntrinsicHelper::SHLInt,
+ rl_dest, rl_src[0], vC & 0x1f);
break;
case Instruction::SHR_INT_LIT8:
- ConvertShiftLit(cUnit, greenland::IntrinsicHelper::SHRInt,
- rlDest, rlSrc[0], vC & 0x1f);
+ ConvertShiftLit(cu, greenland::IntrinsicHelper::SHRInt,
+ rl_dest, rl_src[0], vC & 0x1f);
break;
case Instruction::USHR_INT_LIT8:
- ConvertShiftLit(cUnit, greenland::IntrinsicHelper::USHRInt,
- rlDest, rlSrc[0], vC & 0x1f);
+ ConvertShiftLit(cu, greenland::IntrinsicHelper::USHRInt,
+ rl_dest, rl_src[0], vC & 0x1f);
break;
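The vC & 0x1f masking mirrors Dalvik (and Java) semantics: 32-bit shift distances use only their low five bits, so an out-of-range literal wraps rather than being rejected.

  static_assert((33 & 0x1f) == 1, "shl-int/lit8 by #33 shifts by only 1");
  static_assert((32 & 0x1f) == 0, "a 32-bit shift by 32 is a no-op");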
case Instruction::ADD_FLOAT:
case Instruction::ADD_FLOAT_2ADDR:
case Instruction::ADD_DOUBLE:
case Instruction::ADD_DOUBLE_2ADDR:
- ConvertFPArithOp(cUnit, kOpAdd, rlDest, rlSrc[0], rlSrc[1]);
+ ConvertFPArithOp(cu, kOpAdd, rl_dest, rl_src[0], rl_src[1]);
break;
case Instruction::SUB_FLOAT:
case Instruction::SUB_FLOAT_2ADDR:
case Instruction::SUB_DOUBLE:
case Instruction::SUB_DOUBLE_2ADDR:
- ConvertFPArithOp(cUnit, kOpSub, rlDest, rlSrc[0], rlSrc[1]);
+ ConvertFPArithOp(cu, kOpSub, rl_dest, rl_src[0], rl_src[1]);
break;
case Instruction::MUL_FLOAT:
case Instruction::MUL_FLOAT_2ADDR:
case Instruction::MUL_DOUBLE:
case Instruction::MUL_DOUBLE_2ADDR:
- ConvertFPArithOp(cUnit, kOpMul, rlDest, rlSrc[0], rlSrc[1]);
+ ConvertFPArithOp(cu, kOpMul, rl_dest, rl_src[0], rl_src[1]);
break;
case Instruction::DIV_FLOAT:
case Instruction::DIV_FLOAT_2ADDR:
case Instruction::DIV_DOUBLE:
case Instruction::DIV_DOUBLE_2ADDR:
- ConvertFPArithOp(cUnit, kOpDiv, rlDest, rlSrc[0], rlSrc[1]);
+ ConvertFPArithOp(cu, kOpDiv, rl_dest, rl_src[0], rl_src[1]);
break;
case Instruction::REM_FLOAT:
case Instruction::REM_FLOAT_2ADDR:
case Instruction::REM_DOUBLE:
case Instruction::REM_DOUBLE_2ADDR:
- ConvertFPArithOp(cUnit, kOpRem, rlDest, rlSrc[0], rlSrc[1]);
+ ConvertFPArithOp(cu, kOpRem, rl_dest, rl_src[0], rl_src[1]);
break;
case Instruction::INVOKE_STATIC:
- ConvertInvoke(cUnit, bb, mir, kStatic, false /*range*/,
+ ConvertInvoke(cu, bb, mir, kStatic, false /*range*/,
false /* NewFilledArray */);
break;
case Instruction::INVOKE_STATIC_RANGE:
- ConvertInvoke(cUnit, bb, mir, kStatic, true /*range*/,
+ ConvertInvoke(cu, bb, mir, kStatic, true /*range*/,
false /* NewFilledArray */);
break;
case Instruction::INVOKE_DIRECT:
- ConvertInvoke(cUnit, bb, mir, kDirect, false /*range*/,
+ ConvertInvoke(cu, bb, mir, kDirect, false /*range*/,
false /* NewFilledArray */);
break;
case Instruction::INVOKE_DIRECT_RANGE:
- ConvertInvoke(cUnit, bb, mir, kDirect, true /*range*/,
+ ConvertInvoke(cu, bb, mir, kDirect, true /*range*/,
false /* NewFilledArray */);
break;
case Instruction::INVOKE_VIRTUAL:
- ConvertInvoke(cUnit, bb, mir, kVirtual, false /*range*/,
+ ConvertInvoke(cu, bb, mir, kVirtual, false /*range*/,
false /* NewFilledArray */);
break;
case Instruction::INVOKE_VIRTUAL_RANGE:
- ConvertInvoke(cUnit, bb, mir, kVirtual, true /*range*/,
+ ConvertInvoke(cu, bb, mir, kVirtual, true /*range*/,
false /* NewFilledArray */);
break;
case Instruction::INVOKE_SUPER:
- ConvertInvoke(cUnit, bb, mir, kSuper, false /*range*/,
+ ConvertInvoke(cu, bb, mir, kSuper, false /*range*/,
false /* NewFilledArray */);
break;
case Instruction::INVOKE_SUPER_RANGE:
- ConvertInvoke(cUnit, bb, mir, kSuper, true /*range*/,
+ ConvertInvoke(cu, bb, mir, kSuper, true /*range*/,
false /* NewFilledArray */);
break;
case Instruction::INVOKE_INTERFACE:
- ConvertInvoke(cUnit, bb, mir, kInterface, false /*range*/,
+ ConvertInvoke(cu, bb, mir, kInterface, false /*range*/,
false /* NewFilledArray */);
break;
case Instruction::INVOKE_INTERFACE_RANGE:
- ConvertInvoke(cUnit, bb, mir, kInterface, true /*range*/,
+ ConvertInvoke(cu, bb, mir, kInterface, true /*range*/,
false /* NewFilledArray */);
break;
case Instruction::FILLED_NEW_ARRAY:
- ConvertInvoke(cUnit, bb, mir, kInterface, false /*range*/,
+ ConvertInvoke(cu, bb, mir, kInterface, false /*range*/,
true /* NewFilledArray */);
break;
case Instruction::FILLED_NEW_ARRAY_RANGE:
- ConvertInvoke(cUnit, bb, mir, kInterface, true /*range*/,
+ ConvertInvoke(cu, bb, mir, kInterface, true /*range*/,
true /* NewFilledArray */);
break;
case Instruction::CONST_STRING:
case Instruction::CONST_STRING_JUMBO:
- ConvertConstObject(cUnit, vB, greenland::IntrinsicHelper::ConstString,
- rlDest);
+ ConvertConstObject(cu, vB, greenland::IntrinsicHelper::ConstString,
+ rl_dest);
break;
case Instruction::CONST_CLASS:
- ConvertConstObject(cUnit, vB, greenland::IntrinsicHelper::ConstClass,
- rlDest);
+ ConvertConstObject(cu, vB, greenland::IntrinsicHelper::ConstClass,
+ rl_dest);
break;
case Instruction::CHECK_CAST:
- ConvertCheckCast(cUnit, vB, rlSrc[0]);
+ ConvertCheckCast(cu, vB, rl_src[0]);
break;
case Instruction::NEW_INSTANCE:
- ConvertNewInstance(cUnit, vB, rlDest);
+ ConvertNewInstance(cu, vB, rl_dest);
break;
case Instruction::MOVE_EXCEPTION:
- ConvertMoveException(cUnit, rlDest);
+ ConvertMoveException(cu, rl_dest);
break;
case Instruction::THROW:
- ConvertThrow(cUnit, rlSrc[0]);
+ ConvertThrow(cu, rl_src[0]);
/*
* If this throw is standalone, terminate.
* If it might rethrow, force termination
* of the following block.
*/
- if (bb->fallThrough == NULL) {
- cUnit->irb->CreateUnreachable();
+ if (bb->fall_through == NULL) {
+ cu->irb->CreateUnreachable();
} else {
- bb->fallThrough->fallThrough = NULL;
- bb->fallThrough->taken = NULL;
+ bb->fall_through->fall_through = NULL;
+ bb->fall_through->taken = NULL;
}
break;
@@ -1375,312 +1375,312 @@
break;
case Instruction::MONITOR_ENTER:
- ConvertMonitorEnterExit(cUnit, optFlags,
+ ConvertMonitorEnterExit(cu, opt_flags,
greenland::IntrinsicHelper::MonitorEnter,
- rlSrc[0]);
+ rl_src[0]);
break;
case Instruction::MONITOR_EXIT:
- ConvertMonitorEnterExit(cUnit, optFlags,
+ ConvertMonitorEnterExit(cu, opt_flags,
greenland::IntrinsicHelper::MonitorExit,
- rlSrc[0]);
+ rl_src[0]);
break;
case Instruction::ARRAY_LENGTH:
- ConvertArrayLength(cUnit, optFlags, rlDest, rlSrc[0]);
+ ConvertArrayLength(cu, opt_flags, rl_dest, rl_src[0]);
break;
case Instruction::NEW_ARRAY:
- ConvertNewArray(cUnit, vC, rlDest, rlSrc[0]);
+ ConvertNewArray(cu, vC, rl_dest, rl_src[0]);
break;
case Instruction::INSTANCE_OF:
- ConvertInstanceOf(cUnit, vC, rlDest, rlSrc[0]);
+ ConvertInstanceOf(cu, vC, rl_dest, rl_src[0]);
break;
case Instruction::AGET:
- if (rlDest.fp) {
- ConvertAget(cUnit, optFlags,
+ if (rl_dest.fp) {
+ ConvertAget(cu, opt_flags,
greenland::IntrinsicHelper::HLArrayGetFloat,
- rlDest, rlSrc[0], rlSrc[1]);
+ rl_dest, rl_src[0], rl_src[1]);
} else {
- ConvertAget(cUnit, optFlags, greenland::IntrinsicHelper::HLArrayGet,
- rlDest, rlSrc[0], rlSrc[1]);
+ ConvertAget(cu, opt_flags, greenland::IntrinsicHelper::HLArrayGet,
+ rl_dest, rl_src[0], rl_src[1]);
}
break;
case Instruction::AGET_OBJECT:
- ConvertAget(cUnit, optFlags, greenland::IntrinsicHelper::HLArrayGetObject,
- rlDest, rlSrc[0], rlSrc[1]);
+ ConvertAget(cu, opt_flags, greenland::IntrinsicHelper::HLArrayGetObject,
+ rl_dest, rl_src[0], rl_src[1]);
break;
case Instruction::AGET_BOOLEAN:
- ConvertAget(cUnit, optFlags,
+ ConvertAget(cu, opt_flags,
greenland::IntrinsicHelper::HLArrayGetBoolean,
- rlDest, rlSrc[0], rlSrc[1]);
+ rl_dest, rl_src[0], rl_src[1]);
break;
case Instruction::AGET_BYTE:
- ConvertAget(cUnit, optFlags, greenland::IntrinsicHelper::HLArrayGetByte,
- rlDest, rlSrc[0], rlSrc[1]);
+ ConvertAget(cu, opt_flags, greenland::IntrinsicHelper::HLArrayGetByte,
+ rl_dest, rl_src[0], rl_src[1]);
break;
case Instruction::AGET_CHAR:
- ConvertAget(cUnit, optFlags, greenland::IntrinsicHelper::HLArrayGetChar,
- rlDest, rlSrc[0], rlSrc[1]);
+ ConvertAget(cu, opt_flags, greenland::IntrinsicHelper::HLArrayGetChar,
+ rl_dest, rl_src[0], rl_src[1]);
break;
case Instruction::AGET_SHORT:
- ConvertAget(cUnit, optFlags, greenland::IntrinsicHelper::HLArrayGetShort,
- rlDest, rlSrc[0], rlSrc[1]);
+ ConvertAget(cu, opt_flags, greenland::IntrinsicHelper::HLArrayGetShort,
+ rl_dest, rl_src[0], rl_src[1]);
break;
case Instruction::AGET_WIDE:
- if (rlDest.fp) {
- ConvertAget(cUnit, optFlags,
+ if (rl_dest.fp) {
+ ConvertAget(cu, opt_flags,
greenland::IntrinsicHelper::HLArrayGetDouble,
- rlDest, rlSrc[0], rlSrc[1]);
+ rl_dest, rl_src[0], rl_src[1]);
} else {
- ConvertAget(cUnit, optFlags, greenland::IntrinsicHelper::HLArrayGetWide,
- rlDest, rlSrc[0], rlSrc[1]);
+ ConvertAget(cu, opt_flags, greenland::IntrinsicHelper::HLArrayGetWide,
+ rl_dest, rl_src[0], rl_src[1]);
}
break;
case Instruction::APUT:
- if (rlSrc[0].fp) {
- ConvertAput(cUnit, optFlags,
+ if (rl_src[0].fp) {
+ ConvertAput(cu, opt_flags,
greenland::IntrinsicHelper::HLArrayPutFloat,
- rlSrc[0], rlSrc[1], rlSrc[2]);
+ rl_src[0], rl_src[1], rl_src[2]);
} else {
- ConvertAput(cUnit, optFlags, greenland::IntrinsicHelper::HLArrayPut,
- rlSrc[0], rlSrc[1], rlSrc[2]);
+ ConvertAput(cu, opt_flags, greenland::IntrinsicHelper::HLArrayPut,
+ rl_src[0], rl_src[1], rl_src[2]);
}
break;
case Instruction::APUT_OBJECT:
- ConvertAput(cUnit, optFlags, greenland::IntrinsicHelper::HLArrayPutObject,
- rlSrc[0], rlSrc[1], rlSrc[2]);
+ ConvertAput(cu, opt_flags, greenland::IntrinsicHelper::HLArrayPutObject,
+ rl_src[0], rl_src[1], rl_src[2]);
break;
case Instruction::APUT_BOOLEAN:
- ConvertAput(cUnit, optFlags,
+ ConvertAput(cu, opt_flags,
greenland::IntrinsicHelper::HLArrayPutBoolean,
- rlSrc[0], rlSrc[1], rlSrc[2]);
+ rl_src[0], rl_src[1], rl_src[2]);
break;
case Instruction::APUT_BYTE:
- ConvertAput(cUnit, optFlags, greenland::IntrinsicHelper::HLArrayPutByte,
- rlSrc[0], rlSrc[1], rlSrc[2]);
+ ConvertAput(cu, opt_flags, greenland::IntrinsicHelper::HLArrayPutByte,
+ rl_src[0], rl_src[1], rl_src[2]);
break;
case Instruction::APUT_CHAR:
- ConvertAput(cUnit, optFlags, greenland::IntrinsicHelper::HLArrayPutChar,
- rlSrc[0], rlSrc[1], rlSrc[2]);
+ ConvertAput(cu, opt_flags, greenland::IntrinsicHelper::HLArrayPutChar,
+ rl_src[0], rl_src[1], rl_src[2]);
break;
case Instruction::APUT_SHORT:
- ConvertAput(cUnit, optFlags, greenland::IntrinsicHelper::HLArrayPutShort,
- rlSrc[0], rlSrc[1], rlSrc[2]);
+ ConvertAput(cu, opt_flags, greenland::IntrinsicHelper::HLArrayPutShort,
+ rl_src[0], rl_src[1], rl_src[2]);
break;
case Instruction::APUT_WIDE:
- if (rlSrc[0].fp) {
- ConvertAput(cUnit, optFlags,
+ if (rl_src[0].fp) {
+ ConvertAput(cu, opt_flags,
greenland::IntrinsicHelper::HLArrayPutDouble,
- rlSrc[0], rlSrc[1], rlSrc[2]);
+ rl_src[0], rl_src[1], rl_src[2]);
} else {
- ConvertAput(cUnit, optFlags, greenland::IntrinsicHelper::HLArrayPutWide,
- rlSrc[0], rlSrc[1], rlSrc[2]);
+ ConvertAput(cu, opt_flags, greenland::IntrinsicHelper::HLArrayPutWide,
+ rl_src[0], rl_src[1], rl_src[2]);
}
break;
case Instruction::IGET:
- if (rlDest.fp) {
- ConvertIget(cUnit, optFlags, greenland::IntrinsicHelper::HLIGetFloat,
- rlDest, rlSrc[0], vC);
+ if (rl_dest.fp) {
+ ConvertIget(cu, opt_flags, greenland::IntrinsicHelper::HLIGetFloat,
+ rl_dest, rl_src[0], vC);
} else {
- ConvertIget(cUnit, optFlags, greenland::IntrinsicHelper::HLIGet,
- rlDest, rlSrc[0], vC);
+ ConvertIget(cu, opt_flags, greenland::IntrinsicHelper::HLIGet,
+ rl_dest, rl_src[0], vC);
}
break;
case Instruction::IGET_OBJECT:
- ConvertIget(cUnit, optFlags, greenland::IntrinsicHelper::HLIGetObject,
- rlDest, rlSrc[0], vC);
+ ConvertIget(cu, opt_flags, greenland::IntrinsicHelper::HLIGetObject,
+ rl_dest, rl_src[0], vC);
break;
case Instruction::IGET_BOOLEAN:
- ConvertIget(cUnit, optFlags, greenland::IntrinsicHelper::HLIGetBoolean,
- rlDest, rlSrc[0], vC);
+ ConvertIget(cu, opt_flags, greenland::IntrinsicHelper::HLIGetBoolean,
+ rl_dest, rl_src[0], vC);
break;
case Instruction::IGET_BYTE:
- ConvertIget(cUnit, optFlags, greenland::IntrinsicHelper::HLIGetByte,
- rlDest, rlSrc[0], vC);
+ ConvertIget(cu, opt_flags, greenland::IntrinsicHelper::HLIGetByte,
+ rl_dest, rl_src[0], vC);
break;
case Instruction::IGET_CHAR:
- ConvertIget(cUnit, optFlags, greenland::IntrinsicHelper::HLIGetChar,
- rlDest, rlSrc[0], vC);
+ ConvertIget(cu, opt_flags, greenland::IntrinsicHelper::HLIGetChar,
+ rl_dest, rl_src[0], vC);
break;
case Instruction::IGET_SHORT:
- ConvertIget(cUnit, optFlags, greenland::IntrinsicHelper::HLIGetShort,
- rlDest, rlSrc[0], vC);
+ ConvertIget(cu, opt_flags, greenland::IntrinsicHelper::HLIGetShort,
+ rl_dest, rl_src[0], vC);
break;
case Instruction::IGET_WIDE:
- if (rlDest.fp) {
- ConvertIget(cUnit, optFlags, greenland::IntrinsicHelper::HLIGetDouble,
- rlDest, rlSrc[0], vC);
+ if (rl_dest.fp) {
+ ConvertIget(cu, opt_flags, greenland::IntrinsicHelper::HLIGetDouble,
+ rl_dest, rl_src[0], vC);
} else {
- ConvertIget(cUnit, optFlags, greenland::IntrinsicHelper::HLIGetWide,
- rlDest, rlSrc[0], vC);
+ ConvertIget(cu, opt_flags, greenland::IntrinsicHelper::HLIGetWide,
+ rl_dest, rl_src[0], vC);
}
break;
case Instruction::IPUT:
- if (rlSrc[0].fp) {
- ConvertIput(cUnit, optFlags, greenland::IntrinsicHelper::HLIPutFloat,
- rlSrc[0], rlSrc[1], vC);
+ if (rl_src[0].fp) {
+ ConvertIput(cu, opt_flags, greenland::IntrinsicHelper::HLIPutFloat,
+ rl_src[0], rl_src[1], vC);
} else {
- ConvertIput(cUnit, optFlags, greenland::IntrinsicHelper::HLIPut,
- rlSrc[0], rlSrc[1], vC);
+ ConvertIput(cu, opt_flags, greenland::IntrinsicHelper::HLIPut,
+ rl_src[0], rl_src[1], vC);
}
break;
case Instruction::IPUT_OBJECT:
- ConvertIput(cUnit, optFlags, greenland::IntrinsicHelper::HLIPutObject,
- rlSrc[0], rlSrc[1], vC);
+ ConvertIput(cu, opt_flags, greenland::IntrinsicHelper::HLIPutObject,
+ rl_src[0], rl_src[1], vC);
break;
case Instruction::IPUT_BOOLEAN:
- ConvertIput(cUnit, optFlags, greenland::IntrinsicHelper::HLIPutBoolean,
- rlSrc[0], rlSrc[1], vC);
+ ConvertIput(cu, opt_flags, greenland::IntrinsicHelper::HLIPutBoolean,
+ rl_src[0], rl_src[1], vC);
break;
case Instruction::IPUT_BYTE:
- ConvertIput(cUnit, optFlags, greenland::IntrinsicHelper::HLIPutByte,
- rlSrc[0], rlSrc[1], vC);
+ ConvertIput(cu, opt_flags, greenland::IntrinsicHelper::HLIPutByte,
+ rl_src[0], rl_src[1], vC);
break;
case Instruction::IPUT_CHAR:
- ConvertIput(cUnit, optFlags, greenland::IntrinsicHelper::HLIPutChar,
- rlSrc[0], rlSrc[1], vC);
+ ConvertIput(cu, opt_flags, greenland::IntrinsicHelper::HLIPutChar,
+ rl_src[0], rl_src[1], vC);
break;
case Instruction::IPUT_SHORT:
- ConvertIput(cUnit, optFlags, greenland::IntrinsicHelper::HLIPutShort,
- rlSrc[0], rlSrc[1], vC);
+ ConvertIput(cu, opt_flags, greenland::IntrinsicHelper::HLIPutShort,
+ rl_src[0], rl_src[1], vC);
break;
case Instruction::IPUT_WIDE:
- if (rlSrc[0].fp) {
- ConvertIput(cUnit, optFlags, greenland::IntrinsicHelper::HLIPutDouble,
- rlSrc[0], rlSrc[1], vC);
+ if (rl_src[0].fp) {
+ ConvertIput(cu, opt_flags, greenland::IntrinsicHelper::HLIPutDouble,
+ rl_src[0], rl_src[1], vC);
} else {
- ConvertIput(cUnit, optFlags, greenland::IntrinsicHelper::HLIPutWide,
- rlSrc[0], rlSrc[1], vC);
+ ConvertIput(cu, opt_flags, greenland::IntrinsicHelper::HLIPutWide,
+ rl_src[0], rl_src[1], vC);
}
break;
case Instruction::FILL_ARRAY_DATA:
- ConvertFillArrayData(cUnit, vB, rlSrc[0]);
+ ConvertFillArrayData(cu, vB, rl_src[0]);
break;
case Instruction::LONG_TO_INT:
- ConvertLongToInt(cUnit, rlDest, rlSrc[0]);
+ ConvertLongToInt(cu, rl_dest, rl_src[0]);
break;
case Instruction::INT_TO_LONG:
- ConvertIntToLong(cUnit, rlDest, rlSrc[0]);
+ ConvertIntToLong(cu, rl_dest, rl_src[0]);
break;
case Instruction::INT_TO_CHAR:
- ConvertIntNarrowing(cUnit, rlDest, rlSrc[0],
+ ConvertIntNarrowing(cu, rl_dest, rl_src[0],
greenland::IntrinsicHelper::IntToChar);
break;
case Instruction::INT_TO_BYTE:
- ConvertIntNarrowing(cUnit, rlDest, rlSrc[0],
+ ConvertIntNarrowing(cu, rl_dest, rl_src[0],
greenland::IntrinsicHelper::IntToByte);
break;
case Instruction::INT_TO_SHORT:
- ConvertIntNarrowing(cUnit, rlDest, rlSrc[0],
+ ConvertIntNarrowing(cu, rl_dest, rl_src[0],
greenland::IntrinsicHelper::IntToShort);
break;
case Instruction::INT_TO_FLOAT:
case Instruction::LONG_TO_FLOAT:
- ConvertIntToFP(cUnit, cUnit->irb->getFloatTy(), rlDest, rlSrc[0]);
+ ConvertIntToFP(cu, cu->irb->getFloatTy(), rl_dest, rl_src[0]);
break;
case Instruction::INT_TO_DOUBLE:
case Instruction::LONG_TO_DOUBLE:
- ConvertIntToFP(cUnit, cUnit->irb->getDoubleTy(), rlDest, rlSrc[0]);
+ ConvertIntToFP(cu, cu->irb->getDoubleTy(), rl_dest, rl_src[0]);
break;
case Instruction::FLOAT_TO_DOUBLE:
- ConvertFloatToDouble(cUnit, rlDest, rlSrc[0]);
+ ConvertFloatToDouble(cu, rl_dest, rl_src[0]);
break;
case Instruction::DOUBLE_TO_FLOAT:
- ConvertDoubleToFloat(cUnit, rlDest, rlSrc[0]);
+ ConvertDoubleToFloat(cu, rl_dest, rl_src[0]);
break;
case Instruction::NEG_LONG:
case Instruction::NEG_INT:
- ConvertNeg(cUnit, rlDest, rlSrc[0]);
+ ConvertNeg(cu, rl_dest, rl_src[0]);
break;
case Instruction::NEG_FLOAT:
case Instruction::NEG_DOUBLE:
- ConvertNegFP(cUnit, rlDest, rlSrc[0]);
+ ConvertNegFP(cu, rl_dest, rl_src[0]);
break;
case Instruction::NOT_LONG:
case Instruction::NOT_INT:
- ConvertNot(cUnit, rlDest, rlSrc[0]);
+ ConvertNot(cu, rl_dest, rl_src[0]);
break;
case Instruction::FLOAT_TO_INT:
- ConvertFPToInt(cUnit, greenland::IntrinsicHelper::F2I, rlDest, rlSrc[0]);
+ ConvertFPToInt(cu, greenland::IntrinsicHelper::F2I, rl_dest, rl_src[0]);
break;
case Instruction::DOUBLE_TO_INT:
- ConvertFPToInt(cUnit, greenland::IntrinsicHelper::D2I, rlDest, rlSrc[0]);
+ ConvertFPToInt(cu, greenland::IntrinsicHelper::D2I, rl_dest, rl_src[0]);
break;
case Instruction::FLOAT_TO_LONG:
- ConvertFPToInt(cUnit, greenland::IntrinsicHelper::F2L, rlDest, rlSrc[0]);
+ ConvertFPToInt(cu, greenland::IntrinsicHelper::F2L, rl_dest, rl_src[0]);
break;
case Instruction::DOUBLE_TO_LONG:
- ConvertFPToInt(cUnit, greenland::IntrinsicHelper::D2L, rlDest, rlSrc[0]);
+ ConvertFPToInt(cu, greenland::IntrinsicHelper::D2L, rl_dest, rl_src[0]);
break;
case Instruction::CMPL_FLOAT:
- ConvertWideComparison(cUnit, greenland::IntrinsicHelper::CmplFloat,
- rlDest, rlSrc[0], rlSrc[1]);
+ ConvertWideComparison(cu, greenland::IntrinsicHelper::CmplFloat,
+ rl_dest, rl_src[0], rl_src[1]);
break;
case Instruction::CMPG_FLOAT:
- ConvertWideComparison(cUnit, greenland::IntrinsicHelper::CmpgFloat,
- rlDest, rlSrc[0], rlSrc[1]);
+ ConvertWideComparison(cu, greenland::IntrinsicHelper::CmpgFloat,
+ rl_dest, rl_src[0], rl_src[1]);
break;
case Instruction::CMPL_DOUBLE:
- ConvertWideComparison(cUnit, greenland::IntrinsicHelper::CmplDouble,
- rlDest, rlSrc[0], rlSrc[1]);
+ ConvertWideComparison(cu, greenland::IntrinsicHelper::CmplDouble,
+ rl_dest, rl_src[0], rl_src[1]);
break;
case Instruction::CMPG_DOUBLE:
- ConvertWideComparison(cUnit, greenland::IntrinsicHelper::CmpgDouble,
- rlDest, rlSrc[0], rlSrc[1]);
+ ConvertWideComparison(cu, greenland::IntrinsicHelper::CmpgDouble,
+ rl_dest, rl_src[0], rl_src[1]);
break;
case Instruction::CMP_LONG:
- ConvertWideComparison(cUnit, greenland::IntrinsicHelper::CmpLong,
- rlDest, rlSrc[0], rlSrc[1]);
+ ConvertWideComparison(cu, greenland::IntrinsicHelper::CmpLong,
+ rl_dest, rl_src[0], rl_src[1]);
break;
case Instruction::PACKED_SWITCH:
- ConvertPackedSwitch(cUnit, bb, vB, rlSrc[0]);
+ ConvertPackedSwitch(cu, bb, vB, rl_src[0]);
break;
case Instruction::SPARSE_SWITCH:
- ConvertSparseSwitch(cUnit, bb, vB, rlSrc[0]);
+ ConvertSparseSwitch(cu, bb, vB, rl_src[0]);
break;
default:
UNIMPLEMENTED(FATAL) << "Unsupported Dex opcode 0x" << std::hex << opcode;
res = true;
}
- if (objectDefinition) {
- SetShadowFrameEntry(cUnit, reinterpret_cast<llvm::Value*>
- (cUnit->llvmValues.elemList[rlDest.origSReg]));
+ if (object_definition) {
+ SetShadowFrameEntry(cu, reinterpret_cast<llvm::Value*>
+ (cu->llvm_values.elem_list[rl_dest.orig_sreg]));
}
return res;
}
/* Extended MIR instructions like PHI */
-static void ConvertExtendedMIR(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir,
- llvm::BasicBlock* llvmBB)
+static void ConvertExtendedMIR(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
+ llvm::BasicBlock* llvm_bb)
{
switch (static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode)) {
case kMirOpPhi: {
- RegLocation rlDest = cUnit->regLocation[mir->ssaRep->defs[0]];
+ RegLocation rl_dest = cu->reg_location[mir->ssa_rep->defs[0]];
/*
* The Art compiler's Phi nodes only handle 32-bit operands,
* representing wide values using a matched set of Phi nodes
@@ -1688,29 +1688,29 @@
* want a single Phi for wides. Here we will simply discard
* the Phi node representing the high word.
*/
- if (rlDest.highWord) {
+ if (rl_dest.high_word) {
return; // No Phi node - handled via low word
}
int* incoming = reinterpret_cast<int*>(mir->dalvikInsn.vB);
- llvm::Type* phiType =
- LlvmTypeFromLocRec(cUnit, rlDest);
- llvm::PHINode* phi = cUnit->irb->CreatePHI(phiType, mir->ssaRep->numUses);
- for (int i = 0; i < mir->ssaRep->numUses; i++) {
+ llvm::Type* phi_type =
+ LlvmTypeFromLocRec(cu, rl_dest);
+ llvm::PHINode* phi = cu->irb->CreatePHI(phi_type, mir->ssa_rep->num_uses);
+ for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
RegLocation loc;
// Don't check width here.
- loc = GetRawSrc(cUnit, mir, i);
- DCHECK_EQ(rlDest.wide, loc.wide);
- DCHECK_EQ(rlDest.wide & rlDest.highWord, loc.wide & loc.highWord);
- DCHECK_EQ(rlDest.fp, loc.fp);
- DCHECK_EQ(rlDest.core, loc.core);
- DCHECK_EQ(rlDest.ref, loc.ref);
+ loc = GetRawSrc(cu, mir, i);
+ DCHECK_EQ(rl_dest.wide, loc.wide);
+ DCHECK_EQ(rl_dest.wide & rl_dest.high_word, loc.wide & loc.high_word);
+ DCHECK_EQ(rl_dest.fp, loc.fp);
+ DCHECK_EQ(rl_dest.core, loc.core);
+ DCHECK_EQ(rl_dest.ref, loc.ref);
SafeMap<unsigned int, unsigned int>::iterator it;
- it = cUnit->blockIdMap.find(incoming[i]);
- DCHECK(it != cUnit->blockIdMap.end());
- phi->addIncoming(GetLLVMValue(cUnit, loc.origSReg),
- GetLLVMBlock(cUnit, it->second));
+ it = cu->block_id_map.find(incoming[i]);
+ DCHECK(it != cu->block_id_map.end());
+ phi->addIncoming(GetLLVMValue(cu, loc.orig_sreg),
+ GetLLVMBlock(cu, it->second));
}
- DefineValue(cUnit, phi, rlDest.origSReg);
+ DefineValue(cu, phi, rl_dest.orig_sreg);
break;
}
case kMirOpCopy: {
@@ -1718,9 +1718,9 @@
break;
}
case kMirOpNop:
- if ((mir == bb->lastMIRInsn) && (bb->taken == NULL) &&
- (bb->fallThrough == NULL)) {
- cUnit->irb->CreateUnreachable();
+ if ((mir == bb->last_mir_insn) && (bb->taken == NULL) &&
+ (bb->fall_through == NULL)) {
+ cu->irb->CreateUnreachable();
}
break;
@@ -1745,187 +1745,187 @@
}
}
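(A note on the wide-Phi rule in the kMirOpPhi case above: a 64-bit value
occupies a matched SSA pair, and only the low-word location receives a
Phi. A minimal standalone sketch, not part of this CL; RegLocationSketch
is a hypothetical stand-in for RegLocation.)

  #include <cassert>

  struct RegLocationSketch {  // hypothetical stand-in for RegLocation
    bool wide;       // value occupies a register pair
    bool high_word;  // this location is the pair's high half
  };

  // Mirrors the early return above: the high word of a wide pair never
  // gets its own Phi; it is recovered through the low word's pairing.
  static bool NeedsPhi(const RegLocationSketch& rl_dest) {
    return !rl_dest.high_word;
  }

  int main() {
    assert(NeedsPhi({false, false}));  // narrow value: Phi emitted
    assert(NeedsPhi({true, false}));   // low word of a wide pair: Phi emitted
    assert(!NeedsPhi({true, true}));   // high word: Phi discarded
    return 0;
  }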
-static void SetDexOffset(CompilationUnit* cUnit, int32_t offset)
+static void SetDexOffset(CompilationUnit* cu, int32_t offset)
{
- cUnit->currentDalvikOffset = offset;
- llvm::SmallVector<llvm::Value*, 1> arrayRef;
- arrayRef.push_back(cUnit->irb->getInt32(offset));
- llvm::MDNode* node = llvm::MDNode::get(*cUnit->context, arrayRef);
- cUnit->irb->SetDexOffset(node);
+ cu->current_dalvik_offset = offset;
+ llvm::SmallVector<llvm::Value*, 1> array_ref;
+ array_ref.push_back(cu->irb->getInt32(offset));
+ llvm::MDNode* node = llvm::MDNode::get(*cu->context, array_ref);
+ cu->irb->SetDexOffset(node);
}
// Attach method info as metadata to special intrinsic
-static void SetMethodInfo(CompilationUnit* cUnit)
+static void SetMethodInfo(CompilationUnit* cu)
{
// We don't want dex offset on this
- cUnit->irb->SetDexOffset(NULL);
+ cu->irb->SetDexOffset(NULL);
greenland::IntrinsicHelper::IntrinsicId id;
id = greenland::IntrinsicHelper::MethodInfo;
- llvm::Function* intr = cUnit->intrinsic_helper->GetIntrinsicFunction(id);
- llvm::Instruction* inst = cUnit->irb->CreateCall(intr);
- llvm::SmallVector<llvm::Value*, 2> regInfo;
- regInfo.push_back(cUnit->irb->getInt32(cUnit->numIns));
- regInfo.push_back(cUnit->irb->getInt32(cUnit->numRegs));
- regInfo.push_back(cUnit->irb->getInt32(cUnit->numOuts));
- regInfo.push_back(cUnit->irb->getInt32(cUnit->numCompilerTemps));
- regInfo.push_back(cUnit->irb->getInt32(cUnit->numSSARegs));
- llvm::MDNode* regInfoNode = llvm::MDNode::get(*cUnit->context, regInfo);
- inst->setMetadata("RegInfo", regInfoNode);
- int promoSize = cUnit->numDalvikRegisters + cUnit->numCompilerTemps + 1;
+ llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
+ llvm::Instruction* inst = cu->irb->CreateCall(intr);
+ llvm::SmallVector<llvm::Value*, 2> reg_info;
+ reg_info.push_back(cu->irb->getInt32(cu->num_ins));
+ reg_info.push_back(cu->irb->getInt32(cu->num_regs));
+ reg_info.push_back(cu->irb->getInt32(cu->num_outs));
+ reg_info.push_back(cu->irb->getInt32(cu->num_compiler_temps));
+ reg_info.push_back(cu->irb->getInt32(cu->num_ssa_regs));
+ llvm::MDNode* reg_info_node = llvm::MDNode::get(*cu->context, reg_info);
+ inst->setMetadata("RegInfo", reg_info_node);
+ int promo_size = cu->num_dalvik_registers + cu->num_compiler_temps + 1;
llvm::SmallVector<llvm::Value*, 50> pmap;
- for (int i = 0; i < promoSize; i++) {
- PromotionMap* p = &cUnit->promotionMap[i];
- int32_t mapData = ((p->firstInPair & 0xff) << 24) |
+ for (int i = 0; i < promo_size; i++) {
+ PromotionMap* p = &cu->promotion_map[i];
+ int32_t map_data = ((p->first_in_pair & 0xff) << 24) |
((p->FpReg & 0xff) << 16) |
- ((p->coreReg & 0xff) << 8) |
- ((p->fpLocation & 0xf) << 4) |
- (p->coreLocation & 0xf);
- pmap.push_back(cUnit->irb->getInt32(mapData));
+ ((p->core_reg & 0xff) << 8) |
+ ((p->fp_location & 0xf) << 4) |
+ (p->core_location & 0xf);
+ pmap.push_back(cu->irb->getInt32(map_data));
}
- llvm::MDNode* mapNode = llvm::MDNode::get(*cUnit->context, pmap);
- inst->setMetadata("PromotionMap", mapNode);
- SetDexOffset(cUnit, cUnit->currentDalvikOffset);
+ llvm::MDNode* map_node = llvm::MDNode::get(*cu->context, pmap);
+ inst->setMetadata("PromotionMap", map_node);
+ SetDexOffset(cu, cu->current_dalvik_offset);
}
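(For reference, SetMethodInfo above packs one register's promotion state
into a 32-bit word laid out as
[first_in_pair:8][fp reg:8][core reg:8][fp_location:4][core_location:4].
A minimal standalone sketch of the same packing; PromotionMapSketch and
its field values are illustrative, not the real PromotionMap.)

  #include <cstdint>
  #include <cassert>

  struct PromotionMapSketch {   // illustrative stand-in for PromotionMap
    uint8_t first_in_pair;
    uint8_t fp_reg;
    uint8_t core_reg;
    uint8_t fp_location;        // only the low 4 bits are encoded
    uint8_t core_location;      // only the low 4 bits are encoded
  };

  // Same field layout as the map_data expression above.
  static uint32_t PackPromotion(const PromotionMapSketch& p) {
    return (static_cast<uint32_t>(p.first_in_pair & 0xff) << 24) |
           (static_cast<uint32_t>(p.fp_reg & 0xff) << 16) |
           (static_cast<uint32_t>(p.core_reg & 0xff) << 8) |
           (static_cast<uint32_t>(p.fp_location & 0xf) << 4) |
           static_cast<uint32_t>(p.core_location & 0xf);
  }

  int main() {
    PromotionMapSketch p = {1, 16, 4, 1, 2};
    uint32_t word = PackPromotion(p);
    assert(((word >> 24) & 0xff) == 1);   // first_in_pair
    assert(((word >> 16) & 0xff) == 16);  // fp_reg
    assert(((word >> 8) & 0xff) == 4);    // core_reg
    assert(((word >> 4) & 0xf) == 1);     // fp_location
    assert((word & 0xf) == 2);            // core_location
    return 0;
  }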
/* Handle the content in each basic block */
-static bool BlockBitcodeConversion(CompilationUnit* cUnit, BasicBlock* bb)
+static bool BlockBitcodeConversion(CompilationUnit* cu, BasicBlock* bb)
{
- if (bb->blockType == kDead) return false;
- llvm::BasicBlock* llvmBB = GetLLVMBlock(cUnit, bb->id);
- if (llvmBB == NULL) {
- CHECK(bb->blockType == kExitBlock);
+ if (bb->block_type == kDead) return false;
+ llvm::BasicBlock* llvm_bb = GetLLVMBlock(cu, bb->id);
+ if (llvm_bb == NULL) {
+ CHECK(bb->block_type == kExitBlock);
} else {
- cUnit->irb->SetInsertPoint(llvmBB);
- SetDexOffset(cUnit, bb->startOffset);
+ cu->irb->SetInsertPoint(llvm_bb);
+ SetDexOffset(cu, bb->start_offset);
}
- if (cUnit->printMe) {
+ if (cu->verbose) {
LOG(INFO) << "................................";
LOG(INFO) << "Block id " << bb->id;
- if (llvmBB != NULL) {
- LOG(INFO) << "label " << llvmBB->getName().str().c_str();
+ if (llvm_bb != NULL) {
+ LOG(INFO) << "label " << llvm_bb->getName().str().c_str();
} else {
- LOG(INFO) << "llvmBB is NULL";
+ LOG(INFO) << "llvm_bb is NULL";
}
}
- if (bb->blockType == kEntryBlock) {
- SetMethodInfo(cUnit);
- bool *canBeRef = static_cast<bool*>(NewMem(cUnit, sizeof(bool) * cUnit->numDalvikRegisters,
+ if (bb->block_type == kEntryBlock) {
+ SetMethodInfo(cu);
+ bool *can_be_ref = static_cast<bool*>(NewMem(cu, sizeof(bool) * cu->num_dalvik_registers,
true, kAllocMisc));
- for (int i = 0; i < cUnit->numSSARegs; i++) {
- int vReg = SRegToVReg(cUnit, i);
- if (vReg > SSA_METHOD_BASEREG) {
- canBeRef[SRegToVReg(cUnit, i)] |= cUnit->regLocation[i].ref;
+ for (int i = 0; i < cu->num_ssa_regs; i++) {
+ int v_reg = SRegToVReg(cu, i);
+ if (v_reg > SSA_METHOD_BASEREG) {
+ can_be_ref[SRegToVReg(cu, i)] |= cu->reg_location[i].ref;
}
}
- for (int i = 0; i < cUnit->numDalvikRegisters; i++) {
- if (canBeRef[i]) {
- cUnit->numShadowFrameEntries++;
+ for (int i = 0; i < cu->num_dalvik_registers; i++) {
+ if (can_be_ref[i]) {
+ cu->num_shadow_frame_entries++;
}
}
- if (cUnit->numShadowFrameEntries > 0) {
- cUnit->shadowMap = static_cast<int*>(NewMem(cUnit, sizeof(int) * cUnit->numShadowFrameEntries,
+ if (cu->num_shadow_frame_entries > 0) {
+ cu->shadow_map = static_cast<int*>(NewMem(cu, sizeof(int) * cu->num_shadow_frame_entries,
true, kAllocMisc));
- for (int i = 0, j = 0; i < cUnit->numDalvikRegisters; i++) {
- if (canBeRef[i]) {
- cUnit->shadowMap[j++] = i;
+ for (int i = 0, j = 0; i < cu->num_dalvik_registers; i++) {
+ if (can_be_ref[i]) {
+ cu->shadow_map[j++] = i;
}
}
}
greenland::IntrinsicHelper::IntrinsicId id =
greenland::IntrinsicHelper::AllocaShadowFrame;
- llvm::Function* func = cUnit->intrinsic_helper->GetIntrinsicFunction(id);
- llvm::Value* entries = cUnit->irb->getInt32(cUnit->numShadowFrameEntries);
- llvm::Value* dalvikRegs = cUnit->irb->getInt32(cUnit->numDalvikRegisters);
- llvm::Value* args[] = { entries, dalvikRegs };
- cUnit->irb->CreateCall(func, args);
- } else if (bb->blockType == kExitBlock) {
+ llvm::Function* func = cu->intrinsic_helper->GetIntrinsicFunction(id);
+ llvm::Value* entries = cu->irb->getInt32(cu->num_shadow_frame_entries);
+ llvm::Value* dalvik_regs = cu->irb->getInt32(cu->num_dalvik_registers);
+ llvm::Value* args[] = { entries, dalvik_regs };
+ cu->irb->CreateCall(func, args);
+ } else if (bb->block_type == kExitBlock) {
/*
* Because of the differences between how MIR/LIR and llvm handle exit
     * blocks, we won't explicitly convert them.  On the llvm-to-lir
     * path, it will need to be regenerated.
*/
return false;
- } else if (bb->blockType == kExceptionHandling) {
+ } else if (bb->block_type == kExceptionHandling) {
/*
* Because we're deferring null checking, delete the associated empty
* exception block.
*/
- llvmBB->eraseFromParent();
+ llvm_bb->eraseFromParent();
return false;
}
- for (MIR* mir = bb->firstMIRInsn; mir; mir = mir->next) {
+ for (MIR* mir = bb->first_mir_insn; mir; mir = mir->next) {
- SetDexOffset(cUnit, mir->offset);
+ SetDexOffset(cu, mir->offset);
int opcode = mir->dalvikInsn.opcode;
- Instruction::Format dalvikFormat =
+ Instruction::Format dalvik_format =
Instruction::FormatOf(mir->dalvikInsn.opcode);
if (opcode == kMirOpCheck) {
// Combine check and work halves of throwing instruction.
- MIR* workHalf = mir->meta.throwInsn;
- mir->dalvikInsn.opcode = workHalf->dalvikInsn.opcode;
+ MIR* work_half = mir->meta.throw_insn;
+ mir->dalvikInsn.opcode = work_half->dalvikInsn.opcode;
opcode = mir->dalvikInsn.opcode;
- SSARepresentation* ssaRep = workHalf->ssaRep;
- workHalf->ssaRep = mir->ssaRep;
- mir->ssaRep = ssaRep;
- workHalf->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
- if (bb->successorBlockList.blockListType == kCatch) {
- llvm::Function* intr = cUnit->intrinsic_helper->GetIntrinsicFunction(
+ SSARepresentation* ssa_rep = work_half->ssa_rep;
+ work_half->ssa_rep = mir->ssa_rep;
+ mir->ssa_rep = ssa_rep;
+ work_half->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
+ if (bb->successor_block_list.block_list_type == kCatch) {
+ llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(
greenland::IntrinsicHelper::CatchTargets);
- llvm::Value* switchKey =
- cUnit->irb->CreateCall(intr, cUnit->irb->getInt32(mir->offset));
+ llvm::Value* switch_key =
+ cu->irb->CreateCall(intr, cu->irb->getInt32(mir->offset));
GrowableListIterator iter;
- GrowableListIteratorInit(&bb->successorBlockList.blocks, &iter);
+ GrowableListIteratorInit(&bb->successor_block_list.blocks, &iter);
// New basic block to use for work half
- llvm::BasicBlock* workBB =
- llvm::BasicBlock::Create(*cUnit->context, "", cUnit->func);
+ llvm::BasicBlock* work_bb =
+ llvm::BasicBlock::Create(*cu->context, "", cu->func);
llvm::SwitchInst* sw =
- cUnit->irb->CreateSwitch(switchKey, workBB,
- bb->successorBlockList.blocks.numUsed);
+ cu->irb->CreateSwitch(switch_key, work_bb,
+ bb->successor_block_list.blocks.num_used);
while (true) {
- SuccessorBlockInfo *successorBlockInfo =
+ SuccessorBlockInfo *successor_block_info =
reinterpret_cast<SuccessorBlockInfo*>(GrowableListIteratorNext(&iter));
- if (successorBlockInfo == NULL) break;
+ if (successor_block_info == NULL) break;
llvm::BasicBlock *target =
- GetLLVMBlock(cUnit, successorBlockInfo->block->id);
- int typeIndex = successorBlockInfo->key;
- sw->addCase(cUnit->irb->getInt32(typeIndex), target);
+ GetLLVMBlock(cu, successor_block_info->block->id);
+ int type_index = successor_block_info->key;
+ sw->addCase(cu->irb->getInt32(type_index), target);
}
- llvmBB = workBB;
- cUnit->irb->SetInsertPoint(llvmBB);
+ llvm_bb = work_bb;
+ cu->irb->SetInsertPoint(llvm_bb);
}
}
if (opcode >= kMirOpFirst) {
- ConvertExtendedMIR(cUnit, bb, mir, llvmBB);
+ ConvertExtendedMIR(cu, bb, mir, llvm_bb);
continue;
}
- bool notHandled = ConvertMIRNode(cUnit, mir, bb, llvmBB,
- NULL /* labelList */);
- if (notHandled) {
- Instruction::Code dalvikOpcode = static_cast<Instruction::Code>(opcode);
+ bool not_handled = ConvertMIRNode(cu, mir, bb, llvm_bb,
+ NULL /* label_list */);
+ if (not_handled) {
+ Instruction::Code dalvik_opcode = static_cast<Instruction::Code>(opcode);
LOG(WARNING) << StringPrintf("%#06x: Op %#x (%s) / Fmt %d not handled",
mir->offset, opcode,
- Instruction::Name(dalvikOpcode),
- dalvikFormat);
+ Instruction::Name(dalvik_opcode),
+ dalvik_format);
}
}
- if (bb->blockType == kEntryBlock) {
- cUnit->entryTargetBB = GetLLVMBlock(cUnit, bb->fallThrough->id);
- } else if ((bb->fallThrough != NULL) && !bb->hasReturn) {
- cUnit->irb->CreateBr(GetLLVMBlock(cUnit, bb->fallThrough->id));
+ if (bb->block_type == kEntryBlock) {
+ cu->entryTarget_bb = GetLLVMBlock(cu, bb->fall_through->id);
+ } else if ((bb->fall_through != NULL) && !bb->has_return) {
+ cu->irb->CreateBr(GetLLVMBlock(cu, bb->fall_through->id));
}
return false;
}
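(The entry-block bookkeeping in BlockBitcodeConversion above compacts the
per-vreg can_be_ref flags into shadow_map, an index array with one slot
per reference-holding Dalvik register. A standalone sketch of that
compaction; std::vector stands in for the arena allocations.)

  #include <vector>
  #include <cassert>

  // shadow_map[j] holds the vreg number of the j-th register that can
  // hold a reference, matching the loop that fills cu->shadow_map above.
  static std::vector<int> BuildShadowMap(const std::vector<bool>& can_be_ref) {
    std::vector<int> shadow_map;
    for (int i = 0; i < static_cast<int>(can_be_ref.size()); i++) {
      if (can_be_ref[i]) {
        shadow_map.push_back(i);
      }
    }
    return shadow_map;
  }

  int main() {
    std::vector<int> map = BuildShadowMap({false, true, false, true});
    assert(map.size() == 2);
    assert(map[0] == 1 && map[1] == 3);  // vregs 1 and 3 get shadow entries
    return 0;
  }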
-char RemapShorty(char shortyType) {
+char RemapShorty(char shorty_type) {
/*
* TODO: might want to revisit this. Dalvik registers are 32-bits wide,
* and longs/doubles are represented as a pair of registers. When sub-word
@@ -1937,89 +1937,89 @@
* types (which is valid so long as we always do a real expansion of passed
* arguments and field loads).
*/
- switch(shortyType) {
- case 'Z' : shortyType = 'I'; break;
- case 'B' : shortyType = 'I'; break;
- case 'S' : shortyType = 'I'; break;
- case 'C' : shortyType = 'I'; break;
+ switch(shorty_type) {
+ case 'Z' : shorty_type = 'I'; break;
+ case 'B' : shorty_type = 'I'; break;
+ case 'S' : shorty_type = 'I'; break;
+ case 'C' : shorty_type = 'I'; break;
default: break;
}
- return shortyType;
+ return shorty_type;
}
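(RemapShorty above collapses every sub-word shorty character to 'I', so
the generated bitcode only ever sees int-width or wider operand types. A
standalone sketch of the same mapping, renamed to avoid clashing with the
real function.)

  #include <cassert>

  static char RemapShortySketch(char shorty_type) {
    switch (shorty_type) {
      case 'Z':  // boolean
      case 'B':  // byte
      case 'S':  // short
      case 'C':  // char
        return 'I';
      default:   // 'I', 'J', 'F', 'D', 'L', 'V' pass through
        return shorty_type;
    }
  }

  int main() {
    assert(RemapShortySketch('Z') == 'I');  // sub-word types widen to int
    assert(RemapShortySketch('C') == 'I');
    assert(RemapShortySketch('J') == 'J');  // long is untouched
    assert(RemapShortySketch('L') == 'L');  // reference is untouched
    return 0;
  }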
-static llvm::FunctionType* GetFunctionType(CompilationUnit* cUnit) {
+static llvm::FunctionType* GetFunctionType(CompilationUnit* cu) {
// Get return type
- llvm::Type* ret_type = cUnit->irb->GetJType(RemapShorty(cUnit->shorty[0]),
+ llvm::Type* ret_type = cu->irb->GetJType(RemapShorty(cu->shorty[0]),
greenland::kAccurate);
// Get argument type
std::vector<llvm::Type*> args_type;
// method object
- args_type.push_back(cUnit->irb->GetJMethodTy());
+ args_type.push_back(cu->irb->GetJMethodTy());
// Do we have a "this"?
- if ((cUnit->access_flags & kAccStatic) == 0) {
- args_type.push_back(cUnit->irb->GetJObjectTy());
+ if ((cu->access_flags & kAccStatic) == 0) {
+ args_type.push_back(cu->irb->GetJObjectTy());
}
- for (uint32_t i = 1; i < strlen(cUnit->shorty); ++i) {
- args_type.push_back(cUnit->irb->GetJType(RemapShorty(cUnit->shorty[i]),
+ for (uint32_t i = 1; i < strlen(cu->shorty); ++i) {
+ args_type.push_back(cu->irb->GetJType(RemapShorty(cu->shorty[i]),
greenland::kAccurate));
}
return llvm::FunctionType::get(ret_type, args_type, false);
}
-static bool CreateFunction(CompilationUnit* cUnit) {
- std::string func_name(PrettyMethod(cUnit->method_idx, *cUnit->dex_file,
+static bool CreateFunction(CompilationUnit* cu) {
+ std::string func_name(PrettyMethod(cu->method_idx, *cu->dex_file,
/* with_signature */ false));
- llvm::FunctionType* func_type = GetFunctionType(cUnit);
+ llvm::FunctionType* func_type = GetFunctionType(cu);
if (func_type == NULL) {
return false;
}
- cUnit->func = llvm::Function::Create(func_type,
+ cu->func = llvm::Function::Create(func_type,
llvm::Function::ExternalLinkage,
- func_name, cUnit->module);
+ func_name, cu->module);
- llvm::Function::arg_iterator arg_iter(cUnit->func->arg_begin());
- llvm::Function::arg_iterator arg_end(cUnit->func->arg_end());
+ llvm::Function::arg_iterator arg_iter(cu->func->arg_begin());
+ llvm::Function::arg_iterator arg_end(cu->func->arg_end());
arg_iter->setName("method");
++arg_iter;
- int startSReg = cUnit->numRegs;
+ int start_sreg = cu->num_regs;
for (unsigned i = 0; arg_iter != arg_end; ++i, ++arg_iter) {
- arg_iter->setName(StringPrintf("v%i_0", startSReg));
- startSReg += cUnit->regLocation[startSReg].wide ? 2 : 1;
+ arg_iter->setName(StringPrintf("v%i_0", start_sreg));
+ start_sreg += cu->reg_location[start_sreg].wide ? 2 : 1;
}
return true;
}
-static bool CreateLLVMBasicBlock(CompilationUnit* cUnit, BasicBlock* bb)
+static bool CreateLLVMBasicBlock(CompilationUnit* cu, BasicBlock* bb)
{
// Skip the exit block
- if ((bb->blockType == kDead) ||(bb->blockType == kExitBlock)) {
- cUnit->idToBlockMap.Put(bb->id, NULL);
+  if ((bb->block_type == kDead) || (bb->block_type == kExitBlock)) {
+ cu->id_to_block_map.Put(bb->id, NULL);
} else {
- int offset = bb->startOffset;
- bool entryBlock = (bb->blockType == kEntryBlock);
- llvm::BasicBlock* llvmBB =
- llvm::BasicBlock::Create(*cUnit->context, entryBlock ? "entry" :
- StringPrintf(kLabelFormat, bb->catchEntry ? kCatchBlock :
- kNormalBlock, offset, bb->id), cUnit->func);
- if (entryBlock) {
- cUnit->entryBB = llvmBB;
- cUnit->placeholderBB =
- llvm::BasicBlock::Create(*cUnit->context, "placeholder",
- cUnit->func);
+ int offset = bb->start_offset;
+ bool entry_block = (bb->block_type == kEntryBlock);
+ llvm::BasicBlock* llvm_bb =
+ llvm::BasicBlock::Create(*cu->context, entry_block ? "entry" :
+ StringPrintf(kLabelFormat, bb->catch_entry ? kCatchBlock :
+ kNormalBlock, offset, bb->id), cu->func);
+ if (entry_block) {
+ cu->entry_bb = llvm_bb;
+ cu->placeholder_bb =
+ llvm::BasicBlock::Create(*cu->context, "placeholder",
+ cu->func);
}
- cUnit->idToBlockMap.Put(bb->id, llvmBB);
+ cu->id_to_block_map.Put(bb->id, llvm_bb);
}
return false;
}
@@ -2033,45 +2033,45 @@
* o Iterate through the MIR a basic block at a time, setting arguments
* to recovered ssa name.
*/
-void MethodMIR2Bitcode(CompilationUnit* cUnit)
+void MethodMIR2Bitcode(CompilationUnit* cu)
{
- InitIR(cUnit);
- CompilerInitGrowableList(cUnit, &cUnit->llvmValues, cUnit->numSSARegs);
+ InitIR(cu);
+ CompilerInitGrowableList(cu, &cu->llvm_values, cu->num_ssa_regs);
// Create the function
- CreateFunction(cUnit);
+ CreateFunction(cu);
// Create an LLVM basic block for each MIR block in dfs preorder
- DataFlowAnalysisDispatcher(cUnit, CreateLLVMBasicBlock,
- kPreOrderDFSTraversal, false /* isIterative */);
+ DataFlowAnalysisDispatcher(cu, CreateLLVMBasicBlock,
+ kPreOrderDFSTraversal, false /* is_iterative */);
/*
* Create an llvm named value for each MIR SSA name. Note: we'll use
* placeholders for all non-argument values (because we haven't seen
* the definition yet).
*/
- cUnit->irb->SetInsertPoint(cUnit->placeholderBB);
- llvm::Function::arg_iterator arg_iter(cUnit->func->arg_begin());
+ cu->irb->SetInsertPoint(cu->placeholder_bb);
+ llvm::Function::arg_iterator arg_iter(cu->func->arg_begin());
arg_iter++; /* Skip path method */
- for (int i = 0; i < cUnit->numSSARegs; i++) {
+ for (int i = 0; i < cu->num_ssa_regs; i++) {
llvm::Value* val;
- RegLocation rlTemp = cUnit->regLocation[i];
- if ((SRegToVReg(cUnit, i) < 0) || rlTemp.highWord) {
- InsertGrowableList(cUnit, &cUnit->llvmValues, 0);
- } else if ((i < cUnit->numRegs) ||
- (i >= (cUnit->numRegs + cUnit->numIns))) {
- llvm::Constant* immValue = cUnit->regLocation[i].wide ?
- cUnit->irb->GetJLong(0) : cUnit->irb->GetJInt(0);
- val = EmitConst(cUnit, immValue, cUnit->regLocation[i]);
- val->setName(LlvmSSAName(cUnit, i));
- InsertGrowableList(cUnit, &cUnit->llvmValues, reinterpret_cast<uintptr_t>(val));
+ RegLocation rl_temp = cu->reg_location[i];
+ if ((SRegToVReg(cu, i) < 0) || rl_temp.high_word) {
+ InsertGrowableList(cu, &cu->llvm_values, 0);
+ } else if ((i < cu->num_regs) ||
+ (i >= (cu->num_regs + cu->num_ins))) {
+ llvm::Constant* imm_value = cu->reg_location[i].wide ?
+ cu->irb->GetJLong(0) : cu->irb->GetJInt(0);
+ val = EmitConst(cu, imm_value, cu->reg_location[i]);
+ val->setName(LlvmSSAName(cu, i));
+ InsertGrowableList(cu, &cu->llvm_values, reinterpret_cast<uintptr_t>(val));
} else {
// Recover previously-created argument values
- llvm::Value* argVal = arg_iter++;
- InsertGrowableList(cUnit, &cUnit->llvmValues, reinterpret_cast<uintptr_t>(argVal));
+ llvm::Value* arg_val = arg_iter++;
+ InsertGrowableList(cu, &cu->llvm_values, reinterpret_cast<uintptr_t>(arg_val));
}
}
- DataFlowAnalysisDispatcher(cUnit, BlockBitcodeConversion,
+ DataFlowAnalysisDispatcher(cu, BlockBitcodeConversion,
kPreOrderDFSTraversal, false /* Iterative */);
/*
@@ -2087,8 +2087,8 @@
* If any definitions remain, we link the placeholder block into the
* CFG. Otherwise, it is deleted.
*/
- for (llvm::BasicBlock::iterator it = cUnit->placeholderBB->begin(),
- itEnd = cUnit->placeholderBB->end(); it != itEnd;) {
+ for (llvm::BasicBlock::iterator it = cu->placeholder_bb->begin(),
+ it_end = cu->placeholder_bb->end(); it != it_end;) {
llvm::Instruction* inst = llvm::dyn_cast<llvm::Instruction>(it++);
DCHECK(inst != NULL);
llvm::Value* val = llvm::dyn_cast<llvm::Value>(inst);
@@ -2097,30 +2097,30 @@
inst->eraseFromParent();
}
}
- SetDexOffset(cUnit, 0);
- if (cUnit->placeholderBB->empty()) {
- cUnit->placeholderBB->eraseFromParent();
+ SetDexOffset(cu, 0);
+ if (cu->placeholder_bb->empty()) {
+ cu->placeholder_bb->eraseFromParent();
} else {
- cUnit->irb->SetInsertPoint(cUnit->placeholderBB);
- cUnit->irb->CreateBr(cUnit->entryTargetBB);
- cUnit->entryTargetBB = cUnit->placeholderBB;
+ cu->irb->SetInsertPoint(cu->placeholder_bb);
+ cu->irb->CreateBr(cu->entryTarget_bb);
+ cu->entryTarget_bb = cu->placeholder_bb;
}
- cUnit->irb->SetInsertPoint(cUnit->entryBB);
- cUnit->irb->CreateBr(cUnit->entryTargetBB);
+ cu->irb->SetInsertPoint(cu->entry_bb);
+ cu->irb->CreateBr(cu->entryTarget_bb);
- if (cUnit->enableDebug & (1 << kDebugVerifyBitcode)) {
- if (llvm::verifyFunction(*cUnit->func, llvm::PrintMessageAction)) {
+ if (cu->enable_debug & (1 << kDebugVerifyBitcode)) {
+ if (llvm::verifyFunction(*cu->func, llvm::PrintMessageAction)) {
LOG(INFO) << "Bitcode verification FAILED for "
- << PrettyMethod(cUnit->method_idx, *cUnit->dex_file)
- << " of size " << cUnit->insnsSize;
- cUnit->enableDebug |= (1 << kDebugDumpBitcodeFile);
+ << PrettyMethod(cu->method_idx, *cu->dex_file)
+ << " of size " << cu->insns_size;
+ cu->enable_debug |= (1 << kDebugDumpBitcodeFile);
}
}
- if (cUnit->enableDebug & (1 << kDebugDumpBitcodeFile)) {
+ if (cu->enable_debug & (1 << kDebugDumpBitcodeFile)) {
// Write bitcode to file
std::string errmsg;
- std::string fname(PrettyMethod(cUnit->method_idx, *cUnit->dex_file));
+ std::string fname(PrettyMethod(cu->method_idx, *cu->dex_file));
ReplaceSpecialChars(fname);
  // TODO: make this configurable; change the naming mechanism to avoid fname length issues.
fname = StringPrintf("/sdcard/Bitcode/%s.bc", fname.c_str());
@@ -2138,40 +2138,40 @@
LOG(ERROR) << "Failed to create bitcode output file: " << errmsg;
}
- llvm::WriteBitcodeToFile(cUnit->module, out_file->os());
+ llvm::WriteBitcodeToFile(cu->module, out_file->os());
out_file->keep();
}
}
-static RegLocation GetLoc(CompilationUnit* cUnit, llvm::Value* val) {
+static RegLocation GetLoc(CompilationUnit* cu, llvm::Value* val) {
RegLocation res;
DCHECK(val != NULL);
- SafeMap<llvm::Value*, RegLocation>::iterator it = cUnit->locMap.find(val);
- if (it == cUnit->locMap.end()) {
- std::string valName = val->getName().str();
- if (valName.empty()) {
+ SafeMap<llvm::Value*, RegLocation>::iterator it = cu->loc_map.find(val);
+ if (it == cu->loc_map.end()) {
+ std::string val_name = val->getName().str();
+ if (val_name.empty()) {
// FIXME: need to be more robust, handle FP and be in a position to
// manage unnamed temps whose lifetimes span basic block boundaries
UNIMPLEMENTED(WARNING) << "Need to handle unnamed llvm temps";
memset(&res, 0, sizeof(res));
res.location = kLocPhysReg;
- res.lowReg = AllocTemp(cUnit);
+ res.low_reg = AllocTemp(cu);
res.home = true;
- res.sRegLow = INVALID_SREG;
- res.origSReg = INVALID_SREG;
+ res.s_reg_low = INVALID_SREG;
+ res.orig_sreg = INVALID_SREG;
llvm::Type* ty = val->getType();
- res.wide = ((ty == cUnit->irb->getInt64Ty()) ||
- (ty == cUnit->irb->getDoubleTy()));
+ res.wide = ((ty == cu->irb->getInt64Ty()) ||
+ (ty == cu->irb->getDoubleTy()));
if (res.wide) {
- res.highReg = AllocTemp(cUnit);
+ res.high_reg = AllocTemp(cu);
}
- cUnit->locMap.Put(val, res);
+ cu->loc_map.Put(val, res);
} else {
- DCHECK_EQ(valName[0], 'v');
- int baseSReg = INVALID_SREG;
- sscanf(valName.c_str(), "v%d_", &baseSReg);
- res = cUnit->regLocation[baseSReg];
- cUnit->locMap.Put(val, res);
+ DCHECK_EQ(val_name[0], 'v');
+ int base_sreg = INVALID_SREG;
+ sscanf(val_name.c_str(), "v%d_", &base_sreg);
+ res = cu->reg_location[base_sreg];
+ cu->loc_map.Put(val, res);
}
} else {
res = it->second;
@@ -2179,10 +2179,10 @@
return res;
}
-static Instruction::Code GetDalvikOpcode(OpKind op, bool isConst, bool isWide)
+static Instruction::Code GetDalvikOpcode(OpKind op, bool is_const, bool is_wide)
{
Instruction::Code res = Instruction::NOP;
- if (isWide) {
+ if (is_wide) {
switch(op) {
case kOpAdd: res = Instruction::ADD_LONG; break;
case kOpSub: res = Instruction::SUB_LONG; break;
@@ -2197,7 +2197,7 @@
case kOpAsr: res = Instruction::SHR_LONG; break;
default: LOG(FATAL) << "Unexpected OpKind " << op;
}
- } else if (isConst){
+  } else if (is_const) {
switch(op) {
case kOpAdd: res = Instruction::ADD_INT_LIT16; break;
case kOpSub: res = Instruction::RSUB_INT_LIT8; break;
@@ -2231,10 +2231,10 @@
return res;
}
-static Instruction::Code GetDalvikFPOpcode(OpKind op, bool isConst, bool isWide)
+static Instruction::Code GetDalvikFPOpcode(OpKind op, bool is_const, bool is_wide)
{
Instruction::Code res = Instruction::NOP;
- if (isWide) {
+ if (is_wide) {
switch(op) {
case kOpAdd: res = Instruction::ADD_DOUBLE; break;
case kOpSub: res = Instruction::SUB_DOUBLE; break;
@@ -2256,231 +2256,231 @@
return res;
}
-static void CvtBinFPOp(CompilationUnit* cUnit, OpKind op, llvm::Instruction* inst)
+static void CvtBinFPOp(CompilationUnit* cu, OpKind op, llvm::Instruction* inst)
{
- RegLocation rlDest = GetLoc(cUnit, inst);
+ RegLocation rl_dest = GetLoc(cu, inst);
/*
* Normally, we won't ever generate an FP operation with an immediate
* operand (not supported in Dex instruction set). However, the IR builder
- * may insert them - in particular for createNegFP. Recognize this case
+ * may insert them - in particular for create_neg_fp. Recognize this case
* and deal with it.
*/
llvm::ConstantFP* op1C = llvm::dyn_cast<llvm::ConstantFP>(inst->getOperand(0));
llvm::ConstantFP* op2C = llvm::dyn_cast<llvm::ConstantFP>(inst->getOperand(1));
DCHECK(op2C == NULL);
if ((op1C != NULL) && (op == kOpSub)) {
- RegLocation rlSrc = GetLoc(cUnit, inst->getOperand(1));
- if (rlDest.wide) {
- GenArithOpDouble(cUnit, Instruction::NEG_DOUBLE, rlDest, rlSrc, rlSrc);
+ RegLocation rl_src = GetLoc(cu, inst->getOperand(1));
+ if (rl_dest.wide) {
+ GenArithOpDouble(cu, Instruction::NEG_DOUBLE, rl_dest, rl_src, rl_src);
} else {
- GenArithOpFloat(cUnit, Instruction::NEG_FLOAT, rlDest, rlSrc, rlSrc);
+ GenArithOpFloat(cu, Instruction::NEG_FLOAT, rl_dest, rl_src, rl_src);
}
} else {
DCHECK(op1C == NULL);
- RegLocation rlSrc1 = GetLoc(cUnit, inst->getOperand(0));
- RegLocation rlSrc2 = GetLoc(cUnit, inst->getOperand(1));
- Instruction::Code dalvikOp = GetDalvikFPOpcode(op, false, rlDest.wide);
- if (rlDest.wide) {
- GenArithOpDouble(cUnit, dalvikOp, rlDest, rlSrc1, rlSrc2);
+ RegLocation rl_src1 = GetLoc(cu, inst->getOperand(0));
+ RegLocation rl_src2 = GetLoc(cu, inst->getOperand(1));
+ Instruction::Code dalvik_op = GetDalvikFPOpcode(op, false, rl_dest.wide);
+ if (rl_dest.wide) {
+ GenArithOpDouble(cu, dalvik_op, rl_dest, rl_src1, rl_src2);
} else {
- GenArithOpFloat(cUnit, dalvikOp, rlDest, rlSrc1, rlSrc2);
+ GenArithOpFloat(cu, dalvik_op, rl_dest, rl_src1, rl_src2);
}
}
}
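(Per the comment in CvtBinFPOp above, the only FP immediate the IR
builder produces is the 0.0 left operand of a subtract used to express
negation. A tiny sketch of that lowering decision; the enum and helper
are illustrative only.)

  #include <cassert>

  enum class DexFpOp { kNeg, kSub };  // illustrative opcode tags

  // A subtract whose LHS is an FP constant (necessarily 0.0 here) is
  // lowered back to a Dalvik NEG rather than a two-operand SUB.
  static DexFpOp LowerFpSub(bool lhs_is_const_fp) {
    return lhs_is_const_fp ? DexFpOp::kNeg : DexFpOp::kSub;
  }

  int main() {
    assert(LowerFpSub(true) == DexFpOp::kNeg);   // 0.0 - x  ==>  neg x
    assert(LowerFpSub(false) == DexFpOp::kSub);  // a - b    ==>  sub a, b
    return 0;
  }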
-static void CvtIntNarrowing(CompilationUnit* cUnit, llvm::Instruction* inst,
+static void CvtIntNarrowing(CompilationUnit* cu, llvm::Instruction* inst,
Instruction::Code opcode)
{
- RegLocation rlDest = GetLoc(cUnit, inst);
- RegLocation rlSrc = GetLoc(cUnit, inst->getOperand(0));
- GenIntNarrowing(cUnit, opcode, rlDest, rlSrc);
+ RegLocation rl_dest = GetLoc(cu, inst);
+ RegLocation rl_src = GetLoc(cu, inst->getOperand(0));
+ GenIntNarrowing(cu, opcode, rl_dest, rl_src);
}
-static void CvtIntToFP(CompilationUnit* cUnit, llvm::Instruction* inst)
+static void CvtIntToFP(CompilationUnit* cu, llvm::Instruction* inst)
{
- RegLocation rlDest = GetLoc(cUnit, inst);
- RegLocation rlSrc = GetLoc(cUnit, inst->getOperand(0));
+ RegLocation rl_dest = GetLoc(cu, inst);
+ RegLocation rl_src = GetLoc(cu, inst->getOperand(0));
Instruction::Code opcode;
- if (rlDest.wide) {
- if (rlSrc.wide) {
+ if (rl_dest.wide) {
+ if (rl_src.wide) {
opcode = Instruction::LONG_TO_DOUBLE;
} else {
opcode = Instruction::INT_TO_DOUBLE;
}
} else {
- if (rlSrc.wide) {
+ if (rl_src.wide) {
opcode = Instruction::LONG_TO_FLOAT;
} else {
opcode = Instruction::INT_TO_FLOAT;
}
}
- GenConversion(cUnit, opcode, rlDest, rlSrc);
+ GenConversion(cu, opcode, rl_dest, rl_src);
}
-static void CvtFPToInt(CompilationUnit* cUnit, llvm::CallInst* call_inst)
+static void CvtFPToInt(CompilationUnit* cu, llvm::CallInst* call_inst)
{
- RegLocation rlDest = GetLoc(cUnit, call_inst);
- RegLocation rlSrc = GetLoc(cUnit, call_inst->getOperand(0));
+ RegLocation rl_dest = GetLoc(cu, call_inst);
+ RegLocation rl_src = GetLoc(cu, call_inst->getOperand(0));
Instruction::Code opcode;
- if (rlDest.wide) {
- if (rlSrc.wide) {
+ if (rl_dest.wide) {
+ if (rl_src.wide) {
opcode = Instruction::DOUBLE_TO_LONG;
} else {
opcode = Instruction::FLOAT_TO_LONG;
}
} else {
- if (rlSrc.wide) {
+ if (rl_src.wide) {
opcode = Instruction::DOUBLE_TO_INT;
} else {
opcode = Instruction::FLOAT_TO_INT;
}
}
- GenConversion(cUnit, opcode, rlDest, rlSrc);
+ GenConversion(cu, opcode, rl_dest, rl_src);
}
-static void CvtFloatToDouble(CompilationUnit* cUnit, llvm::Instruction* inst)
+static void CvtFloatToDouble(CompilationUnit* cu, llvm::Instruction* inst)
{
- RegLocation rlDest = GetLoc(cUnit, inst);
- RegLocation rlSrc = GetLoc(cUnit, inst->getOperand(0));
- GenConversion(cUnit, Instruction::FLOAT_TO_DOUBLE, rlDest, rlSrc);
+ RegLocation rl_dest = GetLoc(cu, inst);
+ RegLocation rl_src = GetLoc(cu, inst->getOperand(0));
+ GenConversion(cu, Instruction::FLOAT_TO_DOUBLE, rl_dest, rl_src);
}
-static void CvtTrunc(CompilationUnit* cUnit, llvm::Instruction* inst)
+static void CvtTrunc(CompilationUnit* cu, llvm::Instruction* inst)
{
- RegLocation rlDest = GetLoc(cUnit, inst);
- RegLocation rlSrc = GetLoc(cUnit, inst->getOperand(0));
- rlSrc = UpdateLocWide(cUnit, rlSrc);
- rlSrc = WideToNarrow(cUnit, rlSrc);
- StoreValue(cUnit, rlDest, rlSrc);
+ RegLocation rl_dest = GetLoc(cu, inst);
+ RegLocation rl_src = GetLoc(cu, inst->getOperand(0));
+ rl_src = UpdateLocWide(cu, rl_src);
+ rl_src = WideToNarrow(cu, rl_src);
+ StoreValue(cu, rl_dest, rl_src);
}
-static void CvtDoubleToFloat(CompilationUnit* cUnit, llvm::Instruction* inst)
+static void CvtDoubleToFloat(CompilationUnit* cu, llvm::Instruction* inst)
{
- RegLocation rlDest = GetLoc(cUnit, inst);
- RegLocation rlSrc = GetLoc(cUnit, inst->getOperand(0));
- GenConversion(cUnit, Instruction::DOUBLE_TO_FLOAT, rlDest, rlSrc);
+ RegLocation rl_dest = GetLoc(cu, inst);
+ RegLocation rl_src = GetLoc(cu, inst->getOperand(0));
+ GenConversion(cu, Instruction::DOUBLE_TO_FLOAT, rl_dest, rl_src);
}
-static void CvtIntExt(CompilationUnit* cUnit, llvm::Instruction* inst, bool isSigned)
+static void CvtIntExt(CompilationUnit* cu, llvm::Instruction* inst, bool is_signed)
{
// TODO: evaluate src/tgt types and add general support for more than int to long
- RegLocation rlDest = GetLoc(cUnit, inst);
- RegLocation rlSrc = GetLoc(cUnit, inst->getOperand(0));
- DCHECK(rlDest.wide);
- DCHECK(!rlSrc.wide);
- DCHECK(!rlDest.fp);
- DCHECK(!rlSrc.fp);
- RegLocation rlResult = EvalLoc(cUnit, rlDest, kCoreReg, true);
- if (rlSrc.location == kLocPhysReg) {
- OpRegCopy(cUnit, rlResult.lowReg, rlSrc.lowReg);
+ RegLocation rl_dest = GetLoc(cu, inst);
+ RegLocation rl_src = GetLoc(cu, inst->getOperand(0));
+ DCHECK(rl_dest.wide);
+ DCHECK(!rl_src.wide);
+ DCHECK(!rl_dest.fp);
+ DCHECK(!rl_src.fp);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ if (rl_src.location == kLocPhysReg) {
+ OpRegCopy(cu, rl_result.low_reg, rl_src.low_reg);
} else {
- LoadValueDirect(cUnit, rlSrc, rlResult.lowReg);
+ LoadValueDirect(cu, rl_src, rl_result.low_reg);
}
- if (isSigned) {
- OpRegRegImm(cUnit, kOpAsr, rlResult.highReg, rlResult.lowReg, 31);
+ if (is_signed) {
+ OpRegRegImm(cu, kOpAsr, rl_result.high_reg, rl_result.low_reg, 31);
} else {
- LoadConstant(cUnit, rlResult.highReg, 0);
+ LoadConstant(cu, rl_result.high_reg, 0);
}
- StoreValueWide(cUnit, rlDest, rlResult);
+ StoreValueWide(cu, rl_dest, rl_result);
}
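(CvtIntExt above widens int to long by materializing the high word either
as an arithmetic shift of the low word by 31, for the signed case, or as
a zero constant. A standalone sketch of that arithmetic; it assumes the
usual arithmetic right shift on a negative int32_t, matching what the
generated kOpAsr instruction does.)

  #include <cstdint>
  #include <cassert>

  static int64_t WidenInt(int32_t low, bool is_signed) {
    // Signed: high word is all copies of the sign bit; unsigned: zero.
    uint32_t high = is_signed ? static_cast<uint32_t>(low >> 31) : 0u;
    uint64_t bits = (static_cast<uint64_t>(high) << 32) |
                    static_cast<uint32_t>(low);
    return static_cast<int64_t>(bits);
  }

  int main() {
    assert(WidenInt(-1, true) == -1LL);                  // sign-extended
    assert(WidenInt(-1, false) == 0xffffffffLL);         // zero-extended
    assert(WidenInt(0x7fffffff, true) == 0x7fffffffLL);  // positive unchanged
    return 0;
  }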
-static void CvtBinOp(CompilationUnit* cUnit, OpKind op, llvm::Instruction* inst)
+static void CvtBinOp(CompilationUnit* cu, OpKind op, llvm::Instruction* inst)
{
- RegLocation rlDest = GetLoc(cUnit, inst);
+ RegLocation rl_dest = GetLoc(cu, inst);
llvm::Value* lhs = inst->getOperand(0);
// Special-case RSUB/NEG
- llvm::ConstantInt* lhsImm = llvm::dyn_cast<llvm::ConstantInt>(lhs);
- if ((op == kOpSub) && (lhsImm != NULL)) {
- RegLocation rlSrc1 = GetLoc(cUnit, inst->getOperand(1));
- if (rlSrc1.wide) {
- DCHECK_EQ(lhsImm->getSExtValue(), 0);
- GenArithOpLong(cUnit, Instruction::NEG_LONG, rlDest, rlSrc1, rlSrc1);
+ llvm::ConstantInt* lhs_imm = llvm::dyn_cast<llvm::ConstantInt>(lhs);
+ if ((op == kOpSub) && (lhs_imm != NULL)) {
+ RegLocation rl_src1 = GetLoc(cu, inst->getOperand(1));
+ if (rl_src1.wide) {
+ DCHECK_EQ(lhs_imm->getSExtValue(), 0);
+ GenArithOpLong(cu, Instruction::NEG_LONG, rl_dest, rl_src1, rl_src1);
} else {
- GenArithOpIntLit(cUnit, Instruction::RSUB_INT, rlDest, rlSrc1,
- lhsImm->getSExtValue());
+ GenArithOpIntLit(cu, Instruction::RSUB_INT, rl_dest, rl_src1,
+ lhs_imm->getSExtValue());
}
return;
}
- DCHECK(lhsImm == NULL);
- RegLocation rlSrc1 = GetLoc(cUnit, inst->getOperand(0));
+ DCHECK(lhs_imm == NULL);
+ RegLocation rl_src1 = GetLoc(cu, inst->getOperand(0));
llvm::Value* rhs = inst->getOperand(1);
- llvm::ConstantInt* constRhs = llvm::dyn_cast<llvm::ConstantInt>(rhs);
- if (!rlDest.wide && (constRhs != NULL)) {
- Instruction::Code dalvikOp = GetDalvikOpcode(op, true, false);
- GenArithOpIntLit(cUnit, dalvikOp, rlDest, rlSrc1, constRhs->getSExtValue());
+ llvm::ConstantInt* const_rhs = llvm::dyn_cast<llvm::ConstantInt>(rhs);
+ if (!rl_dest.wide && (const_rhs != NULL)) {
+ Instruction::Code dalvik_op = GetDalvikOpcode(op, true, false);
+ GenArithOpIntLit(cu, dalvik_op, rl_dest, rl_src1, const_rhs->getSExtValue());
} else {
- Instruction::Code dalvikOp = GetDalvikOpcode(op, false, rlDest.wide);
- RegLocation rlSrc2;
- if (constRhs != NULL) {
+ Instruction::Code dalvik_op = GetDalvikOpcode(op, false, rl_dest.wide);
+ RegLocation rl_src2;
+ if (const_rhs != NULL) {
      // ir_builder converts NOT_LONG to xor src, -1. Restore the NOT here.
- DCHECK_EQ(dalvikOp, Instruction::XOR_LONG);
- DCHECK_EQ(-1L, constRhs->getSExtValue());
- dalvikOp = Instruction::NOT_LONG;
- rlSrc2 = rlSrc1;
+ DCHECK_EQ(dalvik_op, Instruction::XOR_LONG);
+ DCHECK_EQ(-1L, const_rhs->getSExtValue());
+ dalvik_op = Instruction::NOT_LONG;
+ rl_src2 = rl_src1;
} else {
- rlSrc2 = GetLoc(cUnit, rhs);
+ rl_src2 = GetLoc(cu, rhs);
}
- if (rlDest.wide) {
- GenArithOpLong(cUnit, dalvikOp, rlDest, rlSrc1, rlSrc2);
+ if (rl_dest.wide) {
+ GenArithOpLong(cu, dalvik_op, rl_dest, rl_src1, rl_src2);
} else {
- GenArithOpInt(cUnit, dalvikOp, rlDest, rlSrc1, rlSrc2);
+ GenArithOpInt(cu, dalvik_op, rl_dest, rl_src1, rl_src2);
}
}
}
-static void CvtShiftOp(CompilationUnit* cUnit, Instruction::Code opcode, llvm::CallInst* callInst)
+static void CvtShiftOp(CompilationUnit* cu, Instruction::Code opcode, llvm::CallInst* call_inst)
{
- DCHECK_EQ(callInst->getNumArgOperands(), 2U);
- RegLocation rlDest = GetLoc(cUnit, callInst);
- RegLocation rlSrc = GetLoc(cUnit, callInst->getArgOperand(0));
- llvm::Value* rhs = callInst->getArgOperand(1);
+ DCHECK_EQ(call_inst->getNumArgOperands(), 2U);
+ RegLocation rl_dest = GetLoc(cu, call_inst);
+ RegLocation rl_src = GetLoc(cu, call_inst->getArgOperand(0));
+ llvm::Value* rhs = call_inst->getArgOperand(1);
if (llvm::ConstantInt* src2 = llvm::dyn_cast<llvm::ConstantInt>(rhs)) {
- DCHECK(!rlDest.wide);
- GenArithOpIntLit(cUnit, opcode, rlDest, rlSrc, src2->getSExtValue());
+ DCHECK(!rl_dest.wide);
+ GenArithOpIntLit(cu, opcode, rl_dest, rl_src, src2->getSExtValue());
} else {
- RegLocation rlShift = GetLoc(cUnit, rhs);
- if (callInst->getType() == cUnit->irb->getInt64Ty()) {
- GenShiftOpLong(cUnit, opcode, rlDest, rlSrc, rlShift);
+ RegLocation rl_shift = GetLoc(cu, rhs);
+ if (call_inst->getType() == cu->irb->getInt64Ty()) {
+ GenShiftOpLong(cu, opcode, rl_dest, rl_src, rl_shift);
} else {
- GenArithOpInt(cUnit, opcode, rlDest, rlSrc, rlShift);
+ GenArithOpInt(cu, opcode, rl_dest, rl_src, rl_shift);
}
}
}
-static void CvtBr(CompilationUnit* cUnit, llvm::Instruction* inst)
+static void CvtBr(CompilationUnit* cu, llvm::Instruction* inst)
{
- llvm::BranchInst* brInst = llvm::dyn_cast<llvm::BranchInst>(inst);
- DCHECK(brInst != NULL);
- DCHECK(brInst->isUnconditional()); // May change - but this is all we use now
- llvm::BasicBlock* targetBB = brInst->getSuccessor(0);
- OpUnconditionalBranch(cUnit, cUnit->blockToLabelMap.Get(targetBB));
+ llvm::BranchInst* br_inst = llvm::dyn_cast<llvm::BranchInst>(inst);
+ DCHECK(br_inst != NULL);
+ DCHECK(br_inst->isUnconditional()); // May change - but this is all we use now
+ llvm::BasicBlock* target_bb = br_inst->getSuccessor(0);
+ OpUnconditionalBranch(cu, cu->block_to_label_map.Get(target_bb));
}
-static void CvtPhi(CompilationUnit* cUnit, llvm::Instruction* inst)
+static void CvtPhi(CompilationUnit* cu, llvm::Instruction* inst)
{
// Nop - these have already been processed
}
-static void CvtRet(CompilationUnit* cUnit, llvm::Instruction* inst)
+static void CvtRet(CompilationUnit* cu, llvm::Instruction* inst)
{
- llvm::ReturnInst* retInst = llvm::dyn_cast<llvm::ReturnInst>(inst);
- llvm::Value* retVal = retInst->getReturnValue();
- if (retVal != NULL) {
- RegLocation rlSrc = GetLoc(cUnit, retVal);
- if (rlSrc.wide) {
- StoreValueWide(cUnit, GetReturnWide(cUnit, rlSrc.fp), rlSrc);
+ llvm::ReturnInst* ret_inst = llvm::dyn_cast<llvm::ReturnInst>(inst);
+ llvm::Value* ret_val = ret_inst->getReturnValue();
+ if (ret_val != NULL) {
+ RegLocation rl_src = GetLoc(cu, ret_val);
+ if (rl_src.wide) {
+ StoreValueWide(cu, GetReturnWide(cu, rl_src.fp), rl_src);
} else {
- StoreValue(cUnit, GetReturn(cUnit, rlSrc.fp), rlSrc);
+ StoreValue(cu, GetReturn(cu, rl_src.fp), rl_src);
}
}
- GenExitSequence(cUnit);
+ GenExitSequence(cu);
}
-static ConditionCode GetCond(llvm::ICmpInst::Predicate llvmCond)
+static ConditionCode GetCond(llvm::ICmpInst::Predicate llvm_cond)
{
ConditionCode res = kCondAl;
- switch(llvmCond) {
+ switch(llvm_cond) {
case llvm::ICmpInst::ICMP_EQ: res = kCondEq; break;
case llvm::ICmpInst::ICMP_NE: res = kCondNe; break;
case llvm::ICmpInst::ICMP_SLT: res = kCondLt; break;
@@ -2492,498 +2492,498 @@
return res;
}
-static void CvtICmp(CompilationUnit* cUnit, llvm::Instruction* inst)
+static void CvtICmp(CompilationUnit* cu, llvm::Instruction* inst)
{
- // GenCmpLong(cUnit, rlDest, rlSrc1, rlSrc2)
+ // GenCmpLong(cu, rl_dest, rl_src1, rl_src2)
UNIMPLEMENTED(FATAL);
}
-static void CvtICmpBr(CompilationUnit* cUnit, llvm::Instruction* inst,
- llvm::BranchInst* brInst)
+static void CvtICmpBr(CompilationUnit* cu, llvm::Instruction* inst,
+ llvm::BranchInst* br_inst)
{
// Get targets
- llvm::BasicBlock* takenBB = brInst->getSuccessor(0);
- LIR* taken = cUnit->blockToLabelMap.Get(takenBB);
- llvm::BasicBlock* fallThroughBB = brInst->getSuccessor(1);
- LIR* fallThrough = cUnit->blockToLabelMap.Get(fallThroughBB);
+ llvm::BasicBlock* taken_bb = br_inst->getSuccessor(0);
+ LIR* taken = cu->block_to_label_map.Get(taken_bb);
+ llvm::BasicBlock* fallthrough_bb = br_inst->getSuccessor(1);
+ LIR* fall_through = cu->block_to_label_map.Get(fallthrough_bb);
// Get comparison operands
- llvm::ICmpInst* iCmpInst = llvm::dyn_cast<llvm::ICmpInst>(inst);
- ConditionCode cond = GetCond(iCmpInst->getPredicate());
- llvm::Value* lhs = iCmpInst->getOperand(0);
+ llvm::ICmpInst* i_cmp_inst = llvm::dyn_cast<llvm::ICmpInst>(inst);
+ ConditionCode cond = GetCond(i_cmp_inst->getPredicate());
+ llvm::Value* lhs = i_cmp_inst->getOperand(0);
// Not expecting a constant as 1st operand
DCHECK(llvm::dyn_cast<llvm::ConstantInt>(lhs) == NULL);
- RegLocation rlSrc1 = GetLoc(cUnit, inst->getOperand(0));
- rlSrc1 = LoadValue(cUnit, rlSrc1, kCoreReg);
+ RegLocation rl_src1 = GetLoc(cu, inst->getOperand(0));
+ rl_src1 = LoadValue(cu, rl_src1, kCoreReg);
llvm::Value* rhs = inst->getOperand(1);
- if (cUnit->instructionSet == kMips) {
+ if (cu->instruction_set == kMips) {
// Compare and branch in one shot
UNIMPLEMENTED(FATAL);
}
 // Compare, then branch
// TODO: handle fused CMP_LONG/IF_xxZ case
if (llvm::ConstantInt* src2 = llvm::dyn_cast<llvm::ConstantInt>(rhs)) {
- OpRegImm(cUnit, kOpCmp, rlSrc1.lowReg, src2->getSExtValue());
+ OpRegImm(cu, kOpCmp, rl_src1.low_reg, src2->getSExtValue());
} else if (llvm::dyn_cast<llvm::ConstantPointerNull>(rhs) != NULL) {
- OpRegImm(cUnit, kOpCmp, rlSrc1.lowReg, 0);
+ OpRegImm(cu, kOpCmp, rl_src1.low_reg, 0);
} else {
- RegLocation rlSrc2 = GetLoc(cUnit, rhs);
- rlSrc2 = LoadValue(cUnit, rlSrc2, kCoreReg);
- OpRegReg(cUnit, kOpCmp, rlSrc1.lowReg, rlSrc2.lowReg);
+ RegLocation rl_src2 = GetLoc(cu, rhs);
+ rl_src2 = LoadValue(cu, rl_src2, kCoreReg);
+ OpRegReg(cu, kOpCmp, rl_src1.low_reg, rl_src2.low_reg);
}
- OpCondBranch(cUnit, cond, taken);
+ OpCondBranch(cu, cond, taken);
// Fallthrough
- OpUnconditionalBranch(cUnit, fallThrough);
+ OpUnconditionalBranch(cu, fall_through);
}
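CvtICmpBr handles the fused compare-and-branch pattern: one compare (register/register or register/immediate), a conditional branch to the taken label, then an explicit unconditional branch for the fall-through edge. A rough sketch of the emitted sequence, with illustrative register names rather than literal output of this code:

    // for: if (v1 < v2) goto taken; else goto fall_through;
    //   cmp  r1, r2        <- OpRegReg(cu, kOpCmp, ...)
    //   blt  taken         <- OpCondBranch(cu, kCondLt, taken)
    //   b    fall_through  <- OpUnconditionalBranch(cu, fall_through)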
-static void CvtCopy(CompilationUnit* cUnit, llvm::CallInst* callInst)
+static void CvtCopy(CompilationUnit* cu, llvm::CallInst* call_inst)
{
- DCHECK_EQ(callInst->getNumArgOperands(), 1U);
- RegLocation rlSrc = GetLoc(cUnit, callInst->getArgOperand(0));
- RegLocation rlDest = GetLoc(cUnit, callInst);
- DCHECK_EQ(rlSrc.wide, rlDest.wide);
- DCHECK_EQ(rlSrc.fp, rlDest.fp);
- if (rlSrc.wide) {
- StoreValueWide(cUnit, rlDest, rlSrc);
+ DCHECK_EQ(call_inst->getNumArgOperands(), 1U);
+ RegLocation rl_src = GetLoc(cu, call_inst->getArgOperand(0));
+ RegLocation rl_dest = GetLoc(cu, call_inst);
+ DCHECK_EQ(rl_src.wide, rl_dest.wide);
+ DCHECK_EQ(rl_src.fp, rl_dest.fp);
+ if (rl_src.wide) {
+ StoreValueWide(cu, rl_dest, rl_src);
} else {
- StoreValue(cUnit, rlDest, rlSrc);
+ StoreValue(cu, rl_dest, rl_src);
}
}
// Note: Immediate arg is a ConstantInt regardless of result type
-static void CvtConst(CompilationUnit* cUnit, llvm::CallInst* callInst)
+static void CvtConst(CompilationUnit* cu, llvm::CallInst* call_inst)
{
- DCHECK_EQ(callInst->getNumArgOperands(), 1U);
+ DCHECK_EQ(call_inst->getNumArgOperands(), 1U);
llvm::ConstantInt* src =
- llvm::dyn_cast<llvm::ConstantInt>(callInst->getArgOperand(0));
+ llvm::dyn_cast<llvm::ConstantInt>(call_inst->getArgOperand(0));
uint64_t immval = src->getZExtValue();
- RegLocation rlDest = GetLoc(cUnit, callInst);
- RegLocation rlResult = EvalLoc(cUnit, rlDest, kAnyReg, true);
- if (rlDest.wide) {
- LoadConstantValueWide(cUnit, rlResult.lowReg, rlResult.highReg,
+ RegLocation rl_dest = GetLoc(cu, call_inst);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kAnyReg, true);
+ if (rl_dest.wide) {
+ LoadConstantValueWide(cu, rl_result.low_reg, rl_result.high_reg,
(immval) & 0xffffffff, (immval >> 32) & 0xffffffff);
- StoreValueWide(cUnit, rlDest, rlResult);
+ StoreValueWide(cu, rl_dest, rl_result);
} else {
- LoadConstantNoClobber(cUnit, rlResult.lowReg, immval & 0xffffffff);
- StoreValue(cUnit, rlDest, rlResult);
+ LoadConstantNoClobber(cu, rl_result.low_reg, immval & 0xffffffff);
+ StoreValue(cu, rl_dest, rl_result);
}
}
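The wide path above splits the 64-bit immediate into two 32-bit halves, one per physical register of the pair. A standalone sketch of that split (the constant is hypothetical):

    #include <cstdint>
    #include <cstdio>
    int main() {
      uint64_t immval = 0x123456789abcdef0ULL;
      uint32_t lo = immval & 0xffffffff;          // -> rl_result.low_reg
      uint32_t hi = (immval >> 32) & 0xffffffff;  // -> rl_result.high_reg
      printf("lo=0x%08x hi=0x%08x\n", lo, hi);    // lo=0x9abcdef0 hi=0x12345678
      return 0;
    }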
-static void CvtConstObject(CompilationUnit* cUnit, llvm::CallInst* callInst, bool isString)
+static void CvtConstObject(CompilationUnit* cu, llvm::CallInst* call_inst, bool is_string)
{
- DCHECK_EQ(callInst->getNumArgOperands(), 1U);
- llvm::ConstantInt* idxVal =
- llvm::dyn_cast<llvm::ConstantInt>(callInst->getArgOperand(0));
- uint32_t index = idxVal->getZExtValue();
- RegLocation rlDest = GetLoc(cUnit, callInst);
- if (isString) {
- GenConstString(cUnit, index, rlDest);
+ DCHECK_EQ(call_inst->getNumArgOperands(), 1U);
+ llvm::ConstantInt* idx_val =
+ llvm::dyn_cast<llvm::ConstantInt>(call_inst->getArgOperand(0));
+ uint32_t index = idx_val->getZExtValue();
+ RegLocation rl_dest = GetLoc(cu, call_inst);
+ if (is_string) {
+ GenConstString(cu, index, rl_dest);
} else {
- GenConstClass(cUnit, index, rlDest);
+ GenConstClass(cu, index, rl_dest);
}
}
-static void CvtFillArrayData(CompilationUnit* cUnit, llvm::CallInst* callInst)
+static void CvtFillArrayData(CompilationUnit* cu, llvm::CallInst* call_inst)
{
- DCHECK_EQ(callInst->getNumArgOperands(), 2U);
- llvm::ConstantInt* offsetVal =
- llvm::dyn_cast<llvm::ConstantInt>(callInst->getArgOperand(0));
- RegLocation rlSrc = GetLoc(cUnit, callInst->getArgOperand(1));
- GenFillArrayData(cUnit, offsetVal->getSExtValue(), rlSrc);
+ DCHECK_EQ(call_inst->getNumArgOperands(), 2U);
+ llvm::ConstantInt* offset_val =
+ llvm::dyn_cast<llvm::ConstantInt>(call_inst->getArgOperand(0));
+ RegLocation rl_src = GetLoc(cu, call_inst->getArgOperand(1));
+ GenFillArrayData(cu, offset_val->getSExtValue(), rl_src);
}
-static void CvtNewInstance(CompilationUnit* cUnit, llvm::CallInst* callInst)
+static void CvtNewInstance(CompilationUnit* cu, llvm::CallInst* call_inst)
{
- DCHECK_EQ(callInst->getNumArgOperands(), 1U);
- llvm::ConstantInt* typeIdxVal =
- llvm::dyn_cast<llvm::ConstantInt>(callInst->getArgOperand(0));
- uint32_t typeIdx = typeIdxVal->getZExtValue();
- RegLocation rlDest = GetLoc(cUnit, callInst);
- GenNewInstance(cUnit, typeIdx, rlDest);
+ DCHECK_EQ(call_inst->getNumArgOperands(), 1U);
+ llvm::ConstantInt* type_idx_val =
+ llvm::dyn_cast<llvm::ConstantInt>(call_inst->getArgOperand(0));
+ uint32_t type_idx = type_idx_val->getZExtValue();
+ RegLocation rl_dest = GetLoc(cu, call_inst);
+ GenNewInstance(cu, type_idx, rl_dest);
}
-static void CvtNewArray(CompilationUnit* cUnit, llvm::CallInst* callInst)
+static void CvtNewArray(CompilationUnit* cu, llvm::CallInst* call_inst)
{
- DCHECK_EQ(callInst->getNumArgOperands(), 2U);
- llvm::ConstantInt* typeIdxVal =
- llvm::dyn_cast<llvm::ConstantInt>(callInst->getArgOperand(0));
- uint32_t typeIdx = typeIdxVal->getZExtValue();
- llvm::Value* len = callInst->getArgOperand(1);
- RegLocation rlLen = GetLoc(cUnit, len);
- RegLocation rlDest = GetLoc(cUnit, callInst);
- GenNewArray(cUnit, typeIdx, rlDest, rlLen);
+ DCHECK_EQ(call_inst->getNumArgOperands(), 2U);
+ llvm::ConstantInt* type_idx_val =
+ llvm::dyn_cast<llvm::ConstantInt>(call_inst->getArgOperand(0));
+ uint32_t type_idx = type_idx_val->getZExtValue();
+ llvm::Value* len = call_inst->getArgOperand(1);
+ RegLocation rl_len = GetLoc(cu, len);
+ RegLocation rl_dest = GetLoc(cu, call_inst);
+ GenNewArray(cu, type_idx, rl_dest, rl_len);
}
-static void CvtInstanceOf(CompilationUnit* cUnit, llvm::CallInst* callInst)
+static void CvtInstanceOf(CompilationUnit* cu, llvm::CallInst* call_inst)
{
- DCHECK_EQ(callInst->getNumArgOperands(), 2U);
- llvm::ConstantInt* typeIdxVal =
- llvm::dyn_cast<llvm::ConstantInt>(callInst->getArgOperand(0));
- uint32_t typeIdx = typeIdxVal->getZExtValue();
- llvm::Value* src = callInst->getArgOperand(1);
- RegLocation rlSrc = GetLoc(cUnit, src);
- RegLocation rlDest = GetLoc(cUnit, callInst);
- GenInstanceof(cUnit, typeIdx, rlDest, rlSrc);
+ DCHECK_EQ(call_inst->getNumArgOperands(), 2U);
+ llvm::ConstantInt* type_idx_val =
+ llvm::dyn_cast<llvm::ConstantInt>(call_inst->getArgOperand(0));
+ uint32_t type_idx = type_idx_val->getZExtValue();
+ llvm::Value* src = call_inst->getArgOperand(1);
+ RegLocation rl_src = GetLoc(cu, src);
+ RegLocation rl_dest = GetLoc(cu, call_inst);
+ GenInstanceof(cu, type_idx, rl_dest, rl_src);
}
-static void CvtThrow(CompilationUnit* cUnit, llvm::CallInst* callInst)
+static void CvtThrow(CompilationUnit* cu, llvm::CallInst* call_inst)
{
- DCHECK_EQ(callInst->getNumArgOperands(), 1U);
- llvm::Value* src = callInst->getArgOperand(0);
- RegLocation rlSrc = GetLoc(cUnit, src);
- GenThrow(cUnit, rlSrc);
+ DCHECK_EQ(call_inst->getNumArgOperands(), 1U);
+ llvm::Value* src = call_inst->getArgOperand(0);
+ RegLocation rl_src = GetLoc(cu, src);
+ GenThrow(cu, rl_src);
}
-static void CvtMonitorEnterExit(CompilationUnit* cUnit, bool isEnter,
- llvm::CallInst* callInst)
+static void CvtMonitorEnterExit(CompilationUnit* cu, bool is_enter,
+ llvm::CallInst* call_inst)
{
- DCHECK_EQ(callInst->getNumArgOperands(), 2U);
- llvm::ConstantInt* optFlags =
- llvm::dyn_cast<llvm::ConstantInt>(callInst->getArgOperand(0));
- llvm::Value* src = callInst->getArgOperand(1);
- RegLocation rlSrc = GetLoc(cUnit, src);
- if (isEnter) {
- GenMonitorEnter(cUnit, optFlags->getZExtValue(), rlSrc);
+ DCHECK_EQ(call_inst->getNumArgOperands(), 2U);
+ llvm::ConstantInt* opt_flags =
+ llvm::dyn_cast<llvm::ConstantInt>(call_inst->getArgOperand(0));
+ llvm::Value* src = call_inst->getArgOperand(1);
+ RegLocation rl_src = GetLoc(cu, src);
+ if (is_enter) {
+ GenMonitorEnter(cu, opt_flags->getZExtValue(), rl_src);
} else {
- GenMonitorExit(cUnit, optFlags->getZExtValue(), rlSrc);
+ GenMonitorExit(cu, opt_flags->getZExtValue(), rl_src);
}
}
-static void CvtArrayLength(CompilationUnit* cUnit, llvm::CallInst* callInst)
+static void CvtArrayLength(CompilationUnit* cu, llvm::CallInst* call_inst)
{
- DCHECK_EQ(callInst->getNumArgOperands(), 2U);
- llvm::ConstantInt* optFlags =
- llvm::dyn_cast<llvm::ConstantInt>(callInst->getArgOperand(0));
- llvm::Value* src = callInst->getArgOperand(1);
- RegLocation rlSrc = GetLoc(cUnit, src);
- rlSrc = LoadValue(cUnit, rlSrc, kCoreReg);
- GenNullCheck(cUnit, rlSrc.sRegLow, rlSrc.lowReg, optFlags->getZExtValue());
- RegLocation rlDest = GetLoc(cUnit, callInst);
- RegLocation rlResult = EvalLoc(cUnit, rlDest, kCoreReg, true);
- int lenOffset = Array::LengthOffset().Int32Value();
- LoadWordDisp(cUnit, rlSrc.lowReg, lenOffset, rlResult.lowReg);
- StoreValue(cUnit, rlDest, rlResult);
+ DCHECK_EQ(call_inst->getNumArgOperands(), 2U);
+ llvm::ConstantInt* opt_flags =
+ llvm::dyn_cast<llvm::ConstantInt>(call_inst->getArgOperand(0));
+ llvm::Value* src = call_inst->getArgOperand(1);
+ RegLocation rl_src = GetLoc(cu, src);
+ rl_src = LoadValue(cu, rl_src, kCoreReg);
+ GenNullCheck(cu, rl_src.s_reg_low, rl_src.low_reg, opt_flags->getZExtValue());
+ RegLocation rl_dest = GetLoc(cu, call_inst);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ int len_offset = Array::LengthOffset().Int32Value();
+ LoadWordDisp(cu, rl_src.low_reg, len_offset, rl_result.low_reg);
+ StoreValue(cu, rl_dest, rl_result);
}
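CvtArrayLength reduces to a null check plus a single word load from the fixed length slot in the array object header. Approximate shape of the generated code (the offset is target- and runtime-specific, shown symbolically):

    //   <null check on r_array, honoring opt_flags>
    //   ldr r_dest, [r_array, #len_offset]   // len_offset = Array::LengthOffset()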
-static void CvtMoveException(CompilationUnit* cUnit, llvm::CallInst* callInst)
+static void CvtMoveException(CompilationUnit* cu, llvm::CallInst* call_inst)
{
- RegLocation rlDest = GetLoc(cUnit, callInst);
- GenMoveException(cUnit, rlDest);
+ RegLocation rl_dest = GetLoc(cu, call_inst);
+ GenMoveException(cu, rl_dest);
}
-static void CvtSget(CompilationUnit* cUnit, llvm::CallInst* callInst, bool isWide, bool isObject)
+static void CvtSget(CompilationUnit* cu, llvm::CallInst* call_inst, bool is_wide, bool is_object)
{
- DCHECK_EQ(callInst->getNumArgOperands(), 1U);
- llvm::ConstantInt* typeIdxVal =
- llvm::dyn_cast<llvm::ConstantInt>(callInst->getArgOperand(0));
- uint32_t typeIdx = typeIdxVal->getZExtValue();
- RegLocation rlDest = GetLoc(cUnit, callInst);
- GenSget(cUnit, typeIdx, rlDest, isWide, isObject);
+ DCHECK_EQ(call_inst->getNumArgOperands(), 1U);
+ llvm::ConstantInt* type_idx_val =
+ llvm::dyn_cast<llvm::ConstantInt>(call_inst->getArgOperand(0));
+ uint32_t type_idx = type_idx_val->getZExtValue();
+ RegLocation rl_dest = GetLoc(cu, call_inst);
+ GenSget(cu, type_idx, rl_dest, is_wide, is_object);
}
-static void CvtSput(CompilationUnit* cUnit, llvm::CallInst* callInst, bool isWide, bool isObject)
+static void CvtSput(CompilationUnit* cu, llvm::CallInst* call_inst, bool is_wide, bool is_object)
{
- DCHECK_EQ(callInst->getNumArgOperands(), 2U);
- llvm::ConstantInt* typeIdxVal =
- llvm::dyn_cast<llvm::ConstantInt>(callInst->getArgOperand(0));
- uint32_t typeIdx = typeIdxVal->getZExtValue();
- llvm::Value* src = callInst->getArgOperand(1);
- RegLocation rlSrc = GetLoc(cUnit, src);
- GenSput(cUnit, typeIdx, rlSrc, isWide, isObject);
+ DCHECK_EQ(call_inst->getNumArgOperands(), 2U);
+ llvm::ConstantInt* type_idx_val =
+ llvm::dyn_cast<llvm::ConstantInt>(call_inst->getArgOperand(0));
+ uint32_t type_idx = type_idx_val->getZExtValue();
+ llvm::Value* src = call_inst->getArgOperand(1);
+ RegLocation rl_src = GetLoc(cu, src);
+ GenSput(cu, type_idx, rl_src, is_wide, is_object);
}
-static void CvtAget(CompilationUnit* cUnit, llvm::CallInst* callInst, OpSize size, int scale)
+static void CvtAget(CompilationUnit* cu, llvm::CallInst* call_inst, OpSize size, int scale)
{
- DCHECK_EQ(callInst->getNumArgOperands(), 3U);
- llvm::ConstantInt* optFlags =
- llvm::dyn_cast<llvm::ConstantInt>(callInst->getArgOperand(0));
- RegLocation rlArray = GetLoc(cUnit, callInst->getArgOperand(1));
- RegLocation rlIndex = GetLoc(cUnit, callInst->getArgOperand(2));
- RegLocation rlDest = GetLoc(cUnit, callInst);
- GenArrayGet(cUnit, optFlags->getZExtValue(), size, rlArray, rlIndex,
- rlDest, scale);
+ DCHECK_EQ(call_inst->getNumArgOperands(), 3U);
+ llvm::ConstantInt* opt_flags =
+ llvm::dyn_cast<llvm::ConstantInt>(call_inst->getArgOperand(0));
+ RegLocation rl_array = GetLoc(cu, call_inst->getArgOperand(1));
+ RegLocation rl_index = GetLoc(cu, call_inst->getArgOperand(2));
+ RegLocation rl_dest = GetLoc(cu, call_inst);
+ GenArrayGet(cu, opt_flags->getZExtValue(), size, rl_array, rl_index,
+ rl_dest, scale);
}
-static void CvtAput(CompilationUnit* cUnit, llvm::CallInst* callInst, OpSize size,
- int scale, bool isObject)
+static void CvtAput(CompilationUnit* cu, llvm::CallInst* call_inst, OpSize size,
+ int scale, bool is_object)
{
- DCHECK_EQ(callInst->getNumArgOperands(), 4U);
- llvm::ConstantInt* optFlags =
- llvm::dyn_cast<llvm::ConstantInt>(callInst->getArgOperand(0));
- RegLocation rlSrc = GetLoc(cUnit, callInst->getArgOperand(1));
- RegLocation rlArray = GetLoc(cUnit, callInst->getArgOperand(2));
- RegLocation rlIndex = GetLoc(cUnit, callInst->getArgOperand(3));
- if (isObject) {
- GenArrayObjPut(cUnit, optFlags->getZExtValue(), rlArray, rlIndex,
- rlSrc, scale);
+ DCHECK_EQ(call_inst->getNumArgOperands(), 4U);
+ llvm::ConstantInt* opt_flags =
+ llvm::dyn_cast<llvm::ConstantInt>(call_inst->getArgOperand(0));
+ RegLocation rl_src = GetLoc(cu, call_inst->getArgOperand(1));
+ RegLocation rl_array = GetLoc(cu, call_inst->getArgOperand(2));
+ RegLocation rl_index = GetLoc(cu, call_inst->getArgOperand(3));
+ if (is_object) {
+ GenArrayObjPut(cu, opt_flags->getZExtValue(), rl_array, rl_index,
+ rl_src, scale);
} else {
- GenArrayPut(cUnit, optFlags->getZExtValue(), size, rlArray, rlIndex,
- rlSrc, scale);
+ GenArrayPut(cu, opt_flags->getZExtValue(), size, rl_array, rl_index,
+ rl_src, scale);
}
}
-static void CvtAputObj(CompilationUnit* cUnit, llvm::CallInst* callInst)
+static void CvtAputObj(CompilationUnit* cu, llvm::CallInst* call_inst)
{
- CvtAput(cUnit, callInst, kWord, 2, true /* isObject */);
+ CvtAput(cu, call_inst, kWord, 2, true /* is_object */);
}
-static void CvtAputPrimitive(CompilationUnit* cUnit, llvm::CallInst* callInst,
+static void CvtAputPrimitive(CompilationUnit* cu, llvm::CallInst* call_inst,
OpSize size, int scale)
{
- CvtAput(cUnit, callInst, size, scale, false /* isObject */);
+ CvtAput(cu, call_inst, size, scale, false /* is_object */);
}
-static void CvtIget(CompilationUnit* cUnit, llvm::CallInst* callInst, OpSize size,
- bool isWide, bool isObj)
+static void CvtIget(CompilationUnit* cu, llvm::CallInst* call_inst, OpSize size,
+ bool is_wide, bool is_obj)
{
- DCHECK_EQ(callInst->getNumArgOperands(), 3U);
- llvm::ConstantInt* optFlags =
- llvm::dyn_cast<llvm::ConstantInt>(callInst->getArgOperand(0));
- RegLocation rlObj = GetLoc(cUnit, callInst->getArgOperand(1));
- llvm::ConstantInt* fieldIdx =
- llvm::dyn_cast<llvm::ConstantInt>(callInst->getArgOperand(2));
- RegLocation rlDest = GetLoc(cUnit, callInst);
- GenIGet(cUnit, fieldIdx->getZExtValue(), optFlags->getZExtValue(),
- size, rlDest, rlObj, isWide, isObj);
+ DCHECK_EQ(call_inst->getNumArgOperands(), 3U);
+ llvm::ConstantInt* opt_flags =
+ llvm::dyn_cast<llvm::ConstantInt>(call_inst->getArgOperand(0));
+ RegLocation rl_obj = GetLoc(cu, call_inst->getArgOperand(1));
+ llvm::ConstantInt* field_idx =
+ llvm::dyn_cast<llvm::ConstantInt>(call_inst->getArgOperand(2));
+ RegLocation rl_dest = GetLoc(cu, call_inst);
+ GenIGet(cu, field_idx->getZExtValue(), opt_flags->getZExtValue(),
+ size, rl_dest, rl_obj, is_wide, is_obj);
}
-static void CvtIput(CompilationUnit* cUnit, llvm::CallInst* callInst, OpSize size,
- bool isWide, bool isObj)
+static void CvtIput(CompilationUnit* cu, llvm::CallInst* call_inst, OpSize size,
+ bool is_wide, bool is_obj)
{
- DCHECK_EQ(callInst->getNumArgOperands(), 4U);
- llvm::ConstantInt* optFlags =
- llvm::dyn_cast<llvm::ConstantInt>(callInst->getArgOperand(0));
- RegLocation rlSrc = GetLoc(cUnit, callInst->getArgOperand(1));
- RegLocation rlObj = GetLoc(cUnit, callInst->getArgOperand(2));
- llvm::ConstantInt* fieldIdx =
- llvm::dyn_cast<llvm::ConstantInt>(callInst->getArgOperand(3));
- GenIPut(cUnit, fieldIdx->getZExtValue(), optFlags->getZExtValue(),
- size, rlSrc, rlObj, isWide, isObj);
+ DCHECK_EQ(call_inst->getNumArgOperands(), 4U);
+ llvm::ConstantInt* opt_flags =
+ llvm::dyn_cast<llvm::ConstantInt>(call_inst->getArgOperand(0));
+ RegLocation rl_src = GetLoc(cu, call_inst->getArgOperand(1));
+ RegLocation rl_obj = GetLoc(cu, call_inst->getArgOperand(2));
+ llvm::ConstantInt* field_idx =
+ llvm::dyn_cast<llvm::ConstantInt>(call_inst->getArgOperand(3));
+ GenIPut(cu, field_idx->getZExtValue(), opt_flags->getZExtValue(),
+ size, rl_src, rl_obj, is_wide, is_obj);
}
-static void CvtCheckCast(CompilationUnit* cUnit, llvm::CallInst* callInst)
+static void CvtCheckCast(CompilationUnit* cu, llvm::CallInst* call_inst)
{
- DCHECK_EQ(callInst->getNumArgOperands(), 2U);
- llvm::ConstantInt* typeIdx =
- llvm::dyn_cast<llvm::ConstantInt>(callInst->getArgOperand(0));
- RegLocation rlSrc = GetLoc(cUnit, callInst->getArgOperand(1));
- GenCheckCast(cUnit, typeIdx->getZExtValue(), rlSrc);
+ DCHECK_EQ(call_inst->getNumArgOperands(), 2U);
+ llvm::ConstantInt* type_idx =
+ llvm::dyn_cast<llvm::ConstantInt>(call_inst->getArgOperand(0));
+ RegLocation rl_src = GetLoc(cu, call_inst->getArgOperand(1));
+ GenCheckCast(cu, type_idx->getZExtValue(), rl_src);
}
-static void CvtFPCompare(CompilationUnit* cUnit, llvm::CallInst* callInst,
+static void CvtFPCompare(CompilationUnit* cu, llvm::CallInst* call_inst,
Instruction::Code opcode)
{
- RegLocation rlSrc1 = GetLoc(cUnit, callInst->getArgOperand(0));
- RegLocation rlSrc2 = GetLoc(cUnit, callInst->getArgOperand(1));
- RegLocation rlDest = GetLoc(cUnit, callInst);
- GenCmpFP(cUnit, opcode, rlDest, rlSrc1, rlSrc2);
+ RegLocation rl_src1 = GetLoc(cu, call_inst->getArgOperand(0));
+ RegLocation rl_src2 = GetLoc(cu, call_inst->getArgOperand(1));
+ RegLocation rl_dest = GetLoc(cu, call_inst);
+ GenCmpFP(cu, opcode, rl_dest, rl_src1, rl_src2);
}
-static void CvtLongCompare(CompilationUnit* cUnit, llvm::CallInst* callInst)
+static void CvtLongCompare(CompilationUnit* cu, llvm::CallInst* call_inst)
{
- RegLocation rlSrc1 = GetLoc(cUnit, callInst->getArgOperand(0));
- RegLocation rlSrc2 = GetLoc(cUnit, callInst->getArgOperand(1));
- RegLocation rlDest = GetLoc(cUnit, callInst);
- GenCmpLong(cUnit, rlDest, rlSrc1, rlSrc2);
+ RegLocation rl_src1 = GetLoc(cu, call_inst->getArgOperand(0));
+ RegLocation rl_src2 = GetLoc(cu, call_inst->getArgOperand(1));
+ RegLocation rl_dest = GetLoc(cu, call_inst);
+ GenCmpLong(cu, rl_dest, rl_src1, rl_src2);
}
-static void CvtSwitch(CompilationUnit* cUnit, llvm::Instruction* inst)
+static void CvtSwitch(CompilationUnit* cu, llvm::Instruction* inst)
{
- llvm::SwitchInst* swInst = llvm::dyn_cast<llvm::SwitchInst>(inst);
- DCHECK(swInst != NULL);
- llvm::Value* testVal = swInst->getCondition();
- llvm::MDNode* tableOffsetNode = swInst->getMetadata("SwitchTable");
- DCHECK(tableOffsetNode != NULL);
- llvm::ConstantInt* tableOffsetValue =
- static_cast<llvm::ConstantInt*>(tableOffsetNode->getOperand(0));
- int32_t tableOffset = tableOffsetValue->getSExtValue();
- RegLocation rlSrc = GetLoc(cUnit, testVal);
- const uint16_t* table = cUnit->insns + cUnit->currentDalvikOffset + tableOffset;
- uint16_t tableMagic = *table;
- if (tableMagic == 0x100) {
- GenPackedSwitch(cUnit, tableOffset, rlSrc);
+ llvm::SwitchInst* sw_inst = llvm::dyn_cast<llvm::SwitchInst>(inst);
+ DCHECK(sw_inst != NULL);
+ llvm::Value* test_val = sw_inst->getCondition();
+ llvm::MDNode* table_offset_node = sw_inst->getMetadata("SwitchTable");
+ DCHECK(table_offset_node != NULL);
+ llvm::ConstantInt* table_offset_value =
+ static_cast<llvm::ConstantInt*>(table_offset_node->getOperand(0));
+ int32_t table_offset = table_offset_value->getSExtValue();
+ RegLocation rl_src = GetLoc(cu, test_val);
+ const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
+ uint16_t table_magic = *table;
+ if (table_magic == 0x100) {
+ GenPackedSwitch(cu, table_offset, rl_src);
} else {
- DCHECK_EQ(tableMagic, 0x200);
- GenSparseSwitch(cUnit, tableOffset, rlSrc);
+ DCHECK_EQ(table_magic, 0x200);
+ GenSparseSwitch(cu, table_offset, rl_src);
}
}
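The magic words tested above are the Dex switch-payload idents: 0x0100 selects a packed table (contiguous keys), 0x0200 a sparse one (parallel key/target arrays). A minimal layout sketch; the struct and field names are illustrative, not from this CL:

    #include <cstdint>
    struct PackedSwitchPayload {   // ident == 0x0100
      uint16_t ident;
      uint16_t size;               // number of targets
      int32_t  first_key;          // keys are first_key .. first_key+size-1
      // followed by int32_t targets[size];
    };
    struct SparseSwitchPayload {   // ident == 0x0200
      uint16_t ident;
      uint16_t size;
      // followed by int32_t keys[size];  (sorted)
      // followed by int32_t targets[size];
    };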
-static void CvtInvoke(CompilationUnit* cUnit, llvm::CallInst* callInst, bool isVoid,
- bool isFilledNewArray)
+static void CvtInvoke(CompilationUnit* cu, llvm::CallInst* call_inst, bool is_void,
+ bool is_filled_new_array)
{
- CallInfo* info = static_cast<CallInfo*>(NewMem(cUnit, sizeof(CallInfo), true, kAllocMisc));
- if (isVoid) {
+ CallInfo* info = static_cast<CallInfo*>(NewMem(cu, sizeof(CallInfo), true, kAllocMisc));
+ if (is_void) {
info->result.location = kLocInvalid;
} else {
- info->result = GetLoc(cUnit, callInst);
+ info->result = GetLoc(cu, call_inst);
}
- llvm::ConstantInt* invokeTypeVal =
- llvm::dyn_cast<llvm::ConstantInt>(callInst->getArgOperand(0));
- llvm::ConstantInt* methodIndexVal =
- llvm::dyn_cast<llvm::ConstantInt>(callInst->getArgOperand(1));
- llvm::ConstantInt* optFlagsVal =
- llvm::dyn_cast<llvm::ConstantInt>(callInst->getArgOperand(2));
- info->type = static_cast<InvokeType>(invokeTypeVal->getZExtValue());
- info->index = methodIndexVal->getZExtValue();
- info->optFlags = optFlagsVal->getZExtValue();
- info->offset = cUnit->currentDalvikOffset;
+ llvm::ConstantInt* invoke_type_val =
+ llvm::dyn_cast<llvm::ConstantInt>(call_inst->getArgOperand(0));
+ llvm::ConstantInt* method_index_val =
+ llvm::dyn_cast<llvm::ConstantInt>(call_inst->getArgOperand(1));
+ llvm::ConstantInt* opt_flags_val =
+ llvm::dyn_cast<llvm::ConstantInt>(call_inst->getArgOperand(2));
+ info->type = static_cast<InvokeType>(invoke_type_val->getZExtValue());
+ info->index = method_index_val->getZExtValue();
+ info->opt_flags = opt_flags_val->getZExtValue();
+ info->offset = cu->current_dalvik_offset;
// Count the argument words, and then build argument array.
- info->numArgWords = 0;
- for (unsigned int i = 3; i < callInst->getNumArgOperands(); i++) {
- RegLocation tLoc = GetLoc(cUnit, callInst->getArgOperand(i));
- info->numArgWords += tLoc.wide ? 2 : 1;
+ info->num_arg_words = 0;
+ for (unsigned int i = 3; i < call_inst->getNumArgOperands(); i++) {
+ RegLocation t_loc = GetLoc(cu, call_inst->getArgOperand(i));
+ info->num_arg_words += t_loc.wide ? 2 : 1;
}
- info->args = (info->numArgWords == 0) ? NULL : static_cast<RegLocation*>
- (NewMem(cUnit, sizeof(RegLocation) * info->numArgWords, false, kAllocMisc));
+ info->args = (info->num_arg_words == 0) ? NULL : static_cast<RegLocation*>
+ (NewMem(cu, sizeof(RegLocation) * info->num_arg_words, false, kAllocMisc));
// Now, fill in the location records, synthesizing high loc of wide vals
- for (int i = 3, next = 0; next < info->numArgWords;) {
- info->args[next] = GetLoc(cUnit, callInst->getArgOperand(i++));
+ for (int i = 3, next = 0; next < info->num_arg_words;) {
+ info->args[next] = GetLoc(cu, call_inst->getArgOperand(i++));
if (info->args[next].wide) {
next++;
// TODO: Might make sense to mark this as an invalid loc
- info->args[next].origSReg = info->args[next-1].origSReg+1;
- info->args[next].sRegLow = info->args[next-1].sRegLow+1;
+ info->args[next].orig_sreg = info->args[next-1].orig_sreg+1;
+ info->args[next].s_reg_low = info->args[next-1].s_reg_low+1;
}
next++;
}
- // TODO - rework such that we no longer need isRange
- info->isRange = (info->numArgWords > 5);
+ // TODO - rework such that we no longer need is_range
+ info->is_range = (info->num_arg_words > 5);
- if (isFilledNewArray) {
- GenFilledNewArray(cUnit, info);
+ if (is_filled_new_array) {
+ GenFilledNewArray(cu, info);
} else {
- GenInvoke(cUnit, info);
+ GenInvoke(cu, info);
}
}
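Argument words are counted before the records are built because wide (long/double) values occupy two word slots, and the synthesized high half gets s_reg_low/orig_sreg one past its low half. A worked example for a hypothetical (Object, long, int) argument list:

    #include <cstdio>
    int main() {
      bool wide[] = {false, true, false};   // Object, long, int
      int num_arg_words = 0;
      for (bool w : wide) num_arg_words += w ? 2 : 1;
      // 4 words total; is_range kicks in only above 5
      printf("num_arg_words=%d is_range=%d\n", num_arg_words, num_arg_words > 5);
      return 0;
    }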
/* Look up the RegLocation associated with a Value. Must already be defined */
-static RegLocation ValToLoc(CompilationUnit* cUnit, llvm::Value* val)
+static RegLocation ValToLoc(CompilationUnit* cu, llvm::Value* val)
{
- SafeMap<llvm::Value*, RegLocation>::iterator it = cUnit->locMap.find(val);
- DCHECK(it != cUnit->locMap.end()) << "Missing definition";
+ SafeMap<llvm::Value*, RegLocation>::iterator it = cu->loc_map.find(val);
+ DCHECK(it != cu->loc_map.end()) << "Missing definition";
return it->second;
}
-static bool BitcodeBlockCodeGen(CompilationUnit* cUnit, llvm::BasicBlock* bb)
+static bool BitcodeBlockCodeGen(CompilationUnit* cu, llvm::BasicBlock* bb)
{
- while (cUnit->llvmBlocks.find(bb) == cUnit->llvmBlocks.end()) {
- llvm::BasicBlock* nextBB = NULL;
- cUnit->llvmBlocks.insert(bb);
- bool isEntry = (bb == &cUnit->func->getEntryBlock());
+ while (cu->llvm_blocks.find(bb) == cu->llvm_blocks.end()) {
+ llvm::BasicBlock* next_bb = NULL;
+ cu->llvm_blocks.insert(bb);
+ bool is_entry = (bb == &cu->func->getEntryBlock());
// Define the starting label
- LIR* blockLabel = cUnit->blockToLabelMap.Get(bb);
+ LIR* block_label = cu->block_to_label_map.Get(bb);
// Extract the type and starting offset from the block's name
- char blockType = kInvalidBlock;
- if (isEntry) {
- blockType = kNormalBlock;
- blockLabel->operands[0] = 0;
+ char block_type = kInvalidBlock;
+ if (is_entry) {
+ block_type = kNormalBlock;
+ block_label->operands[0] = 0;
} else if (!bb->hasName()) {
- blockType = kNormalBlock;
- blockLabel->operands[0] = DexFile::kDexNoIndex;
+ block_type = kNormalBlock;
+ block_label->operands[0] = DexFile::kDexNoIndex;
} else {
- std::string blockName = bb->getName().str();
+ std::string block_name = bb->getName().str();
int dummy;
- sscanf(blockName.c_str(), kLabelFormat, &blockType, &blockLabel->operands[0], &dummy);
- cUnit->currentDalvikOffset = blockLabel->operands[0];
+ sscanf(block_name.c_str(), kLabelFormat, &block_type, &block_label->operands[0], &dummy);
+ cu->current_dalvik_offset = block_label->operands[0];
}
- DCHECK((blockType == kNormalBlock) || (blockType == kCatchBlock));
- cUnit->currentDalvikOffset = blockLabel->operands[0];
+ DCHECK((block_type == kNormalBlock) || (block_type == kCatchBlock));
+ cu->current_dalvik_offset = block_label->operands[0];
// Set the label kind
- blockLabel->opcode = kPseudoNormalBlockLabel;
+ block_label->opcode = kPseudoNormalBlockLabel;
// Insert the label
- AppendLIR(cUnit, blockLabel);
+ AppendLIR(cu, block_label);
- LIR* headLIR = NULL;
+ LIR* head_lir = NULL;
- if (blockType == kCatchBlock) {
- headLIR = NewLIR0(cUnit, kPseudoExportedPC);
+ if (block_type == kCatchBlock) {
+ head_lir = NewLIR0(cu, kPseudoExportedPC);
}
 // Free temp registers and reset redundant store tracking
- ResetRegPool(cUnit);
- ResetDefTracking(cUnit);
+ ResetRegPool(cu);
+ ResetDefTracking(cu);
 // TODO: restore oat incoming liveness optimization
- ClobberAllRegs(cUnit);
+ ClobberAllRegs(cu);
- if (isEntry) {
+ if (is_entry) {
RegLocation* ArgLocs = static_cast<RegLocation*>
- (NewMem(cUnit, sizeof(RegLocation) * cUnit->numIns, true, kAllocMisc));
- llvm::Function::arg_iterator it(cUnit->func->arg_begin());
- llvm::Function::arg_iterator it_end(cUnit->func->arg_end());
+ (NewMem(cu, sizeof(RegLocation) * cu->num_ins, true, kAllocMisc));
+ llvm::Function::arg_iterator it(cu->func->arg_begin());
+ llvm::Function::arg_iterator it_end(cu->func->arg_end());
// Skip past Method*
it++;
for (unsigned i = 0; it != it_end; ++it) {
llvm::Value* val = it;
- ArgLocs[i++] = ValToLoc(cUnit, val);
+ ArgLocs[i++] = ValToLoc(cu, val);
llvm::Type* ty = val->getType();
- if ((ty == cUnit->irb->getInt64Ty()) || (ty == cUnit->irb->getDoubleTy())) {
+ if ((ty == cu->irb->getInt64Ty()) || (ty == cu->irb->getDoubleTy())) {
ArgLocs[i] = ArgLocs[i-1];
- ArgLocs[i].lowReg = ArgLocs[i].highReg;
- ArgLocs[i].origSReg++;
- ArgLocs[i].sRegLow = INVALID_SREG;
- ArgLocs[i].highWord = true;
+ ArgLocs[i].low_reg = ArgLocs[i].high_reg;
+ ArgLocs[i].orig_sreg++;
+ ArgLocs[i].s_reg_low = INVALID_SREG;
+ ArgLocs[i].high_word = true;
i++;
}
}
- GenEntrySequence(cUnit, ArgLocs, cUnit->methodLoc);
+ GenEntrySequence(cu, ArgLocs, cu->method_loc);
}
// Visit all of the instructions in the block
for (llvm::BasicBlock::iterator it = bb->begin(), e = bb->end(); it != e;) {
llvm::Instruction* inst = it;
- llvm::BasicBlock::iterator nextIt = ++it;
+ llvm::BasicBlock::iterator next_it = ++it;
// Extract the Dalvik offset from the instruction
uint32_t opcode = inst->getOpcode();
- llvm::MDNode* dexOffsetNode = inst->getMetadata("DexOff");
- if (dexOffsetNode != NULL) {
- llvm::ConstantInt* dexOffsetValue =
- static_cast<llvm::ConstantInt*>(dexOffsetNode->getOperand(0));
- cUnit->currentDalvikOffset = dexOffsetValue->getZExtValue();
+ llvm::MDNode* dex_offset_node = inst->getMetadata("DexOff");
+ if (dex_offset_node != NULL) {
+ llvm::ConstantInt* dex_offset_value =
+ static_cast<llvm::ConstantInt*>(dex_offset_node->getOperand(0));
+ cu->current_dalvik_offset = dex_offset_value->getZExtValue();
}
- ResetRegPool(cUnit);
- if (cUnit->disableOpt & (1 << kTrackLiveTemps)) {
- ClobberAllRegs(cUnit);
+ ResetRegPool(cu);
+ if (cu->disable_opt & (1 << kTrackLiveTemps)) {
+ ClobberAllRegs(cu);
}
- if (cUnit->disableOpt & (1 << kSuppressLoads)) {
- ResetDefTracking(cUnit);
+ if (cu->disable_opt & (1 << kSuppressLoads)) {
+ ResetDefTracking(cu);
}
#ifndef NDEBUG
/* Reset temp tracking sanity check */
- cUnit->liveSReg = INVALID_SREG;
+ cu->live_sreg = INVALID_SREG;
#endif
// TODO: use llvm opcode name here instead of "boundary" if verbose
- LIR* boundaryLIR = MarkBoundary(cUnit, cUnit->currentDalvikOffset, "boundary");
+ LIR* boundary_lir = MarkBoundary(cu, cu->current_dalvik_offset, "boundary");
 /* Remember the first LIR for this block */
- if (headLIR == NULL) {
- headLIR = boundaryLIR;
- headLIR->defMask = ENCODE_ALL;
+ if (head_lir == NULL) {
+ head_lir = boundary_lir;
+ head_lir->def_mask = ENCODE_ALL;
}
switch(opcode) {
case llvm::Instruction::ICmp: {
- llvm::Instruction* nextInst = nextIt;
- llvm::BranchInst* brInst = llvm::dyn_cast<llvm::BranchInst>(nextInst);
- if (brInst != NULL /* and... */) {
- CvtICmpBr(cUnit, inst, brInst);
+ llvm::Instruction* next_inst = next_it;
+ llvm::BranchInst* br_inst = llvm::dyn_cast<llvm::BranchInst>(next_inst);
+ if (br_inst != NULL /* and... */) {
+ CvtICmpBr(cu, inst, br_inst);
++it;
} else {
- CvtICmp(cUnit, inst);
+ CvtICmp(cu, inst);
}
}
break;
case llvm::Instruction::Call: {
- llvm::CallInst* callInst = llvm::dyn_cast<llvm::CallInst>(inst);
- llvm::Function* callee = callInst->getCalledFunction();
+ llvm::CallInst* call_inst = llvm::dyn_cast<llvm::CallInst>(inst);
+ llvm::Function* callee = call_inst->getCalledFunction();
greenland::IntrinsicHelper::IntrinsicId id =
- cUnit->intrinsic_helper->GetIntrinsicId(callee);
+ cu->intrinsic_helper->GetIntrinsicId(callee);
switch (id) {
case greenland::IntrinsicHelper::AllocaShadowFrame:
case greenland::IntrinsicHelper::SetShadowFrameEntry:
@@ -2996,59 +2996,59 @@
case greenland::IntrinsicHelper::CopyFloat:
case greenland::IntrinsicHelper::CopyLong:
case greenland::IntrinsicHelper::CopyDouble:
- CvtCopy(cUnit, callInst);
+ CvtCopy(cu, call_inst);
break;
case greenland::IntrinsicHelper::ConstInt:
case greenland::IntrinsicHelper::ConstObj:
case greenland::IntrinsicHelper::ConstLong:
case greenland::IntrinsicHelper::ConstFloat:
case greenland::IntrinsicHelper::ConstDouble:
- CvtConst(cUnit, callInst);
+ CvtConst(cu, call_inst);
break;
case greenland::IntrinsicHelper::DivInt:
case greenland::IntrinsicHelper::DivLong:
- CvtBinOp(cUnit, kOpDiv, inst);
+ CvtBinOp(cu, kOpDiv, inst);
break;
case greenland::IntrinsicHelper::RemInt:
case greenland::IntrinsicHelper::RemLong:
- CvtBinOp(cUnit, kOpRem, inst);
+ CvtBinOp(cu, kOpRem, inst);
break;
case greenland::IntrinsicHelper::MethodInfo:
// Already dealt with - just ignore it here.
break;
case greenland::IntrinsicHelper::CheckSuspend:
- GenSuspendTest(cUnit, 0 /* optFlags already applied */);
+ GenSuspendTest(cu, 0 /* opt_flags already applied */);
break;
case greenland::IntrinsicHelper::HLInvokeObj:
case greenland::IntrinsicHelper::HLInvokeFloat:
case greenland::IntrinsicHelper::HLInvokeDouble:
case greenland::IntrinsicHelper::HLInvokeLong:
case greenland::IntrinsicHelper::HLInvokeInt:
- CvtInvoke(cUnit, callInst, false /* isVoid */, false /* newArray */);
+ CvtInvoke(cu, call_inst, false /* is_void */, false /* new_array */);
break;
case greenland::IntrinsicHelper::HLInvokeVoid:
- CvtInvoke(cUnit, callInst, true /* isVoid */, false /* newArray */);
+ CvtInvoke(cu, call_inst, true /* is_void */, false /* new_array */);
break;
case greenland::IntrinsicHelper::HLFilledNewArray:
- CvtInvoke(cUnit, callInst, false /* isVoid */, true /* newArray */);
+ CvtInvoke(cu, call_inst, false /* is_void */, true /* new_array */);
break;
case greenland::IntrinsicHelper::HLFillArrayData:
- CvtFillArrayData(cUnit, callInst);
+ CvtFillArrayData(cu, call_inst);
break;
case greenland::IntrinsicHelper::ConstString:
- CvtConstObject(cUnit, callInst, true /* isString */);
+ CvtConstObject(cu, call_inst, true /* is_string */);
break;
case greenland::IntrinsicHelper::ConstClass:
- CvtConstObject(cUnit, callInst, false /* isString */);
+ CvtConstObject(cu, call_inst, false /* is_string */);
break;
case greenland::IntrinsicHelper::HLCheckCast:
- CvtCheckCast(cUnit, callInst);
+ CvtCheckCast(cu, call_inst);
break;
case greenland::IntrinsicHelper::NewInstance:
- CvtNewInstance(cUnit, callInst);
+ CvtNewInstance(cu, call_inst);
break;
case greenland::IntrinsicHelper::HLSgetObject:
- CvtSget(cUnit, callInst, false /* wide */, true /* Object */);
+ CvtSget(cu, call_inst, false /* wide */, true /* Object */);
break;
case greenland::IntrinsicHelper::HLSget:
case greenland::IntrinsicHelper::HLSgetFloat:
@@ -3056,11 +3056,11 @@
case greenland::IntrinsicHelper::HLSgetByte:
case greenland::IntrinsicHelper::HLSgetChar:
case greenland::IntrinsicHelper::HLSgetShort:
- CvtSget(cUnit, callInst, false /* wide */, false /* Object */);
+ CvtSget(cu, call_inst, false /* wide */, false /* Object */);
break;
case greenland::IntrinsicHelper::HLSgetWide:
case greenland::IntrinsicHelper::HLSgetDouble:
- CvtSget(cUnit, callInst, true /* wide */, false /* Object */);
+ CvtSget(cu, call_inst, true /* wide */, false /* Object */);
break;
case greenland::IntrinsicHelper::HLSput:
case greenland::IntrinsicHelper::HLSputFloat:
@@ -3068,245 +3068,245 @@
case greenland::IntrinsicHelper::HLSputByte:
case greenland::IntrinsicHelper::HLSputChar:
case greenland::IntrinsicHelper::HLSputShort:
- CvtSput(cUnit, callInst, false /* wide */, false /* Object */);
+ CvtSput(cu, call_inst, false /* wide */, false /* Object */);
break;
case greenland::IntrinsicHelper::HLSputWide:
case greenland::IntrinsicHelper::HLSputDouble:
- CvtSput(cUnit, callInst, true /* wide */, false /* Object */);
+ CvtSput(cu, call_inst, true /* wide */, false /* Object */);
break;
case greenland::IntrinsicHelper::HLSputObject:
- CvtSput(cUnit, callInst, false /* wide */, true /* Object */);
+ CvtSput(cu, call_inst, false /* wide */, true /* Object */);
break;
case greenland::IntrinsicHelper::GetException:
- CvtMoveException(cUnit, callInst);
+ CvtMoveException(cu, call_inst);
break;
case greenland::IntrinsicHelper::HLThrowException:
- CvtThrow(cUnit, callInst);
+ CvtThrow(cu, call_inst);
break;
case greenland::IntrinsicHelper::MonitorEnter:
- CvtMonitorEnterExit(cUnit, true /* isEnter */, callInst);
+ CvtMonitorEnterExit(cu, true /* is_enter */, call_inst);
break;
case greenland::IntrinsicHelper::MonitorExit:
- CvtMonitorEnterExit(cUnit, false /* isEnter */, callInst);
+ CvtMonitorEnterExit(cu, false /* is_enter */, call_inst);
break;
case greenland::IntrinsicHelper::OptArrayLength:
- CvtArrayLength(cUnit, callInst);
+ CvtArrayLength(cu, call_inst);
break;
case greenland::IntrinsicHelper::NewArray:
- CvtNewArray(cUnit, callInst);
+ CvtNewArray(cu, call_inst);
break;
case greenland::IntrinsicHelper::InstanceOf:
- CvtInstanceOf(cUnit, callInst);
+ CvtInstanceOf(cu, call_inst);
break;
case greenland::IntrinsicHelper::HLArrayGet:
case greenland::IntrinsicHelper::HLArrayGetObject:
case greenland::IntrinsicHelper::HLArrayGetFloat:
- CvtAget(cUnit, callInst, kWord, 2);
+ CvtAget(cu, call_inst, kWord, 2);
break;
case greenland::IntrinsicHelper::HLArrayGetWide:
case greenland::IntrinsicHelper::HLArrayGetDouble:
- CvtAget(cUnit, callInst, kLong, 3);
+ CvtAget(cu, call_inst, kLong, 3);
break;
case greenland::IntrinsicHelper::HLArrayGetBoolean:
- CvtAget(cUnit, callInst, kUnsignedByte, 0);
+ CvtAget(cu, call_inst, kUnsignedByte, 0);
break;
case greenland::IntrinsicHelper::HLArrayGetByte:
- CvtAget(cUnit, callInst, kSignedByte, 0);
+ CvtAget(cu, call_inst, kSignedByte, 0);
break;
case greenland::IntrinsicHelper::HLArrayGetChar:
- CvtAget(cUnit, callInst, kUnsignedHalf, 1);
+ CvtAget(cu, call_inst, kUnsignedHalf, 1);
break;
case greenland::IntrinsicHelper::HLArrayGetShort:
- CvtAget(cUnit, callInst, kSignedHalf, 1);
+ CvtAget(cu, call_inst, kSignedHalf, 1);
break;
case greenland::IntrinsicHelper::HLArrayPut:
case greenland::IntrinsicHelper::HLArrayPutFloat:
- CvtAputPrimitive(cUnit, callInst, kWord, 2);
+ CvtAputPrimitive(cu, call_inst, kWord, 2);
break;
case greenland::IntrinsicHelper::HLArrayPutObject:
- CvtAputObj(cUnit, callInst);
+ CvtAputObj(cu, call_inst);
break;
case greenland::IntrinsicHelper::HLArrayPutWide:
case greenland::IntrinsicHelper::HLArrayPutDouble:
- CvtAputPrimitive(cUnit, callInst, kLong, 3);
+ CvtAputPrimitive(cu, call_inst, kLong, 3);
break;
case greenland::IntrinsicHelper::HLArrayPutBoolean:
- CvtAputPrimitive(cUnit, callInst, kUnsignedByte, 0);
+ CvtAputPrimitive(cu, call_inst, kUnsignedByte, 0);
break;
case greenland::IntrinsicHelper::HLArrayPutByte:
- CvtAputPrimitive(cUnit, callInst, kSignedByte, 0);
+ CvtAputPrimitive(cu, call_inst, kSignedByte, 0);
break;
case greenland::IntrinsicHelper::HLArrayPutChar:
- CvtAputPrimitive(cUnit, callInst, kUnsignedHalf, 1);
+ CvtAputPrimitive(cu, call_inst, kUnsignedHalf, 1);
break;
case greenland::IntrinsicHelper::HLArrayPutShort:
- CvtAputPrimitive(cUnit, callInst, kSignedHalf, 1);
+ CvtAputPrimitive(cu, call_inst, kSignedHalf, 1);
break;
case greenland::IntrinsicHelper::HLIGet:
case greenland::IntrinsicHelper::HLIGetFloat:
- CvtIget(cUnit, callInst, kWord, false /* isWide */, false /* obj */);
+ CvtIget(cu, call_inst, kWord, false /* is_wide */, false /* obj */);
break;
case greenland::IntrinsicHelper::HLIGetObject:
- CvtIget(cUnit, callInst, kWord, false /* isWide */, true /* obj */);
+ CvtIget(cu, call_inst, kWord, false /* is_wide */, true /* obj */);
break;
case greenland::IntrinsicHelper::HLIGetWide:
case greenland::IntrinsicHelper::HLIGetDouble:
- CvtIget(cUnit, callInst, kLong, true /* isWide */, false /* obj */);
+ CvtIget(cu, call_inst, kLong, true /* is_wide */, false /* obj */);
break;
case greenland::IntrinsicHelper::HLIGetBoolean:
- CvtIget(cUnit, callInst, kUnsignedByte, false /* isWide */,
+ CvtIget(cu, call_inst, kUnsignedByte, false /* is_wide */,
false /* obj */);
break;
case greenland::IntrinsicHelper::HLIGetByte:
- CvtIget(cUnit, callInst, kSignedByte, false /* isWide */,
+ CvtIget(cu, call_inst, kSignedByte, false /* is_wide */,
false /* obj */);
break;
case greenland::IntrinsicHelper::HLIGetChar:
- CvtIget(cUnit, callInst, kUnsignedHalf, false /* isWide */,
+ CvtIget(cu, call_inst, kUnsignedHalf, false /* is_wide */,
false /* obj */);
break;
case greenland::IntrinsicHelper::HLIGetShort:
- CvtIget(cUnit, callInst, kSignedHalf, false /* isWide */,
+ CvtIget(cu, call_inst, kSignedHalf, false /* is_wide */,
false /* obj */);
break;
case greenland::IntrinsicHelper::HLIPut:
case greenland::IntrinsicHelper::HLIPutFloat:
- CvtIput(cUnit, callInst, kWord, false /* isWide */, false /* obj */);
+ CvtIput(cu, call_inst, kWord, false /* is_wide */, false /* obj */);
break;
case greenland::IntrinsicHelper::HLIPutObject:
- CvtIput(cUnit, callInst, kWord, false /* isWide */, true /* obj */);
+ CvtIput(cu, call_inst, kWord, false /* is_wide */, true /* obj */);
break;
case greenland::IntrinsicHelper::HLIPutWide:
case greenland::IntrinsicHelper::HLIPutDouble:
- CvtIput(cUnit, callInst, kLong, true /* isWide */, false /* obj */);
+ CvtIput(cu, call_inst, kLong, true /* is_wide */, false /* obj */);
break;
case greenland::IntrinsicHelper::HLIPutBoolean:
- CvtIput(cUnit, callInst, kUnsignedByte, false /* isWide */,
+ CvtIput(cu, call_inst, kUnsignedByte, false /* is_wide */,
false /* obj */);
break;
case greenland::IntrinsicHelper::HLIPutByte:
- CvtIput(cUnit, callInst, kSignedByte, false /* isWide */,
+ CvtIput(cu, call_inst, kSignedByte, false /* is_wide */,
false /* obj */);
break;
case greenland::IntrinsicHelper::HLIPutChar:
- CvtIput(cUnit, callInst, kUnsignedHalf, false /* isWide */,
+ CvtIput(cu, call_inst, kUnsignedHalf, false /* is_wide */,
false /* obj */);
break;
case greenland::IntrinsicHelper::HLIPutShort:
- CvtIput(cUnit, callInst, kSignedHalf, false /* isWide */,
+ CvtIput(cu, call_inst, kSignedHalf, false /* is_wide */,
false /* obj */);
break;
case greenland::IntrinsicHelper::IntToChar:
- CvtIntNarrowing(cUnit, callInst, Instruction::INT_TO_CHAR);
+ CvtIntNarrowing(cu, call_inst, Instruction::INT_TO_CHAR);
break;
case greenland::IntrinsicHelper::IntToShort:
- CvtIntNarrowing(cUnit, callInst, Instruction::INT_TO_SHORT);
+ CvtIntNarrowing(cu, call_inst, Instruction::INT_TO_SHORT);
break;
case greenland::IntrinsicHelper::IntToByte:
- CvtIntNarrowing(cUnit, callInst, Instruction::INT_TO_BYTE);
+ CvtIntNarrowing(cu, call_inst, Instruction::INT_TO_BYTE);
break;
case greenland::IntrinsicHelper::F2I:
case greenland::IntrinsicHelper::D2I:
case greenland::IntrinsicHelper::F2L:
case greenland::IntrinsicHelper::D2L:
- CvtFPToInt(cUnit, callInst);
+ CvtFPToInt(cu, call_inst);
break;
case greenland::IntrinsicHelper::CmplFloat:
- CvtFPCompare(cUnit, callInst, Instruction::CMPL_FLOAT);
+ CvtFPCompare(cu, call_inst, Instruction::CMPL_FLOAT);
break;
case greenland::IntrinsicHelper::CmpgFloat:
- CvtFPCompare(cUnit, callInst, Instruction::CMPG_FLOAT);
+ CvtFPCompare(cu, call_inst, Instruction::CMPG_FLOAT);
break;
case greenland::IntrinsicHelper::CmplDouble:
- CvtFPCompare(cUnit, callInst, Instruction::CMPL_DOUBLE);
+ CvtFPCompare(cu, call_inst, Instruction::CMPL_DOUBLE);
break;
case greenland::IntrinsicHelper::CmpgDouble:
- CvtFPCompare(cUnit, callInst, Instruction::CMPG_DOUBLE);
+ CvtFPCompare(cu, call_inst, Instruction::CMPG_DOUBLE);
break;
case greenland::IntrinsicHelper::CmpLong:
- CvtLongCompare(cUnit, callInst);
+ CvtLongCompare(cu, call_inst);
break;
case greenland::IntrinsicHelper::SHLLong:
- CvtShiftOp(cUnit, Instruction::SHL_LONG, callInst);
+ CvtShiftOp(cu, Instruction::SHL_LONG, call_inst);
break;
case greenland::IntrinsicHelper::SHRLong:
- CvtShiftOp(cUnit, Instruction::SHR_LONG, callInst);
+ CvtShiftOp(cu, Instruction::SHR_LONG, call_inst);
break;
case greenland::IntrinsicHelper::USHRLong:
- CvtShiftOp(cUnit, Instruction::USHR_LONG, callInst);
+ CvtShiftOp(cu, Instruction::USHR_LONG, call_inst);
break;
case greenland::IntrinsicHelper::SHLInt:
- CvtShiftOp(cUnit, Instruction::SHL_INT, callInst);
+ CvtShiftOp(cu, Instruction::SHL_INT, call_inst);
break;
case greenland::IntrinsicHelper::SHRInt:
- CvtShiftOp(cUnit, Instruction::SHR_INT, callInst);
+ CvtShiftOp(cu, Instruction::SHR_INT, call_inst);
break;
case greenland::IntrinsicHelper::USHRInt:
- CvtShiftOp(cUnit, Instruction::USHR_INT, callInst);
+ CvtShiftOp(cu, Instruction::USHR_INT, call_inst);
break;
case greenland::IntrinsicHelper::CatchTargets: {
- llvm::SwitchInst* swInst =
- llvm::dyn_cast<llvm::SwitchInst>(nextIt);
- DCHECK(swInst != NULL);
+ llvm::SwitchInst* sw_inst =
+ llvm::dyn_cast<llvm::SwitchInst>(next_it);
+ DCHECK(sw_inst != NULL);
/*
* Discard the edges and the following conditional branch.
* Do a direct branch to the default target (which is the
* "work" portion of the pair.
* TODO: awful code layout - rework
*/
- llvm::BasicBlock* targetBB = swInst->getDefaultDest();
- DCHECK(targetBB != NULL);
- OpUnconditionalBranch(cUnit,
- cUnit->blockToLabelMap.Get(targetBB));
+ llvm::BasicBlock* target_bb = sw_inst->getDefaultDest();
+ DCHECK(target_bb != NULL);
+ OpUnconditionalBranch(cu,
+ cu->block_to_label_map.Get(target_bb));
++it;
// Set next bb to default target - improves code layout
- nextBB = targetBB;
+ next_bb = target_bb;
}
break;
default:
- LOG(FATAL) << "Unexpected intrinsic " << cUnit->intrinsic_helper->GetName(id);
+ LOG(FATAL) << "Unexpected intrinsic " << cu->intrinsic_helper->GetName(id);
}
}
break;
- case llvm::Instruction::Br: CvtBr(cUnit, inst); break;
- case llvm::Instruction::Add: CvtBinOp(cUnit, kOpAdd, inst); break;
- case llvm::Instruction::Sub: CvtBinOp(cUnit, kOpSub, inst); break;
- case llvm::Instruction::Mul: CvtBinOp(cUnit, kOpMul, inst); break;
- case llvm::Instruction::SDiv: CvtBinOp(cUnit, kOpDiv, inst); break;
- case llvm::Instruction::SRem: CvtBinOp(cUnit, kOpRem, inst); break;
- case llvm::Instruction::And: CvtBinOp(cUnit, kOpAnd, inst); break;
- case llvm::Instruction::Or: CvtBinOp(cUnit, kOpOr, inst); break;
- case llvm::Instruction::Xor: CvtBinOp(cUnit, kOpXor, inst); break;
- case llvm::Instruction::PHI: CvtPhi(cUnit, inst); break;
- case llvm::Instruction::Ret: CvtRet(cUnit, inst); break;
- case llvm::Instruction::FAdd: CvtBinFPOp(cUnit, kOpAdd, inst); break;
- case llvm::Instruction::FSub: CvtBinFPOp(cUnit, kOpSub, inst); break;
- case llvm::Instruction::FMul: CvtBinFPOp(cUnit, kOpMul, inst); break;
- case llvm::Instruction::FDiv: CvtBinFPOp(cUnit, kOpDiv, inst); break;
- case llvm::Instruction::FRem: CvtBinFPOp(cUnit, kOpRem, inst); break;
- case llvm::Instruction::SIToFP: CvtIntToFP(cUnit, inst); break;
- case llvm::Instruction::FPTrunc: CvtDoubleToFloat(cUnit, inst); break;
- case llvm::Instruction::FPExt: CvtFloatToDouble(cUnit, inst); break;
- case llvm::Instruction::Trunc: CvtTrunc(cUnit, inst); break;
+ case llvm::Instruction::Br: CvtBr(cu, inst); break;
+ case llvm::Instruction::Add: CvtBinOp(cu, kOpAdd, inst); break;
+ case llvm::Instruction::Sub: CvtBinOp(cu, kOpSub, inst); break;
+ case llvm::Instruction::Mul: CvtBinOp(cu, kOpMul, inst); break;
+ case llvm::Instruction::SDiv: CvtBinOp(cu, kOpDiv, inst); break;
+ case llvm::Instruction::SRem: CvtBinOp(cu, kOpRem, inst); break;
+ case llvm::Instruction::And: CvtBinOp(cu, kOpAnd, inst); break;
+ case llvm::Instruction::Or: CvtBinOp(cu, kOpOr, inst); break;
+ case llvm::Instruction::Xor: CvtBinOp(cu, kOpXor, inst); break;
+ case llvm::Instruction::PHI: CvtPhi(cu, inst); break;
+ case llvm::Instruction::Ret: CvtRet(cu, inst); break;
+ case llvm::Instruction::FAdd: CvtBinFPOp(cu, kOpAdd, inst); break;
+ case llvm::Instruction::FSub: CvtBinFPOp(cu, kOpSub, inst); break;
+ case llvm::Instruction::FMul: CvtBinFPOp(cu, kOpMul, inst); break;
+ case llvm::Instruction::FDiv: CvtBinFPOp(cu, kOpDiv, inst); break;
+ case llvm::Instruction::FRem: CvtBinFPOp(cu, kOpRem, inst); break;
+ case llvm::Instruction::SIToFP: CvtIntToFP(cu, inst); break;
+ case llvm::Instruction::FPTrunc: CvtDoubleToFloat(cu, inst); break;
+ case llvm::Instruction::FPExt: CvtFloatToDouble(cu, inst); break;
+ case llvm::Instruction::Trunc: CvtTrunc(cu, inst); break;
- case llvm::Instruction::ZExt: CvtIntExt(cUnit, inst, false /* signed */);
+ case llvm::Instruction::ZExt: CvtIntExt(cu, inst, false /* signed */);
break;
- case llvm::Instruction::SExt: CvtIntExt(cUnit, inst, true /* signed */);
+ case llvm::Instruction::SExt: CvtIntExt(cu, inst, true /* signed */);
break;
- case llvm::Instruction::Switch: CvtSwitch(cUnit, inst); break;
+ case llvm::Instruction::Switch: CvtSwitch(cu, inst); break;
case llvm::Instruction::Unreachable:
break; // FIXME: can we really ignore these?
@@ -3351,12 +3351,12 @@
}
}
- if (headLIR != NULL) {
- ApplyLocalOptimizations(cUnit, headLIR, cUnit->lastLIRInsn);
+ if (head_lir != NULL) {
+ ApplyLocalOptimizations(cu, head_lir, cu->last_lir_insn);
}
- if (nextBB != NULL) {
- bb = nextBB;
- nextBB = NULL;
+ if (next_bb != NULL) {
+ bb = next_bb;
+ next_bb = NULL;
}
}
return false;
@@ -3369,45 +3369,45 @@
* o Perform a basic-block optimization pass to remove unnecessary
* store/load sequences.
* o Convert the LLVM Value operands into RegLocations where applicable.
- * o Create ssaRep def/use operand arrays for each converted LLVM opcode
+ * o Create ssa_rep def/use operand arrays for each converted LLVM opcode
* o Perform register promotion
* o Iterate through the graph a basic block at a time, generating
* LIR.
* o Assemble LIR as usual.
* o Profit.
*/
-void MethodBitcode2LIR(CompilationUnit* cUnit)
+void MethodBitcode2LIR(CompilationUnit* cu)
{
- llvm::Function* func = cUnit->func;
- int numBasicBlocks = func->getBasicBlockList().size();
+ llvm::Function* func = cu->func;
+ int num_basic_blocks = func->getBasicBlockList().size();
// Allocate a list for LIR basic block labels
- cUnit->blockLabelList =
- static_cast<LIR*>(NewMem(cUnit, sizeof(LIR) * numBasicBlocks, true, kAllocLIR));
- LIR* labelList = cUnit->blockLabelList;
- int nextLabel = 0;
+ cu->block_label_list =
+ static_cast<LIR*>(NewMem(cu, sizeof(LIR) * num_basic_blocks, true, kAllocLIR));
+ LIR* label_list = cu->block_label_list;
+ int next_label = 0;
for (llvm::Function::iterator i = func->begin(),
e = func->end(); i != e; ++i) {
- cUnit->blockToLabelMap.Put(static_cast<llvm::BasicBlock*>(i),
- &labelList[nextLabel++]);
+ cu->block_to_label_map.Put(static_cast<llvm::BasicBlock*>(i),
+ &label_list[next_label++]);
}
/*
- * Keep honest - clear regLocations, Value => RegLocation,
+ * Keep honest - clear reg_locations, Value => RegLocation,
* promotion map and VmapTables.
*/
- cUnit->locMap.clear(); // Start fresh
- cUnit->regLocation = NULL;
- for (int i = 0; i < cUnit->numDalvikRegisters + cUnit->numCompilerTemps + 1;
+ cu->loc_map.clear(); // Start fresh
+ cu->reg_location = NULL;
+ for (int i = 0; i < cu->num_dalvik_registers + cu->num_compiler_temps + 1;
i++) {
- cUnit->promotionMap[i].coreLocation = kLocDalvikFrame;
- cUnit->promotionMap[i].fpLocation = kLocDalvikFrame;
+ cu->promotion_map[i].core_location = kLocDalvikFrame;
+ cu->promotion_map[i].fp_location = kLocDalvikFrame;
}
- cUnit->coreSpillMask = 0;
- cUnit->numCoreSpills = 0;
- cUnit->fpSpillMask = 0;
- cUnit->numFPSpills = 0;
- cUnit->coreVmapTable.clear();
- cUnit->fpVmapTable.clear();
+ cu->core_spill_mask = 0;
+ cu->num_core_spills = 0;
+ cu->fp_spill_mask = 0;
+ cu->num_fp_spills = 0;
+ cu->core_vmap_table.clear();
+ cu->fp_vmap_table.clear();
/*
* At this point, we've lost all knowledge of register promotion.
@@ -3418,99 +3418,99 @@
*/
for (llvm::inst_iterator i = llvm::inst_begin(func),
e = llvm::inst_end(func); i != e; ++i) {
- llvm::CallInst* callInst = llvm::dyn_cast<llvm::CallInst>(&*i);
- if (callInst != NULL) {
- llvm::Function* callee = callInst->getCalledFunction();
+ llvm::CallInst* call_inst = llvm::dyn_cast<llvm::CallInst>(&*i);
+ if (call_inst != NULL) {
+ llvm::Function* callee = call_inst->getCalledFunction();
greenland::IntrinsicHelper::IntrinsicId id =
- cUnit->intrinsic_helper->GetIntrinsicId(callee);
+ cu->intrinsic_helper->GetIntrinsicId(callee);
if (id == greenland::IntrinsicHelper::MethodInfo) {
- if (cUnit->printMe) {
+ if (cu->verbose) {
LOG(INFO) << "Found MethodInfo";
}
- llvm::MDNode* regInfoNode = callInst->getMetadata("RegInfo");
- if (regInfoNode != NULL) {
- llvm::ConstantInt* numInsValue =
- static_cast<llvm::ConstantInt*>(regInfoNode->getOperand(0));
- llvm::ConstantInt* numRegsValue =
- static_cast<llvm::ConstantInt*>(regInfoNode->getOperand(1));
- llvm::ConstantInt* numOutsValue =
- static_cast<llvm::ConstantInt*>(regInfoNode->getOperand(2));
- llvm::ConstantInt* numCompilerTempsValue =
- static_cast<llvm::ConstantInt*>(regInfoNode->getOperand(3));
- llvm::ConstantInt* numSSARegsValue =
- static_cast<llvm::ConstantInt*>(regInfoNode->getOperand(4));
- if (cUnit->printMe) {
- LOG(INFO) << "RegInfo - Ins:" << numInsValue->getZExtValue()
- << ", Regs:" << numRegsValue->getZExtValue()
- << ", Outs:" << numOutsValue->getZExtValue()
- << ", CTemps:" << numCompilerTempsValue->getZExtValue()
- << ", SSARegs:" << numSSARegsValue->getZExtValue();
+ llvm::MDNode* reg_info_node = call_inst->getMetadata("RegInfo");
+ if (reg_info_node != NULL) {
+ llvm::ConstantInt* num_ins_value =
+ static_cast<llvm::ConstantInt*>(reg_info_node->getOperand(0));
+ llvm::ConstantInt* num_regs_value =
+ static_cast<llvm::ConstantInt*>(reg_info_node->getOperand(1));
+ llvm::ConstantInt* num_outs_value =
+ static_cast<llvm::ConstantInt*>(reg_info_node->getOperand(2));
+ llvm::ConstantInt* num_compiler_temps_value =
+ static_cast<llvm::ConstantInt*>(reg_info_node->getOperand(3));
+ llvm::ConstantInt* num_ssa_regs_value =
+ static_cast<llvm::ConstantInt*>(reg_info_node->getOperand(4));
+ if (cu->verbose) {
+ LOG(INFO) << "RegInfo - Ins:" << num_ins_value->getZExtValue()
+ << ", Regs:" << num_regs_value->getZExtValue()
+ << ", Outs:" << num_outs_value->getZExtValue()
+ << ", CTemps:" << num_compiler_temps_value->getZExtValue()
+ << ", SSARegs:" << num_ssa_regs_value->getZExtValue();
}
}
- llvm::MDNode* pmapInfoNode = callInst->getMetadata("PromotionMap");
- if (pmapInfoNode != NULL) {
- int elems = pmapInfoNode->getNumOperands();
- if (cUnit->printMe) {
+ llvm::MDNode* pmap_info_node = call_inst->getMetadata("PromotionMap");
+ if (pmap_info_node != NULL) {
+ int elems = pmap_info_node->getNumOperands();
+ if (cu->verbose) {
LOG(INFO) << "PMap size: " << elems;
}
for (int i = 0; i < elems; i++) {
- llvm::ConstantInt* rawMapData =
- static_cast<llvm::ConstantInt*>(pmapInfoNode->getOperand(i));
- uint32_t mapData = rawMapData->getZExtValue();
- PromotionMap* p = &cUnit->promotionMap[i];
- p->firstInPair = (mapData >> 24) & 0xff;
- p->FpReg = (mapData >> 16) & 0xff;
- p->coreReg = (mapData >> 8) & 0xff;
- p->fpLocation = static_cast<RegLocationType>((mapData >> 4) & 0xf);
- if (p->fpLocation == kLocPhysReg) {
- RecordFpPromotion(cUnit, p->FpReg, i);
+ llvm::ConstantInt* raw_map_data =
+ static_cast<llvm::ConstantInt*>(pmap_info_node->getOperand(i));
+ uint32_t map_data = raw_map_data->getZExtValue();
+ PromotionMap* p = &cu->promotion_map[i];
+ p->first_in_pair = (map_data >> 24) & 0xff;
+ p->FpReg = (map_data >> 16) & 0xff;
+ p->core_reg = (map_data >> 8) & 0xff;
+ p->fp_location = static_cast<RegLocationType>((map_data >> 4) & 0xf);
+ if (p->fp_location == kLocPhysReg) {
+ RecordFpPromotion(cu, p->FpReg, i);
}
- p->coreLocation = static_cast<RegLocationType>(mapData & 0xf);
- if (p->coreLocation == kLocPhysReg) {
- RecordCorePromotion(cUnit, p->coreReg, i);
+ p->core_location = static_cast<RegLocationType>(map_data & 0xf);
+ if (p->core_location == kLocPhysReg) {
+ RecordCorePromotion(cu, p->core_reg, i);
}
}
- if (cUnit->printMe) {
- DumpPromotionMap(cUnit);
+ if (cu->verbose) {
+ DumpPromotionMap(cu);
}
}
break;
}
}
}
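Each PromotionMap entry arrives as one packed 32-bit metadata word; the loop above peels it apart with the shifts and masks shown. A self-contained decode of a hypothetical word, mirroring those field widths:

    #include <cstdint>
    #include <cstdio>
    int main() {
      uint32_t map_data = 0x01020311u;                   // hypothetical value
      uint32_t first_in_pair = (map_data >> 24) & 0xff;  // 0x01
      uint32_t fp_reg        = (map_data >> 16) & 0xff;  // 0x02
      uint32_t core_reg      = (map_data >> 8) & 0xff;   // 0x03
      uint32_t fp_location   = (map_data >> 4) & 0xf;    // 0x1
      uint32_t core_location = map_data & 0xf;           // 0x1
      printf("%u %u %u %u %u\n", first_in_pair, fp_reg, core_reg,
             fp_location, core_location);
      return 0;
    }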
- AdjustSpillMask(cUnit);
- cUnit->frameSize = ComputeFrameSize(cUnit);
+ AdjustSpillMask(cu);
+ cu->frame_size = ComputeFrameSize(cu);
// Create RegLocations for arguments
- llvm::Function::arg_iterator it(cUnit->func->arg_begin());
- llvm::Function::arg_iterator it_end(cUnit->func->arg_end());
+ llvm::Function::arg_iterator it(cu->func->arg_begin());
+ llvm::Function::arg_iterator it_end(cu->func->arg_end());
for (; it != it_end; ++it) {
llvm::Value* val = it;
- CreateLocFromValue(cUnit, val);
+ CreateLocFromValue(cu, val);
}
 // Create RegLocations for all non-argument definitions
for (llvm::inst_iterator i = llvm::inst_begin(func),
e = llvm::inst_end(func); i != e; ++i) {
llvm::Value* val = &*i;
if (val->hasName() && (val->getName().str().c_str()[0] == 'v')) {
- CreateLocFromValue(cUnit, val);
+ CreateLocFromValue(cu, val);
}
}
// Walk the blocks, generating code.
- for (llvm::Function::iterator i = cUnit->func->begin(),
- e = cUnit->func->end(); i != e; ++i) {
- BitcodeBlockCodeGen(cUnit, static_cast<llvm::BasicBlock*>(i));
+ for (llvm::Function::iterator i = cu->func->begin(),
+ e = cu->func->end(); i != e; ++i) {
+ BitcodeBlockCodeGen(cu, static_cast<llvm::BasicBlock*>(i));
}
- HandleSuspendLaunchPads(cUnit);
+ HandleSuspendLaunchPads(cu);
- HandleThrowLaunchPads(cUnit);
+ HandleThrowLaunchPads(cu);
- HandleIntrinsicLaunchPads(cUnit);
+ HandleIntrinsicLaunchPads(cu);
- cUnit->func->eraseFromParent();
- cUnit->func = NULL;
+ cu->func->eraseFromParent();
+ cu->func = NULL;
}
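
The PromotionMap metadata decoded above packs five fields into a single
32-bit word per Dalvik register. A minimal standalone sketch of that
unpacking, assuming the shift/mask layout shown in the hunk (the struct
and the sample word below are illustrative stand-ins, not the compiler's
types):

#include <cstdint>
#include <cstdio>

// Hypothetical mirror of the fields unpacked above; the real
// PromotionMap uses enum-typed location fields.
struct DecodedPromotion {
  uint8_t first_in_pair;   // bits 31..24
  uint8_t fp_reg;          // bits 23..16
  uint8_t core_reg;        // bits 15..8
  uint8_t fp_location;     // bits 7..4  (RegLocationType)
  uint8_t core_location;   // bits 3..0  (RegLocationType)
};

static DecodedPromotion Decode(uint32_t map_data) {
  DecodedPromotion p;
  p.first_in_pair = (map_data >> 24) & 0xff;
  p.fp_reg        = (map_data >> 16) & 0xff;
  p.core_reg      = (map_data >> 8) & 0xff;
  p.fp_location   = (map_data >> 4) & 0xf;
  p.core_location = map_data & 0xf;
  return p;
}

int main() {
  // Sample word: core reg 5 in bits 15..8, core location 1 in bits 3..0
  // (treating 1 as kLocPhysReg purely for illustration).
  DecodedPromotion p = Decode(0x00000501u);
  std::printf("core_reg=%d core_location=%d\n", p.core_reg, p.core_location);
  return 0;
}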
diff --git a/src/compiler/codegen/method_bitcode.h b/src/compiler/codegen/method_bitcode.h
index 1d6a1c9..df4f4d4 100644
--- a/src/compiler/codegen/method_bitcode.h
+++ b/src/compiler/codegen/method_bitcode.h
@@ -19,8 +19,8 @@
namespace art {
-void MethodMIR2Bitcode(CompilationUnit* cUnit);
-void MethodBitcode2LIR(CompilationUnit* cUnit);
+void MethodMIR2Bitcode(CompilationUnit* cu);
+void MethodBitcode2LIR(CompilationUnit* cu);
} // namespace art
diff --git a/src/compiler/codegen/method_codegen_driver.cc b/src/compiler/codegen/method_codegen_driver.cc
index 3808a35..9f7f692 100644
--- a/src/compiler/codegen/method_codegen_driver.cc
+++ b/src/compiler/codegen/method_codegen_driver.cc
@@ -23,35 +23,35 @@
namespace art {
-// TODO: unify badLoc
-const RegLocation badLoc = {kLocDalvikFrame, 0, 0, 0, 0, 0, 0, 0, 0,
+// TODO: unify bad_loc
+const RegLocation bad_loc = {kLocDalvikFrame, 0, 0, 0, 0, 0, 0, 0, 0,
INVALID_REG, INVALID_REG, INVALID_SREG,
INVALID_SREG};
/* Mark register usage state and return a wide (long/double) return location */
-RegLocation GetReturnWide(CompilationUnit* cUnit, bool isDouble)
+RegLocation GetReturnWide(CompilationUnit* cu, bool is_double)
{
RegLocation gpr_res = LocCReturnWide();
RegLocation fpr_res = LocCReturnDouble();
- RegLocation res = isDouble ? fpr_res : gpr_res;
- Clobber(cUnit, res.lowReg);
- Clobber(cUnit, res.highReg);
- LockTemp(cUnit, res.lowReg);
- LockTemp(cUnit, res.highReg);
- MarkPair(cUnit, res.lowReg, res.highReg);
+ RegLocation res = is_double ? fpr_res : gpr_res;
+ Clobber(cu, res.low_reg);
+ Clobber(cu, res.high_reg);
+ LockTemp(cu, res.low_reg);
+ LockTemp(cu, res.high_reg);
+ MarkPair(cu, res.low_reg, res.high_reg);
return res;
}
-RegLocation GetReturn(CompilationUnit* cUnit, bool isFloat)
+RegLocation GetReturn(CompilationUnit* cu, bool is_float)
{
RegLocation gpr_res = LocCReturn();
RegLocation fpr_res = LocCReturnFloat();
- RegLocation res = isFloat ? fpr_res : gpr_res;
- Clobber(cUnit, res.lowReg);
- if (cUnit->instructionSet == kMips) {
- MarkInUse(cUnit, res.lowReg);
+ RegLocation res = is_float ? fpr_res : gpr_res;
+ Clobber(cu, res.low_reg);
+ if (cu->instruction_set == kMips) {
+ MarkInUse(cu, res.low_reg);
} else {
- LockTemp(cUnit, res.lowReg);
+ LockTemp(cu, res.low_reg);
}
return res;
}
@@ -61,53 +61,53 @@
* load/store utilities here, or target-dependent genXX() handlers
* when necessary.
*/
-static bool CompileDalvikInstruction(CompilationUnit* cUnit, MIR* mir, BasicBlock* bb,
- LIR* labelList)
+static bool CompileDalvikInstruction(CompilationUnit* cu, MIR* mir, BasicBlock* bb,
+ LIR* label_list)
{
bool res = false; // Assume success
- RegLocation rlSrc[3];
- RegLocation rlDest = badLoc;
- RegLocation rlResult = badLoc;
+ RegLocation rl_src[3];
+ RegLocation rl_dest = bad_loc;
+ RegLocation rl_result = bad_loc;
Instruction::Code opcode = mir->dalvikInsn.opcode;
- int optFlags = mir->optimizationFlags;
+ int opt_flags = mir->optimization_flags;
uint32_t vB = mir->dalvikInsn.vB;
uint32_t vC = mir->dalvikInsn.vC;
/* Prep Src and Dest locations */
- int nextSreg = 0;
- int nextLoc = 0;
- int attrs = oatDataFlowAttributes[opcode];
- rlSrc[0] = rlSrc[1] = rlSrc[2] = badLoc;
+ int next_sreg = 0;
+ int next_loc = 0;
+ int attrs = oat_data_flow_attributes[opcode];
+ rl_src[0] = rl_src[1] = rl_src[2] = bad_loc;
if (attrs & DF_UA) {
if (attrs & DF_A_WIDE) {
- rlSrc[nextLoc++] = GetSrcWide(cUnit, mir, nextSreg);
- nextSreg+= 2;
+ rl_src[next_loc++] = GetSrcWide(cu, mir, next_sreg);
+ next_sreg += 2;
} else {
- rlSrc[nextLoc++] = GetSrc(cUnit, mir, nextSreg);
- nextSreg++;
+ rl_src[next_loc++] = GetSrc(cu, mir, next_sreg);
+ next_sreg++;
}
}
if (attrs & DF_UB) {
if (attrs & DF_B_WIDE) {
- rlSrc[nextLoc++] = GetSrcWide(cUnit, mir, nextSreg);
- nextSreg+= 2;
+ rl_src[next_loc++] = GetSrcWide(cu, mir, next_sreg);
+ next_sreg+= 2;
} else {
- rlSrc[nextLoc++] = GetSrc(cUnit, mir, nextSreg);
- nextSreg++;
+ rl_src[next_loc++] = GetSrc(cu, mir, next_sreg);
+ next_sreg++;
}
}
if (attrs & DF_UC) {
if (attrs & DF_C_WIDE) {
- rlSrc[nextLoc++] = GetSrcWide(cUnit, mir, nextSreg);
+ rl_src[next_loc++] = GetSrcWide(cu, mir, next_sreg);
} else {
- rlSrc[nextLoc++] = GetSrc(cUnit, mir, nextSreg);
+ rl_src[next_loc++] = GetSrc(cu, mir, next_sreg);
}
}
if (attrs & DF_DA) {
if (attrs & DF_A_WIDE) {
- rlDest = GetDestWide(cUnit, mir);
+ rl_dest = GetDestWide(cu, mir);
} else {
- rlDest = GetDest(cUnit, mir);
+ rl_dest = GetDest(cu, mir);
}
}
switch (opcode) {
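
Before the big opcode switch, the DF_U* prologue above consumes the
instruction's uses in order: one rl_src slot per use, plus one or two SSA
names depending on the *_WIDE bit. A self-contained sketch of that
bookkeeping (the flag values and function below are stand-ins; the real
attribute table is oat_data_flow_attributes):

#include <cstdio>

// Stand-in attribute bits, mirroring the DF_* tests above.
enum : unsigned {
  DF_UA = 1u << 0, DF_A_WIDE = 1u << 1,
  DF_UB = 1u << 2, DF_B_WIDE = 1u << 3,
  DF_UC = 1u << 4, DF_C_WIDE = 1u << 5,
};

// Counts how many rl_src slots and SSA names an opcode's attribute
// word consumes, following the next_loc/next_sreg bookkeeping above.
static void CountUses(unsigned attrs, int* locs, int* sregs) {
  *locs = 0; *sregs = 0;
  if (attrs & DF_UA) { ++*locs; *sregs += (attrs & DF_A_WIDE) ? 2 : 1; }
  if (attrs & DF_UB) { ++*locs; *sregs += (attrs & DF_B_WIDE) ? 2 : 1; }
  if (attrs & DF_UC) { ++*locs; *sregs += (attrs & DF_C_WIDE) ? 2 : 1; }
}

int main() {
  int locs, sregs;
  CountUses(DF_UA | DF_A_WIDE | DF_UB, &locs, &sregs);  // a wide use plus a narrow use
  std::printf("rl_src slots: %d, ssa names: %d\n", locs, sregs);  // 2, 3
  return 0;
}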
@@ -115,41 +115,41 @@
break;
case Instruction::MOVE_EXCEPTION:
- GenMoveException(cUnit, rlDest);
+ GenMoveException(cu, rl_dest);
break;
case Instruction::RETURN_VOID:
- if (!(cUnit->attrs & METHOD_IS_LEAF)) {
- GenSuspendTest(cUnit, optFlags);
+ if (!(cu->attrs & METHOD_IS_LEAF)) {
+ GenSuspendTest(cu, opt_flags);
}
break;
case Instruction::RETURN:
case Instruction::RETURN_OBJECT:
- if (!(cUnit->attrs & METHOD_IS_LEAF)) {
- GenSuspendTest(cUnit, optFlags);
+ if (!(cu->attrs & METHOD_IS_LEAF)) {
+ GenSuspendTest(cu, opt_flags);
}
- StoreValue(cUnit, GetReturn(cUnit, cUnit->shorty[0] == 'F'), rlSrc[0]);
+ StoreValue(cu, GetReturn(cu, cu->shorty[0] == 'F'), rl_src[0]);
break;
case Instruction::RETURN_WIDE:
- if (!(cUnit->attrs & METHOD_IS_LEAF)) {
- GenSuspendTest(cUnit, optFlags);
+ if (!(cu->attrs & METHOD_IS_LEAF)) {
+ GenSuspendTest(cu, opt_flags);
}
- StoreValueWide(cUnit, GetReturnWide(cUnit,
- cUnit->shorty[0] == 'D'), rlSrc[0]);
+ StoreValueWide(cu, GetReturnWide(cu,
+ cu->shorty[0] == 'D'), rl_src[0]);
break;
case Instruction::MOVE_RESULT_WIDE:
- if (optFlags & MIR_INLINED)
+ if (opt_flags & MIR_INLINED)
break; // Nop - combined w/ previous invoke
- StoreValueWide(cUnit, rlDest, GetReturnWide(cUnit, rlDest.fp));
+ StoreValueWide(cu, rl_dest, GetReturnWide(cu, rl_dest.fp));
break;
case Instruction::MOVE_RESULT:
case Instruction::MOVE_RESULT_OBJECT:
- if (optFlags & MIR_INLINED)
+ if (opt_flags & MIR_INLINED)
break; // Nop - combined w/ previous invoke
- StoreValue(cUnit, rlDest, GetReturn(cUnit, rlDest.fp));
+ StoreValue(cu, rl_dest, GetReturn(cu, rl_dest.fp));
break;
case Instruction::MOVE:
@@ -158,140 +158,140 @@
case Instruction::MOVE_OBJECT_16:
case Instruction::MOVE_FROM16:
case Instruction::MOVE_OBJECT_FROM16:
- StoreValue(cUnit, rlDest, rlSrc[0]);
+ StoreValue(cu, rl_dest, rl_src[0]);
break;
case Instruction::MOVE_WIDE:
case Instruction::MOVE_WIDE_16:
case Instruction::MOVE_WIDE_FROM16:
- StoreValueWide(cUnit, rlDest, rlSrc[0]);
+ StoreValueWide(cu, rl_dest, rl_src[0]);
break;
case Instruction::CONST:
case Instruction::CONST_4:
case Instruction::CONST_16:
- rlResult = EvalLoc(cUnit, rlDest, kAnyReg, true);
- LoadConstantNoClobber(cUnit, rlResult.lowReg, vB);
- StoreValue(cUnit, rlDest, rlResult);
+ rl_result = EvalLoc(cu, rl_dest, kAnyReg, true);
+ LoadConstantNoClobber(cu, rl_result.low_reg, vB);
+ StoreValue(cu, rl_dest, rl_result);
break;
case Instruction::CONST_HIGH16:
- rlResult = EvalLoc(cUnit, rlDest, kAnyReg, true);
- LoadConstantNoClobber(cUnit, rlResult.lowReg, vB << 16);
- StoreValue(cUnit, rlDest, rlResult);
+ rl_result = EvalLoc(cu, rl_dest, kAnyReg, true);
+ LoadConstantNoClobber(cu, rl_result.low_reg, vB << 16);
+ StoreValue(cu, rl_dest, rl_result);
break;
case Instruction::CONST_WIDE_16:
case Instruction::CONST_WIDE_32:
- rlResult = EvalLoc(cUnit, rlDest, kAnyReg, true);
- LoadConstantValueWide(cUnit, rlResult.lowReg, rlResult.highReg, vB,
+ rl_result = EvalLoc(cu, rl_dest, kAnyReg, true);
+ LoadConstantValueWide(cu, rl_result.low_reg, rl_result.high_reg, vB,
(vB & 0x80000000) ? -1 : 0);
- StoreValueWide(cUnit, rlDest, rlResult);
+ StoreValueWide(cu, rl_dest, rl_result);
break;
case Instruction::CONST_WIDE:
- rlResult = EvalLoc(cUnit, rlDest, kAnyReg, true);
- LoadConstantValueWide(cUnit, rlResult.lowReg, rlResult.highReg,
+ rl_result = EvalLoc(cu, rl_dest, kAnyReg, true);
+ LoadConstantValueWide(cu, rl_result.low_reg, rl_result.high_reg,
mir->dalvikInsn.vB_wide & 0xffffffff,
(mir->dalvikInsn.vB_wide >> 32) & 0xffffffff);
- StoreValueWide(cUnit, rlDest, rlResult);
+ StoreValueWide(cu, rl_dest, rl_result);
break;
case Instruction::CONST_WIDE_HIGH16:
- rlResult = EvalLoc(cUnit, rlDest, kAnyReg, true);
- LoadConstantValueWide(cUnit, rlResult.lowReg, rlResult.highReg,
+ rl_result = EvalLoc(cu, rl_dest, kAnyReg, true);
+ LoadConstantValueWide(cu, rl_result.low_reg, rl_result.high_reg,
0, vB << 16);
- StoreValueWide(cUnit, rlDest, rlResult);
+ StoreValueWide(cu, rl_dest, rl_result);
break;
case Instruction::MONITOR_ENTER:
- GenMonitorEnter(cUnit, optFlags, rlSrc[0]);
+ GenMonitorEnter(cu, opt_flags, rl_src[0]);
break;
case Instruction::MONITOR_EXIT:
- GenMonitorExit(cUnit, optFlags, rlSrc[0]);
+ GenMonitorExit(cu, opt_flags, rl_src[0]);
break;
case Instruction::CHECK_CAST:
- GenCheckCast(cUnit, vB, rlSrc[0]);
+ GenCheckCast(cu, vB, rl_src[0]);
break;
case Instruction::INSTANCE_OF:
- GenInstanceof(cUnit, vC, rlDest, rlSrc[0]);
+ GenInstanceof(cu, vC, rl_dest, rl_src[0]);
break;
case Instruction::NEW_INSTANCE:
- GenNewInstance(cUnit, vB, rlDest);
+ GenNewInstance(cu, vB, rl_dest);
break;
case Instruction::THROW:
- GenThrow(cUnit, rlSrc[0]);
+ GenThrow(cu, rl_src[0]);
break;
case Instruction::ARRAY_LENGTH:
- int lenOffset;
- lenOffset = Array::LengthOffset().Int32Value();
- rlSrc[0] = LoadValue(cUnit, rlSrc[0], kCoreReg);
- GenNullCheck(cUnit, rlSrc[0].sRegLow, rlSrc[0].lowReg, optFlags);
- rlResult = EvalLoc(cUnit, rlDest, kCoreReg, true);
- LoadWordDisp(cUnit, rlSrc[0].lowReg, lenOffset, rlResult.lowReg);
- StoreValue(cUnit, rlDest, rlResult);
+ int len_offset;
+ len_offset = Array::LengthOffset().Int32Value();
+ rl_src[0] = LoadValue(cu, rl_src[0], kCoreReg);
+ GenNullCheck(cu, rl_src[0].s_reg_low, rl_src[0].low_reg, opt_flags);
+ rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ LoadWordDisp(cu, rl_src[0].low_reg, len_offset, rl_result.low_reg);
+ StoreValue(cu, rl_dest, rl_result);
break;
case Instruction::CONST_STRING:
case Instruction::CONST_STRING_JUMBO:
- GenConstString(cUnit, vB, rlDest);
+ GenConstString(cu, vB, rl_dest);
break;
case Instruction::CONST_CLASS:
- GenConstClass(cUnit, vB, rlDest);
+ GenConstClass(cu, vB, rl_dest);
break;
case Instruction::FILL_ARRAY_DATA:
- GenFillArrayData(cUnit, vB, rlSrc[0]);
+ GenFillArrayData(cu, vB, rl_src[0]);
break;
case Instruction::FILLED_NEW_ARRAY:
- GenFilledNewArray(cUnit, NewMemCallInfo(cUnit, bb, mir, kStatic,
+ GenFilledNewArray(cu, NewMemCallInfo(cu, bb, mir, kStatic,
false /* not range */));
break;
case Instruction::FILLED_NEW_ARRAY_RANGE:
- GenFilledNewArray(cUnit, NewMemCallInfo(cUnit, bb, mir, kStatic,
+ GenFilledNewArray(cu, NewMemCallInfo(cu, bb, mir, kStatic,
true /* range */));
break;
case Instruction::NEW_ARRAY:
- GenNewArray(cUnit, vC, rlDest, rlSrc[0]);
+ GenNewArray(cu, vC, rl_dest, rl_src[0]);
break;
case Instruction::GOTO:
case Instruction::GOTO_16:
case Instruction::GOTO_32:
- if (bb->taken->startOffset <= mir->offset) {
- GenSuspendTestAndBranch(cUnit, optFlags, &labelList[bb->taken->id]);
+ if (bb->taken->start_offset <= mir->offset) {
+ GenSuspendTestAndBranch(cu, opt_flags, &label_list[bb->taken->id]);
} else {
- OpUnconditionalBranch(cUnit, &labelList[bb->taken->id]);
+ OpUnconditionalBranch(cu, &label_list[bb->taken->id]);
}
break;
case Instruction::PACKED_SWITCH:
- GenPackedSwitch(cUnit, vB, rlSrc[0]);
+ GenPackedSwitch(cu, vB, rl_src[0]);
break;
case Instruction::SPARSE_SWITCH:
- GenSparseSwitch(cUnit, vB, rlSrc[0]);
+ GenSparseSwitch(cu, vB, rl_src[0]);
break;
case Instruction::CMPL_FLOAT:
case Instruction::CMPG_FLOAT:
case Instruction::CMPL_DOUBLE:
case Instruction::CMPG_DOUBLE:
- res = GenCmpFP(cUnit, opcode, rlDest, rlSrc[0], rlSrc[1]);
+ res = GenCmpFP(cu, opcode, rl_dest, rl_src[0], rl_src[1]);
break;
case Instruction::CMP_LONG:
- GenCmpLong(cUnit, rlDest, rlSrc[0], rlSrc[1]);
+ GenCmpLong(cu, rl_dest, rl_src[0], rl_src[1]);
break;
case Instruction::IF_EQ:
@@ -300,15 +300,15 @@
case Instruction::IF_GE:
case Instruction::IF_GT:
case Instruction::IF_LE: {
- LIR* taken = &labelList[bb->taken->id];
- LIR* fallThrough = &labelList[bb->fallThrough->id];
- bool backwardBranch;
- backwardBranch = (bb->taken->startOffset <= mir->offset);
- if (backwardBranch) {
- GenSuspendTest(cUnit, optFlags);
+ LIR* taken = &label_list[bb->taken->id];
+ LIR* fall_through = &label_list[bb->fall_through->id];
+ bool backward_branch;
+ backward_branch = (bb->taken->start_offset <= mir->offset);
+ if (backward_branch) {
+ GenSuspendTest(cu, opt_flags);
}
- GenCompareAndBranch(cUnit, opcode, rlSrc[0], rlSrc[1], taken,
- fallThrough);
+ GenCompareAndBranch(cu, opcode, rl_src[0], rl_src[1], taken,
+ fall_through);
break;
}
@@ -318,128 +318,128 @@
case Instruction::IF_GEZ:
case Instruction::IF_GTZ:
case Instruction::IF_LEZ: {
- LIR* taken = &labelList[bb->taken->id];
- LIR* fallThrough = &labelList[bb->fallThrough->id];
- bool backwardBranch;
- backwardBranch = (bb->taken->startOffset <= mir->offset);
- if (backwardBranch) {
- GenSuspendTest(cUnit, optFlags);
+ LIR* taken = &label_list[bb->taken->id];
+ LIR* fall_through = &label_list[bb->fall_through->id];
+ bool backward_branch;
+ backward_branch = (bb->taken->start_offset <= mir->offset);
+ if (backward_branch) {
+ GenSuspendTest(cu, opt_flags);
}
- GenCompareZeroAndBranch(cUnit, opcode, rlSrc[0], taken, fallThrough);
+ GenCompareZeroAndBranch(cu, opcode, rl_src[0], taken, fall_through);
break;
}
case Instruction::AGET_WIDE:
- GenArrayGet(cUnit, optFlags, kLong, rlSrc[0], rlSrc[1], rlDest, 3);
+ GenArrayGet(cu, opt_flags, kLong, rl_src[0], rl_src[1], rl_dest, 3);
break;
case Instruction::AGET:
case Instruction::AGET_OBJECT:
- GenArrayGet(cUnit, optFlags, kWord, rlSrc[0], rlSrc[1], rlDest, 2);
+ GenArrayGet(cu, opt_flags, kWord, rl_src[0], rl_src[1], rl_dest, 2);
break;
case Instruction::AGET_BOOLEAN:
- GenArrayGet(cUnit, optFlags, kUnsignedByte, rlSrc[0], rlSrc[1], rlDest, 0);
+ GenArrayGet(cu, opt_flags, kUnsignedByte, rl_src[0], rl_src[1], rl_dest, 0);
break;
case Instruction::AGET_BYTE:
- GenArrayGet(cUnit, optFlags, kSignedByte, rlSrc[0], rlSrc[1], rlDest, 0);
+ GenArrayGet(cu, opt_flags, kSignedByte, rl_src[0], rl_src[1], rl_dest, 0);
break;
case Instruction::AGET_CHAR:
- GenArrayGet(cUnit, optFlags, kUnsignedHalf, rlSrc[0], rlSrc[1], rlDest, 1);
+ GenArrayGet(cu, opt_flags, kUnsignedHalf, rl_src[0], rl_src[1], rl_dest, 1);
break;
case Instruction::AGET_SHORT:
- GenArrayGet(cUnit, optFlags, kSignedHalf, rlSrc[0], rlSrc[1], rlDest, 1);
+ GenArrayGet(cu, opt_flags, kSignedHalf, rl_src[0], rl_src[1], rl_dest, 1);
break;
case Instruction::APUT_WIDE:
- GenArrayPut(cUnit, optFlags, kLong, rlSrc[1], rlSrc[2], rlSrc[0], 3);
+ GenArrayPut(cu, opt_flags, kLong, rl_src[1], rl_src[2], rl_src[0], 3);
break;
case Instruction::APUT:
- GenArrayPut(cUnit, optFlags, kWord, rlSrc[1], rlSrc[2], rlSrc[0], 2);
+ GenArrayPut(cu, opt_flags, kWord, rl_src[1], rl_src[2], rl_src[0], 2);
break;
case Instruction::APUT_OBJECT:
- GenArrayObjPut(cUnit, optFlags, rlSrc[1], rlSrc[2], rlSrc[0], 2);
+ GenArrayObjPut(cu, opt_flags, rl_src[1], rl_src[2], rl_src[0], 2);
break;
case Instruction::APUT_SHORT:
case Instruction::APUT_CHAR:
- GenArrayPut(cUnit, optFlags, kUnsignedHalf, rlSrc[1], rlSrc[2], rlSrc[0], 1);
+ GenArrayPut(cu, opt_flags, kUnsignedHalf, rl_src[1], rl_src[2], rl_src[0], 1);
break;
case Instruction::APUT_BYTE:
case Instruction::APUT_BOOLEAN:
- GenArrayPut(cUnit, optFlags, kUnsignedByte, rlSrc[1], rlSrc[2],
- rlSrc[0], 0);
+ GenArrayPut(cu, opt_flags, kUnsignedByte, rl_src[1], rl_src[2],
+ rl_src[0], 0);
break;
case Instruction::IGET_OBJECT:
//case Instruction::IGET_OBJECT_VOLATILE:
- GenIGet(cUnit, vC, optFlags, kWord, rlDest, rlSrc[0], false, true);
+ GenIGet(cu, vC, opt_flags, kWord, rl_dest, rl_src[0], false, true);
break;
case Instruction::IGET_WIDE:
//case Instruction::IGET_WIDE_VOLATILE:
- GenIGet(cUnit, vC, optFlags, kLong, rlDest, rlSrc[0], true, false);
+ GenIGet(cu, vC, opt_flags, kLong, rl_dest, rl_src[0], true, false);
break;
case Instruction::IGET:
//case Instruction::IGET_VOLATILE:
- GenIGet(cUnit, vC, optFlags, kWord, rlDest, rlSrc[0], false, false);
+ GenIGet(cu, vC, opt_flags, kWord, rl_dest, rl_src[0], false, false);
break;
case Instruction::IGET_CHAR:
- GenIGet(cUnit, vC, optFlags, kUnsignedHalf, rlDest, rlSrc[0], false, false);
+ GenIGet(cu, vC, opt_flags, kUnsignedHalf, rl_dest, rl_src[0], false, false);
break;
case Instruction::IGET_SHORT:
- GenIGet(cUnit, vC, optFlags, kSignedHalf, rlDest, rlSrc[0], false, false);
+ GenIGet(cu, vC, opt_flags, kSignedHalf, rl_dest, rl_src[0], false, false);
break;
case Instruction::IGET_BOOLEAN:
case Instruction::IGET_BYTE:
- GenIGet(cUnit, vC, optFlags, kUnsignedByte, rlDest, rlSrc[0], false, false);
+ GenIGet(cu, vC, opt_flags, kUnsignedByte, rl_dest, rl_src[0], false, false);
break;
case Instruction::IPUT_WIDE:
//case Instruction::IPUT_WIDE_VOLATILE:
- GenIPut(cUnit, vC, optFlags, kLong, rlSrc[0], rlSrc[1], true, false);
+ GenIPut(cu, vC, opt_flags, kLong, rl_src[0], rl_src[1], true, false);
break;
case Instruction::IPUT_OBJECT:
//case Instruction::IPUT_OBJECT_VOLATILE:
- GenIPut(cUnit, vC, optFlags, kWord, rlSrc[0], rlSrc[1], false, true);
+ GenIPut(cu, vC, opt_flags, kWord, rl_src[0], rl_src[1], false, true);
break;
case Instruction::IPUT:
//case Instruction::IPUT_VOLATILE:
- GenIPut(cUnit, vC, optFlags, kWord, rlSrc[0], rlSrc[1], false, false);
+ GenIPut(cu, vC, opt_flags, kWord, rl_src[0], rl_src[1], false, false);
break;
case Instruction::IPUT_BOOLEAN:
case Instruction::IPUT_BYTE:
- GenIPut(cUnit, vC, optFlags, kUnsignedByte, rlSrc[0], rlSrc[1], false, false);
+ GenIPut(cu, vC, opt_flags, kUnsignedByte, rl_src[0], rl_src[1], false, false);
break;
case Instruction::IPUT_CHAR:
- GenIPut(cUnit, vC, optFlags, kUnsignedHalf, rlSrc[0], rlSrc[1], false, false);
+ GenIPut(cu, vC, opt_flags, kUnsignedHalf, rl_src[0], rl_src[1], false, false);
break;
case Instruction::IPUT_SHORT:
- GenIPut(cUnit, vC, optFlags, kSignedHalf, rlSrc[0], rlSrc[1], false, false);
+ GenIPut(cu, vC, opt_flags, kSignedHalf, rl_src[0], rl_src[1], false, false);
break;
case Instruction::SGET_OBJECT:
- GenSget(cUnit, vB, rlDest, false, true);
+ GenSget(cu, vB, rl_dest, false, true);
break;
case Instruction::SGET:
case Instruction::SGET_BOOLEAN:
case Instruction::SGET_BYTE:
case Instruction::SGET_CHAR:
case Instruction::SGET_SHORT:
- GenSget(cUnit, vB, rlDest, false, false);
+ GenSget(cu, vB, rl_dest, false, false);
break;
case Instruction::SGET_WIDE:
- GenSget(cUnit, vB, rlDest, true, false);
+ GenSget(cu, vB, rl_dest, true, false);
break;
case Instruction::SPUT_OBJECT:
- GenSput(cUnit, vB, rlSrc[0], false, true);
+ GenSput(cu, vB, rl_src[0], false, true);
break;
case Instruction::SPUT:
@@ -447,80 +447,80 @@
case Instruction::SPUT_BYTE:
case Instruction::SPUT_CHAR:
case Instruction::SPUT_SHORT:
- GenSput(cUnit, vB, rlSrc[0], false, false);
+ GenSput(cu, vB, rl_src[0], false, false);
break;
case Instruction::SPUT_WIDE:
- GenSput(cUnit, vB, rlSrc[0], true, false);
+ GenSput(cu, vB, rl_src[0], true, false);
break;
case Instruction::INVOKE_STATIC_RANGE:
- GenInvoke(cUnit, NewMemCallInfo(cUnit, bb, mir, kStatic, true));
+ GenInvoke(cu, NewMemCallInfo(cu, bb, mir, kStatic, true));
break;
case Instruction::INVOKE_STATIC:
- GenInvoke(cUnit, NewMemCallInfo(cUnit, bb, mir, kStatic, false));
+ GenInvoke(cu, NewMemCallInfo(cu, bb, mir, kStatic, false));
break;
case Instruction::INVOKE_DIRECT:
- GenInvoke(cUnit, NewMemCallInfo(cUnit, bb, mir, kDirect, false));
+ GenInvoke(cu, NewMemCallInfo(cu, bb, mir, kDirect, false));
break;
case Instruction::INVOKE_DIRECT_RANGE:
- GenInvoke(cUnit, NewMemCallInfo(cUnit, bb, mir, kDirect, true));
+ GenInvoke(cu, NewMemCallInfo(cu, bb, mir, kDirect, true));
break;
case Instruction::INVOKE_VIRTUAL:
- GenInvoke(cUnit, NewMemCallInfo(cUnit, bb, mir, kVirtual, false));
+ GenInvoke(cu, NewMemCallInfo(cu, bb, mir, kVirtual, false));
break;
case Instruction::INVOKE_VIRTUAL_RANGE:
- GenInvoke(cUnit, NewMemCallInfo(cUnit, bb, mir, kVirtual, true));
+ GenInvoke(cu, NewMemCallInfo(cu, bb, mir, kVirtual, true));
break;
case Instruction::INVOKE_SUPER:
- GenInvoke(cUnit, NewMemCallInfo(cUnit, bb, mir, kSuper, false));
+ GenInvoke(cu, NewMemCallInfo(cu, bb, mir, kSuper, false));
break;
case Instruction::INVOKE_SUPER_RANGE:
- GenInvoke(cUnit, NewMemCallInfo(cUnit, bb, mir, kSuper, true));
+ GenInvoke(cu, NewMemCallInfo(cu, bb, mir, kSuper, true));
break;
case Instruction::INVOKE_INTERFACE:
- GenInvoke(cUnit, NewMemCallInfo(cUnit, bb, mir, kInterface, false));
+ GenInvoke(cu, NewMemCallInfo(cu, bb, mir, kInterface, false));
break;
case Instruction::INVOKE_INTERFACE_RANGE:
- GenInvoke(cUnit, NewMemCallInfo(cUnit, bb, mir, kInterface, true));
+ GenInvoke(cu, NewMemCallInfo(cu, bb, mir, kInterface, true));
break;
case Instruction::NEG_INT:
case Instruction::NOT_INT:
- res = GenArithOpInt(cUnit, opcode, rlDest, rlSrc[0], rlSrc[0]);
+ res = GenArithOpInt(cu, opcode, rl_dest, rl_src[0], rl_src[0]);
break;
case Instruction::NEG_LONG:
case Instruction::NOT_LONG:
- res = GenArithOpLong(cUnit, opcode, rlDest, rlSrc[0], rlSrc[0]);
+ res = GenArithOpLong(cu, opcode, rl_dest, rl_src[0], rl_src[0]);
break;
case Instruction::NEG_FLOAT:
- res = GenArithOpFloat(cUnit, opcode, rlDest, rlSrc[0], rlSrc[0]);
+ res = GenArithOpFloat(cu, opcode, rl_dest, rl_src[0], rl_src[0]);
break;
case Instruction::NEG_DOUBLE:
- res = GenArithOpDouble(cUnit, opcode, rlDest, rlSrc[0], rlSrc[0]);
+ res = GenArithOpDouble(cu, opcode, rl_dest, rl_src[0], rl_src[0]);
break;
case Instruction::INT_TO_LONG:
- GenIntToLong(cUnit, rlDest, rlSrc[0]);
+ GenIntToLong(cu, rl_dest, rl_src[0]);
break;
case Instruction::LONG_TO_INT:
- rlSrc[0] = UpdateLocWide(cUnit, rlSrc[0]);
- rlSrc[0] = WideToNarrow(cUnit, rlSrc[0]);
- StoreValue(cUnit, rlDest, rlSrc[0]);
+ rl_src[0] = UpdateLocWide(cu, rl_src[0]);
+ rl_src[0] = WideToNarrow(cu, rl_src[0]);
+ StoreValue(cu, rl_dest, rl_src[0]);
break;
case Instruction::INT_TO_BYTE:
case Instruction::INT_TO_SHORT:
case Instruction::INT_TO_CHAR:
- GenIntNarrowing(cUnit, opcode, rlDest, rlSrc[0]);
+ GenIntNarrowing(cu, opcode, rl_dest, rl_src[0]);
break;
case Instruction::INT_TO_FLOAT:
@@ -533,7 +533,7 @@
case Instruction::DOUBLE_TO_INT:
case Instruction::DOUBLE_TO_LONG:
case Instruction::DOUBLE_TO_FLOAT:
- GenConversion(cUnit, opcode, rlDest, rlSrc[0]);
+ GenConversion(cu, opcode, rl_dest, rl_src[0]);
break;
case Instruction::ADD_INT:
@@ -558,7 +558,7 @@
case Instruction::SHL_INT_2ADDR:
case Instruction::SHR_INT_2ADDR:
case Instruction::USHR_INT_2ADDR:
- GenArithOpInt(cUnit, opcode, rlDest, rlSrc[0], rlSrc[1]);
+ GenArithOpInt(cu, opcode, rl_dest, rl_src[0], rl_src[1]);
break;
case Instruction::ADD_LONG:
@@ -577,7 +577,7 @@
case Instruction::AND_LONG_2ADDR:
case Instruction::OR_LONG_2ADDR:
case Instruction::XOR_LONG_2ADDR:
- GenArithOpLong(cUnit, opcode, rlDest, rlSrc[0], rlSrc[1]);
+ GenArithOpLong(cu, opcode, rl_dest, rl_src[0], rl_src[1]);
break;
case Instruction::SHL_LONG:
@@ -586,7 +586,7 @@
case Instruction::SHL_LONG_2ADDR:
case Instruction::SHR_LONG_2ADDR:
case Instruction::USHR_LONG_2ADDR:
- GenShiftOpLong(cUnit, opcode, rlDest, rlSrc[0], rlSrc[1]);
+ GenShiftOpLong(cu, opcode, rl_dest, rl_src[0], rl_src[1]);
break;
case Instruction::ADD_FLOAT:
@@ -599,7 +599,7 @@
case Instruction::MUL_FLOAT_2ADDR:
case Instruction::DIV_FLOAT_2ADDR:
case Instruction::REM_FLOAT_2ADDR:
- GenArithOpFloat(cUnit, opcode, rlDest, rlSrc[0], rlSrc[1]);
+ GenArithOpFloat(cu, opcode, rl_dest, rl_src[0], rl_src[1]);
break;
case Instruction::ADD_DOUBLE:
@@ -612,7 +612,7 @@
case Instruction::MUL_DOUBLE_2ADDR:
case Instruction::DIV_DOUBLE_2ADDR:
case Instruction::REM_DOUBLE_2ADDR:
- GenArithOpDouble(cUnit, opcode, rlDest, rlSrc[0], rlSrc[1]);
+ GenArithOpDouble(cu, opcode, rl_dest, rl_src[0], rl_src[1]);
break;
case Instruction::RSUB_INT:
@@ -634,7 +634,7 @@
case Instruction::SHL_INT_LIT8:
case Instruction::SHR_INT_LIT8:
case Instruction::USHR_INT_LIT8:
- GenArithOpIntLit(cUnit, opcode, rlDest, rlSrc[0], vC);
+ GenArithOpIntLit(cu, opcode, rl_dest, rl_src[0], vC);
break;
default:
@@ -644,47 +644,47 @@
}
/* Extended MIR instructions like PHI */
-static void HandleExtendedMethodMIR(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir)
+static void HandleExtendedMethodMIR(CompilationUnit* cu, BasicBlock* bb, MIR* mir)
{
- int opOffset = mir->dalvikInsn.opcode - kMirOpFirst;
+ int op_offset = mir->dalvikInsn.opcode - kMirOpFirst;
char* msg = NULL;
- if (cUnit->printMe) {
- msg = static_cast<char*>(NewMem(cUnit, strlen(extendedMIROpNames[opOffset]) + 1,
+ if (cu->verbose) {
+ msg = static_cast<char*>(NewMem(cu, strlen(extended_mir_op_names[op_offset]) + 1,
false, kAllocDebugInfo));
- strcpy(msg, extendedMIROpNames[opOffset]);
+ strcpy(msg, extended_mir_op_names[op_offset]);
}
- LIR* op = NewLIR1(cUnit, kPseudoExtended, reinterpret_cast<uintptr_t>(msg));
+ LIR* op = NewLIR1(cu, kPseudoExtended, reinterpret_cast<uintptr_t>(msg));
switch (static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode)) {
case kMirOpPhi: {
- char* ssaString = NULL;
- if (cUnit->printMe) {
- ssaString = GetSSAString(cUnit, mir->ssaRep);
+ char* ssa_string = NULL;
+ if (cu->verbose) {
+ ssa_string = GetSSAString(cu, mir->ssa_rep);
}
- op->flags.isNop = true;
- NewLIR1(cUnit, kPseudoSSARep, reinterpret_cast<uintptr_t>(ssaString));
+ op->flags.is_nop = true;
+ NewLIR1(cu, kPseudoSSARep, reinterpret_cast<uintptr_t>(ssa_string));
break;
}
case kMirOpCopy: {
- RegLocation rlSrc = GetSrc(cUnit, mir, 0);
- RegLocation rlDest = GetDest(cUnit, mir);
- StoreValue(cUnit, rlDest, rlSrc);
+ RegLocation rl_src = GetSrc(cu, mir, 0);
+ RegLocation rl_dest = GetDest(cu, mir);
+ StoreValue(cu, rl_dest, rl_src);
break;
}
case kMirOpFusedCmplFloat:
- GenFusedFPCmpBranch(cUnit, bb, mir, false /*gt bias*/, false /*double*/);
+ GenFusedFPCmpBranch(cu, bb, mir, false /*gt bias*/, false /*double*/);
break;
case kMirOpFusedCmpgFloat:
- GenFusedFPCmpBranch(cUnit, bb, mir, true /*gt bias*/, false /*double*/);
+ GenFusedFPCmpBranch(cu, bb, mir, true /*gt bias*/, false /*double*/);
break;
case kMirOpFusedCmplDouble:
- GenFusedFPCmpBranch(cUnit, bb, mir, false /*gt bias*/, true /*double*/);
+ GenFusedFPCmpBranch(cu, bb, mir, false /*gt bias*/, true /*double*/);
break;
case kMirOpFusedCmpgDouble:
- GenFusedFPCmpBranch(cUnit, bb, mir, true /*gt bias*/, true /*double*/);
+ GenFusedFPCmpBranch(cu, bb, mir, true /*gt bias*/, true /*double*/);
break;
case kMirOpFusedCmpLong:
- GenFusedLongCmpBranch(cUnit, bb, mir);
+ GenFusedLongCmpBranch(cu, bb, mir);
break;
default:
break;
@@ -692,167 +692,167 @@
}
/* Handle the content in each basic block */
-static bool MethodBlockCodeGen(CompilationUnit* cUnit, BasicBlock* bb)
+static bool MethodBlockCodeGen(CompilationUnit* cu, BasicBlock* bb)
{
- if (bb->blockType == kDead) return false;
- cUnit->currentDalvikOffset = bb->startOffset;
+ if (bb->block_type == kDead) return false;
+ cu->current_dalvik_offset = bb->start_offset;
MIR* mir;
- LIR* labelList = cUnit->blockLabelList;
- int blockId = bb->id;
+ LIR* label_list = cu->block_label_list;
+ int block_id = bb->id;
- cUnit->curBlock = bb;
- labelList[blockId].operands[0] = bb->startOffset;
+ cu->cur_block = bb;
+ label_list[block_id].operands[0] = bb->start_offset;
/* Insert the block label */
- labelList[blockId].opcode = kPseudoNormalBlockLabel;
- AppendLIR(cUnit, &labelList[blockId]);
+ label_list[block_id].opcode = kPseudoNormalBlockLabel;
+ AppendLIR(cu, &label_list[block_id]);
- LIR* headLIR = NULL;
+ LIR* head_lir = NULL;
/* If this is a catch block, export the start address */
- if (bb->catchEntry) {
- headLIR = NewLIR0(cUnit, kPseudoExportedPC);
+ if (bb->catch_entry) {
+ head_lir = NewLIR0(cu, kPseudoExportedPC);
}
/* Free temp registers and reset redundant store tracking */
- ResetRegPool(cUnit);
- ResetDefTracking(cUnit);
+ ResetRegPool(cu);
+ ResetDefTracking(cu);
- ClobberAllRegs(cUnit);
+ ClobberAllRegs(cu);
- if (bb->blockType == kEntryBlock) {
- int startVReg = cUnit->numDalvikRegisters - cUnit->numIns;
- GenEntrySequence(cUnit, &cUnit->regLocation[startVReg],
- cUnit->regLocation[cUnit->methodSReg]);
- } else if (bb->blockType == kExitBlock) {
- GenExitSequence(cUnit);
+ if (bb->block_type == kEntryBlock) {
+ int start_vreg = cu->num_dalvik_registers - cu->num_ins;
+ GenEntrySequence(cu, &cu->reg_location[start_vreg],
+ cu->reg_location[cu->method_sreg]);
+ } else if (bb->block_type == kExitBlock) {
+ GenExitSequence(cu);
}
- for (mir = bb->firstMIRInsn; mir; mir = mir->next) {
- ResetRegPool(cUnit);
- if (cUnit->disableOpt & (1 << kTrackLiveTemps)) {
- ClobberAllRegs(cUnit);
+ for (mir = bb->first_mir_insn; mir; mir = mir->next) {
+ ResetRegPool(cu);
+ if (cu->disable_opt & (1 << kTrackLiveTemps)) {
+ ClobberAllRegs(cu);
}
- if (cUnit->disableOpt & (1 << kSuppressLoads)) {
- ResetDefTracking(cUnit);
+ if (cu->disable_opt & (1 << kSuppressLoads)) {
+ ResetDefTracking(cu);
}
#ifndef NDEBUG
/* Reset temp tracking sanity check */
- cUnit->liveSReg = INVALID_SREG;
+ cu->live_sreg = INVALID_SREG;
#endif
- cUnit->currentDalvikOffset = mir->offset;
+ cu->current_dalvik_offset = mir->offset;
int opcode = mir->dalvikInsn.opcode;
- LIR* boundaryLIR;
+ LIR* boundary_lir;
/* Mark the beginning of a Dalvik instruction for line tracking */
- char* instStr = cUnit->printMe ?
- GetDalvikDisassembly(cUnit, mir->dalvikInsn, "") : NULL;
- boundaryLIR = MarkBoundary(cUnit, mir->offset, instStr);
+ char* inst_str = cu->verbose ?
+ GetDalvikDisassembly(cu, mir->dalvikInsn, "") : NULL;
+ boundary_lir = MarkBoundary(cu, mir->offset, inst_str);
/* Remember the first LIR for this block */
- if (headLIR == NULL) {
- headLIR = boundaryLIR;
- /* Set the first boundaryLIR as a scheduling barrier */
- headLIR->defMask = ENCODE_ALL;
+ if (head_lir == NULL) {
+ head_lir = boundary_lir;
+ /* Set the first boundary_lir as a scheduling barrier */
+ head_lir->def_mask = ENCODE_ALL;
}
/* Don't generate the SSA annotation unless verbose mode is on */
- if (cUnit->printMe && mir->ssaRep) {
- char* ssaString = GetSSAString(cUnit, mir->ssaRep);
- NewLIR1(cUnit, kPseudoSSARep, reinterpret_cast<uintptr_t>(ssaString));
+ if (cu->verbose && mir->ssa_rep) {
+ char* ssa_string = GetSSAString(cu, mir->ssa_rep);
+ NewLIR1(cu, kPseudoSSARep, reinterpret_cast<uintptr_t>(ssa_string));
}
if (opcode == kMirOpCheck) {
// Combine check and work halves of throwing instruction.
- MIR* workHalf = mir->meta.throwInsn;
- mir->dalvikInsn.opcode = workHalf->dalvikInsn.opcode;
- opcode = workHalf->dalvikInsn.opcode;
- SSARepresentation* ssaRep = workHalf->ssaRep;
- workHalf->ssaRep = mir->ssaRep;
- mir->ssaRep = ssaRep;
- workHalf->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
+ MIR* work_half = mir->meta.throw_insn;
+ mir->dalvikInsn.opcode = work_half->dalvikInsn.opcode;
+ opcode = work_half->dalvikInsn.opcode;
+ SSARepresentation* ssa_rep = work_half->ssa_rep;
+ work_half->ssa_rep = mir->ssa_rep;
+ mir->ssa_rep = ssa_rep;
+ work_half->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
}
if (opcode >= kMirOpFirst) {
- HandleExtendedMethodMIR(cUnit, bb, mir);
+ HandleExtendedMethodMIR(cu, bb, mir);
continue;
}
- bool notHandled = CompileDalvikInstruction(cUnit, mir, bb, labelList);
- if (notHandled) {
+ bool not_handled = CompileDalvikInstruction(cu, mir, bb, label_list);
+ if (not_handled) {
LOG(FATAL) << StringPrintf("%#06x: Opcode %#x (%s)",
mir->offset, opcode,
Instruction::Name(mir->dalvikInsn.opcode));
}
}
- if (headLIR) {
+ if (head_lir) {
/*
* Eliminate redundant loads/stores and delay stores into later
* slots
*/
- ApplyLocalOptimizations(cUnit, headLIR, cUnit->lastLIRInsn);
+ ApplyLocalOptimizations(cu, head_lir, cu->last_lir_insn);
/*
* Generate an unconditional branch to the fallthrough block.
*/
- if (bb->fallThrough) {
- OpUnconditionalBranch(cUnit, &labelList[bb->fallThrough->id]);
+ if (bb->fall_through) {
+ OpUnconditionalBranch(cu, &label_list[bb->fall_through->id]);
}
}
return false;
}
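
The kMirOpCheck path in the loop above stitches a throwing instruction
back together: the check half takes over the work half's opcode, the two
halves trade SSA representations, and the work half is turned into a nop.
A stripped-down sketch of that swap (MiniMIR and the constants here are
stand-ins for the real MIR types):

#include <utility>

struct SSARepresentation { int num_uses; };
struct MiniMIR {
  int opcode;
  SSARepresentation* ssa_rep;
  MiniMIR* throw_insn;  // meta.throw_insn in the real struct
};

const int kMirOpNop = -1;  // placeholder value, not the real enum

// Check half adopts the work half's opcode and SSA rep; the work half
// keeps the check's SSA rep and is neutralized to a nop.
static void CombineCheckAndWork(MiniMIR* check) {
  MiniMIR* work = check->throw_insn;
  check->opcode = work->opcode;
  std::swap(check->ssa_rep, work->ssa_rep);
  work->opcode = kMirOpNop;
}

int main() {
  SSARepresentation check_ssa{0}, work_ssa{2};
  MiniMIR work{42, &work_ssa, nullptr};
  MiniMIR check{0, &check_ssa, &work};
  CombineCheckAndWork(&check);
  // check now carries opcode 42 and work's SSA rep; work is a nop.
  return 0;
}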
-void SpecialMIR2LIR(CompilationUnit* cUnit, SpecialCaseHandler specialCase)
+void SpecialMIR2LIR(CompilationUnit* cu, SpecialCaseHandler special_case)
{
/* Find the first DalvikByteCode block */
- int numReachableBlocks = cUnit->numReachableBlocks;
- const GrowableList *blockList = &cUnit->blockList;
+ int num_reachable_blocks = cu->num_reachable_blocks;
+ const GrowableList *block_list = &cu->block_list;
BasicBlock* bb = NULL;
- for (int idx = 0; idx < numReachableBlocks; idx++) {
- int dfsIndex = cUnit->dfsOrder.elemList[idx];
- bb = reinterpret_cast<BasicBlock*>(GrowableListGetElement(blockList, dfsIndex));
- if (bb->blockType == kDalvikByteCode) {
+ for (int idx = 0; idx < num_reachable_blocks; idx++) {
+ int dfs_index = cu->dfs_order.elem_list[idx];
+ bb = reinterpret_cast<BasicBlock*>(GrowableListGetElement(block_list, dfs_index));
+ if (bb->block_type == kDalvikByteCode) {
break;
}
}
if (bb == NULL) {
return;
}
- DCHECK_EQ(bb->startOffset, 0);
- DCHECK(bb->firstMIRInsn != NULL);
+ DCHECK_EQ(bb->start_offset, 0);
+ DCHECK(bb->first_mir_insn != NULL);
/* Get the first instruction */
- MIR* mir = bb->firstMIRInsn;
+ MIR* mir = bb->first_mir_insn;
/* Free temp registers and reset redundant store tracking */
- ResetRegPool(cUnit);
- ResetDefTracking(cUnit);
- ClobberAllRegs(cUnit);
+ ResetRegPool(cu);
+ ResetDefTracking(cu);
+ ClobberAllRegs(cu);
- GenSpecialCase(cUnit, bb, mir, specialCase);
+ GenSpecialCase(cu, bb, mir, special_case);
}
-void MethodMIR2LIR(CompilationUnit* cUnit)
+void MethodMIR2LIR(CompilationUnit* cu)
{
/* Used to hold the labels of each block */
- cUnit->blockLabelList =
- static_cast<LIR*>(NewMem(cUnit, sizeof(LIR) * cUnit->numBlocks, true, kAllocLIR));
+ cu->block_label_list =
+ static_cast<LIR*>(NewMem(cu, sizeof(LIR) * cu->num_blocks, true, kAllocLIR));
- DataFlowAnalysisDispatcher(cUnit, MethodBlockCodeGen,
+ DataFlowAnalysisDispatcher(cu, MethodBlockCodeGen,
kPreOrderDFSTraversal, false /* Iterative */);
- HandleSuspendLaunchPads(cUnit);
+ HandleSuspendLaunchPads(cu);
- HandleThrowLaunchPads(cUnit);
+ HandleThrowLaunchPads(cu);
- HandleIntrinsicLaunchPads(cUnit);
+ HandleIntrinsicLaunchPads(cu);
- if (!(cUnit->disableOpt & (1 << kSafeOptimizations))) {
- RemoveRedundantBranches(cUnit);
+ if (!(cu->disable_opt & (1 << kSafeOptimizations))) {
+ RemoveRedundantBranches(cu);
}
}
diff --git a/src/compiler/codegen/method_codegen_driver.h b/src/compiler/codegen/method_codegen_driver.h
index 5eb9b6e..4c0ffba 100644
--- a/src/compiler/codegen/method_codegen_driver.h
+++ b/src/compiler/codegen/method_codegen_driver.h
@@ -19,11 +19,11 @@
namespace art {
// TODO: move GenInvoke to gen_invoke.cc
-void GenInvoke(CompilationUnit* cUnit, CallInfo* info);
+void GenInvoke(CompilationUnit* cu, CallInfo* info);
// TODO: move NewMemCallInfo to gen_invoke.cc or utils
-CallInfo* NewMemCallInfo(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir, InvokeType type, bool isRange);
-void SpecialMIR2LIR(CompilationUnit* cUnit, SpecialCaseHandler specialCase);
-void MethodMIR2LIR(CompilationUnit* cUnit);
+CallInfo* NewMemCallInfo(CompilationUnit* cu, BasicBlock* bb, MIR* mir, InvokeType type, bool is_range);
+void SpecialMIR2LIR(CompilationUnit* cu, SpecialCaseHandler special_case);
+void MethodMIR2LIR(CompilationUnit* cu);
} // namespace art
diff --git a/src/compiler/codegen/mips/assemble_mips.cc b/src/compiler/codegen/mips/assemble_mips.cc
index 9bee7af..b80784f 100644
--- a/src/compiler/codegen/mips/assemble_mips.cc
+++ b/src/compiler/codegen/mips/assemble_mips.cc
@@ -458,12 +458,12 @@
* NOTE: An out-of-range bal isn't supported because it should
* never happen with the current PIC model.
*/
-static void ConvertShortToLongBranch(CompilationUnit* cUnit, LIR* lir)
+static void ConvertShortToLongBranch(CompilationUnit* cu, LIR* lir)
{
// For conditional branches we'll need to reverse the sense
bool unconditional = false;
int opcode = lir->opcode;
- int dalvikOffset = lir->dalvikOffset;
+ int dalvik_offset = lir->dalvik_offset;
switch (opcode) {
case kMipsBal:
LOG(FATAL) << "long branch and link unsupported";
@@ -481,31 +481,31 @@
default:
LOG(FATAL) << "Unexpected branch kind " << opcode;
}
- LIR* hopTarget = NULL;
+ LIR* hop_target = NULL;
if (!unconditional) {
- hopTarget = RawLIR(cUnit, dalvikOffset, kPseudoTargetLabel);
- LIR* hopBranch = RawLIR(cUnit, dalvikOffset, opcode, lir->operands[0],
- lir->operands[1], 0, 0, 0, hopTarget);
- InsertLIRBefore(lir, hopBranch);
+ hop_target = RawLIR(cu, dalvik_offset, kPseudoTargetLabel);
+ LIR* hop_branch = RawLIR(cu, dalvik_offset, opcode, lir->operands[0],
+ lir->operands[1], 0, 0, 0, hop_target);
+ InsertLIRBefore(lir, hop_branch);
}
- LIR* currPC = RawLIR(cUnit, dalvikOffset, kMipsCurrPC);
- InsertLIRBefore(lir, currPC);
- LIR* anchor = RawLIR(cUnit, dalvikOffset, kPseudoTargetLabel);
- LIR* deltaHi = RawLIR(cUnit, dalvikOffset, kMipsDeltaHi, r_AT, 0,
+ LIR* curr_pc = RawLIR(cu, dalvik_offset, kMipsCurrPC);
+ InsertLIRBefore(lir, curr_pc);
+ LIR* anchor = RawLIR(cu, dalvik_offset, kPseudoTargetLabel);
+ LIR* delta_hi = RawLIR(cu, dalvik_offset, kMipsDeltaHi, r_AT, 0,
reinterpret_cast<uintptr_t>(anchor), 0, 0, lir->target);
- InsertLIRBefore(lir, deltaHi);
+ InsertLIRBefore(lir, delta_hi);
InsertLIRBefore(lir, anchor);
- LIR* deltaLo = RawLIR(cUnit, dalvikOffset, kMipsDeltaLo, r_AT, 0,
+ LIR* delta_lo = RawLIR(cu, dalvik_offset, kMipsDeltaLo, r_AT, 0,
reinterpret_cast<uintptr_t>(anchor), 0, 0, lir->target);
- InsertLIRBefore(lir, deltaLo);
- LIR* addu = RawLIR(cUnit, dalvikOffset, kMipsAddu, r_AT, r_AT, r_RA);
+ InsertLIRBefore(lir, delta_lo);
+ LIR* addu = RawLIR(cu, dalvik_offset, kMipsAddu, r_AT, r_AT, r_RA);
InsertLIRBefore(lir, addu);
- LIR* jr = RawLIR(cUnit, dalvikOffset, kMipsJr, r_AT);
+ LIR* jr = RawLIR(cu, dalvik_offset, kMipsJr, r_AT);
InsertLIRBefore(lir, jr);
if (!unconditional) {
- InsertLIRBefore(lir, hopTarget);
+ InsertLIRBefore(lir, hop_target);
}
- lir->flags.isNop = true;
+ lir->flags.is_nop = true;
}
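
The 131068/-131069 guards used by the branch fixups below come straight
from the MIPS encoding: conditional branches store delta >> 2 in a signed
16-bit field, so the byte delta must be word-aligned and fit in 18 signed
bits. A quick standalone check of those bounds (values only, not the
compiler's code; note the assembler's lower bound is slightly
conservative):

#include <cstdint>
#include <cstdio>

// A MIPS branch stores (delta >> 2) in a signed 16-bit field, so the
// byte delta must be a multiple of 4 and fit 18 signed bits.
static bool BranchDeltaFits(int32_t delta) {
  if (delta & 0x3) return false;             // not word-aligned
  int32_t words = delta >> 2;
  return words >= -32768 && words <= 32767;  // delta in [-131072, 131068]
}

int main() {
  std::printf("%d %d %d\n",
              BranchDeltaFits(131068),    // 1: last reachable forward target
              BranchDeltaFits(131072),    // 0: needs the long-branch sequence
              BranchDeltaFits(-131072));  // 1: encodable, though the assembler
                                          // above retries already at -131070
  return 0;
}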
/*
@@ -514,19 +514,19 @@
* instruction. In those cases we will try to substitute a new code
* sequence or request that the trace be shortened and retried.
*/
-AssemblerStatus AssembleInstructions(CompilationUnit *cUnit,
- uintptr_t startAddr)
+AssemblerStatus AssembleInstructions(CompilationUnit *cu,
+ uintptr_t start_addr)
{
LIR *lir;
AssemblerStatus res = kSuccess; // Assume success
- for (lir = cUnit->firstLIRInsn; lir; lir = NEXT_LIR(lir)) {
+ for (lir = cu->first_lir_insn; lir; lir = NEXT_LIR(lir)) {
if (lir->opcode < 0) {
continue;
}
- if (lir->flags.isNop) {
+ if (lir->flags.is_nop) {
continue;
}
@@ -543,101 +543,101 @@
* then it is a Switch/Data table.
*/
int offset1 = (reinterpret_cast<LIR*>(lir->operands[2]))->offset;
- SwitchTable *tabRec = reinterpret_cast<SwitchTable*>(lir->operands[3]);
- int offset2 = tabRec ? tabRec->offset : lir->target->offset;
+ SwitchTable *tab_rec = reinterpret_cast<SwitchTable*>(lir->operands[3]);
+ int offset2 = tab_rec ? tab_rec->offset : lir->target->offset;
int delta = offset2 - offset1;
if ((delta & 0xffff) == delta && ((delta & 0x8000) == 0)) {
// Fits
lir->operands[1] = delta;
} else {
// Doesn't fit - must expand to kMipsDelta[Hi|Lo] pair
- LIR *newDeltaHi =
- RawLIR(cUnit, lir->dalvikOffset, kMipsDeltaHi,
+ LIR *new_delta_hi =
+ RawLIR(cu, lir->dalvik_offset, kMipsDeltaHi,
lir->operands[0], 0, lir->operands[2],
lir->operands[3], 0, lir->target);
- InsertLIRBefore(lir, newDeltaHi);
- LIR *newDeltaLo =
- RawLIR(cUnit, lir->dalvikOffset, kMipsDeltaLo,
+ InsertLIRBefore(lir, new_delta_hi);
+ LIR *new_delta_lo =
+ RawLIR(cu, lir->dalvik_offset, kMipsDeltaLo,
lir->operands[0], 0, lir->operands[2],
lir->operands[3], 0, lir->target);
- InsertLIRBefore(lir, newDeltaLo);
- LIR *newAddu =
- RawLIR(cUnit, lir->dalvikOffset, kMipsAddu,
+ InsertLIRBefore(lir, new_delta_lo);
+ LIR *new_addu =
+ RawLIR(cu, lir->dalvik_offset, kMipsAddu,
lir->operands[0], lir->operands[0], r_RA);
- InsertLIRBefore(lir, newAddu);
- lir->flags.isNop = true;
+ InsertLIRBefore(lir, new_addu);
+ lir->flags.is_nop = true;
res = kRetryAll;
}
} else if (lir->opcode == kMipsDeltaLo) {
int offset1 = (reinterpret_cast<LIR*>(lir->operands[2]))->offset;
- SwitchTable *tabRec = reinterpret_cast<SwitchTable*>(lir->operands[3]);
- int offset2 = tabRec ? tabRec->offset : lir->target->offset;
+ SwitchTable *tab_rec = reinterpret_cast<SwitchTable*>(lir->operands[3]);
+ int offset2 = tab_rec ? tab_rec->offset : lir->target->offset;
int delta = offset2 - offset1;
lir->operands[1] = delta & 0xffff;
} else if (lir->opcode == kMipsDeltaHi) {
int offset1 = (reinterpret_cast<LIR*>(lir->operands[2]))->offset;
- SwitchTable *tabRec = reinterpret_cast<SwitchTable*>(lir->operands[3]);
- int offset2 = tabRec ? tabRec->offset : lir->target->offset;
+ SwitchTable *tab_rec = reinterpret_cast<SwitchTable*>(lir->operands[3]);
+ int offset2 = tab_rec ? tab_rec->offset : lir->target->offset;
int delta = offset2 - offset1;
lir->operands[1] = (delta >> 16) & 0xffff;
} else if (lir->opcode == kMipsB || lir->opcode == kMipsBal) {
- LIR *targetLIR = lir->target;
+ LIR *target_lir = lir->target;
uintptr_t pc = lir->offset + 4;
- uintptr_t target = targetLIR->offset;
+ uintptr_t target = target_lir->offset;
int delta = target - pc;
if (delta & 0x3) {
LOG(FATAL) << "PC-rel offset not multiple of 4: " << delta;
}
if (delta > 131068 || delta < -131069) {
res = kRetryAll;
- ConvertShortToLongBranch(cUnit, lir);
+ ConvertShortToLongBranch(cu, lir);
} else {
lir->operands[0] = delta >> 2;
}
} else if (lir->opcode >= kMipsBeqz && lir->opcode <= kMipsBnez) {
- LIR *targetLIR = lir->target;
+ LIR *target_lir = lir->target;
uintptr_t pc = lir->offset + 4;
- uintptr_t target = targetLIR->offset;
+ uintptr_t target = target_lir->offset;
int delta = target - pc;
if (delta & 0x3) {
LOG(FATAL) << "PC-rel offset not multiple of 4: " << delta;
}
if (delta > 131068 || delta < -131069) {
res = kRetryAll;
- ConvertShortToLongBranch(cUnit, lir);
+ ConvertShortToLongBranch(cu, lir);
} else {
lir->operands[1] = delta >> 2;
}
} else if (lir->opcode == kMipsBeq || lir->opcode == kMipsBne) {
- LIR *targetLIR = lir->target;
+ LIR *target_lir = lir->target;
uintptr_t pc = lir->offset + 4;
- uintptr_t target = targetLIR->offset;
+ uintptr_t target = target_lir->offset;
int delta = target - pc;
if (delta & 0x3) {
LOG(FATAL) << "PC-rel offset not multiple of 4: " << delta;
}
if (delta > 131068 || delta < -131069) {
res = kRetryAll;
- ConvertShortToLongBranch(cUnit, lir);
+ ConvertShortToLongBranch(cu, lir);
} else {
lir->operands[2] = delta >> 2;
}
} else if (lir->opcode == kMipsJal) {
- uintptr_t curPC = (startAddr + lir->offset + 4) & ~3;
+ uintptr_t cur_pc = (start_addr + lir->offset + 4) & ~3;
uintptr_t target = lir->operands[0];
/* ensure PC-region branch can be used */
- DCHECK_EQ((curPC & 0xF0000000), (target & 0xF0000000));
+ DCHECK_EQ((cur_pc & 0xF0000000), (target & 0xF0000000));
if (target & 0x3) {
LOG(FATAL) << "Jump target not multiple of 4: " << target;
}
lir->operands[0] = target >> 2;
} else if (lir->opcode == kMipsLahi) { /* ld address hi (via lui) */
- LIR *targetLIR = lir->target;
- uintptr_t target = startAddr + targetLIR->offset;
+ LIR *target_lir = lir->target;
+ uintptr_t target = start_addr + target_lir->offset;
lir->operands[1] = target >> 16;
} else if (lir->opcode == kMipsLalo) { /* ld address lo (via ori) */
- LIR *targetLIR = lir->target;
- uintptr_t target = startAddr + targetLIR->offset;
+ LIR *target_lir = lir->target;
+ uintptr_t target = start_addr + target_lir->offset;
lir->operands[2] = lir->operands[2] + target;
}
}
@@ -657,54 +657,54 @@
uint32_t operand;
uint32_t value;
operand = lir->operands[i];
- switch (encoder->fieldLoc[i].kind) {
+ switch (encoder->field_loc[i].kind) {
case kFmtUnused:
break;
case kFmtBitBlt:
- if (encoder->fieldLoc[i].start == 0 && encoder->fieldLoc[i].end == 31) {
+ if (encoder->field_loc[i].start == 0 && encoder->field_loc[i].end == 31) {
value = operand;
} else {
- value = (operand << encoder->fieldLoc[i].start) &
- ((1 << (encoder->fieldLoc[i].end + 1)) - 1);
+ value = (operand << encoder->field_loc[i].start) &
+ ((1 << (encoder->field_loc[i].end + 1)) - 1);
}
bits |= value;
break;
case kFmtBlt5_2:
value = (operand & 0x1f);
- bits |= (value << encoder->fieldLoc[i].start);
- bits |= (value << encoder->fieldLoc[i].end);
+ bits |= (value << encoder->field_loc[i].start);
+ bits |= (value << encoder->field_loc[i].end);
break;
case kFmtDfp: {
DCHECK(MIPS_DOUBLEREG(operand));
DCHECK_EQ((operand & 0x1), 0U);
- value = ((operand & MIPS_FP_REG_MASK) << encoder->fieldLoc[i].start) &
- ((1 << (encoder->fieldLoc[i].end + 1)) - 1);
+ value = ((operand & MIPS_FP_REG_MASK) << encoder->field_loc[i].start) &
+ ((1 << (encoder->field_loc[i].end + 1)) - 1);
bits |= value;
break;
}
case kFmtSfp:
DCHECK(MIPS_SINGLEREG(operand));
- value = ((operand & MIPS_FP_REG_MASK) << encoder->fieldLoc[i].start) &
- ((1 << (encoder->fieldLoc[i].end + 1)) - 1);
+ value = ((operand & MIPS_FP_REG_MASK) << encoder->field_loc[i].start) &
+ ((1 << (encoder->field_loc[i].end + 1)) - 1);
bits |= value;
break;
default:
- LOG(FATAL) << "Bad encoder format: " << encoder->fieldLoc[i].kind;
+ LOG(FATAL) << "Bad encoder format: " << encoder->field_loc[i].kind;
}
}
// We only support little-endian MIPS.
- cUnit->codeBuffer.push_back(bits & 0xff);
- cUnit->codeBuffer.push_back((bits >> 8) & 0xff);
- cUnit->codeBuffer.push_back((bits >> 16) & 0xff);
- cUnit->codeBuffer.push_back((bits >> 24) & 0xff);
+ cu->code_buffer.push_back(bits & 0xff);
+ cu->code_buffer.push_back((bits >> 8) & 0xff);
+ cu->code_buffer.push_back((bits >> 16) & 0xff);
+ cu->code_buffer.push_back((bits >> 24) & 0xff);
// TUNING: replace with proper delay slot handling
if (encoder->size == 8) {
const MipsEncodingMap *encoder = &EncodingMap[kMipsNop];
uint32_t bits = encoder->skeleton;
- cUnit->codeBuffer.push_back(bits & 0xff);
- cUnit->codeBuffer.push_back((bits >> 8) & 0xff);
- cUnit->codeBuffer.push_back((bits >> 16) & 0xff);
- cUnit->codeBuffer.push_back((bits >> 24) & 0xff);
+ cu->code_buffer.push_back(bits & 0xff);
+ cu->code_buffer.push_back((bits >> 8) & 0xff);
+ cu->code_buffer.push_back((bits >> 16) & 0xff);
+ cu->code_buffer.push_back((bits >> 24) & 0xff);
}
}
return res;
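
Each operand above lands in the instruction skeleton according to its
field_loc descriptor; for kFmtBitBlt, start is the LSB position and end
the MSB. A minimal sketch of that case (descriptor layout copied from the
switch above, everything else illustrative):

#include <cstdint>
#include <cstdio>

// One operand field: 'start' is the LSB position, 'end' the MSB,
// mirroring the kFmtBitBlt arm of the encoder switch.
struct FieldLoc { int end; int start; };

static uint32_t InsertBitBlt(uint32_t skeleton, FieldLoc f, uint32_t operand) {
  if (f.start == 0 && f.end == 31) return skeleton | operand;  // full word
  uint32_t value = (operand << f.start) & ((1u << (f.end + 1)) - 1);
  return skeleton | value;
}

int main() {
  // E.g. place register number 7 into bits 25..21 (the MIPS 'rs' slot).
  uint32_t bits = InsertBitBlt(0, FieldLoc{25, 21}, 7);
  std::printf("0x%08x\n", bits);  // 0x00e00000
  return 0;
}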
@@ -718,23 +718,23 @@
* Target-dependent offset assignment.
*/
-int AssignInsnOffsets(CompilationUnit* cUnit)
+int AssignInsnOffsets(CompilationUnit* cu)
{
- LIR* mipsLIR;
+ LIR* mips_lir;
int offset = 0;
- for (mipsLIR = cUnit->firstLIRInsn; mipsLIR; mipsLIR = NEXT_LIR(mipsLIR)) {
- mipsLIR->offset = offset;
- if (mipsLIR->opcode >= 0) {
- if (!mipsLIR->flags.isNop) {
- offset += mipsLIR->flags.size;
+ for (mips_lir = cu->first_lir_insn; mips_lir; mips_lir = NEXT_LIR(mips_lir)) {
+ mips_lir->offset = offset;
+ if (mips_lir->opcode >= 0) {
+ if (!mips_lir->flags.is_nop) {
+ offset += mips_lir->flags.size;
}
- } else if (mipsLIR->opcode == kPseudoPseudoAlign4) {
+ } else if (mips_lir->opcode == kPseudoPseudoAlign4) {
if (offset & 0x2) {
offset += 2;
- mipsLIR->operands[0] = 1;
+ mips_lir->operands[0] = 1;
} else {
- mipsLIR->operands[0] = 0;
+ mips_lir->operands[0] = 0;
}
}
/* Pseudo opcodes don't consume space */
diff --git a/src/compiler/codegen/mips/call_mips.cc b/src/compiler/codegen/mips/call_mips.cc
index 33a7aed..b25b7e6 100644
--- a/src/compiler/codegen/mips/call_mips.cc
+++ b/src/compiler/codegen/mips/call_mips.cc
@@ -23,8 +23,8 @@
namespace art {
-void GenSpecialCase(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir,
- SpecialCaseHandler specialCase)
+void GenSpecialCase(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
+ SpecialCaseHandler special_case)
{
// TODO
}
@@ -42,176 +42,176 @@
*
* The test loop will look something like:
*
- * ori rEnd, r_ZERO, #tableSize ; size in bytes
+ * ori rEnd, r_ZERO, #table_size ; size in bytes
* jal BaseLabel ; stores "return address" (BaseLabel) in r_RA
* nop ; opportunistically fill
* BaseLabel:
* addiu rBase, r_RA, <table> - <BaseLabel> ; table relative to BaseLabel
* addu rEnd, rEnd, rBase ; end of table
- * lw rVal, [rSP, vRegOff] ; Test Value
+ * lw r_val, [rSP, v_reg_off] ; Test Value
* loop:
* beq rBase, rEnd, done
- * lw rKey, 0(rBase)
+ * lw r_key, 0(rBase)
* addu rBase, 8
- * bne rVal, rKey, loop
- * lw rDisp, -4(rBase)
- * addu r_RA, rDisp
+ * bne r_val, r_key, loop
+ * lw r_disp, -4(rBase)
+ * addu r_RA, r_disp
* jr r_RA
* done:
*
*/
-void GenSparseSwitch(CompilationUnit* cUnit, uint32_t tableOffset,
- RegLocation rlSrc)
+void GenSparseSwitch(CompilationUnit* cu, uint32_t table_offset,
+ RegLocation rl_src)
{
- const uint16_t* table = cUnit->insns + cUnit->currentDalvikOffset + tableOffset;
- if (cUnit->printMe) {
+ const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
+ if (cu->verbose) {
DumpSparseSwitchTable(table);
}
// Add the table to the list - we'll process it later
- SwitchTable *tabRec =
- static_cast<SwitchTable*>(NewMem(cUnit, sizeof(SwitchTable), true, kAllocData));
- tabRec->table = table;
- tabRec->vaddr = cUnit->currentDalvikOffset;
+ SwitchTable *tab_rec =
+ static_cast<SwitchTable*>(NewMem(cu, sizeof(SwitchTable), true, kAllocData));
+ tab_rec->table = table;
+ tab_rec->vaddr = cu->current_dalvik_offset;
int elements = table[1];
- tabRec->targets =
- static_cast<LIR**>(NewMem(cUnit, elements * sizeof(LIR*), true, kAllocLIR));
- InsertGrowableList(cUnit, &cUnit->switchTables, reinterpret_cast<uintptr_t>(tabRec));
+ tab_rec->targets =
+ static_cast<LIR**>(NewMem(cu, elements * sizeof(LIR*), true, kAllocLIR));
+ InsertGrowableList(cu, &cu->switch_tables, reinterpret_cast<uintptr_t>(tab_rec));
// The table is composed of 8-byte key/disp pairs
- int byteSize = elements * 8;
+ int byte_size = elements * 8;
- int sizeHi = byteSize >> 16;
- int sizeLo = byteSize & 0xffff;
+ int size_hi = byte_size >> 16;
+ int size_lo = byte_size & 0xffff;
- int rEnd = AllocTemp(cUnit);
- if (sizeHi) {
- NewLIR2(cUnit, kMipsLui, rEnd, sizeHi);
+ int rEnd = AllocTemp(cu);
+ if (size_hi) {
+ NewLIR2(cu, kMipsLui, rEnd, size_hi);
}
// Must prevent code motion for the curr pc pair
- GenBarrier(cUnit); // Scheduling barrier
- NewLIR0(cUnit, kMipsCurrPC); // Really a jal to .+8
+ GenBarrier(cu); // Scheduling barrier
+ NewLIR0(cu, kMipsCurrPC); // Really a jal to .+8
// Now, fill the branch delay slot
- if (sizeHi) {
- NewLIR3(cUnit, kMipsOri, rEnd, rEnd, sizeLo);
+ if (size_hi) {
+ NewLIR3(cu, kMipsOri, rEnd, rEnd, size_lo);
} else {
- NewLIR3(cUnit, kMipsOri, rEnd, r_ZERO, sizeLo);
+ NewLIR3(cu, kMipsOri, rEnd, r_ZERO, size_lo);
}
- GenBarrier(cUnit); // Scheduling barrier
+ GenBarrier(cu); // Scheduling barrier
// Construct BaseLabel and set up table base register
- LIR* baseLabel = NewLIR0(cUnit, kPseudoTargetLabel);
+ LIR* base_label = NewLIR0(cu, kPseudoTargetLabel);
// Remember base label so offsets can be computed later
- tabRec->anchor = baseLabel;
- int rBase = AllocTemp(cUnit);
- NewLIR4(cUnit, kMipsDelta, rBase, 0, reinterpret_cast<uintptr_t>(baseLabel),
- reinterpret_cast<uintptr_t>(tabRec));
- OpRegRegReg(cUnit, kOpAdd, rEnd, rEnd, rBase);
+ tab_rec->anchor = base_label;
+ int rBase = AllocTemp(cu);
+ NewLIR4(cu, kMipsDelta, rBase, 0, reinterpret_cast<uintptr_t>(base_label),
+ reinterpret_cast<uintptr_t>(tab_rec));
+ OpRegRegReg(cu, kOpAdd, rEnd, rEnd, rBase);
// Grab switch test value
- rlSrc = LoadValue(cUnit, rlSrc, kCoreReg);
+ rl_src = LoadValue(cu, rl_src, kCoreReg);
// Test loop
- int rKey = AllocTemp(cUnit);
- LIR* loopLabel = NewLIR0(cUnit, kPseudoTargetLabel);
- LIR* exitBranch = OpCmpBranch(cUnit , kCondEq, rBase, rEnd, NULL);
- LoadWordDisp(cUnit, rBase, 0, rKey);
- OpRegImm(cUnit, kOpAdd, rBase, 8);
- OpCmpBranch(cUnit, kCondNe, rlSrc.lowReg, rKey, loopLabel);
- int rDisp = AllocTemp(cUnit);
- LoadWordDisp(cUnit, rBase, -4, rDisp);
- OpRegRegReg(cUnit, kOpAdd, r_RA, r_RA, rDisp);
- OpReg(cUnit, kOpBx, r_RA);
+ int r_key = AllocTemp(cu);
+ LIR* loop_label = NewLIR0(cu, kPseudoTargetLabel);
+ LIR* exit_branch = OpCmpBranch(cu, kCondEq, rBase, rEnd, NULL);
+ LoadWordDisp(cu, rBase, 0, r_key);
+ OpRegImm(cu, kOpAdd, rBase, 8);
+ OpCmpBranch(cu, kCondNe, rl_src.low_reg, r_key, loop_label);
+ int r_disp = AllocTemp(cu);
+ LoadWordDisp(cu, rBase, -4, r_disp);
+ OpRegRegReg(cu, kOpAdd, r_RA, r_RA, r_disp);
+ OpReg(cu, kOpBx, r_RA);
// Loop exit
- LIR* exitLabel = NewLIR0(cUnit, kPseudoTargetLabel);
- exitBranch->target = exitLabel;
+ LIR* exit_label = NewLIR0(cu, kPseudoTargetLabel);
+ exit_branch->target = exit_label;
}
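
The loop just emitted scans a table of 8-byte key/displacement pairs,
which is why the sizing above multiplies elements by 8 and the loop
strides rBase by 8. A host-side sketch of the same linear lookup (plain
structs standing in for the materialized table):

#include <cstdint>

// One entry of the switch table the loop above walks: 8 bytes per
// element, a key followed by a branch displacement.
struct KeyDisp { int32_t key; int32_t disp; };

// Returns the displacement for 'val', or 'not_found' when the scan
// falls off the end (the beq rBase, rEnd, done exit above).
static int32_t SparseLookup(const KeyDisp* table, int elements,
                            int32_t val, int32_t not_found) {
  for (int i = 0; i < elements; ++i) {
    if (table[i].key == val) return table[i].disp;
  }
  return not_found;
}

int main() {
  const KeyDisp table[] = {{-1, 8}, {10, 16}, {257, 24}};
  return SparseLookup(table, 3, 10, 0) == 16 ? 0 : 1;
}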
/*
* Code pattern will look something like:
*
- * lw rVal
+ * lw r_val
* jal BaseLabel ; stores "return address" (BaseLabel) in r_RA
* nop ; opportunistically fill
- * [subiu rVal, bias] ; Remove bias if lowVal != 0
+ * [subiu r_val, bias] ; Remove bias if low_val != 0
* bound check -> done
- * lw rDisp, [r_RA, rVal]
- * addu r_RA, rDisp
+ * lw r_disp, [r_RA, r_val]
+ * addu r_RA, r_disp
* jr r_RA
* done:
*/
-void GenPackedSwitch(CompilationUnit* cUnit, uint32_t tableOffset,
- RegLocation rlSrc)
+void GenPackedSwitch(CompilationUnit* cu, uint32_t table_offset,
+ RegLocation rl_src)
{
- const uint16_t* table = cUnit->insns + cUnit->currentDalvikOffset + tableOffset;
- if (cUnit->printMe) {
+ const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
+ if (cu->verbose) {
DumpPackedSwitchTable(table);
}
// Add the table to the list - we'll process it later
- SwitchTable *tabRec =
- static_cast<SwitchTable*>(NewMem(cUnit, sizeof(SwitchTable), true, kAllocData));
- tabRec->table = table;
- tabRec->vaddr = cUnit->currentDalvikOffset;
+ SwitchTable *tab_rec =
+ static_cast<SwitchTable*>(NewMem(cu, sizeof(SwitchTable), true, kAllocData));
+ tab_rec->table = table;
+ tab_rec->vaddr = cu->current_dalvik_offset;
int size = table[1];
- tabRec->targets = static_cast<LIR**>(NewMem(cUnit, size * sizeof(LIR*), true, kAllocLIR));
- InsertGrowableList(cUnit, &cUnit->switchTables, reinterpret_cast<uintptr_t>(tabRec));
+ tab_rec->targets = static_cast<LIR**>(NewMem(cu, size * sizeof(LIR*), true, kAllocLIR));
+ InsertGrowableList(cu, &cu->switch_tables, reinterpret_cast<uintptr_t>(tab_rec));
// Get the switch value
- rlSrc = LoadValue(cUnit, rlSrc, kCoreReg);
+ rl_src = LoadValue(cu, rl_src, kCoreReg);
// Prepare the bias. If too big, handle 1st stage here
- int lowKey = s4FromSwitchData(&table[2]);
- bool largeBias = false;
- int rKey;
- if (lowKey == 0) {
- rKey = rlSrc.lowReg;
- } else if ((lowKey & 0xffff) != lowKey) {
- rKey = AllocTemp(cUnit);
- LoadConstant(cUnit, rKey, lowKey);
- largeBias = true;
+ int low_key = s4FromSwitchData(&table[2]);
+ bool large_bias = false;
+ int r_key;
+ if (low_key == 0) {
+ r_key = rl_src.low_reg;
+ } else if ((low_key & 0xffff) != low_key) {
+ r_key = AllocTemp(cu);
+ LoadConstant(cu, r_key, low_key);
+ large_bias = true;
} else {
- rKey = AllocTemp(cUnit);
+ r_key = AllocTemp(cu);
}
// Must prevent code motion for the curr pc pair
- GenBarrier(cUnit);
- NewLIR0(cUnit, kMipsCurrPC); // Really a jal to .+8
+ GenBarrier(cu);
+ NewLIR0(cu, kMipsCurrPC); // Really a jal to .+8
// Now, fill the branch delay slot with bias strip
- if (lowKey == 0) {
- NewLIR0(cUnit, kMipsNop);
+ if (low_key == 0) {
+ NewLIR0(cu, kMipsNop);
} else {
- if (largeBias) {
- OpRegRegReg(cUnit, kOpSub, rKey, rlSrc.lowReg, rKey);
+ if (large_bias) {
+ OpRegRegReg(cu, kOpSub, r_key, rl_src.low_reg, r_key);
} else {
- OpRegRegImm(cUnit, kOpSub, rKey, rlSrc.lowReg, lowKey);
+ OpRegRegImm(cu, kOpSub, r_key, rl_src.low_reg, low_key);
}
}
- GenBarrier(cUnit); // Scheduling barrier
+ GenBarrier(cu); // Scheduling barrier
// Construct BaseLabel and set up table base register
- LIR* baseLabel = NewLIR0(cUnit, kPseudoTargetLabel);
+ LIR* base_label = NewLIR0(cu, kPseudoTargetLabel);
// Remember base label so offsets can be computed later
- tabRec->anchor = baseLabel;
+ tab_rec->anchor = base_label;
// Bounds check - if < 0 or >= size continue following switch
- LIR* branchOver = OpCmpImmBranch(cUnit, kCondHi, rKey, size-1, NULL);
+ LIR* branch_over = OpCmpImmBranch(cu, kCondHi, r_key, size-1, NULL);
// Materialize the table base pointer
- int rBase = AllocTemp(cUnit);
- NewLIR4(cUnit, kMipsDelta, rBase, 0, reinterpret_cast<uintptr_t>(baseLabel),
- reinterpret_cast<uintptr_t>(tabRec));
+ int rBase = AllocTemp(cu);
+ NewLIR4(cu, kMipsDelta, rBase, 0, reinterpret_cast<uintptr_t>(base_label),
+ reinterpret_cast<uintptr_t>(tab_rec));
// Load the displacement from the switch table
- int rDisp = AllocTemp(cUnit);
- LoadBaseIndexed(cUnit, rBase, rKey, rDisp, 2, kWord);
+ int r_disp = AllocTemp(cu);
+ LoadBaseIndexed(cu, rBase, r_key, r_disp, 2, kWord);
// Add to r_RA and go
- OpRegRegReg(cUnit, kOpAdd, r_RA, r_RA, rDisp);
- OpReg(cUnit, kOpBx, r_RA);
+ OpRegRegReg(cu, kOpAdd, r_RA, r_RA, r_disp);
+ OpReg(cu, kOpBx, r_RA);
- /* branchOver target here */
- LIR* target = NewLIR0(cUnit, kPseudoTargetLabel);
- branchOver->target = target;
+ /* branch_over target here */
+ LIR* target = NewLIR0(cu, kPseudoTargetLabel);
+ branch_over->target = target;
}
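
The kMipsCurrPC pseudo-op is the key trick here: per the comment it is really a jal to .+8, so RA picks up the address of base_label while the delay slot strips the bias for free; the per-case displacement loaded from the table is then added to RA and jumped through. In C terms the dispatch it sets up looks roughly like this (a sketch with hypothetical table values, not code from the CL):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const int32_t low_key = 100;            // bias read from table[2]
      const int32_t targets[] = {8, 16, 24};  // per-case displacements
      const int32_t size = 3;                 // table[1]
      int32_t value = 101;                    // the switch operand
      // Delay slot: strip the bias. The unsigned compare matches
      // OpCmpImmBranch(kCondHi, r_key, size - 1).
      uint32_t key = static_cast<uint32_t>(value - low_key);
      if (key >= static_cast<uint32_t>(size)) {
        puts("fall through");                 // branch_over path
      } else {
        // OpRegRegReg(kOpAdd, r_RA, r_RA, r_disp); OpReg(kOpBx, r_RA)
        printf("branch to base_label + %d\n", targets[key]);
      }
      return 0;
    }
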
/*
@@ -224,155 +224,155 @@
*
* Total size is 4+(width * size + 1)/2 16-bit code units.
*/
-void GenFillArrayData(CompilationUnit* cUnit, uint32_t tableOffset,
- RegLocation rlSrc)
+void GenFillArrayData(CompilationUnit* cu, uint32_t table_offset,
+ RegLocation rl_src)
{
- const uint16_t* table = cUnit->insns + cUnit->currentDalvikOffset + tableOffset;
+ const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
// Add the table to the list - we'll process it later
- FillArrayData *tabRec =
- reinterpret_cast<FillArrayData*>(NewMem(cUnit, sizeof(FillArrayData), true, kAllocData));
- tabRec->table = table;
- tabRec->vaddr = cUnit->currentDalvikOffset;
- uint16_t width = tabRec->table[1];
- uint32_t size = tabRec->table[2] | ((static_cast<uint32_t>(tabRec->table[3])) << 16);
- tabRec->size = (size * width) + 8;
+ FillArrayData *tab_rec =
+ reinterpret_cast<FillArrayData*>(NewMem(cu, sizeof(FillArrayData), true, kAllocData));
+ tab_rec->table = table;
+ tab_rec->vaddr = cu->current_dalvik_offset;
+ uint16_t width = tab_rec->table[1];
+ uint32_t size = tab_rec->table[2] | ((static_cast<uint32_t>(tab_rec->table[3])) << 16);
+ tab_rec->size = (size * width) + 8;
- InsertGrowableList(cUnit, &cUnit->fillArrayData, reinterpret_cast<uintptr_t>(tabRec));
+ InsertGrowableList(cu, &cu->fill_array_data, reinterpret_cast<uintptr_t>(tab_rec));
// Making a call - use explicit registers
- FlushAllRegs(cUnit); /* Everything to home location */
- LockCallTemps(cUnit);
- LoadValueDirectFixed(cUnit, rlSrc, rMIPS_ARG0);
+ FlushAllRegs(cu); /* Everything to home location */
+ LockCallTemps(cu);
+ LoadValueDirectFixed(cu, rl_src, rMIPS_ARG0);
// Must prevent code motion for the curr pc pair
- GenBarrier(cUnit);
- NewLIR0(cUnit, kMipsCurrPC); // Really a jal to .+8
+ GenBarrier(cu);
+ NewLIR0(cu, kMipsCurrPC); // Really a jal to .+8
// Now, fill the branch delay slot with the helper load
- int rTgt = LoadHelper(cUnit, ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode));
- GenBarrier(cUnit); // Scheduling barrier
+ int r_tgt = LoadHelper(cu, ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode));
+ GenBarrier(cu); // Scheduling barrier
// Construct BaseLabel and set up table base register
- LIR* baseLabel = NewLIR0(cUnit, kPseudoTargetLabel);
+ LIR* base_label = NewLIR0(cu, kPseudoTargetLabel);
// Materialize a pointer to the fill data image
- NewLIR4(cUnit, kMipsDelta, rMIPS_ARG1, 0, reinterpret_cast<uintptr_t>(baseLabel),
- reinterpret_cast<uintptr_t>(tabRec));
+ NewLIR4(cu, kMipsDelta, rMIPS_ARG1, 0, reinterpret_cast<uintptr_t>(base_label),
+ reinterpret_cast<uintptr_t>(tab_rec));
// And go...
- ClobberCalleeSave(cUnit);
- LIR* callInst = OpReg(cUnit, kOpBlx, rTgt); // ( array*, fill_data* )
- MarkSafepointPC(cUnit, callInst);
+ ClobberCalleeSave(cu);
+ LIR* call_inst = OpReg(cu, kOpBlx, r_tgt); // ( array*, fill_data* )
+ MarkSafepointPC(cu, call_inst);
}
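
A quick sanity check of the size computation against the "4+(width * size + 1)/2 16-bit code units" comment: the 8 added to size * width is the 4-code-unit table header in bytes. A sketch with hypothetical values:

    #include <cstdio>

    int main() {
      unsigned width = 4, size = 3;                      // e.g. three 4-byte ints
      unsigned bytes = size * width + 8;                 // what tab_rec->size stores
      unsigned code_units = 4 + (size * width + 1) / 2;  // formula from the comment
      printf("%u bytes == %u code units\n", bytes, code_units);  // 20 == 10
      return 0;
    }
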
/*
* TODO: implement fast path to short-circuit thin-lock case
*/
-void GenMonitorEnter(CompilationUnit* cUnit, int optFlags, RegLocation rlSrc)
+void GenMonitorEnter(CompilationUnit* cu, int opt_flags, RegLocation rl_src)
{
- FlushAllRegs(cUnit);
- LoadValueDirectFixed(cUnit, rlSrc, rMIPS_ARG0); // Get obj
- LockCallTemps(cUnit); // Prepare for explicit register usage
- GenNullCheck(cUnit, rlSrc.sRegLow, rMIPS_ARG0, optFlags);
+ FlushAllRegs(cu);
+ LoadValueDirectFixed(cu, rl_src, rMIPS_ARG0); // Get obj
+ LockCallTemps(cu); // Prepare for explicit register usage
+ GenNullCheck(cu, rl_src.s_reg_low, rMIPS_ARG0, opt_flags);
// Go expensive route - artLockObjectFromCode(self, obj);
- int rTgt = LoadHelper(cUnit, ENTRYPOINT_OFFSET(pLockObjectFromCode));
- ClobberCalleeSave(cUnit);
- LIR* callInst = OpReg(cUnit, kOpBlx, rTgt);
- MarkSafepointPC(cUnit, callInst);
+ int r_tgt = LoadHelper(cu, ENTRYPOINT_OFFSET(pLockObjectFromCode));
+ ClobberCalleeSave(cu);
+ LIR* call_inst = OpReg(cu, kOpBlx, r_tgt);
+ MarkSafepointPC(cu, call_inst);
}
/*
* TODO: implement fast path to short-circuit thin-lock case
*/
-void GenMonitorExit(CompilationUnit* cUnit, int optFlags, RegLocation rlSrc)
+void GenMonitorExit(CompilationUnit* cu, int opt_flags, RegLocation rl_src)
{
- FlushAllRegs(cUnit);
- LoadValueDirectFixed(cUnit, rlSrc, rMIPS_ARG0); // Get obj
- LockCallTemps(cUnit); // Prepare for explicit register usage
- GenNullCheck(cUnit, rlSrc.sRegLow, rMIPS_ARG0, optFlags);
+ FlushAllRegs(cu);
+ LoadValueDirectFixed(cu, rl_src, rMIPS_ARG0); // Get obj
+ LockCallTemps(cu); // Prepare for explicit register usage
+ GenNullCheck(cu, rl_src.s_reg_low, rMIPS_ARG0, opt_flags);
// Go expensive route - UnlockObjectFromCode(obj);
- int rTgt = LoadHelper(cUnit, ENTRYPOINT_OFFSET(pUnlockObjectFromCode));
- ClobberCalleeSave(cUnit);
- LIR* callInst = OpReg(cUnit, kOpBlx, rTgt);
- MarkSafepointPC(cUnit, callInst);
+ int r_tgt = LoadHelper(cu, ENTRYPOINT_OFFSET(pUnlockObjectFromCode));
+ ClobberCalleeSave(cu);
+ LIR* call_inst = OpReg(cu, kOpBlx, r_tgt);
+ MarkSafepointPC(cu, call_inst);
}
/*
* Mark garbage collection card. Skip if the value we're storing is null.
*/
-void MarkGCCard(CompilationUnit* cUnit, int valReg, int tgtAddrReg)
+void MarkGCCard(CompilationUnit* cu, int val_reg, int tgt_addr_reg)
{
- int regCardBase = AllocTemp(cUnit);
- int regCardNo = AllocTemp(cUnit);
- LIR* branchOver = OpCmpImmBranch(cUnit, kCondEq, valReg, 0, NULL);
- LoadWordDisp(cUnit, rMIPS_SELF, Thread::CardTableOffset().Int32Value(), regCardBase);
- OpRegRegImm(cUnit, kOpLsr, regCardNo, tgtAddrReg, CardTable::kCardShift);
- StoreBaseIndexed(cUnit, regCardBase, regCardNo, regCardBase, 0,
+ int reg_card_base = AllocTemp(cu);
+ int reg_card_no = AllocTemp(cu);
+ LIR* branch_over = OpCmpImmBranch(cu, kCondEq, val_reg, 0, NULL);
+ LoadWordDisp(cu, rMIPS_SELF, Thread::CardTableOffset().Int32Value(), reg_card_base);
+ OpRegRegImm(cu, kOpLsr, reg_card_no, tgt_addr_reg, CardTable::kCardShift);
+ StoreBaseIndexed(cu, reg_card_base, reg_card_no, reg_card_base, 0,
kUnsignedByte);
- LIR* target = NewLIR0(cUnit, kPseudoTargetLabel);
- branchOver->target = target;
- FreeTemp(cUnit, regCardBase);
- FreeTemp(cUnit, regCardNo);
+ LIR* target = NewLIR0(cu, kPseudoTargetLabel);
+ branch_over->target = target;
+ FreeTemp(cu, reg_card_base);
+ FreeTemp(cu, reg_card_no);
}
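
The card mark reduces to one shift and one indexed byte store, both skipped when the stored value is null; note the emitted StoreBaseIndexed reuses reg_card_base as the value stored, since any non-zero byte marks the card. A C sketch with hypothetical addresses and an assumed shift value:

    #include <cstdint>
    #include <cstdio>

    int main() {
      static uint8_t card_table[1024] = {0};  // stand-in for the real card table
      const unsigned kCardShift = 10;         // assumed; real value is CardTable::kCardShift
      uintptr_t tgt_addr = 0x5000;            // address being stored into
      uintptr_t val = 0x1234;                 // value being stored
      if (val != 0) {                         // branch_over skips null stores
        card_table[tgt_addr >> kCardShift] = 1;  // one byte per card
      }
      printf("card %u dirty=%d\n", static_cast<unsigned>(tgt_addr >> kCardShift),
             card_table[tgt_addr >> kCardShift]);
      return 0;
    }
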
-void GenEntrySequence(CompilationUnit* cUnit, RegLocation* ArgLocs,
- RegLocation rlMethod)
+void GenEntrySequence(CompilationUnit* cu, RegLocation* ArgLocs,
+ RegLocation rl_method)
{
- int spillCount = cUnit->numCoreSpills + cUnit->numFPSpills;
+ int spill_count = cu->num_core_spills + cu->num_fp_spills;
/*
* On entry, rMIPS_ARG0, rMIPS_ARG1, rMIPS_ARG2 & rMIPS_ARG3 are live. Let the register
* allocation mechanism know so it doesn't try to use any of them when
* expanding the frame or flushing. This leaves the utility
* code with a single temp: r12. This should be enough.
*/
- LockTemp(cUnit, rMIPS_ARG0);
- LockTemp(cUnit, rMIPS_ARG1);
- LockTemp(cUnit, rMIPS_ARG2);
- LockTemp(cUnit, rMIPS_ARG3);
+ LockTemp(cu, rMIPS_ARG0);
+ LockTemp(cu, rMIPS_ARG1);
+ LockTemp(cu, rMIPS_ARG2);
+ LockTemp(cu, rMIPS_ARG3);
/*
* We can safely skip the stack overflow check if we're
* a leaf *and* our frame size < fudge factor.
*/
- bool skipOverflowCheck = ((cUnit->attrs & METHOD_IS_LEAF) &&
- (static_cast<size_t>(cUnit->frameSize) < Thread::kStackOverflowReservedBytes));
- NewLIR0(cUnit, kPseudoMethodEntry);
- int checkReg = AllocTemp(cUnit);
- int newSP = AllocTemp(cUnit);
- if (!skipOverflowCheck) {
+ bool skip_overflow_check = ((cu->attrs & METHOD_IS_LEAF) &&
+ (static_cast<size_t>(cu->frame_size) < Thread::kStackOverflowReservedBytes));
+ NewLIR0(cu, kPseudoMethodEntry);
+ int check_reg = AllocTemp(cu);
+ int new_sp = AllocTemp(cu);
+ if (!skip_overflow_check) {
/* Load stack limit */
- LoadWordDisp(cUnit, rMIPS_SELF, Thread::StackEndOffset().Int32Value(), checkReg);
+ LoadWordDisp(cu, rMIPS_SELF, Thread::StackEndOffset().Int32Value(), check_reg);
}
/* Spill core callee saves */
- SpillCoreRegs(cUnit);
+ SpillCoreRegs(cu);
/* NOTE: promotion of FP regs currently unsupported, thus no FP spill */
- DCHECK_EQ(cUnit->numFPSpills, 0);
- if (!skipOverflowCheck) {
- OpRegRegImm(cUnit, kOpSub, newSP, rMIPS_SP, cUnit->frameSize - (spillCount * 4));
- GenRegRegCheck(cUnit, kCondCc, newSP, checkReg, kThrowStackOverflow);
- OpRegCopy(cUnit, rMIPS_SP, newSP); // Establish stack
+ DCHECK_EQ(cu->num_fp_spills, 0);
+ if (!skip_overflow_check) {
+ OpRegRegImm(cu, kOpSub, new_sp, rMIPS_SP, cu->frame_size - (spill_count * 4));
+ GenRegRegCheck(cu, kCondCc, new_sp, check_reg, kThrowStackOverflow);
+ OpRegCopy(cu, rMIPS_SP, new_sp); // Establish stack
} else {
- OpRegImm(cUnit, kOpSub, rMIPS_SP, cUnit->frameSize - (spillCount * 4));
+ OpRegImm(cu, kOpSub, rMIPS_SP, cu->frame_size - (spill_count * 4));
}
- FlushIns(cUnit, ArgLocs, rlMethod);
+ FlushIns(cu, ArgLocs, rl_method);
- FreeTemp(cUnit, rMIPS_ARG0);
- FreeTemp(cUnit, rMIPS_ARG1);
- FreeTemp(cUnit, rMIPS_ARG2);
- FreeTemp(cUnit, rMIPS_ARG3);
+ FreeTemp(cu, rMIPS_ARG0);
+ FreeTemp(cu, rMIPS_ARG1);
+ FreeTemp(cu, rMIPS_ARG2);
+ FreeTemp(cu, rMIPS_ARG3);
}
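
The overflow check works on the frame still to be claimed: SpillCoreRegs has already pushed spill_count * 4 bytes, so only frame_size minus that is subtracted, and kCondCc (unsigned lower) against the thread's stack end decides whether to throw. A sketch with made-up sizes:

    #include <cstdint>
    #include <cstdio>

    int main() {
      uintptr_t sp = 0x7fff0000;         // after SpillCoreRegs
      uintptr_t stack_end = 0x7ffe0000;  // loaded via Thread::StackEndOffset()
      int frame_size = 64, spill_count = 2;
      uintptr_t new_sp = sp - (frame_size - spill_count * 4);
      if (new_sp < stack_end) {          // kCondCc: unsigned lower
        puts("kThrowStackOverflow");
      } else {
        sp = new_sp;                     // OpRegCopy establishes the frame
        printf("sp = 0x%lx\n", static_cast<unsigned long>(sp));
      }
      return 0;
    }
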
-void GenExitSequence(CompilationUnit* cUnit)
+void GenExitSequence(CompilationUnit* cu)
{
/*
* In the exit path, rMIPS_RET0/rMIPS_RET1 are live - make sure they aren't
* allocated by the register utilities as temps.
*/
- LockTemp(cUnit, rMIPS_RET0);
- LockTemp(cUnit, rMIPS_RET1);
+ LockTemp(cu, rMIPS_RET0);
+ LockTemp(cu, rMIPS_RET1);
- NewLIR0(cUnit, kPseudoMethodExit);
- UnSpillCoreRegs(cUnit);
- OpReg(cUnit, kOpBx, r_RA);
+ NewLIR0(cu, kPseudoMethodExit);
+ UnSpillCoreRegs(cu);
+ OpReg(cu, kOpBx, r_RA);
}
} // namespace art
diff --git a/src/compiler/codegen/mips/fp_mips.cc b/src/compiler/codegen/mips/fp_mips.cc
index 942259d..8f33dfa 100644
--- a/src/compiler/codegen/mips/fp_mips.cc
+++ b/src/compiler/codegen/mips/fp_mips.cc
@@ -21,12 +21,12 @@
namespace art {
-bool GenArithOpFloat(CompilationUnit *cUnit, Instruction::Code opcode, RegLocation rlDest,
- RegLocation rlSrc1, RegLocation rlSrc2)
+bool GenArithOpFloat(CompilationUnit *cu, Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2)
{
#ifdef __mips_hard_float
int op = kMipsNop;
- RegLocation rlResult;
+ RegLocation rl_result;
/*
* Don't attempt to optimize register usage since these opcodes call out to
@@ -52,29 +52,29 @@
case Instruction::REM_FLOAT_2ADDR:
case Instruction::REM_FLOAT:
case Instruction::NEG_FLOAT: {
- return GenArithOpFloatPortable(cUnit, opcode, rlDest, rlSrc1, rlSrc2);
+ return GenArithOpFloatPortable(cu, opcode, rl_dest, rl_src1, rl_src2);
}
default:
return true;
}
- rlSrc1 = LoadValue(cUnit, rlSrc1, kFPReg);
- rlSrc2 = LoadValue(cUnit, rlSrc2, kFPReg);
- rlResult = EvalLoc(cUnit, rlDest, kFPReg, true);
- NewLIR3(cUnit, op, rlResult.lowReg, rlSrc1.lowReg, rlSrc2.lowReg);
- StoreValue(cUnit, rlDest, rlResult);
+ rl_src1 = LoadValue(cu, rl_src1, kFPReg);
+ rl_src2 = LoadValue(cu, rl_src2, kFPReg);
+ rl_result = EvalLoc(cu, rl_dest, kFPReg, true);
+ NewLIR3(cu, op, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg);
+ StoreValue(cu, rl_dest, rl_result);
return false;
#else
- return GenArithOpFloatPortable(cUnit, opcode, rlDest, rlSrc1, rlSrc2);
+ return GenArithOpFloatPortable(cu, opcode, rl_dest, rl_src1, rl_src2);
#endif
}
-bool GenArithOpDouble(CompilationUnit *cUnit, Instruction::Code opcode,
- RegLocation rlDest, RegLocation rlSrc1, RegLocation rlSrc2)
+bool GenArithOpDouble(CompilationUnit *cu, Instruction::Code opcode,
+ RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2)
{
#ifdef __mips_hard_float
int op = kMipsNop;
- RegLocation rlResult;
+ RegLocation rl_result;
switch (opcode) {
case Instruction::ADD_DOUBLE_2ADDR:
@@ -96,34 +96,34 @@
case Instruction::REM_DOUBLE_2ADDR:
case Instruction::REM_DOUBLE:
case Instruction::NEG_DOUBLE: {
- return GenArithOpDoublePortable(cUnit, opcode, rlDest, rlSrc1, rlSrc2);
+ return GenArithOpDoublePortable(cu, opcode, rl_dest, rl_src1, rl_src2);
}
default:
return true;
}
- rlSrc1 = LoadValueWide(cUnit, rlSrc1, kFPReg);
- DCHECK(rlSrc1.wide);
- rlSrc2 = LoadValueWide(cUnit, rlSrc2, kFPReg);
- DCHECK(rlSrc2.wide);
- rlResult = EvalLoc(cUnit, rlDest, kFPReg, true);
- DCHECK(rlDest.wide);
- DCHECK(rlResult.wide);
- NewLIR3(cUnit, op, S2d(rlResult.lowReg, rlResult.highReg), S2d(rlSrc1.lowReg, rlSrc1.highReg),
- S2d(rlSrc2.lowReg, rlSrc2.highReg));
- StoreValueWide(cUnit, rlDest, rlResult);
+ rl_src1 = LoadValueWide(cu, rl_src1, kFPReg);
+ DCHECK(rl_src1.wide);
+ rl_src2 = LoadValueWide(cu, rl_src2, kFPReg);
+ DCHECK(rl_src2.wide);
+ rl_result = EvalLoc(cu, rl_dest, kFPReg, true);
+ DCHECK(rl_dest.wide);
+ DCHECK(rl_result.wide);
+ NewLIR3(cu, op, S2d(rl_result.low_reg, rl_result.high_reg), S2d(rl_src1.low_reg, rl_src1.high_reg),
+ S2d(rl_src2.low_reg, rl_src2.high_reg));
+ StoreValueWide(cu, rl_dest, rl_result);
return false;
#else
- return GenArithOpDoublePortable(cUnit, opcode, rlDest, rlSrc1, rlSrc2);
+ return GenArithOpDoublePortable(cu, opcode, rl_dest, rl_src1, rl_src2);
#endif
}
-bool GenConversion(CompilationUnit *cUnit, Instruction::Code opcode, RegLocation rlDest,
- RegLocation rlSrc)
+bool GenConversion(CompilationUnit *cu, Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src)
{
#ifdef __mips_hard_float
int op = kMipsNop;
- int srcReg;
- RegLocation rlResult;
+ int src_reg;
+ RegLocation rl_result;
switch (opcode) {
case Instruction::INT_TO_FLOAT:
op = kMipsFcvtsw;
@@ -143,34 +143,34 @@
case Instruction::FLOAT_TO_LONG:
case Instruction::LONG_TO_FLOAT:
case Instruction::DOUBLE_TO_LONG:
- return GenConversionPortable(cUnit, opcode, rlDest, rlSrc);
+ return GenConversionPortable(cu, opcode, rl_dest, rl_src);
default:
return true;
}
- if (rlSrc.wide) {
- rlSrc = LoadValueWide(cUnit, rlSrc, kFPReg);
- srcReg = S2d(rlSrc.lowReg, rlSrc.highReg);
+ if (rl_src.wide) {
+ rl_src = LoadValueWide(cu, rl_src, kFPReg);
+ src_reg = S2d(rl_src.low_reg, rl_src.high_reg);
} else {
- rlSrc = LoadValue(cUnit, rlSrc, kFPReg);
- srcReg = rlSrc.lowReg;
+ rl_src = LoadValue(cu, rl_src, kFPReg);
+ src_reg = rl_src.low_reg;
}
- if (rlDest.wide) {
- rlResult = EvalLoc(cUnit, rlDest, kFPReg, true);
- NewLIR2(cUnit, op, S2d(rlResult.lowReg, rlResult.highReg), srcReg);
- StoreValueWide(cUnit, rlDest, rlResult);
+ if (rl_dest.wide) {
+ rl_result = EvalLoc(cu, rl_dest, kFPReg, true);
+ NewLIR2(cu, op, S2d(rl_result.low_reg, rl_result.high_reg), src_reg);
+ StoreValueWide(cu, rl_dest, rl_result);
} else {
- rlResult = EvalLoc(cUnit, rlDest, kFPReg, true);
- NewLIR2(cUnit, op, rlResult.lowReg, srcReg);
- StoreValue(cUnit, rlDest, rlResult);
+ rl_result = EvalLoc(cu, rl_dest, kFPReg, true);
+ NewLIR2(cu, op, rl_result.low_reg, src_reg);
+ StoreValue(cu, rl_dest, rl_result);
}
return false;
#else
- return GenConversionPortable(cUnit, opcode, rlDest, rlSrc);
+ return GenConversionPortable(cu, opcode, rl_dest, rl_src);
#endif
}
-bool GenCmpFP(CompilationUnit *cUnit, Instruction::Code opcode, RegLocation rlDest,
- RegLocation rlSrc1, RegLocation rlSrc2)
+bool GenCmpFP(CompilationUnit *cu, Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2)
{
bool wide = true;
int offset;
@@ -193,49 +193,49 @@
default:
return true;
}
- FlushAllRegs(cUnit);
- LockCallTemps(cUnit);
+ FlushAllRegs(cu);
+ LockCallTemps(cu);
if (wide) {
- LoadValueDirectWideFixed(cUnit, rlSrc1, rMIPS_FARG0, rMIPS_FARG1);
- LoadValueDirectWideFixed(cUnit, rlSrc2, rMIPS_FARG2, rMIPS_FARG3);
+ LoadValueDirectWideFixed(cu, rl_src1, rMIPS_FARG0, rMIPS_FARG1);
+ LoadValueDirectWideFixed(cu, rl_src2, rMIPS_FARG2, rMIPS_FARG3);
} else {
- LoadValueDirectFixed(cUnit, rlSrc1, rMIPS_FARG0);
- LoadValueDirectFixed(cUnit, rlSrc2, rMIPS_FARG2);
+ LoadValueDirectFixed(cu, rl_src1, rMIPS_FARG0);
+ LoadValueDirectFixed(cu, rl_src2, rMIPS_FARG2);
}
- int rTgt = LoadHelper(cUnit, offset);
+ int r_tgt = LoadHelper(cu, offset);
// NOTE: not a safepoint
- OpReg(cUnit, kOpBlx, rTgt);
- RegLocation rlResult = GetReturn(cUnit, false);
- StoreValue(cUnit, rlDest, rlResult);
+ OpReg(cu, kOpBlx, r_tgt);
+ RegLocation rl_result = GetReturn(cu, false);
+ StoreValue(cu, rl_dest, rl_result);
return false;
}
-void GenFusedFPCmpBranch(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir,
- bool gtBias, bool isDouble)
+void GenFusedFPCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
+ bool gt_bias, bool is_double)
{
UNIMPLEMENTED(FATAL) << "Need codegen for fused fp cmp branch";
}
-void GenNegFloat(CompilationUnit *cUnit, RegLocation rlDest, RegLocation rlSrc)
+void GenNegFloat(CompilationUnit *cu, RegLocation rl_dest, RegLocation rl_src)
{
- RegLocation rlResult;
- rlSrc = LoadValue(cUnit, rlSrc, kCoreReg);
- rlResult = EvalLoc(cUnit, rlDest, kCoreReg, true);
- OpRegRegImm(cUnit, kOpAdd, rlResult.lowReg, rlSrc.lowReg, 0x80000000);
- StoreValue(cUnit, rlDest, rlResult);
+ RegLocation rl_result;
+ rl_src = LoadValue(cu, rl_src, kCoreReg);
+ rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ OpRegRegImm(cu, kOpAdd, rl_result.low_reg, rl_src.low_reg, 0x80000000);
+ StoreValue(cu, rl_dest, rl_result);
}
-void GenNegDouble(CompilationUnit *cUnit, RegLocation rlDest, RegLocation rlSrc)
+void GenNegDouble(CompilationUnit *cu, RegLocation rl_dest, RegLocation rl_src)
{
- RegLocation rlResult;
- rlSrc = LoadValueWide(cUnit, rlSrc, kCoreReg);
- rlResult = EvalLoc(cUnit, rlDest, kCoreReg, true);
- OpRegRegImm(cUnit, kOpAdd, rlResult.highReg, rlSrc.highReg, 0x80000000);
- OpRegCopy(cUnit, rlResult.lowReg, rlSrc.lowReg);
- StoreValueWide(cUnit, rlDest, rlResult);
+ RegLocation rl_result;
+ rl_src = LoadValueWide(cu, rl_src, kCoreReg);
+ rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ OpRegRegImm(cu, kOpAdd, rl_result.high_reg, rl_src.high_reg, 0x80000000);
+ OpRegCopy(cu, rl_result.low_reg, rl_src.low_reg);
+ StoreValueWide(cu, rl_dest, rl_result);
}
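
Both negations rely on the same bit trick: adding 0x80000000 leaves the low 31 bits untouched (the addend's low bits are zero) and toggles bit 31, the IEEE-754 sign bit, so kOpAdd behaves exactly like an xor of the sign; for the double only the high word is touched and the low word is copied through. A sketch:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      float f = 2.5f;
      uint32_t bits;
      memcpy(&bits, &f, sizeof(bits));
      bits += 0x80000000u;        // identical in effect to bits ^= 0x80000000u
      memcpy(&f, &bits, sizeof(f));
      printf("%f\n", f);          // -2.500000
      return 0;
    }
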
-bool GenInlinedMinMaxInt(CompilationUnit *cUnit, CallInfo* info, bool isMin)
+bool GenInlinedMinMaxInt(CompilationUnit *cu, CallInfo* info, bool is_min)
{
// TODO: need Mips implementation
return false;
diff --git a/src/compiler/codegen/mips/int_mips.cc b/src/compiler/codegen/mips/int_mips.cc
index 29b08ed..273e4bd 100644
--- a/src/compiler/codegen/mips/int_mips.cc
+++ b/src/compiler/codegen/mips/int_mips.cc
@@ -39,106 +39,106 @@
* finish:
*
*/
-void GenCmpLong(CompilationUnit* cUnit, RegLocation rlDest,
- RegLocation rlSrc1, RegLocation rlSrc2)
+void GenCmpLong(CompilationUnit* cu, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2)
{
- rlSrc1 = LoadValueWide(cUnit, rlSrc1, kCoreReg);
- rlSrc2 = LoadValueWide(cUnit, rlSrc2, kCoreReg);
- int t0 = AllocTemp(cUnit);
- int t1 = AllocTemp(cUnit);
- RegLocation rlResult = EvalLoc(cUnit, rlDest, kCoreReg, true);
- NewLIR3(cUnit, kMipsSlt, t0, rlSrc1.highReg, rlSrc2.highReg);
- NewLIR3(cUnit, kMipsSlt, t1, rlSrc2.highReg, rlSrc1.highReg);
- NewLIR3(cUnit, kMipsSubu, rlResult.lowReg, t1, t0);
- LIR* branch = OpCmpImmBranch(cUnit, kCondNe, rlResult.lowReg, 0, NULL);
- NewLIR3(cUnit, kMipsSltu, t0, rlSrc1.lowReg, rlSrc2.lowReg);
- NewLIR3(cUnit, kMipsSltu, t1, rlSrc2.lowReg, rlSrc1.lowReg);
- NewLIR3(cUnit, kMipsSubu, rlResult.lowReg, t1, t0);
- FreeTemp(cUnit, t0);
- FreeTemp(cUnit, t1);
- LIR* target = NewLIR0(cUnit, kPseudoTargetLabel);
+ rl_src1 = LoadValueWide(cu, rl_src1, kCoreReg);
+ rl_src2 = LoadValueWide(cu, rl_src2, kCoreReg);
+ int t0 = AllocTemp(cu);
+ int t1 = AllocTemp(cu);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ NewLIR3(cu, kMipsSlt, t0, rl_src1.high_reg, rl_src2.high_reg);
+ NewLIR3(cu, kMipsSlt, t1, rl_src2.high_reg, rl_src1.high_reg);
+ NewLIR3(cu, kMipsSubu, rl_result.low_reg, t1, t0);
+ LIR* branch = OpCmpImmBranch(cu, kCondNe, rl_result.low_reg, 0, NULL);
+ NewLIR3(cu, kMipsSltu, t0, rl_src1.low_reg, rl_src2.low_reg);
+ NewLIR3(cu, kMipsSltu, t1, rl_src2.low_reg, rl_src1.low_reg);
+ NewLIR3(cu, kMipsSubu, rl_result.low_reg, t1, t0);
+ FreeTemp(cu, t0);
+ FreeTemp(cu, t1);
+ LIR* target = NewLIR0(cu, kPseudoTargetLabel);
branch->target = target;
- StoreValue(cUnit, rlDest, rlResult);
+ StoreValue(cu, rl_dest, rl_result);
}
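
Stripped of register management, the slt/sltu ladder computes the usual -1/0/1 cmp-long result: the high words decide unless they are equal, in which case the low words are compared unsigned. In C terms (a sketch):

    #include <cstdint>
    #include <cstdio>

    int cmp_long(int64_t a, int64_t b) {
      int32_t ah = static_cast<int32_t>(a >> 32);
      int32_t bh = static_cast<int32_t>(b >> 32);
      int res = (bh < ah) - (ah < bh);       // slt t0/t1 both ways, subu
      if (res != 0) return res;              // OpCmpImmBranch(kCondNe, ...)
      uint32_t al = static_cast<uint32_t>(a);
      uint32_t bl = static_cast<uint32_t>(b);
      return (bl < al) - (al < bl);          // sltu t0/t1 both ways, subu
    }

    int main() {
      printf("%d %d %d\n", cmp_long(1, 2), cmp_long(5, 5), cmp_long(-1, 1));  // -1 0 -1
      return 0;
    }
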
-LIR* OpCmpBranch(CompilationUnit* cUnit, ConditionCode cond, int src1,
+LIR* OpCmpBranch(CompilationUnit* cu, ConditionCode cond, int src1,
int src2, LIR* target)
{
LIR* branch;
- MipsOpCode sltOp;
- MipsOpCode brOp;
- bool cmpZero = false;
+ MipsOpCode slt_op;
+ MipsOpCode br_op;
+ bool cmp_zero = false;
bool swapped = false;
switch (cond) {
case kCondEq:
- brOp = kMipsBeq;
- cmpZero = true;
+ br_op = kMipsBeq;
+ cmp_zero = true;
break;
case kCondNe:
- brOp = kMipsBne;
- cmpZero = true;
+ br_op = kMipsBne;
+ cmp_zero = true;
break;
case kCondCc:
- sltOp = kMipsSltu;
- brOp = kMipsBnez;
+ slt_op = kMipsSltu;
+ br_op = kMipsBnez;
break;
case kCondCs:
- sltOp = kMipsSltu;
- brOp = kMipsBeqz;
+ slt_op = kMipsSltu;
+ br_op = kMipsBeqz;
break;
case kCondGe:
- sltOp = kMipsSlt;
- brOp = kMipsBeqz;
+ slt_op = kMipsSlt;
+ br_op = kMipsBeqz;
break;
case kCondGt:
- sltOp = kMipsSlt;
- brOp = kMipsBnez;
+ slt_op = kMipsSlt;
+ br_op = kMipsBnez;
swapped = true;
break;
case kCondLe:
- sltOp = kMipsSlt;
- brOp = kMipsBeqz;
+ slt_op = kMipsSlt;
+ br_op = kMipsBeqz;
swapped = true;
break;
case kCondLt:
- sltOp = kMipsSlt;
- brOp = kMipsBnez;
+ slt_op = kMipsSlt;
+ br_op = kMipsBnez;
break;
case kCondHi: // Gtu
- sltOp = kMipsSltu;
- brOp = kMipsBnez;
+ slt_op = kMipsSltu;
+ br_op = kMipsBnez;
swapped = true;
break;
default:
LOG(FATAL) << "No support for ConditionCode: " << cond;
return NULL;
}
- if (cmpZero) {
- branch = NewLIR2(cUnit, brOp, src1, src2);
+ if (cmp_zero) {
+ branch = NewLIR2(cu, br_op, src1, src2);
} else {
- int tReg = AllocTemp(cUnit);
+ int t_reg = AllocTemp(cu);
if (swapped) {
- NewLIR3(cUnit, sltOp, tReg, src2, src1);
+ NewLIR3(cu, slt_op, t_reg, src2, src1);
} else {
- NewLIR3(cUnit, sltOp, tReg, src1, src2);
+ NewLIR3(cu, slt_op, t_reg, src1, src2);
}
- branch = NewLIR1(cUnit, brOp, tReg);
- FreeTemp(cUnit, tReg);
+ branch = NewLIR1(cu, br_op, t_reg);
+ FreeTemp(cu, t_reg);
}
branch->target = target;
return branch;
}
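
MIPS has no condition flags, so only eq/ne can branch on two registers directly; every other condition is materialized with slt/sltu into a temp and branched with bnez/beqz, swapping the operands when the sense must be inverted (Gt, Le, Hi). For kCondLe, for instance, the lowering amounts to (sketch):

    #include <cstdio>

    int main() {
      int a = 3, b = 3;
      int t = (b < a);           // slt t, src2, src1  (swapped = true)
      if (t == 0)                // kMipsBeqz: taken exactly when a <= b
        puts("branch taken: a <= b");
      return 0;
    }
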
-LIR* OpCmpImmBranch(CompilationUnit* cUnit, ConditionCode cond, int reg,
- int checkValue, LIR* target)
+LIR* OpCmpImmBranch(CompilationUnit* cu, ConditionCode cond, int reg,
+ int check_value, LIR* target)
{
LIR* branch;
- if (checkValue != 0) {
+ if (check_value != 0) {
// TUNING: handle s16 & kCondLt/Mi case using slti
- int tReg = AllocTemp(cUnit);
- LoadConstant(cUnit, tReg, checkValue);
- branch = OpCmpBranch(cUnit, cond, reg, tReg, target);
- FreeTemp(cUnit, tReg);
+ int t_reg = AllocTemp(cu);
+ LoadConstant(cu, t_reg, check_value);
+ branch = OpCmpBranch(cu, cond, reg, t_reg, target);
+ FreeTemp(cu, t_reg);
return branch;
}
MipsOpCode opc;
@@ -152,211 +152,211 @@
case kCondNe: opc = kMipsBnez; break;
default:
// Tuning: use slti when applicable
- int tReg = AllocTemp(cUnit);
- LoadConstant(cUnit, tReg, checkValue);
- branch = OpCmpBranch(cUnit, cond, reg, tReg, target);
- FreeTemp(cUnit, tReg);
+ int t_reg = AllocTemp(cu);
+ LoadConstant(cu, t_reg, check_value);
+ branch = OpCmpBranch(cu, cond, reg, t_reg, target);
+ FreeTemp(cu, t_reg);
return branch;
}
- branch = NewLIR1(cUnit, opc, reg);
+ branch = NewLIR1(cu, opc, reg);
branch->target = target;
return branch;
}
-LIR* OpRegCopyNoInsert(CompilationUnit *cUnit, int rDest, int rSrc)
+LIR* OpRegCopyNoInsert(CompilationUnit *cu, int r_dest, int r_src)
{
#ifdef __mips_hard_float
- if (MIPS_FPREG(rDest) || MIPS_FPREG(rSrc))
- return FpRegCopy(cUnit, rDest, rSrc);
+ if (MIPS_FPREG(r_dest) || MIPS_FPREG(r_src))
+ return FpRegCopy(cu, r_dest, r_src);
#endif
- LIR* res = RawLIR(cUnit, cUnit->currentDalvikOffset, kMipsMove,
- rDest, rSrc);
- if (!(cUnit->disableOpt & (1 << kSafeOptimizations)) && rDest == rSrc) {
- res->flags.isNop = true;
+ LIR* res = RawLIR(cu, cu->current_dalvik_offset, kMipsMove,
+ r_dest, r_src);
+ if (!(cu->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
+ res->flags.is_nop = true;
}
return res;
}
-LIR* OpRegCopy(CompilationUnit *cUnit, int rDest, int rSrc)
+LIR* OpRegCopy(CompilationUnit *cu, int r_dest, int r_src)
{
- LIR *res = OpRegCopyNoInsert(cUnit, rDest, rSrc);
- AppendLIR(cUnit, res);
+ LIR *res = OpRegCopyNoInsert(cu, r_dest, r_src);
+ AppendLIR(cu, res);
return res;
}
-void OpRegCopyWide(CompilationUnit *cUnit, int destLo, int destHi,
- int srcLo, int srcHi)
+void OpRegCopyWide(CompilationUnit *cu, int dest_lo, int dest_hi,
+ int src_lo, int src_hi)
{
#ifdef __mips_hard_float
- bool destFP = MIPS_FPREG(destLo) && MIPS_FPREG(destHi);
- bool srcFP = MIPS_FPREG(srcLo) && MIPS_FPREG(srcHi);
- assert(MIPS_FPREG(srcLo) == MIPS_FPREG(srcHi));
- assert(MIPS_FPREG(destLo) == MIPS_FPREG(destHi));
- if (destFP) {
- if (srcFP) {
- OpRegCopy(cUnit, S2d(destLo, destHi), S2d(srcLo, srcHi));
+ bool dest_fp = MIPS_FPREG(dest_lo) && MIPS_FPREG(dest_hi);
+ bool src_fp = MIPS_FPREG(src_lo) && MIPS_FPREG(src_hi);
+ assert(MIPS_FPREG(src_lo) == MIPS_FPREG(src_hi));
+ assert(MIPS_FPREG(dest_lo) == MIPS_FPREG(dest_hi));
+ if (dest_fp) {
+ if (src_fp) {
+ OpRegCopy(cu, S2d(dest_lo, dest_hi), S2d(src_lo, src_hi));
} else {
/* note the operands are swapped for the mtc1 instr */
- NewLIR2(cUnit, kMipsMtc1, srcLo, destLo);
- NewLIR2(cUnit, kMipsMtc1, srcHi, destHi);
+ NewLIR2(cu, kMipsMtc1, src_lo, dest_lo);
+ NewLIR2(cu, kMipsMtc1, src_hi, dest_hi);
}
} else {
- if (srcFP) {
- NewLIR2(cUnit, kMipsMfc1, destLo, srcLo);
- NewLIR2(cUnit, kMipsMfc1, destHi, srcHi);
+ if (src_fp) {
+ NewLIR2(cu, kMipsMfc1, dest_lo, src_lo);
+ NewLIR2(cu, kMipsMfc1, dest_hi, src_hi);
} else {
// Handle overlap
- if (srcHi == destLo) {
- OpRegCopy(cUnit, destHi, srcHi);
- OpRegCopy(cUnit, destLo, srcLo);
+ if (src_hi == dest_lo) {
+ OpRegCopy(cu, dest_hi, src_hi);
+ OpRegCopy(cu, dest_lo, src_lo);
} else {
- OpRegCopy(cUnit, destLo, srcLo);
- OpRegCopy(cUnit, destHi, srcHi);
+ OpRegCopy(cu, dest_lo, src_lo);
+ OpRegCopy(cu, dest_hi, src_hi);
}
}
}
#else
// Handle overlap
- if (srcHi == destLo) {
- OpRegCopy(cUnit, destHi, srcHi);
- OpRegCopy(cUnit, destLo, srcLo);
+ if (src_hi == dest_lo) {
+ OpRegCopy(cu, dest_hi, src_hi);
+ OpRegCopy(cu, dest_lo, src_lo);
} else {
- OpRegCopy(cUnit, destLo, srcLo);
- OpRegCopy(cUnit, destHi, srcHi);
+ OpRegCopy(cu, dest_lo, src_lo);
+ OpRegCopy(cu, dest_hi, src_hi);
}
#endif
}
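
The overlap test matters because a pair copy is two moves: if src_hi and dest_lo are the same physical register, copying the low word first would destroy the source's high word. Modeling registers as array slots (sketch):

    #include <cstdio>

    int main() {
      int regs[4] = {10, 20, 30, 40};
      int src_lo = 1, src_hi = 2, dest_lo = 2, dest_hi = 3;  // src_hi == dest_lo
      regs[dest_hi] = regs[src_hi];   // high word first, before it is clobbered
      regs[dest_lo] = regs[src_lo];
      printf("dest = [%d, %d]\n", regs[dest_lo], regs[dest_hi]);  // [20, 30]
      return 0;
    }
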
-void GenFusedLongCmpBranch(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir)
+void GenFusedLongCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir)
{
UNIMPLEMENTED(FATAL) << "Need codegen for fused long cmp branch";
}
-LIR* GenRegMemCheck(CompilationUnit* cUnit, ConditionCode cCode,
+LIR* GenRegMemCheck(CompilationUnit* cu, ConditionCode c_code,
int reg1, int base, int offset, ThrowKind kind)
{
LOG(FATAL) << "Unexpected use of GenRegMemCheck for Arm";
return NULL;
}
-RegLocation GenDivRem(CompilationUnit* cUnit, RegLocation rlDest, int reg1, int reg2, bool isDiv)
+RegLocation GenDivRem(CompilationUnit* cu, RegLocation rl_dest, int reg1, int reg2, bool is_div)
{
- NewLIR4(cUnit, kMipsDiv, r_HI, r_LO, reg1, reg2);
- RegLocation rlResult = EvalLoc(cUnit, rlDest, kCoreReg, true);
- if (isDiv) {
- NewLIR2(cUnit, kMipsMflo, rlResult.lowReg, r_LO);
+ NewLIR4(cu, kMipsDiv, r_HI, r_LO, reg1, reg2);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ if (is_div) {
+ NewLIR2(cu, kMipsMflo, rl_result.low_reg, r_LO);
} else {
- NewLIR2(cUnit, kMipsMfhi, rlResult.lowReg, r_HI);
+ NewLIR2(cu, kMipsMfhi, rl_result.low_reg, r_HI);
}
- return rlResult;
+ return rl_result;
}
-RegLocation GenDivRemLit(CompilationUnit* cUnit, RegLocation rlDest, int reg1, int lit, bool isDiv)
+RegLocation GenDivRemLit(CompilationUnit* cu, RegLocation rl_dest, int reg1, int lit, bool is_div)
{
- int tReg = AllocTemp(cUnit);
- NewLIR3(cUnit, kMipsAddiu, tReg, r_ZERO, lit);
- NewLIR4(cUnit, kMipsDiv, r_HI, r_LO, reg1, tReg);
- RegLocation rlResult = EvalLoc(cUnit, rlDest, kCoreReg, true);
- if (isDiv) {
- NewLIR2(cUnit, kMipsMflo, rlResult.lowReg, r_LO);
+ int t_reg = AllocTemp(cu);
+ NewLIR3(cu, kMipsAddiu, t_reg, r_ZERO, lit);
+ NewLIR4(cu, kMipsDiv, r_HI, r_LO, reg1, t_reg);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ if (is_div) {
+ NewLIR2(cu, kMipsMflo, rl_result.low_reg, r_LO);
} else {
- NewLIR2(cUnit, kMipsMfhi, rlResult.lowReg, r_HI);
+ NewLIR2(cu, kMipsMfhi, rl_result.low_reg, r_HI);
}
- FreeTemp(cUnit, tReg);
- return rlResult;
+ FreeTemp(cu, t_reg);
+ return rl_result;
}
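
On MIPS, div leaves the quotient in LO and the remainder in HI, so both flavors run the same divide and differ only in the mflo/mfhi that follows; the literal variant first builds the divisor with addiu from r_ZERO because div has no immediate form. In C terms (sketch):

    #include <cstdio>

    int main() {
      int reg1 = 17, lit = 5;     // addiu t_reg, zero, 5
      int lo = reg1 / lit;        // div; mflo when is_div
      int hi = reg1 % lit;        // div; mfhi for the remainder
      printf("quot=%d rem=%d\n", lo, hi);  // quot=3 rem=2
      return 0;
    }
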
-void OpLea(CompilationUnit* cUnit, int rBase, int reg1, int reg2, int scale, int offset)
+void OpLea(CompilationUnit* cu, int rBase, int reg1, int reg2, int scale, int offset)
{
LOG(FATAL) << "Unexpected use of OpLea for Arm";
}
-void OpTlsCmp(CompilationUnit* cUnit, int offset, int val)
+void OpTlsCmp(CompilationUnit* cu, int offset, int val)
{
LOG(FATAL) << "Unexpected use of OpTlsCmp for Arm";
}
-bool GenInlinedCas32(CompilationUnit* cUnit, CallInfo* info, bool need_write_barrier) {
- DCHECK_NE(cUnit->instructionSet, kThumb2);
+bool GenInlinedCas32(CompilationUnit* cu, CallInfo* info, bool need_write_barrier) {
+ DCHECK_NE(cu->instruction_set, kThumb2);
return false;
}
-bool GenInlinedSqrt(CompilationUnit* cUnit, CallInfo* info) {
- DCHECK_NE(cUnit->instructionSet, kThumb2);
+bool GenInlinedSqrt(CompilationUnit* cu, CallInfo* info) {
+ DCHECK_NE(cu->instruction_set, kThumb2);
return false;
}
-LIR* OpPcRelLoad(CompilationUnit* cUnit, int reg, LIR* target) {
+LIR* OpPcRelLoad(CompilationUnit* cu, int reg, LIR* target) {
LOG(FATAL) << "Unexpected use of OpPcRelLoad for Mips";
return NULL;
}
-LIR* OpVldm(CompilationUnit* cUnit, int rBase, int count)
+LIR* OpVldm(CompilationUnit* cu, int rBase, int count)
{
LOG(FATAL) << "Unexpected use of OpVldm for Mips";
return NULL;
}
-LIR* OpVstm(CompilationUnit* cUnit, int rBase, int count)
+LIR* OpVstm(CompilationUnit* cu, int rBase, int count)
{
LOG(FATAL) << "Unexpected use of OpVstm for Mips";
return NULL;
}
-void GenMultiplyByTwoBitMultiplier(CompilationUnit* cUnit, RegLocation rlSrc,
- RegLocation rlResult, int lit,
- int firstBit, int secondBit)
+void GenMultiplyByTwoBitMultiplier(CompilationUnit* cu, RegLocation rl_src,
+ RegLocation rl_result, int lit,
+ int first_bit, int second_bit)
{
- int tReg = AllocTemp(cUnit);
- OpRegRegImm(cUnit, kOpLsl, tReg, rlSrc.lowReg, secondBit - firstBit);
- OpRegRegReg(cUnit, kOpAdd, rlResult.lowReg, rlSrc.lowReg, tReg);
- FreeTemp(cUnit, tReg);
- if (firstBit != 0) {
- OpRegRegImm(cUnit, kOpLsl, rlResult.lowReg, rlResult.lowReg, firstBit);
+ int t_reg = AllocTemp(cu);
+ OpRegRegImm(cu, kOpLsl, t_reg, rl_src.low_reg, second_bit - first_bit);
+ OpRegRegReg(cu, kOpAdd, rl_result.low_reg, rl_src.low_reg, t_reg);
+ FreeTemp(cu, t_reg);
+ if (first_bit != 0) {
+ OpRegRegImm(cu, kOpLsl, rl_result.low_reg, rl_result.low_reg, first_bit);
}
}
-void GenDivZeroCheck(CompilationUnit* cUnit, int regLo, int regHi)
+void GenDivZeroCheck(CompilationUnit* cu, int reg_lo, int reg_hi)
{
- int tReg = AllocTemp(cUnit);
- OpRegRegReg(cUnit, kOpOr, tReg, regLo, regHi);
- GenImmedCheck(cUnit, kCondEq, tReg, 0, kThrowDivZero);
- FreeTemp(cUnit, tReg);
+ int t_reg = AllocTemp(cu);
+ OpRegRegReg(cu, kOpOr, t_reg, reg_lo, reg_hi);
+ GenImmedCheck(cu, kCondEq, t_reg, 0, kThrowDivZero);
+ FreeTemp(cu, t_reg);
}
// Test suspend flag, return target of taken suspend branch
-LIR* OpTestSuspend(CompilationUnit* cUnit, LIR* target)
+LIR* OpTestSuspend(CompilationUnit* cu, LIR* target)
{
- OpRegImm(cUnit, kOpSub, rMIPS_SUSPEND, 1);
- return OpCmpImmBranch(cUnit, (target == NULL) ? kCondEq : kCondNe, rMIPS_SUSPEND, 0, target);
+ OpRegImm(cu, kOpSub, rMIPS_SUSPEND, 1);
+ return OpCmpImmBranch(cu, (target == NULL) ? kCondEq : kCondNe, rMIPS_SUSPEND, 0, target);
}
// Decrement register and branch on condition
-LIR* OpDecAndBranch(CompilationUnit* cUnit, ConditionCode cCode, int reg, LIR* target)
+LIR* OpDecAndBranch(CompilationUnit* cu, ConditionCode c_code, int reg, LIR* target)
{
- OpRegImm(cUnit, kOpSub, reg, 1);
- return OpCmpImmBranch(cUnit, cCode, reg, 0, target);
+ OpRegImm(cu, kOpSub, reg, 1);
+ return OpCmpImmBranch(cu, c_code, reg, 0, target);
}
-bool SmallLiteralDivide(CompilationUnit* cUnit, Instruction::Code dalvikOpcode,
- RegLocation rlSrc, RegLocation rlDest, int lit)
+bool SmallLiteralDivide(CompilationUnit* cu, Instruction::Code dalvik_opcode,
+ RegLocation rl_src, RegLocation rl_dest, int lit)
{
LOG(FATAL) << "Unexpected use of smallLiteralDive in Mips";
return false;
}
-LIR* OpIT(CompilationUnit* cUnit, ArmConditionCode cond, const char* guide)
+LIR* OpIT(CompilationUnit* cu, ArmConditionCode cond, const char* guide)
{
LOG(FATAL) << "Unexpected use of OpIT in Mips";
return NULL;
}
-bool GenAddLong(CompilationUnit* cUnit, RegLocation rlDest,
- RegLocation rlSrc1, RegLocation rlSrc2)
+bool GenAddLong(CompilationUnit* cu, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2)
{
- rlSrc1 = LoadValueWide(cUnit, rlSrc1, kCoreReg);
- rlSrc2 = LoadValueWide(cUnit, rlSrc2, kCoreReg);
- RegLocation rlResult = EvalLoc(cUnit, rlDest, kCoreReg, true);
+ rl_src1 = LoadValueWide(cu, rl_src1, kCoreReg);
+ rl_src2 = LoadValueWide(cu, rl_src2, kCoreReg);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
/*
* [v1 v0] = [a1 a0] + [a3 a2];
* addu v0,a2,a0
@@ -365,22 +365,22 @@
* addu v1,v1,t1
*/
- OpRegRegReg(cUnit, kOpAdd, rlResult.lowReg, rlSrc2.lowReg, rlSrc1.lowReg);
- int tReg = AllocTemp(cUnit);
- OpRegRegReg(cUnit, kOpAdd, tReg, rlSrc2.highReg, rlSrc1.highReg);
- NewLIR3(cUnit, kMipsSltu, rlResult.highReg, rlResult.lowReg, rlSrc2.lowReg);
- OpRegRegReg(cUnit, kOpAdd, rlResult.highReg, rlResult.highReg, tReg);
- FreeTemp(cUnit, tReg);
- StoreValueWide(cUnit, rlDest, rlResult);
+ OpRegRegReg(cu, kOpAdd, rl_result.low_reg, rl_src2.low_reg, rl_src1.low_reg);
+ int t_reg = AllocTemp(cu);
+ OpRegRegReg(cu, kOpAdd, t_reg, rl_src2.high_reg, rl_src1.high_reg);
+ NewLIR3(cu, kMipsSltu, rl_result.high_reg, rl_result.low_reg, rl_src2.low_reg);
+ OpRegRegReg(cu, kOpAdd, rl_result.high_reg, rl_result.high_reg, t_reg);
+ FreeTemp(cu, t_reg);
+ StoreValueWide(cu, rl_dest, rl_result);
return false;
}
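
The carry computation leans on an unsigned-wraparound fact: after addu v0, a2, a0, the sum is unsigned-less-than either operand exactly when the low-word add overflowed, which is what the sltu into the high word captures. A sketch:

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint32_t a0 = 0xffffffffu, a1 = 0;   // [a1 a0] = 2^32 - 1
      uint32_t a2 = 1, a3 = 0;             // [a3 a2] = 1
      uint32_t v0 = a2 + a0;               // addu v0, a2, a0
      uint32_t t1 = a3 + a1;               // addu t, a3, a1
      uint32_t carry = (v0 < a2);          // sltu v1, v0, a2
      uint32_t v1 = t1 + carry;            // addu v1, v1, t
      printf("0x%08x%08x\n", v1, v0);      // 0x0000000100000000
      return 0;
    }
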
-bool GenSubLong(CompilationUnit* cUnit, RegLocation rlDest,
- RegLocation rlSrc1, RegLocation rlSrc2)
+bool GenSubLong(CompilationUnit* cu, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2)
{
- rlSrc1 = LoadValueWide(cUnit, rlSrc1, kCoreReg);
- rlSrc2 = LoadValueWide(cUnit, rlSrc2, kCoreReg);
- RegLocation rlResult = EvalLoc(cUnit, rlDest, kCoreReg, true);
+ rl_src1 = LoadValueWide(cu, rl_src1, kCoreReg);
+ rl_src2 = LoadValueWide(cu, rl_src2, kCoreReg);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
/*
* [v1 v0] = [a1 a0] - [a3 a2];
* sltu t1,a0,a2
@@ -389,21 +389,21 @@
* subu v1,v1,t1
*/
- int tReg = AllocTemp(cUnit);
- NewLIR3(cUnit, kMipsSltu, tReg, rlSrc1.lowReg, rlSrc2.lowReg);
- OpRegRegReg(cUnit, kOpSub, rlResult.lowReg, rlSrc1.lowReg, rlSrc2.lowReg);
- OpRegRegReg(cUnit, kOpSub, rlResult.highReg, rlSrc1.highReg, rlSrc2.highReg);
- OpRegRegReg(cUnit, kOpSub, rlResult.highReg, rlResult.highReg, tReg);
- FreeTemp(cUnit, tReg);
- StoreValueWide(cUnit, rlDest, rlResult);
+ int t_reg = AllocTemp(cu);
+ NewLIR3(cu, kMipsSltu, t_reg, rl_src1.low_reg, rl_src2.low_reg);
+ OpRegRegReg(cu, kOpSub, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg);
+ OpRegRegReg(cu, kOpSub, rl_result.high_reg, rl_src1.high_reg, rl_src2.high_reg);
+ OpRegRegReg(cu, kOpSub, rl_result.high_reg, rl_result.high_reg, t_reg);
+ FreeTemp(cu, t_reg);
+ StoreValueWide(cu, rl_dest, rl_result);
return false;
}
-bool GenNegLong(CompilationUnit* cUnit, RegLocation rlDest,
- RegLocation rlSrc)
+bool GenNegLong(CompilationUnit* cu, RegLocation rl_dest,
+ RegLocation rl_src)
{
- rlSrc = LoadValueWide(cUnit, rlSrc, kCoreReg);
- RegLocation rlResult = EvalLoc(cUnit, rlDest, kCoreReg, true);
+ rl_src = LoadValueWide(cu, rl_src, kCoreReg);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
/*
* [v1 v0] = -[a1 a0]
* negu v0,a0
@@ -412,32 +412,32 @@
* subu v1,v1,t1
*/
- OpRegReg(cUnit, kOpNeg, rlResult.lowReg, rlSrc.lowReg);
- OpRegReg(cUnit, kOpNeg, rlResult.highReg, rlSrc.highReg);
- int tReg = AllocTemp(cUnit);
- NewLIR3(cUnit, kMipsSltu, tReg, r_ZERO, rlResult.lowReg);
- OpRegRegReg(cUnit, kOpSub, rlResult.highReg, rlResult.highReg, tReg);
- FreeTemp(cUnit, tReg);
- StoreValueWide(cUnit, rlDest, rlResult);
+ OpRegReg(cu, kOpNeg, rl_result.low_reg, rl_src.low_reg);
+ OpRegReg(cu, kOpNeg, rl_result.high_reg, rl_src.high_reg);
+ int t_reg = AllocTemp(cu);
+ NewLIR3(cu, kMipsSltu, t_reg, r_ZERO, rl_result.low_reg);
+ OpRegRegReg(cu, kOpSub, rl_result.high_reg, rl_result.high_reg, t_reg);
+ FreeTemp(cu, t_reg);
+ StoreValueWide(cu, rl_dest, rl_result);
return false;
}
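
Subtraction and negation use the mirror-image borrow: for the negate, both halves are negated and the high half loses one whenever the low half is non-zero, which sltu t, zero, v0 captures. A sketch:

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint32_t a0 = 1, a1 = 0;        // [a1 a0] = 1
      uint32_t v0 = 0u - a0;          // negu v0, a0 (unsigned wrap, well-defined)
      uint32_t v1 = 0u - a1;          // negu v1, a1
      v1 -= (v0 != 0);                // sltu t, zero, v0 ; subu v1, v1, t
      printf("0x%08x%08x\n", v1, v0); // 0xffffffffffffffff, i.e. -1
      return 0;
    }
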
-bool GenAndLong(CompilationUnit* cUnit, RegLocation rlDest,
- RegLocation rlSrc1, RegLocation rlSrc2)
+bool GenAndLong(CompilationUnit* cu, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2)
{
LOG(FATAL) << "Unexpected use of GenAndLong for Mips";
return false;
}
-bool GenOrLong(CompilationUnit* cUnit, RegLocation rlDest,
- RegLocation rlSrc1, RegLocation rlSrc2)
+bool GenOrLong(CompilationUnit* cu, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2)
{
LOG(FATAL) << "Unexpected use of GenOrLong for Mips";
return false;
}
-bool GenXorLong(CompilationUnit* cUnit, RegLocation rlDest,
- RegLocation rlSrc1, RegLocation rlSrc2)
+bool GenXorLong(CompilationUnit* cu, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2)
{
LOG(FATAL) << "Unexpected use of GenXorLong for Mips";
return false;
diff --git a/src/compiler/codegen/mips/mips_lir.h b/src/compiler/codegen/mips/mips_lir.h
index de39b7f..e3d9b62 100644
--- a/src/compiler/codegen/mips/mips_lir.h
+++ b/src/compiler/codegen/mips/mips_lir.h
@@ -82,7 +82,7 @@
* | OUT[outs-2] |
* | . |
* | OUT[0] |
- * | curMethod* | <<== sp w/ 16-byte alignment
+ * | cur_method* | <<== sp w/ 16-byte alignment
* +========================+
*/
@@ -406,7 +406,7 @@
};
/* Bit flags describing the behavior of each native opcode */
-/* Instruction assembly fieldLoc kind */
+/* Instruction assembly field_loc kind */
enum MipsEncodingKind {
kFmtUnused,
kFmtBitBlt, /* Bit string using end/start */
@@ -422,7 +422,7 @@
MipsEncodingKind kind;
int end; /* end for kFmtBitBlt, 1-bit slice end for FP regs */
int start; /* start for kFmtBitBlt, 4-bit slice end for FP regs */
- } fieldLoc[4];
+ } field_loc[4];
MipsOpCode opcode;
uint64_t flags;
const char *name;
diff --git a/src/compiler/codegen/mips/target_mips.cc b/src/compiler/codegen/mips/target_mips.cc
index d264343..b9159ed 100644
--- a/src/compiler/codegen/mips/target_mips.cc
+++ b/src/compiler/codegen/mips/target_mips.cc
@@ -23,19 +23,19 @@
namespace art {
-static int coreRegs[] = {r_ZERO, r_AT, r_V0, r_V1, r_A0, r_A1, r_A2, r_A3,
- r_T0, r_T1, r_T2, r_T3, r_T4, r_T5, r_T6, r_T7,
- r_S0, r_S1, r_S2, r_S3, r_S4, r_S5, r_S6, r_S7, r_T8,
- r_T9, r_K0, r_K1, r_GP, r_SP, r_FP, r_RA};
+static int core_regs[] = {r_ZERO, r_AT, r_V0, r_V1, r_A0, r_A1, r_A2, r_A3,
+ r_T0, r_T1, r_T2, r_T3, r_T4, r_T5, r_T6, r_T7,
+ r_S0, r_S1, r_S2, r_S3, r_S4, r_S5, r_S6, r_S7, r_T8,
+ r_T9, r_K0, r_K1, r_GP, r_SP, r_FP, r_RA};
static int ReservedRegs[] = {r_ZERO, r_AT, r_S0, r_S1, r_K0, r_K1, r_GP, r_SP,
r_RA};
-static int coreTemps[] = {r_V0, r_V1, r_A0, r_A1, r_A2, r_A3, r_T0, r_T1, r_T2,
- r_T3, r_T4, r_T5, r_T6, r_T7, r_T8};
+static int core_temps[] = {r_V0, r_V1, r_A0, r_A1, r_A2, r_A3, r_T0, r_T1, r_T2,
+ r_T3, r_T4, r_T5, r_T6, r_T7, r_T8};
#ifdef __mips_hard_float
static int FpRegs[] = {r_F0, r_F1, r_F2, r_F3, r_F4, r_F5, r_F6, r_F7,
r_F8, r_F9, r_F10, r_F11, r_F12, r_F13, r_F14, r_F15};
-static int fpTemps[] = {r_F0, r_F1, r_F2, r_F3, r_F4, r_F5, r_F6, r_F7,
- r_F8, r_F9, r_F10, r_F11, r_F12, r_F13, r_F14, r_F15};
+static int fp_temps[] = {r_F0, r_F1, r_F2, r_F3, r_F4, r_F5, r_F6, r_F7,
+ r_F8, r_F9, r_F10, r_F11, r_F12, r_F13, r_F14, r_F15};
#endif
RegLocation LocCReturn()
@@ -88,9 +88,9 @@
}
// Create a double from a pair of singles.
-int S2d(int lowReg, int highReg)
+int S2d(int low_reg, int high_reg)
{
- return MIPS_S2D(lowReg, highReg);
+ return MIPS_S2D(low_reg, high_reg);
}
// Is reg a single or double?
@@ -126,20 +126,20 @@
/*
* Decode the register id.
*/
-uint64_t GetRegMaskCommon(CompilationUnit* cUnit, int reg)
+uint64_t GetRegMaskCommon(CompilationUnit* cu, int reg)
{
uint64_t seed;
int shift;
- int regId;
+ int reg_id;
- regId = reg & 0x1f;
+ reg_id = reg & 0x1f;
/* Each double register is equal to a pair of single-precision FP registers */
seed = MIPS_DOUBLEREG(reg) ? 3 : 1;
/* FP register starts at bit position 16 */
shift = MIPS_FPREG(reg) ? kMipsFPReg0 : 0;
/* Expand the double register id into single offset */
- shift += regId;
+ shift += reg_id;
return (seed << shift);
}
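
Reading the function together with its comments: core registers map to one bit each from bit 0, FP registers start higher up (bit 16 per the comment above, via kMipsFPReg0), and a double seeds 3 instead of 1 so that it claims both of its single-precision halves. A sketch assuming the bit-16 layout:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const int kFpShift = 16;   // assumed value of kMipsFPReg0, per the comment
      int reg_id = 4;            // e.g. f4
      uint64_t single_mask = 1ULL << (kFpShift + reg_id);
      uint64_t double_mask = 3ULL << (kFpShift + reg_id);  // covers f4 and f5
      printf("single=0x%llx double=0x%llx\n",
             static_cast<unsigned long long>(single_mask),
             static_cast<unsigned long long>(double_mask));
      return 0;
    }
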
@@ -149,29 +149,29 @@
}
-void SetupTargetResourceMasks(CompilationUnit* cUnit, LIR* lir)
+void SetupTargetResourceMasks(CompilationUnit* cu, LIR* lir)
{
- DCHECK_EQ(cUnit->instructionSet, kMips);
+ DCHECK_EQ(cu->instruction_set, kMips);
// Mips-specific resource map setup here.
uint64_t flags = EncodingMap[lir->opcode].flags;
if (flags & REG_DEF_SP) {
- lir->defMask |= ENCODE_MIPS_REG_SP;
+ lir->def_mask |= ENCODE_MIPS_REG_SP;
}
if (flags & REG_USE_SP) {
- lir->useMask |= ENCODE_MIPS_REG_SP;
+ lir->use_mask |= ENCODE_MIPS_REG_SP;
}
if (flags & REG_DEF_LR) {
- lir->defMask |= ENCODE_MIPS_REG_LR;
+ lir->def_mask |= ENCODE_MIPS_REG_LR;
}
}
/* For dumping instructions */
#define MIPS_REG_COUNT 32
-static const char *mipsRegName[MIPS_REG_COUNT] = {
+static const char *mips_reg_name[MIPS_REG_COUNT] = {
"zero", "at", "v0", "v1", "a0", "a1", "a2", "a3",
"t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7",
"s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
@@ -182,23 +182,23 @@
* Interpret a format string and build a string no longer than size
* See format key in Assemble.c.
*/
-std::string BuildInsnString(const char *fmt, LIR *lir, unsigned char* baseAddr)
+std::string BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr)
{
std::string buf;
int i;
- const char *fmtEnd = &fmt[strlen(fmt)];
+ const char *fmt_end = &fmt[strlen(fmt)];
char tbuf[256];
char nc;
- while (fmt < fmtEnd) {
+ while (fmt < fmt_end) {
int operand;
if (*fmt == '!') {
fmt++;
- DCHECK_LT(fmt, fmtEnd);
+ DCHECK_LT(fmt, fmt_end);
nc = *fmt++;
if (nc=='!') {
strcpy(tbuf, "!");
} else {
- DCHECK_LT(fmt, fmtEnd);
+ DCHECK_LT(fmt, fmt_end);
DCHECK_LT(static_cast<unsigned>(nc-'0'), 4u);
operand = lir->operands[nc-'0'];
switch (*fmt++) {
@@ -233,7 +233,7 @@
sprintf(tbuf,"%d", operand*2);
break;
case 't':
- sprintf(tbuf,"0x%08x (L%p)", reinterpret_cast<uintptr_t>(baseAddr) + lir->offset + 4 +
+ sprintf(tbuf,"0x%08x (L%p)", reinterpret_cast<uintptr_t>(base_addr) + lir->offset + 4 +
(operand << 2), lir->target);
break;
case 'T':
@@ -243,7 +243,7 @@
int offset_1 = lir->operands[0];
int offset_2 = NEXT_LIR(lir)->operands[0];
uintptr_t target =
- (((reinterpret_cast<uintptr_t>(baseAddr) + lir->offset + 4) & ~3) +
+ (((reinterpret_cast<uintptr_t>(base_addr) + lir->offset + 4) & ~3) +
(offset_1 << 21 >> 9) + (offset_2 << 1)) & 0xfffffffc;
sprintf(tbuf, "%p", reinterpret_cast<void*>(target));
break;
@@ -255,7 +255,7 @@
break;
case 'r':
DCHECK(operand >= 0 && operand < MIPS_REG_COUNT);
- strcpy(tbuf, mipsRegName[operand]);
+ strcpy(tbuf, mips_reg_name[operand]);
break;
case 'N':
// Placeholder for delay slot handling
@@ -275,7 +275,7 @@
}
// FIXME: need to redo resource maps for MIPS - fix this at that time
-void DumpResourceMask(LIR *mipsLIR, uint64_t mask, const char *prefix)
+void DumpResourceMask(LIR *mips_lir, uint64_t mask, const char *prefix)
{
char buf[256];
buf[0] = 0;
@@ -300,9 +300,9 @@
strcat(buf, "fpcc ");
}
/* Memory bits */
- if (mipsLIR && (mask & ENCODE_DALVIK_REG)) {
- sprintf(buf + strlen(buf), "dr%d%s", mipsLIR->aliasInfo & 0xffff,
- (mipsLIR->aliasInfo & 0x80000000) ? "(+1)" : "");
+ if (mips_lir && (mask & ENCODE_DALVIK_REG)) {
+ sprintf(buf + strlen(buf), "dr%d%s", mips_lir->alias_info & 0xffff,
+ (mips_lir->alias_info & 0x80000000) ? "(+1)" : "");
}
if (mask & ENCODE_LITERAL) {
strcat(buf, "lit ");
@@ -321,15 +321,15 @@
}
/*
- * TUNING: is leaf? Can't just use "hasInvoke" to determine as some
+ * TUNING: is leaf? Can't just use "has_invoke" to determine as some
* instructions might call out to C/assembly helper functions. Until
* machinery is in place, always spill lr.
*/
-void AdjustSpillMask(CompilationUnit* cUnit)
+void AdjustSpillMask(CompilationUnit* cu)
{
- cUnit->coreSpillMask |= (1 << r_RA);
- cUnit->numCoreSpills++;
+ cu->core_spill_mask |= (1 << r_RA);
+ cu->num_core_spills++;
}
/*
@@ -338,40 +338,40 @@
* include any holes in the mask. Associate holes with
* Dalvik register INVALID_VREG (0xFFFFU).
*/
-void MarkPreservedSingle(CompilationUnit* cUnit, int sReg, int reg)
+void MarkPreservedSingle(CompilationUnit* cu, int s_reg, int reg)
{
LOG(FATAL) << "No support yet for promoted FP regs";
}
-void FlushRegWide(CompilationUnit* cUnit, int reg1, int reg2)
+void FlushRegWide(CompilationUnit* cu, int reg1, int reg2)
{
- RegisterInfo* info1 = GetRegInfo(cUnit, reg1);
- RegisterInfo* info2 = GetRegInfo(cUnit, reg2);
+ RegisterInfo* info1 = GetRegInfo(cu, reg1);
+ RegisterInfo* info2 = GetRegInfo(cu, reg2);
DCHECK(info1 && info2 && info1->pair && info2->pair &&
(info1->partner == info2->reg) &&
(info2->partner == info1->reg));
if ((info1->live && info1->dirty) || (info2->live && info2->dirty)) {
- if (!(info1->isTemp && info2->isTemp)) {
- /* Should not happen. If it does, there's a problem in evalLoc */
+ if (!(info1->is_temp && info2->is_temp)) {
+ /* Should not happen. If it does, there's a problem in eval_loc */
LOG(FATAL) << "Long half-temp, half-promoted";
}
info1->dirty = false;
info2->dirty = false;
- if (SRegToVReg(cUnit, info2->sReg) < SRegToVReg(cUnit, info1->sReg))
+ if (SRegToVReg(cu, info2->s_reg) < SRegToVReg(cu, info1->s_reg))
info1 = info2;
- int vReg = SRegToVReg(cUnit, info1->sReg);
- StoreBaseDispWide(cUnit, rMIPS_SP, VRegOffset(cUnit, vReg), info1->reg, info1->partner);
+ int v_reg = SRegToVReg(cu, info1->s_reg);
+ StoreBaseDispWide(cu, rMIPS_SP, VRegOffset(cu, v_reg), info1->reg, info1->partner);
}
}
-void FlushReg(CompilationUnit* cUnit, int reg)
+void FlushReg(CompilationUnit* cu, int reg)
{
- RegisterInfo* info = GetRegInfo(cUnit, reg);
+ RegisterInfo* info = GetRegInfo(cu, reg);
if (info->live && info->dirty) {
info->dirty = false;
- int vReg = SRegToVReg(cUnit, info->sReg);
- StoreBaseDisp(cUnit, rMIPS_SP, VRegOffset(cUnit, vReg), reg, kWord);
+ int v_reg = SRegToVReg(cu, info->s_reg);
+ StoreBaseDisp(cu, rMIPS_SP, VRegOffset(cu, v_reg), reg, kWord);
}
}
@@ -381,85 +381,85 @@
}
/* Clobber all regs that might be used by an external C call */
-void ClobberCalleeSave(CompilationUnit *cUnit)
+void ClobberCalleeSave(CompilationUnit *cu)
{
- Clobber(cUnit, r_ZERO);
- Clobber(cUnit, r_AT);
- Clobber(cUnit, r_V0);
- Clobber(cUnit, r_V1);
- Clobber(cUnit, r_A0);
- Clobber(cUnit, r_A1);
- Clobber(cUnit, r_A2);
- Clobber(cUnit, r_A3);
- Clobber(cUnit, r_T0);
- Clobber(cUnit, r_T1);
- Clobber(cUnit, r_T2);
- Clobber(cUnit, r_T3);
- Clobber(cUnit, r_T4);
- Clobber(cUnit, r_T5);
- Clobber(cUnit, r_T6);
- Clobber(cUnit, r_T7);
- Clobber(cUnit, r_T8);
- Clobber(cUnit, r_T9);
- Clobber(cUnit, r_K0);
- Clobber(cUnit, r_K1);
- Clobber(cUnit, r_GP);
- Clobber(cUnit, r_FP);
- Clobber(cUnit, r_RA);
- Clobber(cUnit, r_F0);
- Clobber(cUnit, r_F1);
- Clobber(cUnit, r_F2);
- Clobber(cUnit, r_F3);
- Clobber(cUnit, r_F4);
- Clobber(cUnit, r_F5);
- Clobber(cUnit, r_F6);
- Clobber(cUnit, r_F7);
- Clobber(cUnit, r_F8);
- Clobber(cUnit, r_F9);
- Clobber(cUnit, r_F10);
- Clobber(cUnit, r_F11);
- Clobber(cUnit, r_F12);
- Clobber(cUnit, r_F13);
- Clobber(cUnit, r_F14);
- Clobber(cUnit, r_F15);
+ Clobber(cu, r_ZERO);
+ Clobber(cu, r_AT);
+ Clobber(cu, r_V0);
+ Clobber(cu, r_V1);
+ Clobber(cu, r_A0);
+ Clobber(cu, r_A1);
+ Clobber(cu, r_A2);
+ Clobber(cu, r_A3);
+ Clobber(cu, r_T0);
+ Clobber(cu, r_T1);
+ Clobber(cu, r_T2);
+ Clobber(cu, r_T3);
+ Clobber(cu, r_T4);
+ Clobber(cu, r_T5);
+ Clobber(cu, r_T6);
+ Clobber(cu, r_T7);
+ Clobber(cu, r_T8);
+ Clobber(cu, r_T9);
+ Clobber(cu, r_K0);
+ Clobber(cu, r_K1);
+ Clobber(cu, r_GP);
+ Clobber(cu, r_FP);
+ Clobber(cu, r_RA);
+ Clobber(cu, r_F0);
+ Clobber(cu, r_F1);
+ Clobber(cu, r_F2);
+ Clobber(cu, r_F3);
+ Clobber(cu, r_F4);
+ Clobber(cu, r_F5);
+ Clobber(cu, r_F6);
+ Clobber(cu, r_F7);
+ Clobber(cu, r_F8);
+ Clobber(cu, r_F9);
+ Clobber(cu, r_F10);
+ Clobber(cu, r_F11);
+ Clobber(cu, r_F12);
+ Clobber(cu, r_F13);
+ Clobber(cu, r_F14);
+ Clobber(cu, r_F15);
}
-RegLocation GetReturnWideAlt(CompilationUnit* cUnit)
+RegLocation GetReturnWideAlt(CompilationUnit* cu)
{
UNIMPLEMENTED(FATAL) << "No GetReturnWideAlt for MIPS";
RegLocation res = LocCReturnWide();
return res;
}
-RegLocation GetReturnAlt(CompilationUnit* cUnit)
+RegLocation GetReturnAlt(CompilationUnit* cu)
{
UNIMPLEMENTED(FATAL) << "No GetReturnAlt for MIPS";
RegLocation res = LocCReturn();
return res;
}
-RegisterInfo* GetRegInfo(CompilationUnit* cUnit, int reg)
+RegisterInfo* GetRegInfo(CompilationUnit* cu, int reg)
{
- return MIPS_FPREG(reg) ? &cUnit->regPool->FPRegs[reg & MIPS_FP_REG_MASK]
- : &cUnit->regPool->coreRegs[reg];
+ return MIPS_FPREG(reg) ? &cu->reg_pool->FPRegs[reg & MIPS_FP_REG_MASK]
+ : &cu->reg_pool->core_regs[reg];
}
/* To be used when explicitly managing register use */
-void LockCallTemps(CompilationUnit* cUnit)
+void LockCallTemps(CompilationUnit* cu)
{
- LockTemp(cUnit, rMIPS_ARG0);
- LockTemp(cUnit, rMIPS_ARG1);
- LockTemp(cUnit, rMIPS_ARG2);
- LockTemp(cUnit, rMIPS_ARG3);
+ LockTemp(cu, rMIPS_ARG0);
+ LockTemp(cu, rMIPS_ARG1);
+ LockTemp(cu, rMIPS_ARG2);
+ LockTemp(cu, rMIPS_ARG3);
}
/* To be used when explicitly managing register use */
-void FreeCallTemps(CompilationUnit* cUnit)
+void FreeCallTemps(CompilationUnit* cu)
{
- FreeTemp(cUnit, rMIPS_ARG0);
- FreeTemp(cUnit, rMIPS_ARG1);
- FreeTemp(cUnit, rMIPS_ARG2);
- FreeTemp(cUnit, rMIPS_ARG3);
+ FreeTemp(cu, rMIPS_ARG0);
+ FreeTemp(cu, rMIPS_ARG1);
+ FreeTemp(cu, rMIPS_ARG2);
+ FreeTemp(cu, rMIPS_ARG3);
}
/* Architecture-specific initializations and checks go here */
@@ -468,10 +468,10 @@
return true;
}
-void GenMemBarrier(CompilationUnit *cUnit, MemBarrierKind barrierKind)
+void GenMemBarrier(CompilationUnit *cu, MemBarrierKind barrier_kind)
{
#if ANDROID_SMP != 0
- NewLIR1(cUnit, kMipsSync, 0 /* Only stype currently supported */);
+ NewLIR1(cu, kMipsSync, 0 /* Only stype currently supported */);
#endif
}
@@ -479,103 +479,103 @@
* Alloc a pair of core registers, or a double. Low reg in low byte,
* high reg in next byte.
*/
-int AllocTypedTempPair(CompilationUnit *cUnit, bool fpHint,
- int regClass)
+int AllocTypedTempPair(CompilationUnit *cu, bool fp_hint,
+ int reg_class)
{
- int highReg;
- int lowReg;
+ int high_reg;
+ int low_reg;
int res = 0;
#ifdef __mips_hard_float
- if (((regClass == kAnyReg) && fpHint) || (regClass == kFPReg)) {
- lowReg = AllocTempDouble(cUnit);
- highReg = lowReg + 1;
- res = (lowReg & 0xff) | ((highReg & 0xff) << 8);
+ if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
+ low_reg = AllocTempDouble(cu);
+ high_reg = low_reg + 1;
+ res = (low_reg & 0xff) | ((high_reg & 0xff) << 8);
return res;
}
#endif
- lowReg = AllocTemp(cUnit);
- highReg = AllocTemp(cUnit);
- res = (lowReg & 0xff) | ((highReg & 0xff) << 8);
+ low_reg = AllocTemp(cu);
+ high_reg = AllocTemp(cu);
+ res = (low_reg & 0xff) | ((high_reg & 0xff) << 8);
return res;
}
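
As the header comment says, the pair is packed into a single int, low reg in byte 0 and high reg in byte 1; callers unpack it with the inverse masks. A sketch:

    #include <cstdio>

    int main() {
      int low_reg = 2, high_reg = 3;
      int res = (low_reg & 0xff) | ((high_reg & 0xff) << 8);
      printf("low=%d high=%d\n", res & 0xff, (res >> 8) & 0xff);  // low=2 high=3
      return 0;
    }
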
-int AllocTypedTemp(CompilationUnit *cUnit, bool fpHint, int regClass)
+int AllocTypedTemp(CompilationUnit *cu, bool fp_hint, int reg_class)
{
#ifdef __mips_hard_float
- if (((regClass == kAnyReg) && fpHint) || (regClass == kFPReg))
+ if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg))
{
- return AllocTempFloat(cUnit);
+ return AllocTempFloat(cu);
}
#endif
- return AllocTemp(cUnit);
+ return AllocTemp(cu);
}
-void CompilerInitializeRegAlloc(CompilationUnit* cUnit)
+void CompilerInitializeRegAlloc(CompilationUnit* cu)
{
- int numRegs = sizeof(coreRegs)/sizeof(*coreRegs);
- int numReserved = sizeof(ReservedRegs)/sizeof(*ReservedRegs);
- int numTemps = sizeof(coreTemps)/sizeof(*coreTemps);
+ int num_regs = sizeof(core_regs)/sizeof(*core_regs);
+ int num_reserved = sizeof(ReservedRegs)/sizeof(*ReservedRegs);
+ int num_temps = sizeof(core_temps)/sizeof(*core_temps);
#ifdef __mips_hard_float
- int numFPRegs = sizeof(FpRegs)/sizeof(*FpRegs);
- int numFPTemps = sizeof(fpTemps)/sizeof(*fpTemps);
+ int num_fp_regs = sizeof(FpRegs)/sizeof(*FpRegs);
+ int num_fp_temps = sizeof(fp_temps)/sizeof(*fp_temps);
#else
- int numFPRegs = 0;
- int numFPTemps = 0;
+ int num_fp_regs = 0;
+ int num_fp_temps = 0;
#endif
RegisterPool *pool =
- static_cast<RegisterPool*>(NewMem(cUnit, sizeof(*pool), true, kAllocRegAlloc));
- cUnit->regPool = pool;
- pool->numCoreRegs = numRegs;
- pool->coreRegs = static_cast<RegisterInfo*>
- (NewMem(cUnit, numRegs * sizeof(*cUnit->regPool->coreRegs), true, kAllocRegAlloc));
- pool->numFPRegs = numFPRegs;
+ static_cast<RegisterPool*>(NewMem(cu, sizeof(*pool), true, kAllocRegAlloc));
+ cu->reg_pool = pool;
+ pool->num_core_regs = num_regs;
+ pool->core_regs = static_cast<RegisterInfo*>
+ (NewMem(cu, num_regs * sizeof(*cu->reg_pool->core_regs), true, kAllocRegAlloc));
+ pool->num_fp_regs = num_fp_regs;
pool->FPRegs = static_cast<RegisterInfo*>
- (NewMem(cUnit, numFPRegs * sizeof(*cUnit->regPool->FPRegs), true, kAllocRegAlloc));
- CompilerInitPool(pool->coreRegs, coreRegs, pool->numCoreRegs);
- CompilerInitPool(pool->FPRegs, FpRegs, pool->numFPRegs);
+ (NewMem(cu, num_fp_regs * sizeof(*cu->reg_pool->FPRegs), true, kAllocRegAlloc));
+ CompilerInitPool(pool->core_regs, core_regs, pool->num_core_regs);
+ CompilerInitPool(pool->FPRegs, FpRegs, pool->num_fp_regs);
// Keep special registers from being allocated
- for (int i = 0; i < numReserved; i++) {
+ for (int i = 0; i < num_reserved; i++) {
if (NO_SUSPEND && (ReservedRegs[i] == rMIPS_SUSPEND)) {
//To measure cost of suspend check
continue;
}
- MarkInUse(cUnit, ReservedRegs[i]);
+ MarkInUse(cu, ReservedRegs[i]);
}
// Mark temp regs - all others not in use can be used for promotion
- for (int i = 0; i < numTemps; i++) {
- MarkTemp(cUnit, coreTemps[i]);
+ for (int i = 0; i < num_temps; i++) {
+ MarkTemp(cu, core_temps[i]);
}
- for (int i = 0; i < numFPTemps; i++) {
- MarkTemp(cUnit, fpTemps[i]);
+ for (int i = 0; i < num_fp_temps; i++) {
+ MarkTemp(cu, fp_temps[i]);
}
// Construct the alias map.
- cUnit->phiAliasMap = static_cast<int*>
- (NewMem(cUnit, cUnit->numSSARegs * sizeof(cUnit->phiAliasMap[0]), false, kAllocDFInfo));
- for (int i = 0; i < cUnit->numSSARegs; i++) {
- cUnit->phiAliasMap[i] = i;
+ cu->phi_alias_map = static_cast<int*>
+ (NewMem(cu, cu->num_ssa_regs * sizeof(cu->phi_alias_map[0]), false, kAllocDFInfo));
+ for (int i = 0; i < cu->num_ssa_regs; i++) {
+ cu->phi_alias_map[i] = i;
}
- for (MIR* phi = cUnit->phiList; phi; phi = phi->meta.phiNext) {
- int defReg = phi->ssaRep->defs[0];
- for (int i = 0; i < phi->ssaRep->numUses; i++) {
- for (int j = 0; j < cUnit->numSSARegs; j++) {
- if (cUnit->phiAliasMap[j] == phi->ssaRep->uses[i]) {
- cUnit->phiAliasMap[j] = defReg;
+ for (MIR* phi = cu->phi_list; phi; phi = phi->meta.phi_next) {
+ int def_reg = phi->ssa_rep->defs[0];
+ for (int i = 0; i < phi->ssa_rep->num_uses; i++) {
+ for (int j = 0; j < cu->num_ssa_regs; j++) {
+ if (cu->phi_alias_map[j] == phi->ssa_rep->uses[i]) {
+ cu->phi_alias_map[j] = def_reg;
}
}
}
}
}
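
The nested loops at the end fold every SSA register reaching a phi into that phi's def, so the whole web of a phi shares one entry in phi_alias_map. Isolated, the propagation looks like this (hypothetical phi):

    #include <cstdio>

    int main() {
      int phi_alias_map[5] = {0, 1, 2, 3, 4};  // identity, as initialized above
      int def_reg = 4;                         // hypothetical phi: v4 = phi(v1, v3)
      int uses[] = {1, 3};
      for (int use : uses)
        for (int j = 0; j < 5; j++)
          if (phi_alias_map[j] == use)
            phi_alias_map[j] = def_reg;
      for (int j = 0; j < 5; j++)
        printf("%d->%d ", j, phi_alias_map[j]);  // 0->0 1->4 2->2 3->4 4->4
      return 0;
    }
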
-void FreeRegLocTemps(CompilationUnit* cUnit, RegLocation rlKeep,
- RegLocation rlFree)
+void FreeRegLocTemps(CompilationUnit* cu, RegLocation rl_keep,
+ RegLocation rl_free)
{
- if ((rlFree.lowReg != rlKeep.lowReg) && (rlFree.lowReg != rlKeep.highReg) &&
- (rlFree.highReg != rlKeep.lowReg) && (rlFree.highReg != rlKeep.highReg)) {
+ if ((rl_free.low_reg != rl_keep.low_reg) && (rl_free.low_reg != rl_keep.high_reg) &&
+ (rl_free.high_reg != rl_keep.low_reg) && (rl_free.high_reg != rl_keep.high_reg)) {
// No overlap, free both
- FreeTemp(cUnit, rlFree.lowReg);
- FreeTemp(cUnit, rlFree.highReg);
+ FreeTemp(cu, rl_free.low_reg);
+ FreeTemp(cu, rl_free.high_reg);
}
}
/*
@@ -584,42 +584,42 @@
* ensure that all branch instructions can be restarted if
* there is a trap in the shadow. Allocate a temp register.
*/
-int LoadHelper(CompilationUnit* cUnit, int offset)
+int LoadHelper(CompilationUnit* cu, int offset)
{
- LoadWordDisp(cUnit, rMIPS_SELF, offset, r_T9);
+ LoadWordDisp(cu, rMIPS_SELF, offset, r_T9);
return r_T9;
}
-void SpillCoreRegs(CompilationUnit* cUnit)
+void SpillCoreRegs(CompilationUnit* cu)
{
- if (cUnit->numCoreSpills == 0) {
+ if (cu->num_core_spills == 0) {
return;
}
- uint32_t mask = cUnit->coreSpillMask;
- int offset = cUnit->numCoreSpills * 4;
- OpRegImm(cUnit, kOpSub, rMIPS_SP, offset);
+ uint32_t mask = cu->core_spill_mask;
+ int offset = cu->num_core_spills * 4;
+ OpRegImm(cu, kOpSub, rMIPS_SP, offset);
for (int reg = 0; mask; mask >>= 1, reg++) {
if (mask & 0x1) {
offset -= 4;
- StoreWordDisp(cUnit, rMIPS_SP, offset, reg);
+ StoreWordDisp(cu, rMIPS_SP, offset, reg);
}
}
}
-void UnSpillCoreRegs(CompilationUnit* cUnit)
+void UnSpillCoreRegs(CompilationUnit* cu)
{
- if (cUnit->numCoreSpills == 0) {
+ if (cu->num_core_spills == 0) {
return;
}
- uint32_t mask = cUnit->coreSpillMask;
- int offset = cUnit->frameSize;
+ uint32_t mask = cu->core_spill_mask;
+ int offset = cu->frame_size;
for (int reg = 0; mask; mask >>= 1, reg++) {
if (mask & 0x1) {
offset -= 4;
- LoadWordDisp(cUnit, rMIPS_SP, offset, reg);
+ LoadWordDisp(cu, rMIPS_SP, offset, reg);
}
}
- OpRegImm(cUnit, kOpAdd, rMIPS_SP, cUnit->frameSize);
+ OpRegImm(cu, kOpAdd, rMIPS_SP, cu->frame_size);
}
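Both routines walk core_spill_mask from bit 0 upward while offset counts down, so the lowest-numbered spilled register lands at the highest stack offset. A standalone sketch that prints the resulting frame layout:

#include <cstdint>
#include <cstdio>

// For mask 0x3 and two spills: reg 0 -> [sp + 4], reg 1 -> [sp + 0].
static void DumpSpillLayout(uint32_t mask, int num_spills) {
  int offset = num_spills * 4;
  for (int reg = 0; mask != 0; mask >>= 1, reg++) {
    if (mask & 0x1) {
      offset -= 4;
      std::printf("reg %d -> [sp + %d]\n", reg, offset);
    }
  }
}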
bool BranchUnconditional(LIR* lir)
diff --git a/src/compiler/codegen/mips/utility_mips.cc b/src/compiler/codegen/mips/utility_mips.cc
index 011fc34..168b462 100644
--- a/src/compiler/codegen/mips/utility_mips.cc
+++ b/src/compiler/codegen/mips/utility_mips.cc
@@ -22,41 +22,41 @@
/* This file contains codegen for the MIPS32 ISA. */
-void GenBarrier(CompilationUnit *cUnit);
-void LoadPair(CompilationUnit *cUnit, int base, int lowReg, int highReg);
-LIR *LoadWordDisp(CompilationUnit *cUnit, int rBase, int displacement,
- int rDest);
-LIR *StoreWordDisp(CompilationUnit *cUnit, int rBase,
- int displacement, int rSrc);
-LIR *LoadConstant(CompilationUnit *cUnit, int rDest, int value);
+void GenBarrier(CompilationUnit *cu);
+void LoadPair(CompilationUnit *cu, int base, int low_reg, int high_reg);
+LIR *LoadWordDisp(CompilationUnit *cu, int rBase, int displacement,
+ int r_dest);
+LIR *StoreWordDisp(CompilationUnit *cu, int rBase,
+ int displacement, int r_src);
+LIR *LoadConstant(CompilationUnit *cu, int r_dest, int value);
#ifdef __mips_hard_float
-LIR *FpRegCopy(CompilationUnit *cUnit, int rDest, int rSrc)
+LIR *FpRegCopy(CompilationUnit *cu, int r_dest, int r_src)
{
int opcode;
/* must be both DOUBLE or both not DOUBLE */
- DCHECK_EQ(MIPS_DOUBLEREG(rDest),MIPS_DOUBLEREG(rSrc));
- if (MIPS_DOUBLEREG(rDest)) {
+ DCHECK_EQ(MIPS_DOUBLEREG(r_dest),MIPS_DOUBLEREG(r_src));
+ if (MIPS_DOUBLEREG(r_dest)) {
opcode = kMipsFmovd;
} else {
- if (MIPS_SINGLEREG(rDest)) {
- if (MIPS_SINGLEREG(rSrc)) {
+ if (MIPS_SINGLEREG(r_dest)) {
+ if (MIPS_SINGLEREG(r_src)) {
opcode = kMipsFmovs;
} else {
/* note the operands are swapped for the mtc1 instr */
- int tOpnd = rSrc;
- rSrc = rDest;
- rDest = tOpnd;
+ int t_opnd = r_src;
+ r_src = r_dest;
+ r_dest = t_opnd;
opcode = kMipsMtc1;
}
} else {
- DCHECK(MIPS_SINGLEREG(rSrc));
+ DCHECK(MIPS_SINGLEREG(r_src));
opcode = kMipsMfc1;
}
}
- LIR* res = RawLIR(cUnit, cUnit->currentDalvikOffset, opcode, rSrc, rDest);
- if (!(cUnit->disableOpt & (1 << kSafeOptimizations)) && rDest == rSrc) {
- res->flags.isNop = true;
+ LIR* res = RawLIR(cu, cu->current_dalvik_offset, opcode, r_src, r_dest);
+ if (!(cu->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
+ res->flags.is_nop = true;
}
return res;
}
@@ -68,54 +68,54 @@
* a high register, build constant into a low register and copy.
*
* No additional register clobbering operation performed. Use this version when
- * 1) rDest is freshly returned from AllocTemp or
+ * 1) r_dest is freshly returned from AllocTemp or
* 2) The codegen is under fixed register usage
*/
-LIR *LoadConstantNoClobber(CompilationUnit *cUnit, int rDest, int value)
+LIR *LoadConstantNoClobber(CompilationUnit *cu, int r_dest, int value)
{
LIR *res;
#ifdef __mips_hard_float
- int rDestSave = rDest;
- int isFpReg = MIPS_FPREG(rDest);
- if (isFpReg) {
- DCHECK(MIPS_SINGLEREG(rDest));
- rDest = AllocTemp(cUnit);
+ int r_dest_save = r_dest;
+ int is_fp_reg = MIPS_FPREG(r_dest);
+ if (is_fp_reg) {
+ DCHECK(MIPS_SINGLEREG(r_dest));
+ r_dest = AllocTemp(cu);
}
#endif
/* See if the value can be constructed cheaply */
if (value == 0) {
- res = NewLIR2(cUnit, kMipsMove, rDest, r_ZERO);
+ res = NewLIR2(cu, kMipsMove, r_dest, r_ZERO);
} else if ((value > 0) && (value <= 65535)) {
- res = NewLIR3(cUnit, kMipsOri, rDest, r_ZERO, value);
+ res = NewLIR3(cu, kMipsOri, r_dest, r_ZERO, value);
} else if ((value < 0) && (value >= -32768)) {
- res = NewLIR3(cUnit, kMipsAddiu, rDest, r_ZERO, value);
+ res = NewLIR3(cu, kMipsAddiu, r_dest, r_ZERO, value);
} else {
- res = NewLIR2(cUnit, kMipsLui, rDest, value>>16);
+ res = NewLIR2(cu, kMipsLui, r_dest, value>>16);
if (value & 0xffff)
- NewLIR3(cUnit, kMipsOri, rDest, rDest, value);
+ NewLIR3(cu, kMipsOri, r_dest, r_dest, value);
}
#ifdef __mips_hard_float
- if (isFpReg) {
- NewLIR2(cUnit, kMipsMtc1, rDest, rDestSave);
- FreeTemp(cUnit, rDest);
+ if (is_fp_reg) {
+ NewLIR2(cu, kMipsMtc1, r_dest, r_dest_save);
+ FreeTemp(cu, r_dest);
}
#endif
return res;
}
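The chain above picks the cheapest MIPS idiom for a 32-bit immediate (for FP destinations the value is first built in a core temp, then moved across with mtc1). A standalone sketch of the same selection:

#include <cstdint>

// Zero comes from $zero, small positives fit ori's zero-extended imm16,
// small negatives fit addiu's sign-extended imm16, and everything else
// needs lui (plus ori when the low half is non-zero).
static const char* ConstantIdiom(int32_t value) {
  if (value == 0) return "move rd, $zero";
  if (value > 0 && value <= 65535) return "ori rd, $zero, lo16";
  if (value < 0 && value >= -32768) return "addiu rd, $zero, lo16";
  return (value & 0xffff) ? "lui rd, hi16; ori rd, rd, lo16" : "lui rd, hi16";
}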
-LIR *OpBranchUnconditional(CompilationUnit *cUnit, OpKind op)
+LIR *OpBranchUnconditional(CompilationUnit *cu, OpKind op)
{
DCHECK_EQ(op, kOpUncondBr);
- return NewLIR1(cUnit, kMipsB, 0 /* offset to be patched */ );
+ return NewLIR1(cu, kMipsB, 0 /* offset to be patched */ );
}
-LIR *LoadMultiple(CompilationUnit *cUnit, int rBase, int rMask);
+LIR *LoadMultiple(CompilationUnit *cu, int rBase, int r_mask);
-LIR *OpReg(CompilationUnit *cUnit, OpKind op, int rDestSrc)
+LIR *OpReg(CompilationUnit *cu, OpKind op, int r_dest_src)
{
MipsOpCode opcode = kMipsNop;
switch (op) {
@@ -123,50 +123,50 @@
opcode = kMipsJalr;
break;
case kOpBx:
- return NewLIR1(cUnit, kMipsJr, rDestSrc);
+ return NewLIR1(cu, kMipsJr, r_dest_src);
break;
default:
LOG(FATAL) << "Bad case in OpReg";
}
- return NewLIR2(cUnit, opcode, r_RA, rDestSrc);
+ return NewLIR2(cu, opcode, r_RA, r_dest_src);
}
-LIR *OpRegRegImm(CompilationUnit *cUnit, OpKind op, int rDest,
- int rSrc1, int value);
-LIR *OpRegImm(CompilationUnit *cUnit, OpKind op, int rDestSrc1,
+LIR *OpRegRegImm(CompilationUnit *cu, OpKind op, int r_dest,
+ int r_src1, int value);
+LIR *OpRegImm(CompilationUnit *cu, OpKind op, int r_dest_src1,
int value)
{
LIR *res;
bool neg = (value < 0);
- int absValue = (neg) ? -value : value;
- bool shortForm = (absValue & 0xff) == absValue;
+ int abs_value = (neg) ? -value : value;
+ bool short_form = (abs_value & 0xff) == abs_value;
MipsOpCode opcode = kMipsNop;
switch (op) {
case kOpAdd:
- return OpRegRegImm(cUnit, op, rDestSrc1, rDestSrc1, value);
+ return OpRegRegImm(cu, op, r_dest_src1, r_dest_src1, value);
break;
case kOpSub:
- return OpRegRegImm(cUnit, op, rDestSrc1, rDestSrc1, value);
+ return OpRegRegImm(cu, op, r_dest_src1, r_dest_src1, value);
break;
default:
LOG(FATAL) << "Bad case in OpRegImm";
break;
}
- if (shortForm)
- res = NewLIR2(cUnit, opcode, rDestSrc1, absValue);
+ if (short_form)
+ res = NewLIR2(cu, opcode, r_dest_src1, abs_value);
else {
- int rScratch = AllocTemp(cUnit);
- res = LoadConstant(cUnit, rScratch, value);
+ int r_scratch = AllocTemp(cu);
+ res = LoadConstant(cu, r_scratch, value);
if (op == kOpCmp)
- NewLIR2(cUnit, opcode, rDestSrc1, rScratch);
+ NewLIR2(cu, opcode, r_dest_src1, r_scratch);
else
- NewLIR3(cUnit, opcode, rDestSrc1, rDestSrc1, rScratch);
+ NewLIR3(cu, opcode, r_dest_src1, r_dest_src1, r_scratch);
}
return res;
}
-LIR *OpRegRegReg(CompilationUnit *cUnit, OpKind op, int rDest,
- int rSrc1, int rSrc2)
+LIR *OpRegRegReg(CompilationUnit *cu, OpKind op, int r_dest,
+ int r_src1, int r_src2)
{
MipsOpCode opcode = kMipsNop;
switch (op) {
@@ -205,15 +205,15 @@
LOG(FATAL) << "bad case in OpRegRegReg";
break;
}
- return NewLIR3(cUnit, opcode, rDest, rSrc1, rSrc2);
+ return NewLIR3(cu, opcode, r_dest, r_src1, r_src2);
}
-LIR *OpRegRegImm(CompilationUnit *cUnit, OpKind op, int rDest,
- int rSrc1, int value)
+LIR *OpRegRegImm(CompilationUnit *cu, OpKind op, int r_dest,
+ int r_src1, int value)
{
LIR *res;
MipsOpCode opcode = kMipsNop;
- bool shortForm = true;
+ bool short_form = true;
switch (op) {
case kOpAdd:
@@ -221,7 +221,7 @@
opcode = kMipsAddiu;
}
else {
- shortForm = false;
+ short_form = false;
opcode = kMipsAddu;
}
break;
@@ -231,7 +231,7 @@
opcode = kMipsAddiu;
}
else {
- shortForm = false;
+ short_form = false;
opcode = kMipsSubu;
}
break;
@@ -252,7 +252,7 @@
opcode = kMipsAndi;
}
else {
- shortForm = false;
+ short_form = false;
opcode = kMipsAnd;
}
break;
@@ -261,7 +261,7 @@
opcode = kMipsOri;
}
else {
- shortForm = false;
+ short_form = false;
opcode = kMipsOr;
}
break;
@@ -270,12 +270,12 @@
opcode = kMipsXori;
}
else {
- shortForm = false;
+ short_form = false;
opcode = kMipsXor;
}
break;
case kOpMul:
- shortForm = false;
+ short_form = false;
opcode = kMipsMul;
break;
default:
@@ -283,22 +283,22 @@
break;
}
- if (shortForm)
- res = NewLIR3(cUnit, opcode, rDest, rSrc1, value);
+ if (short_form)
+ res = NewLIR3(cu, opcode, r_dest, r_src1, value);
else {
- if (rDest != rSrc1) {
- res = LoadConstant(cUnit, rDest, value);
- NewLIR3(cUnit, opcode, rDest, rSrc1, rDest);
+ if (r_dest != r_src1) {
+ res = LoadConstant(cu, r_dest, value);
+ NewLIR3(cu, opcode, r_dest, r_src1, r_dest);
} else {
- int rScratch = AllocTemp(cUnit);
- res = LoadConstant(cUnit, rScratch, value);
- NewLIR3(cUnit, opcode, rDest, rSrc1, rScratch);
+ int r_scratch = AllocTemp(cu);
+ res = LoadConstant(cu, r_scratch, value);
+ NewLIR3(cu, opcode, r_dest, r_src1, r_scratch);
}
}
return res;
}
-LIR *OpRegReg(CompilationUnit *cUnit, OpKind op, int rDestSrc1, int rSrc2)
+LIR *OpRegReg(CompilationUnit *cu, OpKind op, int r_dest_src1, int r_src2)
{
MipsOpCode opcode = kMipsNop;
LIR *res;
@@ -307,62 +307,62 @@
opcode = kMipsMove;
break;
case kOpMvn:
- return NewLIR3(cUnit, kMipsNor, rDestSrc1, rSrc2, r_ZERO);
+ return NewLIR3(cu, kMipsNor, r_dest_src1, r_src2, r_ZERO);
case kOpNeg:
- return NewLIR3(cUnit, kMipsSubu, rDestSrc1, r_ZERO, rSrc2);
+ return NewLIR3(cu, kMipsSubu, r_dest_src1, r_ZERO, r_src2);
case kOpAdd:
case kOpAnd:
case kOpMul:
case kOpOr:
case kOpSub:
case kOpXor:
- return OpRegRegReg(cUnit, op, rDestSrc1, rDestSrc1, rSrc2);
+ return OpRegRegReg(cu, op, r_dest_src1, r_dest_src1, r_src2);
case kOp2Byte:
#if __mips_isa_rev>=2
- res = NewLIR2(cUnit, kMipsSeb, rDestSrc1, rSrc2);
+ res = NewLIR2(cu, kMipsSeb, r_dest_src1, r_src2);
#else
- res = OpRegRegImm(cUnit, kOpLsl, rDestSrc1, rSrc2, 24);
- OpRegRegImm(cUnit, kOpAsr, rDestSrc1, rDestSrc1, 24);
+ res = OpRegRegImm(cu, kOpLsl, r_dest_src1, r_src2, 24);
+ OpRegRegImm(cu, kOpAsr, r_dest_src1, r_dest_src1, 24);
#endif
return res;
case kOp2Short:
#if __mips_isa_rev>=2
- res = NewLIR2(cUnit, kMipsSeh, rDestSrc1, rSrc2);
+ res = NewLIR2(cu, kMipsSeh, r_dest_src1, r_src2);
#else
- res = OpRegRegImm(cUnit, kOpLsl, rDestSrc1, rSrc2, 16);
- OpRegRegImm(cUnit, kOpAsr, rDestSrc1, rDestSrc1, 16);
+ res = OpRegRegImm(cu, kOpLsl, r_dest_src1, r_src2, 16);
+ OpRegRegImm(cu, kOpAsr, r_dest_src1, r_dest_src1, 16);
#endif
return res;
case kOp2Char:
- return NewLIR3(cUnit, kMipsAndi, rDestSrc1, rSrc2, 0xFFFF);
+ return NewLIR3(cu, kMipsAndi, r_dest_src1, r_src2, 0xFFFF);
default:
LOG(FATAL) << "Bad case in OpRegReg";
break;
}
- return NewLIR2(cUnit, opcode, rDestSrc1, rSrc2);
+ return NewLIR2(cu, opcode, r_dest_src1, r_src2);
}
-LIR *LoadConstantValueWide(CompilationUnit *cUnit, int rDestLo,
- int rDestHi, int valLo, int valHi)
+LIR *LoadConstantValueWide(CompilationUnit *cu, int r_dest_lo,
+ int r_dest_hi, int val_lo, int val_hi)
{
LIR *res;
- res = LoadConstantNoClobber(cUnit, rDestLo, valLo);
- LoadConstantNoClobber(cUnit, rDestHi, valHi);
+ res = LoadConstantNoClobber(cu, r_dest_lo, val_lo);
+ LoadConstantNoClobber(cu, r_dest_hi, val_hi);
return res;
}
/* Load value from base + scaled index. */
-LIR *LoadBaseIndexed(CompilationUnit *cUnit, int rBase,
- int rIndex, int rDest, int scale, OpSize size)
+LIR *LoadBaseIndexed(CompilationUnit *cu, int rBase,
+ int r_index, int r_dest, int scale, OpSize size)
{
LIR *first = NULL;
LIR *res;
MipsOpCode opcode = kMipsNop;
- int tReg = AllocTemp(cUnit);
+ int t_reg = AllocTemp(cu);
#ifdef __mips_hard_float
- if (MIPS_FPREG(rDest)) {
- DCHECK(MIPS_SINGLEREG(rDest));
+ if (MIPS_FPREG(r_dest)) {
+ DCHECK(MIPS_SINGLEREG(r_dest));
DCHECK((size == kWord) || (size == kSingle));
size = kSingle;
} else {
@@ -372,10 +372,10 @@
#endif
if (!scale) {
- first = NewLIR3(cUnit, kMipsAddu, tReg , rBase, rIndex);
+ first = NewLIR3(cu, kMipsAddu, t_reg , rBase, r_index);
} else {
- first = OpRegRegImm(cUnit, kOpLsl, tReg, rIndex, scale);
- NewLIR3(cUnit, kMipsAddu, tReg , rBase, tReg);
+ first = OpRegRegImm(cu, kOpLsl, t_reg, r_index, scale);
+ NewLIR3(cu, kMipsAddu, t_reg , rBase, t_reg);
}
switch (size) {
@@ -403,23 +403,23 @@
LOG(FATAL) << "Bad case in LoadBaseIndexed";
}
- res = NewLIR3(cUnit, opcode, rDest, 0, tReg);
- FreeTemp(cUnit, tReg);
+ res = NewLIR3(cu, opcode, r_dest, 0, t_reg);
+ FreeTemp(cu, t_reg);
return (first) ? first : res;
}
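MIPS32 loads and stores address only base + signed imm16, so an indexed access is first lowered into an explicit address computation in a temp (sll when scaled, then addu). A sketch of the address both indexed routines build:

#include <cstdint>

// scale == 0: addu t, base, index
// scale != 0: sll t, index, scale; addu t, base, t
static uintptr_t IndexedAddress(uintptr_t base, uintptr_t index, int scale) {
  return base + (index << scale);
}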
/* Store value to base + scaled index. */
-LIR *StoreBaseIndexed(CompilationUnit *cUnit, int rBase,
- int rIndex, int rSrc, int scale, OpSize size)
+LIR *StoreBaseIndexed(CompilationUnit *cu, int rBase,
+ int r_index, int r_src, int scale, OpSize size)
{
LIR *first = NULL;
MipsOpCode opcode = kMipsNop;
- int rNewIndex = rIndex;
- int tReg = AllocTemp(cUnit);
+ int r_new_index = r_index;
+ int t_reg = AllocTemp(cu);
#ifdef __mips_hard_float
- if (MIPS_FPREG(rSrc)) {
- DCHECK(MIPS_SINGLEREG(rSrc));
+ if (MIPS_FPREG(r_src)) {
+ DCHECK(MIPS_SINGLEREG(r_src));
DCHECK((size == kWord) || (size == kSingle));
size = kSingle;
} else {
@@ -429,10 +429,10 @@
#endif
if (!scale) {
- first = NewLIR3(cUnit, kMipsAddu, tReg , rBase, rIndex);
+ first = NewLIR3(cu, kMipsAddu, t_reg , rBase, r_index);
} else {
- first = OpRegRegImm(cUnit, kOpLsl, tReg, rIndex, scale);
- NewLIR3(cUnit, kMipsAddu, tReg , rBase, tReg);
+ first = OpRegRegImm(cu, kOpLsl, t_reg, r_index, scale);
+ NewLIR3(cu, kMipsAddu, t_reg , rBase, t_reg);
}
switch (size) {
@@ -455,61 +455,61 @@
default:
LOG(FATAL) << "Bad case in StoreBaseIndexed";
}
- NewLIR3(cUnit, opcode, rSrc, 0, tReg);
- FreeTemp(cUnit, rNewIndex);
+ NewLIR3(cu, opcode, r_src, 0, t_reg);
+ FreeTemp(cu, r_new_index);
return first;
}
-LIR *LoadMultiple(CompilationUnit *cUnit, int rBase, int rMask)
+LIR *LoadMultiple(CompilationUnit *cu, int rBase, int r_mask)
{
int i;
- int loadCnt = 0;
+ int load_cnt = 0;
LIR *res = NULL ;
- GenBarrier(cUnit);
+ GenBarrier(cu);
- for (i = 0; i < 8; i++, rMask >>= 1) {
- if (rMask & 0x1) { /* map r0 to MIPS r_A0 */
- NewLIR3(cUnit, kMipsLw, i+r_A0, loadCnt*4, rBase);
- loadCnt++;
+ for (i = 0; i < 8; i++, r_mask >>= 1) {
+ if (r_mask & 0x1) { /* map r0 to MIPS r_A0 */
+ NewLIR3(cu, kMipsLw, i+r_A0, load_cnt*4, rBase);
+ load_cnt++;
}
}
- if (loadCnt) {/* increment after */
- NewLIR3(cUnit, kMipsAddiu, rBase, rBase, loadCnt*4);
+ if (load_cnt) {/* increment after */
+ NewLIR3(cu, kMipsAddiu, rBase, rBase, load_cnt*4);
}
- GenBarrier(cUnit);
+ GenBarrier(cu);
return res; /* NULL always returned which should be ok since no callers use it */
}
-LIR *StoreMultiple(CompilationUnit *cUnit, int rBase, int rMask)
+LIR *StoreMultiple(CompilationUnit *cu, int rBase, int r_mask)
{
int i;
- int storeCnt = 0;
+ int store_cnt = 0;
LIR *res = NULL ;
- GenBarrier(cUnit);
+ GenBarrier(cu);
- for (i = 0; i < 8; i++, rMask >>= 1) {
- if (rMask & 0x1) { /* map r0 to MIPS r_A0 */
- NewLIR3(cUnit, kMipsSw, i+r_A0, storeCnt*4, rBase);
- storeCnt++;
+ for (i = 0; i < 8; i++, r_mask >>= 1) {
+ if (r_mask & 0x1) { /* map r0 to MIPS r_A0 */
+ NewLIR3(cu, kMipsSw, i+r_A0, store_cnt*4, rBase);
+ store_cnt++;
}
}
- if (storeCnt) { /* increment after */
- NewLIR3(cUnit, kMipsAddiu, rBase, rBase, storeCnt*4);
+ if (store_cnt) { /* increment after */
+ NewLIR3(cu, kMipsAddiu, rBase, rBase, store_cnt*4);
}
- GenBarrier(cUnit);
+ GenBarrier(cu);
return res; /* NULL always returned which should be ok since no callers use it */
}
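Both multiple-transfer routines read bit i of r_mask as "core register r_A0 + i", assign consecutive words starting at [base + 0], and bump base past the block afterwards. A sketch of the layout a mask produces:

#include <cstdio>

// Mask 0b0101: r_A0+0 from [base+0], r_A0+2 from [base+4], then base += 8.
static void DumpMultiple(int r_mask, const char* op) {
  int cnt = 0;
  for (int i = 0; i < 8; i++, r_mask >>= 1) {
    if (r_mask & 0x1) {
      std::printf("%s r_A0+%d, %d(base)\n", op, i, 4 * cnt++);
    }
  }
  if (cnt != 0) {
    std::printf("addiu base, base, %d\n", 4 * cnt);
  }
}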
-LIR *LoadBaseDispBody(CompilationUnit *cUnit, int rBase,
- int displacement, int rDest, int rDestHi,
- OpSize size, int sReg)
+LIR *LoadBaseDispBody(CompilationUnit *cu, int rBase,
+ int displacement, int r_dest, int r_dest_hi,
+ OpSize size, int s_reg)
/*
* Load value from base + displacement. Optionally perform null check
- * on base (which must have an associated sReg and MIR). If not
+ * on base (which must have an associated s_reg and MIR). If not
* performing null check, incoming MIR can be null. IMPORTANT: this
* code must not allocate any new temps. If a new register is needed
* and base and dest are the same, spill some other register to
@@ -520,7 +520,7 @@
LIR *load = NULL;
LIR *load2 = NULL;
MipsOpCode opcode = kMipsNop;
- bool shortForm = IS_SIMM16(displacement);
+ bool short_form = IS_SIMM16(displacement);
bool pair = false;
switch (size) {
@@ -529,27 +529,27 @@
pair = true;
opcode = kMipsLw;
#ifdef __mips_hard_float
- if (MIPS_FPREG(rDest)) {
+ if (MIPS_FPREG(r_dest)) {
opcode = kMipsFlwc1;
- if (MIPS_DOUBLEREG(rDest)) {
- rDest = rDest - MIPS_FP_DOUBLE;
+ if (MIPS_DOUBLEREG(r_dest)) {
+ r_dest = r_dest - MIPS_FP_DOUBLE;
} else {
- DCHECK(MIPS_FPREG(rDestHi));
- DCHECK(rDest == (rDestHi - 1));
+ DCHECK(MIPS_FPREG(r_dest_hi));
+ DCHECK(r_dest == (r_dest_hi - 1));
}
- rDestHi = rDest + 1;
+ r_dest_hi = r_dest + 1;
}
#endif
- shortForm = IS_SIMM16_2WORD(displacement);
+ short_form = IS_SIMM16_2WORD(displacement);
DCHECK_EQ((displacement & 0x3), 0);
break;
case kWord:
case kSingle:
opcode = kMipsLw;
#ifdef __mips_hard_float
- if (MIPS_FPREG(rDest)) {
+ if (MIPS_FPREG(r_dest)) {
opcode = kMipsFlwc1;
- DCHECK(MIPS_SINGLEREG(rDest));
+ DCHECK(MIPS_SINGLEREG(r_dest));
}
#endif
DCHECK_EQ((displacement & 0x3), 0);
@@ -572,65 +572,65 @@
LOG(FATAL) << "Bad case in LoadBaseIndexedBody";
}
- if (shortForm) {
+ if (short_form) {
if (!pair) {
- load = res = NewLIR3(cUnit, opcode, rDest, displacement, rBase);
+ load = res = NewLIR3(cu, opcode, r_dest, displacement, rBase);
} else {
- load = res = NewLIR3(cUnit, opcode, rDest,
+ load = res = NewLIR3(cu, opcode, r_dest,
displacement + LOWORD_OFFSET, rBase);
- load2 = NewLIR3(cUnit, opcode, rDestHi,
+ load2 = NewLIR3(cu, opcode, r_dest_hi,
displacement + HIWORD_OFFSET, rBase);
}
} else {
if (pair) {
- int rTmp = AllocFreeTemp(cUnit);
- res = OpRegRegImm(cUnit, kOpAdd, rTmp, rBase, displacement);
- load = NewLIR3(cUnit, opcode, rDest, LOWORD_OFFSET, rTmp);
- load2 = NewLIR3(cUnit, opcode, rDestHi, HIWORD_OFFSET, rTmp);
- FreeTemp(cUnit, rTmp);
+ int r_tmp = AllocFreeTemp(cu);
+ res = OpRegRegImm(cu, kOpAdd, r_tmp, rBase, displacement);
+ load = NewLIR3(cu, opcode, r_dest, LOWORD_OFFSET, r_tmp);
+ load2 = NewLIR3(cu, opcode, r_dest_hi, HIWORD_OFFSET, r_tmp);
+ FreeTemp(cu, r_tmp);
} else {
- int rTmp = (rBase == rDest) ? AllocFreeTemp(cUnit) : rDest;
- res = OpRegRegImm(cUnit, kOpAdd, rTmp, rBase, displacement);
- load = NewLIR3(cUnit, opcode, rDest, 0, rTmp);
- if (rTmp != rDest)
- FreeTemp(cUnit, rTmp);
+ int r_tmp = (rBase == r_dest) ? AllocFreeTemp(cu) : r_dest;
+ res = OpRegRegImm(cu, kOpAdd, r_tmp, rBase, displacement);
+ load = NewLIR3(cu, opcode, r_dest, 0, r_tmp);
+ if (r_tmp != r_dest)
+ FreeTemp(cu, r_tmp);
}
}
if (rBase == rMIPS_SP) {
AnnotateDalvikRegAccess(load,
(displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
- true /* isLoad */, pair /* is64bit */);
+ true /* is_load */, pair /* is64bit */);
if (pair) {
AnnotateDalvikRegAccess(load2, (displacement + HIWORD_OFFSET) >> 2,
- true /* isLoad */, pair /* is64bit */);
+ true /* is_load */, pair /* is64bit */);
}
}
return load;
}
-LIR *LoadBaseDisp(CompilationUnit *cUnit, int rBase,
- int displacement, int rDest, OpSize size, int sReg)
+LIR *LoadBaseDisp(CompilationUnit *cu, int rBase,
+ int displacement, int r_dest, OpSize size, int s_reg)
{
- return LoadBaseDispBody(cUnit, rBase, displacement, rDest, -1,
- size, sReg);
+ return LoadBaseDispBody(cu, rBase, displacement, r_dest, -1,
+ size, s_reg);
}
-LIR *LoadBaseDispWide(CompilationUnit *cUnit, int rBase,
- int displacement, int rDestLo, int rDestHi, int sReg)
+LIR *LoadBaseDispWide(CompilationUnit *cu, int rBase,
+ int displacement, int r_dest_lo, int r_dest_hi, int s_reg)
{
- return LoadBaseDispBody(cUnit, rBase, displacement, rDestLo, rDestHi,
- kLong, sReg);
+ return LoadBaseDispBody(cu, rBase, displacement, r_dest_lo, r_dest_hi,
+ kLong, s_reg);
}
-LIR *StoreBaseDispBody(CompilationUnit *cUnit, int rBase,
- int displacement, int rSrc, int rSrcHi, OpSize size)
+LIR *StoreBaseDispBody(CompilationUnit *cu, int rBase,
+ int displacement, int r_src, int r_src_hi, OpSize size)
{
LIR *res;
LIR *store = NULL;
LIR *store2 = NULL;
MipsOpCode opcode = kMipsNop;
- bool shortForm = IS_SIMM16(displacement);
+ bool short_form = IS_SIMM16(displacement);
bool pair = false;
switch (size) {
@@ -639,27 +639,27 @@
pair = true;
opcode = kMipsSw;
#ifdef __mips_hard_float
- if (MIPS_FPREG(rSrc)) {
+ if (MIPS_FPREG(r_src)) {
opcode = kMipsFswc1;
- if (MIPS_DOUBLEREG(rSrc)) {
- rSrc = rSrc - MIPS_FP_DOUBLE;
+ if (MIPS_DOUBLEREG(r_src)) {
+ r_src = r_src - MIPS_FP_DOUBLE;
} else {
- DCHECK(MIPS_FPREG(rSrcHi));
- DCHECK_EQ(rSrc, (rSrcHi - 1));
+ DCHECK(MIPS_FPREG(r_src_hi));
+ DCHECK_EQ(r_src, (r_src_hi - 1));
}
- rSrcHi = rSrc + 1;
+ r_src_hi = r_src + 1;
}
#endif
- shortForm = IS_SIMM16_2WORD(displacement);
+ short_form = IS_SIMM16_2WORD(displacement);
DCHECK_EQ((displacement & 0x3), 0);
break;
case kWord:
case kSingle:
opcode = kMipsSw;
#ifdef __mips_hard_float
- if (MIPS_FPREG(rSrc)) {
+ if (MIPS_FPREG(r_src)) {
opcode = kMipsFswc1;
- DCHECK(MIPS_SINGLEREG(rSrc));
+ DCHECK(MIPS_SINGLEREG(r_src));
}
#endif
DCHECK_EQ((displacement & 0x3), 0);
@@ -677,95 +677,95 @@
LOG(FATAL) << "Bad case in StoreBaseIndexedBody";
}
- if (shortForm) {
+ if (short_form) {
if (!pair) {
- store = res = NewLIR3(cUnit, opcode, rSrc, displacement, rBase);
+ store = res = NewLIR3(cu, opcode, r_src, displacement, rBase);
} else {
- store = res = NewLIR3(cUnit, opcode, rSrc, displacement + LOWORD_OFFSET,
+ store = res = NewLIR3(cu, opcode, r_src, displacement + LOWORD_OFFSET,
rBase);
- store2 = NewLIR3(cUnit, opcode, rSrcHi, displacement + HIWORD_OFFSET,
+ store2 = NewLIR3(cu, opcode, r_src_hi, displacement + HIWORD_OFFSET,
rBase);
}
} else {
- int rScratch = AllocTemp(cUnit);
- res = OpRegRegImm(cUnit, kOpAdd, rScratch, rBase, displacement);
+ int r_scratch = AllocTemp(cu);
+ res = OpRegRegImm(cu, kOpAdd, r_scratch, rBase, displacement);
if (!pair) {
- store = NewLIR3(cUnit, opcode, rSrc, 0, rScratch);
+ store = NewLIR3(cu, opcode, r_src, 0, r_scratch);
} else {
- store = NewLIR3(cUnit, opcode, rSrc, LOWORD_OFFSET, rScratch);
- store2 = NewLIR3(cUnit, opcode, rSrcHi, HIWORD_OFFSET, rScratch);
+ store = NewLIR3(cu, opcode, r_src, LOWORD_OFFSET, r_scratch);
+ store2 = NewLIR3(cu, opcode, r_src_hi, HIWORD_OFFSET, r_scratch);
}
- FreeTemp(cUnit, rScratch);
+ FreeTemp(cu, r_scratch);
}
if (rBase == rMIPS_SP) {
AnnotateDalvikRegAccess(store, (displacement + (pair ? LOWORD_OFFSET : 0))
- >> 2, false /* isLoad */, pair /* is64bit */);
+ >> 2, false /* is_load */, pair /* is64bit */);
if (pair) {
AnnotateDalvikRegAccess(store2, (displacement + HIWORD_OFFSET) >> 2,
- false /* isLoad */, pair /* is64bit */);
+ false /* is_load */, pair /* is64bit */);
}
}
return res;
}
-LIR *StoreBaseDisp(CompilationUnit *cUnit, int rBase,
- int displacement, int rSrc, OpSize size)
+LIR *StoreBaseDisp(CompilationUnit *cu, int rBase,
+ int displacement, int r_src, OpSize size)
{
- return StoreBaseDispBody(cUnit, rBase, displacement, rSrc, -1, size);
+ return StoreBaseDispBody(cu, rBase, displacement, r_src, -1, size);
}
-LIR *StoreBaseDispWide(CompilationUnit *cUnit, int rBase,
- int displacement, int rSrcLo, int rSrcHi)
+LIR *StoreBaseDispWide(CompilationUnit *cu, int rBase,
+ int displacement, int r_src_lo, int r_src_hi)
{
- return StoreBaseDispBody(cUnit, rBase, displacement, rSrcLo, rSrcHi, kLong);
+ return StoreBaseDispBody(cu, rBase, displacement, r_src_lo, r_src_hi, kLong);
}
-void LoadPair(CompilationUnit *cUnit, int base, int lowReg, int highReg)
+void LoadPair(CompilationUnit *cu, int base, int low_reg, int high_reg)
{
- LoadWordDisp(cUnit, base, LOWORD_OFFSET , lowReg);
- LoadWordDisp(cUnit, base, HIWORD_OFFSET , highReg);
+ LoadWordDisp(cu, base, LOWORD_OFFSET , low_reg);
+ LoadWordDisp(cu, base, HIWORD_OFFSET , high_reg);
}
-LIR* OpThreadMem(CompilationUnit* cUnit, OpKind op, int threadOffset)
+LIR* OpThreadMem(CompilationUnit* cu, OpKind op, int thread_offset)
{
LOG(FATAL) << "Unexpected use of OpThreadMem for MIPS";
return NULL;
}
-LIR* OpMem(CompilationUnit* cUnit, OpKind op, int rBase, int disp)
+LIR* OpMem(CompilationUnit* cu, OpKind op, int rBase, int disp)
{
LOG(FATAL) << "Unexpected use of OpMem for MIPS";
return NULL;
}
-LIR* StoreBaseIndexedDisp(CompilationUnit *cUnit,
- int rBase, int rIndex, int scale, int displacement,
- int rSrc, int rSrcHi,
- OpSize size, int sReg)
+LIR* StoreBaseIndexedDisp(CompilationUnit *cu,
+ int rBase, int r_index, int scale, int displacement,
+ int r_src, int r_src_hi,
+ OpSize size, int s_reg)
{
LOG(FATAL) << "Unexpected use of StoreBaseIndexedDisp for MIPS";
return NULL;
}
-LIR* OpRegMem(CompilationUnit *cUnit, OpKind op, int rDest, int rBase,
+LIR* OpRegMem(CompilationUnit *cu, OpKind op, int r_dest, int rBase,
int offset)
{
LOG(FATAL) << "Unexpected use of OpRegMem for MIPS";
return NULL;
}
-LIR* LoadBaseIndexedDisp(CompilationUnit *cUnit,
- int rBase, int rIndex, int scale, int displacement,
- int rDest, int rDestHi,
- OpSize size, int sReg)
+LIR* LoadBaseIndexedDisp(CompilationUnit *cu,
+ int rBase, int r_index, int scale, int displacement,
+ int r_dest, int r_dest_hi,
+ OpSize size, int s_reg)
{
LOG(FATAL) << "Unexpected use of LoadBaseIndexedDisp for MIPS";
return NULL;
}
-LIR* OpCondBranch(CompilationUnit* cUnit, ConditionCode cc, LIR* target)
+LIR* OpCondBranch(CompilationUnit* cu, ConditionCode cc, LIR* target)
{
LOG(FATAL) << "Unexpected use of OpCondBranch for MIPS";
return NULL;
diff --git a/src/compiler/codegen/ralloc_util.cc b/src/compiler/codegen/ralloc_util.cc
index e9a99c1..a26e0cd 100644
--- a/src/compiler/codegen/ralloc_util.cc
+++ b/src/compiler/codegen/ralloc_util.cc
@@ -29,170 +29,170 @@
* not affect the "liveness" of a temp register, which will stay
* live until it is either explicitly killed or reallocated.
*/
-void ResetRegPool(CompilationUnit* cUnit)
+void ResetRegPool(CompilationUnit* cu)
{
int i;
- for (i=0; i < cUnit->regPool->numCoreRegs; i++) {
- if (cUnit->regPool->coreRegs[i].isTemp)
- cUnit->regPool->coreRegs[i].inUse = false;
+ for (i=0; i < cu->reg_pool->num_core_regs; i++) {
+ if (cu->reg_pool->core_regs[i].is_temp)
+ cu->reg_pool->core_regs[i].in_use = false;
}
- for (i=0; i < cUnit->regPool->numFPRegs; i++) {
- if (cUnit->regPool->FPRegs[i].isTemp)
- cUnit->regPool->FPRegs[i].inUse = false;
+ for (i=0; i < cu->reg_pool->num_fp_regs; i++) {
+ if (cu->reg_pool->FPRegs[i].is_temp)
+ cu->reg_pool->FPRegs[i].in_use = false;
}
}
/*
* Set up temp & preserved register pools specialized by target.
- * Note: numRegs may be zero.
+ * Note: num_regs may be zero.
*/
-void CompilerInitPool(RegisterInfo* regs, int* regNums, int num)
+void CompilerInitPool(RegisterInfo* regs, int* reg_nums, int num)
{
int i;
for (i=0; i < num; i++) {
- regs[i].reg = regNums[i];
- regs[i].inUse = false;
- regs[i].isTemp = false;
+ regs[i].reg = reg_nums[i];
+ regs[i].in_use = false;
+ regs[i].is_temp = false;
regs[i].pair = false;
regs[i].live = false;
regs[i].dirty = false;
- regs[i].sReg = INVALID_SREG;
+ regs[i].s_reg = INVALID_SREG;
}
}
-static void DumpRegPool(RegisterInfo* p, int numRegs)
+static void DumpRegPool(RegisterInfo* p, int num_regs)
{
LOG(INFO) << "================================================";
- for (int i = 0; i < numRegs; i++) {
+ for (int i = 0; i < num_regs; i++) {
LOG(INFO) << StringPrintf(
"R[%d]: T:%d, U:%d, P:%d, p:%d, LV:%d, D:%d, SR:%d, ST:%x, EN:%x",
- p[i].reg, p[i].isTemp, p[i].inUse, p[i].pair, p[i].partner,
- p[i].live, p[i].dirty, p[i].sReg, reinterpret_cast<uintptr_t>(p[i].defStart),
- reinterpret_cast<uintptr_t>(p[i].defEnd));
+ p[i].reg, p[i].is_temp, p[i].in_use, p[i].pair, p[i].partner,
+ p[i].live, p[i].dirty, p[i].s_reg, reinterpret_cast<uintptr_t>(p[i].def_start),
+ reinterpret_cast<uintptr_t>(p[i].def_end));
}
LOG(INFO) << "================================================";
}
-void DumpCoreRegPool(CompilationUnit* cUnit)
+void DumpCoreRegPool(CompilationUnit* cu)
{
- DumpRegPool(cUnit->regPool->coreRegs, cUnit->regPool->numCoreRegs);
+ DumpRegPool(cu->reg_pool->core_regs, cu->reg_pool->num_core_regs);
}
-void DumpFpRegPool(CompilationUnit* cUnit)
+void DumpFpRegPool(CompilationUnit* cu)
{
- DumpRegPool(cUnit->regPool->FPRegs, cUnit->regPool->numFPRegs);
+ DumpRegPool(cu->reg_pool->FPRegs, cu->reg_pool->num_fp_regs);
}
/* Mark a temp register as dead. Does not affect allocation state. */
-static void ClobberBody(CompilationUnit *cUnit, RegisterInfo* p)
+static void ClobberBody(CompilationUnit *cu, RegisterInfo* p)
{
- if (p->isTemp) {
+ if (p->is_temp) {
DCHECK(!(p->live && p->dirty)) << "Live & dirty temp in clobber";
p->live = false;
- p->sReg = INVALID_SREG;
- p->defStart = NULL;
- p->defEnd = NULL;
+ p->s_reg = INVALID_SREG;
+ p->def_start = NULL;
+ p->def_end = NULL;
if (p->pair) {
p->pair = false;
- Clobber(cUnit, p->partner);
+ Clobber(cu, p->partner);
}
}
}
/* Mark a temp register as dead. Does not affect allocation state. */
-void Clobber(CompilationUnit* cUnit, int reg)
+void Clobber(CompilationUnit* cu, int reg)
{
- ClobberBody(cUnit, GetRegInfo(cUnit, reg));
+ ClobberBody(cu, GetRegInfo(cu, reg));
}
-static void ClobberSRegBody(RegisterInfo* p, int numRegs, int sReg)
+static void ClobberSRegBody(RegisterInfo* p, int num_regs, int s_reg)
{
int i;
- for (i=0; i< numRegs; i++) {
- if (p[i].sReg == sReg) {
- if (p[i].isTemp) {
+ for (i=0; i< num_regs; i++) {
+ if (p[i].s_reg == s_reg) {
+ if (p[i].is_temp) {
p[i].live = false;
}
- p[i].defStart = NULL;
- p[i].defEnd = NULL;
+ p[i].def_start = NULL;
+ p[i].def_end = NULL;
}
}
}
-/* Clobber any temp associated with an sReg. Could be in either class */
-void ClobberSReg(CompilationUnit* cUnit, int sReg)
+/* Clobber any temp associated with an s_reg. Could be in either class */
+void ClobberSReg(CompilationUnit* cu, int s_reg)
{
#ifndef NDEBUG
/* Reset live temp tracking sanity checker */
- if (sReg == cUnit->liveSReg) {
- cUnit->liveSReg = INVALID_SREG;
+ if (s_reg == cu->live_sreg) {
+ cu->live_sreg = INVALID_SREG;
}
#endif
- ClobberSRegBody(cUnit->regPool->coreRegs, cUnit->regPool->numCoreRegs, sReg);
- ClobberSRegBody(cUnit->regPool->FPRegs, cUnit->regPool->numFPRegs, sReg);
+ ClobberSRegBody(cu->reg_pool->core_regs, cu->reg_pool->num_core_regs, s_reg);
+ ClobberSRegBody(cu->reg_pool->FPRegs, cu->reg_pool->num_fp_regs, s_reg);
}
/*
* SSA names associated with the initial definitions of Dalvik
* registers are the same as the Dalvik register number (and
- * thus take the same position in the promotionMap. However,
+ * thus take the same position in the promotion_map). However,
* the special Method* and compiler temp registers use negative
- * vReg numbers to distinguish them and can have an arbitrary
+ * v_reg numbers to distinguish them and can have an arbitrary
* ssa name (above the last original Dalvik register). This function
- * maps SSA names to positions in the promotionMap array.
+ * maps SSA names to positions in the promotion_map array.
*/
-static int SRegToPMap(CompilationUnit* cUnit, int sReg)
+static int SRegToPMap(CompilationUnit* cu, int s_reg)
{
- DCHECK_LT(sReg, cUnit->numSSARegs);
- DCHECK_GE(sReg, 0);
- int vReg = SRegToVReg(cUnit, sReg);
- if (vReg >= 0) {
- DCHECK_LT(vReg, cUnit->numDalvikRegisters);
- return vReg;
+ DCHECK_LT(s_reg, cu->num_ssa_regs);
+ DCHECK_GE(s_reg, 0);
+ int v_reg = SRegToVReg(cu, s_reg);
+ if (v_reg >= 0) {
+ DCHECK_LT(v_reg, cu->num_dalvik_registers);
+ return v_reg;
} else {
- int pos = std::abs(vReg) - std::abs(SSA_METHOD_BASEREG);
- DCHECK_LE(pos, cUnit->numCompilerTemps);
- return cUnit->numDalvikRegisters + pos;
+ int pos = std::abs(v_reg) - std::abs(SSA_METHOD_BASEREG);
+ DCHECK_LE(pos, cu->num_compiler_temps);
+ return cu->num_dalvik_registers + pos;
}
}
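A worked example, with illustrative numbers (the concrete value of SSA_METHOD_BASEREG does not appear in this CL, so it is treated as an assumed negative constant): with 16 Dalvik registers, v_reg 5 keeps slot 5, the Method* pseudo-register lands just past the Dalvik registers, and compiler temps follow it. A sketch:

// kMethodBase stands in for SSA_METHOD_BASEREG (assumed to be negative).
static int SRegToPMapSketch(int v_reg, int num_dalvik_registers) {
  const int kMethodBase = -1;           // illustrative assumption
  if (v_reg >= 0) {
    return v_reg;                       // original Dalvik registers
  }
  int pos = (-v_reg) - (-kMethodBase);  // std::abs(v_reg) - std::abs(base)
  return num_dalvik_registers + pos;    // specials appended after the frame
}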
-void RecordCorePromotion(CompilationUnit* cUnit, int reg, int sReg)
+void RecordCorePromotion(CompilationUnit* cu, int reg, int s_reg)
{
- int pMapIdx = SRegToPMap(cUnit, sReg);
- int vReg = SRegToVReg(cUnit, sReg);
- GetRegInfo(cUnit, reg)->inUse = true;
- cUnit->coreSpillMask |= (1 << reg);
+ int p_map_idx = SRegToPMap(cu, s_reg);
+ int v_reg = SRegToVReg(cu, s_reg);
+ GetRegInfo(cu, reg)->in_use = true;
+ cu->core_spill_mask |= (1 << reg);
// Include reg for later sort
- cUnit->coreVmapTable.push_back(reg << VREG_NUM_WIDTH |
- (vReg & ((1 << VREG_NUM_WIDTH) - 1)));
- cUnit->numCoreSpills++;
- cUnit->promotionMap[pMapIdx].coreLocation = kLocPhysReg;
- cUnit->promotionMap[pMapIdx].coreReg = reg;
+ cu->core_vmap_table.push_back(reg << VREG_NUM_WIDTH |
+ (v_reg & ((1 << VREG_NUM_WIDTH) - 1)));
+ cu->num_core_spills++;
+ cu->promotion_map[p_map_idx].core_location = kLocPhysReg;
+ cu->promotion_map[p_map_idx].core_reg = reg;
}
/* Reserve a callee-save register. Return -1 if none available */
-static int AllocPreservedCoreReg(CompilationUnit* cUnit, int sReg)
+static int AllocPreservedCoreReg(CompilationUnit* cu, int s_reg)
{
int res = -1;
- RegisterInfo* coreRegs = cUnit->regPool->coreRegs;
- for (int i = 0; i < cUnit->regPool->numCoreRegs; i++) {
- if (!coreRegs[i].isTemp && !coreRegs[i].inUse) {
- res = coreRegs[i].reg;
- RecordCorePromotion(cUnit, res, sReg);
+ RegisterInfo* core_regs = cu->reg_pool->core_regs;
+ for (int i = 0; i < cu->reg_pool->num_core_regs; i++) {
+ if (!core_regs[i].is_temp && !core_regs[i].in_use) {
+ res = core_regs[i].reg;
+ RecordCorePromotion(cu, res, s_reg);
break;
}
}
return res;
}
-void RecordFpPromotion(CompilationUnit* cUnit, int reg, int sReg)
+void RecordFpPromotion(CompilationUnit* cu, int reg, int s_reg)
{
- int pMapIdx = SRegToPMap(cUnit, sReg);
- int vReg = SRegToVReg(cUnit, sReg);
- GetRegInfo(cUnit, reg)->inUse = true;
- MarkPreservedSingle(cUnit, vReg, reg);
- cUnit->promotionMap[pMapIdx].fpLocation = kLocPhysReg;
- cUnit->promotionMap[pMapIdx].FpReg = reg;
+ int p_map_idx = SRegToPMap(cu, s_reg);
+ int v_reg = SRegToVReg(cu, s_reg);
+ GetRegInfo(cu, reg)->in_use = true;
+ MarkPreservedSingle(cu, v_reg, reg);
+ cu->promotion_map[p_map_idx].fp_location = kLocPhysReg;
+ cu->promotion_map[p_map_idx].FpReg = reg;
}
/*
@@ -200,15 +200,15 @@
* even/odd allocation, but go ahead and allocate anything if not
* available. If nothing's available, return -1.
*/
-static int AllocPreservedSingle(CompilationUnit* cUnit, int sReg, bool even)
+static int AllocPreservedSingle(CompilationUnit* cu, int s_reg, bool even)
{
int res = -1;
- RegisterInfo* FPRegs = cUnit->regPool->FPRegs;
- for (int i = 0; i < cUnit->regPool->numFPRegs; i++) {
- if (!FPRegs[i].isTemp && !FPRegs[i].inUse &&
+ RegisterInfo* FPRegs = cu->reg_pool->FPRegs;
+ for (int i = 0; i < cu->reg_pool->num_fp_regs; i++) {
+ if (!FPRegs[i].is_temp && !FPRegs[i].in_use &&
((FPRegs[i].reg & 0x1) == 0) == even) {
res = FPRegs[i].reg;
- RecordFpPromotion(cUnit, res, sReg);
+ RecordFpPromotion(cu, res, s_reg);
break;
}
}
@@ -218,57 +218,57 @@
/*
* Somewhat messy code here. We want to allocate a pair of contiguous
* physical single-precision floating point registers starting with
- * an even numbered reg. It is possible that the paired sReg (sReg+1)
+ * an even numbered reg. It is possible that the paired s_reg (s_reg+1)
* has already been allocated - try to fit if possible. Fail to
* allocate if we can't meet the requirements for the pair of
- * sReg<=sX[even] & (sReg+1)<= sX+1.
+ * s_reg<=sX[even] & (s_reg+1)<= sX+1.
*/
-static int AllocPreservedDouble(CompilationUnit* cUnit, int sReg)
+static int AllocPreservedDouble(CompilationUnit* cu, int s_reg)
{
int res = -1; // Assume failure
- int vReg = SRegToVReg(cUnit, sReg);
- int pMapIdx = SRegToPMap(cUnit, sReg);
- if (cUnit->promotionMap[pMapIdx+1].fpLocation == kLocPhysReg) {
+ int v_reg = SRegToVReg(cu, s_reg);
+ int p_map_idx = SRegToPMap(cu, s_reg);
+ if (cu->promotion_map[p_map_idx+1].fp_location == kLocPhysReg) {
// Upper reg is already allocated. Can we fit?
- int highReg = cUnit->promotionMap[pMapIdx+1].FpReg;
- if ((highReg & 1) == 0) {
+ int high_reg = cu->promotion_map[p_map_idx+1].FpReg;
+ if ((high_reg & 1) == 0) {
// High reg is even - fail.
return res;
}
// Is the low reg of the pair free?
- RegisterInfo* p = GetRegInfo(cUnit, highReg-1);
- if (p->inUse || p->isTemp) {
+ RegisterInfo* p = GetRegInfo(cu, high_reg-1);
+ if (p->in_use || p->is_temp) {
// Already allocated or not preserved - fail.
return res;
}
// OK - good to go.
res = p->reg;
- p->inUse = true;
+ p->in_use = true;
DCHECK_EQ((res & 1), 0);
- MarkPreservedSingle(cUnit, vReg, res);
+ MarkPreservedSingle(cu, v_reg, res);
} else {
- RegisterInfo* FPRegs = cUnit->regPool->FPRegs;
- for (int i = 0; i < cUnit->regPool->numFPRegs; i++) {
- if (!FPRegs[i].isTemp && !FPRegs[i].inUse &&
+ RegisterInfo* FPRegs = cu->reg_pool->FPRegs;
+ for (int i = 0; i < cu->reg_pool->num_fp_regs; i++) {
+ if (!FPRegs[i].is_temp && !FPRegs[i].in_use &&
((FPRegs[i].reg & 0x1) == 0x0) &&
- !FPRegs[i+1].isTemp && !FPRegs[i+1].inUse &&
+ !FPRegs[i+1].is_temp && !FPRegs[i+1].in_use &&
((FPRegs[i+1].reg & 0x1) == 0x1) &&
(FPRegs[i].reg + 1) == FPRegs[i+1].reg) {
res = FPRegs[i].reg;
- FPRegs[i].inUse = true;
- MarkPreservedSingle(cUnit, vReg, res);
- FPRegs[i+1].inUse = true;
+ FPRegs[i].in_use = true;
+ MarkPreservedSingle(cu, v_reg, res);
+ FPRegs[i+1].in_use = true;
DCHECK_EQ(res + 1, FPRegs[i+1].reg);
- MarkPreservedSingle(cUnit, vReg+1, res+1);
+ MarkPreservedSingle(cu, v_reg+1, res+1);
break;
}
}
}
if (res != -1) {
- cUnit->promotionMap[pMapIdx].fpLocation = kLocPhysReg;
- cUnit->promotionMap[pMapIdx].FpReg = res;
- cUnit->promotionMap[pMapIdx+1].fpLocation = kLocPhysReg;
- cUnit->promotionMap[pMapIdx+1].FpReg = res + 1;
+ cu->promotion_map[p_map_idx].fp_location = kLocPhysReg;
+ cu->promotion_map[p_map_idx].FpReg = res;
+ cu->promotion_map[p_map_idx+1].fp_location = kLocPhysReg;
+ cu->promotion_map[p_map_idx+1].FpReg = res + 1;
}
return res;
}
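A promoted double must occupy an (even, even+1) pair of preserved singles. The first branch handles a high half that was promoted earlier: that single must already sit in an odd register, and the even register just below it must be neither a temp nor in use; otherwise the second branch scans for a wholly free aligned pair. A sketch of the first branch's fit test:

// True when an already-promoted high single at high_reg can be completed
// into a double: high must be odd, and the even low half below it must
// still be allocatable (not a temp, not in use).
static bool CanCompleteDouble(int high_reg, bool low_is_temp, bool low_in_use) {
  return (high_reg & 1) == 1 && !low_is_temp && !low_in_use;
}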
@@ -280,104 +280,104 @@
* single regs (but if we can't, still attempt to allocate a single, preferring
* first to allocate an odd register).
*/
-static int AllocPreservedFPReg(CompilationUnit* cUnit, int sReg, bool doubleStart)
+static int AllocPreservedFPReg(CompilationUnit* cu, int s_reg, bool double_start)
{
int res = -1;
- if (doubleStart) {
- res = AllocPreservedDouble(cUnit, sReg);
+ if (double_start) {
+ res = AllocPreservedDouble(cu, s_reg);
}
if (res == -1) {
- res = AllocPreservedSingle(cUnit, sReg, false /* try odd # */);
+ res = AllocPreservedSingle(cu, s_reg, false /* try odd # */);
}
if (res == -1)
- res = AllocPreservedSingle(cUnit, sReg, true /* try even # */);
+ res = AllocPreservedSingle(cu, s_reg, true /* try even # */);
return res;
}
-static int AllocTempBody(CompilationUnit* cUnit, RegisterInfo* p, int numRegs, int* nextTemp,
+static int AllocTempBody(CompilationUnit* cu, RegisterInfo* p, int num_regs, int* next_temp,
bool required)
{
int i;
- int next = *nextTemp;
- for (i=0; i< numRegs; i++) {
- if (next >= numRegs)
+ int next = *next_temp;
+ for (i=0; i< num_regs; i++) {
+ if (next >= num_regs)
next = 0;
- if (p[next].isTemp && !p[next].inUse && !p[next].live) {
- Clobber(cUnit, p[next].reg);
- p[next].inUse = true;
+ if (p[next].is_temp && !p[next].in_use && !p[next].live) {
+ Clobber(cu, p[next].reg);
+ p[next].in_use = true;
p[next].pair = false;
- *nextTemp = next + 1;
+ *next_temp = next + 1;
return p[next].reg;
}
next++;
}
- next = *nextTemp;
- for (i=0; i< numRegs; i++) {
- if (next >= numRegs)
+ next = *next_temp;
+ for (i=0; i< num_regs; i++) {
+ if (next >= num_regs)
next = 0;
- if (p[next].isTemp && !p[next].inUse) {
- Clobber(cUnit, p[next].reg);
- p[next].inUse = true;
+ if (p[next].is_temp && !p[next].in_use) {
+ Clobber(cu, p[next].reg);
+ p[next].in_use = true;
p[next].pair = false;
- *nextTemp = next + 1;
+ *next_temp = next + 1;
return p[next].reg;
}
next++;
}
if (required) {
- CodegenDump(cUnit);
- DumpRegPool(cUnit->regPool->coreRegs,
- cUnit->regPool->numCoreRegs);
+ CodegenDump(cu);
+ DumpRegPool(cu->reg_pool->core_regs,
+ cu->reg_pool->num_core_regs);
LOG(FATAL) << "No free temp registers";
}
return -1; // No register available
}
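AllocTempBody makes two round-robin passes starting at next_temp: the first takes only free temps that are not live (so no cached value is lost), the second settles for any free temp and sacrifices whatever it held. A standalone sketch of the policy (the real code additionally clears the pair flag and clobbers the victim):

#include <vector>

struct TempSketch { bool is_temp, in_use, live; };

// Returns the index of the chosen temp, or -1 if none is free.
static int PickTemp(std::vector<TempSketch>& p, int* next_temp) {
  const int num_regs = static_cast<int>(p.size());
  for (int pass = 0; pass < 2; pass++) {
    int next = *next_temp;
    for (int i = 0; i < num_regs; i++, next++) {
      if (next >= num_regs) {
        next = 0;
      }
      bool usable = p[next].is_temp && !p[next].in_use &&
                    (pass == 1 || !p[next].live);  // pass 0 spares live values
      if (usable) {
        p[next].in_use = true;
        *next_temp = next + 1;
        return next;
      }
    }
  }
  return -1;
}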
//REDO: too many assumptions.
-int AllocTempDouble(CompilationUnit* cUnit)
+int AllocTempDouble(CompilationUnit* cu)
{
- RegisterInfo* p = cUnit->regPool->FPRegs;
- int numRegs = cUnit->regPool->numFPRegs;
+ RegisterInfo* p = cu->reg_pool->FPRegs;
+ int num_regs = cu->reg_pool->num_fp_regs;
/* Start looking at an even reg */
- int next = cUnit->regPool->nextFPReg & ~0x1;
+ int next = cu->reg_pool->next_fp_reg & ~0x1;
// First try to avoid allocating live registers
- for (int i=0; i < numRegs; i+=2) {
- if (next >= numRegs)
+ for (int i=0; i < num_regs; i+=2) {
+ if (next >= num_regs)
next = 0;
- if ((p[next].isTemp && !p[next].inUse && !p[next].live) &&
- (p[next+1].isTemp && !p[next+1].inUse && !p[next+1].live)) {
- Clobber(cUnit, p[next].reg);
- Clobber(cUnit, p[next+1].reg);
- p[next].inUse = true;
- p[next+1].inUse = true;
+ if ((p[next].is_temp && !p[next].in_use && !p[next].live) &&
+ (p[next+1].is_temp && !p[next+1].in_use && !p[next+1].live)) {
+ Clobber(cu, p[next].reg);
+ Clobber(cu, p[next+1].reg);
+ p[next].in_use = true;
+ p[next+1].in_use = true;
DCHECK_EQ((p[next].reg+1), p[next+1].reg);
DCHECK_EQ((p[next].reg & 0x1), 0);
- cUnit->regPool->nextFPReg = next + 2;
- if (cUnit->regPool->nextFPReg >= numRegs) {
- cUnit->regPool->nextFPReg = 0;
+ cu->reg_pool->next_fp_reg = next + 2;
+ if (cu->reg_pool->next_fp_reg >= num_regs) {
+ cu->reg_pool->next_fp_reg = 0;
}
return p[next].reg;
}
next += 2;
}
- next = cUnit->regPool->nextFPReg & ~0x1;
+ next = cu->reg_pool->next_fp_reg & ~0x1;
// No choice - find a pair and kill it.
- for (int i=0; i < numRegs; i+=2) {
- if (next >= numRegs)
+ for (int i=0; i < num_regs; i+=2) {
+ if (next >= num_regs)
next = 0;
- if (p[next].isTemp && !p[next].inUse && p[next+1].isTemp &&
- !p[next+1].inUse) {
- Clobber(cUnit, p[next].reg);
- Clobber(cUnit, p[next+1].reg);
- p[next].inUse = true;
- p[next+1].inUse = true;
+ if (p[next].is_temp && !p[next].in_use && p[next+1].is_temp &&
+ !p[next+1].in_use) {
+ Clobber(cu, p[next].reg);
+ Clobber(cu, p[next+1].reg);
+ p[next].in_use = true;
+ p[next+1].in_use = true;
DCHECK_EQ((p[next].reg+1), p[next+1].reg);
DCHECK_EQ((p[next].reg & 0x1), 0);
- cUnit->regPool->nextFPReg = next + 2;
- if (cUnit->regPool->nextFPReg >= numRegs) {
- cUnit->regPool->nextFPReg = 0;
+ cu->reg_pool->next_fp_reg = next + 2;
+ if (cu->reg_pool->next_fp_reg >= num_regs) {
+ cu->reg_pool->next_fp_reg = 0;
}
return p[next].reg;
}
@@ -388,59 +388,59 @@
}
/* Return a temp if one is available, -1 otherwise */
-int AllocFreeTemp(CompilationUnit* cUnit)
+int AllocFreeTemp(CompilationUnit* cu)
{
- return AllocTempBody(cUnit, cUnit->regPool->coreRegs,
- cUnit->regPool->numCoreRegs,
- &cUnit->regPool->nextCoreReg, true);
+ return AllocTempBody(cu, cu->reg_pool->core_regs,
+ cu->reg_pool->num_core_regs,
+ &cu->reg_pool->next_core_reg, true);
}
-int AllocTemp(CompilationUnit* cUnit)
+int AllocTemp(CompilationUnit* cu)
{
- return AllocTempBody(cUnit, cUnit->regPool->coreRegs,
- cUnit->regPool->numCoreRegs,
- &cUnit->regPool->nextCoreReg, true);
+ return AllocTempBody(cu, cu->reg_pool->core_regs,
+ cu->reg_pool->num_core_regs,
+ &cu->reg_pool->next_core_reg, true);
}
-int AllocTempFloat(CompilationUnit* cUnit)
+int AllocTempFloat(CompilationUnit* cu)
{
- return AllocTempBody(cUnit, cUnit->regPool->FPRegs,
- cUnit->regPool->numFPRegs,
- &cUnit->regPool->nextFPReg, true);
+ return AllocTempBody(cu, cu->reg_pool->FPRegs,
+ cu->reg_pool->num_fp_regs,
+ &cu->reg_pool->next_fp_reg, true);
}
-static RegisterInfo* AllocLiveBody(RegisterInfo* p, int numRegs, int sReg)
+static RegisterInfo* AllocLiveBody(RegisterInfo* p, int num_regs, int s_reg)
{
int i;
- if (sReg == -1)
+ if (s_reg == -1)
return NULL;
- for (i=0; i < numRegs; i++) {
- if (p[i].live && (p[i].sReg == sReg)) {
- if (p[i].isTemp)
- p[i].inUse = true;
+ for (i=0; i < num_regs; i++) {
+ if (p[i].live && (p[i].s_reg == s_reg)) {
+ if (p[i].is_temp)
+ p[i].in_use = true;
return &p[i];
}
}
return NULL;
}
-RegisterInfo* AllocLive(CompilationUnit* cUnit, int sReg, int regClass)
+RegisterInfo* AllocLive(CompilationUnit* cu, int s_reg, int reg_class)
{
RegisterInfo* res = NULL;
- switch (regClass) {
+ switch (reg_class) {
case kAnyReg:
- res = AllocLiveBody(cUnit->regPool->FPRegs,
- cUnit->regPool->numFPRegs, sReg);
+ res = AllocLiveBody(cu->reg_pool->FPRegs,
+ cu->reg_pool->num_fp_regs, s_reg);
if (res)
break;
/* Intentional fallthrough */
case kCoreReg:
- res = AllocLiveBody(cUnit->regPool->coreRegs,
- cUnit->regPool->numCoreRegs, sReg);
+ res = AllocLiveBody(cu->reg_pool->core_regs,
+ cu->reg_pool->num_core_regs, s_reg);
break;
case kFPReg:
- res = AllocLiveBody(cUnit->regPool->FPRegs,
- cUnit->regPool->numFPRegs, sReg);
+ res = AllocLiveBody(cu->reg_pool->FPRegs,
+ cu->reg_pool->num_fp_regs, s_reg);
break;
default:
LOG(FATAL) << "Invalid register type";
@@ -448,26 +448,26 @@
return res;
}
-void FreeTemp(CompilationUnit* cUnit, int reg)
+void FreeTemp(CompilationUnit* cu, int reg)
{
- RegisterInfo* p = cUnit->regPool->coreRegs;
- int numRegs = cUnit->regPool->numCoreRegs;
+ RegisterInfo* p = cu->reg_pool->core_regs;
+ int num_regs = cu->reg_pool->num_core_regs;
int i;
- for (i=0; i< numRegs; i++) {
+ for (i=0; i< num_regs; i++) {
if (p[i].reg == reg) {
- if (p[i].isTemp) {
- p[i].inUse = false;
+ if (p[i].is_temp) {
+ p[i].in_use = false;
}
p[i].pair = false;
return;
}
}
- p = cUnit->regPool->FPRegs;
- numRegs = cUnit->regPool->numFPRegs;
- for (i=0; i< numRegs; i++) {
+ p = cu->reg_pool->FPRegs;
+ num_regs = cu->reg_pool->num_fp_regs;
+ for (i=0; i< num_regs; i++) {
if (p[i].reg == reg) {
- if (p[i].isTemp) {
- p[i].inUse = false;
+ if (p[i].is_temp) {
+ p[i].in_use = false;
}
p[i].pair = false;
return;
@@ -476,19 +476,19 @@
LOG(FATAL) << "Tried to free a non-existant temp: r" << reg;
}
-RegisterInfo* IsLive(CompilationUnit* cUnit, int reg)
+RegisterInfo* IsLive(CompilationUnit* cu, int reg)
{
- RegisterInfo* p = cUnit->regPool->coreRegs;
- int numRegs = cUnit->regPool->numCoreRegs;
+ RegisterInfo* p = cu->reg_pool->core_regs;
+ int num_regs = cu->reg_pool->num_core_regs;
int i;
- for (i=0; i< numRegs; i++) {
+ for (i=0; i< num_regs; i++) {
if (p[i].reg == reg) {
return p[i].live ? &p[i] : NULL;
}
}
- p = cUnit->regPool->FPRegs;
- numRegs = cUnit->regPool->numFPRegs;
- for (i=0; i< numRegs; i++) {
+ p = cu->reg_pool->FPRegs;
+ num_regs = cu->reg_pool->num_fp_regs;
+ for (i=0; i< num_regs; i++) {
if (p[i].reg == reg) {
return p[i].live ? &p[i] : NULL;
}
@@ -496,21 +496,21 @@
return NULL;
}
-RegisterInfo* IsTemp(CompilationUnit* cUnit, int reg)
+RegisterInfo* IsTemp(CompilationUnit* cu, int reg)
{
- RegisterInfo* p = GetRegInfo(cUnit, reg);
- return (p->isTemp) ? p : NULL;
+ RegisterInfo* p = GetRegInfo(cu, reg);
+ return (p->is_temp) ? p : NULL;
}
-RegisterInfo* IsPromoted(CompilationUnit* cUnit, int reg)
+RegisterInfo* IsPromoted(CompilationUnit* cu, int reg)
{
- RegisterInfo* p = GetRegInfo(cUnit, reg);
- return (p->isTemp) ? NULL : p;
+ RegisterInfo* p = GetRegInfo(cu, reg);
+ return (p->is_temp) ? NULL : p;
}
-bool IsDirty(CompilationUnit* cUnit, int reg)
+bool IsDirty(CompilationUnit* cu, int reg)
{
- RegisterInfo* p = GetRegInfo(cUnit, reg);
+ RegisterInfo* p = GetRegInfo(cu, reg);
return p->dirty;
}
@@ -519,25 +519,25 @@
* register. No check is made to see if the register was previously
* allocated. Use with caution.
*/
-void LockTemp(CompilationUnit* cUnit, int reg)
+void LockTemp(CompilationUnit* cu, int reg)
{
- RegisterInfo* p = cUnit->regPool->coreRegs;
- int numRegs = cUnit->regPool->numCoreRegs;
+ RegisterInfo* p = cu->reg_pool->core_regs;
+ int num_regs = cu->reg_pool->num_core_regs;
int i;
- for (i=0; i< numRegs; i++) {
+ for (i=0; i< num_regs; i++) {
if (p[i].reg == reg) {
- DCHECK(p[i].isTemp);
- p[i].inUse = true;
+ DCHECK(p[i].is_temp);
+ p[i].in_use = true;
p[i].live = false;
return;
}
}
- p = cUnit->regPool->FPRegs;
- numRegs = cUnit->regPool->numFPRegs;
- for (i=0; i< numRegs; i++) {
+ p = cu->reg_pool->FPRegs;
+ num_regs = cu->reg_pool->num_fp_regs;
+ for (i=0; i< num_regs; i++) {
if (p[i].reg == reg) {
- DCHECK(p[i].isTemp);
- p[i].inUse = true;
+ DCHECK(p[i].is_temp);
+ p[i].in_use = true;
p[i].live = false;
return;
}
@@ -547,20 +547,20 @@
static void ResetDefBody(RegisterInfo* p)
{
- p->defStart = NULL;
- p->defEnd = NULL;
+ p->def_start = NULL;
+ p->def_end = NULL;
}
-void ResetDef(CompilationUnit* cUnit, int reg)
+void ResetDef(CompilationUnit* cu, int reg)
{
- ResetDefBody(GetRegInfo(cUnit, reg));
+ ResetDefBody(GetRegInfo(cu, reg));
}
-static void NullifyRange(CompilationUnit* cUnit, LIR *start, LIR *finish, int sReg1, int sReg2)
+static void NullifyRange(CompilationUnit* cu, LIR *start, LIR *finish, int s_reg1, int s_reg2)
{
if (start && finish) {
LIR *p;
- DCHECK_EQ(sReg1, sReg2);
+ DCHECK_EQ(s_reg1, s_reg2);
for (p = start; ;p = p->next) {
NopLIR(p);
if (p == finish)
@@ -574,15 +574,15 @@
* on entry start points to the LIR prior to the beginning of the
* sequence.
*/
-void MarkDef(CompilationUnit* cUnit, RegLocation rl,
+void MarkDef(CompilationUnit* cu, RegLocation rl,
LIR *start, LIR *finish)
{
DCHECK(!rl.wide);
DCHECK(start && start->next);
DCHECK(finish);
- RegisterInfo* p = GetRegInfo(cUnit, rl.lowReg);
- p->defStart = start->next;
- p->defEnd = finish;
+ RegisterInfo* p = GetRegInfo(cu, rl.low_reg);
+ p->def_start = start->next;
+ p->def_end = finish;
}
/*
@@ -590,228 +590,228 @@
* on entry start points to the LIR prior to the beginning of the
* sequence.
*/
-void MarkDefWide(CompilationUnit* cUnit, RegLocation rl,
+void MarkDefWide(CompilationUnit* cu, RegLocation rl,
LIR *start, LIR *finish)
{
DCHECK(rl.wide);
DCHECK(start && start->next);
DCHECK(finish);
- RegisterInfo* p = GetRegInfo(cUnit, rl.lowReg);
- ResetDef(cUnit, rl.highReg); // Only track low of pair
- p->defStart = start->next;
- p->defEnd = finish;
+ RegisterInfo* p = GetRegInfo(cu, rl.low_reg);
+ ResetDef(cu, rl.high_reg); // Only track low of pair
+ p->def_start = start->next;
+ p->def_end = finish;
}
-RegLocation WideToNarrow(CompilationUnit* cUnit, RegLocation rl)
+RegLocation WideToNarrow(CompilationUnit* cu, RegLocation rl)
{
DCHECK(rl.wide);
if (rl.location == kLocPhysReg) {
- RegisterInfo* infoLo = GetRegInfo(cUnit, rl.lowReg);
- RegisterInfo* infoHi = GetRegInfo(cUnit, rl.highReg);
- if (infoLo->isTemp) {
- infoLo->pair = false;
- infoLo->defStart = NULL;
- infoLo->defEnd = NULL;
+ RegisterInfo* info_lo = GetRegInfo(cu, rl.low_reg);
+ RegisterInfo* info_hi = GetRegInfo(cu, rl.high_reg);
+ if (info_lo->is_temp) {
+ info_lo->pair = false;
+ info_lo->def_start = NULL;
+ info_lo->def_end = NULL;
}
- if (infoHi->isTemp) {
- infoHi->pair = false;
- infoHi->defStart = NULL;
- infoHi->defEnd = NULL;
+ if (info_hi->is_temp) {
+ info_hi->pair = false;
+ info_hi->def_start = NULL;
+ info_hi->def_end = NULL;
}
}
rl.wide = false;
return rl;
}
-void ResetDefLoc(CompilationUnit* cUnit, RegLocation rl)
+void ResetDefLoc(CompilationUnit* cu, RegLocation rl)
{
DCHECK(!rl.wide);
- RegisterInfo* p = IsTemp(cUnit, rl.lowReg);
- if (p && !(cUnit->disableOpt & (1 << kSuppressLoads))) {
+ RegisterInfo* p = IsTemp(cu, rl.low_reg);
+ if (p && !(cu->disable_opt & (1 << kSuppressLoads))) {
DCHECK(!p->pair);
- NullifyRange(cUnit, p->defStart, p->defEnd, p->sReg, rl.sRegLow);
+ NullifyRange(cu, p->def_start, p->def_end, p->s_reg, rl.s_reg_low);
}
- ResetDef(cUnit, rl.lowReg);
+ ResetDef(cu, rl.low_reg);
}
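MarkDef/MarkDefWide record the LIR range that materialized a temp precisely so ResetDefLoc can erase it: when kSuppressLoads is enabled and the value proves redundant, NullifyRange turns the entire producing sequence into nops rather than letting dead instructions reach the assembler. A sketch of the nop-out walk:

struct LirSketch { LirSketch* next; bool is_nop; };

// Walk the recorded def range [start, finish] and mark each instruction
// as a nop (the flags.is_nop bit seen earlier in this CL).
static void NullifyRangeSketch(LirSketch* start, LirSketch* finish) {
  if (start && finish) {
    for (LirSketch* p = start; ; p = p->next) {
      p->is_nop = true;
      if (p == finish) {
        break;
      }
    }
  }
}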
-void ResetDefLocWide(CompilationUnit* cUnit, RegLocation rl)
+void ResetDefLocWide(CompilationUnit* cu, RegLocation rl)
{
DCHECK(rl.wide);
- RegisterInfo* pLow = IsTemp(cUnit, rl.lowReg);
- RegisterInfo* pHigh = IsTemp(cUnit, rl.highReg);
- if (pLow && !(cUnit->disableOpt & (1 << kSuppressLoads))) {
- DCHECK(pLow->pair);
- NullifyRange(cUnit, pLow->defStart, pLow->defEnd, pLow->sReg, rl.sRegLow);
+ RegisterInfo* p_low = IsTemp(cu, rl.low_reg);
+ RegisterInfo* p_high = IsTemp(cu, rl.high_reg);
+ if (p_low && !(cu->disable_opt & (1 << kSuppressLoads))) {
+ DCHECK(p_low->pair);
+ NullifyRange(cu, p_low->def_start, p_low->def_end, p_low->s_reg, rl.s_reg_low);
}
- if (pHigh && !(cUnit->disableOpt & (1 << kSuppressLoads))) {
- DCHECK(pHigh->pair);
+ if (p_high && !(cu->disable_opt & (1 << kSuppressLoads))) {
+ DCHECK(p_high->pair);
}
- ResetDef(cUnit, rl.lowReg);
- ResetDef(cUnit, rl.highReg);
+ ResetDef(cu, rl.low_reg);
+ ResetDef(cu, rl.high_reg);
}
-void ResetDefTracking(CompilationUnit* cUnit)
+void ResetDefTracking(CompilationUnit* cu)
{
int i;
- for (i=0; i< cUnit->regPool->numCoreRegs; i++) {
- ResetDefBody(&cUnit->regPool->coreRegs[i]);
+ for (i=0; i< cu->reg_pool->num_core_regs; i++) {
+ ResetDefBody(&cu->reg_pool->core_regs[i]);
}
- for (i=0; i< cUnit->regPool->numFPRegs; i++) {
- ResetDefBody(&cUnit->regPool->FPRegs[i]);
+ for (i=0; i< cu->reg_pool->num_fp_regs; i++) {
+ ResetDefBody(&cu->reg_pool->FPRegs[i]);
}
}
-void ClobberAllRegs(CompilationUnit* cUnit)
+void ClobberAllRegs(CompilationUnit* cu)
{
int i;
- for (i=0; i< cUnit->regPool->numCoreRegs; i++) {
- ClobberBody(cUnit, &cUnit->regPool->coreRegs[i]);
+ for (i=0; i< cu->reg_pool->num_core_regs; i++) {
+ ClobberBody(cu, &cu->reg_pool->core_regs[i]);
}
- for (i=0; i< cUnit->regPool->numFPRegs; i++) {
- ClobberBody(cUnit, &cUnit->regPool->FPRegs[i]);
+ for (i=0; i< cu->reg_pool->num_fp_regs; i++) {
+ ClobberBody(cu, &cu->reg_pool->FPRegs[i]);
}
}
// Make sure nothing is live and dirty
-static void FlushAllRegsBody(CompilationUnit* cUnit, RegisterInfo* info, int numRegs)
+static void FlushAllRegsBody(CompilationUnit* cu, RegisterInfo* info, int num_regs)
{
int i;
- for (i=0; i < numRegs; i++) {
+ for (i=0; i < num_regs; i++) {
if (info[i].live && info[i].dirty) {
if (info[i].pair) {
- FlushRegWide(cUnit, info[i].reg, info[i].partner);
+ FlushRegWide(cu, info[i].reg, info[i].partner);
} else {
- FlushReg(cUnit, info[i].reg);
+ FlushReg(cu, info[i].reg);
}
}
}
}
-void FlushAllRegs(CompilationUnit* cUnit)
+void FlushAllRegs(CompilationUnit* cu)
{
- FlushAllRegsBody(cUnit, cUnit->regPool->coreRegs,
- cUnit->regPool->numCoreRegs);
- FlushAllRegsBody(cUnit, cUnit->regPool->FPRegs,
- cUnit->regPool->numFPRegs);
- ClobberAllRegs(cUnit);
+ FlushAllRegsBody(cu, cu->reg_pool->core_regs,
+ cu->reg_pool->num_core_regs);
+ FlushAllRegsBody(cu, cu->reg_pool->FPRegs,
+ cu->reg_pool->num_fp_regs);
+ ClobberAllRegs(cu);
}
//TUNING: rewrite all of this reg stuff. Probably use an attribute table
-static bool RegClassMatches(int regClass, int reg)
+static bool RegClassMatches(int reg_class, int reg)
{
- if (regClass == kAnyReg) {
+ if (reg_class == kAnyReg) {
return true;
- } else if (regClass == kCoreReg) {
+ } else if (reg_class == kCoreReg) {
return !IsFpReg(reg);
} else {
return IsFpReg(reg);
}
}
-void MarkLive(CompilationUnit* cUnit, int reg, int sReg)
+void MarkLive(CompilationUnit* cu, int reg, int s_reg)
{
- RegisterInfo* info = GetRegInfo(cUnit, reg);
- if ((info->reg == reg) && (info->sReg == sReg) && info->live) {
+ RegisterInfo* info = GetRegInfo(cu, reg);
+ if ((info->reg == reg) && (info->s_reg == s_reg) && info->live) {
return; /* already live */
- } else if (sReg != INVALID_SREG) {
- ClobberSReg(cUnit, sReg);
- if (info->isTemp) {
+ } else if (s_reg != INVALID_SREG) {
+ ClobberSReg(cu, s_reg);
+ if (info->is_temp) {
info->live = true;
}
} else {
- /* Can't be live if no associated sReg */
- DCHECK(info->isTemp);
+ /* Can't be live if no associated s_reg */
+ DCHECK(info->is_temp);
info->live = false;
}
- info->sReg = sReg;
+ info->s_reg = s_reg;
}
-void MarkTemp(CompilationUnit* cUnit, int reg)
+void MarkTemp(CompilationUnit* cu, int reg)
{
- RegisterInfo* info = GetRegInfo(cUnit, reg);
- info->isTemp = true;
+ RegisterInfo* info = GetRegInfo(cu, reg);
+ info->is_temp = true;
}
-void UnmarkTemp(CompilationUnit* cUnit, int reg)
+void UnmarkTemp(CompilationUnit* cu, int reg)
{
- RegisterInfo* info = GetRegInfo(cUnit, reg);
- info->isTemp = false;
+ RegisterInfo* info = GetRegInfo(cu, reg);
+ info->is_temp = false;
}
-void MarkPair(CompilationUnit* cUnit, int lowReg, int highReg)
+void MarkPair(CompilationUnit* cu, int low_reg, int high_reg)
{
- RegisterInfo* infoLo = GetRegInfo(cUnit, lowReg);
- RegisterInfo* infoHi = GetRegInfo(cUnit, highReg);
- infoLo->pair = infoHi->pair = true;
- infoLo->partner = highReg;
- infoHi->partner = lowReg;
+ RegisterInfo* info_lo = GetRegInfo(cu, low_reg);
+ RegisterInfo* info_hi = GetRegInfo(cu, high_reg);
+ info_lo->pair = info_hi->pair = true;
+ info_lo->partner = high_reg;
+ info_hi->partner = low_reg;
}
-void MarkClean(CompilationUnit* cUnit, RegLocation loc)
+void MarkClean(CompilationUnit* cu, RegLocation loc)
{
- RegisterInfo* info = GetRegInfo(cUnit, loc.lowReg);
+ RegisterInfo* info = GetRegInfo(cu, loc.low_reg);
info->dirty = false;
if (loc.wide) {
- info = GetRegInfo(cUnit, loc.highReg);
+ info = GetRegInfo(cu, loc.high_reg);
info->dirty = false;
}
}
-void MarkDirty(CompilationUnit* cUnit, RegLocation loc)
+void MarkDirty(CompilationUnit* cu, RegLocation loc)
{
if (loc.home) {
// If already home, can't be dirty
return;
}
- RegisterInfo* info = GetRegInfo(cUnit, loc.lowReg);
+ RegisterInfo* info = GetRegInfo(cu, loc.low_reg);
info->dirty = true;
if (loc.wide) {
- info = GetRegInfo(cUnit, loc.highReg);
+ info = GetRegInfo(cu, loc.high_reg);
info->dirty = true;
}
}
-void MarkInUse(CompilationUnit* cUnit, int reg)
+void MarkInUse(CompilationUnit* cu, int reg)
{
- RegisterInfo* info = GetRegInfo(cUnit, reg);
- info->inUse = true;
+ RegisterInfo* info = GetRegInfo(cu, reg);
+ info->in_use = true;
}
-static void CopyRegInfo(CompilationUnit* cUnit, int newReg, int oldReg)
+static void CopyRegInfo(CompilationUnit* cu, int new_reg, int old_reg)
{
- RegisterInfo* newInfo = GetRegInfo(cUnit, newReg);
- RegisterInfo* oldInfo = GetRegInfo(cUnit, oldReg);
+ RegisterInfo* new_info = GetRegInfo(cu, new_reg);
+ RegisterInfo* old_info = GetRegInfo(cu, old_reg);
// Target temp status must not change
- bool isTemp = newInfo->isTemp;
- *newInfo = *oldInfo;
+ bool is_temp = new_info->is_temp;
+ *new_info = *old_info;
// Restore target's temp status
- newInfo->isTemp = isTemp;
- newInfo->reg = newReg;
+ new_info->is_temp = is_temp;
+ new_info->reg = new_reg;
}
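
The temp-status handling in CopyRegInfo is easy to misread, so here is a minimal standalone sketch, with illustrative stand-in types rather than the real RegisterInfo, of why the target's is_temp must survive the wholesale struct copy:

#include <cassert>

// Stand-in for the handful of RegisterInfo fields the copy touches.
struct RegInfoSketch {
  int reg;        // the physical register's own identity
  int s_reg;      // SSA name of the value currently held
  bool live;
  bool is_temp;   // allocation class of the register, not of the value
};

// Mirrors CopyRegInfo: copy the value-tracking state wholesale, but keep
// the target's allocation class; the caller then restores reg itself.
static void CopyRegInfoSketch(RegInfoSketch* new_info,
                              const RegInfoSketch* old_info) {
  bool is_temp = new_info->is_temp;
  *new_info = *old_info;
  new_info->is_temp = is_temp;
}

int main() {
  RegInfoSketch src = {2, 10, true, false};  // promoted reg holding v10
  RegInfoSketch dst = {7, -1, false, true};  // a scratch temp
  CopyRegInfoSketch(&dst, &src);
  dst.reg = 7;                               // as CopyRegInfo does last
  assert(dst.s_reg == 10 && dst.live);       // value state moved over
  assert(dst.is_temp);                       // but dst stayed a temp
  return 0;
}

The point of the design: is_temp and reg describe the register itself, while the rest of the struct describes the value it holds, so only the latter should travel in a copy.
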
-static bool CheckCorePoolSanity(CompilationUnit* cUnit)
+static bool CheckCorePoolSanity(CompilationUnit* cu)
{
- for (static int i = 0; i < cUnit->regPool->numCoreRegs; i++) {
- if (cUnit->regPool->coreRegs[i].pair) {
- static int myReg = cUnit->regPool->coreRegs[i].reg;
- static int mySreg = cUnit->regPool->coreRegs[i].sReg;
- static int partnerReg = cUnit->regPool->coreRegs[i].partner;
- static RegisterInfo* partner = GetRegInfo(cUnit, partnerReg);
+ for (int i = 0; i < cu->reg_pool->num_core_regs; i++) {
+ if (cu->reg_pool->core_regs[i].pair) {
+ int my_reg = cu->reg_pool->core_regs[i].reg;
+ int my_sreg = cu->reg_pool->core_regs[i].s_reg;
+ int partner_reg = cu->reg_pool->core_regs[i].partner;
+ RegisterInfo* partner = GetRegInfo(cu, partner_reg);
DCHECK(partner != NULL);
DCHECK(partner->pair);
- DCHECK_EQ(myReg, partner->partner);
- static int partnerSreg = partner->sReg;
- if (mySreg == INVALID_SREG) {
- DCHECK_EQ(partnerSreg, INVALID_SREG);
+ DCHECK_EQ(my_reg, partner->partner);
+ int partner_sreg = partner->s_reg;
+ if (my_sreg == INVALID_SREG) {
+ DCHECK_EQ(partner_sreg, INVALID_SREG);
} else {
- int diff = mySreg - partnerSreg;
+ int diff = my_sreg - partner_sreg;
DCHECK((diff == -1) || (diff == 1));
}
}
- if (!cUnit->regPool->coreRegs[i].live) {
- DCHECK(cUnit->regPool->coreRegs[i].defStart == NULL);
- DCHECK(cUnit->regPool->coreRegs[i].defEnd == NULL);
+ if (!cu->reg_pool->core_regs[i].live) {
+ DCHECK(cu->reg_pool->core_regs[i].def_start == NULL);
+ DCHECK(cu->reg_pool->core_regs[i].def_end == NULL);
}
}
return true;
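
The DCHECKs above encode an invariant worth spelling out: the two halves of a wide pair must name each other as partners, and their SSA names must be adjacent or both invalid. A hedged standalone sketch (not ART code; -1 stands in for INVALID_SREG):

#include <cassert>
#include <cstdlib>

struct PairInfoSketch {
  int reg;      // this physical register
  int partner;  // physical register holding the other half
  int s_reg;    // SSA name, or -1 when invalid
};

// True iff a and b form a well-formed wide pair under the rules
// CheckCorePoolSanity enforces.
static bool PairSane(const PairInfoSketch& a, const PairInfoSketch& b) {
  if (a.partner != b.reg || b.partner != a.reg) return false;  // mutual
  if (a.s_reg == -1) return b.s_reg == -1;  // invalid names travel together
  return std::abs(a.s_reg - b.s_reg) == 1;  // low/high words are adjacent
}

int main() {
  PairInfoSketch lo = {2, 3, 10};
  PairInfoSketch hi = {3, 2, 11};
  assert(PairSane(lo, hi));   // a healthy pair
  hi.s_reg = 13;
  assert(!PairSane(lo, hi));  // halves drifted apart; the check would fire
  return 0;
}
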
@@ -827,21 +827,21 @@
* if it's worthwhile trying to be more clever here.
*/
-RegLocation UpdateLoc(CompilationUnit* cUnit, RegLocation loc)
+RegLocation UpdateLoc(CompilationUnit* cu, RegLocation loc)
{
DCHECK(!loc.wide);
- DCHECK(CheckCorePoolSanity(cUnit));
+ DCHECK(CheckCorePoolSanity(cu));
if (loc.location != kLocPhysReg) {
DCHECK((loc.location == kLocDalvikFrame) ||
(loc.location == kLocCompilerTemp));
- RegisterInfo* infoLo = AllocLive(cUnit, loc.sRegLow, kAnyReg);
- if (infoLo) {
- if (infoLo->pair) {
- Clobber(cUnit, infoLo->reg);
- Clobber(cUnit, infoLo->partner);
- FreeTemp(cUnit, infoLo->reg);
+ RegisterInfo* info_lo = AllocLive(cu, loc.s_reg_low, kAnyReg);
+ if (info_lo) {
+ if (info_lo->pair) {
+ Clobber(cu, info_lo->reg);
+ Clobber(cu, info_lo->partner);
+ FreeTemp(cu, info_lo->reg);
} else {
- loc.lowReg = infoLo->reg;
+ loc.low_reg = info_lo->reg;
loc.location = kLocPhysReg;
}
}
@@ -850,55 +850,55 @@
return loc;
}
-/* see comments for updateLoc */
-RegLocation UpdateLocWide(CompilationUnit* cUnit, RegLocation loc)
+/* see comments for UpdateLoc */
+RegLocation UpdateLocWide(CompilationUnit* cu, RegLocation loc)
{
DCHECK(loc.wide);
- DCHECK(CheckCorePoolSanity(cUnit));
+ DCHECK(CheckCorePoolSanity(cu));
if (loc.location != kLocPhysReg) {
DCHECK((loc.location == kLocDalvikFrame) ||
(loc.location == kLocCompilerTemp));
// Are the dalvik regs already live in physical registers?
- RegisterInfo* infoLo = AllocLive(cUnit, loc.sRegLow, kAnyReg);
- RegisterInfo* infoHi = AllocLive(cUnit,
- oatSRegHi(loc.sRegLow), kAnyReg);
+ RegisterInfo* info_lo = AllocLive(cu, loc.s_reg_low, kAnyReg);
+ RegisterInfo* info_hi = AllocLive(cu,
+ GetSRegHi(loc.s_reg_low), kAnyReg);
bool match = true;
- match = match && (infoLo != NULL);
- match = match && (infoHi != NULL);
+ match = match && (info_lo != NULL);
+ match = match && (info_hi != NULL);
// Are they both core or both FP?
- match = match && (IsFpReg(infoLo->reg) == IsFpReg(infoHi->reg));
+ match = match && (IsFpReg(info_lo->reg) == IsFpReg(info_hi->reg));
// If a pair of floating point singles, are they properly aligned?
- if (match && IsFpReg(infoLo->reg)) {
- match &= ((infoLo->reg & 0x1) == 0);
- match &= ((infoHi->reg - infoLo->reg) == 1);
+ if (match && IsFpReg(info_lo->reg)) {
+ match &= ((info_lo->reg & 0x1) == 0);
+ match &= ((info_hi->reg - info_lo->reg) == 1);
}
// If previously used as a pair, it is the same pair?
- if (match && (infoLo->pair || infoHi->pair)) {
- match = (infoLo->pair == infoHi->pair);
- match &= ((infoLo->reg == infoHi->partner) &&
- (infoHi->reg == infoLo->partner));
+ if (match && (info_lo->pair || info_hi->pair)) {
+ match = (info_lo->pair == info_hi->pair);
+ match &= ((info_lo->reg == info_hi->partner) &&
+ (info_hi->reg == info_lo->partner));
}
if (match) {
// Can reuse - update the register usage info
- loc.lowReg = infoLo->reg;
- loc.highReg = infoHi->reg;
+ loc.low_reg = info_lo->reg;
+ loc.high_reg = info_hi->reg;
loc.location = kLocPhysReg;
- MarkPair(cUnit, loc.lowReg, loc.highReg);
- DCHECK(!IsFpReg(loc.lowReg) || ((loc.lowReg & 0x1) == 0));
+ MarkPair(cu, loc.low_reg, loc.high_reg);
+ DCHECK(!IsFpReg(loc.low_reg) || ((loc.low_reg & 0x1) == 0));
return loc;
}
// Can't easily reuse - clobber and free any overlaps
- if (infoLo) {
- Clobber(cUnit, infoLo->reg);
- FreeTemp(cUnit, infoLo->reg);
- if (infoLo->pair)
- Clobber(cUnit, infoLo->partner);
+ if (info_lo) {
+ Clobber(cu, info_lo->reg);
+ FreeTemp(cu, info_lo->reg);
+ if (info_lo->pair)
+ Clobber(cu, info_lo->partner);
}
- if (infoHi) {
- Clobber(cUnit, infoHi->reg);
- FreeTemp(cUnit, infoHi->reg);
- if (infoHi->pair)
- Clobber(cUnit, infoHi->partner);
+ if (info_hi) {
+ Clobber(cu, info_hi->reg);
+ FreeTemp(cu, info_hi->reg);
+ if (info_hi->pair)
+ Clobber(cu, info_hi->partner);
}
}
return loc;
@@ -906,161 +906,161 @@
/* For use in cases we don't know (or care) width */
-RegLocation UpdateRawLoc(CompilationUnit* cUnit, RegLocation loc)
+RegLocation UpdateRawLoc(CompilationUnit* cu, RegLocation loc)
{
if (loc.wide)
- return UpdateLocWide(cUnit, loc);
+ return UpdateLocWide(cu, loc);
else
- return UpdateLoc(cUnit, loc);
+ return UpdateLoc(cu, loc);
}
-RegLocation EvalLocWide(CompilationUnit* cUnit, RegLocation loc, int regClass, bool update)
+RegLocation EvalLocWide(CompilationUnit* cu, RegLocation loc, int reg_class, bool update)
{
DCHECK(loc.wide);
- int newRegs;
- int lowReg;
- int highReg;
+ int new_regs;
+ int low_reg;
+ int high_reg;
- loc = UpdateLocWide(cUnit, loc);
+ loc = UpdateLocWide(cu, loc);
/* If already in registers, we can assume proper form. Right reg class? */
if (loc.location == kLocPhysReg) {
- DCHECK_EQ(IsFpReg(loc.lowReg), IsFpReg(loc.highReg));
- DCHECK(!IsFpReg(loc.lowReg) || ((loc.lowReg & 0x1) == 0));
- if (!RegClassMatches(regClass, loc.lowReg)) {
+ DCHECK_EQ(IsFpReg(loc.low_reg), IsFpReg(loc.high_reg));
+ DCHECK(!IsFpReg(loc.low_reg) || ((loc.low_reg & 0x1) == 0));
+ if (!RegClassMatches(reg_class, loc.low_reg)) {
/* Wrong register class. Reallocate and copy */
- newRegs = AllocTypedTempPair(cUnit, loc.fp, regClass);
- lowReg = newRegs & 0xff;
- highReg = (newRegs >> 8) & 0xff;
- OpRegCopyWide(cUnit, lowReg, highReg, loc.lowReg,
- loc.highReg);
- CopyRegInfo(cUnit, lowReg, loc.lowReg);
- CopyRegInfo(cUnit, highReg, loc.highReg);
- Clobber(cUnit, loc.lowReg);
- Clobber(cUnit, loc.highReg);
- loc.lowReg = lowReg;
- loc.highReg = highReg;
- MarkPair(cUnit, loc.lowReg, loc.highReg);
- DCHECK(!IsFpReg(loc.lowReg) || ((loc.lowReg & 0x1) == 0));
+ new_regs = AllocTypedTempPair(cu, loc.fp, reg_class);
+ low_reg = new_regs & 0xff;
+ high_reg = (new_regs >> 8) & 0xff;
+ OpRegCopyWide(cu, low_reg, high_reg, loc.low_reg,
+ loc.high_reg);
+ CopyRegInfo(cu, low_reg, loc.low_reg);
+ CopyRegInfo(cu, high_reg, loc.high_reg);
+ Clobber(cu, loc.low_reg);
+ Clobber(cu, loc.high_reg);
+ loc.low_reg = low_reg;
+ loc.high_reg = high_reg;
+ MarkPair(cu, loc.low_reg, loc.high_reg);
+ DCHECK(!IsFpReg(loc.low_reg) || ((loc.low_reg & 0x1) == 0));
}
return loc;
}
- DCHECK_NE(loc.sRegLow, INVALID_SREG);
- DCHECK_NE(oatSRegHi(loc.sRegLow), INVALID_SREG);
+ DCHECK_NE(loc.s_reg_low, INVALID_SREG);
+ DCHECK_NE(GetSRegHi(loc.s_reg_low), INVALID_SREG);
- newRegs = AllocTypedTempPair(cUnit, loc.fp, regClass);
- loc.lowReg = newRegs & 0xff;
- loc.highReg = (newRegs >> 8) & 0xff;
+ new_regs = AllocTypedTempPair(cu, loc.fp, reg_class);
+ loc.low_reg = new_regs & 0xff;
+ loc.high_reg = (new_regs >> 8) & 0xff;
- MarkPair(cUnit, loc.lowReg, loc.highReg);
+ MarkPair(cu, loc.low_reg, loc.high_reg);
if (update) {
loc.location = kLocPhysReg;
- MarkLive(cUnit, loc.lowReg, loc.sRegLow);
- MarkLive(cUnit, loc.highReg, oatSRegHi(loc.sRegLow));
+ MarkLive(cu, loc.low_reg, loc.s_reg_low);
+ MarkLive(cu, loc.high_reg, GetSRegHi(loc.s_reg_low));
}
- DCHECK(!IsFpReg(loc.lowReg) || ((loc.lowReg & 0x1) == 0));
+ DCHECK(!IsFpReg(loc.low_reg) || ((loc.low_reg & 0x1) == 0));
return loc;
}
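
As the unpacking arithmetic above shows, AllocTypedTempPair hands back both halves of a register pair packed into a single int, low register in bits 0-7 and high register in bits 8-15. A small self-contained illustration of the convention (sketch only; the real allocator draws from the register pool):

#include <cassert>

// Pack two 8-bit register numbers the way EvalLocWide expects to unpack
// them: (new_regs & 0xff) is the low half, (new_regs >> 8) & 0xff the high.
static int PackRegPair(int low_reg, int high_reg) {
  return (low_reg & 0xff) | ((high_reg & 0xff) << 8);
}

int main() {
  int new_regs = PackRegPair(4, 5);  // e.g. an aligned pair of FP singles
  int low_reg = new_regs & 0xff;
  int high_reg = (new_regs >> 8) & 0xff;
  assert(low_reg == 4 && high_reg == 5);
  // A double built from singles must start at an even register, which is
  // what the repeated DCHECK(... (loc.low_reg & 0x1) == 0) guards.
  assert((low_reg & 0x1) == 0 && high_reg == low_reg + 1);
  return 0;
}
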
-RegLocation EvalLoc(CompilationUnit* cUnit, RegLocation loc,
- int regClass, bool update)
+RegLocation EvalLoc(CompilationUnit* cu, RegLocation loc,
+ int reg_class, bool update)
{
- int newReg;
+ int new_reg;
if (loc.wide)
- return EvalLocWide(cUnit, loc, regClass, update);
+ return EvalLocWide(cu, loc, reg_class, update);
- loc = UpdateLoc(cUnit, loc);
+ loc = UpdateLoc(cu, loc);
if (loc.location == kLocPhysReg) {
- if (!RegClassMatches(regClass, loc.lowReg)) {
+ if (!RegClassMatches(reg_class, loc.low_reg)) {
/* Wrong register class. Realloc, copy and transfer ownership */
- newReg = AllocTypedTemp(cUnit, loc.fp, regClass);
- OpRegCopy(cUnit, newReg, loc.lowReg);
- CopyRegInfo(cUnit, newReg, loc.lowReg);
- Clobber(cUnit, loc.lowReg);
- loc.lowReg = newReg;
+ new_reg = AllocTypedTemp(cu, loc.fp, reg_class);
+ OpRegCopy(cu, new_reg, loc.low_reg);
+ CopyRegInfo(cu, new_reg, loc.low_reg);
+ Clobber(cu, loc.low_reg);
+ loc.low_reg = new_reg;
}
return loc;
}
- DCHECK_NE(loc.sRegLow, INVALID_SREG);
+ DCHECK_NE(loc.s_reg_low, INVALID_SREG);
- newReg = AllocTypedTemp(cUnit, loc.fp, regClass);
- loc.lowReg = newReg;
+ new_reg = AllocTypedTemp(cu, loc.fp, reg_class);
+ loc.low_reg = new_reg;
if (update) {
loc.location = kLocPhysReg;
- MarkLive(cUnit, loc.lowReg, loc.sRegLow);
+ MarkLive(cu, loc.low_reg, loc.s_reg_low);
}
return loc;
}
-RegLocation GetRawSrc(CompilationUnit* cUnit, MIR* mir, int num)
+RegLocation GetRawSrc(CompilationUnit* cu, MIR* mir, int num)
{
- DCHECK(num < mir->ssaRep->numUses);
- RegLocation res = cUnit->regLocation[mir->ssaRep->uses[num]];
+ DCHECK(num < mir->ssa_rep->num_uses);
+ RegLocation res = cu->reg_location[mir->ssa_rep->uses[num]];
return res;
}
-RegLocation GetRawDest(CompilationUnit* cUnit, MIR* mir)
+RegLocation GetRawDest(CompilationUnit* cu, MIR* mir)
{
- DCHECK_GT(mir->ssaRep->numDefs, 0);
- RegLocation res = cUnit->regLocation[mir->ssaRep->defs[0]];
+ DCHECK_GT(mir->ssa_rep->num_defs, 0);
+ RegLocation res = cu->reg_location[mir->ssa_rep->defs[0]];
return res;
}
-RegLocation GetDest(CompilationUnit* cUnit, MIR* mir)
+RegLocation GetDest(CompilationUnit* cu, MIR* mir)
{
- RegLocation res = GetRawDest(cUnit, mir);
+ RegLocation res = GetRawDest(cu, mir);
DCHECK(!res.wide);
return res;
}
-RegLocation GetSrc(CompilationUnit* cUnit, MIR* mir, int num)
+RegLocation GetSrc(CompilationUnit* cu, MIR* mir, int num)
{
- RegLocation res = GetRawSrc(cUnit, mir, num);
+ RegLocation res = GetRawSrc(cu, mir, num);
DCHECK(!res.wide);
return res;
}
-RegLocation GetDestWide(CompilationUnit* cUnit, MIR* mir)
+RegLocation GetDestWide(CompilationUnit* cu, MIR* mir)
{
- RegLocation res = GetRawDest(cUnit, mir);
+ RegLocation res = GetRawDest(cu, mir);
DCHECK(res.wide);
return res;
}
-RegLocation GetSrcWide(CompilationUnit* cUnit, MIR* mir,
+RegLocation GetSrcWide(CompilationUnit* cu, MIR* mir,
int low)
{
- RegLocation res = GetRawSrc(cUnit, mir, low);
+ RegLocation res = GetRawSrc(cu, mir, low);
DCHECK(res.wide);
return res;
}
-/* USE SSA names to count references of base Dalvik vRegs. */
-static void CountRefs(CompilationUnit *cUnit, BasicBlock* bb, RefCounts* coreCounts,
- RefCounts* fpCounts)
+/* Use SSA names to count references of base Dalvik v_regs. */
+static void CountRefs(CompilationUnit *cu, BasicBlock* bb, RefCounts* core_counts,
+ RefCounts* fp_counts)
{
- if ((cUnit->disableOpt & (1 << kPromoteRegs)) ||
- !((bb->blockType == kEntryBlock) || (bb->blockType == kExitBlock) ||
- (bb->blockType == kDalvikByteCode))) {
+ if ((cu->disable_opt & (1 << kPromoteRegs)) ||
+ !((bb->block_type == kEntryBlock) || (bb->block_type == kExitBlock) ||
+ (bb->block_type == kDalvikByteCode))) {
return;
}
- for (int i = 0; i < cUnit->numSSARegs;) {
- RegLocation loc = cUnit->regLocation[i];
- RefCounts* counts = loc.fp ? fpCounts : coreCounts;
- int pMapIdx = SRegToPMap(cUnit, loc.sRegLow);
+ for (int i = 0; i < cu->num_ssa_regs;) {
+ RegLocation loc = cu->reg_location[i];
+ RefCounts* counts = loc.fp ? fp_counts : core_counts;
+ int p_map_idx = SRegToPMap(cu, loc.s_reg_low);
if (loc.defined) {
- counts[pMapIdx].count += cUnit->useCounts.elemList[i];
+ counts[p_map_idx].count += cu->use_counts.elem_list[i];
}
if (loc.wide) {
if (loc.defined) {
if (loc.fp) {
- counts[pMapIdx].doubleStart = true;
- counts[pMapIdx+1].count += cUnit->useCounts.elemList[i+1];
+ counts[p_map_idx].double_start = true;
+ counts[p_map_idx+1].count += cu->use_counts.elem_list[i+1];
}
}
i += 2;
@@ -1082,7 +1082,7 @@
{
LOG(INFO) << msg;
for (int i = 0; i < size; i++) {
- LOG(INFO) << "sReg[" << arr[i].sReg << "]: " << arr[i].count;
+ LOG(INFO) << "s_reg[" << arr[i].s_reg << "]: " << arr[i].count;
}
}
@@ -1090,15 +1090,15 @@
* Note: some portions of this code required even if the kPromoteRegs
* optimization is disabled.
*/
-void DoPromotion(CompilationUnit* cUnit)
+void DoPromotion(CompilationUnit* cu)
{
- int regBias = cUnit->numCompilerTemps + 1;
- int dalvikRegs = cUnit->numDalvikRegisters;
- int numRegs = dalvikRegs + regBias;
- const int promotionThreshold = 2;
+ int reg_bias = cu->num_compiler_temps + 1;
+ int dalvik_regs = cu->num_dalvik_registers;
+ int num_regs = dalvik_regs + reg_bias;
+ const int promotion_threshold = 2;
// Allow target code to add any special registers
- AdjustSpillMask(cUnit);
+ AdjustSpillMask(cu);
/*
* Simple register promotion. Just do a static count of the uses
@@ -1111,31 +1111,31 @@
* TUNING: replace with linear scan once we have the ability
* to describe register live ranges for GC.
*/
- RefCounts *coreRegs = static_cast<RefCounts*>(NewMem(cUnit, sizeof(RefCounts) * numRegs,
+ RefCounts *core_regs = static_cast<RefCounts*>(NewMem(cu, sizeof(RefCounts) * num_regs,
true, kAllocRegAlloc));
- RefCounts *FpRegs = static_cast<RefCounts *>(NewMem(cUnit, sizeof(RefCounts) * numRegs,
+ RefCounts *FpRegs = static_cast<RefCounts *>(NewMem(cu, sizeof(RefCounts) * num_regs,
true, kAllocRegAlloc));
// Set ssa names for original Dalvik registers
- for (int i = 0; i < dalvikRegs; i++) {
- coreRegs[i].sReg = FpRegs[i].sReg = i;
+ for (int i = 0; i < dalvik_regs; i++) {
+ core_regs[i].s_reg = FpRegs[i].s_reg = i;
}
// Set ssa name for Method*
- coreRegs[dalvikRegs].sReg = cUnit->methodSReg;
- FpRegs[dalvikRegs].sReg = cUnit->methodSReg; // For consistecy
- // Set ssa names for compilerTemps
- for (int i = 1; i <= cUnit->numCompilerTemps; i++) {
- CompilerTemp* ct = reinterpret_cast<CompilerTemp*>(cUnit->compilerTemps.elemList[i]);
- coreRegs[dalvikRegs + i].sReg = ct->sReg;
- FpRegs[dalvikRegs + i].sReg = ct->sReg;
+ core_regs[dalvik_regs].s_reg = cu->method_sreg;
+ FpRegs[dalvik_regs].s_reg = cu->method_sreg; // For consistency
+ // Set ssa names for compiler_temps
+ for (int i = 1; i <= cu->num_compiler_temps; i++) {
+ CompilerTemp* ct = reinterpret_cast<CompilerTemp*>(cu->compiler_temps.elem_list[i]);
+ core_regs[dalvik_regs + i].s_reg = ct->s_reg;
+ FpRegs[dalvik_regs + i].s_reg = ct->s_reg;
}
GrowableListIterator iterator;
- GrowableListIteratorInit(&cUnit->blockList, &iterator);
+ GrowableListIteratorInit(&cu->block_list, &iterator);
while (true) {
BasicBlock* bb;
bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iterator));
if (bb == NULL) break;
- CountRefs(cUnit, bb, coreRegs, FpRegs);
+ CountRefs(cu, bb, core_regs, FpRegs);
}
/*
@@ -1143,29 +1143,29 @@
* register. Bias the counts to try to allocate any vreg that's
* used as the start of a pair first.
*/
- for (int i = 0; i < numRegs; i++) {
- if (FpRegs[i].doubleStart) {
+ for (int i = 0; i < num_regs; i++) {
+ if (FpRegs[i].double_start) {
FpRegs[i].count *= 2;
}
}
// Sort the count arrays
- qsort(coreRegs, numRegs, sizeof(RefCounts), SortCounts);
- qsort(FpRegs, numRegs, sizeof(RefCounts), SortCounts);
+ qsort(core_regs, num_regs, sizeof(RefCounts), SortCounts);
+ qsort(FpRegs, num_regs, sizeof(RefCounts), SortCounts);
- if (cUnit->printMe) {
- DumpCounts(coreRegs, numRegs, "Core regs after sort");
- DumpCounts(FpRegs, numRegs, "Fp regs after sort");
+ if (cu->verbose) {
+ DumpCounts(core_regs, num_regs, "Core regs after sort");
+ DumpCounts(FpRegs, num_regs, "Fp regs after sort");
}
- if (!(cUnit->disableOpt & (1 << kPromoteRegs))) {
+ if (!(cu->disable_opt & (1 << kPromoteRegs))) {
// Promote FpRegs
- for (int i = 0; (i < numRegs) &&
- (FpRegs[i].count >= promotionThreshold ); i++) {
- int pMapIdx = SRegToPMap(cUnit, FpRegs[i].sReg);
- if (cUnit->promotionMap[pMapIdx].fpLocation != kLocPhysReg) {
- int reg = AllocPreservedFPReg(cUnit, FpRegs[i].sReg,
- FpRegs[i].doubleStart);
+ for (int i = 0; (i < num_regs) &&
+ (FpRegs[i].count >= promotion_threshold); i++) {
+ int p_map_idx = SRegToPMap(cu, FpRegs[i].s_reg);
+ if (cu->promotion_map[p_map_idx].fp_location != kLocPhysReg) {
+ int reg = AllocPreservedFPReg(cu, FpRegs[i].s_reg,
+ FpRegs[i].double_start);
if (reg < 0) {
break; // No more left
}
@@ -1173,21 +1173,21 @@
}
// Promote core regs
- for (int i = 0; (i < numRegs) &&
- (coreRegs[i].count > promotionThreshold); i++) {
- int pMapIdx = SRegToPMap(cUnit, coreRegs[i].sReg);
- if (cUnit->promotionMap[pMapIdx].coreLocation !=
+ for (int i = 0; (i < num_regs) &&
+ (core_regs[i].count > promotion_threshold); i++) {
+ int p_map_idx = SRegToPMap(cu, core_regs[i].s_reg);
+ if (cu->promotion_map[p_map_idx].core_location !=
kLocPhysReg) {
- int reg = AllocPreservedCoreReg(cUnit, coreRegs[i].sReg);
+ int reg = AllocPreservedCoreReg(cu, core_regs[i].s_reg);
if (reg < 0) {
break; // No more left
}
}
}
- } else if (cUnit->qdMode) {
- AllocPreservedCoreReg(cUnit, cUnit->methodSReg);
- for (int i = 0; i < numRegs; i++) {
- int reg = AllocPreservedCoreReg(cUnit, i);
+ } else if (cu->qd_mode) {
+ AllocPreservedCoreReg(cu, cu->method_sreg);
+ for (int i = 0; i < num_regs; i++) {
+ int reg = AllocPreservedCoreReg(cu, i);
if (reg < 0) {
break; // No more left
}
@@ -1196,70 +1196,70 @@
// Now, update SSA names to new home locations
- for (int i = 0; i < cUnit->numSSARegs; i++) {
- RegLocation *curr = &cUnit->regLocation[i];
- int pMapIdx = SRegToPMap(cUnit, curr->sRegLow);
+ for (int i = 0; i < cu->num_ssa_regs; i++) {
+ RegLocation *curr = &cu->reg_location[i];
+ int p_map_idx = SRegToPMap(cu, curr->s_reg_low);
if (!curr->wide) {
if (curr->fp) {
- if (cUnit->promotionMap[pMapIdx].fpLocation == kLocPhysReg) {
+ if (cu->promotion_map[p_map_idx].fp_location == kLocPhysReg) {
curr->location = kLocPhysReg;
- curr->lowReg = cUnit->promotionMap[pMapIdx].FpReg;
+ curr->low_reg = cu->promotion_map[p_map_idx].FpReg;
curr->home = true;
}
} else {
- if (cUnit->promotionMap[pMapIdx].coreLocation == kLocPhysReg) {
+ if (cu->promotion_map[p_map_idx].core_location == kLocPhysReg) {
curr->location = kLocPhysReg;
- curr->lowReg = cUnit->promotionMap[pMapIdx].coreReg;
+ curr->low_reg = cu->promotion_map[p_map_idx].core_reg;
curr->home = true;
}
}
- curr->highReg = INVALID_REG;
+ curr->high_reg = INVALID_REG;
} else {
- if (curr->highWord) {
+ if (curr->high_word) {
continue;
}
if (curr->fp) {
- if ((cUnit->promotionMap[pMapIdx].fpLocation == kLocPhysReg) &&
- (cUnit->promotionMap[pMapIdx+1].fpLocation ==
+ if ((cu->promotion_map[p_map_idx].fp_location == kLocPhysReg) &&
+ (cu->promotion_map[p_map_idx+1].fp_location ==
kLocPhysReg)) {
- int lowReg = cUnit->promotionMap[pMapIdx].FpReg;
- int highReg = cUnit->promotionMap[pMapIdx+1].FpReg;
+ int low_reg = cu->promotion_map[p_map_idx].FpReg;
+ int high_reg = cu->promotion_map[p_map_idx+1].FpReg;
// Doubles require pair of singles starting at even reg
- if (((lowReg & 0x1) == 0) && ((lowReg + 1) == highReg)) {
+ if (((low_reg & 0x1) == 0) && ((low_reg + 1) == high_reg)) {
curr->location = kLocPhysReg;
- curr->lowReg = lowReg;
- curr->highReg = highReg;
+ curr->low_reg = low_reg;
+ curr->high_reg = high_reg;
curr->home = true;
}
}
} else {
- if ((cUnit->promotionMap[pMapIdx].coreLocation == kLocPhysReg)
- && (cUnit->promotionMap[pMapIdx+1].coreLocation ==
+ if ((cu->promotion_map[p_map_idx].core_location == kLocPhysReg)
+ && (cu->promotion_map[p_map_idx+1].core_location ==
kLocPhysReg)) {
curr->location = kLocPhysReg;
- curr->lowReg = cUnit->promotionMap[pMapIdx].coreReg;
- curr->highReg = cUnit->promotionMap[pMapIdx+1].coreReg;
+ curr->low_reg = cu->promotion_map[p_map_idx].core_reg;
+ curr->high_reg = cu->promotion_map[p_map_idx+1].core_reg;
curr->home = true;
}
}
}
}
- if (cUnit->printMe) {
- DumpPromotionMap(cUnit);
+ if (cu->verbose) {
+ DumpPromotionMap(cu);
}
}
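
For intuition, a compressed standalone sketch of the heuristic DoPromotion implements (hypothetical stand-in types; the real pass calls AllocPreservedCoreReg/AllocPreservedFPReg instead of printing, and stops early when callee-save registers run out):

#include <algorithm>
#include <cstdio>
#include <vector>

struct RefCountSketch {
  int count;          // static use count gathered by CountRefs
  int s_reg;
  bool double_start;  // low half of an FP double
};

int main() {
  std::vector<RefCountSketch> counts = {
    {5, 0, false}, {1, 1, false}, {3, 2, true}, {2, 3, false},
  };
  const int promotion_threshold = 2;
  // Bias: a v_reg that starts a double counts for both halves.
  for (auto& c : counts) {
    if (c.double_start) c.count *= 2;
  }
  // Sort by descending use count, as the qsort(SortCounts) calls do.
  std::sort(counts.begin(), counts.end(),
            [](const RefCountSketch& a, const RefCountSketch& b) {
              return a.count > b.count;
            });
  // Promote greedily while the (weighted) count clears the threshold.
  for (const auto& c : counts) {
    if (c.count < promotion_threshold) break;
    std::printf("promote v%d (weighted count %d)\n", c.s_reg, c.count);
  }
  return 0;
}
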
/* Returns sp-relative offset in bytes for a VReg */
-int VRegOffset(CompilationUnit* cUnit, int vReg)
+int VRegOffset(CompilationUnit* cu, int v_reg)
{
- return StackVisitor::GetVRegOffset(cUnit->code_item, cUnit->coreSpillMask,
- cUnit->fpSpillMask, cUnit->frameSize, vReg);
+ return StackVisitor::GetVRegOffset(cu->code_item, cu->core_spill_mask,
+ cu->fp_spill_mask, cu->frame_size, v_reg);
}
/* Returns sp-relative offset in bytes for a SReg */
-int SRegOffset(CompilationUnit* cUnit, int sReg)
+int SRegOffset(CompilationUnit* cu, int s_reg)
{
- return VRegOffset(cUnit, SRegToVReg(cUnit, sReg));
+ return VRegOffset(cu, SRegToVReg(cu, s_reg));
}
} // namespace art
diff --git a/src/compiler/codegen/ralloc_util.h b/src/compiler/codegen/ralloc_util.h
index 31fda52c..4e897ca 100644
--- a/src/compiler/codegen/ralloc_util.h
+++ b/src/compiler/codegen/ralloc_util.h
@@ -30,140 +30,140 @@
/* Static register use counts */
struct RefCounts {
int count;
- int sReg;
- bool doubleStart; // Starting vReg for a double
+ int s_reg;
+ bool double_start; // Starting v_reg for a double
};
/*
- * Get the "real" sreg number associated with an sReg slot. In general,
- * sReg values passed through codegen are the SSA names created by
- * dataflow analysis and refer to slot numbers in the cUnit->regLocation
+ * Get the "real" sreg number associated with an s_reg slot. In general,
+ * s_reg values passed through codegen are the SSA names created by
+ * dataflow analysis and refer to slot numbers in the cu->reg_location
* array. However, renaming is accomplished by simply replacing RegLocation
- * entries in the cUnit->reglocation[] array. Therefore, when location
+ * entries in the cu->reglocation[] array. Therefore, when location
* records for operands are first created, we need to ask the locRecord
 * identified by the dataflow pass what its new name is.
*/
-inline int oatSRegHi(int lowSreg) {
+inline int GetSRegHi(int lowSreg) {
return (lowSreg == INVALID_SREG) ? INVALID_SREG : lowSreg + 1;
}
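
A wide (64-bit) value simply occupies two consecutive SSA slots, which is all GetSRegHi encodes; a quick usage illustration (with -1 standing in for INVALID_SREG):

#include <cassert>

static const int kInvalidSreg = -1;  // stand-in for INVALID_SREG

static int GetSRegHiSketch(int low_sreg) {
  return (low_sreg == kInvalidSreg) ? kInvalidSreg : low_sreg + 1;
}

int main() {
  assert(GetSRegHiSketch(6) == 7);   // a long/double lives in slots 6 and 7
  assert(GetSRegHiSketch(kInvalidSreg) == kInvalidSreg);  // stays invalid
  return 0;
}
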
-inline bool oatLiveOut(CompilationUnit* cUnit, int sReg) {
+inline bool oat_live_out(CompilationUnit* cu, int s_reg) {
 // For now.
return true;
}
inline int oatSSASrc(MIR* mir, int num) {
- DCHECK_GT(mir->ssaRep->numUses, num);
- return mir->ssaRep->uses[num];
+ DCHECK_GT(mir->ssa_rep->num_uses, num);
+ return mir->ssa_rep->uses[num];
}
-void ClobberSReg(CompilationUnit* cUnit, int sReg);
-RegLocation EvalLoc(CompilationUnit* cUnit, RegLocation loc,
- int regClass, bool update);
+void ClobberSReg(CompilationUnit* cu, int s_reg);
+RegLocation EvalLoc(CompilationUnit* cu, RegLocation loc,
+ int reg_class, bool update);
/* Mark a temp register as dead. Does not affect allocation state. */
-void Clobber(CompilationUnit* cUnit, int reg);
-RegLocation UpdateLoc(CompilationUnit* cUnit, RegLocation loc);
+void Clobber(CompilationUnit* cu, int reg);
+RegLocation UpdateLoc(CompilationUnit* cu, RegLocation loc);
-/* see comments for updateLoc */
-RegLocation UpdateLocWide(CompilationUnit* cUnit, RegLocation loc);
+/* see comments for UpdateLoc */
+RegLocation UpdateLocWide(CompilationUnit* cu, RegLocation loc);
-RegLocation UpdateRawLoc(CompilationUnit* cUnit, RegLocation loc);
+RegLocation UpdateRawLoc(CompilationUnit* cu, RegLocation loc);
-void MarkLive(CompilationUnit* cUnit, int reg, int sReg);
+void MarkLive(CompilationUnit* cu, int reg, int s_reg);
-void MarkTemp(CompilationUnit* cUnit, int reg);
+void MarkTemp(CompilationUnit* cu, int reg);
-void UnmarkTemp(CompilationUnit* cUnit, int reg);
+void UnmarkTemp(CompilationUnit* cu, int reg);
-void MarkDirty(CompilationUnit* cUnit, RegLocation loc);
+void MarkDirty(CompilationUnit* cu, RegLocation loc);
-void MarkPair(CompilationUnit* cUnit, int lowReg, int highReg);
+void MarkPair(CompilationUnit* cu, int low_reg, int high_reg);
-void MarkClean(CompilationUnit* cUnit, RegLocation loc);
+void MarkClean(CompilationUnit* cu, RegLocation loc);
-void ResetDef(CompilationUnit* cUnit, int reg);
+void ResetDef(CompilationUnit* cu, int reg);
-void ResetDefLoc(CompilationUnit* cUnit, RegLocation rl);
+void ResetDefLoc(CompilationUnit* cu, RegLocation rl);
/* Set up temp & preserved register pools specialized by target */
-void CompilerInitPool(RegisterInfo* regs, int* regNums, int num);
+void CompilerInitPool(RegisterInfo* regs, int* reg_nums, int num);
/*
* Mark the beginning and end LIR of a def sequence. Note that
* on entry start points to the LIR prior to the beginning of the
* sequence.
*/
-void MarkDef(CompilationUnit* cUnit, RegLocation rl, LIR* start,
+void MarkDef(CompilationUnit* cu, RegLocation rl, LIR* start,
LIR* finish);
/*
* Mark the beginning and end LIR of a def sequence. Note that
* on entry start points to the LIR prior to the beginning of the
* sequence.
*/
-void MarkDefWide(CompilationUnit* cUnit, RegLocation rl,
+void MarkDefWide(CompilationUnit* cu, RegLocation rl,
LIR* start, LIR* finish);
// Get the LocRecord associated with an SSA name use.
-RegLocation GetSrc(CompilationUnit* cUnit, MIR* mir, int num);
-RegLocation GetSrcWide(CompilationUnit* cUnit, MIR* mir, int low);
+RegLocation GetSrc(CompilationUnit* cu, MIR* mir, int num);
+RegLocation GetSrcWide(CompilationUnit* cu, MIR* mir, int low);
// Non-width checking version
-RegLocation GetRawSrc(CompilationUnit* cUnit, MIR* mir, int num);
+RegLocation GetRawSrc(CompilationUnit* cu, MIR* mir, int num);
// Get the LocRecord associated with an SSA name def.
-RegLocation GetDest(CompilationUnit* cUnit, MIR* mir);
-RegLocation GetDestWide(CompilationUnit* cUnit, MIR* mir);
+RegLocation GetDest(CompilationUnit* cu, MIR* mir);
+RegLocation GetDestWide(CompilationUnit* cu, MIR* mir);
// Non-width checking version
-RegLocation GetRawDest(CompilationUnit* cUnit, MIR* mir);
+RegLocation GetRawDest(CompilationUnit* cu, MIR* mir);
-RegLocation GetReturnWide(CompilationUnit* cUnit, bool isDouble);
+RegLocation GetReturnWide(CompilationUnit* cu, bool is_double);
/* Clobber all regs that might be used by an external C call */
-void ClobberCalleeSave(CompilationUnit* cUnit);
+void ClobberCalleeSave(CompilationUnit* cu);
-RegisterInfo *IsTemp(CompilationUnit* cUnit, int reg);
+RegisterInfo *IsTemp(CompilationUnit* cu, int reg);
-RegisterInfo *IsPromoted(CompilationUnit* cUnit, int reg);
+RegisterInfo *IsPromoted(CompilationUnit* cu, int reg);
-bool IsDirty(CompilationUnit* cUnit, int reg);
+bool IsDirty(CompilationUnit* cu, int reg);
-void MarkInUse(CompilationUnit* cUnit, int reg);
+void MarkInUse(CompilationUnit* cu, int reg);
-int AllocTemp(CompilationUnit* cUnit);
+int AllocTemp(CompilationUnit* cu);
-int AllocTempFloat(CompilationUnit* cUnit);
+int AllocTempFloat(CompilationUnit* cu);
//REDO: too many assumptions.
-int AllocTempDouble(CompilationUnit* cUnit);
+int AllocTempDouble(CompilationUnit* cu);
-void FreeTemp(CompilationUnit* cUnit, int reg);
+void FreeTemp(CompilationUnit* cu, int reg);
-void ResetDefLocWide(CompilationUnit* cUnit, RegLocation rl);
+void ResetDefLocWide(CompilationUnit* cu, RegLocation rl);
-void ResetDefTracking(CompilationUnit* cUnit);
+void ResetDefTracking(CompilationUnit* cu);
-RegisterInfo *IsLive(CompilationUnit* cUnit, int reg);
+RegisterInfo *IsLive(CompilationUnit* cu, int reg);
/* To be used when explicitly managing register use */
-void LockCallTemps(CompilationUnit* cUnit);
+void LockCallTemps(CompilationUnit* cu);
-void FreeCallTemps(CompilationUnit* cUnit);
+void FreeCallTemps(CompilationUnit* cu);
-void FlushAllRegs(CompilationUnit* cUnit);
+void FlushAllRegs(CompilationUnit* cu);
-RegLocation GetReturnWideAlt(CompilationUnit* cUnit);
+RegLocation GetReturnWideAlt(CompilationUnit* cu);
-RegLocation GetReturn(CompilationUnit* cUnit, bool isFloat);
+RegLocation GetReturn(CompilationUnit* cu, bool is_float);
-RegLocation GetReturnAlt(CompilationUnit* cUnit);
+RegLocation GetReturnAlt(CompilationUnit* cu);
-/* Clobber any temp associated with an sReg. Could be in either class */
+/* Clobber any temp associated with an s_reg. Could be in either class */
/* Return a temp if one is available, -1 otherwise */
-int AllocFreeTemp(CompilationUnit* cUnit);
+int AllocFreeTemp(CompilationUnit* cu);
/* Attempt to allocate a callee-save register */
/*
@@ -171,44 +171,44 @@
* register. No check is made to see if the register was previously
* allocated. Use with caution.
*/
-void LockTemp(CompilationUnit* cUnit, int reg);
+void LockTemp(CompilationUnit* cu, int reg);
-RegLocation WideToNarrow(CompilationUnit* cUnit, RegLocation rl);
+RegLocation WideToNarrow(CompilationUnit* cu, RegLocation rl);
/*
* Free all allocated temps in the temp pools. Note that this does
* not affect the "liveness" of a temp register, which will stay
* live until it is either explicitly killed or reallocated.
*/
-void ResetRegPool(CompilationUnit* cUnit);
+void ResetRegPool(CompilationUnit* cu);
-void ClobberAllRegs(CompilationUnit* cUnit);
+void ClobberAllRegs(CompilationUnit* cu);
-void FlushRegWide(CompilationUnit* cUnit, int reg1, int reg2);
+void FlushRegWide(CompilationUnit* cu, int reg1, int reg2);
-void FlushReg(CompilationUnit* cUnit, int reg);
+void FlushReg(CompilationUnit* cu, int reg);
-void DoPromotion(CompilationUnit* cUnit);
-int VRegOffset(CompilationUnit* cUnit, int reg);
-int SRegOffset(CompilationUnit* cUnit, int reg);
-void RecordCorePromotion(CompilationUnit* cUnit, int reg, int sReg);
-void RecordFpPromotion(CompilationUnit* cUnit, int reg, int sReg);
+void DoPromotion(CompilationUnit* cu);
+int VRegOffset(CompilationUnit* cu, int reg);
+int SRegOffset(CompilationUnit* cu, int reg);
+void RecordCorePromotion(CompilationUnit* cu, int reg, int s_reg);
+void RecordFpPromotion(CompilationUnit* cu, int reg, int s_reg);
/* Architecture-dependent register allocation routines. */
-int AllocTypedTempPair(CompilationUnit* cUnit,
- bool fpHint, int regClass);
+int AllocTypedTempPair(CompilationUnit* cu,
+ bool fp_hint, int reg_class);
-int AllocTypedTemp(CompilationUnit* cUnit, bool fpHint, int regClass);
+int AllocTypedTemp(CompilationUnit* cu, bool fp_hint, int reg_class);
void oatDumpFPRegPool(CompilationUnit* cUint);
-RegisterInfo* GetRegInfo(CompilationUnit* cUnit, int reg);
+RegisterInfo* GetRegInfo(CompilationUnit* cu, int reg);
void NopLIR(LIR* lir);
bool oatIsFPReg(int reg);
uint32_t oatFPRegMask(void);
-void AdjustSpillMask(CompilationUnit* cUnit);
-void MarkPreservedSingle(CompilationUnit* cUnit, int vReg, int reg);
-int ComputeFrameSize(CompilationUnit* cUnit);
+void AdjustSpillMask(CompilationUnit* cu);
+void MarkPreservedSingle(CompilationUnit* cu, int v_reg, int reg);
+int ComputeFrameSize(CompilationUnit* cu);
} // namespace art
diff --git a/src/compiler/codegen/target_list.h b/src/compiler/codegen/target_list.h
index 76c0bdb..0023d90 100644
--- a/src/compiler/codegen/target_list.h
+++ b/src/compiler/codegen/target_list.h
@@ -1,130 +1,130 @@
ArmConditionCode ArmConditionEncoding(ConditionCode code);
-AssemblerStatus AssembleInstructions(CompilationUnit* cUnit, uintptr_t startAddr);
+AssemblerStatus AssembleInstructions(CompilationUnit* cu, uintptr_t start_addr);
bool DoubleReg(int reg);
bool FpReg(int reg);
-bool GenAddLong(CompilationUnit* cUnit, RegLocation rlDest, RegLocation rlSrc1, RegLocation rlSrc2);
-bool GenAndLong(CompilationUnit* cUnit, RegLocation rlDest, RegLocation rlSrc1, RegLocation rlSrc2);
-bool GenArithOpDouble(CompilationUnit* cUnit, Instruction::Code opcode, RegLocation rlDest, RegLocation rlSrc1, RegLocation rlSrc2);
-bool GenArithOpFloat(CompilationUnit *cUnit, Instruction::Code opcode, RegLocation rlDest, RegLocation rlSrc1, RegLocation rlSrc2);
-bool GenArithOpFloat(CompilationUnit* cUnit, Instruction::Code opcode, RegLocation rlDest, RegLocation rlSrc1, RegLocation rlSrc2);
-bool GenCmpFP(CompilationUnit* cUnit, Instruction::Code opcode, RegLocation rlDest, RegLocation rlSrc1, RegLocation rlSrc2);
-bool GenConversion(CompilationUnit* cUnit, Instruction::Code opcode, RegLocation rlDest, RegLocation rlSrc);
-bool GenInlinedCas32(CompilationUnit* cUnit, CallInfo* info, bool need_write_barrier);
-bool GenInlinedMinMaxInt(CompilationUnit *cUnit, CallInfo* info, bool isMin);
-bool GenInlinedSqrt(CompilationUnit* cUnit, CallInfo* info);
-bool GenNegLong(CompilationUnit* cUnit, RegLocation rlDest, RegLocation rlSrc);
-bool GenOrLong(CompilationUnit* cUnit, RegLocation rlDest, RegLocation rlSrc1, RegLocation rlSrc2);
-bool GenSubLong(CompilationUnit* cUnit, RegLocation rlDest, RegLocation rlSrc1, RegLocation rlSrc2);
-bool GenXorLong(CompilationUnit* cUnit, RegLocation rlDest, RegLocation rlSrc1, RegLocation rlSrc2);
+bool GenAddLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+bool GenAndLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+bool GenArithOpDouble(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+bool GenArithOpFloat(CompilationUnit *cu, Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+bool GenArithOpFloat(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+bool GenCmpFP(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+bool GenConversion(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src);
+bool GenInlinedCas32(CompilationUnit* cu, CallInfo* info, bool need_write_barrier);
+bool GenInlinedMinMaxInt(CompilationUnit *cu, CallInfo* info, bool is_min);
+bool GenInlinedSqrt(CompilationUnit* cu, CallInfo* info);
+bool GenNegLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
+bool GenOrLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+bool GenSubLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+bool GenXorLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
bool ArchInit();
bool ArchVariantInit(void);
bool IsFpReg(int reg);
bool SameRegType(int reg1, int reg2);
bool SingleReg(int reg);
-bool SmallLiteralDivide(CompilationUnit* cUnit, Instruction::Code dalvikOpcode, RegLocation rlSrc, RegLocation rlDest, int lit);
-RegisterInfo* GetRegInfo(CompilationUnit* cUnit, int reg);
-RegLocation GetReturnAlt(CompilationUnit* cUnit);
-RegLocation GetReturnWideAlt(CompilationUnit* cUnit);
-void ClobberCalleeSave(CompilationUnit *cUnit);
-void FreeCallTemps(CompilationUnit* cUnit);
-void LockCallTemps(CompilationUnit* cUnit);
+bool SmallLiteralDivide(CompilationUnit* cu, Instruction::Code dalvik_opcode, RegLocation rl_src, RegLocation rl_dest, int lit);
+RegisterInfo* GetRegInfo(CompilationUnit* cu, int reg);
+RegLocation GetReturnAlt(CompilationUnit* cu);
+RegLocation GetReturnWideAlt(CompilationUnit* cu);
+void ClobberCalleeSave(CompilationUnit *cu);
+void FreeCallTemps(CompilationUnit* cu);
+void LockCallTemps(CompilationUnit* cu);
InstructionSet InstructionSet();
int EncodeShift(int code, int amount);
-int LoadHelper(CompilationUnit* cUnit, int offset);
+int LoadHelper(CompilationUnit* cu, int offset);
int ModifiedImmediate(uint32_t value);
-int AllocTypedTemp(CompilationUnit* cUnit, bool fpHint, int regClass);
-int AllocTypedTempPair(CompilationUnit* cUnit, bool fpHint, int regClass);
-int AssignInsnOffsets(CompilationUnit* cUnit);
+int AllocTypedTemp(CompilationUnit* cu, bool fp_hint, int reg_class);
+int AllocTypedTempPair(CompilationUnit* cu, bool fp_hint, int reg_class);
+int AssignInsnOffsets(CompilationUnit* cu);
int GetInsnSize(LIR* lir);
-int S2d(int lowReg, int highReg);
+int S2d(int low_reg, int high_reg);
int TargetReg(SpecialTargetRegister reg);
-LIR* FpRegCopy(CompilationUnit* cUnit, int rDest, int rSrc);
-LIR* GenRegMemCheck(CompilationUnit* cUnit, ConditionCode cCode, int reg1, int base, int offset, ThrowKind kind);
-LIR* LoadBaseDispBody(CompilationUnit* cUnit, int rBase, int displacement, int rDest, int rDestHi, OpSize size, int sReg);
-LIR* LoadBaseDisp(CompilationUnit* cUnit, int rBase, int displacement, int rDest, OpSize size, int sReg);
-LIR* LoadBaseDispWide(CompilationUnit* cUnit, int rBase, int displacement, int rDestLo, int rDestHi, int sReg);
-LIR* LoadBaseIndexed(CompilationUnit* cUnit, int rBase, int rIndex, int rDest, int scale, OpSize size);
-LIR* LoadBaseIndexedDisp(CompilationUnit *cUnit, int rBase, int rIndex, int scale, int displacement, int rDest, int rDestHi, OpSize size, int sReg);
-LIR* LoadConstantNoClobber(CompilationUnit* cUnit, int rDest, int value);
-LIR* LoadConstantValueWide(CompilationUnit* cUnit, int rDestLo, int rDestHi, int valLo, int valHi);
-LIR* LoadMultiple(CompilationUnit *cUnit, int rBase, int rMask);
-LIR* OpBranchUnconditional(CompilationUnit* cUnit, OpKind op);
-LIR* OpCmpBranch(CompilationUnit* cUnit, ConditionCode cond, int src1, int src2, LIR* target);
-LIR* OpCmpImmBranch(CompilationUnit* cUnit, ConditionCode cond, int reg, int checkValue, LIR* target);
-LIR* OpCondBranch(CompilationUnit* cUnit, ConditionCode cc, LIR* target);
-LIR* OpDecAndBranch(CompilationUnit* cUnit, ConditionCode cCode, int reg, LIR* target);
-LIR* OpIT(CompilationUnit* cUnit, ArmConditionCode cond, const char* guide);
-LIR* OpMem(CompilationUnit* cUnit, OpKind op, int rBase, int disp);
-LIR* OpPcRelLoad(CompilationUnit* cUnit, int reg, LIR* target);
-LIR* OpReg(CompilationUnit* cUnit, OpKind op, int rDestSrc);
-LIR* OpRegCopy(CompilationUnit* cUnit, int rDest, int rSrc);
-LIR* OpRegCopyNoInsert(CompilationUnit* cUnit, int rDest, int rSrc);
-LIR* OpRegImm(CompilationUnit* cUnit, OpKind op, int rDestSrc1, int value);
-LIR* OpRegMem(CompilationUnit* cUnit, OpKind op, int rDest, int rBase, int offset);
-LIR* OpRegReg(CompilationUnit* cUnit, OpKind op, int rDestSrc1, int rSrc2);
-LIR* OpRegRegImm(CompilationUnit* cUnit, OpKind op, int rDest, int rSrc1, int value);
-LIR* OpRegRegReg(CompilationUnit* cUnit, OpKind op, int rDest, int rSrc1, int rSrc2);
-LIR* OpRegRegRegShift(CompilationUnit* cUnit, OpKind op, int rDest, int rSrc1, int rSrc2, int shift);
-LIR* OpRegRegShift(CompilationUnit* cUnit, OpKind op, int rDestSrc1, int rSrc2, int shift);
-LIR* OpTestSuspend(CompilationUnit* cUnit, LIR* target);
-LIR* OpThreadMem(CompilationUnit* cUnit, OpKind op, int threadOffset);
-LIR* OpVldm(CompilationUnit* cUnit, int rBase, int count);
-LIR* OpVstm(CompilationUnit* cUnit, int rBase, int count);
-LIR* StoreBaseDispBody(CompilationUnit* cUnit, int rBase, int displacement, int rSrc, int rSrcHi, OpSize size);
-LIR* StoreBaseDisp(CompilationUnit* cUnit, int rBase, int displacement, int rSrc, OpSize size);
-LIR* StoreBaseDispWide(CompilationUnit* cUnit, int rBase, int displacement, int rSrcLo, int rSrcHi);
-LIR* StoreBaseIndexed(CompilationUnit* cUnit, int rBase, int rIndex, int rSrc, int scale, OpSize size);
-LIR* StoreBaseIndexedDisp(CompilationUnit *cUnit, int rBase, int rIndex, int scale, int displacement, int rSrc, int rSrcHi, OpSize size, int sReg);
-LIR* StoreMultiple(CompilationUnit *cUnit, int rBase, int rMask);
-RegLocation ArgLoc(CompilationUnit* cUnit, RegLocation loc);
-RegLocation GenDivRem(CompilationUnit* cUnit, RegLocation rlDest, int regLo, int regHi, bool isDiv);
-RegLocation GenDivRemLit(CompilationUnit* cUnit, RegLocation rlDest, int regLo, int lit, bool isDiv);
-RegLocation LoadArg(CompilationUnit* cUnit, RegLocation loc);
+LIR* FpRegCopy(CompilationUnit* cu, int r_dest, int r_src);
+LIR* GenRegMemCheck(CompilationUnit* cu, ConditionCode c_code, int reg1, int base, int offset, ThrowKind kind);
+LIR* LoadBaseDispBody(CompilationUnit* cu, int rBase, int displacement, int r_dest, int r_dest_hi, OpSize size, int s_reg);
+LIR* LoadBaseDisp(CompilationUnit* cu, int rBase, int displacement, int r_dest, OpSize size, int s_reg);
+LIR* LoadBaseDispWide(CompilationUnit* cu, int rBase, int displacement, int r_dest_lo, int r_dest_hi, int s_reg);
+LIR* LoadBaseIndexed(CompilationUnit* cu, int rBase, int r_index, int r_dest, int scale, OpSize size);
+LIR* LoadBaseIndexedDisp(CompilationUnit *cu, int rBase, int r_index, int scale, int displacement, int r_dest, int r_dest_hi, OpSize size, int s_reg);
+LIR* LoadConstantNoClobber(CompilationUnit* cu, int r_dest, int value);
+LIR* LoadConstantValueWide(CompilationUnit* cu, int r_dest_lo, int r_dest_hi, int val_lo, int val_hi);
+LIR* LoadMultiple(CompilationUnit *cu, int rBase, int r_mask);
+LIR* OpBranchUnconditional(CompilationUnit* cu, OpKind op);
+LIR* OpCmpBranch(CompilationUnit* cu, ConditionCode cond, int src1, int src2, LIR* target);
+LIR* OpCmpImmBranch(CompilationUnit* cu, ConditionCode cond, int reg, int check_value, LIR* target);
+LIR* OpCondBranch(CompilationUnit* cu, ConditionCode cc, LIR* target);
+LIR* OpDecAndBranch(CompilationUnit* cu, ConditionCode c_code, int reg, LIR* target);
+LIR* OpIT(CompilationUnit* cu, ArmConditionCode cond, const char* guide);
+LIR* OpMem(CompilationUnit* cu, OpKind op, int rBase, int disp);
+LIR* OpPcRelLoad(CompilationUnit* cu, int reg, LIR* target);
+LIR* OpReg(CompilationUnit* cu, OpKind op, int r_dest_src);
+LIR* OpRegCopy(CompilationUnit* cu, int r_dest, int r_src);
+LIR* OpRegCopyNoInsert(CompilationUnit* cu, int r_dest, int r_src);
+LIR* OpRegImm(CompilationUnit* cu, OpKind op, int r_dest_src1, int value);
+LIR* OpRegMem(CompilationUnit* cu, OpKind op, int r_dest, int rBase, int offset);
+LIR* OpRegReg(CompilationUnit* cu, OpKind op, int r_dest_src1, int r_src2);
+LIR* OpRegRegImm(CompilationUnit* cu, OpKind op, int r_dest, int r_src1, int value);
+LIR* OpRegRegReg(CompilationUnit* cu, OpKind op, int r_dest, int r_src1, int r_src2);
+LIR* OpRegRegRegShift(CompilationUnit* cu, OpKind op, int r_dest, int r_src1, int r_src2, int shift);
+LIR* OpRegRegShift(CompilationUnit* cu, OpKind op, int r_dest_src1, int r_src2, int shift);
+LIR* OpTestSuspend(CompilationUnit* cu, LIR* target);
+LIR* OpThreadMem(CompilationUnit* cu, OpKind op, int thread_offset);
+LIR* OpVldm(CompilationUnit* cu, int rBase, int count);
+LIR* OpVstm(CompilationUnit* cu, int rBase, int count);
+LIR* StoreBaseDispBody(CompilationUnit* cu, int rBase, int displacement, int r_src, int r_src_hi, OpSize size);
+LIR* StoreBaseDisp(CompilationUnit* cu, int rBase, int displacement, int r_src, OpSize size);
+LIR* StoreBaseDispWide(CompilationUnit* cu, int rBase, int displacement, int r_src_lo, int r_src_hi);
+LIR* StoreBaseIndexed(CompilationUnit* cu, int rBase, int r_index, int r_src, int scale, OpSize size);
+LIR* StoreBaseIndexedDisp(CompilationUnit *cu, int rBase, int r_index, int scale, int displacement, int r_src, int r_src_hi, OpSize size, int s_reg);
+LIR* StoreMultiple(CompilationUnit *cu, int rBase, int r_mask);
+RegLocation ArgLoc(CompilationUnit* cu, RegLocation loc);
+RegLocation GenDivRem(CompilationUnit* cu, RegLocation rl_dest, int reg_lo, int reg_hi, bool is_div);
+RegLocation GenDivRemLit(CompilationUnit* cu, RegLocation rl_dest, int reg_lo, int lit, bool is_div);
+RegLocation LoadArg(CompilationUnit* cu, RegLocation loc);
RegLocation LocCReturn();
RegLocation LocCReturnDouble();
RegLocation LocCReturnFloat();
RegLocation LocCReturnWide();
-std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* baseAddr);
-uint64_t GetRegMaskCommon(CompilationUnit* cUnit, int reg);
+std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr);
+uint64_t GetRegMaskCommon(CompilationUnit* cu, int reg);
uint32_t FpRegMask();
uint32_t FpRegMask();
uint64_t GetPCUseDefEncoding();
-void FreeRegLocTemps(CompilationUnit* cUnit, RegLocation rlKeep, RegLocation rlFree);
-void GenCmpLong(CompilationUnit* cUnit, RegLocation rlDest, RegLocation rlSrc1, RegLocation rlSrc2);
-void GenDivZeroCheck(CompilationUnit* cUnit, int regLo, int regHi);
-void GenEntrySequence(CompilationUnit* cUnit, RegLocation* ArgLocs, RegLocation rlMethod);
-void GenExitSequence(CompilationUnit* cUnit);
-void GenFillArrayData(CompilationUnit* cUnit, uint32_t tableOffset, RegLocation rlSrc);
-void GenFusedFPCmpBranch(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir, bool gtBias, bool isDouble);
-void GenFusedLongCmpBranch(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir);
-void GenMonitorEnter(CompilationUnit* cUnit, int optFlags, RegLocation rlSrc);
-void GenMonitorExit(CompilationUnit* cUnit, int optFlags, RegLocation rlSrc);
-void GenMultiplyByTwoBitMultiplier(CompilationUnit* cUnit, RegLocation rlSrc, RegLocation rlResult, int lit, int firstBit, int secondBit);
-void GenNegDouble(CompilationUnit* cUnit, RegLocation rlDest, RegLocation rlSrc);
-void GenNegFloat(CompilationUnit* cUnit, RegLocation rlDest, RegLocation rlSrc);
-void GenPackedSwitch(CompilationUnit* cUnit, uint32_t tableOffset, RegLocation rlSrc);
-void GenPrintLabel(CompilationUnit *cUnit, MIR* mir);
-void GenSparseSwitch(CompilationUnit* cUnit, uint32_t tableOffset, RegLocation rlSrc);
-void GenSpecialCase(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir, SpecialCaseHandler specialCase);
-void LoadPair(CompilationUnit* cUnit, int base, int lowReg, int highReg);
-void MarkGCCard(CompilationUnit* cUnit, int valReg, int tgtAddrReg);
-void AdjustSpillMask(CompilationUnit* cUnit);
-void ClobberCalleeSave(CompilationUnit *cUnit);
+void FreeRegLocTemps(CompilationUnit* cu, RegLocation rl_keep, RegLocation rl_free);
+void GenCmpLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+void GenDivZeroCheck(CompilationUnit* cu, int reg_lo, int reg_hi);
+void GenEntrySequence(CompilationUnit* cu, RegLocation* ArgLocs, RegLocation rl_method);
+void GenExitSequence(CompilationUnit* cu);
+void GenFillArrayData(CompilationUnit* cu, uint32_t table_offset, RegLocation rl_src);
+void GenFusedFPCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double);
+void GenFusedLongCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir);
+void GenMonitorEnter(CompilationUnit* cu, int opt_flags, RegLocation rl_src);
+void GenMonitorExit(CompilationUnit* cu, int opt_flags, RegLocation rl_src);
+void GenMultiplyByTwoBitMultiplier(CompilationUnit* cu, RegLocation rl_src, RegLocation rl_result, int lit, int first_bit, int second_bit);
+void GenNegDouble(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
+void GenNegFloat(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
+void GenPackedSwitch(CompilationUnit* cu, uint32_t table_offset, RegLocation rl_src);
+void GenPrintLabel(CompilationUnit *cu, MIR* mir);
+void GenSparseSwitch(CompilationUnit* cu, uint32_t table_offset, RegLocation rl_src);
+void GenSpecialCase(CompilationUnit* cu, BasicBlock* bb, MIR* mir, SpecialCaseHandler special_case);
+void LoadPair(CompilationUnit* cu, int base, int low_reg, int high_reg);
+void MarkGCCard(CompilationUnit* cu, int val_reg, int tgt_addr_reg);
+void AdjustSpillMask(CompilationUnit* cu);
+void ClobberCalleeSave(CompilationUnit *cu);
void DumpResourceMask(LIR* lir, uint64_t mask, const char* prefix);
-void FlushReg(CompilationUnit* cUnit, int reg);
-void FlushRegWide(CompilationUnit* cUnit, int reg1, int reg2);
-void GenMemBarrier(CompilationUnit* cUnit, MemBarrierKind barrierKind);
-void CompilerInitializeRegAlloc(CompilationUnit* cUnit);
-void MarkPreservedSingle(CompilationUnit* cUnit, int vReg, int reg);
+void FlushReg(CompilationUnit* cu, int reg);
+void FlushRegWide(CompilationUnit* cu, int reg1, int reg2);
+void GenMemBarrier(CompilationUnit* cu, MemBarrierKind barrier_kind);
+void CompilerInitializeRegAlloc(CompilationUnit* cu);
+void MarkPreservedSingle(CompilationUnit* cu, int v_reg, int reg);
void NopLIR(LIR* lir);
-void OpLea(CompilationUnit* cUnit, int rBase, int reg1, int reg2, int scale, int offset);
-void OpRegCopyWide(CompilationUnit* cUnit, int destLo, int destHi, int srcLo, int srcHi);
-void OpRegThreadMem(CompilationUnit* cUnit, OpKind op, int rDest, int threadOffset);
-void OpTlsCmp(CompilationUnit* cUnit, int offset, int val);
+void OpLea(CompilationUnit* cu, int rBase, int reg1, int reg2, int scale, int offset);
+void OpRegCopyWide(CompilationUnit* cu, int dest_lo, int dest_hi, int src_lo, int src_hi);
+void OpRegThreadMem(CompilationUnit* cu, OpKind op, int r_dest, int thread_offset);
+void OpTlsCmp(CompilationUnit* cu, int offset, int val);
bool BranchUnconditional(LIR* lir);
-void SetupTargetResourceMasks(CompilationUnit* cUnit, LIR* lir);
-void SpillCoreRegs(CompilationUnit* cUnit);
-void UnSpillCoreRegs(CompilationUnit* cUnit);
+void SetupTargetResourceMasks(CompilationUnit* cu, LIR* lir);
+void SpillCoreRegs(CompilationUnit* cu);
+void UnSpillCoreRegs(CompilationUnit* cu);
X86ConditionCode X86ConditionEncoding(ConditionCode cond);
uint64_t GetTargetInstFlags(int opcode);
const char* GetTargetInstName(int opcode);
diff --git a/src/compiler/codegen/x86/assemble_x86.cc b/src/compiler/codegen/x86/assemble_x86.cc
index 61cc7d9..78ba331 100644
--- a/src/compiler/codegen/x86/assemble_x86.cc
+++ b/src/compiler/codegen/x86/assemble_x86.cc
@@ -518,34 +518,34 @@
}
}
-static void EmitDisp(CompilationUnit* cUnit, int base, int disp) {
+static void EmitDisp(CompilationUnit* cu, int base, int disp) {
// BP requires an explicit disp, so do not omit it in the 0 case
if (disp == 0 && base != rBP) {
return;
} else if (IS_SIMM8(disp)) {
- cUnit->codeBuffer.push_back(disp & 0xFF);
+ cu->code_buffer.push_back(disp & 0xFF);
} else {
- cUnit->codeBuffer.push_back(disp & 0xFF);
- cUnit->codeBuffer.push_back((disp >> 8) & 0xFF);
- cUnit->codeBuffer.push_back((disp >> 16) & 0xFF);
- cUnit->codeBuffer.push_back((disp >> 24) & 0xFF);
+ cu->code_buffer.push_back(disp & 0xFF);
+ cu->code_buffer.push_back((disp >> 8) & 0xFF);
+ cu->code_buffer.push_back((disp >> 16) & 0xFF);
+ cu->code_buffer.push_back((disp >> 24) & 0xFF);
}
}
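
The displacement logic follows the standard x86 rule that a ModRM byte may be followed by no displacement, one signed byte, or four little-endian bytes. A self-contained sketch of the same decision (illustrative; the rBP special case is reduced to a flag):

#include <cassert>
#include <cstdint>
#include <vector>

static bool IsSimm8(int32_t disp) { return disp >= -128 && disp <= 127; }

// Mirrors EmitDisp: rBP-based addressing has no "zero displacement"
// encoding, so a 0 disp is only omitted for other base registers.
static void EmitDispSketch(std::vector<uint8_t>& buf, int32_t disp,
                           bool base_is_bp) {
  if (disp == 0 && !base_is_bp) {
    return;                        // no displacement byte at all
  } else if (IsSimm8(disp)) {
    buf.push_back(disp & 0xFF);    // one signed byte
  } else {
    buf.push_back(disp & 0xFF);    // 32 bits, least significant byte first
    buf.push_back((disp >> 8) & 0xFF);
    buf.push_back((disp >> 16) & 0xFF);
    buf.push_back((disp >> 24) & 0xFF);
  }
}

int main() {
  std::vector<uint8_t> buf;
  EmitDispSketch(buf, 0, false);
  assert(buf.empty());
  EmitDispSketch(buf, -4, false);
  assert(buf.size() == 1 && buf[0] == 0xFC);
  EmitDispSketch(buf, 0x12345678, false);
  assert(buf.size() == 5 && buf[1] == 0x78 && buf[4] == 0x12);
  return 0;
}
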
-static void EmitOpReg(CompilationUnit* cUnit, const X86EncodingMap* entry, uint8_t reg) {
+static void EmitOpReg(CompilationUnit* cu, const X86EncodingMap* entry, uint8_t reg) {
if (entry->skeleton.prefix1 != 0) {
- cUnit->codeBuffer.push_back(entry->skeleton.prefix1);
+ cu->code_buffer.push_back(entry->skeleton.prefix1);
if (entry->skeleton.prefix2 != 0) {
- cUnit->codeBuffer.push_back(entry->skeleton.prefix2);
+ cu->code_buffer.push_back(entry->skeleton.prefix2);
}
} else {
DCHECK_EQ(0, entry->skeleton.prefix2);
}
- cUnit->codeBuffer.push_back(entry->skeleton.opcode);
+ cu->code_buffer.push_back(entry->skeleton.opcode);
if (entry->skeleton.opcode == 0x0F) {
- cUnit->codeBuffer.push_back(entry->skeleton.extra_opcode1);
+ cu->code_buffer.push_back(entry->skeleton.extra_opcode1);
if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode2 == 0x3A) {
- cUnit->codeBuffer.push_back(entry->skeleton.extra_opcode2);
+ cu->code_buffer.push_back(entry->skeleton.extra_opcode2);
} else {
DCHECK_EQ(0, entry->skeleton.extra_opcode2);
}
@@ -558,51 +558,51 @@
}
if (reg >= 4) {
DCHECK(strchr(entry->name, '8') == NULL) << entry->name << " " << static_cast<int>(reg)
- << " in " << PrettyMethod(cUnit->method_idx, *cUnit->dex_file);
+ << " in " << PrettyMethod(cu->method_idx, *cu->dex_file);
}
DCHECK_LT(reg, 8);
uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | reg;
- cUnit->codeBuffer.push_back(modrm);
+ cu->code_buffer.push_back(modrm);
DCHECK_EQ(0, entry->skeleton.ax_opcode);
DCHECK_EQ(0, entry->skeleton.immediate_bytes);
}
-static void EmitOpMem(CompilationUnit* cUnit, const X86EncodingMap* entry, uint8_t base, int disp) {
+static void EmitOpMem(CompilationUnit* cu, const X86EncodingMap* entry, uint8_t base, int disp) {
if (entry->skeleton.prefix1 != 0) {
- cUnit->codeBuffer.push_back(entry->skeleton.prefix1);
+ cu->code_buffer.push_back(entry->skeleton.prefix1);
if (entry->skeleton.prefix2 != 0) {
- cUnit->codeBuffer.push_back(entry->skeleton.prefix2);
+ cu->code_buffer.push_back(entry->skeleton.prefix2);
}
} else {
DCHECK_EQ(0, entry->skeleton.prefix2);
}
- cUnit->codeBuffer.push_back(entry->skeleton.opcode);
+ cu->code_buffer.push_back(entry->skeleton.opcode);
DCHECK_EQ(0, entry->skeleton.extra_opcode1);
DCHECK_EQ(0, entry->skeleton.extra_opcode2);
DCHECK_LT(entry->skeleton.modrm_opcode, 8);
DCHECK_LT(base, 8);
uint8_t modrm = (ModrmForDisp(base, disp) << 6) | (entry->skeleton.modrm_opcode << 3) | base;
- cUnit->codeBuffer.push_back(modrm);
- EmitDisp(cUnit, base, disp);
+ cu->code_buffer.push_back(modrm);
+ EmitDisp(cu, base, disp);
DCHECK_EQ(0, entry->skeleton.ax_opcode);
DCHECK_EQ(0, entry->skeleton.immediate_bytes);
}
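
Every emitter in this file builds its ModRM byte with the same shift-and-or; for reference, a hedged sketch of the bit layout (standard x86 encoding, nothing ART-specific):

#include <cassert>
#include <cstdint>

// ModRM packs three fields: mod (2 bits, addressing form), reg/opcode
// (3 bits, a register number or an opcode extension like modrm_opcode),
// and rm (3 bits, usually the base register).
static uint8_t ModRM(int mod, int reg_or_opcode, int rm) {
  assert(mod < 4 && reg_or_opcode < 8 && rm < 8);
  return (mod << 6) | (reg_or_opcode << 3) | rm;
}

int main() {
  // Register-direct form, as in EmitOpReg: mod = 3.
  assert(ModRM(3, 5, 1) == 0xE9);
  // Base + 8-bit displacement, as ModrmForDisp would select: mod = 1.
  assert(ModRM(1, 0, 6) == 0x46);
  return 0;
}
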
-static void EmitMemReg(CompilationUnit* cUnit, const X86EncodingMap* entry,
+static void EmitMemReg(CompilationUnit* cu, const X86EncodingMap* entry,
uint8_t base, int disp, uint8_t reg) {
if (entry->skeleton.prefix1 != 0) {
- cUnit->codeBuffer.push_back(entry->skeleton.prefix1);
+ cu->code_buffer.push_back(entry->skeleton.prefix1);
if (entry->skeleton.prefix2 != 0) {
- cUnit->codeBuffer.push_back(entry->skeleton.prefix2);
+ cu->code_buffer.push_back(entry->skeleton.prefix2);
}
} else {
DCHECK_EQ(0, entry->skeleton.prefix2);
}
- cUnit->codeBuffer.push_back(entry->skeleton.opcode);
+ cu->code_buffer.push_back(entry->skeleton.opcode);
if (entry->skeleton.opcode == 0x0F) {
- cUnit->codeBuffer.push_back(entry->skeleton.extra_opcode1);
+ cu->code_buffer.push_back(entry->skeleton.extra_opcode1);
if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode2 == 0x3A) {
- cUnit->codeBuffer.push_back(entry->skeleton.extra_opcode2);
+ cu->code_buffer.push_back(entry->skeleton.extra_opcode2);
} else {
DCHECK_EQ(0, entry->skeleton.extra_opcode2);
}
@@ -615,43 +615,43 @@
}
if (reg >= 4) {
DCHECK(strchr(entry->name, '8') == NULL) << entry->name << " " << static_cast<int>(reg)
- << " in " << PrettyMethod(cUnit->method_idx, *cUnit->dex_file);
+ << " in " << PrettyMethod(cu->method_idx, *cu->dex_file);
}
DCHECK_LT(reg, 8);
DCHECK_LT(base, 8);
uint8_t modrm = (ModrmForDisp(base, disp) << 6) | (reg << 3) | base;
- cUnit->codeBuffer.push_back(modrm);
+ cu->code_buffer.push_back(modrm);
if (base == rX86_SP) {
// Special SIB for SP base
- cUnit->codeBuffer.push_back(0 << 6 | (rX86_SP << 3) | rX86_SP);
+ cu->code_buffer.push_back(0 << 6 | (rX86_SP << 3) | rX86_SP);
}
- EmitDisp(cUnit, base, disp);
+ EmitDisp(cu, base, disp);
DCHECK_EQ(0, entry->skeleton.modrm_opcode);
DCHECK_EQ(0, entry->skeleton.ax_opcode);
DCHECK_EQ(0, entry->skeleton.immediate_bytes);
}
-static void EmitRegMem(CompilationUnit* cUnit, const X86EncodingMap* entry,
+static void EmitRegMem(CompilationUnit* cu, const X86EncodingMap* entry,
uint8_t reg, uint8_t base, int disp) {
// Opcode will flip operands.
- EmitMemReg(cUnit, entry, base, disp, reg);
+ EmitMemReg(cu, entry, base, disp, reg);
}
-static void EmitRegArray(CompilationUnit* cUnit, const X86EncodingMap* entry, uint8_t reg,
+static void EmitRegArray(CompilationUnit* cu, const X86EncodingMap* entry, uint8_t reg,
uint8_t base, uint8_t index, int scale, int disp) {
if (entry->skeleton.prefix1 != 0) {
- cUnit->codeBuffer.push_back(entry->skeleton.prefix1);
+ cu->code_buffer.push_back(entry->skeleton.prefix1);
if (entry->skeleton.prefix2 != 0) {
- cUnit->codeBuffer.push_back(entry->skeleton.prefix2);
+ cu->code_buffer.push_back(entry->skeleton.prefix2);
}
} else {
DCHECK_EQ(0, entry->skeleton.prefix2);
}
- cUnit->codeBuffer.push_back(entry->skeleton.opcode);
+ cu->code_buffer.push_back(entry->skeleton.opcode);
if (entry->skeleton.opcode == 0x0F) {
- cUnit->codeBuffer.push_back(entry->skeleton.extra_opcode1);
+ cu->code_buffer.push_back(entry->skeleton.extra_opcode1);
if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode2 == 0x3A) {
- cUnit->codeBuffer.push_back(entry->skeleton.extra_opcode2);
+ cu->code_buffer.push_back(entry->skeleton.extra_opcode2);
} else {
DCHECK_EQ(0, entry->skeleton.extra_opcode2);
}
@@ -664,36 +664,36 @@
}
DCHECK_LT(reg, 8);
uint8_t modrm = (ModrmForDisp(base, disp) << 6) | (reg << 3) | rX86_SP;
- cUnit->codeBuffer.push_back(modrm);
+ cu->code_buffer.push_back(modrm);
DCHECK_LT(scale, 4);
DCHECK_LT(index, 8);
DCHECK_LT(base, 8);
uint8_t sib = (scale << 6) | (index << 3) | base;
- cUnit->codeBuffer.push_back(sib);
- EmitDisp(cUnit, base, disp);
+ cu->code_buffer.push_back(sib);
+ EmitDisp(cu, base, disp);
DCHECK_EQ(0, entry->skeleton.modrm_opcode);
DCHECK_EQ(0, entry->skeleton.ax_opcode);
DCHECK_EQ(0, entry->skeleton.immediate_bytes);
}
-static void EmitArrayReg(CompilationUnit* cUnit, const X86EncodingMap* entry,
+static void EmitArrayReg(CompilationUnit* cu, const X86EncodingMap* entry,
uint8_t base, uint8_t index, int scale, int disp, uint8_t reg) {
// Opcode will flip operands.
- EmitRegArray(cUnit, entry, reg, base, index, scale, disp);
+ EmitRegArray(cu, entry, reg, base, index, scale, disp);
}
-static void EmitRegThread(CompilationUnit* cUnit, const X86EncodingMap* entry,
+static void EmitRegThread(CompilationUnit* cu, const X86EncodingMap* entry,
uint8_t reg, int disp) {
DCHECK_NE(entry->skeleton.prefix1, 0);
- cUnit->codeBuffer.push_back(entry->skeleton.prefix1);
+ cu->code_buffer.push_back(entry->skeleton.prefix1);
if (entry->skeleton.prefix2 != 0) {
- cUnit->codeBuffer.push_back(entry->skeleton.prefix2);
+ cu->code_buffer.push_back(entry->skeleton.prefix2);
}
- cUnit->codeBuffer.push_back(entry->skeleton.opcode);
+ cu->code_buffer.push_back(entry->skeleton.opcode);
if (entry->skeleton.opcode == 0x0F) {
- cUnit->codeBuffer.push_back(entry->skeleton.extra_opcode1);
+ cu->code_buffer.push_back(entry->skeleton.extra_opcode1);
if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode2 == 0x3A) {
- cUnit->codeBuffer.push_back(entry->skeleton.extra_opcode2);
+ cu->code_buffer.push_back(entry->skeleton.extra_opcode2);
} else {
DCHECK_EQ(0, entry->skeleton.extra_opcode2);
}
@@ -706,35 +706,35 @@
}
if (reg >= 4) {
DCHECK(strchr(entry->name, '8') == NULL) << entry->name << " " << static_cast<int>(reg)
- << " in " << PrettyMethod(cUnit->method_idx, *cUnit->dex_file);
+ << " in " << PrettyMethod(cu->method_idx, *cu->dex_file);
}
DCHECK_LT(reg, 8);
uint8_t modrm = (0 << 6) | (reg << 3) | rBP;
- cUnit->codeBuffer.push_back(modrm);
- cUnit->codeBuffer.push_back(disp & 0xFF);
- cUnit->codeBuffer.push_back((disp >> 8) & 0xFF);
- cUnit->codeBuffer.push_back((disp >> 16) & 0xFF);
- cUnit->codeBuffer.push_back((disp >> 24) & 0xFF);
+ cu->code_buffer.push_back(modrm);
+ cu->code_buffer.push_back(disp & 0xFF);
+ cu->code_buffer.push_back((disp >> 8) & 0xFF);
+ cu->code_buffer.push_back((disp >> 16) & 0xFF);
+ cu->code_buffer.push_back((disp >> 24) & 0xFF);
DCHECK_EQ(0, entry->skeleton.modrm_opcode);
DCHECK_EQ(0, entry->skeleton.ax_opcode);
DCHECK_EQ(0, entry->skeleton.immediate_bytes);
}
-static void EmitRegReg(CompilationUnit* cUnit, const X86EncodingMap* entry,
+static void EmitRegReg(CompilationUnit* cu, const X86EncodingMap* entry,
uint8_t reg1, uint8_t reg2) {
if (entry->skeleton.prefix1 != 0) {
- cUnit->codeBuffer.push_back(entry->skeleton.prefix1);
+ cu->code_buffer.push_back(entry->skeleton.prefix1);
if (entry->skeleton.prefix2 != 0) {
- cUnit->codeBuffer.push_back(entry->skeleton.prefix2);
+ cu->code_buffer.push_back(entry->skeleton.prefix2);
}
} else {
DCHECK_EQ(0, entry->skeleton.prefix2);
}
- cUnit->codeBuffer.push_back(entry->skeleton.opcode);
+ cu->code_buffer.push_back(entry->skeleton.opcode);
if (entry->skeleton.opcode == 0x0F) {
- cUnit->codeBuffer.push_back(entry->skeleton.extra_opcode1);
+ cu->code_buffer.push_back(entry->skeleton.extra_opcode1);
if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode2 == 0x3A) {
- cUnit->codeBuffer.push_back(entry->skeleton.extra_opcode2);
+ cu->code_buffer.push_back(entry->skeleton.extra_opcode2);
} else {
DCHECK_EQ(0, entry->skeleton.extra_opcode2);
}
@@ -751,27 +751,27 @@
DCHECK_LT(reg1, 8);
DCHECK_LT(reg2, 8);
uint8_t modrm = (3 << 6) | (reg1 << 3) | reg2;
- cUnit->codeBuffer.push_back(modrm);
+ cu->code_buffer.push_back(modrm);
DCHECK_EQ(0, entry->skeleton.modrm_opcode);
DCHECK_EQ(0, entry->skeleton.ax_opcode);
DCHECK_EQ(0, entry->skeleton.immediate_bytes);
}
-static void EmitRegRegImm(CompilationUnit* cUnit, const X86EncodingMap* entry,
+static void EmitRegRegImm(CompilationUnit* cu, const X86EncodingMap* entry,
uint8_t reg1, uint8_t reg2, int32_t imm) {
if (entry->skeleton.prefix1 != 0) {
- cUnit->codeBuffer.push_back(entry->skeleton.prefix1);
+ cu->code_buffer.push_back(entry->skeleton.prefix1);
if (entry->skeleton.prefix2 != 0) {
- cUnit->codeBuffer.push_back(entry->skeleton.prefix2);
+ cu->code_buffer.push_back(entry->skeleton.prefix2);
}
} else {
DCHECK_EQ(0, entry->skeleton.prefix2);
}
- cUnit->codeBuffer.push_back(entry->skeleton.opcode);
+ cu->code_buffer.push_back(entry->skeleton.opcode);
if (entry->skeleton.opcode == 0x0F) {
- cUnit->codeBuffer.push_back(entry->skeleton.extra_opcode1);
+ cu->code_buffer.push_back(entry->skeleton.extra_opcode1);
if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode2 == 0x3A) {
- cUnit->codeBuffer.push_back(entry->skeleton.extra_opcode2);
+ cu->code_buffer.push_back(entry->skeleton.extra_opcode2);
} else {
DCHECK_EQ(0, entry->skeleton.extra_opcode2);
}
@@ -788,24 +788,24 @@
DCHECK_LT(reg1, 8);
DCHECK_LT(reg2, 8);
uint8_t modrm = (3 << 6) | (reg1 << 3) | reg2;
- cUnit->codeBuffer.push_back(modrm);
+ cu->code_buffer.push_back(modrm);
DCHECK_EQ(0, entry->skeleton.modrm_opcode);
DCHECK_EQ(0, entry->skeleton.ax_opcode);
switch (entry->skeleton.immediate_bytes) {
case 1:
DCHECK(IS_SIMM8(imm));
- cUnit->codeBuffer.push_back(imm & 0xFF);
+ cu->code_buffer.push_back(imm & 0xFF);
break;
case 2:
DCHECK(IS_SIMM16(imm));
- cUnit->codeBuffer.push_back(imm & 0xFF);
- cUnit->codeBuffer.push_back((imm >> 8) & 0xFF);
+ cu->code_buffer.push_back(imm & 0xFF);
+ cu->code_buffer.push_back((imm >> 8) & 0xFF);
break;
case 4:
- cUnit->codeBuffer.push_back(imm & 0xFF);
- cUnit->codeBuffer.push_back((imm >> 8) & 0xFF);
- cUnit->codeBuffer.push_back((imm >> 16) & 0xFF);
- cUnit->codeBuffer.push_back((imm >> 24) & 0xFF);
+ cu->code_buffer.push_back(imm & 0xFF);
+ cu->code_buffer.push_back((imm >> 8) & 0xFF);
+ cu->code_buffer.push_back((imm >> 16) & 0xFF);
+ cu->code_buffer.push_back((imm >> 24) & 0xFF);
break;
default:
LOG(FATAL) << "Unexpected immediate bytes (" << entry->skeleton.immediate_bytes
@@ -814,24 +814,24 @@
}
}
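
The immediate_bytes switch above appends the constant to the code buffer least-significant byte first, x86's little-endian order. The same idea as a self-contained helper (the std::vector<uint8_t> buffer type is an assumption matching how code_buffer is used here):

#include <cstdint>
#include <vector>

// Append a 32-bit immediate in little-endian order, as the 4-byte
// case above does one push_back at a time.
static void PushImm32(std::vector<uint8_t>& buf, int32_t imm) {
  buf.push_back(imm & 0xFF);
  buf.push_back((imm >> 8) & 0xFF);
  buf.push_back((imm >> 16) & 0xFF);
  buf.push_back((imm >> 24) & 0xFF);
}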
-static void EmitRegImm(CompilationUnit* cUnit, const X86EncodingMap* entry,
+static void EmitRegImm(CompilationUnit* cu, const X86EncodingMap* entry,
uint8_t reg, int imm) {
if (entry->skeleton.prefix1 != 0) {
- cUnit->codeBuffer.push_back(entry->skeleton.prefix1);
+ cu->code_buffer.push_back(entry->skeleton.prefix1);
if (entry->skeleton.prefix2 != 0) {
- cUnit->codeBuffer.push_back(entry->skeleton.prefix2);
+ cu->code_buffer.push_back(entry->skeleton.prefix2);
}
} else {
DCHECK_EQ(0, entry->skeleton.prefix2);
}
if (reg == rAX && entry->skeleton.ax_opcode != 0) {
- cUnit->codeBuffer.push_back(entry->skeleton.ax_opcode);
+ cu->code_buffer.push_back(entry->skeleton.ax_opcode);
} else {
- cUnit->codeBuffer.push_back(entry->skeleton.opcode);
+ cu->code_buffer.push_back(entry->skeleton.opcode);
if (entry->skeleton.opcode == 0x0F) {
- cUnit->codeBuffer.push_back(entry->skeleton.extra_opcode1);
+ cu->code_buffer.push_back(entry->skeleton.extra_opcode1);
if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode2 == 0x3A) {
- cUnit->codeBuffer.push_back(entry->skeleton.extra_opcode2);
+ cu->code_buffer.push_back(entry->skeleton.extra_opcode2);
} else {
DCHECK_EQ(0, entry->skeleton.extra_opcode2);
}
@@ -843,23 +843,23 @@
reg = reg & X86_FP_REG_MASK;
}
uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | reg;
- cUnit->codeBuffer.push_back(modrm);
+ cu->code_buffer.push_back(modrm);
}
switch (entry->skeleton.immediate_bytes) {
case 1:
DCHECK(IS_SIMM8(imm));
- cUnit->codeBuffer.push_back(imm & 0xFF);
+ cu->code_buffer.push_back(imm & 0xFF);
break;
case 2:
DCHECK(IS_SIMM16(imm));
- cUnit->codeBuffer.push_back(imm & 0xFF);
- cUnit->codeBuffer.push_back((imm >> 8) & 0xFF);
+ cu->code_buffer.push_back(imm & 0xFF);
+ cu->code_buffer.push_back((imm >> 8) & 0xFF);
break;
case 4:
- cUnit->codeBuffer.push_back(imm & 0xFF);
- cUnit->codeBuffer.push_back((imm >> 8) & 0xFF);
- cUnit->codeBuffer.push_back((imm >> 16) & 0xFF);
- cUnit->codeBuffer.push_back((imm >> 24) & 0xFF);
+ cu->code_buffer.push_back(imm & 0xFF);
+ cu->code_buffer.push_back((imm >> 8) & 0xFF);
+ cu->code_buffer.push_back((imm >> 16) & 0xFF);
+ cu->code_buffer.push_back((imm >> 24) & 0xFF);
break;
default:
LOG(FATAL) << "Unexpected immediate bytes (" << entry->skeleton.immediate_bytes
@@ -868,21 +868,21 @@
}
}
-static void EmitThreadImm(CompilationUnit* cUnit, const X86EncodingMap* entry,
+static void EmitThreadImm(CompilationUnit* cu, const X86EncodingMap* entry,
int disp, int imm) {
if (entry->skeleton.prefix1 != 0) {
- cUnit->codeBuffer.push_back(entry->skeleton.prefix1);
+ cu->code_buffer.push_back(entry->skeleton.prefix1);
if (entry->skeleton.prefix2 != 0) {
- cUnit->codeBuffer.push_back(entry->skeleton.prefix2);
+ cu->code_buffer.push_back(entry->skeleton.prefix2);
}
} else {
DCHECK_EQ(0, entry->skeleton.prefix2);
}
- cUnit->codeBuffer.push_back(entry->skeleton.opcode);
+ cu->code_buffer.push_back(entry->skeleton.opcode);
if (entry->skeleton.opcode == 0x0F) {
- cUnit->codeBuffer.push_back(entry->skeleton.extra_opcode1);
+ cu->code_buffer.push_back(entry->skeleton.extra_opcode1);
if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode2 == 0x3A) {
- cUnit->codeBuffer.push_back(entry->skeleton.extra_opcode2);
+ cu->code_buffer.push_back(entry->skeleton.extra_opcode2);
} else {
DCHECK_EQ(0, entry->skeleton.extra_opcode2);
}
@@ -891,26 +891,26 @@
DCHECK_EQ(0, entry->skeleton.extra_opcode2);
}
uint8_t modrm = (0 << 6) | (entry->skeleton.modrm_opcode << 3) | rBP;
- cUnit->codeBuffer.push_back(modrm);
- cUnit->codeBuffer.push_back(disp & 0xFF);
- cUnit->codeBuffer.push_back((disp >> 8) & 0xFF);
- cUnit->codeBuffer.push_back((disp >> 16) & 0xFF);
- cUnit->codeBuffer.push_back((disp >> 24) & 0xFF);
+ cu->code_buffer.push_back(modrm);
+ cu->code_buffer.push_back(disp & 0xFF);
+ cu->code_buffer.push_back((disp >> 8) & 0xFF);
+ cu->code_buffer.push_back((disp >> 16) & 0xFF);
+ cu->code_buffer.push_back((disp >> 24) & 0xFF);
switch (entry->skeleton.immediate_bytes) {
case 1:
DCHECK(IS_SIMM8(imm));
- cUnit->codeBuffer.push_back(imm & 0xFF);
+ cu->code_buffer.push_back(imm & 0xFF);
break;
case 2:
DCHECK(IS_SIMM16(imm));
- cUnit->codeBuffer.push_back(imm & 0xFF);
- cUnit->codeBuffer.push_back((imm >> 8) & 0xFF);
+ cu->code_buffer.push_back(imm & 0xFF);
+ cu->code_buffer.push_back((imm >> 8) & 0xFF);
break;
case 4:
- cUnit->codeBuffer.push_back(imm & 0xFF);
- cUnit->codeBuffer.push_back((imm >> 8) & 0xFF);
- cUnit->codeBuffer.push_back((imm >> 16) & 0xFF);
- cUnit->codeBuffer.push_back((imm >> 24) & 0xFF);
+ cu->code_buffer.push_back(imm & 0xFF);
+ cu->code_buffer.push_back((imm >> 8) & 0xFF);
+ cu->code_buffer.push_back((imm >> 16) & 0xFF);
+ cu->code_buffer.push_back((imm >> 24) & 0xFF);
break;
default:
LOG(FATAL) << "Unexpected immediate bytes (" << entry->skeleton.immediate_bytes
@@ -920,36 +920,36 @@
DCHECK_EQ(entry->skeleton.ax_opcode, 0);
}
-static void EmitMovRegImm(CompilationUnit* cUnit, const X86EncodingMap* entry,
+static void EmitMovRegImm(CompilationUnit* cu, const X86EncodingMap* entry,
uint8_t reg, int imm) {
DCHECK_LT(reg, 8);
- cUnit->codeBuffer.push_back(0xB8 + reg);
- cUnit->codeBuffer.push_back(imm & 0xFF);
- cUnit->codeBuffer.push_back((imm >> 8) & 0xFF);
- cUnit->codeBuffer.push_back((imm >> 16) & 0xFF);
- cUnit->codeBuffer.push_back((imm >> 24) & 0xFF);
+ cu->code_buffer.push_back(0xB8 + reg);
+ cu->code_buffer.push_back(imm & 0xFF);
+ cu->code_buffer.push_back((imm >> 8) & 0xFF);
+ cu->code_buffer.push_back((imm >> 16) & 0xFF);
+ cu->code_buffer.push_back((imm >> 24) & 0xFF);
}
-static void EmitShiftRegImm(CompilationUnit* cUnit, const X86EncodingMap* entry,
+static void EmitShiftRegImm(CompilationUnit* cu, const X86EncodingMap* entry,
uint8_t reg, int imm) {
if (entry->skeleton.prefix1 != 0) {
- cUnit->codeBuffer.push_back(entry->skeleton.prefix1);
+ cu->code_buffer.push_back(entry->skeleton.prefix1);
if (entry->skeleton.prefix2 != 0) {
- cUnit->codeBuffer.push_back(entry->skeleton.prefix2);
+ cu->code_buffer.push_back(entry->skeleton.prefix2);
}
} else {
DCHECK_EQ(0, entry->skeleton.prefix2);
}
if (imm != 1) {
- cUnit->codeBuffer.push_back(entry->skeleton.opcode);
+ cu->code_buffer.push_back(entry->skeleton.opcode);
} else {
    // Shorter encoding for a shift by 1
- cUnit->codeBuffer.push_back(entry->skeleton.ax_opcode);
+ cu->code_buffer.push_back(entry->skeleton.ax_opcode);
}
if (entry->skeleton.opcode == 0x0F) {
- cUnit->codeBuffer.push_back(entry->skeleton.extra_opcode1);
+ cu->code_buffer.push_back(entry->skeleton.extra_opcode1);
if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode2 == 0x3A) {
- cUnit->codeBuffer.push_back(entry->skeleton.extra_opcode2);
+ cu->code_buffer.push_back(entry->skeleton.extra_opcode2);
} else {
DCHECK_EQ(0, entry->skeleton.extra_opcode2);
}
@@ -959,115 +959,115 @@
}
if (reg >= 4) {
DCHECK(strchr(entry->name, '8') == NULL) << entry->name << " " << static_cast<int>(reg)
- << " in " << PrettyMethod(cUnit->method_idx, *cUnit->dex_file);
+ << " in " << PrettyMethod(cu->method_idx, *cu->dex_file);
}
DCHECK_LT(reg, 8);
uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | reg;
- cUnit->codeBuffer.push_back(modrm);
+ cu->code_buffer.push_back(modrm);
if (imm != 1) {
DCHECK_EQ(entry->skeleton.immediate_bytes, 1);
DCHECK(IS_SIMM8(imm));
- cUnit->codeBuffer.push_back(imm & 0xFF);
+ cu->code_buffer.push_back(imm & 0xFF);
}
}
-static void EmitShiftRegCl(CompilationUnit* cUnit, const X86EncodingMap* entry,
+static void EmitShiftRegCl(CompilationUnit* cu, const X86EncodingMap* entry,
uint8_t reg, uint8_t cl) {
DCHECK_EQ(cl, static_cast<uint8_t>(rCX));
if (entry->skeleton.prefix1 != 0) {
- cUnit->codeBuffer.push_back(entry->skeleton.prefix1);
+ cu->code_buffer.push_back(entry->skeleton.prefix1);
if (entry->skeleton.prefix2 != 0) {
- cUnit->codeBuffer.push_back(entry->skeleton.prefix2);
+ cu->code_buffer.push_back(entry->skeleton.prefix2);
}
} else {
DCHECK_EQ(0, entry->skeleton.prefix2);
}
- cUnit->codeBuffer.push_back(entry->skeleton.opcode);
+ cu->code_buffer.push_back(entry->skeleton.opcode);
DCHECK_EQ(0, entry->skeleton.extra_opcode1);
DCHECK_EQ(0, entry->skeleton.extra_opcode2);
DCHECK_LT(reg, 8);
uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | reg;
- cUnit->codeBuffer.push_back(modrm);
+ cu->code_buffer.push_back(modrm);
DCHECK_EQ(0, entry->skeleton.ax_opcode);
DCHECK_EQ(0, entry->skeleton.immediate_bytes);
}
-static void EmitRegCond(CompilationUnit* cUnit, const X86EncodingMap* entry,
+static void EmitRegCond(CompilationUnit* cu, const X86EncodingMap* entry,
uint8_t reg, uint8_t condition) {
if (entry->skeleton.prefix1 != 0) {
- cUnit->codeBuffer.push_back(entry->skeleton.prefix1);
+ cu->code_buffer.push_back(entry->skeleton.prefix1);
if (entry->skeleton.prefix2 != 0) {
- cUnit->codeBuffer.push_back(entry->skeleton.prefix2);
+ cu->code_buffer.push_back(entry->skeleton.prefix2);
}
} else {
DCHECK_EQ(0, entry->skeleton.prefix2);
}
DCHECK_EQ(0, entry->skeleton.ax_opcode);
DCHECK_EQ(0x0F, entry->skeleton.opcode);
- cUnit->codeBuffer.push_back(0x0F);
+ cu->code_buffer.push_back(0x0F);
DCHECK_EQ(0x90, entry->skeleton.extra_opcode1);
- cUnit->codeBuffer.push_back(0x90 | condition);
+ cu->code_buffer.push_back(0x90 | condition);
DCHECK_EQ(0, entry->skeleton.extra_opcode2);
DCHECK_LT(reg, 8);
uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | reg;
- cUnit->codeBuffer.push_back(modrm);
+ cu->code_buffer.push_back(modrm);
DCHECK_EQ(entry->skeleton.immediate_bytes, 0);
}
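
EmitRegCond relies on the setcc family encoding as 0F 90+cc, which is why the 4-bit condition code is simply ORed into the second opcode byte. A sketch of that encoding (buffer type assumed as above):

#include <cstdint>
#include <vector>

// setcc r/m8 encodes as 0F (90 | cc) modrm, e.g. 0F 94 is sete and
// 0F 95 is setne; mod 11 selects a register operand, and the reg
// field of the modrm byte is an opcode extension (0 here).
static void EmitSetcc(std::vector<uint8_t>& buf, uint8_t cc, uint8_t reg) {
  buf.push_back(0x0F);
  buf.push_back(0x90 | cc);
  buf.push_back((3 << 6) | (0 << 3) | reg);
}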
-static void EmitJmp(CompilationUnit* cUnit, const X86EncodingMap* entry, int rel) {
+static void EmitJmp(CompilationUnit* cu, const X86EncodingMap* entry, int rel) {
if (entry->opcode == kX86Jmp8) {
DCHECK(IS_SIMM8(rel));
- cUnit->codeBuffer.push_back(0xEB);
- cUnit->codeBuffer.push_back(rel & 0xFF);
+ cu->code_buffer.push_back(0xEB);
+ cu->code_buffer.push_back(rel & 0xFF);
} else if (entry->opcode == kX86Jmp32) {
- cUnit->codeBuffer.push_back(0xE9);
- cUnit->codeBuffer.push_back(rel & 0xFF);
- cUnit->codeBuffer.push_back((rel >> 8) & 0xFF);
- cUnit->codeBuffer.push_back((rel >> 16) & 0xFF);
- cUnit->codeBuffer.push_back((rel >> 24) & 0xFF);
+ cu->code_buffer.push_back(0xE9);
+ cu->code_buffer.push_back(rel & 0xFF);
+ cu->code_buffer.push_back((rel >> 8) & 0xFF);
+ cu->code_buffer.push_back((rel >> 16) & 0xFF);
+ cu->code_buffer.push_back((rel >> 24) & 0xFF);
} else {
DCHECK(entry->opcode == kX86JmpR);
- cUnit->codeBuffer.push_back(entry->skeleton.opcode);
+ cu->code_buffer.push_back(entry->skeleton.opcode);
uint8_t reg = static_cast<uint8_t>(rel);
DCHECK_LT(reg, 8);
uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | reg;
- cUnit->codeBuffer.push_back(modrm);
+ cu->code_buffer.push_back(modrm);
}
}
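
EmitJmp picks between the 2-byte EB rel8 form and the 5-byte E9 rel32 form; in both, the displacement is measured from the end of the instruction, so the candidate size changes the delta it must hold. A sketch of that size decision:

#include <cstdint>

// Bytes needed for an unconditional jmp emitted at 'pc' that must
// reach 'target'. The rel8 form is 2 bytes long, so its displacement
// is taken from pc + 2.
static int JmpSize(uintptr_t pc, uintptr_t target) {
  intptr_t delta = static_cast<intptr_t>(target) -
                   static_cast<intptr_t>(pc + 2);
  return (delta >= -128 && delta <= 127) ? 2 : 5;
}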
-static void EmitJcc(CompilationUnit* cUnit, const X86EncodingMap* entry,
+static void EmitJcc(CompilationUnit* cu, const X86EncodingMap* entry,
int rel, uint8_t cc) {
DCHECK_LT(cc, 16);
if (entry->opcode == kX86Jcc8) {
DCHECK(IS_SIMM8(rel));
- cUnit->codeBuffer.push_back(0x70 | cc);
- cUnit->codeBuffer.push_back(rel & 0xFF);
+ cu->code_buffer.push_back(0x70 | cc);
+ cu->code_buffer.push_back(rel & 0xFF);
} else {
DCHECK(entry->opcode == kX86Jcc32);
- cUnit->codeBuffer.push_back(0x0F);
- cUnit->codeBuffer.push_back(0x80 | cc);
- cUnit->codeBuffer.push_back(rel & 0xFF);
- cUnit->codeBuffer.push_back((rel >> 8) & 0xFF);
- cUnit->codeBuffer.push_back((rel >> 16) & 0xFF);
- cUnit->codeBuffer.push_back((rel >> 24) & 0xFF);
+ cu->code_buffer.push_back(0x0F);
+ cu->code_buffer.push_back(0x80 | cc);
+ cu->code_buffer.push_back(rel & 0xFF);
+ cu->code_buffer.push_back((rel >> 8) & 0xFF);
+ cu->code_buffer.push_back((rel >> 16) & 0xFF);
+ cu->code_buffer.push_back((rel >> 24) & 0xFF);
}
}
-static void EmitCallMem(CompilationUnit* cUnit, const X86EncodingMap* entry,
+static void EmitCallMem(CompilationUnit* cu, const X86EncodingMap* entry,
uint8_t base, int disp) {
if (entry->skeleton.prefix1 != 0) {
- cUnit->codeBuffer.push_back(entry->skeleton.prefix1);
+ cu->code_buffer.push_back(entry->skeleton.prefix1);
if (entry->skeleton.prefix2 != 0) {
- cUnit->codeBuffer.push_back(entry->skeleton.prefix2);
+ cu->code_buffer.push_back(entry->skeleton.prefix2);
}
} else {
DCHECK_EQ(0, entry->skeleton.prefix2);
}
- cUnit->codeBuffer.push_back(entry->skeleton.opcode);
+ cu->code_buffer.push_back(entry->skeleton.opcode);
if (entry->skeleton.opcode == 0x0F) {
- cUnit->codeBuffer.push_back(entry->skeleton.extra_opcode1);
+ cu->code_buffer.push_back(entry->skeleton.extra_opcode1);
if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode2 == 0x3A) {
- cUnit->codeBuffer.push_back(entry->skeleton.extra_opcode2);
+ cu->code_buffer.push_back(entry->skeleton.extra_opcode2);
} else {
DCHECK_EQ(0, entry->skeleton.extra_opcode2);
}
@@ -1076,27 +1076,27 @@
DCHECK_EQ(0, entry->skeleton.extra_opcode2);
}
uint8_t modrm = (ModrmForDisp(base, disp) << 6) | (entry->skeleton.modrm_opcode << 3) | base;
- cUnit->codeBuffer.push_back(modrm);
+ cu->code_buffer.push_back(modrm);
if (base == rX86_SP) {
// Special SIB for SP base
- cUnit->codeBuffer.push_back(0 << 6 | (rX86_SP << 3) | rX86_SP);
+ cu->code_buffer.push_back(0 << 6 | (rX86_SP << 3) | rX86_SP);
}
- EmitDisp(cUnit, base, disp);
+ EmitDisp(cu, base, disp);
DCHECK_EQ(0, entry->skeleton.ax_opcode);
DCHECK_EQ(0, entry->skeleton.immediate_bytes);
}
-static void EmitCallThread(CompilationUnit* cUnit, const X86EncodingMap* entry, int disp) {
+static void EmitCallThread(CompilationUnit* cu, const X86EncodingMap* entry, int disp) {
DCHECK_NE(entry->skeleton.prefix1, 0);
- cUnit->codeBuffer.push_back(entry->skeleton.prefix1);
+ cu->code_buffer.push_back(entry->skeleton.prefix1);
if (entry->skeleton.prefix2 != 0) {
- cUnit->codeBuffer.push_back(entry->skeleton.prefix2);
+ cu->code_buffer.push_back(entry->skeleton.prefix2);
}
- cUnit->codeBuffer.push_back(entry->skeleton.opcode);
+ cu->code_buffer.push_back(entry->skeleton.opcode);
if (entry->skeleton.opcode == 0x0F) {
- cUnit->codeBuffer.push_back(entry->skeleton.extra_opcode1);
+ cu->code_buffer.push_back(entry->skeleton.extra_opcode1);
if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode2 == 0x3A) {
- cUnit->codeBuffer.push_back(entry->skeleton.extra_opcode2);
+ cu->code_buffer.push_back(entry->skeleton.extra_opcode2);
} else {
DCHECK_EQ(0, entry->skeleton.extra_opcode2);
}
@@ -1105,30 +1105,30 @@
DCHECK_EQ(0, entry->skeleton.extra_opcode2);
}
uint8_t modrm = (0 << 6) | (entry->skeleton.modrm_opcode << 3) | rBP;
- cUnit->codeBuffer.push_back(modrm);
- cUnit->codeBuffer.push_back(disp & 0xFF);
- cUnit->codeBuffer.push_back((disp >> 8) & 0xFF);
- cUnit->codeBuffer.push_back((disp >> 16) & 0xFF);
- cUnit->codeBuffer.push_back((disp >> 24) & 0xFF);
+ cu->code_buffer.push_back(modrm);
+ cu->code_buffer.push_back(disp & 0xFF);
+ cu->code_buffer.push_back((disp >> 8) & 0xFF);
+ cu->code_buffer.push_back((disp >> 16) & 0xFF);
+ cu->code_buffer.push_back((disp >> 24) & 0xFF);
DCHECK_EQ(0, entry->skeleton.ax_opcode);
DCHECK_EQ(0, entry->skeleton.immediate_bytes);
}
-static void EmitPcRel(CompilationUnit* cUnit, const X86EncodingMap* entry, uint8_t reg,
+static void EmitPcRel(CompilationUnit* cu, const X86EncodingMap* entry, uint8_t reg,
int base_or_table, uint8_t index, int scale, int table_or_disp) {
int disp;
if (entry->opcode == kX86PcRelLoadRA) {
- SwitchTable *tabRec = reinterpret_cast<SwitchTable*>(table_or_disp);
- disp = tabRec->offset;
+ SwitchTable *tab_rec = reinterpret_cast<SwitchTable*>(table_or_disp);
+ disp = tab_rec->offset;
} else {
DCHECK(entry->opcode == kX86PcRelAdr);
- FillArrayData *tabRec = reinterpret_cast<FillArrayData*>(base_or_table);
- disp = tabRec->offset;
+ FillArrayData *tab_rec = reinterpret_cast<FillArrayData*>(base_or_table);
+ disp = tab_rec->offset;
}
if (entry->skeleton.prefix1 != 0) {
- cUnit->codeBuffer.push_back(entry->skeleton.prefix1);
+ cu->code_buffer.push_back(entry->skeleton.prefix1);
if (entry->skeleton.prefix2 != 0) {
- cUnit->codeBuffer.push_back(entry->skeleton.prefix2);
+ cu->code_buffer.push_back(entry->skeleton.prefix2);
}
} else {
DCHECK_EQ(0, entry->skeleton.prefix2);
@@ -1138,48 +1138,48 @@
}
DCHECK_LT(reg, 8);
if (entry->opcode == kX86PcRelLoadRA) {
- cUnit->codeBuffer.push_back(entry->skeleton.opcode);
+ cu->code_buffer.push_back(entry->skeleton.opcode);
DCHECK_EQ(0, entry->skeleton.extra_opcode1);
DCHECK_EQ(0, entry->skeleton.extra_opcode2);
uint8_t modrm = (2 << 6) | (reg << 3) | rX86_SP;
- cUnit->codeBuffer.push_back(modrm);
+ cu->code_buffer.push_back(modrm);
DCHECK_LT(scale, 4);
DCHECK_LT(index, 8);
DCHECK_LT(base_or_table, 8);
uint8_t base = static_cast<uint8_t>(base_or_table);
uint8_t sib = (scale << 6) | (index << 3) | base;
- cUnit->codeBuffer.push_back(sib);
+ cu->code_buffer.push_back(sib);
DCHECK_EQ(0, entry->skeleton.immediate_bytes);
} else {
- cUnit->codeBuffer.push_back(entry->skeleton.opcode + reg);
+ cu->code_buffer.push_back(entry->skeleton.opcode + reg);
}
- cUnit->codeBuffer.push_back(disp & 0xFF);
- cUnit->codeBuffer.push_back((disp >> 8) & 0xFF);
- cUnit->codeBuffer.push_back((disp >> 16) & 0xFF);
- cUnit->codeBuffer.push_back((disp >> 24) & 0xFF);
+ cu->code_buffer.push_back(disp & 0xFF);
+ cu->code_buffer.push_back((disp >> 8) & 0xFF);
+ cu->code_buffer.push_back((disp >> 16) & 0xFF);
+ cu->code_buffer.push_back((disp >> 24) & 0xFF);
DCHECK_EQ(0, entry->skeleton.modrm_opcode);
DCHECK_EQ(0, entry->skeleton.ax_opcode);
}
-static void EmitMacro(CompilationUnit* cUnit, const X86EncodingMap* entry,
+static void EmitMacro(CompilationUnit* cu, const X86EncodingMap* entry,
uint8_t reg, int offset) {
DCHECK(entry->opcode == kX86StartOfMethod) << entry->name;
- cUnit->codeBuffer.push_back(0xE8); // call +0
- cUnit->codeBuffer.push_back(0);
- cUnit->codeBuffer.push_back(0);
- cUnit->codeBuffer.push_back(0);
- cUnit->codeBuffer.push_back(0);
+ cu->code_buffer.push_back(0xE8); // call +0
+ cu->code_buffer.push_back(0);
+ cu->code_buffer.push_back(0);
+ cu->code_buffer.push_back(0);
+ cu->code_buffer.push_back(0);
DCHECK_LT(reg, 8);
- cUnit->codeBuffer.push_back(0x58 + reg); // pop reg
+ cu->code_buffer.push_back(0x58 + reg); // pop reg
- EmitRegImm(cUnit, &EncodingMap[kX86Sub32RI], reg, offset + 5 /* size of call +0 */);
+ EmitRegImm(cu, &EncodingMap[kX86Sub32RI], reg, offset + 5 /* size of call +0 */);
}
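
kX86StartOfMethod is the standard 32-bit x86 idiom for reading the program counter: call +0 pushes the address of the following instruction, pop retrieves it, and subtracting the instruction's offset within the method (plus 5 for the call itself) yields the method's start address. A sketch of the bytes EmitMacro writes before handing off to EmitRegImm:

#include <cstdint>
#include <vector>

// E8 00 00 00 00  call +0   ; pushes the address of the next insn
// 58+reg          pop reg   ; reg now holds that address
// The "sub reg, offset + 5" that follows is emitted via EmitRegImm.
static void EmitGetPc(std::vector<uint8_t>& buf, uint8_t reg) {
  buf.push_back(0xE8);
  for (int i = 0; i < 4; ++i) {
    buf.push_back(0);  // rel32 displacement of zero
  }
  buf.push_back(0x58 + reg);
}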
-static void EmitUnimplemented(CompilationUnit* cUnit, const X86EncodingMap* entry, LIR* lir) {
+static void EmitUnimplemented(CompilationUnit* cu, const X86EncodingMap* entry, LIR* lir) {
UNIMPLEMENTED(WARNING) << "encoding kind for " << entry->name << " " << BuildInsnString(entry->fmt, lir, 0);
for (int i = 0; i < GetInsnSize(lir); ++i) {
- cUnit->codeBuffer.push_back(0xCC); // push breakpoint instruction - int 3
+ cu->code_buffer.push_back(0xCC); // push breakpoint instruction - int 3
}
}
@@ -1189,25 +1189,25 @@
* instruction. In those cases we will try to substitute a new code
* sequence or request that the trace be shortened and retried.
*/
-AssemblerStatus AssembleInstructions(CompilationUnit *cUnit, uintptr_t startAddr) {
+AssemblerStatus AssembleInstructions(CompilationUnit *cu, uintptr_t start_addr) {
LIR *lir;
AssemblerStatus res = kSuccess; // Assume success
const bool kVerbosePcFixup = false;
- for (lir = cUnit->firstLIRInsn; lir; lir = NEXT_LIR(lir)) {
+ for (lir = cu->first_lir_insn; lir; lir = NEXT_LIR(lir)) {
if (lir->opcode < 0) {
continue;
}
- if (lir->flags.isNop) {
+ if (lir->flags.is_nop) {
continue;
}
if (lir->flags.pcRelFixup) {
switch (lir->opcode) {
case kX86Jcc8: {
- LIR *targetLIR = lir->target;
- DCHECK(targetLIR != NULL);
+ LIR *target_lir = lir->target;
+ DCHECK(target_lir != NULL);
int delta = 0;
uintptr_t pc;
if (IS_SIMM8(lir->operands[0])) {
@@ -1215,7 +1215,7 @@
} else {
pc = lir->offset + 6 /* 2 byte opcode + rel32 */;
}
- uintptr_t target = targetLIR->offset;
+ uintptr_t target = target_lir->offset;
delta = target - pc;
if (IS_SIMM8(delta) != IS_SIMM8(lir->operands[0])) {
if (kVerbosePcFixup) {
@@ -1223,38 +1223,38 @@
<< " delta: " << delta << " old delta: " << lir->operands[0];
}
lir->opcode = kX86Jcc32;
- SetupResourceMasks(cUnit, lir);
+ SetupResourceMasks(cu, lir);
res = kRetryAll;
}
if (kVerbosePcFixup) {
LOG(INFO) << "Source:";
- DumpLIRInsn(cUnit, lir, 0);
+ DumpLIRInsn(cu, lir, 0);
LOG(INFO) << "Target:";
- DumpLIRInsn(cUnit, targetLIR, 0);
+ DumpLIRInsn(cu, target_lir, 0);
LOG(INFO) << "Delta " << delta;
}
lir->operands[0] = delta;
break;
}
case kX86Jcc32: {
- LIR *targetLIR = lir->target;
- DCHECK(targetLIR != NULL);
+ LIR *target_lir = lir->target;
+ DCHECK(target_lir != NULL);
uintptr_t pc = lir->offset + 6 /* 2 byte opcode + rel32 */;
- uintptr_t target = targetLIR->offset;
+ uintptr_t target = target_lir->offset;
int delta = target - pc;
if (kVerbosePcFixup) {
LOG(INFO) << "Source:";
- DumpLIRInsn(cUnit, lir, 0);
+ DumpLIRInsn(cu, lir, 0);
LOG(INFO) << "Target:";
- DumpLIRInsn(cUnit, targetLIR, 0);
+ DumpLIRInsn(cu, target_lir, 0);
LOG(INFO) << "Delta " << delta;
}
lir->operands[0] = delta;
break;
}
case kX86Jmp8: {
- LIR *targetLIR = lir->target;
- DCHECK(targetLIR != NULL);
+ LIR *target_lir = lir->target;
+ DCHECK(target_lir != NULL);
int delta = 0;
uintptr_t pc;
if (IS_SIMM8(lir->operands[0])) {
@@ -1262,11 +1262,11 @@
} else {
pc = lir->offset + 5 /* opcode + rel32 */;
}
- uintptr_t target = targetLIR->offset;
+ uintptr_t target = target_lir->offset;
delta = target - pc;
- if (!(cUnit->disableOpt & (1 << kSafeOptimizations)) && delta == 0) {
+ if (!(cu->disable_opt & (1 << kSafeOptimizations)) && delta == 0) {
// Useless branch
- lir->flags.isNop = true;
+ lir->flags.is_nop = true;
if (kVerbosePcFixup) {
LOG(INFO) << "Retry for useless branch at " << lir->offset;
}
@@ -1276,17 +1276,17 @@
LOG(INFO) << "Retry for JMP growth at " << lir->offset;
}
lir->opcode = kX86Jmp32;
- SetupResourceMasks(cUnit, lir);
+ SetupResourceMasks(cu, lir);
res = kRetryAll;
}
lir->operands[0] = delta;
break;
}
case kX86Jmp32: {
- LIR *targetLIR = lir->target;
- DCHECK(targetLIR != NULL);
+ LIR *target_lir = lir->target;
+ DCHECK(target_lir != NULL);
uintptr_t pc = lir->offset + 5 /* opcode + rel32 */;
- uintptr_t target = targetLIR->offset;
+ uintptr_t target = target_lir->offset;
int delta = target - pc;
lir->operands[0] = delta;
break;
@@ -1304,21 +1304,21 @@
if (res != kSuccess) {
continue;
}
- CHECK_EQ(static_cast<size_t>(lir->offset), cUnit->codeBuffer.size());
+ CHECK_EQ(static_cast<size_t>(lir->offset), cu->code_buffer.size());
const X86EncodingMap *entry = &EncodingMap[lir->opcode];
- size_t starting_cbuf_size = cUnit->codeBuffer.size();
+ size_t starting_cbuf_size = cu->code_buffer.size();
switch (entry->kind) {
case kData: // 4 bytes of data
- cUnit->codeBuffer.push_back(lir->operands[0]);
+ cu->code_buffer.push_back(lir->operands[0]);
break;
case kNullary: // 1 byte of opcode
DCHECK_EQ(0, entry->skeleton.prefix1);
DCHECK_EQ(0, entry->skeleton.prefix2);
- cUnit->codeBuffer.push_back(entry->skeleton.opcode);
+ cu->code_buffer.push_back(entry->skeleton.opcode);
if (entry->skeleton.extra_opcode1 != 0) {
- cUnit->codeBuffer.push_back(entry->skeleton.extra_opcode1);
+ cu->code_buffer.push_back(entry->skeleton.extra_opcode1);
if (entry->skeleton.extra_opcode2 != 0) {
- cUnit->codeBuffer.push_back(entry->skeleton.extra_opcode2);
+ cu->code_buffer.push_back(entry->skeleton.extra_opcode2);
}
} else {
DCHECK_EQ(0, entry->skeleton.extra_opcode2);
@@ -1328,87 +1328,87 @@
DCHECK_EQ(0, entry->skeleton.immediate_bytes);
break;
case kReg: // lir operands - 0: reg
- EmitOpReg(cUnit, entry, lir->operands[0]);
+ EmitOpReg(cu, entry, lir->operands[0]);
break;
case kMem: // lir operands - 0: base, 1: disp
- EmitOpMem(cUnit, entry, lir->operands[0], lir->operands[1]);
+ EmitOpMem(cu, entry, lir->operands[0], lir->operands[1]);
break;
case kMemReg: // lir operands - 0: base, 1: disp, 2: reg
- EmitMemReg(cUnit, entry, lir->operands[0], lir->operands[1], lir->operands[2]);
+ EmitMemReg(cu, entry, lir->operands[0], lir->operands[1], lir->operands[2]);
break;
case kArrayReg: // lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: reg
- EmitArrayReg(cUnit, entry, lir->operands[0], lir->operands[1], lir->operands[2],
+ EmitArrayReg(cu, entry, lir->operands[0], lir->operands[1], lir->operands[2],
lir->operands[3], lir->operands[4]);
break;
case kRegMem: // lir operands - 0: reg, 1: base, 2: disp
- EmitRegMem(cUnit, entry, lir->operands[0], lir->operands[1], lir->operands[2]);
+ EmitRegMem(cu, entry, lir->operands[0], lir->operands[1], lir->operands[2]);
break;
case kRegArray: // lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: disp
- EmitRegArray(cUnit, entry, lir->operands[0], lir->operands[1], lir->operands[2],
+ EmitRegArray(cu, entry, lir->operands[0], lir->operands[1], lir->operands[2],
lir->operands[3], lir->operands[4]);
break;
case kRegThread: // lir operands - 0: reg, 1: disp
- EmitRegThread(cUnit, entry, lir->operands[0], lir->operands[1]);
+ EmitRegThread(cu, entry, lir->operands[0], lir->operands[1]);
break;
case kRegReg: // lir operands - 0: reg1, 1: reg2
- EmitRegReg(cUnit, entry, lir->operands[0], lir->operands[1]);
+ EmitRegReg(cu, entry, lir->operands[0], lir->operands[1]);
break;
case kRegRegStore: // lir operands - 0: reg2, 1: reg1
- EmitRegReg(cUnit, entry, lir->operands[1], lir->operands[0]);
+ EmitRegReg(cu, entry, lir->operands[1], lir->operands[0]);
break;
case kRegRegImm:
- EmitRegRegImm(cUnit, entry, lir->operands[0], lir->operands[1], lir->operands[2]);
+ EmitRegRegImm(cu, entry, lir->operands[0], lir->operands[1], lir->operands[2]);
break;
case kRegImm: // lir operands - 0: reg, 1: immediate
- EmitRegImm(cUnit, entry, lir->operands[0], lir->operands[1]);
+ EmitRegImm(cu, entry, lir->operands[0], lir->operands[1]);
break;
case kThreadImm: // lir operands - 0: disp, 1: immediate
- EmitThreadImm(cUnit, entry, lir->operands[0], lir->operands[1]);
+ EmitThreadImm(cu, entry, lir->operands[0], lir->operands[1]);
break;
case kMovRegImm: // lir operands - 0: reg, 1: immediate
- EmitMovRegImm(cUnit, entry, lir->operands[0], lir->operands[1]);
+ EmitMovRegImm(cu, entry, lir->operands[0], lir->operands[1]);
break;
case kShiftRegImm: // lir operands - 0: reg, 1: immediate
- EmitShiftRegImm(cUnit, entry, lir->operands[0], lir->operands[1]);
+ EmitShiftRegImm(cu, entry, lir->operands[0], lir->operands[1]);
break;
case kShiftRegCl: // lir operands - 0: reg, 1: cl
- EmitShiftRegCl(cUnit, entry, lir->operands[0], lir->operands[1]);
+ EmitShiftRegCl(cu, entry, lir->operands[0], lir->operands[1]);
break;
case kRegCond: // lir operands - 0: reg, 1: condition
- EmitRegCond(cUnit, entry, lir->operands[0], lir->operands[1]);
+ EmitRegCond(cu, entry, lir->operands[0], lir->operands[1]);
break;
case kJmp: // lir operands - 0: rel
- EmitJmp(cUnit, entry, lir->operands[0]);
+ EmitJmp(cu, entry, lir->operands[0]);
break;
case kJcc: // lir operands - 0: rel, 1: CC, target assigned
- EmitJcc(cUnit, entry, lir->operands[0], lir->operands[1]);
+ EmitJcc(cu, entry, lir->operands[0], lir->operands[1]);
break;
case kCall:
switch (entry->opcode) {
case kX86CallM: // lir operands - 0: base, 1: disp
- EmitCallMem(cUnit, entry, lir->operands[0], lir->operands[1]);
+ EmitCallMem(cu, entry, lir->operands[0], lir->operands[1]);
break;
case kX86CallT: // lir operands - 0: disp
- EmitCallThread(cUnit, entry, lir->operands[0]);
+ EmitCallThread(cu, entry, lir->operands[0]);
break;
default:
- EmitUnimplemented(cUnit, entry, lir);
+ EmitUnimplemented(cu, entry, lir);
break;
}
break;
case kPcRel: // lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: table
- EmitPcRel(cUnit, entry, lir->operands[0], lir->operands[1], lir->operands[2],
+ EmitPcRel(cu, entry, lir->operands[0], lir->operands[1], lir->operands[2],
lir->operands[3], lir->operands[4]);
break;
case kMacro:
- EmitMacro(cUnit, entry, lir->operands[0], lir->offset);
+ EmitMacro(cu, entry, lir->operands[0], lir->offset);
break;
default:
- EmitUnimplemented(cUnit, entry, lir);
+ EmitUnimplemented(cu, entry, lir);
break;
}
CHECK_EQ(static_cast<size_t>(GetInsnSize(lir)),
- cUnit->codeBuffer.size() - starting_cbuf_size)
+ cu->code_buffer.size() - starting_cbuf_size)
<< "Instruction size mismatch for entry: " << EncodingMap[lir->opcode].name;
}
return res;
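
The pcRelFixup cases above implement branch relaxation: code is first laid out assuming short rel8 branches, and whenever a displacement no longer fits, the instruction is widened and kRetryAll forces a fresh layout pass. Since instructions only ever grow, the process terminates. A deliberately simplified, self-contained model of that loop (the Insn record and its fields are stand-ins, not ART types):

#include <vector>

struct Insn {
  int offset;      // assigned each pass, as AssignInsnOffsets does
  int size;        // 2 for a rel8 branch, 5 once widened to rel32
  int target_idx;  // index of the instruction this branch targets
};

static void Relax(std::vector<Insn>& insns) {
  bool changed = true;
  while (changed) {  // each pass plays the role of one kRetryAll
    changed = false;
    int offset = 0;
    for (Insn& insn : insns) {
      insn.offset = offset;
      offset += insn.size;
    }
    for (Insn& insn : insns) {
      int delta = insns[insn.target_idx].offset - (insn.offset + insn.size);
      if (insn.size == 2 && (delta < -128 || delta > 127)) {
        insn.size = 5;  // e.g. kX86Jmp8 -> kX86Jmp32
        changed = true;
      }
    }
  }
}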
@@ -1418,15 +1418,15 @@
 * Target-dependent offset assignment.
*/
-int AssignInsnOffsets(CompilationUnit* cUnit)
+int AssignInsnOffsets(CompilationUnit* cu)
{
LIR* x86LIR;
int offset = 0;
- for (x86LIR = cUnit->firstLIRInsn; x86LIR; x86LIR = NEXT_LIR(x86LIR)) {
+ for (x86LIR = cu->first_lir_insn; x86LIR; x86LIR = NEXT_LIR(x86LIR)) {
x86LIR->offset = offset;
if (x86LIR->opcode >= 0) {
- if (!x86LIR->flags.isNop) {
+ if (!x86LIR->flags.is_nop) {
offset += x86LIR->flags.size;
}
} else if (x86LIR->opcode == kPseudoPseudoAlign4) {
diff --git a/src/compiler/codegen/x86/call_x86.cc b/src/compiler/codegen/x86/call_x86.cc
index 7ada136..e24831d 100644
--- a/src/compiler/codegen/x86/call_x86.cc
+++ b/src/compiler/codegen/x86/call_x86.cc
@@ -22,8 +22,8 @@
namespace art {
-void GenSpecialCase(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir,
- SpecialCaseHandler specialCase)
+void GenSpecialCase(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
+ SpecialCaseHandler special_case)
{
// TODO
}
@@ -32,98 +32,98 @@
* The sparse table in the literal pool is an array of <key,displacement>
* pairs.
*/
-BasicBlock *FindBlock(CompilationUnit* cUnit, unsigned int codeOffset,
- bool split, bool create, BasicBlock** immedPredBlockP);
-void GenSparseSwitch(CompilationUnit* cUnit, uint32_t tableOffset,
- RegLocation rlSrc)
+BasicBlock *FindBlock(CompilationUnit* cu, unsigned int code_offset,
+ bool split, bool create, BasicBlock** immed_pred_block_p);
+void GenSparseSwitch(CompilationUnit* cu, uint32_t table_offset,
+ RegLocation rl_src)
{
- const uint16_t* table = cUnit->insns + cUnit->currentDalvikOffset + tableOffset;
- if (cUnit->printMe) {
+ const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
+ if (cu->verbose) {
DumpSparseSwitchTable(table);
}
int entries = table[1];
const int* keys = reinterpret_cast<const int*>(&table[2]);
const int* targets = &keys[entries];
- rlSrc = LoadValue(cUnit, rlSrc, kCoreReg);
+ rl_src = LoadValue(cu, rl_src, kCoreReg);
for (int i = 0; i < entries; i++) {
int key = keys[i];
- BasicBlock* case_block = FindBlock(cUnit,
- cUnit->currentDalvikOffset + targets[i],
+ BasicBlock* case_block = FindBlock(cu,
+ cu->current_dalvik_offset + targets[i],
false, false, NULL);
- LIR* labelList = cUnit->blockLabelList;
- OpCmpImmBranch(cUnit, kCondEq, rlSrc.lowReg, key,
- &labelList[case_block->id]);
+ LIR* label_list = cu->block_label_list;
+ OpCmpImmBranch(cu, kCondEq, rl_src.low_reg, key,
+ &label_list[case_block->id]);
}
}
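
The sparse-switch payload walked above is laid out as a 0x0200 ident, a 16-bit entry count, then 'entries' 32-bit keys followed by 'entries' 32-bit relative targets. A sketch of the same lookup done at runtime rather than as a compare chain:

#include <cstdint>

// Linear lookup over a Dalvik sparse-switch payload; mirrors the
// per-key OpCmpImmBranch chain that GenSparseSwitch emits. Returns
// the relative target for 'key', or 'fallthrough' on no match.
static int32_t LookupSparse(const uint16_t* table, int32_t key,
                            int32_t fallthrough) {
  int entries = table[1];
  const int32_t* keys = reinterpret_cast<const int32_t*>(&table[2]);
  const int32_t* targets = &keys[entries];
  for (int i = 0; i < entries; i++) {
    if (keys[i] == key) {
      return targets[i];
    }
  }
  return fallthrough;
}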
/*
* Code pattern will look something like:
*
- * mov rVal, ..
+ * mov r_val, ..
* call 0
- * pop rStartOfMethod
- * sub rStartOfMethod, ..
- * mov rKeyReg, rVal
- * sub rKeyReg, lowKey
- * cmp rKeyReg, size-1 ; bound check
+ * pop r_start_of_method
+ * sub r_start_of_method, ..
+ * mov r_key_reg, r_val
+ * sub r_key_reg, low_key
+ * cmp r_key_reg, size-1 ; bound check
* ja done
- * mov rDisp, [rStartOfMethod + rKeyReg * 4 + tableOffset]
- * add rStartOfMethod, rDisp
- * jmp rStartOfMethod
+ * mov r_disp, [r_start_of_method + r_key_reg * 4 + table_offset]
+ * add r_start_of_method, r_disp
+ * jmp r_start_of_method
* done:
*/
-void GenPackedSwitch(CompilationUnit* cUnit, uint32_t tableOffset,
- RegLocation rlSrc)
+void GenPackedSwitch(CompilationUnit* cu, uint32_t table_offset,
+ RegLocation rl_src)
{
- const uint16_t* table = cUnit->insns + cUnit->currentDalvikOffset + tableOffset;
- if (cUnit->printMe) {
+ const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
+ if (cu->verbose) {
DumpPackedSwitchTable(table);
}
// Add the table to the list - we'll process it later
- SwitchTable *tabRec =
- static_cast<SwitchTable *>(NewMem(cUnit, sizeof(SwitchTable), true, kAllocData));
- tabRec->table = table;
- tabRec->vaddr = cUnit->currentDalvikOffset;
+ SwitchTable *tab_rec =
+ static_cast<SwitchTable *>(NewMem(cu, sizeof(SwitchTable), true, kAllocData));
+ tab_rec->table = table;
+ tab_rec->vaddr = cu->current_dalvik_offset;
int size = table[1];
- tabRec->targets = static_cast<LIR**>(NewMem(cUnit, size * sizeof(LIR*), true, kAllocLIR));
- InsertGrowableList(cUnit, &cUnit->switchTables, reinterpret_cast<uintptr_t>(tabRec));
+ tab_rec->targets = static_cast<LIR**>(NewMem(cu, size * sizeof(LIR*), true, kAllocLIR));
+ InsertGrowableList(cu, &cu->switch_tables, reinterpret_cast<uintptr_t>(tab_rec));
// Get the switch value
- rlSrc = LoadValue(cUnit, rlSrc, kCoreReg);
- int startOfMethodReg = AllocTemp(cUnit);
+ rl_src = LoadValue(cu, rl_src, kCoreReg);
+ int start_of_method_reg = AllocTemp(cu);
// Materialize a pointer to the switch table
- //NewLIR0(cUnit, kX86Bkpt);
- NewLIR1(cUnit, kX86StartOfMethod, startOfMethodReg);
- int lowKey = s4FromSwitchData(&table[2]);
+ //NewLIR0(cu, kX86Bkpt);
+ NewLIR1(cu, kX86StartOfMethod, start_of_method_reg);
+ int low_key = s4FromSwitchData(&table[2]);
-  int keyReg;
+  int key_reg;
// Remove the bias, if necessary
- if (lowKey == 0) {
- keyReg = rlSrc.lowReg;
+ if (low_key == 0) {
+    key_reg = rl_src.low_reg;
} else {
- keyReg = AllocTemp(cUnit);
- OpRegRegImm(cUnit, kOpSub, keyReg, rlSrc.lowReg, lowKey);
+    key_reg = AllocTemp(cu);
+    OpRegRegImm(cu, kOpSub, key_reg, rl_src.low_reg, low_key);
}
  // Bounds check - if < 0 or >= size, fall through past the switch
- OpRegImm(cUnit, kOpCmp, keyReg, size-1);
- LIR* branchOver = OpCondBranch(cUnit, kCondHi, NULL);
+  OpRegImm(cu, kOpCmp, key_reg, size-1);
+ LIR* branch_over = OpCondBranch(cu, kCondHi, NULL);
// Load the displacement from the switch table
- int dispReg = AllocTemp(cUnit);
- NewLIR5(cUnit, kX86PcRelLoadRA, dispReg, startOfMethodReg, keyReg, 2,
- reinterpret_cast<uintptr_t>(tabRec));
+ int disp_reg = AllocTemp(cu);
+  NewLIR5(cu, kX86PcRelLoadRA, disp_reg, start_of_method_reg, key_reg, 2,
+ reinterpret_cast<uintptr_t>(tab_rec));
// Add displacement to start of method
- OpRegReg(cUnit, kOpAdd, startOfMethodReg, dispReg);
+ OpRegReg(cu, kOpAdd, start_of_method_reg, disp_reg);
// ..and go!
- LIR* switchBranch = NewLIR1(cUnit, kX86JmpR, startOfMethodReg);
- tabRec->anchor = switchBranch;
+ LIR* switch_branch = NewLIR1(cu, kX86JmpR, start_of_method_reg);
+ tab_rec->anchor = switch_branch;
- /* branchOver target here */
- LIR* target = NewLIR0(cUnit, kPseudoTargetLabel);
- branchOver->target = target;
+ /* branch_over target here */
+ LIR* target = NewLIR0(cu, kPseudoTargetLabel);
+ branch_over->target = target;
}
-void CallRuntimeHelperRegReg(CompilationUnit* cUnit, int helperOffset,
- int arg0, int arg1, bool safepointPC);
+void CallRuntimeHelperRegReg(CompilationUnit* cu, int helper_offset,
+ int arg0, int arg1, bool safepoint_pc);
/*
* Array data table format:
* ushort ident = 0x0300 magic value
@@ -134,91 +134,91 @@
*
* Total size is 4+(width * size + 1)/2 16-bit code units.
*/
-void GenFillArrayData(CompilationUnit* cUnit, uint32_t tableOffset,
- RegLocation rlSrc)
+void GenFillArrayData(CompilationUnit* cu, uint32_t table_offset,
+ RegLocation rl_src)
{
- const uint16_t* table = cUnit->insns + cUnit->currentDalvikOffset + tableOffset;
+ const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
// Add the table to the list - we'll process it later
- FillArrayData *tabRec =
- static_cast<FillArrayData*>(NewMem(cUnit, sizeof(FillArrayData), true, kAllocData));
- tabRec->table = table;
- tabRec->vaddr = cUnit->currentDalvikOffset;
- uint16_t width = tabRec->table[1];
- uint32_t size = tabRec->table[2] | ((static_cast<uint32_t>(tabRec->table[3])) << 16);
- tabRec->size = (size * width) + 8;
+ FillArrayData *tab_rec =
+ static_cast<FillArrayData*>(NewMem(cu, sizeof(FillArrayData), true, kAllocData));
+ tab_rec->table = table;
+ tab_rec->vaddr = cu->current_dalvik_offset;
+ uint16_t width = tab_rec->table[1];
+ uint32_t size = tab_rec->table[2] | ((static_cast<uint32_t>(tab_rec->table[3])) << 16);
+ tab_rec->size = (size * width) + 8;
- InsertGrowableList(cUnit, &cUnit->fillArrayData, reinterpret_cast<uintptr_t>(tabRec));
+ InsertGrowableList(cu, &cu->fill_array_data, reinterpret_cast<uintptr_t>(tab_rec));
// Making a call - use explicit registers
- FlushAllRegs(cUnit); /* Everything to home location */
- LoadValueDirectFixed(cUnit, rlSrc, rX86_ARG0);
+ FlushAllRegs(cu); /* Everything to home location */
+ LoadValueDirectFixed(cu, rl_src, rX86_ARG0);
// Materialize a pointer to the fill data image
- NewLIR1(cUnit, kX86StartOfMethod, rX86_ARG2);
- NewLIR2(cUnit, kX86PcRelAdr, rX86_ARG1, reinterpret_cast<uintptr_t>(tabRec));
- NewLIR2(cUnit, kX86Add32RR, rX86_ARG1, rX86_ARG2);
- CallRuntimeHelperRegReg(cUnit, ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode), rX86_ARG0,
+ NewLIR1(cu, kX86StartOfMethod, rX86_ARG2);
+ NewLIR2(cu, kX86PcRelAdr, rX86_ARG1, reinterpret_cast<uintptr_t>(tab_rec));
+ NewLIR2(cu, kX86Add32RR, rX86_ARG1, rX86_ARG2);
+ CallRuntimeHelperRegReg(cu, ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode), rX86_ARG0,
rX86_ARG1, true);
}
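
The size arithmetic above follows the fill-array-data payload header: a 0x0300 ident, a 16-bit element width, and a 32-bit element count split across two 16-bit code units; tab_rec->size adds 8 bytes for that header. A sketch of the decoding:

#include <cstdint>

struct FillArrayHeader {
  uint16_t width;      // element width in bytes, from table[1]
  uint32_t count;      // element count, from table[2..3]
  uint32_t byte_size;  // total payload size, matching tab_rec->size
};

static FillArrayHeader DecodeFillArray(const uint16_t* table) {
  FillArrayHeader h;
  h.width = table[1];
  h.count = table[2] | (static_cast<uint32_t>(table[3]) << 16);
  h.byte_size = h.count * h.width + 8;  // +8 covers the header itself
  return h;
}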
-void GenMonitorEnter(CompilationUnit* cUnit, int optFlags, RegLocation rlSrc)
+void GenMonitorEnter(CompilationUnit* cu, int opt_flags, RegLocation rl_src)
{
- FlushAllRegs(cUnit);
- LoadValueDirectFixed(cUnit, rlSrc, rCX); // Get obj
- LockCallTemps(cUnit); // Prepare for explicit register usage
- GenNullCheck(cUnit, rlSrc.sRegLow, rCX, optFlags);
+ FlushAllRegs(cu);
+ LoadValueDirectFixed(cu, rl_src, rCX); // Get obj
+ LockCallTemps(cu); // Prepare for explicit register usage
+ GenNullCheck(cu, rl_src.s_reg_low, rCX, opt_flags);
// If lock is unheld, try to grab it quickly with compare and exchange
// TODO: copy and clear hash state?
- NewLIR2(cUnit, kX86Mov32RT, rDX, Thread::ThinLockIdOffset().Int32Value());
- NewLIR2(cUnit, kX86Sal32RI, rDX, LW_LOCK_OWNER_SHIFT);
- NewLIR2(cUnit, kX86Xor32RR, rAX, rAX);
- NewLIR3(cUnit, kX86LockCmpxchgMR, rCX, Object::MonitorOffset().Int32Value(), rDX);
- LIR* branch = NewLIR2(cUnit, kX86Jcc8, 0, kX86CondEq);
+ NewLIR2(cu, kX86Mov32RT, rDX, Thread::ThinLockIdOffset().Int32Value());
+ NewLIR2(cu, kX86Sal32RI, rDX, LW_LOCK_OWNER_SHIFT);
+ NewLIR2(cu, kX86Xor32RR, rAX, rAX);
+ NewLIR3(cu, kX86LockCmpxchgMR, rCX, Object::MonitorOffset().Int32Value(), rDX);
+ LIR* branch = NewLIR2(cu, kX86Jcc8, 0, kX86CondEq);
// If lock is held, go the expensive route - artLockObjectFromCode(self, obj);
- CallRuntimeHelperReg(cUnit, ENTRYPOINT_OFFSET(pLockObjectFromCode), rCX, true);
- branch->target = NewLIR0(cUnit, kPseudoTargetLabel);
+ CallRuntimeHelperReg(cu, ENTRYPOINT_OFFSET(pLockObjectFromCode), rCX, true);
+ branch->target = NewLIR0(cu, kPseudoTargetLabel);
}
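
The fast path above is a thin-lock acquire: if the monitor word is zero (unheld), one lock cmpxchg installs the current thread id shifted into the owner field; anything else falls back to the pLockObjectFromCode helper. A simplified equivalent in C++, with the atomic and the slow-path callback standing in for the real runtime pieces (and ignoring the hash-state question the TODO above raises):

#include <atomic>
#include <cstdint>

// Thin-lock acquire in miniature. A monitor word of 0 means unlocked,
// which is why the code above zeroes rAX before the cmpxchg.
static void ThinLockEnter(std::atomic<uint32_t>& monitor,
                          uint32_t thin_lock_id, int owner_shift,
                          void (*slow_path_lock)()) {
  uint32_t expected = 0;
  uint32_t desired = thin_lock_id << owner_shift;
  if (!monitor.compare_exchange_strong(expected, desired)) {
    slow_path_lock();  // held or contended: take the expensive route
  }
}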
-void GenMonitorExit(CompilationUnit* cUnit, int optFlags, RegLocation rlSrc)
+void GenMonitorExit(CompilationUnit* cu, int opt_flags, RegLocation rl_src)
{
- FlushAllRegs(cUnit);
- LoadValueDirectFixed(cUnit, rlSrc, rAX); // Get obj
- LockCallTemps(cUnit); // Prepare for explicit register usage
- GenNullCheck(cUnit, rlSrc.sRegLow, rAX, optFlags);
+ FlushAllRegs(cu);
+ LoadValueDirectFixed(cu, rl_src, rAX); // Get obj
+ LockCallTemps(cu); // Prepare for explicit register usage
+ GenNullCheck(cu, rl_src.s_reg_low, rAX, opt_flags);
// If lock is held by the current thread, clear it to quickly release it
// TODO: clear hash state?
- NewLIR2(cUnit, kX86Mov32RT, rDX, Thread::ThinLockIdOffset().Int32Value());
- NewLIR2(cUnit, kX86Sal32RI, rDX, LW_LOCK_OWNER_SHIFT);
- NewLIR3(cUnit, kX86Mov32RM, rCX, rAX, Object::MonitorOffset().Int32Value());
- OpRegReg(cUnit, kOpSub, rCX, rDX);
- LIR* branch = NewLIR2(cUnit, kX86Jcc8, 0, kX86CondNe);
- NewLIR3(cUnit, kX86Mov32MR, rAX, Object::MonitorOffset().Int32Value(), rCX);
- LIR* branch2 = NewLIR1(cUnit, kX86Jmp8, 0);
- branch->target = NewLIR0(cUnit, kPseudoTargetLabel);
+ NewLIR2(cu, kX86Mov32RT, rDX, Thread::ThinLockIdOffset().Int32Value());
+ NewLIR2(cu, kX86Sal32RI, rDX, LW_LOCK_OWNER_SHIFT);
+ NewLIR3(cu, kX86Mov32RM, rCX, rAX, Object::MonitorOffset().Int32Value());
+ OpRegReg(cu, kOpSub, rCX, rDX);
+ LIR* branch = NewLIR2(cu, kX86Jcc8, 0, kX86CondNe);
+ NewLIR3(cu, kX86Mov32MR, rAX, Object::MonitorOffset().Int32Value(), rCX);
+ LIR* branch2 = NewLIR1(cu, kX86Jmp8, 0);
+ branch->target = NewLIR0(cu, kPseudoTargetLabel);
// Otherwise, go the expensive route - UnlockObjectFromCode(obj);
- CallRuntimeHelperReg(cUnit, ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rAX, true);
- branch2->target = NewLIR0(cUnit, kPseudoTargetLabel);
+ CallRuntimeHelperReg(cu, ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rAX, true);
+ branch2->target = NewLIR0(cu, kPseudoTargetLabel);
}
/*
* Mark garbage collection card. Skip if the value we're storing is null.
*/
-void MarkGCCard(CompilationUnit* cUnit, int valReg, int tgtAddrReg)
+void MarkGCCard(CompilationUnit* cu, int val_reg, int tgt_addr_reg)
{
- int regCardBase = AllocTemp(cUnit);
- int regCardNo = AllocTemp(cUnit);
- LIR* branchOver = OpCmpImmBranch(cUnit, kCondEq, valReg, 0, NULL);
- NewLIR2(cUnit, kX86Mov32RT, regCardBase, Thread::CardTableOffset().Int32Value());
- OpRegRegImm(cUnit, kOpLsr, regCardNo, tgtAddrReg, CardTable::kCardShift);
- StoreBaseIndexed(cUnit, regCardBase, regCardNo, regCardBase, 0,
+ int reg_card_base = AllocTemp(cu);
+ int reg_card_no = AllocTemp(cu);
+ LIR* branch_over = OpCmpImmBranch(cu, kCondEq, val_reg, 0, NULL);
+ NewLIR2(cu, kX86Mov32RT, reg_card_base, Thread::CardTableOffset().Int32Value());
+ OpRegRegImm(cu, kOpLsr, reg_card_no, tgt_addr_reg, CardTable::kCardShift);
+ StoreBaseIndexed(cu, reg_card_base, reg_card_no, reg_card_base, 0,
kUnsignedByte);
- LIR* target = NewLIR0(cUnit, kPseudoTargetLabel);
- branchOver->target = target;
- FreeTemp(cUnit, regCardBase);
- FreeTemp(cUnit, regCardNo);
+ LIR* target = NewLIR0(cu, kPseudoTargetLabel);
+ branch_over->target = target;
+ FreeTemp(cu, reg_card_base);
+ FreeTemp(cu, reg_card_no);
}
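
MarkGCCard is the usual generational write barrier: shift the target address right by kCardShift to index the card table and store a byte to dirty the card, skipping the store entirely when the value written is null. In plain C++, assuming the same biased card-base convention the code above uses:

#include <cstdint>

// The store value doubles as the dirty marker: the code above reuses
// reg_card_base as the byte written, so the card ends up holding the
// low byte of the card-table base.
static void MarkCard(uint8_t* card_base, uintptr_t val,
                     uintptr_t tgt_addr, int card_shift) {
  if (val == 0) {
    return;  // null stores need no barrier
  }
  card_base[tgt_addr >> card_shift] =
      static_cast<uint8_t>(reinterpret_cast<uintptr_t>(card_base));
}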
-void GenEntrySequence(CompilationUnit* cUnit, RegLocation* ArgLocs,
- RegLocation rlMethod)
+void GenEntrySequence(CompilationUnit* cu, RegLocation* arg_locs,
+ RegLocation rl_method)
{
/*
* On entry, rX86_ARG0, rX86_ARG1, rX86_ARG2 are live. Let the register
@@ -226,54 +226,54 @@
* expanding the frame or flushing. This leaves the utility
* code with no spare temps.
*/
- LockTemp(cUnit, rX86_ARG0);
- LockTemp(cUnit, rX86_ARG1);
- LockTemp(cUnit, rX86_ARG2);
+ LockTemp(cu, rX86_ARG0);
+ LockTemp(cu, rX86_ARG1);
+ LockTemp(cu, rX86_ARG2);
/* Build frame, return address already on stack */
- OpRegImm(cUnit, kOpSub, rX86_SP, cUnit->frameSize - 4);
+ OpRegImm(cu, kOpSub, rX86_SP, cu->frame_size - 4);
/*
* We can safely skip the stack overflow check if we're
* a leaf *and* our frame size < fudge factor.
*/
- bool skipOverflowCheck = ((cUnit->attrs & METHOD_IS_LEAF) &&
- (static_cast<size_t>(cUnit->frameSize) <
+ bool skip_overflow_check = ((cu->attrs & METHOD_IS_LEAF) &&
+ (static_cast<size_t>(cu->frame_size) <
Thread::kStackOverflowReservedBytes));
- NewLIR0(cUnit, kPseudoMethodEntry);
+ NewLIR0(cu, kPseudoMethodEntry);
/* Spill core callee saves */
- SpillCoreRegs(cUnit);
+ SpillCoreRegs(cu);
/* NOTE: promotion of FP regs currently unsupported, thus no FP spill */
- DCHECK_EQ(cUnit->numFPSpills, 0);
- if (!skipOverflowCheck) {
+ DCHECK_EQ(cu->num_fp_spills, 0);
+ if (!skip_overflow_check) {
// cmp rX86_SP, fs:[stack_end_]; jcc throw_launchpad
- LIR* tgt = RawLIR(cUnit, 0, kPseudoThrowTarget, kThrowStackOverflow, 0, 0, 0, 0);
- OpRegThreadMem(cUnit, kOpCmp, rX86_SP, Thread::StackEndOffset().Int32Value());
- OpCondBranch(cUnit, kCondUlt, tgt);
+ LIR* tgt = RawLIR(cu, 0, kPseudoThrowTarget, kThrowStackOverflow, 0, 0, 0, 0);
+ OpRegThreadMem(cu, kOpCmp, rX86_SP, Thread::StackEndOffset().Int32Value());
+ OpCondBranch(cu, kCondUlt, tgt);
// Remember branch target - will process later
- InsertGrowableList(cUnit, &cUnit->throwLaunchpads, reinterpret_cast<uintptr_t>(tgt));
+ InsertGrowableList(cu, &cu->throw_launchpads, reinterpret_cast<uintptr_t>(tgt));
}
- FlushIns(cUnit, ArgLocs, rlMethod);
+  FlushIns(cu, arg_locs, rl_method);
- FreeTemp(cUnit, rX86_ARG0);
- FreeTemp(cUnit, rX86_ARG1);
- FreeTemp(cUnit, rX86_ARG2);
+ FreeTemp(cu, rX86_ARG0);
+ FreeTemp(cu, rX86_ARG1);
+ FreeTemp(cu, rX86_ARG2);
}
-void GenExitSequence(CompilationUnit* cUnit) {
+void GenExitSequence(CompilationUnit* cu) {
/*
* In the exit path, rX86_RET0/rX86_RET1 are live - make sure they aren't
* allocated by the register utilities as temps.
*/
- LockTemp(cUnit, rX86_RET0);
- LockTemp(cUnit, rX86_RET1);
+ LockTemp(cu, rX86_RET0);
+ LockTemp(cu, rX86_RET1);
- NewLIR0(cUnit, kPseudoMethodExit);
- UnSpillCoreRegs(cUnit);
+ NewLIR0(cu, kPseudoMethodExit);
+ UnSpillCoreRegs(cu);
/* Remove frame except for return address */
- OpRegImm(cUnit, kOpAdd, rX86_SP, cUnit->frameSize - 4);
- NewLIR0(cUnit, kX86Ret);
+ OpRegImm(cu, kOpAdd, rX86_SP, cu->frame_size - 4);
+ NewLIR0(cu, kX86Ret);
}
} // namespace art
diff --git a/src/compiler/codegen/x86/fp_x86.cc b/src/compiler/codegen/x86/fp_x86.cc
index 9ab80f4..def4896 100644
--- a/src/compiler/codegen/x86/fp_x86.cc
+++ b/src/compiler/codegen/x86/fp_x86.cc
@@ -20,10 +20,10 @@
namespace art {
-bool GenArithOpFloat(CompilationUnit *cUnit, Instruction::Code opcode,
- RegLocation rlDest, RegLocation rlSrc1, RegLocation rlSrc2) {
+bool GenArithOpFloat(CompilationUnit *cu, Instruction::Code opcode,
+ RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
X86OpCode op = kX86Nop;
- RegLocation rlResult;
+ RegLocation rl_result;
/*
* Don't attempt to optimize register usage since these opcodes call out to
@@ -49,31 +49,31 @@
case Instruction::NEG_FLOAT:
case Instruction::REM_FLOAT_2ADDR:
case Instruction::REM_FLOAT:
- return GenArithOpFloatPortable(cUnit, opcode, rlDest, rlSrc1, rlSrc2);
+ return GenArithOpFloatPortable(cu, opcode, rl_dest, rl_src1, rl_src2);
default:
return true;
}
- rlSrc1 = LoadValue(cUnit, rlSrc1, kFPReg);
- rlSrc2 = LoadValue(cUnit, rlSrc2, kFPReg);
- rlResult = EvalLoc(cUnit, rlDest, kFPReg, true);
- int rDest = rlResult.lowReg;
- int rSrc1 = rlSrc1.lowReg;
- int rSrc2 = rlSrc2.lowReg;
- if (rDest == rSrc2) {
- rSrc2 = AllocTempFloat(cUnit);
- OpRegCopy(cUnit, rSrc2, rDest);
+ rl_src1 = LoadValue(cu, rl_src1, kFPReg);
+ rl_src2 = LoadValue(cu, rl_src2, kFPReg);
+ rl_result = EvalLoc(cu, rl_dest, kFPReg, true);
+ int r_dest = rl_result.low_reg;
+ int r_src1 = rl_src1.low_reg;
+ int r_src2 = rl_src2.low_reg;
+ if (r_dest == r_src2) {
+ r_src2 = AllocTempFloat(cu);
+ OpRegCopy(cu, r_src2, r_dest);
}
- OpRegCopy(cUnit, rDest, rSrc1);
- NewLIR2(cUnit, op, rDest, rSrc2);
- StoreValue(cUnit, rlDest, rlResult);
+ OpRegCopy(cu, r_dest, r_src1);
+ NewLIR2(cu, op, r_dest, r_src2);
+ StoreValue(cu, rl_dest, rl_result);
return false;
}
-bool GenArithOpDouble(CompilationUnit *cUnit, Instruction::Code opcode,
- RegLocation rlDest, RegLocation rlSrc1, RegLocation rlSrc2) {
+bool GenArithOpDouble(CompilationUnit *cu, Instruction::Code opcode,
+ RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
X86OpCode op = kX86Nop;
- RegLocation rlResult;
+ RegLocation rl_result;
switch (opcode) {
case Instruction::ADD_DOUBLE_2ADDR:
@@ -95,36 +95,36 @@
case Instruction::NEG_DOUBLE:
case Instruction::REM_DOUBLE_2ADDR:
case Instruction::REM_DOUBLE:
- return GenArithOpDoublePortable(cUnit, opcode, rlDest, rlSrc1, rlSrc2);
+ return GenArithOpDoublePortable(cu, opcode, rl_dest, rl_src1, rl_src2);
default:
return true;
}
- rlSrc1 = LoadValueWide(cUnit, rlSrc1, kFPReg);
- DCHECK(rlSrc1.wide);
- rlSrc2 = LoadValueWide(cUnit, rlSrc2, kFPReg);
- DCHECK(rlSrc2.wide);
- rlResult = EvalLoc(cUnit, rlDest, kFPReg, true);
- DCHECK(rlDest.wide);
- DCHECK(rlResult.wide);
- int rDest = S2d(rlResult.lowReg, rlResult.highReg);
- int rSrc1 = S2d(rlSrc1.lowReg, rlSrc1.highReg);
- int rSrc2 = S2d(rlSrc2.lowReg, rlSrc2.highReg);
- if (rDest == rSrc2) {
- rSrc2 = AllocTempDouble(cUnit) | X86_FP_DOUBLE;
- OpRegCopy(cUnit, rSrc2, rDest);
+ rl_src1 = LoadValueWide(cu, rl_src1, kFPReg);
+ DCHECK(rl_src1.wide);
+ rl_src2 = LoadValueWide(cu, rl_src2, kFPReg);
+ DCHECK(rl_src2.wide);
+ rl_result = EvalLoc(cu, rl_dest, kFPReg, true);
+ DCHECK(rl_dest.wide);
+ DCHECK(rl_result.wide);
+ int r_dest = S2d(rl_result.low_reg, rl_result.high_reg);
+ int r_src1 = S2d(rl_src1.low_reg, rl_src1.high_reg);
+ int r_src2 = S2d(rl_src2.low_reg, rl_src2.high_reg);
+ if (r_dest == r_src2) {
+ r_src2 = AllocTempDouble(cu) | X86_FP_DOUBLE;
+ OpRegCopy(cu, r_src2, r_dest);
}
- OpRegCopy(cUnit, rDest, rSrc1);
- NewLIR2(cUnit, op, rDest, rSrc2);
- StoreValueWide(cUnit, rlDest, rlResult);
+ OpRegCopy(cu, r_dest, r_src1);
+ NewLIR2(cu, op, r_dest, r_src2);
+ StoreValueWide(cu, rl_dest, rl_result);
return false;
}
-bool GenConversion(CompilationUnit *cUnit, Instruction::Code opcode,
- RegLocation rlDest, RegLocation rlSrc) {
+bool GenConversion(CompilationUnit *cu, Instruction::Code opcode,
+ RegLocation rl_dest, RegLocation rl_src) {
RegisterClass rcSrc = kFPReg;
X86OpCode op = kX86Nop;
- int srcReg;
- RegLocation rlResult;
+ int src_reg;
+ RegLocation rl_result;
switch (opcode) {
case Instruction::INT_TO_FLOAT:
rcSrc = kCoreReg;
@@ -143,45 +143,45 @@
op = kX86Cvtsi2sdRR;
break;
case Instruction::FLOAT_TO_INT: {
- rlSrc = LoadValue(cUnit, rlSrc, kFPReg);
- srcReg = rlSrc.lowReg;
- ClobberSReg(cUnit, rlDest.sRegLow);
- rlResult = EvalLoc(cUnit, rlDest, kCoreReg, true);
- int tempReg = AllocTempFloat(cUnit);
+ rl_src = LoadValue(cu, rl_src, kFPReg);
+ src_reg = rl_src.low_reg;
+ ClobberSReg(cu, rl_dest.s_reg_low);
+ rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ int temp_reg = AllocTempFloat(cu);
- LoadConstant(cUnit, rlResult.lowReg, 0x7fffffff);
- NewLIR2(cUnit, kX86Cvtsi2ssRR, tempReg, rlResult.lowReg);
- NewLIR2(cUnit, kX86ComissRR, srcReg, tempReg);
- LIR* branchPosOverflow = NewLIR2(cUnit, kX86Jcc8, 0, kX86CondA);
- LIR* branchNaN = NewLIR2(cUnit, kX86Jcc8, 0, kX86CondP);
- NewLIR2(cUnit, kX86Cvttss2siRR, rlResult.lowReg, srcReg);
- LIR* branchNormal = NewLIR1(cUnit, kX86Jmp8, 0);
- branchNaN->target = NewLIR0(cUnit, kPseudoTargetLabel);
- NewLIR2(cUnit, kX86Xor32RR, rlResult.lowReg, rlResult.lowReg);
- branchPosOverflow->target = NewLIR0(cUnit, kPseudoTargetLabel);
- branchNormal->target = NewLIR0(cUnit, kPseudoTargetLabel);
- StoreValue(cUnit, rlDest, rlResult);
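+ // cvttss2si returns the "integer indefinite" 0x80000000 on NaN or overflow, so
+ // fix up by hand: NaN (PF set after the compare) yields 0, and positive
+ // overflow keeps the preloaded 0x7fffffff, matching Dalvik's float-to-int rules.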
+ LoadConstant(cu, rl_result.low_reg, 0x7fffffff);
+ NewLIR2(cu, kX86Cvtsi2ssRR, temp_reg, rl_result.low_reg);
+ NewLIR2(cu, kX86ComissRR, src_reg, temp_reg);
+ LIR* branch_pos_overflow = NewLIR2(cu, kX86Jcc8, 0, kX86CondA);
+ LIR* branch_nan = NewLIR2(cu, kX86Jcc8, 0, kX86CondP);
+ NewLIR2(cu, kX86Cvttss2siRR, rl_result.low_reg, src_reg);
+ LIR* branch_normal = NewLIR1(cu, kX86Jmp8, 0);
+ branch_nan->target = NewLIR0(cu, kPseudoTargetLabel);
+ NewLIR2(cu, kX86Xor32RR, rl_result.low_reg, rl_result.low_reg);
+ branch_pos_overflow->target = NewLIR0(cu, kPseudoTargetLabel);
+ branch_normal->target = NewLIR0(cu, kPseudoTargetLabel);
+ StoreValue(cu, rl_dest, rl_result);
return false;
}
case Instruction::DOUBLE_TO_INT: {
- rlSrc = LoadValueWide(cUnit, rlSrc, kFPReg);
- srcReg = rlSrc.lowReg;
- ClobberSReg(cUnit, rlDest.sRegLow);
- rlResult = EvalLoc(cUnit, rlDest, kCoreReg, true);
- int tempReg = AllocTempDouble(cUnit) | X86_FP_DOUBLE;
+ rl_src = LoadValueWide(cu, rl_src, kFPReg);
+ src_reg = rl_src.low_reg;
+ ClobberSReg(cu, rl_dest.s_reg_low);
+ rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ int temp_reg = AllocTempDouble(cu) | X86_FP_DOUBLE;
- LoadConstant(cUnit, rlResult.lowReg, 0x7fffffff);
- NewLIR2(cUnit, kX86Cvtsi2sdRR, tempReg, rlResult.lowReg);
- NewLIR2(cUnit, kX86ComisdRR, srcReg, tempReg);
- LIR* branchPosOverflow = NewLIR2(cUnit, kX86Jcc8, 0, kX86CondA);
- LIR* branchNaN = NewLIR2(cUnit, kX86Jcc8, 0, kX86CondP);
- NewLIR2(cUnit, kX86Cvttsd2siRR, rlResult.lowReg, srcReg);
- LIR* branchNormal = NewLIR1(cUnit, kX86Jmp8, 0);
- branchNaN->target = NewLIR0(cUnit, kPseudoTargetLabel);
- NewLIR2(cUnit, kX86Xor32RR, rlResult.lowReg, rlResult.lowReg);
- branchPosOverflow->target = NewLIR0(cUnit, kPseudoTargetLabel);
- branchNormal->target = NewLIR0(cUnit, kPseudoTargetLabel);
- StoreValue(cUnit, rlDest, rlResult);
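+ // Same NaN/positive-overflow fixup as FLOAT_TO_INT above, using the
+ // double-precision compare and convert.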
+ LoadConstant(cu, rl_result.low_reg, 0x7fffffff);
+ NewLIR2(cu, kX86Cvtsi2sdRR, temp_reg, rl_result.low_reg);
+ NewLIR2(cu, kX86ComisdRR, src_reg, temp_reg);
+ LIR* branch_pos_overflow = NewLIR2(cu, kX86Jcc8, 0, kX86CondA);
+ LIR* branch_nan = NewLIR2(cu, kX86Jcc8, 0, kX86CondP);
+ NewLIR2(cu, kX86Cvttsd2siRR, rl_result.low_reg, src_reg);
+ LIR* branch_normal = NewLIR1(cu, kX86Jmp8, 0);
+ branch_nan->target = NewLIR0(cu, kPseudoTargetLabel);
+ NewLIR2(cu, kX86Xor32RR, rl_result.low_reg, rl_result.low_reg);
+ branch_pos_overflow->target = NewLIR0(cu, kPseudoTargetLabel);
+ branch_normal->target = NewLIR0(cu, kPseudoTargetLabel);
+ StoreValue(cu, rl_dest, rl_result);
return false;
}
case Instruction::LONG_TO_DOUBLE:
@@ -189,140 +189,140 @@
// TODO: inline by using memory as a 64-bit source. Be careful about promoted registers.
case Instruction::FLOAT_TO_LONG:
case Instruction::DOUBLE_TO_LONG:
- return GenConversionPortable(cUnit, opcode, rlDest, rlSrc);
+ return GenConversionPortable(cu, opcode, rl_dest, rl_src);
default:
return true;
}
- if (rlSrc.wide) {
- rlSrc = LoadValueWide(cUnit, rlSrc, rcSrc);
- srcReg = S2d(rlSrc.lowReg, rlSrc.highReg);
+ if (rl_src.wide) {
+ rl_src = LoadValueWide(cu, rl_src, rcSrc);
+ src_reg = S2d(rl_src.low_reg, rl_src.high_reg);
} else {
- rlSrc = LoadValue(cUnit, rlSrc, rcSrc);
- srcReg = rlSrc.lowReg;
+ rl_src = LoadValue(cu, rl_src, rcSrc);
+ src_reg = rl_src.low_reg;
}
- if (rlDest.wide) {
- rlResult = EvalLoc(cUnit, rlDest, kFPReg, true);
- NewLIR2(cUnit, op, S2d(rlResult.lowReg, rlResult.highReg), srcReg);
- StoreValueWide(cUnit, rlDest, rlResult);
+ if (rl_dest.wide) {
+ rl_result = EvalLoc(cu, rl_dest, kFPReg, true);
+ NewLIR2(cu, op, S2d(rl_result.low_reg, rl_result.high_reg), src_reg);
+ StoreValueWide(cu, rl_dest, rl_result);
} else {
- rlResult = EvalLoc(cUnit, rlDest, kFPReg, true);
- NewLIR2(cUnit, op, rlResult.lowReg, srcReg);
- StoreValue(cUnit, rlDest, rlResult);
+ rl_result = EvalLoc(cu, rl_dest, kFPReg, true);
+ NewLIR2(cu, op, rl_result.low_reg, src_reg);
+ StoreValue(cu, rl_dest, rl_result);
}
return false;
}
-bool GenCmpFP(CompilationUnit *cUnit, Instruction::Code code, RegLocation rlDest,
- RegLocation rlSrc1, RegLocation rlSrc2) {
+bool GenCmpFP(CompilationUnit *cu, Instruction::Code code, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2) {
bool single = (code == Instruction::CMPL_FLOAT) || (code == Instruction::CMPG_FLOAT);
- bool unorderedGt = (code == Instruction::CMPG_DOUBLE) || (code == Instruction::CMPG_FLOAT);
- int srcReg1;
- int srcReg2;
+ bool unordered_gt = (code == Instruction::CMPG_DOUBLE) || (code == Instruction::CMPG_FLOAT);
+ int src_reg1;
+ int src_reg2;
if (single) {
- rlSrc1 = LoadValue(cUnit, rlSrc1, kFPReg);
- srcReg1 = rlSrc1.lowReg;
- rlSrc2 = LoadValue(cUnit, rlSrc2, kFPReg);
- srcReg2 = rlSrc2.lowReg;
+ rl_src1 = LoadValue(cu, rl_src1, kFPReg);
+ src_reg1 = rl_src1.low_reg;
+ rl_src2 = LoadValue(cu, rl_src2, kFPReg);
+ src_reg2 = rl_src2.low_reg;
} else {
- rlSrc1 = LoadValueWide(cUnit, rlSrc1, kFPReg);
- srcReg1 = S2d(rlSrc1.lowReg, rlSrc1.highReg);
- rlSrc2 = LoadValueWide(cUnit, rlSrc2, kFPReg);
- srcReg2 = S2d(rlSrc2.lowReg, rlSrc2.highReg);
+ rl_src1 = LoadValueWide(cu, rl_src1, kFPReg);
+ src_reg1 = S2d(rl_src1.low_reg, rl_src1.high_reg);
+ rl_src2 = LoadValueWide(cu, rl_src2, kFPReg);
+ src_reg2 = S2d(rl_src2.low_reg, rl_src2.high_reg);
}
- ClobberSReg(cUnit, rlDest.sRegLow);
- RegLocation rlResult = EvalLoc(cUnit, rlDest, kCoreReg, true);
- LoadConstantNoClobber(cUnit, rlResult.lowReg, unorderedGt ? 1 : 0);
+ ClobberSReg(cu, rl_dest.s_reg_low);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ LoadConstantNoClobber(cu, rl_result.low_reg, unordered_gt ? 1 : 0);
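+ // ucomis sets CF when src1 < src2 (or unordered); set(A) yields 1 only for >,
+ // and the trailing sbb subtracts CF, producing the -1/0/1 Dalvik expects.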
if (single) {
- NewLIR2(cUnit, kX86UcomissRR, srcReg1, srcReg2);
+ NewLIR2(cu, kX86UcomissRR, src_reg1, src_reg2);
} else {
- NewLIR2(cUnit, kX86UcomisdRR, srcReg1, srcReg2);
+ NewLIR2(cu, kX86UcomisdRR, src_reg1, src_reg2);
}
LIR* branch = NULL;
- if (unorderedGt) {
- branch = NewLIR2(cUnit, kX86Jcc8, 0, kX86CondPE);
+ if (unordered_gt) {
+ branch = NewLIR2(cu, kX86Jcc8, 0, kX86CondPE);
}
// If the result reg can't be byte accessed, use a jump and move instead of a set.
- if (rlResult.lowReg >= 4) {
+ if (rl_result.low_reg >= 4) {
LIR* branch2 = NULL;
- if (unorderedGt) {
- branch2 = NewLIR2(cUnit, kX86Jcc8, 0, kX86CondA);
- NewLIR2(cUnit, kX86Mov32RI, rlResult.lowReg, 0x0);
+ if (unordered_gt) {
+ branch2 = NewLIR2(cu, kX86Jcc8, 0, kX86CondA);
+ NewLIR2(cu, kX86Mov32RI, rl_result.low_reg, 0x0);
} else {
- branch2 = NewLIR2(cUnit, kX86Jcc8, 0, kX86CondBe);
- NewLIR2(cUnit, kX86Mov32RI, rlResult.lowReg, 0x1);
+ branch2 = NewLIR2(cu, kX86Jcc8, 0, kX86CondBe);
+ NewLIR2(cu, kX86Mov32RI, rl_result.low_reg, 0x1);
}
- branch2->target = NewLIR0(cUnit, kPseudoTargetLabel);
+ branch2->target = NewLIR0(cu, kPseudoTargetLabel);
} else {
- NewLIR2(cUnit, kX86Set8R, rlResult.lowReg, kX86CondA /* above - unsigned > */);
+ NewLIR2(cu, kX86Set8R, rl_result.low_reg, kX86CondA /* above - unsigned > */);
}
- NewLIR2(cUnit, kX86Sbb32RI, rlResult.lowReg, 0);
- if (unorderedGt) {
- branch->target = NewLIR0(cUnit, kPseudoTargetLabel);
+ NewLIR2(cu, kX86Sbb32RI, rl_result.low_reg, 0);
+ if (unordered_gt) {
+ branch->target = NewLIR0(cu, kPseudoTargetLabel);
}
- StoreValue(cUnit, rlDest, rlResult);
+ StoreValue(cu, rl_dest, rl_result);
return false;
}
-void GenFusedFPCmpBranch(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir,
- bool gtBias, bool isDouble) {
- LIR* labelList = cUnit->blockLabelList;
- LIR* taken = &labelList[bb->taken->id];
- LIR* notTaken = &labelList[bb->fallThrough->id];
+void GenFusedFPCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
+ bool gt_bias, bool is_double) {
+ LIR* label_list = cu->block_label_list;
+ LIR* taken = &label_list[bb->taken->id];
+ LIR* not_taken = &label_list[bb->fall_through->id];
LIR* branch = NULL;
- RegLocation rlSrc1;
- RegLocation rlSrc2;
- if (isDouble) {
- rlSrc1 = GetSrcWide(cUnit, mir, 0);
- rlSrc2 = GetSrcWide(cUnit, mir, 2);
- rlSrc1 = LoadValueWide(cUnit, rlSrc1, kFPReg);
- rlSrc2 = LoadValueWide(cUnit, rlSrc2, kFPReg);
- NewLIR2(cUnit, kX86UcomisdRR, S2d(rlSrc1.lowReg, rlSrc1.highReg),
- S2d(rlSrc2.lowReg, rlSrc2.highReg));
+ RegLocation rl_src1;
+ RegLocation rl_src2;
+ if (is_double) {
+ rl_src1 = GetSrcWide(cu, mir, 0);
+ rl_src2 = GetSrcWide(cu, mir, 2);
+ rl_src1 = LoadValueWide(cu, rl_src1, kFPReg);
+ rl_src2 = LoadValueWide(cu, rl_src2, kFPReg);
+ NewLIR2(cu, kX86UcomisdRR, S2d(rl_src1.low_reg, rl_src1.high_reg),
+ S2d(rl_src2.low_reg, rl_src2.high_reg));
} else {
- rlSrc1 = GetSrc(cUnit, mir, 0);
- rlSrc2 = GetSrc(cUnit, mir, 1);
- rlSrc1 = LoadValue(cUnit, rlSrc1, kFPReg);
- rlSrc2 = LoadValue(cUnit, rlSrc2, kFPReg);
- NewLIR2(cUnit, kX86UcomissRR, rlSrc1.lowReg, rlSrc2.lowReg);
+ rl_src1 = GetSrc(cu, mir, 0);
+ rl_src2 = GetSrc(cu, mir, 1);
+ rl_src1 = LoadValue(cu, rl_src1, kFPReg);
+ rl_src2 = LoadValue(cu, rl_src2, kFPReg);
+ NewLIR2(cu, kX86UcomissRR, rl_src1.low_reg, rl_src2.low_reg);
}
ConditionCode ccode = static_cast<ConditionCode>(mir->dalvikInsn.arg[0]);
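+ // kX86CondPE (parity even) fires only on an unordered compare, i.e. a NaN
+ // operand; the cases below route that outcome according to gt_bias.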
switch (ccode) {
case kCondEq:
- if (!gtBias) {
- branch = NewLIR2(cUnit, kX86Jcc8, 0, kX86CondPE);
- branch->target = notTaken;
+ if (!gt_bias) {
+ branch = NewLIR2(cu, kX86Jcc8, 0, kX86CondPE);
+ branch->target = not_taken;
}
break;
case kCondNe:
- if (!gtBias) {
- branch = NewLIR2(cUnit, kX86Jcc8, 0, kX86CondPE);
+ if (!gt_bias) {
+ branch = NewLIR2(cu, kX86Jcc8, 0, kX86CondPE);
branch->target = taken;
}
break;
case kCondLt:
- if (gtBias) {
- branch = NewLIR2(cUnit, kX86Jcc8, 0, kX86CondPE);
- branch->target = notTaken;
+ if (gt_bias) {
+ branch = NewLIR2(cu, kX86Jcc8, 0, kX86CondPE);
+ branch->target = not_taken;
}
ccode = kCondCs;
break;
case kCondLe:
- if (gtBias) {
- branch = NewLIR2(cUnit, kX86Jcc8, 0, kX86CondPE);
- branch->target = notTaken;
+ if (gt_bias) {
+ branch = NewLIR2(cu, kX86Jcc8, 0, kX86CondPE);
+ branch->target = not_taken;
}
ccode = kCondLs;
break;
case kCondGt:
- if (gtBias) {
- branch = NewLIR2(cUnit, kX86Jcc8, 0, kX86CondPE);
+ if (gt_bias) {
+ branch = NewLIR2(cu, kX86Jcc8, 0, kX86CondPE);
branch->target = taken;
}
ccode = kCondHi;
break;
case kCondGe:
- if (gtBias) {
- branch = NewLIR2(cUnit, kX86Jcc8, 0, kX86CondPE);
+ if (gt_bias) {
+ branch = NewLIR2(cu, kX86Jcc8, 0, kX86CondPE);
branch->target = taken;
}
ccode = kCondCc;
@@ -330,30 +330,30 @@
default:
LOG(FATAL) << "Unexpected ccode: " << ccode;
}
- OpCondBranch(cUnit, ccode, taken);
+ OpCondBranch(cu, ccode, taken);
}
-void GenNegFloat(CompilationUnit *cUnit, RegLocation rlDest, RegLocation rlSrc)
+void GenNegFloat(CompilationUnit *cu, RegLocation rl_dest, RegLocation rl_src)
{
- RegLocation rlResult;
- rlSrc = LoadValue(cUnit, rlSrc, kCoreReg);
- rlResult = EvalLoc(cUnit, rlDest, kCoreReg, true);
- OpRegRegImm(cUnit, kOpAdd, rlResult.lowReg, rlSrc.lowReg, 0x80000000);
- StoreValue(cUnit, rlDest, rlResult);
+ RegLocation rl_result;
+ rl_src = LoadValue(cu, rl_src, kCoreReg);
+ rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
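+ // Adding 0x80000000 flips only the IEEE sign bit, negating the float in a core reg.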
+ OpRegRegImm(cu, kOpAdd, rl_result.low_reg, rl_src.low_reg, 0x80000000);
+ StoreValue(cu, rl_dest, rl_result);
}
-void GenNegDouble(CompilationUnit *cUnit, RegLocation rlDest, RegLocation rlSrc)
+void GenNegDouble(CompilationUnit *cu, RegLocation rl_dest, RegLocation rl_src)
{
- RegLocation rlResult;
- rlSrc = LoadValueWide(cUnit, rlSrc, kCoreReg);
- rlResult = EvalLoc(cUnit, rlDest, kCoreReg, true);
- OpRegRegImm(cUnit, kOpAdd, rlResult.highReg, rlSrc.highReg, 0x80000000);
- OpRegCopy(cUnit, rlResult.lowReg, rlSrc.lowReg);
- StoreValueWide(cUnit, rlDest, rlResult);
+ RegLocation rl_result;
+ rl_src = LoadValueWide(cu, rl_src, kCoreReg);
+ rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
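+ // Flip the sign bit in the high word; the low word is copied through unchanged.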
+ OpRegRegImm(cu, kOpAdd, rl_result.high_reg, rl_src.high_reg, 0x80000000);
+ OpRegCopy(cu, rl_result.low_reg, rl_src.low_reg);
+ StoreValueWide(cu, rl_dest, rl_result);
}
-bool GenInlinedSqrt(CompilationUnit* cUnit, CallInfo* info) {
- DCHECK_NE(cUnit->instructionSet, kThumb2);
+bool GenInlinedSqrt(CompilationUnit* cu, CallInfo* info) {
+ DCHECK_NE(cu->instruction_set, kThumb2);
return false;
}
diff --git a/src/compiler/codegen/x86/int_x86.cc b/src/compiler/codegen/x86/int_x86.cc
index c501bc3..f6eaaf5 100644
--- a/src/compiler/codegen/x86/int_x86.cc
+++ b/src/compiler/codegen/x86/int_x86.cc
@@ -25,15 +25,15 @@
/*
- * Perform register memory operation.
+ * Compare a register with a memory operand and branch to a throw launchpad.
*/
-LIR* GenRegMemCheck(CompilationUnit* cUnit, ConditionCode cCode,
+LIR* GenRegMemCheck(CompilationUnit* cu, ConditionCode c_code,
int reg1, int base, int offset, ThrowKind kind)
{
- LIR* tgt = RawLIR(cUnit, 0, kPseudoThrowTarget, kind,
- cUnit->currentDalvikOffset, reg1, base, offset);
- OpRegMem(cUnit, kOpCmp, reg1, base, offset);
- LIR* branch = OpCondBranch(cUnit, cCode, tgt);
+ LIR* tgt = RawLIR(cu, 0, kPseudoThrowTarget, kind,
+ cu->current_dalvik_offset, reg1, base, offset);
+ OpRegMem(cu, kOpCmp, reg1, base, offset);
+ LIR* branch = OpCondBranch(cu, c_code, tgt);
// Remember branch target - will process later
- InsertGrowableList(cUnit, &cUnit->throwLaunchpads, reinterpret_cast<uintptr_t>(tgt));
+ InsertGrowableList(cu, &cu->throw_launchpads, reinterpret_cast<uintptr_t>(tgt));
return branch;
}
@@ -53,25 +53,25 @@
* finish:
*
*/
-void GenCmpLong(CompilationUnit* cUnit, RegLocation rlDest,
- RegLocation rlSrc1, RegLocation rlSrc2)
+void GenCmpLong(CompilationUnit* cu, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2)
{
- FlushAllRegs(cUnit);
- LockCallTemps(cUnit); // Prepare for explicit register usage
- LoadValueDirectWideFixed(cUnit, rlSrc1, r0, r1);
- LoadValueDirectWideFixed(cUnit, rlSrc2, r2, r3);
+ FlushAllRegs(cu);
+ LockCallTemps(cu); // Prepare for explicit register usage
+ LoadValueDirectWideFixed(cu, rl_src1, r0, r1);
+ LoadValueDirectWideFixed(cu, rl_src2, r2, r3);
// Compute (r1:r0) = (r1:r0) - (r3:r2)
- OpRegReg(cUnit, kOpSub, r0, r2); // r0 = r0 - r2
- OpRegReg(cUnit, kOpSbc, r1, r3); // r1 = r1 - r3 - CF
- NewLIR2(cUnit, kX86Set8R, r2, kX86CondL); // r2 = (r1:r0) < (r3:r2) ? 1 : 0
- NewLIR2(cUnit, kX86Movzx8RR, r2, r2);
- OpReg(cUnit, kOpNeg, r2); // r2 = -r2
- OpRegReg(cUnit, kOpOr, r0, r1); // r0 = high | low - sets ZF
- NewLIR2(cUnit, kX86Set8R, r0, kX86CondNz); // r0 = (r1:r0) != (r3:r2) ? 1 : 0
- NewLIR2(cUnit, kX86Movzx8RR, r0, r0);
- OpRegReg(cUnit, kOpOr, r0, r2); // r0 = r0 | r2
- RegLocation rlResult = LocCReturn();
- StoreValue(cUnit, rlDest, rlResult);
+ OpRegReg(cu, kOpSub, r0, r2); // r0 = r0 - r2
+ OpRegReg(cu, kOpSbc, r1, r3); // r1 = r1 - r3 - CF
+ NewLIR2(cu, kX86Set8R, r2, kX86CondL); // r2 = (r1:r0) < (r3:r2) ? 1 : 0
+ NewLIR2(cu, kX86Movzx8RR, r2, r2);
+ OpReg(cu, kOpNeg, r2); // r2 = -r2
+ OpRegReg(cu, kOpOr, r0, r1); // r0 = high | low - sets ZF
+ NewLIR2(cu, kX86Set8R, r0, kX86CondNz); // r0 = (r1:r0) != (r3:r2) ? 1 : 0
+ NewLIR2(cu, kX86Movzx8RR, r0, r0);
+ OpRegReg(cu, kOpOr, r0, r2); // r0 = r0 | r2
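+ // r0 is now -1 (r2 was all ones for <), 0 (equal), or 1 (the != bit).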
+ RegLocation rl_result = LocCReturn();
+ StoreValue(cu, rl_dest, rl_result);
}
X86ConditionCode X86ConditionEncoding(ConditionCode cond) {
@@ -96,111 +96,111 @@
return kX86CondO;
}
-LIR* OpCmpBranch(CompilationUnit* cUnit, ConditionCode cond, int src1,
+LIR* OpCmpBranch(CompilationUnit* cu, ConditionCode cond, int src1,
int src2, LIR* target)
{
- NewLIR2(cUnit, kX86Cmp32RR, src1, src2);
+ NewLIR2(cu, kX86Cmp32RR, src1, src2);
X86ConditionCode cc = X86ConditionEncoding(cond);
- LIR* branch = NewLIR2(cUnit, kX86Jcc8, 0 /* lir operand for Jcc offset */ ,
+ LIR* branch = NewLIR2(cu, kX86Jcc8, 0 /* lir operand for Jcc offset */ ,
cc);
branch->target = target;
return branch;
}
-LIR* OpCmpImmBranch(CompilationUnit* cUnit, ConditionCode cond, int reg,
- int checkValue, LIR* target)
+LIR* OpCmpImmBranch(CompilationUnit* cu, ConditionCode cond, int reg,
+ int check_value, LIR* target)
{
- if ((checkValue == 0) && (cond == kCondEq || cond == kCondNe)) {
- // TODO: when checkValue == 0 and reg is rCX, use the jcxz/nz opcode
- NewLIR2(cUnit, kX86Test32RR, reg, reg);
+ if ((check_value == 0) && (cond == kCondEq || cond == kCondNe)) {
+ // TODO: when check_value == 0 and reg is rCX, use the jcxz/nz opcode
+ NewLIR2(cu, kX86Test32RR, reg, reg);
} else {
- NewLIR2(cUnit, IS_SIMM8(checkValue) ? kX86Cmp32RI8 : kX86Cmp32RI, reg, checkValue);
+ NewLIR2(cu, IS_SIMM8(check_value) ? kX86Cmp32RI8 : kX86Cmp32RI, reg, check_value);
}
X86ConditionCode cc = X86ConditionEncoding(cond);
- LIR* branch = NewLIR2(cUnit, kX86Jcc8, 0 /* lir operand for Jcc offset */ , cc);
+ LIR* branch = NewLIR2(cu, kX86Jcc8, 0 /* lir operand for Jcc offset */ , cc);
branch->target = target;
return branch;
}
-LIR* OpRegCopyNoInsert(CompilationUnit *cUnit, int rDest, int rSrc)
+LIR* OpRegCopyNoInsert(CompilationUnit *cu, int r_dest, int r_src)
{
- if (X86_FPREG(rDest) || X86_FPREG(rSrc))
- return FpRegCopy(cUnit, rDest, rSrc);
- LIR* res = RawLIR(cUnit, cUnit->currentDalvikOffset, kX86Mov32RR,
- rDest, rSrc);
- if (rDest == rSrc) {
- res->flags.isNop = true;
+ if (X86_FPREG(r_dest) || X86_FPREG(r_src))
+ return FpRegCopy(cu, r_dest, r_src);
+ LIR* res = RawLIR(cu, cu->current_dalvik_offset, kX86Mov32RR,
+ r_dest, r_src);
+ if (r_dest == r_src) {
+ res->flags.is_nop = true;
}
return res;
}
-LIR* OpRegCopy(CompilationUnit *cUnit, int rDest, int rSrc)
+LIR* OpRegCopy(CompilationUnit *cu, int r_dest, int r_src)
{
- LIR *res = OpRegCopyNoInsert(cUnit, rDest, rSrc);
- AppendLIR(cUnit, res);
+ LIR *res = OpRegCopyNoInsert(cu, r_dest, r_src);
+ AppendLIR(cu, res);
return res;
}
-void OpRegCopyWide(CompilationUnit *cUnit, int destLo, int destHi,
- int srcLo, int srcHi)
+void OpRegCopyWide(CompilationUnit *cu, int dest_lo, int dest_hi,
+ int src_lo, int src_hi)
{
- bool destFP = X86_FPREG(destLo) && X86_FPREG(destHi);
- bool srcFP = X86_FPREG(srcLo) && X86_FPREG(srcHi);
- assert(X86_FPREG(srcLo) == X86_FPREG(srcHi));
- assert(X86_FPREG(destLo) == X86_FPREG(destHi));
- if (destFP) {
- if (srcFP) {
- OpRegCopy(cUnit, S2d(destLo, destHi), S2d(srcLo, srcHi));
+ bool dest_fp = X86_FPREG(dest_lo) && X86_FPREG(dest_hi);
+ bool src_fp = X86_FPREG(src_lo) && X86_FPREG(src_hi);
+ assert(X86_FPREG(src_lo) == X86_FPREG(src_hi));
+ assert(X86_FPREG(dest_lo) == X86_FPREG(dest_hi));
+ if (dest_fp) {
+ if (src_fp) {
+ OpRegCopy(cu, S2d(dest_lo, dest_hi), S2d(src_lo, src_hi));
} else {
// TODO: Prevent this from happening in the code. The result is often
// unused or could have been loaded more easily from memory.
- NewLIR2(cUnit, kX86MovdxrRR, destLo, srcLo);
- NewLIR2(cUnit, kX86MovdxrRR, destHi, srcHi);
- NewLIR2(cUnit, kX86PsllqRI, destHi, 32);
- NewLIR2(cUnit, kX86OrpsRR, destLo, destHi);
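+ // Pack the two GPRs into one XMM: movd each half, shift the high half up 32, then OR.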
+ NewLIR2(cu, kX86MovdxrRR, dest_lo, src_lo);
+ NewLIR2(cu, kX86MovdxrRR, dest_hi, src_hi);
+ NewLIR2(cu, kX86PsllqRI, dest_hi, 32);
+ NewLIR2(cu, kX86OrpsRR, dest_lo, dest_hi);
}
} else {
- if (srcFP) {
- NewLIR2(cUnit, kX86MovdrxRR, destLo, srcLo);
- NewLIR2(cUnit, kX86PsrlqRI, srcLo, 32);
- NewLIR2(cUnit, kX86MovdrxRR, destHi, srcLo);
+ if (src_fp) {
+ NewLIR2(cu, kX86MovdrxRR, dest_lo, src_lo);
+ NewLIR2(cu, kX86PsrlqRI, src_lo, 32);
+ NewLIR2(cu, kX86MovdrxRR, dest_hi, src_lo);
} else {
// Handle overlap
- if (srcHi == destLo) {
- OpRegCopy(cUnit, destHi, srcHi);
- OpRegCopy(cUnit, destLo, srcLo);
+ if (src_hi == dest_lo) {
+ OpRegCopy(cu, dest_hi, src_hi);
+ OpRegCopy(cu, dest_lo, src_lo);
} else {
- OpRegCopy(cUnit, destLo, srcLo);
- OpRegCopy(cUnit, destHi, srcHi);
+ OpRegCopy(cu, dest_lo, src_lo);
+ OpRegCopy(cu, dest_hi, src_hi);
}
}
}
}
-void GenFusedLongCmpBranch(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir) {
- LIR* labelList = cUnit->blockLabelList;
- LIR* taken = &labelList[bb->taken->id];
- RegLocation rlSrc1 = GetSrcWide(cUnit, mir, 0);
- RegLocation rlSrc2 = GetSrcWide(cUnit, mir, 2);
- FlushAllRegs(cUnit);
- LockCallTemps(cUnit); // Prepare for explicit register usage
- LoadValueDirectWideFixed(cUnit, rlSrc1, r0, r1);
- LoadValueDirectWideFixed(cUnit, rlSrc2, r2, r3);
+void GenFusedLongCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir) {
+ LIR* label_list = cu->block_label_list;
+ LIR* taken = &label_list[bb->taken->id];
+ RegLocation rl_src1 = GetSrcWide(cu, mir, 0);
+ RegLocation rl_src2 = GetSrcWide(cu, mir, 2);
+ FlushAllRegs(cu);
+ LockCallTemps(cu); // Prepare for explicit register usage
+ LoadValueDirectWideFixed(cu, rl_src1, r0, r1);
+ LoadValueDirectWideFixed(cu, rl_src2, r2, r3);
ConditionCode ccode = static_cast<ConditionCode>(mir->dalvikInsn.arg[0]);
// Swap operands and condition code to prevent use of zero flag.
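+ // After sub/sbc only SF/OF reflect the full 64-bit result (ZF covers just the
+ // high word), so Le/Gt are recast as Ge/Lt with the operands swapped.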
if (ccode == kCondLe || ccode == kCondGt) {
// Compute (r3:r2) = (r3:r2) - (r1:r0)
- OpRegReg(cUnit, kOpSub, r2, r0); // r2 = r2 - r0
- OpRegReg(cUnit, kOpSbc, r3, r1); // r3 = r3 - r1 - CF
+ OpRegReg(cu, kOpSub, r2, r0); // r2 = r2 - r0
+ OpRegReg(cu, kOpSbc, r3, r1); // r3 = r3 - r1 - CF
} else {
// Compute (r1:r0) = (r1:r0) - (r3:r2)
- OpRegReg(cUnit, kOpSub, r0, r2); // r0 = r0 - r2
- OpRegReg(cUnit, kOpSbc, r1, r3); // r1 = r1 - r3 - CF
+ OpRegReg(cu, kOpSub, r0, r2); // r0 = r0 - r2
+ OpRegReg(cu, kOpSbc, r1, r3); // r1 = r1 - r3 - CF
}
switch (ccode) {
case kCondEq:
case kCondNe:
- OpRegReg(cUnit, kOpOr, r0, r1); // r0 = r0 | r1
+ OpRegReg(cu, kOpOr, r0, r1); // r0 = r0 | r1
break;
case kCondLe:
ccode = kCondGe;
@@ -214,217 +214,217 @@
default:
LOG(FATAL) << "Unexpected ccode: " << ccode;
}
- OpCondBranch(cUnit, ccode, taken);
+ OpCondBranch(cu, ccode, taken);
}
-RegLocation GenDivRemLit(CompilationUnit* cUnit, RegLocation rlDest, int regLo, int lit, bool isDiv)
+RegLocation GenDivRemLit(CompilationUnit* cu, RegLocation rl_dest, int reg_lo, int lit, bool is_div)
{
LOG(FATAL) << "Unexpected use of GenDivRemLit for x86";
- return rlDest;
+ return rl_dest;
}
-RegLocation GenDivRem(CompilationUnit* cUnit, RegLocation rlDest, int regLo, int regHi, bool isDiv)
+RegLocation GenDivRem(CompilationUnit* cu, RegLocation rl_dest, int reg_lo, int reg_hi, bool is_div)
{
LOG(FATAL) << "Unexpected use of GenDivRem for x86";
- return rlDest;
+ return rl_dest;
}
-bool GenInlinedMinMaxInt(CompilationUnit *cUnit, CallInfo* info, bool isMin)
+bool GenInlinedMinMaxInt(CompilationUnit *cu, CallInfo* info, bool is_min)
{
- DCHECK_EQ(cUnit->instructionSet, kX86);
- RegLocation rlSrc1 = info->args[0];
- RegLocation rlSrc2 = info->args[1];
- rlSrc1 = LoadValue(cUnit, rlSrc1, kCoreReg);
- rlSrc2 = LoadValue(cUnit, rlSrc2, kCoreReg);
- RegLocation rlDest = InlineTarget(cUnit, info);
- RegLocation rlResult = EvalLoc(cUnit, rlDest, kCoreReg, true);
- OpRegReg(cUnit, kOpCmp, rlSrc1.lowReg, rlSrc2.lowReg);
- DCHECK_EQ(cUnit->instructionSet, kX86);
- LIR* branch = NewLIR2(cUnit, kX86Jcc8, 0, isMin ? kX86CondG : kX86CondL);
- OpRegReg(cUnit, kOpMov, rlResult.lowReg, rlSrc1.lowReg);
- LIR* branch2 = NewLIR1(cUnit, kX86Jmp8, 0);
- branch->target = NewLIR0(cUnit, kPseudoTargetLabel);
- OpRegReg(cUnit, kOpMov, rlResult.lowReg, rlSrc2.lowReg);
- branch2->target = NewLIR0(cUnit, kPseudoTargetLabel);
- StoreValue(cUnit, rlDest, rlResult);
+ DCHECK_EQ(cu->instruction_set, kX86);
+ RegLocation rl_src1 = info->args[0];
+ RegLocation rl_src2 = info->args[1];
+ rl_src1 = LoadValue(cu, rl_src1, kCoreReg);
+ rl_src2 = LoadValue(cu, rl_src2, kCoreReg);
+ RegLocation rl_dest = InlineTarget(cu, info);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ OpRegReg(cu, kOpCmp, rl_src1.low_reg, rl_src2.low_reg);
+ DCHECK_EQ(cu->instruction_set, kX86);
+ LIR* branch = NewLIR2(cu, kX86Jcc8, 0, is_min ? kX86CondG : kX86CondL);
+ OpRegReg(cu, kOpMov, rl_result.low_reg, rl_src1.low_reg);
+ LIR* branch2 = NewLIR1(cu, kX86Jmp8, 0);
+ branch->target = NewLIR0(cu, kPseudoTargetLabel);
+ OpRegReg(cu, kOpMov, rl_result.low_reg, rl_src2.low_reg);
+ branch2->target = NewLIR0(cu, kPseudoTargetLabel);
+ StoreValue(cu, rl_dest, rl_result);
return true;
}
-void OpLea(CompilationUnit* cUnit, int rBase, int reg1, int reg2, int scale, int offset)
+void OpLea(CompilationUnit* cu, int rBase, int reg1, int reg2, int scale, int offset)
{
- NewLIR5(cUnit, kX86Lea32RA, rBase, reg1, reg2, scale, offset);
+ NewLIR5(cu, kX86Lea32RA, rBase, reg1, reg2, scale, offset);
}
-void OpTlsCmp(CompilationUnit* cUnit, int offset, int val)
+void OpTlsCmp(CompilationUnit* cu, int offset, int val)
{
- NewLIR2(cUnit, kX86Cmp16TI8, offset, val);
+ NewLIR2(cu, kX86Cmp16TI8, offset, val);
}
-bool GenInlinedCas32(CompilationUnit* cUnit, CallInfo* info, bool need_write_barrier) {
- DCHECK_NE(cUnit->instructionSet, kThumb2);
+bool GenInlinedCas32(CompilationUnit* cu, CallInfo* info, bool need_write_barrier) {
+ DCHECK_NE(cu->instruction_set, kThumb2);
return false;
}
-LIR* OpPcRelLoad(CompilationUnit* cUnit, int reg, LIR* target) {
+LIR* OpPcRelLoad(CompilationUnit* cu, int reg, LIR* target) {
LOG(FATAL) << "Unexpected use of OpPcRelLoad for x86";
return NULL;
}
-LIR* OpVldm(CompilationUnit* cUnit, int rBase, int count)
+LIR* OpVldm(CompilationUnit* cu, int rBase, int count)
{
LOG(FATAL) << "Unexpected use of OpVldm for x86";
return NULL;
}
-LIR* OpVstm(CompilationUnit* cUnit, int rBase, int count)
+LIR* OpVstm(CompilationUnit* cu, int rBase, int count)
{
LOG(FATAL) << "Unexpected use of OpVstm for x86";
return NULL;
}
-void GenMultiplyByTwoBitMultiplier(CompilationUnit* cUnit, RegLocation rlSrc,
- RegLocation rlResult, int lit,
- int firstBit, int secondBit)
+void GenMultiplyByTwoBitMultiplier(CompilationUnit* cu, RegLocation rl_src,
+ RegLocation rl_result, int lit,
+ int first_bit, int second_bit)
{
- int tReg = AllocTemp(cUnit);
- OpRegRegImm(cUnit, kOpLsl, tReg, rlSrc.lowReg, secondBit - firstBit);
- OpRegRegReg(cUnit, kOpAdd, rlResult.lowReg, rlSrc.lowReg, tReg);
- FreeTemp(cUnit, tReg);
- if (firstBit != 0) {
- OpRegRegImm(cUnit, kOpLsl, rlResult.lowReg, rlResult.lowReg, firstBit);
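+ // lit == (1 << first_bit) + (1 << second_bit), so the multiply reduces to shift, add, shift.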
+ int t_reg = AllocTemp(cu);
+ OpRegRegImm(cu, kOpLsl, t_reg, rl_src.low_reg, second_bit - first_bit);
+ OpRegRegReg(cu, kOpAdd, rl_result.low_reg, rl_src.low_reg, t_reg);
+ FreeTemp(cu, t_reg);
+ if (first_bit != 0) {
+ OpRegRegImm(cu, kOpLsl, rl_result.low_reg, rl_result.low_reg, first_bit);
}
}
-void GenDivZeroCheck(CompilationUnit* cUnit, int regLo, int regHi)
+void GenDivZeroCheck(CompilationUnit* cu, int reg_lo, int reg_hi)
{
- int tReg = AllocTemp(cUnit);
- OpRegRegReg(cUnit, kOpOr, tReg, regLo, regHi);
- GenImmedCheck(cUnit, kCondEq, tReg, 0, kThrowDivZero);
- FreeTemp(cUnit, tReg);
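+ // The 64-bit pair is zero iff (lo | hi) == 0.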
+ int t_reg = AllocTemp(cu);
+ OpRegRegReg(cu, kOpOr, t_reg, reg_lo, reg_hi);
+ GenImmedCheck(cu, kCondEq, t_reg, 0, kThrowDivZero);
+ FreeTemp(cu, t_reg);
}
// Test suspend flag, return target of taken suspend branch
-LIR* OpTestSuspend(CompilationUnit* cUnit, LIR* target)
+LIR* OpTestSuspend(CompilationUnit* cu, LIR* target)
{
- OpTlsCmp(cUnit, Thread::ThreadFlagsOffset().Int32Value(), 0);
- return OpCondBranch(cUnit, (target == NULL) ? kCondNe : kCondEq, target);
+ OpTlsCmp(cu, Thread::ThreadFlagsOffset().Int32Value(), 0);
+ return OpCondBranch(cu, (target == NULL) ? kCondNe : kCondEq, target);
}
// Decrement register and branch on condition
-LIR* OpDecAndBranch(CompilationUnit* cUnit, ConditionCode cCode, int reg, LIR* target)
+LIR* OpDecAndBranch(CompilationUnit* cu, ConditionCode c_code, int reg, LIR* target)
{
- OpRegImm(cUnit, kOpSub, reg, 1);
- return OpCmpImmBranch(cUnit, cCode, reg, 0, target);
+ OpRegImm(cu, kOpSub, reg, 1);
+ return OpCmpImmBranch(cu, c_code, reg, 0, target);
}
-bool SmallLiteralDivide(CompilationUnit* cUnit, Instruction::Code dalvikOpcode,
- RegLocation rlSrc, RegLocation rlDest, int lit)
+bool SmallLiteralDivide(CompilationUnit* cu, Instruction::Code dalvik_opcode,
+ RegLocation rl_src, RegLocation rl_dest, int lit)
{
LOG(FATAL) << "Unexpected use of smallLiteralDive in x86";
return false;
}
-LIR* OpIT(CompilationUnit* cUnit, ArmConditionCode cond, const char* guide)
+LIR* OpIT(CompilationUnit* cu, ArmConditionCode cond, const char* guide)
{
LOG(FATAL) << "Unexpected use of OpIT in x86";
return NULL;
}
-bool GenAddLong(CompilationUnit* cUnit, RegLocation rlDest,
- RegLocation rlSrc1, RegLocation rlSrc2)
+bool GenAddLong(CompilationUnit* cu, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2)
{
- FlushAllRegs(cUnit);
- LockCallTemps(cUnit); // Prepare for explicit register usage
- LoadValueDirectWideFixed(cUnit, rlSrc1, r0, r1);
- LoadValueDirectWideFixed(cUnit, rlSrc2, r2, r3);
+ FlushAllRegs(cu);
+ LockCallTemps(cu); // Prepare for explicit register usage
+ LoadValueDirectWideFixed(cu, rl_src1, r0, r1);
+ LoadValueDirectWideFixed(cu, rl_src2, r2, r3);
- // Compute (r1:r0) = (r1:r0) + (r2:r3)
+ // Compute (r1:r0) = (r1:r0) + (r3:r2)
- OpRegReg(cUnit, kOpAdd, r0, r2); // r0 = r0 + r2
- OpRegReg(cUnit, kOpAdc, r1, r3); // r1 = r1 + r3 + CF
- RegLocation rlResult = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
+ OpRegReg(cu, kOpAdd, r0, r2); // r0 = r0 + r2
+ OpRegReg(cu, kOpAdc, r1, r3); // r1 = r1 + r3 + CF
+ RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
INVALID_SREG, INVALID_SREG};
- StoreValueWide(cUnit, rlDest, rlResult);
+ StoreValueWide(cu, rl_dest, rl_result);
return false;
}
-bool GenSubLong(CompilationUnit* cUnit, RegLocation rlDest,
- RegLocation rlSrc1, RegLocation rlSrc2)
+bool GenSubLong(CompilationUnit* cu, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2)
{
- FlushAllRegs(cUnit);
- LockCallTemps(cUnit); // Prepare for explicit register usage
- LoadValueDirectWideFixed(cUnit, rlSrc1, r0, r1);
- LoadValueDirectWideFixed(cUnit, rlSrc2, r2, r3);
+ FlushAllRegs(cu);
+ LockCallTemps(cu); // Prepare for explicit register usage
+ LoadValueDirectWideFixed(cu, rl_src1, r0, r1);
+ LoadValueDirectWideFixed(cu, rl_src2, r2, r3);
- // Compute (r1:r0) = (r1:r0) + (r2:r3)
+ // Compute (r1:r0) = (r1:r0) - (r3:r2)
- OpRegReg(cUnit, kOpSub, r0, r2); // r0 = r0 - r2
- OpRegReg(cUnit, kOpSbc, r1, r3); // r1 = r1 - r3 - CF
- RegLocation rlResult = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
+ OpRegReg(cu, kOpSub, r0, r2); // r0 = r0 - r2
+ OpRegReg(cu, kOpSbc, r1, r3); // r1 = r1 - r3 - CF
+ RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
INVALID_SREG, INVALID_SREG};
- StoreValueWide(cUnit, rlDest, rlResult);
+ StoreValueWide(cu, rl_dest, rl_result);
return false;
}
-bool GenAndLong(CompilationUnit* cUnit, RegLocation rlDest,
- RegLocation rlSrc1, RegLocation rlSrc2)
+bool GenAndLong(CompilationUnit* cu, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2)
{
- FlushAllRegs(cUnit);
- LockCallTemps(cUnit); // Prepare for explicit register usage
- LoadValueDirectWideFixed(cUnit, rlSrc1, r0, r1);
- LoadValueDirectWideFixed(cUnit, rlSrc2, r2, r3);
+ FlushAllRegs(cu);
+ LockCallTemps(cu); // Prepare for explicit register usage
+ LoadValueDirectWideFixed(cu, rl_src1, r0, r1);
+ LoadValueDirectWideFixed(cu, rl_src2, r2, r3);
- // Compute (r1:r0) = (r1:r0) + (r2:r3)
+ // Compute (r1:r0) = (r1:r0) & (r3:r2)
- OpRegReg(cUnit, kOpAnd, r0, r2); // r0 = r0 - r2
- OpRegReg(cUnit, kOpAnd, r1, r3); // r1 = r1 - r3 - CF
- RegLocation rlResult = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
+ OpRegReg(cu, kOpAnd, r0, r2); // r0 = r0 & r2
+ OpRegReg(cu, kOpAnd, r1, r3); // r1 = r1 & r3
+ RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
INVALID_SREG, INVALID_SREG};
- StoreValueWide(cUnit, rlDest, rlResult);
+ StoreValueWide(cu, rl_dest, rl_result);
return false;
}
-bool GenOrLong(CompilationUnit* cUnit, RegLocation rlDest,
- RegLocation rlSrc1, RegLocation rlSrc2)
+bool GenOrLong(CompilationUnit* cu, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2)
{
- FlushAllRegs(cUnit);
- LockCallTemps(cUnit); // Prepare for explicit register usage
- LoadValueDirectWideFixed(cUnit, rlSrc1, r0, r1);
- LoadValueDirectWideFixed(cUnit, rlSrc2, r2, r3);
+ FlushAllRegs(cu);
+ LockCallTemps(cu); // Prepare for explicit register usage
+ LoadValueDirectWideFixed(cu, rl_src1, r0, r1);
+ LoadValueDirectWideFixed(cu, rl_src2, r2, r3);
- // Compute (r1:r0) = (r1:r0) + (r2:r3)
+ // Compute (r1:r0) = (r1:r0) | (r3:r2)
- OpRegReg(cUnit, kOpOr, r0, r2); // r0 = r0 - r2
- OpRegReg(cUnit, kOpOr, r1, r3); // r1 = r1 - r3 - CF
- RegLocation rlResult = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
+ OpRegReg(cu, kOpOr, r0, r2); // r0 = r0 | r2
+ OpRegReg(cu, kOpOr, r1, r3); // r1 = r1 | r3
+ RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
INVALID_SREG, INVALID_SREG};
- StoreValueWide(cUnit, rlDest, rlResult);
+ StoreValueWide(cu, rl_dest, rl_result);
return false;
}
-bool GenXorLong(CompilationUnit* cUnit, RegLocation rlDest,
- RegLocation rlSrc1, RegLocation rlSrc2)
+bool GenXorLong(CompilationUnit* cu, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2)
{
- FlushAllRegs(cUnit);
- LockCallTemps(cUnit); // Prepare for explicit register usage
- LoadValueDirectWideFixed(cUnit, rlSrc1, r0, r1);
- LoadValueDirectWideFixed(cUnit, rlSrc2, r2, r3);
+ FlushAllRegs(cu);
+ LockCallTemps(cu); // Prepare for explicit register usage
+ LoadValueDirectWideFixed(cu, rl_src1, r0, r1);
+ LoadValueDirectWideFixed(cu, rl_src2, r2, r3);
- // Compute (r1:r0) = (r1:r0) + (r2:r3)
+ // Compute (r1:r0) = (r1:r0) ^ (r3:r2)
- OpRegReg(cUnit, kOpXor, r0, r2); // r0 = r0 - r2
- OpRegReg(cUnit, kOpXor, r1, r3); // r1 = r1 - r3 - CF
- RegLocation rlResult = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
+ OpRegReg(cu, kOpXor, r0, r2); // r0 = r0 ^ r2
+ OpRegReg(cu, kOpXor, r1, r3); // r1 = r1 ^ r3
+ RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
INVALID_SREG, INVALID_SREG};
- StoreValueWide(cUnit, rlDest, rlResult);
+ StoreValueWide(cu, rl_dest, rl_result);
return false;
}
-bool GenNegLong(CompilationUnit* cUnit, RegLocation rlDest,
- RegLocation rlSrc)
+bool GenNegLong(CompilationUnit* cu, RegLocation rl_dest,
+ RegLocation rl_src)
{
- FlushAllRegs(cUnit);
- LockCallTemps(cUnit); // Prepare for explicit register usage
- LoadValueDirectWideFixed(cUnit, rlSrc, r0, r1);
+ FlushAllRegs(cu);
+ LockCallTemps(cu); // Prepare for explicit register usage
+ LoadValueDirectWideFixed(cu, rl_src, r0, r1);
// Compute (r1:r0) = -(r1:r0)
- OpRegReg(cUnit, kOpNeg, r0, r0); // r0 = -r0
- OpRegImm(cUnit, kOpAdc, r1, 0); // r1 = r1 + CF
- OpRegReg(cUnit, kOpNeg, r1, r1); // r1 = -r1
- RegLocation rlResult = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
+ OpRegReg(cu, kOpNeg, r0, r0); // r0 = -r0
+ OpRegImm(cu, kOpAdc, r1, 0); // r1 = r1 + CF
+ OpRegReg(cu, kOpNeg, r1, r1); // r1 = -r1
+ RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
INVALID_SREG, INVALID_SREG};
- StoreValueWide(cUnit, rlDest, rlResult);
+ StoreValueWide(cu, rl_dest, rl_result);
return false;
}
-void OpRegThreadMem(CompilationUnit* cUnit, OpKind op, int rDest, int threadOffset) {
+void OpRegThreadMem(CompilationUnit* cu, OpKind op, int r_dest, int thread_offset) {
X86OpCode opcode = kX86Bkpt;
switch (op) {
case kOpCmp: opcode = kX86Cmp32RT; break;
@@ -432,7 +432,7 @@
LOG(FATAL) << "Bad opcode: " << op;
break;
}
- NewLIR2(cUnit, opcode, rDest, threadOffset);
+ NewLIR2(cu, opcode, r_dest, thread_offset);
}
} // namespace art
diff --git a/src/compiler/codegen/x86/target_x86.cc b/src/compiler/codegen/x86/target_x86.cc
index 272554d..c51e9e9 100644
--- a/src/compiler/codegen/x86/target_x86.cc
+++ b/src/compiler/codegen/x86/target_x86.cc
@@ -24,21 +24,21 @@
namespace art {
//FIXME: restore "static" when usage uncovered
-/*static*/ int coreRegs[] = {
+/*static*/ int core_regs[] = {
rAX, rCX, rDX, rBX, rX86_SP, rBP, rSI, rDI
#ifdef TARGET_REX_SUPPORT
- r8, r9, r10, r11, r12, r13, r14, 15
+ r8, r9, r10, r11, r12, r13, r14, r15
#endif
};
/*static*/ int ReservedRegs[] = {rX86_SP};
-/*static*/ int coreTemps[] = {rAX, rCX, rDX, rBX};
+/*static*/ int core_temps[] = {rAX, rCX, rDX, rBX};
/*static*/ int FpRegs[] = {
fr0, fr1, fr2, fr3, fr4, fr5, fr6, fr7,
#ifdef TARGET_REX_SUPPORT
fr8, fr9, fr10, fr11, fr12, fr13, fr14, fr15
#endif
};
-/*static*/ int fpTemps[] = {
+/*static*/ int fp_temps[] = {
fr0, fr1, fr2, fr3, fr4, fr5, fr6, fr7,
#ifdef TARGET_REX_SUPPORT
fr8, fr9, fr10, fr11, fr12, fr13, fr14, fr15
@@ -95,9 +95,9 @@
}
// Create a double from a pair of singles.
-int S2d(int lowReg, int highReg)
+int S2d(int low_reg, int high_reg)
{
- return X86_S2D(lowReg, highReg);
+ return X86_S2D(low_reg, high_reg);
}
// Is reg a single or double?
@@ -133,19 +133,19 @@
/*
* Decode the register id.
*/
-uint64_t GetRegMaskCommon(CompilationUnit* cUnit, int reg)
+uint64_t GetRegMaskCommon(CompilationUnit* cu, int reg)
{
uint64_t seed;
int shift;
- int regId;
+ int reg_id;
- regId = reg & 0xf;
+ reg_id = reg & 0xf;
/* Double registers in x86 are just a single FP register */
seed = 1;
/* FP register starts at bit position 16 */
shift = X86_FPREG(reg) ? kX86FPReg0 : 0;
/* Expand the double register id into single offset */
- shift += regId;
+ shift += reg_id;
return (seed << shift);
}
@@ -159,38 +159,38 @@
return 0ULL;
}
-void SetupTargetResourceMasks(CompilationUnit* cUnit, LIR* lir)
+void SetupTargetResourceMasks(CompilationUnit* cu, LIR* lir)
{
- DCHECK_EQ(cUnit->instructionSet, kX86);
+ DCHECK_EQ(cu->instruction_set, kX86);
// X86-specific resource map setup here.
uint64_t flags = EncodingMap[lir->opcode].flags;
if (flags & REG_USE_SP) {
- lir->useMask |= ENCODE_X86_REG_SP;
+ lir->use_mask |= ENCODE_X86_REG_SP;
}
if (flags & REG_DEF_SP) {
- lir->defMask |= ENCODE_X86_REG_SP;
+ lir->def_mask |= ENCODE_X86_REG_SP;
}
if (flags & REG_DEFA) {
- SetupRegMask(cUnit, &lir->defMask, rAX);
+ SetupRegMask(cu, &lir->def_mask, rAX);
}
if (flags & REG_DEFD) {
- SetupRegMask(cUnit, &lir->defMask, rDX);
+ SetupRegMask(cu, &lir->def_mask, rDX);
}
if (flags & REG_USEA) {
- SetupRegMask(cUnit, &lir->useMask, rAX);
+ SetupRegMask(cu, &lir->use_mask, rAX);
}
if (flags & REG_USEC) {
- SetupRegMask(cUnit, &lir->useMask, rCX);
+ SetupRegMask(cu, &lir->use_mask, rCX);
}
if (flags & REG_USED) {
- SetupRegMask(cUnit, &lir->useMask, rDX);
+ SetupRegMask(cu, &lir->use_mask, rDX);
}
}
@@ -223,7 +223,7 @@
* Interpret a format string and build a string no longer than size
* See format key in Assemble.cc.
*/
-std::string BuildInsnString(const char *fmt, LIR *lir, unsigned char* baseAddr) {
+std::string BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr) {
std::string buf;
size_t i = 0;
size_t fmt_len = strlen(fmt);
@@ -252,8 +252,8 @@
buf += StringPrintf("%d", operand);
break;
case 'p': {
- SwitchTable *tabRec = reinterpret_cast<SwitchTable*>(operand);
- buf += StringPrintf("0x%08x", tabRec->offset);
+ SwitchTable *tab_rec = reinterpret_cast<SwitchTable*>(operand);
+ buf += StringPrintf("0x%08x", tab_rec->offset);
break;
}
case 'r':
@@ -267,7 +267,7 @@
break;
case 't':
buf += StringPrintf("0x%08x (L%p)",
- reinterpret_cast<uint32_t>(baseAddr)
+ reinterpret_cast<uint32_t>(base_addr)
+ lir->offset + operand, lir->target);
break;
default:
@@ -304,8 +304,8 @@
}
/* Memory bits */
if (x86LIR && (mask & ENCODE_DALVIK_REG)) {
- sprintf(buf + strlen(buf), "dr%d%s", x86LIR->aliasInfo & 0xffff,
- (x86LIR->aliasInfo & 0x80000000) ? "(+1)" : "");
+ sprintf(buf + strlen(buf), "dr%d%s", x86LIR->alias_info & 0xffff,
+ (x86LIR->alias_info & 0x80000000) ? "(+1)" : "");
}
if (mask & ENCODE_LITERAL) {
strcat(buf, "lit ");
@@ -322,10 +322,10 @@
LOG(INFO) << prefix << ": " << buf;
}
}
-void AdjustSpillMask(CompilationUnit* cUnit) {
+void AdjustSpillMask(CompilationUnit* cu) {
// Adjustment for LR spilling, x86 has no LR so nothing to do here
- cUnit->coreSpillMask |= (1 << rRET);
- cUnit->numCoreSpills++;
+ cu->core_spill_mask |= (1 << rRET);
+ cu->num_core_spills++;
}
/*
@@ -334,7 +334,7 @@
* include any holes in the mask. Associate holes with
* Dalvik register INVALID_VREG (0xFFFFU).
*/
-void MarkPreservedSingle(CompilationUnit* cUnit, int vReg, int reg)
+void MarkPreservedSingle(CompilationUnit* cu, int v_reg, int reg)
{
UNIMPLEMENTED(WARNING) << "MarkPreservedSingle";
#if 0
@@ -342,35 +342,35 @@
#endif
}
-void FlushRegWide(CompilationUnit* cUnit, int reg1, int reg2)
+void FlushRegWide(CompilationUnit* cu, int reg1, int reg2)
{
- RegisterInfo* info1 = GetRegInfo(cUnit, reg1);
- RegisterInfo* info2 = GetRegInfo(cUnit, reg2);
+ RegisterInfo* info1 = GetRegInfo(cu, reg1);
+ RegisterInfo* info2 = GetRegInfo(cu, reg2);
DCHECK(info1 && info2 && info1->pair && info2->pair &&
(info1->partner == info2->reg) &&
(info2->partner == info1->reg));
if ((info1->live && info1->dirty) || (info2->live && info2->dirty)) {
- if (!(info1->isTemp && info2->isTemp)) {
- /* Should not happen. If it does, there's a problem in evalLoc */
+ if (!(info1->is_temp && info2->is_temp)) {
+ /* Should not happen. If it does, there's a problem in EvalLoc */
LOG(FATAL) << "Long half-temp, half-promoted";
}
info1->dirty = false;
info2->dirty = false;
- if (SRegToVReg(cUnit, info2->sReg) < SRegToVReg(cUnit, info1->sReg))
+ if (SRegToVReg(cu, info2->s_reg) < SRegToVReg(cu, info1->s_reg))
info1 = info2;
- int vReg = SRegToVReg(cUnit, info1->sReg);
- StoreBaseDispWide(cUnit, rX86_SP, VRegOffset(cUnit, vReg), info1->reg, info1->partner);
+ int v_reg = SRegToVReg(cu, info1->s_reg);
+ StoreBaseDispWide(cu, rX86_SP, VRegOffset(cu, v_reg), info1->reg, info1->partner);
}
}
-void FlushReg(CompilationUnit* cUnit, int reg)
+void FlushReg(CompilationUnit* cu, int reg)
{
- RegisterInfo* info = GetRegInfo(cUnit, reg);
+ RegisterInfo* info = GetRegInfo(cu, reg);
if (info->live && info->dirty) {
info->dirty = false;
- int vReg = SRegToVReg(cUnit, info->sReg);
- StoreBaseDisp(cUnit, rX86_SP, VRegOffset(cUnit, vReg), reg, kWord);
+ int v_reg = SRegToVReg(cu, info->s_reg);
+ StoreBaseDisp(cu, rX86_SP, VRegOffset(cu, v_reg), reg, kWord);
}
}
@@ -380,56 +380,56 @@
}
/* Clobber all regs that might be used by an external C call */
-void ClobberCalleeSave(CompilationUnit *cUnit)
+void ClobberCalleeSave(CompilationUnit *cu)
{
- Clobber(cUnit, rAX);
- Clobber(cUnit, rCX);
- Clobber(cUnit, rDX);
+ Clobber(cu, rAX);
+ Clobber(cu, rCX);
+ Clobber(cu, rDX);
}
-RegLocation GetReturnWideAlt(CompilationUnit* cUnit) {
+RegLocation GetReturnWideAlt(CompilationUnit* cu) {
RegLocation res = LocCReturnWide();
- CHECK(res.lowReg == rAX);
- CHECK(res.highReg == rDX);
- Clobber(cUnit, rAX);
- Clobber(cUnit, rDX);
- MarkInUse(cUnit, rAX);
- MarkInUse(cUnit, rDX);
- MarkPair(cUnit, res.lowReg, res.highReg);
+ CHECK(res.low_reg == rAX);
+ CHECK(res.high_reg == rDX);
+ Clobber(cu, rAX);
+ Clobber(cu, rDX);
+ MarkInUse(cu, rAX);
+ MarkInUse(cu, rDX);
+ MarkPair(cu, res.low_reg, res.high_reg);
return res;
}
-RegLocation GetReturnAlt(CompilationUnit* cUnit)
+RegLocation GetReturnAlt(CompilationUnit* cu)
{
RegLocation res = LocCReturn();
- res.lowReg = rDX;
- Clobber(cUnit, rDX);
- MarkInUse(cUnit, rDX);
+ res.low_reg = rDX;
+ Clobber(cu, rDX);
+ MarkInUse(cu, rDX);
return res;
}
-RegisterInfo* GetRegInfo(CompilationUnit* cUnit, int reg)
+RegisterInfo* GetRegInfo(CompilationUnit* cu, int reg)
{
- return X86_FPREG(reg) ? &cUnit->regPool->FPRegs[reg & X86_FP_REG_MASK]
- : &cUnit->regPool->coreRegs[reg];
+ return X86_FPREG(reg) ? &cu->reg_pool->FPRegs[reg & X86_FP_REG_MASK]
+ : &cu->reg_pool->core_regs[reg];
}
/* To be used when explicitly managing register use */
-void LockCallTemps(CompilationUnit* cUnit)
+void LockCallTemps(CompilationUnit* cu)
{
- LockTemp(cUnit, rX86_ARG0);
- LockTemp(cUnit, rX86_ARG1);
- LockTemp(cUnit, rX86_ARG2);
- LockTemp(cUnit, rX86_ARG3);
+ LockTemp(cu, rX86_ARG0);
+ LockTemp(cu, rX86_ARG1);
+ LockTemp(cu, rX86_ARG2);
+ LockTemp(cu, rX86_ARG3);
}
/* To be used when explicitly managing register use */
-void FreeCallTemps(CompilationUnit* cUnit)
+void FreeCallTemps(CompilationUnit* cu)
{
- FreeTemp(cUnit, rX86_ARG0);
- FreeTemp(cUnit, rX86_ARG1);
- FreeTemp(cUnit, rX86_ARG2);
- FreeTemp(cUnit, rX86_ARG3);
+ FreeTemp(cu, rX86_ARG0);
+ FreeTemp(cu, rX86_ARG1);
+ FreeTemp(cu, rX86_ARG2);
+ FreeTemp(cu, rX86_ARG3);
}
/* Architecture-specific initializations and checks go here */
@@ -438,128 +438,128 @@
return true;
}
-void GenMemBarrier(CompilationUnit *cUnit, MemBarrierKind barrierKind)
+void GenMemBarrier(CompilationUnit *cu, MemBarrierKind barrier_kind)
{
#if ANDROID_SMP != 0
// TODO: optimize fences
- NewLIR0(cUnit, kX86Mfence);
+ NewLIR0(cu, kX86Mfence);
#endif
}
/*
* Alloc a pair of core registers, or a double. Low reg in low byte,
* high reg in next byte.
*/
-int AllocTypedTempPair(CompilationUnit *cUnit, bool fpHint,
- int regClass)
+int AllocTypedTempPair(CompilationUnit *cu, bool fp_hint,
+ int reg_class)
{
- int highReg;
- int lowReg;
+ int high_reg;
+ int low_reg;
int res = 0;
- if (((regClass == kAnyReg) && fpHint) || (regClass == kFPReg)) {
- lowReg = AllocTempDouble(cUnit);
- highReg = lowReg + 1;
- res = (lowReg & 0xff) | ((highReg & 0xff) << 8);
+ if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
+ low_reg = AllocTempDouble(cu);
+ high_reg = low_reg + 1;
+ res = (low_reg & 0xff) | ((high_reg & 0xff) << 8);
return res;
}
- lowReg = AllocTemp(cUnit);
- highReg = AllocTemp(cUnit);
- res = (lowReg & 0xff) | ((highReg & 0xff) << 8);
+ low_reg = AllocTemp(cu);
+ high_reg = AllocTemp(cu);
+ res = (low_reg & 0xff) | ((high_reg & 0xff) << 8);
return res;
}
-int AllocTypedTemp(CompilationUnit *cUnit, bool fpHint, int regClass) {
- if (((regClass == kAnyReg) && fpHint) || (regClass == kFPReg)) {
- return AllocTempFloat(cUnit);
+int AllocTypedTemp(CompilationUnit *cu, bool fp_hint, int reg_class) {
+ if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
+ return AllocTempFloat(cu);
}
- return AllocTemp(cUnit);
+ return AllocTemp(cu);
}
-void CompilerInitializeRegAlloc(CompilationUnit* cUnit) {
- int numRegs = sizeof(coreRegs)/sizeof(*coreRegs);
- int numReserved = sizeof(ReservedRegs)/sizeof(*ReservedRegs);
- int numTemps = sizeof(coreTemps)/sizeof(*coreTemps);
- int numFPRegs = sizeof(FpRegs)/sizeof(*FpRegs);
- int numFPTemps = sizeof(fpTemps)/sizeof(*fpTemps);
+void CompilerInitializeRegAlloc(CompilationUnit* cu) {
+ int num_regs = sizeof(core_regs)/sizeof(*core_regs);
+ int num_reserved = sizeof(ReservedRegs)/sizeof(*ReservedRegs);
+ int num_temps = sizeof(core_temps)/sizeof(*core_temps);
+ int num_fp_regs = sizeof(FpRegs)/sizeof(*FpRegs);
+ int num_fp_temps = sizeof(fp_temps)/sizeof(*fp_temps);
RegisterPool *pool =
- static_cast<RegisterPool*>(NewMem(cUnit, sizeof(*pool), true, kAllocRegAlloc));
- cUnit->regPool = pool;
- pool->numCoreRegs = numRegs;
- pool->coreRegs =
- static_cast<RegisterInfo*>(NewMem(cUnit, numRegs * sizeof(*cUnit->regPool->coreRegs),
+ static_cast<RegisterPool*>(NewMem(cu, sizeof(*pool), true, kAllocRegAlloc));
+ cu->reg_pool = pool;
+ pool->num_core_regs = num_regs;
+ pool->core_regs =
+ static_cast<RegisterInfo*>(NewMem(cu, num_regs * sizeof(*cu->reg_pool->core_regs),
true, kAllocRegAlloc));
- pool->numFPRegs = numFPRegs;
+ pool->num_fp_regs = num_fp_regs;
pool->FPRegs =
- static_cast<RegisterInfo *>(NewMem(cUnit, numFPRegs * sizeof(*cUnit->regPool->FPRegs),
+ static_cast<RegisterInfo *>(NewMem(cu, num_fp_regs * sizeof(*cu->reg_pool->FPRegs),
true, kAllocRegAlloc));
- CompilerInitPool(pool->coreRegs, coreRegs, pool->numCoreRegs);
- CompilerInitPool(pool->FPRegs, FpRegs, pool->numFPRegs);
+ CompilerInitPool(pool->core_regs, core_regs, pool->num_core_regs);
+ CompilerInitPool(pool->FPRegs, FpRegs, pool->num_fp_regs);
// Keep special registers from being allocated
- for (int i = 0; i < numReserved; i++) {
- MarkInUse(cUnit, ReservedRegs[i]);
+ for (int i = 0; i < num_reserved; i++) {
+ MarkInUse(cu, ReservedRegs[i]);
}
// Mark temp regs - all others not in use can be used for promotion
- for (int i = 0; i < numTemps; i++) {
- MarkTemp(cUnit, coreTemps[i]);
+ for (int i = 0; i < num_temps; i++) {
+ MarkTemp(cu, core_temps[i]);
}
- for (int i = 0; i < numFPTemps; i++) {
- MarkTemp(cUnit, fpTemps[i]);
+ for (int i = 0; i < num_fp_temps; i++) {
+ MarkTemp(cu, fp_temps[i]);
}
// Construct the alias map.
- cUnit->phiAliasMap = static_cast<int*>
- (NewMem(cUnit, cUnit->numSSARegs * sizeof(cUnit->phiAliasMap[0]), false, kAllocDFInfo));
- for (int i = 0; i < cUnit->numSSARegs; i++) {
- cUnit->phiAliasMap[i] = i;
+ cu->phi_alias_map = static_cast<int*>
+ (NewMem(cu, cu->num_ssa_regs * sizeof(cu->phi_alias_map[0]), false, kAllocDFInfo));
+ for (int i = 0; i < cu->num_ssa_regs; i++) {
+ cu->phi_alias_map[i] = i;
}
- for (MIR* phi = cUnit->phiList; phi; phi = phi->meta.phiNext) {
- int defReg = phi->ssaRep->defs[0];
- for (int i = 0; i < phi->ssaRep->numUses; i++) {
- for (int j = 0; j < cUnit->numSSARegs; j++) {
- if (cUnit->phiAliasMap[j] == phi->ssaRep->uses[i]) {
- cUnit->phiAliasMap[j] = defReg;
+ for (MIR* phi = cu->phi_list; phi; phi = phi->meta.phi_next) {
+ int def_reg = phi->ssa_rep->defs[0];
+ for (int i = 0; i < phi->ssa_rep->num_uses; i++) {
+ for (int j = 0; j < cu->num_ssa_regs; j++) {
+ if (cu->phi_alias_map[j] == phi->ssa_rep->uses[i]) {
+ cu->phi_alias_map[j] = def_reg;
}
}
}
}
}
-void FreeRegLocTemps(CompilationUnit* cUnit, RegLocation rlKeep,
- RegLocation rlFree)
+void FreeRegLocTemps(CompilationUnit* cu, RegLocation rl_keep,
+ RegLocation rl_free)
{
- if ((rlFree.lowReg != rlKeep.lowReg) && (rlFree.lowReg != rlKeep.highReg) &&
- (rlFree.highReg != rlKeep.lowReg) && (rlFree.highReg != rlKeep.highReg)) {
+ if ((rl_free.low_reg != rl_keep.low_reg) && (rl_free.low_reg != rl_keep.high_reg) &&
+ (rl_free.high_reg != rl_keep.low_reg) && (rl_free.high_reg != rl_keep.high_reg)) {
// No overlap, free both
- FreeTemp(cUnit, rlFree.lowReg);
- FreeTemp(cUnit, rlFree.highReg);
+ FreeTemp(cu, rl_free.low_reg);
+ FreeTemp(cu, rl_free.high_reg);
}
}
-void SpillCoreRegs(CompilationUnit* cUnit) {
- if (cUnit->numCoreSpills == 0) {
+void SpillCoreRegs(CompilationUnit* cu) {
+ if (cu->num_core_spills == 0) {
return;
}
// Spill mask not including fake return address register
- uint32_t mask = cUnit->coreSpillMask & ~(1 << rRET);
- int offset = cUnit->frameSize - (4 * cUnit->numCoreSpills);
+ uint32_t mask = cu->core_spill_mask & ~(1 << rRET);
+ int offset = cu->frame_size - (4 * cu->num_core_spills);
for (int reg = 0; mask; mask >>= 1, reg++) {
if (mask & 0x1) {
- StoreWordDisp(cUnit, rX86_SP, offset, reg);
+ StoreWordDisp(cu, rX86_SP, offset, reg);
offset += 4;
}
}
}
-void UnSpillCoreRegs(CompilationUnit* cUnit) {
- if (cUnit->numCoreSpills == 0) {
+void UnSpillCoreRegs(CompilationUnit* cu) {
+ if (cu->num_core_spills == 0) {
return;
}
// Spill mask not including fake return address register
- uint32_t mask = cUnit->coreSpillMask & ~(1 << rRET);
- int offset = cUnit->frameSize - (4 * cUnit->numCoreSpills);
+ uint32_t mask = cu->core_spill_mask & ~(1 << rRET);
+ int offset = cu->frame_size - (4 * cu->num_core_spills);
for (int reg = 0; mask; mask >>= 1, reg++) {
if (mask & 0x1) {
- LoadWordDisp(cUnit, rX86_SP, offset, reg);
+ LoadWordDisp(cu, rX86_SP, offset, reg);
offset += 4;
}
}
@@ -586,7 +586,7 @@
}
// Not used in x86
-int LoadHelper(CompilationUnit* cUnit, int offset)
+int LoadHelper(CompilationUnit* cu, int offset)
{
LOG(FATAL) << "Unexpected use of LoadHelper in x86";
return INVALID_REG;
diff --git a/src/compiler/codegen/x86/utility_x86.cc b/src/compiler/codegen/x86/utility_x86.cc
index 87a7942..22037f3 100644
--- a/src/compiler/codegen/x86/utility_x86.cc
+++ b/src/compiler/codegen/x86/utility_x86.cc
@@ -22,37 +22,35 @@
/* This file contains codegen for the X86 ISA */
-void GenBarrier(CompilationUnit *cUnit);
-void LoadPair(CompilationUnit *cUnit, int base, int lowReg, int highReg);
-LIR *LoadWordDisp(CompilationUnit *cUnit, int rBase, int displacement,
- int rDest);
-LIR *StoreWordDisp(CompilationUnit *cUnit, int rBase,
- int displacement, int rSrc);
-LIR *LoadConstant(CompilationUnit *cUnit, int rDest, int value);
+void GenBarrier(CompilationUnit *cu);
+void LoadPair(CompilationUnit *cu, int base, int low_reg, int high_reg);
+LIR *LoadWordDisp(CompilationUnit *cu, int rBase, int displacement, int r_dest);
+LIR *StoreWordDisp(CompilationUnit *cu, int rBase, int displacement, int r_src);
+LIR *LoadConstant(CompilationUnit *cu, int r_dest, int value);
-LIR *FpRegCopy(CompilationUnit *cUnit, int rDest, int rSrc)
+LIR *FpRegCopy(CompilationUnit *cu, int r_dest, int r_src)
{
int opcode;
/* must be both DOUBLE or both not DOUBLE */
- DCHECK_EQ(X86_DOUBLEREG(rDest), X86_DOUBLEREG(rSrc));
- if (X86_DOUBLEREG(rDest)) {
+ DCHECK_EQ(X86_DOUBLEREG(r_dest), X86_DOUBLEREG(r_src));
+ if (X86_DOUBLEREG(r_dest)) {
opcode = kX86MovsdRR;
} else {
- if (X86_SINGLEREG(rDest)) {
- if (X86_SINGLEREG(rSrc)) {
+ if (X86_SINGLEREG(r_dest)) {
+ if (X86_SINGLEREG(r_src)) {
opcode = kX86MovssRR;
} else { // Fpr <- Gpr
opcode = kX86MovdxrRR;
}
} else { // Gpr <- Fpr
- DCHECK(X86_SINGLEREG(rSrc));
+ DCHECK(X86_SINGLEREG(r_src));
opcode = kX86MovdrxRR;
}
}
DCHECK_NE((EncodingMap[opcode].flags & IS_BINARY_OP), 0ULL);
- LIR* res = RawLIR(cUnit, cUnit->currentDalvikOffset, opcode, rDest, rSrc);
- if (rDest == rSrc) {
- res->flags.isNop = true;
+ LIR* res = RawLIR(cu, cu->current_dalvik_offset, opcode, r_dest, r_src);
+ if (r_dest == r_src) {
+ res->flags.is_nop = true;
}
return res;
}
@@ -63,54 +61,54 @@
* a high register, build constant into a low register and copy.
*
* No additional register clobbering operation performed. Use this version when
- * 1) rDest is freshly returned from AllocTemp or
+ * 1) r_dest is freshly returned from AllocTemp or
* 2) The codegen is under fixed register usage
*/
-LIR *LoadConstantNoClobber(CompilationUnit *cUnit, int rDest, int value)
+LIR *LoadConstantNoClobber(CompilationUnit *cu, int r_dest, int value)
{
- int rDestSave = rDest;
- if (X86_FPREG(rDest)) {
+ int r_dest_save = r_dest;
+ if (X86_FPREG(r_dest)) {
if (value == 0) {
- return NewLIR2(cUnit, kX86XorpsRR, rDest, rDest);
+ return NewLIR2(cu, kX86XorpsRR, r_dest, r_dest);
}
- DCHECK(X86_SINGLEREG(rDest));
- rDest = AllocTemp(cUnit);
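+ // There is no immediate-to-XMM move; build the constant in a core temp and movd it over below.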
+ DCHECK(X86_SINGLEREG(r_dest));
+ r_dest = AllocTemp(cu);
}
LIR *res;
if (value == 0) {
- res = NewLIR2(cUnit, kX86Xor32RR, rDest, rDest);
+ res = NewLIR2(cu, kX86Xor32RR, r_dest, r_dest);
} else {
// Note, there is no byte immediate form of a 32 bit immediate move.
- res = NewLIR2(cUnit, kX86Mov32RI, rDest, value);
+ res = NewLIR2(cu, kX86Mov32RI, r_dest, value);
}
- if (X86_FPREG(rDestSave)) {
- NewLIR2(cUnit, kX86MovdxrRR, rDestSave, rDest);
- FreeTemp(cUnit, rDest);
+ if (X86_FPREG(r_dest_save)) {
+ NewLIR2(cu, kX86MovdxrRR, r_dest_save, r_dest);
+ FreeTemp(cu, r_dest);
}
return res;
}
-LIR* OpBranchUnconditional(CompilationUnit *cUnit, OpKind op)
+LIR* OpBranchUnconditional(CompilationUnit *cu, OpKind op)
{
CHECK_EQ(op, kOpUncondBr);
- return NewLIR1(cUnit, kX86Jmp8, 0 /* offset to be patched */ );
+ return NewLIR1(cu, kX86Jmp8, 0 /* offset to be patched */ );
}
-LIR *LoadMultiple(CompilationUnit *cUnit, int rBase, int rMask);
+LIR *LoadMultiple(CompilationUnit *cu, int rBase, int r_mask);
X86ConditionCode X86ConditionEncoding(ConditionCode cond);
-LIR* OpCondBranch(CompilationUnit* cUnit, ConditionCode cc, LIR* target)
+LIR* OpCondBranch(CompilationUnit* cu, ConditionCode cc, LIR* target)
{
- LIR* branch = NewLIR2(cUnit, kX86Jcc8, 0 /* offset to be patched */,
+ LIR* branch = NewLIR2(cu, kX86Jcc8, 0 /* offset to be patched */,
X86ConditionEncoding(cc));
branch->target = target;
return branch;
}
-LIR *OpReg(CompilationUnit *cUnit, OpKind op, int rDestSrc)
+LIR *OpReg(CompilationUnit *cu, OpKind op, int r_dest_src)
{
X86OpCode opcode = kX86Bkpt;
switch (op) {
@@ -120,48 +118,48 @@
default:
LOG(FATAL) << "Bad case in OpReg " << op;
}
- return NewLIR1(cUnit, opcode, rDestSrc);
+ return NewLIR1(cu, opcode, r_dest_src);
}
-LIR *OpRegImm(CompilationUnit *cUnit, OpKind op, int rDestSrc1, int value)
+LIR *OpRegImm(CompilationUnit *cu, OpKind op, int r_dest_src1, int value)
{
X86OpCode opcode = kX86Bkpt;
- bool byteImm = IS_SIMM8(value);
- DCHECK(!X86_FPREG(rDestSrc1));
+ bool byte_imm = IS_SIMM8(value);
+ DCHECK(!X86_FPREG(r_dest_src1));
switch (op) {
case kOpLsl: opcode = kX86Sal32RI; break;
case kOpLsr: opcode = kX86Shr32RI; break;
case kOpAsr: opcode = kX86Sar32RI; break;
- case kOpAdd: opcode = byteImm ? kX86Add32RI8 : kX86Add32RI; break;
- case kOpOr: opcode = byteImm ? kX86Or32RI8 : kX86Or32RI; break;
- case kOpAdc: opcode = byteImm ? kX86Adc32RI8 : kX86Adc32RI; break;
+ case kOpAdd: opcode = byte_imm ? kX86Add32RI8 : kX86Add32RI; break;
+ case kOpOr: opcode = byte_imm ? kX86Or32RI8 : kX86Or32RI; break;
+ case kOpAdc: opcode = byte_imm ? kX86Adc32RI8 : kX86Adc32RI; break;
//case kOpSbb: opcode = kX86Sbb32RI; break;
- case kOpAnd: opcode = byteImm ? kX86And32RI8 : kX86And32RI; break;
- case kOpSub: opcode = byteImm ? kX86Sub32RI8 : kX86Sub32RI; break;
- case kOpXor: opcode = byteImm ? kX86Xor32RI8 : kX86Xor32RI; break;
- case kOpCmp: opcode = byteImm ? kX86Cmp32RI8 : kX86Cmp32RI; break;
- case kOpMov: return LoadConstantNoClobber(cUnit, rDestSrc1, value);
+ case kOpAnd: opcode = byte_imm ? kX86And32RI8 : kX86And32RI; break;
+ case kOpSub: opcode = byte_imm ? kX86Sub32RI8 : kX86Sub32RI; break;
+ case kOpXor: opcode = byte_imm ? kX86Xor32RI8 : kX86Xor32RI; break;
+ case kOpCmp: opcode = byte_imm ? kX86Cmp32RI8 : kX86Cmp32RI; break;
+ case kOpMov: return LoadConstantNoClobber(cu, r_dest_src1, value);
case kOpMul:
- opcode = byteImm ? kX86Imul32RRI8 : kX86Imul32RRI;
- return NewLIR3(cUnit, opcode, rDestSrc1, rDestSrc1, value);
+ opcode = byte_imm ? kX86Imul32RRI8 : kX86Imul32RRI;
+ return NewLIR3(cu, opcode, r_dest_src1, r_dest_src1, value);
default:
LOG(FATAL) << "Bad case in OpRegImm " << op;
}
- return NewLIR2(cUnit, opcode, rDestSrc1, value);
+ return NewLIR2(cu, opcode, r_dest_src1, value);
}
-LIR *OpRegReg(CompilationUnit *cUnit, OpKind op, int rDestSrc1, int rSrc2)
+LIR *OpRegReg(CompilationUnit *cu, OpKind op, int r_dest_src1, int r_src2)
{
X86OpCode opcode = kX86Nop;
bool src2_must_be_cx = false;
switch (op) {
// X86 unary opcodes
case kOpMvn:
- OpRegCopy(cUnit, rDestSrc1, rSrc2);
- return OpReg(cUnit, kOpNot, rDestSrc1);
+ OpRegCopy(cu, r_dest_src1, r_src2);
+ return OpReg(cu, kOpNot, r_dest_src1);
case kOpNeg:
- OpRegCopy(cUnit, rDestSrc1, rSrc2);
- return OpReg(cUnit, kOpNeg, rDestSrc1);
+ OpRegCopy(cu, r_dest_src1, r_src2);
+ return OpReg(cu, kOpNeg, r_dest_src1);
// X86 binary opcodes
case kOpSub: opcode = kX86Sub32RR; break;
case kOpSbc: opcode = kX86Sbb32RR; break;
@@ -177,10 +175,10 @@
case kOpXor: opcode = kX86Xor32RR; break;
case kOp2Byte:
// Use shifts instead of a byte operand if the source can't be byte accessed.
- if (rSrc2 >= 4) {
- NewLIR2(cUnit, kX86Mov32RR, rDestSrc1, rSrc2);
- NewLIR2(cUnit, kX86Sal32RI, rDestSrc1, 24);
- return NewLIR2(cUnit, kX86Sar32RI, rDestSrc1, 24);
+ if (r_src2 >= 4) {
+ NewLIR2(cu, kX86Mov32RR, r_dest_src1, r_src2);
+ NewLIR2(cu, kX86Sal32RI, r_dest_src1, 24);
+ return NewLIR2(cu, kX86Sar32RI, r_dest_src1, 24);
} else {
opcode = kX86Movsx8RR;
}
@@ -192,11 +190,11 @@
LOG(FATAL) << "Bad case in OpRegReg " << op;
break;
}
- CHECK(!src2_must_be_cx || rSrc2 == rCX);
- return NewLIR2(cUnit, opcode, rDestSrc1, rSrc2);
+ CHECK(!src2_must_be_cx || r_src2 == rCX);
+ return NewLIR2(cu, opcode, r_dest_src1, r_src2);
}
-LIR* OpRegMem(CompilationUnit *cUnit, OpKind op, int rDest, int rBase,
+LIR* OpRegMem(CompilationUnit *cu, OpKind op, int r_dest, int rBase,
int offset)
{
X86OpCode opcode = kX86Nop;
@@ -217,43 +215,43 @@
LOG(FATAL) << "Bad case in OpRegMem " << op;
break;
}
- return NewLIR3(cUnit, opcode, rDest, rBase, offset);
+ return NewLIR3(cu, opcode, r_dest, rBase, offset);
}
-LIR* OpRegRegReg(CompilationUnit *cUnit, OpKind op, int rDest, int rSrc1,
- int rSrc2)
+LIR* OpRegRegReg(CompilationUnit *cu, OpKind op, int r_dest, int r_src1,
+ int r_src2)
{
- if (rDest != rSrc1 && rDest != rSrc2) {
+ if (r_dest != r_src1 && r_dest != r_src2) {
if (op == kOpAdd) { // lea special case, except can't encode rbp as base
- if (rSrc1 == rSrc2) {
- OpRegCopy(cUnit, rDest, rSrc1);
- return OpRegImm(cUnit, kOpLsl, rDest, 1);
- } else if (rSrc1 != rBP) {
- return NewLIR5(cUnit, kX86Lea32RA, rDest, rSrc1 /* base */,
- rSrc2 /* index */, 0 /* scale */, 0 /* disp */);
+ if (r_src1 == r_src2) {
+ OpRegCopy(cu, r_dest, r_src1);
+ return OpRegImm(cu, kOpLsl, r_dest, 1);
+ } else if (r_src1 != rBP) {
+ return NewLIR5(cu, kX86Lea32RA, r_dest, r_src1 /* base */,
+ r_src2 /* index */, 0 /* scale */, 0 /* disp */);
} else {
- return NewLIR5(cUnit, kX86Lea32RA, rDest, rSrc2 /* base */,
- rSrc1 /* index */, 0 /* scale */, 0 /* disp */);
+ return NewLIR5(cu, kX86Lea32RA, r_dest, r_src2 /* base */,
+ r_src1 /* index */, 0 /* scale */, 0 /* disp */);
}
} else {
- OpRegCopy(cUnit, rDest, rSrc1);
- return OpRegReg(cUnit, op, rDest, rSrc2);
+ OpRegCopy(cu, r_dest, r_src1);
+ return OpRegReg(cu, op, r_dest, r_src2);
}
- } else if (rDest == rSrc1) {
- return OpRegReg(cUnit, op, rDest, rSrc2);
- } else { // rDest == rSrc2
+ } else if (r_dest == r_src1) {
+ return OpRegReg(cu, op, r_dest, r_src2);
+ } else { // r_dest == r_src2
switch (op) {
case kOpSub: // non-commutative
- OpReg(cUnit, kOpNeg, rDest);
+ OpReg(cu, kOpNeg, r_dest);
op = kOpAdd;
break;
case kOpSbc:
case kOpLsl: case kOpLsr: case kOpAsr: case kOpRor: {
- int tReg = AllocTemp(cUnit);
- OpRegCopy(cUnit, tReg, rSrc1);
- OpRegReg(cUnit, op, tReg, rSrc2);
- LIR* res = OpRegCopy(cUnit, rDest, tReg);
- FreeTemp(cUnit, tReg);
+ int t_reg = AllocTemp(cu);
+ OpRegCopy(cu, t_reg, r_src1);
+ OpRegReg(cu, op, t_reg, r_src2);
+ LIR* res = OpRegCopy(cu, r_dest, t_reg);
+ FreeTemp(cu, t_reg);
return res;
}
case kOpAdd: // commutative
@@ -265,38 +263,38 @@
default:
LOG(FATAL) << "Bad case in OpRegRegReg " << op;
}
- return OpRegReg(cUnit, op, rDest, rSrc1);
+ return OpRegReg(cu, op, r_dest, r_src1);
}
}
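// Sketch (not part of this CL) of how the kOpAdd special cases above map
// to x86 when r_dest differs from both sources:
//   r_dest = r_src1 + r_src2  ->  lea r_dest, [r_src1 + r_src2]  // kX86Lea32RA
//   r_dest = r_src1 + r_src1  ->  mov r_dest, r_src1; shl r_dest, 1
// When r_src1 is rBP, base and index are swapped so rBP lands in the index
// slot, since the code above treats rBP as unencodable as an LEA base.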
-LIR* OpRegRegImm(CompilationUnit *cUnit, OpKind op, int rDest, int rSrc,
+LIR* OpRegRegImm(CompilationUnit *cu, OpKind op, int r_dest, int r_src,
int value)
{
if (op == kOpMul) {
X86OpCode opcode = IS_SIMM8(value) ? kX86Imul32RRI8 : kX86Imul32RRI;
- return NewLIR3(cUnit, opcode, rDest, rSrc, value);
+ return NewLIR3(cu, opcode, r_dest, r_src, value);
} else if (op == kOpAnd) {
- if (value == 0xFF && rSrc < 4) {
- return NewLIR2(cUnit, kX86Movzx8RR, rDest, rSrc);
+ if (value == 0xFF && r_src < 4) {
+ return NewLIR2(cu, kX86Movzx8RR, r_dest, r_src);
} else if (value == 0xFFFF) {
- return NewLIR2(cUnit, kX86Movzx16RR, rDest, rSrc);
+ return NewLIR2(cu, kX86Movzx16RR, r_dest, r_src);
}
}
- if (rDest != rSrc) {
+ if (r_dest != r_src) {
if (false && op == kOpLsl && value >= 0 && value <= 3) { // lea shift special case
// TODO: fix bug in LEA encoding when disp == 0
- return NewLIR5(cUnit, kX86Lea32RA, rDest, r5sib_no_base /* base */,
- rSrc /* index */, value /* scale */, 0 /* disp */);
+ return NewLIR5(cu, kX86Lea32RA, r_dest, r5sib_no_base /* base */,
+ r_src /* index */, value /* scale */, 0 /* disp */);
} else if (op == kOpAdd) { // lea add special case
- return NewLIR5(cUnit, kX86Lea32RA, rDest, rSrc /* base */,
+ return NewLIR5(cu, kX86Lea32RA, r_dest, r_src /* base */,
r4sib_no_index /* index */, 0 /* scale */, value /* disp */);
}
- OpRegCopy(cUnit, rDest, rSrc);
+ OpRegCopy(cu, r_dest, r_src);
}
- return OpRegImm(cUnit, op, rDest, value);
+ return OpRegImm(cu, op, r_dest, value);
}
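// Sketch (not part of this CL): the OpRegRegImm special cases above in
// x86 terms.
//   r_dest = r_src * imm     ->  imul r_dest, r_src, imm    // 3-operand imul
//   r_dest = r_src & 0xFF    ->  movzx r_dest, byte r_src   // only if r_src < 4,
//                                                           // i.e. has a byte subreg
//   r_dest = r_src & 0xFFFF  ->  movzx r_dest, word r_src
//   r_dest = r_src + imm     ->  lea r_dest, [r_src + imm]  // non-destructive add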
-LIR* OpThreadMem(CompilationUnit* cUnit, OpKind op, int threadOffset)
+LIR* OpThreadMem(CompilationUnit* cu, OpKind op, int thread_offset)
{
X86OpCode opcode = kX86Bkpt;
switch (op) {
@@ -305,10 +303,10 @@
LOG(FATAL) << "Bad opcode: " << op;
break;
}
- return NewLIR1(cUnit, opcode, threadOffset);
+ return NewLIR1(cu, opcode, thread_offset);
}
-LIR* OpMem(CompilationUnit* cUnit, OpKind op, int rBase, int disp)
+LIR* OpMem(CompilationUnit* cu, OpKind op, int rBase, int disp)
{
X86OpCode opcode = kX86Bkpt;
switch (op) {
@@ -317,57 +315,55 @@
LOG(FATAL) << "Bad opcode: " << op;
break;
}
- return NewLIR2(cUnit, opcode, rBase, disp);
+ return NewLIR2(cu, opcode, rBase, disp);
}
-LIR *LoadConstantValueWide(CompilationUnit *cUnit, int rDestLo,
- int rDestHi, int valLo, int valHi)
+LIR *LoadConstantValueWide(CompilationUnit *cu, int r_dest_lo,
+ int r_dest_hi, int val_lo, int val_hi)
{
LIR *res;
- if (X86_FPREG(rDestLo)) {
- DCHECK(X86_FPREG(rDestHi)); // ignore rDestHi
- if (valLo == 0 && valHi == 0) {
- return NewLIR2(cUnit, kX86XorpsRR, rDestLo, rDestLo);
+ if (X86_FPREG(r_dest_lo)) {
+ DCHECK(X86_FPREG(r_dest_hi)); // ignore r_dest_hi
+ if (val_lo == 0 && val_hi == 0) {
+ return NewLIR2(cu, kX86XorpsRR, r_dest_lo, r_dest_lo);
} else {
- if (valLo == 0) {
- res = NewLIR2(cUnit, kX86XorpsRR, rDestLo, rDestLo);
+ if (val_lo == 0) {
+ res = NewLIR2(cu, kX86XorpsRR, r_dest_lo, r_dest_lo);
} else {
- res = LoadConstantNoClobber(cUnit, rDestLo, valLo);
+ res = LoadConstantNoClobber(cu, r_dest_lo, val_lo);
}
- if (valHi != 0) {
- LoadConstantNoClobber(cUnit, rDestHi, valHi);
- NewLIR2(cUnit, kX86PsllqRI, rDestHi, 32);
- NewLIR2(cUnit, kX86OrpsRR, rDestLo, rDestHi);
+ if (val_hi != 0) {
+ LoadConstantNoClobber(cu, r_dest_hi, val_hi);
+ NewLIR2(cu, kX86PsllqRI, r_dest_hi, 32);
+ NewLIR2(cu, kX86OrpsRR, r_dest_lo, r_dest_hi);
}
}
} else {
- res = LoadConstantNoClobber(cUnit, rDestLo, valLo);
- LoadConstantNoClobber(cUnit, rDestHi, valHi);
+ res = LoadConstantNoClobber(cu, r_dest_lo, val_lo);
+ LoadConstantNoClobber(cu, r_dest_hi, val_hi);
}
return res;
}
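// Worked example (illustration only; xmm0/xmm1 are assumed names): building
// the 64-bit constant 0x0000000100000002 in an XMM pair via the path above.
//   mov eax, 2 ; movd xmm0, eax   // val_lo via LoadConstantNoClobber
//   mov eax, 1 ; movd xmm1, eax   // val_hi into the partner register
//   psllq xmm1, 32                // shift val_hi into bits [63:32]
//   orps  xmm0, xmm1              // xmm0 now holds 0x0000000100000002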
-LIR *LoadMultiple(CompilationUnit *cUnit, int rBase, int rMask)
+LIR *LoadMultiple(CompilationUnit *cu, int rBase, int r_mask)
{
UNIMPLEMENTED(FATAL) << "LoadMultiple";
- NewLIR0(cUnit, kX86Bkpt);
+ NewLIR0(cu, kX86Bkpt);
return NULL;
}
-LIR *StoreMultiple(CompilationUnit *cUnit, int rBase, int rMask)
+LIR *StoreMultiple(CompilationUnit *cu, int rBase, int r_mask)
{
UNIMPLEMENTED(FATAL) << "StoreMultiple";
- NewLIR0(cUnit, kX86Bkpt);
+ NewLIR0(cu, kX86Bkpt);
return NULL;
}
-LIR* LoadBaseIndexedDisp(CompilationUnit *cUnit,
- int rBase, int rIndex, int scale, int displacement,
- int rDest, int rDestHi,
- OpSize size, int sReg) {
+LIR* LoadBaseIndexedDisp(CompilationUnit *cu, int rBase, int r_index, int scale,
+ int displacement, int r_dest, int r_dest_hi, OpSize size, int s_reg) {
LIR *load = NULL;
LIR *load2 = NULL;
- bool isArray = rIndex != INVALID_REG;
+ bool is_array = r_index != INVALID_REG;
bool pair = false;
bool is64bit = false;
X86OpCode opcode = kX86Nop;
@@ -375,84 +371,84 @@
case kLong:
case kDouble:
is64bit = true;
- if (X86_FPREG(rDest)) {
- opcode = isArray ? kX86MovsdRA : kX86MovsdRM;
- if (X86_SINGLEREG(rDest)) {
- DCHECK(X86_FPREG(rDestHi));
- DCHECK_EQ(rDest, (rDestHi - 1));
- rDest = S2d(rDest, rDestHi);
+ if (X86_FPREG(r_dest)) {
+ opcode = is_array ? kX86MovsdRA : kX86MovsdRM;
+ if (X86_SINGLEREG(r_dest)) {
+ DCHECK(X86_FPREG(r_dest_hi));
+ DCHECK_EQ(r_dest, (r_dest_hi - 1));
+ r_dest = S2d(r_dest, r_dest_hi);
}
- rDestHi = rDest + 1;
+ r_dest_hi = r_dest + 1;
} else {
pair = true;
- opcode = isArray ? kX86Mov32RA : kX86Mov32RM;
+ opcode = is_array ? kX86Mov32RA : kX86Mov32RM;
}
// TODO: double load is from unaligned address
DCHECK_EQ((displacement & 0x3), 0);
break;
case kWord:
case kSingle:
- opcode = isArray ? kX86Mov32RA : kX86Mov32RM;
- if (X86_FPREG(rDest)) {
- opcode = isArray ? kX86MovssRA : kX86MovssRM;
- DCHECK(X86_SINGLEREG(rDest));
+ opcode = is_array ? kX86Mov32RA : kX86Mov32RM;
+ if (X86_FPREG(r_dest)) {
+ opcode = is_array ? kX86MovssRA : kX86MovssRM;
+ DCHECK(X86_SINGLEREG(r_dest));
}
DCHECK_EQ((displacement & 0x3), 0);
break;
case kUnsignedHalf:
- opcode = isArray ? kX86Movzx16RA : kX86Movzx16RM;
+ opcode = is_array ? kX86Movzx16RA : kX86Movzx16RM;
DCHECK_EQ((displacement & 0x1), 0);
break;
case kSignedHalf:
- opcode = isArray ? kX86Movsx16RA : kX86Movsx16RM;
+ opcode = is_array ? kX86Movsx16RA : kX86Movsx16RM;
DCHECK_EQ((displacement & 0x1), 0);
break;
case kUnsignedByte:
- opcode = isArray ? kX86Movzx8RA : kX86Movzx8RM;
+ opcode = is_array ? kX86Movzx8RA : kX86Movzx8RM;
break;
case kSignedByte:
- opcode = isArray ? kX86Movsx8RA : kX86Movsx8RM;
+ opcode = is_array ? kX86Movsx8RA : kX86Movsx8RM;
break;
default:
LOG(FATAL) << "Bad case in LoadBaseIndexedDispBody";
}
- if (!isArray) {
+ if (!is_array) {
if (!pair) {
- load = NewLIR3(cUnit, opcode, rDest, rBase, displacement + LOWORD_OFFSET);
+ load = NewLIR3(cu, opcode, r_dest, rBase, displacement + LOWORD_OFFSET);
} else {
- if (rBase == rDest) {
- load2 = NewLIR3(cUnit, opcode, rDestHi, rBase,
+ if (rBase == r_dest) {
+ load2 = NewLIR3(cu, opcode, r_dest_hi, rBase,
displacement + HIWORD_OFFSET);
- load = NewLIR3(cUnit, opcode, rDest, rBase, displacement + LOWORD_OFFSET);
+ load = NewLIR3(cu, opcode, r_dest, rBase, displacement + LOWORD_OFFSET);
} else {
- load = NewLIR3(cUnit, opcode, rDest, rBase, displacement + LOWORD_OFFSET);
- load2 = NewLIR3(cUnit, opcode, rDestHi, rBase,
+ load = NewLIR3(cu, opcode, r_dest, rBase, displacement + LOWORD_OFFSET);
+ load2 = NewLIR3(cu, opcode, r_dest_hi, rBase,
displacement + HIWORD_OFFSET);
}
}
if (rBase == rX86_SP) {
AnnotateDalvikRegAccess(load, (displacement + (pair ? LOWORD_OFFSET : 0))
- >> 2, true /* isLoad */, is64bit);
+ >> 2, true /* is_load */, is64bit);
if (pair) {
AnnotateDalvikRegAccess(load2, (displacement + HIWORD_OFFSET) >> 2,
- true /* isLoad */, is64bit);
+ true /* is_load */, is64bit);
}
}
} else {
if (!pair) {
- load = NewLIR5(cUnit, opcode, rDest, rBase, rIndex, scale,
+ load = NewLIR5(cu, opcode, r_dest, rBase, r_index, scale,
displacement + LOWORD_OFFSET);
} else {
- if (rBase == rDest) {
- load2 = NewLIR5(cUnit, opcode, rDestHi, rBase, rIndex, scale,
+ if (rBase == r_dest) {
+ load2 = NewLIR5(cu, opcode, r_dest_hi, rBase, r_index, scale,
displacement + HIWORD_OFFSET);
- load = NewLIR5(cUnit, opcode, rDest, rBase, rIndex, scale,
+ load = NewLIR5(cu, opcode, r_dest, rBase, r_index, scale,
displacement + LOWORD_OFFSET);
} else {
- load = NewLIR5(cUnit, opcode, rDest, rBase, rIndex, scale,
+ load = NewLIR5(cu, opcode, r_dest, rBase, r_index, scale,
displacement + LOWORD_OFFSET);
- load2 = NewLIR5(cUnit, opcode, rDestHi, rBase, rIndex, scale,
+ load2 = NewLIR5(cu, opcode, r_dest_hi, rBase, r_index, scale,
displacement + HIWORD_OFFSET);
}
}
@@ -462,35 +458,29 @@
}
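// Why the pair-load ordering above matters (sketch, not part of this CL):
// when rBase aliases r_dest, fetching the low word first would clobber the
// base before the high word could be read, so the high word goes first.
//   mov r_dest_hi, [rBase + disp + 4]  // high word while rBase is intact
//   mov r_dest,    [rBase + disp]      // low word last; may overwrite rBase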
/* Load value from base + scaled index. */
-LIR *LoadBaseIndexed(CompilationUnit *cUnit, int rBase,
- int rIndex, int rDest, int scale, OpSize size) {
- return LoadBaseIndexedDisp(cUnit, rBase, rIndex, scale, 0,
- rDest, INVALID_REG, size, INVALID_SREG);
+LIR *LoadBaseIndexed(CompilationUnit *cu, int rBase,
+ int r_index, int r_dest, int scale, OpSize size) {
+ return LoadBaseIndexedDisp(cu, rBase, r_index, scale, 0,
+ r_dest, INVALID_REG, size, INVALID_SREG);
}
-LIR *LoadBaseDisp(CompilationUnit *cUnit,
- int rBase, int displacement,
- int rDest,
- OpSize size, int sReg) {
- return LoadBaseIndexedDisp(cUnit, rBase, INVALID_REG, 0, displacement,
- rDest, INVALID_REG, size, sReg);
+LIR *LoadBaseDisp(CompilationUnit *cu, int rBase, int displacement,
+ int r_dest, OpSize size, int s_reg) {
+ return LoadBaseIndexedDisp(cu, rBase, INVALID_REG, 0, displacement,
+ r_dest, INVALID_REG, size, s_reg);
}
-LIR *LoadBaseDispWide(CompilationUnit *cUnit,
- int rBase, int displacement,
- int rDestLo, int rDestHi,
- int sReg) {
- return LoadBaseIndexedDisp(cUnit, rBase, INVALID_REG, 0, displacement,
- rDestLo, rDestHi, kLong, sReg);
+LIR *LoadBaseDispWide(CompilationUnit *cu, int rBase, int displacement,
+ int r_dest_lo, int r_dest_hi, int s_reg) {
+ return LoadBaseIndexedDisp(cu, rBase, INVALID_REG, 0, displacement,
+ r_dest_lo, r_dest_hi, kLong, s_reg);
}
-LIR* StoreBaseIndexedDisp(CompilationUnit *cUnit,
- int rBase, int rIndex, int scale, int displacement,
- int rSrc, int rSrcHi,
- OpSize size, int sReg) {
+LIR* StoreBaseIndexedDisp(CompilationUnit *cu, int rBase, int r_index, int scale,
+ int displacement, int r_src, int r_src_hi, OpSize size, int s_reg) {
LIR *store = NULL;
LIR *store2 = NULL;
- bool isArray = rIndex != INVALID_REG;
+ bool is_array = r_index != INVALID_REG;
bool pair = false;
bool is64bit = false;
X86OpCode opcode = kX86Nop;
@@ -498,67 +488,67 @@
case kLong:
case kDouble:
is64bit = true;
- if (X86_FPREG(rSrc)) {
- opcode = isArray ? kX86MovsdAR : kX86MovsdMR;
- if (X86_SINGLEREG(rSrc)) {
- DCHECK(X86_FPREG(rSrcHi));
- DCHECK_EQ(rSrc, (rSrcHi - 1));
- rSrc = S2d(rSrc, rSrcHi);
+ if (X86_FPREG(r_src)) {
+ opcode = is_array ? kX86MovsdAR : kX86MovsdMR;
+ if (X86_SINGLEREG(r_src)) {
+ DCHECK(X86_FPREG(r_src_hi));
+ DCHECK_EQ(r_src, (r_src_hi - 1));
+ r_src = S2d(r_src, r_src_hi);
}
- rSrcHi = rSrc + 1;
+ r_src_hi = r_src + 1;
} else {
pair = true;
- opcode = isArray ? kX86Mov32AR : kX86Mov32MR;
+ opcode = is_array ? kX86Mov32AR : kX86Mov32MR;
}
// TODO: double store is to unaligned address
DCHECK_EQ((displacement & 0x3), 0);
break;
case kWord:
case kSingle:
- opcode = isArray ? kX86Mov32AR : kX86Mov32MR;
- if (X86_FPREG(rSrc)) {
- opcode = isArray ? kX86MovssAR : kX86MovssMR;
- DCHECK(X86_SINGLEREG(rSrc));
+ opcode = is_array ? kX86Mov32AR : kX86Mov32MR;
+ if (X86_FPREG(r_src)) {
+ opcode = is_array ? kX86MovssAR : kX86MovssMR;
+ DCHECK(X86_SINGLEREG(r_src));
}
DCHECK_EQ((displacement & 0x3), 0);
break;
case kUnsignedHalf:
case kSignedHalf:
- opcode = isArray ? kX86Mov16AR : kX86Mov16MR;
+ opcode = is_array ? kX86Mov16AR : kX86Mov16MR;
DCHECK_EQ((displacement & 0x1), 0);
break;
case kUnsignedByte:
case kSignedByte:
- opcode = isArray ? kX86Mov8AR : kX86Mov8MR;
+ opcode = is_array ? kX86Mov8AR : kX86Mov8MR;
break;
default:
LOG(FATAL) << "Bad case in LoadBaseIndexedDispBody";
}
- if (!isArray) {
+ if (!is_array) {
if (!pair) {
- store = NewLIR3(cUnit, opcode, rBase, displacement + LOWORD_OFFSET, rSrc);
+ store = NewLIR3(cu, opcode, rBase, displacement + LOWORD_OFFSET, r_src);
} else {
- store = NewLIR3(cUnit, opcode, rBase, displacement + LOWORD_OFFSET, rSrc);
- store2 = NewLIR3(cUnit, opcode, rBase, displacement + HIWORD_OFFSET, rSrcHi);
+ store = NewLIR3(cu, opcode, rBase, displacement + LOWORD_OFFSET, r_src);
+ store2 = NewLIR3(cu, opcode, rBase, displacement + HIWORD_OFFSET, r_src_hi);
}
if (rBase == rX86_SP) {
AnnotateDalvikRegAccess(store, (displacement + (pair ? LOWORD_OFFSET : 0))
- >> 2, false /* isLoad */, is64bit);
+ >> 2, false /* is_load */, is64bit);
if (pair) {
AnnotateDalvikRegAccess(store2, (displacement + HIWORD_OFFSET) >> 2,
- false /* isLoad */, is64bit);
+ false /* is_load */, is64bit);
}
}
} else {
if (!pair) {
- store = NewLIR5(cUnit, opcode, rBase, rIndex, scale,
- displacement + LOWORD_OFFSET, rSrc);
+ store = NewLIR5(cu, opcode, rBase, r_index, scale,
+ displacement + LOWORD_OFFSET, r_src);
} else {
- store = NewLIR5(cUnit, opcode, rBase, rIndex, scale,
- displacement + LOWORD_OFFSET, rSrc);
- store2 = NewLIR5(cUnit, opcode, rBase, rIndex, scale,
- displacement + HIWORD_OFFSET, rSrcHi);
+ store = NewLIR5(cu, opcode, rBase, r_index, scale,
+ displacement + LOWORD_OFFSET, r_src);
+ store2 = NewLIR5(cu, opcode, rBase, r_index, scale,
+ displacement + HIWORD_OFFSET, r_src_hi);
}
}
@@ -566,31 +556,31 @@
}
/* Store value to base + scaled index. */
-LIR *StoreBaseIndexed(CompilationUnit *cUnit, int rBase, int rIndex, int rSrc,
+LIR *StoreBaseIndexed(CompilationUnit *cu, int rBase, int r_index, int r_src,
int scale, OpSize size)
{
- return StoreBaseIndexedDisp(cUnit, rBase, rIndex, scale, 0,
- rSrc, INVALID_REG, size, INVALID_SREG);
+ return StoreBaseIndexedDisp(cu, rBase, r_index, scale, 0,
+ r_src, INVALID_REG, size, INVALID_SREG);
}
-LIR *StoreBaseDisp(CompilationUnit *cUnit, int rBase, int displacement,
- int rSrc, OpSize size)
+LIR *StoreBaseDisp(CompilationUnit *cu, int rBase, int displacement,
+ int r_src, OpSize size)
{
- return StoreBaseIndexedDisp(cUnit, rBase, INVALID_REG, 0,
- displacement, rSrc, INVALID_REG, size,
+ return StoreBaseIndexedDisp(cu, rBase, INVALID_REG, 0,
+ displacement, r_src, INVALID_REG, size,
INVALID_SREG);
}
-LIR *StoreBaseDispWide(CompilationUnit *cUnit, int rBase, int displacement,
- int rSrcLo, int rSrcHi)
+LIR *StoreBaseDispWide(CompilationUnit *cu, int rBase, int displacement,
+ int r_src_lo, int r_src_hi)
{
- return StoreBaseIndexedDisp(cUnit, rBase, INVALID_REG, 0, displacement,
- rSrcLo, rSrcHi, kLong, INVALID_SREG);
+ return StoreBaseIndexedDisp(cu, rBase, INVALID_REG, 0, displacement,
+ r_src_lo, r_src_hi, kLong, INVALID_SREG);
}
-void LoadPair(CompilationUnit *cUnit, int base, int lowReg, int highReg)
+void LoadPair(CompilationUnit *cu, int base, int low_reg, int high_reg)
{
- LoadBaseDispWide(cUnit, base, 0, lowReg, highReg, INVALID_SREG);
+ LoadBaseDispWide(cu, base, 0, low_reg, high_reg, INVALID_SREG);
}
} // namespace art
diff --git a/src/compiler/codegen/x86/x86_lir.h b/src/compiler/codegen/x86/x86_lir.h
index 53d69ad..d58f587 100644
--- a/src/compiler/codegen/x86/x86_lir.h
+++ b/src/compiler/codegen/x86/x86_lir.h
@@ -99,7 +99,7 @@
* | OUT[outs-2] |
* | . |
* | OUT[0] |
- * | curMethod* | <<== sp w/ 16-byte alignment
+ * | cur_method* | <<== sp w/ 16-byte alignment
* +========================+
*/
@@ -128,7 +128,7 @@
#define X86_FP_REG_MASK 0xF
/* RegisterLocation templates for return values (rAX, rAX/rDX or XMM0) */
-// location, wide, defined, const, fp, core, ref, highWord, home, lowReg, highReg, sRegLow
+// location, wide, defined, const, fp, core, ref, high_word, home, low_reg, high_reg, s_reg_low
#define X86_LOC_C_RETURN {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, rAX, INVALID_REG, INVALID_SREG, INVALID_SREG}
#define X86_LOC_C_RETURN_WIDE {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, rAX, rDX, INVALID_SREG, INVALID_SREG}
#define X86_LOC_C_RETURN_FLOAT {kLocPhysReg, 0, 0, 0, 1, 0, 0, 0, 1, fr0, INVALID_REG, INVALID_SREG, INVALID_SREG}
@@ -384,7 +384,7 @@
kX86Last
};
-/* Instruction assembly fieldLoc kind */
+/* Instruction assembly field_loc kind */
enum X86EncodingKind {
kData, // Special case for raw data.
kNop, // Special case for variable length nop.
diff --git a/src/compiler/compiler_enums.h b/src/compiler/compiler_enums.h
index 15336cd..65c34d2 100644
--- a/src/compiler/compiler_enums.h
+++ b/src/compiler/compiler_enums.h
@@ -63,7 +63,7 @@
};
/*
- * Def/Use encoding in 64-bit useMask/defMask. Low positions used for target-specific
+ * Def/Use encoding in 64-bit use_mask/def_mask. Low positions used for target-specific
* registers (and typically use the register number as the position). High positions
* reserved for common and abstract resources.
*/
@@ -72,7 +72,7 @@
kMustNotAlias = 63,
kHeapRef = 62, // Default memory reference type
kLiteral = 61, // Literal pool memory reference
- kDalvikReg = 60, // Dalvik vReg memory reference
+ kDalvikReg = 60, // Dalvik v_reg memory reference
kFPStatus = 59,
kCCode = 58,
kLowestCommonResource = kCCode
@@ -130,7 +130,7 @@
kMIRMark, // Temporary node mark
};
-/* For successorBlockList */
+/* For successor_block_list */
enum BlockListType {
kNotUsed = 0,
kCatch,
diff --git a/src/compiler/compiler_ir.h b/src/compiler/compiler_ir.h
index 4fa019f..e7a5e73 100644
--- a/src/compiler/compiler_ir.h
+++ b/src/compiler/compiler_ir.h
@@ -29,14 +29,14 @@
namespace art {
-#define SLOW_FIELD_PATH (cUnit->enableDebug & (1 << kDebugSlowFieldPath))
-#define SLOW_INVOKE_PATH (cUnit->enableDebug & (1 << kDebugSlowInvokePath))
-#define SLOW_STRING_PATH (cUnit->enableDebug & (1 << kDebugSlowStringPath))
-#define SLOW_TYPE_PATH (cUnit->enableDebug & (1 << kDebugSlowTypePath))
-#define EXERCISE_SLOWEST_STRING_PATH (cUnit->enableDebug & \
+#define SLOW_FIELD_PATH (cu->enable_debug & (1 << kDebugSlowFieldPath))
+#define SLOW_INVOKE_PATH (cu->enable_debug & (1 << kDebugSlowInvokePath))
+#define SLOW_STRING_PATH (cu->enable_debug & (1 << kDebugSlowStringPath))
+#define SLOW_TYPE_PATH (cu->enable_debug & (1 << kDebugSlowTypePath))
+#define EXERCISE_SLOWEST_STRING_PATH (cu->enable_debug & \
(1 << kDebugSlowestStringPath))
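// Example (sketch, not part of this CL): each macro just tests one bit of
// cu->enable_debug, letting a debug build force an out-of-line path:
//   if (SLOW_FIELD_PATH) {
//     // emit the runtime-helper call instead of the inline fast path
//   }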
-// Minimum field size to contain Dalvik vReg number
+// Minimum field size to contain Dalvik v_reg number
#define VREG_NUM_WIDTH 16
struct ArenaBitVector;
@@ -44,48 +44,48 @@
class LLVMInfo;
struct PromotionMap {
- RegLocationType coreLocation:3;
- uint8_t coreReg;
- RegLocationType fpLocation:3;
+ RegLocationType core_location:3;
+ uint8_t core_reg;
+ RegLocationType fp_location:3;
uint8_t FpReg;
- bool firstInPair;
+ bool first_in_pair;
};
struct RegLocation {
RegLocationType location:3;
unsigned wide:1;
unsigned defined:1; // Do we know the type?
- unsigned isConst:1; // Constant, value in cUnit->constantValues[]
+ unsigned is_const:1; // Constant, value in cu->constant_values[]
unsigned fp:1; // Floating point?
unsigned core:1; // Non-floating point?
unsigned ref:1; // Something GC cares about
- unsigned highWord:1; // High word of pair?
+ unsigned high_word:1; // High word of pair?
unsigned home:1; // Does this represent the home location?
- uint8_t lowReg; // First physical register
- uint8_t highReg; // 2nd physical register (if wide)
- int32_t sRegLow; // SSA name for low Dalvik word
- int32_t origSReg; // TODO: remove after Bitcode gen complete
- // and consolodate usage w/ sRegLow
+ uint8_t low_reg; // First physical register
+ uint8_t high_reg; // 2nd physical register (if wide)
+ int32_t s_reg_low; // SSA name for low Dalvik word
+ int32_t orig_sreg; // TODO: remove after Bitcode gen complete
+ // and consolidate usage w/ s_reg_low
};
struct CompilerTemp {
- int sReg;
+ int s_reg;
ArenaBitVector* bv;
};
struct CallInfo {
- int numArgWords; // Note: word count, not arg count
+ int num_arg_words; // Note: word count, not arg count
RegLocation* args; // One for each word of arguments
RegLocation result; // Eventual target of MOVE_RESULT
- int optFlags;
+ int opt_flags;
InvokeType type;
- uint32_t dexIdx;
+ uint32_t dex_idx;
uint32_t index; // Method idx for invokes, type idx for FilledNewArray
- uintptr_t directCode;
- uintptr_t directMethod;
+ uintptr_t direct_code;
+ uintptr_t direct_method;
RegLocation target; // Target of following move_result
- bool skipThis;
- bool isRange;
+ bool skip_this;
+ bool is_range;
int offset; // Dalvik offset
};
@@ -97,24 +97,24 @@
*/
struct RegisterInfo {
int reg; // Reg number
- bool inUse; // Has it been allocated?
- bool isTemp; // Can allocate as temp?
+ bool in_use; // Has it been allocated?
+ bool is_temp; // Can allocate as temp?
bool pair; // Part of a register pair?
int partner; // If pair, other reg of pair
bool live; // Is there an associated SSA name?
bool dirty; // If live, is it dirty?
- int sReg; // Name of live value
- LIR *defStart; // Starting inst in last def sequence
- LIR *defEnd; // Ending inst in last def sequence
+ int s_reg; // Name of live value
+ LIR *def_start; // Starting inst in last def sequence
+ LIR *def_end; // Ending inst in last def sequence
};
struct RegisterPool {
- int numCoreRegs;
- RegisterInfo *coreRegs;
- int nextCoreReg;
- int numFPRegs;
+ int num_core_regs;
+ RegisterInfo *core_regs;
+ int next_core_reg;
+ int num_fp_regs;
RegisterInfo *FPRegs;
- int nextFPReg;
+ int next_fp_reg;
};
#define INVALID_SREG (-1)
@@ -142,7 +142,7 @@
#define NEXT_LIR(lir) (lir->next)
#define PREV_LIR(lir) (lir->prev)
-/* Defines for aliasInfo (tracks Dalvik register references) */
+/* Defines for alias_info (tracks Dalvik register references) */
#define DECODE_ALIAS_INFO_REG(X) (X & 0xffff)
#define DECODE_ALIAS_INFO_WIDE_FLAG (0x80000000)
#define DECODE_ALIAS_INFO_WIDE(X) ((X & DECODE_ALIAS_INFO_WIDE_FLAG) ? 1 : 0)
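// Example (derived from the macros above; the manual encoding is an
// assumption for illustration): a wide access to Dalvik v5 packs the
// register number in the low 16 bits and the wide flag in bit 31.
//   int alias_info = 5 | DECODE_ALIAS_INFO_WIDE_FLAG;
//   DECODE_ALIAS_INFO_REG(alias_info);   // -> 5
//   DECODE_ALIAS_INFO_WIDE(alias_info);  // -> 1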
@@ -162,28 +162,28 @@
#define ENCODE_MEM (ENCODE_DALVIK_REG | ENCODE_LITERAL | \
ENCODE_HEAP_REF | ENCODE_MUST_NOT_ALIAS)
-#define isPseudoOpcode(opcode) (static_cast<int>(opcode) < 0)
+#define is_pseudo_opcode(opcode) (static_cast<int>(opcode) < 0)
struct LIR {
int offset; // Offset of this instruction
- int dalvikOffset; // Offset of Dalvik opcode
+ int dalvik_offset; // Offset of Dalvik opcode
LIR* next;
LIR* prev;
LIR* target;
int opcode;
int operands[5]; // [0..4] = [dest, src1, src2, extra, extra2]
struct {
- bool isNop:1; // LIR is optimized away
+ bool is_nop:1; // LIR is optimized away
bool pcRelFixup:1; // May need pc-relative fixup
unsigned int size:5; // in bytes
unsigned int unused:25;
} flags;
- int aliasInfo; // For Dalvik register & litpool disambiguation
- uint64_t useMask; // Resource mask for use
- uint64_t defMask; // Resource mask for def
+ int alias_info; // For Dalvik register & litpool disambiguation
+ uint64_t use_mask; // Resource mask for use
+ uint64_t def_mask; // Resource mask for def
};
-extern const char* extendedMIROpNames[kMirOpLast - kMirOpFirst];
+extern const char* extended_mir_op_names[kMirOpLast - kMirOpFirst];
struct SSARepresentation;
@@ -199,10 +199,10 @@
#define MIR_MARK (1 << kMIRMark)
struct Checkstats {
- int nullChecks;
- int nullChecksEliminated;
- int rangeChecks;
- int rangeChecksEliminated;
+ int null_checks;
+ int null_checks_eliminated;
+ int range_checks;
+ int range_checks_eliminated;
};
struct MIR {
@@ -211,13 +211,13 @@
unsigned int offset;
MIR* prev;
MIR* next;
- SSARepresentation* ssaRep;
- int optimizationFlags;
+ SSARepresentation* ssa_rep;
+ int optimization_flags;
union {
// Used to quickly locate all Phi opcodes
- MIR* phiNext;
+ MIR* phi_next;
// Establish link between two halves of throwing instructions
- MIR* throwInsn;
+ MIR* throw_insn;
} meta;
};
@@ -225,34 +225,34 @@
struct BasicBlock {
int id;
- int dfsId;
+ int dfs_id;
bool visited;
bool hidden;
- bool catchEntry;
- bool explicitThrow;
- bool conditionalBranch;
- bool hasReturn;
- uint16_t startOffset;
- uint16_t nestingDepth;
- BBType blockType;
- MIR* firstMIRInsn;
- MIR* lastMIRInsn;
- BasicBlock* fallThrough;
+ bool catch_entry;
+ bool explicit_throw;
+ bool conditional_branch;
+ bool has_return;
+ uint16_t start_offset;
+ uint16_t nesting_depth;
+ BBType block_type;
+ MIR* first_mir_insn;
+ MIR* last_mir_insn;
+ BasicBlock* fall_through;
BasicBlock* taken;
- BasicBlock* iDom; // Immediate dominator
- BasicBlockDataFlow* dataFlowInfo;
+ BasicBlock* i_dom; // Immediate dominator
+ BasicBlockDataFlow* data_flow_info;
GrowableList* predecessors;
ArenaBitVector* dominators;
- ArenaBitVector* iDominated; // Set nodes being immediately dominated
- ArenaBitVector* domFrontier; // Dominance frontier
+ ArenaBitVector* i_dominated; // Set nodes being immediately dominated
+ ArenaBitVector* dom_frontier; // Dominance frontier
struct { // For one-to-many successors like
- BlockListType blockListType; // switch and exception handling
+ BlockListType block_list_type; // switch and exception handling
GrowableList blocks;
- } successorBlockList;
+ } successor_block_list;
};
/*
- * The "blocks" field in "successorBlockList" points to an array of
+ * The "blocks" field in "successor_block_list" points to an array of
* elements with the type "SuccessorBlockInfo".
* For catch blocks, key is type index for the exception.
* For switch blocks, key is the case value.
@@ -271,7 +271,7 @@
struct CompilationUnit {
CompilationUnit()
- : numBlocks(0),
+ : num_blocks(0),
compiler(NULL),
class_linker(NULL),
dex_file(NULL),
@@ -281,89 +281,89 @@
access_flags(0),
invoke_type(kDirect),
shorty(NULL),
- firstLIRInsn(NULL),
- lastLIRInsn(NULL),
- literalList(NULL),
- methodLiteralList(NULL),
- codeLiteralList(NULL),
- disableOpt(0),
- enableDebug(0),
- dataOffset(0),
- totalSize(0),
- assemblerStatus(kSuccess),
- assemblerRetries(0),
- printMe(false),
- hasLoop(false),
- hasInvoke(false),
- qdMode(false),
- regPool(NULL),
- instructionSet(kNone),
- numSSARegs(0),
- ssaBaseVRegs(NULL),
- ssaSubscripts(NULL),
- ssaStrings(NULL),
- vRegToSSAMap(NULL),
- SSALastDefs(NULL),
- isConstantV(NULL),
- constantValues(NULL),
- phiAliasMap(NULL),
- phiList(NULL),
- regLocation(NULL),
- promotionMap(NULL),
- methodSReg(0),
- numReachableBlocks(0),
- numDalvikRegisters(0),
- entryBlock(NULL),
- exitBlock(NULL),
- curBlock(NULL),
- iDomList(NULL),
- tryBlockAddr(NULL),
- defBlockMatrix(NULL),
- tempBlockV(NULL),
- tempDalvikRegisterV(NULL),
- tempSSARegisterV(NULL),
- tempSSABlockIdV(NULL),
- blockLabelList(NULL),
- numIns(0),
- numOuts(0),
- numRegs(0),
- numCoreSpills(0),
- numFPSpills(0),
- numCompilerTemps(0),
- frameSize(0),
- coreSpillMask(0U),
- fpSpillMask(0U),
+ first_lir_insn(NULL),
+ last_lir_insn(NULL),
+ literal_list(NULL),
+ method_literal_list(NULL),
+ code_literal_list(NULL),
+ disable_opt(0),
+ enable_debug(0),
+ data_offset(0),
+ total_size(0),
+ assembler_status(kSuccess),
+ assembler_retries(0),
+ verbose(false),
+ has_loop(false),
+ has_invoke(false),
+ qd_mode(false),
+ reg_pool(NULL),
+ instruction_set(kNone),
+ num_ssa_regs(0),
+ ssa_base_vregs(NULL),
+ ssa_subscripts(NULL),
+ ssa_strings(NULL),
+ vreg_to_ssa_map(NULL),
+ ssa_last_defs(NULL),
+ is_constant_v(NULL),
+ constant_values(NULL),
+ phi_alias_map(NULL),
+ phi_list(NULL),
+ reg_location(NULL),
+ promotion_map(NULL),
+ method_sreg(0),
+ num_reachable_blocks(0),
+ num_dalvik_registers(0),
+ entry_block(NULL),
+ exit_block(NULL),
+ cur_block(NULL),
+ i_dom_list(NULL),
+ try_block_addr(NULL),
+ def_block_matrix(NULL),
+ temp_block_v(NULL),
+ temp_dalvik_register_v(NULL),
+ temp_ssa_register_v(NULL),
+ temp_ssa_block_id_v(NULL),
+ block_label_list(NULL),
+ num_ins(0),
+ num_outs(0),
+ num_regs(0),
+ num_core_spills(0),
+ num_fp_spills(0),
+ num_compiler_temps(0),
+ frame_size(0),
+ core_spill_mask(0U),
+ fp_spill_mask(0U),
attrs(0U),
- currentDalvikOffset(0),
+ current_dalvik_offset(0),
insns(NULL),
- insnsSize(0U),
- disableDataflow(false),
- defCount(0),
- compilerFlipMatch(false),
- arenaHead(NULL),
- currentArena(NULL),
- numArenaBlocks(0),
+ insns_size(0U),
+ disable_dataflow(false),
+ def_count(0),
+ compiler_flip_match(false),
+ arena_head(NULL),
+ current_arena(NULL),
+ num_arena_blocks(0),
mstats(NULL),
checkstats(NULL),
- genBitcode(false),
+ gen_bitcode(false),
context(NULL),
module(NULL),
func(NULL),
intrinsic_helper(NULL),
irb(NULL),
- placeholderBB(NULL),
- entryBB(NULL),
- entryTargetBB(NULL),
- tempName(0),
- numShadowFrameEntries(0),
- shadowMap(NULL),
+ placeholder_bb(NULL),
+ entry_bb(NULL),
+ entry_target_bb(NULL),
+ temp_name(0),
+ num_shadow_frame_entries(0),
+ shadow_map(NULL),
#ifndef NDEBUG
- liveSReg(0),
+ live_sreg(0),
#endif
- opcodeCount(NULL) {}
+ opcode_count(NULL) {}
- int numBlocks;
- GrowableList blockList;
+ int num_blocks;
+ GrowableList block_list;
Compiler* compiler; // Compiler driving this compilation
ClassLinker* class_linker; // Linker to resolve fields and methods
const DexFile* dex_file; // DexFile containing the method being compiled
@@ -373,18 +373,18 @@
uint32_t access_flags; // compiling method's access flags
InvokeType invoke_type; // compiling method's invocation type
const char* shorty; // compiling method's shorty
- LIR* firstLIRInsn;
- LIR* lastLIRInsn;
- LIR* literalList; // Constants
- LIR* methodLiteralList; // Method literals requiring patching
- LIR* codeLiteralList; // Code literals requiring patching
- uint32_t disableOpt; // optControlVector flags
- uint32_t enableDebug; // debugControlVector flags
- int dataOffset; // starting offset of literal pool
- int totalSize; // header + code size
- AssemblerStatus assemblerStatus; // Success or fix and retry
- int assemblerRetries;
- std::vector<uint8_t> codeBuffer;
+ LIR* first_lir_insn;
+ LIR* last_lir_insn;
+ LIR* literal_list; // Constants
+ LIR* method_literal_list; // Method literals requiring patching
+ LIR* code_literal_list; // Code literals requiring patching
+ uint32_t disable_opt; // opt_control_vector flags
+ uint32_t enable_debug; // debugControlVector flags
+ int data_offset; // starting offset of literal pool
+ int total_size; // header + code size
+ AssemblerStatus assembler_status; // Success or fix and retry
+ int assembler_retries;
+ std::vector<uint8_t> code_buffer;
/*
* Holds mapping from native PC to dex PC for safepoints where we may deoptimize.
* Native PC is on the return address of the safepointed operation. Dex PC is for
@@ -396,84 +396,84 @@
* immediately precede the instruction.
*/
std::vector<uint32_t> dex2pcMappingTable;
- std::vector<uint32_t> combinedMappingTable;
- std::vector<uint32_t> coreVmapTable;
- std::vector<uint32_t> fpVmapTable;
- std::vector<uint8_t> nativeGcMap;
- bool printMe;
- bool hasLoop; // Contains a loop
- bool hasInvoke; // Contains an invoke instruction
- bool qdMode; // Compile for code size/compile time
- RegisterPool* regPool;
- InstructionSet instructionSet;
- /* Number of total regs used in the whole cUnit after SSA transformation */
- int numSSARegs;
+ std::vector<uint32_t> combined_mapping_table;
+ std::vector<uint32_t> core_vmap_table;
+ std::vector<uint32_t> fp_vmap_table;
+ std::vector<uint8_t> native_gc_map;
+ bool verbose;
+ bool has_loop; // Contains a loop
+ bool has_invoke; // Contains an invoke instruction
+ bool qd_mode; // Compile for code size/compile time
+ RegisterPool* reg_pool;
+ InstructionSet instruction_set;
+ /* Number of total regs used in the whole cu after SSA transformation */
+ int num_ssa_regs;
/* Map SSA reg i to the base virtual register/subscript */
- GrowableList* ssaBaseVRegs;
- GrowableList* ssaSubscripts;
- GrowableList* ssaStrings;
+ GrowableList* ssa_base_vregs;
+ GrowableList* ssa_subscripts;
+ GrowableList* ssa_strings;
/* The following are new data structures to support SSA representations */
/* Map original Dalvik virtual reg i to the current SSA name */
- int* vRegToSSAMap; // length == method->registersSize
- int* SSALastDefs; // length == method->registersSize
- ArenaBitVector* isConstantV; // length == numSSAReg
- int* constantValues; // length == numSSAReg
- int* phiAliasMap; // length == numSSAReg
- MIR* phiList;
+ int* vreg_to_ssa_map; // length == method->registers_size
+ int* ssa_last_defs; // length == method->registers_size
+ ArenaBitVector* is_constant_v; // length == num_ssa_reg
+ int* constant_values; // length == num_ssa_reg
+ int* phi_alias_map; // length == num_ssa_reg
+ MIR* phi_list;
/* Use counts of ssa names */
- GrowableList useCounts; // Weighted by nesting depth
- GrowableList rawUseCounts; // Not weighted
+ GrowableList use_counts; // Weighted by nesting depth
+ GrowableList raw_use_counts; // Not weighted
/* Optimization support */
- GrowableList loopHeaders;
+ GrowableList loop_headers;
/* Map SSA names to location */
- RegLocation* regLocation;
+ RegLocation* reg_location;
- /* Keep track of Dalvik vReg to physical register mappings */
- PromotionMap* promotionMap;
+ /* Keep track of Dalvik v_reg to physical register mappings */
+ PromotionMap* promotion_map;
/* SSA name for Method* */
- int methodSReg;
- RegLocation methodLoc; // Describes location of method*
+ int method_sreg;
+ RegLocation method_loc; // Describes location of method*
- int numReachableBlocks;
- int numDalvikRegisters; // method->registersSize
- BasicBlock* entryBlock;
- BasicBlock* exitBlock;
- BasicBlock* curBlock;
- GrowableList dfsOrder;
- GrowableList dfsPostOrder;
- GrowableList domPostOrderTraversal;
- GrowableList throwLaunchpads;
- GrowableList suspendLaunchpads;
- GrowableList intrinsicLaunchpads;
- GrowableList compilerTemps;
- int* iDomList;
- ArenaBitVector* tryBlockAddr;
- ArenaBitVector** defBlockMatrix; // numDalvikRegister x numBlocks
- ArenaBitVector* tempBlockV;
- ArenaBitVector* tempDalvikRegisterV;
- ArenaBitVector* tempSSARegisterV; // numSSARegs
- int* tempSSABlockIdV; // working storage for Phi labels
- LIR* blockLabelList;
+ int num_reachable_blocks;
+ int num_dalvik_registers; // method->registers_size
+ BasicBlock* entry_block;
+ BasicBlock* exit_block;
+ BasicBlock* cur_block;
+ GrowableList dfs_order;
+ GrowableList dfs_post_order;
+ GrowableList dom_post_order_traversal;
+ GrowableList throw_launchpads;
+ GrowableList suspend_launchpads;
+ GrowableList intrinsic_launchpads;
+ GrowableList compiler_temps;
+ int* i_dom_list;
+ ArenaBitVector* try_block_addr;
+ ArenaBitVector** def_block_matrix; // num_dalvik_register x num_blocks
+ ArenaBitVector* temp_block_v;
+ ArenaBitVector* temp_dalvik_register_v;
+ ArenaBitVector* temp_ssa_register_v; // num_ssa_regs
+ int* temp_ssa_block_id_v; // working storage for Phi labels
+ LIR* block_label_list;
/*
* Frame layout details.
* NOTE: for debug support it will be necessary to add a structure
* to map the Dalvik virtual registers to the promoted registers.
* NOTE: "num" fields are in 4-byte words, "Size" and "Offset" in bytes.
*/
- int numIns;
- int numOuts;
- int numRegs; // Unlike numDalvikRegisters, does not include ins
- int numCoreSpills;
- int numFPSpills;
- int numCompilerTemps;
- int frameSize;
- unsigned int coreSpillMask;
- unsigned int fpSpillMask;
+ int num_ins;
+ int num_outs;
+ int num_regs; // Unlike num_dalvik_registers, does not include ins
+ int num_core_spills;
+ int num_fp_spills;
+ int num_compiler_temps;
+ int frame_size;
+ unsigned int core_spill_mask;
+ unsigned int fp_spill_mask;
unsigned int attrs;
/*
* CLEANUP/RESTRUCTURE: The code generation utilities don't have a built-in
@@ -485,55 +485,55 @@
* The low-level LIR creation utilities will pull it from here. Should
* be rewritten.
*/
- int currentDalvikOffset;
- GrowableList switchTables;
- GrowableList fillArrayData;
+ int current_dalvik_offset;
+ GrowableList switch_tables;
+ GrowableList fill_array_data;
const uint16_t* insns;
- uint32_t insnsSize;
- bool disableDataflow; // Skip dataflow analysis if possible
- SafeMap<unsigned int, BasicBlock*> blockMap; // FindBlock lookup cache
- SafeMap<unsigned int, unsigned int> blockIdMap; // Block collapse lookup cache
- SafeMap<unsigned int, LIR*> boundaryMap; // boundary lookup cache
- int defCount; // Used to estimate number of SSA names
+ uint32_t insns_size;
+ bool disable_dataflow; // Skip dataflow analysis if possible
+ SafeMap<unsigned int, BasicBlock*> block_map; // FindBlock lookup cache
+ SafeMap<unsigned int, unsigned int> block_id_map; // Block collapse lookup cache
+ SafeMap<unsigned int, LIR*> boundary_map; // boundary lookup cache
+ int def_count; // Used to estimate number of SSA names
// If non-empty, apply optimizer/debug flags only to matching methods.
- std::string compilerMethodMatch;
- // Flips sense of compilerMethodMatch - apply flags if doesn't match.
- bool compilerFlipMatch;
- ArenaMemBlock* arenaHead;
- ArenaMemBlock* currentArena;
- int numArenaBlocks;
+ std::string compiler_method_match;
+ // Flips sense of compiler_method_match - apply flags if doesn't match.
+ bool compiler_flip_match;
+ ArenaMemBlock* arena_head;
+ ArenaMemBlock* current_arena;
+ int num_arena_blocks;
Memstats* mstats;
Checkstats* checkstats;
- bool genBitcode;
+ bool gen_bitcode;
LLVMInfo* llvm_info;
llvm::LLVMContext* context;
llvm::Module* module;
llvm::Function* func;
greenland::IntrinsicHelper* intrinsic_helper;
greenland::IRBuilder* irb;
- llvm::BasicBlock* placeholderBB;
- llvm::BasicBlock* entryBB;
- llvm::BasicBlock* entryTargetBB;
+ llvm::BasicBlock* placeholder_bb;
+ llvm::BasicBlock* entry_bb;
+ llvm::BasicBlock* entry_target_bb;
std::string bitcode_filename;
- GrowableList llvmValues;
- int32_t tempName;
- SafeMap<llvm::BasicBlock*, LIR*> blockToLabelMap; // llvm bb -> LIR label
- SafeMap<int32_t, llvm::BasicBlock*> idToBlockMap; // block id -> llvm bb
- SafeMap<llvm::Value*, RegLocation> locMap; // llvm Value to loc rec
- int numShadowFrameEntries;
- int* shadowMap;
- std::set<llvm::BasicBlock*> llvmBlocks;
+ GrowableList llvm_values;
+ int32_t temp_name;
+ SafeMap<llvm::BasicBlock*, LIR*> block_to_label_map; // llvm bb -> LIR label
+ SafeMap<int32_t, llvm::BasicBlock*> id_to_block_map; // block id -> llvm bb
+ SafeMap<llvm::Value*, RegLocation> loc_map; // llvm Value to loc rec
+ int num_shadow_frame_entries;
+ int* shadow_map;
+ std::set<llvm::BasicBlock*> llvm_blocks;
#ifndef NDEBUG
/*
* Sanity checking for the register temp tracking. The same SSA
* name should never be associated with more than one temp register
* within the compilation of a single instruction.
*/
- int liveSReg;
+ int live_sreg;
#endif
std::set<uint32_t> catches;
- int* opcodeCount; // Count Dalvik opcodes for tuning
+ int* opcode_count; // Count Dalvik opcodes for tuning
};
struct SwitchTable {
@@ -555,10 +555,10 @@
struct CodePattern {
const Instruction::Code opcodes[MAX_PATTERN_LEN];
- const SpecialCaseHandler handlerCode;
+ const SpecialCaseHandler handler_code;
};
-static const CodePattern specialPatterns[] = {
+static const CodePattern special_patterns[] = {
{{Instruction::RETURN_VOID}, kNullMethod},
{{Instruction::CONST, Instruction::RETURN}, kConstFunction},
{{Instruction::CONST_4, Instruction::RETURN}, kConstFunction},
@@ -583,23 +583,23 @@
{{Instruction::RETURN_WIDE}, kIdentity},
};
-BasicBlock* NewMemBB(CompilationUnit* cUnit, BBType blockType, int blockId);
+BasicBlock* NewMemBB(CompilationUnit* cu, BBType block_type, int block_id);
void AppendMIR(BasicBlock* bb, MIR* mir);
void PrependMIR(BasicBlock* bb, MIR* mir);
-void InsertMIRAfter(BasicBlock* bb, MIR* currentMIR, MIR* newMIR);
+void InsertMIRAfter(BasicBlock* bb, MIR* current_mir, MIR* new_mir);
-void AppendLIR(CompilationUnit* cUnit, LIR* lir);
+void AppendLIR(CompilationUnit* cu, LIR* lir);
-void InsertLIRBefore(LIR* currentLIR, LIR* newLIR);
+void InsertLIRBefore(LIR* current_lir, LIR* new_lir);
-void InsertLIRAfter(LIR* currentLIR, LIR* newLIR);
+void InsertLIRAfter(LIR* current_lir, LIR* new_lir);
-MIR* FindMoveResult(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir);
+MIR* FindMoveResult(CompilationUnit* cu, BasicBlock* bb, MIR* mir);
/* Debug Utilities */
-void DumpCompilationUnit(CompilationUnit* cUnit);
+void DumpCompilationUnit(CompilationUnit* cu);
} // namespace art
diff --git a/src/compiler/compiler_utility.cc b/src/compiler/compiler_utility.cc
index 47dfb50..757aa7d 100644
--- a/src/compiler/compiler_utility.cc
+++ b/src/compiler/compiler_utility.cc
@@ -18,7 +18,7 @@
namespace art {
-const char* extendedMIROpNames[kMirOpLast - kMirOpFirst] = {
+const char* extended_mir_op_names[kMirOpLast - kMirOpFirst] = {
"kMirOpPhi",
"kMirOpCopy",
"kMirFusedCmplFloat",
@@ -35,17 +35,17 @@
#ifdef WITH_MEMSTATS
struct Memstats {
- uint32_t allocStats[kNumAllocKinds];
- int listSizes[kNumListKinds];
- int listWasted[kNumListKinds];
- int listGrows[kNumListKinds];
- int listMaxElems[kNumListKinds];
- int bitMapSizes[kNumBitMapKinds];
- int bitMapWasted[kNumBitMapKinds];
- int bitMapGrows[kNumBitMapKinds];
+ uint32_t alloc_stats[kNumAllocKinds];
+ int list_sizes[kNumListKinds];
+ int list_wasted[kNumListKinds];
+ int list_grows[kNumListKinds];
+ int list_max_elems[kNumListKinds];
+ int bit_map_sizes[kNumBitMapKinds];
+ int bit_map_wasted[kNumBitMapKinds];
+ int bit_map_grows[kNumBitMapKinds];
};
-const char* allocNames[kNumAllocKinds] = {
+const char* alloc_names[kNumAllocKinds] = {
"Misc ",
"BasicBlock ",
"LIR ",
@@ -61,22 +61,22 @@
"Preds ",
};
-const char* listNames[kNumListKinds] = {
+const char* list_names[kNumListKinds] = {
"Misc ",
- "blockList ",
+ "block_list ",
"SSAtoDalvik ",
- "dfsOrder ",
- "dfsPostOrder ",
- "domPostOrderTraversal ",
- "throwLaunchPads ",
- "suspendLaunchPads ",
- "switchTables ",
- "fillArrayData ",
+ "dfs_order ",
+ "dfs_post_order ",
+ "dom_post_order_traversal ",
+ "throw_launch_pads ",
+ "suspend_launch_pads ",
+ "switch_tables ",
+ "fill_array_data ",
"SuccessorBlocks ",
"Predecessors ",
};
-const char* bitMapNames[kNumBitMapKinds] = {
+const char* bit_map_names[kNumBitMapKinds] = {
"Misc ",
"Use ",
"Def ",
@@ -99,42 +99,42 @@
#define kArenaBitVectorGrowth 4 /* increase by 4 uint32_ts when limit hit */
/* Allocate the initial memory block for arena-based allocation */
-bool HeapInit(CompilationUnit* cUnit)
+bool HeapInit(CompilationUnit* cu)
{
- DCHECK(cUnit->arenaHead == NULL);
- cUnit->arenaHead =
+ DCHECK(cu->arena_head == NULL);
+ cu->arena_head =
static_cast<ArenaMemBlock*>(malloc(sizeof(ArenaMemBlock) + ARENA_DEFAULT_SIZE));
- if (cUnit->arenaHead == NULL) {
+ if (cu->arena_head == NULL) {
LOG(FATAL) << "No memory left to create compiler heap memory";
}
- cUnit->arenaHead->blockSize = ARENA_DEFAULT_SIZE;
- cUnit->currentArena = cUnit->arenaHead;
- cUnit->currentArena->bytesAllocated = 0;
- cUnit->currentArena->next = NULL;
- cUnit->numArenaBlocks = 1;
+ cu->arena_head->block_size = ARENA_DEFAULT_SIZE;
+ cu->current_arena = cu->arena_head;
+ cu->current_arena->bytes_allocated = 0;
+ cu->current_arena->next = NULL;
+ cu->num_arena_blocks = 1;
#ifdef WITH_MEMSTATS
- cUnit->mstats = (Memstats*) NewMem(cUnit, sizeof(Memstats), true,
+ cu->mstats = (Memstats*) NewMem(cu, sizeof(Memstats), true,
kAllocDebugInfo);
#endif
return true;
}
/* Arena-based malloc for compilation tasks */
-void* NewMem(CompilationUnit* cUnit, size_t size, bool zero, oatAllocKind kind)
+void* NewMem(CompilationUnit* cu, size_t size, bool zero, oat_alloc_kind kind)
{
size = (size + 3) & ~3;
#ifdef WITH_MEMSTATS
- if (cUnit->mstats != NULL) {
- cUnit->mstats->allocStats[kind] += size;
+ if (cu->mstats != NULL) {
+ cu->mstats->alloc_stats[kind] += size;
}
#endif
retry:
/* Normal case - space is available in the current page */
- if (size + cUnit->currentArena->bytesAllocated <=
- cUnit->currentArena->blockSize) {
+ if (size + cu->current_arena->bytes_allocated <=
+ cu->current_arena->block_size) {
void *ptr;
- ptr = &cUnit->currentArena->ptr[cUnit->currentArena->bytesAllocated];
- cUnit->currentArena->bytesAllocated += size;
+ ptr = &cu->current_arena->ptr[cu->current_arena->bytes_allocated];
+ cu->current_arena->bytes_allocated += size;
if (zero) {
memset(ptr, 0, size);
}
@@ -144,180 +144,180 @@
* See if there are previously allocated arena blocks before the last
* reset
*/
- if (cUnit->currentArena->next) {
- cUnit->currentArena = cUnit->currentArena->next;
- cUnit->currentArena->bytesAllocated = 0;
+ if (cu->current_arena->next) {
+ cu->current_arena = cu->current_arena->next;
+ cu->current_arena->bytes_allocated = 0;
goto retry;
}
- size_t blockSize = (size < ARENA_DEFAULT_SIZE) ? ARENA_DEFAULT_SIZE : size;
+ size_t block_size = (size < ARENA_DEFAULT_SIZE) ? ARENA_DEFAULT_SIZE : size;
/* Time to allocate a new arena */
- ArenaMemBlock *newArena =
- static_cast<ArenaMemBlock*>(malloc(sizeof(ArenaMemBlock) + blockSize));
- if (newArena == NULL) {
+ ArenaMemBlock *new_arena =
+ static_cast<ArenaMemBlock*>(malloc(sizeof(ArenaMemBlock) + block_size));
+ if (new_arena == NULL) {
LOG(FATAL) << "Arena allocation failure";
}
- newArena->blockSize = blockSize;
- newArena->bytesAllocated = 0;
- newArena->next = NULL;
- cUnit->currentArena->next = newArena;
- cUnit->currentArena = newArena;
- cUnit->numArenaBlocks++;
- if (cUnit->numArenaBlocks > 20000) {
- LOG(INFO) << "Total arena pages: " << cUnit->numArenaBlocks;
+ new_arena->block_size = block_size;
+ new_arena->bytes_allocated = 0;
+ new_arena->next = NULL;
+ cu->current_arena->next = new_arena;
+ cu->current_arena = new_arena;
+ cu->num_arena_blocks++;
+ if (cu->num_arena_blocks > 20000) {
+ LOG(INFO) << "Total arena pages: " << cu->num_arena_blocks;
}
goto retry;
}
}
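// Usage sketch (not part of this CL; kAllocMisc is assumed to be the
// oat_alloc_kind behind the "Misc" row of alloc_names): callers never free,
// the arena owns everything until ArenaReset.
//   void* buf = NewMem(cu, 64, true /* zero */, kAllocMisc);
//   ... use buf during compilation ...
//   ArenaReset(cu);  // reclaims every arena block at once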
/* Reclaim all the arena blocks allocated so far */
-void ArenaReset(CompilationUnit* cUnit)
+void ArenaReset(CompilationUnit* cu)
{
- ArenaMemBlock* head = cUnit->arenaHead;
+ ArenaMemBlock* head = cu->arena_head;
while (head != NULL) {
ArenaMemBlock* p = head;
head = head->next;
free(p);
}
- cUnit->arenaHead = NULL;
- cUnit->currentArena = NULL;
+ cu->arena_head = NULL;
+ cu->current_arena = NULL;
}
/* Growable List initialization */
-void CompilerInitGrowableList(CompilationUnit* cUnit, GrowableList* gList,
- size_t initLength, oatListKind kind)
+void CompilerInitGrowableList(CompilationUnit* cu, GrowableList* g_list,
+ size_t init_length, oat_list_kind kind)
{
- gList->numAllocated = initLength;
- gList->numUsed = 0;
- gList->elemList = static_cast<uintptr_t *>(NewMem(cUnit, sizeof(intptr_t) * initLength,
+ g_list->num_allocated = init_length;
+ g_list->num_used = 0;
+ g_list->elem_list = static_cast<uintptr_t *>(NewMem(cu, sizeof(intptr_t) * init_length,
true, kAllocGrowableList));
#ifdef WITH_MEMSTATS
- cUnit->mstats->listSizes[kind] += sizeof(uintptr_t) * initLength;
- gList->kind = kind;
- if (static_cast<int>(initLength) > cUnit->mstats->listMaxElems[kind]) {
- cUnit->mstats->listMaxElems[kind] = initLength;
+ cu->mstats->list_sizes[kind] += sizeof(uintptr_t) * init_length;
+ g_list->kind = kind;
+ if (static_cast<int>(init_length) > cu->mstats->list_max_elems[kind]) {
+ cu->mstats->list_max_elems[kind] = init_length;
}
#endif
}
/* Expand the capacity of a growable list */
-static void ExpandGrowableList(CompilationUnit* cUnit, GrowableList* gList)
+static void ExpandGrowableList(CompilationUnit* cu, GrowableList* g_list)
{
- int newLength = gList->numAllocated;
- if (newLength < 128) {
- newLength <<= 1;
+ int new_length = g_list->num_allocated;
+ if (new_length < 128) {
+ new_length <<= 1;
} else {
- newLength += 128;
+ new_length += 128;
}
- uintptr_t *newArray =
- static_cast<uintptr_t*>(NewMem(cUnit, sizeof(uintptr_t) * newLength, true,
+ uintptr_t *new_array =
+ static_cast<uintptr_t*>(NewMem(cu, sizeof(uintptr_t) * new_length, true,
kAllocGrowableList));
- memcpy(newArray, gList->elemList, sizeof(uintptr_t) * gList->numAllocated);
+ memcpy(new_array, g_list->elem_list, sizeof(uintptr_t) * g_list->num_allocated);
#ifdef WITH_MEMSTATS
- cUnit->mstats->listSizes[gList->kind] += sizeof(uintptr_t) * newLength;
- cUnit->mstats->listWasted[gList->kind] +=
- sizeof(uintptr_t) * gList->numAllocated;
- cUnit->mstats->listGrows[gList->kind]++;
- if (newLength > cUnit->mstats->listMaxElems[gList->kind]) {
- cUnit->mstats->listMaxElems[gList->kind] = newLength;
+ cu->mstats->list_sizes[g_list->kind] += sizeof(uintptr_t) * new_length;
+ cu->mstats->list_wasted[g_list->kind] +=
+ sizeof(uintptr_t) * g_list->num_allocated;
+ cu->mstats->list_grows[g_list->kind]++;
+ if (new_length > cu->mstats->list_max_elems[g_list->kind]) {
+ cu->mstats->list_max_elems[g_list->kind] = new_length;
}
#endif
- gList->numAllocated = newLength;
- gList->elemList = newArray;
+ g_list->num_allocated = new_length;
+ g_list->elem_list = new_array;
}
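// Growth policy above, traced (illustration only): capacity doubles while
// below 128 elements, then grows linearly in steps of 128.
//   4 -> 8 -> 16 -> 32 -> 64 -> 128 -> 256 -> 384 -> 512 -> ...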
/* Insert a new element into the growable list */
-void InsertGrowableList(CompilationUnit* cUnit, GrowableList* gList,
+void InsertGrowableList(CompilationUnit* cu, GrowableList* g_list,
uintptr_t elem)
{
- DCHECK_NE(gList->numAllocated, 0U);
- if (gList->numUsed == gList->numAllocated) {
- ExpandGrowableList(cUnit, gList);
+ DCHECK_NE(g_list->num_allocated, 0U);
+ if (g_list->num_used == g_list->num_allocated) {
+ ExpandGrowableList(cu, g_list);
}
- gList->elemList[gList->numUsed++] = elem;
+ g_list->elem_list[g_list->num_used++] = elem;
}
/* Delete an element from a growable list. Element must be present */
-void DeleteGrowableList(GrowableList* gList, uintptr_t elem)
+void DeleteGrowableList(GrowableList* g_list, uintptr_t elem)
{
bool found = false;
- for (unsigned int i = 0; i < gList->numUsed; i++) {
- if (!found && gList->elemList[i] == elem) {
+ for (unsigned int i = 0; i < g_list->num_used; i++) {
+ if (!found && g_list->elem_list[i] == elem) {
found = true;
}
if (found) {
- gList->elemList[i] = gList->elemList[i+1];
+ g_list->elem_list[i] = g_list->elem_list[i+1];
}
}
DCHECK_EQ(found, true);
- gList->numUsed--;
+ g_list->num_used--;
}
-void GrowableListIteratorInit(GrowableList* gList,
+void GrowableListIteratorInit(GrowableList* g_list,
GrowableListIterator* iterator)
{
- iterator->list = gList;
+ iterator->list = g_list;
iterator->idx = 0;
- iterator->size = gList->numUsed;
+ iterator->size = g_list->num_used;
}
uintptr_t GrowableListIteratorNext(GrowableListIterator* iterator)
{
- DCHECK_EQ(iterator->size, iterator->list->numUsed);
+ DCHECK_EQ(iterator->size, iterator->list->num_used);
if (iterator->idx == iterator->size) return 0;
- return iterator->list->elemList[iterator->idx++];
+ return iterator->list->elem_list[iterator->idx++];
}
-uintptr_t GrowableListGetElement(const GrowableList* gList, size_t idx)
+uintptr_t GrowableListGetElement(const GrowableList* g_list, size_t idx)
{
- DCHECK_LT(idx, gList->numUsed);
- return gList->elemList[idx];
+ DCHECK_LT(idx, g_list->num_used);
+ return g_list->elem_list[idx];
}
#ifdef WITH_MEMSTATS
/* Dump memory usage stats */
-void DumpMemStats(CompilationUnit* cUnit)
+void DumpMemStats(CompilationUnit* cu)
{
uint32_t total = 0;
for (int i = 0; i < kNumAllocKinds; i++) {
- total += cUnit->mstats->allocStats[i];
+ total += cu->mstats->alloc_stats[i];
}
if (total > (10 * 1024 * 1024)) {
LOG(INFO) << "MEMUSAGE: " << total << " : "
- << PrettyMethod(cUnit->method_idx, *cUnit->dex_file);
- LOG(INFO) << "insnsSize: " << cUnit->insnsSize;
- if (cUnit->disableDataflow) {
+ << PrettyMethod(cu->method_idx, *cu->dex_file);
+ LOG(INFO) << "insns_size: " << cu->insns_size;
+ if (cu->disable_dataflow) {
LOG(INFO) << " ** Dataflow disabled ** ";
}
LOG(INFO) << "===== Overall allocations";
for (int i = 0; i < kNumAllocKinds; i++) {
- LOG(INFO) << allocNames[i] << std::setw(10) <<
- cUnit->mstats->allocStats[i];
+ LOG(INFO) << alloc_names[i] << std::setw(10) <<
+ cu->mstats->alloc_stats[i];
}
LOG(INFO) << "===== GrowableList allocations";
for (int i = 0; i < kNumListKinds; i++) {
- LOG(INFO) << listNames[i]
- << " S:" << cUnit->mstats->listSizes[i]
- << ", W:" << cUnit->mstats->listWasted[i]
- << ", G:" << cUnit->mstats->listGrows[i]
- << ", E:" << cUnit->mstats->listMaxElems[i];
+ LOG(INFO) << list_names[i]
+ << " S:" << cu->mstats->list_sizes[i]
+ << ", W:" << cu->mstats->list_wasted[i]
+ << ", G:" << cu->mstats->list_grows[i]
+ << ", E:" << cu->mstats->list_max_elems[i];
}
LOG(INFO) << "===== GrowableBitMap allocations";
for (int i = 0; i < kNumBitMapKinds; i++) {
- LOG(INFO) << bitMapNames[i]
- << " S:" << cUnit->mstats->bitMapSizes[i]
- << ", W:" << cUnit->mstats->bitMapWasted[i]
- << ", G:" << cUnit->mstats->bitMapGrows[i];
+ LOG(INFO) << bit_map_names[i]
+ << " S:" << cu->mstats->bit_map_sizes[i]
+ << ", W:" << cu->mstats->bit_map_wasted[i]
+ << ", G:" << cu->mstats->bit_map_grows[i];
}
}
}
#endif
/* Debug Utility - dump a compilation unit */
-void DumpCompilationUnit(CompilationUnit* cUnit)
+void DumpCompilationUnit(CompilationUnit* cu)
{
BasicBlock* bb;
- const char* blockTypeNames[] = {
+ const char* block_type_names[] = {
"Entry Block",
"Code Block",
"Exit Block",
@@ -325,34 +325,34 @@
"Catch Block"
};
- LOG(INFO) << "Compiling " << PrettyMethod(cUnit->method_idx, *cUnit->dex_file);
- LOG(INFO) << cUnit->insns << " insns";
- LOG(INFO) << cUnit->numBlocks << " blocks in total";
+ LOG(INFO) << "Compiling " << PrettyMethod(cu->method_idx, *cu->dex_file);
+ LOG(INFO) << cu->insns << " insns";
+ LOG(INFO) << cu->num_blocks << " blocks in total";
GrowableListIterator iterator;
- GrowableListIteratorInit(&cUnit->blockList, &iterator);
+ GrowableListIteratorInit(&cu->block_list, &iterator);
while (true) {
bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iterator));
if (bb == NULL) break;
LOG(INFO) << StringPrintf("Block %d (%s) (insn %04x - %04x%s)",
bb->id,
- blockTypeNames[bb->blockType],
- bb->startOffset,
- bb->lastMIRInsn ? bb->lastMIRInsn->offset : bb->startOffset,
- bb->lastMIRInsn ? "" : " empty");
+ block_type_names[bb->block_type],
+ bb->start_offset,
+ bb->last_mir_insn ? bb->last_mir_insn->offset : bb->start_offset,
+ bb->last_mir_insn ? "" : " empty");
if (bb->taken) {
LOG(INFO) << " Taken branch: block " << bb->taken->id
- << "(0x" << std::hex << bb->taken->startOffset << ")";
+ << "(0x" << std::hex << bb->taken->start_offset << ")";
}
- if (bb->fallThrough) {
- LOG(INFO) << " Fallthrough : block " << bb->fallThrough->id
- << " (0x" << std::hex << bb->fallThrough->startOffset << ")";
+ if (bb->fall_through) {
+ LOG(INFO) << " Fallthrough : block " << bb->fall_through->id
+ << " (0x" << std::hex << bb->fall_through->start_offset << ")";
}
}
}
-static uint32_t checkMasks[32] = {
+static uint32_t check_masks[32] = {
0x00000001, 0x00000002, 0x00000004, 0x00000008, 0x00000010,
0x00000020, 0x00000040, 0x00000080, 0x00000100, 0x00000200,
0x00000400, 0x00000800, 0x00001000, 0x00002000, 0x00004000,
@@ -367,27 +367,27 @@
*
* NOTE: memory is allocated from the compiler arena.
*/
-ArenaBitVector* AllocBitVector(CompilationUnit* cUnit,
- unsigned int startBits, bool expandable,
- oatBitMapKind kind)
+ArenaBitVector* AllocBitVector(CompilationUnit* cu,
+ unsigned int start_bits, bool expandable,
+ oat_bit_map_kind kind)
{
ArenaBitVector* bv;
unsigned int count;
DCHECK_EQ(sizeof(bv->storage[0]), 4U); /* assuming 32-bit units */
- bv = static_cast<ArenaBitVector*>(NewMem(cUnit, sizeof(ArenaBitVector), false,
+ bv = static_cast<ArenaBitVector*>(NewMem(cu, sizeof(ArenaBitVector), false,
kAllocGrowableBitMap));
- count = (startBits + 31) >> 5;
+ count = (start_bits + 31) >> 5;
- bv->storageSize = count;
+ bv->storage_size = count;
bv->expandable = expandable;
- bv->storage = static_cast<uint32_t*>(NewMem(cUnit, count * sizeof(uint32_t), true,
+ bv->storage = static_cast<uint32_t*>(NewMem(cu, count * sizeof(uint32_t), true,
kAllocGrowableBitMap));
#ifdef WITH_MEMSTATS
bv->kind = kind;
- cUnit->mstats->bitMapSizes[kind] += count * sizeof(uint32_t);
+ cu->mstats->bit_map_sizes[kind] += count * sizeof(uint32_t);
#endif
return bv;
}
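The word-count arithmetic above is the usual round-up-to-32 idiom; a worked example:

    // count = (start_bits + 31) >> 5 allocates whole 32-bit words:
    //   start_bits = 1  -> count = 1 (32 bits of storage)
    //   start_bits = 32 -> count = 1
    //   start_bits = 33 -> count = 2 (64 bits of storage)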
@@ -395,21 +395,21 @@
/*
* Determine whether or not the specified bit is set.
*/
-bool IsBitSet(const ArenaBitVector* pBits, unsigned int num)
+bool IsBitSet(const ArenaBitVector* p_bits, unsigned int num)
{
- DCHECK_LT(num, pBits->storageSize * sizeof(uint32_t) * 8);
+ DCHECK_LT(num, p_bits->storage_size * sizeof(uint32_t) * 8);
- unsigned int val = pBits->storage[num >> 5] & checkMasks[num & 0x1f];
+ unsigned int val = p_bits->storage[num >> 5] & check_masks[num & 0x1f];
return (val != 0);
}
/*
* Mark all bits as "clear".
*/
-void ClearAllBits(ArenaBitVector* pBits)
+void ClearAllBits(ArenaBitVector* p_bits)
{
- unsigned int count = pBits->storageSize;
- memset(pBits->storage, 0, count * sizeof(uint32_t));
+ unsigned int count = p_bits->storage_size;
+ memset(p_bits->storage, 0, count * sizeof(uint32_t));
}
/*
@@ -420,32 +420,32 @@
*
* NOTE: memory is allocated from the compiler arena.
*/
-bool SetBit(CompilationUnit* cUnit, ArenaBitVector* pBits, unsigned int num)
+bool SetBit(CompilationUnit* cu, ArenaBitVector* p_bits, unsigned int num)
{
- if (num >= pBits->storageSize * sizeof(uint32_t) * 8) {
- if (!pBits->expandable) {
+ if (num >= p_bits->storage_size * sizeof(uint32_t) * 8) {
+ if (!p_bits->expandable) {
LOG(FATAL) << "Can't expand";
}
/* Round up to word boundaries for "num+1" bits */
- unsigned int newSize = (num + 1 + 31) >> 5;
- DCHECK_GT(newSize, pBits->storageSize);
- uint32_t *newStorage = static_cast<uint32_t*>(NewMem(cUnit, newSize * sizeof(uint32_t), false,
+ unsigned int new_size = (num + 1 + 31) >> 5;
+ DCHECK_GT(new_size, p_bits->storage_size);
+ uint32_t *new_storage = static_cast<uint32_t*>(NewMem(cu, new_size * sizeof(uint32_t), false,
kAllocGrowableBitMap));
- memcpy(newStorage, pBits->storage, pBits->storageSize * sizeof(uint32_t));
- memset(&newStorage[pBits->storageSize], 0,
- (newSize - pBits->storageSize) * sizeof(uint32_t));
+ memcpy(new_storage, p_bits->storage, p_bits->storage_size * sizeof(uint32_t));
+ memset(&new_storage[p_bits->storage_size], 0,
+ (new_size - p_bits->storage_size) * sizeof(uint32_t));
#ifdef WITH_MEMSTATS
- cUnit->mstats->bitMapWasted[pBits->kind] +=
- pBits->storageSize * sizeof(uint32_t);
- cUnit->mstats->bitMapSizes[pBits->kind] += newSize * sizeof(uint32_t);
- cUnit->mstats->bitMapGrows[pBits->kind]++;
+ cu->mstats->bit_map_wasted[p_bits->kind] +=
+ p_bits->storage_size * sizeof(uint32_t);
+ cu->mstats->bit_map_sizes[p_bits->kind] += new_size * sizeof(uint32_t);
+ cu->mstats->bit_map_grows[p_bits->kind]++;
#endif
- pBits->storage = newStorage;
- pBits->storageSize = newSize;
+ p_bits->storage = new_storage;
+ p_bits->storage_size = new_size;
}
- pBits->storage[num >> 5] |= checkMasks[num & 0x1f];
+ p_bits->storage[num >> 5] |= check_masks[num & 0x1f];
return true;
}
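SetBit, ClearBit and IsBitSet all share the same word/bit split, with check_masks[i] serving as a table form of (1 << i); a worked example:

    // num = 37: word index 37 >> 5 = 1, bit position 37 & 0x1f = 5,
    // mask check_masks[5] = 0x00000020 -> storage[1] |= 0x00000020.
    // On an expandable vector, num = 64 first grows storage to
    // (64 + 1 + 31) >> 5 = 3 words, then sets bit 0 of storage[2].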
@@ -457,22 +457,22 @@
*
* NOTE: memory is allocated from the compiler arena.
*/
-bool ClearBit(ArenaBitVector* pBits, unsigned int num)
+bool ClearBit(ArenaBitVector* p_bits, unsigned int num)
{
- if (num >= pBits->storageSize * sizeof(uint32_t) * 8) {
+ if (num >= p_bits->storage_size * sizeof(uint32_t) * 8) {
LOG(FATAL) << "Attempt to clear a bit not set in the vector yet";;
}
- pBits->storage[num >> 5] &= ~checkMasks[num & 0x1f];
+ p_bits->storage[num >> 5] &= ~check_masks[num & 0x1f];
return true;
}
/* Initialize the iterator structure */
-void BitVectorIteratorInit(ArenaBitVector* pBits,
+void BitVectorIteratorInit(ArenaBitVector* p_bits,
ArenaBitVectorIterator* iterator)
{
- iterator->pBits = pBits;
- iterator->bitSize = pBits->storageSize * sizeof(uint32_t) * 8;
+ iterator->p_bits = p_bits;
+ iterator->bit_size = p_bits->storage_size * sizeof(uint32_t) * 8;
iterator->idx = 0;
}
@@ -481,9 +481,9 @@
*/
static void CheckSizes(const ArenaBitVector* bv1, const ArenaBitVector* bv2)
{
- if (bv1->storageSize != bv2->storageSize) {
- LOG(FATAL) << "Mismatched vector sizes (" << bv1->storageSize
- << ", " << bv2->storageSize << ")";
+ if (bv1->storage_size != bv2->storage_size) {
+ LOG(FATAL) << "Mismatched vector sizes (" << bv1->storage_size
+ << ", " << bv2->storage_size << ")";
}
}
@@ -496,7 +496,7 @@
/* if dest is expandable and < src, we could expand dest to match */
CheckSizes(dest, src);
- memcpy(dest->storage, src->storage, sizeof(uint32_t) * dest->storageSize);
+ memcpy(dest->storage, src->storage, sizeof(uint32_t) * dest->storage_size);
}
/*
@@ -508,14 +508,14 @@
{
DCHECK(src1 != NULL);
DCHECK(src2 != NULL);
- if (dest->storageSize != src1->storageSize ||
- dest->storageSize != src2->storageSize ||
+ if (dest->storage_size != src1->storage_size ||
+ dest->storage_size != src2->storage_size ||
dest->expandable != src1->expandable ||
dest->expandable != src2->expandable)
return false;
unsigned int idx;
- for (idx = 0; idx < dest->storageSize; idx++) {
+ for (idx = 0; idx < dest->storage_size; idx++) {
dest->storage[idx] = src1->storage[idx] & src2->storage[idx];
}
return true;
@@ -529,14 +529,14 @@
{
DCHECK(src1 != NULL);
DCHECK(src2 != NULL);
- if (dest->storageSize != src1->storageSize ||
- dest->storageSize != src2->storageSize ||
+ if (dest->storage_size != src1->storage_size ||
+ dest->storage_size != src2->storage_size ||
dest->expandable != src1->expandable ||
dest->expandable != src2->expandable)
return false;
unsigned int idx;
- for (idx = 0; idx < dest->storageSize; idx++) {
+ for (idx = 0; idx < dest->storage_size; idx++) {
dest->storage[idx] = src1->storage[idx] | src2->storage[idx];
}
return true;
@@ -548,8 +548,8 @@
bool TestBitVectors(const ArenaBitVector* src1,
const ArenaBitVector* src2)
{
- DCHECK_EQ(src1->storageSize, src2->storageSize);
- for (uint32_t idx = 0; idx < src1->storageSize; idx++) {
+ DCHECK_EQ(src1->storage_size, src2->storage_size);
+ for (uint32_t idx = 0; idx < src1->storage_size; idx++) {
if (src1->storage[idx] & src2->storage[idx]) return true;
}
return false;
@@ -561,12 +561,12 @@
bool CompareBitVectors(const ArenaBitVector* src1,
const ArenaBitVector* src2)
{
- if (src1->storageSize != src2->storageSize ||
+ if (src1->storage_size != src2->storage_size ||
src1->expandable != src2->expandable)
return true;
unsigned int idx;
- for (idx = 0; idx < src1->storageSize; idx++) {
+ for (idx = 0; idx < src1->storage_size; idx++) {
if (src1->storage[idx] != src2->storage[idx]) return true;
}
return false;
@@ -575,13 +575,13 @@
/*
* Count the number of bits that are set.
*/
-int CountSetBits(const ArenaBitVector* pBits)
+int CountSetBits(const ArenaBitVector* p_bits)
{
unsigned int word;
unsigned int count = 0;
- for (word = 0; word < pBits->storageSize; word++) {
- uint32_t val = pBits->storage[word];
+ for (word = 0; word < p_bits->storage_size; word++) {
+ uint32_t val = p_bits->storage[word];
if (val != 0) {
if (val == 0xffffffff) {
@@ -602,39 +602,39 @@
/* Return the next position set to 1. -1 means end-of-element reached */
int BitVectorIteratorNext(ArenaBitVectorIterator* iterator)
{
- ArenaBitVector* pBits = iterator->pBits;
- uint32_t bitIndex = iterator->idx;
- uint32_t bitSize = iterator->bitSize;
+ ArenaBitVector* p_bits = iterator->p_bits;
+ uint32_t bit_index = iterator->idx;
+ uint32_t bit_size = iterator->bit_size;
- DCHECK_EQ(bitSize, pBits->storageSize * sizeof(uint32_t) * 8);
+ DCHECK_EQ(bit_size, p_bits->storage_size * sizeof(uint32_t) * 8);
- if (bitIndex >= bitSize) return -1;
+ if (bit_index >= bit_size) return -1;
- uint32_t wordIndex = bitIndex >> 5;
- uint32_t endWordIndex = bitSize >> 5;
- uint32_t* storage = pBits->storage;
- uint32_t word = storage[wordIndex++];
+ uint32_t word_index = bit_index >> 5;
+ uint32_t end_word_index = bit_size >> 5;
+ uint32_t* storage = p_bits->storage;
+ uint32_t word = storage[word_index++];
// Mask out any bits in the first word we've already considered
- word &= ~((1 << (bitIndex & 0x1f))-1);
+ word &= ~((1 << (bit_index & 0x1f))-1);
- for (; wordIndex <= endWordIndex;) {
- uint32_t bitPos = bitIndex & 0x1f;
+ for (; word_index <= end_word_index;) {
+ uint32_t bit_pos = bit_index & 0x1f;
if (word == 0) {
- bitIndex += (32 - bitPos);
- word = storage[wordIndex++];
+ bit_index += (32 - bit_pos);
+ word = storage[word_index++];
continue;
}
- for (; bitPos < 32; bitPos++) {
- if (word & (1 << bitPos)) {
- iterator->idx = bitIndex + 1;
- return bitIndex;
+ for (; bit_pos < 32; bit_pos++) {
+ if (word & (1 << bit_pos)) {
+ iterator->idx = bit_index + 1;
+ return bit_index;
}
- bitIndex++;
+ bit_index++;
}
- word = storage[wordIndex++];
+ word = storage[word_index++];
}
- iterator->idx = iterator->bitSize;
+ iterator->idx = iterator->bit_size;
return -1;
}
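A typical walk over the set bits, given some ArenaBitVector* bv (a sketch; -1 is the end sentinel):

    ArenaBitVectorIterator it;
    BitVectorIteratorInit(bv, &it);
    for (int bit = BitVectorIteratorNext(&it); bit != -1;
         bit = BitVectorIteratorNext(&it)) {
      // positions arrive in ascending order
    }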
@@ -643,22 +643,22 @@
* since there might be unused bits - setting those to one will confuse the
* iterator.
*/
-void SetInitialBits(ArenaBitVector* pBits, unsigned int numBits)
+void SetInitialBits(ArenaBitVector* p_bits, unsigned int num_bits)
{
unsigned int idx;
- DCHECK_LE(((numBits + 31) >> 5), pBits->storageSize);
- for (idx = 0; idx < (numBits >> 5); idx++) {
- pBits->storage[idx] = -1;
+ DCHECK_LE(((num_bits + 31) >> 5), p_bits->storage_size);
+ for (idx = 0; idx < (num_bits >> 5); idx++) {
+ p_bits->storage[idx] = -1;
}
- unsigned int remNumBits = numBits & 0x1f;
- if (remNumBits) {
- pBits->storage[idx] = (1 << remNumBits) - 1;
+ unsigned int rem_num_bits = num_bits & 0x1f;
+ if (rem_num_bits) {
+ p_bits->storage[idx] = (1 << rem_num_bits) - 1;
}
}
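A worked example of the tail masking:

    // SetInitialBits(p_bits, 35):
    //   idx 0: storage[0] = -1 = 0xffffffff   (bits 0..31)
    //   rem_num_bits = 35 & 0x1f = 3
    //   storage[1] = (1 << 3) - 1 = 0x7       (bits 32..34; rest stay clear)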
void GetBlockName(BasicBlock* bb, char* name)
{
- switch (bb->blockType) {
+ switch (bb->block_type) {
case kEntryBlock:
snprintf(name, BLOCK_NAME_LEN, "entry_%d", bb->id);
break;
@@ -666,10 +666,10 @@
snprintf(name, BLOCK_NAME_LEN, "exit_%d", bb->id);
break;
case kDalvikByteCode:
- snprintf(name, BLOCK_NAME_LEN, "block%04x_%d", bb->startOffset, bb->id);
+ snprintf(name, BLOCK_NAME_LEN, "block%04x_%d", bb->start_offset, bb->id);
break;
case kExceptionHandling:
- snprintf(name, BLOCK_NAME_LEN, "exception%04x_%d", bb->startOffset,
+ snprintf(name, BLOCK_NAME_LEN, "exception%04x_%d", bb->start_offset,
bb->id);
break;
default:
@@ -678,10 +678,10 @@
}
}
-const char* GetShortyFromTargetIdx(CompilationUnit *cUnit, int targetIdx)
+const char* GetShortyFromTargetIdx(CompilationUnit *cu, int target_idx)
{
- const DexFile::MethodId& methodId = cUnit->dex_file->GetMethodId(targetIdx);
- return cUnit->dex_file->GetShorty(methodId.proto_idx_);
+ const DexFile::MethodId& method_id = cu->dex_file->GetMethodId(target_idx);
+ return cu->dex_file->GetShorty(method_id.proto_idx_);
}
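For readers outside the dex format: the shorty is a compact signature string, return type first, one character per argument, with every reference type collapsed to 'L'; an illustrative case, not taken from this CL:

    // int foo(long x, Object o) -> shorty "IJL"
    //   'I' = int return, 'J' = long arg, 'L' = reference arg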
} // namespace art
diff --git a/src/compiler/compiler_utility.h b/src/compiler/compiler_utility.h
index f498d35..027204b 100644
--- a/src/compiler/compiler_utility.h
+++ b/src/compiler/compiler_utility.h
@@ -28,7 +28,7 @@
#define ARENA_DEFAULT_SIZE ((2 * 1024 * 1024) - 256)
/* Type of allocation for memory tuning */
-enum oatAllocKind {
+enum oat_alloc_kind {
kAllocMisc,
kAllocBB,
kAllocLIR,
@@ -46,7 +46,7 @@
};
/* Type of growable list for memory tuning */
-enum oatListKind {
+enum oat_list_kind {
kListMisc = 0,
kListBlockList,
kListSSAtoDalvikMap,
@@ -63,7 +63,7 @@
};
/* Type of growable bitmap for memory tuning */
-enum oatBitMapKind {
+enum oat_bit_map_kind {
kBitMapMisc = 0,
kBitMapUse,
kBitMapDef,
@@ -84,31 +84,31 @@
};
/* Allocate the initial memory block for arena-based allocation */
-bool HeapInit(CompilationUnit* cUnit);
+bool HeapInit(CompilationUnit* cu);
/* Collect memory usage statistics */
//#define WITH_MEMSTATS
struct ArenaMemBlock {
- size_t blockSize;
- size_t bytesAllocated;
+ size_t block_size;
+ size_t bytes_allocated;
ArenaMemBlock *next;
char ptr[0];
};
-void* NewMem(CompilationUnit* cUnit, size_t size, bool zero, oatAllocKind kind);
+void* NewMem(CompilationUnit* cu, size_t size, bool zero, oat_alloc_kind kind);
-void ArenaReset(CompilationUnit *cUnit);
+void ArenaReset(CompilationUnit *cu);
struct GrowableList {
- GrowableList() : numAllocated(0), numUsed(0), elemList(NULL) {
+ GrowableList() : num_allocated(0), num_used(0), elem_list(NULL) {
}
- size_t numAllocated;
- size_t numUsed;
- uintptr_t* elemList;
+ size_t num_allocated;
+ size_t num_used;
+ uintptr_t* elem_list;
#ifdef WITH_MEMSTATS
- oatListKind kind;
+ oat_list_kind kind;
#endif
};
@@ -126,21 +126,21 @@
*/
struct ArenaBitVector {
bool expandable; /* expand bitmap if we run out? */
- uint32_t storageSize; /* current size, in 32-bit words */
+ uint32_t storage_size; /* current size, in 32-bit words */
uint32_t* storage;
#ifdef WITH_MEMSTATS
- oatBitMapKind kind; /* for memory use tuning */
+ oat_bit_map_kind kind; /* for memory use tuning */
#endif
};
/* Handy iterator to walk through the bit positions set to 1 */
struct ArenaBitVectorIterator {
- ArenaBitVector* pBits;
+ ArenaBitVector* p_bits;
uint32_t idx;
- uint32_t bitSize;
+ uint32_t bit_size;
};
-#define GET_ELEM_N(LIST, TYPE, N) ((reinterpret_cast<TYPE*>(LIST->elemList)[N]))
+#define GET_ELEM_N(LIST, TYPE, N) ((reinterpret_cast<TYPE*>(LIST->elem_list)[N]))
#define BLOCK_NAME_LEN 80
@@ -150,29 +150,29 @@
struct LIR;
struct RegLocation;
-void CompilerInitGrowableList(CompilationUnit* cUnit,GrowableList* gList,
- size_t initLength, oatListKind kind = kListMisc);
-void InsertGrowableList(CompilationUnit* cUnit, GrowableList* gList,
+void CompilerInitGrowableList(CompilationUnit* cu, GrowableList* g_list,
+ size_t init_length, oat_list_kind kind = kListMisc);
+void InsertGrowableList(CompilationUnit* cu, GrowableList* g_list,
uintptr_t elem);
-void DeleteGrowableList(GrowableList* gList, uintptr_t elem);
-void GrowableListIteratorInit(GrowableList* gList,
+void DeleteGrowableList(GrowableList* g_list, uintptr_t elem);
+void GrowableListIteratorInit(GrowableList* g_list,
GrowableListIterator* iterator);
uintptr_t GrowableListIteratorNext(GrowableListIterator* iterator);
-uintptr_t GrowableListGetElement(const GrowableList* gList, size_t idx);
+uintptr_t GrowableListGetElement(const GrowableList* g_list, size_t idx);
-ArenaBitVector* AllocBitVector(CompilationUnit* cUnit,
- unsigned int startBits, bool expandable,
- oatBitMapKind = kBitMapMisc);
-void BitVectorIteratorInit(ArenaBitVector* pBits,
+ArenaBitVector* AllocBitVector(CompilationUnit* cu,
+ unsigned int start_bits, bool expandable,
+ oat_bit_map_kind = kBitMapMisc);
+void BitVectorIteratorInit(ArenaBitVector* p_bits,
ArenaBitVectorIterator* iterator);
int BitVectorIteratorNext(ArenaBitVectorIterator* iterator);
-bool SetBit(CompilationUnit *cUnit, ArenaBitVector* pBits, unsigned int num);
-bool ClearBit(ArenaBitVector* pBits, unsigned int num);
-void MarkAllBits(ArenaBitVector* pBits, bool set);
+bool SetBit(CompilationUnit *cu, ArenaBitVector* p_bits, unsigned int num);
+bool ClearBit(ArenaBitVector* p_bits, unsigned int num);
+void MarkAllBits(ArenaBitVector* p_bits, bool set);
void DebugBitVector(char* msg, const ArenaBitVector* bv, int length);
-bool IsBitSet(const ArenaBitVector* pBits, unsigned int num);
-void ClearAllBits(ArenaBitVector* pBits);
-void SetInitialBits(ArenaBitVector* pBits, unsigned int numBits);
+bool IsBitSet(const ArenaBitVector* p_bits, unsigned int num);
+void ClearAllBits(ArenaBitVector* p_bits);
+void SetInitialBits(ArenaBitVector* p_bits, unsigned int num_bits);
void CopyBitVector(ArenaBitVector* dest, const ArenaBitVector* src);
bool IntersectBitVectors(ArenaBitVector* dest, const ArenaBitVector* src1,
const ArenaBitVector* src2);
@@ -181,15 +181,15 @@
bool CompareBitVectors(const ArenaBitVector* src1,
const ArenaBitVector* src2);
bool TestBitVectors(const ArenaBitVector* src1, const ArenaBitVector* src2);
-int CountSetBits(const ArenaBitVector* pBits);
+int CountSetBits(const ArenaBitVector* p_bits);
-void DumpLIRInsn(CompilationUnit* cUnit, LIR* lir, unsigned char* baseAddr);
+void DumpLIRInsn(CompilationUnit* cu, LIR* lir, unsigned char* base_addr);
void DumpResourceMask(LIR* lir, uint64_t mask, const char* prefix);
void DumpBlockBitVector(const GrowableList* blocks, char* msg,
const ArenaBitVector* bv, int length);
void GetBlockName(BasicBlock* bb, char* name);
const char* GetShortyFromTargetIdx(CompilationUnit*, int);
-void DumpMemStats(CompilationUnit* cUnit);
+void DumpMemStats(CompilationUnit* cu);
} // namespace art
diff --git a/src/compiler/dataflow.cc b/src/compiler/dataflow.cc
index 35de6e7..2bca167 100644
--- a/src/compiler/dataflow.cc
+++ b/src/compiler/dataflow.cc
@@ -28,7 +28,7 @@
* TODO - many optimization flags are incomplete - they will only limit the
* scope of optimizations but will not cause mis-optimizations.
*/
-const int oatDataFlowAttributes[kMirOpLast] = {
+const int oat_data_flow_attributes[kMirOpLast] = {
// 00 NOP
DF_NOP,
@@ -836,31 +836,31 @@
};
/* Return the base virtual register for a SSA name */
-int SRegToVReg(const CompilationUnit* cUnit, int ssaReg)
+int SRegToVReg(const CompilationUnit* cu, int ssa_reg)
{
- DCHECK_LT(ssaReg, static_cast<int>(cUnit->ssaBaseVRegs->numUsed));
- return GET_ELEM_N(cUnit->ssaBaseVRegs, int, ssaReg);
+ DCHECK_LT(ssa_reg, static_cast<int>(cu->ssa_base_vregs->num_used));
+ return GET_ELEM_N(cu->ssa_base_vregs, int, ssa_reg);
}
-int SRegToSubscript(const CompilationUnit* cUnit, int ssaReg)
+int SRegToSubscript(const CompilationUnit* cu, int ssa_reg)
{
- DCHECK(ssaReg < static_cast<int>(cUnit->ssaSubscripts->numUsed));
- return GET_ELEM_N(cUnit->ssaSubscripts, int, ssaReg);
+ DCHECK(ssa_reg < static_cast<int>(cu->ssa_subscripts->num_used));
+ return GET_ELEM_N(cu->ssa_subscripts, int, ssa_reg);
}
-static int GetSSAUseCount(CompilationUnit* cUnit, int sReg)
+static int GetSSAUseCount(CompilationUnit* cu, int s_reg)
{
- DCHECK(sReg < static_cast<int>(cUnit->rawUseCounts.numUsed));
- return cUnit->rawUseCounts.elemList[sReg];
+ DCHECK(s_reg < static_cast<int>(cu->raw_use_counts.num_used));
+ return cu->raw_use_counts.elem_list[s_reg];
}
-char* GetDalvikDisassembly(CompilationUnit* cUnit,
+char* GetDalvikDisassembly(CompilationUnit* cu,
const DecodedInstruction& insn, const char* note)
{
std::string str;
int opcode = insn.opcode;
- int dfAttributes = oatDataFlowAttributes[opcode];
+ int df_attributes = oat_data_flow_attributes[opcode];
int flags;
char* ret;
@@ -884,9 +884,9 @@
/* For branches, decode the instructions to print out the branch targets */
if (flags & Instruction::kBranch) {
- Instruction::Format dalvikFormat = Instruction::FormatOf(insn.opcode);
+ Instruction::Format dalvik_format = Instruction::FormatOf(insn.opcode);
int offset = 0;
- switch (dalvikFormat) {
+ switch (dalvik_format) {
case Instruction::k21t:
str.append(StringPrintf(" v%d,", insn.vA));
offset = insn.vB;
@@ -901,56 +901,56 @@
offset = insn.vA;
break;
default:
- LOG(FATAL) << "Unexpected branch format " << dalvikFormat << " from " << insn.opcode;
+ LOG(FATAL) << "Unexpected branch format " << dalvik_format << " from " << insn.opcode;
}
str.append(StringPrintf(" (%c%x)",
offset > 0 ? '+' : '-',
offset > 0 ? offset : -offset));
- } else if (dfAttributes & DF_FORMAT_35C) {
+ } else if (df_attributes & DF_FORMAT_35C) {
unsigned int i;
for (i = 0; i < insn.vA; i++) {
if (i != 0) str.append(",");
str.append(StringPrintf(" v%d", insn.arg[i]));
}
}
- else if (dfAttributes & DF_FORMAT_3RC) {
+ else if (df_attributes & DF_FORMAT_3RC) {
str.append(StringPrintf(" v%d..v%d", insn.vC, insn.vC + insn.vA - 1));
} else {
- if (dfAttributes & DF_A_IS_REG) {
+ if (df_attributes & DF_A_IS_REG) {
str.append(StringPrintf(" v%d", insn.vA));
}
- if (dfAttributes & DF_B_IS_REG) {
+ if (df_attributes & DF_B_IS_REG) {
str.append(StringPrintf(", v%d", insn.vB));
} else if (static_cast<int>(opcode) < static_cast<int>(kMirOpFirst)) {
str.append(StringPrintf(", (#%d)", insn.vB));
}
- if (dfAttributes & DF_C_IS_REG) {
+ if (df_attributes & DF_C_IS_REG) {
str.append(StringPrintf(", v%d", insn.vC));
} else if (static_cast<int>(opcode) < static_cast<int>(kMirOpFirst)) {
str.append(StringPrintf(", (#%d)", insn.vC));
}
}
int length = str.length() + 1;
- ret = static_cast<char*>(NewMem(cUnit, length, false, kAllocDFInfo));
+ ret = static_cast<char*>(NewMem(cu, length, false, kAllocDFInfo));
strncpy(ret, str.c_str(), length);
return ret;
}
-static std::string GetSSAName(const CompilationUnit* cUnit, int ssaReg)
+static std::string GetSSAName(const CompilationUnit* cu, int ssa_reg)
{
- return StringPrintf("v%d_%d", SRegToVReg(cUnit, ssaReg),
- SRegToSubscript(cUnit, ssaReg));
+ return StringPrintf("v%d_%d", SRegToVReg(cu, ssa_reg),
+ SRegToSubscript(cu, ssa_reg));
}
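So an SSA name pairs the base virtual register with its definition subscript:

    // GetSSAName(cu, s) renders "v<base>_<sub>"; an SSA reg mapping to
    // Dalvik v3 with subscript 2 prints as "v3_2".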
/*
* Dalvik instruction disassembler with optional SSA printing.
*/
-char* FullDisassembler(CompilationUnit* cUnit, const MIR* mir)
+char* FullDisassembler(CompilationUnit* cu, const MIR* mir)
{
std::string str;
const DecodedInstruction* insn = &mir->dalvikInsn;
int opcode = insn->opcode;
- int dfAttributes = oatDataFlowAttributes[opcode];
+ int df_attributes = oat_data_flow_attributes[opcode];
char* ret;
int length;
@@ -958,19 +958,19 @@
if (opcode == kMirOpPhi) {
int* incoming = reinterpret_cast<int*>(mir->dalvikInsn.vB);
str.append(StringPrintf("PHI %s = (%s",
- GetSSAName(cUnit, mir->ssaRep->defs[0]).c_str(),
- GetSSAName(cUnit, mir->ssaRep->uses[0]).c_str()));
+ GetSSAName(cu, mir->ssa_rep->defs[0]).c_str(),
+ GetSSAName(cu, mir->ssa_rep->uses[0]).c_str()));
str.append(StringPrintf(":%d",incoming[0]));
int i;
- for (i = 1; i < mir->ssaRep->numUses; i++) {
+ for (i = 1; i < mir->ssa_rep->num_uses; i++) {
str.append(StringPrintf(", %s:%d",
- GetSSAName(cUnit, mir->ssaRep->uses[i]).c_str(),
+ GetSSAName(cu, mir->ssa_rep->uses[i]).c_str(),
incoming[i]));
}
str.append(")");
} else if (opcode == kMirOpCheck) {
str.append("Check ");
- str.append(Instruction::Name(mir->meta.throwInsn->dalvikInsn.opcode));
+ str.append(Instruction::Name(mir->meta.throw_insn->dalvikInsn.opcode));
} else if (opcode == kMirOpNop) {
str.append("MirNop");
} else {
@@ -983,18 +983,18 @@
/* For branches, decode the instructions to print out the branch targets */
if (Instruction::FlagsOf(insn->opcode) & Instruction::kBranch) {
- Instruction::Format dalvikFormat = Instruction::FormatOf(insn->opcode);
+ Instruction::Format dalvik_format = Instruction::FormatOf(insn->opcode);
int delta = 0;
- switch (dalvikFormat) {
+ switch (dalvik_format) {
case Instruction::k21t:
str.append(StringPrintf(" %s, ",
- GetSSAName(cUnit, mir->ssaRep->uses[0]).c_str()));
+ GetSSAName(cu, mir->ssa_rep->uses[0]).c_str()));
delta = insn->vB;
break;
case Instruction::k22t:
str.append(StringPrintf(" %s, %s, ",
- GetSSAName(cUnit, mir->ssaRep->uses[0]).c_str(),
- GetSSAName(cUnit, mir->ssaRep->uses[1]).c_str()));
+ GetSSAName(cu, mir->ssa_rep->uses[0]).c_str(),
+ GetSSAName(cu, mir->ssa_rep->uses[1]).c_str()));
delta = insn->vC;
break;
case Instruction::k10t:
@@ -1003,38 +1003,38 @@
delta = insn->vA;
break;
default:
- LOG(FATAL) << "Unexpected branch format: " << dalvikFormat;
+ LOG(FATAL) << "Unexpected branch format: " << dalvik_format;
}
str.append(StringPrintf(" %04x", mir->offset + delta));
- } else if (dfAttributes & (DF_FORMAT_35C | DF_FORMAT_3RC)) {
+ } else if (df_attributes & (DF_FORMAT_35C | DF_FORMAT_3RC)) {
unsigned int i;
for (i = 0; i < insn->vA; i++) {
if (i != 0) str.append(",");
str.append(" ");
- str.append(GetSSAName(cUnit, mir->ssaRep->uses[i]));
+ str.append(GetSSAName(cu, mir->ssa_rep->uses[i]));
}
} else {
- int udIdx;
- if (mir->ssaRep->numDefs) {
+ int ud_idx;
+ if (mir->ssa_rep->num_defs) {
- for (udIdx = 0; udIdx < mir->ssaRep->numDefs; udIdx++) {
+ for (ud_idx = 0; ud_idx < mir->ssa_rep->num_defs; ud_idx++) {
str.append(" ");
- str.append(GetSSAName(cUnit, mir->ssaRep->defs[udIdx]));
+ str.append(GetSSAName(cu, mir->ssa_rep->defs[ud_idx]));
}
str.append(",");
}
- if (mir->ssaRep->numUses) {
+ if (mir->ssa_rep->num_uses) {
/* No leading ',' for the first use */
str.append(" ");
- str.append(GetSSAName(cUnit, mir->ssaRep->uses[0]));
- for (udIdx = 1; udIdx < mir->ssaRep->numUses; udIdx++) {
+ str.append(GetSSAName(cu, mir->ssa_rep->uses[0]));
+ for (ud_idx = 1; ud_idx < mir->ssa_rep->num_uses; ud_idx++) {
str.append(", ");
- str.append(GetSSAName(cUnit, mir->ssaRep->uses[udIdx]));
+ str.append(GetSSAName(cu, mir->ssa_rep->uses[ud_idx]));
}
}
if (static_cast<int>(opcode) < static_cast<int>(kMirOpFirst)) {
- Instruction::Format dalvikFormat = Instruction::FormatOf(insn->opcode);
- switch (dalvikFormat) {
+ Instruction::Format dalvik_format = Instruction::FormatOf(insn->opcode);
+ switch (dalvik_format) {
case Instruction::k11n: // op vA, #+B
case Instruction::k21s: // op vAA, #+BBBB
case Instruction::k21h: // op vAA, #+BBBB0000[00000000]
@@ -1062,198 +1062,198 @@
done:
length = str.length() + 1;
- ret = static_cast<char*>(NewMem(cUnit, length, false, kAllocDFInfo));
+ ret = static_cast<char*>(NewMem(cu, length, false, kAllocDFInfo));
strncpy(ret, str.c_str(), length);
return ret;
}
-char* GetSSAString(CompilationUnit* cUnit, SSARepresentation* ssaRep)
+char* GetSSAString(CompilationUnit* cu, SSARepresentation* ssa_rep)
{
std::string str;
char* ret;
int i;
- for (i = 0; i < ssaRep->numDefs; i++) {
- int ssaReg = ssaRep->defs[i];
- str.append(StringPrintf("s%d(v%d_%d) ", ssaReg,
- SRegToVReg(cUnit, ssaReg),
- SRegToSubscript(cUnit, ssaReg)));
+ for (i = 0; i < ssa_rep->num_defs; i++) {
+ int ssa_reg = ssa_rep->defs[i];
+ str.append(StringPrintf("s%d(v%d_%d) ", ssa_reg,
+ SRegToVReg(cu, ssa_reg),
+ SRegToSubscript(cu, ssa_reg)));
}
- if (ssaRep->numDefs) {
+ if (ssa_rep->num_defs) {
str.append("<- ");
}
- for (i = 0; i < ssaRep->numUses; i++) {
- int ssaReg = ssaRep->uses[i];
- str.append(StringPrintf("s%d(v%d_%d) ", ssaReg, SRegToVReg(cUnit, ssaReg),
- SRegToSubscript(cUnit, ssaReg)));
+ for (i = 0; i < ssa_rep->num_uses; i++) {
+ int ssa_reg = ssa_rep->uses[i];
+ str.append(StringPrintf("s%d(v%d_%d) ", ssa_reg, SRegToVReg(cu, ssa_reg),
+ SRegToSubscript(cu, ssa_reg)));
}
int length = str.length() + 1;
- ret = static_cast<char*>(NewMem(cUnit, length, false, kAllocDFInfo));
+ ret = static_cast<char*>(NewMem(cu, length, false, kAllocDFInfo));
strncpy(ret, str.c_str(), length);
return ret;
}
/* Any register that is used before being defined is considered live-in */
-static void HandleLiveInUse(CompilationUnit* cUnit, ArenaBitVector* useV, ArenaBitVector* defV,
- ArenaBitVector* liveInV, int dalvikRegId)
+static void HandleLiveInUse(CompilationUnit* cu, ArenaBitVector* use_v, ArenaBitVector* def_v,
+ ArenaBitVector* live_in_v, int dalvik_reg_id)
{
- SetBit(cUnit, useV, dalvikRegId);
- if (!IsBitSet(defV, dalvikRegId)) {
- SetBit(cUnit, liveInV, dalvikRegId);
+ SetBit(cu, use_v, dalvik_reg_id);
+ if (!IsBitSet(def_v, dalvik_reg_id)) {
+ SetBit(cu, live_in_v, dalvik_reg_id);
}
}
/* Mark a reg as being defined */
-static void HandleDef(CompilationUnit* cUnit, ArenaBitVector* defV, int dalvikRegId)
+static void HandleDef(CompilationUnit* cu, ArenaBitVector* def_v, int dalvik_reg_id)
{
- SetBit(cUnit, defV, dalvikRegId);
+ SetBit(cu, def_v, dalvik_reg_id);
}
/*
* Find out live-in variables for natural loops. Variables that are live-in in
* the main loop body are considered to be defined in the entry block.
*/
-bool FindLocalLiveIn(CompilationUnit* cUnit, BasicBlock* bb)
+bool FindLocalLiveIn(CompilationUnit* cu, BasicBlock* bb)
{
MIR* mir;
- ArenaBitVector *useV, *defV, *liveInV;
+ ArenaBitVector *use_v, *def_v, *live_in_v;
- if (bb->dataFlowInfo == NULL) return false;
+ if (bb->data_flow_info == NULL) return false;
- useV = bb->dataFlowInfo->useV =
- AllocBitVector(cUnit, cUnit->numDalvikRegisters, false, kBitMapUse);
- defV = bb->dataFlowInfo->defV =
- AllocBitVector(cUnit, cUnit->numDalvikRegisters, false, kBitMapDef);
- liveInV = bb->dataFlowInfo->liveInV =
- AllocBitVector(cUnit, cUnit->numDalvikRegisters, false,
+ use_v = bb->data_flow_info->use_v =
+ AllocBitVector(cu, cu->num_dalvik_registers, false, kBitMapUse);
+ def_v = bb->data_flow_info->def_v =
+ AllocBitVector(cu, cu->num_dalvik_registers, false, kBitMapDef);
+ live_in_v = bb->data_flow_info->live_in_v =
+ AllocBitVector(cu, cu->num_dalvik_registers, false,
kBitMapLiveIn);
- for (mir = bb->firstMIRInsn; mir; mir = mir->next) {
- int dfAttributes = oatDataFlowAttributes[mir->dalvikInsn.opcode];
- DecodedInstruction *dInsn = &mir->dalvikInsn;
+ for (mir = bb->first_mir_insn; mir; mir = mir->next) {
+ int df_attributes = oat_data_flow_attributes[mir->dalvikInsn.opcode];
+ DecodedInstruction *d_insn = &mir->dalvikInsn;
- if (dfAttributes & DF_HAS_USES) {
- if (dfAttributes & DF_UA) {
- HandleLiveInUse(cUnit, useV, defV, liveInV, dInsn->vA);
- if (dfAttributes & DF_A_WIDE) {
- HandleLiveInUse(cUnit, useV, defV, liveInV, dInsn->vA+1);
+ if (df_attributes & DF_HAS_USES) {
+ if (df_attributes & DF_UA) {
+ HandleLiveInUse(cu, use_v, def_v, live_in_v, d_insn->vA);
+ if (df_attributes & DF_A_WIDE) {
+ HandleLiveInUse(cu, use_v, def_v, live_in_v, d_insn->vA+1);
}
}
- if (dfAttributes & DF_UB) {
- HandleLiveInUse(cUnit, useV, defV, liveInV, dInsn->vB);
- if (dfAttributes & DF_B_WIDE) {
- HandleLiveInUse(cUnit, useV, defV, liveInV, dInsn->vB+1);
+ if (df_attributes & DF_UB) {
+ HandleLiveInUse(cu, use_v, def_v, live_in_v, d_insn->vB);
+ if (df_attributes & DF_B_WIDE) {
+ HandleLiveInUse(cu, use_v, def_v, live_in_v, d_insn->vB+1);
}
}
- if (dfAttributes & DF_UC) {
- HandleLiveInUse(cUnit, useV, defV, liveInV, dInsn->vC);
- if (dfAttributes & DF_C_WIDE) {
- HandleLiveInUse(cUnit, useV, defV, liveInV, dInsn->vC+1);
+ if (df_attributes & DF_UC) {
+ HandleLiveInUse(cu, use_v, def_v, live_in_v, d_insn->vC);
+ if (df_attributes & DF_C_WIDE) {
+ HandleLiveInUse(cu, use_v, def_v, live_in_v, d_insn->vC+1);
}
}
}
- if (dfAttributes & DF_FORMAT_35C) {
- for (unsigned int i = 0; i < dInsn->vA; i++) {
- HandleLiveInUse(cUnit, useV, defV, liveInV, dInsn->arg[i]);
+ if (df_attributes & DF_FORMAT_35C) {
+ for (unsigned int i = 0; i < d_insn->vA; i++) {
+ HandleLiveInUse(cu, use_v, def_v, live_in_v, d_insn->arg[i]);
}
}
- if (dfAttributes & DF_FORMAT_3RC) {
- for (unsigned int i = 0; i < dInsn->vA; i++) {
- HandleLiveInUse(cUnit, useV, defV, liveInV, dInsn->vC+i);
+ if (df_attributes & DF_FORMAT_3RC) {
+ for (unsigned int i = 0; i < d_insn->vA; i++) {
+ HandleLiveInUse(cu, use_v, def_v, live_in_v, d_insn->vC+i);
}
}
- if (dfAttributes & DF_HAS_DEFS) {
- HandleDef(cUnit, defV, dInsn->vA);
- if (dfAttributes & DF_A_WIDE) {
- HandleDef(cUnit, defV, dInsn->vA+1);
+ if (df_attributes & DF_HAS_DEFS) {
+ HandleDef(cu, def_v, d_insn->vA);
+ if (df_attributes & DF_A_WIDE) {
+ HandleDef(cu, def_v, d_insn->vA+1);
}
}
}
return true;
}
-static int AddNewSReg(CompilationUnit* cUnit, int vReg)
+static int AddNewSReg(CompilationUnit* cu, int v_reg)
{
// Compiler temps always have a subscript of 0
- int subscript = (vReg < 0) ? 0 : ++cUnit->SSALastDefs[vReg];
- int ssaReg = cUnit->numSSARegs++;
- InsertGrowableList(cUnit, cUnit->ssaBaseVRegs, vReg);
- InsertGrowableList(cUnit, cUnit->ssaSubscripts, subscript);
- std::string ssaName = GetSSAName(cUnit, ssaReg);
- char* name = static_cast<char*>(NewMem(cUnit, ssaName.length() + 1, false, kAllocDFInfo));
- strncpy(name, ssaName.c_str(), ssaName.length() + 1);
- InsertGrowableList(cUnit, cUnit->ssaStrings, reinterpret_cast<uintptr_t>(name));
- DCHECK_EQ(cUnit->ssaBaseVRegs->numUsed, cUnit->ssaSubscripts->numUsed);
- return ssaReg;
+ int subscript = (v_reg < 0) ? 0 : ++cu->ssa_last_defs[v_reg];
+ int ssa_reg = cu->num_ssa_regs++;
+ InsertGrowableList(cu, cu->ssa_base_vregs, v_reg);
+ InsertGrowableList(cu, cu->ssa_subscripts, subscript);
+ std::string ssa_name = GetSSAName(cu, ssa_reg);
+ char* name = static_cast<char*>(NewMem(cu, ssa_name.length() + 1, false, kAllocDFInfo));
+ strncpy(name, ssa_name.c_str(), ssa_name.length() + 1);
+ InsertGrowableList(cu, cu->ssa_strings, reinterpret_cast<uintptr_t>(name));
+ DCHECK_EQ(cu->ssa_base_vregs->num_used, cu->ssa_subscripts->num_used);
+ return ssa_reg;
}
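Subscript assignment is per-vreg and monotonic; a sketch of two consecutive definitions of the same register, assuming ssa_last_defs[3] starts at 0:

    // AddNewSReg(cu, 3) -> subscript 1, registered as "v3_1"
    // AddNewSReg(cu, 3) -> subscript 2, registered as "v3_2"
    // A negative v_reg (compiler temp) always takes subscript 0.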
/* Find out the latest SSA register for a given Dalvik register */
-static void HandleSSAUse(CompilationUnit* cUnit, int* uses, int dalvikReg, int regIndex)
+static void HandleSSAUse(CompilationUnit* cu, int* uses, int dalvik_reg, int reg_index)
{
- DCHECK((dalvikReg >= 0) && (dalvikReg < cUnit->numDalvikRegisters));
- uses[regIndex] = cUnit->vRegToSSAMap[dalvikReg];
+ DCHECK((dalvik_reg >= 0) && (dalvik_reg < cu->num_dalvik_registers));
+ uses[reg_index] = cu->vreg_to_ssa_map[dalvik_reg];
}
/* Set up a new SSA register for a given Dalvik register */
-static void HandleSSADef(CompilationUnit* cUnit, int* defs, int dalvikReg, int regIndex)
+static void HandleSSADef(CompilationUnit* cu, int* defs, int dalvik_reg, int reg_index)
{
- DCHECK((dalvikReg >= 0) && (dalvikReg < cUnit->numDalvikRegisters));
- int ssaReg = AddNewSReg(cUnit, dalvikReg);
- cUnit->vRegToSSAMap[dalvikReg] = ssaReg;
- defs[regIndex] = ssaReg;
+ DCHECK((dalvik_reg >= 0) && (dalvik_reg < cu->num_dalvik_registers));
+ int ssa_reg = AddNewSReg(cu, dalvik_reg);
+ cu->vreg_to_ssa_map[dalvik_reg] = ssa_reg;
+ defs[reg_index] = ssa_reg;
}
/* Look up new SSA names for format_35c instructions */
-static void DataFlowSSAFormat35C(CompilationUnit* cUnit, MIR* mir)
+static void DataFlowSSAFormat35C(CompilationUnit* cu, MIR* mir)
{
- DecodedInstruction *dInsn = &mir->dalvikInsn;
- int numUses = dInsn->vA;
+ DecodedInstruction *d_insn = &mir->dalvikInsn;
+ int num_uses = d_insn->vA;
int i;
- mir->ssaRep->numUses = numUses;
- mir->ssaRep->uses = static_cast<int*>(NewMem(cUnit, sizeof(int) * numUses, true, kAllocDFInfo));
+ mir->ssa_rep->num_uses = num_uses;
+ mir->ssa_rep->uses = static_cast<int*>(NewMem(cu, sizeof(int) * num_uses, true, kAllocDFInfo));
// NOTE: will be filled in during type & size inference pass
- mir->ssaRep->fpUse = static_cast<bool*>(NewMem(cUnit, sizeof(bool) * numUses, true,
+ mir->ssa_rep->fp_use = static_cast<bool*>(NewMem(cu, sizeof(bool) * num_uses, true,
kAllocDFInfo));
- for (i = 0; i < numUses; i++) {
- HandleSSAUse(cUnit, mir->ssaRep->uses, dInsn->arg[i], i);
+ for (i = 0; i < num_uses; i++) {
+ HandleSSAUse(cu, mir->ssa_rep->uses, d_insn->arg[i], i);
}
}
/* Look up new SSA names for format_3rc instructions */
-static void DataFlowSSAFormat3RC(CompilationUnit* cUnit, MIR* mir)
+static void DataFlowSSAFormat3RC(CompilationUnit* cu, MIR* mir)
{
- DecodedInstruction *dInsn = &mir->dalvikInsn;
- int numUses = dInsn->vA;
+ DecodedInstruction *d_insn = &mir->dalvikInsn;
+ int num_uses = d_insn->vA;
int i;
- mir->ssaRep->numUses = numUses;
- mir->ssaRep->uses = static_cast<int*>(NewMem(cUnit, sizeof(int) * numUses, true, kAllocDFInfo));
+ mir->ssa_rep->num_uses = num_uses;
+ mir->ssa_rep->uses = static_cast<int*>(NewMem(cu, sizeof(int) * num_uses, true, kAllocDFInfo));
// NOTE: will be filled in during type & size inference pass
- mir->ssaRep->fpUse = static_cast<bool*>(NewMem(cUnit, sizeof(bool) * numUses, true,
+ mir->ssa_rep->fp_use = static_cast<bool*>(NewMem(cu, sizeof(bool) * num_uses, true,
kAllocDFInfo));
- for (i = 0; i < numUses; i++) {
- HandleSSAUse(cUnit, mir->ssaRep->uses, dInsn->vC+i, i);
+ for (i = 0; i < num_uses; i++) {
+ HandleSSAUse(cu, mir->ssa_rep->uses, d_insn->vC+i, i);
}
}
/* Entry function to convert a block into SSA representation */
-bool DoSSAConversion(CompilationUnit* cUnit, BasicBlock* bb)
+bool DoSSAConversion(CompilationUnit* cu, BasicBlock* bb)
{
MIR* mir;
- if (bb->dataFlowInfo == NULL) return false;
+ if (bb->data_flow_info == NULL) return false;
- for (mir = bb->firstMIRInsn; mir; mir = mir->next) {
- mir->ssaRep = static_cast<struct SSARepresentation *>(NewMem(cUnit, sizeof(SSARepresentation),
+ for (mir = bb->first_mir_insn; mir; mir = mir->next) {
+ mir->ssa_rep = static_cast<struct SSARepresentation *>(NewMem(cu, sizeof(SSARepresentation),
true, kAllocDFInfo));
- int dfAttributes = oatDataFlowAttributes[mir->dalvikInsn.opcode];
+ int df_attributes = oat_data_flow_attributes[mir->dalvikInsn.opcode];
// If not a pseudo-op, note non-leaf or can throw
if (static_cast<int>(mir->dalvikInsn.opcode) <
@@ -1261,189 +1261,189 @@
int flags = Instruction::FlagsOf(mir->dalvikInsn.opcode);
if (flags & Instruction::kThrow) {
- cUnit->attrs &= ~METHOD_IS_THROW_FREE;
+ cu->attrs &= ~METHOD_IS_THROW_FREE;
}
if (flags & Instruction::kInvoke) {
- cUnit->attrs &= ~METHOD_IS_LEAF;
+ cu->attrs &= ~METHOD_IS_LEAF;
}
}
- int numUses = 0;
+ int num_uses = 0;
- if (dfAttributes & DF_FORMAT_35C) {
- DataFlowSSAFormat35C(cUnit, mir);
+ if (df_attributes & DF_FORMAT_35C) {
+ DataFlowSSAFormat35C(cu, mir);
continue;
}
- if (dfAttributes & DF_FORMAT_3RC) {
- DataFlowSSAFormat3RC(cUnit, mir);
+ if (df_attributes & DF_FORMAT_3RC) {
+ DataFlowSSAFormat3RC(cu, mir);
continue;
}
- if (dfAttributes & DF_HAS_USES) {
- if (dfAttributes & DF_UA) {
- numUses++;
- if (dfAttributes & DF_A_WIDE) {
- numUses ++;
+ if (df_attributes & DF_HAS_USES) {
+ if (df_attributes & DF_UA) {
+ num_uses++;
+ if (df_attributes & DF_A_WIDE) {
+ num_uses++;
}
}
- if (dfAttributes & DF_UB) {
- numUses++;
- if (dfAttributes & DF_B_WIDE) {
- numUses ++;
+ if (df_attributes & DF_UB) {
+ num_uses++;
+ if (df_attributes & DF_B_WIDE) {
+ num_uses++;
}
}
- if (dfAttributes & DF_UC) {
- numUses++;
- if (dfAttributes & DF_C_WIDE) {
- numUses ++;
+ if (df_attributes & DF_UC) {
+ num_uses++;
+ if (df_attributes & DF_C_WIDE) {
+ num_uses++;
}
}
}
- if (numUses) {
- mir->ssaRep->numUses = numUses;
- mir->ssaRep->uses = static_cast<int*>(NewMem(cUnit, sizeof(int) * numUses, false,
+ if (num_uses) {
+ mir->ssa_rep->num_uses = num_uses;
+ mir->ssa_rep->uses = static_cast<int*>(NewMem(cu, sizeof(int) * num_uses, false,
kAllocDFInfo));
- mir->ssaRep->fpUse = static_cast<bool*>(NewMem(cUnit, sizeof(bool) * numUses, false,
+ mir->ssa_rep->fp_use = static_cast<bool*>(NewMem(cu, sizeof(bool) * num_uses, false,
kAllocDFInfo));
}
- int numDefs = 0;
+ int num_defs = 0;
- if (dfAttributes & DF_HAS_DEFS) {
- numDefs++;
- if (dfAttributes & DF_A_WIDE) {
- numDefs++;
+ if (df_attributes & DF_HAS_DEFS) {
+ num_defs++;
+ if (df_attributes & DF_A_WIDE) {
+ num_defs++;
}
}
- if (numDefs) {
- mir->ssaRep->numDefs = numDefs;
- mir->ssaRep->defs = static_cast<int*>(NewMem(cUnit, sizeof(int) * numDefs, false,
+ if (num_defs) {
+ mir->ssa_rep->num_defs = num_defs;
+ mir->ssa_rep->defs = static_cast<int*>(NewMem(cu, sizeof(int) * num_defs, false,
kAllocDFInfo));
- mir->ssaRep->fpDef = static_cast<bool*>(NewMem(cUnit, sizeof(bool) * numDefs, false,
+ mir->ssa_rep->fp_def = static_cast<bool*>(NewMem(cu, sizeof(bool) * num_defs, false,
kAllocDFInfo));
}
- DecodedInstruction *dInsn = &mir->dalvikInsn;
+ DecodedInstruction *d_insn = &mir->dalvikInsn;
- if (dfAttributes & DF_HAS_USES) {
- numUses = 0;
- if (dfAttributes & DF_UA) {
- mir->ssaRep->fpUse[numUses] = dfAttributes & DF_FP_A;
- HandleSSAUse(cUnit, mir->ssaRep->uses, dInsn->vA, numUses++);
- if (dfAttributes & DF_A_WIDE) {
- mir->ssaRep->fpUse[numUses] = dfAttributes & DF_FP_A;
- HandleSSAUse(cUnit, mir->ssaRep->uses, dInsn->vA+1, numUses++);
+ if (df_attributes & DF_HAS_USES) {
+ num_uses = 0;
+ if (df_attributes & DF_UA) {
+ mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_A;
+ HandleSSAUse(cu, mir->ssa_rep->uses, d_insn->vA, num_uses++);
+ if (df_attributes & DF_A_WIDE) {
+ mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_A;
+ HandleSSAUse(cu, mir->ssa_rep->uses, d_insn->vA+1, num_uses++);
}
}
- if (dfAttributes & DF_UB) {
- mir->ssaRep->fpUse[numUses] = dfAttributes & DF_FP_B;
- HandleSSAUse(cUnit, mir->ssaRep->uses, dInsn->vB, numUses++);
- if (dfAttributes & DF_B_WIDE) {
- mir->ssaRep->fpUse[numUses] = dfAttributes & DF_FP_B;
- HandleSSAUse(cUnit, mir->ssaRep->uses, dInsn->vB+1, numUses++);
+ if (df_attributes & DF_UB) {
+ mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_B;
+ HandleSSAUse(cu, mir->ssa_rep->uses, d_insn->vB, num_uses++);
+ if (df_attributes & DF_B_WIDE) {
+ mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_B;
+ HandleSSAUse(cu, mir->ssa_rep->uses, d_insn->vB+1, num_uses++);
}
}
- if (dfAttributes & DF_UC) {
- mir->ssaRep->fpUse[numUses] = dfAttributes & DF_FP_C;
- HandleSSAUse(cUnit, mir->ssaRep->uses, dInsn->vC, numUses++);
- if (dfAttributes & DF_C_WIDE) {
- mir->ssaRep->fpUse[numUses] = dfAttributes & DF_FP_C;
- HandleSSAUse(cUnit, mir->ssaRep->uses, dInsn->vC+1, numUses++);
+ if (df_attributes & DF_UC) {
+ mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_C;
+ HandleSSAUse(cu, mir->ssa_rep->uses, d_insn->vC, num_uses++);
+ if (df_attributes & DF_C_WIDE) {
+ mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_C;
+ HandleSSAUse(cu, mir->ssa_rep->uses, d_insn->vC+1, num_uses++);
}
}
}
- if (dfAttributes & DF_HAS_DEFS) {
- mir->ssaRep->fpDef[0] = dfAttributes & DF_FP_A;
- HandleSSADef(cUnit, mir->ssaRep->defs, dInsn->vA, 0);
- if (dfAttributes & DF_A_WIDE) {
- mir->ssaRep->fpDef[1] = dfAttributes & DF_FP_A;
- HandleSSADef(cUnit, mir->ssaRep->defs, dInsn->vA+1, 1);
+ if (df_attributes & DF_HAS_DEFS) {
+ mir->ssa_rep->fp_def[0] = df_attributes & DF_FP_A;
+ HandleSSADef(cu, mir->ssa_rep->defs, d_insn->vA, 0);
+ if (df_attributes & DF_A_WIDE) {
+ mir->ssa_rep->fp_def[1] = df_attributes & DF_FP_A;
+ HandleSSADef(cu, mir->ssa_rep->defs, d_insn->vA+1, 1);
}
}
}
- if (!cUnit->disableDataflow) {
+ if (!cu->disable_dataflow) {
/*
* Take a snapshot of Dalvik->SSA mapping at the end of each block. The
* input to PHI nodes can be derived from the snapshot of all
* predecessor blocks.
*/
- bb->dataFlowInfo->vRegToSSAMap =
- static_cast<int*>(NewMem(cUnit, sizeof(int) * cUnit->numDalvikRegisters, false,
+ bb->data_flow_info->vreg_to_ssa_map =
+ static_cast<int*>(NewMem(cu, sizeof(int) * cu->num_dalvik_registers, false,
kAllocDFInfo));
- memcpy(bb->dataFlowInfo->vRegToSSAMap, cUnit->vRegToSSAMap,
- sizeof(int) * cUnit->numDalvikRegisters);
+ memcpy(bb->data_flow_info->vreg_to_ssa_map, cu->vreg_to_ssa_map,
+ sizeof(int) * cu->num_dalvik_registers);
}
return true;
}
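Concretely, for a three-address add the pass resolves uses from the running map and mints a fresh def; an illustrative trace:

    // add-int v0, v1, v2   (DF_HAS_USES | DF_UB | DF_UC | DF_HAS_DEFS | DF_DA)
    //   ssa_rep->uses = { vreg_to_ssa_map[1], vreg_to_ssa_map[2] }
    //   ssa_rep->defs = { AddNewSReg(cu, 0) }  // also updates vreg_to_ssa_map[0]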
/* Set up a constant value for opcodes that have the DF_SETS_CONST attribute */
-static void SetConstant(CompilationUnit* cUnit, int ssaReg, int value)
+static void SetConstant(CompilationUnit* cu, int ssa_reg, int value)
{
- SetBit(cUnit, cUnit->isConstantV, ssaReg);
- cUnit->constantValues[ssaReg] = value;
+ SetBit(cu, cu->is_constant_v, ssa_reg);
+ cu->constant_values[ssa_reg] = value;
}
-bool DoConstantPropogation(CompilationUnit* cUnit, BasicBlock* bb)
+bool DoConstantPropogation(CompilationUnit* cu, BasicBlock* bb)
{
MIR* mir;
- ArenaBitVector *isConstantV = cUnit->isConstantV;
+ ArenaBitVector *is_constant_v = cu->is_constant_v;
- for (mir = bb->firstMIRInsn; mir; mir = mir->next) {
- int dfAttributes = oatDataFlowAttributes[mir->dalvikInsn.opcode];
+ for (mir = bb->first_mir_insn; mir; mir = mir->next) {
+ int df_attributes = oat_data_flow_attributes[mir->dalvikInsn.opcode];
- DecodedInstruction *dInsn = &mir->dalvikInsn;
+ DecodedInstruction *d_insn = &mir->dalvikInsn;
- if (!(dfAttributes & DF_HAS_DEFS)) continue;
+ if (!(df_attributes & DF_HAS_DEFS)) continue;
/* Handle instructions that set up constants directly */
- if (dfAttributes & DF_SETS_CONST) {
- if (dfAttributes & DF_DA) {
- switch (dInsn->opcode) {
+ if (df_attributes & DF_SETS_CONST) {
+ if (df_attributes & DF_DA) {
+ switch (d_insn->opcode) {
case Instruction::CONST_4:
case Instruction::CONST_16:
case Instruction::CONST:
- SetConstant(cUnit, mir->ssaRep->defs[0], dInsn->vB);
+ SetConstant(cu, mir->ssa_rep->defs[0], d_insn->vB);
break;
case Instruction::CONST_HIGH16:
- SetConstant(cUnit, mir->ssaRep->defs[0], dInsn->vB << 16);
+ SetConstant(cu, mir->ssa_rep->defs[0], d_insn->vB << 16);
break;
case Instruction::CONST_WIDE_16:
case Instruction::CONST_WIDE_32:
- SetConstant(cUnit, mir->ssaRep->defs[0], dInsn->vB);
- SetConstant(cUnit, mir->ssaRep->defs[1], 0);
+ SetConstant(cu, mir->ssa_rep->defs[0], d_insn->vB);
+ SetConstant(cu, mir->ssa_rep->defs[1], 0);
break;
case Instruction::CONST_WIDE:
- SetConstant(cUnit, mir->ssaRep->defs[0], static_cast<int>(dInsn->vB_wide));
- SetConstant(cUnit, mir->ssaRep->defs[1], static_cast<int>(dInsn->vB_wide >> 32));
+ SetConstant(cu, mir->ssa_rep->defs[0], static_cast<int>(d_insn->vB_wide));
+ SetConstant(cu, mir->ssa_rep->defs[1], static_cast<int>(d_insn->vB_wide >> 32));
break;
case Instruction::CONST_WIDE_HIGH16:
- SetConstant(cUnit, mir->ssaRep->defs[0], 0);
- SetConstant(cUnit, mir->ssaRep->defs[1], dInsn->vB << 16);
+ SetConstant(cu, mir->ssa_rep->defs[0], 0);
+ SetConstant(cu, mir->ssa_rep->defs[1], d_insn->vB << 16);
break;
default:
break;
}
}
/* Handle instructions that copy a constant between registers */
- } else if (dfAttributes & DF_IS_MOVE) {
+ } else if (df_attributes & DF_IS_MOVE) {
int i;
- for (i = 0; i < mir->ssaRep->numUses; i++) {
- if (!IsBitSet(isConstantV, mir->ssaRep->uses[i])) break;
+ for (i = 0; i < mir->ssa_rep->num_uses; i++) {
+ if (!IsBitSet(is_constant_v, mir->ssa_rep->uses[i])) break;
}
/* Move a register holding a constant to another register */
- if (i == mir->ssaRep->numUses) {
- SetConstant(cUnit, mir->ssaRep->defs[0],
- cUnit->constantValues[mir->ssaRep->uses[0]]);
- if (dfAttributes & DF_A_WIDE) {
- SetConstant(cUnit, mir->ssaRep->defs[1],
- cUnit->constantValues[mir->ssaRep->uses[1]]);
+ if (i == mir->ssa_rep->num_uses) {
+ SetConstant(cu, mir->ssa_rep->defs[0],
+ cu->constant_values[mir->ssa_rep->uses[0]]);
+ if (df_attributes & DF_A_WIDE) {
+ SetConstant(cu, mir->ssa_rep->defs[1],
+ cu->constant_values[mir->ssa_rep->uses[1]]);
}
}
}
@@ -1453,128 +1453,128 @@
}
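A worked case for the high16 forms and the move branch:

    // const/high16 v0, 0x3f80 -> SetConstant(..., 0x3f80 << 16) = 0x3f800000,
    // the bit pattern of 1.0f; a following move whose every use is marked in
    // is_constant_v then copies constant_values[] through to its def.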
/* Set up the basic data structures for SSA conversion */
-void CompilerInitializeSSAConversion(CompilationUnit* cUnit)
+void CompilerInitializeSSAConversion(CompilationUnit* cu)
{
int i;
- int numDalvikReg = cUnit->numDalvikRegisters;
+ int num_dalvik_reg = cu->num_dalvik_registers;
- cUnit->ssaBaseVRegs =
- static_cast<GrowableList*>(NewMem(cUnit, sizeof(GrowableList), false, kAllocDFInfo));
- cUnit->ssaSubscripts =
- static_cast<GrowableList*>(NewMem(cUnit, sizeof(GrowableList), false, kAllocDFInfo));
- cUnit->ssaStrings =
- static_cast<GrowableList*>(NewMem(cUnit, sizeof(GrowableList), false, kAllocDFInfo));
+ cu->ssa_base_vregs =
+ static_cast<GrowableList*>(NewMem(cu, sizeof(GrowableList), false, kAllocDFInfo));
+ cu->ssa_subscripts =
+ static_cast<GrowableList*>(NewMem(cu, sizeof(GrowableList), false, kAllocDFInfo));
+ cu->ssa_strings =
+ static_cast<GrowableList*>(NewMem(cu, sizeof(GrowableList), false, kAllocDFInfo));
// Create the ssa mappings, estimating the max size
- CompilerInitGrowableList(cUnit, cUnit->ssaBaseVRegs,
- numDalvikReg + cUnit->defCount + 128,
+ CompilerInitGrowableList(cu, cu->ssa_base_vregs,
+ num_dalvik_reg + cu->def_count + 128,
kListSSAtoDalvikMap);
- CompilerInitGrowableList(cUnit, cUnit->ssaSubscripts,
- numDalvikReg + cUnit->defCount + 128,
+ CompilerInitGrowableList(cu, cu->ssa_subscripts,
+ num_dalvik_reg + cu->def_count + 128,
kListSSAtoDalvikMap);
- CompilerInitGrowableList(cUnit, cUnit->ssaStrings,
- numDalvikReg + cUnit->defCount + 128,
+ CompilerInitGrowableList(cu, cu->ssa_strings,
+ num_dalvik_reg + cu->def_count + 128,
kListSSAtoDalvikMap);
/*
* Initial number of SSA registers is equal to the number of Dalvik
* registers.
*/
- cUnit->numSSARegs = numDalvikReg;
+ cu->num_ssa_regs = num_dalvik_reg;
/*
- * Initialize the SSA2Dalvik map list. For the first numDalvikReg elements,
+ * Initialize the SSA2Dalvik map list. For the first num_dalvik_reg elements,
* the subscript is 0 so we use the ENCODE_REG_SUB macro to encode the value
* into "(0 << 16) | i"
*/
- for (i = 0; i < numDalvikReg; i++) {
- InsertGrowableList(cUnit, cUnit->ssaBaseVRegs, i);
- InsertGrowableList(cUnit, cUnit->ssaSubscripts, 0);
- std::string ssaName = GetSSAName(cUnit, i);
- char* name = static_cast<char*>(NewMem(cUnit, ssaName.length() + 1, true, kAllocDFInfo));
- strncpy(name, ssaName.c_str(), ssaName.length() + 1);
- InsertGrowableList(cUnit, cUnit->ssaStrings, reinterpret_cast<uintptr_t>(name));
+ for (i = 0; i < num_dalvik_reg; i++) {
+ InsertGrowableList(cu, cu->ssa_base_vregs, i);
+ InsertGrowableList(cu, cu->ssa_subscripts, 0);
+ std::string ssa_name = GetSSAName(cu, i);
+ char* name = static_cast<char*>(NewMem(cu, ssa_name.length() + 1, true, kAllocDFInfo));
+ strncpy(name, ssa_name.c_str(), ssa_name.length() + 1);
+ InsertGrowableList(cu, cu->ssa_strings, reinterpret_cast<uintptr_t>(name));
}
/*
* Initialize the DalvikToSSAMap map. There is one entry for each
* Dalvik register, and the SSA names for those are the same.
*/
- cUnit->vRegToSSAMap =
- static_cast<int*>(NewMem(cUnit, sizeof(int) * numDalvikReg, false, kAllocDFInfo));
+ cu->vreg_to_ssa_map =
+ static_cast<int*>(NewMem(cu, sizeof(int) * num_dalvik_reg, false, kAllocDFInfo));
/* Keep track of the highest def for each Dalvik reg */
- cUnit->SSALastDefs =
- static_cast<int*>(NewMem(cUnit, sizeof(int) * numDalvikReg, false, kAllocDFInfo));
+ cu->ssa_last_defs =
+ static_cast<int*>(NewMem(cu, sizeof(int) * num_dalvik_reg, false, kAllocDFInfo));
- for (i = 0; i < numDalvikReg; i++) {
- cUnit->vRegToSSAMap[i] = i;
- cUnit->SSALastDefs[i] = 0;
+ for (i = 0; i < num_dalvik_reg; i++) {
+ cu->vreg_to_ssa_map[i] = i;
+ cu->ssa_last_defs[i] = 0;
}
/* Add ssa reg for Method* */
- cUnit->methodSReg = AddNewSReg(cUnit, SSA_METHOD_BASEREG);
+ cu->method_sreg = AddNewSReg(cu, SSA_METHOD_BASEREG);
/*
* Allocate the BasicBlockDataFlow structure for the entry and code blocks
*/
GrowableListIterator iterator;
- GrowableListIteratorInit(&cUnit->blockList, &iterator);
+ GrowableListIteratorInit(&cu->block_list, &iterator);
while (true) {
BasicBlock* bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iterator));
if (bb == NULL) break;
if (bb->hidden == true) continue;
- if (bb->blockType == kDalvikByteCode ||
- bb->blockType == kEntryBlock ||
- bb->blockType == kExitBlock) {
- bb->dataFlowInfo = static_cast<BasicBlockDataFlow*>(NewMem(cUnit, sizeof(BasicBlockDataFlow),
+ if (bb->block_type == kDalvikByteCode ||
+ bb->block_type == kEntryBlock ||
+ bb->block_type == kExitBlock) {
+ bb->data_flow_info = static_cast<BasicBlockDataFlow*>(NewMem(cu, sizeof(BasicBlockDataFlow),
true, kAllocDFInfo));
}
}
}
/* Clear the visited flag for each BB */
-bool ClearVisitedFlag(struct CompilationUnit* cUnit, struct BasicBlock* bb)
+bool ClearVisitedFlag(struct CompilationUnit* cu, struct BasicBlock* bb)
{
bb->visited = false;
return true;
}
-void DataFlowAnalysisDispatcher(CompilationUnit* cUnit,
+void DataFlowAnalysisDispatcher(CompilationUnit* cu,
bool (*func)(CompilationUnit*, BasicBlock*),
- DataFlowAnalysisMode dfaMode,
- bool isIterative)
+ DataFlowAnalysisMode dfa_mode,
+ bool is_iterative)
{
bool change = true;
while (change) {
change = false;
- switch (dfaMode) {
+ switch (dfa_mode) {
/* Scan all blocks and perform the operations specified in func */
case kAllNodes:
{
GrowableListIterator iterator;
- GrowableListIteratorInit(&cUnit->blockList, &iterator);
+ GrowableListIteratorInit(&cu->block_list, &iterator);
while (true) {
BasicBlock* bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iterator));
if (bb == NULL) break;
if (bb->hidden == true) continue;
- change |= (*func)(cUnit, bb);
+ change |= (*func)(cu, bb);
}
}
break;
/* Scan reachable blocks and perform the ops specified in func. */
case kReachableNodes:
{
- int numReachableBlocks = cUnit->numReachableBlocks;
+ int num_reachable_blocks = cu->num_reachable_blocks;
int idx;
- const GrowableList *blockList = &cUnit->blockList;
+ const GrowableList *block_list = &cu->block_list;
- for (idx = 0; idx < numReachableBlocks; idx++) {
- int blockIdx = cUnit->dfsOrder.elemList[idx];
+ for (idx = 0; idx < num_reachable_blocks; idx++) {
+ int block_idx = cu->dfs_order.elem_list[idx];
BasicBlock* bb =
- reinterpret_cast<BasicBlock*>( GrowableListGetElement(blockList, blockIdx));
- change |= (*func)(cUnit, bb);
+ reinterpret_cast<BasicBlock*>( GrowableListGetElement(block_list, block_idx));
+ change |= (*func)(cu, bb);
}
}
break;
@@ -1582,92 +1582,92 @@
/* Scan reachable blocks by pre-order dfs and invoke func on each. */
case kPreOrderDFSTraversal:
{
- int numReachableBlocks = cUnit->numReachableBlocks;
+ int num_reachable_blocks = cu->num_reachable_blocks;
int idx;
- const GrowableList *blockList = &cUnit->blockList;
+ const GrowableList *block_list = &cu->block_list;
- for (idx = 0; idx < numReachableBlocks; idx++) {
- int dfsIdx = cUnit->dfsOrder.elemList[idx];
+ for (idx = 0; idx < num_reachable_blocks; idx++) {
+ int dfs_idx = cu->dfs_order.elem_list[idx];
BasicBlock* bb =
- reinterpret_cast<BasicBlock*>(GrowableListGetElement(blockList, dfsIdx));
- change |= (*func)(cUnit, bb);
+ reinterpret_cast<BasicBlock*>(GrowableListGetElement(block_list, dfs_idx));
+ change |= (*func)(cu, bb);
}
}
break;
/* Scan reachable blocks post-order dfs and invoke func on each. */
case kPostOrderDFSTraversal:
{
- int numReachableBlocks = cUnit->numReachableBlocks;
+ int num_reachable_blocks = cu->num_reachable_blocks;
int idx;
- const GrowableList *blockList = &cUnit->blockList;
+ const GrowableList *block_list = &cu->block_list;
- for (idx = numReachableBlocks - 1; idx >= 0; idx--) {
- int dfsIdx = cUnit->dfsOrder.elemList[idx];
+ for (idx = num_reachable_blocks - 1; idx >= 0; idx--) {
+ int dfs_idx = cu->dfs_order.elem_list[idx];
BasicBlock* bb =
- reinterpret_cast<BasicBlock *>( GrowableListGetElement(blockList, dfsIdx));
- change |= (*func)(cUnit, bb);
+ reinterpret_cast<BasicBlock *>( GrowableListGetElement(block_list, dfs_idx));
+ change |= (*func)(cu, bb);
}
}
break;
/* Scan reachable post-order dom tree and invoke func on each. */
case kPostOrderDOMTraversal:
{
- int numReachableBlocks = cUnit->numReachableBlocks;
+ int num_reachable_blocks = cu->num_reachable_blocks;
int idx;
- const GrowableList *blockList = &cUnit->blockList;
+ const GrowableList *block_list = &cu->block_list;
- for (idx = 0; idx < numReachableBlocks; idx++) {
- int domIdx = cUnit->domPostOrderTraversal.elemList[idx];
+ for (idx = 0; idx < num_reachable_blocks; idx++) {
+ int dom_idx = cu->dom_post_order_traversal.elem_list[idx];
BasicBlock* bb =
- reinterpret_cast<BasicBlock*>( GrowableListGetElement(blockList, domIdx));
- change |= (*func)(cUnit, bb);
+ reinterpret_cast<BasicBlock*>(GrowableListGetElement(block_list, dom_idx));
+ change |= (*func)(cu, bb);
}
}
break;
/* Scan reachable blocks reverse post-order dfs, invoke func on each */
case kReversePostOrderTraversal:
{
- int numReachableBlocks = cUnit->numReachableBlocks;
+ int num_reachable_blocks = cu->num_reachable_blocks;
int idx;
- const GrowableList *blockList = &cUnit->blockList;
+ const GrowableList *block_list = &cu->block_list;
- for (idx = numReachableBlocks - 1; idx >= 0; idx--) {
- int revIdx = cUnit->dfsPostOrder.elemList[idx];
+ for (idx = num_reachable_blocks - 1; idx >= 0; idx--) {
+ int rev_idx = cu->dfs_post_order.elem_list[idx];
BasicBlock* bb =
- reinterpret_cast<BasicBlock*>(GrowableListGetElement(blockList, revIdx));
- change |= (*func)(cUnit, bb);
+ reinterpret_cast<BasicBlock*>(GrowableListGetElement(block_list, rev_idx));
+ change |= (*func)(cu, bb);
}
}
break;
default:
- LOG(FATAL) << "Unknown traversal mode: " << dfaMode;
+ LOG(FATAL) << "Unknown traversal mode: " << dfa_mode;
}
- /* If isIterative is false, exit the loop after the first iteration */
- change &= isIterative;
+ /* If is_iterative is false, exit the loop after the first iteration */
+ change &= is_iterative;
}
}
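The dispatcher reduces to a small fixed-point driver: apply a per-block function under some traversal order and repeat until nothing changes. A minimal sketch, assuming stand-in Node/Unit types and a plain vector in place of the GrowableList plumbing (kAllNodes order only):

#include <functional>
#include <vector>

struct Node { int id; bool hidden; };
struct Unit { std::vector<Node> blocks; };

// Re-run 'fn' over all blocks until no call reports a change; a single pass
// if 'iterative' is false, mirroring "change &= is_iterative" above.
void Dispatch(Unit* u, const std::function<bool(Unit*, Node*)>& fn,
              bool iterative) {
  bool change = true;
  while (change) {
    change = false;
    for (Node& n : u->blocks) {
      if (n.hidden) continue;
      change |= fn(u, &n);
    }
    change &= iterative;
  }
}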
/* Advance to next strictly dominated MIR node in an extended basic block */
-static MIR* AdvanceMIR(CompilationUnit* cUnit, BasicBlock** pBb, MIR* mir,
- ArenaBitVector* bv, bool clearMark) {
- BasicBlock* bb = *pBb;
+static MIR* AdvanceMIR(CompilationUnit* cu, BasicBlock** p_bb, MIR* mir,
+ ArenaBitVector* bv, bool clear_mark) {
+ BasicBlock* bb = *p_bb;
if (mir != NULL) {
mir = mir->next;
if (mir == NULL) {
- bb = bb->fallThrough;
- if ((bb == NULL) || bb->predecessors->numUsed != 1) {
+ bb = bb->fall_through;
+ if ((bb == NULL) || bb->predecessors->num_used != 1) {
mir = NULL;
} else {
if (bv) {
- SetBit(cUnit, bv, bb->id);
+ SetBit(cu, bv, bb->id);
}
- *pBb = bb;
- mir = bb->firstMIRInsn;
+ *p_bb = bb;
+ mir = bb->first_mir_insn;
}
}
}
- if (mir && clearMark) {
- mir->optimizationFlags &= ~MIR_MARK;
+ if (mir && clear_mark) {
+ mir->optimization_flags &= ~MIR_MARK;
}
return mir;
}
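AdvanceMIR is what defines an "extended" basic block here: the walk may cross a fall-through edge only when the successor has exactly one predecessor, so facts proven so far still hold on entry. A sketch under hypothetical simplified types:

// Illustrative types only; the real MIR/BasicBlock carry far more state.
struct Mir { Mir* next; };
struct Block { Mir* first; Block* fall_through; int num_preds; };

Mir* Advance(Block** p_bb, Mir* mir) {
  if (mir == nullptr) return nullptr;
  mir = mir->next;
  if (mir == nullptr) {
    Block* bb = (*p_bb)->fall_through;
    if (bb == nullptr || bb->num_preds != 1) return nullptr;
    *p_bb = bb;              // extend the walk into the sole-entry successor
    mir = bb->first;
  }
  return mir;
}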
@@ -1679,10 +1679,10 @@
* opcodes or incoming arcs. However, if the result of the invoke is not
* used, a move-result may not be present.
*/
-MIR* FindMoveResult(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir)
+MIR* FindMoveResult(CompilationUnit* cu, BasicBlock* bb, MIR* mir)
{
BasicBlock* tbb = bb;
- mir = AdvanceMIR(cUnit, &tbb, mir, NULL, false);
+ mir = AdvanceMIR(cu, &tbb, mir, NULL, false);
while (mir != NULL) {
int opcode = mir->dalvikInsn.opcode;
if ((mir->dalvikInsn.opcode == Instruction::MOVE_RESULT) ||
@@ -1694,26 +1694,26 @@
if (opcode < kNumPackedOpcodes) {
mir = NULL;
} else {
- mir = AdvanceMIR(cUnit, &tbb, mir, NULL, false);
+ mir = AdvanceMIR(cu, &tbb, mir, NULL, false);
}
}
return mir;
}
-static void SquashDupRangeChecks(CompilationUnit* cUnit, BasicBlock** pBp, MIR* mir,
- int arraySreg, int indexSreg)
+static void SquashDupRangeChecks(CompilationUnit* cu, BasicBlock** p_bp, MIR* mir,
+ int array_sreg, int index_sreg)
{
while (true) {
- mir = AdvanceMIR(cUnit, pBp, mir, NULL, false);
+ mir = AdvanceMIR(cu, p_bp, mir, NULL, false);
if (!mir) {
break;
}
- if ((mir->ssaRep == NULL) ||
- (mir->optimizationFlags & MIR_IGNORE_RANGE_CHECK)) {
+ if ((mir->ssa_rep == NULL) ||
+ (mir->optimization_flags & MIR_IGNORE_RANGE_CHECK)) {
continue;
}
- int checkArray = INVALID_SREG;
- int checkIndex = INVALID_SREG;
+ int check_array = INVALID_SREG;
+ int check_index = INVALID_SREG;
switch (mir->dalvikInsn.opcode) {
case Instruction::AGET:
case Instruction::AGET_OBJECT:
@@ -1722,8 +1722,8 @@
case Instruction::AGET_CHAR:
case Instruction::AGET_SHORT:
case Instruction::AGET_WIDE:
- checkArray = mir->ssaRep->uses[0];
- checkIndex = mir->ssaRep->uses[1];
+ check_array = mir->ssa_rep->uses[0];
+ check_index = mir->ssa_rep->uses[1];
break;
case Instruction::APUT:
case Instruction::APUT_OBJECT:
@@ -1731,33 +1731,33 @@
case Instruction::APUT_CHAR:
case Instruction::APUT_BYTE:
case Instruction::APUT_BOOLEAN:
- checkArray = mir->ssaRep->uses[1];
- checkIndex = mir->ssaRep->uses[2];
+ check_array = mir->ssa_rep->uses[1];
+ check_index = mir->ssa_rep->uses[2];
break;
case Instruction::APUT_WIDE:
- checkArray = mir->ssaRep->uses[2];
- checkIndex = mir->ssaRep->uses[3];
+ check_array = mir->ssa_rep->uses[2];
+ check_index = mir->ssa_rep->uses[3];
+ break;
default:
break;
}
- if (checkArray == INVALID_SREG) {
+ if (check_array == INVALID_SREG) {
continue;
}
- if ((arraySreg == checkArray) && (indexSreg == checkIndex)) {
- if (cUnit->printMe) {
+ if ((array_sreg == check_array) && (index_sreg == check_index)) {
+ if (cu->verbose) {
LOG(INFO) << "Squashing range check @ 0x" << std::hex << mir->offset;
}
- mir->optimizationFlags |= MIR_IGNORE_RANGE_CHECK;
+ mir->optimization_flags |= MIR_IGNORE_RANGE_CHECK;
}
}
}
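The elimination condition is purely SSA-based: a later array access naming the same array and index s_regs is dominated by this bounds check. A minimal sketch with a hypothetical flags field standing in for optimization_flags:

constexpr unsigned kIgnoreRangeCheck = 1u << 1;  // illustrative bit value
struct Access { int array_sreg; int index_sreg; unsigned opt_flags; };

void MarkIfRedundant(Access* later, int array_sreg, int index_sreg) {
  if (later->array_sreg == array_sreg && later->index_sreg == index_sreg) {
    later->opt_flags |= kIgnoreRangeCheck;  // same s_regs: check already done
  }
}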
/* Do some MIR-level basic block optimizations */
-static bool BasicBlockOpt(CompilationUnit* cUnit, BasicBlock* bb)
+static bool BasicBlockOpt(CompilationUnit* cu, BasicBlock* bb)
{
- int numTemps = 0;
+ int num_temps = 0;
- for (MIR* mir = bb->firstMIRInsn; mir; mir = mir->next) {
+ for (MIR* mir = bb->first_mir_insn; mir; mir = mir->next) {
// Look for interesting opcodes, skip otherwise
Instruction::Code opcode = mir->dalvikInsn.opcode;
switch (opcode) {
@@ -1768,11 +1768,11 @@
case Instruction::AGET_CHAR:
case Instruction::AGET_SHORT:
case Instruction::AGET_WIDE:
- if (!(mir->optimizationFlags & MIR_IGNORE_RANGE_CHECK)) {
- int arrSreg = mir->ssaRep->uses[0];
- int idxSreg = mir->ssaRep->uses[1];
+ if (!(mir->optimization_flags & MIR_IGNORE_RANGE_CHECK)) {
+ int arr_sreg = mir->ssa_rep->uses[0];
+ int idx_sreg = mir->ssa_rep->uses[1];
BasicBlock* tbb = bb;
- SquashDupRangeChecks(cUnit, &tbb, mir, arrSreg, idxSreg);
+ SquashDupRangeChecks(cu, &tbb, mir, arr_sreg, idx_sreg);
}
break;
case Instruction::APUT:
@@ -1782,12 +1782,12 @@
case Instruction::APUT_BYTE:
case Instruction::APUT_BOOLEAN:
case Instruction::APUT_WIDE:
- if (!(mir->optimizationFlags & MIR_IGNORE_RANGE_CHECK)) {
+ if (!(mir->optimization_flags & MIR_IGNORE_RANGE_CHECK)) {
int start = (opcode == Instruction::APUT_WIDE) ? 2 : 1;
- int arrSreg = mir->ssaRep->uses[start];
- int idxSreg = mir->ssaRep->uses[start + 1];
+ int arr_sreg = mir->ssa_rep->uses[start];
+ int idx_sreg = mir->ssa_rep->uses[start + 1];
BasicBlock* tbb = bb;
- SquashDupRangeChecks(cUnit, &tbb, mir, arrSreg, idxSreg);
+ SquashDupRangeChecks(cu, &tbb, mir, arr_sreg, idx_sreg);
}
break;
case Instruction::CMPL_FLOAT:
@@ -1795,15 +1795,15 @@
case Instruction::CMPG_FLOAT:
case Instruction::CMPG_DOUBLE:
case Instruction::CMP_LONG:
- if (cUnit->genBitcode) {
+ if (cu->gen_bitcode) {
// Bitcode doesn't allow this optimization.
break;
}
if (mir->next != NULL) {
- MIR* mirNext = mir->next;
- Instruction::Code brOpcode = mirNext->dalvikInsn.opcode;
+ MIR* mir_next = mir->next;
+ Instruction::Code br_opcode = mir_next->dalvikInsn.opcode;
ConditionCode ccode = kCondNv;
- switch(brOpcode) {
+ switch (br_opcode) {
case Instruction::IF_EQZ:
ccode = kCondEq;
break;
@@ -1827,39 +1827,39 @@
}
// Make sure result of cmp is used by next insn and nowhere else
if ((ccode != kCondNv) &&
- (mir->ssaRep->defs[0] == mirNext->ssaRep->uses[0]) &&
- (GetSSAUseCount(cUnit, mir->ssaRep->defs[0]) == 1)) {
- mirNext->dalvikInsn.arg[0] = ccode;
+ (mir->ssa_rep->defs[0] == mir_next->ssa_rep->uses[0]) &&
+ (GetSSAUseCount(cu, mir->ssa_rep->defs[0]) == 1)) {
+ mir_next->dalvikInsn.arg[0] = ccode;
switch (opcode) {
case Instruction::CMPL_FLOAT:
- mirNext->dalvikInsn.opcode =
+ mir_next->dalvikInsn.opcode =
static_cast<Instruction::Code>(kMirOpFusedCmplFloat);
break;
case Instruction::CMPL_DOUBLE:
- mirNext->dalvikInsn.opcode =
+ mir_next->dalvikInsn.opcode =
static_cast<Instruction::Code>(kMirOpFusedCmplDouble);
break;
case Instruction::CMPG_FLOAT:
- mirNext->dalvikInsn.opcode =
+ mir_next->dalvikInsn.opcode =
static_cast<Instruction::Code>(kMirOpFusedCmpgFloat);
break;
case Instruction::CMPG_DOUBLE:
- mirNext->dalvikInsn.opcode =
+ mir_next->dalvikInsn.opcode =
static_cast<Instruction::Code>(kMirOpFusedCmpgDouble);
break;
case Instruction::CMP_LONG:
- mirNext->dalvikInsn.opcode =
+ mir_next->dalvikInsn.opcode =
static_cast<Instruction::Code>(kMirOpFusedCmpLong);
break;
default: LOG(ERROR) << "Unexpected opcode: " << opcode;
}
mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
- mirNext->ssaRep->numUses = mir->ssaRep->numUses;
- mirNext->ssaRep->uses = mir->ssaRep->uses;
- mirNext->ssaRep->fpUse = mir->ssaRep->fpUse;
- mirNext->ssaRep->numDefs = 0;
- mir->ssaRep->numUses = 0;
- mir->ssaRep->numDefs = 0;
+ mir_next->ssa_rep->num_uses = mir->ssa_rep->num_uses;
+ mir_next->ssa_rep->uses = mir->ssa_rep->uses;
+ mir_next->ssa_rep->fp_use = mir->ssa_rep->fp_use;
+ mir_next->ssa_rep->num_defs = 0;
+ mir->ssa_rep->num_uses = 0;
+ mir->ssa_rep->num_defs = 0;
}
}
break;
@@ -1868,40 +1868,40 @@
}
}
- if (numTemps > cUnit->numCompilerTemps) {
- cUnit->numCompilerTemps = numTemps;
+ if (num_temps > cu->num_compiler_temps) {
+ cu->num_compiler_temps = num_temps;
}
return true;
}
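The fusion above is legal only because SSA makes the single-consumer test cheap: the cmp's one def must be the branch's first use, with a use count of exactly one. A condensed sketch with illustrative opcodes:

enum Op { kCmpLong, kIfLtz, kFusedCmpLong, kNop };
struct Insn { Op op; int def0; int use0; };

// 'def_use_count' stands in for GetSSAUseCount on the cmp's result.
bool TryFuse(Insn* cmp, Insn* branch, int def_use_count) {
  if (cmp->op != kCmpLong || branch->op != kIfLtz) return false;
  if (branch->use0 != cmp->def0 || def_use_count != 1) return false;
  branch->op = kFusedCmpLong;  // branch consumes the cmp operands directly
  cmp->op = kNop;              // cmp result is dead; NOP it out
  return true;
}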
-static bool NullCheckEliminationInit(struct CompilationUnit* cUnit, struct BasicBlock* bb)
+static bool NullCheckEliminationInit(struct CompilationUnit* cu, struct BasicBlock* bb)
{
- if (bb->dataFlowInfo == NULL) return false;
- bb->dataFlowInfo->endingNullCheckV =
- AllocBitVector(cUnit, cUnit->numSSARegs, false, kBitMapNullCheck);
- ClearAllBits(bb->dataFlowInfo->endingNullCheckV);
+ if (bb->data_flow_info == NULL) return false;
+ bb->data_flow_info->ending_null_check_v =
+ AllocBitVector(cu, cu->num_ssa_regs, false, kBitMapNullCheck);
+ ClearAllBits(bb->data_flow_info->ending_null_check_v);
return true;
}
/* Collect stats on number of checks removed */
-static bool CountChecks( struct CompilationUnit* cUnit, struct BasicBlock* bb)
+static bool CountChecks(struct CompilationUnit* cu, struct BasicBlock* bb)
{
- if (bb->dataFlowInfo == NULL) return false;
- for (MIR* mir = bb->firstMIRInsn; mir; mir = mir->next) {
- if (mir->ssaRep == NULL) {
+ if (bb->data_flow_info == NULL) return false;
+ for (MIR* mir = bb->first_mir_insn; mir; mir = mir->next) {
+ if (mir->ssa_rep == NULL) {
continue;
}
- int dfAttributes = oatDataFlowAttributes[mir->dalvikInsn.opcode];
- if (dfAttributes & DF_HAS_NULL_CHKS) {
- cUnit->checkstats->nullChecks++;
- if (mir->optimizationFlags & MIR_IGNORE_NULL_CHECK) {
- cUnit->checkstats->nullChecksEliminated++;
+ int df_attributes = oat_data_flow_attributes[mir->dalvikInsn.opcode];
+ if (df_attributes & DF_HAS_NULL_CHKS) {
+ cu->checkstats->null_checks++;
+ if (mir->optimization_flags & MIR_IGNORE_NULL_CHECK) {
+ cu->checkstats->null_checks_eliminated++;
}
}
- if (dfAttributes & DF_HAS_RANGE_CHKS) {
- cUnit->checkstats->rangeChecks++;
- if (mir->optimizationFlags & MIR_IGNORE_RANGE_CHECK) {
- cUnit->checkstats->rangeChecksEliminated++;
+ if (df_attributes & DF_HAS_RANGE_CHKS) {
+ cu->checkstats->range_checks++;
+ if (mir->optimization_flags & MIR_IGNORE_RANGE_CHECK) {
+ cu->checkstats->range_checks_eliminated++;
}
}
}
@@ -1909,27 +1909,27 @@
}
/* Try to make common case the fallthrough path */
-static bool LayoutBlocks(struct CompilationUnit* cUnit, struct BasicBlock* bb)
+static bool LayoutBlocks(struct CompilationUnit* cu, struct BasicBlock* bb)
{
// TODO: For now, just looking for direct throws. Consider generalizing for profile feedback
- if (!bb->explicitThrow) {
+ if (!bb->explicit_throw) {
return false;
}
BasicBlock* walker = bb;
while (true) {
// Check termination conditions
- if ((walker->blockType == kEntryBlock) || (walker->predecessors->numUsed != 1)) {
+ if ((walker->block_type == kEntryBlock) || (walker->predecessors->num_used != 1)) {
break;
}
BasicBlock* prev = GET_ELEM_N(walker->predecessors, BasicBlock*, 0);
- if (prev->conditionalBranch) {
- if (prev->fallThrough == walker) {
+ if (prev->conditional_branch) {
+ if (prev->fall_through == walker) {
// Already done - return
break;
}
DCHECK_EQ(walker, prev->taken);
// Got one. Flip it and exit
- Instruction::Code opcode = prev->lastMIRInsn->dalvikInsn.opcode;
+ Instruction::Code opcode = prev->last_mir_insn->dalvikInsn.opcode;
switch (opcode) {
case Instruction::IF_EQ: opcode = Instruction::IF_NE; break;
case Instruction::IF_NE: opcode = Instruction::IF_EQ; break;
@@ -1945,10 +1945,10 @@
case Instruction::IF_LEZ: opcode = Instruction::IF_GTZ; break;
default: LOG(FATAL) << "Unexpected opcode " << opcode;
}
- prev->lastMIRInsn->dalvikInsn.opcode = opcode;
- BasicBlock* tBB = prev->taken;
- prev->taken = prev->fallThrough;
- prev->fallThrough = tBB;
+ prev->last_mir_insn->dalvikInsn.opcode = opcode;
+ BasicBlock* t_bb = prev->taken;
+ prev->taken = prev->fall_through;
+ prev->fall_through = t_bb;
break;
}
walker = prev;
@@ -1957,63 +1957,63 @@
}
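Mechanically, the layout fix is a condition negation plus an edge swap, keeping the explicit-throw path off the fall-through lane. A sketch with a two-opcode stand-in for the full table above:

#include <utility>

enum Cond { kIfEq, kIfNe };
struct BB { Cond last_cond; BB* taken; BB* fall_through; };

void FlipBranch(BB* prev) {
  prev->last_cond = (prev->last_cond == kIfEq) ? kIfNe : kIfEq;
  std::swap(prev->taken, prev->fall_through);  // common case falls through
}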
/* Combine any basic blocks terminated by instructions that we now know can't throw */
-static bool CombineBlocks(struct CompilationUnit* cUnit, struct BasicBlock* bb)
+static bool CombineBlocks(struct CompilationUnit* cu, struct BasicBlock* bb)
{
// Loop here to allow combining a sequence of blocks
while (true) {
// Check termination conditions
- if ((bb->firstMIRInsn == NULL)
- || (bb->dataFlowInfo == NULL)
- || (bb->blockType == kExceptionHandling)
- || (bb->blockType == kExitBlock)
- || (bb->blockType == kDead)
- || ((bb->taken == NULL) || (bb->taken->blockType != kExceptionHandling))
- || (bb->successorBlockList.blockListType != kNotUsed)
- || (static_cast<int>(bb->lastMIRInsn->dalvikInsn.opcode) != kMirOpCheck)) {
+ if ((bb->first_mir_insn == NULL)
+ || (bb->data_flow_info == NULL)
+ || (bb->block_type == kExceptionHandling)
+ || (bb->block_type == kExitBlock)
+ || (bb->block_type == kDead)
+ || ((bb->taken == NULL) || (bb->taken->block_type != kExceptionHandling))
+ || (bb->successor_block_list.block_list_type != kNotUsed)
+ || (static_cast<int>(bb->last_mir_insn->dalvikInsn.opcode) != kMirOpCheck)) {
break;
}
// Test the kMirOpCheck instruction
- MIR* mir = bb->lastMIRInsn;
+ MIR* mir = bb->last_mir_insn;
// Grab the attributes from the paired opcode
- MIR* throwInsn = mir->meta.throwInsn;
- int dfAttributes = oatDataFlowAttributes[throwInsn->dalvikInsn.opcode];
- bool canCombine = true;
- if (dfAttributes & DF_HAS_NULL_CHKS) {
- canCombine &= ((throwInsn->optimizationFlags & MIR_IGNORE_NULL_CHECK) != 0);
+ MIR* throw_insn = mir->meta.throw_insn;
+ int df_attributes = oat_data_flow_attributes[throw_insn->dalvikInsn.opcode];
+ bool can_combine = true;
+ if (df_attributes & DF_HAS_NULL_CHKS) {
+ can_combine &= ((throw_insn->optimization_flags & MIR_IGNORE_NULL_CHECK) != 0);
}
- if (dfAttributes & DF_HAS_RANGE_CHKS) {
- canCombine &= ((throwInsn->optimizationFlags & MIR_IGNORE_RANGE_CHECK) != 0);
+ if (df_attributes & DF_HAS_RANGE_CHKS) {
+ can_combine &= ((throw_insn->optimization_flags & MIR_IGNORE_RANGE_CHECK) != 0);
}
- if (!canCombine) {
+ if (!can_combine) {
break;
}
// OK - got one. Combine
- BasicBlock* bbNext = bb->fallThrough;
- DCHECK(!bbNext->catchEntry);
- DCHECK_EQ(bbNext->predecessors->numUsed, 1U);
- MIR* tMir = bb->lastMIRInsn->prev;
+ BasicBlock* bb_next = bb->fall_through;
+ DCHECK(!bb_next->catch_entry);
+ DCHECK_EQ(bb_next->predecessors->num_used, 1U);
+ MIR* t_mir = bb->last_mir_insn->prev;
// Overwrite the kMirOpCheck insn with the paired opcode
- DCHECK_EQ(bbNext->firstMIRInsn, throwInsn);
- *bb->lastMIRInsn = *throwInsn;
- bb->lastMIRInsn->prev = tMir;
+ DCHECK_EQ(bb_next->first_mir_insn, throw_insn);
+ *bb->last_mir_insn = *throw_insn;
+ bb->last_mir_insn->prev = t_mir;
// Use the successor info from the next block
- bb->successorBlockList = bbNext->successorBlockList;
+ bb->successor_block_list = bb_next->successor_block_list;
// Use the ending block linkage from the next block
- bb->fallThrough = bbNext->fallThrough;
- bb->taken->blockType = kDead; // Kill the unused exception block
- bb->taken = bbNext->taken;
+ bb->fall_through = bb_next->fall_through;
+ bb->taken->block_type = kDead; // Kill the unused exception block
+ bb->taken = bb_next->taken;
// Include the rest of the instructions
- bb->lastMIRInsn = bbNext->lastMIRInsn;
+ bb->last_mir_insn = bb_next->last_mir_insn;
/*
* NOTE: we aren't updating all dataflow info here. Should either make sure this pass
- * happens after uses of iDominated, domFrontier or update the dataflow info here.
+ * happens after uses of i_dominated and dom_frontier, or update the dataflow info here.
*/
- // Kill bbNext and remap now-dead id to parent
- bbNext->blockType = kDead;
- cUnit->blockIdMap.Overwrite(bbNext->id, bb->id);
+ // Kill bb_next and remap now-dead id to parent
+ bb_next->block_type = kDead;
+ cu->block_id_map.Overwrite(bb_next->id, bb->id);
// Now, loop back and see if we can keep going
}
@@ -2021,70 +2021,70 @@
}
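The splice itself is ordinary linked-list surgery; what earns it is the proof above that both guard conditions are already redundant. A sketch that omits the predecessor-list and block-id-remap bookkeeping, under hypothetical minimal types:

struct Ins { Ins* prev; Ins* next; };
struct Blk { Ins* first; Ins* last; Blk* taken; Blk* fall_through; bool dead; };

void MergeWithFallThrough(Blk* bb) {
  Blk* next = bb->fall_through;
  bb->last->next = next->first;   // join the instruction lists
  next->first->prev = bb->last;
  bb->last = next->last;
  bb->taken->dead = true;         // exception edge can no longer be taken
  bb->taken = next->taken;
  bb->fall_through = next->fall_through;
  next->dead = true;              // absorbed block is retired
}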
/* Eliminate unnecessary null checks for a basic block. */
-static bool EliminateNullChecks( struct CompilationUnit* cUnit, struct BasicBlock* bb)
+static bool EliminateNullChecks(struct CompilationUnit* cu, struct BasicBlock* bb)
{
- if (bb->dataFlowInfo == NULL) return false;
+ if (bb->data_flow_info == NULL) return false;
/*
* Set initial state. Be conservative with catch
* blocks and start with no assumptions about null check
* status (except for "this").
*/
- if ((bb->blockType == kEntryBlock) | bb->catchEntry) {
- ClearAllBits(cUnit->tempSSARegisterV);
- if ((cUnit->access_flags & kAccStatic) == 0) {
+ if ((bb->block_type == kEntryBlock) || bb->catch_entry) {
+ ClearAllBits(cu->temp_ssa_register_v);
+ if ((cu->access_flags & kAccStatic) == 0) {
// If non-static method, mark "this" as non-null
- int thisReg = cUnit->numDalvikRegisters - cUnit->numIns;
- SetBit(cUnit, cUnit->tempSSARegisterV, thisReg);
+ int this_reg = cu->num_dalvik_registers - cu->num_ins;
+ SetBit(cu, cu->temp_ssa_register_v, this_reg);
}
} else {
// Starting state is the intersection of all incoming arcs
GrowableListIterator iter;
GrowableListIteratorInit(bb->predecessors, &iter);
- BasicBlock* predBB = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iter));
- DCHECK(predBB != NULL);
- CopyBitVector(cUnit->tempSSARegisterV,
- predBB->dataFlowInfo->endingNullCheckV);
+ BasicBlock* pred_bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iter));
+ DCHECK(pred_bb != NULL);
+ CopyBitVector(cu->temp_ssa_register_v,
+ pred_bb->data_flow_info->ending_null_check_v);
while (true) {
- predBB = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iter));
- if (!predBB) break;
- if ((predBB->dataFlowInfo == NULL) ||
- (predBB->dataFlowInfo->endingNullCheckV == NULL)) {
+ pred_bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iter));
+ if (!pred_bb) break;
+ if ((pred_bb->data_flow_info == NULL) ||
+ (pred_bb->data_flow_info->ending_null_check_v == NULL)) {
continue;
}
- IntersectBitVectors(cUnit->tempSSARegisterV,
- cUnit->tempSSARegisterV,
- predBB->dataFlowInfo->endingNullCheckV);
+ IntersectBitVectors(cu->temp_ssa_register_v,
+ cu->temp_ssa_register_v,
+ pred_bb->data_flow_info->ending_null_check_v);
}
}
// Walk through the instruction in the block, updating as necessary
- for (MIR* mir = bb->firstMIRInsn; mir; mir = mir->next) {
- if (mir->ssaRep == NULL) {
+ for (MIR* mir = bb->first_mir_insn; mir; mir = mir->next) {
+ if (mir->ssa_rep == NULL) {
continue;
}
- int dfAttributes = oatDataFlowAttributes[mir->dalvikInsn.opcode];
+ int df_attributes = oat_data_flow_attributes[mir->dalvikInsn.opcode];
// Mark target of NEW* as non-null
- if (dfAttributes & DF_NON_NULL_DST) {
- SetBit(cUnit, cUnit->tempSSARegisterV, mir->ssaRep->defs[0]);
+ if (df_attributes & DF_NON_NULL_DST) {
+ SetBit(cu, cu->temp_ssa_register_v, mir->ssa_rep->defs[0]);
}
// Mark non-null returns from invoke-style NEW*
- if (dfAttributes & DF_NON_NULL_RET) {
- MIR* nextMir = mir->next;
+ if (df_attributes & DF_NON_NULL_RET) {
+ MIR* next_mir = mir->next;
// Next should be a MOVE_RESULT_OBJECT
- if (nextMir &&
- nextMir->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) {
+ if (next_mir &&
+ next_mir->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) {
// Mark as null checked
- SetBit(cUnit, cUnit->tempSSARegisterV, nextMir->ssaRep->defs[0]);
+ SetBit(cu, cu->temp_ssa_register_v, next_mir->ssa_rep->defs[0]);
} else {
- if (nextMir) {
- LOG(WARNING) << "Unexpected opcode following new: " << nextMir->dalvikInsn.opcode;
- } else if (bb->fallThrough) {
+ if (next_mir) {
+ LOG(WARNING) << "Unexpected opcode following new: " << next_mir->dalvikInsn.opcode;
+ } else if (bb->fall_through) {
// Look in next basic block
- struct BasicBlock* nextBB = bb->fallThrough;
- for (MIR* tmir = nextBB->firstMIRInsn; tmir;
+ struct BasicBlock* next_bb = bb->fall_through;
+ for (MIR* tmir = next_bb->first_mir_insn; tmir;
tmir = tmir->next) {
if (static_cast<int>(tmir->dalvikInsn.opcode) >= static_cast<int>(kMirOpFirst)) {
continue;
@@ -2092,7 +2092,7 @@
// First non-pseudo should be MOVE_RESULT_OBJECT
if (tmir->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) {
// Mark as null checked
- SetBit(cUnit, cUnit->tempSSARegisterV, tmir->ssaRep->defs[0]);
+ SetBit(cu, cu->temp_ssa_register_v, tmir->ssa_rep->defs[0]);
} else {
LOG(WARNING) << "Unexpected op after new: " << tmir->dalvikInsn.opcode;
}
@@ -2107,247 +2107,247 @@
* Phi pseudo copies. For the latter, nullcheck state is
* the "and" of all the Phi's operands.
*/
- if (dfAttributes & (DF_NULL_TRANSFER_0 | DF_NULL_TRANSFER_N)) {
- int tgtSreg = mir->ssaRep->defs[0];
- int operands = (dfAttributes & DF_NULL_TRANSFER_0) ? 1 :
- mir->ssaRep->numUses;
- bool nullChecked = true;
+ if (df_attributes & (DF_NULL_TRANSFER_0 | DF_NULL_TRANSFER_N)) {
+ int tgt_sreg = mir->ssa_rep->defs[0];
+ int operands = (df_attributes & DF_NULL_TRANSFER_0) ? 1 :
+ mir->ssa_rep->num_uses;
+ bool null_checked = true;
for (int i = 0; i < operands; i++) {
- nullChecked &= IsBitSet(cUnit->tempSSARegisterV,
- mir->ssaRep->uses[i]);
+ null_checked &= IsBitSet(cu->temp_ssa_register_v,
+ mir->ssa_rep->uses[i]);
}
- if (nullChecked) {
- SetBit(cUnit, cUnit->tempSSARegisterV, tgtSreg);
+ if (null_checked) {
+ SetBit(cu, cu->temp_ssa_register_v, tgt_sreg);
}
}
// Already nullchecked?
- if ((dfAttributes & DF_HAS_NULL_CHKS) && !(mir->optimizationFlags & MIR_IGNORE_NULL_CHECK)) {
- int srcIdx;
- if (dfAttributes & DF_NULL_CHK_1) {
- srcIdx = 1;
- } else if (dfAttributes & DF_NULL_CHK_2) {
- srcIdx = 2;
+ if ((df_attributes & DF_HAS_NULL_CHKS) && !(mir->optimization_flags & MIR_IGNORE_NULL_CHECK)) {
+ int src_idx;
+ if (df_attributes & DF_NULL_CHK_1) {
+ src_idx = 1;
+ } else if (df_attributes & DF_NULL_CHK_2) {
+ src_idx = 2;
} else {
- srcIdx = 0;
+ src_idx = 0;
}
- int srcSreg = mir->ssaRep->uses[srcIdx];
- if (IsBitSet(cUnit->tempSSARegisterV, srcSreg)) {
+ int src_sreg = mir->ssa_rep->uses[src_idx];
+ if (IsBitSet(cu->temp_ssa_register_v, src_sreg)) {
// Eliminate the null check
- mir->optimizationFlags |= MIR_IGNORE_NULL_CHECK;
+ mir->optimization_flags |= MIR_IGNORE_NULL_CHECK;
} else {
- // Mark sReg as null-checked
- SetBit(cUnit, cUnit->tempSSARegisterV, srcSreg);
+ // Mark s_reg as null-checked
+ SetBit(cu, cu->temp_ssa_register_v, src_sreg);
}
}
}
// Did anything change?
- bool res = CompareBitVectors(bb->dataFlowInfo->endingNullCheckV,
- cUnit->tempSSARegisterV);
+ bool res = CompareBitVectors(bb->data_flow_info->ending_null_check_v,
+ cu->temp_ssa_register_v);
if (res) {
- CopyBitVector(bb->dataFlowInfo->endingNullCheckV,
- cUnit->tempSSARegisterV);
+ CopyBitVector(bb->data_flow_info->ending_null_check_v,
+ cu->temp_ssa_register_v);
}
return res;
}
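Seen as a textbook forward dataflow problem: the entry fact set is the intersection of all predecessor exit sets, and each instruction either proves its target non-null or spends one null check to establish that fact. A sketch of the two primitive steps, with std::vector<bool> standing in for ArenaBitVector:

#include <vector>

using BitVec = std::vector<bool>;

void IntersectInto(BitVec& dst, const BitVec& src) {
  for (size_t i = 0; i < dst.size(); ++i) dst[i] = dst[i] && src[i];
}

// True if the check on s_reg is redundant; otherwise record that the
// reference is now known non-null for the rest of the block.
bool ElideOrRecord(BitVec& non_null, int s_reg) {
  if (non_null[s_reg]) return true;
  non_null[s_reg] = true;
  return false;
}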
-void NullCheckElimination(CompilationUnit *cUnit)
+void NullCheckElimination(CompilationUnit *cu)
{
- if (!(cUnit->disableOpt & (1 << kNullCheckElimination))) {
- DCHECK(cUnit->tempSSARegisterV != NULL);
- DataFlowAnalysisDispatcher(cUnit, NullCheckEliminationInit, kAllNodes,
- false /* isIterative */);
- DataFlowAnalysisDispatcher(cUnit, EliminateNullChecks,
+ if (!(cu->disable_opt & (1 << kNullCheckElimination))) {
+ DCHECK(cu->temp_ssa_register_v != NULL);
+ DataFlowAnalysisDispatcher(cu, NullCheckEliminationInit, kAllNodes,
+ false /* is_iterative */);
+ DataFlowAnalysisDispatcher(cu, EliminateNullChecks,
kPreOrderDFSTraversal,
- true /* isIterative */);
+ true /* is_iterative */);
}
}
-void BasicBlockCombine(CompilationUnit* cUnit)
+void BasicBlockCombine(CompilationUnit* cu)
{
- DataFlowAnalysisDispatcher(cUnit, CombineBlocks, kPreOrderDFSTraversal, false);
+ DataFlowAnalysisDispatcher(cu, CombineBlocks, kPreOrderDFSTraversal, false);
}
-void CodeLayout(CompilationUnit* cUnit)
+void CodeLayout(CompilationUnit* cu)
{
- DataFlowAnalysisDispatcher(cUnit, LayoutBlocks, kAllNodes, false);
+ DataFlowAnalysisDispatcher(cu, LayoutBlocks, kAllNodes, false);
}
-void DumpCheckStats(CompilationUnit *cUnit)
+void DumpCheckStats(CompilationUnit *cu)
{
Checkstats* stats =
- static_cast<Checkstats*>(NewMem(cUnit, sizeof(Checkstats), true, kAllocDFInfo));
- cUnit->checkstats = stats;
- DataFlowAnalysisDispatcher(cUnit, CountChecks, kAllNodes, false /* isIterative */);
- if (stats->nullChecks > 0) {
- float eliminated = static_cast<float>(stats->nullChecksEliminated);
- float checks = static_cast<float>(stats->nullChecks);
- LOG(INFO) << "Null Checks: " << PrettyMethod(cUnit->method_idx, *cUnit->dex_file) << " "
- << stats->nullChecksEliminated << " of " << stats->nullChecks << " -> "
+ static_cast<Checkstats*>(NewMem(cu, sizeof(Checkstats), true, kAllocDFInfo));
+ cu->checkstats = stats;
+ DataFlowAnalysisDispatcher(cu, CountChecks, kAllNodes, false /* is_iterative */);
+ if (stats->null_checks > 0) {
+ float eliminated = static_cast<float>(stats->null_checks_eliminated);
+ float checks = static_cast<float>(stats->null_checks);
+ LOG(INFO) << "Null Checks: " << PrettyMethod(cu->method_idx, *cu->dex_file) << " "
+ << stats->null_checks_eliminated << " of " << stats->null_checks << " -> "
<< (eliminated/checks) * 100.0 << "%";
}
- if (stats->rangeChecks > 0) {
- float eliminated = static_cast<float>(stats->rangeChecksEliminated);
- float checks = static_cast<float>(stats->rangeChecks);
- LOG(INFO) << "Range Checks: " << PrettyMethod(cUnit->method_idx, *cUnit->dex_file) << " "
- << stats->rangeChecksEliminated << " of " << stats->rangeChecks << " -> "
+ if (stats->range_checks > 0) {
+ float eliminated = static_cast<float>(stats->range_checks_eliminated);
+ float checks = static_cast<float>(stats->range_checks);
+ LOG(INFO) << "Range Checks: " << PrettyMethod(cu->method_idx, *cu->dex_file) << " "
+ << stats->range_checks_eliminated << " of " << stats->range_checks << " -> "
<< (eliminated/checks) * 100.0 << "%";
}
}
-void BasicBlockOptimization(CompilationUnit *cUnit)
+void BasicBlockOptimization(CompilationUnit *cu)
{
- if (!(cUnit->disableOpt & (1 << kBBOpt))) {
- CompilerInitGrowableList(cUnit, &cUnit->compilerTemps, 6, kListMisc);
- DCHECK_EQ(cUnit->numCompilerTemps, 0);
- DataFlowAnalysisDispatcher(cUnit, BasicBlockOpt,
- kAllNodes, false /* isIterative */);
+ if (!(cu->disable_opt & (1 << kBBOpt))) {
+ CompilerInitGrowableList(cu, &cu->compiler_temps, 6, kListMisc);
+ DCHECK_EQ(cu->num_compiler_temps, 0);
+ DataFlowAnalysisDispatcher(cu, BasicBlockOpt,
+ kAllNodes, false /* is_iterative */);
}
}
-static void AddLoopHeader(CompilationUnit* cUnit, BasicBlock* header,
- BasicBlock* backEdge)
+static void AddLoopHeader(CompilationUnit* cu, BasicBlock* header,
+ BasicBlock* back_edge)
{
GrowableListIterator iter;
- GrowableListIteratorInit(&cUnit->loopHeaders, &iter);
+ GrowableListIteratorInit(&cu->loop_headers, &iter);
for (LoopInfo* loop = reinterpret_cast<LoopInfo*>(GrowableListIteratorNext(&iter));
(loop != NULL); loop = reinterpret_cast<LoopInfo*>(GrowableListIteratorNext(&iter))) {
if (loop->header == header) {
- InsertGrowableList(cUnit, &loop->incomingBackEdges,
- reinterpret_cast<uintptr_t>(backEdge));
+ InsertGrowableList(cu, &loop->incoming_back_edges,
+ reinterpret_cast<uintptr_t>(back_edge));
return;
}
}
- LoopInfo* info = static_cast<LoopInfo*>(NewMem(cUnit, sizeof(LoopInfo), true, kAllocDFInfo));
+ LoopInfo* info = static_cast<LoopInfo*>(NewMem(cu, sizeof(LoopInfo), true, kAllocDFInfo));
info->header = header;
- CompilerInitGrowableList(cUnit, &info->incomingBackEdges, 2, kListMisc);
- InsertGrowableList(cUnit, &info->incomingBackEdges, reinterpret_cast<uintptr_t>(backEdge));
- InsertGrowableList(cUnit, &cUnit->loopHeaders, reinterpret_cast<uintptr_t>(info));
+ CompilerInitGrowableList(cu, &info->incoming_back_edges, 2, kListMisc);
+ InsertGrowableList(cu, &info->incoming_back_edges, reinterpret_cast<uintptr_t>(back_edge));
+ InsertGrowableList(cu, &cu->loop_headers, reinterpret_cast<uintptr_t>(info));
}
-static bool FindBackEdges(struct CompilationUnit* cUnit, struct BasicBlock* bb)
+static bool FindBackEdges(struct CompilationUnit* cu, struct BasicBlock* bb)
{
- if ((bb->dataFlowInfo == NULL) || (bb->lastMIRInsn == NULL)) {
+ if ((bb->data_flow_info == NULL) || (bb->last_mir_insn == NULL)) {
return false;
}
- Instruction::Code opcode = bb->lastMIRInsn->dalvikInsn.opcode;
+ Instruction::Code opcode = bb->last_mir_insn->dalvikInsn.opcode;
if (Instruction::FlagsOf(opcode) & Instruction::kBranch) {
- if (bb->taken && (bb->taken->startOffset <= bb->startOffset)) {
+ if (bb->taken && (bb->taken->start_offset <= bb->start_offset)) {
DCHECK(bb->dominators != NULL);
if (IsBitSet(bb->dominators, bb->taken->id)) {
- if (cUnit->printMe) {
+ if (cu->verbose) {
LOG(INFO) << "Loop backedge from 0x"
- << std::hex << bb->lastMIRInsn->offset
- << " to 0x" << std::hex << bb->taken->startOffset;
+ << std::hex << bb->last_mir_insn->offset
+ << " to 0x" << std::hex << bb->taken->start_offset;
}
- AddLoopHeader(cUnit, bb->taken, bb);
+ AddLoopHeader(cu, bb->taken, bb);
}
}
}
return false;
}
-static void AddBlocksToLoop(CompilationUnit* cUnit, ArenaBitVector* blocks,
- BasicBlock* bb, int headId)
+static void AddBlocksToLoop(CompilationUnit* cu, ArenaBitVector* blocks,
+ BasicBlock* bb, int head_id)
{
- if (!IsBitSet(bb->dominators, headId) ||
+ if (!IsBitSet(bb->dominators, head_id) ||
IsBitSet(blocks, bb->id)) {
return;
}
- SetBit(cUnit, blocks, bb->id);
+ SetBit(cu, blocks, bb->id);
GrowableListIterator iter;
GrowableListIteratorInit(bb->predecessors, &iter);
- BasicBlock* predBB;
- for (predBB = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iter)); predBB;
- predBB = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iter))) {
- AddBlocksToLoop(cUnit, blocks, predBB, headId);
+ BasicBlock* pred_bb;
+ for (pred_bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iter)); pred_bb;
+ pred_bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iter))) {
+ AddBlocksToLoop(cu, blocks, pred_bb, head_id);
}
}
-static void DumpLoops(CompilationUnit *cUnit)
+static void DumpLoops(CompilationUnit *cu)
{
GrowableListIterator iter;
- GrowableListIteratorInit(&cUnit->loopHeaders, &iter);
+ GrowableListIteratorInit(&cu->loop_headers, &iter);
for (LoopInfo* loop = reinterpret_cast<LoopInfo*>(GrowableListIteratorNext(&iter));
(loop != NULL); loop = reinterpret_cast<LoopInfo*>(GrowableListIteratorNext(&iter))) {
LOG(INFO) << "Loop head block id " << loop->header->id
- << ", offset 0x" << std::hex << loop->header->startOffset
- << ", Depth: " << loop->header->nestingDepth;
+ << ", offset 0x" << std::hex << loop->header->start_offset
+ << ", Depth: " << loop->header->nesting_depth;
GrowableListIterator iter;
- GrowableListIteratorInit(&loop->incomingBackEdges, &iter);
- BasicBlock* edgeBB;
- for (edgeBB = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iter)); edgeBB;
- edgeBB = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iter))) {
- LOG(INFO) << " Backedge block id " << edgeBB->id
- << ", offset 0x" << std::hex << edgeBB->startOffset;
- ArenaBitVectorIterator bIter;
- BitVectorIteratorInit(loop->blocks, &bIter);
- for (int bbId = BitVectorIteratorNext(&bIter); bbId != -1;
- bbId = BitVectorIteratorNext(&bIter)) {
+ GrowableListIteratorInit(&loop->incoming_back_edges, &iter);
+ BasicBlock* edge_bb;
+ for (edge_bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iter)); edge_bb;
+ edge_bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iter))) {
+ LOG(INFO) << " Backedge block id " << edge_bb->id
+ << ", offset 0x" << std::hex << edge_bb->start_offset;
+ ArenaBitVectorIterator b_iter;
+ BitVectorIteratorInit(loop->blocks, &b_iter);
+ for (int bb_id = BitVectorIteratorNext(&b_iter); bb_id != -1;
+ bb_id = BitVectorIteratorNext(&b_iter)) {
BasicBlock *bb;
- bb = reinterpret_cast<BasicBlock*>(GrowableListGetElement(&cUnit->blockList, bbId));
+ bb = reinterpret_cast<BasicBlock*>(GrowableListGetElement(&cu->block_list, bb_id));
LOG(INFO) << " (" << bb->id << ", 0x" << std::hex
- << bb->startOffset << ")";
+ << bb->start_offset << ")";
}
}
}
}
-void LoopDetection(CompilationUnit *cUnit)
+void LoopDetection(CompilationUnit *cu)
{
- if (cUnit->disableOpt & (1 << kPromoteRegs)) {
+ if (cu->disable_opt & (1 << kPromoteRegs)) {
return;
}
- CompilerInitGrowableList(cUnit, &cUnit->loopHeaders, 6, kListMisc);
+ CompilerInitGrowableList(cu, &cu->loop_headers, 6, kListMisc);
// Find the loop headers
- DataFlowAnalysisDispatcher(cUnit, FindBackEdges, kAllNodes, false /* isIterative */);
+ DataFlowAnalysisDispatcher(cu, FindBackEdges, kAllNodes, false /* is_iterative */);
GrowableListIterator iter;
- GrowableListIteratorInit(&cUnit->loopHeaders, &iter);
+ GrowableListIteratorInit(&cu->loop_headers, &iter);
// Add blocks to each header
for (LoopInfo* loop = reinterpret_cast<LoopInfo*>(GrowableListIteratorNext(&iter));
loop; loop = reinterpret_cast<LoopInfo*>(GrowableListIteratorNext(&iter))) {
- loop->blocks = AllocBitVector(cUnit, cUnit->numBlocks, true,
+ loop->blocks = AllocBitVector(cu, cu->num_blocks, true,
kBitMapMisc);
- SetBit(cUnit, loop->blocks, loop->header->id);
+ SetBit(cu, loop->blocks, loop->header->id);
GrowableListIterator iter;
- GrowableListIteratorInit(&loop->incomingBackEdges, &iter);
- BasicBlock* edgeBB;
- for (edgeBB = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iter)); edgeBB;
- edgeBB = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iter))) {
- AddBlocksToLoop(cUnit, loop->blocks, edgeBB, loop->header->id);
+ GrowableListIteratorInit(&loop->incoming_back_edges, &iter);
+ BasicBlock* edge_bb;
+ for (edge_bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iter)); edge_bb;
+ edge_bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iter))) {
+ AddBlocksToLoop(cu, loop->blocks, edge_bb, loop->header->id);
}
}
// Compute the nesting depth of each header
- GrowableListIteratorInit(&cUnit->loopHeaders, &iter);
+ GrowableListIteratorInit(&cu->loop_headers, &iter);
for (LoopInfo* loop = reinterpret_cast<LoopInfo*>(GrowableListIteratorNext(&iter));
loop; loop = reinterpret_cast<LoopInfo*>(GrowableListIteratorNext(&iter))) {
GrowableListIterator iter2;
- GrowableListIteratorInit(&cUnit->loopHeaders, &iter2);
+ GrowableListIteratorInit(&cu->loop_headers, &iter2);
LoopInfo* loop2;
for (loop2 = reinterpret_cast<LoopInfo*>(GrowableListIteratorNext(&iter2));
loop2; loop2 = reinterpret_cast<LoopInfo*>(GrowableListIteratorNext(&iter2))) {
if (IsBitSet(loop2->blocks, loop->header->id)) {
- loop->header->nestingDepth++;
+ loop->header->nesting_depth++;
}
}
}
// Assign nesting depth to each block in all loops
- GrowableListIteratorInit(&cUnit->loopHeaders, &iter);
+ GrowableListIteratorInit(&cu->loop_headers, &iter);
for (LoopInfo* loop = reinterpret_cast<LoopInfo*>(GrowableListIteratorNext(&iter));
(loop != NULL); loop = reinterpret_cast<LoopInfo*>(GrowableListIteratorNext(&iter))) {
- ArenaBitVectorIterator bIter;
- BitVectorIteratorInit(loop->blocks, &bIter);
- for (int bbId = BitVectorIteratorNext(&bIter); bbId != -1;
- bbId = BitVectorIteratorNext(&bIter)) {
+ ArenaBitVectorIterator b_iter;
+ BitVectorIteratorInit(loop->blocks, &b_iter);
+ for (int bb_id = BitVectorIteratorNext(&b_iter); bb_id != -1;
+ bb_id = BitVectorIteratorNext(&b_iter)) {
BasicBlock *bb;
- bb = reinterpret_cast<BasicBlock*>(GrowableListGetElement(&cUnit->blockList, bbId));
- bb->nestingDepth = std::max(bb->nestingDepth,
- loop->header->nestingDepth);
+ bb = reinterpret_cast<BasicBlock*>(GrowableListGetElement(&cu->block_list, bb_id));
+ bb->nesting_depth = std::max(bb->nesting_depth,
+ loop->header->nesting_depth);
}
}
- if (cUnit->printMe) {
- DumpLoops(cUnit);
+ if (cu->verbose) {
+ DumpLoops(cu);
}
}
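The three phases above follow the standard natural-loop recipe: an edge is a back edge when its target dominates its source, and the loop body is gathered by walking predecessors from each back-edge source until the header stops the recursion. A sketch (seed the body set with the header's bit before calling Collect); the bitset bound is illustrative:

#include <bitset>
#include <vector>

constexpr size_t kMaxBlocks = 64;
struct LoopBlk {
  int id;
  std::bitset<kMaxBlocks> dominators;
  std::vector<LoopBlk*> preds;
};

bool IsBackEdge(const LoopBlk* branch, const LoopBlk* target) {
  return branch->dominators.test(target->id);  // target dominates the branch
}

void Collect(LoopBlk* bb, int head_id, std::bitset<kMaxBlocks>& body) {
  if (!bb->dominators.test(head_id) || body.test(bb->id)) return;
  body.set(bb->id);
  for (LoopBlk* p : bb->preds) Collect(p, head_id, body);
}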
@@ -2357,7 +2357,7 @@
* and attempting to do would involve more complexity than it's
* worth.
*/
-static bool InvokeUsesMethodStar(CompilationUnit* cUnit, MIR* mir)
+static bool InvokeUsesMethodStar(CompilationUnit* cu, MIR* mir)
{
InvokeType type;
Instruction::Code opcode = mir->dalvikInsn.opcode;
@@ -2385,63 +2385,63 @@
LOG(WARNING) << "Unexpected invoke op: " << opcode;
return false;
}
- OatCompilationUnit mUnit(cUnit->class_loader, cUnit->class_linker,
- *cUnit->dex_file,
- cUnit->code_item, cUnit->method_idx,
- cUnit->access_flags);
+ OatCompilationUnit m_unit(cu->class_loader, cu->class_linker,
+ *cu->dex_file,
+ cu->code_item, cu->method_idx,
+ cu->access_flags);
// TODO: add a flag so we don't count the stats for this twice
- uint32_t dexMethodIdx = mir->dalvikInsn.vB;
- int vtableIdx;
- uintptr_t directCode;
- uintptr_t directMethod;
- bool fastPath =
- cUnit->compiler->ComputeInvokeInfo(dexMethodIdx, &mUnit, type,
- vtableIdx, directCode,
- directMethod) &&
+ uint32_t dex_method_idx = mir->dalvikInsn.vB;
+ int vtable_idx;
+ uintptr_t direct_code;
+ uintptr_t direct_method;
+ bool fast_path =
+ cu->compiler->ComputeInvokeInfo(dex_method_idx, &m_unit, type,
+ vtable_idx, direct_code,
+ direct_method) &&
!SLOW_INVOKE_PATH;
return (((type == kDirect) || (type == kStatic)) &&
- fastPath && ((directCode == 0) || (directMethod == 0)));
+ fast_path && ((direct_code == 0) || (direct_method == 0)));
}
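The predicate can be read straight off the return expression: only a direct or static invoke that resolved to the fast path but still lacks either a direct code pointer or a direct method pointer needs the current Method* to finish dispatch. As a standalone check:

#include <cstdint>

enum class InvokeKind { kStatic, kDirect, kVirtual, kSuper, kInterface };

bool NeedsMethodStar(InvokeKind kind, bool fast_path,
                     uintptr_t direct_code, uintptr_t direct_method) {
  return (kind == InvokeKind::kDirect || kind == InvokeKind::kStatic) &&
         fast_path && (direct_code == 0 || direct_method == 0);
}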
/*
* Count uses, weighting by loop nesting depth. This code only
- * counts explicitly used sRegs. A later phase will add implicit
+ * counts explicitly used s_regs. A later phase will add implicit
* counts for things such as Method*, null-checked references, etc.
*/
-static bool CountUses(struct CompilationUnit* cUnit, struct BasicBlock* bb)
+static bool CountUses(struct CompilationUnit* cu, struct BasicBlock* bb)
{
- if (bb->blockType != kDalvikByteCode) {
+ if (bb->block_type != kDalvikByteCode) {
return false;
}
- for (MIR* mir = bb->firstMIRInsn; (mir != NULL); mir = mir->next) {
- if (mir->ssaRep == NULL) {
+ for (MIR* mir = bb->first_mir_insn; (mir != NULL); mir = mir->next) {
+ if (mir->ssa_rep == NULL) {
continue;
}
- uint32_t weight = std::min(16U, static_cast<uint32_t>(bb->nestingDepth));
- for (int i = 0; i < mir->ssaRep->numUses; i++) {
- int sReg = mir->ssaRep->uses[i];
- DCHECK_LT(sReg, static_cast<int>(cUnit->useCounts.numUsed));
- cUnit->rawUseCounts.elemList[sReg]++;
- cUnit->useCounts.elemList[sReg] += (1 << weight);
+ uint32_t weight = std::min(16U, static_cast<uint32_t>(bb->nesting_depth));
+ for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
+ int s_reg = mir->ssa_rep->uses[i];
+ DCHECK_LT(s_reg, static_cast<int>(cu->use_counts.num_used));
+ cu->raw_use_counts.elem_list[s_reg]++;
+ cu->use_counts.elem_list[s_reg] += (1 << weight);
}
- if (!(cUnit->disableOpt & (1 << kPromoteCompilerTemps))) {
- int dfAttributes = oatDataFlowAttributes[mir->dalvikInsn.opcode];
+ if (!(cu->disable_opt & (1 << kPromoteCompilerTemps))) {
+ int df_attributes = oat_data_flow_attributes[mir->dalvikInsn.opcode];
// Implicit use of Method*?
- if (dfAttributes & DF_UMS) {
+ if (df_attributes & DF_UMS) {
/*
* Some invokes will not use Method* - need to perform test similar
* to that found in GenInvoke() to decide whether to count refs
* for Method* on invoke-class opcodes.
* TODO: refactor for common test here, save results for GenInvoke
*/
- int usesMethodStar = true;
- if ((dfAttributes & (DF_FORMAT_35C | DF_FORMAT_3RC)) &&
- !(dfAttributes & DF_NON_NULL_RET)) {
- usesMethodStar &= InvokeUsesMethodStar(cUnit, mir);
+ bool uses_method_star = true;
+ if ((df_attributes & (DF_FORMAT_35C | DF_FORMAT_3RC)) &&
+ !(df_attributes & DF_NON_NULL_RET)) {
+ uses_method_star &= InvokeUsesMethodStar(cu, mir);
}
- if (usesMethodStar) {
- cUnit->rawUseCounts.elemList[cUnit->methodSReg]++;
- cUnit->useCounts.elemList[cUnit->methodSReg] += (1 << weight);
+ if (uses_method_star) {
+ cu->raw_use_counts.elem_list[cu->method_sreg]++;
+ cu->use_counts.elem_list[cu->method_sreg] += (1 << weight);
}
}
}
@@ -2449,20 +2449,20 @@
return false;
}
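The heuristic keeps two tallies per s_reg: a raw count and one scaled by 2^min(depth, 16), so uses inside hot inner loops dominate promotion decisions. In isolation, with wider counter types chosen for the sketch:

#include <algorithm>
#include <cstdint>
#include <vector>

void AddUse(std::vector<uint64_t>& weighted, std::vector<uint32_t>& raw,
            int s_reg, uint32_t nesting_depth) {
  uint32_t weight = std::min<uint32_t>(16, nesting_depth);
  raw[s_reg] += 1;
  weighted[s_reg] += uint64_t{1} << weight;  // depth-weighted vote
}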
-void MethodUseCount(CompilationUnit *cUnit)
+void MethodUseCount(CompilationUnit *cu)
{
- CompilerInitGrowableList(cUnit, &cUnit->useCounts, cUnit->numSSARegs + 32, kListMisc);
- CompilerInitGrowableList(cUnit, &cUnit->rawUseCounts, cUnit->numSSARegs + 32, kListMisc);
+ CompilerInitGrowableList(cu, &cu->use_counts, cu->num_ssa_regs + 32, kListMisc);
+ CompilerInitGrowableList(cu, &cu->raw_use_counts, cu->num_ssa_regs + 32, kListMisc);
// Initialize list
- for (int i = 0; i < cUnit->numSSARegs; i++) {
- InsertGrowableList(cUnit, &cUnit->useCounts, 0);
- InsertGrowableList(cUnit, &cUnit->rawUseCounts, 0);
+ for (int i = 0; i < cu->num_ssa_regs; i++) {
+ InsertGrowableList(cu, &cu->use_counts, 0);
+ InsertGrowableList(cu, &cu->raw_use_counts, 0);
}
- if (cUnit->disableOpt & (1 << kPromoteRegs)) {
+ if (cu->disable_opt & (1 << kPromoteRegs)) {
return;
}
- DataFlowAnalysisDispatcher(cUnit, CountUses,
- kAllNodes, false /* isIterative */);
+ DataFlowAnalysisDispatcher(cu, CountUses,
+ kAllNodes, false /* is_iterative */);
}
} // namespace art
diff --git a/src/compiler/dataflow.h b/src/compiler/dataflow.h
index 35259ed..00e6487 100644
--- a/src/compiler/dataflow.h
+++ b/src/compiler/dataflow.h
@@ -112,24 +112,24 @@
#define DF_IS_GETTER_OR_SETTER (DF_IS_GETTER | DF_IS_SETTER)
#define DF_USES_FP (DF_FP_A | DF_FP_B | DF_FP_C)
-extern const int oatDataFlowAttributes[kMirOpLast];
+extern const int oat_data_flow_attributes[kMirOpLast];
struct BasicBlockDataFlow {
- ArenaBitVector* useV;
- ArenaBitVector* defV;
- ArenaBitVector* liveInV;
- ArenaBitVector* phiV;
- int* vRegToSSAMap;
- ArenaBitVector* endingNullCheckV;
+ ArenaBitVector* use_v;
+ ArenaBitVector* def_v;
+ ArenaBitVector* live_in_v;
+ ArenaBitVector* phi_v;
+ int* vreg_to_ssa_map;
+ ArenaBitVector* ending_null_check_v;
};
struct SSARepresentation {
- int numUses;
+ int num_uses;
int* uses;
- bool* fpUse;
- int numDefs;
+ bool* fp_use;
+ int num_defs;
int* defs;
- bool* fpDef;
+ bool* fp_def;
};
/*
@@ -137,44 +137,44 @@
* induction variable.
*/
struct InductionVariableInfo {
- int ssaReg;
- int basicSSAReg;
+ int ssa_reg;
+ int basic_ssa_reg;
int m; // multiplier
int c; // constant
int inc; // loop increment
};
struct ArrayAccessInfo {
- int arrayReg;
- int ivReg;
- int maxC; // For DIV - will affect upper bound checking
- int minC; // For DIV - will affect lower bound checking
+ int array_reg;
+ int iv_reg;
+ int max_c; // For DIV - will affect upper bound checking
+ int min_c; // For DIV - will affect lower bound checking
};
struct LoopInfo {
BasicBlock* header;
- GrowableList incomingBackEdges;
+ GrowableList incoming_back_edges;
ArenaBitVector* blocks;
};
-int SRegToVReg(const CompilationUnit* cUnit, int ssaReg);
-char* GetDalvikDisassembly(CompilationUnit* cUnit, const DecodedInstruction& insn, const char* note);
-char* FullDisassembler(CompilationUnit* cUnit, const MIR* mir);
-char* GetSSAString(CompilationUnit* cUnit, SSARepresentation* ssaRep);
-bool FindLocalLiveIn(CompilationUnit* cUnit, BasicBlock* bb);
-bool DoSSAConversion(CompilationUnit* cUnit, BasicBlock* bb);
-bool DoConstantPropogation(CompilationUnit* cUnit, BasicBlock* bb);
-void CompilerInitializeSSAConversion(CompilationUnit* cUnit);
-bool ClearVisitedFlag(struct CompilationUnit* cUnit, struct BasicBlock* bb);
-void DataFlowAnalysisDispatcher(CompilationUnit* cUnit, bool (*func)(CompilationUnit*, BasicBlock*), DataFlowAnalysisMode dfaMode, bool isIterative);
-MIR* FindMoveResult(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir);
-void NullCheckElimination(CompilationUnit *cUnit);
-void BasicBlockCombine(CompilationUnit* cUnit);
-void CodeLayout(CompilationUnit* cUnit);
-void DumpCheckStats(CompilationUnit *cUnit);
-void BasicBlockOptimization(CompilationUnit *cUnit);
-void LoopDetection(CompilationUnit *cUnit);
-void MethodUseCount(CompilationUnit *cUnit);
+int SRegToVReg(const CompilationUnit* cu, int ssa_reg);
+char* GetDalvikDisassembly(CompilationUnit* cu, const DecodedInstruction& insn, const char* note);
+char* FullDisassembler(CompilationUnit* cu, const MIR* mir);
+char* GetSSAString(CompilationUnit* cu, SSARepresentation* ssa_rep);
+bool FindLocalLiveIn(CompilationUnit* cu, BasicBlock* bb);
+bool DoSSAConversion(CompilationUnit* cu, BasicBlock* bb);
+bool DoConstantPropogation(CompilationUnit* cu, BasicBlock* bb);
+void CompilerInitializeSSAConversion(CompilationUnit* cu);
+bool ClearVisitedFlag(struct CompilationUnit* cu, struct BasicBlock* bb);
+void DataFlowAnalysisDispatcher(CompilationUnit* cu, bool (*func)(CompilationUnit*, BasicBlock*), DataFlowAnalysisMode dfa_mode, bool is_iterative);
+MIR* FindMoveResult(CompilationUnit* cu, BasicBlock* bb, MIR* mir);
+void NullCheckElimination(CompilationUnit *cu);
+void BasicBlockCombine(CompilationUnit* cu);
+void CodeLayout(CompilationUnit* cu);
+void DumpCheckStats(CompilationUnit *cu);
+void BasicBlockOptimization(CompilationUnit *cu);
+void LoopDetection(CompilationUnit *cu);
+void MethodUseCount(CompilationUnit *cu);
} // namespace art
diff --git a/src/compiler/frontend.cc b/src/compiler/frontend.cc
index 5a17b02..c05130b 100644
--- a/src/compiler/frontend.cc
+++ b/src/compiler/frontend.cc
@@ -55,8 +55,8 @@
extern "C" void ArtInitQuickCompilerContext(art::Compiler& compiler) {
CHECK(compiler.GetCompilerContext() == NULL);
- LLVMInfo* llvmInfo = new LLVMInfo();
- compiler.SetCompilerContext(llvmInfo);
+ LLVMInfo* llvm_info = new LLVMInfo();
+ compiler.SetCompilerContext(llvm_info);
}
extern "C" void ArtUnInitQuickCompilerContext(art::Compiler& compiler) {
@@ -98,8 +98,8 @@
//(1 << kDebugVerifyBitcode) |
0;
-static bool ContentIsInsn(const uint16_t* codePtr) {
- uint16_t instr = *codePtr;
+static bool ContentIsInsn(const uint16_t* code_ptr) {
+ uint16_t instr = *code_ptr;
Instruction::Code opcode = static_cast<Instruction::Code>(instr & 0xff);
/*
@@ -112,22 +112,22 @@
/*
* Parse an instruction, return the length of the instruction
*/
-static int ParseInsn(CompilationUnit* cUnit, const uint16_t* codePtr,
- DecodedInstruction* decoded_instruction, bool printMe)
+static int ParseInsn(CompilationUnit* cu, const uint16_t* code_ptr,
+ DecodedInstruction* decoded_instruction, bool verbose)
{
// Don't parse instruction data
- if (!ContentIsInsn(codePtr)) {
+ if (!ContentIsInsn(code_ptr)) {
return 0;
}
- const Instruction* instruction = Instruction::At(codePtr);
+ const Instruction* instruction = Instruction::At(code_ptr);
*decoded_instruction = DecodedInstruction(instruction);
- if (printMe) {
- char* decodedString = GetDalvikDisassembly(cUnit, *decoded_instruction,
+ if (verbose) {
+ char* decoded_string = GetDalvikDisassembly(cu, *decoded_instruction,
NULL);
- LOG(INFO) << codePtr << ": 0x" << std::hex << static_cast<int>(decoded_instruction->opcode)
- << " " << decodedString;
+ LOG(INFO) << code_ptr << ": 0x" << std::hex << static_cast<int>(decoded_instruction->opcode)
+ << " " << decoded_string;
}
return instruction->SizeInCodeUnits();
}
@@ -135,68 +135,68 @@
#define UNKNOWN_TARGET 0xffffffff
/* Split an existing block from the specified code offset into two */
-static BasicBlock *SplitBlock(CompilationUnit* cUnit, unsigned int codeOffset,
- BasicBlock* origBlock, BasicBlock** immedPredBlockP)
+static BasicBlock *SplitBlock(CompilationUnit* cu, unsigned int code_offset,
+ BasicBlock* orig_block, BasicBlock** immed_pred_block_p)
{
- MIR* insn = origBlock->firstMIRInsn;
+ MIR* insn = orig_block->first_mir_insn;
while (insn) {
- if (insn->offset == codeOffset) break;
+ if (insn->offset == code_offset) break;
insn = insn->next;
}
if (insn == NULL) {
LOG(FATAL) << "Break split failed";
}
- BasicBlock *bottomBlock = NewMemBB(cUnit, kDalvikByteCode,
- cUnit->numBlocks++);
- InsertGrowableList(cUnit, &cUnit->blockList, reinterpret_cast<uintptr_t>(bottomBlock));
+ BasicBlock *bottom_block = NewMemBB(cu, kDalvikByteCode,
+ cu->num_blocks++);
+ InsertGrowableList(cu, &cu->block_list, reinterpret_cast<uintptr_t>(bottom_block));
- bottomBlock->startOffset = codeOffset;
- bottomBlock->firstMIRInsn = insn;
- bottomBlock->lastMIRInsn = origBlock->lastMIRInsn;
+ bottom_block->start_offset = code_offset;
+ bottom_block->first_mir_insn = insn;
+ bottom_block->last_mir_insn = orig_block->last_mir_insn;
/* Add it to the quick lookup cache */
- cUnit->blockMap.Put(bottomBlock->startOffset, bottomBlock);
+ cu->block_map.Put(bottom_block->start_offset, bottom_block);
/* Handle the taken path */
- bottomBlock->taken = origBlock->taken;
- if (bottomBlock->taken) {
- origBlock->taken = NULL;
- DeleteGrowableList(bottomBlock->taken->predecessors, reinterpret_cast<uintptr_t>(origBlock));
- InsertGrowableList(cUnit, bottomBlock->taken->predecessors,
- reinterpret_cast<uintptr_t>(bottomBlock));
+ bottom_block->taken = orig_block->taken;
+ if (bottom_block->taken) {
+ orig_block->taken = NULL;
+ DeleteGrowableList(bottom_block->taken->predecessors, reinterpret_cast<uintptr_t>(orig_block));
+ InsertGrowableList(cu, bottom_block->taken->predecessors,
+ reinterpret_cast<uintptr_t>(bottom_block));
}
/* Handle the fallthrough path */
- bottomBlock->fallThrough = origBlock->fallThrough;
- origBlock->fallThrough = bottomBlock;
- InsertGrowableList(cUnit, bottomBlock->predecessors,
- reinterpret_cast<uintptr_t>(origBlock));
- if (bottomBlock->fallThrough) {
- DeleteGrowableList(bottomBlock->fallThrough->predecessors,
- reinterpret_cast<uintptr_t>(origBlock));
- InsertGrowableList(cUnit, bottomBlock->fallThrough->predecessors,
- reinterpret_cast<uintptr_t>(bottomBlock));
+ bottom_block->fall_through = orig_block->fall_through;
+ orig_block->fall_through = bottom_block;
+ InsertGrowableList(cu, bottom_block->predecessors,
+ reinterpret_cast<uintptr_t>(orig_block));
+ if (bottom_block->fall_through) {
+ DeleteGrowableList(bottom_block->fall_through->predecessors,
+ reinterpret_cast<uintptr_t>(orig_block));
+ InsertGrowableList(cu, bottom_block->fall_through->predecessors,
+ reinterpret_cast<uintptr_t>(bottom_block));
}
/* Handle the successor list */
- if (origBlock->successorBlockList.blockListType != kNotUsed) {
- bottomBlock->successorBlockList = origBlock->successorBlockList;
- origBlock->successorBlockList.blockListType = kNotUsed;
+ if (orig_block->successor_block_list.block_list_type != kNotUsed) {
+ bottom_block->successor_block_list = orig_block->successor_block_list;
+ orig_block->successor_block_list.block_list_type = kNotUsed;
GrowableListIterator iterator;
- GrowableListIteratorInit(&bottomBlock->successorBlockList.blocks,
+ GrowableListIteratorInit(&bottom_block->successor_block_list.blocks,
&iterator);
while (true) {
- SuccessorBlockInfo *successorBlockInfo =
+ SuccessorBlockInfo *successor_block_info =
reinterpret_cast<SuccessorBlockInfo*>(GrowableListIteratorNext(&iterator));
- if (successorBlockInfo == NULL) break;
- BasicBlock *bb = successorBlockInfo->block;
- DeleteGrowableList(bb->predecessors, reinterpret_cast<uintptr_t>(origBlock));
- InsertGrowableList(cUnit, bb->predecessors, reinterpret_cast<uintptr_t>(bottomBlock));
+ if (successor_block_info == NULL) break;
+ BasicBlock *bb = successor_block_info->block;
+ DeleteGrowableList(bb->predecessors, reinterpret_cast<uintptr_t>(orig_block));
+ InsertGrowableList(cu, bb->predecessors, reinterpret_cast<uintptr_t>(bottom_block));
}
}
- origBlock->lastMIRInsn = insn->prev;
+ orig_block->last_mir_insn = insn->prev;
insn->prev->next = NULL;
insn->prev = NULL;
@@ -204,63 +204,63 @@
* Update the immediate predecessor block pointer so that outgoing edges
* can be applied to the proper block.
*/
- if (immedPredBlockP) {
- DCHECK_EQ(*immedPredBlockP, origBlock);
- *immedPredBlockP = bottomBlock;
+ if (immed_pred_block_p) {
+ DCHECK_EQ(*immed_pred_block_p, orig_block);
+ *immed_pred_block_p = bottom_block;
}
- return bottomBlock;
+ return bottom_block;
}
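Stripped of predecessor-list maintenance and the block-map update, the split is a list cut plus an edge handoff: the bottom half inherits both outgoing edges and the top half falls through into it. A sketch under hypothetical minimal types (it returns nullptr on a miss rather than aborting):

struct In { unsigned offset; In* prev; In* next; };
struct Bk { In* first; In* last; Bk* taken; Bk* fall_through; };

Bk* Split(Bk* top, unsigned code_offset) {
  In* insn = top->first;
  while (insn != nullptr && insn->offset != code_offset) insn = insn->next;
  if (insn == nullptr) return nullptr;            // offset not in this block
  Bk* bottom = new Bk{insn, top->last, top->taken, top->fall_through};
  top->taken = nullptr;                           // edges move to bottom half
  top->fall_through = bottom;
  top->last = insn->prev;
  if (insn->prev != nullptr) insn->prev->next = nullptr;  // cut the list
  insn->prev = nullptr;
  return bottom;
}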
/*
* Given a code offset, find out the block that starts with it. If the offset
- * is in the middle of an existing block, split it into two. If immedPredBlockP
- * is not non-null and is the block being split, update *immedPredBlockP to
+ * is in the middle of an existing block, split it into two. If immed_pred_block_p
+ * is non-null and is the block being split, update *immed_pred_block_p to
* point to the bottom block so that outgoing edges can be set up properly
* (by the caller)
* Utilizes a map for fast lookup of the typical cases.
*/
-BasicBlock *FindBlock(CompilationUnit* cUnit, unsigned int codeOffset,
- bool split, bool create, BasicBlock** immedPredBlockP)
+BasicBlock *FindBlock(CompilationUnit* cu, unsigned int code_offset,
+ bool split, bool create, BasicBlock** immed_pred_block_p)
{
- GrowableList* blockList = &cUnit->blockList;
+ GrowableList* block_list = &cu->block_list;
BasicBlock* bb;
unsigned int i;
SafeMap<unsigned int, BasicBlock*>::iterator it;
- it = cUnit->blockMap.find(codeOffset);
- if (it != cUnit->blockMap.end()) {
+ it = cu->block_map.find(code_offset);
+ if (it != cu->block_map.end()) {
return it->second;
} else if (!create) {
return NULL;
}
if (split) {
- for (i = 0; i < blockList->numUsed; i++) {
- bb = reinterpret_cast<BasicBlock*>(blockList->elemList[i]);
- if (bb->blockType != kDalvikByteCode) continue;
+ for (i = 0; i < block_list->num_used; i++) {
+ bb = reinterpret_cast<BasicBlock*>(block_list->elem_list[i]);
+ if (bb->block_type != kDalvikByteCode) continue;
/* Check if a branch jumps into the middle of an existing block */
- if ((codeOffset > bb->startOffset) && (bb->lastMIRInsn != NULL) &&
- (codeOffset <= bb->lastMIRInsn->offset)) {
- BasicBlock *newBB = SplitBlock(cUnit, codeOffset, bb,
- bb == *immedPredBlockP ?
- immedPredBlockP : NULL);
- return newBB;
+ if ((code_offset > bb->start_offset) && (bb->last_mir_insn != NULL) &&
+ (code_offset <= bb->last_mir_insn->offset)) {
+ BasicBlock *new_bb = SplitBlock(cu, code_offset, bb,
+ bb == *immed_pred_block_p ?
+ immed_pred_block_p : NULL);
+ return new_bb;
}
}
}
/* Create a new one */
- bb = NewMemBB(cUnit, kDalvikByteCode, cUnit->numBlocks++);
- InsertGrowableList(cUnit, &cUnit->blockList, reinterpret_cast<uintptr_t>(bb));
- bb->startOffset = codeOffset;
- cUnit->blockMap.Put(bb->startOffset, bb);
+ bb = NewMemBB(cu, kDalvikByteCode, cu->num_blocks++);
+ InsertGrowableList(cu, &cu->block_list, reinterpret_cast<uintptr_t>(bb));
+ bb->start_offset = code_offset;
+ cu->block_map.Put(bb->start_offset, bb);
return bb;
}
/* Find existing block */
-BasicBlock* FindBlock(CompilationUnit* cUnit, unsigned int codeOffset)
+BasicBlock* FindBlock(CompilationUnit* cu, unsigned int code_offset)
{
- return FindBlock(cUnit, codeOffset, false, false, NULL);
+ return FindBlock(cu, code_offset, false, false, NULL);
}
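The two-tier lookup pays off because almost every query hits a block's exact start offset; only a miss that may land mid-block falls back to the linear scan-and-split path. The fast path alone, with an unordered_map standing in for the compiler's SafeMap:

#include <unordered_map>

struct BBlk { unsigned start_offset; };
std::unordered_map<unsigned, BBlk*> block_map;  // offset -> block, sketch only

BBlk* FastLookup(unsigned code_offset) {
  auto it = block_map.find(code_offset);
  return (it != block_map.end()) ? it->second : nullptr;
}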
/* Turn method name into a legal Linux file name */
@@ -275,13 +275,13 @@
}
/* Dump the CFG into a DOT graph */
-void DumpCFG(CompilationUnit* cUnit, const char* dirPrefix)
+void DumpCFG(CompilationUnit* cu, const char* dir_prefix)
{
FILE* file;
- std::string fname(PrettyMethod(cUnit->method_idx, *cUnit->dex_file));
+ std::string fname(PrettyMethod(cu->method_idx, *cu->dex_file));
ReplaceSpecialChars(fname);
- fname = StringPrintf("%s%s%x.dot", dirPrefix, fname.c_str(),
- cUnit->entryBlock->fallThrough->startOffset);
+ fname = StringPrintf("%s%s%x.dot", dir_prefix, fname.c_str(),
+ cu->entry_block->fall_through->start_offset);
file = fopen(fname.c_str(), "w");
if (file == NULL) {
return;
@@ -290,115 +290,115 @@
fprintf(file, " rankdir=TB\n");
- int numReachableBlocks = cUnit->numReachableBlocks;
+ int num_reachable_blocks = cu->num_reachable_blocks;
int idx;
- const GrowableList *blockList = &cUnit->blockList;
+ const GrowableList *block_list = &cu->block_list;
- for (idx = 0; idx < numReachableBlocks; idx++) {
- int blockIdx = cUnit->dfsOrder.elemList[idx];
- BasicBlock *bb = reinterpret_cast<BasicBlock*>(GrowableListGetElement(blockList, blockIdx));
+ for (idx = 0; idx < num_reachable_blocks; idx++) {
+ int block_idx = cu->dfs_order.elem_list[idx];
+ BasicBlock *bb = reinterpret_cast<BasicBlock*>(GrowableListGetElement(block_list, block_idx));
if (bb == NULL) break;
- if (bb->blockType == kDead) continue;
- if (bb->blockType == kEntryBlock) {
+ if (bb->block_type == kDead) continue;
+ if (bb->block_type == kEntryBlock) {
fprintf(file, " entry_%d [shape=Mdiamond];\n", bb->id);
- } else if (bb->blockType == kExitBlock) {
+ } else if (bb->block_type == kExitBlock) {
fprintf(file, " exit_%d [shape=Mdiamond];\n", bb->id);
- } else if (bb->blockType == kDalvikByteCode) {
+ } else if (bb->block_type == kDalvikByteCode) {
fprintf(file, " block%04x_%d [shape=record,label = \"{ \\\n",
- bb->startOffset, bb->id);
+ bb->start_offset, bb->id);
const MIR *mir;
fprintf(file, " {block id %d\\l}%s\\\n", bb->id,
- bb->firstMIRInsn ? " | " : " ");
- for (mir = bb->firstMIRInsn; mir; mir = mir->next) {
+ bb->first_mir_insn ? " | " : " ");
+ for (mir = bb->first_mir_insn; mir; mir = mir->next) {
fprintf(file, " {%04x %s\\l}%s\\\n", mir->offset,
- mir->ssaRep ? FullDisassembler(cUnit, mir) :
+ mir->ssa_rep ? FullDisassembler(cu, mir) :
Instruction::Name(mir->dalvikInsn.opcode),
mir->next ? " | " : " ");
}
fprintf(file, " }\"];\n\n");
- } else if (bb->blockType == kExceptionHandling) {
- char blockName[BLOCK_NAME_LEN];
+ } else if (bb->block_type == kExceptionHandling) {
+ char block_name[BLOCK_NAME_LEN];
- GetBlockName(bb, blockName);
- fprintf(file, " %s [shape=invhouse];\n", blockName);
+ GetBlockName(bb, block_name);
+ fprintf(file, " %s [shape=invhouse];\n", block_name);
}
- char blockName1[BLOCK_NAME_LEN], blockName2[BLOCK_NAME_LEN];
+ char block_name1[BLOCK_NAME_LEN], block_name2[BLOCK_NAME_LEN];
if (bb->taken) {
- GetBlockName(bb, blockName1);
- GetBlockName(bb->taken, blockName2);
+ GetBlockName(bb, block_name1);
+ GetBlockName(bb->taken, block_name2);
fprintf(file, " %s:s -> %s:n [style=dotted]\n",
- blockName1, blockName2);
+ block_name1, block_name2);
}
- if (bb->fallThrough) {
- GetBlockName(bb, blockName1);
- GetBlockName(bb->fallThrough, blockName2);
- fprintf(file, " %s:s -> %s:n\n", blockName1, blockName2);
+ if (bb->fall_through) {
+ GetBlockName(bb, block_name1);
+ GetBlockName(bb->fall_through, block_name2);
+ fprintf(file, " %s:s -> %s:n\n", block_name1, block_name2);
}
- if (bb->successorBlockList.blockListType != kNotUsed) {
+ if (bb->successor_block_list.block_list_type != kNotUsed) {
fprintf(file, " succ%04x_%d [shape=%s,label = \"{ \\\n",
- bb->startOffset, bb->id,
- (bb->successorBlockList.blockListType == kCatch) ?
+ bb->start_offset, bb->id,
+ (bb->successor_block_list.block_list_type == kCatch) ?
"Mrecord" : "record");
GrowableListIterator iterator;
- GrowableListIteratorInit(&bb->successorBlockList.blocks,
+ GrowableListIteratorInit(&bb->successor_block_list.blocks,
&iterator);
- SuccessorBlockInfo *successorBlockInfo =
+ SuccessorBlockInfo *successor_block_info =
reinterpret_cast<SuccessorBlockInfo*>(GrowableListIteratorNext(&iterator));
- int succId = 0;
+ int succ_id = 0;
while (true) {
- if (successorBlockInfo == NULL) break;
+ if (successor_block_info == NULL) break;
- BasicBlock *destBlock = successorBlockInfo->block;
- SuccessorBlockInfo *nextSuccessorBlockInfo =
+ BasicBlock *dest_block = successor_block_info->block;
+ SuccessorBlockInfo *next_successor_block_info =
reinterpret_cast<SuccessorBlockInfo*>(GrowableListIteratorNext(&iterator));
fprintf(file, " {<f%d> %04x: %04x\\l}%s\\\n",
- succId++,
- successorBlockInfo->key,
- destBlock->startOffset,
- (nextSuccessorBlockInfo != NULL) ? " | " : " ");
+ succ_id++,
+ successor_block_info->key,
+ dest_block->start_offset,
+ (next_successor_block_info != NULL) ? " | " : " ");
- successorBlockInfo = nextSuccessorBlockInfo;
+ successor_block_info = next_successor_block_info;
}
fprintf(file, " }\"];\n\n");
- GetBlockName(bb, blockName1);
+ GetBlockName(bb, block_name1);
fprintf(file, " %s:s -> succ%04x_%d:n [style=dashed]\n",
- blockName1, bb->startOffset, bb->id);
+ block_name1, bb->start_offset, bb->id);
- if (bb->successorBlockList.blockListType == kPackedSwitch ||
- bb->successorBlockList.blockListType == kSparseSwitch) {
+ if (bb->successor_block_list.block_list_type == kPackedSwitch ||
+ bb->successor_block_list.block_list_type == kSparseSwitch) {
- GrowableListIteratorInit(&bb->successorBlockList.blocks,
+ GrowableListIteratorInit(&bb->successor_block_list.blocks,
&iterator);
- succId = 0;
+ succ_id = 0;
while (true) {
- SuccessorBlockInfo *successorBlockInfo =
+ SuccessorBlockInfo *successor_block_info =
reinterpret_cast<SuccessorBlockInfo*>( GrowableListIteratorNext(&iterator));
- if (successorBlockInfo == NULL) break;
+ if (successor_block_info == NULL) break;
- BasicBlock *destBlock = successorBlockInfo->block;
+ BasicBlock *dest_block = successor_block_info->block;
- GetBlockName(destBlock, blockName2);
- fprintf(file, " succ%04x_%d:f%d:e -> %s:n\n", bb->startOffset,
- bb->id, succId++, blockName2);
+ GetBlockName(dest_block, block_name2);
+ fprintf(file, " succ%04x_%d:f%d:e -> %s:n\n", bb->start_offset,
+ bb->id, succ_id++, block_name2);
}
}
}
fprintf(file, "\n");
/* Display the dominator tree */
- GetBlockName(bb, blockName1);
+ GetBlockName(bb, block_name1);
fprintf(file, " cfg%s [label=\"%s\", shape=none];\n",
- blockName1, blockName1);
- if (bb->iDom) {
- GetBlockName(bb->iDom, blockName2);
- fprintf(file, " cfg%s:s -> cfg%s:n\n\n", blockName2, blockName1);
+ block_name1, block_name1);
+ if (bb->i_dom) {
+ GetBlockName(bb->i_dom, block_name2);
+ fprintf(file, " cfg%s:s -> cfg%s:n\n\n", block_name2, block_name1);
}
}
fprintf(file, "}\n");
@@ -406,66 +406,66 @@
}
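
As a reference for what DumpCFG emits, a toy sketch (not part of this CL) that produces the same flavor of Graphviz DOT text for a two-node graph:

    #include <cstdio>

    void DumpToyCfg(const char* path)
    {
      FILE* file = fopen(path, "w");
      if (file == NULL) {
        return;
      }
      fprintf(file, "digraph G {\n");
      fprintf(file, "  rankdir=TB\n");
      fprintf(file, "  entry_0 [shape=Mdiamond];\n");
      fprintf(file, "  exit_1 [shape=Mdiamond];\n");
      fprintf(file, "  entry_0:s -> exit_1:n\n");  // edge in DumpCFG's fall-through style
      fprintf(file, "}\n");
      fclose(file);
    }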
/* Verify that each successor is connected to all of its claimed predecessors */
-static bool VerifyPredInfo(CompilationUnit* cUnit, BasicBlock* bb)
+static bool VerifyPredInfo(CompilationUnit* cu, BasicBlock* bb)
{
GrowableListIterator iter;
GrowableListIteratorInit(bb->predecessors, &iter);
while (true) {
- BasicBlock *predBB = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iter));
- if (!predBB) break;
+ BasicBlock *pred_bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iter));
+ if (!pred_bb) break;
bool found = false;
- if (predBB->taken == bb) {
+ if (pred_bb->taken == bb) {
found = true;
- } else if (predBB->fallThrough == bb) {
+ } else if (pred_bb->fall_through == bb) {
found = true;
- } else if (predBB->successorBlockList.blockListType != kNotUsed) {
+ } else if (pred_bb->successor_block_list.block_list_type != kNotUsed) {
GrowableListIterator iterator;
- GrowableListIteratorInit(&predBB->successorBlockList.blocks,
+ GrowableListIteratorInit(&pred_bb->successor_block_list.blocks,
&iterator);
while (true) {
- SuccessorBlockInfo *successorBlockInfo =
+ SuccessorBlockInfo *successor_block_info =
reinterpret_cast<SuccessorBlockInfo*>(GrowableListIteratorNext(&iterator));
- if (successorBlockInfo == NULL) break;
- BasicBlock *succBB = successorBlockInfo->block;
- if (succBB == bb) {
+ if (successor_block_info == NULL) break;
+ BasicBlock *succ_bb = successor_block_info->block;
+ if (succ_bb == bb) {
found = true;
break;
}
}
}
if (found == false) {
- char blockName1[BLOCK_NAME_LEN], blockName2[BLOCK_NAME_LEN];
- GetBlockName(bb, blockName1);
- GetBlockName(predBB, blockName2);
- DumpCFG(cUnit, "/sdcard/cfg/");
- LOG(FATAL) << "Successor " << blockName1 << "not found from "
- << blockName2;
+ char block_name1[BLOCK_NAME_LEN], block_name2[BLOCK_NAME_LEN];
+ GetBlockName(bb, block_name1);
+ GetBlockName(pred_bb, block_name2);
+ DumpCFG(cu, "/sdcard/cfg/");
+ LOG(FATAL) << "Successor " << block_name1 << "not found from "
+ << block_name2;
}
}
return true;
}
/* Identify code range in try blocks and set up the empty catch blocks */
-static void ProcessTryCatchBlocks(CompilationUnit* cUnit)
+static void ProcessTryCatchBlocks(CompilationUnit* cu)
{
- const DexFile::CodeItem* code_item = cUnit->code_item;
- int triesSize = code_item->tries_size_;
+ const DexFile::CodeItem* code_item = cu->code_item;
+ int tries_size = code_item->tries_size_;
int offset;
- if (triesSize == 0) {
+ if (tries_size == 0) {
return;
}
- ArenaBitVector* tryBlockAddr = cUnit->tryBlockAddr;
+ ArenaBitVector* try_block_addr = cu->try_block_addr;
- for (int i = 0; i < triesSize; i++) {
+ for (int i = 0; i < tries_size; i++) {
- const DexFile::TryItem* pTry =
- DexFile::GetTryItems(*code_item, i);
+ const DexFile::TryItem* p_try =
+ DexFile::GetTryItems(*code_item, i);
- int startOffset = pTry->start_addr_;
- int endOffset = startOffset + pTry->insn_count_;
- for (offset = startOffset; offset < endOffset; offset++) {
- SetBit(cUnit, tryBlockAddr, offset);
+ int start_offset = p_try->start_addr_;
+ int end_offset = start_offset + p_try->insn_count_;
+ for (offset = start_offset; offset < end_offset; offset++) {
+ SetBit(cu, try_block_addr, offset);
}
}
@@ -476,19 +476,19 @@
CatchHandlerIterator iterator(handlers_ptr);
for (; iterator.HasNext(); iterator.Next()) {
uint32_t address = iterator.GetHandlerAddress();
- FindBlock(cUnit, address, false /* split */, true /*create*/,
- /* immedPredBlockP */ NULL);
+ FindBlock(cu, address, false /* split */, true /*create*/,
+ /* immed_pred_block_p */ NULL);
}
handlers_ptr = iterator.EndDataPointer();
}
}
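
The try-range marking above boils down to setting one bit per covered code offset. A self-contained sketch of the same idea, with std::vector<bool> standing in for ArenaBitVector (the helper name is made up for illustration):

    #include <vector>

    // Mark every code unit covered by a try item so later passes can answer
    // "is this offset inside a try block?" with a single bit test.
    void MarkTryRange(std::vector<bool>* try_block_addr, int start_addr,
                      int insn_count)
    {
      int end_offset = start_addr + insn_count;
      if (static_cast<int>(try_block_addr->size()) < end_offset) {
        try_block_addr->resize(end_offset, false);  // 'expandable', as in the CL
      }
      for (int offset = start_addr; offset < end_offset; offset++) {
        (*try_block_addr)[offset] = true;
      }
    }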
/* Process instructions with the kBranch flag */
-static BasicBlock* ProcessCanBranch(CompilationUnit* cUnit, BasicBlock* curBlock,
- MIR* insn, int curOffset, int width, int flags,
- const uint16_t* codePtr, const uint16_t* codeEnd)
+static BasicBlock* ProcessCanBranch(CompilationUnit* cu, BasicBlock* cur_block,
+ MIR* insn, int cur_offset, int width, int flags,
+ const uint16_t* code_ptr, const uint16_t* code_end)
{
- int target = curOffset;
+ int target = cur_offset;
switch (insn->dalvikInsn.opcode) {
case Instruction::GOTO:
case Instruction::GOTO_16:
@@ -501,7 +501,7 @@
case Instruction::IF_GE:
case Instruction::IF_GT:
case Instruction::IF_LE:
- curBlock->conditionalBranch = true;
+ cur_block->conditional_branch = true;
target += insn->dalvikInsn.vC;
break;
case Instruction::IF_EQZ:
@@ -510,26 +510,26 @@
case Instruction::IF_GEZ:
case Instruction::IF_GTZ:
case Instruction::IF_LEZ:
- curBlock->conditionalBranch = true;
+ cur_block->conditional_branch = true;
target += insn->dalvikInsn.vB;
break;
default:
LOG(FATAL) << "Unexpected opcode(" << insn->dalvikInsn.opcode << ") with kBranch set";
}
- BasicBlock *takenBlock = FindBlock(cUnit, target,
+ BasicBlock *taken_block = FindBlock(cu, target,
/* split */
true,
/* create */
true,
- /* immedPredBlockP */
- &curBlock);
- curBlock->taken = takenBlock;
- InsertGrowableList(cUnit, takenBlock->predecessors, reinterpret_cast<uintptr_t>(curBlock));
+ /* immed_pred_block_p */
+ &cur_block);
+ cur_block->taken = taken_block;
+ InsertGrowableList(cu, taken_block->predecessors, reinterpret_cast<uintptr_t>(cur_block));
/* Always terminate the current block for conditional branches */
if (flags & Instruction::kContinue) {
- BasicBlock *fallthroughBlock = FindBlock(cUnit,
- curOffset + width,
+ BasicBlock *fallthrough_block = FindBlock(cu,
+ cur_offset + width,
/*
* If the method is processed
* in sequential order from the
@@ -545,37 +545,37 @@
true,
/* create */
true,
- /* immedPredBlockP */
- &curBlock);
- curBlock->fallThrough = fallthroughBlock;
- InsertGrowableList(cUnit, fallthroughBlock->predecessors,
- reinterpret_cast<uintptr_t>(curBlock));
- } else if (codePtr < codeEnd) {
+ /* immed_pred_block_p */
+ &cur_block);
+ cur_block->fall_through = fallthrough_block;
+ InsertGrowableList(cu, fallthrough_block->predecessors,
+ reinterpret_cast<uintptr_t>(cur_block));
+ } else if (code_ptr < code_end) {
/* Create a fallthrough block for real instructions (incl. NOP) */
- if (ContentIsInsn(codePtr)) {
- FindBlock(cUnit, curOffset + width,
+ if (ContentIsInsn(code_ptr)) {
+ FindBlock(cu, cur_offset + width,
/* split */
false,
/* create */
true,
- /* immedPredBlockP */
+ /* immed_pred_block_p */
NULL);
}
}
- return curBlock;
+ return cur_block;
}
/* Process instructions with the kSwitch flag */
-static void ProcessCanSwitch(CompilationUnit* cUnit, BasicBlock* curBlock,
- MIR* insn, int curOffset, int width, int flags)
+static void ProcessCanSwitch(CompilationUnit* cu, BasicBlock* cur_block,
+ MIR* insn, int cur_offset, int width, int flags)
{
- const uint16_t* switchData =
- reinterpret_cast<const uint16_t*>(cUnit->insns + curOffset + insn->dalvikInsn.vB);
+ const uint16_t* switch_data =
+ reinterpret_cast<const uint16_t*>(cu->insns + cur_offset + insn->dalvikInsn.vB);
int size;
- const int* keyTable;
+ const int* key_table;
- const int* targetTable;
+ const int* target_table;
int i;
- int firstKey;
+ int first_key;
/*
* Packed switch data format:
@@ -587,11 +587,11 @@
* Total size is (4+size*2) 16-bit code units.
*/
if (insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) {
- DCHECK_EQ(static_cast<int>(switchData[0]),
+ DCHECK_EQ(static_cast<int>(switch_data[0]),
static_cast<int>(Instruction::kPackedSwitchSignature));
- size = switchData[1];
- firstKey = switchData[2] | (switchData[3] << 16);
- targetTable = reinterpret_cast<const int*>(&switchData[4]);
+ size = switch_data[1];
+ first_key = switch_data[2] | (switch_data[3] << 16);
+ target_table = reinterpret_cast<const int*>(&switch_data[4]);
- keyTable = NULL; // Make the compiler happy
+ key_table = NULL; // Make the compiler happy
/*
* Sparse switch data format:
@@ -603,117 +603,117 @@
* Total size is (2+size*4) 16-bit code units.
*/
} else {
- DCHECK_EQ(static_cast<int>(switchData[0]),
+ DCHECK_EQ(static_cast<int>(switch_data[0]),
static_cast<int>(Instruction::kSparseSwitchSignature));
- size = switchData[1];
- keyTable = reinterpret_cast<const int*>(&switchData[2]);
- targetTable = reinterpret_cast<const int*>(&switchData[2 + size*2]);
- firstKey = 0; // To make the compiler happy
+ size = switch_data[1];
+ key_table = reinterpret_cast<const int*>(&switch_data[2]);
+ target_table = reinterpret_cast<const int*>(&switch_data[2 + size*2]);
+ first_key = 0; // To make the compiler happy
}
- if (curBlock->successorBlockList.blockListType != kNotUsed) {
+ if (cur_block->successor_block_list.block_list_type != kNotUsed) {
LOG(FATAL) << "Successor block list already in use: "
- << static_cast<int>(curBlock->successorBlockList.blockListType);
+ << static_cast<int>(cur_block->successor_block_list.block_list_type);
}
- curBlock->successorBlockList.blockListType =
+ cur_block->successor_block_list.block_list_type =
(insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) ?
kPackedSwitch : kSparseSwitch;
- CompilerInitGrowableList(cUnit, &curBlock->successorBlockList.blocks, size,
+ CompilerInitGrowableList(cu, &cur_block->successor_block_list.blocks, size,
kListSuccessorBlocks);
for (i = 0; i < size; i++) {
- BasicBlock *caseBlock = FindBlock(cUnit, curOffset + targetTable[i],
+ BasicBlock *case_block = FindBlock(cu, cur_offset + target_table[i],
/* split */
true,
/* create */
true,
- /* immedPredBlockP */
- &curBlock);
- SuccessorBlockInfo *successorBlockInfo =
- static_cast<SuccessorBlockInfo*>(NewMem(cUnit, sizeof(SuccessorBlockInfo),
+ /* immed_pred_block_p */
+ &cur_block);
+ SuccessorBlockInfo *successor_block_info =
+ static_cast<SuccessorBlockInfo*>(NewMem(cu, sizeof(SuccessorBlockInfo),
false, kAllocSuccessor));
- successorBlockInfo->block = caseBlock;
- successorBlockInfo->key =
+ successor_block_info->block = case_block;
+ successor_block_info->key =
(insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) ?
- firstKey + i : keyTable[i];
- InsertGrowableList(cUnit, &curBlock->successorBlockList.blocks,
- reinterpret_cast<uintptr_t>(successorBlockInfo));
- InsertGrowableList(cUnit, caseBlock->predecessors,
- reinterpret_cast<uintptr_t>(curBlock));
+ first_key + i : key_table[i];
+ InsertGrowableList(cu, &cur_block->successor_block_list.blocks,
+ reinterpret_cast<uintptr_t>(successor_block_info));
+ InsertGrowableList(cu, case_block->predecessors,
+ reinterpret_cast<uintptr_t>(cur_block));
}
/* Fall-through case */
- BasicBlock* fallthroughBlock = FindBlock(cUnit,
- curOffset + width,
+ BasicBlock* fallthrough_block = FindBlock(cu,
+ cur_offset + width,
/* split */
false,
/* create */
true,
- /* immedPredBlockP */
+ /* immed_pred_block_p */
NULL);
- curBlock->fallThrough = fallthroughBlock;
- InsertGrowableList(cUnit, fallthroughBlock->predecessors,
- reinterpret_cast<uintptr_t>(curBlock));
+ cur_block->fall_through = fallthrough_block;
+ InsertGrowableList(cu, fallthrough_block->predecessors,
+ reinterpret_cast<uintptr_t>(cur_block));
}
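
The payload layouts in the comments above are easy to mis-index, so here is a standalone decoding sketch for the packed form, matching the documented format (ident, size, a 32-bit first key split across two code units, then size 4-byte targets); the struct and names are illustrative only:

    #include <cstdint>

    struct PackedSwitchView {
      int size;
      int first_key;
      const int* targets;
    };

    // 'data' points at the payload's ident code unit; 4-byte alignment of the
    // target table is assumed, as in ProcessCanSwitch above.
    PackedSwitchView DecodePackedSwitch(const uint16_t* data)
    {
      PackedSwitchView view;
      view.size = data[1];
      view.first_key = data[2] | (data[3] << 16);
      view.targets = reinterpret_cast<const int*>(&data[4]);
      return view;
    }

    // The sparse form differs only in layout:
    //   keys    = reinterpret_cast<const int*>(&data[2]);
    //   targets = reinterpret_cast<const int*>(&data[2 + size * 2]);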
/* Process instructions with the kThrow flag */
-static BasicBlock* ProcessCanThrow(CompilationUnit* cUnit, BasicBlock* curBlock,
- MIR* insn, int curOffset, int width, int flags,
- ArenaBitVector* tryBlockAddr, const uint16_t* codePtr,
- const uint16_t* codeEnd)
+static BasicBlock* ProcessCanThrow(CompilationUnit* cu, BasicBlock* cur_block,
+ MIR* insn, int cur_offset, int width, int flags,
+ ArenaBitVector* try_block_addr, const uint16_t* code_ptr,
+ const uint16_t* code_end)
{
- const DexFile::CodeItem* code_item = cUnit->code_item;
- bool inTryBlock = IsBitSet(tryBlockAddr, curOffset);
+ const DexFile::CodeItem* code_item = cu->code_item;
+ bool in_try_block = IsBitSet(try_block_addr, cur_offset);
/* In try block */
- if (inTryBlock) {
- CatchHandlerIterator iterator(*code_item, curOffset);
+ if (in_try_block) {
+ CatchHandlerIterator iterator(*code_item, cur_offset);
- if (curBlock->successorBlockList.blockListType != kNotUsed) {
- LOG(INFO) << PrettyMethod(cUnit->method_idx, *cUnit->dex_file);
+ if (cur_block->successor_block_list.block_list_type != kNotUsed) {
+ LOG(INFO) << PrettyMethod(cu->method_idx, *cu->dex_file);
LOG(FATAL) << "Successor block list already in use: "
- << static_cast<int>(curBlock->successorBlockList.blockListType);
+ << static_cast<int>(cur_block->successor_block_list.block_list_type);
}
- curBlock->successorBlockList.blockListType = kCatch;
- CompilerInitGrowableList(cUnit, &curBlock->successorBlockList.blocks, 2,
+ cur_block->successor_block_list.block_list_type = kCatch;
+ CompilerInitGrowableList(cu, &cur_block->successor_block_list.blocks, 2,
kListSuccessorBlocks);
for (; iterator.HasNext(); iterator.Next()) {
- BasicBlock *catchBlock = FindBlock(cUnit, iterator.GetHandlerAddress(),
+ BasicBlock *catch_block = FindBlock(cu, iterator.GetHandlerAddress(),
false /* split */,
false /* create */,
- NULL /* immedPredBlockP */);
- catchBlock->catchEntry = true;
- cUnit->catches.insert(catchBlock->startOffset);
- SuccessorBlockInfo *successorBlockInfo = reinterpret_cast<SuccessorBlockInfo*>
- (NewMem(cUnit, sizeof(SuccessorBlockInfo), false, kAllocSuccessor));
- successorBlockInfo->block = catchBlock;
- successorBlockInfo->key = iterator.GetHandlerTypeIndex();
- InsertGrowableList(cUnit, &curBlock->successorBlockList.blocks,
- reinterpret_cast<uintptr_t>(successorBlockInfo));
- InsertGrowableList(cUnit, catchBlock->predecessors,
- reinterpret_cast<uintptr_t>(curBlock));
+ NULL /* immed_pred_block_p */);
+ catch_block->catch_entry = true;
+ cu->catches.insert(catch_block->start_offset);
+ SuccessorBlockInfo *successor_block_info = reinterpret_cast<SuccessorBlockInfo*>
+ (NewMem(cu, sizeof(SuccessorBlockInfo), false, kAllocSuccessor));
+ successor_block_info->block = catch_block;
+ successor_block_info->key = iterator.GetHandlerTypeIndex();
+ InsertGrowableList(cu, &cur_block->successor_block_list.blocks,
+ reinterpret_cast<uintptr_t>(successor_block_info));
+ InsertGrowableList(cu, catch_block->predecessors,
+ reinterpret_cast<uintptr_t>(cur_block));
}
} else {
- BasicBlock *ehBlock = NewMemBB(cUnit, kExceptionHandling,
- cUnit->numBlocks++);
- curBlock->taken = ehBlock;
- InsertGrowableList(cUnit, &cUnit->blockList, reinterpret_cast<uintptr_t>(ehBlock));
- ehBlock->startOffset = curOffset;
- InsertGrowableList(cUnit, ehBlock->predecessors, reinterpret_cast<uintptr_t>(curBlock));
+ BasicBlock *eh_block = NewMemBB(cu, kExceptionHandling,
+ cu->num_blocks++);
+ cur_block->taken = eh_block;
+ InsertGrowableList(cu, &cu->block_list, reinterpret_cast<uintptr_t>(eh_block));
+ eh_block->start_offset = cur_offset;
+ InsertGrowableList(cu, eh_block->predecessors, reinterpret_cast<uintptr_t>(cur_block));
}
if (insn->dalvikInsn.opcode == Instruction::THROW) {
- curBlock->explicitThrow = true;
- if ((codePtr < codeEnd) && ContentIsInsn(codePtr)) {
+ cur_block->explicit_throw = true;
+ if ((code_ptr < code_end) && ContentIsInsn(code_ptr)) {
// Force creation of new block following THROW via side-effect
- FindBlock(cUnit, curOffset + width, /* split */ false,
- /* create */ true, /* immedPredBlockP */ NULL);
+ FindBlock(cu, cur_offset + width, /* split */ false,
+ /* create */ true, /* immed_pred_block_p */ NULL);
}
- if (!inTryBlock) {
+ if (!in_try_block) {
// Don't split a THROW that can't rethrow - we're done.
- return curBlock;
+ return cur_block;
}
}
@@ -723,7 +723,7 @@
* edges and terminates the basic block. It always falls through.
* Then, create a new basic block that begins with the throwing instruction
* (minus exceptions). Note: this new basic block must NOT be entered into
- * the blockMap. If the potentially-throwing instruction is the target of a
+ * the block_map. If the potentially-throwing instruction is the target of a
* future branch, we need to find the check pseudo half. The new
* basic block containing the work portion of the instruction should
* only be entered via fallthrough from the block containing the
@@ -731,33 +731,33 @@
* not automatically terminated after the work portion, and may
* contain following instructions.
*/
- BasicBlock *newBlock = NewMemBB(cUnit, kDalvikByteCode, cUnit->numBlocks++);
- InsertGrowableList(cUnit, &cUnit->blockList, reinterpret_cast<uintptr_t>(newBlock));
- newBlock->startOffset = insn->offset;
- curBlock->fallThrough = newBlock;
- InsertGrowableList(cUnit, newBlock->predecessors, reinterpret_cast<uintptr_t>(curBlock));
- MIR* newInsn = static_cast<MIR*>(NewMem(cUnit, sizeof(MIR), true, kAllocMIR));
- *newInsn = *insn;
+ BasicBlock *new_block = NewMemBB(cu, kDalvikByteCode, cu->num_blocks++);
+ InsertGrowableList(cu, &cu->block_list, reinterpret_cast<uintptr_t>(new_block));
+ new_block->start_offset = insn->offset;
+ cur_block->fall_through = new_block;
+ InsertGrowableList(cu, new_block->predecessors, reinterpret_cast<uintptr_t>(cur_block));
+ MIR* new_insn = static_cast<MIR*>(NewMem(cu, sizeof(MIR), true, kAllocMIR));
+ *new_insn = *insn;
insn->dalvikInsn.opcode =
static_cast<Instruction::Code>(kMirOpCheck);
// Associate the two halves
- insn->meta.throwInsn = newInsn;
- newInsn->meta.throwInsn = insn;
- AppendMIR(newBlock, newInsn);
- return newBlock;
+ insn->meta.throw_insn = new_insn;
+ new_insn->meta.throw_insn = insn;
+ AppendMIR(new_block, new_insn);
+ return new_block;
}
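
To make the check/work split easier to follow, here is the association step in isolation (a sketch using this CL's types; the helper itself is hypothetical):

    // After the split, the original MIR becomes the 'check' half and the copy
    // carries the work; each half can reach the other through meta.throw_insn.
    static void LinkCheckAndWorkHalves(MIR* check_insn, MIR* work_insn)
    {
      check_insn->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpCheck);
      check_insn->meta.throw_insn = work_insn;
      work_insn->meta.throw_insn = check_insn;
    }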
-void CompilerInit(CompilationUnit* cUnit, const Compiler& compiler) {
+void CompilerInit(CompilationUnit* cu, const Compiler& compiler) {
if (!ArchInit()) {
LOG(FATAL) << "Failed to initialize oat";
}
- if (!HeapInit(cUnit)) {
+ if (!HeapInit(cu)) {
LOG(FATAL) << "Failed to initialize oat heap";
}
}
static CompiledMethod* CompileMethod(Compiler& compiler,
- const CompilerBackend compilerBackend,
+ const CompilerBackend compiler_backend,
const DexFile::CodeItem* code_item,
uint32_t access_flags, InvokeType invoke_type,
uint32_t method_idx, jobject class_loader,
@@ -766,59 +766,59 @@
{
VLOG(compiler) << "Compiling " << PrettyMethod(method_idx, dex_file) << "...";
- const uint16_t* codePtr = code_item->insns_;
- const uint16_t* codeEnd = code_item->insns_ + code_item->insns_size_in_code_units_;
- int numBlocks = 0;
- unsigned int curOffset = 0;
+ const uint16_t* code_ptr = code_item->insns_;
+ const uint16_t* code_end = code_item->insns_ + code_item->insns_size_in_code_units_;
+ int num_blocks = 0;
+ unsigned int cur_offset = 0;
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- UniquePtr<CompilationUnit> cUnit(new CompilationUnit);
+ UniquePtr<CompilationUnit> cu(new CompilationUnit);
- CompilerInit(cUnit.get(), compiler);
+ CompilerInit(cu.get(), compiler);
- cUnit->compiler = &compiler;
- cUnit->class_linker = class_linker;
- cUnit->dex_file = &dex_file;
- cUnit->method_idx = method_idx;
- cUnit->code_item = code_item;
- cUnit->access_flags = access_flags;
- cUnit->invoke_type = invoke_type;
- cUnit->shorty = dex_file.GetMethodShorty(dex_file.GetMethodId(method_idx));
- cUnit->instructionSet = compiler.GetInstructionSet();
- cUnit->insns = code_item->insns_;
- cUnit->insnsSize = code_item->insns_size_in_code_units_;
- cUnit->numIns = code_item->ins_size_;
- cUnit->numRegs = code_item->registers_size_ - cUnit->numIns;
- cUnit->numOuts = code_item->outs_size_;
- DCHECK((cUnit->instructionSet == kThumb2) ||
- (cUnit->instructionSet == kX86) ||
- (cUnit->instructionSet == kMips));
- if ((compilerBackend == kQuickGBC) || (compilerBackend == kPortable)) {
- cUnit->genBitcode = true;
+ cu->compiler = &compiler;
+ cu->class_linker = class_linker;
+ cu->dex_file = &dex_file;
+ cu->method_idx = method_idx;
+ cu->code_item = code_item;
+ cu->access_flags = access_flags;
+ cu->invoke_type = invoke_type;
+ cu->shorty = dex_file.GetMethodShorty(dex_file.GetMethodId(method_idx));
+ cu->instruction_set = compiler.GetInstructionSet();
+ cu->insns = code_item->insns_;
+ cu->insns_size = code_item->insns_size_in_code_units_;
+ cu->num_ins = code_item->ins_size_;
+ cu->num_regs = code_item->registers_size_ - cu->num_ins;
+ cu->num_outs = code_item->outs_size_;
+ DCHECK((cu->instruction_set == kThumb2) ||
+ (cu->instruction_set == kX86) ||
+ (cu->instruction_set == kMips));
+ if ((compiler_backend == kQuickGBC) || (compiler_backend == kPortable)) {
+ cu->gen_bitcode = true;
}
- DCHECK_NE(compilerBackend, kIceland); // TODO: remove when Portable/Iceland merge complete
+ DCHECK_NE(compiler_backend, kIceland); // TODO: remove when Portable/Iceland merge complete
// TODO: remove this once x86 is tested
- if (cUnit->genBitcode && (cUnit->instructionSet != kThumb2)) {
+ if (cu->gen_bitcode && (cu->instruction_set != kThumb2)) {
UNIMPLEMENTED(WARNING) << "GBC generation untested for non-Thumb targets";
}
- cUnit->llvm_info = llvm_info;
+ cu->llvm_info = llvm_info;
/* Adjust this value accordingly once inlining is performed */
- cUnit->numDalvikRegisters = code_item->registers_size_;
+ cu->num_dalvik_registers = code_item->registers_size_;
// TODO: set this from command line
- cUnit->compilerFlipMatch = false;
- bool useMatch = !cUnit->compilerMethodMatch.empty();
- bool match = useMatch && (cUnit->compilerFlipMatch ^
- (PrettyMethod(method_idx, dex_file).find(cUnit->compilerMethodMatch) !=
+ cu->compiler_flip_match = false;
+ bool use_match = !cu->compiler_method_match.empty();
+ bool match = use_match && (cu->compiler_flip_match ^
+ (PrettyMethod(method_idx, dex_file).find(cu->compiler_method_match) !=
std::string::npos));
- if (!useMatch || match) {
- cUnit->disableOpt = kCompilerOptimizerDisableFlags;
- cUnit->enableDebug = kCompilerDebugFlags;
- cUnit->printMe = VLOG_IS_ON(compiler) ||
- (cUnit->enableDebug & (1 << kDebugVerbose));
+ if (!use_match || match) {
+ cu->disable_opt = kCompilerOptimizerDisableFlags;
+ cu->enable_debug = kCompilerDebugFlags;
+ cu->verbose = VLOG_IS_ON(compiler) ||
+ (cu->enable_debug & (1 << kDebugVerbose));
}
#ifndef NDEBUG
- if (cUnit->genBitcode) {
- cUnit->enableDebug |= (1 << kDebugVerifyBitcode);
+ if (cu->gen_bitcode) {
+ cu->enable_debug |= (1 << kDebugVerifyBitcode);
}
#endif
@@ -828,7 +828,7 @@
// to see if monkey results change. Should be removed after monkey runs
// complete.
if (PrettyMethod(method_idx, dex_file).find("void com.android.inputmethod.keyboard.Key.<init>(android.content.res.Resources, com.android.inputmethod.keyboard.Keyboard$Params, com.android.inputmethod.keyboard.Keyboard$Builder$Row, org.xmlpull.v1.XmlPullParser)") != std::string::npos) {
- cUnit->disableOpt |= (
+ cu->disable_opt |= (
(1 << kLoadStoreElimination) |
(1 << kLoadHoisting) |
(1 << kSuppressLoads) |
@@ -844,9 +844,9 @@
}
#endif
- if (cUnit->instructionSet == kMips) {
+ if (cu->instruction_set == kMips) {
// Disable some optimizations for mips for now
- cUnit->disableOpt |= (
+ cu->disable_opt |= (
(1 << kLoadStoreElimination) |
(1 << kLoadHoisting) |
(1 << kSuppressLoads) |
@@ -862,90 +862,90 @@
/* Gathering opcode stats? */
if (kCompilerDebugFlags & (1 << kDebugCountOpcodes)) {
- cUnit->opcodeCount =
- static_cast<int*>(NewMem(cUnit.get(), kNumPackedOpcodes * sizeof(int), true, kAllocMisc));
+ cu->opcode_count =
+ static_cast<int*>(NewMem(cu.get(), kNumPackedOpcodes * sizeof(int), true, kAllocMisc));
}
/* Assume non-throwing leaf */
- cUnit->attrs = (METHOD_IS_LEAF | METHOD_IS_THROW_FREE);
+ cu->attrs = (METHOD_IS_LEAF | METHOD_IS_THROW_FREE);
- /* Initialize the block list, estimate size based on insnsSize */
- CompilerInitGrowableList(cUnit.get(), &cUnit->blockList, cUnit->insnsSize,
+ /* Initialize the block list, estimate size based on insns_size */
+ CompilerInitGrowableList(cu.get(), &cu->block_list, cu->insns_size,
kListBlockList);
- /* Initialize the switchTables list */
- CompilerInitGrowableList(cUnit.get(), &cUnit->switchTables, 4,
+ /* Initialize the switch_tables list */
+ CompilerInitGrowableList(cu.get(), &cu->switch_tables, 4,
kListSwitchTables);
- /* Intialize the fillArrayData list */
- CompilerInitGrowableList(cUnit.get(), &cUnit->fillArrayData, 4,
+ /* Initialize the fill_array_data list */
+ CompilerInitGrowableList(cu.get(), &cu->fill_array_data, 4,
kListFillArrayData);
- /* Intialize the throwLaunchpads list, estimate size based on insnsSize */
- CompilerInitGrowableList(cUnit.get(), &cUnit->throwLaunchpads, cUnit->insnsSize,
+ /* Initialize the throw_launchpads list, estimate size based on insns_size */
+ CompilerInitGrowableList(cu.get(), &cu->throw_launchpads, cu->insns_size,
kListThrowLaunchPads);
- /* Intialize the instrinsicLaunchpads list */
- CompilerInitGrowableList(cUnit.get(), &cUnit->intrinsicLaunchpads, 4,
+ /* Initialize the intrinsic_launchpads list */
+ CompilerInitGrowableList(cu.get(), &cu->intrinsic_launchpads, 4,
kListMisc);
- /* Intialize the suspendLaunchpads list */
- CompilerInitGrowableList(cUnit.get(), &cUnit->suspendLaunchpads, 2048,
+ /* Initialize the suspend_launchpads list */
+ CompilerInitGrowableList(cu.get(), &cu->suspend_launchpads, 2048,
kListSuspendLaunchPads);
/* Allocate the bit-vector to track the beginning of basic blocks */
- ArenaBitVector *tryBlockAddr = AllocBitVector(cUnit.get(),
- cUnit->insnsSize,
+ ArenaBitVector *try_block_addr = AllocBitVector(cu.get(),
+ cu->insns_size,
true /* expandable */);
- cUnit->tryBlockAddr = tryBlockAddr;
+ cu->try_block_addr = try_block_addr;
/* Create the default entry and exit blocks and enter them to the list */
- BasicBlock *entryBlock = NewMemBB(cUnit.get(), kEntryBlock, numBlocks++);
- BasicBlock *exitBlock = NewMemBB(cUnit.get(), kExitBlock, numBlocks++);
+ BasicBlock *entry_block = NewMemBB(cu.get(), kEntryBlock, num_blocks++);
+ BasicBlock *exit_block = NewMemBB(cu.get(), kExitBlock, num_blocks++);
- cUnit->entryBlock = entryBlock;
- cUnit->exitBlock = exitBlock;
+ cu->entry_block = entry_block;
+ cu->exit_block = exit_block;
- InsertGrowableList(cUnit.get(), &cUnit->blockList, reinterpret_cast<uintptr_t>(entryBlock));
- InsertGrowableList(cUnit.get(), &cUnit->blockList, reinterpret_cast<uintptr_t>(exitBlock));
+ InsertGrowableList(cu.get(), &cu->block_list, reinterpret_cast<uintptr_t>(entry_block));
+ InsertGrowableList(cu.get(), &cu->block_list, reinterpret_cast<uintptr_t>(exit_block));
/* Current block to record parsed instructions */
- BasicBlock *curBlock = NewMemBB(cUnit.get(), kDalvikByteCode, numBlocks++);
- curBlock->startOffset = 0;
- InsertGrowableList(cUnit.get(), &cUnit->blockList, reinterpret_cast<uintptr_t>(curBlock));
+ BasicBlock *cur_block = NewMemBB(cu.get(), kDalvikByteCode, num_blocks++);
+ cur_block->start_offset = 0;
+ InsertGrowableList(cu.get(), &cu->block_list, reinterpret_cast<uintptr_t>(cur_block));
/* Add first block to the fast lookup cache */
- cUnit->blockMap.Put(curBlock->startOffset, curBlock);
- entryBlock->fallThrough = curBlock;
- InsertGrowableList(cUnit.get(), curBlock->predecessors,
- reinterpret_cast<uintptr_t>(entryBlock));
+ cu->block_map.Put(cur_block->start_offset, cur_block);
+ entry_block->fall_through = cur_block;
+ InsertGrowableList(cu.get(), cur_block->predecessors,
+ reinterpret_cast<uintptr_t>(entry_block));
/*
* Store back the number of blocks since new blocks may be created when
- * accessing cUnit.
+ * accessing cu.
*/
- cUnit->numBlocks = numBlocks;
+ cu->num_blocks = num_blocks;
/* Identify code range in try blocks and set up the empty catch blocks */
- ProcessTryCatchBlocks(cUnit.get());
+ ProcessTryCatchBlocks(cu.get());
/* Set up for simple method detection */
- int numPatterns = sizeof(specialPatterns)/sizeof(specialPatterns[0]);
- bool livePattern = (numPatterns > 0) && !(cUnit->disableOpt & (1 << kMatch));
- bool* deadPattern =
- static_cast<bool*>(NewMem(cUnit.get(), sizeof(bool) * numPatterns, true, kAllocMisc));
- SpecialCaseHandler specialCase = kNoHandler;
- int patternPos = 0;
+ int num_patterns = sizeof(special_patterns)/sizeof(special_patterns[0]);
+ bool live_pattern = (num_patterns > 0) && !(cu->disable_opt & (1 << kMatch));
+ bool* dead_pattern =
+ static_cast<bool*>(NewMem(cu.get(), sizeof(bool) * num_patterns, true, kAllocMisc));
+ SpecialCaseHandler special_case = kNoHandler;
+ int pattern_pos = 0;
/* Parse all instructions and put them into containing basic blocks */
- while (codePtr < codeEnd) {
- MIR *insn = static_cast<MIR *>(NewMem(cUnit.get(), sizeof(MIR), true, kAllocMIR));
- insn->offset = curOffset;
- int width = ParseInsn(cUnit.get(), codePtr, &insn->dalvikInsn, false);
+ while (code_ptr < code_end) {
+ MIR *insn = static_cast<MIR *>(NewMem(cu.get(), sizeof(MIR), true, kAllocMIR));
+ insn->offset = cur_offset;
+ int width = ParseInsn(cu.get(), code_ptr, &insn->dalvikInsn, false);
insn->width = width;
Instruction::Code opcode = insn->dalvikInsn.opcode;
- if (cUnit->opcodeCount != NULL) {
- cUnit->opcodeCount[static_cast<int>(opcode)]++;
+ if (cu->opcode_count != NULL) {
+ cu->opcode_count[static_cast<int>(opcode)]++;
}
/* Terminate when the data section is seen */
@@ -953,257 +953,257 @@
break;
/* Possible simple method? */
- if (livePattern) {
- livePattern = false;
- specialCase = kNoHandler;
- for (int i = 0; i < numPatterns; i++) {
- if (!deadPattern[i]) {
- if (specialPatterns[i].opcodes[patternPos] == opcode) {
- livePattern = true;
- specialCase = specialPatterns[i].handlerCode;
+ if (live_pattern) {
+ live_pattern = false;
+ special_case = kNoHandler;
+ for (int i = 0; i < num_patterns; i++) {
+ if (!dead_pattern[i]) {
+ if (special_patterns[i].opcodes[pattern_pos] == opcode) {
+ live_pattern = true;
+ special_case = special_patterns[i].handler_code;
} else {
- deadPattern[i] = true;
+ dead_pattern[i] = true;
}
}
}
- patternPos++;
+ pattern_pos++;
}
- AppendMIR(curBlock, insn);
+ AppendMIR(cur_block, insn);
- codePtr += width;
+ code_ptr += width;
int flags = Instruction::FlagsOf(insn->dalvikInsn.opcode);
- int dfFlags = oatDataFlowAttributes[insn->dalvikInsn.opcode];
+ int df_flags = oat_data_flow_attributes[insn->dalvikInsn.opcode];
- if (dfFlags & DF_HAS_DEFS) {
- cUnit->defCount += (dfFlags & DF_A_WIDE) ? 2 : 1;
+ if (df_flags & DF_HAS_DEFS) {
+ cu->def_count += (df_flags & DF_A_WIDE) ? 2 : 1;
}
if (flags & Instruction::kBranch) {
- curBlock = ProcessCanBranch(cUnit.get(), curBlock, insn, curOffset,
- width, flags, codePtr, codeEnd);
+ cur_block = ProcessCanBranch(cu.get(), cur_block, insn, cur_offset,
+ width, flags, code_ptr, code_end);
} else if (flags & Instruction::kReturn) {
- curBlock->fallThrough = exitBlock;
- InsertGrowableList(cUnit.get(), exitBlock->predecessors,
- reinterpret_cast<uintptr_t>(curBlock));
+ cur_block->fall_through = exit_block;
+ InsertGrowableList(cu.get(), exit_block->predecessors,
+ reinterpret_cast<uintptr_t>(cur_block));
/*
* Terminate the current block if there are instructions
* afterwards.
*/
- if (codePtr < codeEnd) {
+ if (code_ptr < code_end) {
/*
* Create a fallthrough block for real instructions
* (incl. NOP).
*/
- if (ContentIsInsn(codePtr)) {
- FindBlock(cUnit.get(), curOffset + width,
+ if (ContentIsInsn(code_ptr)) {
+ FindBlock(cu.get(), cur_offset + width,
/* split */
false,
/* create */
true,
- /* immedPredBlockP */
+ /* immed_pred_block_p */
NULL);
}
}
} else if (flags & Instruction::kThrow) {
- curBlock = ProcessCanThrow(cUnit.get(), curBlock, insn, curOffset,
- width, flags, tryBlockAddr, codePtr, codeEnd);
+ cur_block = ProcessCanThrow(cu.get(), cur_block, insn, cur_offset,
+ width, flags, try_block_addr, code_ptr, code_end);
} else if (flags & Instruction::kSwitch) {
- ProcessCanSwitch(cUnit.get(), curBlock, insn, curOffset, width, flags);
+ ProcessCanSwitch(cu.get(), cur_block, insn, cur_offset, width, flags);
}
- curOffset += width;
- BasicBlock *nextBlock = FindBlock(cUnit.get(), curOffset,
+ cur_offset += width;
+ BasicBlock *next_block = FindBlock(cu.get(), cur_offset,
/* split */
false,
/* create */
false,
- /* immedPredBlockP */
+ /* immed_pred_block_p */
NULL);
- if (nextBlock) {
+ if (next_block) {
/*
* The next instruction could be the target of a previously parsed
* forward branch so a block is already created. If the current
* instruction is not an unconditional branch, connect them through
* the fall-through link.
*/
- DCHECK(curBlock->fallThrough == NULL ||
- curBlock->fallThrough == nextBlock ||
- curBlock->fallThrough == exitBlock);
+ DCHECK(cur_block->fall_through == NULL ||
+ cur_block->fall_through == next_block ||
+ cur_block->fall_through == exit_block);
- if ((curBlock->fallThrough == NULL) && (flags & Instruction::kContinue)) {
- curBlock->fallThrough = nextBlock;
- InsertGrowableList(cUnit.get(), nextBlock->predecessors,
- reinterpret_cast<uintptr_t>(curBlock));
+ if ((cur_block->fall_through == NULL) && (flags & Instruction::kContinue)) {
+ cur_block->fall_through = next_block;
+ InsertGrowableList(cu.get(), next_block->predecessors,
+ reinterpret_cast<uintptr_t>(cur_block));
}
- curBlock = nextBlock;
+ cur_block = next_block;
}
}
- if (!(cUnit->disableOpt & (1 << kSkipLargeMethodOptimization))) {
- if ((cUnit->numBlocks > MANY_BLOCKS) ||
- ((cUnit->numBlocks > MANY_BLOCKS_INITIALIZER) &&
+ if (!(cu->disable_opt & (1 << kSkipLargeMethodOptimization))) {
+ if ((cu->num_blocks > MANY_BLOCKS) ||
+ ((cu->num_blocks > MANY_BLOCKS_INITIALIZER) &&
PrettyMethod(method_idx, dex_file, false).find("init>") !=
std::string::npos)) {
- cUnit->qdMode = true;
+ cu->qd_mode = true;
}
}
- if (cUnit->qdMode) {
+ if (cu->qd_mode) {
// Bitcode generation requires full dataflow analysis
- cUnit->disableDataflow = !cUnit->genBitcode;
+ cu->disable_dataflow = !cu->gen_bitcode;
// Disable optimizations that require dataflow/ssa
- cUnit->disableOpt |= (1 << kBBOpt) | (1 << kPromoteRegs) | (1 << kNullCheckElimination);
- if (cUnit->printMe) {
+ cu->disable_opt |= (1 << kBBOpt) | (1 << kPromoteRegs) | (1 << kNullCheckElimination);
+ if (cu->verbose) {
LOG(INFO) << "QD mode enabled: "
<< PrettyMethod(method_idx, dex_file)
- << " num blocks: " << cUnit->numBlocks;
+ << " num blocks: " << cu->num_blocks;
}
}
- if (cUnit->printMe) {
- DumpCompilationUnit(cUnit.get());
+ if (cu->verbose) {
+ DumpCompilationUnit(cu.get());
}
/* Do a code layout pass */
- CodeLayout(cUnit.get());
+ CodeLayout(cu.get());
- if (cUnit->enableDebug & (1 << kDebugVerifyDataflow)) {
+ if (cu->enable_debug & (1 << kDebugVerifyDataflow)) {
/* Verify if all blocks are connected as claimed */
- DataFlowAnalysisDispatcher(cUnit.get(), VerifyPredInfo, kAllNodes,
- false /* isIterative */);
+ DataFlowAnalysisDispatcher(cu.get(), VerifyPredInfo, kAllNodes,
+ false /* is_iterative */);
}
/* Perform SSA transformation for the whole method */
- SSATransformation(cUnit.get());
+ SSATransformation(cu.get());
/* Do constant propagation */
// TODO: Probably need to make these expandable to support new ssa names
// introduced during MIR optimization passes
- cUnit->isConstantV = AllocBitVector(cUnit.get(), cUnit->numSSARegs,
+ cu->is_constant_v = AllocBitVector(cu.get(), cu->num_ssa_regs,
false /* not expandable */);
- cUnit->constantValues =
- static_cast<int*>(NewMem(cUnit.get(), sizeof(int) * cUnit->numSSARegs, true, kAllocDFInfo));
- DataFlowAnalysisDispatcher(cUnit.get(), DoConstantPropogation,
+ cu->constant_values =
+ static_cast<int*>(NewMem(cu.get(), sizeof(int) * cu->num_ssa_regs, true, kAllocDFInfo));
+ DataFlowAnalysisDispatcher(cu.get(), DoConstantPropogation,
kAllNodes,
- false /* isIterative */);
+ false /* is_iterative */);
/* Detect loops */
- LoopDetection(cUnit.get());
+ LoopDetection(cu.get());
/* Count uses */
- MethodUseCount(cUnit.get());
+ MethodUseCount(cu.get());
/* Perform null check elimination */
- NullCheckElimination(cUnit.get());
+ NullCheckElimination(cu.get());
/* Combine basic blocks where possible */
- BasicBlockCombine(cUnit.get());
+ BasicBlockCombine(cu.get());
/* Do some basic block optimizations */
- BasicBlockOptimization(cUnit.get());
+ BasicBlockOptimization(cu.get());
- if (cUnit->enableDebug & (1 << kDebugDumpCheckStats)) {
- DumpCheckStats(cUnit.get());
+ if (cu->enable_debug & (1 << kDebugDumpCheckStats)) {
+ DumpCheckStats(cu.get());
}
- CompilerInitializeRegAlloc(cUnit.get()); // Needs to happen after SSA naming
+ CompilerInitializeRegAlloc(cu.get()); // Needs to happen after SSA naming
/* Allocate Registers using simple local allocation scheme */
- SimpleRegAlloc(cUnit.get());
+ SimpleRegAlloc(cu.get());
/* Go the LLVM path? */
- if (cUnit->genBitcode) {
+ if (cu->gen_bitcode) {
// MIR->Bitcode
- MethodMIR2Bitcode(cUnit.get());
- if (compilerBackend == kPortable) {
+ MethodMIR2Bitcode(cu.get());
+ if (compiler_backend == kPortable) {
// all done
- ArenaReset(cUnit.get());
+ ArenaReset(cu.get());
return NULL;
}
// Bitcode->LIR
- MethodBitcode2LIR(cUnit.get());
+ MethodBitcode2LIR(cu.get());
} else {
- if (specialCase != kNoHandler) {
+ if (special_case != kNoHandler) {
/*
* Custom codegen for special cases. If for any reason the
- * special codegen doesn't succeed, cUnit->firstLIRInsn will
+ * special codegen doesn't succeed, cu->first_lir_insn will
* be set to NULL.
*/
- SpecialMIR2LIR(cUnit.get(), specialCase);
+ SpecialMIR2LIR(cu.get(), special_case);
}
/* Convert MIR to LIR, etc. */
- if (cUnit->firstLIRInsn == NULL) {
- MethodMIR2LIR(cUnit.get());
+ if (cu->first_lir_insn == NULL) {
+ MethodMIR2LIR(cu.get());
}
}
// Debugging only
- if (cUnit->enableDebug & (1 << kDebugDumpCFG)) {
- DumpCFG(cUnit.get(), "/sdcard/cfg/");
+ if (cu->enable_debug & (1 << kDebugDumpCFG)) {
+ DumpCFG(cu.get(), "/sdcard/cfg/");
}
/* Method is not empty */
- if (cUnit->firstLIRInsn) {
+ if (cu->first_lir_insn) {
// mark the targets of switch statement case labels
- ProcessSwitchTables(cUnit.get());
+ ProcessSwitchTables(cu.get());
/* Convert LIR into machine code. */
- AssembleLIR(cUnit.get());
+ AssembleLIR(cu.get());
- if (cUnit->printMe) {
- CodegenDump(cUnit.get());
+ if (cu->verbose) {
+ CodegenDump(cu.get());
}
- if (cUnit->opcodeCount != NULL) {
+ if (cu->opcode_count != NULL) {
LOG(INFO) << "Opcode Count";
for (int i = 0; i < kNumPackedOpcodes; i++) {
- if (cUnit->opcodeCount[i] != 0) {
+ if (cu->opcode_count[i] != 0) {
LOG(INFO) << "-C- "
<< Instruction::Name(static_cast<Instruction::Code>(i))
- << " " << cUnit->opcodeCount[i];
+ << " " << cu->opcode_count[i];
}
}
}
}
- // Combine vmap tables - core regs, then fp regs - into vmapTable
- std::vector<uint16_t> vmapTable;
+ // Combine vmap tables - core regs, then fp regs - into vmap_table
+ std::vector<uint16_t> vmap_table;
// Core regs may have been inserted out of order - sort first
- std::sort(cUnit->coreVmapTable.begin(), cUnit->coreVmapTable.end());
- for (size_t i = 0 ; i < cUnit->coreVmapTable.size(); i++) {
+ std::sort(cu->core_vmap_table.begin(), cu->core_vmap_table.end());
+ for (size_t i = 0 ; i < cu->core_vmap_table.size(); i++) {
// Copy, stripping out the phys register sort key
- vmapTable.push_back(~(-1 << VREG_NUM_WIDTH) & cUnit->coreVmapTable[i]);
+ vmap_table.push_back(~(-1 << VREG_NUM_WIDTH) & cu->core_vmap_table[i]);
}
// If we have a frame, push a marker to take place of lr
- if (cUnit->frameSize > 0) {
- vmapTable.push_back(INVALID_VREG);
+ if (cu->frame_size > 0) {
+ vmap_table.push_back(INVALID_VREG);
} else {
- DCHECK_EQ(__builtin_popcount(cUnit->coreSpillMask), 0);
- DCHECK_EQ(__builtin_popcount(cUnit->fpSpillMask), 0);
+ DCHECK_EQ(__builtin_popcount(cu->core_spill_mask), 0);
+ DCHECK_EQ(__builtin_popcount(cu->fp_spill_mask), 0);
}
// Combine vmap tables - core regs, then fp regs. fp regs already sorted
- for (uint32_t i = 0; i < cUnit->fpVmapTable.size(); i++) {
- vmapTable.push_back(cUnit->fpVmapTable[i]);
+ for (uint32_t i = 0; i < cu->fp_vmap_table.size(); i++) {
+ vmap_table.push_back(cu->fp_vmap_table[i]);
}
CompiledMethod* result =
- new CompiledMethod(cUnit->instructionSet, cUnit->codeBuffer,
- cUnit->frameSize, cUnit->coreSpillMask, cUnit->fpSpillMask,
- cUnit->combinedMappingTable, vmapTable, cUnit->nativeGcMap);
+ new CompiledMethod(cu->instruction_set, cu->code_buffer,
+ cu->frame_size, cu->core_spill_mask, cu->fp_spill_mask,
+ cu->combined_mapping_table, vmap_table, cu->native_gc_map);
VLOG(compiler) << "Compiled " << PrettyMethod(method_idx, dex_file)
- << " (" << (cUnit->codeBuffer.size() * sizeof(cUnit->codeBuffer[0]))
+ << " (" << (cu->code_buffer.size() * sizeof(cu->code_buffer[0]))
<< " bytes)";
#ifdef WITH_MEMSTATS
- if (cUnit->enableDebug & (1 << kDebugShowMemoryUsage)) {
- DumpMemStats(cUnit.get());
+ if (cu->enable_debug & (1 << kDebugShowMemoryUsage)) {
+ DumpMemStats(cu.get());
}
#endif
- ArenaReset(cUnit.get());
+ ArenaReset(cu.get());
return result;
}
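
The vmap packing near the end of CompileMethod strips a physical-register sort key from each core entry. A small sketch of that bit trick, using an unsigned mask to sidestep the `-1 <<` idiom (the width constant here is illustrative, not the real VREG_NUM_WIDTH):

    #include <stdint.h>
    #include <algorithm>
    #include <vector>

    // Each entry is assumed to be 'sort_key << kVregNumWidth | vreg_number';
    // sorting orders entries by physical register, and masking then recovers
    // the virtual register number alone.
    static const uint32_t kVregNumWidth = 10;  // hypothetical value
    static const uint32_t kVregMask = (1u << kVregNumWidth) - 1;

    std::vector<uint16_t> PackCoreVmap(std::vector<uint32_t> core_vmap_table)
    {
      std::sort(core_vmap_table.begin(), core_vmap_table.end());
      std::vector<uint16_t> vmap_table;
      for (size_t i = 0; i < core_vmap_table.size(); i++) {
        vmap_table.push_back(static_cast<uint16_t>(core_vmap_table[i] & kVregMask));
      }
      return vmap_table;
    }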
@@ -1214,10 +1214,10 @@
uint32_t access_flags, InvokeType invoke_type,
uint32_t method_idx, jobject class_loader,
const DexFile& dex_file,
- LLVMInfo* llvmInfo)
+ LLVMInfo* llvm_info)
{
return CompileMethod(compiler, backend, code_item, access_flags, invoke_type, method_idx, class_loader,
- dex_file, llvmInfo);
+ dex_file, llvm_info);
}
} // namespace art
@@ -1232,5 +1232,5 @@
// TODO: check method fingerprint here to determine appropriate backend type. Until then, use build default
art::CompilerBackend backend = compiler.GetCompilerBackend();
return art::CompileOneMethod(compiler, backend, code_item, access_flags, invoke_type,
- method_idx, class_loader, dex_file, NULL /* use thread llvmInfo */);
+ method_idx, class_loader, dex_file, NULL /* use thread llvm_info */);
}
diff --git a/src/compiler/frontend.h b/src/compiler/frontend.h
index 1898b9b..f7e76f8 100644
--- a/src/compiler/frontend.h
+++ b/src/compiler/frontend.h
@@ -50,7 +50,7 @@
#define MAX_ASSEMBLER_RETRIES 50
/* Suppress optimization if corresponding bit set */
-enum optControlVector {
+enum opt_control_vector {
kLoadStoreElimination = 0,
kLoadHoisting,
kSuppressLoads,
@@ -136,7 +136,7 @@
struct CompilationUnit;
struct BasicBlock;
-BasicBlock* FindBlock(CompilationUnit* cUnit, unsigned int codeOffset);
+BasicBlock* FindBlock(CompilationUnit* cu, unsigned int code_offset);
void ReplaceSpecialChars(std::string& str);
} // namespace art
diff --git a/src/compiler/intermediate_rep.cc b/src/compiler/intermediate_rep.cc
index dd2740d..c06693e 100644
--- a/src/compiler/intermediate_rep.cc
+++ b/src/compiler/intermediate_rep.cc
@@ -19,63 +19,63 @@
namespace art {
/* Allocate a new basic block */
-BasicBlock* NewMemBB(CompilationUnit* cUnit, BBType blockType, int blockId)
+BasicBlock* NewMemBB(CompilationUnit* cu, BBType block_type, int block_id)
{
- BasicBlock* bb = static_cast<BasicBlock*>(NewMem(cUnit, sizeof(BasicBlock), true, kAllocBB));
- bb->blockType = blockType;
- bb->id = blockId;
+ BasicBlock* bb = static_cast<BasicBlock*>(NewMem(cu, sizeof(BasicBlock), true, kAllocBB));
+ bb->block_type = block_type;
+ bb->id = block_id;
bb->predecessors = static_cast<GrowableList*>
- (NewMem(cUnit, sizeof(GrowableList), false, kAllocPredecessors));
- CompilerInitGrowableList(cUnit, bb->predecessors,
- (blockType == kExitBlock) ? 2048 : 2,
+ (NewMem(cu, sizeof(GrowableList), false, kAllocPredecessors));
+ CompilerInitGrowableList(cu, bb->predecessors,
+ (block_type == kExitBlock) ? 2048 : 2,
kListPredecessors);
- cUnit->blockIdMap.Put(blockId, blockId);
+ cu->block_id_map.Put(block_id, block_id);
return bb;
}
/* Insert an MIR instruction to the end of a basic block */
void AppendMIR(BasicBlock* bb, MIR* mir)
{
- if (bb->firstMIRInsn == NULL) {
- DCHECK(bb->lastMIRInsn == NULL);
- bb->lastMIRInsn = bb->firstMIRInsn = mir;
+ if (bb->first_mir_insn == NULL) {
+ DCHECK(bb->last_mir_insn == NULL);
+ bb->last_mir_insn = bb->first_mir_insn = mir;
mir->prev = mir->next = NULL;
} else {
- bb->lastMIRInsn->next = mir;
- mir->prev = bb->lastMIRInsn;
+ bb->last_mir_insn->next = mir;
+ mir->prev = bb->last_mir_insn;
mir->next = NULL;
- bb->lastMIRInsn = mir;
+ bb->last_mir_insn = mir;
}
}
/* Insert an MIR instruction to the head of a basic block */
void PrependMIR(BasicBlock* bb, MIR* mir)
{
- if (bb->firstMIRInsn == NULL) {
- DCHECK(bb->lastMIRInsn == NULL);
- bb->lastMIRInsn = bb->firstMIRInsn = mir;
+ if (bb->first_mir_insn == NULL) {
+ DCHECK(bb->last_mir_insn == NULL);
+ bb->last_mir_insn = bb->first_mir_insn = mir;
mir->prev = mir->next = NULL;
} else {
- bb->firstMIRInsn->prev = mir;
- mir->next = bb->firstMIRInsn;
+ bb->first_mir_insn->prev = mir;
+ mir->next = bb->first_mir_insn;
mir->prev = NULL;
- bb->firstMIRInsn = mir;
+ bb->first_mir_insn = mir;
}
}
/* Insert a MIR instruction after the specified MIR */
-void InsertMIRAfter(BasicBlock* bb, MIR* currentMIR, MIR* newMIR)
+void InsertMIRAfter(BasicBlock* bb, MIR* current_mir, MIR* new_mir)
{
- newMIR->prev = currentMIR;
- newMIR->next = currentMIR->next;
- currentMIR->next = newMIR;
+ new_mir->prev = current_mir;
+ new_mir->next = current_mir->next;
+ current_mir->next = new_mir;
- if (newMIR->next) {
+ if (new_mir->next) {
/* Is not the last MIR in the block */
- newMIR->next->prev = newMIR;
+ new_mir->next->prev = new_mir;
} else {
/* Is the last MIR in the block */
- bb->lastMIRInsn = newMIR;
+ bb->last_mir_insn = new_mir;
}
}
@@ -83,17 +83,17 @@
* Append an LIR instruction to the LIR list maintained by a compilation
* unit
*/
-void AppendLIR(CompilationUnit *cUnit, LIR* lir)
+void AppendLIR(CompilationUnit *cu, LIR* lir)
{
- if (cUnit->firstLIRInsn == NULL) {
- DCHECK(cUnit->lastLIRInsn == NULL);
- cUnit->lastLIRInsn = cUnit->firstLIRInsn = lir;
+ if (cu->first_lir_insn == NULL) {
+ DCHECK(cu->last_lir_insn == NULL);
+ cu->last_lir_insn = cu->first_lir_insn = lir;
lir->prev = lir->next = NULL;
} else {
- cUnit->lastLIRInsn->next = lir;
- lir->prev = cUnit->lastLIRInsn;
+ cu->last_lir_insn->next = lir;
+ lir->prev = cu->last_lir_insn;
lir->next = NULL;
- cUnit->lastLIRInsn = lir;
+ cu->last_lir_insn = lir;
}
}
@@ -101,31 +101,31 @@
* Insert an LIR instruction before the current instruction, which cannot be the
* first instruction.
*
- * prevLIR <-> newLIR <-> currentLIR
+ * prev_lir <-> new_lir <-> current_lir
*/
-void InsertLIRBefore(LIR* currentLIR, LIR* newLIR)
+void InsertLIRBefore(LIR* current_lir, LIR* new_lir)
{
- DCHECK(currentLIR->prev != NULL);
- LIR *prevLIR = currentLIR->prev;
+ DCHECK(current_lir->prev != NULL);
+ LIR *prev_lir = current_lir->prev;
- prevLIR->next = newLIR;
- newLIR->prev = prevLIR;
- newLIR->next = currentLIR;
- currentLIR->prev = newLIR;
+ prev_lir->next = new_lir;
+ new_lir->prev = prev_lir;
+ new_lir->next = current_lir;
+ current_lir->prev = new_lir;
}
/*
* Insert an LIR instruction after the current instruction, which cannot be the
* last instruction.
*
- * currentLIR -> newLIR -> oldNext
+ * current_lir -> new_lir -> old_next
*/
-void InsertLIRAfter(LIR* currentLIR, LIR* newLIR)
+void InsertLIRAfter(LIR* current_lir, LIR* new_lir)
{
- newLIR->prev = currentLIR;
- newLIR->next = currentLIR->next;
- currentLIR->next = newLIR;
- newLIR->next->prev = newLIR;
+ new_lir->prev = current_lir;
+ new_lir->next = current_lir->next;
+ current_lir->next = new_lir;
+ new_lir->next->prev = new_lir;
}
} // namespace art
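
Since InsertLIRAfter dereferences the successor unconditionally, the current instruction must not be the list tail. A defensive variant (a sketch, not this CL's code) that tolerates the tail by also updating the compilation unit's last_lir_insn:

    static void InsertLIRAfterSafe(CompilationUnit* cu, LIR* current_lir,
                                   LIR* new_lir)
    {
      new_lir->prev = current_lir;
      new_lir->next = current_lir->next;
      current_lir->next = new_lir;
      if (new_lir->next != NULL) {
        new_lir->next->prev = new_lir;  // inserted in the middle of the list
      } else {
        cu->last_lir_insn = new_lir;    // current_lir was the tail
      }
    }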
diff --git a/src/compiler/ralloc.cc b/src/compiler/ralloc.cc
index 4eab177..4f4d489 100644
--- a/src/compiler/ralloc.cc
+++ b/src/compiler/ralloc.cc
@@ -20,68 +20,68 @@
namespace art {
-static bool SetFp(CompilationUnit* cUnit, int index, bool isFP) {
+static bool SetFp(CompilationUnit* cu, int index, bool is_fp) {
bool change = false;
- if (isFP && !cUnit->regLocation[index].fp) {
- cUnit->regLocation[index].fp = true;
- cUnit->regLocation[index].defined = true;
+ if (is_fp && !cu->reg_location[index].fp) {
+ cu->reg_location[index].fp = true;
+ cu->reg_location[index].defined = true;
change = true;
}
return change;
}
-static bool SetCore(CompilationUnit* cUnit, int index, bool isCore) {
+static bool SetCore(CompilationUnit* cu, int index, bool is_core) {
bool change = false;
- if (isCore && !cUnit->regLocation[index].defined) {
- cUnit->regLocation[index].core = true;
- cUnit->regLocation[index].defined = true;
+ if (is_core && !cu->reg_location[index].defined) {
+ cu->reg_location[index].core = true;
+ cu->reg_location[index].defined = true;
change = true;
}
return change;
}
-static bool SetRef(CompilationUnit* cUnit, int index, bool isRef) {
+static bool SetRef(CompilationUnit* cu, int index, bool is_ref) {
bool change = false;
- if (isRef && !cUnit->regLocation[index].defined) {
- cUnit->regLocation[index].ref = true;
- cUnit->regLocation[index].defined = true;
+ if (is_ref && !cu->reg_location[index].defined) {
+ cu->reg_location[index].ref = true;
+ cu->reg_location[index].defined = true;
change = true;
}
return change;
}
-static bool SetWide(CompilationUnit* cUnit, int index, bool isWide) {
+static bool SetWide(CompilationUnit* cu, int index, bool is_wide) {
bool change = false;
- if (isWide && !cUnit->regLocation[index].wide) {
- cUnit->regLocation[index].wide = true;
+ if (is_wide && !cu->reg_location[index].wide) {
+ cu->reg_location[index].wide = true;
change = true;
}
return change;
}
-static bool SetHigh(CompilationUnit* cUnit, int index, bool isHigh) {
+static bool SetHigh(CompilationUnit* cu, int index, bool is_high) {
bool change = false;
- if (isHigh && !cUnit->regLocation[index].highWord) {
- cUnit->regLocation[index].highWord = true;
+ if (is_high && !cu->reg_location[index].high_word) {
+ cu->reg_location[index].high_word = true;
change = true;
}
return change;
}
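
Each Set* helper above reports whether it changed anything precisely so a caller can iterate type inference to a fixed point. A minimal sketch of such a driver loop (the function is hypothetical; in this file the real transfer function is InferTypeAndSize and the dispatch goes through DataFlowAnalysisDispatcher):

    // Re-run a per-block transfer function until an entire pass changes nothing.
    static void IterateToFixedPoint(CompilationUnit* cu,
                                    bool (*transfer)(CompilationUnit*, BasicBlock*),
                                    BasicBlock** blocks, int num_blocks)
    {
      bool changed = true;
      while (changed) {
        changed = false;
        for (int i = 0; i < num_blocks; i++) {
          changed |= transfer(cu, blocks[i]);  // e.g. InferTypeAndSize
        }
      }
    }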
-static bool RemapNames(CompilationUnit* cUnit, BasicBlock* bb)
+static bool RemapNames(CompilationUnit* cu, BasicBlock* bb)
{
- if (bb->blockType != kDalvikByteCode && bb->blockType != kEntryBlock &&
- bb->blockType != kExitBlock)
+ if (bb->block_type != kDalvikByteCode && bb->block_type != kEntryBlock &&
+ bb->block_type != kExitBlock)
return false;
- for (MIR* mir = bb->firstMIRInsn; mir; mir = mir->next) {
- SSARepresentation *ssaRep = mir->ssaRep;
- if (ssaRep) {
- for (int i = 0; i < ssaRep->numUses; i++) {
- ssaRep->uses[i] = cUnit->phiAliasMap[ssaRep->uses[i]];
+ for (MIR* mir = bb->first_mir_insn; mir; mir = mir->next) {
+ SSARepresentation *ssa_rep = mir->ssa_rep;
+ if (ssa_rep) {
+ for (int i = 0; i < ssa_rep->num_uses; i++) {
+ ssa_rep->uses[i] = cu->phi_alias_map[ssa_rep->uses[i]];
}
- for (int i = 0; i < ssaRep->numDefs; i++) {
- ssaRep->defs[i] = cUnit->phiAliasMap[ssaRep->defs[i]];
+ for (int i = 0; i < ssa_rep->num_defs; i++) {
+ ssa_rep->defs[i] = cu->phi_alias_map[ssa_rep->defs[i]];
}
}
}
@@ -93,34 +93,34 @@
* as it doesn't propagate. We're guaranteed at least one pass through
* the cfg.
*/
-static bool InferTypeAndSize(CompilationUnit* cUnit, BasicBlock* bb)
+static bool InferTypeAndSize(CompilationUnit* cu, BasicBlock* bb)
{
MIR *mir;
bool changed = false; // Did anything change?
- if (bb->dataFlowInfo == NULL) return false;
- if (bb->blockType != kDalvikByteCode && bb->blockType != kEntryBlock)
+ if (bb->data_flow_info == NULL) return false;
+ if (bb->block_type != kDalvikByteCode && bb->block_type != kEntryBlock)
return false;
- for (mir = bb->firstMIRInsn; mir; mir = mir->next) {
- SSARepresentation *ssaRep = mir->ssaRep;
- if (ssaRep) {
- int attrs = oatDataFlowAttributes[mir->dalvikInsn.opcode];
+ for (mir = bb->first_mir_insn; mir; mir = mir->next) {
+ SSARepresentation *ssa_rep = mir->ssa_rep;
+ if (ssa_rep) {
+ int attrs = oat_data_flow_attributes[mir->dalvikInsn.opcode];
// Handle defs
if (attrs & DF_DA) {
if (attrs & DF_CORE_A) {
- changed |= SetCore(cUnit, ssaRep->defs[0], true);
+ changed |= SetCore(cu, ssa_rep->defs[0], true);
}
if (attrs & DF_REF_A) {
- changed |= SetRef(cUnit, ssaRep->defs[0], true);
+ changed |= SetRef(cu, ssa_rep->defs[0], true);
}
if (attrs & DF_A_WIDE) {
- cUnit->regLocation[ssaRep->defs[0]].wide = true;
- cUnit->regLocation[ssaRep->defs[1]].wide = true;
- cUnit->regLocation[ssaRep->defs[1]].highWord = true;
- DCHECK_EQ(SRegToVReg(cUnit, ssaRep->defs[0])+1,
- SRegToVReg(cUnit, ssaRep->defs[1]));
+ cu->reg_location[ssa_rep->defs[0]].wide = true;
+ cu->reg_location[ssa_rep->defs[1]].wide = true;
+ cu->reg_location[ssa_rep->defs[1]].high_word = true;
+ DCHECK_EQ(SRegToVReg(cu, ssa_rep->defs[0])+1,
+ SRegToVReg(cu, ssa_rep->defs[1]));
}
}
@@ -128,17 +128,17 @@
int next = 0;
if (attrs & DF_UA) {
if (attrs & DF_CORE_A) {
- changed |= SetCore(cUnit, ssaRep->uses[next], true);
+ changed |= SetCore(cu, ssa_rep->uses[next], true);
}
if (attrs & DF_REF_A) {
- changed |= SetRef(cUnit, ssaRep->uses[next], true);
+ changed |= SetRef(cu, ssa_rep->uses[next], true);
}
if (attrs & DF_A_WIDE) {
- cUnit->regLocation[ssaRep->uses[next]].wide = true;
- cUnit->regLocation[ssaRep->uses[next + 1]].wide = true;
- cUnit->regLocation[ssaRep->uses[next + 1]].highWord = true;
- DCHECK_EQ(SRegToVReg(cUnit, ssaRep->uses[next])+1,
- SRegToVReg(cUnit, ssaRep->uses[next + 1]));
+ cu->reg_location[ssa_rep->uses[next]].wide = true;
+ cu->reg_location[ssa_rep->uses[next + 1]].wide = true;
+ cu->reg_location[ssa_rep->uses[next + 1]].high_word = true;
+ DCHECK_EQ(SRegToVReg(cu, ssa_rep->uses[next])+1,
+ SRegToVReg(cu, ssa_rep->uses[next + 1]));
next += 2;
} else {
next++;
@@ -146,17 +146,17 @@
}
if (attrs & DF_UB) {
if (attrs & DF_CORE_B) {
- changed |= SetCore(cUnit, ssaRep->uses[next], true);
+ changed |= SetCore(cu, ssa_rep->uses[next], true);
}
if (attrs & DF_REF_B) {
- changed |= SetRef(cUnit, ssaRep->uses[next], true);
+ changed |= SetRef(cu, ssa_rep->uses[next], true);
}
if (attrs & DF_B_WIDE) {
- cUnit->regLocation[ssaRep->uses[next]].wide = true;
- cUnit->regLocation[ssaRep->uses[next + 1]].wide = true;
- cUnit->regLocation[ssaRep->uses[next + 1]].highWord = true;
- DCHECK_EQ(SRegToVReg(cUnit, ssaRep->uses[next])+1,
- SRegToVReg(cUnit, ssaRep->uses[next + 1]));
+ cu->reg_location[ssa_rep->uses[next]].wide = true;
+ cu->reg_location[ssa_rep->uses[next + 1]].wide = true;
+ cu->reg_location[ssa_rep->uses[next + 1]].high_word = true;
+ DCHECK_EQ(SRegToVReg(cu, ssa_rep->uses[next])+1,
+ SRegToVReg(cu, ssa_rep->uses[next + 1]));
next += 2;
} else {
next++;
@@ -164,17 +164,17 @@
}
if (attrs & DF_UC) {
if (attrs & DF_CORE_C) {
- changed |= SetCore(cUnit, ssaRep->uses[next], true);
+ changed |= SetCore(cu, ssa_rep->uses[next], true);
}
if (attrs & DF_REF_C) {
- changed |= SetRef(cUnit, ssaRep->uses[next], true);
+ changed |= SetRef(cu, ssa_rep->uses[next], true);
}
if (attrs & DF_C_WIDE) {
- cUnit->regLocation[ssaRep->uses[next]].wide = true;
- cUnit->regLocation[ssaRep->uses[next + 1]].wide = true;
- cUnit->regLocation[ssaRep->uses[next + 1]].highWord = true;
- DCHECK_EQ(SRegToVReg(cUnit, ssaRep->uses[next])+1,
- SRegToVReg(cUnit, ssaRep->uses[next + 1]));
+ cu->reg_location[ssa_rep->uses[next]].wide = true;
+ cu->reg_location[ssa_rep->uses[next + 1]].wide = true;
+ cu->reg_location[ssa_rep->uses[next + 1]].high_word = true;
+ DCHECK_EQ(SRegToVReg(cu, ssa_rep->uses[next])+1,
+ SRegToVReg(cu, ssa_rep->uses[next + 1]));
}
}
@@ -182,29 +182,29 @@
if ((mir->dalvikInsn.opcode == Instruction::RETURN) ||
(mir->dalvikInsn.opcode == Instruction::RETURN_WIDE) ||
(mir->dalvikInsn.opcode == Instruction::RETURN_OBJECT)) {
- switch(cUnit->shorty[0]) {
+ switch(cu->shorty[0]) {
case 'I':
- changed |= SetCore(cUnit, ssaRep->uses[0], true);
+ changed |= SetCore(cu, ssa_rep->uses[0], true);
break;
case 'J':
- changed |= SetCore(cUnit, ssaRep->uses[0], true);
- changed |= SetCore(cUnit, ssaRep->uses[1], true);
- cUnit->regLocation[ssaRep->uses[0]].wide = true;
- cUnit->regLocation[ssaRep->uses[1]].wide = true;
- cUnit->regLocation[ssaRep->uses[1]].highWord = true;
+ changed |= SetCore(cu, ssa_rep->uses[0], true);
+ changed |= SetCore(cu, ssa_rep->uses[1], true);
+ cu->reg_location[ssa_rep->uses[0]].wide = true;
+ cu->reg_location[ssa_rep->uses[1]].wide = true;
+ cu->reg_location[ssa_rep->uses[1]].high_word = true;
break;
case 'F':
- changed |= SetFp(cUnit, ssaRep->uses[0], true);
+ changed |= SetFp(cu, ssa_rep->uses[0], true);
break;
case 'D':
- changed |= SetFp(cUnit, ssaRep->uses[0], true);
- changed |= SetFp(cUnit, ssaRep->uses[1], true);
- cUnit->regLocation[ssaRep->uses[0]].wide = true;
- cUnit->regLocation[ssaRep->uses[1]].wide = true;
- cUnit->regLocation[ssaRep->uses[1]].highWord = true;
+ changed |= SetFp(cu, ssa_rep->uses[0], true);
+ changed |= SetFp(cu, ssa_rep->uses[1], true);
+ cu->reg_location[ssa_rep->uses[0]].wide = true;
+ cu->reg_location[ssa_rep->uses[1]].wide = true;
+ cu->reg_location[ssa_rep->uses[1]].high_word = true;
break;
case 'L':
- changed |= SetRef(cUnit, ssaRep->uses[0], true);
+ changed |= SetRef(cu, ssa_rep->uses[0], true);
break;
default: break;
}
@@ -218,63 +218,63 @@
(attrs & (DF_FORMAT_35C | DF_FORMAT_3RC))) {
DCHECK_EQ(next, 0);
int target_idx = mir->dalvikInsn.vB;
- const char* shorty = GetShortyFromTargetIdx(cUnit, target_idx);
+ const char* shorty = GetShortyFromTargetIdx(cu, target_idx);
// Handle result type if floating point
if ((shorty[0] == 'F') || (shorty[0] == 'D')) {
- MIR* moveResultMIR = FindMoveResult(cUnit, bb, mir);
+ MIR* move_result_mir = FindMoveResult(cu, bb, mir);
// Result might not be used at all, so no move-result
- if (moveResultMIR && (moveResultMIR->dalvikInsn.opcode !=
+ if (move_result_mir && (move_result_mir->dalvikInsn.opcode !=
Instruction::MOVE_RESULT_OBJECT)) {
- SSARepresentation* tgtRep = moveResultMIR->ssaRep;
- DCHECK(tgtRep != NULL);
- tgtRep->fpDef[0] = true;
- changed |= SetFp(cUnit, tgtRep->defs[0], true);
+ SSARepresentation* tgt_rep = move_result_mir->ssa_rep;
+ DCHECK(tgt_rep != NULL);
+ tgt_rep->fp_def[0] = true;
+ changed |= SetFp(cu, tgt_rep->defs[0], true);
if (shorty[0] == 'D') {
- tgtRep->fpDef[1] = true;
- changed |= SetFp(cUnit, tgtRep->defs[1], true);
+ tgt_rep->fp_def[1] = true;
+ changed |= SetFp(cu, tgt_rep->defs[1], true);
}
}
}
- int numUses = mir->dalvikInsn.vA;
+ int num_uses = mir->dalvikInsn.vA;
// If this is a non-static invoke, mark implicit "this"
if (((mir->dalvikInsn.opcode != Instruction::INVOKE_STATIC) &&
(mir->dalvikInsn.opcode != Instruction::INVOKE_STATIC_RANGE))) {
- cUnit->regLocation[ssaRep->uses[next]].defined = true;
- cUnit->regLocation[ssaRep->uses[next]].ref = true;
+ cu->reg_location[ssa_rep->uses[next]].defined = true;
+ cu->reg_location[ssa_rep->uses[next]].ref = true;
next++;
}
uint32_t cpos = 1;
if (strlen(shorty) > 1) {
- for (int i = next; i < numUses;) {
+ for (int i = next; i < num_uses;) {
DCHECK_LT(cpos, strlen(shorty));
switch (shorty[cpos++]) {
case 'D':
- ssaRep->fpUse[i] = true;
- ssaRep->fpUse[i+1] = true;
- cUnit->regLocation[ssaRep->uses[i]].wide = true;
- cUnit->regLocation[ssaRep->uses[i+1]].wide = true;
- cUnit->regLocation[ssaRep->uses[i+1]].highWord = true;
- DCHECK_EQ(SRegToVReg(cUnit, ssaRep->uses[i])+1,
- SRegToVReg(cUnit, ssaRep->uses[i+1]));
+ ssa_rep->fp_use[i] = true;
+ ssa_rep->fp_use[i+1] = true;
+ cu->reg_location[ssa_rep->uses[i]].wide = true;
+ cu->reg_location[ssa_rep->uses[i+1]].wide = true;
+ cu->reg_location[ssa_rep->uses[i+1]].high_word = true;
+ DCHECK_EQ(SRegToVReg(cu, ssa_rep->uses[i])+1,
+ SRegToVReg(cu, ssa_rep->uses[i+1]));
i++;
break;
case 'J':
- cUnit->regLocation[ssaRep->uses[i]].wide = true;
- cUnit->regLocation[ssaRep->uses[i+1]].wide = true;
- cUnit->regLocation[ssaRep->uses[i+1]].highWord = true;
- DCHECK_EQ(SRegToVReg(cUnit, ssaRep->uses[i])+1,
- SRegToVReg(cUnit, ssaRep->uses[i+1]));
- changed |= SetCore(cUnit, ssaRep->uses[i],true);
+ cu->reg_location[ssa_rep->uses[i]].wide = true;
+ cu->reg_location[ssa_rep->uses[i+1]].wide = true;
+ cu->reg_location[ssa_rep->uses[i+1]].high_word = true;
+ DCHECK_EQ(SRegToVReg(cu, ssa_rep->uses[i])+1,
+ SRegToVReg(cu, ssa_rep->uses[i+1]));
+ changed |= SetCore(cu, ssa_rep->uses[i], true);
i++;
break;
case 'F':
- ssaRep->fpUse[i] = true;
+ ssa_rep->fp_use[i] = true;
break;
case 'L':
- changed |= SetRef(cUnit,ssaRep->uses[i], true);
+ changed |= SetRef(cu, ssa_rep->uses[i], true);
break;
default:
- changed |= SetCore(cUnit,ssaRep->uses[i], true);
+ changed |= SetCore(cu, ssa_rep->uses[i], true);
break;
}
i++;
@@ -282,13 +282,13 @@
}
}
- for (int i=0; ssaRep->fpUse && i< ssaRep->numUses; i++) {
- if (ssaRep->fpUse[i])
- changed |= SetFp(cUnit, ssaRep->uses[i], true);
+ for (int i = 0; ssa_rep->fp_use && i < ssa_rep->num_uses; i++) {
+ if (ssa_rep->fp_use[i])
+ changed |= SetFp(cu, ssa_rep->uses[i], true);
}
- for (int i=0; ssaRep->fpDef && i< ssaRep->numDefs; i++) {
- if (ssaRep->fpDef[i])
- changed |= SetFp(cUnit, ssaRep->defs[i], true);
+ for (int i = 0; ssa_rep->fp_def && i < ssa_rep->num_defs; i++) {
+ if (ssa_rep->fp_def[i])
+ changed |= SetFp(cu, ssa_rep->defs[i], true);
}
// Special-case handling for moves & Phi
if (attrs & (DF_IS_MOVE | DF_NULL_TRANSFER_N)) {
@@ -298,21 +298,21 @@
* The Phi set will include all low words or all high
* words, so we have to treat them specially.
*/
- bool isPhi = (static_cast<int>(mir->dalvikInsn.opcode) ==
+ bool is_phi = (static_cast<int>(mir->dalvikInsn.opcode) ==
kMirOpPhi);
- RegLocation rlTemp = cUnit->regLocation[ssaRep->defs[0]];
- bool definedFP = rlTemp.defined && rlTemp.fp;
- bool definedCore = rlTemp.defined && rlTemp.core;
- bool definedRef = rlTemp.defined && rlTemp.ref;
- bool isWide = rlTemp.wide || ((attrs & DF_A_WIDE) != 0);
- bool isHigh = isPhi && rlTemp.wide && rlTemp.highWord;
- for (int i = 0; i < ssaRep->numUses;i++) {
- rlTemp = cUnit->regLocation[ssaRep->uses[i]];
- definedFP |= rlTemp.defined && rlTemp.fp;
- definedCore |= rlTemp.defined && rlTemp.core;
- definedRef |= rlTemp.defined && rlTemp.ref;
- isWide |= rlTemp.wide;
- isHigh |= isPhi && rlTemp.wide && rlTemp.highWord;
+ RegLocation rl_temp = cu->reg_location[ssa_rep->defs[0]];
+ bool defined_fp = rl_temp.defined && rl_temp.fp;
+ bool defined_core = rl_temp.defined && rl_temp.core;
+ bool defined_ref = rl_temp.defined && rl_temp.ref;
+ bool is_wide = rl_temp.wide || ((attrs & DF_A_WIDE) != 0);
+ bool is_high = is_phi && rl_temp.wide && rl_temp.high_word;
+ for (int i = 0; i < ssa_rep->num_uses; i++) {
+ rl_temp = cu->reg_location[ssa_rep->uses[i]];
+ defined_fp |= rl_temp.defined && rl_temp.fp;
+ defined_core |= rl_temp.defined && rl_temp.core;
+ defined_ref |= rl_temp.defined && rl_temp.ref;
+ is_wide |= rl_temp.wide;
+ is_high |= is_phi && rl_temp.wide && rl_temp.high_word;
}
/*
* TODO: cleaner fix
@@ -328,33 +328,33 @@
* disable register promotion (which is the only thing that
* relies on distinctions between core and fp usages.
*/
- if ((definedFP && (definedCore | definedRef)) &&
- ((cUnit->disableOpt & (1 << kPromoteRegs)) == 0)) {
- LOG(WARNING) << PrettyMethod(cUnit->method_idx, *cUnit->dex_file)
+ if ((defined_fp && (defined_core | defined_ref)) &&
+ ((cu->disable_opt & (1 << kPromoteRegs)) == 0)) {
+ LOG(WARNING) << PrettyMethod(cu->method_idx, *cu->dex_file)
<< " op at block " << bb->id
<< " has both fp and core/ref uses for same def.";
- cUnit->disableOpt |= (1 << kPromoteRegs);
+ cu->disable_opt |= (1 << kPromoteRegs);
}
- changed |= SetFp(cUnit, ssaRep->defs[0], definedFP);
- changed |= SetCore(cUnit, ssaRep->defs[0], definedCore);
- changed |= SetRef(cUnit, ssaRep->defs[0], definedRef);
- changed |= SetWide(cUnit, ssaRep->defs[0], isWide);
- changed |= SetHigh(cUnit, ssaRep->defs[0], isHigh);
+ changed |= SetFp(cu, ssa_rep->defs[0], defined_fp);
+ changed |= SetCore(cu, ssa_rep->defs[0], defined_core);
+ changed |= SetRef(cu, ssa_rep->defs[0], defined_ref);
+ changed |= SetWide(cu, ssa_rep->defs[0], is_wide);
+ changed |= SetHigh(cu, ssa_rep->defs[0], is_high);
if (attrs & DF_A_WIDE) {
- changed |= SetWide(cUnit, ssaRep->defs[1], true);
- changed |= SetHigh(cUnit, ssaRep->defs[1], true);
+ changed |= SetWide(cu, ssa_rep->defs[1], true);
+ changed |= SetHigh(cu, ssa_rep->defs[1], true);
}
- for (int i = 0; i < ssaRep->numUses; i++) {
- changed |= SetFp(cUnit, ssaRep->uses[i], definedFP);
- changed |= SetCore(cUnit, ssaRep->uses[i], definedCore);
- changed |= SetRef(cUnit, ssaRep->uses[i], definedRef);
- changed |= SetWide(cUnit, ssaRep->uses[i], isWide);
- changed |= SetHigh(cUnit, ssaRep->uses[i], isHigh);
+ for (int i = 0; i < ssa_rep->num_uses; i++) {
+ changed |= SetFp(cu, ssa_rep->uses[i], defined_fp);
+ changed |= SetCore(cu, ssa_rep->uses[i], defined_core);
+ changed |= SetRef(cu, ssa_rep->uses[i], defined_ref);
+ changed |= SetWide(cu, ssa_rep->uses[i], is_wide);
+ changed |= SetHigh(cu, ssa_rep->uses[i], is_high);
}
if (attrs & DF_A_WIDE) {
- DCHECK_EQ(ssaRep->numUses, 2);
- changed |= SetWide(cUnit, ssaRep->uses[1], true);
- changed |= SetHigh(cUnit, ssaRep->uses[1], true);
+ DCHECK_EQ(ssa_rep->num_uses, 2);
+ changed |= SetWide(cu, ssa_rep->uses[1], true);
+ changed |= SetHigh(cu, ssa_rep->uses[1], true);
}
}
}
@@ -362,33 +362,33 @@
return changed;
}
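
InferTypeAndSize returns whether this sweep refined anything; DataFlowAnalysisDispatcher with is_iterative == true keeps re-walking the blocks until one full traversal reports no change. A rough sketch of that driver loop, assuming monotone workers (the types here are illustrative, not the real dispatcher):

#include <vector>

struct Block { int id; };

// Hypothetical worker signature mirroring InferTypeAndSize: returns true if
// this visit changed any recorded type information.
using Worker = bool (*)(Block* bb);

// Re-run the worker over every block until one complete pass is quiescent.
static void IterativeDispatch(const std::vector<Block*>& blocks, Worker worker) {
  bool change = true;
  while (change) {
    change = false;
    for (Block* bb : blocks) {
      change |= worker(bb);
    }
  }
}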
-static const char* storageName[] = {" Frame ", "PhysReg", " Spill "};
+static const char* storage_name[] = {" Frame ", "PhysReg", " Spill "};
static void DumpRegLocTable(RegLocation* table, int count)
{
for (int i = 0; i < count; i++) {
LOG(INFO) << StringPrintf("Loc[%02d] : %s, %c %c %c %c %c %c%d %c%d S%d",
- table[i].origSReg, storageName[table[i].location],
+ table[i].orig_sreg, storage_name[table[i].location],
table[i].wide ? 'W' : 'N', table[i].defined ? 'D' : 'U',
table[i].fp ? 'F' : table[i].ref ? 'R' : 'C',
- table[i].highWord ? 'H' : 'L', table[i].home ? 'h' : 't',
- IsFpReg(table[i].lowReg) ? 's' : 'r',
- table[i].lowReg & FpRegMask(),
- IsFpReg(table[i].highReg) ? 's' : 'r',
- table[i].highReg & FpRegMask(), table[i].sRegLow);
+ table[i].high_word ? 'H' : 'L', table[i].home ? 'h' : 't',
+ IsFpReg(table[i].low_reg) ? 's' : 'r',
+ table[i].low_reg & FpRegMask(),
+ IsFpReg(table[i].high_reg) ? 's' : 'r',
+ table[i].high_reg & FpRegMask(), table[i].s_reg_low);
}
}
-static const RegLocation freshLoc = {kLocDalvikFrame, 0, 0, 0, 0, 0, 0, 0, 0,
+static const RegLocation fresh_loc = {kLocDalvikFrame, 0, 0, 0, 0, 0, 0, 0, 0,
INVALID_REG, INVALID_REG, INVALID_SREG,
INVALID_SREG};
-int ComputeFrameSize(CompilationUnit* cUnit) {
+int ComputeFrameSize(CompilationUnit* cu) {
/* Figure out the frame size */
static const uint32_t kAlignMask = kStackAlignment - 1;
- uint32_t size = (cUnit->numCoreSpills + cUnit->numFPSpills +
- 1 /* filler word */ + cUnit->numRegs + cUnit->numOuts +
- cUnit->numCompilerTemps + 1 /* curMethod* */)
+ uint32_t size = (cu->num_core_spills + cu->num_fp_spills +
+ 1 /* filler word */ + cu->num_regs + cu->num_outs +
+ cu->num_compiler_temps + 1 /* cur_method* */)
* sizeof(uint32_t);
/* Align and set */
return (size + kAlignMask) & ~(kAlignMask);
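
The final statement is the standard power-of-two round-up idiom: add alignment minus one, then mask off the low bits. A small self-contained sketch (AlignUp is an illustrative name, not a function in this CL):

#include <cstdint>

// Round size up to the next multiple of a power-of-two alignment.
// Example: AlignUp(52, 16) == 64, AlignUp(64, 16) == 64.
static uint32_t AlignUp(uint32_t size, uint32_t alignment) {
  const uint32_t mask = alignment - 1;  // e.g. 16 -> 0xF
  return (size + mask) & ~mask;
}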
@@ -400,126 +400,126 @@
* allocation is done on the fly. We also do some initialization and
* type inference here.
*/
-void SimpleRegAlloc(CompilationUnit* cUnit)
+void SimpleRegAlloc(CompilationUnit* cu)
{
int i;
RegLocation* loc;
/* Allocate the location map */
- loc = static_cast<RegLocation*>(NewMem(cUnit, cUnit->numSSARegs * sizeof(*loc),
+ loc = static_cast<RegLocation*>(NewMem(cu, cu->num_ssa_regs * sizeof(*loc),
true, kAllocRegAlloc));
- for (i=0; i< cUnit->numSSARegs; i++) {
- loc[i] = freshLoc;
- loc[i].sRegLow = i;
- loc[i].isConst = IsBitSet(cUnit->isConstantV, i);
+ for (i = 0; i < cu->num_ssa_regs; i++) {
+ loc[i] = fresh_loc;
+ loc[i].s_reg_low = i;
+ loc[i].is_const = IsBitSet(cu->is_constant_v, i);
}
/* Patch up the locations for Method* and the compiler temps */
- loc[cUnit->methodSReg].location = kLocCompilerTemp;
- loc[cUnit->methodSReg].defined = true;
- for (i = 0; i < cUnit->numCompilerTemps; i++) {
- CompilerTemp* ct = reinterpret_cast<CompilerTemp*>(cUnit->compilerTemps.elemList[i]);
- loc[ct->sReg].location = kLocCompilerTemp;
- loc[ct->sReg].defined = true;
+ loc[cu->method_sreg].location = kLocCompilerTemp;
+ loc[cu->method_sreg].defined = true;
+ for (i = 0; i < cu->num_compiler_temps; i++) {
+ CompilerTemp* ct = reinterpret_cast<CompilerTemp*>(cu->compiler_temps.elem_list[i]);
+ loc[ct->s_reg].location = kLocCompilerTemp;
+ loc[ct->s_reg].defined = true;
}
- cUnit->regLocation = loc;
+ cu->reg_location = loc;
/* Allocate the promotion map */
- int numRegs = cUnit->numDalvikRegisters;
- cUnit->promotionMap = static_cast<PromotionMap*>
- (NewMem(cUnit, (numRegs + cUnit->numCompilerTemps + 1) * sizeof(cUnit->promotionMap[0]),
+ int num_regs = cu->num_dalvik_registers;
+ cu->promotion_map = static_cast<PromotionMap*>
+ (NewMem(cu, (num_regs + cu->num_compiler_temps + 1) * sizeof(cu->promotion_map[0]),
true, kAllocRegAlloc));
/* Add types of incoming arguments based on signature */
- int numIns = cUnit->numIns;
- if (numIns > 0) {
- int sReg = numRegs - numIns;
- if ((cUnit->access_flags & kAccStatic) == 0) {
+ int num_ins = cu->num_ins;
+ if (num_ins > 0) {
+ int s_reg = num_regs - num_ins;
+ if ((cu->access_flags & kAccStatic) == 0) {
// For non-static, skip past "this"
- cUnit->regLocation[sReg].defined = true;
- cUnit->regLocation[sReg].ref = true;
- sReg++;
+ cu->reg_location[s_reg].defined = true;
+ cu->reg_location[s_reg].ref = true;
+ s_reg++;
}
- const char* shorty = cUnit->shorty;
+ const char* shorty = cu->shorty;
int shorty_len = strlen(shorty);
for (int i = 1; i < shorty_len; i++) {
switch (shorty[i]) {
case 'D':
- cUnit->regLocation[sReg].wide = true;
- cUnit->regLocation[sReg+1].highWord = true;
- cUnit->regLocation[sReg+1].fp = true;
- DCHECK_EQ(SRegToVReg(cUnit, sReg)+1, SRegToVReg(cUnit, sReg+1));
- cUnit->regLocation[sReg].fp = true;
- cUnit->regLocation[sReg].defined = true;
- sReg++;
+ cu->reg_location[s_reg].wide = true;
+ cu->reg_location[s_reg+1].high_word = true;
+ cu->reg_location[s_reg+1].fp = true;
+ DCHECK_EQ(SRegToVReg(cu, s_reg)+1, SRegToVReg(cu, s_reg+1));
+ cu->reg_location[s_reg].fp = true;
+ cu->reg_location[s_reg].defined = true;
+ s_reg++;
break;
case 'J':
- cUnit->regLocation[sReg].wide = true;
- cUnit->regLocation[sReg+1].highWord = true;
- DCHECK_EQ(SRegToVReg(cUnit, sReg)+1, SRegToVReg(cUnit, sReg+1));
- cUnit->regLocation[sReg].core = true;
- cUnit->regLocation[sReg].defined = true;
- sReg++;
+ cu->reg_location[s_reg].wide = true;
+ cu->reg_location[s_reg+1].high_word = true;
+ DCHECK_EQ(SRegToVReg(cu, s_reg)+1, SRegToVReg(cu, s_reg+1));
+ cu->reg_location[s_reg].core = true;
+ cu->reg_location[s_reg].defined = true;
+ s_reg++;
break;
case 'F':
- cUnit->regLocation[sReg].fp = true;
- cUnit->regLocation[sReg].defined = true;
+ cu->reg_location[s_reg].fp = true;
+ cu->reg_location[s_reg].defined = true;
break;
case 'L':
- cUnit->regLocation[sReg].ref = true;
- cUnit->regLocation[sReg].defined = true;
+ cu->reg_location[s_reg].ref = true;
+ cu->reg_location[s_reg].defined = true;
break;
default:
- cUnit->regLocation[sReg].core = true;
- cUnit->regLocation[sReg].defined = true;
+ cu->reg_location[s_reg].core = true;
+ cu->reg_location[s_reg].defined = true;
break;
}
- sReg++;
+ s_reg++;
}
}
- if (!cUnit->genBitcode) {
+ if (!cu->gen_bitcode) {
/* Remap names */
- DataFlowAnalysisDispatcher(cUnit, RemapNames,
+ DataFlowAnalysisDispatcher(cu, RemapNames,
kPreOrderDFSTraversal,
- false /* isIterative */);
+ false /* is_iterative */);
}
/* Do type & size inference pass */
- DataFlowAnalysisDispatcher(cUnit, InferTypeAndSize,
+ DataFlowAnalysisDispatcher(cu, InferTypeAndSize,
kPreOrderDFSTraversal,
- true /* isIterative */);
+ true /* is_iterative */);
/*
- * Set the sRegLow field to refer to the pre-SSA name of the
+ * Set the s_reg_low field to refer to the pre-SSA name of the
* base Dalvik virtual register. Once we add a better register
* allocator, remove this remapping.
*/
- for (i=0; i < cUnit->numSSARegs; i++) {
- if (cUnit->regLocation[i].location != kLocCompilerTemp) {
- int origSReg = cUnit->regLocation[i].sRegLow;
- cUnit->regLocation[i].origSReg = origSReg;
- cUnit->regLocation[i].sRegLow = SRegToVReg(cUnit, origSReg);
+ for (i = 0; i < cu->num_ssa_regs; i++) {
+ if (cu->reg_location[i].location != kLocCompilerTemp) {
+ int orig_sreg = cu->reg_location[i].s_reg_low;
+ cu->reg_location[i].orig_sreg = orig_sreg;
+ cu->reg_location[i].s_reg_low = SRegToVReg(cu, orig_sreg);
}
}
- cUnit->coreSpillMask = 0;
- cUnit->fpSpillMask = 0;
- cUnit->numCoreSpills = 0;
+ cu->core_spill_mask = 0;
+ cu->fp_spill_mask = 0;
+ cu->num_core_spills = 0;
- DoPromotion(cUnit);
+ DoPromotion(cu);
/* Get easily-accessible post-promotion copy of RegLocation for Method* */
- cUnit->methodLoc = cUnit->regLocation[cUnit->methodSReg];
+ cu->method_loc = cu->reg_location[cu->method_sreg];
- if (cUnit->printMe && !(cUnit->disableOpt & (1 << kPromoteRegs))) {
+ if (cu->verbose && !(cu->disable_opt & (1 << kPromoteRegs))) {
LOG(INFO) << "After Promotion";
- DumpRegLocTable(cUnit->regLocation, cUnit->numSSARegs);
+ DumpRegLocTable(cu->reg_location, cu->num_ssa_regs);
}
/* Set the frame size */
- cUnit->frameSize = ComputeFrameSize(cUnit);
+ cu->frame_size = ComputeFrameSize(cu);
}
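
The argument-typing loop above is driven by the method's shorty string: index 0 is the return type and each later character is one argument, with 'J' (long) and 'D' (double) occupying two virtual-register slots. A hedged sketch of that decoding (the enum and struct names are invented for illustration):

#include <cstring>

enum ArgKind { kCoreArg, kFpArg, kRefArg };

struct ArgSlot {
  ArgKind kind;
  bool wide;  // true means the value also claims the following vreg slot
};

// Decode shorty[1..] into per-argument type/width flags; out must have room
// for one entry per argument character.
static void DecodeShorty(const char* shorty, ArgSlot* out) {
  const size_t len = std::strlen(shorty);
  for (size_t i = 1; i < len; i++) {
    switch (shorty[i]) {
      case 'D': out[i - 1] = { kFpArg, true };    break;  // double
      case 'J': out[i - 1] = { kCoreArg, true };  break;  // long
      case 'F': out[i - 1] = { kFpArg, false };   break;  // float
      case 'L': out[i - 1] = { kRefArg, false };  break;  // object ref
      default:  out[i - 1] = { kCoreArg, false }; break;  // I, Z, B, S, C
    }
  }
}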
} // namespace art
diff --git a/src/compiler/ralloc.h b/src/compiler/ralloc.h
index 51bee9f..b46bfb1 100644
--- a/src/compiler/ralloc.h
+++ b/src/compiler/ralloc.h
@@ -21,7 +21,7 @@
namespace art {
-void SimpleRegAlloc(CompilationUnit* cUnit);
+void SimpleRegAlloc(CompilationUnit* cu);
} // namespace art
diff --git a/src/compiler/ssa_transformation.cc b/src/compiler/ssa_transformation.cc
index 609a25e..0a71cb4 100644
--- a/src/compiler/ssa_transformation.cc
+++ b/src/compiler/ssa_transformation.cc
@@ -33,13 +33,13 @@
static BasicBlock* NextUnvisitedSuccessor(BasicBlock* bb)
{
- BasicBlock* res = NeedsVisit(bb->fallThrough);
+ BasicBlock* res = NeedsVisit(bb->fall_through);
if (res == NULL) {
res = NeedsVisit(bb->taken);
if (res == NULL) {
- if (bb->successorBlockList.blockListType != kNotUsed) {
+ if (bb->successor_block_list.block_list_type != kNotUsed) {
GrowableListIterator iterator;
- GrowableListIteratorInit(&bb->successorBlockList.blocks,
+ GrowableListIteratorInit(&bb->successor_block_list.blocks,
&iterator);
while (true) {
SuccessorBlockInfo *sbi = reinterpret_cast<SuccessorBlockInfo*>
@@ -54,306 +54,306 @@
return res;
}
-static void MarkPreOrder(CompilationUnit* cUnit, BasicBlock* block)
+static void MarkPreOrder(CompilationUnit* cu, BasicBlock* block)
{
block->visited = true;
- /* Enqueue the preOrder block id */
- InsertGrowableList(cUnit, &cUnit->dfsOrder, block->id);
+ /* Enqueue the pre_order block id */
+ InsertGrowableList(cu, &cu->dfs_order, block->id);
}
-static void RecordDFSOrders(CompilationUnit* cUnit, BasicBlock* block)
+static void RecordDFSOrders(CompilationUnit* cu, BasicBlock* block)
{
std::vector<BasicBlock*> succ;
- MarkPreOrder(cUnit, block);
+ MarkPreOrder(cu, block);
succ.push_back(block);
while (!succ.empty()) {
BasicBlock* curr = succ.back();
- BasicBlock* nextSuccessor = NextUnvisitedSuccessor(curr);
- if (nextSuccessor != NULL) {
- MarkPreOrder(cUnit, nextSuccessor);
- succ.push_back(nextSuccessor);
+ BasicBlock* next_successor = NextUnvisitedSuccessor(curr);
+ if (next_successor != NULL) {
+ MarkPreOrder(cu, next_successor);
+ succ.push_back(next_successor);
continue;
}
- curr->dfsId = cUnit->dfsPostOrder.numUsed;
- InsertGrowableList(cUnit, &cUnit->dfsPostOrder, curr->id);
+ curr->dfs_id = cu->dfs_post_order.num_used;
+ InsertGrowableList(cu, &cu->dfs_post_order, curr->id);
succ.pop_back();
}
}
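
RecordDFSOrders is the recursion-free counterpart of the TEST_DFS version below: preorder is emitted when a block is first pushed, postorder when it has no unvisited successor left and is popped. A compact sketch with illustrative node types:

#include <vector>

struct Node {
  std::vector<Node*> succs;
  bool visited = false;
  size_t next_succ = 0;  // stands in for NextUnvisitedSuccessor's scan
};

// Explicit-stack DFS recording both orders in a single walk.
static void RecordOrders(Node* entry, std::vector<Node*>& pre,
                         std::vector<Node*>& post) {
  entry->visited = true;
  pre.push_back(entry);
  std::vector<Node*> stack(1, entry);
  while (!stack.empty()) {
    Node* curr = stack.back();
    Node* next = nullptr;
    while (curr->next_succ < curr->succs.size()) {
      Node* cand = curr->succs[curr->next_succ++];
      if (!cand->visited) { next = cand; break; }
    }
    if (next != nullptr) {
      next->visited = true;
      pre.push_back(next);   // preorder: recorded on push
      stack.push_back(next);
    } else {
      post.push_back(curr);  // postorder: recorded on pop (dfs_id above)
      stack.pop_back();
    }
  }
}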
#if defined(TEST_DFS)
-/* Enter the node to the dfsOrder list then visit its successors */
-static void RecursiveRecordDFSOrders(CompilationUnit* cUnit, BasicBlock* block)
+/* Add the node to the dfs_order list, then visit its successors */
+static void RecursiveRecordDFSOrders(CompilationUnit* cu, BasicBlock* block)
{
if (block->visited || block->hidden) return;
block->visited = true;
// Can this block be reached only via previous block fallthrough?
- if ((block->blockType == kDalvikByteCode) &&
- (block->predecessors->numUsed == 1)) {
- DCHECK_GE(cUnit->dfsOrder.numUsed, 1U);
- int prevIdx = cUnit->dfsOrder.numUsed - 1;
- int prevId = cUnit->dfsOrder.elemList[prevIdx];
- BasicBlock* predBB = (BasicBlock*)block->predecessors->elemList[0];
+ if ((block->block_type == kDalvikByteCode) &&
+ (block->predecessors->num_used == 1)) {
+ DCHECK_GE(cu->dfs_order.num_used, 1U);
+ int prev_idx = cu->dfs_order.num_used - 1;
+ int prev_id = cu->dfs_order.elem_list[prev_idx];
+ BasicBlock* pred_bb = (BasicBlock*)block->predecessors->elem_list[0];
}
- /* Enqueue the preOrder block id */
- InsertGrowableList(cUnit, &cUnit->dfsOrder, block->id);
+ /* Enqueue the pre_order block id */
+ InsertGrowableList(cu, &cu->dfs_order, block->id);
- if (block->fallThrough) {
- RecursiveRecordDFSOrders(cUnit, block->fallThrough);
+ if (block->fall_through) {
+ RecursiveRecordDFSOrders(cu, block->fall_through);
}
- if (block->taken) RecursiveRecordDFSOrders(cUnit, block->taken);
- if (block->successorBlockList.blockListType != kNotUsed) {
+ if (block->taken) RecursiveRecordDFSOrders(cu, block->taken);
+ if (block->successor_block_list.block_list_type != kNotUsed) {
GrowableListIterator iterator;
- GrowableListIteratorInit(&block->successorBlockList.blocks,
+ GrowableListIteratorInit(&block->successor_block_list.blocks,
&iterator);
while (true) {
- SuccessorBlockInfo *successorBlockInfo =
+ SuccessorBlockInfo *successor_block_info =
(SuccessorBlockInfo *) GrowableListIteratorNext(&iterator);
- if (successorBlockInfo == NULL) break;
- BasicBlock* succBB = successorBlockInfo->block;
- RecursiveRecordDFSOrders(cUnit, succBB);
+ if (successor_block_info == NULL) break;
+ BasicBlock* succ_bb = successor_block_info->block;
+ RecursiveRecordDFSOrders(cu, succ_bb);
}
}
- /* Record postorder in basic block and enqueue normal id in dfsPostOrder */
- block->dfsId = cUnit->dfsPostOrder.numUsed;
- InsertGrowableList(cUnit, &cUnit->dfsPostOrder, block->id);
+ /* Record postorder in basic block and enqueue normal id in dfs_post_order */
+ block->dfs_id = cu->dfs_post_order.num_used;
+ InsertGrowableList(cu, &cu->dfs_post_order, block->id);
return;
}
#endif
/* Order the blocks by depth-first search */
-static void ComputeDFSOrders(CompilationUnit* cUnit)
+static void ComputeDFSOrders(CompilationUnit* cu)
{
- /* Initialize or reset the DFS preOrder list */
- if (cUnit->dfsOrder.elemList == NULL) {
- CompilerInitGrowableList(cUnit, &cUnit->dfsOrder, cUnit->numBlocks,
+ /* Initialize or reset the DFS pre_order list */
+ if (cu->dfs_order.elem_list == NULL) {
+ CompilerInitGrowableList(cu, &cu->dfs_order, cu->num_blocks,
kListDfsOrder);
} else {
/* Just reset the used length on the counter */
- cUnit->dfsOrder.numUsed = 0;
+ cu->dfs_order.num_used = 0;
}
- /* Initialize or reset the DFS postOrder list */
- if (cUnit->dfsPostOrder.elemList == NULL) {
- CompilerInitGrowableList(cUnit, &cUnit->dfsPostOrder, cUnit->numBlocks,
+ /* Initialize or reset the DFS post_order list */
+ if (cu->dfs_post_order.elem_list == NULL) {
+ CompilerInitGrowableList(cu, &cu->dfs_post_order, cu->num_blocks,
kListDfsPostOrder);
} else {
/* Just reset the used length on the counter */
- cUnit->dfsPostOrder.numUsed = 0;
+ cu->dfs_post_order.num_used = 0;
}
#if defined(TEST_DFS)
// Reset visited flags
- DataFlowAnalysisDispatcher(cUnit, ClearVisitedFlag,
- kAllNodes, false /* isIterative */);
+ DataFlowAnalysisDispatcher(cu, ClearVisitedFlag,
+ kAllNodes, false /* is_iterative */);
// Record pre and post order dfs
- RecursiveRecordDFSOrders(cUnit, cUnit->entryBlock);
+ RecursiveRecordDFSOrders(cu, cu->entry_block);
// Copy the results for later comparison and reset the lists
- GrowableList recursiveDfsOrder;
- GrowableList recursiveDfsPostOrder;
- CompilerInitGrowableList(cUnit, &recursiveDfsOrder, cUnit->dfsOrder.numUsed,
+ GrowableList recursive_dfs_order;
+ GrowableList recursive_dfs_post_order;
+ CompilerInitGrowableList(cu, &recursive_dfs_order, cu->dfs_order.num_used,
kListDfsOrder);
- for (unsigned int i = 0; i < cUnit->dfsOrder.numUsed; i++) {
- InsertGrowableList(cUnit, &recursiveDfsOrder,
- cUnit->dfsOrder.elemList[i]);
+ for (unsigned int i = 0; i < cu->dfs_order.num_used; i++) {
+ InsertGrowableList(cu, &recursive_dfs_order,
+ cu->dfs_order.elem_list[i]);
}
- cUnit->dfsOrder.numUsed = 0;
- CompilerInitGrowableList(cUnit, &recursiveDfsPostOrder,
- cUnit->dfsPostOrder.numUsed, kListDfsOrder);
- for (unsigned int i = 0; i < cUnit->dfsPostOrder.numUsed; i++) {
- InsertGrowableList(cUnit, &recursiveDfsPostOrder,
- cUnit->dfsPostOrder.elemList[i]);
+ cu->dfs_order.num_used = 0;
+ CompilerInitGrowableList(cu, &recursive_dfs_post_order,
+ cu->dfs_post_order.num_used, kListDfsOrder);
+ for (unsigned int i = 0; i < cu->dfs_post_order.num_used; i++) {
+ InsertGrowableList(cu, &recursive_dfs_post_order,
+ cu->dfs_post_order.elem_list[i]);
}
- cUnit->dfsPostOrder.numUsed = 0;
+ cu->dfs_post_order.num_used = 0;
#endif
// Reset visited flags from all nodes
- DataFlowAnalysisDispatcher(cUnit, ClearVisitedFlag,
- kAllNodes, false /* isIterative */);
+ DataFlowAnalysisDispatcher(cu, ClearVisitedFlag,
+ kAllNodes, false /* is_iterative */);
// Record dfs orders
- RecordDFSOrders(cUnit, cUnit->entryBlock);
+ RecordDFSOrders(cu, cu->entry_block);
#if defined(TEST_DFS)
bool mismatch = false;
- mismatch |= (cUnit->dfsOrder.numUsed != recursiveDfsOrder.numUsed);
- for (unsigned int i = 0; i < cUnit->dfsOrder.numUsed; i++) {
- mismatch |= (cUnit->dfsOrder.elemList[i] !=
- recursiveDfsOrder.elemList[i]);
+ mismatch |= (cu->dfs_order.num_used != recursive_dfs_order.num_used);
+ for (unsigned int i = 0; i < cu->dfs_order.num_used; i++) {
+ mismatch |= (cu->dfs_order.elem_list[i] !=
+ recursive_dfs_order.elem_list[i]);
}
- mismatch |= (cUnit->dfsPostOrder.numUsed != recursiveDfsPostOrder.numUsed);
- for (unsigned int i = 0; i < cUnit->dfsPostOrder.numUsed; i++) {
- mismatch |= (cUnit->dfsPostOrder.elemList[i] !=
- recursiveDfsPostOrder.elemList[i]);
+ mismatch |= (cu->dfs_post_order.num_used != recursive_dfs_post_order.num_used);
+ for (unsigned int i = 0; i < cu->dfs_post_order.num_used; i++) {
+ mismatch |= (cu->dfs_post_order.elem_list[i] !=
+ recursive_dfs_post_order.elem_list[i]);
}
if (mismatch) {
LOG(INFO) << "Mismatch for "
- << PrettyMethod(cUnit->method_idx, *cUnit->dex_file);
+ << PrettyMethod(cu->method_idx, *cu->dex_file);
LOG(INFO) << "New dfs";
- for (unsigned int i = 0; i < cUnit->dfsOrder.numUsed; i++) {
- LOG(INFO) << i << " - " << cUnit->dfsOrder.elemList[i];
+ for (unsigned int i = 0; i < cu->dfs_order.num_used; i++) {
+ LOG(INFO) << i << " - " << cu->dfs_order.elem_list[i];
}
LOG(INFO) << "Recursive dfs";
- for (unsigned int i = 0; i < recursiveDfsOrder.numUsed; i++) {
- LOG(INFO) << i << " - " << recursiveDfsOrder.elemList[i];
+ for (unsigned int i = 0; i < recursive_dfs_order.num_used; i++) {
+ LOG(INFO) << i << " - " << recursive_dfs_order.elem_list[i];
}
LOG(INFO) << "New post dfs";
- for (unsigned int i = 0; i < cUnit->dfsPostOrder.numUsed; i++) {
- LOG(INFO) << i << " - " << cUnit->dfsPostOrder.elemList[i];
+ for (unsigned int i = 0; i < cu->dfs_post_order.num_used; i++) {
+ LOG(INFO) << i << " - " << cu->dfs_post_order.elem_list[i];
}
LOG(INFO) << "Recursive post dfs";
- for (unsigned int i = 0; i < recursiveDfsPostOrder.numUsed; i++) {
- LOG(INFO) << i << " - " << recursiveDfsPostOrder.elemList[i];
+ for (unsigned int i = 0; i < recursive_dfs_post_order.num_used; i++) {
+ LOG(INFO) << i << " - " << recursive_dfs_post_order.elem_list[i];
}
}
- CHECK_EQ(cUnit->dfsOrder.numUsed, recursiveDfsOrder.numUsed);
- for (unsigned int i = 0; i < cUnit->dfsOrder.numUsed; i++) {
- CHECK_EQ(cUnit->dfsOrder.elemList[i], recursiveDfsOrder.elemList[i]);
+ CHECK_EQ(cu->dfs_order.num_used, recursive_dfs_order.num_used);
+ for (unsigned int i = 0; i < cu->dfs_order.num_used; i++) {
+ CHECK_EQ(cu->dfs_order.elem_list[i], recursive_dfs_order.elem_list[i]);
}
- CHECK_EQ(cUnit->dfsPostOrder.numUsed, recursiveDfsPostOrder.numUsed);
- for (unsigned int i = 0; i < cUnit->dfsPostOrder.numUsed; i++) {
- CHECK_EQ(cUnit->dfsPostOrder.elemList[i],
- recursiveDfsPostOrder.elemList[i]);
+ CHECK_EQ(cu->dfs_post_order.num_used, recursive_dfs_post_order.num_used);
+ for (unsigned int i = 0; i < cu->dfs_post_order.num_used; i++) {
+ CHECK_EQ(cu->dfs_post_order.elem_list[i],
+ recursive_dfs_post_order.elem_list[i]);
}
#endif
- cUnit->numReachableBlocks = cUnit->dfsOrder.numUsed;
+ cu->num_reachable_blocks = cu->dfs_order.num_used;
}
/*
* Mark block bit on the per-Dalvik register vector to denote that Dalvik
* register idx is defined in BasicBlock bb.
*/
-static bool FillDefBlockMatrix(CompilationUnit* cUnit, BasicBlock* bb)
+static bool FillDefBlockMatrix(CompilationUnit* cu, BasicBlock* bb)
{
- if (bb->dataFlowInfo == NULL) return false;
+ if (bb->data_flow_info == NULL) return false;
ArenaBitVectorIterator iterator;
- BitVectorIteratorInit(bb->dataFlowInfo->defV, &iterator);
+ BitVectorIteratorInit(bb->data_flow_info->def_v, &iterator);
while (true) {
int idx = BitVectorIteratorNext(&iterator);
if (idx == -1) break;
/* Block bb defines register idx */
- SetBit(cUnit, cUnit->defBlockMatrix[idx], bb->id);
+ SetBit(cu, cu->def_block_matrix[idx], bb->id);
}
return true;
}
-static void ComputeDefBlockMatrix(CompilationUnit* cUnit)
+static void ComputeDefBlockMatrix(CompilationUnit* cu)
{
- int numRegisters = cUnit->numDalvikRegisters;
- /* Allocate numDalvikRegisters bit vector pointers */
- cUnit->defBlockMatrix = static_cast<ArenaBitVector**>
- (NewMem(cUnit, sizeof(ArenaBitVector *) * numRegisters, true, kAllocDFInfo));
+ int num_registers = cu->num_dalvik_registers;
+ /* Allocate num_dalvik_registers bit vector pointers */
+ cu->def_block_matrix = static_cast<ArenaBitVector**>
+ (NewMem(cu, sizeof(ArenaBitVector *) * num_registers, true, kAllocDFInfo));
int i;
- /* Initialize numRegister vectors with numBlocks bits each */
- for (i = 0; i < numRegisters; i++) {
- cUnit->defBlockMatrix[i] = AllocBitVector(cUnit, cUnit->numBlocks,
+ /* Initialize num_registers vectors with num_blocks bits each */
+ for (i = 0; i < num_registers; i++) {
+ cu->def_block_matrix[i] = AllocBitVector(cu, cu->num_blocks,
false, kBitMapBMatrix);
}
- DataFlowAnalysisDispatcher(cUnit, FindLocalLiveIn,
- kAllNodes, false /* isIterative */);
- DataFlowAnalysisDispatcher(cUnit, FillDefBlockMatrix,
- kAllNodes, false /* isIterative */);
+ DataFlowAnalysisDispatcher(cu, FindLocalLiveIn,
+ kAllNodes, false /* is_iterative */);
+ DataFlowAnalysisDispatcher(cu, FillDefBlockMatrix,
+ kAllNodes, false /* is_iterative */);
/*
* Also set the incoming parameters as defs in the entry block.
* Only need to handle the parameters for the outer method.
*/
- int numRegs = cUnit->numDalvikRegisters;
- int inReg = numRegs - cUnit->numIns;
- for (; inReg < numRegs; inReg++) {
- SetBit(cUnit, cUnit->defBlockMatrix[inReg], cUnit->entryBlock->id);
+ int num_regs = cu->num_dalvik_registers;
+ int in_reg = num_regs - cu->num_ins;
+ for (; in_reg < num_regs; in_reg++) {
+ SetBit(cu, cu->def_block_matrix[in_reg], cu->entry_block->id);
}
}
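
The def-block matrix is one bit vector per Dalvik register, with bit b set when block b defines that register; phi insertion later unions dominance frontiers over exactly these bits. A minimal sketch (the container choice is illustrative):

#include <utility>
#include <vector>

// matrix[reg][block] == true iff the block contains a definition of reg.
static std::vector<std::vector<bool>> BuildDefBlockMatrix(
    int num_registers, int num_blocks,
    const std::vector<std::pair<int, int>>& defs) {  // (block_id, reg) pairs
  std::vector<std::vector<bool>> matrix(
      num_registers, std::vector<bool>(num_blocks, false));
  for (const std::pair<int, int>& d : defs) {
    matrix[d.second][d.first] = true;
  }
  return matrix;
}

Incoming arguments are then marked as defined in the entry block, as the tail of ComputeDefBlockMatrix does above.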
/* Compute the post-order traversal of the dominator tree */
-static void ComputeDomPostOrderTraversal(CompilationUnit* cUnit, BasicBlock* bb)
+static void ComputeDomPostOrderTraversal(CompilationUnit* cu, BasicBlock* bb)
{
- ArenaBitVectorIterator bvIterator;
- BitVectorIteratorInit(bb->iDominated, &bvIterator);
- GrowableList* blockList = &cUnit->blockList;
+ ArenaBitVectorIterator bv_iterator;
+ BitVectorIteratorInit(bb->i_dominated, &bv_iterator);
+ GrowableList* block_list = &cu->block_list;
/* Iterate through the dominated blocks first */
while (true) {
//TUNING: hot call to BitVectorIteratorNext
- int bbIdx = BitVectorIteratorNext(&bvIterator);
- if (bbIdx == -1) break;
- BasicBlock* dominatedBB =
- reinterpret_cast<BasicBlock*>(GrowableListGetElement(blockList, bbIdx));
- ComputeDomPostOrderTraversal(cUnit, dominatedBB);
+ int bb_idx = BitVectorIteratorNext(&bv_iterator);
+ if (bb_idx == -1) break;
+ BasicBlock* dominated_bb =
+ reinterpret_cast<BasicBlock*>(GrowableListGetElement(block_list, bb_idx));
+ ComputeDomPostOrderTraversal(cu, dominated_bb);
}
/* Enter the current block id */
- InsertGrowableList(cUnit, &cUnit->domPostOrderTraversal, bb->id);
+ InsertGrowableList(cu, &cu->dom_post_order_traversal, bb->id);
/* hacky loop detection */
if (bb->taken && IsBitSet(bb->dominators, bb->taken->id)) {
- cUnit->hasLoop = true;
+ cu->has_loop = true;
}
}
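
ComputeDomPostOrderTraversal walks the dominator tree (the i_dominated sets), appending each block after everything it immediately dominates; the taken-branch check is a cheap loop heuristic, since a branch into one of your own dominators implies a back edge. The recursion in sketch form, with illustrative types:

#include <vector>

struct DomNode {
  int id;
  std::vector<DomNode*> dominated;  // children in the dominator tree
};

// Post-order over the dominator tree: children first, then the node itself.
static void DomPostOrder(const DomNode* node, std::vector<int>& order) {
  for (const DomNode* child : node->dominated) {
    DomPostOrder(child, order);
  }
  order.push_back(node->id);
}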
-static void CheckForDominanceFrontier(CompilationUnit* cUnit, BasicBlock* domBB,
- const BasicBlock* succBB)
+static void CheckForDominanceFrontier(CompilationUnit* cu, BasicBlock* dom_bb,
+ const BasicBlock* succ_bb)
{
/*
* TODO - evaluate whether phi will ever need to be inserted into exit
* blocks.
*/
- if (succBB->iDom != domBB &&
- succBB->blockType == kDalvikByteCode &&
- succBB->hidden == false) {
- SetBit(cUnit, domBB->domFrontier, succBB->id);
+ if (succ_bb->i_dom != dom_bb &&
+ succ_bb->block_type == kDalvikByteCode &&
+ succ_bb->hidden == false) {
+ SetBit(cu, dom_bb->dom_frontier, succ_bb->id);
}
}
/* Worker function to compute the dominance frontier */
-static bool ComputeDominanceFrontier(CompilationUnit* cUnit, BasicBlock* bb)
+static bool ComputeDominanceFrontier(CompilationUnit* cu, BasicBlock* bb)
{
- GrowableList* blockList = &cUnit->blockList;
+ GrowableList* block_list = &cu->block_list;
/* Calculate DF_local */
if (bb->taken) {
- CheckForDominanceFrontier(cUnit, bb, bb->taken);
+ CheckForDominanceFrontier(cu, bb, bb->taken);
}
- if (bb->fallThrough) {
- CheckForDominanceFrontier(cUnit, bb, bb->fallThrough);
+ if (bb->fall_through) {
+ CheckForDominanceFrontier(cu, bb, bb->fall_through);
}
- if (bb->successorBlockList.blockListType != kNotUsed) {
+ if (bb->successor_block_list.block_list_type != kNotUsed) {
GrowableListIterator iterator;
- GrowableListIteratorInit(&bb->successorBlockList.blocks,
+ GrowableListIteratorInit(&bb->successor_block_list.blocks,
&iterator);
while (true) {
- SuccessorBlockInfo *successorBlockInfo =
+ SuccessorBlockInfo *successor_block_info =
reinterpret_cast<SuccessorBlockInfo*>(GrowableListIteratorNext(&iterator));
- if (successorBlockInfo == NULL) break;
- BasicBlock* succBB = successorBlockInfo->block;
- CheckForDominanceFrontier(cUnit, bb, succBB);
+ if (successor_block_info == NULL) break;
+ BasicBlock* succ_bb = successor_block_info->block;
+ CheckForDominanceFrontier(cu, bb, succ_bb);
}
}
/* Calculate DF_up */
- ArenaBitVectorIterator bvIterator;
- BitVectorIteratorInit(bb->iDominated, &bvIterator);
+ ArenaBitVectorIterator bv_iterator;
+ BitVectorIteratorInit(bb->i_dominated, &bv_iterator);
while (true) {
//TUNING: hot call to BitVectorIteratorNext
- int dominatedIdx = BitVectorIteratorNext(&bvIterator);
- if (dominatedIdx == -1) break;
- BasicBlock* dominatedBB =
- reinterpret_cast<BasicBlock*>(GrowableListGetElement(blockList, dominatedIdx));
- ArenaBitVectorIterator dfIterator;
- BitVectorIteratorInit(dominatedBB->domFrontier, &dfIterator);
+ int dominated_idx = BitVectorIteratorNext(&bv_iterator);
+ if (dominated_idx == -1) break;
+ BasicBlock* dominated_bb =
+ reinterpret_cast<BasicBlock*>(GrowableListGetElement(block_list, dominated_idx));
+ ArenaBitVectorIterator df_iterator;
+ BitVectorIteratorInit(dominated_bb->dom_frontier, &df_iterator);
while (true) {
//TUNING: hot call to BitVectorIteratorNext
- int dfUpIdx = BitVectorIteratorNext(&dfIterator);
- if (dfUpIdx == -1) break;
- BasicBlock* dfUpBlock =
- reinterpret_cast<BasicBlock*>( GrowableListGetElement(blockList, dfUpIdx));
- CheckForDominanceFrontier(cUnit, bb, dfUpBlock);
+ int df_up_idx = BitVectorIteratorNext(&df_iterator);
+ if (df_up_idx == -1) break;
+ BasicBlock* df_up_block =
+ reinterpret_cast<BasicBlock*>(GrowableListGetElement(block_list, df_up_idx));
+ CheckForDominanceFrontier(cu, bb, df_up_block);
}
}
@@ -361,27 +361,27 @@
}
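
ComputeDominanceFrontier follows the textbook split into DF_local (successors the block does not immediately dominate) and DF_up (frontier entries inherited from dominated children). An equivalent, often-quoted formulation from Cooper, Harvey and Kennedy computes the same sets by walking each join point's predecessors up the dominator tree; a sketch with invented types, assuming every block's idom link is set:

#include <set>
#include <vector>

struct Blk {
  int id;
  Blk* idom = nullptr;            // immediate dominator
  std::vector<Blk*> preds;
  std::set<int> dom_frontier;
};

// For each join node b, every block on the path from a predecessor up to
// (but excluding) idom(b) has b in its dominance frontier.
static void ComputeDF(std::vector<Blk*>& blocks) {
  for (Blk* b : blocks) {
    if (b->preds.size() < 2) continue;  // only join points matter
    for (Blk* p : b->preds) {
      for (Blk* runner = p; runner != b->idom; runner = runner->idom) {
        runner->dom_frontier.insert(b->id);
      }
    }
  }
}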
/* Worker function for initializing domination-related data structures */
-static bool InitializeDominationInfo(CompilationUnit* cUnit, BasicBlock* bb)
+static bool InitializeDominationInfo(CompilationUnit* cu, BasicBlock* bb)
{
- int numTotalBlocks = cUnit->blockList.numUsed;
+ int num_total_blocks = cu->block_list.num_used;
if (bb->dominators == NULL ) {
- bb->dominators = AllocBitVector(cUnit, numTotalBlocks,
+ bb->dominators = AllocBitVector(cu, num_total_blocks,
false /* expandable */,
kBitMapDominators);
- bb->iDominated = AllocBitVector(cUnit, numTotalBlocks,
+ bb->i_dominated = AllocBitVector(cu, num_total_blocks,
false /* expandable */,
kBitMapIDominated);
- bb->domFrontier = AllocBitVector(cUnit, numTotalBlocks,
+ bb->dom_frontier = AllocBitVector(cu, num_total_blocks,
false /* expandable */,
kBitMapDomFrontier);
} else {
ClearAllBits(bb->dominators);
- ClearAllBits(bb->iDominated);
- ClearAllBits(bb->domFrontier);
+ ClearAllBits(bb->i_dominated);
+ ClearAllBits(bb->dom_frontier);
}
/* Set all bits in the dominator vector */
- SetInitialBits(bb->dominators, numTotalBlocks);
+ SetInitialBits(bb->dominators, num_total_blocks);
return true;
}
@@ -391,34 +391,34 @@
* is only used when kDebugVerifyDataflow is active and should compute
* the same dominator sets as ComputeBlockDominiators.
*/
-static bool SlowComputeBlockDominators(CompilationUnit* cUnit, BasicBlock* bb)
+static bool SlowComputeBlockDominators(CompilationUnit* cu, BasicBlock* bb)
{
- GrowableList* blockList = &cUnit->blockList;
- int numTotalBlocks = blockList->numUsed;
- ArenaBitVector* tempBlockV = cUnit->tempBlockV;
+ GrowableList* block_list = &cu->block_list;
+ int num_total_blocks = block_list->num_used;
+ ArenaBitVector* temp_block_v = cu->temp_block_v;
GrowableListIterator iter;
/*
* The dominator of the entry block has been preset to itself and we need
* to skip the calculation here.
*/
- if (bb == cUnit->entryBlock) return false;
+ if (bb == cu->entry_block) return false;
- SetInitialBits(tempBlockV, numTotalBlocks);
+ SetInitialBits(temp_block_v, num_total_blocks);
/* Iterate through the predecessors */
GrowableListIteratorInit(bb->predecessors, &iter);
while (true) {
- BasicBlock* predBB = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iter));
- if (!predBB) break;
- /* tempBlockV = tempBlockV ^ dominators */
- if (predBB->dominators != NULL) {
- IntersectBitVectors(tempBlockV, tempBlockV, predBB->dominators);
+ BasicBlock* pred_bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iter));
+ if (!pred_bb) break;
+ /* temp_block_v = temp_block_v & dominators (set intersection) */
+ if (pred_bb->dominators != NULL) {
+ IntersectBitVectors(temp_block_v, temp_block_v, pred_bb->dominators);
}
}
- SetBit(cUnit, tempBlockV, bb->id);
- if (CompareBitVectors(tempBlockV, bb->dominators)) {
- CopyBitVector(bb->dominators, tempBlockV);
+ SetBit(cu, temp_block_v, bb->id);
+ if (CompareBitVectors(temp_block_v, bb->dominators)) {
+ CopyBitVector(bb->dominators, temp_block_v);
return true;
}
return false;
@@ -427,64 +427,64 @@
/*
* Worker function to compute the idom. This implementation is only
* used when kDebugVerifyDataflow is active and should compute the
- * same iDom as ComputeblockIDom.
+ * same i_dom as ComputeblockIDom.
*/
-static bool SlowComputeBlockIDom(CompilationUnit* cUnit, BasicBlock* bb)
+static bool SlowComputeBlockIDom(CompilationUnit* cu, BasicBlock* bb)
{
- GrowableList* blockList = &cUnit->blockList;
- ArenaBitVector* tempBlockV = cUnit->tempBlockV;
- ArenaBitVectorIterator bvIterator;
- BasicBlock* iDom;
+ GrowableList* block_list = &cu->block_list;
+ ArenaBitVector* temp_block_v = cu->temp_block_v;
+ ArenaBitVectorIterator bv_iterator;
+ BasicBlock* i_dom;
- if (bb == cUnit->entryBlock) return false;
+ if (bb == cu->entry_block) return false;
- CopyBitVector(tempBlockV, bb->dominators);
- ClearBit(tempBlockV, bb->id);
- BitVectorIteratorInit(tempBlockV, &bvIterator);
+ CopyBitVector(temp_block_v, bb->dominators);
+ ClearBit(temp_block_v, bb->id);
+ BitVectorIteratorInit(temp_block_v, &bv_iterator);
/* Should not see any dead block */
- DCHECK_NE(CountSetBits(tempBlockV), 0);
- if (CountSetBits(tempBlockV) == 1) {
- iDom = reinterpret_cast<BasicBlock*>
- (GrowableListGetElement(blockList, BitVectorIteratorNext(&bvIterator)));
- bb->iDom = iDom;
+ DCHECK_NE(CountSetBits(temp_block_v), 0);
+ if (CountSetBits(temp_block_v) == 1) {
+ i_dom = reinterpret_cast<BasicBlock*>
+ (GrowableListGetElement(block_list, BitVectorIteratorNext(&bv_iterator)));
+ bb->i_dom = i_dom;
} else {
- int iDomIdx = BitVectorIteratorNext(&bvIterator);
- DCHECK_NE(iDomIdx, -1);
+ int i_dom_idx = BitVectorIteratorNext(&bv_iterator);
+ DCHECK_NE(i_dom_idx, -1);
while (true) {
- int nextDom = BitVectorIteratorNext(&bvIterator);
- if (nextDom == -1) break;
- BasicBlock* nextDomBB =
- reinterpret_cast<BasicBlock*>(GrowableListGetElement(blockList, nextDom));
- /* iDom dominates nextDom - set new iDom */
- if (IsBitSet(nextDomBB->dominators, iDomIdx)) {
- iDomIdx = nextDom;
+ int next_dom = BitVectorIteratorNext(&bv_iterator);
+ if (next_dom == -1) break;
+ BasicBlock* next_dom_bb =
+ reinterpret_cast<BasicBlock*>(GrowableListGetElement(block_list, next_dom));
+ /* i_dom dominates next_dom - set new i_dom */
+ if (IsBitSet(next_dom_bb->dominators, i_dom_idx)) {
+ i_dom_idx = next_dom;
}
}
- iDom = reinterpret_cast<BasicBlock*>(GrowableListGetElement(blockList, iDomIdx));
+ i_dom = reinterpret_cast<BasicBlock*>(GrowableListGetElement(block_list, i_dom_idx));
/* Set the immediate dominator block for bb */
- bb->iDom = iDom;
+ bb->i_dom = i_dom;
}
- /* Add bb to the iDominated set of the immediate dominator block */
- SetBit(cUnit, iDom->iDominated, bb->id);
+ /* Add bb to the i_dominated set of the immediate dominator block */
+ SetBit(cu, i_dom->i_dominated, bb->id);
return true;
}
/*
- * Walk through the ordered iDom list until we reach common parent.
- * Given the ordering of iDomList, this common parent represents the
+ * Walk through the ordered i_dom list until we reach common parent.
+ * Given the ordering of i_dom_list, this common parent represents the
* last element of the intersection of block1 and block2 dominators.
*/
-static int FindCommonParent(CompilationUnit *cUnit, int block1, int block2)
+static int FindCommonParent(CompilationUnit *cu, int block1, int block2)
{
while (block1 != block2) {
while (block1 < block2) {
- block1 = cUnit->iDomList[block1];
+ block1 = cu->i_dom_list[block1];
DCHECK_NE(block1, NOTVISITED);
}
while (block2 < block1) {
- block2 = cUnit->iDomList[block2];
+ block2 = cu->i_dom_list[block2];
DCHECK_NE(block2, NOTVISITED);
}
}
@@ -492,13 +492,13 @@
}
/* Worker function to compute each block's immediate dominator */
-static bool ComputeblockIDom(CompilationUnit* cUnit, BasicBlock* bb)
+static bool ComputeblockIDom(CompilationUnit* cu, BasicBlock* bb)
{
GrowableListIterator iter;
int idom = -1;
/* Special-case entry block */
- if (bb == cUnit->entryBlock) {
+ if (bb == cu->entry_block) {
return false;
}
@@ -507,144 +507,144 @@
/* Find the first processed predecessor */
while (true) {
- BasicBlock* predBB = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iter));
- CHECK(predBB != NULL);
- if (cUnit->iDomList[predBB->dfsId] != NOTVISITED) {
- idom = predBB->dfsId;
+ BasicBlock* pred_bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iter));
+ CHECK(pred_bb != NULL);
+ if (cu->i_dom_list[pred_bb->dfs_id] != NOTVISITED) {
+ idom = pred_bb->dfs_id;
break;
}
}
/* Scan the rest of the predecessors */
while (true) {
- BasicBlock* predBB = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iter));
- if (!predBB) break;
- if (cUnit->iDomList[predBB->dfsId] == NOTVISITED) {
+ BasicBlock* pred_bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iter));
+ if (!pred_bb) break;
+ if (cu->i_dom_list[pred_bb->dfs_id] == NOTVISITED) {
continue;
} else {
- idom = FindCommonParent(cUnit, predBB->dfsId, idom);
+ idom = FindCommonParent(cu, pred_bb->dfs_id, idom);
}
}
DCHECK_NE(idom, NOTVISITED);
/* Did something change? */
- if (cUnit->iDomList[bb->dfsId] != idom) {
- cUnit->iDomList[bb->dfsId] = idom;
+ if (cu->i_dom_list[bb->dfs_id] != idom) {
+ cu->i_dom_list[bb->dfs_id] = idom;
return true;
}
return false;
}
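
FindCommonParent and ComputeblockIDom together are the intersection step of Cooper, Harvey and Kennedy's "A Simple, Fast Dominance Algorithm": blocks are named by postorder number (dfs_id), NOTVISITED marks unprocessed entries, and a block's idom is the two-finger intersection over its processed predecessors. The core in isolation:

#include <vector>

// Two-finger intersection over an idom array indexed by postorder number.
// A smaller postorder id means the block is deeper in the traversal, so we
// repeatedly hoist whichever finger is lower until they meet.
static int Intersect(const std::vector<int>& idom, int b1, int b2) {
  while (b1 != b2) {
    while (b1 < b2) b1 = idom[b1];
    while (b2 < b1) b2 = idom[b2];
  }
  return b1;
}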
/* Worker function to compute each block's dominators */
-static bool ComputeBlockDominiators(CompilationUnit* cUnit, BasicBlock* bb)
+static bool ComputeBlockDominiators(CompilationUnit* cu, BasicBlock* bb)
{
- if (bb == cUnit->entryBlock) {
+ if (bb == cu->entry_block) {
ClearAllBits(bb->dominators);
} else {
- CopyBitVector(bb->dominators, bb->iDom->dominators);
+ CopyBitVector(bb->dominators, bb->i_dom->dominators);
}
- SetBit(cUnit, bb->dominators, bb->id);
+ SetBit(cu, bb->dominators, bb->id);
return false;
}
-static bool SetDominators(CompilationUnit* cUnit, BasicBlock* bb)
+static bool SetDominators(CompilationUnit* cu, BasicBlock* bb)
{
- if (bb != cUnit->entryBlock) {
- int iDomDFSIdx = cUnit->iDomList[bb->dfsId];
- DCHECK_NE(iDomDFSIdx, NOTVISITED);
- int iDomIdx = cUnit->dfsPostOrder.elemList[iDomDFSIdx];
- BasicBlock* iDom =
- reinterpret_cast<BasicBlock*>(GrowableListGetElement(&cUnit->blockList, iDomIdx));
- if (cUnit->enableDebug & (1 << kDebugVerifyDataflow)) {
- DCHECK_EQ(bb->iDom->id, iDom->id);
+ if (bb != cu->entry_block) {
+ int idom_dfs_idx = cu->i_dom_list[bb->dfs_id];
+ DCHECK_NE(idom_dfs_idx, NOTVISITED);
+ int i_dom_idx = cu->dfs_post_order.elem_list[idom_dfs_idx];
+ BasicBlock* i_dom =
+ reinterpret_cast<BasicBlock*>(GrowableListGetElement(&cu->block_list, i_dom_idx));
+ if (cu->enable_debug & (1 << kDebugVerifyDataflow)) {
+ DCHECK_EQ(bb->i_dom->id, i_dom->id);
}
- bb->iDom = iDom;
- /* Add bb to the iDominated set of the immediate dominator block */
- SetBit(cUnit, iDom->iDominated, bb->id);
+ bb->i_dom = i_dom;
+ /* Add bb to the i_dominated set of the immediate dominator block */
+ SetBit(cu, i_dom->i_dominated, bb->id);
}
return false;
}
/* Compute dominators, immediate dominator, and dominance frontier */
-static void ComputeDominators(CompilationUnit* cUnit)
+static void ComputeDominators(CompilationUnit* cu)
{
- int numReachableBlocks = cUnit->numReachableBlocks;
- int numTotalBlocks = cUnit->blockList.numUsed;
+ int num_reachable_blocks = cu->num_reachable_blocks;
+ int num_total_blocks = cu->block_list.num_used;
/* Initialize domination-related data structures */
- DataFlowAnalysisDispatcher(cUnit, InitializeDominationInfo,
- kReachableNodes, false /* isIterative */);
+ DataFlowAnalysisDispatcher(cu, InitializeDominationInfo,
+ kReachableNodes, false /* is_iterative */);
- /* Initalize & Clear iDomList */
- if (cUnit->iDomList == NULL) {
- cUnit->iDomList = static_cast<int*>(NewMem(cUnit, sizeof(int) * numReachableBlocks,
+ /* Initialize & clear i_dom_list */
+ if (cu->i_dom_list == NULL) {
+ cu->i_dom_list = static_cast<int*>(NewMem(cu, sizeof(int) * num_reachable_blocks,
false, kAllocDFInfo));
}
- for (int i = 0; i < numReachableBlocks; i++) {
- cUnit->iDomList[i] = NOTVISITED;
+ for (int i = 0; i < num_reachable_blocks; i++) {
+ cu->i_dom_list[i] = NOTVISITED;
}
- /* For post-order, last block is entry block. Set its iDom to istelf */
- DCHECK_EQ(cUnit->entryBlock->dfsId, numReachableBlocks-1);
- cUnit->iDomList[cUnit->entryBlock->dfsId] = cUnit->entryBlock->dfsId;
+ /* For post-order, last block is entry block. Set its i_dom to itself */
+ DCHECK_EQ(cu->entry_block->dfs_id, num_reachable_blocks-1);
+ cu->i_dom_list[cu->entry_block->dfs_id] = cu->entry_block->dfs_id;
/* Compute the immediate dominators */
- DataFlowAnalysisDispatcher(cUnit, ComputeblockIDom,
+ DataFlowAnalysisDispatcher(cu, ComputeblockIDom,
kReversePostOrderTraversal,
- true /* isIterative */);
+ true /* is_iterative */);
/* Set the dominator for the root node */
- ClearAllBits(cUnit->entryBlock->dominators);
- SetBit(cUnit, cUnit->entryBlock->dominators, cUnit->entryBlock->id);
+ ClearAllBits(cu->entry_block->dominators);
+ SetBit(cu, cu->entry_block->dominators, cu->entry_block->id);
- if (cUnit->tempBlockV == NULL) {
- cUnit->tempBlockV = AllocBitVector(cUnit, numTotalBlocks,
+ if (cu->temp_block_v == NULL) {
+ cu->temp_block_v = AllocBitVector(cu, num_total_blocks,
false /* expandable */,
kBitMapTmpBlockV);
} else {
- ClearAllBits(cUnit->tempBlockV);
+ ClearAllBits(cu->temp_block_v);
}
- cUnit->entryBlock->iDom = NULL;
+ cu->entry_block->i_dom = NULL;
/* For testing, compute sets using alternate mechanism */
- if (cUnit->enableDebug & (1 << kDebugVerifyDataflow)) {
+ if (cu->enable_debug & (1 << kDebugVerifyDataflow)) {
// Use alternate mechanism to compute dominators for comparison
- DataFlowAnalysisDispatcher(cUnit, SlowComputeBlockDominators,
+ DataFlowAnalysisDispatcher(cu, SlowComputeBlockDominators,
kPreOrderDFSTraversal,
- true /* isIterative */);
+ true /* is_iterative */);
- DataFlowAnalysisDispatcher(cUnit, SlowComputeBlockIDom,
+ DataFlowAnalysisDispatcher(cu, SlowComputeBlockIDom,
kReachableNodes,
- false /* isIterative */);
+ false /* is_iterative */);
}
- DataFlowAnalysisDispatcher(cUnit, SetDominators,
+ DataFlowAnalysisDispatcher(cu, SetDominators,
kReachableNodes,
- false /* isIterative */);
+ false /* is_iterative */);
- DataFlowAnalysisDispatcher(cUnit, ComputeBlockDominiators,
+ DataFlowAnalysisDispatcher(cu, ComputeBlockDominiators,
kReversePostOrderTraversal,
- false /* isIterative */);
+ false /* is_iterative */);
/*
* Now go ahead and compute the post order traversal based on the
- * iDominated sets.
+ * i_dominated sets.
*/
- if (cUnit->domPostOrderTraversal.elemList == NULL) {
- CompilerInitGrowableList(cUnit, &cUnit->domPostOrderTraversal,
- numReachableBlocks, kListDomPostOrderTraversal);
+ if (cu->dom_post_order_traversal.elem_list == NULL) {
+ CompilerInitGrowableList(cu, &cu->dom_post_order_traversal,
+ num_reachable_blocks, kListDomPostOrderTraversal);
} else {
- cUnit->domPostOrderTraversal.numUsed = 0;
+ cu->dom_post_order_traversal.num_used = 0;
}
- ComputeDomPostOrderTraversal(cUnit, cUnit->entryBlock);
- DCHECK_EQ(cUnit->domPostOrderTraversal.numUsed, static_cast<unsigned>(cUnit->numReachableBlocks));
+ ComputeDomPostOrderTraversal(cu, cu->entry_block);
+ DCHECK_EQ(cu->dom_post_order_traversal.num_used, static_cast<unsigned>(cu->num_reachable_blocks));
/* Now compute the dominance frontier for each block */
- DataFlowAnalysisDispatcher(cUnit, ComputeDominanceFrontier,
+ DataFlowAnalysisDispatcher(cu, ComputeDominanceFrontier,
kPostOrderDOMTraversal,
- false /* isIterative */);
+ false /* is_iterative */);
}
/*
@@ -654,15 +654,15 @@
static void ComputeSuccLineIn(ArenaBitVector* dest, const ArenaBitVector* src1,
const ArenaBitVector* src2)
{
- if (dest->storageSize != src1->storageSize ||
- dest->storageSize != src2->storageSize ||
+ if (dest->storage_size != src1->storage_size ||
+ dest->storage_size != src2->storage_size ||
dest->expandable != src1->expandable ||
dest->expandable != src2->expandable) {
LOG(FATAL) << "Incompatible set properties";
}
unsigned int idx;
- for (idx = 0; idx < dest->storageSize; idx++) {
+ for (idx = 0; idx < dest->storage_size; idx++) {
dest->storage[idx] |= src1->storage[idx] & ~src2->storage[idx];
}
}
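
ComputeSuccLineIn implements the backward liveness transfer dest |= src1 & ~src2: a register stays live into this block if a successor needs it and this block does not define it first. A compact sketch using std::bitset (the size is an illustrative bound, not a real ART constant):

#include <bitset>

constexpr int kNumVRegs = 64;  // illustrative upper bound on Dalvik vregs

// Fold one successor's live-in set into ours, screening out locally
// defined registers: live_in |= succ_live_in & ~defs.
static void MergeSuccLiveIn(std::bitset<kNumVRegs>& live_in,
                            const std::bitset<kNumVRegs>& succ_live_in,
                            const std::bitset<kNumVRegs>& defs) {
  live_in |= succ_live_in & ~defs;
}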
@@ -672,120 +672,120 @@
* The calculated result is used for phi-node pruning - where we only need to
* insert a phi node if the variable is live-in to the block.
*/
-static bool ComputeBlockLiveIns(CompilationUnit* cUnit, BasicBlock* bb)
+static bool ComputeBlockLiveIns(CompilationUnit* cu, BasicBlock* bb)
{
- ArenaBitVector* tempDalvikRegisterV = cUnit->tempDalvikRegisterV;
+ ArenaBitVector* temp_dalvik_register_v = cu->temp_dalvik_register_v;
- if (bb->dataFlowInfo == NULL) return false;
- CopyBitVector(tempDalvikRegisterV, bb->dataFlowInfo->liveInV);
- if (bb->taken && bb->taken->dataFlowInfo)
- ComputeSuccLineIn(tempDalvikRegisterV, bb->taken->dataFlowInfo->liveInV,
- bb->dataFlowInfo->defV);
- if (bb->fallThrough && bb->fallThrough->dataFlowInfo)
- ComputeSuccLineIn(tempDalvikRegisterV,
- bb->fallThrough->dataFlowInfo->liveInV,
- bb->dataFlowInfo->defV);
- if (bb->successorBlockList.blockListType != kNotUsed) {
+ if (bb->data_flow_info == NULL) return false;
+ CopyBitVector(temp_dalvik_register_v, bb->data_flow_info->live_in_v);
+ if (bb->taken && bb->taken->data_flow_info)
+ ComputeSuccLineIn(temp_dalvik_register_v, bb->taken->data_flow_info->live_in_v,
+ bb->data_flow_info->def_v);
+ if (bb->fall_through && bb->fall_through->data_flow_info)
+ ComputeSuccLineIn(temp_dalvik_register_v,
+ bb->fall_through->data_flow_info->live_in_v,
+ bb->data_flow_info->def_v);
+ if (bb->successor_block_list.block_list_type != kNotUsed) {
GrowableListIterator iterator;
- GrowableListIteratorInit(&bb->successorBlockList.blocks,
+ GrowableListIteratorInit(&bb->successor_block_list.blocks,
&iterator);
while (true) {
- SuccessorBlockInfo *successorBlockInfo =
+ SuccessorBlockInfo *successor_block_info =
reinterpret_cast<SuccessorBlockInfo*>(GrowableListIteratorNext(&iterator));
- if (successorBlockInfo == NULL) break;
- BasicBlock* succBB = successorBlockInfo->block;
- if (succBB->dataFlowInfo) {
- ComputeSuccLineIn(tempDalvikRegisterV,
- succBB->dataFlowInfo->liveInV,
- bb->dataFlowInfo->defV);
+ if (successor_block_info == NULL) break;
+ BasicBlock* succ_bb = successor_block_info->block;
+ if (succ_bb->data_flow_info) {
+ ComputeSuccLineIn(temp_dalvik_register_v,
+ succ_bb->data_flow_info->live_in_v,
+ bb->data_flow_info->def_v);
}
}
}
- if (CompareBitVectors(tempDalvikRegisterV, bb->dataFlowInfo->liveInV)) {
- CopyBitVector(bb->dataFlowInfo->liveInV, tempDalvikRegisterV);
+ if (CompareBitVectors(temp_dalvik_register_v, bb->data_flow_info->live_in_v)) {
+ CopyBitVector(bb->data_flow_info->live_in_v, temp_dalvik_register_v);
return true;
}
return false;
}
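(ComputeBlockLiveIns returns true only when it grew the block's live-in set, which is what lets the is_iterative dispatch in InsertPhiNodes below converge. DataFlowAnalysisDispatcher is not shown in this CL, so the driver below is an assumption about its contract, with forward-declared opaque types standing in for the real ones:)

#include <vector>

struct CompilationUnit;
struct BasicBlock;

// Hypothetical sketch of the is_iterative contract: the worker reports
// whether it changed anything, and the traversal repeats until one full
// pass over the blocks makes no change (a fixed point).
void IterateToFixedPoint(CompilationUnit* cu,
                         const std::vector<BasicBlock*>& traversal,
                         bool (*worker)(CompilationUnit*, BasicBlock*)) {
  bool changed = true;
  while (changed) {
    changed = false;
    for (BasicBlock* bb : traversal) {
      changed |= worker(cu, bb);
    }
  }
}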
/* Insert phi nodes for each variable at the dominance frontiers */
-static void InsertPhiNodes(CompilationUnit* cUnit)
+static void InsertPhiNodes(CompilationUnit* cu)
{
- int dalvikReg;
- const GrowableList* blockList = &cUnit->blockList;
- ArenaBitVector* phiBlocks =
- AllocBitVector(cUnit, cUnit->numBlocks, false, kBitMapPhi);
- ArenaBitVector* tmpBlocks =
- AllocBitVector(cUnit, cUnit->numBlocks, false, kBitMapTmpBlocks);
- ArenaBitVector* inputBlocks =
- AllocBitVector(cUnit, cUnit->numBlocks, false, kBitMapInputBlocks);
+ int dalvik_reg;
+ const GrowableList* block_list = &cu->block_list;
+ ArenaBitVector* phi_blocks =
+ AllocBitVector(cu, cu->num_blocks, false, kBitMapPhi);
+ ArenaBitVector* tmp_blocks =
+ AllocBitVector(cu, cu->num_blocks, false, kBitMapTmpBlocks);
+ ArenaBitVector* input_blocks =
+ AllocBitVector(cu, cu->num_blocks, false, kBitMapInputBlocks);
- cUnit->tempDalvikRegisterV =
- AllocBitVector(cUnit, cUnit->numDalvikRegisters, false,
+ cu->temp_dalvik_register_v =
+ AllocBitVector(cu, cu->num_dalvik_registers, false,
kBitMapRegisterV);
- DataFlowAnalysisDispatcher(cUnit, ComputeBlockLiveIns,
- kPostOrderDFSTraversal, true /* isIterative */);
+ DataFlowAnalysisDispatcher(cu, ComputeBlockLiveIns,
+ kPostOrderDFSTraversal, true /* is_iterative */);
/* Iterate through each Dalvik register */
- for (dalvikReg = cUnit->numDalvikRegisters - 1; dalvikReg >= 0; dalvikReg--) {
+ for (dalvik_reg = cu->num_dalvik_registers - 1; dalvik_reg >= 0; dalvik_reg--) {
bool change;
ArenaBitVectorIterator iterator;
- CopyBitVector(inputBlocks, cUnit->defBlockMatrix[dalvikReg]);
- ClearAllBits(phiBlocks);
+ CopyBitVector(input_blocks, cu->def_block_matrix[dalvik_reg]);
+ ClearAllBits(phi_blocks);
/* Calculate the phi blocks for each Dalvik register */
do {
change = false;
- ClearAllBits(tmpBlocks);
- BitVectorIteratorInit(inputBlocks, &iterator);
+ ClearAllBits(tmp_blocks);
+ BitVectorIteratorInit(input_blocks, &iterator);
while (true) {
int idx = BitVectorIteratorNext(&iterator);
if (idx == -1) break;
- BasicBlock* defBB =
- reinterpret_cast<BasicBlock*>(GrowableListGetElement(blockList, idx));
+ BasicBlock* def_bb =
+ reinterpret_cast<BasicBlock*>(GrowableListGetElement(block_list, idx));
- /* Merge the dominance frontier to tmpBlocks */
+ /* Merge the dominance frontier into tmp_blocks */
// TUNING: hot call to UnifyBitVetors
- if (defBB->domFrontier != NULL) {
- UnifyBitVetors(tmpBlocks, tmpBlocks, defBB->domFrontier);
+ if (def_bb->dom_frontier != NULL) {
+ UnifyBitVetors(tmp_blocks, tmp_blocks, def_bb->dom_frontier);
}
}
- if (CompareBitVectors(phiBlocks, tmpBlocks)) {
+ if (CompareBitVectors(phi_blocks, tmp_blocks)) {
change = true;
- CopyBitVector(phiBlocks, tmpBlocks);
+ CopyBitVector(phi_blocks, tmp_blocks);
/*
* Iterate through the original blocks plus the new ones in
* the dominance frontier.
*/
- CopyBitVector(inputBlocks, phiBlocks);
- UnifyBitVetors(inputBlocks, inputBlocks,
- cUnit->defBlockMatrix[dalvikReg]);
+ CopyBitVector(input_blocks, phi_blocks);
+ UnifyBitVetors(input_blocks, input_blocks,
+ cu->def_block_matrix[dalvik_reg]);
}
} while (change);
/*
- * Insert a phi node for dalvikReg in the phiBlocks if the Dalvik
+ * Insert a phi node for dalvik_reg in the phi_blocks if the Dalvik
* register is in the live-in set.
*/
- BitVectorIteratorInit(phiBlocks, &iterator);
+ BitVectorIteratorInit(phi_blocks, &iterator);
while (true) {
int idx = BitVectorIteratorNext(&iterator);
if (idx == -1) break;
- BasicBlock* phiBB =
- reinterpret_cast<BasicBlock*>(GrowableListGetElement(blockList, idx));
+ BasicBlock* phi_bb =
+ reinterpret_cast<BasicBlock*>(GrowableListGetElement(block_list, idx));
/* Variable will be clobbered before being used - no need for phi */
- if (!IsBitSet(phiBB->dataFlowInfo->liveInV, dalvikReg)) continue;
- MIR *phi = static_cast<MIR*>(NewMem(cUnit, sizeof(MIR), true, kAllocDFInfo));
+ if (!IsBitSet(phi_bb->data_flow_info->live_in_v, dalvik_reg)) continue;
+ MIR *phi = static_cast<MIR*>(NewMem(cu, sizeof(MIR), true, kAllocDFInfo));
phi->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpPhi);
- phi->dalvikInsn.vA = dalvikReg;
- phi->offset = phiBB->startOffset;
- phi->meta.phiNext = cUnit->phiList;
- cUnit->phiList = phi;
- PrependMIR(phiBB, phi);
+ phi->dalvikInsn.vA = dalvik_reg;
+ phi->offset = phi_bb->start_offset;
+ phi->meta.phi_next = cu->phi_list;
+ cu->phi_list = phi;
+ PrependMIR(phi_bb, phi);
}
}
}
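(The do/while loop above is computing the iterated dominance frontier of the register's definition sites. A self-contained model of just that fixed point, using std::set in place of ArenaBitVector; names are illustrative, and df is assumed to be the per-block frontier from the earlier pass:)

#include <set>
#include <vector>

std::set<int> IteratedDominanceFrontier(
    const std::vector<std::set<int>>& df,   // per-block frontiers
    const std::set<int>& def_blocks) {      // blocks defining the vreg
  std::set<int> phi_blocks;
  std::set<int> input = def_blocks;
  bool change = true;
  while (change) {
    std::set<int> tmp;
    for (int b : input) {
      tmp.insert(df[b].begin(), df[b].end());  // merge frontiers
    }
    change = (tmp != phi_blocks);
    if (change) {
      phi_blocks = tmp;
      input = phi_blocks;                      // frontier blocks plus
      input.insert(def_blocks.begin(),         // the original defs
                   def_blocks.end());
    }
  }
  return phi_blocks;
}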
@@ -794,140 +794,140 @@
 * Worker function to insert phi operands, using the latest SSA names
 * from the predecessor blocks
*/
-static bool InsertPhiNodeOperands(CompilationUnit* cUnit, BasicBlock* bb)
+static bool InsertPhiNodeOperands(CompilationUnit* cu, BasicBlock* bb)
{
GrowableListIterator iter;
MIR *mir;
std::vector<int> uses;
- std::vector<int> incomingArc;
+ std::vector<int> incoming_arc;
/* Phi nodes are at the beginning of each block */
- for (mir = bb->firstMIRInsn; mir; mir = mir->next) {
+ for (mir = bb->first_mir_insn; mir; mir = mir->next) {
if (mir->dalvikInsn.opcode != static_cast<Instruction::Code>(kMirOpPhi))
return true;
- int ssaReg = mir->ssaRep->defs[0];
- DCHECK_GE(ssaReg, 0); // Shouldn't see compiler temps here
- int vReg = SRegToVReg(cUnit, ssaReg);
+ int ssa_reg = mir->ssa_rep->defs[0];
+ DCHECK_GE(ssa_reg, 0); // Shouldn't see compiler temps here
+ int v_reg = SRegToVReg(cu, ssa_reg);
uses.clear();
- incomingArc.clear();
+ incoming_arc.clear();
/* Iterate through the predecessors */
GrowableListIteratorInit(bb->predecessors, &iter);
while (true) {
- BasicBlock* predBB =
+ BasicBlock* pred_bb =
reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iter));
- if (!predBB) break;
- int ssaReg = predBB->dataFlowInfo->vRegToSSAMap[vReg];
- uses.push_back(ssaReg);
- incomingArc.push_back(predBB->id);
+ if (!pred_bb) break;
+ int ssa_reg = pred_bb->data_flow_info->vreg_to_ssa_map[v_reg];
+ uses.push_back(ssa_reg);
+ incoming_arc.push_back(pred_bb->id);
}
/* Count the number of SSA registers for a Dalvik register */
- int numUses = uses.size();
- mir->ssaRep->numUses = numUses;
- mir->ssaRep->uses =
- static_cast<int*>(NewMem(cUnit, sizeof(int) * numUses, false, kAllocDFInfo));
- mir->ssaRep->fpUse =
- static_cast<bool*>(NewMem(cUnit, sizeof(bool) * numUses, true, kAllocDFInfo));
+ int num_uses = uses.size();
+ mir->ssa_rep->num_uses = num_uses;
+ mir->ssa_rep->uses =
+ static_cast<int*>(NewMem(cu, sizeof(int) * num_uses, false, kAllocDFInfo));
+ mir->ssa_rep->fp_use =
+ static_cast<bool*>(NewMem(cu, sizeof(bool) * num_uses, true, kAllocDFInfo));
int* incoming =
- static_cast<int*>(NewMem(cUnit, sizeof(int) * numUses, false, kAllocDFInfo));
+ static_cast<int*>(NewMem(cu, sizeof(int) * num_uses, false, kAllocDFInfo));
// TODO: Ugly, rework (but don't burden each MIR/LIR for Phi-only needs)
mir->dalvikInsn.vB = reinterpret_cast<uintptr_t>(incoming);
/* Set the uses array for the phi node */
- int *usePtr = mir->ssaRep->uses;
- for (int i = 0; i < numUses; i++) {
- *usePtr++ = uses[i];
- *incoming++ = incomingArc[i];
+ int *use_ptr = mir->ssa_rep->uses;
+ for (int i = 0; i < num_uses; i++) {
+ *use_ptr++ = uses[i];
+ *incoming++ = incoming_arc[i];
}
}
return true;
}
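(Concretely, for a join block with two predecessors in which Dalvik register v0 was last renamed to v0_3 and v0_7, the arrays built above pair each phi use with the block it arrived from. A tiny worked example under those made-up names:)

#include <cstdio>
#include <vector>

int main() {
  // uses[i] is the latest SSA name of v0 in predecessor incoming[i],
  // so the pruned phi reads: v0_8 = PHI(v0_3 @ block 1, v0_7 @ block 2).
  std::vector<int> uses = {3, 7};       // v0_3 and v0_7
  std::vector<int> incoming = {1, 2};   // predecessor block ids
  for (size_t i = 0; i < uses.size(); ++i) {
    std::printf("use v0_%d from block %d\n", uses[i], incoming[i]);
  }
  return 0;
}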
-static void DoDFSPreOrderSSARename(CompilationUnit* cUnit, BasicBlock* block)
+static void DoDFSPreOrderSSARename(CompilationUnit* cu, BasicBlock* block)
{
if (block->visited || block->hidden) return;
block->visited = true;
/* Process this block */
- DoSSAConversion(cUnit, block);
- int mapSize = sizeof(int) * cUnit->numDalvikRegisters;
+ DoSSAConversion(cu, block);
+ int map_size = sizeof(int) * cu->num_dalvik_registers;
/* Save SSA map snapshot */
- int* savedSSAMap = static_cast<int*>(NewMem(cUnit, mapSize, false, kAllocDalvikToSSAMap));
- memcpy(savedSSAMap, cUnit->vRegToSSAMap, mapSize);
+ int* saved_ssa_map = static_cast<int*>(NewMem(cu, map_size, false, kAllocDalvikToSSAMap));
+ memcpy(saved_ssa_map, cu->vreg_to_ssa_map, map_size);
- if (block->fallThrough) {
- DoDFSPreOrderSSARename(cUnit, block->fallThrough);
+ if (block->fall_through) {
+ DoDFSPreOrderSSARename(cu, block->fall_through);
/* Restore SSA map snapshot */
- memcpy(cUnit->vRegToSSAMap, savedSSAMap, mapSize);
+ memcpy(cu->vreg_to_ssa_map, saved_ssa_map, map_size);
}
if (block->taken) {
- DoDFSPreOrderSSARename(cUnit, block->taken);
+ DoDFSPreOrderSSARename(cu, block->taken);
/* Restore SSA map snapshot */
- memcpy(cUnit->vRegToSSAMap, savedSSAMap, mapSize);
+ memcpy(cu->vreg_to_ssa_map, saved_ssa_map, map_size);
}
- if (block->successorBlockList.blockListType != kNotUsed) {
+ if (block->successor_block_list.block_list_type != kNotUsed) {
GrowableListIterator iterator;
- GrowableListIteratorInit(&block->successorBlockList.blocks, &iterator);
+ GrowableListIteratorInit(&block->successor_block_list.blocks, &iterator);
while (true) {
- SuccessorBlockInfo *successorBlockInfo =
+ SuccessorBlockInfo *successor_block_info =
reinterpret_cast<SuccessorBlockInfo*>(GrowableListIteratorNext(&iterator));
- if (successorBlockInfo == NULL) break;
- BasicBlock* succBB = successorBlockInfo->block;
- DoDFSPreOrderSSARename(cUnit, succBB);
+ if (successor_block_info == NULL) break;
+ BasicBlock* succ_bb = successor_block_info->block;
+ DoDFSPreOrderSSARename(cu, succ_bb);
/* Restore SSA map snapshot */
- memcpy(cUnit->vRegToSSAMap, savedSSAMap, mapSize);
+ memcpy(cu->vreg_to_ssa_map, saved_ssa_map, map_size);
}
}
- cUnit->vRegToSSAMap = savedSSAMap;
+ cu->vreg_to_ssa_map = saved_ssa_map;
return;
}
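(The snapshot/restore pattern above guarantees that each successor is renamed against the SSA map as it stood at the end of this block, not as a sibling left it. A minimal standalone model of that discipline — Block, and the single pretend definition per block, are illustrative only:)

#include <vector>

struct Block {
  bool visited = false;
  std::vector<Block*> children;  // fall-through, taken, successor list
};

void Rename(Block* b, std::vector<int>& vreg_to_ssa, int* next_ssa) {
  if (b == nullptr || b->visited) return;
  b->visited = true;
  vreg_to_ssa[0] = (*next_ssa)++;           // "process this block"
  std::vector<int> snapshot = vreg_to_ssa;  // save SSA map snapshot
  for (Block* child : b->children) {
    Rename(child, vreg_to_ssa, next_ssa);
    vreg_to_ssa = snapshot;                 // restore after each child
  }
}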
/* Perform SSA transformation for the whole method */
-void SSATransformation(CompilationUnit* cUnit)
+void SSATransformation(CompilationUnit* cu)
{
/* Compute the DFS order */
- ComputeDFSOrders(cUnit);
+ ComputeDFSOrders(cu);
- if (!cUnit->disableDataflow) {
+ if (!cu->disable_dataflow) {
/* Compute the dominator info */
- ComputeDominators(cUnit);
+ ComputeDominators(cu);
}
/* Allocate data structures in preparation for SSA conversion */
- CompilerInitializeSSAConversion(cUnit);
+ CompilerInitializeSSAConversion(cu);
- if (!cUnit->disableDataflow) {
+ if (!cu->disable_dataflow) {
/* Find out the "Dalvik reg def x block" relation */
- ComputeDefBlockMatrix(cUnit);
+ ComputeDefBlockMatrix(cu);
/* Insert phi nodes to dominance frontiers for all variables */
- InsertPhiNodes(cUnit);
+ InsertPhiNodes(cu);
}
/* Rename register names by local defs and phi nodes */
- DataFlowAnalysisDispatcher(cUnit, ClearVisitedFlag,
- kAllNodes, false /* isIterative */);
- DoDFSPreOrderSSARename(cUnit, cUnit->entryBlock);
+ DataFlowAnalysisDispatcher(cu, ClearVisitedFlag,
+ kAllNodes, false /* is_iterative */);
+ DoDFSPreOrderSSARename(cu, cu->entry_block);
- if (!cUnit->disableDataflow) {
+ if (!cu->disable_dataflow) {
/*
* Shared temp bit vector used by each block to count the number of defs
* from all the predecessor blocks.
*/
- cUnit->tempSSARegisterV = AllocBitVector(cUnit, cUnit->numSSARegs,
+ cu->temp_ssa_register_v = AllocBitVector(cu, cu->num_ssa_regs,
false, kBitMapTempSSARegisterV);
- cUnit->tempSSABlockIdV =
- static_cast<int*>(NewMem(cUnit, sizeof(int) * cUnit->numSSARegs, false, kAllocDFInfo));
+ cu->temp_ssa_block_id_v =
+ static_cast<int*>(NewMem(cu, sizeof(int) * cu->num_ssa_regs, false, kAllocDFInfo));
/* Insert phi-operands with latest SSA names from predecessor blocks */
- DataFlowAnalysisDispatcher(cUnit, InsertPhiNodeOperands,
- kReachableNodes, false /* isIterative */);
+ DataFlowAnalysisDispatcher(cu, InsertPhiNodeOperands,
+ kReachableNodes, false /* is_iterative */);
}
}
diff --git a/src/compiler/ssa_transformation.h b/src/compiler/ssa_transformation.h
index aef7782..7f0d7f8 100644
--- a/src/compiler/ssa_transformation.h
+++ b/src/compiler/ssa_transformation.h
@@ -21,7 +21,7 @@
namespace art {
-void SSATransformation(CompilationUnit* cUnit);
+void SSATransformation(CompilationUnit* cu);
} // namespace art