64-bit prep
Preparation for 64-bit roll.
o Eliminated storing pointers in 32-bit int slots in LIR.
o General size reductions of common structures to reduce impact
of doubled pointer sizes:
- BasicBlock struct was 72 bytes, now is 48.
- MIR struct was 72 bytes, now is 64.
- RegLocation was 12 bytes, now is 8.
o Generally replaced uses of BasicBlock* pointers with 16-bit Ids.
o Replaced several doubly-linked lists with singly-linked to save
one stored pointer per node.
o We had quite a few uses of uintptr_t that were a holdover from
the JIT (which used pointers to mapped dex & actual code cache
addresses rather than trace-relative offsets). Replaced those with
uint32_t.
o Clean up handling of embedded data for switch tables and array data.
o Miscellaneous cleanup.
I anticipate one or two additional CLs to reduce the size of MIR and LIR
structs.
Change-Id: I58e426d3f8e5efe64c1146b2823453da99451230
diff --git a/compiler/dex/quick/arm/assemble_arm.cc b/compiler/dex/quick/arm/assemble_arm.cc
index 3c646c4..cc40e99 100644
--- a/compiler/dex/quick/arm/assemble_arm.cc
+++ b/compiler/dex/quick/arm/assemble_arm.cc
@@ -1031,8 +1031,7 @@
} else if (LIKELY(!lir->flags.is_nop)) {
const ArmEncodingMap *encoder = &EncodingMap[lir->opcode];
uint32_t bits = encoder->skeleton;
- int i;
- for (i = 0; i < 4; i++) {
+ for (int i = 0; i < 4; i++) {
uint32_t operand;
uint32_t value;
operand = lir->operands[i];
@@ -1088,7 +1087,7 @@
case kFmtDfp: {
DCHECK(ARM_DOUBLEREG(operand));
DCHECK_EQ((operand & 0x1), 0U);
- int reg_name = (operand & ARM_FP_REG_MASK) >> 1;
+ uint32_t reg_name = (operand & ARM_FP_REG_MASK) >> 1;
/* Snag the 1-bit slice and position it */
value = ((reg_name & 0x10) >> 4) << encoder->field_loc[i].end;
/* Extract and position the 4-bit slice */
@@ -1155,9 +1154,9 @@
LIR* lir;
LIR* prev_lir;
int assembler_retries = 0;
- int starting_offset = EncodeRange(first_lir_insn_, last_lir_insn_, 0);
+ CodeOffset starting_offset = EncodeRange(first_lir_insn_, last_lir_insn_, 0);
data_offset_ = (starting_offset + 0x3) & ~0x3;
- int offset_adjustment;
+ int32_t offset_adjustment;
AssignDataOffsets();
/*
@@ -1200,10 +1199,10 @@
* we revert to a multiple-instruction materialization sequence.
*/
LIR *lir_target = lir->target;
- uintptr_t pc = (lir->offset + 4) & ~3;
- uintptr_t target = lir_target->offset +
+ CodeOffset pc = (lir->offset + 4) & ~3;
+ CodeOffset target = lir_target->offset +
((lir_target->flags.generation == lir->flags.generation) ? 0 : offset_adjustment);
- int delta = target - pc;
+ int32_t delta = target - pc;
if (res != kSuccess) {
/*
* In this case, we're just estimating and will do it again for real. Ensure offset
@@ -1281,10 +1280,10 @@
}
case kFixupCBxZ: {
LIR *target_lir = lir->target;
- uintptr_t pc = lir->offset + 4;
- uintptr_t target = target_lir->offset +
+ CodeOffset pc = lir->offset + 4;
+ CodeOffset target = target_lir->offset +
((target_lir->flags.generation == lir->flags.generation) ? 0 : offset_adjustment);
- int delta = target - pc;
+ int32_t delta = target - pc;
if (delta > 126 || delta < 0) {
/*
* Convert to cmp rx,#0 / b[eq/ne] tgt pair
@@ -1351,10 +1350,10 @@
}
case kFixupCondBranch: {
LIR *target_lir = lir->target;
- int delta = 0;
+ int32_t delta = 0;
DCHECK(target_lir);
- uintptr_t pc = lir->offset + 4;
- uintptr_t target = target_lir->offset +
+ CodeOffset pc = lir->offset + 4;
+ CodeOffset target = target_lir->offset +
((target_lir->flags.generation == lir->flags.generation) ? 0 : offset_adjustment);
delta = target - pc;
if ((lir->opcode == kThumbBCond) && (delta > 254 || delta < -256)) {
@@ -1370,10 +1369,10 @@
}
case kFixupT2Branch: {
LIR *target_lir = lir->target;
- uintptr_t pc = lir->offset + 4;
- uintptr_t target = target_lir->offset +
+ CodeOffset pc = lir->offset + 4;
+ CodeOffset target = target_lir->offset +
((target_lir->flags.generation == lir->flags.generation) ? 0 : offset_adjustment);
- int delta = target - pc;
+ int32_t delta = target - pc;
lir->operands[0] = delta >> 1;
if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && lir->operands[0] == 0) {
// Useless branch
@@ -1387,10 +1386,10 @@
}
case kFixupT1Branch: {
LIR *target_lir = lir->target;
- uintptr_t pc = lir->offset + 4;
- uintptr_t target = target_lir->offset +
+ CodeOffset pc = lir->offset + 4;
+ CodeOffset target = target_lir->offset +
((target_lir->flags.generation == lir->flags.generation) ? 0 : offset_adjustment);
- int delta = target - pc;
+ int32_t delta = target - pc;
if (delta > 2046 || delta < -2048) {
// Convert to Thumb2BCond w/ kArmCondAl
offset_adjustment -= lir->flags.size;
@@ -1416,14 +1415,14 @@
case kFixupBlx1: {
DCHECK(NEXT_LIR(lir)->opcode == kThumbBlx2);
/* cur_pc is Thumb */
- uintptr_t cur_pc = (lir->offset + 4) & ~3;
- uintptr_t target = lir->operands[1];
+ CodeOffset cur_pc = (lir->offset + 4) & ~3;
+ CodeOffset target = lir->operands[1];
/* Match bit[1] in target with base */
if (cur_pc & 0x2) {
target |= 0x2;
}
- int delta = target - cur_pc;
+ int32_t delta = target - cur_pc;
DCHECK((delta >= -(1<<22)) && (delta <= ((1<<22)-2)));
lir->operands[0] = (delta >> 12) & 0x7ff;
@@ -1433,10 +1432,10 @@
case kFixupBl1: {
DCHECK(NEXT_LIR(lir)->opcode == kThumbBl2);
/* Both cur_pc and target are Thumb */
- uintptr_t cur_pc = lir->offset + 4;
- uintptr_t target = lir->operands[1];
+ CodeOffset cur_pc = lir->offset + 4;
+ CodeOffset target = lir->operands[1];
- int delta = target - cur_pc;
+ int32_t delta = target - cur_pc;
DCHECK((delta >= -(1<<22)) && (delta <= ((1<<22)-2)));
lir->operands[0] = (delta >> 12) & 0x7ff;
@@ -1444,20 +1443,19 @@
break;
}
case kFixupAdr: {
- SwitchTable *tab_rec = reinterpret_cast<SwitchTable*>(lir->operands[2]);
+ EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(lir->operands[2]));
LIR* target = lir->target;
- int target_disp = (tab_rec != NULL) ? tab_rec->offset + offset_adjustment
+ int32_t target_disp = (tab_rec != NULL) ? tab_rec->offset + offset_adjustment
: target->offset + ((target->flags.generation == lir->flags.generation) ? 0 : offset_adjustment);
- int disp = target_disp - ((lir->offset + 4) & ~3);
+ int32_t disp = target_disp - ((lir->offset + 4) & ~3);
if (disp < 4096) {
lir->operands[1] = disp;
} else {
// convert to ldimm16l, ldimm16h, add tgt, pc, operands[0]
// TUNING: if this case fires often, it can be improved. Not expected to be common.
LIR *new_mov16L =
- RawLIR(lir->dalvik_offset, kThumb2MovImm16LST,
- lir->operands[0], 0, reinterpret_cast<uintptr_t>(lir),
- reinterpret_cast<uintptr_t>(tab_rec), 0, lir->target);
+ RawLIR(lir->dalvik_offset, kThumb2MovImm16LST, lir->operands[0], 0,
+ WrapPointer(lir), WrapPointer(tab_rec), 0, lir->target);
new_mov16L->flags.size = EncodingMap[new_mov16L->opcode].size;
new_mov16L->flags.fixup = kFixupMovImmLST;
new_mov16L->offset = lir->offset;
@@ -1467,11 +1465,9 @@
offset_adjustment += new_mov16L->flags.size;
InsertFixupBefore(prev_lir, lir, new_mov16L);
prev_lir = new_mov16L; // Now we've got a new prev.
-
LIR *new_mov16H =
- RawLIR(lir->dalvik_offset, kThumb2MovImm16HST,
- lir->operands[0], 0, reinterpret_cast<uintptr_t>(lir),
- reinterpret_cast<uintptr_t>(tab_rec), 0, lir->target);
+ RawLIR(lir->dalvik_offset, kThumb2MovImm16HST, lir->operands[0], 0,
+ WrapPointer(lir), WrapPointer(tab_rec), 0, lir->target);
new_mov16H->flags.size = EncodingMap[new_mov16H->opcode].size;
new_mov16H->flags.fixup = kFixupMovImmHST;
new_mov16H->offset = lir->offset;
@@ -1499,27 +1495,27 @@
}
case kFixupMovImmLST: {
// operands[1] should hold disp, [2] has add, [3] has tab_rec
- LIR *addPCInst = reinterpret_cast<LIR*>(lir->operands[2]);
- SwitchTable *tab_rec = reinterpret_cast<SwitchTable*>(lir->operands[3]);
+ LIR *addPCInst = reinterpret_cast<LIR*>(UnwrapPointer(lir->operands[2]));
+ EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(lir->operands[3]));
// If tab_rec is null, this is a literal load. Use target
LIR* target = lir->target;
- int target_disp = tab_rec ? tab_rec->offset : target->offset;
+ int32_t target_disp = tab_rec ? tab_rec->offset : target->offset;
lir->operands[1] = (target_disp - (addPCInst->offset + 4)) & 0xffff;
break;
}
case kFixupMovImmHST: {
// operands[1] should hold disp, [2] has add, [3] has tab_rec
- LIR *addPCInst = reinterpret_cast<LIR*>(lir->operands[2]);
- SwitchTable *tab_rec = reinterpret_cast<SwitchTable*>(lir->operands[3]);
+ LIR *addPCInst = reinterpret_cast<LIR*>(UnwrapPointer(lir->operands[2]));
+ EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(lir->operands[3]));
// If tab_rec is null, this is a literal load. Use target
LIR* target = lir->target;
- int target_disp = tab_rec ? tab_rec->offset : target->offset;
+ int32_t target_disp = tab_rec ? tab_rec->offset : target->offset;
lir->operands[1] =
((target_disp - (addPCInst->offset + 4)) >> 16) & 0xffff;
break;
}
case kFixupAlign4: {
- int required_size = lir->offset & 0x2;
+ int32_t required_size = lir->offset & 0x2;
if (lir->flags.size != required_size) {
offset_adjustment += required_size - lir->flags.size;
lir->flags.size = required_size;
@@ -1647,7 +1643,7 @@
void ArmMir2Lir::AssignDataOffsets() {
/* Set up offsets for literals */
- int offset = data_offset_;
+ CodeOffset offset = data_offset_;
offset = AssignLiteralOffset(offset);
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index 401da2a..51aca85 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -92,7 +92,7 @@
}
/* Find the next MIR, which may be in a following basic block */
-// TODO: should this be a utility in mir_graph?
+// TODO: make this a utility in mir_graph.
MIR* ArmMir2Lir::GetNextMir(BasicBlock** p_bb, MIR* mir) {
BasicBlock* bb = *p_bb;
MIR* orig_mir = mir;
@@ -103,7 +103,7 @@
if (mir != NULL) {
return mir;
} else {
- bb = bb->fall_through;
+ bb = mir_graph_->GetBasicBlock(bb->fall_through);
*p_bb = bb;
if (bb) {
mir = bb->first_mir_insn;
@@ -128,7 +128,7 @@
MIR* ArmMir2Lir::SpecialIGet(BasicBlock** bb, MIR* mir,
OpSize size, bool long_or_double, bool is_object) {
- int field_offset;
+ int32_t field_offset;
bool is_volatile;
uint32_t field_idx = mir->dalvikInsn.vC;
bool fast_path = FastInstance(field_idx, false, &field_offset, &is_volatile);
@@ -153,7 +153,7 @@
MIR* ArmMir2Lir::SpecialIPut(BasicBlock** bb, MIR* mir,
OpSize size, bool long_or_double, bool is_object) {
- int field_offset;
+ int32_t field_offset;
bool is_volatile;
uint32_t field_idx = mir->dalvikInsn.vC;
bool fast_path = FastInstance(field_idx, false, &field_offset, &is_volatile);
@@ -320,9 +320,9 @@
static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), ArenaAllocator::kAllocData));
tab_rec->table = table;
tab_rec->vaddr = current_dalvik_offset_;
- int size = table[1];
+ uint32_t size = table[1];
tab_rec->targets = static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*),
- ArenaAllocator::kAllocLIR));
+ ArenaAllocator::kAllocLIR));
switch_tables_.Insert(tab_rec);
// Get the switch value
@@ -338,7 +338,7 @@
r_key = tmp;
}
// Materialize a pointer to the switch table
- NewLIR3(kThumb2Adr, rBase, 0, reinterpret_cast<uintptr_t>(tab_rec));
+ NewLIR3(kThumb2Adr, rBase, 0, WrapPointer(tab_rec));
// Set up r_idx
int r_idx = AllocTemp();
LoadConstant(r_idx, size);
@@ -368,7 +368,7 @@
static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), ArenaAllocator::kAllocData));
tab_rec->table = table;
tab_rec->vaddr = current_dalvik_offset_;
- int size = table[1];
+ uint32_t size = table[1];
tab_rec->targets =
static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*), ArenaAllocator::kAllocLIR));
switch_tables_.Insert(tab_rec);
@@ -377,7 +377,7 @@
rl_src = LoadValue(rl_src, kCoreReg);
int table_base = AllocTemp();
// Materialize a pointer to the switch table
- NewLIR3(kThumb2Adr, table_base, 0, reinterpret_cast<uintptr_t>(tab_rec));
+ NewLIR3(kThumb2Adr, table_base, 0, WrapPointer(tab_rec));
int low_key = s4FromSwitchData(&table[2]);
int keyReg;
// Remove the bias, if necessary
@@ -433,7 +433,7 @@
LoadWordDisp(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayData).Int32Value(),
rARM_LR);
// Materialize a pointer to the fill data image
- NewLIR3(kThumb2Adr, r1, 0, reinterpret_cast<uintptr_t>(tab_rec));
+ NewLIR3(kThumb2Adr, r1, 0, WrapPointer(tab_rec));
ClobberCalleeSave();
LIR* call_inst = OpReg(kOpBlx, rARM_LR);
MarkSafepointPC(call_inst);
diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h
index aa5782b..0a3bfc1 100644
--- a/compiler/dex/quick/arm/codegen_arm.h
+++ b/compiler/dex/quick/arm/codegen_arm.h
@@ -74,7 +74,6 @@
uint32_t EncodeRange(LIR* head_lir, LIR* tail_lir, uint32_t starting_offset);
int AssignInsnOffsets();
void AssignOffsets();
- AssemblerStatus AssembleInstructions(uintptr_t start_addr);
void EncodeLIR(LIR* lir);
void DumpResourceMask(LIR* lir, uint64_t mask, const char* prefix);
void SetupTargetResourceMasks(LIR* lir, uint64_t flags);
@@ -120,7 +119,7 @@
void GenDivZeroCheck(int reg_lo, int reg_hi);
void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method);
void GenExitSequence();
- void GenFillArrayData(uint32_t table_offset, RegLocation rl_src);
+ void GenFillArrayData(DexOffset table_offset, RegLocation rl_src);
void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double);
void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir);
void GenSelect(BasicBlock* bb, MIR* mir);
@@ -132,8 +131,8 @@
int first_bit, int second_bit);
void GenNegDouble(RegLocation rl_dest, RegLocation rl_src);
void GenNegFloat(RegLocation rl_dest, RegLocation rl_src);
- void GenPackedSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
- void GenSparseSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
+ void GenPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
+ void GenSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
void GenSpecialCase(BasicBlock* bb, MIR* mir, SpecialCaseHandler special_case);
// Required for target - single operation generators.
diff --git a/compiler/dex/quick/arm/fp_arm.cc b/compiler/dex/quick/arm/fp_arm.cc
index 08d6778..480e021 100644
--- a/compiler/dex/quick/arm/fp_arm.cc
+++ b/compiler/dex/quick/arm/fp_arm.cc
@@ -176,7 +176,7 @@
void ArmMir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias,
bool is_double) {
- LIR* target = &block_label_list_[bb->taken->id];
+ LIR* target = &block_label_list_[bb->taken];
RegLocation rl_src1;
RegLocation rl_src2;
if (is_double) {
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index b1772fd..69ea4e9 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -122,8 +122,8 @@
int32_t val_hi = High32Bits(val);
DCHECK_GE(ModifiedImmediate(val_lo), 0);
DCHECK_GE(ModifiedImmediate(val_hi), 0);
- LIR* taken = &block_label_list_[bb->taken->id];
- LIR* not_taken = &block_label_list_[bb->fall_through->id];
+ LIR* taken = &block_label_list_[bb->taken];
+ LIR* not_taken = &block_label_list_[bb->fall_through];
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
int32_t low_reg = rl_src1.low_reg;
int32_t high_reg = rl_src1.high_reg;
@@ -178,23 +178,6 @@
void ArmMir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
RegLocation rl_result;
RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
- // Temporary debugging code
- int dest_sreg = mir->ssa_rep->defs[0];
- if ((dest_sreg < 0) || (dest_sreg >= mir_graph_->GetNumSSARegs())) {
- LOG(INFO) << "Bad target sreg: " << dest_sreg << ", in "
- << PrettyMethod(cu_->method_idx, *cu_->dex_file);
- LOG(INFO) << "at dex offset 0x" << std::hex << mir->offset;
- LOG(INFO) << "vreg = " << mir_graph_->SRegToVReg(dest_sreg);
- LOG(INFO) << "num uses = " << mir->ssa_rep->num_uses;
- if (mir->ssa_rep->num_uses == 1) {
- LOG(INFO) << "CONST case, vals = " << mir->dalvikInsn.vB << ", " << mir->dalvikInsn.vC;
- } else {
- LOG(INFO) << "MOVE case, operands = " << mir->ssa_rep->uses[1] << ", "
- << mir->ssa_rep->uses[2];
- }
- CHECK(false) << "Invalid target sreg on Select.";
- }
- // End temporary debugging code
RegLocation rl_dest = mir_graph_->GetDest(mir);
rl_src = LoadValue(rl_src, kCoreReg);
if (mir->ssa_rep->num_uses == 1) {
@@ -270,8 +253,8 @@
return;
}
}
- LIR* taken = &block_label_list_[bb->taken->id];
- LIR* not_taken = &block_label_list_[bb->fall_through->id];
+ LIR* taken = &block_label_list_[bb->taken];
+ LIR* not_taken = &block_label_list_[bb->fall_through];
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
rl_src2 = LoadValueWide(rl_src2, kCoreReg);
OpRegReg(kOpCmp, rl_src1.high_reg, rl_src2.high_reg);
diff --git a/compiler/dex/quick/arm/target_arm.cc b/compiler/dex/quick/arm/target_arm.cc
index 933c1a3..3395ae7 100644
--- a/compiler/dex/quick/arm/target_arm.cc
+++ b/compiler/dex/quick/arm/target_arm.cc
@@ -282,8 +282,8 @@
return buf;
}
-static int ExpandImmediate(int value) {
- int mode = (value & 0xf00) >> 8;
+static int32_t ExpandImmediate(int value) {
+ int32_t mode = (value & 0xf00) >> 8;
uint32_t bits = value & 0xff;
switch (mode) {
case 0:
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index 00de8de..a2ac6ef 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -22,14 +22,14 @@
/* This file contains codegen for the Thumb ISA. */
-static int EncodeImmSingle(int value) {
- int res;
- int bit_a = (value & 0x80000000) >> 31;
- int not_bit_b = (value & 0x40000000) >> 30;
- int bit_b = (value & 0x20000000) >> 29;
- int b_smear = (value & 0x3e000000) >> 25;
- int slice = (value & 0x01f80000) >> 19;
- int zeroes = (value & 0x0007ffff);
+static int32_t EncodeImmSingle(int32_t value) {
+ int32_t res;
+ int32_t bit_a = (value & 0x80000000) >> 31;
+ int32_t not_bit_b = (value & 0x40000000) >> 30;
+ int32_t bit_b = (value & 0x20000000) >> 29;
+ int32_t b_smear = (value & 0x3e000000) >> 25;
+ int32_t slice = (value & 0x01f80000) >> 19;
+ int32_t zeroes = (value & 0x0007ffff);
if (zeroes != 0)
return -1;
if (bit_b) {
@@ -47,15 +47,15 @@
* Determine whether value can be encoded as a Thumb2 floating point
* immediate. If not, return -1. If so return encoded 8-bit value.
*/
-static int EncodeImmDouble(int64_t value) {
- int res;
- int bit_a = (value & 0x8000000000000000ll) >> 63;
- int not_bit_b = (value & 0x4000000000000000ll) >> 62;
- int bit_b = (value & 0x2000000000000000ll) >> 61;
- int b_smear = (value & 0x3fc0000000000000ll) >> 54;
- int slice = (value & 0x003f000000000000ll) >> 48;
+static int32_t EncodeImmDouble(int64_t value) {
+ int32_t res;
+ int32_t bit_a = (value & 0x8000000000000000ll) >> 63;
+ int32_t not_bit_b = (value & 0x4000000000000000ll) >> 62;
+ int32_t bit_b = (value & 0x2000000000000000ll) >> 61;
+ int32_t b_smear = (value & 0x3fc0000000000000ll) >> 54;
+ int32_t slice = (value & 0x003f000000000000ll) >> 48;
uint64_t zeroes = (value & 0x0000ffffffffffffll);
- if (zeroes != 0)
+ if (zeroes != 0ull)
return -1;
if (bit_b) {
if ((not_bit_b != 0) || (b_smear != 0xff))
@@ -96,8 +96,8 @@
static int LeadingZeros(uint32_t val) {
uint32_t alt;
- int n;
- int count;
+ int32_t n;
+ int32_t count;
count = 16;
n = 32;
@@ -117,8 +117,8 @@
* immediate. If not, return -1. If so, return i:imm3:a:bcdefgh form.
*/
int ArmMir2Lir::ModifiedImmediate(uint32_t value) {
- int z_leading;
- int z_trailing;
+ int32_t z_leading;
+ int32_t z_trailing;
uint32_t b0 = value & 0xff;
/* Note: case of value==0 must use 0:000:0:0000000 encoding */
@@ -421,12 +421,12 @@
LIR* ArmMir2Lir::OpRegRegImm(OpKind op, int r_dest, int r_src1, int value) {
LIR* res;
bool neg = (value < 0);
- int abs_value = (neg) ? -value : value;
+ int32_t abs_value = (neg) ? -value : value;
ArmOpcode opcode = kThumbBkpt;
ArmOpcode alt_opcode = kThumbBkpt;
bool all_low_regs = (ARM_LOWREG(r_dest) && ARM_LOWREG(r_src1));
- int mod_imm = ModifiedImmediate(value);
- int mod_imm_neg = ModifiedImmediate(-value);
+ int32_t mod_imm = ModifiedImmediate(value);
+ int32_t mod_imm_neg = ModifiedImmediate(-value);
switch (op) {
case kOpLsl:
@@ -544,7 +544,7 @@
/* Handle Thumb-only variants here - otherwise punt to OpRegRegImm */
LIR* ArmMir2Lir::OpRegImm(OpKind op, int r_dest_src1, int value) {
bool neg = (value < 0);
- int abs_value = (neg) ? -value : value;
+ int32_t abs_value = (neg) ? -value : value;
bool short_form = (((abs_value & 0xff) == abs_value) && ARM_LOWREG(r_dest_src1));
ArmOpcode opcode = kThumbBkpt;
switch (op) {