64-bit prep
Preparation for 64-bit roll.
o Eliminated storing pointers in 32-bit int slots in LIR; pointers
are now wrapped as indices into a side table (see the sketch below).
o General size reductions of common structures to reduce impact
of doubled pointer sizes:
- BasicBlock struct was 72 bytes, now 48.
- MIR struct was 72 bytes, now 64.
- RegLocation was 12 bytes, now 8.
o Generally replaced uses of BasicBlock* pointers with 16-bit Ids.
o Replaced several doubly-linked lists with singly-linked to save
one stored pointer per node.
o Replaced quite a few uses of uintptr_t, holdovers from the JIT
(which used pointers to mapped dex and actual code cache addresses
rather than trace-relative offsets), with uint32_t.
o Cleaned up handling of embedded data for switch tables and array data.
o Miscellaneous cleanup.
I anticipate one or two additional CLs to reduce the size of MIR and LIR
structs.
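
The wrapping scheme, roughly (a minimal sketch using std::vector; the
real code keeps a GrowableArray<void*> pointer_storage_ member on
Mir2Lir and reserves index 0 for NULL):

  #include <cstdint>
  #include <vector>

  std::vector<void*> pointer_storage;

  // LIR operands stay int32_t; a wrapped pointer is stored once in the
  // side table and the 32-bit operand holds its index, not its address.
  uint32_t WrapPointer(void* pointer) {
    uint32_t index = static_cast<uint32_t>(pointer_storage.size());
    pointer_storage.push_back(pointer);
    return index;
  }

  void* UnwrapPointer(size_t index) {
    return pointer_storage[index];
  }

The BasicBlock* replacement follows the same idea: blocks are referred
to by 16-bit BasicBlockIds and resolved through
mir_graph_->GetBasicBlock(id) only when the pointer is actually needed.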
Change-Id: I58e426d3f8e5efe64c1146b2823453da99451230
diff --git a/compiler/dex/quick/arm/assemble_arm.cc b/compiler/dex/quick/arm/assemble_arm.cc
index 3c646c4..cc40e99 100644
--- a/compiler/dex/quick/arm/assemble_arm.cc
+++ b/compiler/dex/quick/arm/assemble_arm.cc
@@ -1031,8 +1031,7 @@
} else if (LIKELY(!lir->flags.is_nop)) {
const ArmEncodingMap *encoder = &EncodingMap[lir->opcode];
uint32_t bits = encoder->skeleton;
- int i;
- for (i = 0; i < 4; i++) {
+ for (int i = 0; i < 4; i++) {
uint32_t operand;
uint32_t value;
operand = lir->operands[i];
@@ -1088,7 +1087,7 @@
case kFmtDfp: {
DCHECK(ARM_DOUBLEREG(operand));
DCHECK_EQ((operand & 0x1), 0U);
- int reg_name = (operand & ARM_FP_REG_MASK) >> 1;
+ uint32_t reg_name = (operand & ARM_FP_REG_MASK) >> 1;
/* Snag the 1-bit slice and position it */
value = ((reg_name & 0x10) >> 4) << encoder->field_loc[i].end;
/* Extract and position the 4-bit slice */
@@ -1155,9 +1154,9 @@
LIR* lir;
LIR* prev_lir;
int assembler_retries = 0;
- int starting_offset = EncodeRange(first_lir_insn_, last_lir_insn_, 0);
+ CodeOffset starting_offset = EncodeRange(first_lir_insn_, last_lir_insn_, 0);
data_offset_ = (starting_offset + 0x3) & ~0x3;
- int offset_adjustment;
+ int32_t offset_adjustment;
AssignDataOffsets();
/*
@@ -1200,10 +1199,10 @@
* we revert to a multiple-instruction materialization sequence.
*/
LIR *lir_target = lir->target;
- uintptr_t pc = (lir->offset + 4) & ~3;
- uintptr_t target = lir_target->offset +
+ CodeOffset pc = (lir->offset + 4) & ~3;
+ CodeOffset target = lir_target->offset +
((lir_target->flags.generation == lir->flags.generation) ? 0 : offset_adjustment);
- int delta = target - pc;
+ int32_t delta = target - pc;
if (res != kSuccess) {
/*
* In this case, we're just estimating and will do it again for real. Ensure offset
@@ -1281,10 +1280,10 @@
}
case kFixupCBxZ: {
LIR *target_lir = lir->target;
- uintptr_t pc = lir->offset + 4;
- uintptr_t target = target_lir->offset +
+ CodeOffset pc = lir->offset + 4;
+ CodeOffset target = target_lir->offset +
((target_lir->flags.generation == lir->flags.generation) ? 0 : offset_adjustment);
- int delta = target - pc;
+ int32_t delta = target - pc;
if (delta > 126 || delta < 0) {
/*
* Convert to cmp rx,#0 / b[eq/ne] tgt pair
@@ -1351,10 +1350,10 @@
}
case kFixupCondBranch: {
LIR *target_lir = lir->target;
- int delta = 0;
+ int32_t delta = 0;
DCHECK(target_lir);
- uintptr_t pc = lir->offset + 4;
- uintptr_t target = target_lir->offset +
+ CodeOffset pc = lir->offset + 4;
+ CodeOffset target = target_lir->offset +
((target_lir->flags.generation == lir->flags.generation) ? 0 : offset_adjustment);
delta = target - pc;
if ((lir->opcode == kThumbBCond) && (delta > 254 || delta < -256)) {
@@ -1370,10 +1369,10 @@
}
case kFixupT2Branch: {
LIR *target_lir = lir->target;
- uintptr_t pc = lir->offset + 4;
- uintptr_t target = target_lir->offset +
+ CodeOffset pc = lir->offset + 4;
+ CodeOffset target = target_lir->offset +
((target_lir->flags.generation == lir->flags.generation) ? 0 : offset_adjustment);
- int delta = target - pc;
+ int32_t delta = target - pc;
lir->operands[0] = delta >> 1;
if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && lir->operands[0] == 0) {
// Useless branch
@@ -1387,10 +1386,10 @@
}
case kFixupT1Branch: {
LIR *target_lir = lir->target;
- uintptr_t pc = lir->offset + 4;
- uintptr_t target = target_lir->offset +
+ CodeOffset pc = lir->offset + 4;
+ CodeOffset target = target_lir->offset +
((target_lir->flags.generation == lir->flags.generation) ? 0 : offset_adjustment);
- int delta = target - pc;
+ int32_t delta = target - pc;
if (delta > 2046 || delta < -2048) {
// Convert to Thumb2BCond w/ kArmCondAl
offset_adjustment -= lir->flags.size;
@@ -1416,14 +1415,14 @@
case kFixupBlx1: {
DCHECK(NEXT_LIR(lir)->opcode == kThumbBlx2);
/* cur_pc is Thumb */
- uintptr_t cur_pc = (lir->offset + 4) & ~3;
- uintptr_t target = lir->operands[1];
+ CodeOffset cur_pc = (lir->offset + 4) & ~3;
+ CodeOffset target = lir->operands[1];
/* Match bit[1] in target with base */
if (cur_pc & 0x2) {
target |= 0x2;
}
- int delta = target - cur_pc;
+ int32_t delta = target - cur_pc;
DCHECK((delta >= -(1<<22)) && (delta <= ((1<<22)-2)));
lir->operands[0] = (delta >> 12) & 0x7ff;
@@ -1433,10 +1432,10 @@
case kFixupBl1: {
DCHECK(NEXT_LIR(lir)->opcode == kThumbBl2);
/* Both cur_pc and target are Thumb */
- uintptr_t cur_pc = lir->offset + 4;
- uintptr_t target = lir->operands[1];
+ CodeOffset cur_pc = lir->offset + 4;
+ CodeOffset target = lir->operands[1];
- int delta = target - cur_pc;
+ int32_t delta = target - cur_pc;
DCHECK((delta >= -(1<<22)) && (delta <= ((1<<22)-2)));
lir->operands[0] = (delta >> 12) & 0x7ff;
@@ -1444,20 +1443,19 @@
break;
}
case kFixupAdr: {
- SwitchTable *tab_rec = reinterpret_cast<SwitchTable*>(lir->operands[2]);
+ EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(lir->operands[2]));
LIR* target = lir->target;
- int target_disp = (tab_rec != NULL) ? tab_rec->offset + offset_adjustment
+ int32_t target_disp = (tab_rec != NULL) ? tab_rec->offset + offset_adjustment
: target->offset + ((target->flags.generation == lir->flags.generation) ? 0 : offset_adjustment);
- int disp = target_disp - ((lir->offset + 4) & ~3);
+ int32_t disp = target_disp - ((lir->offset + 4) & ~3);
if (disp < 4096) {
lir->operands[1] = disp;
} else {
// convert to ldimm16l, ldimm16h, add tgt, pc, operands[0]
// TUNING: if this case fires often, it can be improved. Not expected to be common.
LIR *new_mov16L =
- RawLIR(lir->dalvik_offset, kThumb2MovImm16LST,
- lir->operands[0], 0, reinterpret_cast<uintptr_t>(lir),
- reinterpret_cast<uintptr_t>(tab_rec), 0, lir->target);
+ RawLIR(lir->dalvik_offset, kThumb2MovImm16LST, lir->operands[0], 0,
+ WrapPointer(lir), WrapPointer(tab_rec), 0, lir->target);
new_mov16L->flags.size = EncodingMap[new_mov16L->opcode].size;
new_mov16L->flags.fixup = kFixupMovImmLST;
new_mov16L->offset = lir->offset;
@@ -1467,11 +1465,9 @@
offset_adjustment += new_mov16L->flags.size;
InsertFixupBefore(prev_lir, lir, new_mov16L);
prev_lir = new_mov16L; // Now we've got a new prev.
-
LIR *new_mov16H =
- RawLIR(lir->dalvik_offset, kThumb2MovImm16HST,
- lir->operands[0], 0, reinterpret_cast<uintptr_t>(lir),
- reinterpret_cast<uintptr_t>(tab_rec), 0, lir->target);
+ RawLIR(lir->dalvik_offset, kThumb2MovImm16HST, lir->operands[0], 0,
+ WrapPointer(lir), WrapPointer(tab_rec), 0, lir->target);
new_mov16H->flags.size = EncodingMap[new_mov16H->opcode].size;
new_mov16H->flags.fixup = kFixupMovImmHST;
new_mov16H->offset = lir->offset;
@@ -1499,27 +1495,27 @@
}
case kFixupMovImmLST: {
// operands[1] should hold disp, [2] has add, [3] has tab_rec
- LIR *addPCInst = reinterpret_cast<LIR*>(lir->operands[2]);
- SwitchTable *tab_rec = reinterpret_cast<SwitchTable*>(lir->operands[3]);
+ LIR *addPCInst = reinterpret_cast<LIR*>(UnwrapPointer(lir->operands[2]));
+ EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(lir->operands[3]));
// If tab_rec is null, this is a literal load. Use target
LIR* target = lir->target;
- int target_disp = tab_rec ? tab_rec->offset : target->offset;
+ int32_t target_disp = tab_rec ? tab_rec->offset : target->offset;
lir->operands[1] = (target_disp - (addPCInst->offset + 4)) & 0xffff;
break;
}
case kFixupMovImmHST: {
// operands[1] should hold disp, [2] has add, [3] has tab_rec
- LIR *addPCInst = reinterpret_cast<LIR*>(lir->operands[2]);
- SwitchTable *tab_rec = reinterpret_cast<SwitchTable*>(lir->operands[3]);
+ LIR *addPCInst = reinterpret_cast<LIR*>(UnwrapPointer(lir->operands[2]));
+ EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(lir->operands[3]));
// If tab_rec is null, this is a literal load. Use target
LIR* target = lir->target;
- int target_disp = tab_rec ? tab_rec->offset : target->offset;
+ int32_t target_disp = tab_rec ? tab_rec->offset : target->offset;
lir->operands[1] =
((target_disp - (addPCInst->offset + 4)) >> 16) & 0xffff;
break;
}
case kFixupAlign4: {
- int required_size = lir->offset & 0x2;
+ int32_t required_size = lir->offset & 0x2;
if (lir->flags.size != required_size) {
offset_adjustment += required_size - lir->flags.size;
lir->flags.size = required_size;
@@ -1647,7 +1643,7 @@
void ArmMir2Lir::AssignDataOffsets() {
/* Set up offsets for literals */
- int offset = data_offset_;
+ CodeOffset offset = data_offset_;
offset = AssignLiteralOffset(offset);
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index 401da2a..51aca85 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -92,7 +92,7 @@
}
/* Find the next MIR, which may be in a following basic block */
-// TODO: should this be a utility in mir_graph?
+// TODO: make this a utility in mir_graph.
MIR* ArmMir2Lir::GetNextMir(BasicBlock** p_bb, MIR* mir) {
BasicBlock* bb = *p_bb;
MIR* orig_mir = mir;
@@ -103,7 +103,7 @@
if (mir != NULL) {
return mir;
} else {
- bb = bb->fall_through;
+ bb = mir_graph_->GetBasicBlock(bb->fall_through);
*p_bb = bb;
if (bb) {
mir = bb->first_mir_insn;
@@ -128,7 +128,7 @@
MIR* ArmMir2Lir::SpecialIGet(BasicBlock** bb, MIR* mir,
OpSize size, bool long_or_double, bool is_object) {
- int field_offset;
+ int32_t field_offset;
bool is_volatile;
uint32_t field_idx = mir->dalvikInsn.vC;
bool fast_path = FastInstance(field_idx, false, &field_offset, &is_volatile);
@@ -153,7 +153,7 @@
MIR* ArmMir2Lir::SpecialIPut(BasicBlock** bb, MIR* mir,
OpSize size, bool long_or_double, bool is_object) {
- int field_offset;
+ int32_t field_offset;
bool is_volatile;
uint32_t field_idx = mir->dalvikInsn.vC;
bool fast_path = FastInstance(field_idx, false, &field_offset, &is_volatile);
@@ -320,9 +320,9 @@
static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), ArenaAllocator::kAllocData));
tab_rec->table = table;
tab_rec->vaddr = current_dalvik_offset_;
- int size = table[1];
+ uint32_t size = table[1];
tab_rec->targets = static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*),
- ArenaAllocator::kAllocLIR));
+ ArenaAllocator::kAllocLIR));
switch_tables_.Insert(tab_rec);
// Get the switch value
@@ -338,7 +338,7 @@
r_key = tmp;
}
// Materialize a pointer to the switch table
- NewLIR3(kThumb2Adr, rBase, 0, reinterpret_cast<uintptr_t>(tab_rec));
+ NewLIR3(kThumb2Adr, rBase, 0, WrapPointer(tab_rec));
// Set up r_idx
int r_idx = AllocTemp();
LoadConstant(r_idx, size);
@@ -368,7 +368,7 @@
static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), ArenaAllocator::kAllocData));
tab_rec->table = table;
tab_rec->vaddr = current_dalvik_offset_;
- int size = table[1];
+ uint32_t size = table[1];
tab_rec->targets =
static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*), ArenaAllocator::kAllocLIR));
switch_tables_.Insert(tab_rec);
@@ -377,7 +377,7 @@
rl_src = LoadValue(rl_src, kCoreReg);
int table_base = AllocTemp();
// Materialize a pointer to the switch table
- NewLIR3(kThumb2Adr, table_base, 0, reinterpret_cast<uintptr_t>(tab_rec));
+ NewLIR3(kThumb2Adr, table_base, 0, WrapPointer(tab_rec));
int low_key = s4FromSwitchData(&table[2]);
int keyReg;
// Remove the bias, if necessary
@@ -433,7 +433,7 @@
LoadWordDisp(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayData).Int32Value(),
rARM_LR);
// Materialize a pointer to the fill data image
- NewLIR3(kThumb2Adr, r1, 0, reinterpret_cast<uintptr_t>(tab_rec));
+ NewLIR3(kThumb2Adr, r1, 0, WrapPointer(tab_rec));
ClobberCalleeSave();
LIR* call_inst = OpReg(kOpBlx, rARM_LR);
MarkSafepointPC(call_inst);
diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h
index aa5782b..0a3bfc1 100644
--- a/compiler/dex/quick/arm/codegen_arm.h
+++ b/compiler/dex/quick/arm/codegen_arm.h
@@ -74,7 +74,6 @@
uint32_t EncodeRange(LIR* head_lir, LIR* tail_lir, uint32_t starting_offset);
int AssignInsnOffsets();
void AssignOffsets();
- AssemblerStatus AssembleInstructions(uintptr_t start_addr);
void EncodeLIR(LIR* lir);
void DumpResourceMask(LIR* lir, uint64_t mask, const char* prefix);
void SetupTargetResourceMasks(LIR* lir, uint64_t flags);
@@ -120,7 +119,7 @@
void GenDivZeroCheck(int reg_lo, int reg_hi);
void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method);
void GenExitSequence();
- void GenFillArrayData(uint32_t table_offset, RegLocation rl_src);
+ void GenFillArrayData(DexOffset table_offset, RegLocation rl_src);
void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double);
void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir);
void GenSelect(BasicBlock* bb, MIR* mir);
@@ -132,8 +131,8 @@
int first_bit, int second_bit);
void GenNegDouble(RegLocation rl_dest, RegLocation rl_src);
void GenNegFloat(RegLocation rl_dest, RegLocation rl_src);
- void GenPackedSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
- void GenSparseSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
+ void GenPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
+ void GenSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
void GenSpecialCase(BasicBlock* bb, MIR* mir, SpecialCaseHandler special_case);
// Required for target - single operation generators.
diff --git a/compiler/dex/quick/arm/fp_arm.cc b/compiler/dex/quick/arm/fp_arm.cc
index 08d6778..480e021 100644
--- a/compiler/dex/quick/arm/fp_arm.cc
+++ b/compiler/dex/quick/arm/fp_arm.cc
@@ -176,7 +176,7 @@
void ArmMir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias,
bool is_double) {
- LIR* target = &block_label_list_[bb->taken->id];
+ LIR* target = &block_label_list_[bb->taken];
RegLocation rl_src1;
RegLocation rl_src2;
if (is_double) {
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index b1772fd..69ea4e9 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -122,8 +122,8 @@
int32_t val_hi = High32Bits(val);
DCHECK_GE(ModifiedImmediate(val_lo), 0);
DCHECK_GE(ModifiedImmediate(val_hi), 0);
- LIR* taken = &block_label_list_[bb->taken->id];
- LIR* not_taken = &block_label_list_[bb->fall_through->id];
+ LIR* taken = &block_label_list_[bb->taken];
+ LIR* not_taken = &block_label_list_[bb->fall_through];
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
int32_t low_reg = rl_src1.low_reg;
int32_t high_reg = rl_src1.high_reg;
@@ -178,23 +178,6 @@
void ArmMir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
RegLocation rl_result;
RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
- // Temporary debugging code
- int dest_sreg = mir->ssa_rep->defs[0];
- if ((dest_sreg < 0) || (dest_sreg >= mir_graph_->GetNumSSARegs())) {
- LOG(INFO) << "Bad target sreg: " << dest_sreg << ", in "
- << PrettyMethod(cu_->method_idx, *cu_->dex_file);
- LOG(INFO) << "at dex offset 0x" << std::hex << mir->offset;
- LOG(INFO) << "vreg = " << mir_graph_->SRegToVReg(dest_sreg);
- LOG(INFO) << "num uses = " << mir->ssa_rep->num_uses;
- if (mir->ssa_rep->num_uses == 1) {
- LOG(INFO) << "CONST case, vals = " << mir->dalvikInsn.vB << ", " << mir->dalvikInsn.vC;
- } else {
- LOG(INFO) << "MOVE case, operands = " << mir->ssa_rep->uses[1] << ", "
- << mir->ssa_rep->uses[2];
- }
- CHECK(false) << "Invalid target sreg on Select.";
- }
- // End temporary debugging code
RegLocation rl_dest = mir_graph_->GetDest(mir);
rl_src = LoadValue(rl_src, kCoreReg);
if (mir->ssa_rep->num_uses == 1) {
@@ -270,8 +253,8 @@
return;
}
}
- LIR* taken = &block_label_list_[bb->taken->id];
- LIR* not_taken = &block_label_list_[bb->fall_through->id];
+ LIR* taken = &block_label_list_[bb->taken];
+ LIR* not_taken = &block_label_list_[bb->fall_through];
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
rl_src2 = LoadValueWide(rl_src2, kCoreReg);
OpRegReg(kOpCmp, rl_src1.high_reg, rl_src2.high_reg);
diff --git a/compiler/dex/quick/arm/target_arm.cc b/compiler/dex/quick/arm/target_arm.cc
index 933c1a3..3395ae7 100644
--- a/compiler/dex/quick/arm/target_arm.cc
+++ b/compiler/dex/quick/arm/target_arm.cc
@@ -282,8 +282,8 @@
return buf;
}
-static int ExpandImmediate(int value) {
- int mode = (value & 0xf00) >> 8;
+static int32_t ExpandImmediate(int value) {
+ int32_t mode = (value & 0xf00) >> 8;
uint32_t bits = value & 0xff;
switch (mode) {
case 0:
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index 00de8de..a2ac6ef 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -22,14 +22,14 @@
/* This file contains codegen for the Thumb ISA. */
-static int EncodeImmSingle(int value) {
- int res;
- int bit_a = (value & 0x80000000) >> 31;
- int not_bit_b = (value & 0x40000000) >> 30;
- int bit_b = (value & 0x20000000) >> 29;
- int b_smear = (value & 0x3e000000) >> 25;
- int slice = (value & 0x01f80000) >> 19;
- int zeroes = (value & 0x0007ffff);
+static int32_t EncodeImmSingle(int32_t value) {
+ int32_t res;
+ int32_t bit_a = (value & 0x80000000) >> 31;
+ int32_t not_bit_b = (value & 0x40000000) >> 30;
+ int32_t bit_b = (value & 0x20000000) >> 29;
+ int32_t b_smear = (value & 0x3e000000) >> 25;
+ int32_t slice = (value & 0x01f80000) >> 19;
+ int32_t zeroes = (value & 0x0007ffff);
if (zeroes != 0)
return -1;
if (bit_b) {
@@ -47,15 +47,15 @@
* Determine whether value can be encoded as a Thumb2 floating point
* immediate. If not, return -1. If so return encoded 8-bit value.
*/
-static int EncodeImmDouble(int64_t value) {
- int res;
- int bit_a = (value & 0x8000000000000000ll) >> 63;
- int not_bit_b = (value & 0x4000000000000000ll) >> 62;
- int bit_b = (value & 0x2000000000000000ll) >> 61;
- int b_smear = (value & 0x3fc0000000000000ll) >> 54;
- int slice = (value & 0x003f000000000000ll) >> 48;
+static int32_t EncodeImmDouble(int64_t value) {
+ int32_t res;
+ int32_t bit_a = (value & 0x8000000000000000ll) >> 63;
+ int32_t not_bit_b = (value & 0x4000000000000000ll) >> 62;
+ int32_t bit_b = (value & 0x2000000000000000ll) >> 61;
+ int32_t b_smear = (value & 0x3fc0000000000000ll) >> 54;
+ int32_t slice = (value & 0x003f000000000000ll) >> 48;
uint64_t zeroes = (value & 0x0000ffffffffffffll);
- if (zeroes != 0)
+ if (zeroes != 0ull)
return -1;
if (bit_b) {
if ((not_bit_b != 0) || (b_smear != 0xff))
@@ -96,8 +96,8 @@
static int LeadingZeros(uint32_t val) {
uint32_t alt;
- int n;
- int count;
+ int32_t n;
+ int32_t count;
count = 16;
n = 32;
@@ -117,8 +117,8 @@
* immediate. If not, return -1. If so, return i:imm3:a:bcdefgh form.
*/
int ArmMir2Lir::ModifiedImmediate(uint32_t value) {
- int z_leading;
- int z_trailing;
+ int32_t z_leading;
+ int32_t z_trailing;
uint32_t b0 = value & 0xff;
/* Note: case of value==0 must use 0:000:0:0000000 encoding */
@@ -421,12 +421,12 @@
LIR* ArmMir2Lir::OpRegRegImm(OpKind op, int r_dest, int r_src1, int value) {
LIR* res;
bool neg = (value < 0);
- int abs_value = (neg) ? -value : value;
+ int32_t abs_value = (neg) ? -value : value;
ArmOpcode opcode = kThumbBkpt;
ArmOpcode alt_opcode = kThumbBkpt;
bool all_low_regs = (ARM_LOWREG(r_dest) && ARM_LOWREG(r_src1));
- int mod_imm = ModifiedImmediate(value);
- int mod_imm_neg = ModifiedImmediate(-value);
+ int32_t mod_imm = ModifiedImmediate(value);
+ int32_t mod_imm_neg = ModifiedImmediate(-value);
switch (op) {
case kOpLsl:
@@ -544,7 +544,7 @@
/* Handle Thumb-only variants here - otherwise punt to OpRegRegImm */
LIR* ArmMir2Lir::OpRegImm(OpKind op, int r_dest_src1, int value) {
bool neg = (value < 0);
- int abs_value = (neg) ? -value : value;
+ int32_t abs_value = (neg) ? -value : value;
bool short_form = (((abs_value & 0xff) == abs_value) && ARM_LOWREG(r_dest_src1));
ArmOpcode opcode = kThumbBkpt;
switch (op) {
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 617f357..2ce8f58 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -160,7 +160,8 @@
break;
case kPseudoDalvikByteCodeBoundary:
if (lir->operands[0] == 0) {
- lir->operands[0] = reinterpret_cast<uintptr_t>("No instruction string");
+ // NOTE: only used for debug listings.
+ lir->operands[0] = WrapPointer(ArenaStrdup("No instruction string"));
}
LOG(INFO) << "-------- dalvik offset: 0x" << std::hex
- << lir->dalvik_offset << " @ " << reinterpret_cast<char*>(lir->operands[0]);
+ << lir->dalvik_offset << " @ " << reinterpret_cast<char*>(UnwrapPointer(lir->operands[0]));
@@ -369,6 +370,17 @@
buf.push_back((data >> 24) & 0xff);
}
+// Push 8 bytes on 64-bit systems; 4 on 32-bit systems.
+static void PushPointer(std::vector<uint8_t>&buf, void const* pointer) {
+ uintptr_t data = reinterpret_cast<uintptr_t>(pointer);
+ if (sizeof(void*) == sizeof(uint64_t)) {
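+ // Emit the upper 32 bits first, then the lower 32 bits of the pointer.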
+ PushWord(buf, (data >> (sizeof(void*) * 4)) & 0xFFFFFFFF);
+ PushWord(buf, data & 0xFFFFFFFF);
+ } else {
+ PushWord(buf, data);
+ }
+}
+
static void AlignBuffer(std::vector<uint8_t>&buf, size_t offset) {
while (buf.size() < offset) {
buf.push_back(0);
@@ -395,9 +407,8 @@
static_cast<InvokeType>(data_lir->operands[1]),
code_buffer_.size());
const DexFile::MethodId& id = cu_->dex_file->GetMethodId(target);
- // unique based on target to ensure code deduplication works
- uint32_t unique_patch_value = reinterpret_cast<uint32_t>(&id);
- PushWord(code_buffer_, unique_patch_value);
+ // unique value based on target to ensure code deduplication works
+ PushPointer(code_buffer_, &id);
data_lir = NEXT_LIR(data_lir);
}
data_lir = method_literal_list_;
@@ -411,9 +422,8 @@
static_cast<InvokeType>(data_lir->operands[1]),
code_buffer_.size());
const DexFile::MethodId& id = cu_->dex_file->GetMethodId(target);
- // unique based on target to ensure code deduplication works
- uint32_t unique_patch_value = reinterpret_cast<uint32_t>(&id);
- PushWord(code_buffer_, unique_patch_value);
+ // unique value based on target to ensure code deduplication works
+ PushPointer(code_buffer_, &id);
data_lir = NEXT_LIR(data_lir);
}
}
@@ -449,7 +459,7 @@
LOG(INFO) << "Switch table for offset 0x" << std::hex << bx_offset;
}
if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
- const int* keys = reinterpret_cast<const int*>(&(tab_rec->table[2]));
+ const int32_t* keys = reinterpret_cast<const int32_t*>(&(tab_rec->table[2]));
for (int elems = 0; elems < tab_rec->table[1]; elems++) {
int disp = tab_rec->targets[elems]->offset - bx_offset;
if (cu_->verbose) {
@@ -490,7 +500,7 @@
}
}
-static int AssignLiteralOffsetCommon(LIR* lir, int offset) {
+static int AssignLiteralOffsetCommon(LIR* lir, CodeOffset offset) {
for (; lir != NULL; lir = lir->next) {
lir->offset = offset;
offset += 4;
@@ -498,6 +508,17 @@
return offset;
}
+static int AssignLiteralPointerOffsetCommon(LIR* lir, CodeOffset offset) {
+ unsigned int element_size = sizeof(void*);
+ // Align to natural pointer size.
+ offset = (offset + (element_size - 1)) & ~(element_size - 1);
+ for (; lir != NULL; lir = lir->next) {
+ lir->offset = offset;
+ offset += element_size;
+ }
+ return offset;
+}
+
// Make sure we have a code address for every declared catch entry
bool Mir2Lir::VerifyCatchEntries() {
bool success = true;
@@ -607,8 +628,8 @@
table_index = (table_index + 1) % entries_;
}
in_use_[table_index] = true;
- SetNativeOffset(table_index, native_offset);
- DCHECK_EQ(native_offset, GetNativeOffset(table_index));
+ SetCodeOffset(table_index, native_offset);
+ DCHECK_EQ(native_offset, GetCodeOffset(table_index));
SetReferences(table_index, references);
}
@@ -617,7 +638,7 @@
return NativePcOffsetToReferenceMap::Hash(native_offset) % entries_;
}
- uint32_t GetNativeOffset(size_t table_index) {
+ uint32_t GetCodeOffset(size_t table_index) {
uint32_t native_offset = 0;
size_t table_offset = (table_index * EntryWidth()) + sizeof(uint32_t);
for (size_t i = 0; i < native_offset_width_; i++) {
@@ -626,7 +647,7 @@
return native_offset;
}
- void SetNativeOffset(size_t table_index, uint32_t native_offset) {
+ void SetCodeOffset(size_t table_index, uint32_t native_offset) {
size_t table_offset = (table_index * EntryWidth()) + sizeof(uint32_t);
for (size_t i = 0; i < native_offset_width_; i++) {
(*table_)[table_offset + i] = (native_offset >> (i * 8)) & 0xFF;
@@ -681,17 +702,17 @@
}
/* Determine the offset of each literal field */
-int Mir2Lir::AssignLiteralOffset(int offset) {
+int Mir2Lir::AssignLiteralOffset(CodeOffset offset) {
offset = AssignLiteralOffsetCommon(literal_list_, offset);
- offset = AssignLiteralOffsetCommon(code_literal_list_, offset);
- offset = AssignLiteralOffsetCommon(method_literal_list_, offset);
+ offset = AssignLiteralPointerOffsetCommon(code_literal_list_, offset);
+ offset = AssignLiteralPointerOffsetCommon(method_literal_list_, offset);
return offset;
}
-int Mir2Lir::AssignSwitchTablesOffset(int offset) {
+int Mir2Lir::AssignSwitchTablesOffset(CodeOffset offset) {
GrowableArray<SwitchTable*>::Iterator iterator(&switch_tables_);
while (true) {
- Mir2Lir::SwitchTable *tab_rec = iterator.Next();
+ Mir2Lir::SwitchTable* tab_rec = iterator.Next();
if (tab_rec == NULL) break;
tab_rec->offset = offset;
if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
@@ -705,7 +726,7 @@
return offset;
}
-int Mir2Lir::AssignFillArrayDataOffset(int offset) {
+int Mir2Lir::AssignFillArrayDataOffset(CodeOffset offset) {
GrowableArray<FillArrayData*>::Iterator iterator(&fill_array_data_);
while (true) {
Mir2Lir::FillArrayData *tab_rec = iterator.Next();
@@ -725,7 +746,7 @@
* branch table during the assembly phase. All resource flags
* are set to prevent code motion. KeyVal is just there for debugging.
*/
-LIR* Mir2Lir::InsertCaseLabel(int vaddr, int keyVal) {
+LIR* Mir2Lir::InsertCaseLabel(DexOffset vaddr, int keyVal) {
LIR* boundary_lir = &block_label_list_[mir_graph_->FindBlock(vaddr)->id];
LIR* res = boundary_lir;
if (cu_->verbose) {
@@ -743,10 +764,10 @@
return res;
}
-void Mir2Lir::MarkPackedCaseLabels(Mir2Lir::SwitchTable *tab_rec) {
+void Mir2Lir::MarkPackedCaseLabels(Mir2Lir::SwitchTable* tab_rec) {
const uint16_t* table = tab_rec->table;
- int base_vaddr = tab_rec->vaddr;
- const int *targets = reinterpret_cast<const int*>(&table[4]);
+ DexOffset base_vaddr = tab_rec->vaddr;
+ const int32_t *targets = reinterpret_cast<const int32_t*>(&table[4]);
int entries = table[1];
int low_key = s4FromSwitchData(&table[2]);
for (int i = 0; i < entries; i++) {
@@ -754,12 +775,12 @@
}
}
-void Mir2Lir::MarkSparseCaseLabels(Mir2Lir::SwitchTable *tab_rec) {
+void Mir2Lir::MarkSparseCaseLabels(Mir2Lir::SwitchTable* tab_rec) {
const uint16_t* table = tab_rec->table;
- int base_vaddr = tab_rec->vaddr;
+ DexOffset base_vaddr = tab_rec->vaddr;
int entries = table[1];
- const int* keys = reinterpret_cast<const int*>(&table[2]);
- const int* targets = &keys[entries];
+ const int32_t* keys = reinterpret_cast<const int32_t*>(&table[2]);
+ const int32_t* targets = &keys[entries];
for (int i = 0; i < entries; i++) {
tab_rec->targets[i] = InsertCaseLabel(base_vaddr + targets[i], keys[i]);
}
@@ -792,8 +813,8 @@
*/
uint16_t ident = table[0];
int entries = table[1];
- const int* keys = reinterpret_cast<const int*>(&table[2]);
- const int* targets = &keys[entries];
+ const int32_t* keys = reinterpret_cast<const int32_t*>(&table[2]);
+ const int32_t* targets = &keys[entries];
LOG(INFO) << "Sparse switch table - ident:0x" << std::hex << ident
<< ", entries: " << std::dec << entries;
for (int i = 0; i < entries; i++) {
@@ -812,7 +833,7 @@
* Total size is (4+size*2) 16-bit code units.
*/
uint16_t ident = table[0];
- const int* targets = reinterpret_cast<const int*>(&table[4]);
+ const int32_t* targets = reinterpret_cast<const int32_t*>(&table[4]);
int entries = table[1];
int low_key = s4FromSwitchData(&table[2]);
LOG(INFO) << "Packed switch table - ident:0x" << std::hex << ident
@@ -824,8 +845,9 @@
}
/* Set up special LIR to mark a Dalvik byte-code instruction start for pretty printing */
-void Mir2Lir::MarkBoundary(int offset, const char* inst_str) {
- NewLIR1(kPseudoDalvikByteCodeBoundary, reinterpret_cast<uintptr_t>(inst_str));
+void Mir2Lir::MarkBoundary(DexOffset offset, const char* inst_str) {
+ // NOTE: only used for debug listings.
+ NewLIR1(kPseudoDalvikByteCodeBoundary, WrapPointer(ArenaStrdup(inst_str)));
}
bool Mir2Lir::EvaluateBranch(Instruction::Code opcode, int32_t src1, int32_t src2) {
@@ -883,6 +905,7 @@
intrinsic_launchpads_(arena, 2048, kGrowableArrayMisc),
tempreg_info_(arena, 20, kGrowableArrayMisc),
reginfo_map_(arena, 64, kGrowableArrayMisc),
+ pointer_storage_(arena, 128, kGrowableArrayMisc),
data_offset_(0),
total_size_(0),
block_label_list_(NULL),
@@ -900,6 +923,9 @@
promotion_map_ = static_cast<PromotionMap*>
(arena_->Alloc((cu_->num_dalvik_registers + cu_->num_compiler_temps + 1) *
sizeof(promotion_map_[0]), ArenaAllocator::kAllocRegAlloc));
+ // Reserve pointer id 0 for NULL.
+ size_t null_idx = WrapPointer(NULL);
+ DCHECK_EQ(null_idx, 0U);
}
void Mir2Lir::Materialize() {
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 2670c23..2b3404a 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -40,7 +40,7 @@
barrier->u.m.def_mask = ENCODE_ALL;
}
-// FIXME: need to do some work to split out targets with
+// TODO: need to do some work to split out targets with
// condition codes and those without
LIR* Mir2Lir::GenCheck(ConditionCode c_code, ThrowKind kind) {
DCHECK_NE(cu_->instruction_set, kMips);
@@ -503,7 +503,7 @@
ResetRegPool();
ResetDefTracking();
LIR* lab = suspend_launchpads_.Get(i);
- LIR* resume_lab = reinterpret_cast<LIR*>(lab->operands[0]);
+ LIR* resume_lab = reinterpret_cast<LIR*>(UnwrapPointer(lab->operands[0]));
current_dalvik_offset_ = lab->operands[1];
AppendLIR(lab);
int r_tgt = CallHelperSetup(helper_offset);
@@ -518,12 +518,12 @@
ResetRegPool();
ResetDefTracking();
LIR* lab = intrinsic_launchpads_.Get(i);
- CallInfo* info = reinterpret_cast<CallInfo*>(lab->operands[0]);
+ CallInfo* info = reinterpret_cast<CallInfo*>(UnwrapPointer(lab->operands[0]));
current_dalvik_offset_ = info->offset;
AppendLIR(lab);
// NOTE: GenInvoke handles MarkSafepointPC
GenInvoke(info);
- LIR* resume_lab = reinterpret_cast<LIR*>(lab->operands[2]);
+ LIR* resume_lab = reinterpret_cast<LIR*>(UnwrapPointer(lab->operands[2]));
if (resume_lab != NULL) {
OpUnconditionalBranch(resume_lab);
}
@@ -1351,7 +1351,7 @@
}
// Returns the index of the lowest set bit in 'x'.
-static int LowestSetBit(unsigned int x) {
+static int32_t LowestSetBit(uint32_t x) {
int bit_posn = 0;
while ((x & 0xf) == 0) {
bit_posn += 4;
@@ -1752,8 +1752,8 @@
FlushAllRegs();
LIR* branch = OpTestSuspend(NULL);
LIR* ret_lab = NewLIR0(kPseudoTargetLabel);
- LIR* target = RawLIR(current_dalvik_offset_, kPseudoSuspendTarget,
- reinterpret_cast<uintptr_t>(ret_lab), current_dalvik_offset_);
+ LIR* target = RawLIR(current_dalvik_offset_, kPseudoSuspendTarget, WrapPointer(ret_lab),
+ current_dalvik_offset_);
branch->target = target;
suspend_launchpads_.Insert(target);
}
@@ -1766,8 +1766,8 @@
}
OpTestSuspend(target);
LIR* launch_pad =
- RawLIR(current_dalvik_offset_, kPseudoSuspendTarget,
- reinterpret_cast<uintptr_t>(target), current_dalvik_offset_);
+ RawLIR(current_dalvik_offset_, kPseudoSuspendTarget, WrapPointer(target),
+ current_dalvik_offset_);
FlushAllRegs();
OpUnconditionalBranch(launch_pad);
suspend_launchpads_.Insert(launch_pad);
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 0a0cc17..3def7f5 100644
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -908,7 +908,7 @@
LoadWordDisp(rl_obj.low_reg, value_offset, reg_ptr);
if (range_check) {
// Set up a launch pad to allow retry in case of bounds violation
- launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
+ launch_pad = RawLIR(0, kPseudoIntrinsicRetry, WrapPointer(info));
intrinsic_launchpads_.Insert(launch_pad);
OpRegReg(kOpCmp, rl_idx.low_reg, reg_max);
FreeTemp(reg_max);
@@ -919,7 +919,7 @@
reg_max = AllocTemp();
LoadWordDisp(rl_obj.low_reg, count_offset, reg_max);
// Set up a launch pad to allow retry in case of bounds violation
- launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
+ launch_pad = RawLIR(0, kPseudoIntrinsicRetry, WrapPointer(info));
intrinsic_launchpads_.Insert(launch_pad);
OpRegReg(kOpCmp, rl_idx.low_reg, reg_max);
FreeTemp(reg_max);
@@ -1085,7 +1085,7 @@
}
int r_tgt = (cu_->instruction_set != kX86) ? LoadHelper(QUICK_ENTRYPOINT_OFFSET(pIndexOf)) : 0;
GenNullCheck(rl_obj.s_reg_low, reg_ptr, info->opt_flags);
- LIR* launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
+ LIR* launch_pad = RawLIR(0, kPseudoIntrinsicRetry, WrapPointer(info));
intrinsic_launchpads_.Insert(launch_pad);
OpCmpImmBranch(kCondGt, reg_char, 0xFFFF, launch_pad);
// NOTE: not a safepoint
@@ -1095,7 +1095,7 @@
OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(pIndexOf));
}
LIR* resume_tgt = NewLIR0(kPseudoTargetLabel);
- launch_pad->operands[2] = reinterpret_cast<uintptr_t>(resume_tgt);
+ launch_pad->operands[2] = WrapPointer(resume_tgt);
// Record that we've already inlined & null checked
info->opt_flags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
RegLocation rl_return = GetReturn(false);
@@ -1123,7 +1123,7 @@
LoadHelper(QUICK_ENTRYPOINT_OFFSET(pStringCompareTo)) : 0;
GenNullCheck(rl_this.s_reg_low, reg_this, info->opt_flags);
// TUNING: check if rl_cmp.s_reg_low is already null checked
- LIR* launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
+ LIR* launch_pad = RawLIR(0, kPseudoIntrinsicRetry, WrapPointer(info));
intrinsic_launchpads_.Insert(launch_pad);
OpCmpImmBranch(kCondEq, reg_cmp, 0, launch_pad);
// NOTE: not a safepoint
diff --git a/compiler/dex/quick/mips/assemble_mips.cc b/compiler/dex/quick/mips/assemble_mips.cc
index 6bfccfd..ea8b7a6 100644
--- a/compiler/dex/quick/mips/assemble_mips.cc
+++ b/compiler/dex/quick/mips/assemble_mips.cc
@@ -489,12 +489,12 @@
LIR* curr_pc = RawLIR(dalvik_offset, kMipsCurrPC);
InsertLIRBefore(lir, curr_pc);
LIR* anchor = RawLIR(dalvik_offset, kPseudoTargetLabel);
- LIR* delta_hi = RawLIR(dalvik_offset, kMipsDeltaHi, r_AT, 0,
- reinterpret_cast<uintptr_t>(anchor), 0, 0, lir->target);
+ LIR* delta_hi = RawLIR(dalvik_offset, kMipsDeltaHi, r_AT, 0, WrapPointer(anchor), 0, 0,
+ lir->target);
InsertLIRBefore(lir, delta_hi);
InsertLIRBefore(lir, anchor);
- LIR* delta_lo = RawLIR(dalvik_offset, kMipsDeltaLo, r_AT, 0,
- reinterpret_cast<uintptr_t>(anchor), 0, 0, lir->target);
+ LIR* delta_lo = RawLIR(dalvik_offset, kMipsDeltaLo, r_AT, 0, WrapPointer(anchor), 0, 0,
+ lir->target);
InsertLIRBefore(lir, delta_lo);
LIR* addu = RawLIR(dalvik_offset, kMipsAddu, r_AT, r_AT, r_RA);
InsertLIRBefore(lir, addu);
@@ -512,7 +512,7 @@
* instruction. In those cases we will try to substitute a new code
* sequence or request that the trace be shortened and retried.
*/
-AssemblerStatus MipsMir2Lir::AssembleInstructions(uintptr_t start_addr) {
+AssemblerStatus MipsMir2Lir::AssembleInstructions(CodeOffset start_addr) {
LIR *lir;
AssemblerStatus res = kSuccess; // Assume success
@@ -538,8 +538,8 @@
* and is found in lir->target. If operands[3] is non-NULL,
* then it is a Switch/Data table.
*/
- int offset1 = (reinterpret_cast<LIR*>(lir->operands[2]))->offset;
- SwitchTable *tab_rec = reinterpret_cast<SwitchTable*>(lir->operands[3]);
+ int offset1 = (reinterpret_cast<LIR*>(UnwrapPointer(lir->operands[2])))->offset;
+ EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(lir->operands[3]));
int offset2 = tab_rec ? tab_rec->offset : lir->target->offset;
int delta = offset2 - offset1;
if ((delta & 0xffff) == delta && ((delta & 0x8000) == 0)) {
@@ -565,21 +565,21 @@
res = kRetryAll;
}
} else if (lir->opcode == kMipsDeltaLo) {
- int offset1 = (reinterpret_cast<LIR*>(lir->operands[2]))->offset;
- SwitchTable *tab_rec = reinterpret_cast<SwitchTable*>(lir->operands[3]);
+ int offset1 = (reinterpret_cast<LIR*>(UnwrapPointer(lir->operands[2])))->offset;
+ EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(lir->operands[3]));
int offset2 = tab_rec ? tab_rec->offset : lir->target->offset;
int delta = offset2 - offset1;
lir->operands[1] = delta & 0xffff;
} else if (lir->opcode == kMipsDeltaHi) {
- int offset1 = (reinterpret_cast<LIR*>(lir->operands[2]))->offset;
- SwitchTable *tab_rec = reinterpret_cast<SwitchTable*>(lir->operands[3]);
+ int offset1 = (reinterpret_cast<LIR*>(UnwrapPointer(lir->operands[2])))->offset;
+ EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(lir->operands[3]));
int offset2 = tab_rec ? tab_rec->offset : lir->target->offset;
int delta = offset2 - offset1;
lir->operands[1] = (delta >> 16) & 0xffff;
} else if (lir->opcode == kMipsB || lir->opcode == kMipsBal) {
LIR *target_lir = lir->target;
- uintptr_t pc = lir->offset + 4;
- uintptr_t target = target_lir->offset;
+ CodeOffset pc = lir->offset + 4;
+ CodeOffset target = target_lir->offset;
int delta = target - pc;
if (delta & 0x3) {
LOG(FATAL) << "PC-rel offset not multiple of 4: " << delta;
@@ -592,8 +592,8 @@
}
} else if (lir->opcode >= kMipsBeqz && lir->opcode <= kMipsBnez) {
LIR *target_lir = lir->target;
- uintptr_t pc = lir->offset + 4;
- uintptr_t target = target_lir->offset;
+ CodeOffset pc = lir->offset + 4;
+ CodeOffset target = target_lir->offset;
int delta = target - pc;
if (delta & 0x3) {
LOG(FATAL) << "PC-rel offset not multiple of 4: " << delta;
@@ -606,8 +606,8 @@
}
} else if (lir->opcode == kMipsBeq || lir->opcode == kMipsBne) {
LIR *target_lir = lir->target;
- uintptr_t pc = lir->offset + 4;
- uintptr_t target = target_lir->offset;
+ CodeOffset pc = lir->offset + 4;
+ CodeOffset target = target_lir->offset;
int delta = target - pc;
if (delta & 0x3) {
LOG(FATAL) << "PC-rel offset not multiple of 4: " << delta;
@@ -619,8 +619,8 @@
lir->operands[2] = delta >> 2;
}
} else if (lir->opcode == kMipsJal) {
- uintptr_t cur_pc = (start_addr + lir->offset + 4) & ~3;
- uintptr_t target = lir->operands[0];
+ CodeOffset cur_pc = (start_addr + lir->offset + 4) & ~3;
+ CodeOffset target = lir->operands[0];
/* ensure PC-region branch can be used */
DCHECK_EQ((cur_pc & 0xF0000000), (target & 0xF0000000));
if (target & 0x3) {
@@ -629,11 +629,11 @@
lir->operands[0] = target >> 2;
} else if (lir->opcode == kMipsLahi) { /* ld address hi (via lui) */
LIR *target_lir = lir->target;
- uintptr_t target = start_addr + target_lir->offset;
+ CodeOffset target = start_addr + target_lir->offset;
lir->operands[1] = target >> 16;
} else if (lir->opcode == kMipsLalo) { /* ld address lo (via ori) */
LIR *target_lir = lir->target;
- uintptr_t target = start_addr + target_lir->offset;
+ CodeOffset target = start_addr + target_lir->offset;
lir->operands[2] = lir->operands[2] + target;
}
}
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index 9a5ca2c..18c8cf8 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -59,14 +59,14 @@
* done:
*
*/
-void MipsMir2Lir::GenSparseSwitch(MIR* mir, uint32_t table_offset,
+void MipsMir2Lir::GenSparseSwitch(MIR* mir, DexOffset table_offset,
RegLocation rl_src) {
const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
if (cu_->verbose) {
DumpSparseSwitchTable(table);
}
// Add the table to the list - we'll process it later
- SwitchTable *tab_rec =
+ SwitchTable* tab_rec =
static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), ArenaAllocator::kAllocData));
tab_rec->table = table;
tab_rec->vaddr = current_dalvik_offset_;
@@ -101,8 +101,7 @@
// Remember base label so offsets can be computed later
tab_rec->anchor = base_label;
int rBase = AllocTemp();
- NewLIR4(kMipsDelta, rBase, 0, reinterpret_cast<uintptr_t>(base_label),
- reinterpret_cast<uintptr_t>(tab_rec));
+ NewLIR4(kMipsDelta, rBase, 0, WrapPointer(base_label), WrapPointer(tab_rec));
OpRegRegReg(kOpAdd, rEnd, rEnd, rBase);
// Grab switch test value
@@ -138,20 +137,20 @@
* jr r_RA
* done:
*/
-void MipsMir2Lir::GenPackedSwitch(MIR* mir, uint32_t table_offset,
+void MipsMir2Lir::GenPackedSwitch(MIR* mir, DexOffset table_offset,
RegLocation rl_src) {
const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
if (cu_->verbose) {
DumpPackedSwitchTable(table);
}
// Add the table to the list - we'll process it later
- SwitchTable *tab_rec =
+ SwitchTable* tab_rec =
static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), ArenaAllocator::kAllocData));
tab_rec->table = table;
tab_rec->vaddr = current_dalvik_offset_;
int size = table[1];
tab_rec->targets = static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*),
- ArenaAllocator::kAllocLIR));
+ ArenaAllocator::kAllocLIR));
switch_tables_.Insert(tab_rec);
// Get the switch value
@@ -196,8 +195,7 @@
// Materialize the table base pointer
int rBase = AllocTemp();
- NewLIR4(kMipsDelta, rBase, 0, reinterpret_cast<uintptr_t>(base_label),
- reinterpret_cast<uintptr_t>(tab_rec));
+ NewLIR4(kMipsDelta, rBase, 0, WrapPointer(base_label), WrapPointer(tab_rec));
// Load the displacement from the switch table
int r_disp = AllocTemp();
@@ -222,10 +220,10 @@
*
* Total size is 4+(width * size + 1)/2 16-bit code units.
*/
-void MipsMir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src) {
+void MipsMir2Lir::GenFillArrayData(DexOffset table_offset, RegLocation rl_src) {
const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
// Add the table to the list - we'll process it later
- FillArrayData *tab_rec =
+ FillArrayData* tab_rec =
reinterpret_cast<FillArrayData*>(arena_->Alloc(sizeof(FillArrayData),
ArenaAllocator::kAllocData));
tab_rec->table = table;
@@ -252,8 +250,7 @@
LIR* base_label = NewLIR0(kPseudoTargetLabel);
// Materialize a pointer to the fill data image
- NewLIR4(kMipsDelta, rMIPS_ARG1, 0, reinterpret_cast<uintptr_t>(base_label),
- reinterpret_cast<uintptr_t>(tab_rec));
+ NewLIR4(kMipsDelta, rMIPS_ARG1, 0, WrapPointer(base_label), WrapPointer(tab_rec));
// And go...
ClobberCalleeSave();
diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h
index 387fef3..0be20e8 100644
--- a/compiler/dex/quick/mips/codegen_mips.h
+++ b/compiler/dex/quick/mips/codegen_mips.h
@@ -74,7 +74,7 @@
void AssembleLIR();
int AssignInsnOffsets();
void AssignOffsets();
- AssemblerStatus AssembleInstructions(uintptr_t start_addr);
+ AssemblerStatus AssembleInstructions(CodeOffset start_addr);
void DumpResourceMask(LIR* lir, uint64_t mask, const char* prefix);
void SetupTargetResourceMasks(LIR* lir, uint64_t flags);
const char* GetTargetInstFmt(int opcode);
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index 5d9ae33..2ba2c84 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -93,7 +93,7 @@
} else if ((value < 0) && (value >= -32768)) {
res = NewLIR3(kMipsAddiu, r_dest, r_ZERO, value);
} else {
- res = NewLIR2(kMipsLui, r_dest, value>>16);
+ res = NewLIR2(kMipsLui, r_dest, value >> 16);
if (value & 0xffff)
NewLIR3(kMipsOri, r_dest, r_dest, value);
}
diff --git a/compiler/dex/quick/mir_to_lir-inl.h b/compiler/dex/quick/mir_to_lir-inl.h
index f293700..1a30b7a 100644
--- a/compiler/dex/quick/mir_to_lir-inl.h
+++ b/compiler/dex/quick/mir_to_lir-inl.h
@@ -43,7 +43,7 @@
}
}
-inline LIR* Mir2Lir::RawLIR(int dalvik_offset, int opcode, int op0,
+inline LIR* Mir2Lir::RawLIR(DexOffset dalvik_offset, int opcode, int op0,
int op1, int op2, int op3, int op4, LIR* target) {
LIR* insn = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), ArenaAllocator::kAllocLIR));
insn->dalvik_offset = dalvik_offset;
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 2b26c3d..197e200 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -241,9 +241,9 @@
case Instruction::GOTO_16:
case Instruction::GOTO_32:
if (mir_graph_->IsBackedge(bb, bb->taken)) {
- GenSuspendTestAndBranch(opt_flags, &label_list[bb->taken->id]);
+ GenSuspendTestAndBranch(opt_flags, &label_list[bb->taken]);
} else {
- OpUnconditionalBranch(&label_list[bb->taken->id]);
+ OpUnconditionalBranch(&label_list[bb->taken]);
}
break;
@@ -272,23 +272,22 @@
case Instruction::IF_GE:
case Instruction::IF_GT:
case Instruction::IF_LE: {
- LIR* taken = &label_list[bb->taken->id];
- LIR* fall_through = &label_list[bb->fall_through->id];
+ LIR* taken = &label_list[bb->taken];
+ LIR* fall_through = &label_list[bb->fall_through];
// Result known at compile time?
if (rl_src[0].is_const && rl_src[1].is_const) {
bool is_taken = EvaluateBranch(opcode, mir_graph_->ConstantValue(rl_src[0].orig_sreg),
mir_graph_->ConstantValue(rl_src[1].orig_sreg));
- BasicBlock* target = is_taken ? bb->taken : bb->fall_through;
- if (mir_graph_->IsBackedge(bb, target)) {
+ BasicBlockId target_id = is_taken ? bb->taken : bb->fall_through;
+ if (mir_graph_->IsBackedge(bb, target_id)) {
GenSuspendTest(opt_flags);
}
- OpUnconditionalBranch(&label_list[target->id]);
+ OpUnconditionalBranch(&label_list[target_id]);
} else {
if (mir_graph_->IsBackwardsBranch(bb)) {
GenSuspendTest(opt_flags);
}
- GenCompareAndBranch(opcode, rl_src[0], rl_src[1], taken,
- fall_through);
+ GenCompareAndBranch(opcode, rl_src[0], rl_src[1], taken, fall_through);
}
break;
}
@@ -299,16 +298,16 @@
case Instruction::IF_GEZ:
case Instruction::IF_GTZ:
case Instruction::IF_LEZ: {
- LIR* taken = &label_list[bb->taken->id];
- LIR* fall_through = &label_list[bb->fall_through->id];
+ LIR* taken = &label_list[bb->taken];
+ LIR* fall_through = &label_list[bb->fall_through];
// Result known at compile time?
if (rl_src[0].is_const) {
bool is_taken = EvaluateBranch(opcode, mir_graph_->ConstantValue(rl_src[0].orig_sreg), 0);
- BasicBlock* target = is_taken ? bb->taken : bb->fall_through;
- if (mir_graph_->IsBackedge(bb, target)) {
+ BasicBlockId target_id = is_taken ? bb->taken : bb->fall_through;
+ if (mir_graph_->IsBackedge(bb, target_id)) {
GenSuspendTest(opt_flags);
}
- OpUnconditionalBranch(&label_list[target->id]);
+ OpUnconditionalBranch(&label_list[target_id]);
} else {
if (mir_graph_->IsBackwardsBranch(bb)) {
GenSuspendTest(opt_flags);
@@ -831,8 +830,9 @@
while (curr_bb != NULL) {
MethodBlockCodeGen(curr_bb);
// If the fall_through block is no longer laid out consecutively, drop in a branch.
- if ((curr_bb->fall_through != NULL) && (curr_bb->fall_through != next_bb)) {
- OpUnconditionalBranch(&block_label_list_[curr_bb->fall_through->id]);
+ BasicBlock* curr_bb_fall_through = mir_graph_->GetBasicBlock(curr_bb->fall_through);
+ if ((curr_bb_fall_through != NULL) && (curr_bb_fall_through != next_bb)) {
+ OpUnconditionalBranch(&block_label_list_[curr_bb->fall_through]);
}
curr_bb = next_bb;
do {
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 5df2672..d629b44 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -30,6 +30,14 @@
namespace art {
+/*
+ * TODO: refactoring pass to move these (and other) typedefs toward the usage style of the
+ * runtime to
+ * add type safety (see runtime/offsets.h).
+ */
+typedef uint32_t DexOffset; // Dex offset in code units.
+typedef uint16_t NarrowDexOffset; // For use in structs, Dex offsets range from 0 .. 0xffff.
+typedef uint32_t CodeOffset; // Native code offset in bytes.
+
// Set to 1 to measure cost of suspend check.
#define NO_SUSPEND 0
@@ -119,8 +127,8 @@
};
struct LIR {
- int offset; // Offset of this instruction.
- uint16_t dalvik_offset; // Offset of Dalvik opcode in code units (16-bit words).
+ CodeOffset offset; // Offset of this instruction.
+ NarrowDexOffset dalvik_offset; // Offset of Dalvik opcode in code units (16-bit words).
int16_t opcode;
LIR* next;
LIR* prev;
@@ -134,10 +142,10 @@
unsigned int fixup:8; // Fixup kind.
} flags;
union {
- UseDefMasks m; // Use & Def masks used during optimization.
- AssemblyInfo a; // Instruction encoding used during assembly phase.
+ UseDefMasks m; // Use & Def masks used during optimization.
+ AssemblyInfo a; // Instruction encoding used during assembly phase.
} u;
- int operands[5]; // [0..4] = [dest, src1, src2, extra, extra2].
+ int32_t operands[5]; // [0..4] = [dest, src1, src2, extra, extra2].
};
// Target-specific initialization.
@@ -184,19 +192,23 @@
class Mir2Lir : public Backend {
public:
- struct SwitchTable {
- int offset;
- const uint16_t* table; // Original dex table.
- int vaddr; // Dalvik offset of switch opcode.
- LIR* anchor; // Reference instruction for relative offsets.
- LIR** targets; // Array of case targets.
+ /*
+ * Auxiliary information describing the location of data embedded in the Dalvik
+ * byte code stream.
+ */
+ struct EmbeddedData {
+ CodeOffset offset; // Code offset of data block.
+ const uint16_t* table; // Original dex data.
+ DexOffset vaddr; // Dalvik offset of parent opcode.
};
- struct FillArrayData {
- int offset;
- const uint16_t* table; // Original dex table.
- int size;
- int vaddr; // Dalvik offset of FILL_ARRAY_DATA opcode.
+ struct FillArrayData : EmbeddedData {
+ int32_t size;
+ };
+
+ struct SwitchTable : EmbeddedData {
+ LIR* anchor; // Reference instruction for relative offsets.
+ LIR** targets; // Array of case targets.
};
/* Static register use counts */
@@ -260,6 +272,34 @@
return (opcode < 0);
}
+ /*
+ * LIR operands are 32-bit integers. Sometimes (especially for managing
+ * instructions which require PC-relative fixups) we need the operands to carry
+ * pointers. To do this, we assign these pointers an index in pointer_storage_, and
+ * hold that index in the operand array.
+ * TUNING: If use of these utilities becomes more common on 32-bit builds, it
+ * may be worth conditionally compiling a set of identity functions here.
+ */
+ uint32_t WrapPointer(void* pointer) {
+ uint32_t res = pointer_storage_.Size();
+ pointer_storage_.Insert(pointer);
+ return res;
+ }
+
+ void* UnwrapPointer(size_t index) {
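+ // NOTE: index 0 is reserved for NULL (set up in the Mir2Lir constructor).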
+ return pointer_storage_.Get(index);
+ }
+
+ // strdup(), but allocates from the arena.
+ char* ArenaStrdup(const char* str) {
+ size_t len = strlen(str) + 1;
+ char* res = reinterpret_cast<char*>(arena_->Alloc(len, ArenaAllocator::kAllocMisc));
+ if (res != NULL) {
+ strncpy(res, str, len);
+ }
+ return res;
+ }
+
// Shared by all targets - implemented in codegen_util.cc
void AppendLIR(LIR* lir);
void InsertLIRBefore(LIR* current_lir, LIR* new_lir);
@@ -277,7 +317,7 @@
void DumpLIRInsn(LIR* arg, unsigned char* base_addr);
void DumpPromotionMap();
void CodegenDump();
- LIR* RawLIR(int dalvik_offset, int opcode, int op0 = 0, int op1 = 0,
+ LIR* RawLIR(DexOffset dalvik_offset, int opcode, int op0 = 0, int op1 = 0,
int op2 = 0, int op3 = 0, int op4 = 0, LIR* target = NULL);
LIR* NewLIR0(int opcode);
LIR* NewLIR1(int opcode, int dest);
@@ -292,7 +332,7 @@
void ProcessSwitchTables();
void DumpSparseSwitchTable(const uint16_t* table);
void DumpPackedSwitchTable(const uint16_t* table);
- void MarkBoundary(int offset, const char* inst_str);
+ void MarkBoundary(DexOffset offset, const char* inst_str);
void NopLIR(LIR* lir);
void UnlinkLIR(LIR* lir);
bool EvaluateBranch(Instruction::Code opcode, int src1, int src2);
@@ -307,12 +347,12 @@
bool VerifyCatchEntries();
void CreateMappingTables();
void CreateNativeGcMap();
- int AssignLiteralOffset(int offset);
- int AssignSwitchTablesOffset(int offset);
- int AssignFillArrayDataOffset(int offset);
- LIR* InsertCaseLabel(int vaddr, int keyVal);
- void MarkPackedCaseLabels(Mir2Lir::SwitchTable *tab_rec);
- void MarkSparseCaseLabels(Mir2Lir::SwitchTable *tab_rec);
+ int AssignLiteralOffset(CodeOffset offset);
+ int AssignSwitchTablesOffset(CodeOffset offset);
+ int AssignFillArrayDataOffset(CodeOffset offset);
+ LIR* InsertCaseLabel(DexOffset vaddr, int keyVal);
+ void MarkPackedCaseLabels(Mir2Lir::SwitchTable* tab_rec);
+ void MarkSparseCaseLabels(Mir2Lir::SwitchTable* tab_rec);
// Shared by all targets - implemented in local_optimizations.cc
void ConvertMemOpIntoMove(LIR* orig_lir, int dest, int src);
@@ -642,7 +682,7 @@
virtual void GenEntrySequence(RegLocation* ArgLocs,
RegLocation rl_method) = 0;
virtual void GenExitSequence() = 0;
- virtual void GenFillArrayData(uint32_t table_offset,
+ virtual void GenFillArrayData(DexOffset table_offset,
RegLocation rl_src) = 0;
virtual void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias,
bool is_double) = 0;
@@ -655,9 +695,9 @@
int second_bit) = 0;
virtual void GenNegDouble(RegLocation rl_dest, RegLocation rl_src) = 0;
virtual void GenNegFloat(RegLocation rl_dest, RegLocation rl_src) = 0;
- virtual void GenPackedSwitch(MIR* mir, uint32_t table_offset,
+ virtual void GenPackedSwitch(MIR* mir, DexOffset table_offset,
RegLocation rl_src) = 0;
- virtual void GenSparseSwitch(MIR* mir, uint32_t table_offset,
+ virtual void GenSparseSwitch(MIR* mir, DexOffset table_offset,
RegLocation rl_src) = 0;
virtual void GenSpecialCase(BasicBlock* bb, MIR* mir,
SpecialCaseHandler special_case) = 0;
@@ -672,13 +712,10 @@
// Required for target - single operation generators.
virtual LIR* OpUnconditionalBranch(LIR* target) = 0;
- virtual LIR* OpCmpBranch(ConditionCode cond, int src1, int src2,
- LIR* target) = 0;
- virtual LIR* OpCmpImmBranch(ConditionCode cond, int reg, int check_value,
- LIR* target) = 0;
+ virtual LIR* OpCmpBranch(ConditionCode cond, int src1, int src2, LIR* target) = 0;
+ virtual LIR* OpCmpImmBranch(ConditionCode cond, int reg, int check_value, LIR* target) = 0;
virtual LIR* OpCondBranch(ConditionCode cc, LIR* target) = 0;
- virtual LIR* OpDecAndBranch(ConditionCode c_code, int reg,
- LIR* target) = 0;
+ virtual LIR* OpDecAndBranch(ConditionCode c_code, int reg, LIR* target) = 0;
virtual LIR* OpFpRegCopy(int r_dest, int r_src) = 0;
virtual LIR* OpIT(ConditionCode cond, const char* guide) = 0;
virtual LIR* OpMem(OpKind op, int rBase, int disp) = 0;
@@ -690,16 +727,13 @@
virtual LIR* OpRegMem(OpKind op, int r_dest, int rBase, int offset) = 0;
virtual LIR* OpRegReg(OpKind op, int r_dest_src1, int r_src2) = 0;
virtual LIR* OpRegRegImm(OpKind op, int r_dest, int r_src1, int value) = 0;
- virtual LIR* OpRegRegReg(OpKind op, int r_dest, int r_src1,
- int r_src2) = 0;
+ virtual LIR* OpRegRegReg(OpKind op, int r_dest, int r_src1, int r_src2) = 0;
virtual LIR* OpTestSuspend(LIR* target) = 0;
virtual LIR* OpThreadMem(OpKind op, ThreadOffset thread_offset) = 0;
virtual LIR* OpVldm(int rBase, int count) = 0;
virtual LIR* OpVstm(int rBase, int count) = 0;
- virtual void OpLea(int rBase, int reg1, int reg2, int scale,
- int offset) = 0;
- virtual void OpRegCopyWide(int dest_lo, int dest_hi, int src_lo,
- int src_hi) = 0;
+ virtual void OpLea(int rBase, int reg1, int reg2, int scale, int offset) = 0;
+ virtual void OpRegCopyWide(int dest_lo, int dest_hi, int src_lo, int src_hi) = 0;
virtual void OpTlsCmp(ThreadOffset offset, int val) = 0;
virtual bool InexpensiveConstantInt(int32_t value) = 0;
virtual bool InexpensiveConstantFloat(int32_t value) = 0;
@@ -752,6 +786,7 @@
GrowableArray<LIR*> intrinsic_launchpads_;
GrowableArray<RegisterInfo*> tempreg_info_;
GrowableArray<RegisterInfo*> reginfo_map_;
+ GrowableArray<void*> pointer_storage_;
/*
* Holds mapping from native PC to dex PC for safepoints where we may deoptimize.
* Native PC is on the return address of the safepointed operation. Dex PC is for
@@ -763,9 +798,9 @@
* the instruction immediately preceding it.
*/
std::vector<uint32_t> dex2pc_mapping_table_;
- int current_code_offset_; // Working byte offset of machine instructons.
- int data_offset_; // starting offset of literal pool.
- int total_size_; // header + code size.
+ CodeOffset current_code_offset_; // Working byte offset of machine instructions.
+ CodeOffset data_offset_; // starting offset of literal pool.
+ size_t total_size_; // header + code size.
LIR* block_label_list_;
PromotionMap* promotion_map_;
/*
@@ -777,8 +812,8 @@
* in the CompilationUnit struct before codegen for each instruction.
* The low-level LIR creation utilities will pull it from here. Rework this.
*/
- int current_dalvik_offset_;
- int estimated_native_code_size_; // Just an estimate; used to reserve code_buffer_ size.
+ DexOffset current_dalvik_offset_;
+ size_t estimated_native_code_size_; // Just an estimate; used to reserve code_buffer_ size.
RegisterPool* reg_pool_;
/*
* Sanity checking for the register temp tracking. The same ssa
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index 7927ff9..41a57af 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -66,10 +66,9 @@
LOG(INFO) << "================================================";
for (int i = 0; i < num_regs; i++) {
LOG(INFO) << StringPrintf(
- "R[%d]: T:%d, U:%d, P:%d, p:%d, LV:%d, D:%d, SR:%d, ST:%x, EN:%x",
+ "R[%d]: T:%d, U:%d, P:%d, p:%d, LV:%d, D:%d, SR:%d",
p[i].reg, p[i].is_temp, p[i].in_use, p[i].pair, p[i].partner,
- p[i].live, p[i].dirty, p[i].s_reg, reinterpret_cast<uintptr_t>(p[i].def_start),
- reinterpret_cast<uintptr_t>(p[i].def_end));
+ p[i].live, p[i].dirty, p[i].s_reg);
}
LOG(INFO) << "================================================";
}
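Dropping the ST/EN fields from the dump sidesteps pushing pointer-sized values through a %x conversion, which is undefined once uintptr_t widens to 64 bits. If those fields were ever wanted back, a hedged sketch of the portable spelling (PRIxPTR instead of %x):

  #include <cinttypes>
  #include <cstdio>

  // 64-bit-safe way to print pointer-sized values: uintptr_t via PRIxPTR.
  void DumpDefRange(const void* def_start, const void* def_end) {
    printf("ST:%" PRIxPTR ", EN:%" PRIxPTR "\n",
           reinterpret_cast<uintptr_t>(def_start),
           reinterpret_cast<uintptr_t>(def_end));
  }

  int main() {
    int x = 0;
    DumpDefRange(&x, &x + 1);
    return 0;
  }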
@@ -769,9 +768,9 @@
RegLocation Mir2Lir::EvalLocWide(RegLocation loc, int reg_class, bool update) {
DCHECK(loc.wide);
- int new_regs;
- int low_reg;
- int high_reg;
+ int32_t new_regs;
+ int32_t low_reg;
+ int32_t high_reg;
loc = UpdateLocWide(loc);
diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc
index 064ff31..fb8e75f 100644
--- a/compiler/dex/quick/x86/assemble_x86.cc
+++ b/compiler/dex/quick/x86/assemble_x86.cc
@@ -1090,11 +1090,13 @@
int base_or_table, uint8_t index, int scale, int table_or_disp) {
int disp;
if (entry->opcode == kX86PcRelLoadRA) {
- Mir2Lir::SwitchTable *tab_rec = reinterpret_cast<Mir2Lir::SwitchTable*>(table_or_disp);
+ Mir2Lir::EmbeddedData *tab_rec =
+ reinterpret_cast<Mir2Lir::EmbeddedData*>(UnwrapPointer(table_or_disp));
disp = tab_rec->offset;
} else {
DCHECK(entry->opcode == kX86PcRelAdr);
- Mir2Lir::FillArrayData *tab_rec = reinterpret_cast<Mir2Lir::FillArrayData*>(base_or_table);
+ Mir2Lir::EmbeddedData *tab_rec =
+ reinterpret_cast<Mir2Lir::EmbeddedData*>(UnwrapPointer(base_or_table));
disp = tab_rec->offset;
}
if (entry->skeleton.prefix1 != 0) {
@@ -1161,7 +1163,7 @@
* instruction. In those cases we will try to substitute a new code
* sequence or request that the trace be shortened and retried.
*/
-AssemblerStatus X86Mir2Lir::AssembleInstructions(uintptr_t start_addr) {
+AssemblerStatus X86Mir2Lir::AssembleInstructions(CodeOffset start_addr) {
LIR *lir;
AssemblerStatus res = kSuccess; // Assume success
@@ -1181,13 +1183,13 @@
LIR *target_lir = lir->target;
DCHECK(target_lir != NULL);
int delta = 0;
- uintptr_t pc;
+ CodeOffset pc;
if (IS_SIMM8(lir->operands[0])) {
pc = lir->offset + 2 /* opcode + rel8 */;
} else {
pc = lir->offset + 6 /* 2 byte opcode + rel32 */;
}
- uintptr_t target = target_lir->offset;
+ CodeOffset target = target_lir->offset;
delta = target - pc;
if (IS_SIMM8(delta) != IS_SIMM8(lir->operands[0])) {
if (kVerbosePcFixup) {
@@ -1211,8 +1213,8 @@
case kX86Jcc32: {
LIR *target_lir = lir->target;
DCHECK(target_lir != NULL);
- uintptr_t pc = lir->offset + 6 /* 2 byte opcode + rel32 */;
- uintptr_t target = target_lir->offset;
+ CodeOffset pc = lir->offset + 6 /* 2 byte opcode + rel32 */;
+ CodeOffset target = target_lir->offset;
int delta = target - pc;
if (kVerbosePcFixup) {
LOG(INFO) << "Source:";
@@ -1228,13 +1230,13 @@
LIR *target_lir = lir->target;
DCHECK(target_lir != NULL);
int delta = 0;
- uintptr_t pc;
+ CodeOffset pc;
if (IS_SIMM8(lir->operands[0])) {
pc = lir->offset + 2 /* opcode + rel8 */;
} else {
pc = lir->offset + 5 /* opcode + rel32 */;
}
- uintptr_t target = target_lir->offset;
+ CodeOffset target = target_lir->offset;
delta = target - pc;
if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && delta == 0) {
// Useless branch
@@ -1257,8 +1259,8 @@
case kX86Jmp32: {
LIR *target_lir = lir->target;
DCHECK(target_lir != NULL);
- uintptr_t pc = lir->offset + 5 /* opcode + rel32 */;
- uintptr_t target = target_lir->offset;
+ CodeOffset pc = lir->offset + 5 /* opcode + rel32 */;
+ CodeOffset target = target_lir->offset;
int delta = target - pc;
lir->operands[0] = delta;
break;
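Each fixup above follows the same arithmetic: pc is the offset just past the branch instruction, target is the label's offset, and delta = target - pc picks the encoding. A self-contained sketch of the kX86Jcc8 decision (the offsets are made up for illustration; IsSimm8 mirrors the IS_SIMM8 range used above):

  #include <cstdint>
  #include <cstdio>

  using CodeOffset = uint32_t;  // the 32-bit offset type this change introduces

  static bool IsSimm8(int32_t v) { return v >= -128 && v <= 127; }

  int main() {
    CodeOffset branch_offset = 0x40;    // hypothetical lir->offset
    CodeOffset target_offset = 0x48;    // hypothetical target_lir->offset
    CodeOffset pc = branch_offset + 2;  // pc is past the 2-byte opcode + rel8 form
    int32_t delta = static_cast<int32_t>(target_offset - pc);
    printf("delta=%d, fits rel8: %s\n", delta, IsSimm8(delta) ? "yes" : "no");
    return 0;
  }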
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index 7fad6f0..17924b0 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -31,15 +31,15 @@
* The sparse table in the literal pool is an array of <key,displacement>
* pairs.
*/
-void X86Mir2Lir::GenSparseSwitch(MIR* mir, uint32_t table_offset,
+void X86Mir2Lir::GenSparseSwitch(MIR* mir, DexOffset table_offset,
RegLocation rl_src) {
const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
if (cu_->verbose) {
DumpSparseSwitchTable(table);
}
int entries = table[1];
- const int* keys = reinterpret_cast<const int*>(&table[2]);
- const int* targets = &keys[entries];
+ const int32_t* keys = reinterpret_cast<const int32_t*>(&table[2]);
+ const int32_t* targets = &keys[entries];
rl_src = LoadValue(rl_src, kCoreReg);
for (int i = 0; i < entries; i++) {
int key = keys[i];
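The table walked above is the Dalvik sparse-switch-payload: a 0x0200 ident, an entry count, then size int32 keys followed by size int32 targets. A standalone reader mirroring the pointer arithmetic in GenSparseSwitch (hand-built little-endian payload, for illustration only):

  #include <cstdint>
  #include <cstdio>

  int main() {
    // alignas(4) keeps the int32 reinterpret_cast below strictly legal.
    alignas(4) static const uint16_t table[] = {
        0x0200,        // ident: sparse-switch-payload
        2,             // size (entry count), read as table[1] above
        10, 0, 20, 0,  // keys[2] = {10, 20} as little-endian int32 halves
        0x100, 0, 0x200, 0,  // targets[2] = {0x100, 0x200}
    };
    int entries = table[1];
    const int32_t* keys = reinterpret_cast<const int32_t*>(&table[2]);
    const int32_t* targets = &keys[entries];
    for (int i = 0; i < entries; i++) {
      printf("key %d -> target offset 0x%x\n", keys[i], targets[i]);
    }
    return 0;
  }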
@@ -66,15 +66,15 @@
* jmp r_start_of_method
* done:
*/
-void X86Mir2Lir::GenPackedSwitch(MIR* mir, uint32_t table_offset,
+void X86Mir2Lir::GenPackedSwitch(MIR* mir, DexOffset table_offset,
RegLocation rl_src) {
const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
if (cu_->verbose) {
DumpPackedSwitchTable(table);
}
// Add the table to the list - we'll process it later
- SwitchTable *tab_rec =
- static_cast<SwitchTable *>(arena_->Alloc(sizeof(SwitchTable), ArenaAllocator::kAllocData));
+ SwitchTable* tab_rec =
+ static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), ArenaAllocator::kAllocData));
tab_rec->table = table;
tab_rec->vaddr = current_dalvik_offset_;
int size = table[1];
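For contrast, the table read here is the packed-switch-payload: a 0x0100 ident, a size, a single int32 first_key, then size int32 targets indexed directly by key - first_key. A sketch of the lookup the generated code performs (illustrative values):

  #include <cstdint>
  #include <cstdio>

  int main() {
    alignas(4) static const uint16_t table[] = {
        0x0100,  // ident: packed-switch-payload
        3,       // size, read above as `int size = table[1];`
        5, 0,    // first_key = 5 (little-endian int32 halves)
        0x10, 0, 0x20, 0, 0x30, 0,  // targets[3]
    };
    int size = table[1];
    int32_t first_key = *reinterpret_cast<const int32_t*>(&table[2]);
    const int32_t* targets = reinterpret_cast<const int32_t*>(&table[4]);
    int key = 6;  // hypothetical switch operand
    if (key >= first_key && key < first_key + size) {
      printf("key %d -> target offset 0x%x\n", key, targets[key - first_key]);
    }
    return 0;
  }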
@@ -103,8 +103,7 @@
// Load the displacement from the switch table
int disp_reg = AllocTemp();
- NewLIR5(kX86PcRelLoadRA, disp_reg, start_of_method_reg, keyReg, 2,
- reinterpret_cast<uintptr_t>(tab_rec));
+ NewLIR5(kX86PcRelLoadRA, disp_reg, start_of_method_reg, keyReg, 2, WrapPointer(tab_rec));
// Add displacement to start of method
OpRegReg(kOpAdd, start_of_method_reg, disp_reg);
// ..and go!
@@ -126,10 +125,10 @@
*
* Total size is 4+(width * size + 1)/2 16-bit code units.
*/
-void X86Mir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src) {
+void X86Mir2Lir::GenFillArrayData(DexOffset table_offset, RegLocation rl_src) {
const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
// Add the table to the list - we'll process it later
- FillArrayData *tab_rec =
+ FillArrayData* tab_rec =
static_cast<FillArrayData*>(arena_->Alloc(sizeof(FillArrayData), ArenaAllocator::kAllocData));
tab_rec->table = table;
tab_rec->vaddr = current_dalvik_offset_;
@@ -144,7 +143,7 @@
LoadValueDirectFixed(rl_src, rX86_ARG0);
// Materialize a pointer to the fill data image
NewLIR1(kX86StartOfMethod, rX86_ARG2);
- NewLIR2(kX86PcRelAdr, rX86_ARG1, reinterpret_cast<uintptr_t>(tab_rec));
+ NewLIR2(kX86PcRelAdr, rX86_ARG1, WrapPointer(tab_rec));
NewLIR2(kX86Add32RR, rX86_ARG1, rX86_ARG2);
CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayData), rX86_ARG0,
rX86_ARG1, true);
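The size comment above works out as follows: the fill-array-data-payload header is 4 code units (ident, element width, 32-bit count), and the raw data occupies width * size bytes rounded up to 16-bit units. A quick check of the stated formula:

  #include <cstdio>

  int main() {
    // 4 + (width * size + 1) / 2 16-bit code units, as stated above.
    int width = 4;  // e.g. an int[] fill
    int size = 3;   // three elements -> 12 data bytes
    int code_units = 4 + (width * size + 1) / 2;
    printf("%d code units\n", code_units);  // prints 10: 4 header + 6 data
    return 0;
  }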
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index c266e39..b1d95ff 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -74,7 +74,7 @@
void AssembleLIR();
int AssignInsnOffsets();
void AssignOffsets();
- AssemblerStatus AssembleInstructions(uintptr_t start_addr);
+ AssemblerStatus AssembleInstructions(CodeOffset start_addr);
void DumpResourceMask(LIR* lir, uint64_t mask, const char* prefix);
void SetupTargetResourceMasks(LIR* lir, uint64_t flags);
const char* GetTargetInstFmt(int opcode);
@@ -119,7 +119,7 @@
void GenDivZeroCheck(int reg_lo, int reg_hi);
void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method);
void GenExitSequence();
- void GenFillArrayData(uint32_t table_offset, RegLocation rl_src);
+ void GenFillArrayData(DexOffset table_offset, RegLocation rl_src);
void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double);
void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir);
void GenSelect(BasicBlock* bb, MIR* mir);
@@ -129,8 +129,8 @@
int lit, int first_bit, int second_bit);
void GenNegDouble(RegLocation rl_dest, RegLocation rl_src);
void GenNegFloat(RegLocation rl_dest, RegLocation rl_src);
- void GenPackedSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
- void GenSparseSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
+ void GenPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
+ void GenSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
void GenSpecialCase(BasicBlock* bb, MIR* mir, SpecialCaseHandler special_case);
// Single operation generators.
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
index f736b5e..c9d6bfc 100644
--- a/compiler/dex/quick/x86/fp_x86.cc
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -284,8 +284,8 @@
void X86Mir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias,
bool is_double) {
- LIR* taken = &block_label_list_[bb->taken->id];
- LIR* not_taken = &block_label_list_[bb->fall_through->id];
+ LIR* taken = &block_label_list_[bb->taken];
+ LIR* not_taken = &block_label_list_[bb->fall_through];
LIR* branch = NULL;
RegLocation rl_src1;
RegLocation rl_src2;
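bb->taken and bb->fall_through are now 16-bit block ids rather than BasicBlock* (per the summary), so the label lookup indexes block_label_list_ directly instead of chasing a pointer for ->id. A minimal sketch of the shape change (the struct layouts here are illustrative, not the real BasicBlock):

  #include <cstdint>

  using BasicBlockId = uint16_t;
  struct LIR { /* opaque for this sketch */ };

  struct BasicBlock {
    BasicBlockId id;
    BasicBlockId taken;         // was: BasicBlock* taken
    BasicBlockId fall_through;  // was: BasicBlock* fall_through
  };

  // Old: &block_label_list_[bb->taken->id]  (dereference, then index)
  // New: &block_label_list_[bb->taken]      (index directly by id)
  inline LIR* TakenLabel(LIR* block_label_list, const BasicBlock* bb) {
    return &block_label_list[bb->taken];
  }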
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index 14f5348..324d975 100644
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -166,7 +166,7 @@
}
void X86Mir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
- LIR* taken = &block_label_list_[bb->taken->id];
+ LIR* taken = &block_label_list_[bb->taken];
RegLocation rl_src1 = mir_graph_->GetSrcWide(mir, 0);
RegLocation rl_src2 = mir_graph_->GetSrcWide(mir, 2);
FlushAllRegs();
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 0f005da..901ac9e 100644
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -223,7 +223,7 @@
buf += StringPrintf("%d", operand);
break;
case 'p': {
- SwitchTable *tab_rec = reinterpret_cast<SwitchTable*>(operand);
+ EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(operand));
buf += StringPrintf("0x%08x", tab_rec->offset);
break;
}
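The 'p' formatter now recovers an EmbeddedData through UnwrapPointer rather than casting the operand itself. EmbeddedData is the common shape shared by SwitchTable and FillArrayData (the switch-table/array-data cleanup named in the summary); a hedged sketch of the fields the assembler relies on, where only ->offset is accessed through EmbeddedData* in this diff and table/vaddr are inferred from the assignments in call_x86.cc above:

  #include <cstdint>

  using CodeOffset = uint32_t;  // 32-bit offset types per the summary
  using DexOffset = uint32_t;

  struct EmbeddedData {
    CodeOffset offset;      // code-buffer offset of the emitted data block
    const uint16_t* table;  // original dex payload
    DexOffset vaddr;        // dalvik offset of the owning instruction
  };

  struct SwitchTable : EmbeddedData { /* per-case target labels, etc. */ };
  struct FillArrayData : EmbeddedData { /* element count, etc. */ };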
@@ -238,7 +238,7 @@
break;
case 't':
buf += StringPrintf("0x%08x (L%p)",
- reinterpret_cast<uint32_t>(base_addr)
+ reinterpret_cast<uintptr_t>(base_addr)
+ lir->offset + operand, lir->target);
break;
default: