64-bit prep
Preparation for the 64-bit roll.
o Eliminated storing pointers in 32-bit int slots in LIR.
o General size reductions of common structures to reduce the impact
of doubled pointer sizes:
- BasicBlock struct was 72 bytes, now 48.
- MIR struct was 72 bytes, now 64.
- RegLocation was 12 bytes, now 8.
o Generally replaced uses of BasicBlock* pointers with 16-bit
BasicBlockIds (a sketch of the change follows this list).
o Replaced several doubly-linked lists with singly-linked lists to
save one stored pointer per node.
o We had quite a few uses of uintptr_t that were a holdover from
the JIT (which used pointers to mapped dex and actual code cache
addresses rather than trace-relative offsets). Replaced those with
uint32_t.
o Cleaned up handling of embedded data for switch tables and array data.
o Miscellaneous cleanup.
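
A minimal sketch of the pointer-to-id shape of the change. The layouts
below are illustrative stand-ins, not the real MIRGraph/BasicBlock
definitions; only the BasicBlockId typedef, the taken/fall_through
fields, and GetBasicBlock() correspond to names used in the diff:

  #include <cstddef>
  #include <cstdint>
  #include <vector>

  typedef uint16_t BasicBlockId;  // 2 bytes instead of an 8-byte BasicBlock* on 64-bit

  struct BasicBlock {
    BasicBlockId id;
    BasicBlockId taken;           // was: BasicBlock* taken;
    BasicBlockId fall_through;    // was: BasicBlock* fall_through;
  };

  class MIRGraph {
   public:
    // Ids index into a block list owned by the graph, so a real pointer is
    // only one lookup away at the few sites that still need to dereference.
    // Assumes id 0 is reserved to mean "no block".
    BasicBlock* GetBasicBlock(BasicBlockId id) const {
      if (id == 0) {
        return NULL;
      }
      return block_list_[id];
    }

   private:
    std::vector<BasicBlock*> block_list_;
  };

  // Usage mirrors the last hunk of the diff: resolve an id only where a
  // pointer is actually required, e.g.
  //   BasicBlock* ft = mir_graph_->GetBasicBlock(curr_bb->fall_through);
  //   if ((ft != NULL) && (ft != next_bb)) { ... }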
I anticipate one or two additional CLs to reduce the size of MIR and LIR
structs.
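
To make the size arithmetic above concrete, here is a standalone sketch.
The node and successor layouts are hypothetical stand-ins, not the actual
LIR/MIR/BasicBlock definitions; they only show what doubled pointer widths
cost on LP64 and what the id and singly-linked changes recover:

  #include <cstdint>
  #include <cstdio>

  struct DoublyLinkedNode {   // prev + next: 16 bytes of links on LP64
    DoublyLinkedNode* prev;
    DoublyLinkedNode* next;
    uint32_t payload;
  };

  struct SinglyLinkedNode {   // dropping prev saves 8 bytes per node on LP64
    SinglyLinkedNode* next;
    uint32_t payload;
  };

  struct PtrSuccessors {      // two BasicBlock*-style successor pointers
    void* taken;
    void* fall_through;
  };

  struct IdSuccessors {       // two 16-bit ids: 4 bytes instead of 16
    uint16_t taken;
    uint16_t fall_through;
  };

  int main() {
    printf("doubly-linked node: %zu bytes\n", sizeof(DoublyLinkedNode));  // 24 on LP64
    printf("singly-linked node: %zu bytes\n", sizeof(SinglyLinkedNode));  // 16 on LP64
    printf("pointer successors: %zu bytes\n", sizeof(PtrSuccessors));     // 16 on LP64
    printf("id successors:      %zu bytes\n", sizeof(IdSuccessors));      // 4
    return 0;
  }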
Change-Id: I58e426d3f8e5efe64c1146b2823453da99451230
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 2b26c3d..197e200 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -241,9 +241,9 @@
     case Instruction::GOTO_16:
     case Instruction::GOTO_32:
       if (mir_graph_->IsBackedge(bb, bb->taken)) {
-        GenSuspendTestAndBranch(opt_flags, &label_list[bb->taken->id]);
+        GenSuspendTestAndBranch(opt_flags, &label_list[bb->taken]);
       } else {
-        OpUnconditionalBranch(&label_list[bb->taken->id]);
+        OpUnconditionalBranch(&label_list[bb->taken]);
       }
       break;
@@ -272,23 +272,22 @@
     case Instruction::IF_GE:
     case Instruction::IF_GT:
     case Instruction::IF_LE: {
-      LIR* taken = &label_list[bb->taken->id];
-      LIR* fall_through = &label_list[bb->fall_through->id];
+      LIR* taken = &label_list[bb->taken];
+      LIR* fall_through = &label_list[bb->fall_through];
       // Result known at compile time?
       if (rl_src[0].is_const && rl_src[1].is_const) {
         bool is_taken = EvaluateBranch(opcode, mir_graph_->ConstantValue(rl_src[0].orig_sreg),
                                        mir_graph_->ConstantValue(rl_src[1].orig_sreg));
-        BasicBlock* target = is_taken ? bb->taken : bb->fall_through;
-        if (mir_graph_->IsBackedge(bb, target)) {
+        BasicBlockId target_id = is_taken ? bb->taken : bb->fall_through;
+        if (mir_graph_->IsBackedge(bb, target_id)) {
           GenSuspendTest(opt_flags);
         }
-        OpUnconditionalBranch(&label_list[target->id]);
+        OpUnconditionalBranch(&label_list[target_id]);
       } else {
         if (mir_graph_->IsBackwardsBranch(bb)) {
           GenSuspendTest(opt_flags);
         }
-        GenCompareAndBranch(opcode, rl_src[0], rl_src[1], taken,
-                            fall_through);
+        GenCompareAndBranch(opcode, rl_src[0], rl_src[1], taken, fall_through);
       }
       break;
     }
@@ -299,16 +298,16 @@
     case Instruction::IF_GEZ:
     case Instruction::IF_GTZ:
     case Instruction::IF_LEZ: {
-      LIR* taken = &label_list[bb->taken->id];
-      LIR* fall_through = &label_list[bb->fall_through->id];
+      LIR* taken = &label_list[bb->taken];
+      LIR* fall_through = &label_list[bb->fall_through];
       // Result known at compile time?
       if (rl_src[0].is_const) {
         bool is_taken = EvaluateBranch(opcode, mir_graph_->ConstantValue(rl_src[0].orig_sreg), 0);
-        BasicBlock* target = is_taken ? bb->taken : bb->fall_through;
-        if (mir_graph_->IsBackedge(bb, target)) {
+        BasicBlockId target_id = is_taken ? bb->taken : bb->fall_through;
+        if (mir_graph_->IsBackedge(bb, target_id)) {
           GenSuspendTest(opt_flags);
         }
-        OpUnconditionalBranch(&label_list[target->id]);
+        OpUnconditionalBranch(&label_list[target_id]);
       } else {
         if (mir_graph_->IsBackwardsBranch(bb)) {
           GenSuspendTest(opt_flags);
@@ -831,8 +830,9 @@
   while (curr_bb != NULL) {
     MethodBlockCodeGen(curr_bb);
     // If the fall_through block is no longer laid out consecutively, drop in a branch.
-    if ((curr_bb->fall_through != NULL) && (curr_bb->fall_through != next_bb)) {
-      OpUnconditionalBranch(&block_label_list_[curr_bb->fall_through->id]);
+    BasicBlock* curr_bb_fall_through = mir_graph_->GetBasicBlock(curr_bb->fall_through);
+    if ((curr_bb_fall_through != NULL) && (curr_bb_fall_through != next_bb)) {
+      OpUnconditionalBranch(&block_label_list_[curr_bb->fall_through]);
     }
     curr_bb = next_bb;
     do {