64-bit prep

Preparation for 64-bit roll.
  o Eliminated storing pointers in 32-bit int slots in LIR.
  o General size reductions of common structures to reduce the impact
    of doubled pointer sizes:
    - BasicBlock struct was 72 bytes, now is 48.
    - MIR struct was 72 bytes, now is 64.
    - RegLocation was 12 bytes, now is 8.
  o Generally replaced uses of BasicBlock* pointers with 16-bit Ids
    (see the sketch after this list).
  o Replaced several doubly-linked lists with singly-linked to save
    one stored pointer per node (also covered by the sketch below).
  o We had quite a few uses of uintptr_t's that were holdovers from
    the JIT (which used pointers to mapped dex & actual code cache
    addresses rather than trace-relative offsets).  Replaced those with
    uint32_t's.
  o Cleaned up handling of embedded data for switch tables and array data.
  o Miscellaneous cleanup.
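
To make the Id-based scheme concrete, here is a condensed sketch of
the two patterns flagged above: 16-bit block ids resolved on demand
(with id 0 reserved for a hidden "null" block), and prev-less MIR
insertion.  The structs are trimmed to the relevant fields, and
std::vector stands in for the arena-backed GrowableArray the real
code uses, so this is illustrative rather than the actual classes:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    typedef uint16_t BasicBlockId;
    static const BasicBlockId NullBasicBlockId = 0;

    struct MIR {
      MIR* next;                  // prev pointer removed; forward links only.
    };

    struct BasicBlock {
      BasicBlockId id;
      BasicBlockId taken;         // Was BasicBlock*; 0 means "no edge".
      BasicBlockId fall_through;
      MIR* first_mir_insn;
      MIR* last_mir_insn;
    };

    // Stand-in for MIRGraph's block_list_; slot 0 holds the hidden null
    // block so that id 0 can safely mean "none".
    std::vector<BasicBlock*> block_list;

    BasicBlock* GetBasicBlock(BasicBlockId id) {
      return (id == NullBasicBlockId) ? NULL : block_list[id];
    }

    // Without prev pointers, insert-after patches only the next links
    // and, when current_mir was the tail, the block's last_mir_insn.
    void InsertMIRAfter(BasicBlock* bb, MIR* current_mir, MIR* new_mir) {
      new_mir->next = current_mir->next;
      current_mir->next = new_mir;
      if (bb->last_mir_insn == current_mir) {
        bb->last_mir_insn = new_mir;
      }
    }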

I anticipate one or two additional CLs to reduce the size of MIR and LIR
structs.
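
Most of the BasicBlock shrinkage beyond the Id change comes from
packing the enum and bool members into bitfields, as in the mir_graph.h
hunk below.  A minimal before/after sketch, with plain integer
bitfields standing in for the BBType/BlockListType enums and only an
illustrative subset of fields (exact sizes and packing are
compiler-dependent; the measured numbers for the full structs are in
the list above):

    #include <cstdint>
    #include <cstdio>

    struct FlagsBefore {      // Each bool occupies a full byte, and the
      int block_type;         // enum takes a full int, plus padding.
      bool visited;
      bool hidden;
      bool catch_entry;
      bool explicit_throw;
      bool conditional_branch;
      bool terminated_by_return;
      bool dominates_return;
    };

    struct FlagsAfter {       // Two 4-bit enum fields plus 1-bit bools.
      uint16_t block_type : 4;
      uint16_t successor_block_list_type : 4;
      bool visited : 1;
      bool hidden : 1;
      bool catch_entry : 1;
      bool explicit_throw : 1;
      bool conditional_branch : 1;
      bool terminated_by_return : 1;
      bool dominates_return : 1;
    };

    int main() {
      printf("%zu -> %zu bytes\n", sizeof(FlagsBefore), sizeof(FlagsAfter));
      return 0;
    }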

Change-Id: I58e426d3f8e5efe64c1146b2823453da99451230
diff --git a/compiler/dex/arena_bit_vector.h b/compiler/dex/arena_bit_vector.h
index 24a7ce9..53e6eb6 100644
--- a/compiler/dex/arena_bit_vector.h
+++ b/compiler/dex/arena_bit_vector.h
@@ -39,7 +39,7 @@
             bit_size_(p_bits_->storage_size_ * sizeof(uint32_t) * 8) {}
 
         // Return the position of the next set bit.  -1 means end-of-element reached.
-        int Next() {
+        int32_t Next() {
           // Did anything obviously change since we started?
           DCHECK_EQ(bit_size_, p_bits_->GetStorageSize() * sizeof(uint32_t) * 8);
           DCHECK_EQ(bit_storage_, p_bits_->GetRawStorage());
@@ -79,7 +79,7 @@
         const uint32_t bit_size_;       // Size of vector in bits.
     };
 
-    ArenaBitVector(ArenaAllocator* arena, unsigned int start_bits, bool expandable,
+    ArenaBitVector(ArenaAllocator* arena, uint32_t start_bits, bool expandable,
                    OatBitMapKind kind = kBitMapMisc);
     ~ArenaBitVector() {}
 
@@ -88,13 +88,13 @@
     }
     static void operator delete(void* p) {}  // Nop.
 
-    void SetBit(unsigned int num);
-    void ClearBit(unsigned int num);
+    void SetBit(uint32_t num);
+    void ClearBit(uint32_t num);
     void MarkAllBits(bool set);
     void DebugBitVector(char* msg, int length);
-    bool IsBitSet(unsigned int num);
+    bool IsBitSet(uint32_t num);
     void ClearAllBits();
-    void SetInitialBits(unsigned int num_bits);
+    void SetInitialBits(uint32_t num_bits);
     void Copy(ArenaBitVector* src) {
       memcpy(storage_, src->GetRawStorage(), sizeof(uint32_t) * storage_size_);
     }
@@ -106,7 +106,7 @@
         (expandable_ == src->IsExpandable()) &&
         (memcmp(storage_, src->GetRawStorage(), storage_size_ * 4) == 0);
     }
-    int NumSetBits();
+    int32_t NumSetBits();
 
     uint32_t GetStorageSize() const { return storage_size_; }
     bool IsExpandable() const { return expandable_; }
diff --git a/compiler/dex/compiler_enums.h b/compiler/dex/compiler_enums.h
index 17b5bb5..05ca1b5 100644
--- a/compiler/dex/compiler_enums.h
+++ b/compiler/dex/compiler_enums.h
@@ -55,6 +55,7 @@
 };
 
 enum BBType {
+  kNullBlock,
   kEntryBlock,
   kDalvikByteCode,
   kExitBlock,
diff --git a/compiler/dex/compiler_ir.h b/compiler/dex/compiler_ir.h
index 6607562..bdc3154 100644
--- a/compiler/dex/compiler_ir.h
+++ b/compiler/dex/compiler_ir.h
@@ -90,14 +90,14 @@
   InstructionSet instruction_set;
 
   // TODO: much of this info available elsewhere.  Go to the original source?
-  int num_dalvik_registers;        // method->registers_size.
+  uint16_t num_dalvik_registers;        // method->registers_size.
   const uint16_t* insns;
-  int num_ins;
-  int num_outs;
-  int num_regs;            // Unlike num_dalvik_registers, does not include ins.
+  uint16_t num_ins;
+  uint16_t num_outs;
+  uint16_t num_regs;            // Unlike num_dalvik_registers, does not include ins.
 
   // TODO: may want to move this to MIRGraph.
-  int num_compiler_temps;
+  uint16_t num_compiler_temps;
 
   // If non-empty, apply optimizer/debug flags only to matching methods.
   std::string compiler_method_match;
diff --git a/compiler/dex/dataflow_iterator-inl.h b/compiler/dex/dataflow_iterator-inl.h
index 236c6f4..74f36dd 100644
--- a/compiler/dex/dataflow_iterator-inl.h
+++ b/compiler/dex/dataflow_iterator-inl.h
@@ -25,7 +25,7 @@
 inline BasicBlock* DataflowIterator::ForwardSingleNext() {
   BasicBlock* res = NULL;
   if (idx_ < end_idx_) {
-    int bb_id = block_id_list_->Get(idx_++);
+    BasicBlockId bb_id = block_id_list_->Get(idx_++);
     res = mir_graph_->GetBasicBlock(bb_id);
   }
   return res;
@@ -40,7 +40,7 @@
     changed_ = false;
   }
   if (idx_ < end_idx_) {
-    int bb_id = block_id_list_->Get(idx_++);
+    BasicBlockId bb_id = block_id_list_->Get(idx_++);
     res = mir_graph_->GetBasicBlock(bb_id);
   }
   return res;
@@ -50,7 +50,7 @@
 inline BasicBlock* DataflowIterator::ReverseSingleNext() {
   BasicBlock* res = NULL;
   if (idx_ >= 0) {
-    int bb_id = block_id_list_->Get(idx_--);
+    BasicBlockId bb_id = block_id_list_->Get(idx_--);
     res = mir_graph_->GetBasicBlock(bb_id);
   }
   return res;
@@ -65,7 +65,7 @@
     changed_ = false;
   }
   if (idx_ >= 0) {
-    int bb_id = block_id_list_->Get(idx_--);
+    BasicBlockId bb_id = block_id_list_->Get(idx_--);
     res = mir_graph_->GetBasicBlock(bb_id);
   }
   return res;
diff --git a/compiler/dex/dataflow_iterator.h b/compiler/dex/dataflow_iterator.h
index 1dab54e..26e3665 100644
--- a/compiler/dex/dataflow_iterator.h
+++ b/compiler/dex/dataflow_iterator.h
@@ -39,7 +39,7 @@
       virtual ~DataflowIterator() {}
 
     protected:
-      DataflowIterator(MIRGraph* mir_graph, int start_idx, int end_idx)
+      DataflowIterator(MIRGraph* mir_graph, int32_t start_idx, int32_t end_idx)
           : mir_graph_(mir_graph),
             start_idx_(start_idx),
             end_idx_(end_idx),
@@ -53,10 +53,10 @@
       virtual BasicBlock* ReverseRepeatNext(bool had_change) ALWAYS_INLINE;
 
       MIRGraph* const mir_graph_;
-      const int start_idx_;
-      const int end_idx_;
-      GrowableArray<int>* block_id_list_;
-      int idx_;
+      const int32_t start_idx_;
+      const int32_t end_idx_;
+      GrowableArray<BasicBlockId>* block_id_list_;
+      int32_t idx_;
       bool changed_;
   };  // DataflowIterator
 
diff --git a/compiler/dex/mir_analysis.cc b/compiler/dex/mir_analysis.cc
index 8597172..89af06e 100644
--- a/compiler/dex/mir_analysis.cc
+++ b/compiler/dex/mir_analysis.cc
@@ -864,7 +864,7 @@
   if (ending_bb->last_mir_insn != NULL) {
     uint32_t ending_flags = analysis_attributes_[ending_bb->last_mir_insn->dalvikInsn.opcode];
     while ((ending_flags & AN_BRANCH) == 0) {
-      ending_bb = ending_bb->fall_through;
+      ending_bb = GetBasicBlock(ending_bb->fall_through);
       ending_flags = analysis_attributes_[ending_bb->last_mir_insn->dalvikInsn.opcode];
     }
   }
@@ -876,13 +876,14 @@
    */
   int loop_scale_factor = 1;
   // Simple for and while loops
-  if ((ending_bb->taken != NULL) && (ending_bb->fall_through == NULL)) {
-    if ((ending_bb->taken->taken == bb) || (ending_bb->taken->fall_through == bb)) {
+  if ((ending_bb->taken != NullBasicBlockId) && (ending_bb->fall_through == NullBasicBlockId)) {
+    if ((GetBasicBlock(ending_bb->taken)->taken == bb->id) ||
+        (GetBasicBlock(ending_bb->taken)->fall_through == bb->id)) {
       loop_scale_factor = 25;
     }
   }
   // Simple do-while loop
-  if ((ending_bb->taken != NULL) && (ending_bb->taken == bb)) {
+  if ((ending_bb->taken != NullBasicBlockId) && (ending_bb->taken == bb->id)) {
     loop_scale_factor = 25;
   }
 
@@ -922,7 +923,7 @@
     if (tbb == ending_bb) {
       done = true;
     } else {
-      tbb = tbb->fall_through;
+      tbb = GetBasicBlock(tbb->fall_through);
     }
   }
   if (has_math && computational_block && (loop_scale_factor > 1)) {
diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc
index 3d29908..9c8ce23 100644
--- a/compiler/dex/mir_dataflow.cc
+++ b/compiler/dex/mir_dataflow.cc
@@ -1295,23 +1295,23 @@
 
/* Verify that each predecessor claimed by this block is actually connected to it */
 bool MIRGraph::VerifyPredInfo(BasicBlock* bb) {
-  GrowableArray<BasicBlock*>::Iterator iter(bb->predecessors);
+  GrowableArray<BasicBlockId>::Iterator iter(bb->predecessors);
 
   while (true) {
-    BasicBlock *pred_bb = iter.Next();
+    BasicBlock *pred_bb = GetBasicBlock(iter.Next());
     if (!pred_bb) break;
     bool found = false;
-    if (pred_bb->taken == bb) {
+    if (pred_bb->taken == bb->id) {
         found = true;
-    } else if (pred_bb->fall_through == bb) {
+    } else if (pred_bb->fall_through == bb->id) {
         found = true;
-    } else if (pred_bb->successor_block_list.block_list_type != kNotUsed) {
-      GrowableArray<SuccessorBlockInfo*>::Iterator iterator(pred_bb->successor_block_list.blocks);
+    } else if (pred_bb->successor_block_list_type != kNotUsed) {
+      GrowableArray<SuccessorBlockInfo*>::Iterator iterator(pred_bb->successor_blocks);
       while (true) {
         SuccessorBlockInfo *successor_block_info = iterator.Next();
         if (successor_block_info == NULL) break;
-        BasicBlock *succ_bb = successor_block_info->block;
-        if (succ_bb == bb) {
+        BasicBlockId succ_bb = successor_block_info->block;
+        if (succ_bb == bb->id) {
             found = true;
             break;
         }
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index fb306de..cf758fc 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -130,11 +130,14 @@
 
 
 /* Split an existing block from the specified code offset into two */
-BasicBlock* MIRGraph::SplitBlock(unsigned int code_offset,
+BasicBlock* MIRGraph::SplitBlock(DexOffset code_offset,
                                  BasicBlock* orig_block, BasicBlock** immed_pred_block_p) {
+  DCHECK_GT(code_offset, orig_block->start_offset);
   MIR* insn = orig_block->first_mir_insn;
+  MIR* prev = NULL;
   while (insn) {
     if (insn->offset == code_offset) break;
+    prev = insn;
     insn = insn->next;
   }
   if (insn == NULL) {
@@ -156,39 +159,42 @@
 
   /* Handle the taken path */
   bottom_block->taken = orig_block->taken;
-  if (bottom_block->taken) {
-    orig_block->taken = NULL;
-    bottom_block->taken->predecessors->Delete(orig_block);
-    bottom_block->taken->predecessors->Insert(bottom_block);
+  if (bottom_block->taken != NullBasicBlockId) {
+    orig_block->taken = NullBasicBlockId;
+    BasicBlock* bb_taken = GetBasicBlock(bottom_block->taken);
+    bb_taken->predecessors->Delete(orig_block->id);
+    bb_taken->predecessors->Insert(bottom_block->id);
   }
 
   /* Handle the fallthrough path */
   bottom_block->fall_through = orig_block->fall_through;
-  orig_block->fall_through = bottom_block;
-  bottom_block->predecessors->Insert(orig_block);
-  if (bottom_block->fall_through) {
-    bottom_block->fall_through->predecessors->Delete(orig_block);
-    bottom_block->fall_through->predecessors->Insert(bottom_block);
+  orig_block->fall_through = bottom_block->id;
+  bottom_block->predecessors->Insert(orig_block->id);
+  if (bottom_block->fall_through != NullBasicBlockId) {
+    BasicBlock* bb_fall_through = GetBasicBlock(bottom_block->fall_through);
+    bb_fall_through->predecessors->Delete(orig_block->id);
+    bb_fall_through->predecessors->Insert(bottom_block->id);
   }
 
   /* Handle the successor list */
-  if (orig_block->successor_block_list.block_list_type != kNotUsed) {
-    bottom_block->successor_block_list = orig_block->successor_block_list;
-    orig_block->successor_block_list.block_list_type = kNotUsed;
-    GrowableArray<SuccessorBlockInfo*>::Iterator iterator(bottom_block->successor_block_list.blocks);
+  if (orig_block->successor_block_list_type != kNotUsed) {
+    bottom_block->successor_block_list_type = orig_block->successor_block_list_type;
+    bottom_block->successor_blocks = orig_block->successor_blocks;
+    orig_block->successor_block_list_type = kNotUsed;
+    orig_block->successor_blocks = NULL;
+    GrowableArray<SuccessorBlockInfo*>::Iterator iterator(bottom_block->successor_blocks);
     while (true) {
       SuccessorBlockInfo *successor_block_info = iterator.Next();
       if (successor_block_info == NULL) break;
-      BasicBlock *bb = successor_block_info->block;
-      bb->predecessors->Delete(orig_block);
-      bb->predecessors->Insert(bottom_block);
+      BasicBlock *bb = GetBasicBlock(successor_block_info->block);
+      bb->predecessors->Delete(orig_block->id);
+      bb->predecessors->Insert(bottom_block->id);
     }
   }
 
-  orig_block->last_mir_insn = insn->prev;
+  orig_block->last_mir_insn = prev;
+  prev->next = NULL;
 
-  insn->prev->next = NULL;
-  insn->prev = NULL;
   /*
    * Update the immediate predecessor block pointer so that outgoing edges
    * can be applied to the proper block.
@@ -225,7 +231,7 @@
  * (by the caller)
  * Utilizes a map for fast lookup of the typical cases.
  */
-BasicBlock* MIRGraph::FindBlock(unsigned int code_offset, bool split, bool create,
+BasicBlock* MIRGraph::FindBlock(DexOffset code_offset, bool split, bool create,
                                 BasicBlock** immed_pred_block_p) {
   if (code_offset >= cu_->code_item->insns_size_in_code_units_) {
     return NULL;
@@ -261,7 +267,7 @@
 /* Identify code range in try blocks and set up the empty catch blocks */
 void MIRGraph::ProcessTryCatchBlocks() {
   int tries_size = current_code_item_->tries_size_;
-  int offset;
+  DexOffset offset;
 
   if (tries_size == 0) {
     return;
@@ -270,8 +276,8 @@
   for (int i = 0; i < tries_size; i++) {
     const DexFile::TryItem* pTry =
         DexFile::GetTryItems(*current_code_item_, i);
-    int start_offset = pTry->start_addr_;
-    int end_offset = start_offset + pTry->insn_count_;
+    DexOffset start_offset = pTry->start_addr_;
+    DexOffset end_offset = start_offset + pTry->insn_count_;
     for (offset = start_offset; offset < end_offset; offset++) {
       try_block_addr_->SetBit(offset);
     }
@@ -292,10 +298,10 @@
 }
 
 /* Process instructions with the kBranch flag */
-BasicBlock* MIRGraph::ProcessCanBranch(BasicBlock* cur_block, MIR* insn, int cur_offset, int width,
-                                       int flags, const uint16_t* code_ptr,
+BasicBlock* MIRGraph::ProcessCanBranch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset,
+                                       int width, int flags, const uint16_t* code_ptr,
                                        const uint16_t* code_end) {
-  int target = cur_offset;
+  DexOffset target = cur_offset;
   switch (insn->dalvikInsn.opcode) {
     case Instruction::GOTO:
     case Instruction::GOTO_16:
@@ -326,8 +332,8 @@
   CountBranch(target);
   BasicBlock *taken_block = FindBlock(target, /* split */ true, /* create */ true,
                                       /* immed_pred_block_p */ &cur_block);
-  cur_block->taken = taken_block;
-  taken_block->predecessors->Insert(cur_block);
+  cur_block->taken = taken_block->id;
+  taken_block->predecessors->Insert(cur_block->id);
 
   /* Always terminate the current block for conditional branches */
   if (flags & Instruction::kContinue) {
@@ -349,8 +355,8 @@
                                              true,
                                              /* immed_pred_block_p */
                                              &cur_block);
-    cur_block->fall_through = fallthrough_block;
-    fallthrough_block->predecessors->Insert(cur_block);
+    cur_block->fall_through = fallthrough_block->id;
+    fallthrough_block->predecessors->Insert(cur_block->id);
   } else if (code_ptr < code_end) {
     FindBlock(cur_offset + width, /* split */ false, /* create */ true,
                 /* immed_pred_block_p */ NULL);
@@ -359,7 +365,7 @@
 }
 
 /* Process instructions with the kSwitch flag */
-void MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, int cur_offset, int width,
+void MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset, int width,
                                 int flags) {
   const uint16_t* switch_data =
       reinterpret_cast<const uint16_t*>(GetCurrentInsns() + cur_offset + insn->dalvikInsn.vB);
@@ -403,14 +409,13 @@
     first_key = 0;   // To make the compiler happy
   }
 
-  if (cur_block->successor_block_list.block_list_type != kNotUsed) {
+  if (cur_block->successor_block_list_type != kNotUsed) {
     LOG(FATAL) << "Successor block list already in use: "
-               << static_cast<int>(cur_block->successor_block_list.block_list_type);
+               << static_cast<int>(cur_block->successor_block_list_type);
   }
-  cur_block->successor_block_list.block_list_type =
-      (insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) ?
-      kPackedSwitch : kSparseSwitch;
-  cur_block->successor_block_list.blocks =
+  cur_block->successor_block_list_type =
+      (insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) ?  kPackedSwitch : kSparseSwitch;
+  cur_block->successor_blocks =
       new (arena_) GrowableArray<SuccessorBlockInfo*>(arena_, size, kGrowableArraySuccessorBlocks);
 
   for (i = 0; i < size; i++) {
@@ -419,24 +424,24 @@
     SuccessorBlockInfo *successor_block_info =
         static_cast<SuccessorBlockInfo*>(arena_->Alloc(sizeof(SuccessorBlockInfo),
                                                        ArenaAllocator::kAllocSuccessor));
-    successor_block_info->block = case_block;
+    successor_block_info->block = case_block->id;
     successor_block_info->key =
         (insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) ?
         first_key + i : keyTable[i];
-    cur_block->successor_block_list.blocks->Insert(successor_block_info);
-    case_block->predecessors->Insert(cur_block);
+    cur_block->successor_blocks->Insert(successor_block_info);
+    case_block->predecessors->Insert(cur_block->id);
   }
 
   /* Fall-through case */
   BasicBlock* fallthrough_block = FindBlock(cur_offset +  width, /* split */ false,
                                             /* create */ true, /* immed_pred_block_p */ NULL);
-  cur_block->fall_through = fallthrough_block;
-  fallthrough_block->predecessors->Insert(cur_block);
+  cur_block->fall_through = fallthrough_block->id;
+  fallthrough_block->predecessors->Insert(cur_block->id);
 }
 
 /* Process instructions with the kThrow flag */
-BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block, MIR* insn, int cur_offset, int width,
-                                      int flags, ArenaBitVector* try_block_addr,
+BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset,
+                                      int width, int flags, ArenaBitVector* try_block_addr,
                                       const uint16_t* code_ptr, const uint16_t* code_end) {
   bool in_try_block = try_block_addr->IsBitSet(cur_offset);
 
@@ -444,14 +449,14 @@
   if (in_try_block) {
     CatchHandlerIterator iterator(*current_code_item_, cur_offset);
 
-    if (cur_block->successor_block_list.block_list_type != kNotUsed) {
+    if (cur_block->successor_block_list_type != kNotUsed) {
       LOG(INFO) << PrettyMethod(cu_->method_idx, *cu_->dex_file);
       LOG(FATAL) << "Successor block list already in use: "
-                 << static_cast<int>(cur_block->successor_block_list.block_list_type);
+                 << static_cast<int>(cur_block->successor_block_list_type);
     }
 
-    cur_block->successor_block_list.block_list_type = kCatch;
-    cur_block->successor_block_list.blocks =
+    cur_block->successor_block_list_type = kCatch;
+    cur_block->successor_blocks =
         new (arena_) GrowableArray<SuccessorBlockInfo*>(arena_, 2, kGrowableArraySuccessorBlocks);
 
     for (; iterator.HasNext(); iterator.Next()) {
@@ -463,17 +468,17 @@
       }
       SuccessorBlockInfo *successor_block_info = reinterpret_cast<SuccessorBlockInfo*>
           (arena_->Alloc(sizeof(SuccessorBlockInfo), ArenaAllocator::kAllocSuccessor));
-      successor_block_info->block = catch_block;
+      successor_block_info->block = catch_block->id;
       successor_block_info->key = iterator.GetHandlerTypeIndex();
-      cur_block->successor_block_list.blocks->Insert(successor_block_info);
-      catch_block->predecessors->Insert(cur_block);
+      cur_block->successor_blocks->Insert(successor_block_info);
+      catch_block->predecessors->Insert(cur_block->id);
     }
   } else {
     BasicBlock *eh_block = NewMemBB(kExceptionHandling, num_blocks_++);
-    cur_block->taken = eh_block;
+    cur_block->taken = eh_block->id;
     block_list_.Insert(eh_block);
     eh_block->start_offset = cur_offset;
-    eh_block->predecessors->Insert(cur_block);
+    eh_block->predecessors->Insert(cur_block->id);
   }
 
   if (insn->dalvikInsn.opcode == Instruction::THROW) {
@@ -509,8 +514,8 @@
   BasicBlock *new_block = NewMemBB(kDalvikByteCode, num_blocks_++);
   block_list_.Insert(new_block);
   new_block->start_offset = insn->offset;
-  cur_block->fall_through = new_block;
-  new_block->predecessors->Insert(cur_block);
+  cur_block->fall_through = new_block->id;
+  new_block->predecessors->Insert(cur_block->id);
   MIR* new_insn = static_cast<MIR*>(arena_->Alloc(sizeof(MIR), ArenaAllocator::kAllocMIR));
   *new_insn = *insn;
   insn->dalvikInsn.opcode =
@@ -551,9 +556,14 @@
     DCHECK(entry_block_ == NULL);
     DCHECK(exit_block_ == NULL);
     DCHECK_EQ(num_blocks_, 0);
+    // Use id 0 to represent a null block.
+    BasicBlock* null_block = NewMemBB(kNullBlock, num_blocks_++);
+    DCHECK_EQ(null_block->id, NullBasicBlockId);
+    null_block->hidden = true;
+    block_list_.Insert(null_block);
     entry_block_ = NewMemBB(kEntryBlock, num_blocks_++);
-    exit_block_ = NewMemBB(kExitBlock, num_blocks_++);
     block_list_.Insert(entry_block_);
+    exit_block_ = NewMemBB(kExitBlock, num_blocks_++);
     block_list_.Insert(exit_block_);
     // TODO: deprecate all "cu->" fields; move what's left to wherever CompilationUnit is allocated.
     cu_->dex_file = &dex_file;
@@ -578,12 +588,12 @@
 
   /* Current block to record parsed instructions */
   BasicBlock *cur_block = NewMemBB(kDalvikByteCode, num_blocks_++);
-  DCHECK_EQ(current_offset_, 0);
+  DCHECK_EQ(current_offset_, 0U);
   cur_block->start_offset = current_offset_;
   block_list_.Insert(cur_block);
-  // FIXME: this needs to insert at the insert point rather than entry block.
-  entry_block_->fall_through = cur_block;
-  cur_block->predecessors->Insert(entry_block_);
+  // TODO: for inlining support, insert at the insert point rather than entry block.
+  entry_block_->fall_through = cur_block->id;
+  cur_block->predecessors->Insert(entry_block_->id);
 
     /* Identify code range in try blocks and set up the empty catch blocks */
   ProcessTryCatchBlocks();
@@ -648,8 +658,8 @@
         // It is a simple nop - treat normally.
         AppendMIR(cur_block, insn);
       } else {
-        DCHECK(cur_block->fall_through == NULL);
-        DCHECK(cur_block->taken == NULL);
+        DCHECK(cur_block->fall_through == NullBasicBlockId);
+        DCHECK(cur_block->taken == NullBasicBlockId);
         // Unreachable instruction, mark for no continuation.
         flags &= ~Instruction::kContinue;
       }
@@ -667,8 +677,8 @@
                                    width, flags, code_ptr, code_end);
     } else if (flags & Instruction::kReturn) {
       cur_block->terminated_by_return = true;
-      cur_block->fall_through = exit_block_;
-      exit_block_->predecessors->Insert(cur_block);
+      cur_block->fall_through = exit_block_->id;
+      exit_block_->predecessors->Insert(cur_block->id);
       /*
        * Terminate the current block if there are instructions
        * afterwards.
@@ -697,13 +707,13 @@
        * instruction is not an unconditional branch, connect them through
        * the fall-through link.
        */
-      DCHECK(cur_block->fall_through == NULL ||
-             cur_block->fall_through == next_block ||
-             cur_block->fall_through == exit_block_);
+      DCHECK(cur_block->fall_through == NullBasicBlockId ||
+             GetBasicBlock(cur_block->fall_through) == next_block ||
+             GetBasicBlock(cur_block->fall_through) == exit_block_);
 
-      if ((cur_block->fall_through == NULL) && (flags & Instruction::kContinue)) {
-        cur_block->fall_through = next_block;
-        next_block->predecessors->Insert(cur_block);
+      if ((cur_block->fall_through == NullBasicBlockId) && (flags & Instruction::kContinue)) {
+        cur_block->fall_through = next_block->id;
+        next_block->predecessors->Insert(cur_block->id);
       }
       cur_block = next_block;
     }
@@ -735,7 +745,7 @@
   std::string fname(PrettyMethod(cu_->method_idx, *cu_->dex_file));
   ReplaceSpecialChars(fname);
   fname = StringPrintf("%s%s%x.dot", dir_prefix, fname.c_str(),
-                      GetEntryBlock()->fall_through->start_offset);
+                      GetBasicBlock(GetEntryBlock()->fall_through)->start_offset);
   file = fopen(fname.c_str(), "w");
   if (file == NULL) {
     return;
@@ -782,31 +792,30 @@
 
     char block_name1[BLOCK_NAME_LEN], block_name2[BLOCK_NAME_LEN];
 
-    if (bb->taken) {
+    if (bb->taken != NullBasicBlockId) {
       GetBlockName(bb, block_name1);
-      GetBlockName(bb->taken, block_name2);
+      GetBlockName(GetBasicBlock(bb->taken), block_name2);
       fprintf(file, "  %s:s -> %s:n [style=dotted]\n",
               block_name1, block_name2);
     }
-    if (bb->fall_through) {
+    if (bb->fall_through != NullBasicBlockId) {
       GetBlockName(bb, block_name1);
-      GetBlockName(bb->fall_through, block_name2);
+      GetBlockName(GetBasicBlock(bb->fall_through), block_name2);
       fprintf(file, "  %s:s -> %s:n\n", block_name1, block_name2);
     }
 
-    if (bb->successor_block_list.block_list_type != kNotUsed) {
+    if (bb->successor_block_list_type != kNotUsed) {
       fprintf(file, "  succ%04x_%d [shape=%s,label = \"{ \\\n",
               bb->start_offset, bb->id,
-              (bb->successor_block_list.block_list_type == kCatch) ?
-               "Mrecord" : "record");
-      GrowableArray<SuccessorBlockInfo*>::Iterator iterator(bb->successor_block_list.blocks);
+              (bb->successor_block_list_type == kCatch) ?  "Mrecord" : "record");
+      GrowableArray<SuccessorBlockInfo*>::Iterator iterator(bb->successor_blocks);
       SuccessorBlockInfo *successor_block_info = iterator.Next();
 
       int succ_id = 0;
       while (true) {
         if (successor_block_info == NULL) break;
 
-        BasicBlock *dest_block = successor_block_info->block;
+        BasicBlock *dest_block = GetBasicBlock(successor_block_info->block);
         SuccessorBlockInfo *next_successor_block_info = iterator.Next();
 
         fprintf(file, "    {<f%d> %04x: %04x\\l}%s\\\n",
@@ -823,16 +832,16 @@
       fprintf(file, "  %s:s -> succ%04x_%d:n [style=dashed]\n",
               block_name1, bb->start_offset, bb->id);
 
-      if (bb->successor_block_list.block_list_type == kPackedSwitch ||
-          bb->successor_block_list.block_list_type == kSparseSwitch) {
-        GrowableArray<SuccessorBlockInfo*>::Iterator iter(bb->successor_block_list.blocks);
+      if (bb->successor_block_list_type == kPackedSwitch ||
+          bb->successor_block_list_type == kSparseSwitch) {
+        GrowableArray<SuccessorBlockInfo*>::Iterator iter(bb->successor_blocks);
 
         succ_id = 0;
         while (true) {
           SuccessorBlockInfo *successor_block_info = iter.Next();
           if (successor_block_info == NULL) break;
 
-          BasicBlock *dest_block = successor_block_info->block;
+          BasicBlock* dest_block = GetBasicBlock(successor_block_info->block);
 
           GetBlockName(dest_block, block_name2);
           fprintf(file, "  succ%04x_%d:f%d:e -> %s:n\n", bb->start_offset,
@@ -848,7 +857,7 @@
       fprintf(file, "  cfg%s [label=\"%s\", shape=none];\n",
               block_name1, block_name1);
       if (bb->i_dom) {
-        GetBlockName(bb->i_dom, block_name2);
+        GetBlockName(GetBasicBlock(bb->i_dom), block_name2);
         fprintf(file, "  cfg%s:s -> cfg%s:n\n\n", block_name2, block_name1);
       }
     }
@@ -862,10 +871,9 @@
   if (bb->first_mir_insn == NULL) {
     DCHECK(bb->last_mir_insn == NULL);
     bb->last_mir_insn = bb->first_mir_insn = mir;
-    mir->prev = mir->next = NULL;
+    mir->next = NULL;
   } else {
     bb->last_mir_insn->next = mir;
-    mir->prev = bb->last_mir_insn;
     mir->next = NULL;
     bb->last_mir_insn = mir;
   }
@@ -876,25 +884,19 @@
   if (bb->first_mir_insn == NULL) {
     DCHECK(bb->last_mir_insn == NULL);
     bb->last_mir_insn = bb->first_mir_insn = mir;
-    mir->prev = mir->next = NULL;
+    mir->next = NULL;
   } else {
-    bb->first_mir_insn->prev = mir;
     mir->next = bb->first_mir_insn;
-    mir->prev = NULL;
     bb->first_mir_insn = mir;
   }
 }
 
 /* Insert a MIR instruction after the specified MIR */
 void MIRGraph::InsertMIRAfter(BasicBlock* bb, MIR* current_mir, MIR* new_mir) {
-  new_mir->prev = current_mir;
   new_mir->next = current_mir->next;
   current_mir->next = new_mir;
 
-  if (new_mir->next) {
-    /* Is not the last MIR in the block */
-    new_mir->next->prev = new_mir;
-  } else {
+  if (bb->last_mir_insn == current_mir) {
     /* Is the last MIR in the block */
     bb->last_mir_insn = new_mir;
   }
@@ -924,8 +926,9 @@
     opcode = insn.opcode;
   } else if (opcode == kMirOpNop) {
     str.append("[");
-    insn.opcode = mir->meta.original_opcode;
-    opcode = mir->meta.original_opcode;
+    // Recover original opcode.
+    insn.opcode = Instruction::At(current_code_item_->insns_ + mir->offset)->Opcode();
+    opcode = insn.opcode;
     nop = true;
   }
 
@@ -938,7 +941,7 @@
   }
 
   if (opcode == kMirOpPhi) {
-    int* incoming = reinterpret_cast<int*>(insn.vB);
+    BasicBlockId* incoming = mir->meta.phi_incoming;
     str.append(StringPrintf(" %s = (%s",
                GetSSANameWithConst(ssa_rep->defs[0], true).c_str(),
                GetSSANameWithConst(ssa_rep->uses[0], true).c_str()));
@@ -1088,7 +1091,7 @@
 }
 
 const char* MIRGraph::GetShortyFromTargetIdx(int target_idx) {
-  // FIXME: use current code unit for inline support.
+  // TODO: for inlining support, use current code unit.
   const DexFile::MethodId& method_id = cu_->dex_file->GetMethodId(target_idx);
   return cu_->dex_file->GetShorty(method_id.proto_idx_);
 }
@@ -1118,13 +1121,13 @@
         bb->start_offset,
         bb->last_mir_insn ? bb->last_mir_insn->offset : bb->start_offset,
         bb->last_mir_insn ? "" : " empty");
-    if (bb->taken) {
-      LOG(INFO) << "  Taken branch: block " << bb->taken->id
-                << "(0x" << std::hex << bb->taken->start_offset << ")";
+    if (bb->taken != NullBasicBlockId) {
+      LOG(INFO) << "  Taken branch: block " << bb->taken
+                << "(0x" << std::hex << GetBasicBlock(bb->taken)->start_offset << ")";
     }
-    if (bb->fall_through) {
-      LOG(INFO) << "  Fallthrough : block " << bb->fall_through->id
-                << " (0x" << std::hex << bb->fall_through->start_offset << ")";
+    if (bb->fall_through != NullBasicBlockId) {
+      LOG(INFO) << "  Fallthrough : block " << bb->fall_through
+                << " (0x" << std::hex << GetBasicBlock(bb->fall_through)->start_offset << ")";
     }
   }
 }
@@ -1144,7 +1147,6 @@
     info->result.location = kLocInvalid;
   } else {
     info->result = GetRawDest(move_result_mir);
-    move_result_mir->meta.original_opcode = move_result_mir->dalvikInsn.opcode;
     move_result_mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
   }
   info->num_arg_words = mir->ssa_rep->num_uses;
@@ -1168,10 +1170,10 @@
   bb->block_type = block_type;
   bb->id = block_id;
   // TUNING: better estimate of the exit block predecessors?
-  bb->predecessors = new (arena_) GrowableArray<BasicBlock*>(arena_,
+  bb->predecessors = new (arena_) GrowableArray<BasicBlockId>(arena_,
                                                              (block_type == kExitBlock) ? 2048 : 2,
                                                              kGrowableArrayPredecessors);
-  bb->successor_block_list.block_list_type = kNotUsed;
+  bb->successor_block_list_type = kNotUsed;
   block_id_map_.Put(block_id, block_id);
   return bb;
 }
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index 5d01489..8dda7c4 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -183,6 +183,9 @@
 
 #define BLOCK_NAME_LEN 80
 
+typedef uint16_t BasicBlockId;
+static const BasicBlockId NullBasicBlockId = 0;
+
 /*
  * In general, vreg/sreg describe Dalvik registers that originated with dx.  However,
  * it is useful to have compiler-generated temporary registers and have them treated
@@ -190,15 +193,15 @@
  * name of compiler-introduced temporaries.
  */
 struct CompilerTemp {
-  int s_reg;
+  int32_t s_reg;
 };
 
 // When debug option enabled, records effectiveness of null and range check elimination.
 struct Checkstats {
-  int null_checks;
-  int null_checks_eliminated;
-  int range_checks;
-  int range_checks_eliminated;
+  int32_t null_checks;
+  int32_t null_checks_eliminated;
+  int32_t range_checks;
+  int32_t range_checks_eliminated;
 };
 
 // Dataflow attributes of a basic block.
@@ -207,7 +210,7 @@
   ArenaBitVector* def_v;
   ArenaBitVector* live_in_v;
   ArenaBitVector* phi_v;
-  int* vreg_to_ssa_map;
+  int32_t* vreg_to_ssa_map;
   ArenaBitVector* ending_null_check_v;
 };
 
@@ -220,11 +223,11 @@
  * we may want to revisit in the future.
  */
 struct SSARepresentation {
-  int num_uses;
-  int* uses;
+  int16_t num_uses;
+  int16_t num_defs;
+  int32_t* uses;
   bool* fp_use;
-  int num_defs;
-  int* defs;
+  int32_t* defs;
   bool* fp_def;
 };
 
@@ -233,51 +236,53 @@
  * wrapper around a Dalvik byte code.
  */
 struct MIR {
+  /*
+   * TODO: remove embedded DecodedInstruction to save space, keeping only opcode.  Recover
+   * additional fields on as-needed basis.  Question: how to support MIR Pseudo-ops; probably
+   * need to carry aux data pointer.
+   */
   DecodedInstruction dalvikInsn;
-  uint32_t width;                 // NOTE: only need 16 bits for width.
-  unsigned int offset;
-  int m_unit_index;               // From which method was this MIR included
-  MIR* prev;
+  uint16_t width;                 // Note: width can include switch table or fill array data.
+  NarrowDexOffset offset;         // Offset of the instruction in code units.
+  uint16_t optimization_flags;
+  int16_t m_unit_index;           // From which method was this MIR included
   MIR* next;
   SSARepresentation* ssa_rep;
-  int optimization_flags;
   union {
+    // Incoming edges for phi node.
+    BasicBlockId* phi_incoming;
     // Establish link between two halves of throwing instructions.
     MIR* throw_insn;
-    // Saved opcode for NOP'd MIRs
-    Instruction::Code original_opcode;
   } meta;
 };
 
 struct SuccessorBlockInfo;
 
 struct BasicBlock {
-  int id;
-  int dfs_id;
-  bool visited;
-  bool hidden;
-  bool catch_entry;
-  bool explicit_throw;
-  bool conditional_branch;
-  bool terminated_by_return;        // Block ends with a Dalvik return opcode.
-  bool dominates_return;            // Is a member of return extended basic block.
-  uint16_t start_offset;
+  BasicBlockId id;
+  BasicBlockId dfs_id;
+  NarrowDexOffset start_offset;     // Offset in code units.
+  BasicBlockId fall_through;
+  BasicBlockId taken;
+  BasicBlockId i_dom;               // Immediate dominator.
   uint16_t nesting_depth;
-  BBType block_type;
+  BBType block_type:4;
+  BlockListType successor_block_list_type:4;
+  bool visited:1;
+  bool hidden:1;
+  bool catch_entry:1;
+  bool explicit_throw:1;
+  bool conditional_branch:1;
+  bool terminated_by_return:1;        // Block ends with a Dalvik return opcode.
+  bool dominates_return:1;            // Is a member of return extended basic block.
   MIR* first_mir_insn;
   MIR* last_mir_insn;
-  BasicBlock* fall_through;
-  BasicBlock* taken;
-  BasicBlock* i_dom;                // Immediate dominator.
   BasicBlockDataFlow* data_flow_info;
-  GrowableArray<BasicBlock*>* predecessors;
   ArenaBitVector* dominators;
   ArenaBitVector* i_dominated;      // Set nodes being immediately dominated.
   ArenaBitVector* dom_frontier;     // Dominance frontier.
-  struct {                          // For one-to-many successors like.
-    BlockListType block_list_type;  // switch and exception handling.
-    GrowableArray<SuccessorBlockInfo*>* blocks;
-  } successor_block_list;
+  GrowableArray<BasicBlockId>* predecessors;
+  GrowableArray<SuccessorBlockInfo*>* successor_blocks;
 };
 
 /*
@@ -285,9 +290,8 @@
  * "SuccessorBlockInfo".  For catch blocks, key is type index for the exception.  For swtich
  * blocks, key is the case value.
  */
-// TODO: make class with placement new.
 struct SuccessorBlockInfo {
-  BasicBlock* block;
+  BasicBlockId block;
   int key;
 };
 
@@ -296,6 +300,15 @@
  * the type of an SSA name (and, can also be used by code generators to record where the
  * value is located (i.e. - physical register, frame, spill, etc.).  For each SSA name (SReg)
  * there is a RegLocation.
+ * A note on SSA names:
+ *   o SSA names for Dalvik vRegs v0..vN will be assigned 0..N.  These represent the "vN_0"
+ *     names.  Negative SSA names represent special values not present in the Dalvik byte code.
+ *     For example, SSA name -1 represents an invalid SSA name, and SSA name -2 represents the
+ *     Method pointer.  SSA names < -2 are reserved for future use.
+ *   o The vN_0 names for non-argument Dalvik vRegs should in practice never be used (as they
+ *     would represent the read of an undefined local variable).  The first definition of the
+ *     underlying Dalvik vReg will result in a vN_1 name.
+ *
  * FIXME: The orig_sreg field was added as a workaround for llvm bitcode generation.  With
  * the latest restructuring, we should be able to remove it and rely on s_reg_low throughout.
  */
@@ -311,9 +324,9 @@
   unsigned home:1;      // Does this represent the home location?
   uint8_t low_reg;      // First physical register.
   uint8_t high_reg;     // 2nd physical register (if wide).
-  int32_t s_reg_low;    // SSA name for low Dalvik word.
-  int32_t orig_sreg;    // TODO: remove after Bitcode gen complete
-                        // and consolodate usage w/ s_reg_low.
+  int16_t s_reg_low;    // SSA name for low Dalvik word.
+  int16_t orig_sreg;    // TODO: remove after Bitcode gen complete
+                        // and consolidate usage w/ s_reg_low.
 };
 
 /*
@@ -334,7 +347,7 @@
   RegLocation target;    // Target of following move_result.
   bool skip_this;
   bool is_range;
-  int offset;            // Dalvik offset.
+  DexOffset offset;      // Offset in code units.
 };
 
 
@@ -361,7 +374,7 @@
                     uint32_t method_idx, jobject class_loader, const DexFile& dex_file);
 
   /* Find existing block */
-  BasicBlock* FindBlock(unsigned int code_offset) {
+  BasicBlock* FindBlock(DexOffset code_offset) {
     return FindBlock(code_offset, false, false, NULL);
   }
 
@@ -394,7 +407,7 @@
   }
 
   BasicBlock* GetBasicBlock(int block_id) const {
-    return block_list_.Get(block_id);
+    return (block_id == NullBasicBlockId) ? NULL : block_list_.Get(block_id);
   }
 
   size_t GetBasicBlockListCount() const {
@@ -405,15 +418,15 @@
     return &block_list_;
   }
 
-  GrowableArray<int>* GetDfsOrder() {
+  GrowableArray<BasicBlockId>* GetDfsOrder() {
     return dfs_order_;
   }
 
-  GrowableArray<int>* GetDfsPostOrder() {
+  GrowableArray<BasicBlockId>* GetDfsPostOrder() {
     return dfs_post_order_;
   }
 
-  GrowableArray<int>* GetDomPostOrder() {
+  GrowableArray<BasicBlockId>* GetDomPostOrder() {
     return dom_post_order_traversal_;
   }
 
@@ -477,6 +490,12 @@
   }
 
   void SetNumSSARegs(int new_num) {
+     /*
+      * TODO: It's theoretically possible to exceed 32767, though any cases which did
+      * would be filtered out with current settings.  When orig_sreg field is removed
+      * from RegLocation, expand s_reg_low to handle all possible cases and remove DCHECK().
+      */
+    DCHECK_EQ(new_num, static_cast<int16_t>(new_num));
     num_ssa_regs_ = new_num;
   }
 
@@ -561,15 +580,16 @@
     return special_case_;
   }
 
-  bool IsBackedge(BasicBlock* branch_bb, BasicBlock* target_bb) {
-    return ((target_bb != NULL) && (target_bb->start_offset <= branch_bb->start_offset));
+  bool IsBackedge(BasicBlock* branch_bb, BasicBlockId target_bb_id) {
+    return ((target_bb_id != NullBasicBlockId) &&
+            (GetBasicBlock(target_bb_id)->start_offset <= branch_bb->start_offset));
   }
 
   bool IsBackwardsBranch(BasicBlock* branch_bb) {
     return IsBackedge(branch_bb, branch_bb->taken) || IsBackedge(branch_bb, branch_bb->fall_through);
   }
 
-  void CountBranch(int target_offset) {
+  void CountBranch(DexOffset target_offset) {
     if (target_offset <= current_offset_) {
       backward_branches_++;
     } else {
@@ -640,6 +660,9 @@
   void DumpMIRGraph();
   CallInfo* NewMemCallInfo(BasicBlock* bb, MIR* mir, InvokeType type, bool is_range);
   BasicBlock* NewMemBB(BBType block_type, int block_id);
+  MIR* AdvanceMIR(BasicBlock** p_bb, MIR* mir);
+  BasicBlock* NextDominatedBlock(BasicBlock* bb);
+  bool LayoutBlocks(BasicBlock* bb);
 
   /*
    * IsDebugBuild sanity check: keep track of the Dex PCs for catch entries so that later on
@@ -668,15 +691,16 @@
   bool InvokeUsesMethodStar(MIR* mir);
   int ParseInsn(const uint16_t* code_ptr, DecodedInstruction* decoded_instruction);
   bool ContentIsInsn(const uint16_t* code_ptr);
-  BasicBlock* SplitBlock(unsigned int code_offset, BasicBlock* orig_block,
+  BasicBlock* SplitBlock(DexOffset code_offset, BasicBlock* orig_block,
                          BasicBlock** immed_pred_block_p);
-  BasicBlock* FindBlock(unsigned int code_offset, bool split, bool create,
+  BasicBlock* FindBlock(DexOffset code_offset, bool split, bool create,
                         BasicBlock** immed_pred_block_p);
   void ProcessTryCatchBlocks();
-  BasicBlock* ProcessCanBranch(BasicBlock* cur_block, MIR* insn, int cur_offset, int width,
+  BasicBlock* ProcessCanBranch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset, int width,
                                int flags, const uint16_t* code_ptr, const uint16_t* code_end);
-  void ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, int cur_offset, int width, int flags);
-  BasicBlock* ProcessCanThrow(BasicBlock* cur_block, MIR* insn, int cur_offset, int width,
+  void ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset, int width,
+                        int flags);
+  BasicBlock* ProcessCanThrow(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset, int width,
                               int flags, ArenaBitVector* try_block_addr, const uint16_t* code_ptr,
                               const uint16_t* code_end);
   int AddNewSReg(int v_reg);
@@ -732,9 +756,9 @@
   GrowableArray<uint32_t> use_counts_;      // Weighted by nesting depth
   GrowableArray<uint32_t> raw_use_counts_;  // Not weighted
   unsigned int num_reachable_blocks_;
-  GrowableArray<int>* dfs_order_;
-  GrowableArray<int>* dfs_post_order_;
-  GrowableArray<int>* dom_post_order_traversal_;
+  GrowableArray<BasicBlockId>* dfs_order_;
+  GrowableArray<BasicBlockId>* dfs_post_order_;
+  GrowableArray<BasicBlockId>* dom_post_order_traversal_;
   int* i_dom_list_;
   ArenaBitVector** def_block_matrix_;    // num_dalvik_register x num_blocks.
   ArenaBitVector* temp_block_v_;
@@ -752,11 +776,11 @@
   typedef std::pair<int, int> MIRLocation;       // Insert point, (m_unit_ index, offset)
   std::vector<MIRLocation> method_stack_;        // Include stack
   int current_method_;
-  int current_offset_;                           // Dex offset in code units
+  DexOffset current_offset_;                     // Offset in code units
   int def_count_;                                // Used to estimate size of ssa name storage.
   int* opcode_count_;                            // Dex opcode coverage stats.
   int num_ssa_regs_;                             // Number of names following SSA transformation.
-  std::vector<BasicBlock*> extended_basic_blocks_;  // Heads of block "traces".
+  std::vector<BasicBlockId> extended_basic_blocks_;  // Heads of block "traces".
   int method_sreg_;
   unsigned int attributes_;
   Checkstats* checkstats_;
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 05e428e..3cd158f 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -103,12 +103,12 @@
 }
 
 /* Advance to next strictly dominated MIR node in an extended basic block */
-static MIR* AdvanceMIR(BasicBlock** p_bb, MIR* mir) {
+MIR* MIRGraph::AdvanceMIR(BasicBlock** p_bb, MIR* mir) {
   BasicBlock* bb = *p_bb;
   if (mir != NULL) {
     mir = mir->next;
     if (mir == NULL) {
-      bb = bb->fall_through;
+      bb = GetBasicBlock(bb->fall_through);
       if ((bb == NULL) || Predecessors(bb) != 1) {
         mir = NULL;
       } else {
@@ -147,19 +147,21 @@
   return mir;
 }
 
-static BasicBlock* NextDominatedBlock(BasicBlock* bb) {
+BasicBlock* MIRGraph::NextDominatedBlock(BasicBlock* bb) {
   if (bb->block_type == kDead) {
     return NULL;
   }
   DCHECK((bb->block_type == kEntryBlock) || (bb->block_type == kDalvikByteCode)
       || (bb->block_type == kExitBlock));
-  if (((bb->taken != NULL) && (bb->fall_through == NULL)) &&
-      ((bb->taken->block_type == kDalvikByteCode) || (bb->taken->block_type == kExitBlock))) {
+  BasicBlock* bb_taken = GetBasicBlock(bb->taken);
+  BasicBlock* bb_fall_through = GetBasicBlock(bb->fall_through);
+  if (((bb_taken != NULL) && (bb_fall_through == NULL)) &&
+      ((bb_taken->block_type == kDalvikByteCode) || (bb_taken->block_type == kExitBlock))) {
     // Follow simple unconditional branches.
-    bb = bb->taken;
+    bb = bb_taken;
   } else {
     // Follow simple fallthrough
-    bb = (bb->taken != NULL) ? NULL : bb->fall_through;
+    bb = (bb_taken != NULL) ? NULL : bb_fall_through;
   }
   if (bb == NULL || (Predecessors(bb) != 1)) {
     return NULL;
@@ -311,11 +313,13 @@
         case Instruction::IF_GTZ:
         case Instruction::IF_LEZ:
           // If we've got a backwards branch to return, no need to suspend check.
-          if ((IsBackedge(bb, bb->taken) && bb->taken->dominates_return) ||
-              (IsBackedge(bb, bb->fall_through) && bb->fall_through->dominates_return)) {
+          if ((IsBackedge(bb, bb->taken) && GetBasicBlock(bb->taken)->dominates_return) ||
+              (IsBackedge(bb, bb->fall_through) &&
+                          GetBasicBlock(bb->fall_through)->dominates_return)) {
             mir->optimization_flags |= MIR_IGNORE_SUSPEND_CHECK;
             if (cu_->verbose) {
-              LOG(INFO) << "Suppressed suspend check on branch to return at 0x" << std::hex << mir->offset;
+              LOG(INFO) << "Suppressed suspend check on branch to return at 0x" << std::hex
+                        << mir->offset;
             }
           }
           break;
@@ -328,15 +332,15 @@
       if (!(cu_->compiler_backend == kPortable) && (cu_->instruction_set == kThumb2) &&
           ((mir->dalvikInsn.opcode == Instruction::IF_EQZ) ||
           (mir->dalvikInsn.opcode == Instruction::IF_NEZ))) {
-        BasicBlock* ft = bb->fall_through;
+        BasicBlock* ft = GetBasicBlock(bb->fall_through);
         DCHECK(ft != NULL);
-        BasicBlock* ft_ft = ft->fall_through;
-        BasicBlock* ft_tk = ft->taken;
+        BasicBlock* ft_ft = GetBasicBlock(ft->fall_through);
+        BasicBlock* ft_tk = GetBasicBlock(ft->taken);
 
-        BasicBlock* tk = bb->taken;
+        BasicBlock* tk = GetBasicBlock(bb->taken);
         DCHECK(tk != NULL);
-        BasicBlock* tk_ft = tk->fall_through;
-        BasicBlock* tk_tk = tk->taken;
+        BasicBlock* tk_ft = GetBasicBlock(tk->fall_through);
+        BasicBlock* tk_tk = GetBasicBlock(tk->taken);
 
         /*
          * In the select pattern, the taken edge goes to a block that unconditionally
@@ -434,7 +438,7 @@
                 int dead_def = if_false->ssa_rep->defs[0];
                 int live_def = if_true->ssa_rep->defs[0];
                 mir->ssa_rep->defs[0] = live_def;
-                int* incoming = reinterpret_cast<int*>(phi->dalvikInsn.vB);
+                BasicBlockId* incoming = phi->meta.phi_incoming;
                 for (int i = 0; i < phi->ssa_rep->num_uses; i++) {
                   if (phi->ssa_rep->uses[i] == live_def) {
                     incoming[i] = bb->id;
@@ -449,7 +453,7 @@
                 }
               }
               phi->ssa_rep->num_uses--;
-              bb->taken = NULL;
+              bb->taken = NullBasicBlockId;
               tk->block_type = kDead;
               for (MIR* tmir = ft->first_mir_insn; tmir != NULL; tmir = tmir->next) {
                 tmir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
@@ -500,7 +504,7 @@
 }
 
 /* Try to make common case the fallthrough path */
-static bool LayoutBlocks(struct BasicBlock* bb) {
+bool MIRGraph::LayoutBlocks(BasicBlock* bb) {
   // TODO: For now, just looking for direct throws.  Consider generalizing for profile feedback
   if (!bb->explicit_throw) {
     return false;
@@ -511,13 +515,13 @@
     if ((walker->block_type == kEntryBlock) || (Predecessors(walker) != 1)) {
       break;
     }
-    BasicBlock* prev = walker->predecessors->Get(0);
+    BasicBlock* prev = GetBasicBlock(walker->predecessors->Get(0));
     if (prev->conditional_branch) {
-      if (prev->fall_through == walker) {
+      if (GetBasicBlock(prev->fall_through) == walker) {
         // Already done - return
         break;
       }
-      DCHECK_EQ(walker, prev->taken);
+      DCHECK_EQ(walker, GetBasicBlock(prev->taken));
       // Got one.  Flip it and exit
       Instruction::Code opcode = prev->last_mir_insn->dalvikInsn.opcode;
       switch (opcode) {
@@ -536,7 +540,7 @@
         default: LOG(FATAL) << "Unexpected opcode " << opcode;
       }
       prev->last_mir_insn->dalvikInsn.opcode = opcode;
-      BasicBlock* t_bb = prev->taken;
+      BasicBlockId t_bb = prev->taken;
       prev->taken = prev->fall_through;
       prev->fall_through = t_bb;
       break;
@@ -556,8 +560,9 @@
         || (bb->block_type == kExceptionHandling)
         || (bb->block_type == kExitBlock)
         || (bb->block_type == kDead)
-        || ((bb->taken == NULL) || (bb->taken->block_type != kExceptionHandling))
-        || (bb->successor_block_list.block_list_type != kNotUsed)
+        || (bb->taken == NullBasicBlockId)
+        || (GetBasicBlock(bb->taken)->block_type != kExceptionHandling)
+        || (bb->successor_block_list_type != kNotUsed)
         || (static_cast<int>(bb->last_mir_insn->dalvikInsn.opcode) != kMirOpCheck)) {
       break;
     }
@@ -578,19 +583,18 @@
       break;
     }
     // OK - got one.  Combine
-    BasicBlock* bb_next = bb->fall_through;
+    BasicBlock* bb_next = GetBasicBlock(bb->fall_through);
     DCHECK(!bb_next->catch_entry);
     DCHECK_EQ(Predecessors(bb_next), 1U);
-    MIR* t_mir = bb->last_mir_insn->prev;
     // Overwrite the kOpCheck insn with the paired opcode
     DCHECK_EQ(bb_next->first_mir_insn, throw_insn);
     *bb->last_mir_insn = *throw_insn;
-    bb->last_mir_insn->prev = t_mir;
     // Use the successor info from the next block
-    bb->successor_block_list = bb_next->successor_block_list;
+    bb->successor_block_list_type = bb_next->successor_block_list_type;
+    bb->successor_blocks = bb_next->successor_blocks;
     // Use the ending block linkage from the next block
     bb->fall_through = bb_next->fall_through;
-    bb->taken->block_type = kDead;  // Kill the unused exception block
+    GetBasicBlock(bb->taken)->block_type = kDead;  // Kill the unused exception block
     bb->taken = bb_next->taken;
     // Include the rest of the instructions
     bb->last_mir_insn = bb_next->last_mir_insn;
@@ -631,20 +635,20 @@
       temp_ssa_register_v_->SetBit(this_reg);
     }
   } else if (bb->predecessors->Size() == 1) {
-    BasicBlock* pred_bb = bb->predecessors->Get(0);
+    BasicBlock* pred_bb = GetBasicBlock(bb->predecessors->Get(0));
     temp_ssa_register_v_->Copy(pred_bb->data_flow_info->ending_null_check_v);
     if (pred_bb->block_type == kDalvikByteCode) {
       // Check to see if predecessor had an explicit null-check.
       MIR* last_insn = pred_bb->last_mir_insn;
       Instruction::Code last_opcode = last_insn->dalvikInsn.opcode;
       if (last_opcode == Instruction::IF_EQZ) {
-        if (pred_bb->fall_through == bb) {
+        if (pred_bb->fall_through == bb->id) {
          // In the fall-through of a block following an IF_EQZ, set the vA of the IF_EQZ to show
          // that it can't be null.
           temp_ssa_register_v_->SetBit(last_insn->ssa_rep->uses[0]);
         }
       } else if (last_opcode == Instruction::IF_NEZ) {
-        if (pred_bb->taken == bb) {
+        if (pred_bb->taken == bb->id) {
          // In the taken block following an IF_NEZ, set the vA of the IF_NEZ to show that it
          // can't be null.
           temp_ssa_register_v_->SetBit(last_insn->ssa_rep->uses[0]);
@@ -653,12 +657,12 @@
     }
   } else {
     // Starting state is intersection of all incoming arcs
-    GrowableArray<BasicBlock*>::Iterator iter(bb->predecessors);
-    BasicBlock* pred_bb = iter.Next();
+    GrowableArray<BasicBlockId>::Iterator iter(bb->predecessors);
+    BasicBlock* pred_bb = GetBasicBlock(iter.Next());
     DCHECK(pred_bb != NULL);
     temp_ssa_register_v_->Copy(pred_bb->data_flow_info->ending_null_check_v);
     while (true) {
-      pred_bb = iter.Next();
+      pred_bb = GetBasicBlock(iter.Next());
       if (!pred_bb) break;
       if ((pred_bb->data_flow_info == NULL) ||
           (pred_bb->data_flow_info->ending_null_check_v == NULL)) {
@@ -691,9 +695,9 @@
       } else {
         if (next_mir) {
           LOG(WARNING) << "Unexpected opcode following new: " << next_mir->dalvikInsn.opcode;
-        } else if (bb->fall_through) {
+        } else if (bb->fall_through != NullBasicBlockId) {
           // Look in next basic block
-          struct BasicBlock* next_bb = bb->fall_through;
+          struct BasicBlock* next_bb = GetBasicBlock(bb->fall_through);
           for (MIR* tmir = next_bb->first_mir_insn; tmir != NULL;
            tmir = tmir->next) {
             if (static_cast<int>(tmir->dalvikInsn.opcode) >= static_cast<int>(kMirOpFirst)) {
@@ -834,7 +838,7 @@
   }
   // Must be head of extended basic block.
   BasicBlock* start_bb = bb;
-  extended_basic_blocks_.push_back(bb);
+  extended_basic_blocks_.push_back(bb->id);
   bool terminated_by_return = false;
   // Visit blocks strictly dominated by this head.
   while (bb != NULL) {
@@ -864,7 +868,7 @@
     }
     // Perform extended basic block optimizations.
     for (unsigned int i = 0; i < extended_basic_blocks_.size(); i++) {
-      BasicBlockOpt(extended_basic_blocks_[i]);
+      BasicBlockOpt(GetBasicBlock(extended_basic_blocks_[i]));
     }
   }
   if (cu_->enable_debug & (1 << kDebugDumpCFG)) {
diff --git a/compiler/dex/portable/mir_to_gbc.cc b/compiler/dex/portable/mir_to_gbc.cc
index df10f7e..963cbeb 100644
--- a/compiler/dex/portable/mir_to_gbc.cc
+++ b/compiler/dex/portable/mir_to_gbc.cc
@@ -132,7 +132,7 @@
   ::llvm::Value* value = GetLLVMValue(rl_src.orig_sreg);
 
   ::llvm::SwitchInst* sw =
-    irb_->CreateSwitch(value, GetLLVMBlock(bb->fall_through->id),
+    irb_->CreateSwitch(value, GetLLVMBlock(bb->fall_through),
                              payload->case_count);
 
   for (uint16_t i = 0; i < payload->case_count; ++i) {
@@ -143,8 +143,8 @@
   ::llvm::MDNode* switch_node =
       ::llvm::MDNode::get(*context_, irb_->getInt32(table_offset));
   sw->setMetadata("SwitchTable", switch_node);
-  bb->taken = NULL;
-  bb->fall_through = NULL;
+  bb->taken = NullBasicBlockId;
+  bb->fall_through = NullBasicBlockId;
 }
 
 void MirConverter::ConvertSparseSwitch(BasicBlock* bb,
@@ -159,7 +159,7 @@
   ::llvm::Value* value = GetLLVMValue(rl_src.orig_sreg);
 
   ::llvm::SwitchInst* sw =
-    irb_->CreateSwitch(value, GetLLVMBlock(bb->fall_through->id),
+    irb_->CreateSwitch(value, GetLLVMBlock(bb->fall_through),
                              payload->case_count);
 
   for (size_t i = 0; i < payload->case_count; ++i) {
@@ -170,8 +170,8 @@
   ::llvm::MDNode* switch_node =
       ::llvm::MDNode::get(*context_, irb_->getInt32(table_offset));
   sw->setMetadata("SwitchTable", switch_node);
-  bb->taken = NULL;
-  bb->fall_through = NULL;
+  bb->taken = NullBasicBlockId;
+  bb->fall_through = NullBasicBlockId;
 }
 
 void MirConverter::ConvertSget(int32_t field_index,
@@ -311,22 +311,22 @@
 
 void MirConverter::ConvertCompareAndBranch(BasicBlock* bb, MIR* mir,
                                     ConditionCode cc, RegLocation rl_src1, RegLocation rl_src2) {
-  if (bb->taken->start_offset <= mir->offset) {
+  if (mir_graph_->GetBasicBlock(bb->taken)->start_offset <= mir->offset) {
     EmitSuspendCheck();
   }
   ::llvm::Value* src1 = GetLLVMValue(rl_src1.orig_sreg);
   ::llvm::Value* src2 = GetLLVMValue(rl_src2.orig_sreg);
   ::llvm::Value* cond_value = ConvertCompare(cc, src1, src2);
   cond_value->setName(StringPrintf("t%d", temp_name_++));
-  irb_->CreateCondBr(cond_value, GetLLVMBlock(bb->taken->id),
-                           GetLLVMBlock(bb->fall_through->id));
+  irb_->CreateCondBr(cond_value, GetLLVMBlock(bb->taken),
+                           GetLLVMBlock(bb->fall_through));
   // Don't redo the fallthrough branch in the BB driver
-  bb->fall_through = NULL;
+  bb->fall_through = NullBasicBlockId;
 }
 
 void MirConverter::ConvertCompareZeroAndBranch(BasicBlock* bb,
                                         MIR* mir, ConditionCode cc, RegLocation rl_src1) {
-  if (bb->taken->start_offset <= mir->offset) {
+  if (mir_graph_->GetBasicBlock(bb->taken)->start_offset <= mir->offset) {
     EmitSuspendCheck();
   }
   ::llvm::Value* src1 = GetLLVMValue(rl_src1.orig_sreg);
@@ -337,10 +337,10 @@
     src2 = irb_->getInt32(0);
   }
   ::llvm::Value* cond_value = ConvertCompare(cc, src1, src2);
-  irb_->CreateCondBr(cond_value, GetLLVMBlock(bb->taken->id),
-                           GetLLVMBlock(bb->fall_through->id));
+  irb_->CreateCondBr(cond_value, GetLLVMBlock(bb->taken),
+                           GetLLVMBlock(bb->fall_through));
   // Don't redo the fallthrough branch in the BB driver
-  bb->fall_through = NULL;
+  bb->fall_through = NullBasicBlockId;
 }
 
 ::llvm::Value* MirConverter::GenDivModOp(bool is_div, bool is_long,
@@ -941,10 +941,10 @@
     case Instruction::GOTO:
     case Instruction::GOTO_16:
     case Instruction::GOTO_32: {
-        if (bb->taken->start_offset <= bb->start_offset) {
+        if (mir_graph_->GetBasicBlock(bb->taken)->start_offset <= bb->start_offset) {
           EmitSuspendCheck();
         }
-        irb_->CreateBr(GetLLVMBlock(bb->taken->id));
+        irb_->CreateBr(GetLLVMBlock(bb->taken));
       }
       break;
 
@@ -1190,11 +1190,11 @@
        * If it might rethrow, force termination
        * of the following block.
        */
-      if (bb->fall_through == NULL) {
+      if (bb->fall_through == NullBasicBlockId) {
         irb_->CreateUnreachable();
       } else {
-        bb->fall_through->fall_through = NULL;
-        bb->fall_through->taken = NULL;
+        mir_graph_->GetBasicBlock(bb->fall_through)->fall_through = NullBasicBlockId;
+        mir_graph_->GetBasicBlock(bb->fall_through)->taken = NullBasicBlockId;
       }
       break;
 
@@ -1552,7 +1552,7 @@
     if (rl_dest.high_word) {
       continue;  // No Phi node - handled via low word
     }
-    int* incoming = reinterpret_cast<int*>(mir->dalvikInsn.vB);
+    BasicBlockId* incoming = mir->meta.phi_incoming;
     ::llvm::Type* phi_type =
         LlvmTypeFromLocRec(rl_dest);
     ::llvm::PHINode* phi = irb_->CreatePHI(phi_type, mir->ssa_rep->num_uses);
@@ -1597,8 +1597,8 @@
       break;
     }
     case kMirOpNop:
-      if ((mir == bb->last_mir_insn) && (bb->taken == NULL) &&
-          (bb->fall_through == NULL)) {
+      if ((mir == bb->last_mir_insn) && (bb->taken == NullBasicBlockId) &&
+          (bb->fall_through == NullBasicBlockId)) {
         irb_->CreateUnreachable();
       }
       break;
@@ -1718,25 +1718,23 @@
       SSARepresentation* ssa_rep = work_half->ssa_rep;
       work_half->ssa_rep = mir->ssa_rep;
       mir->ssa_rep = ssa_rep;
-      work_half->meta.original_opcode = work_half->dalvikInsn.opcode;
       work_half->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
-      if (bb->successor_block_list.block_list_type == kCatch) {
+      if (bb->successor_block_list_type == kCatch) {
         ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(
             art::llvm::IntrinsicHelper::CatchTargets);
         ::llvm::Value* switch_key =
             irb_->CreateCall(intr, irb_->getInt32(mir->offset));
-        GrowableArray<SuccessorBlockInfo*>::Iterator iter(bb->successor_block_list.blocks);
+        GrowableArray<SuccessorBlockInfo*>::Iterator iter(bb->successor_blocks);
         // New basic block to use for work half
         ::llvm::BasicBlock* work_bb =
             ::llvm::BasicBlock::Create(*context_, "", func_);
         ::llvm::SwitchInst* sw =
-            irb_->CreateSwitch(switch_key, work_bb,
-                                     bb->successor_block_list.blocks->Size());
+            irb_->CreateSwitch(switch_key, work_bb, bb->successor_blocks->Size());
         while (true) {
           SuccessorBlockInfo *successor_block_info = iter.Next();
           if (successor_block_info == NULL) break;
           ::llvm::BasicBlock *target =
-              GetLLVMBlock(successor_block_info->block->id);
+              GetLLVMBlock(successor_block_info->block);
           int type_index = successor_block_info->key;
           sw->addCase(irb_->getInt32(type_index), target);
         }
@@ -1761,9 +1759,9 @@
   }
 
   if (bb->block_type == kEntryBlock) {
-    entry_target_bb_ = GetLLVMBlock(bb->fall_through->id);
-  } else if ((bb->fall_through != NULL) && !bb->terminated_by_return) {
-    irb_->CreateBr(GetLLVMBlock(bb->fall_through->id));
+    entry_target_bb_ = GetLLVMBlock(bb->fall_through);
+  } else if ((bb->fall_through != NullBasicBlockId) && !bb->terminated_by_return) {
+    irb_->CreateBr(GetLLVMBlock(bb->fall_through));
   }
 
   return false;
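
One change in this file is more than a mechanical id swap: ConvertPhi previously
recovered its incoming-block array by casting the 32-bit vB field of the decoded
instruction back to a pointer, which cannot survive 8-byte pointers. The array now
travels in a dedicated member of the MIR meta union. A sketch under those
assumptions (the real struct is in mir_graph.h; unrelated fields elided):

    struct MIR {
      DecodedInstruction dalvikInsn;
      SSARepresentation* ssa_rep;
      union {
        // For phi nodes: one incoming BasicBlockId per entry of ssa_rep->uses,
        // replacing a pointer smuggled through the 32-bit dalvikInsn.vB slot.
        BasicBlockId* phi_incoming;
        // ... other per-opcode payloads ...
      } meta;
      // ...
    };

    // Consumption, as in ConvertPhi above (illustrative):
    BasicBlockId* incoming = mir->meta.phi_incoming;
    for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
      phi->addIncoming(GetLLVMValue(mir->ssa_rep->uses[i]),
                       GetLLVMBlock(incoming[i]));
    }
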
diff --git a/compiler/dex/quick/arm/assemble_arm.cc b/compiler/dex/quick/arm/assemble_arm.cc
index 3c646c4..cc40e99 100644
--- a/compiler/dex/quick/arm/assemble_arm.cc
+++ b/compiler/dex/quick/arm/assemble_arm.cc
@@ -1031,8 +1031,7 @@
   } else if (LIKELY(!lir->flags.is_nop)) {
     const ArmEncodingMap *encoder = &EncodingMap[lir->opcode];
     uint32_t bits = encoder->skeleton;
-    int i;
-    for (i = 0; i < 4; i++) {
+    for (int i = 0; i < 4; i++) {
       uint32_t operand;
       uint32_t value;
       operand = lir->operands[i];
@@ -1088,7 +1087,7 @@
           case kFmtDfp: {
             DCHECK(ARM_DOUBLEREG(operand));
             DCHECK_EQ((operand & 0x1), 0U);
-            int reg_name = (operand & ARM_FP_REG_MASK) >> 1;
+            uint32_t reg_name = (operand & ARM_FP_REG_MASK) >> 1;
             /* Snag the 1-bit slice and position it */
             value = ((reg_name & 0x10) >> 4) << encoder->field_loc[i].end;
             /* Extract and position the 4-bit slice */
@@ -1155,9 +1154,9 @@
   LIR* lir;
   LIR* prev_lir;
   int assembler_retries = 0;
-  int starting_offset = EncodeRange(first_lir_insn_, last_lir_insn_, 0);
+  CodeOffset starting_offset = EncodeRange(first_lir_insn_, last_lir_insn_, 0);
   data_offset_ = (starting_offset + 0x3) & ~0x3;
-  int offset_adjustment;
+  int32_t offset_adjustment;
   AssignDataOffsets();
 
   /*
@@ -1200,10 +1199,10 @@
            * we revert to a multiple-instruction materialization sequence.
            */
           LIR *lir_target = lir->target;
-          uintptr_t pc = (lir->offset + 4) & ~3;
-          uintptr_t target = lir_target->offset +
+          CodeOffset pc = (lir->offset + 4) & ~3;
+          CodeOffset target = lir_target->offset +
               ((lir_target->flags.generation == lir->flags.generation) ? 0 : offset_adjustment);
-          int delta = target - pc;
+          int32_t delta = target - pc;
           if (res != kSuccess) {
             /*
              * In this case, we're just estimating and will do it again for real.  Ensure offset
@@ -1281,10 +1280,10 @@
         }
         case kFixupCBxZ: {
           LIR *target_lir = lir->target;
-          uintptr_t pc = lir->offset + 4;
-          uintptr_t target = target_lir->offset +
+          CodeOffset pc = lir->offset + 4;
+          CodeOffset target = target_lir->offset +
               ((target_lir->flags.generation == lir->flags.generation) ? 0 : offset_adjustment);
-          int delta = target - pc;
+          int32_t delta = target - pc;
           if (delta > 126 || delta < 0) {
             /*
              * Convert to cmp rx,#0 / b[eq/ne] tgt pair
@@ -1351,10 +1350,10 @@
         }
         case kFixupCondBranch: {
           LIR *target_lir = lir->target;
-          int delta = 0;
+          int32_t delta = 0;
           DCHECK(target_lir);
-          uintptr_t pc = lir->offset + 4;
-          uintptr_t target = target_lir->offset +
+          CodeOffset pc = lir->offset + 4;
+          CodeOffset target = target_lir->offset +
               ((target_lir->flags.generation == lir->flags.generation) ? 0 : offset_adjustment);
           delta = target - pc;
           if ((lir->opcode == kThumbBCond) && (delta > 254 || delta < -256)) {
@@ -1370,10 +1369,10 @@
         }
         case kFixupT2Branch: {
           LIR *target_lir = lir->target;
-          uintptr_t pc = lir->offset + 4;
-          uintptr_t target = target_lir->offset +
+          CodeOffset pc = lir->offset + 4;
+          CodeOffset target = target_lir->offset +
               ((target_lir->flags.generation == lir->flags.generation) ? 0 : offset_adjustment);
-          int delta = target - pc;
+          int32_t delta = target - pc;
           lir->operands[0] = delta >> 1;
           if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && lir->operands[0] == 0) {
             // Useless branch
@@ -1387,10 +1386,10 @@
         }
         case kFixupT1Branch: {
           LIR *target_lir = lir->target;
-          uintptr_t pc = lir->offset + 4;
-          uintptr_t target = target_lir->offset +
+          CodeOffset pc = lir->offset + 4;
+          CodeOffset target = target_lir->offset +
               ((target_lir->flags.generation == lir->flags.generation) ? 0 : offset_adjustment);
-          int delta = target - pc;
+          int32_t delta = target - pc;
           if (delta > 2046 || delta < -2048) {
             // Convert to Thumb2BCond w/ kArmCondAl
             offset_adjustment -= lir->flags.size;
@@ -1416,14 +1415,14 @@
         case kFixupBlx1: {
           DCHECK(NEXT_LIR(lir)->opcode == kThumbBlx2);
           /* cur_pc is Thumb */
-          uintptr_t cur_pc = (lir->offset + 4) & ~3;
-          uintptr_t target = lir->operands[1];
+          CodeOffset cur_pc = (lir->offset + 4) & ~3;
+          CodeOffset target = lir->operands[1];
 
           /* Match bit[1] in target with base */
           if (cur_pc & 0x2) {
             target |= 0x2;
           }
-          int delta = target - cur_pc;
+          int32_t delta = target - cur_pc;
           DCHECK((delta >= -(1<<22)) && (delta <= ((1<<22)-2)));
 
           lir->operands[0] = (delta >> 12) & 0x7ff;
@@ -1433,10 +1432,10 @@
         case kFixupBl1: {
           DCHECK(NEXT_LIR(lir)->opcode == kThumbBl2);
           /* Both cur_pc and target are Thumb */
-          uintptr_t cur_pc = lir->offset + 4;
-          uintptr_t target = lir->operands[1];
+          CodeOffset cur_pc = lir->offset + 4;
+          CodeOffset target = lir->operands[1];
 
-          int delta = target - cur_pc;
+          int32_t delta = target - cur_pc;
           DCHECK((delta >= -(1<<22)) && (delta <= ((1<<22)-2)));
 
           lir->operands[0] = (delta >> 12) & 0x7ff;
@@ -1444,20 +1443,19 @@
           break;
         }
         case kFixupAdr: {
-          SwitchTable *tab_rec = reinterpret_cast<SwitchTable*>(lir->operands[2]);
+          EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(lir->operands[2]));
           LIR* target = lir->target;
-          int target_disp = (tab_rec != NULL) ?  tab_rec->offset + offset_adjustment
+          int32_t target_disp = (tab_rec != NULL) ?  tab_rec->offset + offset_adjustment
               : target->offset + ((target->flags.generation == lir->flags.generation) ? 0 : offset_adjustment);
-          int disp = target_disp - ((lir->offset + 4) & ~3);
+          int32_t disp = target_disp - ((lir->offset + 4) & ~3);
           if (disp < 4096) {
             lir->operands[1] = disp;
           } else {
             // convert to ldimm16l, ldimm16h, add tgt, pc, operands[0]
             // TUNING: if this case fires often, it can be improved.  Not expected to be common.
             LIR *new_mov16L =
-                RawLIR(lir->dalvik_offset, kThumb2MovImm16LST,
-                       lir->operands[0], 0, reinterpret_cast<uintptr_t>(lir),
-                       reinterpret_cast<uintptr_t>(tab_rec), 0, lir->target);
+                RawLIR(lir->dalvik_offset, kThumb2MovImm16LST, lir->operands[0], 0,
+                       WrapPointer(lir), WrapPointer(tab_rec), 0, lir->target);
             new_mov16L->flags.size = EncodingMap[new_mov16L->opcode].size;
             new_mov16L->flags.fixup = kFixupMovImmLST;
             new_mov16L->offset = lir->offset;
@@ -1467,11 +1465,9 @@
             offset_adjustment += new_mov16L->flags.size;
             InsertFixupBefore(prev_lir, lir, new_mov16L);
             prev_lir = new_mov16L;   // Now we've got a new prev.
-
             LIR *new_mov16H =
-                RawLIR(lir->dalvik_offset, kThumb2MovImm16HST,
-                       lir->operands[0], 0, reinterpret_cast<uintptr_t>(lir),
-                       reinterpret_cast<uintptr_t>(tab_rec), 0, lir->target);
+                RawLIR(lir->dalvik_offset, kThumb2MovImm16HST, lir->operands[0], 0,
+                       WrapPointer(lir), WrapPointer(tab_rec), 0, lir->target);
             new_mov16H->flags.size = EncodingMap[new_mov16H->opcode].size;
             new_mov16H->flags.fixup = kFixupMovImmHST;
             new_mov16H->offset = lir->offset;
@@ -1499,27 +1495,27 @@
         }
         case kFixupMovImmLST: {
           // operands[1] should hold disp, [2] has add, [3] has tab_rec
-          LIR *addPCInst = reinterpret_cast<LIR*>(lir->operands[2]);
-          SwitchTable *tab_rec = reinterpret_cast<SwitchTable*>(lir->operands[3]);
+          LIR *addPCInst = reinterpret_cast<LIR*>(UnwrapPointer(lir->operands[2]));
+          EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(lir->operands[3]));
           // If tab_rec is null, this is a literal load. Use target
           LIR* target = lir->target;
-          int target_disp = tab_rec ? tab_rec->offset : target->offset;
+          int32_t target_disp = tab_rec ? tab_rec->offset : target->offset;
           lir->operands[1] = (target_disp - (addPCInst->offset + 4)) & 0xffff;
           break;
         }
         case kFixupMovImmHST: {
           // operands[1] should hold disp, [2] has add, [3] has tab_rec
-          LIR *addPCInst = reinterpret_cast<LIR*>(lir->operands[2]);
-          SwitchTable *tab_rec = reinterpret_cast<SwitchTable*>(lir->operands[3]);
+          LIR *addPCInst = reinterpret_cast<LIR*>(UnwrapPointer(lir->operands[2]));
+          EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(lir->operands[3]));
           // If tab_rec is null, this is a literal load. Use target
           LIR* target = lir->target;
-          int target_disp = tab_rec ? tab_rec->offset : target->offset;
+          int32_t target_disp = tab_rec ? tab_rec->offset : target->offset;
           lir->operands[1] =
               ((target_disp - (addPCInst->offset + 4)) >> 16) & 0xffff;
           break;
         }
         case kFixupAlign4: {
-          int required_size = lir->offset & 0x2;
+          int32_t required_size = lir->offset & 0x2;
           if (lir->flags.size != required_size) {
             offset_adjustment += required_size - lir->flags.size;
             lir->flags.size = required_size;
@@ -1647,7 +1643,7 @@
 
 void ArmMir2Lir::AssignDataOffsets() {
   /* Set up offsets for literals */
-  int offset = data_offset_;
+  CodeOffset offset = data_offset_;
 
   offset = AssignLiteralOffset(offset);
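
Nearly every kFixup case above repeats one idiom: form a signed 32-bit delta
between the fixup site and its target, compensating for any instructions inserted
earlier in the current retry pass. Distilled as a hypothetical helper (no such
function exists in the tree; the generation test matches the hunks above):

    static int32_t BranchDelta(const LIR* lir, const LIR* target_lir,
                               int32_t offset_adjustment) {
      // LIRs already shifted in this pass carry the current generation tag;
      // targets from the other generation still need offset_adjustment applied.
      CodeOffset pc = lir->offset + 4;  // Thumb reads PC as instruction address + 4.
      CodeOffset target = target_lir->offset +
          ((target_lir->flags.generation == lir->flags.generation) ? 0 : offset_adjustment);
      return static_cast<int32_t>(target - pc);  // Signed: backward branches are negative.
    }

This is also why deltas stay int32_t while the offsets themselves become unsigned
CodeOffset values: trace-relative offsets are never negative, but a branch
displacement can be.
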
 
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index 401da2a..51aca85 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -92,7 +92,7 @@
 }
 
 /* Find the next MIR, which may be in a following basic block */
-// TODO: should this be a utility in mir_graph?
+// TODO: make this a utility in mir_graph.
 MIR* ArmMir2Lir::GetNextMir(BasicBlock** p_bb, MIR* mir) {
   BasicBlock* bb = *p_bb;
   MIR* orig_mir = mir;
@@ -103,7 +103,7 @@
     if (mir != NULL) {
       return mir;
     } else {
-      bb = bb->fall_through;
+      bb = mir_graph_->GetBasicBlock(bb->fall_through);
       *p_bb = bb;
       if (bb) {
          mir = bb->first_mir_insn;
@@ -128,7 +128,7 @@
 
 MIR* ArmMir2Lir::SpecialIGet(BasicBlock** bb, MIR* mir,
                              OpSize size, bool long_or_double, bool is_object) {
-  int field_offset;
+  int32_t field_offset;
   bool is_volatile;
   uint32_t field_idx = mir->dalvikInsn.vC;
   bool fast_path = FastInstance(field_idx, false, &field_offset, &is_volatile);
@@ -153,7 +153,7 @@
 
 MIR* ArmMir2Lir::SpecialIPut(BasicBlock** bb, MIR* mir,
                              OpSize size, bool long_or_double, bool is_object) {
-  int field_offset;
+  int32_t field_offset;
   bool is_volatile;
   uint32_t field_idx = mir->dalvikInsn.vC;
   bool fast_path = FastInstance(field_idx, false, &field_offset, &is_volatile);
@@ -320,9 +320,9 @@
       static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), ArenaAllocator::kAllocData));
   tab_rec->table = table;
   tab_rec->vaddr = current_dalvik_offset_;
-  int size = table[1];
+  uint32_t size = table[1];
   tab_rec->targets = static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*),
-                                                      ArenaAllocator::kAllocLIR));
+                                                     ArenaAllocator::kAllocLIR));
   switch_tables_.Insert(tab_rec);
 
   // Get the switch value
@@ -338,7 +338,7 @@
     r_key = tmp;
   }
   // Materialize a pointer to the switch table
-  NewLIR3(kThumb2Adr, rBase, 0, reinterpret_cast<uintptr_t>(tab_rec));
+  NewLIR3(kThumb2Adr, rBase, 0, WrapPointer(tab_rec));
   // Set up r_idx
   int r_idx = AllocTemp();
   LoadConstant(r_idx, size);
@@ -368,7 +368,7 @@
       static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable),  ArenaAllocator::kAllocData));
   tab_rec->table = table;
   tab_rec->vaddr = current_dalvik_offset_;
-  int size = table[1];
+  uint32_t size = table[1];
   tab_rec->targets =
       static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*), ArenaAllocator::kAllocLIR));
   switch_tables_.Insert(tab_rec);
@@ -377,7 +377,7 @@
   rl_src = LoadValue(rl_src, kCoreReg);
   int table_base = AllocTemp();
   // Materialize a pointer to the switch table
-  NewLIR3(kThumb2Adr, table_base, 0, reinterpret_cast<uintptr_t>(tab_rec));
+  NewLIR3(kThumb2Adr, table_base, 0, WrapPointer(tab_rec));
   int low_key = s4FromSwitchData(&table[2]);
   int keyReg;
   // Remove the bias, if necessary
@@ -433,7 +433,7 @@
   LoadWordDisp(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayData).Int32Value(),
                rARM_LR);
   // Materialize a pointer to the fill data image
-  NewLIR3(kThumb2Adr, r1, 0, reinterpret_cast<uintptr_t>(tab_rec));
+  NewLIR3(kThumb2Adr, r1, 0, WrapPointer(tab_rec));
   ClobberCalleeSave();
   LIR* call_inst = OpReg(kOpBlx, rARM_LR);
   MarkSafepointPC(call_inst);
diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h
index aa5782b..0a3bfc1 100644
--- a/compiler/dex/quick/arm/codegen_arm.h
+++ b/compiler/dex/quick/arm/codegen_arm.h
@@ -74,7 +74,6 @@
     uint32_t EncodeRange(LIR* head_lir, LIR* tail_lir, uint32_t starting_offset);
     int AssignInsnOffsets();
     void AssignOffsets();
-    AssemblerStatus AssembleInstructions(uintptr_t start_addr);
     void EncodeLIR(LIR* lir);
     void DumpResourceMask(LIR* lir, uint64_t mask, const char* prefix);
     void SetupTargetResourceMasks(LIR* lir, uint64_t flags);
@@ -120,7 +119,7 @@
     void GenDivZeroCheck(int reg_lo, int reg_hi);
     void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method);
     void GenExitSequence();
-    void GenFillArrayData(uint32_t table_offset, RegLocation rl_src);
+    void GenFillArrayData(DexOffset table_offset, RegLocation rl_src);
     void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double);
     void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir);
     void GenSelect(BasicBlock* bb, MIR* mir);
@@ -132,8 +131,8 @@
                                                int first_bit, int second_bit);
     void GenNegDouble(RegLocation rl_dest, RegLocation rl_src);
     void GenNegFloat(RegLocation rl_dest, RegLocation rl_src);
-    void GenPackedSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
-    void GenSparseSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
+    void GenPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
+    void GenSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
     void GenSpecialCase(BasicBlock* bb, MIR* mir, SpecialCaseHandler special_case);
 
     // Required for target - single operation generators.
diff --git a/compiler/dex/quick/arm/fp_arm.cc b/compiler/dex/quick/arm/fp_arm.cc
index 08d6778..480e021 100644
--- a/compiler/dex/quick/arm/fp_arm.cc
+++ b/compiler/dex/quick/arm/fp_arm.cc
@@ -176,7 +176,7 @@
 
 void ArmMir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias,
                                      bool is_double) {
-  LIR* target = &block_label_list_[bb->taken->id];
+  LIR* target = &block_label_list_[bb->taken];
   RegLocation rl_src1;
   RegLocation rl_src2;
   if (is_double) {
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index b1772fd..69ea4e9 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -122,8 +122,8 @@
   int32_t val_hi = High32Bits(val);
   DCHECK_GE(ModifiedImmediate(val_lo), 0);
   DCHECK_GE(ModifiedImmediate(val_hi), 0);
-  LIR* taken = &block_label_list_[bb->taken->id];
-  LIR* not_taken = &block_label_list_[bb->fall_through->id];
+  LIR* taken = &block_label_list_[bb->taken];
+  LIR* not_taken = &block_label_list_[bb->fall_through];
   rl_src1 = LoadValueWide(rl_src1, kCoreReg);
   int32_t low_reg = rl_src1.low_reg;
   int32_t high_reg = rl_src1.high_reg;
@@ -178,23 +178,6 @@
 void ArmMir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
   RegLocation rl_result;
   RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
-  // Temporary debugging code
-  int dest_sreg = mir->ssa_rep->defs[0];
-  if ((dest_sreg < 0) || (dest_sreg >= mir_graph_->GetNumSSARegs())) {
-    LOG(INFO) << "Bad target sreg: " << dest_sreg << ", in "
-              << PrettyMethod(cu_->method_idx, *cu_->dex_file);
-    LOG(INFO) << "at dex offset 0x" << std::hex << mir->offset;
-    LOG(INFO) << "vreg = " << mir_graph_->SRegToVReg(dest_sreg);
-    LOG(INFO) << "num uses = " << mir->ssa_rep->num_uses;
-    if (mir->ssa_rep->num_uses == 1) {
-      LOG(INFO) << "CONST case, vals = " << mir->dalvikInsn.vB << ", " << mir->dalvikInsn.vC;
-    } else {
-      LOG(INFO) << "MOVE case, operands = " << mir->ssa_rep->uses[1] << ", "
-                << mir->ssa_rep->uses[2];
-    }
-    CHECK(false) << "Invalid target sreg on Select.";
-  }
-  // End temporary debugging code
   RegLocation rl_dest = mir_graph_->GetDest(mir);
   rl_src = LoadValue(rl_src, kCoreReg);
   if (mir->ssa_rep->num_uses == 1) {
@@ -270,8 +253,8 @@
       return;
     }
   }
-  LIR* taken = &block_label_list_[bb->taken->id];
-  LIR* not_taken = &block_label_list_[bb->fall_through->id];
+  LIR* taken = &block_label_list_[bb->taken];
+  LIR* not_taken = &block_label_list_[bb->fall_through];
   rl_src1 = LoadValueWide(rl_src1, kCoreReg);
   rl_src2 = LoadValueWide(rl_src2, kCoreReg);
   OpRegReg(kOpCmp, rl_src1.high_reg, rl_src2.high_reg);
diff --git a/compiler/dex/quick/arm/target_arm.cc b/compiler/dex/quick/arm/target_arm.cc
index 933c1a3..3395ae7 100644
--- a/compiler/dex/quick/arm/target_arm.cc
+++ b/compiler/dex/quick/arm/target_arm.cc
@@ -282,8 +282,8 @@
   return buf;
 }
 
-static int ExpandImmediate(int value) {
-  int mode = (value & 0xf00) >> 8;
+static int32_t ExpandImmediate(int value) {
+  int32_t mode = (value & 0xf00) >> 8;
   uint32_t bits = value & 0xff;
   switch (mode) {
     case 0:
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index 00de8de..a2ac6ef 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -22,14 +22,14 @@
 
 /* This file contains codegen for the Thumb ISA. */
 
-static int EncodeImmSingle(int value) {
-  int res;
-  int bit_a =  (value & 0x80000000) >> 31;
-  int not_bit_b = (value & 0x40000000) >> 30;
-  int bit_b =  (value & 0x20000000) >> 29;
-  int b_smear =  (value & 0x3e000000) >> 25;
-  int slice =   (value & 0x01f80000) >> 19;
-  int zeroes =  (value & 0x0007ffff);
+static int32_t EncodeImmSingle(int32_t value) {
+  int32_t res;
+  int32_t bit_a =  (value & 0x80000000) >> 31;
+  int32_t not_bit_b = (value & 0x40000000) >> 30;
+  int32_t bit_b =  (value & 0x20000000) >> 29;
+  int32_t b_smear =  (value & 0x3e000000) >> 25;
+  int32_t slice =   (value & 0x01f80000) >> 19;
+  int32_t zeroes =  (value & 0x0007ffff);
   if (zeroes != 0)
     return -1;
   if (bit_b) {
@@ -47,15 +47,15 @@
  * Determine whether value can be encoded as a Thumb2 floating point
  * immediate.  If not, return -1.  If so, return encoded 8-bit value.
  */
-static int EncodeImmDouble(int64_t value) {
-  int res;
-  int bit_a = (value & 0x8000000000000000ll) >> 63;
-  int not_bit_b = (value & 0x4000000000000000ll) >> 62;
-  int bit_b = (value & 0x2000000000000000ll) >> 61;
-  int b_smear = (value & 0x3fc0000000000000ll) >> 54;
-  int slice =  (value & 0x003f000000000000ll) >> 48;
+static int32_t EncodeImmDouble(int64_t value) {
+  int32_t res;
+  int32_t bit_a = (value & 0x8000000000000000ll) >> 63;
+  int32_t not_bit_b = (value & 0x4000000000000000ll) >> 62;
+  int32_t bit_b = (value & 0x2000000000000000ll) >> 61;
+  int32_t b_smear = (value & 0x3fc0000000000000ll) >> 54;
+  int32_t slice =  (value & 0x003f000000000000ll) >> 48;
   uint64_t zeroes = (value & 0x0000ffffffffffffll);
-  if (zeroes != 0)
+  if (zeroes != 0ull)
     return -1;
   if (bit_b) {
     if ((not_bit_b != 0) || (b_smear != 0xff))
@@ -96,8 +96,8 @@
 
 static int LeadingZeros(uint32_t val) {
   uint32_t alt;
-  int n;
-  int count;
+  int32_t n;
+  int32_t count;
 
   count = 16;
   n = 32;
@@ -117,8 +117,8 @@
  * immediate.  If not, return -1.  If so, return i:imm3:a:bcdefgh form.
  */
 int ArmMir2Lir::ModifiedImmediate(uint32_t value) {
-  int z_leading;
-  int z_trailing;
+  int32_t z_leading;
+  int32_t z_trailing;
   uint32_t b0 = value & 0xff;
 
   /* Note: case of value==0 must use 0:000:0:0000000 encoding */
@@ -421,12 +421,12 @@
 LIR* ArmMir2Lir::OpRegRegImm(OpKind op, int r_dest, int r_src1, int value) {
   LIR* res;
   bool neg = (value < 0);
-  int abs_value = (neg) ? -value : value;
+  int32_t abs_value = (neg) ? -value : value;
   ArmOpcode opcode = kThumbBkpt;
   ArmOpcode alt_opcode = kThumbBkpt;
   bool all_low_regs = (ARM_LOWREG(r_dest) && ARM_LOWREG(r_src1));
-  int mod_imm = ModifiedImmediate(value);
-  int mod_imm_neg = ModifiedImmediate(-value);
+  int32_t mod_imm = ModifiedImmediate(value);
+  int32_t mod_imm_neg = ModifiedImmediate(-value);
 
   switch (op) {
     case kOpLsl:
@@ -544,7 +544,7 @@
 /* Handle Thumb-only variants here - otherwise punt to OpRegRegImm */
 LIR* ArmMir2Lir::OpRegImm(OpKind op, int r_dest_src1, int value) {
   bool neg = (value < 0);
-  int abs_value = (neg) ? -value : value;
+  int32_t abs_value = (neg) ? -value : value;
   bool short_form = (((abs_value & 0xff) == abs_value) && ARM_LOWREG(r_dest_src1));
   ArmOpcode opcode = kThumbBkpt;
   switch (op) {
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 617f357..2ce8f58 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -160,7 +160,8 @@
       break;
     case kPseudoDalvikByteCodeBoundary:
       if (lir->operands[0] == 0) {
-         lir->operands[0] = reinterpret_cast<uintptr_t>("No instruction string");
+         // NOTE: only used for debug listings.
+         lir->operands[0] = WrapPointer(ArenaStrdup("No instruction string"));
       }
       LOG(INFO) << "-------- dalvik offset: 0x" << std::hex
-                << lir->dalvik_offset << " @ " << reinterpret_cast<char*>(lir->operands[0]);
+                << lir->dalvik_offset << " @ " << reinterpret_cast<char*>(UnwrapPointer(lir->operands[0]));
@@ -369,6 +370,17 @@
   buf.push_back((data >> 24) & 0xff);
 }
 
+// Push 8 bytes on 64-bit systems; 4 on 32-bit systems.
+static void PushPointer(std::vector<uint8_t>& buf, const void* pointer) {
+  uintptr_t data = reinterpret_cast<uintptr_t>(pointer);
+  if (sizeof(void*) == sizeof(uint64_t)) {
+    PushWord(buf, (data >> (sizeof(void*) * 4)) & 0xFFFFFFFF);
+    PushWord(buf, data & 0xFFFFFFFF);
+  } else {
+    PushWord(buf, data);
+  }
+}
+
 static void AlignBuffer(std::vector<uint8_t>&buf, size_t offset) {
   while (buf.size() < offset) {
     buf.push_back(0);
@@ -395,9 +407,8 @@
                                        static_cast<InvokeType>(data_lir->operands[1]),
                                        code_buffer_.size());
     const DexFile::MethodId& id = cu_->dex_file->GetMethodId(target);
-    // unique based on target to ensure code deduplication works
-    uint32_t unique_patch_value = reinterpret_cast<uint32_t>(&id);
-    PushWord(code_buffer_, unique_patch_value);
+    // unique value based on target to ensure code deduplication works
+    PushPointer(code_buffer_, &id);
     data_lir = NEXT_LIR(data_lir);
   }
   data_lir = method_literal_list_;
@@ -411,9 +422,8 @@
                                          static_cast<InvokeType>(data_lir->operands[1]),
                                          code_buffer_.size());
     const DexFile::MethodId& id = cu_->dex_file->GetMethodId(target);
-    // unique based on target to ensure code deduplication works
-    uint32_t unique_patch_value = reinterpret_cast<uint32_t>(&id);
-    PushWord(code_buffer_, unique_patch_value);
+    // unique value based on target to ensure code deduplication works
+    PushPointer(code_buffer_, &id);
     data_lir = NEXT_LIR(data_lir);
   }
 }
@@ -449,7 +459,7 @@
       LOG(INFO) << "Switch table for offset 0x" << std::hex << bx_offset;
     }
     if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
-      const int* keys = reinterpret_cast<const int*>(&(tab_rec->table[2]));
+      const int32_t* keys = reinterpret_cast<const int32_t*>(&(tab_rec->table[2]));
       for (int elems = 0; elems < tab_rec->table[1]; elems++) {
         int disp = tab_rec->targets[elems]->offset - bx_offset;
         if (cu_->verbose) {
@@ -490,7 +500,7 @@
   }
 }
 
-static int AssignLiteralOffsetCommon(LIR* lir, int offset) {
+static int AssignLiteralOffsetCommon(LIR* lir, CodeOffset offset) {
   for (; lir != NULL; lir = lir->next) {
     lir->offset = offset;
     offset += 4;
@@ -498,6 +508,17 @@
   return offset;
 }
 
+static int AssignLiteralPointerOffsetCommon(LIR* lir, CodeOffset offset) {
+  size_t element_size = sizeof(void*);
+  // Align to natural pointer size.
+  offset = (offset + (element_size - 1)) & ~(element_size - 1);
+  for (; lir != NULL; lir = lir->next) {
+    lir->offset = offset;
+    offset += element_size;
+  }
+  return offset;
+}
+
 // Make sure we have a code address for every declared catch entry
 bool Mir2Lir::VerifyCatchEntries() {
   bool success = true;
@@ -607,8 +628,8 @@
       table_index = (table_index + 1) % entries_;
     }
     in_use_[table_index] = true;
-    SetNativeOffset(table_index, native_offset);
-    DCHECK_EQ(native_offset, GetNativeOffset(table_index));
+    SetCodeOffset(table_index, native_offset);
+    DCHECK_EQ(native_offset, GetCodeOffset(table_index));
     SetReferences(table_index, references);
   }
 
@@ -617,7 +638,7 @@
     return NativePcOffsetToReferenceMap::Hash(native_offset) % entries_;
   }
 
-  uint32_t GetNativeOffset(size_t table_index) {
+  uint32_t GetCodeOffset(size_t table_index) {
     uint32_t native_offset = 0;
     size_t table_offset = (table_index * EntryWidth()) + sizeof(uint32_t);
     for (size_t i = 0; i < native_offset_width_; i++) {
@@ -626,7 +647,7 @@
     return native_offset;
   }
 
-  void SetNativeOffset(size_t table_index, uint32_t native_offset) {
+  void SetCodeOffset(size_t table_index, uint32_t native_offset) {
     size_t table_offset = (table_index * EntryWidth()) + sizeof(uint32_t);
     for (size_t i = 0; i < native_offset_width_; i++) {
       (*table_)[table_offset + i] = (native_offset >> (i * 8)) & 0xFF;
@@ -681,17 +702,17 @@
 }
 
 /* Determine the offset of each literal field */
-int Mir2Lir::AssignLiteralOffset(int offset) {
+int Mir2Lir::AssignLiteralOffset(CodeOffset offset) {
   offset = AssignLiteralOffsetCommon(literal_list_, offset);
-  offset = AssignLiteralOffsetCommon(code_literal_list_, offset);
-  offset = AssignLiteralOffsetCommon(method_literal_list_, offset);
+  offset = AssignLiteralPointerOffsetCommon(code_literal_list_, offset);
+  offset = AssignLiteralPointerOffsetCommon(method_literal_list_, offset);
   return offset;
 }
 
-int Mir2Lir::AssignSwitchTablesOffset(int offset) {
+int Mir2Lir::AssignSwitchTablesOffset(CodeOffset offset) {
   GrowableArray<SwitchTable*>::Iterator iterator(&switch_tables_);
   while (true) {
-    Mir2Lir::SwitchTable *tab_rec = iterator.Next();
+    Mir2Lir::SwitchTable* tab_rec = iterator.Next();
     if (tab_rec == NULL) break;
     tab_rec->offset = offset;
     if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
@@ -705,7 +726,7 @@
   return offset;
 }
 
-int Mir2Lir::AssignFillArrayDataOffset(int offset) {
+int Mir2Lir::AssignFillArrayDataOffset(CodeOffset offset) {
   GrowableArray<FillArrayData*>::Iterator iterator(&fill_array_data_);
   while (true) {
     Mir2Lir::FillArrayData *tab_rec = iterator.Next();
@@ -725,7 +746,7 @@
  * branch table during the assembly phase.  All resource flags
  * are set to prevent code motion.  KeyVal is just there for debugging.
  */
-LIR* Mir2Lir::InsertCaseLabel(int vaddr, int keyVal) {
+LIR* Mir2Lir::InsertCaseLabel(DexOffset vaddr, int keyVal) {
   LIR* boundary_lir = &block_label_list_[mir_graph_->FindBlock(vaddr)->id];
   LIR* res = boundary_lir;
   if (cu_->verbose) {
@@ -743,10 +764,10 @@
   return res;
 }
 
-void Mir2Lir::MarkPackedCaseLabels(Mir2Lir::SwitchTable *tab_rec) {
+void Mir2Lir::MarkPackedCaseLabels(Mir2Lir::SwitchTable* tab_rec) {
   const uint16_t* table = tab_rec->table;
-  int base_vaddr = tab_rec->vaddr;
-  const int *targets = reinterpret_cast<const int*>(&table[4]);
+  DexOffset base_vaddr = tab_rec->vaddr;
+  const int32_t* targets = reinterpret_cast<const int32_t*>(&table[4]);
   int entries = table[1];
   int low_key = s4FromSwitchData(&table[2]);
   for (int i = 0; i < entries; i++) {
@@ -754,12 +775,12 @@
   }
 }
 
-void Mir2Lir::MarkSparseCaseLabels(Mir2Lir::SwitchTable *tab_rec) {
+void Mir2Lir::MarkSparseCaseLabels(Mir2Lir::SwitchTable* tab_rec) {
   const uint16_t* table = tab_rec->table;
-  int base_vaddr = tab_rec->vaddr;
+  DexOffset base_vaddr = tab_rec->vaddr;
   int entries = table[1];
-  const int* keys = reinterpret_cast<const int*>(&table[2]);
-  const int* targets = &keys[entries];
+  const int32_t* keys = reinterpret_cast<const int32_t*>(&table[2]);
+  const int32_t* targets = &keys[entries];
   for (int i = 0; i < entries; i++) {
     tab_rec->targets[i] = InsertCaseLabel(base_vaddr + targets[i], keys[i]);
   }
@@ -792,8 +813,8 @@
    */
   uint16_t ident = table[0];
   int entries = table[1];
-  const int* keys = reinterpret_cast<const int*>(&table[2]);
-  const int* targets = &keys[entries];
+  const int32_t* keys = reinterpret_cast<const int32_t*>(&table[2]);
+  const int32_t* targets = &keys[entries];
   LOG(INFO) <<  "Sparse switch table - ident:0x" << std::hex << ident
             << ", entries: " << std::dec << entries;
   for (int i = 0; i < entries; i++) {
@@ -812,7 +833,7 @@
    * Total size is (4+size*2) 16-bit code units.
    */
   uint16_t ident = table[0];
-  const int* targets = reinterpret_cast<const int*>(&table[4]);
+  const int32_t* targets = reinterpret_cast<const int32_t*>(&table[4]);
   int entries = table[1];
   int low_key = s4FromSwitchData(&table[2]);
   LOG(INFO) << "Packed switch table - ident:0x" << std::hex << ident
@@ -824,8 +845,9 @@
 }
 
 /* Set up special LIR to mark a Dalvik byte-code instruction start for pretty printing */
-void Mir2Lir::MarkBoundary(int offset, const char* inst_str) {
-  NewLIR1(kPseudoDalvikByteCodeBoundary, reinterpret_cast<uintptr_t>(inst_str));
+void Mir2Lir::MarkBoundary(DexOffset offset, const char* inst_str) {
+  // NOTE: only used for debug listings.
+  NewLIR1(kPseudoDalvikByteCodeBoundary, WrapPointer(ArenaStrdup(inst_str)));
 }
 
 bool Mir2Lir::EvaluateBranch(Instruction::Code opcode, int32_t src1, int32_t src2) {
@@ -883,6 +905,7 @@
       intrinsic_launchpads_(arena, 2048, kGrowableArrayMisc),
       tempreg_info_(arena, 20, kGrowableArrayMisc),
       reginfo_map_(arena, 64, kGrowableArrayMisc),
+      pointer_storage_(arena, 128, kGrowableArrayMisc),
       data_offset_(0),
       total_size_(0),
       block_label_list_(NULL),
@@ -900,6 +923,9 @@
   promotion_map_ = static_cast<PromotionMap*>
       (arena_->Alloc((cu_->num_dalvik_registers  + cu_->num_compiler_temps + 1) *
                       sizeof(promotion_map_[0]), ArenaAllocator::kAllocRegAlloc));
+  // Reserve pointer id 0 for NULL.
+  size_t null_idx = WrapPointer(NULL);
+  DCHECK_EQ(null_idx, 0U);
 }
 
 void Mir2Lir::Materialize() {
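
The pointer_storage_ array added above is the backing store for the WrapPointer()/
UnwrapPointer() calls used throughout this change: rather than reinterpret_casting
a pointer into a 32-bit LIR operand slot, the code files the pointer in a growable
table and stores its index. A sketch of the assumed implementation, taking
pointer_storage_ to be a GrowableArray<void*> like the other arrays initialized
alongside it (the real methods live in mir_to_lir.h):

    // Index-based pointer storage, so a pointer can ride in an int32_t operand
    // on both 32-bit and 64-bit hosts.
    size_t Mir2Lir::WrapPointer(void* pointer) {
      size_t res = pointer_storage_.Size();
      pointer_storage_.Insert(pointer);
      return res;
    }

    void* Mir2Lir::UnwrapPointer(size_t index) {
      return pointer_storage_.Get(index);
    }

Reserving index 0 for NULL (the DCHECK_EQ above) keeps the existing
"operands[n] == 0 means no pointer" tests working unchanged.
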
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 2670c23..2b3404a 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -40,7 +40,7 @@
   barrier->u.m.def_mask = ENCODE_ALL;
 }
 
-// FIXME: need to do some work to split out targets with
+// TODO: need to do some work to split out targets with
 // condition codes and those without
 LIR* Mir2Lir::GenCheck(ConditionCode c_code, ThrowKind kind) {
   DCHECK_NE(cu_->instruction_set, kMips);
@@ -503,7 +503,7 @@
     ResetRegPool();
     ResetDefTracking();
     LIR* lab = suspend_launchpads_.Get(i);
-    LIR* resume_lab = reinterpret_cast<LIR*>(lab->operands[0]);
+    LIR* resume_lab = reinterpret_cast<LIR*>(UnwrapPointer(lab->operands[0]));
     current_dalvik_offset_ = lab->operands[1];
     AppendLIR(lab);
     int r_tgt = CallHelperSetup(helper_offset);
@@ -518,12 +518,12 @@
     ResetRegPool();
     ResetDefTracking();
     LIR* lab = intrinsic_launchpads_.Get(i);
-    CallInfo* info = reinterpret_cast<CallInfo*>(lab->operands[0]);
+    CallInfo* info = reinterpret_cast<CallInfo*>(UnwrapPointer(lab->operands[0]));
     current_dalvik_offset_ = info->offset;
     AppendLIR(lab);
     // NOTE: GenInvoke handles MarkSafepointPC
     GenInvoke(info);
-    LIR* resume_lab = reinterpret_cast<LIR*>(lab->operands[2]);
+    LIR* resume_lab = reinterpret_cast<LIR*>(UnwrapPointer(lab->operands[2]));
     if (resume_lab != NULL) {
       OpUnconditionalBranch(resume_lab);
     }
@@ -1351,7 +1351,7 @@
 }
 
 // Returns the index of the lowest set bit in 'x'.
-static int LowestSetBit(unsigned int x) {
+static int32_t LowestSetBit(uint32_t x) {
   int bit_posn = 0;
   while ((x & 0xf) == 0) {
     bit_posn += 4;
@@ -1752,8 +1752,8 @@
   FlushAllRegs();
   LIR* branch = OpTestSuspend(NULL);
   LIR* ret_lab = NewLIR0(kPseudoTargetLabel);
-  LIR* target = RawLIR(current_dalvik_offset_, kPseudoSuspendTarget,
-                       reinterpret_cast<uintptr_t>(ret_lab), current_dalvik_offset_);
+  LIR* target = RawLIR(current_dalvik_offset_, kPseudoSuspendTarget, WrapPointer(ret_lab),
+                       current_dalvik_offset_);
   branch->target = target;
   suspend_launchpads_.Insert(target);
 }
@@ -1766,8 +1766,8 @@
   }
   OpTestSuspend(target);
   LIR* launch_pad =
-      RawLIR(current_dalvik_offset_, kPseudoSuspendTarget,
-             reinterpret_cast<uintptr_t>(target), current_dalvik_offset_);
+      RawLIR(current_dalvik_offset_, kPseudoSuspendTarget, WrapPointer(target),
+             current_dalvik_offset_);
   FlushAllRegs();
   OpUnconditionalBranch(launch_pad);
   suspend_launchpads_.Insert(launch_pad);
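
LowestSetBit, retyped above, backs the power-of-two strength reductions in this
file. Its contract is easiest to see with a few concrete values (illustrative only;
the argument must be nonzero, and the div/rem rewrites additionally require a
power of two):

    DCHECK_EQ(LowestSetBit(1u), 0);             // v / 1 stays v.
    DCHECK_EQ(LowestSetBit(8u), 3);             // v / 8 ==> v >> 3 (plus sign fix-up); v % 8 ==> v & 7.
    DCHECK_EQ(LowestSetBit(0x80000000u), 31);
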
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 0a0cc17..3def7f5 100644
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -908,7 +908,7 @@
     LoadWordDisp(rl_obj.low_reg, value_offset, reg_ptr);
     if (range_check) {
      // Set up a launch pad to allow retry in case of bounds violation
-      launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
+      launch_pad = RawLIR(0, kPseudoIntrinsicRetry, WrapPointer(info));
       intrinsic_launchpads_.Insert(launch_pad);
       OpRegReg(kOpCmp, rl_idx.low_reg, reg_max);
       FreeTemp(reg_max);
@@ -919,7 +919,7 @@
       reg_max = AllocTemp();
       LoadWordDisp(rl_obj.low_reg, count_offset, reg_max);
      // Set up a launch pad to allow retry in case of bounds violation
-      launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
+      launch_pad = RawLIR(0, kPseudoIntrinsicRetry, WrapPointer(info));
       intrinsic_launchpads_.Insert(launch_pad);
       OpRegReg(kOpCmp, rl_idx.low_reg, reg_max);
       FreeTemp(reg_max);
@@ -1085,7 +1085,7 @@
   }
   int r_tgt = (cu_->instruction_set != kX86) ? LoadHelper(QUICK_ENTRYPOINT_OFFSET(pIndexOf)) : 0;
   GenNullCheck(rl_obj.s_reg_low, reg_ptr, info->opt_flags);
-  LIR* launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
+  LIR* launch_pad = RawLIR(0, kPseudoIntrinsicRetry, WrapPointer(info));
   intrinsic_launchpads_.Insert(launch_pad);
   OpCmpImmBranch(kCondGt, reg_char, 0xFFFF, launch_pad);
   // NOTE: not a safepoint
@@ -1095,7 +1095,7 @@
     OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(pIndexOf));
   }
   LIR* resume_tgt = NewLIR0(kPseudoTargetLabel);
-  launch_pad->operands[2] = reinterpret_cast<uintptr_t>(resume_tgt);
+  launch_pad->operands[2] = WrapPointer(resume_tgt);
   // Record that we've already inlined & null checked
   info->opt_flags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
   RegLocation rl_return = GetReturn(false);
@@ -1123,7 +1123,7 @@
       LoadHelper(QUICK_ENTRYPOINT_OFFSET(pStringCompareTo)) : 0;
   GenNullCheck(rl_this.s_reg_low, reg_this, info->opt_flags);
   // TUNING: check if rl_cmp.s_reg_low is already null checked
-  LIR* launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
+  LIR* launch_pad = RawLIR(0, kPseudoIntrinsicRetry, WrapPointer(info));
   intrinsic_launchpads_.Insert(launch_pad);
   OpCmpImmBranch(kCondEq, reg_cmp, 0, launch_pad);
   // NOTE: not a safepoint
diff --git a/compiler/dex/quick/mips/assemble_mips.cc b/compiler/dex/quick/mips/assemble_mips.cc
index 6bfccfd..ea8b7a6 100644
--- a/compiler/dex/quick/mips/assemble_mips.cc
+++ b/compiler/dex/quick/mips/assemble_mips.cc
@@ -489,12 +489,12 @@
   LIR* curr_pc = RawLIR(dalvik_offset, kMipsCurrPC);
   InsertLIRBefore(lir, curr_pc);
   LIR* anchor = RawLIR(dalvik_offset, kPseudoTargetLabel);
-  LIR* delta_hi = RawLIR(dalvik_offset, kMipsDeltaHi, r_AT, 0,
-                        reinterpret_cast<uintptr_t>(anchor), 0, 0, lir->target);
+  LIR* delta_hi = RawLIR(dalvik_offset, kMipsDeltaHi, r_AT, 0, WrapPointer(anchor), 0, 0,
+                         lir->target);
   InsertLIRBefore(lir, delta_hi);
   InsertLIRBefore(lir, anchor);
-  LIR* delta_lo = RawLIR(dalvik_offset, kMipsDeltaLo, r_AT, 0,
-                        reinterpret_cast<uintptr_t>(anchor), 0, 0, lir->target);
+  LIR* delta_lo = RawLIR(dalvik_offset, kMipsDeltaLo, r_AT, 0, WrapPointer(anchor), 0, 0,
+                         lir->target);
   InsertLIRBefore(lir, delta_lo);
   LIR* addu = RawLIR(dalvik_offset, kMipsAddu, r_AT, r_AT, r_RA);
   InsertLIRBefore(lir, addu);
@@ -512,7 +512,7 @@
  * instruction.  In those cases we will try to substitute a new code
  * sequence or request that the trace be shortened and retried.
  */
-AssemblerStatus MipsMir2Lir::AssembleInstructions(uintptr_t start_addr) {
+AssemblerStatus MipsMir2Lir::AssembleInstructions(CodeOffset start_addr) {
   LIR *lir;
   AssemblerStatus res = kSuccess;  // Assume success
 
@@ -538,8 +538,8 @@
          * and is found in lir->target.  If operands[3] is non-NULL,
          * then it is a Switch/Data table.
          */
-        int offset1 = (reinterpret_cast<LIR*>(lir->operands[2]))->offset;
-        SwitchTable *tab_rec = reinterpret_cast<SwitchTable*>(lir->operands[3]);
+        int offset1 = (reinterpret_cast<LIR*>(UnwrapPointer(lir->operands[2])))->offset;
+        EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(lir->operands[3]));
         int offset2 = tab_rec ? tab_rec->offset : lir->target->offset;
         int delta = offset2 - offset1;
         if ((delta & 0xffff) == delta && ((delta & 0x8000) == 0)) {
@@ -565,21 +565,21 @@
           res = kRetryAll;
         }
       } else if (lir->opcode == kMipsDeltaLo) {
-        int offset1 = (reinterpret_cast<LIR*>(lir->operands[2]))->offset;
-        SwitchTable *tab_rec = reinterpret_cast<SwitchTable*>(lir->operands[3]);
+        int offset1 = (reinterpret_cast<LIR*>(UnwrapPointer(lir->operands[2])))->offset;
+        EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(lir->operands[3]));
         int offset2 = tab_rec ? tab_rec->offset : lir->target->offset;
         int delta = offset2 - offset1;
         lir->operands[1] = delta & 0xffff;
       } else if (lir->opcode == kMipsDeltaHi) {
-        int offset1 = (reinterpret_cast<LIR*>(lir->operands[2]))->offset;
-        SwitchTable *tab_rec = reinterpret_cast<SwitchTable*>(lir->operands[3]);
+        int offset1 = (reinterpret_cast<LIR*>(UnwrapPointer(lir->operands[2])))->offset;
+        EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(lir->operands[3]));
         int offset2 = tab_rec ? tab_rec->offset : lir->target->offset;
         int delta = offset2 - offset1;
         lir->operands[1] = (delta >> 16) & 0xffff;
       } else if (lir->opcode == kMipsB || lir->opcode == kMipsBal) {
         LIR *target_lir = lir->target;
-        uintptr_t pc = lir->offset + 4;
-        uintptr_t target = target_lir->offset;
+        CodeOffset pc = lir->offset + 4;
+        CodeOffset target = target_lir->offset;
         int delta = target - pc;
         if (delta & 0x3) {
           LOG(FATAL) << "PC-rel offset not multiple of 4: " << delta;
@@ -592,8 +592,8 @@
         }
       } else if (lir->opcode >= kMipsBeqz && lir->opcode <= kMipsBnez) {
         LIR *target_lir = lir->target;
-        uintptr_t pc = lir->offset + 4;
-        uintptr_t target = target_lir->offset;
+        CodeOffset pc = lir->offset + 4;
+        CodeOffset target = target_lir->offset;
         int delta = target - pc;
         if (delta & 0x3) {
           LOG(FATAL) << "PC-rel offset not multiple of 4: " << delta;
@@ -606,8 +606,8 @@
         }
       } else if (lir->opcode == kMipsBeq || lir->opcode == kMipsBne) {
         LIR *target_lir = lir->target;
-        uintptr_t pc = lir->offset + 4;
-        uintptr_t target = target_lir->offset;
+        CodeOffset pc = lir->offset + 4;
+        CodeOffset target = target_lir->offset;
         int delta = target - pc;
         if (delta & 0x3) {
           LOG(FATAL) << "PC-rel offset not multiple of 4: " << delta;
@@ -619,8 +619,8 @@
           lir->operands[2] = delta >> 2;
         }
       } else if (lir->opcode == kMipsJal) {
-        uintptr_t cur_pc = (start_addr + lir->offset + 4) & ~3;
-        uintptr_t target = lir->operands[0];
+        CodeOffset cur_pc = (start_addr + lir->offset + 4) & ~3;
+        CodeOffset target = lir->operands[0];
         /* ensure PC-region branch can be used */
         DCHECK_EQ((cur_pc & 0xF0000000), (target & 0xF0000000));
         if (target & 0x3) {
@@ -629,11 +629,11 @@
         lir->operands[0] =  target >> 2;
       } else if (lir->opcode == kMipsLahi) { /* ld address hi (via lui) */
         LIR *target_lir = lir->target;
-        uintptr_t target = start_addr + target_lir->offset;
+        CodeOffset target = start_addr + target_lir->offset;
         lir->operands[1] = target >> 16;
       } else if (lir->opcode == kMipsLalo) { /* ld address lo (via ori) */
         LIR *target_lir = lir->target;
-        uintptr_t target = start_addr + target_lir->offset;
+        CodeOffset target = start_addr + target_lir->offset;
         lir->operands[2] = lir->operands[2] + target;
       }
     }
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index 9a5ca2c..18c8cf8 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -59,14 +59,14 @@
  * done:
  *
  */
-void MipsMir2Lir::GenSparseSwitch(MIR* mir, uint32_t table_offset,
+void MipsMir2Lir::GenSparseSwitch(MIR* mir, DexOffset table_offset,
                                   RegLocation rl_src) {
   const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
   if (cu_->verbose) {
     DumpSparseSwitchTable(table);
   }
   // Add the table to the list - we'll process it later
-  SwitchTable *tab_rec =
+  SwitchTable* tab_rec =
       static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), ArenaAllocator::kAllocData));
   tab_rec->table = table;
   tab_rec->vaddr = current_dalvik_offset_;
@@ -101,8 +101,7 @@
   // Remember base label so offsets can be computed later
   tab_rec->anchor = base_label;
   int rBase = AllocTemp();
-  NewLIR4(kMipsDelta, rBase, 0, reinterpret_cast<uintptr_t>(base_label),
-          reinterpret_cast<uintptr_t>(tab_rec));
+  NewLIR4(kMipsDelta, rBase, 0, WrapPointer(base_label), WrapPointer(tab_rec));
   OpRegRegReg(kOpAdd, rEnd, rEnd, rBase);
 
   // Grab switch test value
@@ -138,20 +137,20 @@
  *   jr    r_RA
  * done:
  */
-void MipsMir2Lir::GenPackedSwitch(MIR* mir, uint32_t table_offset,
+void MipsMir2Lir::GenPackedSwitch(MIR* mir, DexOffset table_offset,
                                   RegLocation rl_src) {
   const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
   if (cu_->verbose) {
     DumpPackedSwitchTable(table);
   }
   // Add the table to the list - we'll process it later
-  SwitchTable *tab_rec =
+  SwitchTable* tab_rec =
       static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), ArenaAllocator::kAllocData));
   tab_rec->table = table;
   tab_rec->vaddr = current_dalvik_offset_;
   int size = table[1];
   tab_rec->targets = static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*),
-                                                       ArenaAllocator::kAllocLIR));
+                                                      ArenaAllocator::kAllocLIR));
   switch_tables_.Insert(tab_rec);
 
   // Get the switch value
@@ -196,8 +195,7 @@
 
   // Materialize the table base pointer
   int rBase = AllocTemp();
-  NewLIR4(kMipsDelta, rBase, 0, reinterpret_cast<uintptr_t>(base_label),
-          reinterpret_cast<uintptr_t>(tab_rec));
+  NewLIR4(kMipsDelta, rBase, 0, WrapPointer(base_label), WrapPointer(tab_rec));
 
   // Load the displacement from the switch table
   int r_disp = AllocTemp();
@@ -222,10 +220,10 @@
  *
  * Total size is 4+(width * size + 1)/2 16-bit code units.
  */
-void MipsMir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src) {
+void MipsMir2Lir::GenFillArrayData(DexOffset table_offset, RegLocation rl_src) {
   const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
   // Add the table to the list - we'll process it later
-  FillArrayData *tab_rec =
+  FillArrayData* tab_rec =
       reinterpret_cast<FillArrayData*>(arena_->Alloc(sizeof(FillArrayData),
                                                      ArenaAllocator::kAllocData));
   tab_rec->table = table;
@@ -252,8 +250,7 @@
   LIR* base_label = NewLIR0(kPseudoTargetLabel);
 
   // Materialize a pointer to the fill data image
-  NewLIR4(kMipsDelta, rMIPS_ARG1, 0, reinterpret_cast<uintptr_t>(base_label),
-          reinterpret_cast<uintptr_t>(tab_rec));
+  NewLIR4(kMipsDelta, rMIPS_ARG1, 0, WrapPointer(base_label), WrapPointer(tab_rec));
 
   // And go...
   ClobberCalleeSave();
diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h
index 387fef3..0be20e8 100644
--- a/compiler/dex/quick/mips/codegen_mips.h
+++ b/compiler/dex/quick/mips/codegen_mips.h
@@ -74,7 +74,7 @@
     void AssembleLIR();
     int AssignInsnOffsets();
     void AssignOffsets();
-    AssemblerStatus AssembleInstructions(uintptr_t start_addr);
+    AssemblerStatus AssembleInstructions(CodeOffset start_addr);
     void DumpResourceMask(LIR* lir, uint64_t mask, const char* prefix);
     void SetupTargetResourceMasks(LIR* lir, uint64_t flags);
     const char* GetTargetInstFmt(int opcode);
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index 5d9ae33..2ba2c84 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -93,7 +93,7 @@
   } else if ((value < 0) && (value >= -32768)) {
     res = NewLIR3(kMipsAddiu, r_dest, r_ZERO, value);
   } else {
-    res = NewLIR2(kMipsLui, r_dest, value>>16);
+    res = NewLIR2(kMipsLui, r_dest, value >> 16);
     if (value & 0xffff)
       NewLIR3(kMipsOri, r_dest, r_dest, value);
   }
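
For reference, the touched LoadConstant path builds a full 32-bit constant the
standard MIPS way: lui sets the upper half, ori merges the lower half. A worked
example, assuming the assembler masks the ori immediate to its low 16 bits (the
untruncated 'value' argument above suggests it does):

    // value = 0x12345678
    //   lui  r_dest, 0x1234          // r_dest = 0x12340000
    //   ori  r_dest, r_dest, 0x5678  // r_dest = 0x12345678
    // Small values take the earlier single-instruction addiu/ori branches instead.
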
diff --git a/compiler/dex/quick/mir_to_lir-inl.h b/compiler/dex/quick/mir_to_lir-inl.h
index f293700..1a30b7a 100644
--- a/compiler/dex/quick/mir_to_lir-inl.h
+++ b/compiler/dex/quick/mir_to_lir-inl.h
@@ -43,7 +43,7 @@
   }
 }
 
-inline LIR* Mir2Lir::RawLIR(int dalvik_offset, int opcode, int op0,
+inline LIR* Mir2Lir::RawLIR(DexOffset dalvik_offset, int opcode, int op0,
                             int op1, int op2, int op3, int op4, LIR* target) {
   LIR* insn = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), ArenaAllocator::kAllocLIR));
   insn->dalvik_offset = dalvik_offset;
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 2b26c3d..197e200 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -241,9 +241,9 @@
     case Instruction::GOTO_16:
     case Instruction::GOTO_32:
       if (mir_graph_->IsBackedge(bb, bb->taken)) {
-        GenSuspendTestAndBranch(opt_flags, &label_list[bb->taken->id]);
+        GenSuspendTestAndBranch(opt_flags, &label_list[bb->taken]);
       } else {
-        OpUnconditionalBranch(&label_list[bb->taken->id]);
+        OpUnconditionalBranch(&label_list[bb->taken]);
       }
       break;
 
@@ -272,23 +272,22 @@
     case Instruction::IF_GE:
     case Instruction::IF_GT:
     case Instruction::IF_LE: {
-      LIR* taken = &label_list[bb->taken->id];
-      LIR* fall_through = &label_list[bb->fall_through->id];
+      LIR* taken = &label_list[bb->taken];
+      LIR* fall_through = &label_list[bb->fall_through];
       // Result known at compile time?
       if (rl_src[0].is_const && rl_src[1].is_const) {
         bool is_taken = EvaluateBranch(opcode, mir_graph_->ConstantValue(rl_src[0].orig_sreg),
                                        mir_graph_->ConstantValue(rl_src[1].orig_sreg));
-        BasicBlock* target = is_taken ? bb->taken : bb->fall_through;
-        if (mir_graph_->IsBackedge(bb, target)) {
+        BasicBlockId target_id = is_taken ? bb->taken : bb->fall_through;
+        if (mir_graph_->IsBackedge(bb, target_id)) {
           GenSuspendTest(opt_flags);
         }
-        OpUnconditionalBranch(&label_list[target->id]);
+        OpUnconditionalBranch(&label_list[target_id]);
       } else {
         if (mir_graph_->IsBackwardsBranch(bb)) {
           GenSuspendTest(opt_flags);
         }
-        GenCompareAndBranch(opcode, rl_src[0], rl_src[1], taken,
-                                fall_through);
+        GenCompareAndBranch(opcode, rl_src[0], rl_src[1], taken, fall_through);
       }
       break;
       }
@@ -299,16 +298,16 @@
     case Instruction::IF_GEZ:
     case Instruction::IF_GTZ:
     case Instruction::IF_LEZ: {
-      LIR* taken = &label_list[bb->taken->id];
-      LIR* fall_through = &label_list[bb->fall_through->id];
+      LIR* taken = &label_list[bb->taken];
+      LIR* fall_through = &label_list[bb->fall_through];
       // Result known at compile time?
       if (rl_src[0].is_const) {
         bool is_taken = EvaluateBranch(opcode, mir_graph_->ConstantValue(rl_src[0].orig_sreg), 0);
-        BasicBlock* target = is_taken ? bb->taken : bb->fall_through;
-        if (mir_graph_->IsBackedge(bb, target)) {
+        BasicBlockId target_id = is_taken ? bb->taken : bb->fall_through;
+        if (mir_graph_->IsBackedge(bb, target_id)) {
           GenSuspendTest(opt_flags);
         }
-        OpUnconditionalBranch(&label_list[target->id]);
+        OpUnconditionalBranch(&label_list[target_id]);
       } else {
         if (mir_graph_->IsBackwardsBranch(bb)) {
           GenSuspendTest(opt_flags);
@@ -831,8 +830,9 @@
   while (curr_bb != NULL) {
     MethodBlockCodeGen(curr_bb);
     // If the fall_through block is no longer laid out consecutively, drop in a branch.
-    if ((curr_bb->fall_through != NULL) && (curr_bb->fall_through != next_bb)) {
-      OpUnconditionalBranch(&block_label_list_[curr_bb->fall_through->id]);
+    BasicBlock* curr_bb_fall_through = mir_graph_->GetBasicBlock(curr_bb->fall_through);
+    if ((curr_bb_fall_through != NULL) && (curr_bb_fall_through != next_bb)) {
+      OpUnconditionalBranch(&block_label_list_[curr_bb->fall_through]);
     }
     curr_bb = next_bb;
     do {
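
The hunks above replace pointer chasing (bb->taken->id) with direct indexing
by 16-bit block id. A minimal model of the scheme (hypothetical types, not
ART code):

  #include <cstdint>
  #include <vector>

  typedef uint16_t BasicBlockId;  // id 0 is the null block (kNullBlock)

  struct Label { uint32_t code_offset; };

  struct Block {
    BasicBlockId id;
    BasicBlockId taken;         // successor ids instead of BasicBlock*
    BasicBlockId fall_through;  // halves the field size on 64-bit hosts
  };

  // Branch targets come straight from the label array, indexed by id.
  Label* TakenLabel(std::vector<Label>& label_list, const Block& bb) {
    return &label_list[bb.taken];  // was &label_list[bb->taken->id]
  }
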
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 5df2672..d629b44 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -30,6 +30,14 @@
 
 namespace art {
 
+/*
+ * TODO: refactoring pass to move these (and other) typedefs towards the usage style of the
+ * runtime to add type safety (see runtime/offsets.h).
+ */
+typedef uint32_t DexOffset;          // Dex offset in code units.
+typedef uint16_t NarrowDexOffset;    // For use in structs; Dex offsets range from 0..0xffff.
+typedef uint32_t CodeOffset;         // Native code offset in bytes.
+
 // Set to 1 to measure cost of suspend check.
 #define NO_SUSPEND 0
 
@@ -119,8 +127,8 @@
 };
 
 struct LIR {
-  int offset;               // Offset of this instruction.
-  uint16_t dalvik_offset;   // Offset of Dalvik opcode in code units (16-bit words).
+  CodeOffset offset;               // Offset of this instruction.
+  NarrowDexOffset dalvik_offset;   // Offset of Dalvik opcode in code units (16-bit words).
   int16_t opcode;
   LIR* next;
   LIR* prev;
@@ -134,10 +142,10 @@
     unsigned int fixup:8;        // Fixup kind.
   } flags;
   union {
-    UseDefMasks m;          // Use & Def masks used during optimization.
-    AssemblyInfo a;         // Instruction encoding used during assembly phase.
+    UseDefMasks m;               // Use & Def masks used during optimization.
+    AssemblyInfo a;              // Instruction encoding used during assembly phase.
   } u;
-  int operands[5];          // [0..4] = [dest, src1, src2, extra, extra2].
+  int32_t operands[5];           // [0..4] = [dest, src1, src2, extra, extra2].
 };
 
 // Target-specific initialization.
@@ -184,19 +192,23 @@
 
 class Mir2Lir : public Backend {
   public:
-    struct SwitchTable {
-      int offset;
-      const uint16_t* table;      // Original dex table.
-      int vaddr;                  // Dalvik offset of switch opcode.
-      LIR* anchor;                // Reference instruction for relative offsets.
-      LIR** targets;              // Array of case targets.
+    /*
+     * Auxiliary information describing the location of data embedded in the Dalvik
+     * byte code stream.
+     */
+    struct EmbeddedData {
+      CodeOffset offset;          // Code offset of data block.
+      const uint16_t* table;      // Original dex data.
+      DexOffset vaddr;            // Dalvik offset of parent opcode.
     };
 
-    struct FillArrayData {
-      int offset;
-      const uint16_t* table;      // Original dex table.
-      int size;
-      int vaddr;                  // Dalvik offset of FILL_ARRAY_DATA opcode.
+    struct FillArrayData : EmbeddedData {
+      int32_t size;
+    };
+
+    struct SwitchTable : EmbeddedData {
+      LIR* anchor;                // Reference instruction for relative offsets.
+      LIR** targets;              // Array of case targets.
     };
 
     /* Static register use counts */
@@ -260,6 +272,34 @@
       return (opcode < 0);
     }
 
+    /*
+     * LIR operands are 32-bit integers.  Sometimes (especially for managing
+     * instructions which require PC-relative fixups), we need the operands to carry
+     * pointers.  To do this, we assign these pointers an index in pointer_storage_, and
+     * hold that index in the operand array.
+     * TUNING: If use of these utilities becomes more common on 32-bit builds, it
+     * may be worth conditionally compiling a set of identity functions here.
+     */
+    uint32_t WrapPointer(void* pointer) {
+      uint32_t res = pointer_storage_.Size();
+      pointer_storage_.Insert(pointer);
+      return res;
+    }
+
+    void* UnwrapPointer(size_t index) {
+      return pointer_storage_.Get(index);
+    }
+
+    // strdup(), but allocates from the arena.
+    char* ArenaStrdup(const char* str) {
+      size_t len = strlen(str) + 1;
+      char* res = reinterpret_cast<char*>(arena_->Alloc(len, ArenaAllocator::kAllocMisc));
+      if (res != NULL) {
+        strncpy(res, str, len);
+      }
+      return res;
+    }
+
     // Shared by all targets - implemented in codegen_util.cc
     void AppendLIR(LIR* lir);
     void InsertLIRBefore(LIR* current_lir, LIR* new_lir);
@@ -277,7 +317,7 @@
     void DumpLIRInsn(LIR* arg, unsigned char* base_addr);
     void DumpPromotionMap();
     void CodegenDump();
-    LIR* RawLIR(int dalvik_offset, int opcode, int op0 = 0, int op1 = 0,
+    LIR* RawLIR(DexOffset dalvik_offset, int opcode, int op0 = 0, int op1 = 0,
                 int op2 = 0, int op3 = 0, int op4 = 0, LIR* target = NULL);
     LIR* NewLIR0(int opcode);
     LIR* NewLIR1(int opcode, int dest);
@@ -292,7 +332,7 @@
     void ProcessSwitchTables();
     void DumpSparseSwitchTable(const uint16_t* table);
     void DumpPackedSwitchTable(const uint16_t* table);
-    void MarkBoundary(int offset, const char* inst_str);
+    void MarkBoundary(DexOffset offset, const char* inst_str);
     void NopLIR(LIR* lir);
     void UnlinkLIR(LIR* lir);
     bool EvaluateBranch(Instruction::Code opcode, int src1, int src2);
@@ -307,12 +347,12 @@
     bool VerifyCatchEntries();
     void CreateMappingTables();
     void CreateNativeGcMap();
-    int AssignLiteralOffset(int offset);
-    int AssignSwitchTablesOffset(int offset);
-    int AssignFillArrayDataOffset(int offset);
-    LIR* InsertCaseLabel(int vaddr, int keyVal);
-    void MarkPackedCaseLabels(Mir2Lir::SwitchTable *tab_rec);
-    void MarkSparseCaseLabels(Mir2Lir::SwitchTable *tab_rec);
+    int AssignLiteralOffset(CodeOffset offset);
+    int AssignSwitchTablesOffset(CodeOffset offset);
+    int AssignFillArrayDataOffset(CodeOffset offset);
+    LIR* InsertCaseLabel(DexOffset vaddr, int keyVal);
+    void MarkPackedCaseLabels(Mir2Lir::SwitchTable* tab_rec);
+    void MarkSparseCaseLabels(Mir2Lir::SwitchTable* tab_rec);
 
     // Shared by all targets - implemented in local_optimizations.cc
     void ConvertMemOpIntoMove(LIR* orig_lir, int dest, int src);
@@ -642,7 +682,7 @@
     virtual void GenEntrySequence(RegLocation* ArgLocs,
                                   RegLocation rl_method) = 0;
     virtual void GenExitSequence() = 0;
-    virtual void GenFillArrayData(uint32_t table_offset,
+    virtual void GenFillArrayData(DexOffset table_offset,
                                   RegLocation rl_src) = 0;
     virtual void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias,
                                      bool is_double) = 0;
@@ -655,9 +695,9 @@
                                                int second_bit) = 0;
     virtual void GenNegDouble(RegLocation rl_dest, RegLocation rl_src) = 0;
     virtual void GenNegFloat(RegLocation rl_dest, RegLocation rl_src) = 0;
-    virtual void GenPackedSwitch(MIR* mir, uint32_t table_offset,
+    virtual void GenPackedSwitch(MIR* mir, DexOffset table_offset,
                                  RegLocation rl_src) = 0;
-    virtual void GenSparseSwitch(MIR* mir, uint32_t table_offset,
+    virtual void GenSparseSwitch(MIR* mir, DexOffset table_offset,
                                  RegLocation rl_src) = 0;
     virtual void GenSpecialCase(BasicBlock* bb, MIR* mir,
                                 SpecialCaseHandler special_case) = 0;
@@ -672,13 +712,10 @@
 
     // Required for target - single operation generators.
     virtual LIR* OpUnconditionalBranch(LIR* target) = 0;
-    virtual LIR* OpCmpBranch(ConditionCode cond, int src1, int src2,
-                             LIR* target) = 0;
-    virtual LIR* OpCmpImmBranch(ConditionCode cond, int reg, int check_value,
-                                LIR* target) = 0;
+    virtual LIR* OpCmpBranch(ConditionCode cond, int src1, int src2, LIR* target) = 0;
+    virtual LIR* OpCmpImmBranch(ConditionCode cond, int reg, int check_value, LIR* target) = 0;
     virtual LIR* OpCondBranch(ConditionCode cc, LIR* target) = 0;
-    virtual LIR* OpDecAndBranch(ConditionCode c_code, int reg,
-                                LIR* target) = 0;
+    virtual LIR* OpDecAndBranch(ConditionCode c_code, int reg, LIR* target) = 0;
     virtual LIR* OpFpRegCopy(int r_dest, int r_src) = 0;
     virtual LIR* OpIT(ConditionCode cond, const char* guide) = 0;
     virtual LIR* OpMem(OpKind op, int rBase, int disp) = 0;
@@ -690,16 +727,13 @@
     virtual LIR* OpRegMem(OpKind op, int r_dest, int rBase, int offset) = 0;
     virtual LIR* OpRegReg(OpKind op, int r_dest_src1, int r_src2) = 0;
     virtual LIR* OpRegRegImm(OpKind op, int r_dest, int r_src1, int value) = 0;
-    virtual LIR* OpRegRegReg(OpKind op, int r_dest, int r_src1,
-                             int r_src2) = 0;
+    virtual LIR* OpRegRegReg(OpKind op, int r_dest, int r_src1, int r_src2) = 0;
     virtual LIR* OpTestSuspend(LIR* target) = 0;
     virtual LIR* OpThreadMem(OpKind op, ThreadOffset thread_offset) = 0;
     virtual LIR* OpVldm(int rBase, int count) = 0;
     virtual LIR* OpVstm(int rBase, int count) = 0;
-    virtual void OpLea(int rBase, int reg1, int reg2, int scale,
-                       int offset) = 0;
-    virtual void OpRegCopyWide(int dest_lo, int dest_hi, int src_lo,
-                               int src_hi) = 0;
+    virtual void OpLea(int rBase, int reg1, int reg2, int scale, int offset) = 0;
+    virtual void OpRegCopyWide(int dest_lo, int dest_hi, int src_lo, int src_hi) = 0;
     virtual void OpTlsCmp(ThreadOffset offset, int val) = 0;
     virtual bool InexpensiveConstantInt(int32_t value) = 0;
     virtual bool InexpensiveConstantFloat(int32_t value) = 0;
@@ -752,6 +786,7 @@
     GrowableArray<LIR*> intrinsic_launchpads_;
     GrowableArray<RegisterInfo*> tempreg_info_;
     GrowableArray<RegisterInfo*> reginfo_map_;
+    GrowableArray<void*> pointer_storage_;
     /*
      * Holds mapping from native PC to dex PC for safepoints where we may deoptimize.
      * Native PC is on the return address of the safepointed operation.  Dex PC is for
@@ -763,9 +798,9 @@
      * immediately precede the instruction.
      */
     std::vector<uint32_t> dex2pc_mapping_table_;
-    int current_code_offset_;             // Working byte offset of machine instructons.
-    int data_offset_;                     // starting offset of literal pool.
-    int total_size_;                      // header + code size.
+    CodeOffset current_code_offset_;    // Working byte offset of machine instructions.
+    CodeOffset data_offset_;            // Starting offset of literal pool.
+    size_t total_size_;                 // Header + code size.
     LIR* block_label_list_;
     PromotionMap* promotion_map_;
     /*
@@ -777,8 +812,8 @@
      * in the CompilationUnit struct before codegen for each instruction.
      * The low-level LIR creation utilities will pull it from here.  Rework this.
      */
-    int current_dalvik_offset_;
-    int estimated_native_code_size_;     // Just an estimate; used to reserve code_buffer_ size.
+    DexOffset current_dalvik_offset_;
+    size_t estimated_native_code_size_;     // Just an estimate; used to reserve code_buffer_ size.
     RegisterPool* reg_pool_;
     /*
      * Sanity checking for the register temp tracking.  The same ssa
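
A standalone model of the pointer-wrapping scheme declared above
(std::vector stands in for GrowableArray; not the ART class itself):

  #include <cstdint>
  #include <vector>

  // Pointers that must travel through 32-bit LIR operand slots are stored
  // in a side table; the operand carries only the table index.
  class PointerTable {
   public:
    uint32_t WrapPointer(void* pointer) {
      uint32_t index = static_cast<uint32_t>(storage_.size());
      storage_.push_back(pointer);
      return index;  // fits a 32-bit operand even on 64-bit hosts
    }
    void* UnwrapPointer(size_t index) { return storage_[index]; }
   private:
    std::vector<void*> storage_;
  };

  int main() {
    PointerTable table;
    int payload = 42;
    uint32_t operand = table.WrapPointer(&payload);   // store in operands[]
    int* back = static_cast<int*>(table.UnwrapPointer(operand));
    return (*back == 42) ? 0 : 1;  // round trip recovers the pointer
  }
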
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index 7927ff9..41a57af 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -66,10 +66,9 @@
   LOG(INFO) << "================================================";
   for (int i = 0; i < num_regs; i++) {
     LOG(INFO) << StringPrintf(
-        "R[%d]: T:%d, U:%d, P:%d, p:%d, LV:%d, D:%d, SR:%d, ST:%x, EN:%x",
+        "R[%d]: T:%d, U:%d, P:%d, p:%d, LV:%d, D:%d, SR:%d",
         p[i].reg, p[i].is_temp, p[i].in_use, p[i].pair, p[i].partner,
-        p[i].live, p[i].dirty, p[i].s_reg, reinterpret_cast<uintptr_t>(p[i].def_start),
-        reinterpret_cast<uintptr_t>(p[i].def_end));
+        p[i].live, p[i].dirty, p[i].s_reg);
   }
   LOG(INFO) << "================================================";
 }
@@ -769,9 +768,9 @@
 
 RegLocation Mir2Lir::EvalLocWide(RegLocation loc, int reg_class, bool update) {
   DCHECK(loc.wide);
-  int new_regs;
-  int low_reg;
-  int high_reg;
+  int32_t new_regs;
+  int32_t low_reg;
+  int32_t high_reg;
 
   loc = UpdateLocWide(loc);
 
diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc
index 064ff31..fb8e75f 100644
--- a/compiler/dex/quick/x86/assemble_x86.cc
+++ b/compiler/dex/quick/x86/assemble_x86.cc
@@ -1090,11 +1090,13 @@
                       int base_or_table, uint8_t index, int scale, int table_or_disp) {
   int disp;
   if (entry->opcode == kX86PcRelLoadRA) {
-    Mir2Lir::SwitchTable *tab_rec = reinterpret_cast<Mir2Lir::SwitchTable*>(table_or_disp);
+    Mir2Lir::EmbeddedData* tab_rec =
+        reinterpret_cast<Mir2Lir::EmbeddedData*>(UnwrapPointer(table_or_disp));
     disp = tab_rec->offset;
   } else {
     DCHECK(entry->opcode == kX86PcRelAdr);
-    Mir2Lir::FillArrayData *tab_rec = reinterpret_cast<Mir2Lir::FillArrayData*>(base_or_table);
+    Mir2Lir::EmbeddedData* tab_rec =
+        reinterpret_cast<Mir2Lir::EmbeddedData*>(UnwrapPointer(base_or_table));
     disp = tab_rec->offset;
   }
   if (entry->skeleton.prefix1 != 0) {
@@ -1161,7 +1163,7 @@
  * instruction.  In those cases we will try to substitute a new code
  * sequence or request that the trace be shortened and retried.
  */
-AssemblerStatus X86Mir2Lir::AssembleInstructions(uintptr_t start_addr) {
+AssemblerStatus X86Mir2Lir::AssembleInstructions(CodeOffset start_addr) {
   LIR *lir;
   AssemblerStatus res = kSuccess;  // Assume success
 
@@ -1181,13 +1183,13 @@
           LIR *target_lir = lir->target;
           DCHECK(target_lir != NULL);
           int delta = 0;
-          uintptr_t pc;
+          CodeOffset pc;
           if (IS_SIMM8(lir->operands[0])) {
             pc = lir->offset + 2 /* opcode + rel8 */;
           } else {
             pc = lir->offset + 6 /* 2 byte opcode + rel32 */;
           }
-          uintptr_t target = target_lir->offset;
+          CodeOffset target = target_lir->offset;
           delta = target - pc;
           if (IS_SIMM8(delta) != IS_SIMM8(lir->operands[0])) {
             if (kVerbosePcFixup) {
@@ -1211,8 +1213,8 @@
         case kX86Jcc32: {
           LIR *target_lir = lir->target;
           DCHECK(target_lir != NULL);
-          uintptr_t pc = lir->offset + 6 /* 2 byte opcode + rel32 */;
-          uintptr_t target = target_lir->offset;
+          CodeOffset pc = lir->offset + 6 /* 2 byte opcode + rel32 */;
+          CodeOffset target = target_lir->offset;
           int delta = target - pc;
           if (kVerbosePcFixup) {
             LOG(INFO) << "Source:";
@@ -1228,13 +1230,13 @@
           LIR *target_lir = lir->target;
           DCHECK(target_lir != NULL);
           int delta = 0;
-          uintptr_t pc;
+          CodeOffset pc;
           if (IS_SIMM8(lir->operands[0])) {
             pc = lir->offset + 2 /* opcode + rel8 */;
           } else {
             pc = lir->offset + 5 /* opcode + rel32 */;
           }
-          uintptr_t target = target_lir->offset;
+          CodeOffset target = target_lir->offset;
           delta = target - pc;
           if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && delta == 0) {
             // Useless branch
@@ -1257,8 +1259,8 @@
         case kX86Jmp32: {
           LIR *target_lir = lir->target;
           DCHECK(target_lir != NULL);
-          uintptr_t pc = lir->offset + 5 /* opcode + rel32 */;
-          uintptr_t target = target_lir->offset;
+          CodeOffset pc = lir->offset + 5 /* opcode + rel32 */;
+          CodeOffset target = target_lir->offset;
           int delta = target - pc;
           lir->operands[0] = delta;
           break;
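
The fixup arithmetic above is the same in every case: the displacement is
measured from the end of the branch instruction, so the encoded length is
added to the branch offset before subtracting. A sketch (hypothetical
helper, not ART code):

  #include <cstdint>

  typedef uint32_t CodeOffset;

  static bool IsSimm8(int32_t v) { return v >= -128 && v <= 127; }

  // Delta for a conditional branch: 2 bytes for jcc rel8, 6 bytes for the
  // two-byte-opcode jcc rel32 form.
  int32_t JccDelta(CodeOffset branch_offset, CodeOffset target_offset,
                   bool short_form) {
    CodeOffset pc = branch_offset + (short_form ? 2 : 6);
    return static_cast<int32_t>(target_offset - pc);
  }

  int main() {
    int32_t delta = JccDelta(0x10, 0x200, /*short_form=*/true);
    // Out of rel8 range: the assembler widens to rel32 and retries the pass.
    return IsSimm8(delta) ? 1 : 0;
  }
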
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index 7fad6f0..17924b0 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -31,15 +31,15 @@
  * The sparse table in the literal pool is an array of <key,displacement>
  * pairs.
  */
-void X86Mir2Lir::GenSparseSwitch(MIR* mir, uint32_t table_offset,
+void X86Mir2Lir::GenSparseSwitch(MIR* mir, DexOffset table_offset,
                                  RegLocation rl_src) {
   const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
   if (cu_->verbose) {
     DumpSparseSwitchTable(table);
   }
   int entries = table[1];
-  const int* keys = reinterpret_cast<const int*>(&table[2]);
-  const int* targets = &keys[entries];
+  const int32_t* keys = reinterpret_cast<const int32_t*>(&table[2]);
+  const int32_t* targets = &keys[entries];
   rl_src = LoadValue(rl_src, kCoreReg);
   for (int i = 0; i < entries; i++) {
     int key = keys[i];
@@ -66,15 +66,15 @@
  * jmp  r_start_of_method
  * done:
  */
-void X86Mir2Lir::GenPackedSwitch(MIR* mir, uint32_t table_offset,
+void X86Mir2Lir::GenPackedSwitch(MIR* mir, DexOffset table_offset,
                                  RegLocation rl_src) {
   const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
   if (cu_->verbose) {
     DumpPackedSwitchTable(table);
   }
   // Add the table to the list - we'll process it later
-  SwitchTable *tab_rec =
-      static_cast<SwitchTable *>(arena_->Alloc(sizeof(SwitchTable), ArenaAllocator::kAllocData));
+  SwitchTable* tab_rec =
+      static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), ArenaAllocator::kAllocData));
   tab_rec->table = table;
   tab_rec->vaddr = current_dalvik_offset_;
   int size = table[1];
@@ -103,8 +103,7 @@
 
   // Load the displacement from the switch table
   int disp_reg = AllocTemp();
-  NewLIR5(kX86PcRelLoadRA, disp_reg, start_of_method_reg, keyReg, 2,
-          reinterpret_cast<uintptr_t>(tab_rec));
+  NewLIR5(kX86PcRelLoadRA, disp_reg, start_of_method_reg, keyReg, 2, WrapPointer(tab_rec));
   // Add displacement to start of method
   OpRegReg(kOpAdd, start_of_method_reg, disp_reg);
   // ..and go!
@@ -126,10 +125,10 @@
  *
  * Total size is 4+(width * size + 1)/2 16-bit code units.
  */
-void X86Mir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src) {
+void X86Mir2Lir::GenFillArrayData(DexOffset table_offset, RegLocation rl_src) {
   const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
   // Add the table to the list - we'll process it later
-  FillArrayData *tab_rec =
+  FillArrayData* tab_rec =
       static_cast<FillArrayData*>(arena_->Alloc(sizeof(FillArrayData), ArenaAllocator::kAllocData));
   tab_rec->table = table;
   tab_rec->vaddr = current_dalvik_offset_;
@@ -144,7 +143,7 @@
   LoadValueDirectFixed(rl_src, rX86_ARG0);
   // Materialize a pointer to the fill data image
   NewLIR1(kX86StartOfMethod, rX86_ARG2);
-  NewLIR2(kX86PcRelAdr, rX86_ARG1, reinterpret_cast<uintptr_t>(tab_rec));
+  NewLIR2(kX86PcRelAdr, rX86_ARG1, WrapPointer(tab_rec));
   NewLIR2(kX86Add32RR, rX86_ARG1, rX86_ARG2);
   CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayData), rX86_ARG0,
                           rX86_ARG1, true);
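
As a worked check of the size comment above ("4+(width * size + 1)/2 16-bit
code units"): 4 units of header plus the raw bytes rounded up to whole code
units. Sketch:

  #include <cstdint>
  #include <cstdio>

  // fill-array-data payload: ident (1 unit) + element width (1 unit) +
  // element count (2 units) = 4 header units, then ceil(bytes / 2) of data.
  uint32_t FillArrayDataCodeUnits(uint32_t element_width_bytes,
                                  uint32_t element_count) {
    return 4 + (element_width_bytes * element_count + 1) / 2;
  }

  int main() {
    printf("%u\n", FillArrayDataCodeUnits(1, 5));  // 5 bytes -> 7 units
    return 0;
  }
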
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index c266e39..b1d95ff 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -74,7 +74,7 @@
     void AssembleLIR();
     int AssignInsnOffsets();
     void AssignOffsets();
-    AssemblerStatus AssembleInstructions(uintptr_t start_addr);
+    AssemblerStatus AssembleInstructions(CodeOffset start_addr);
     void DumpResourceMask(LIR* lir, uint64_t mask, const char* prefix);
     void SetupTargetResourceMasks(LIR* lir, uint64_t flags);
     const char* GetTargetInstFmt(int opcode);
@@ -119,7 +119,7 @@
     void GenDivZeroCheck(int reg_lo, int reg_hi);
     void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method);
     void GenExitSequence();
-    void GenFillArrayData(uint32_t table_offset, RegLocation rl_src);
+    void GenFillArrayData(DexOffset table_offset, RegLocation rl_src);
     void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double);
     void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir);
     void GenSelect(BasicBlock* bb, MIR* mir);
@@ -129,8 +129,8 @@
                                                int lit, int first_bit, int second_bit);
     void GenNegDouble(RegLocation rl_dest, RegLocation rl_src);
     void GenNegFloat(RegLocation rl_dest, RegLocation rl_src);
-    void GenPackedSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
-    void GenSparseSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
+    void GenPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
+    void GenSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
     void GenSpecialCase(BasicBlock* bb, MIR* mir, SpecialCaseHandler special_case);
 
     // Single operation generators.
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
index f736b5e..c9d6bfc 100644
--- a/compiler/dex/quick/x86/fp_x86.cc
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -284,8 +284,8 @@
 
 void X86Mir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias,
                                      bool is_double) {
-  LIR* taken = &block_label_list_[bb->taken->id];
-  LIR* not_taken = &block_label_list_[bb->fall_through->id];
+  LIR* taken = &block_label_list_[bb->taken];
+  LIR* not_taken = &block_label_list_[bb->fall_through];
   LIR* branch = NULL;
   RegLocation rl_src1;
   RegLocation rl_src2;
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index 14f5348..324d975 100644
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -166,7 +166,7 @@
 }
 
 void X86Mir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
-  LIR* taken = &block_label_list_[bb->taken->id];
+  LIR* taken = &block_label_list_[bb->taken];
   RegLocation rl_src1 = mir_graph_->GetSrcWide(mir, 0);
   RegLocation rl_src2 = mir_graph_->GetSrcWide(mir, 2);
   FlushAllRegs();
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 0f005da..901ac9e 100644
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -223,7 +223,7 @@
             buf += StringPrintf("%d", operand);
             break;
           case 'p': {
-            SwitchTable *tab_rec = reinterpret_cast<SwitchTable*>(operand);
+            EmbeddedData* tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(operand));
             buf += StringPrintf("0x%08x", tab_rec->offset);
             break;
           }
@@ -238,7 +238,7 @@
             break;
           case 't':
             buf += StringPrintf("0x%08x (L%p)",
-                                reinterpret_cast<uint32_t>(base_addr)
+                                reinterpret_cast<uintptr_t>(base_addr)
                                 + lir->offset + operand, lir->target);
             break;
           default:
diff --git a/compiler/dex/ssa_transformation.cc b/compiler/dex/ssa_transformation.cc
index 0ca5fd4..eb0d412 100644
--- a/compiler/dex/ssa_transformation.cc
+++ b/compiler/dex/ssa_transformation.cc
@@ -38,18 +38,18 @@
 }
 
 BasicBlock* MIRGraph::NextUnvisitedSuccessor(BasicBlock* bb) {
-  BasicBlock* res = NeedsVisit(bb->fall_through);
+  BasicBlock* res = NeedsVisit(GetBasicBlock(bb->fall_through));
   if (res == NULL) {
-    res = NeedsVisit(bb->taken);
+    res = NeedsVisit(GetBasicBlock(bb->taken));
     if (res == NULL) {
-      if (bb->successor_block_list.block_list_type != kNotUsed) {
-        GrowableArray<SuccessorBlockInfo*>::Iterator iterator(bb->successor_block_list.blocks);
+      if (bb->successor_block_list_type != kNotUsed) {
+        GrowableArray<SuccessorBlockInfo*>::Iterator iterator(bb->successor_blocks);
         while (true) {
           SuccessorBlockInfo *sbi = iterator.Next();
           if (sbi == NULL) {
             break;
           }
-          res = NeedsVisit(sbi->block);
+          res = NeedsVisit(GetBasicBlock(sbi->block));
           if (res != NULL) {
             break;
           }
@@ -63,7 +63,9 @@
 void MIRGraph::MarkPreOrder(BasicBlock* block) {
   block->visited = true;
   /* Enqueue the pre_order block id */
-  dfs_order_->Insert(block->id);
+  if (block->id != NullBasicBlockId) {
+    dfs_order_->Insert(block->id);
+  }
 }
 
 void MIRGraph::RecordDFSOrders(BasicBlock* block) {
@@ -79,7 +81,9 @@
       continue;
     }
     curr->dfs_id = dfs_post_order_->Size();
-    dfs_post_order_->Insert(curr->id);
+    if (curr->id != NullBasicBlockId) {
+      dfs_post_order_->Insert(curr->id);
+    }
     succ.pop_back();
   }
 }
@@ -88,7 +92,8 @@
 void MIRGraph::ComputeDFSOrders() {
   /* Initialize or reset the DFS pre_order list */
   if (dfs_order_ == NULL) {
-    dfs_order_ = new (arena_) GrowableArray<int>(arena_, GetNumBlocks(), kGrowableArrayDfsOrder);
+    dfs_order_ = new (arena_) GrowableArray<BasicBlockId>(arena_, GetNumBlocks(),
+                                                          kGrowableArrayDfsOrder);
   } else {
     /* Just reset the used length on the counter */
     dfs_order_->Reset();
@@ -96,7 +101,8 @@
 
   /* Initialize or reset the DFS post_order list */
   if (dfs_post_order_ == NULL) {
-    dfs_post_order_ = new (arena_) GrowableArray<int>(arena_, GetNumBlocks(), kGrowableArrayDfsPostOrder);
+    dfs_post_order_ = new (arena_) GrowableArray<BasicBlockId>(arena_, GetNumBlocks(),
+                                                               kGrowableArrayDfsPostOrder);
   } else {
     /* Just reset the used length on the counter */
     dfs_post_order_->Reset();
@@ -169,7 +175,7 @@
   if (dom_post_order_traversal_ == NULL) {
     // First time - create the array.
     dom_post_order_traversal_ =
-        new (arena_) GrowableArray<int>(arena_, num_reachable_blocks_,
-                                        kGrowableArrayDomPostOrderTraversal);
+        new (arena_) GrowableArray<BasicBlockId>(arena_, num_reachable_blocks_,
+                                                 kGrowableArrayDomPostOrderTraversal);
   } else {
     dom_post_order_traversal_->Reset();
@@ -193,11 +199,13 @@
           std::make_pair(new_bb, new (arena_) ArenaBitVector::Iterator(new_bb->i_dominated)));
     } else {
       // no successor/next
-      dom_post_order_traversal_->Insert(curr_bb->id);
+      if (curr_bb->id != NullBasicBlockId) {
+        dom_post_order_traversal_->Insert(curr_bb->id);
+      }
       work_stack.pop_back();
 
       /* hacky loop detection */
-      if (curr_bb->taken && curr_bb->dominators->IsBitSet(curr_bb->taken->id)) {
+      if ((curr_bb->taken != NullBasicBlockId) && curr_bb->dominators->IsBitSet(curr_bb->taken)) {
         attributes_ |= METHOD_HAS_LOOP;
       }
     }
@@ -210,7 +218,7 @@
    * TODO - evaluate whether phi will ever need to be inserted into exit
    * blocks.
    */
-  if (succ_bb->i_dom != dom_bb &&
+  if (succ_bb->i_dom != dom_bb->id &&
     succ_bb->block_type == kDalvikByteCode &&
     succ_bb->hidden == false) {
     dom_bb->dom_frontier->SetBit(succ_bb->id);
@@ -220,20 +228,20 @@
 /* Worker function to compute the dominance frontier */
 bool MIRGraph::ComputeDominanceFrontier(BasicBlock* bb) {
   /* Calculate DF_local */
-  if (bb->taken) {
-    CheckForDominanceFrontier(bb, bb->taken);
+  if (bb->taken != NullBasicBlockId) {
+    CheckForDominanceFrontier(bb, GetBasicBlock(bb->taken));
   }
-  if (bb->fall_through) {
-    CheckForDominanceFrontier(bb, bb->fall_through);
+  if (bb->fall_through != NullBasicBlockId) {
+    CheckForDominanceFrontier(bb, GetBasicBlock(bb->fall_through));
   }
-  if (bb->successor_block_list.block_list_type != kNotUsed) {
-    GrowableArray<SuccessorBlockInfo*>::Iterator iterator(bb->successor_block_list.blocks);
+  if (bb->successor_block_list_type != kNotUsed) {
+    GrowableArray<SuccessorBlockInfo*>::Iterator iterator(bb->successor_blocks);
       while (true) {
         SuccessorBlockInfo *successor_block_info = iterator.Next();
         if (successor_block_info == NULL) {
           break;
         }
-        BasicBlock* succ_bb = successor_block_info->block;
+        BasicBlock* succ_bb = GetBasicBlock(successor_block_info->block);
         CheckForDominanceFrontier(bb, succ_bb);
       }
   }
@@ -306,17 +314,17 @@
 /* Worker function to compute each block's immediate dominator */
 bool MIRGraph::ComputeblockIDom(BasicBlock* bb) {
   /* Special-case entry block */
-  if (bb == GetEntryBlock()) {
+  if ((bb->id == NullBasicBlockId) || (bb == GetEntryBlock())) {
     return false;
   }
 
   /* Iterate through the predecessors */
-  GrowableArray<BasicBlock*>::Iterator iter(bb->predecessors);
+  GrowableArray<BasicBlockId>::Iterator iter(bb->predecessors);
 
   /* Find the first processed predecessor */
   int idom = -1;
   while (true) {
-    BasicBlock* pred_bb = iter.Next();
+    BasicBlock* pred_bb = GetBasicBlock(iter.Next());
     CHECK(pred_bb != NULL);
     if (i_dom_list_[pred_bb->dfs_id] != NOTVISITED) {
       idom = pred_bb->dfs_id;
@@ -326,7 +334,7 @@
 
   /* Scan the rest of the predecessors */
   while (true) {
-      BasicBlock* pred_bb = iter.Next();
+      BasicBlock* pred_bb = GetBasicBlock(iter.Next());
       if (!pred_bb) {
         break;
       }
@@ -352,7 +360,7 @@
   if (bb == GetEntryBlock()) {
     bb->dominators->ClearAllBits();
   } else {
-    bb->dominators->Copy(bb->i_dom->dominators);
+    bb->dominators->Copy(GetBasicBlock(bb->i_dom)->dominators);
   }
   bb->dominators->SetBit(bb->id);
   return false;
@@ -364,7 +372,7 @@
     DCHECK_NE(idom_dfs_idx, NOTVISITED);
     int i_dom_idx = dfs_post_order_->Get(idom_dfs_idx);
     BasicBlock* i_dom = GetBasicBlock(i_dom_idx);
-    bb->i_dom = i_dom;
+    bb->i_dom = i_dom->id;
     /* Add bb to the i_dominated set of the immediate dominator block */
     i_dom->i_dominated->SetBit(bb->id);
   }
@@ -412,7 +420,7 @@
   } else {
     temp_block_v_->ClearAllBits();
   }
-  GetEntryBlock()->i_dom = NULL;
+  GetEntryBlock()->i_dom = 0;
 
   PreOrderDfsIterator iter3(this);
   for (BasicBlock* bb = iter3.Next(); bb != NULL; bb = iter3.Next()) {
@@ -463,20 +471,22 @@
     return false;
   }
   temp_dalvik_register_v->Copy(bb->data_flow_info->live_in_v);
-  if (bb->taken && bb->taken->data_flow_info)
-    ComputeSuccLineIn(temp_dalvik_register_v, bb->taken->data_flow_info->live_in_v,
+  BasicBlock* bb_taken = GetBasicBlock(bb->taken);
+  BasicBlock* bb_fall_through = GetBasicBlock(bb->fall_through);
+  if (bb_taken && bb_taken->data_flow_info)
+    ComputeSuccLineIn(temp_dalvik_register_v, bb_taken->data_flow_info->live_in_v,
                       bb->data_flow_info->def_v);
-  if (bb->fall_through && bb->fall_through->data_flow_info)
-    ComputeSuccLineIn(temp_dalvik_register_v, bb->fall_through->data_flow_info->live_in_v,
+  if (bb_fall_through && bb_fall_through->data_flow_info)
+    ComputeSuccLineIn(temp_dalvik_register_v, bb_fall_through->data_flow_info->live_in_v,
                       bb->data_flow_info->def_v);
-  if (bb->successor_block_list.block_list_type != kNotUsed) {
-    GrowableArray<SuccessorBlockInfo*>::Iterator iterator(bb->successor_block_list.blocks);
+  if (bb->successor_block_list_type != kNotUsed) {
+    GrowableArray<SuccessorBlockInfo*>::Iterator iterator(bb->successor_blocks);
     while (true) {
       SuccessorBlockInfo *successor_block_info = iterator.Next();
       if (successor_block_info == NULL) {
         break;
       }
-      BasicBlock* succ_bb = successor_block_info->block;
+      BasicBlock* succ_bb = GetBasicBlock(successor_block_info->block);
       if (succ_bb->data_flow_info) {
         ComputeSuccLineIn(temp_dalvik_register_v, succ_bb->data_flow_info->live_in_v,
                           bb->data_flow_info->def_v);
@@ -579,50 +589,37 @@
  * predecessor blocks
  */
 bool MIRGraph::InsertPhiNodeOperands(BasicBlock* bb) {
-  MIR *mir;
-  std::vector<int> uses;
-  std::vector<int> incoming_arc;
-
   /* Phi nodes are at the beginning of each block */
-  for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+  for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
     if (mir->dalvikInsn.opcode != static_cast<Instruction::Code>(kMirOpPhi))
       return true;
     int ssa_reg = mir->ssa_rep->defs[0];
     DCHECK_GE(ssa_reg, 0);   // Shouldn't see compiler temps here
     int v_reg = SRegToVReg(ssa_reg);
 
-    uses.clear();
-    incoming_arc.clear();
-
     /* Iterate through the predecessors */
-    GrowableArray<BasicBlock*>::Iterator iter(bb->predecessors);
+    GrowableArray<BasicBlockId>::Iterator iter(bb->predecessors);
+    size_t num_uses = bb->predecessors->Size();
+    mir->ssa_rep->num_uses = num_uses;
+    int* uses = static_cast<int*>(arena_->Alloc(sizeof(int) * num_uses,
+                                                ArenaAllocator::kAllocDFInfo));
+    mir->ssa_rep->uses = uses;
+    mir->ssa_rep->fp_use =
+        static_cast<bool*>(arena_->Alloc(sizeof(bool) * num_uses, ArenaAllocator::kAllocDFInfo));
+    BasicBlockId* incoming =
+        static_cast<BasicBlockId*>(arena_->Alloc(sizeof(BasicBlockId) * num_uses,
+                                                 ArenaAllocator::kAllocDFInfo));
+    mir->meta.phi_incoming = incoming;
+    int idx = 0;
     while (true) {
-      BasicBlock* pred_bb = iter.Next();
+      BasicBlock* pred_bb = GetBasicBlock(iter.Next());
       if (!pred_bb) {
         break;
       }
       int ssa_reg = pred_bb->data_flow_info->vreg_to_ssa_map[v_reg];
-      uses.push_back(ssa_reg);
-      incoming_arc.push_back(pred_bb->id);
-    }
-
-    /* Count the number of SSA registers for a Dalvik register */
-    int num_uses = uses.size();
-    mir->ssa_rep->num_uses = num_uses;
-    mir->ssa_rep->uses =
-        static_cast<int*>(arena_->Alloc(sizeof(int) * num_uses, ArenaAllocator::kAllocDFInfo));
-    mir->ssa_rep->fp_use =
-        static_cast<bool*>(arena_->Alloc(sizeof(bool) * num_uses, ArenaAllocator::kAllocDFInfo));
-    int* incoming =
-        static_cast<int*>(arena_->Alloc(sizeof(int) * num_uses, ArenaAllocator::kAllocDFInfo));
-    // TODO: Ugly, rework (but don't burden each MIR/LIR for Phi-only needs)
-    mir->dalvikInsn.vB = reinterpret_cast<uintptr_t>(incoming);
-
-    /* Set the uses array for the phi node */
-    int *use_ptr = mir->ssa_rep->uses;
-    for (int i = 0; i < num_uses; i++) {
-      *use_ptr++ = uses[i];
-      *incoming++ = incoming_arc[i];
+      uses[idx] = ssa_reg;
+      incoming[idx] = pred_bb->id;
+      idx++;
     }
   }
 
@@ -644,24 +641,24 @@
       static_cast<int*>(arena_->Alloc(map_size, ArenaAllocator::kAllocDalvikToSSAMap));
   memcpy(saved_ssa_map, vreg_to_ssa_map_, map_size);
 
-  if (block->fall_through) {
-    DoDFSPreOrderSSARename(block->fall_through);
+  if (block->fall_through != NullBasicBlockId) {
+    DoDFSPreOrderSSARename(GetBasicBlock(block->fall_through));
     /* Restore SSA map snapshot */
     memcpy(vreg_to_ssa_map_, saved_ssa_map, map_size);
   }
-  if (block->taken) {
-    DoDFSPreOrderSSARename(block->taken);
+  if (block->taken != NullBasicBlockId) {
+    DoDFSPreOrderSSARename(GetBasicBlock(block->taken));
     /* Restore SSA map snapshot */
     memcpy(vreg_to_ssa_map_, saved_ssa_map, map_size);
   }
-  if (block->successor_block_list.block_list_type != kNotUsed) {
-    GrowableArray<SuccessorBlockInfo*>::Iterator iterator(block->successor_block_list.blocks);
+  if (block->successor_block_list_type != kNotUsed) {
+    GrowableArray<SuccessorBlockInfo*>::Iterator iterator(block->successor_blocks);
     while (true) {
       SuccessorBlockInfo *successor_block_info = iterator.Next();
       if (successor_block_info == NULL) {
         break;
       }
-      BasicBlock* succ_bb = successor_block_info->block;
+      BasicBlock* succ_bb = GetBasicBlock(successor_block_info->block);
       DoDFSPreOrderSSARename(succ_bb);
       /* Restore SSA map snapshot */
       memcpy(vreg_to_ssa_map_, saved_ssa_map, map_size);
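
A compact model of the rewritten phi wiring above (hypothetical types, not
ART code): the operand arrays are sized from the predecessor count up front,
so the temporary std::vectors disappear and the incoming block ids land in
mir->meta.phi_incoming rather than being smuggled through dalvikInsn.vB.

  #include <cstdint>
  #include <vector>

  typedef uint16_t BasicBlockId;

  struct Phi {
    std::vector<int> uses;               // SSA name per predecessor edge
    std::vector<BasicBlockId> incoming;  // matching predecessor block id
  };

  // One pass over the predecessors fills both arrays in lockstep.
  Phi WirePhi(const std::vector<BasicBlockId>& preds,
              const std::vector<std::vector<int>>& vreg_to_ssa,  // per block
              int v_reg) {
    Phi phi;
    phi.uses.resize(preds.size());
    phi.incoming.resize(preds.size());
    for (size_t idx = 0; idx < preds.size(); ++idx) {
      phi.uses[idx] = vreg_to_ssa[preds[idx]][v_reg];  // name on that edge
      phi.incoming[idx] = preds[idx];
    }
    return phi;
  }
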